all: blindly swap out glog to our log15, logs need rework

pull/3696/head
Péter Szilágyi 8 years ago
parent 47af53f9aa
commit d4fd06c3dc
No known key found for this signature in database
GPG Key ID: E9AE538CEDF8293D
  1. 7
      accounts/abi/bind/util.go
  2. 17
      accounts/keystore/account_cache.go
  3. 10
      accounts/keystore/watch.go
  4. 32
      accounts/usbwallet/ledger_wallet.go
  5. 2
      build/update-license.go
  6. 33
      cmd/bootnode/main.go
  7. 26
      cmd/ethtest/main.go
  8. 7
      cmd/evm/main.go
  9. 54
      cmd/geth/accountcmd.go
  10. 43
      cmd/geth/chaincmd.go
  11. 16
      cmd/geth/consolecmd.go
  12. 25
      cmd/geth/main.go
  13. 5
      cmd/geth/misccmd.go
  14. 19
      cmd/geth/monitorcmd.go
  15. 21
      cmd/gethrpctest/main.go
  16. 27
      cmd/swarm/main.go
  17. 91
      cmd/swarm/manifest.go
  18. 4
      cmd/swarm/upload.go
  19. 25
      cmd/utils/cmd.go
  20. 49
      cmd/utils/flags.go
  21. 44
      cmd/wnode/main.go
  22. 5
      console/bridge.go
  23. 23
      contracts/chequebook/cheque.go
  24. 19
      contracts/release/release.go
  25. 4
      core/block_validator.go
  26. 104
      core/blockchain.go
  27. 49
      core/database_util.go
  28. 5
      core/genesis.go
  29. 23
      core/headerchain.go
  30. 21
      core/state/state_object.go
  31. 13
      core/state/statedb.go
  32. 6
      core/state_processor.go
  33. 6
      core/state_transition.go
  34. 59
      core/tx_pool.go
  35. 8
      core/vm/contracts.go
  36. 17
      core/vm/interpreter.go
  37. 12
      errs/errors.go
  38. 5
      eth/api.go
  39. 33
      eth/backend.go
  40. 7
      eth/bad_block.go
  41. 25
      eth/db_upgrade.go
  42. 155
      eth/downloader/downloader.go
  43. 25
      eth/downloader/queue.go
  44. 67
      eth/fetcher/fetcher.go
  45. 6
      eth/gasprice/gasprice.go
  46. 53
      eth/handler.go
  47. 15
      eth/peer.go
  48. 3
      eth/protocol_test.go
  49. 10
      eth/sync.go
  50. 24
      ethdb/database.go
  51. 41
      ethstats/ethstats.go
  52. 29
      internal/debug/api.go
  53. 28
      internal/debug/flags.go
  54. 8
      internal/debug/trace.go
  55. 33
      internal/ethapi/api.go
  56. 5
      les/backend.go
  57. 38
      les/fetcher.go
  58. 71
      les/handler.go
  59. 8
      les/odr.go
  60. 76
      les/odr_requests.go
  61. 19
      les/peer.go
  62. 8
      les/server.go
  63. 20
      les/serverpool.go
  64. 34
      light/lightchain.go
  65. 6
      light/odr_util.go
  66. 10
      light/state.go
  67. 21
      light/state_object.go
  68. 15
      light/txpool.go
  69. 2
      log/root.go
  70. 191
      logger/glog/LICENSE
  71. 44
      logger/glog/README
  72. 1223
      logger/glog/glog.go
  73. 128
      logger/glog/glog_file.go
  74. 436
      logger/glog/glog_test.go
  75. 27
      logger/verbosity.go
  76. 8
      metrics/metrics.go
  77. 6
      miner/agent.go
  78. 9
      miner/miner.go
  79. 8
      miner/remote_agent.go
  80. 12
      miner/unconfirmed.go
  81. 38
      miner/worker.go
  82. 7
      mobile/init.go
  83. 6
      mobile/logger.go
  84. 17
      node/config.go
  85. 28
      node/node.go
  86. 17
      p2p/dial.go
  87. 14
      p2p/discover/database.go
  88. 13
      p2p/discover/ntp.go
  89. 24
      p2p/discover/table.go
  90. 23
      p2p/discover/udp.go
  91. 12
      p2p/discv5/database.go
  92. 27
      p2p/discv5/net.go
  93. 13
      p2p/discv5/ntp.go
  94. 8
      p2p/discv5/sim_test.go
  95. 17
      p2p/discv5/udp.go
  96. 13
      p2p/nat/nat.go
  97. 19
      p2p/peer.go
  98. 55
      p2p/server.go
  99. 3
      p2p/server_test.go
  100. 39
      rpc/client.go
  101. Some files were not shown because too many files have changed in this diff Show More

@ -22,8 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -39,9 +38,9 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
return receipt, nil return receipt, nil
} }
if err != nil { if err != nil {
glog.V(logger.Detail).Infof("tx %x error: %v", loghash, err) log.Trace(fmt.Sprintf("tx %x error: %v", loghash, err))
} else { } else {
glog.V(logger.Detail).Infof("tx %x not yet mined...", loghash) log.Trace(fmt.Sprintf("tx %x not yet mined...", loghash))
} }
// Wait for the next round. // Wait for the next round.
select { select {

@ -30,8 +30,7 @@ import (
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
// Minimum amount of time between cache reloads. This limit applies if the platform does // Minimum amount of time between cache reloads. This limit applies if the platform does
@ -210,8 +209,8 @@ func (ac *accountCache) close() {
// Callers must hold ac.mu. // Callers must hold ac.mu.
func (ac *accountCache) reload() { func (ac *accountCache) reload() {
accounts, err := ac.scan() accounts, err := ac.scan()
if err != nil && glog.V(logger.Debug) { if err != nil {
glog.Errorf("can't load keys: %v", err) log.Debug(fmt.Sprintf("can't load keys: %v", err))
} }
ac.all = accounts ac.all = accounts
sort.Sort(ac.all) sort.Sort(ac.all)
@ -225,7 +224,7 @@ func (ac *accountCache) reload() {
case ac.notify <- struct{}{}: case ac.notify <- struct{}{}:
default: default:
} }
glog.V(logger.Debug).Infof("reloaded keys, cache has %d accounts", len(ac.all)) log.Debug(fmt.Sprintf("reloaded keys, cache has %d accounts", len(ac.all)))
} }
func (ac *accountCache) scan() ([]accounts.Account, error) { func (ac *accountCache) scan() ([]accounts.Account, error) {
@ -244,12 +243,12 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
for _, fi := range files { for _, fi := range files {
path := filepath.Join(ac.keydir, fi.Name()) path := filepath.Join(ac.keydir, fi.Name())
if skipKeyFile(fi) { if skipKeyFile(fi) {
glog.V(logger.Detail).Infof("ignoring file %s", path) log.Trace(fmt.Sprintf("ignoring file %s", path))
continue continue
} }
fd, err := os.Open(path) fd, err := os.Open(path)
if err != nil { if err != nil {
glog.V(logger.Detail).Infoln(err) log.Trace(fmt.Sprint(err))
continue continue
} }
buf.Reset(fd) buf.Reset(fd)
@ -259,9 +258,9 @@ func (ac *accountCache) scan() ([]accounts.Account, error) {
addr := common.HexToAddress(keyJSON.Address) addr := common.HexToAddress(keyJSON.Address)
switch { switch {
case err != nil: case err != nil:
glog.V(logger.Debug).Infof("can't decode key %s: %v", path, err) log.Debug(fmt.Sprintf("can't decode key %s: %v", path, err))
case (addr == common.Address{}): case (addr == common.Address{}):
glog.V(logger.Debug).Infof("can't decode key %s: missing or zero address", path) log.Debug(fmt.Sprintf("can't decode key %s: missing or zero address", path))
default: default:
addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}}) addrs = append(addrs, accounts.Account{Address: addr, URL: accounts.URL{Scheme: KeyStoreScheme, Path: path}})
} }

@ -19,10 +19,10 @@
package keystore package keystore
import ( import (
"fmt"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/rjeczalik/notify" "github.com/rjeczalik/notify"
) )
@ -67,12 +67,12 @@ func (w *watcher) loop() {
err := notify.Watch(w.ac.keydir, w.ev, notify.All) err := notify.Watch(w.ac.keydir, w.ev, notify.All)
if err != nil { if err != nil {
glog.V(logger.Detail).Infof("can't watch %s: %v", w.ac.keydir, err) log.Trace(fmt.Sprintf("can't watch %s: %v", w.ac.keydir, err))
return return
} }
defer notify.Stop(w.ev) defer notify.Stop(w.ev)
glog.V(logger.Detail).Infof("now watching %s", w.ac.keydir) log.Trace(fmt.Sprintf("now watching %s", w.ac.keydir))
defer glog.V(logger.Detail).Infof("no longer watching %s", w.ac.keydir) defer log.Trace(fmt.Sprintf("no longer watching %s", w.ac.keydir))
w.ac.mu.Lock() w.ac.mu.Lock()
w.running = true w.running = true

@ -34,8 +34,7 @@ import (
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/karalabe/hid" "github.com/karalabe/hid"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -220,8 +219,8 @@ func (w *ledgerWallet) Open(passphrase string) error {
// - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs // - libusb on Windows doesn't support hotplug, so we can't detect USB unplugs
// - communication timeout on the Ledger requires a device power cycle to fix // - communication timeout on the Ledger requires a device power cycle to fix
func (w *ledgerWallet) heartbeat() { func (w *ledgerWallet) heartbeat() {
glog.V(logger.Debug).Infof("%s health-check started", w.url.String()) log.Debug(fmt.Sprintf("%s health-check started", w.url.String()))
defer glog.V(logger.Debug).Infof("%s health-check stopped", w.url.String()) defer log.Debug(fmt.Sprintf("%s health-check stopped", w.url.String()))
// Execute heartbeat checks until termination or error // Execute heartbeat checks until termination or error
var ( var (
@ -260,7 +259,7 @@ func (w *ledgerWallet) heartbeat() {
} }
// In case of error, wait for termination // In case of error, wait for termination
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("%s health-check failed: %v", w.url.String(), err) log.Debug(fmt.Sprintf("%s health-check failed: %v", w.url.String(), err))
errc = <-w.healthQuit errc = <-w.healthQuit
} }
errc <- err errc <- err
@ -348,8 +347,8 @@ func (w *ledgerWallet) Accounts() []accounts.Account {
// selfDerive is an account derivation loop that upon request attempts to find // selfDerive is an account derivation loop that upon request attempts to find
// new non-zero accounts. // new non-zero accounts.
func (w *ledgerWallet) selfDerive() { func (w *ledgerWallet) selfDerive() {
glog.V(logger.Debug).Infof("%s self-derivation started", w.url.String()) log.Debug(fmt.Sprintf("%s self-derivation started", w.url.String()))
defer glog.V(logger.Debug).Infof("%s self-derivation stopped", w.url.String()) defer log.Debug(fmt.Sprintf("%s self-derivation stopped", w.url.String()))
// Execute self-derivations until termination or error // Execute self-derivations until termination or error
var ( var (
@ -394,7 +393,7 @@ func (w *ledgerWallet) selfDerive() {
// Retrieve the next derived Ethereum account // Retrieve the next derived Ethereum account
if nextAddr == (common.Address{}) { if nextAddr == (common.Address{}) {
if nextAddr, err = w.ledgerDerive(nextPath); err != nil { if nextAddr, err = w.ledgerDerive(nextPath); err != nil {
glog.V(logger.Warn).Infof("%s self-derivation failed: %v", w.url.String(), err) log.Warn(fmt.Sprintf("%s self-derivation failed: %v", w.url.String(), err))
break break
} }
} }
@ -405,12 +404,12 @@ func (w *ledgerWallet) selfDerive() {
) )
balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil) balance, err = w.deriveChain.BalanceAt(context, nextAddr, nil)
if err != nil { if err != nil {
glog.V(logger.Warn).Infof("%s self-derivation balance retrieval failed: %v", w.url.String(), err) log.Warn(fmt.Sprintf("%s self-derivation balance retrieval failed: %v", w.url.String(), err))
break break
} }
nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil) nonce, err = w.deriveChain.NonceAt(context, nextAddr, nil)
if err != nil { if err != nil {
glog.V(logger.Warn).Infof("%s self-derivation nonce retrieval failed: %v", w.url.String(), err) log.Warn(fmt.Sprintf("%s self-derivation nonce retrieval failed: %v", w.url.String(), err))
break break
} }
// If the next account is empty, stop self-derivation, but add it nonetheless // If the next account is empty, stop self-derivation, but add it nonetheless
@ -430,7 +429,7 @@ func (w *ledgerWallet) selfDerive() {
// Display a log message to the user for new (or previously empty accounts) // Display a log message to the user for new (or previously empty accounts)
if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) { if _, known := w.paths[nextAddr]; !known || (!empty && nextAddr == w.deriveNextAddr) {
glog.V(logger.Info).Infof("%s discovered %s (balance %22v, nonce %4d) at %s", w.url.String(), nextAddr.Hex(), balance, nonce, path) log.Info(fmt.Sprintf("%s discovered %s (balance %22v, nonce %4d) at %s", w.url.String(), nextAddr.Hex(), balance, nonce, path))
} }
// Fetch the next potential account // Fetch the next potential account
if !empty { if !empty {
@ -469,7 +468,7 @@ func (w *ledgerWallet) selfDerive() {
} }
// In case of error, wait for termination // In case of error, wait for termination
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("%s self-derivation failed: %s", w.url.String(), err) log.Debug(fmt.Sprintf("%s self-derivation failed: %s", w.url.String(), err))
errc = <-w.deriveQuit errc = <-w.deriveQuit
} }
errc <- err errc <- err
@ -849,9 +848,7 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
apdu = nil apdu = nil
} }
// Send over to the device // Send over to the device
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string { return fmt.Sprintf("-> %s: %x", w.device.Path, chunk) }})
glog.Infof("-> %s: %x", w.device.Path, chunk)
}
if _, err := w.device.Write(chunk); err != nil { if _, err := w.device.Write(chunk); err != nil {
return nil, err return nil, err
} }
@ -864,9 +861,8 @@ func (w *ledgerWallet) ledgerExchange(opcode ledgerOpcode, p1 ledgerParam1, p2 l
if _, err := io.ReadFull(w.device, chunk); err != nil { if _, err := io.ReadFull(w.device, chunk); err != nil {
return nil, err return nil, err
} }
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string { return fmt.Sprintf("<- %s: %x", w.device.Path, chunk) }})
glog.Infof("<- %s: %x", w.device.Path, chunk)
}
// Make sure the transport header matches // Make sure the transport header matches
if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 { if chunk[0] != 0x01 || chunk[1] != 0x01 || chunk[2] != 0x05 {
return nil, errReplyInvalidHeader return nil, errReplyInvalidHeader

@ -47,7 +47,7 @@ var (
// boring stuff // boring stuff
"vendor/", "tests/files/", "build/", "vendor/", "tests/files/", "build/",
// don't relicense vendored sources // don't relicense vendored sources
"crypto/sha3/", "crypto/ecies/", "logger/glog/", "crypto/sha3/", "crypto/ecies/", "log/",
"crypto/secp256k1/curve.go", "crypto/secp256k1/curve.go",
// don't license generated files // don't license generated files
"contracts/chequebook/contract/", "contracts/chequebook/contract/",

@ -23,9 +23,8 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
@ -42,39 +41,43 @@ func main() {
natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)") natdesc = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")
netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)") netrestrict = flag.String("netrestrict", "", "restrict network communication to the given IP networks (CIDR masks)")
runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode") runv5 = flag.Bool("v5", false, "run a v5 topic discovery bootnode")
verbosity = flag.Int("verbosity", int(log.LvlInfo), "log verbosity (0-9)")
vmodule = flag.String("vmodule", "", "log verbosity pattern")
nodeKey *ecdsa.PrivateKey nodeKey *ecdsa.PrivateKey
err error err error
) )
flag.Var(glog.GetVerbosity(), "verbosity", "log verbosity (0-9)")
flag.Var(glog.GetVModule(), "vmodule", "log verbosity pattern")
glog.SetToStderr(true)
flag.Parse() flag.Parse()
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
glogger.Verbosity(log.Lvl(*verbosity))
glogger.Vmodule(*vmodule)
log.Root().SetHandler(glogger)
natm, err := nat.Parse(*natdesc) natm, err := nat.Parse(*natdesc)
if err != nil { if err != nil {
utils.Fatalf("-nat: %v", err) log.Crit(fmt.Sprintf("-nat: %v", err))
} }
switch { switch {
case *genKey != "": case *genKey != "":
nodeKey, err = crypto.GenerateKey() nodeKey, err = crypto.GenerateKey()
if err != nil { if err != nil {
utils.Fatalf("could not generate key: %v", err) log.Crit(fmt.Sprintf("could not generate key: %v", err))
} }
if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil { if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
utils.Fatalf("%v", err) log.Crit(fmt.Sprintf("%v", err))
} }
case *nodeKeyFile == "" && *nodeKeyHex == "": case *nodeKeyFile == "" && *nodeKeyHex == "":
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key") log.Crit(fmt.Sprintf("Use -nodekey or -nodekeyhex to specify a private key"))
case *nodeKeyFile != "" && *nodeKeyHex != "": case *nodeKeyFile != "" && *nodeKeyHex != "":
utils.Fatalf("Options -nodekey and -nodekeyhex are mutually exclusive") log.Crit(fmt.Sprintf("Options -nodekey and -nodekeyhex are mutually exclusive"))
case *nodeKeyFile != "": case *nodeKeyFile != "":
if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil { if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil {
utils.Fatalf("-nodekey: %v", err) log.Crit(fmt.Sprintf("-nodekey: %v", err))
} }
case *nodeKeyHex != "": case *nodeKeyHex != "":
if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil { if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil {
utils.Fatalf("-nodekeyhex: %v", err) log.Crit(fmt.Sprintf("-nodekeyhex: %v", err))
} }
} }
@ -87,17 +90,17 @@ func main() {
if *netrestrict != "" { if *netrestrict != "" {
restrictList, err = netutil.ParseNetlist(*netrestrict) restrictList, err = netutil.ParseNetlist(*netrestrict)
if err != nil { if err != nil {
utils.Fatalf("-netrestrict: %v", err) log.Crit(fmt.Sprintf("-netrestrict: %v", err))
} }
} }
if *runv5 { if *runv5 {
if _, err := discv5.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil { if _, err := discv5.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil {
utils.Fatalf("%v", err) log.Crit(fmt.Sprintf("%v", err))
} }
} else { } else {
if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil { if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm, "", restrictList); err != nil {
utils.Fatalf("%v", err) log.Crit(fmt.Sprintf("%v", err))
} }
} }

@ -25,7 +25,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
@ -70,7 +70,7 @@ var (
) )
func runTestWithReader(test string, r io.Reader) error { func runTestWithReader(test string, r io.Reader) error {
glog.Infoln("runTest", test) log.Info(fmt.Sprint("runTest", test))
var err error var err error
switch strings.ToLower(test) { switch strings.ToLower(test) {
case "bk", "block", "blocktest", "blockchaintest", "blocktests", "blockchaintests": case "bk", "block", "blocktest", "blockchaintest", "blocktests", "blockchaintests":
@ -92,7 +92,7 @@ func runTestWithReader(test string, r io.Reader) error {
} }
func getFiles(path string) ([]string, error) { func getFiles(path string) ([]string, error) {
glog.Infoln("getFiles", path) log.Info(fmt.Sprint("getFiles", path))
var files []string var files []string
f, err := os.Open(path) f, err := os.Open(path)
if err != nil { if err != nil {
@ -113,7 +113,7 @@ func getFiles(path string) ([]string, error) {
// only go 1 depth and leave directory entires blank // only go 1 depth and leave directory entires blank
if !v.IsDir() && v.Name()[len(v.Name())-len(testExtension):len(v.Name())] == testExtension { if !v.IsDir() && v.Name()[len(v.Name())-len(testExtension):len(v.Name())] == testExtension {
files[i] = filepath.Join(path, v.Name()) files[i] = filepath.Join(path, v.Name())
glog.Infoln("Found file", files[i]) log.Info(fmt.Sprint("Found file", files[i]))
} }
} }
case mode.IsRegular(): case mode.IsRegular():
@ -134,7 +134,7 @@ func runSuite(test, file string) {
} }
for _, curTest := range tests { for _, curTest := range tests {
glog.Infoln("runSuite", curTest, file) log.Info(fmt.Sprint("runSuite", curTest, file))
var err error var err error
var files []string var files []string
if test == defaultTest { if test == defaultTest {
@ -149,11 +149,11 @@ func runSuite(test, file string) {
files, err = getFiles(file) files, err = getFiles(file)
} }
if err != nil { if err != nil {
glog.Fatalln(err) log.Crit(fmt.Sprint(err))
} }
if len(files) == 0 { if len(files) == 0 {
glog.Warningln("No files matched path") log.Warn("No files matched path")
} }
for _, curFile := range files { for _, curFile := range files {
// Skip blank entries // Skip blank entries
@ -163,16 +163,16 @@ func runSuite(test, file string) {
r, err := os.Open(curFile) r, err := os.Open(curFile)
if err != nil { if err != nil {
glog.Fatalln(err) log.Crit(fmt.Sprint(err))
} }
defer r.Close() defer r.Close()
err = runTestWithReader(curTest, r) err = runTestWithReader(curTest, r)
if err != nil { if err != nil {
if continueOnError { if continueOnError {
glog.Errorln(err) log.Error(fmt.Sprint(err))
} else { } else {
glog.Fatalln(err) log.Crit(fmt.Sprint(err))
} }
} }
} }
@ -190,14 +190,14 @@ func setupApp(c *cli.Context) error {
runSuite(flagTest, flagFile) runSuite(flagTest, flagFile)
} else { } else {
if err := runTestWithReader(flagTest, os.Stdin); err != nil { if err := runTestWithReader(flagTest, os.Stdin); err != nil {
glog.Fatalln(err) log.Crit(fmt.Sprint(err))
} }
} }
return nil return nil
} }
func main() { func main() {
glog.SetToStderr(true) log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
app := cli.NewApp() app := cli.NewApp()
app.Name = "ethtest" app.Name = "ethtest"
@ -216,7 +216,7 @@ func main() {
} }
if err := app.Run(os.Args); err != nil { if err := app.Run(os.Args); err != nil {
glog.Fatalln(err) log.Crit(fmt.Sprint(err))
} }
} }

@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime" "github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/log"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )
@ -111,8 +111,9 @@ func init() {
} }
func run(ctx *cli.Context) error { func run(ctx *cli.Context) error {
glog.SetToStderr(true) glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
glog.SetV(ctx.GlobalInt(VerbosityFlag.Name)) glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
var ( var (
db, _ = ethdb.NewMemDatabase() db, _ = ethdb.NewMemDatabase()

@ -19,14 +19,14 @@ package main
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )
@ -196,18 +196,19 @@ func accountList(ctx *cli.Context) error {
func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i int, passwords []string) (accounts.Account, string) { func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i int, passwords []string) (accounts.Account, string) {
account, err := utils.MakeAddress(ks, address) account, err := utils.MakeAddress(ks, address)
if err != nil { if err != nil {
utils.Fatalf("Could not list accounts: %v", err) fmt.Printf("Fatal: Could not list accounts: %v\n", err)
os.Exit(1)
} }
for trials := 0; trials < 3; trials++ { for trials := 0; trials < 3; trials++ {
prompt := fmt.Sprintf("Unlocking account %s | Attempt %d/%d", address, trials+1, 3) prompt := fmt.Sprintf("Unlocking account %s | Attempt %d/%d", address, trials+1, 3)
password := getPassPhrase(prompt, false, i, passwords) password := getPassPhrase(prompt, false, i, passwords)
err = ks.Unlock(account, password) err = ks.Unlock(account, password)
if err == nil { if err == nil {
glog.V(logger.Info).Infof("Unlocked account %x", account.Address) log.Info(fmt.Sprintf("Unlocked account %x", account.Address))
return account, password return account, password
} }
if err, ok := err.(*keystore.AmbiguousAddrError); ok { if err, ok := err.(*keystore.AmbiguousAddrError); ok {
glog.V(logger.Info).Infof("Unlocked account %x", account.Address) log.Info(fmt.Sprintf("Unlocked account %x", account.Address))
return ambiguousAddrRecovery(ks, err, password), password return ambiguousAddrRecovery(ks, err, password), password
} }
if err != keystore.ErrDecrypt { if err != keystore.ErrDecrypt {
@ -216,7 +217,9 @@ func unlockAccount(ctx *cli.Context, ks *keystore.KeyStore, address string, i in
} }
} }
// All trials expended to unlock account, bail out // All trials expended to unlock account, bail out
utils.Fatalf("Failed to unlock account %s (%v)", address, err) fmt.Printf("Fatal: Failed to unlock account %s (%v)\n", address, err)
os.Exit(1)
return accounts.Account{}, "" return accounts.Account{}, ""
} }
@ -236,15 +239,18 @@ func getPassPhrase(prompt string, confirmation bool, i int, passwords []string)
} }
password, err := console.Stdin.PromptPassword("Passphrase: ") password, err := console.Stdin.PromptPassword("Passphrase: ")
if err != nil { if err != nil {
utils.Fatalf("Failed to read passphrase: %v", err) fmt.Printf("Fatal: Failed to read passphrase: %v\n", err)
os.Exit(1)
} }
if confirmation { if confirmation {
confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ") confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ")
if err != nil { if err != nil {
utils.Fatalf("Failed to read passphrase confirmation: %v", err) fmt.Printf("Fatal: Failed to read passphrase confirmation: %v\n", err)
os.Exit(1)
} }
if password != confirm { if password != confirm {
utils.Fatalf("Passphrases do not match") fmt.Printf("Fatal: Passphrases do not match\n")
os.Exit(1)
} }
} }
return password return password
@ -264,7 +270,8 @@ func ambiguousAddrRecovery(ks *keystore.KeyStore, err *keystore.AmbiguousAddrErr
} }
} }
if match == nil { if match == nil {
utils.Fatalf("None of the listed files could be unlocked.") fmt.Printf("Fatal: None of the listed files could be unlocked.\n")
os.Exit(1)
} }
fmt.Printf("Your passphrase unlocked %s\n", match.URL) fmt.Printf("Your passphrase unlocked %s\n", match.URL)
fmt.Println("In order to avoid this warning, you need to remove the following duplicate key files:") fmt.Println("In order to avoid this warning, you need to remove the following duplicate key files:")
@ -284,7 +291,8 @@ func accountCreate(ctx *cli.Context) error {
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
account, err := ks.NewAccount(password) account, err := ks.NewAccount(password)
if err != nil { if err != nil {
utils.Fatalf("Failed to create account: %v", err) fmt.Printf("Fatal: Failed to create account: %v\n", err)
os.Exit(1)
} }
fmt.Printf("Address: {%x}\n", account.Address) fmt.Printf("Address: {%x}\n", account.Address)
return nil return nil
@ -294,7 +302,8 @@ func accountCreate(ctx *cli.Context) error {
// one, also providing the possibility to change the pass-phrase. // one, also providing the possibility to change the pass-phrase.
func accountUpdate(ctx *cli.Context) error { func accountUpdate(ctx *cli.Context) error {
if len(ctx.Args()) == 0 { if len(ctx.Args()) == 0 {
utils.Fatalf("No accounts specified to update") fmt.Printf("Fatal: No accounts specified to update\n")
os.Exit(1)
} }
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
@ -302,7 +311,8 @@ func accountUpdate(ctx *cli.Context) error {
account, oldPassword := unlockAccount(ctx, ks, ctx.Args().First(), 0, nil) account, oldPassword := unlockAccount(ctx, ks, ctx.Args().First(), 0, nil)
newPassword := getPassPhrase("Please give a new password. Do not forget this password.", true, 0, nil) newPassword := getPassPhrase("Please give a new password. Do not forget this password.", true, 0, nil)
if err := ks.Update(account, oldPassword, newPassword); err != nil { if err := ks.Update(account, oldPassword, newPassword); err != nil {
utils.Fatalf("Could not update the account: %v", err) fmt.Printf("Fatal: Could not update the account: %v\n", err)
os.Exit(1)
} }
return nil return nil
} }
@ -310,11 +320,13 @@ func accountUpdate(ctx *cli.Context) error {
func importWallet(ctx *cli.Context) error { func importWallet(ctx *cli.Context) error {
keyfile := ctx.Args().First() keyfile := ctx.Args().First()
if len(keyfile) == 0 { if len(keyfile) == 0 {
utils.Fatalf("keyfile must be given as argument") fmt.Printf("Fatal: keyfile must be given as argument\n")
os.Exit(1)
} }
keyJson, err := ioutil.ReadFile(keyfile) keyJson, err := ioutil.ReadFile(keyfile)
if err != nil { if err != nil {
utils.Fatalf("Could not read wallet file: %v", err) fmt.Printf("Fatal: Could not read wallet file: %v\n", err)
os.Exit(1)
} }
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
@ -323,7 +335,8 @@ func importWallet(ctx *cli.Context) error {
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
acct, err := ks.ImportPreSaleKey(keyJson, passphrase) acct, err := ks.ImportPreSaleKey(keyJson, passphrase)
if err != nil { if err != nil {
utils.Fatalf("%v", err) fmt.Printf("Fatal: %v\n", err)
os.Exit(1)
} }
fmt.Printf("Address: {%x}\n", acct.Address) fmt.Printf("Address: {%x}\n", acct.Address)
return nil return nil
@ -332,11 +345,13 @@ func importWallet(ctx *cli.Context) error {
func accountImport(ctx *cli.Context) error { func accountImport(ctx *cli.Context) error {
keyfile := ctx.Args().First() keyfile := ctx.Args().First()
if len(keyfile) == 0 { if len(keyfile) == 0 {
utils.Fatalf("keyfile must be given as argument") fmt.Printf("Fatal: keyfile must be given as argument\n")
os.Exit(1)
} }
key, err := crypto.LoadECDSA(keyfile) key, err := crypto.LoadECDSA(keyfile)
if err != nil { if err != nil {
utils.Fatalf("Failed to load the private key: %v", err) fmt.Printf("Fatal: Failed to load the private key: %v\n", err)
os.Exit(1)
} }
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
passphrase := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx)) passphrase := getPassPhrase("Your new account is locked with a password. Please give a password. Do not forget this password.", true, 0, utils.MakePasswordList(ctx))
@ -344,7 +359,8 @@ func accountImport(ctx *cli.Context) error {
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
acct, err := ks.ImportECDSA(key, passphrase) acct, err := ks.ImportECDSA(key, passphrase)
if err != nil { if err != nil {
utils.Fatalf("Could not create the account: %v", err) fmt.Printf("Fatal: Could not create the account: %v\n", err)
os.Exit(1)
} }
fmt.Printf("Address: {%x}\n", acct.Address) fmt.Printf("Address: {%x}\n", acct.Address)
return nil return nil

@ -32,8 +32,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/syndtr/goleveldb/leveldb/util" "github.com/syndtr/goleveldb/leveldb/util"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
@ -113,7 +112,7 @@ Use "ethereum dump 0" to dump the genesis block.
func initGenesis(ctx *cli.Context) error { func initGenesis(ctx *cli.Context) error {
genesisPath := ctx.Args().First() genesisPath := ctx.Args().First()
if len(genesisPath) == 0 { if len(genesisPath) == 0 {
utils.Fatalf("must supply path to genesis JSON file") log.Crit(fmt.Sprintf("must supply path to genesis JSON file"))
} }
stack := makeFullNode(ctx) stack := makeFullNode(ctx)
@ -121,21 +120,21 @@ func initGenesis(ctx *cli.Context) error {
genesisFile, err := os.Open(genesisPath) genesisFile, err := os.Open(genesisPath)
if err != nil { if err != nil {
utils.Fatalf("failed to read genesis file: %v", err) log.Crit(fmt.Sprintf("failed to read genesis file: %v", err))
} }
defer genesisFile.Close() defer genesisFile.Close()
block, err := core.WriteGenesisBlock(chaindb, genesisFile) block, err := core.WriteGenesisBlock(chaindb, genesisFile)
if err != nil { if err != nil {
utils.Fatalf("failed to write genesis block: %v", err) log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
} }
glog.V(logger.Info).Infof("successfully wrote genesis block and/or chain rule set: %x", block.Hash()) log.Info(fmt.Sprintf("successfully wrote genesis block and/or chain rule set: %x", block.Hash()))
return nil return nil
} }
func importChain(ctx *cli.Context) error { func importChain(ctx *cli.Context) error {
if len(ctx.Args()) != 1 { if len(ctx.Args()) != 1 {
utils.Fatalf("This command requires an argument.") log.Crit(fmt.Sprintf("This command requires an argument."))
} }
stack := makeFullNode(ctx) stack := makeFullNode(ctx)
chain, chainDb := utils.MakeChain(ctx, stack) chain, chainDb := utils.MakeChain(ctx, stack)
@ -159,7 +158,7 @@ func importChain(ctx *cli.Context) error {
// Import the chain // Import the chain
start := time.Now() start := time.Now()
if err := utils.ImportChain(chain, ctx.Args().First()); err != nil { if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
utils.Fatalf("Import error: %v", err) log.Crit(fmt.Sprintf("Import error: %v", err))
} }
fmt.Printf("Import done in %v.\n\n", time.Since(start)) fmt.Printf("Import done in %v.\n\n", time.Since(start))
@ -168,7 +167,7 @@ func importChain(ctx *cli.Context) error {
stats, err := db.LDB().GetProperty("leveldb.stats") stats, err := db.LDB().GetProperty("leveldb.stats")
if err != nil { if err != nil {
utils.Fatalf("Failed to read database stats: %v", err) log.Crit(fmt.Sprintf("Failed to read database stats: %v", err))
} }
fmt.Println(stats) fmt.Println(stats)
fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses()) fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
@ -187,13 +186,13 @@ func importChain(ctx *cli.Context) error {
start = time.Now() start = time.Now()
fmt.Println("Compacting entire database...") fmt.Println("Compacting entire database...")
if err = db.LDB().CompactRange(util.Range{}); err != nil { if err = db.LDB().CompactRange(util.Range{}); err != nil {
utils.Fatalf("Compaction failed: %v", err) log.Crit(fmt.Sprintf("Compaction failed: %v", err))
} }
fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
stats, err = db.LDB().GetProperty("leveldb.stats") stats, err = db.LDB().GetProperty("leveldb.stats")
if err != nil { if err != nil {
utils.Fatalf("Failed to read database stats: %v", err) log.Crit(fmt.Sprintf("Failed to read database stats: %v", err))
} }
fmt.Println(stats) fmt.Println(stats)
@ -202,7 +201,7 @@ func importChain(ctx *cli.Context) error {
func exportChain(ctx *cli.Context) error { func exportChain(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.") log.Crit(fmt.Sprintf("This command requires an argument."))
} }
stack := makeFullNode(ctx) stack := makeFullNode(ctx)
chain, _ := utils.MakeChain(ctx, stack) chain, _ := utils.MakeChain(ctx, stack)
@ -217,16 +216,16 @@ func exportChain(ctx *cli.Context) error {
first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64) first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64) last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
if ferr != nil || lerr != nil { if ferr != nil || lerr != nil {
utils.Fatalf("Export error in parsing parameters: block number not an integer\n") log.Crit(fmt.Sprintf("Export error in parsing parameters: block number not an integer\n"))
} }
if first < 0 || last < 0 { if first < 0 || last < 0 {
utils.Fatalf("Export error: block number must be greater than 0\n") log.Crit(fmt.Sprintf("Export error: block number must be greater than 0\n"))
} }
err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last)) err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
} }
if err != nil { if err != nil {
utils.Fatalf("Export error: %v\n", err) log.Crit(fmt.Sprintf("Export error: %v\n", err))
} }
fmt.Printf("Export done in %v", time.Since(start)) fmt.Printf("Export done in %v", time.Since(start))
return nil return nil
@ -244,7 +243,7 @@ func removeDB(ctx *cli.Context) error {
confirm, err := console.Stdin.PromptConfirm("Remove this database?") confirm, err := console.Stdin.PromptConfirm("Remove this database?")
switch { switch {
case err != nil: case err != nil:
utils.Fatalf("%v", err) log.Crit(fmt.Sprintf("%v", err))
case !confirm: case !confirm:
fmt.Println("Operation aborted") fmt.Println("Operation aborted")
default: default:
@ -257,7 +256,7 @@ func removeDB(ctx *cli.Context) error {
} }
func upgradeDB(ctx *cli.Context) error { func upgradeDB(ctx *cli.Context) error {
glog.Infoln("Upgrading blockchain database") log.Info(fmt.Sprint("Upgrading blockchain database"))
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
chain, chainDb := utils.MakeChain(ctx, stack) chain, chainDb := utils.MakeChain(ctx, stack)
@ -270,7 +269,7 @@ func upgradeDB(ctx *cli.Context) error {
filename := fmt.Sprintf("blockchain_%d_%s.chain", bcVersion, time.Now().Format("20060102_150405")) filename := fmt.Sprintf("blockchain_%d_%s.chain", bcVersion, time.Now().Format("20060102_150405"))
exportFile := filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), filename) exportFile := filepath.Join(ctx.GlobalString(utils.DataDirFlag.Name), filename)
if err := utils.ExportChain(chain, exportFile); err != nil { if err := utils.ExportChain(chain, exportFile); err != nil {
utils.Fatalf("Unable to export chain for reimport %s", err) log.Crit(fmt.Sprintf("Unable to export chain for reimport %s", err))
} }
chainDb.Close() chainDb.Close()
if dir := dbDirectory(chainDb); dir != "" { if dir := dbDirectory(chainDb); dir != "" {
@ -283,10 +282,10 @@ func upgradeDB(ctx *cli.Context) error {
err := utils.ImportChain(chain, exportFile) err := utils.ImportChain(chain, exportFile)
chainDb.Close() chainDb.Close()
if err != nil { if err != nil {
utils.Fatalf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile) log.Crit(fmt.Sprintf("Import error %v (a backup is made in %s, use the import command to import it)", err, exportFile))
} else { } else {
os.Remove(exportFile) os.Remove(exportFile)
glog.Infoln("Import finished") log.Info(fmt.Sprint("Import finished"))
} }
return nil return nil
} }
@ -312,11 +311,11 @@ func dump(ctx *cli.Context) error {
} }
if block == nil { if block == nil {
fmt.Println("{}") fmt.Println("{}")
utils.Fatalf("block not found") log.Crit(fmt.Sprintf("block not found"))
} else { } else {
state, err := state.New(block.Root(), chainDb) state, err := state.New(block.Root(), chainDb)
if err != nil { if err != nil {
utils.Fatalf("could not create new state: %v", err) log.Crit(fmt.Sprintf("could not create new state: %v", err))
} }
fmt.Printf("%s\n", state.Dump()) fmt.Printf("%s\n", state.Dump())
} }

@ -17,12 +17,14 @@
package main package main
import ( import (
"fmt"
"os" "os"
"os/signal" "os/signal"
"strings" "strings"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
@ -78,7 +80,7 @@ func localConsole(ctx *cli.Context) error {
// Attach to the newly started node and start the JavaScript console // Attach to the newly started node and start the JavaScript console
client, err := node.Attach() client, err := node.Attach()
if err != nil { if err != nil {
utils.Fatalf("Failed to attach to the inproc geth: %v", err) log.Crit(fmt.Sprintf("Failed to attach to the inproc geth: %v", err))
} }
config := console.Config{ config := console.Config{
DataDir: node.DataDir(), DataDir: node.DataDir(),
@ -88,7 +90,7 @@ func localConsole(ctx *cli.Context) error {
} }
console, err := console.New(config) console, err := console.New(config)
if err != nil { if err != nil {
utils.Fatalf("Failed to start the JavaScript console: %v", err) log.Crit(fmt.Sprintf("Failed to start the JavaScript console: %v", err))
} }
defer console.Stop(false) defer console.Stop(false)
@ -110,7 +112,7 @@ func remoteConsole(ctx *cli.Context) error {
// Attach to a remotely running geth instance and start the JavaScript console // Attach to a remotely running geth instance and start the JavaScript console
client, err := dialRPC(ctx.Args().First()) client, err := dialRPC(ctx.Args().First())
if err != nil { if err != nil {
utils.Fatalf("Unable to attach to remote geth: %v", err) log.Crit(fmt.Sprintf("Unable to attach to remote geth: %v", err))
} }
config := console.Config{ config := console.Config{
DataDir: utils.MakeDataDir(ctx), DataDir: utils.MakeDataDir(ctx),
@ -120,7 +122,7 @@ func remoteConsole(ctx *cli.Context) error {
} }
console, err := console.New(config) console, err := console.New(config)
if err != nil { if err != nil {
utils.Fatalf("Failed to start the JavaScript console: %v", err) log.Crit(fmt.Sprintf("Failed to start the JavaScript console: %v", err))
} }
defer console.Stop(false) defer console.Stop(false)
@ -162,7 +164,7 @@ func ephemeralConsole(ctx *cli.Context) error {
// Attach to the newly started node and start the JavaScript console // Attach to the newly started node and start the JavaScript console
client, err := node.Attach() client, err := node.Attach()
if err != nil { if err != nil {
utils.Fatalf("Failed to attach to the inproc geth: %v", err) log.Crit(fmt.Sprintf("Failed to attach to the inproc geth: %v", err))
} }
config := console.Config{ config := console.Config{
DataDir: node.DataDir(), DataDir: node.DataDir(),
@ -172,14 +174,14 @@ func ephemeralConsole(ctx *cli.Context) error {
} }
console, err := console.New(config) console, err := console.New(config)
if err != nil { if err != nil {
utils.Fatalf("Failed to start the JavaScript console: %v", err) log.Crit(fmt.Sprintf("Failed to start the JavaScript console: %v", err))
} }
defer console.Stop(false) defer console.Stop(false)
// Evaluate each of the specified JavaScript files // Evaluate each of the specified JavaScript files
for _, file := range ctx.Args() { for _, file := range ctx.Args() {
if err = console.Execute(file); err != nil { if err = console.Execute(file); err != nil {
utils.Fatalf("Failed to execute %s: %v", file, err) log.Crit(fmt.Sprintf("Failed to execute %s: %v", file, err))
} }
} }
// Wait for pending callbacks, but stop for Ctrl-C. // Wait for pending callbacks, but stop for Ctrl-C.

@ -34,8 +34,7 @@ import (
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -204,11 +203,11 @@ func makeFullNode(ctx *cli.Context) *node.Node {
}{uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch), clientIdentifier, runtime.Version(), runtime.GOOS} }{uint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch), clientIdentifier, runtime.Version(), runtime.GOOS}
extra, err := rlp.EncodeToBytes(clientInfo) extra, err := rlp.EncodeToBytes(clientInfo)
if err != nil { if err != nil {
glog.V(logger.Warn).Infoln("error setting canonical miner information:", err) log.Warn(fmt.Sprint("error setting canonical miner information:", err))
} }
if uint64(len(extra)) > params.MaximumExtraDataSize { if uint64(len(extra)) > params.MaximumExtraDataSize {
glog.V(logger.Warn).Infoln("error setting canonical miner information: extra exceeds", params.MaximumExtraDataSize) log.Warn(fmt.Sprint("error setting canonical miner information: extra exceeds", params.MaximumExtraDataSize))
glog.V(logger.Debug).Infof("extra: %x\n", extra) log.Debug(fmt.Sprintf("extra: %x\n", extra))
extra = nil extra = nil
} }
stack := utils.MakeNode(ctx, clientIdentifier, gitCommit) stack := utils.MakeNode(ctx, clientIdentifier, gitCommit)
@ -236,7 +235,7 @@ func makeFullNode(ctx *cli.Context) *node.Node {
copy(config.Commit[:], commit) copy(config.Commit[:], commit)
return release.NewReleaseService(ctx, config) return release.NewReleaseService(ctx, config)
}); err != nil { }); err != nil {
utils.Fatalf("Failed to register the Geth release oracle service: %v", err) log.Crit(fmt.Sprintf("Failed to register the Geth release oracle service: %v", err))
} }
return stack return stack
} }
@ -266,14 +265,14 @@ func startNode(ctx *cli.Context, stack *node.Node) {
// Create an chain state reader for self-derivation // Create an chain state reader for self-derivation
rpcClient, err := stack.Attach() rpcClient, err := stack.Attach()
if err != nil { if err != nil {
utils.Fatalf("Failed to attach to self: %v", err) log.Crit(fmt.Sprintf("Failed to attach to self: %v", err))
} }
stateReader := ethclient.NewClient(rpcClient) stateReader := ethclient.NewClient(rpcClient)
// Open and self derive any wallets already attached // Open and self derive any wallets already attached
for _, wallet := range stack.AccountManager().Wallets() { for _, wallet := range stack.AccountManager().Wallets() {
if err := wallet.Open(""); err != nil { if err := wallet.Open(""); err != nil {
glog.V(logger.Warn).Infof("Failed to open wallet %s: %v", wallet.URL(), err) log.Warn(fmt.Sprintf("Failed to open wallet %s: %v", wallet.URL(), err))
} else { } else {
wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader) wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
} }
@ -282,13 +281,13 @@ func startNode(ctx *cli.Context, stack *node.Node) {
for event := range events { for event := range events {
if event.Arrive { if event.Arrive {
if err := event.Wallet.Open(""); err != nil { if err := event.Wallet.Open(""); err != nil {
glog.V(logger.Info).Infof("New wallet appeared: %s, failed to open: %s", event.Wallet.URL(), err) log.Info(fmt.Sprintf("New wallet appeared: %s, failed to open: %s", event.Wallet.URL(), err))
} else { } else {
glog.V(logger.Info).Infof("New wallet appeared: %s, %s", event.Wallet.URL(), event.Wallet.Status()) log.Info(fmt.Sprintf("New wallet appeared: %s, %s", event.Wallet.URL(), event.Wallet.Status()))
event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader) event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
} }
} else { } else {
glog.V(logger.Info).Infof("Old wallet dropped: %s", event.Wallet.URL()) log.Info(fmt.Sprintf("Old wallet dropped: %s", event.Wallet.URL()))
event.Wallet.Close() event.Wallet.Close()
} }
} }
@ -297,10 +296,10 @@ func startNode(ctx *cli.Context, stack *node.Node) {
if ctx.GlobalBool(utils.MiningEnabledFlag.Name) { if ctx.GlobalBool(utils.MiningEnabledFlag.Name) {
var ethereum *eth.Ethereum var ethereum *eth.Ethereum
if err := stack.Service(&ethereum); err != nil { if err := stack.Service(&ethereum); err != nil {
utils.Fatalf("ethereum service not running: %v", err) log.Crit(fmt.Sprintf("ethereum service not running: %v", err))
} }
if err := ethereum.StartMining(ctx.GlobalInt(utils.MinerThreadsFlag.Name)); err != nil { if err := ethereum.StartMining(ctx.GlobalInt(utils.MinerThreadsFlag.Name)); err != nil {
utils.Fatalf("Failed to start mining: %v", err) log.Crit(fmt.Sprintf("Failed to start mining: %v", err))
} }
} }
} }

@ -28,6 +28,7 @@ import (
"github.com/ethereum/ethash" "github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )
@ -68,7 +69,7 @@ The output of this command is supposed to be machine-readable.
func makedag(ctx *cli.Context) error { func makedag(ctx *cli.Context) error {
args := ctx.Args() args := ctx.Args()
wrongArgs := func() { wrongArgs := func() {
utils.Fatalf(`Usage: geth makedag <block number> <outputdir>`) log.Crit(fmt.Sprintf(`Usage: geth makedag <block number> <outputdir>`))
} }
switch { switch {
case len(args) == 2: case len(args) == 2:
@ -84,7 +85,7 @@ func makedag(ctx *cli.Context) error {
} }
_, err = ioutil.ReadDir(dir) _, err = ioutil.ReadDir(dir)
if err != nil { if err != nil {
utils.Fatalf("Can't find dir") log.Crit(fmt.Sprintf("Can't find dir"))
} }
fmt.Println("making DAG, this could take awhile...") fmt.Println("making DAG, this could take awhile...")
ethash.MakeDAG(blockNum, dir) ethash.MakeDAG(blockNum, dir)

@ -26,6 +26,7 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/gizak/termui" "github.com/gizak/termui"
@ -76,14 +77,14 @@ func monitor(ctx *cli.Context) error {
// Attach to an Ethereum node over IPC or RPC // Attach to an Ethereum node over IPC or RPC
endpoint := ctx.String(monitorCommandAttachFlag.Name) endpoint := ctx.String(monitorCommandAttachFlag.Name)
if client, err = dialRPC(endpoint); err != nil { if client, err = dialRPC(endpoint); err != nil {
utils.Fatalf("Unable to attach to geth node: %v", err) log.Crit(fmt.Sprintf("Unable to attach to geth node: %v", err))
} }
defer client.Close() defer client.Close()
// Retrieve all the available metrics and resolve the user pattens // Retrieve all the available metrics and resolve the user pattens
metrics, err := retrieveMetrics(client) metrics, err := retrieveMetrics(client)
if err != nil { if err != nil {
utils.Fatalf("Failed to retrieve system metrics: %v", err) log.Crit(fmt.Sprintf("Failed to retrieve system metrics: %v", err))
} }
monitored := resolveMetrics(metrics, ctx.Args()) monitored := resolveMetrics(metrics, ctx.Args())
if len(monitored) == 0 { if len(monitored) == 0 {
@ -91,18 +92,18 @@ func monitor(ctx *cli.Context) error {
sort.Strings(list) sort.Strings(list)
if len(list) > 0 { if len(list) > 0 {
utils.Fatalf("No metrics specified.\n\nAvailable:\n - %s", strings.Join(list, "\n - ")) log.Crit(fmt.Sprintf("No metrics specified.\n\nAvailable:\n - %s", strings.Join(list, "\n - ")))
} else { } else {
utils.Fatalf("No metrics collected by geth (--%s).\n", utils.MetricsEnabledFlag.Name) log.Crit(fmt.Sprintf("No metrics collected by geth (--%s).\n", utils.MetricsEnabledFlag.Name))
} }
} }
sort.Strings(monitored) sort.Strings(monitored)
if cols := len(monitored) / ctx.Int(monitorCommandRowsFlag.Name); cols > 6 { if cols := len(monitored) / ctx.Int(monitorCommandRowsFlag.Name); cols > 6 {
utils.Fatalf("Requested metrics (%d) spans more that 6 columns:\n - %s", len(monitored), strings.Join(monitored, "\n - ")) log.Crit(fmt.Sprintf("Requested metrics (%d) spans more that 6 columns:\n - %s", len(monitored), strings.Join(monitored, "\n - ")))
} }
// Create and configure the chart UI defaults // Create and configure the chart UI defaults
if err := termui.Init(); err != nil { if err := termui.Init(); err != nil {
utils.Fatalf("Unable to initialize terminal UI: %v", err) log.Crit(fmt.Sprintf("Unable to initialize terminal UI: %v", err))
} }
defer termui.Close() defer termui.Close()
@ -186,7 +187,7 @@ func resolveMetric(metrics map[string]interface{}, pattern string, path string)
if len(parts) > 1 { if len(parts) > 1 {
for _, variation := range strings.Split(parts[0], ",") { for _, variation := range strings.Split(parts[0], ",") {
if submetrics, ok := metrics[variation].(map[string]interface{}); !ok { if submetrics, ok := metrics[variation].(map[string]interface{}); !ok {
utils.Fatalf("Failed to retrieve system metrics: %s", path+variation) log.Crit(fmt.Sprintf("Failed to retrieve system metrics: %s", path+variation))
return nil return nil
} else { } else {
results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...) results = append(results, resolveMetric(submetrics, parts[1], path+variation+"/")...)
@ -205,7 +206,7 @@ func resolveMetric(metrics map[string]interface{}, pattern string, path string)
results = append(results, expandMetrics(metric, path+variation+"/")...) results = append(results, expandMetrics(metric, path+variation+"/")...)
default: default:
utils.Fatalf("Metric pattern resolved to unexpected type: %v", reflect.TypeOf(metric)) log.Crit(fmt.Sprintf("Metric pattern resolved to unexpected type: %v", reflect.TypeOf(metric)))
return nil return nil
} }
} }
@ -227,7 +228,7 @@ func expandMetrics(metrics map[string]interface{}, path string) []string {
list = append(list, expandMetrics(metric, path+name+"/")...) list = append(list, expandMetrics(metric, path+name+"/")...)
default: default:
utils.Fatalf("Metric pattern %s resolved to unexpected type: %v", path+name, reflect.TypeOf(metric)) log.Crit(fmt.Sprintf("Metric pattern %s resolved to unexpected type: %v", path+name, reflect.TypeOf(metric)))
return nil return nil
} }
} }

@ -19,7 +19,7 @@ package main
import ( import (
"flag" "flag"
"log" "fmt"
"os" "os"
"os/signal" "os/signal"
@ -27,7 +27,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
@ -46,35 +46,34 @@ func main() {
flag.Parse() flag.Parse()
// Enable logging errors, we really do want to see those // Enable logging errors, we really do want to see those
glog.SetV(2) log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StreamHandler(os.Stderr, log.TerminalFormat())))
glog.SetToStderr(true)
// Load the test suite to run the RPC against // Load the test suite to run the RPC against
tests, err := tests.LoadBlockTests(*testFile) tests, err := tests.LoadBlockTests(*testFile)
if err != nil { if err != nil {
log.Fatalf("Failed to load test suite: %v", err) log.Crit(fmt.Sprintf("Failed to load test suite: %v", err))
} }
test, found := tests[*testName] test, found := tests[*testName]
if !found { if !found {
log.Fatalf("Requested test (%s) not found within suite", *testName) log.Crit(fmt.Sprintf("Requested test (%s) not found within suite", *testName))
} }
stack, err := MakeSystemNode(*testKey, test) stack, err := MakeSystemNode(*testKey, test)
if err != nil { if err != nil {
log.Fatalf("Failed to assemble test stack: %v", err) log.Crit(fmt.Sprintf("Failed to assemble test stack: %v", err))
} }
if err := stack.Start(); err != nil { if err := stack.Start(); err != nil {
log.Fatalf("Failed to start test node: %v", err) log.Crit(fmt.Sprintf("Failed to start test node: %v", err))
} }
defer stack.Stop() defer stack.Stop()
log.Println("Test node started...") log.Info("Test node started...")
// Make sure the tests contained within the suite pass // Make sure the tests contained within the suite pass
if err := RunTest(stack, test); err != nil { if err := RunTest(stack, test); err != nil {
log.Fatalf("Failed to run the pre-configured test: %v", err) log.Crit(fmt.Sprintf("Failed to run the pre-configured test: %v", err))
} }
log.Println("Initial test suite passed...") log.Info("Initial test suite passed...")
quit := make(chan os.Signal, 1) quit := make(chan os.Signal, 1)
signal.Notify(quit, os.Interrupt) signal.Notify(quit, os.Interrupt)

@ -35,8 +35,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
@ -278,7 +277,7 @@ func bzzd(ctx *cli.Context) error {
signal.Notify(sigc, syscall.SIGTERM) signal.Notify(sigc, syscall.SIGTERM)
defer signal.Stop(sigc) defer signal.Stop(sigc)
<-sigc <-sigc
glog.V(logger.Info).Infoln("Got sigterm, shutting down...") log.Info(fmt.Sprint("Got sigterm, shutting down..."))
stack.Stop() stack.Stop()
}() }()
networkId := ctx.GlobalUint64(SwarmNetworkIdFlag.Name) networkId := ctx.GlobalUint64(SwarmNetworkIdFlag.Name)
@ -308,7 +307,7 @@ func registerBzzService(ctx *cli.Context, stack *node.Node) {
bzzconfig, err := bzzapi.NewConfig(bzzdir, chbookaddr, prvkey, ctx.GlobalUint64(SwarmNetworkIdFlag.Name)) bzzconfig, err := bzzapi.NewConfig(bzzdir, chbookaddr, prvkey, ctx.GlobalUint64(SwarmNetworkIdFlag.Name))
if err != nil { if err != nil {
utils.Fatalf("unable to configure swarm: %v", err) log.Crit(fmt.Sprintf("unable to configure swarm: %v", err))
} }
bzzport := ctx.GlobalString(SwarmPortFlag.Name) bzzport := ctx.GlobalString(SwarmPortFlag.Name)
if len(bzzport) > 0 { if len(bzzport) > 0 {
@ -325,13 +324,13 @@ func registerBzzService(ctx *cli.Context, stack *node.Node) {
if len(ethapi) > 0 { if len(ethapi) > 0 {
client, err = ethclient.Dial(ethapi) client, err = ethclient.Dial(ethapi)
if err != nil { if err != nil {
utils.Fatalf("Can't connect: %v", err) log.Crit(fmt.Sprintf("Can't connect: %v", err))
} }
} }
return swarm.NewSwarm(ctx, client, bzzconfig, swapEnabled, syncEnabled, cors) return swarm.NewSwarm(ctx, client, bzzconfig, swapEnabled, syncEnabled, cors)
} }
if err := stack.Register(boot); err != nil { if err := stack.Register(boot); err != nil {
utils.Fatalf("Failed to register the Swarm service: %v", err) log.Crit(fmt.Sprintf("Failed to register the Swarm service: %v", err))
} }
} }
@ -339,11 +338,11 @@ func getAccount(ctx *cli.Context, stack *node.Node) *ecdsa.PrivateKey {
keyid := ctx.GlobalString(SwarmAccountFlag.Name) keyid := ctx.GlobalString(SwarmAccountFlag.Name)
if keyid == "" { if keyid == "" {
utils.Fatalf("Option %q is required", SwarmAccountFlag.Name) log.Crit(fmt.Sprintf("Option %q is required", SwarmAccountFlag.Name))
} }
// Try to load the arg as a hex key file. // Try to load the arg as a hex key file.
if key, err := crypto.LoadECDSA(keyid); err == nil { if key, err := crypto.LoadECDSA(keyid); err == nil {
glog.V(logger.Info).Infof("swarm account key loaded: %#x", crypto.PubkeyToAddress(key.PublicKey)) log.Info(fmt.Sprintf("swarm account key loaded: %#x", crypto.PubkeyToAddress(key.PublicKey)))
return key return key
} }
// Otherwise try getting it from the keystore. // Otherwise try getting it from the keystore.
@ -365,14 +364,14 @@ func decryptStoreAccount(ks *keystore.KeyStore, account string) *ecdsa.PrivateKe
err = fmt.Errorf("index %d higher than number of accounts %d", ix, len(accounts)) err = fmt.Errorf("index %d higher than number of accounts %d", ix, len(accounts))
} }
} else { } else {
utils.Fatalf("Can't find swarm account key %s", account) log.Crit(fmt.Sprintf("Can't find swarm account key %s", account))
} }
if err != nil { if err != nil {
utils.Fatalf("Can't find swarm account key: %v", err) log.Crit(fmt.Sprintf("Can't find swarm account key: %v", err))
} }
keyjson, err := ioutil.ReadFile(a.URL.Path) keyjson, err := ioutil.ReadFile(a.URL.Path)
if err != nil { if err != nil {
utils.Fatalf("Can't load swarm account key: %v", err) log.Crit(fmt.Sprintf("Can't load swarm account key: %v", err))
} }
for i := 1; i <= 3; i++ { for i := 1; i <= 3; i++ {
passphrase := promptPassphrase(fmt.Sprintf("Unlocking swarm account %s [%d/3]", a.Address.Hex(), i)) passphrase := promptPassphrase(fmt.Sprintf("Unlocking swarm account %s [%d/3]", a.Address.Hex(), i))
@ -381,7 +380,7 @@ func decryptStoreAccount(ks *keystore.KeyStore, account string) *ecdsa.PrivateKe
return key.PrivateKey return key.PrivateKey
} }
} }
utils.Fatalf("Can't decrypt swarm account key") log.Crit(fmt.Sprintf("Can't decrypt swarm account key"))
return nil return nil
} }
@ -391,7 +390,7 @@ func promptPassphrase(prompt string) string {
} }
password, err := console.Stdin.PromptPassword("Passphrase: ") password, err := console.Stdin.PromptPassword("Passphrase: ")
if err != nil { if err != nil {
utils.Fatalf("Failed to read passphrase: %v", err) log.Crit(fmt.Sprintf("Failed to read passphrase: %v", err))
} }
return password return password
} }
@ -400,7 +399,7 @@ func injectBootnodes(srv *p2p.Server, nodes []string) {
for _, url := range nodes { for _, url := range nodes {
n, err := discover.ParseNode(url) n, err := discover.ParseNode(url)
if err != nil { if err != nil {
glog.Errorf("invalid bootnode %q", err) log.Error(fmt.Sprintf("invalid bootnode %q", err))
continue continue
} }
srv.AddPeer(n) srv.AddPeer(n)

@ -18,13 +18,14 @@
package main package main
import ( import (
"gopkg.in/urfave/cli.v1" "encoding/json"
"fmt"
"log" "log"
"mime" "mime"
"path/filepath" "path/filepath"
"strings" "strings"
"fmt"
"encoding/json" "gopkg.in/urfave/cli.v1"
) )
func add(ctx *cli.Context) { func add(ctx *cli.Context) {
@ -35,23 +36,22 @@ func add(ctx *cli.Context) {
} }
var ( var (
mhash = args[0] mhash = args[0]
path = args[1] path = args[1]
hash = args[2] hash = args[2]
ctype string ctype string
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
mroot manifest mroot manifest
) )
if len(args) > 3 { if len(args) > 3 {
ctype = args[3] ctype = args[3]
} else { } else {
ctype = mime.TypeByExtension(filepath.Ext(path)) ctype = mime.TypeByExtension(filepath.Ext(path))
} }
newManifest := addEntryToManifest (ctx, mhash, path, hash, ctype) newManifest := addEntryToManifest(ctx, mhash, path, hash, ctype)
fmt.Println(newManifest) fmt.Println(newManifest)
if !wantManifest { if !wantManifest {
@ -70,13 +70,13 @@ func update(ctx *cli.Context) {
} }
var ( var (
mhash = args[0] mhash = args[0]
path = args[1] path = args[1]
hash = args[2] hash = args[2]
ctype string ctype string
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
mroot manifest mroot manifest
) )
if len(args) > 3 { if len(args) > 3 {
ctype = args[3] ctype = args[3]
@ -84,7 +84,7 @@ func update(ctx *cli.Context) {
ctype = mime.TypeByExtension(filepath.Ext(path)) ctype = mime.TypeByExtension(filepath.Ext(path))
} }
newManifest := updateEntryInManifest (ctx, mhash, path, hash, ctype) newManifest := updateEntryInManifest(ctx, mhash, path, hash, ctype)
fmt.Println(newManifest) fmt.Println(newManifest)
if !wantManifest { if !wantManifest {
@ -102,14 +102,14 @@ func remove(ctx *cli.Context) {
} }
var ( var (
mhash = args[0] mhash = args[0]
path = args[1] path = args[1]
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
mroot manifest mroot manifest
) )
newManifest := removeEntryFromManifest (ctx, mhash, path) newManifest := removeEntryFromManifest(ctx, mhash, path)
fmt.Println(newManifest) fmt.Println(newManifest)
if !wantManifest { if !wantManifest {
@ -120,15 +120,15 @@ func remove(ctx *cli.Context) {
} }
} }
func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string) string { func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
var ( var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = &client{api: bzzapi} client = &client{api: bzzapi}
longestPathEntry = manifestEntry{ longestPathEntry = manifestEntry{
Path: "", Path: "",
Hash: "", Hash: "",
ContentType: "", ContentType: "",
} }
) )
@ -143,12 +143,11 @@ func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string) st
log.Fatalln("hash to add is not present:", err) log.Fatalln("hash to add is not present:", err)
} }
// See if we path is in this Manifest or do we have to dig deeper // See if we path is in this Manifest or do we have to dig deeper
for _, entry := range mroot.Entries { for _, entry := range mroot.Entries {
if path == entry.Path { if path == entry.Path {
log.Fatal(path, "Already present, not adding anything") log.Fatal(path, "Already present, not adding anything")
}else { } else {
if entry.ContentType == "application/bzz-manifest+json" { if entry.ContentType == "application/bzz-manifest+json" {
prfxlen := strings.HasPrefix(path, entry.Path) prfxlen := strings.HasPrefix(path, entry.Path)
if prfxlen && len(path) > len(longestPathEntry.Path) { if prfxlen && len(path) > len(longestPathEntry.Path) {
@ -161,7 +160,7 @@ func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string) st
if longestPathEntry.Path != "" { if longestPathEntry.Path != "" {
// Load the child Manifest add the entry there // Load the child Manifest add the entry there
newPath := path[len(longestPathEntry.Path):] newPath := path[len(longestPathEntry.Path):]
newHash := addEntryToManifest (ctx, longestPathEntry.Hash, newPath, hash, ctype) newHash := addEntryToManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
// Replace the hash for parent Manifests // Replace the hash for parent Manifests
newMRoot := manifest{} newMRoot := manifest{}
@ -182,31 +181,28 @@ func addEntryToManifest(ctx *cli.Context, mhash , path, hash , ctype string) st
mroot.Entries = append(mroot.Entries, newEntry) mroot.Entries = append(mroot.Entries, newEntry)
} }
newManifestHash, err := client.uploadManifest(mroot) newManifestHash, err := client.uploadManifest(mroot)
if err != nil { if err != nil {
log.Fatalln("manifest upload failed:", err) log.Fatalln("manifest upload failed:", err)
} }
return newManifestHash return newManifestHash
} }
func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string) string { func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) string {
var ( var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = &client{api: bzzapi} client = &client{api: bzzapi}
newEntry = manifestEntry{ newEntry = manifestEntry{
Path: "", Path: "",
Hash: "", Hash: "",
ContentType: "", ContentType: "",
} }
longestPathEntry = manifestEntry{ longestPathEntry = manifestEntry{
Path: "", Path: "",
Hash: "", Hash: "",
ContentType: "", ContentType: "",
} }
) )
@ -217,12 +213,11 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
//TODO: check if the "hash" with which to update is valid and present in swarm //TODO: check if the "hash" with which to update is valid and present in swarm
// See if we path is in this Manifest or do we have to dig deeper // See if we path is in this Manifest or do we have to dig deeper
for _, entry := range mroot.Entries { for _, entry := range mroot.Entries {
if path == entry.Path { if path == entry.Path {
newEntry = entry newEntry = entry
}else { } else {
if entry.ContentType == "application/bzz-manifest+json" { if entry.ContentType == "application/bzz-manifest+json" {
prfxlen := strings.HasPrefix(path, entry.Path) prfxlen := strings.HasPrefix(path, entry.Path)
if prfxlen && len(path) > len(longestPathEntry.Path) { if prfxlen && len(path) > len(longestPathEntry.Path) {
@ -239,7 +234,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
if longestPathEntry.Path != "" { if longestPathEntry.Path != "" {
// Load the child Manifest add the entry there // Load the child Manifest add the entry there
newPath := path[len(longestPathEntry.Path):] newPath := path[len(longestPathEntry.Path):]
newHash := updateEntryInManifest (ctx, longestPathEntry.Hash, newPath, hash, ctype) newHash := updateEntryInManifest(ctx, longestPathEntry.Hash, newPath, hash, ctype)
// Replace the hash for parent Manifests // Replace the hash for parent Manifests
newMRoot := manifest{} newMRoot := manifest{}
@ -271,7 +266,6 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
mroot = newMRoot mroot = newMRoot
} }
newManifestHash, err := client.uploadManifest(mroot) newManifestHash, err := client.uploadManifest(mroot)
if err != nil { if err != nil {
log.Fatalln("manifest upload failed:", err) log.Fatalln("manifest upload failed:", err)
@ -279,20 +273,20 @@ func updateEntryInManifest(ctx *cli.Context, mhash , path, hash , ctype string)
return newManifestHash return newManifestHash
} }
func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string { func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
var ( var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
client = &client{api: bzzapi} client = &client{api: bzzapi}
entryToRemove = manifestEntry{ entryToRemove = manifestEntry{
Path: "", Path: "",
Hash: "", Hash: "",
ContentType: "", ContentType: "",
} }
longestPathEntry = manifestEntry{ longestPathEntry = manifestEntry{
Path: "", Path: "",
Hash: "", Hash: "",
ContentType: "", ContentType: "",
} }
) )
@ -301,13 +295,11 @@ func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
log.Fatalln("manifest download failed:", err) log.Fatalln("manifest download failed:", err)
} }
// See if we path is in this Manifest or do we have to dig deeper // See if we path is in this Manifest or do we have to dig deeper
for _, entry := range mroot.Entries { for _, entry := range mroot.Entries {
if path == entry.Path { if path == entry.Path {
entryToRemove = entry entryToRemove = entry
}else { } else {
if entry.ContentType == "application/bzz-manifest+json" { if entry.ContentType == "application/bzz-manifest+json" {
prfxlen := strings.HasPrefix(path, entry.Path) prfxlen := strings.HasPrefix(path, entry.Path)
if prfxlen && len(path) > len(longestPathEntry.Path) { if prfxlen && len(path) > len(longestPathEntry.Path) {
@ -324,7 +316,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
if longestPathEntry.Path != "" { if longestPathEntry.Path != "" {
// Load the child Manifest remove the entry there // Load the child Manifest remove the entry there
newPath := path[len(longestPathEntry.Path):] newPath := path[len(longestPathEntry.Path):]
newHash := removeEntryFromManifest (ctx, longestPathEntry.Hash, newPath) newHash := removeEntryFromManifest(ctx, longestPathEntry.Hash, newPath)
// Replace the hash for parent Manifests // Replace the hash for parent Manifests
newMRoot := manifest{} newMRoot := manifest{}
@ -348,13 +340,10 @@ func removeEntryFromManifest(ctx *cli.Context, mhash , path string) string {
mroot = newMRoot mroot = newMRoot
} }
newManifestHash, err := client.uploadManifest(mroot) newManifestHash, err := client.uploadManifest(mroot)
if err != nil { if err != nil {
log.Fatalln("manifest upload failed:", err) log.Fatalln("manifest upload failed:", err)
} }
return newManifestHash return newManifestHash
} }

@ -233,7 +233,7 @@ func (c *client) postRaw(mimetype string, size int64, body io.ReadCloser) (strin
func (c *client) downloadManifest(mhash string) (manifest, error) { func (c *client) downloadManifest(mhash string) (manifest, error) {
mroot := manifest{} mroot := manifest{}
req, err := http.NewRequest("GET", c.api + "/bzzr:/" + mhash, nil) req, err := http.NewRequest("GET", c.api+"/bzzr:/"+mhash, nil)
if err != nil { if err != nil {
return mroot, err return mroot, err
} }
@ -254,4 +254,4 @@ func (c *client) downloadManifest(mhash string) (manifest, error) {
return mroot, fmt.Errorf("Manifest %v is malformed: %v", mhash, err) return mroot, fmt.Errorf("Manifest %v is malformed: %v", mhash, err)
} }
return mroot, err return mroot, err
} }

@ -31,8 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -72,19 +71,19 @@ func Fatalf(format string, args ...interface{}) {
func StartNode(stack *node.Node) { func StartNode(stack *node.Node) {
if err := stack.Start(); err != nil { if err := stack.Start(); err != nil {
Fatalf("Error starting protocol stack: %v", err) log.Crit(fmt.Sprintf("Error starting protocol stack: %v", err))
} }
go func() { go func() {
sigc := make(chan os.Signal, 1) sigc := make(chan os.Signal, 1)
signal.Notify(sigc, os.Interrupt) signal.Notify(sigc, os.Interrupt)
defer signal.Stop(sigc) defer signal.Stop(sigc)
<-sigc <-sigc
glog.V(logger.Info).Infoln("Got interrupt, shutting down...") log.Info(fmt.Sprint("Got interrupt, shutting down..."))
go stack.Stop() go stack.Stop()
for i := 10; i > 0; i-- { for i := 10; i > 0; i-- {
<-sigc <-sigc
if i > 1 { if i > 1 {
glog.V(logger.Info).Infof("Already shutting down, interrupt %d more times for panic.", i-1) log.Info(fmt.Sprintf("Already shutting down, interrupt %d more times for panic.", i-1))
} }
} }
debug.Exit() // ensure trace and CPU profile data is flushed. debug.Exit() // ensure trace and CPU profile data is flushed.
@ -115,7 +114,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
defer close(interrupt) defer close(interrupt)
go func() { go func() {
if _, ok := <-interrupt; ok { if _, ok := <-interrupt; ok {
glog.Info("caught interrupt during import, will stop at next batch") log.Info(fmt.Sprint("caught interrupt during import, will stop at next batch"))
} }
close(stop) close(stop)
}() }()
@ -128,7 +127,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
} }
} }
glog.Infoln("Importing blockchain ", fn) log.Info(fmt.Sprint("Importing blockchain ", fn))
fh, err := os.Open(fn) fh, err := os.Open(fn)
if err != nil { if err != nil {
return err return err
@ -176,8 +175,8 @@ func ImportChain(chain *core.BlockChain, fn string) error {
return fmt.Errorf("interrupted") return fmt.Errorf("interrupted")
} }
if hasAllBlocks(chain, blocks[:i]) { if hasAllBlocks(chain, blocks[:i]) {
glog.Infof("skipping batch %d, all blocks present [%x / %x]", log.Info(fmt.Sprintf("skipping batch %d, all blocks present [%x / %x]",
batch, blocks[0].Hash().Bytes()[:4], blocks[i-1].Hash().Bytes()[:4]) batch, blocks[0].Hash().Bytes()[:4], blocks[i-1].Hash().Bytes()[:4]))
continue continue
} }
@ -198,7 +197,7 @@ func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
} }
func ExportChain(blockchain *core.BlockChain, fn string) error { func ExportChain(blockchain *core.BlockChain, fn string) error {
glog.Infoln("Exporting blockchain to ", fn) log.Info(fmt.Sprint("Exporting blockchain to ", fn))
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil { if err != nil {
return err return err
@ -214,13 +213,13 @@ func ExportChain(blockchain *core.BlockChain, fn string) error {
if err := blockchain.Export(writer); err != nil { if err := blockchain.Export(writer); err != nil {
return err return err
} }
glog.Infoln("Exported blockchain to ", fn) log.Info(fmt.Sprint("Exported blockchain to ", fn))
return nil return nil
} }
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error { func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
glog.Infoln("Exporting blockchain to ", fn) log.Info(fmt.Sprint("Exporting blockchain to ", fn))
// TODO verify mode perms // TODO verify mode perms
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
if err != nil { if err != nil {
@ -237,6 +236,6 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
if err := blockchain.ExportN(writer, first, last); err != nil { if err := blockchain.ExportN(writer, first, last); err != nil {
return err return err
} }
glog.Infoln("Exported blockchain to ", fn) log.Info(fmt.Sprint("Exported blockchain to ", fn))
return nil return nil
} }

@ -41,8 +41,7 @@ import (
"github.com/ethereum/go-ethereum/ethstats" "github.com/ethereum/go-ethereum/ethstats"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
@ -426,7 +425,7 @@ func MakeDataDir(ctx *cli.Context) string {
} }
return path return path
} }
Fatalf("Cannot determine default data directory, please set manually (--datadir)") log.Crit(fmt.Sprintf("Cannot determine default data directory, please set manually (--datadir)"))
return "" return ""
} }
@ -452,16 +451,16 @@ func MakeNodeKey(ctx *cli.Context) *ecdsa.PrivateKey {
) )
switch { switch {
case file != "" && hex != "": case file != "" && hex != "":
Fatalf("Options %q and %q are mutually exclusive", NodeKeyFileFlag.Name, NodeKeyHexFlag.Name) log.Crit(fmt.Sprintf("Options %q and %q are mutually exclusive", NodeKeyFileFlag.Name, NodeKeyHexFlag.Name))
case file != "": case file != "":
if key, err = crypto.LoadECDSA(file); err != nil { if key, err = crypto.LoadECDSA(file); err != nil {
Fatalf("Option %q: %v", NodeKeyFileFlag.Name, err) log.Crit(fmt.Sprintf("Option %q: %v", NodeKeyFileFlag.Name, err))
} }
case hex != "": case hex != "":
if key, err = crypto.HexToECDSA(hex); err != nil { if key, err = crypto.HexToECDSA(hex); err != nil {
Fatalf("Option %q: %v", NodeKeyHexFlag.Name, err) log.Crit(fmt.Sprintf("Option %q: %v", NodeKeyHexFlag.Name, err))
} }
} }
return key return key
@ -493,7 +492,7 @@ func MakeBootstrapNodes(ctx *cli.Context) []*discover.Node {
for _, url := range urls { for _, url := range urls {
node, err := discover.ParseNode(url) node, err := discover.ParseNode(url)
if err != nil { if err != nil {
glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err) log.Error(fmt.Sprintf("Bootstrap URL %s: %v\n", url, err))
continue continue
} }
bootnodes = append(bootnodes, node) bootnodes = append(bootnodes, node)
@ -513,7 +512,7 @@ func MakeBootstrapNodesV5(ctx *cli.Context) []*discv5.Node {
for _, url := range urls { for _, url := range urls {
node, err := discv5.ParseNode(url) node, err := discv5.ParseNode(url)
if err != nil { if err != nil {
glog.V(logger.Error).Infof("Bootstrap URL %s: %v\n", url, err) log.Error(fmt.Sprintf("Bootstrap URL %s: %v\n", url, err))
continue continue
} }
bootnodes = append(bootnodes, node) bootnodes = append(bootnodes, node)
@ -537,7 +536,7 @@ func MakeDiscoveryV5Address(ctx *cli.Context) string {
func MakeNAT(ctx *cli.Context) nat.Interface { func MakeNAT(ctx *cli.Context) nat.Interface {
natif, err := nat.Parse(ctx.GlobalString(NATFlag.Name)) natif, err := nat.Parse(ctx.GlobalString(NATFlag.Name))
if err != nil { if err != nil {
Fatalf("Option %s: %v", NATFlag.Name, err) log.Crit(fmt.Sprintf("Option %s: %v", NATFlag.Name, err))
} }
return natif return natif
} }
@ -574,11 +573,11 @@ func MakeWSRpcHost(ctx *cli.Context) string {
// for Geth and returns half of the allowance to assign to the database. // for Geth and returns half of the allowance to assign to the database.
func MakeDatabaseHandles() int { func MakeDatabaseHandles() int {
if err := raiseFdLimit(2048); err != nil { if err := raiseFdLimit(2048); err != nil {
Fatalf("Failed to raise file descriptor allowance: %v", err) log.Crit(fmt.Sprintf("Failed to raise file descriptor allowance: %v", err))
} }
limit, err := getFdLimit() limit, err := getFdLimit()
if err != nil { if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err) log.Crit(fmt.Sprintf("Failed to retrieve file descriptor allowance: %v", err))
} }
if limit > 2048 { // cap database file descriptors even if more is available if limit > 2048 { // cap database file descriptors even if more is available
limit = 2048 limit = 2048
@ -610,7 +609,7 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error
func MakeEtherbase(ks *keystore.KeyStore, ctx *cli.Context) common.Address { func MakeEtherbase(ks *keystore.KeyStore, ctx *cli.Context) common.Address {
accounts := ks.Accounts() accounts := ks.Accounts()
if !ctx.GlobalIsSet(EtherbaseFlag.Name) && len(accounts) == 0 { if !ctx.GlobalIsSet(EtherbaseFlag.Name) && len(accounts) == 0 {
glog.V(logger.Error).Infoln("WARNING: No etherbase set and no accounts found as default") log.Error(fmt.Sprint("WARNING: No etherbase set and no accounts found as default"))
return common.Address{} return common.Address{}
} }
etherbase := ctx.GlobalString(EtherbaseFlag.Name) etherbase := ctx.GlobalString(EtherbaseFlag.Name)
@ -620,7 +619,7 @@ func MakeEtherbase(ks *keystore.KeyStore, ctx *cli.Context) common.Address {
// If the specified etherbase is a valid address, return it // If the specified etherbase is a valid address, return it
account, err := MakeAddress(ks, etherbase) account, err := MakeAddress(ks, etherbase)
if err != nil { if err != nil {
Fatalf("Option %q: %v", EtherbaseFlag.Name, err) log.Crit(fmt.Sprintf("Option %q: %v", EtherbaseFlag.Name, err))
} }
return account.Address return account.Address
} }
@ -642,7 +641,7 @@ func MakePasswordList(ctx *cli.Context) []string {
} }
text, err := ioutil.ReadFile(path) text, err := ioutil.ReadFile(path)
if err != nil { if err != nil {
Fatalf("Failed to read password file: %v", err) log.Crit(fmt.Sprintf("Failed to read password file: %v", err))
} }
lines := strings.Split(string(text), "\n") lines := strings.Split(string(text), "\n")
// Sanitise DOS line endings. // Sanitise DOS line endings.
@ -701,14 +700,14 @@ func MakeNode(ctx *cli.Context, name, gitCommit string) *node.Node {
if netrestrict := ctx.GlobalString(NetrestrictFlag.Name); netrestrict != "" { if netrestrict := ctx.GlobalString(NetrestrictFlag.Name); netrestrict != "" {
list, err := netutil.ParseNetlist(netrestrict) list, err := netutil.ParseNetlist(netrestrict)
if err != nil { if err != nil {
Fatalf("Option %q: %v", NetrestrictFlag.Name, err) log.Crit(fmt.Sprintf("Option %q: %v", NetrestrictFlag.Name, err))
} }
config.NetRestrict = list config.NetRestrict = list
} }
stack, err := node.New(config) stack, err := node.New(config)
if err != nil { if err != nil {
Fatalf("Failed to create the protocol stack: %v", err) log.Crit(fmt.Sprintf("Failed to create the protocol stack: %v", err))
} }
return stack return stack
} }
@ -724,7 +723,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
} }
} }
if networks > 1 { if networks > 1 {
Fatalf("The %v flags are mutually exclusive", netFlags) log.Crit(fmt.Sprintf("The %v flags are mutually exclusive", netFlags))
} }
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
@ -778,7 +777,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
return les.New(ctx, ethConf) return les.New(ctx, ethConf)
}); err != nil { }); err != nil {
Fatalf("Failed to register the Ethereum light node service: %v", err) log.Crit(fmt.Sprintf("Failed to register the Ethereum light node service: %v", err))
} }
} else { } else {
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) { if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
@ -789,7 +788,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
} }
return fullNode, err return fullNode, err
}); err != nil { }); err != nil {
Fatalf("Failed to register the Ethereum full node service: %v", err) log.Crit(fmt.Sprintf("Failed to register the Ethereum full node service: %v", err))
} }
} }
} }
@ -797,7 +796,7 @@ func RegisterEthService(ctx *cli.Context, stack *node.Node, extra []byte) {
// RegisterShhService configures Whisper and adds it to the given node. // RegisterShhService configures Whisper and adds it to the given node.
func RegisterShhService(stack *node.Node) { func RegisterShhService(stack *node.Node) {
if err := stack.Register(func(*node.ServiceContext) (node.Service, error) { return whisper.New(), nil }); err != nil { if err := stack.Register(func(*node.ServiceContext) (node.Service, error) { return whisper.New(), nil }); err != nil {
Fatalf("Failed to register the Whisper service: %v", err) log.Crit(fmt.Sprintf("Failed to register the Whisper service: %v", err))
} }
} }
@ -814,7 +813,7 @@ func RegisterEthStatsService(stack *node.Node, url string) {
return ethstats.New(url, ethServ, lesServ) return ethstats.New(url, ethServ, lesServ)
}); err != nil { }); err != nil {
Fatalf("Failed to register the Ethereum Stats service: %v", err) log.Crit(fmt.Sprintf("Failed to register the Ethereum Stats service: %v", err))
} }
} }
@ -845,7 +844,7 @@ func MakeChainConfigFromDb(ctx *cli.Context, db ethdb.Database) *params.ChainCon
case core.ChainConfigNotFoundErr: case core.ChainConfigNotFoundErr:
// No configs found, use empty, will populate below // No configs found, use empty, will populate below
default: default:
Fatalf("Could not make chain configuration: %v", err) log.Crit(fmt.Sprintf("Could not make chain configuration: %v", err))
} }
} }
// set chain id in case it's zero. // set chain id in case it's zero.
@ -900,7 +899,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
chainDb, err := stack.OpenDatabase(name, cache, handles) chainDb, err := stack.OpenDatabase(name, cache, handles)
if err != nil { if err != nil {
Fatalf("Could not open database: %v", err) log.Crit(fmt.Sprintf("Could not open database: %v", err))
} }
return chainDb return chainDb
} }
@ -913,7 +912,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
if ctx.GlobalBool(TestNetFlag.Name) { if ctx.GlobalBool(TestNetFlag.Name) {
_, err := core.WriteTestNetGenesisBlock(chainDb) _, err := core.WriteTestNetGenesisBlock(chainDb)
if err != nil { if err != nil {
glog.Fatalln(err) log.Crit(fmt.Sprint(err))
} }
} }
@ -925,7 +924,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
} }
chain, err = core.NewBlockChain(chainDb, chainConfig, pow, new(event.TypeMux), vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}) chain, err = core.NewBlockChain(chainDb, chainConfig, pow, new(event.TypeMux), vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)})
if err != nil { if err != nil {
Fatalf("Could not start chainmanager: %v", err) log.Crit(fmt.Sprintf("Could not start chainmanager: %v", err))
} }
return chain, chainDb return chain, chainDb
} }

@ -36,8 +36,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
@ -82,7 +81,7 @@ var (
testMode = flag.Bool("t", false, "use of predefined parameters for diagnostics") testMode = flag.Bool("t", false, "use of predefined parameters for diagnostics")
generateKey = flag.Bool("k", false, "generate and show the private key") generateKey = flag.Bool("k", false, "generate and show the private key")
argVerbosity = flag.Int("verbosity", logger.Warn, "log verbosity level") argVerbosity = flag.Int("verbosity", int(log.LvlWarn), "log verbosity level")
argTTL = flag.Uint("ttl", 30, "time-to-live for messages in seconds") argTTL = flag.Uint("ttl", 30, "time-to-live for messages in seconds")
argWorkTime = flag.Uint("work", 5, "work time in seconds") argWorkTime = flag.Uint("work", 5, "work time in seconds")
argPoW = flag.Float64("pow", whisper.MinimumPoW, "PoW for normal messages in float format (e.g. 2.7)") argPoW = flag.Float64("pow", whisper.MinimumPoW, "PoW for normal messages in float format (e.g. 2.7)")
@ -109,7 +108,7 @@ func processArgs() {
var err error var err error
nodeid, err = crypto.LoadECDSA(*argIDFile) nodeid, err = crypto.LoadECDSA(*argIDFile)
if err != nil { if err != nil {
utils.Fatalf("Failed to load file [%s]: %s.", *argIDFile, err) log.Crit(fmt.Sprintf("Failed to load file [%s]: %s.", *argIDFile, err))
} }
} }
@ -123,7 +122,7 @@ func processArgs() {
if len(*argTopic) > 0 { if len(*argTopic) > 0 {
x, err := hex.DecodeString(*argTopic) x, err := hex.DecodeString(*argTopic)
if err != nil { if err != nil {
utils.Fatalf("Failed to parse the topic: %s", err) log.Crit(fmt.Sprintf("Failed to parse the topic: %s", err))
} }
topic = whisper.BytesToTopic(x) topic = whisper.BytesToTopic(x)
} }
@ -131,7 +130,7 @@ func processArgs() {
if *asymmetricMode && len(*argPub) > 0 { if *asymmetricMode && len(*argPub) > 0 {
pub = crypto.ToECDSAPub(common.FromHex(*argPub)) pub = crypto.ToECDSAPub(common.FromHex(*argPub))
if !isKeyValid(pub) { if !isKeyValid(pub) {
utils.Fatalf("invalid public key") log.Crit(fmt.Sprintf("invalid public key"))
} }
} }
@ -153,8 +152,7 @@ func echo() {
} }
func initialize() { func initialize() {
glog.SetV(*argVerbosity) log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*argVerbosity), log.StreamHandler(os.Stderr, log.TerminalFormat())))
glog.SetToStderr(true)
done = make(chan struct{}) done = make(chan struct{})
var peers []*discover.Node var peers []*discover.Node
@ -163,7 +161,7 @@ func initialize() {
if *generateKey { if *generateKey {
key, err := crypto.GenerateKey() key, err := crypto.GenerateKey()
if err != nil { if err != nil {
utils.Fatalf("Failed to generate private key: %s", err) log.Crit(fmt.Sprintf("Failed to generate private key: %s", err))
} }
k := hex.EncodeToString(crypto.FromECDSA(key)) k := hex.EncodeToString(crypto.FromECDSA(key))
fmt.Printf("Random private key: %s \n", k) fmt.Printf("Random private key: %s \n", k)
@ -191,7 +189,7 @@ func initialize() {
if len(msPassword) == 0 { if len(msPassword) == 0 {
msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ") msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ")
if err != nil { if err != nil {
utils.Fatalf("Failed to read Mail Server password: %s", err) log.Crit(fmt.Sprintf("Failed to read Mail Server password: %s", err))
} }
} }
shh = whisper.New() shh = whisper.New()
@ -229,7 +227,7 @@ func initialize() {
func startServer() { func startServer() {
err := server.Start() err := server.Start()
if err != nil { if err != nil {
utils.Fatalf("Failed to start Whisper peer: %s.", err) log.Crit(fmt.Sprintf("Failed to start Whisper peer: %s.", err))
} }
fmt.Printf("my public key: %s \n", common.ToHex(crypto.FromECDSAPub(&asymKey.PublicKey))) fmt.Printf("my public key: %s \n", common.ToHex(crypto.FromECDSAPub(&asymKey.PublicKey)))
@ -267,7 +265,7 @@ func configureNode() {
s := scanLine("Please enter the peer's public key: ") s := scanLine("Please enter the peer's public key: ")
pub = crypto.ToECDSAPub(common.FromHex(s)) pub = crypto.ToECDSAPub(common.FromHex(s))
if !isKeyValid(pub) { if !isKeyValid(pub) {
utils.Fatalf("Error: invalid public key") log.Crit(fmt.Sprintf("Error: invalid public key"))
} }
} }
} }
@ -277,7 +275,7 @@ func configureNode() {
if len(msPassword) == 0 { if len(msPassword) == 0 {
msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ") msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ")
if err != nil { if err != nil {
utils.Fatalf("Failed to read Mail Server password: %s", err) log.Crit(fmt.Sprintf("Failed to read Mail Server password: %s", err))
} }
} }
} }
@ -286,7 +284,7 @@ func configureNode() {
if len(symPass) == 0 { if len(symPass) == 0 {
symPass, err = console.Stdin.PromptPassword("Please enter the password: ") symPass, err = console.Stdin.PromptPassword("Please enter the password: ")
if err != nil { if err != nil {
utils.Fatalf("Failed to read passphrase: %v", err) log.Crit(fmt.Sprintf("Failed to read passphrase: %v", err))
} }
} }
@ -332,7 +330,7 @@ func waitForConnection(timeout bool) {
if timeout { if timeout {
cnt++ cnt++
if cnt > 1000 { if cnt > 1000 {
utils.Fatalf("Timeout expired, failed to connect") log.Crit(fmt.Sprintf("Timeout expired, failed to connect"))
} }
} }
} }
@ -384,7 +382,7 @@ func scanLine(prompt string) string {
} }
txt, err := input.ReadString('\n') txt, err := input.ReadString('\n')
if err != nil { if err != nil {
utils.Fatalf("input error: %s", err) log.Crit(fmt.Sprintf("input error: %s", err))
} }
txt = strings.TrimRight(txt, "\n\r") txt = strings.TrimRight(txt, "\n\r")
return txt return txt
@ -399,7 +397,7 @@ func scanUint(prompt string) uint32 {
s := scanLine(prompt) s := scanLine(prompt)
i, err := strconv.Atoi(s) i, err := strconv.Atoi(s)
if err != nil { if err != nil {
utils.Fatalf("Fail to parse the lower time limit: %s", err) log.Crit(fmt.Sprintf("Fail to parse the lower time limit: %s", err))
} }
return uint32(i) return uint32(i)
} }
@ -432,7 +430,7 @@ func sendMsg(payload []byte) {
func messageLoop() { func messageLoop() {
f := shh.GetFilter(filterID) f := shh.GetFilter(filterID)
if f == nil { if f == nil {
utils.Fatalf("filter is not installed") log.Crit(fmt.Sprintf("filter is not installed"))
} }
ticker := time.NewTicker(time.Millisecond * 50) ticker := time.NewTicker(time.Millisecond * 50)
@ -474,7 +472,7 @@ func requestExpiredMessagesLoop() {
err := shh.AddSymKey(mailserver.MailServerKeyName, []byte(msPassword)) err := shh.AddSymKey(mailserver.MailServerKeyName, []byte(msPassword))
if err != nil { if err != nil {
utils.Fatalf("Failed to create symmetric key for mail request: %s", err) log.Crit(fmt.Sprintf("Failed to create symmetric key for mail request: %s", err))
} }
key = shh.GetSymKey(mailserver.MailServerKeyName) key = shh.GetSymKey(mailserver.MailServerKeyName)
peerID = extractIdFromEnode(*argEnode) peerID = extractIdFromEnode(*argEnode)
@ -487,7 +485,7 @@ func requestExpiredMessagesLoop() {
if len(t) >= whisper.TopicLength*2 { if len(t) >= whisper.TopicLength*2 {
x, err := hex.DecodeString(t) x, err := hex.DecodeString(t)
if err != nil { if err != nil {
utils.Fatalf("Failed to parse the topic: %s", err) log.Crit(fmt.Sprintf("Failed to parse the topic: %s", err))
} }
xt = whisper.BytesToTopic(x) xt = whisper.BytesToTopic(x)
} }
@ -513,12 +511,12 @@ func requestExpiredMessagesLoop() {
msg := whisper.NewSentMessage(&params) msg := whisper.NewSentMessage(&params)
env, err := msg.Wrap(&params) env, err := msg.Wrap(&params)
if err != nil { if err != nil {
utils.Fatalf("Wrap failed: %s", err) log.Crit(fmt.Sprintf("Wrap failed: %s", err))
} }
err = shh.RequestHistoricMessages(peerID, env) err = shh.RequestHistoricMessages(peerID, env)
if err != nil { if err != nil {
utils.Fatalf("Failed to send P2P message: %s", err) log.Crit(fmt.Sprintf("Failed to send P2P message: %s", err))
} }
time.Sleep(time.Second * 5) time.Sleep(time.Second * 5)
@ -528,7 +526,7 @@ func requestExpiredMessagesLoop() {
func extractIdFromEnode(s string) []byte { func extractIdFromEnode(s string) []byte {
n, err := discover.ParseNode(s) n, err := discover.ParseNode(s)
if err != nil { if err != nil {
utils.Fatalf("Failed to parse enode: %s", err) log.Crit(fmt.Sprintf("Failed to parse enode: %s", err))
return nil return nil
} }
return n.ID[:] return n.ID[:]

@ -22,8 +22,7 @@ import (
"io" "io"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/robertkrimen/otto" "github.com/robertkrimen/otto"
) )
@ -306,7 +305,7 @@ func setError(resp *otto.Object, code int, msg string) {
func throwJSException(msg interface{}) otto.Value { func throwJSException(msg interface{}) otto.Value {
val, err := otto.ToValue(msg) val, err := otto.ToValue(msg)
if err != nil { if err != nil {
glog.V(logger.Error).Infof("Failed to serialize JavaScript exception %v: %v", msg, err) log.Error(fmt.Sprintf("Failed to serialize JavaScript exception %v: %v", msg, err))
} }
panic(val) panic(val)
} }

@ -40,8 +40,7 @@ import (
"github.com/ethereum/go-ethereum/contracts/chequebook/contract" "github.com/ethereum/go-ethereum/contracts/chequebook/contract"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/swarm/services/swap/swap" "github.com/ethereum/go-ethereum/swarm/services/swap/swap"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -140,7 +139,7 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
if (contractAddr != common.Address{}) { if (contractAddr != common.Address{}) {
self.setBalanceFromBlockChain() self.setBalanceFromBlockChain()
glog.V(logger.Detail).Infof("new chequebook initialised from %s (owner: %v, balance: %s)", contractAddr.Hex(), self.owner.Hex(), self.balance.String()) log.Trace(fmt.Sprintf("new chequebook initialised from %s (owner: %v, balance: %s)", contractAddr.Hex(), self.owner.Hex(), self.balance.String()))
} }
return return
} }
@ -148,7 +147,7 @@ func NewChequebook(path string, contractAddr common.Address, prvKey *ecdsa.Priva
func (self *Chequebook) setBalanceFromBlockChain() { func (self *Chequebook) setBalanceFromBlockChain() {
balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil) balance, err := self.backend.BalanceAt(context.TODO(), self.contractAddr, nil)
if err != nil { if err != nil {
glog.V(logger.Error).Infof("can't get balance: %v", err) log.Error(fmt.Sprintf("can't get balance: %v", err))
} else { } else {
self.balance.Set(balance) self.balance.Set(balance)
} }
@ -172,7 +171,7 @@ func LoadChequebook(path string, prvKey *ecdsa.PrivateKey, backend Backend, chec
self.setBalanceFromBlockChain() self.setBalanceFromBlockChain()
} }
glog.V(logger.Detail).Infof("loaded chequebook (%s, owner: %v, balance: %v) initialised from %v", self.contractAddr.Hex(), self.owner.Hex(), self.balance, path) log.Trace(fmt.Sprintf("loaded chequebook (%s, owner: %v, balance: %v) initialised from %v", self.contractAddr.Hex(), self.owner.Hex(), self.balance, path))
return return
} }
@ -227,7 +226,7 @@ func (self *Chequebook) Save() (err error) {
if err != nil { if err != nil {
return err return err
} }
glog.V(logger.Detail).Infof("saving chequebook (%s) to %v", self.contractAddr.Hex(), self.path) log.Trace(fmt.Sprintf("saving chequebook (%s) to %v", self.contractAddr.Hex(), self.path))
return ioutil.WriteFile(self.path, data, os.ModePerm) return ioutil.WriteFile(self.path, data, os.ModePerm)
} }
@ -340,12 +339,12 @@ func (self *Chequebook) deposit(amount *big.Int) (string, error) {
chbookRaw := &contract.ChequebookRaw{Contract: self.contract} chbookRaw := &contract.ChequebookRaw{Contract: self.contract}
tx, err := chbookRaw.Transfer(depositTransactor) tx, err := chbookRaw.Transfer(depositTransactor)
if err != nil { if err != nil {
glog.V(logger.Warn).Infof("error depositing %d wei to chequebook (%s, balance: %v, target: %v): %v", amount, self.contractAddr.Hex(), self.balance, self.buffer, err) log.Warn(fmt.Sprintf("error depositing %d wei to chequebook (%s, balance: %v, target: %v): %v", amount, self.contractAddr.Hex(), self.balance, self.buffer, err))
return "", err return "", err
} }
// assume that transaction is actually successful, we add the amount to balance right away // assume that transaction is actually successful, we add the amount to balance right away
self.balance.Add(self.balance, amount) self.balance.Add(self.balance, amount)
glog.V(logger.Detail).Infof("deposited %d wei to chequebook (%s, balance: %v, target: %v)", amount, self.contractAddr.Hex(), self.balance, self.buffer) log.Trace(fmt.Sprintf("deposited %d wei to chequebook (%s, balance: %v, target: %v)", amount, self.contractAddr.Hex(), self.balance, self.buffer))
return tx.Hash().Hex(), nil return tx.Hash().Hex(), nil
} }
@ -469,7 +468,7 @@ func NewInbox(prvKey *ecdsa.PrivateKey, contractAddr, beneficiary common.Address
session: session, session: session,
cashed: new(big.Int).Set(common.Big0), cashed: new(big.Int).Set(common.Big0),
} }
glog.V(logger.Detail).Infof("initialised inbox (%s -> %s) expected signer: %x", self.contract.Hex(), self.beneficiary.Hex(), crypto.FromECDSAPub(signer)) log.Trace(fmt.Sprintf("initialised inbox (%s -> %s) expected signer: %x", self.contract.Hex(), self.beneficiary.Hex(), crypto.FromECDSAPub(signer)))
return return
} }
@ -491,7 +490,7 @@ func (self *Inbox) Stop() {
func (self *Inbox) Cash() (txhash string, err error) { func (self *Inbox) Cash() (txhash string, err error) {
if self.cheque != nil { if self.cheque != nil {
txhash, err = self.cheque.Cash(self.session) txhash, err = self.cheque.Cash(self.session)
glog.V(logger.Detail).Infof("cashing cheque (total: %v) on chequebook (%s) sending to %v", self.cheque.Amount, self.contract.Hex(), self.beneficiary.Hex()) log.Trace(fmt.Sprintf("cashing cheque (total: %v) on chequebook (%s) sending to %v", self.cheque.Amount, self.contract.Hex(), self.beneficiary.Hex()))
self.cashed = self.cheque.Amount self.cashed = self.cheque.Amount
} }
return return
@ -575,7 +574,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
self.Cash() self.Cash()
} }
} }
glog.V(logger.Detail).Infof("received cheque of %v wei in inbox (%s, uncashed: %v)", amount, self.contract.Hex(), uncashed) log.Trace(fmt.Sprintf("received cheque of %v wei in inbox (%s, uncashed: %v)", amount, self.contract.Hex(), uncashed))
} }
return amount, err return amount, err
@ -583,7 +582,7 @@ func (self *Inbox) Receive(promise swap.Promise) (*big.Int, error) {
// Verify verifies cheque for signer, contract, beneficiary, amount, valid signature. // Verify verifies cheque for signer, contract, beneficiary, amount, valid signature.
func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) { func (self *Cheque) Verify(signerKey *ecdsa.PublicKey, contract, beneficiary common.Address, sum *big.Int) (*big.Int, error) {
glog.V(logger.Detail).Infof("verify cheque: %v - sum: %v", self, sum) log.Trace(fmt.Sprintf("verify cheque: %v - sum: %v", self, sum))
if sum == nil { if sum == nil {
return nil, fmt.Errorf("invalid amount") return nil, fmt.Errorf("invalid amount")
} }

@ -29,8 +29,7 @@ import (
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
@ -128,10 +127,10 @@ func (r *ReleaseService) checker() {
version, err := r.oracle.CurrentVersion(opts) version, err := r.oracle.CurrentVersion(opts)
if err != nil { if err != nil {
if err == bind.ErrNoCode { if err == bind.ErrNoCode {
glog.V(logger.Debug).Infof("Release oracle not found at %x", r.config.Oracle) log.Debug(fmt.Sprintf("Release oracle not found at %x", r.config.Oracle))
continue continue
} }
glog.V(logger.Error).Infof("Failed to retrieve current release: %v", err) log.Error(fmt.Sprintf("Failed to retrieve current release: %v", err))
continue continue
} }
// Version was successfully retrieved, notify if newer than ours // Version was successfully retrieved, notify if newer than ours
@ -144,13 +143,13 @@ func (r *ReleaseService) checker() {
howtofix := fmt.Sprintf("Please check https://github.com/ethereum/go-ethereum/releases for new releases") howtofix := fmt.Sprintf("Please check https://github.com/ethereum/go-ethereum/releases for new releases")
separator := strings.Repeat("-", len(warning)) separator := strings.Repeat("-", len(warning))
glog.V(logger.Warn).Info(separator) log.Warn(fmt.Sprint(separator))
glog.V(logger.Warn).Info(warning) log.Warn(fmt.Sprint(warning))
glog.V(logger.Warn).Info(howtofix) log.Warn(fmt.Sprint(howtofix))
glog.V(logger.Warn).Info(separator) log.Warn(fmt.Sprint(separator))
} else { } else {
glog.V(logger.Debug).Infof("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x", log.Debug(fmt.Sprintf("Client v%d.%d.%d-%x seems up to date with upstream v%d.%d.%d-%x",
r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4]) r.config.Major, r.config.Minor, r.config.Patch, r.config.Commit[:4], version.Major, version.Minor, version.Patch, version.Commit[:4]))
} }
// If termination was requested, return // If termination was requested, return

@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/pow"
"gopkg.in/fatih/set.v0" "gopkg.in/fatih/set.v0"
@ -169,7 +169,7 @@ func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
for h := range ancestors { for h := range ancestors {
branch += fmt.Sprintf(" O - %x\n |\n", h) branch += fmt.Sprintf(" O - %x\n |\n", h)
} }
glog.Infoln(branch) log.Info(fmt.Sprint(branch))
return UncleError("uncle[%d](%x) is ancestor", i, hash[:4]) return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
} }

@ -36,8 +36,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/pow"
@ -161,9 +160,9 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P
headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64())
// make sure the headerByNumber (if present) is in our current canonical chain // make sure the headerByNumber (if present) is in our current canonical chain
if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]) log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]))
bc.SetHead(header.Number.Uint64() - 1) bc.SetHead(header.Number.Uint64() - 1)
glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation") log.Error(fmt.Sprint("Chain rewind was successful, resuming normal operation"))
} }
} }
} }
@ -220,9 +219,9 @@ func (self *BlockChain) loadLastState() error {
blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64()) blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64())
fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()) fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64())
glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd) log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd))
glog.V(logger.Info).Infof("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd) log.Info(fmt.Sprintf("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd))
glog.V(logger.Info).Infof("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd) log.Info(fmt.Sprintf("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd))
return nil return nil
} }
@ -263,10 +262,10 @@ func (bc *BlockChain) SetHead(head uint64) {
} }
if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil { if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil {
glog.Fatalf("failed to reset head block hash: %v", err) log.Crit(fmt.Sprintf("failed to reset head block hash: %v", err))
} }
if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil { if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil {
glog.Fatalf("failed to reset head fast block hash: %v", err) log.Crit(fmt.Sprintf("failed to reset head fast block hash: %v", err))
} }
bc.loadLastState() bc.loadLastState()
} }
@ -287,7 +286,7 @@ func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error {
self.currentBlock = block self.currentBlock = block
self.mu.Unlock() self.mu.Unlock()
glog.V(logger.Info).Infof("committed block #%d [%x…] as new head", block.Number(), hash[:4]) log.Info(fmt.Sprintf("committed block #%d [%x…] as new head", block.Number(), hash[:4]))
return nil return nil
} }
@ -391,10 +390,10 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) {
// Prepare the genesis block and reinitialise the chain // Prepare the genesis block and reinitialise the chain
if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
glog.Fatalf("failed to write genesis block TD: %v", err) log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err))
} }
if err := WriteBlock(bc.chainDb, genesis); err != nil { if err := WriteBlock(bc.chainDb, genesis); err != nil {
glog.Fatalf("failed to write genesis block: %v", err) log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
} }
bc.genesisBlock = genesis bc.genesisBlock = genesis
bc.insert(bc.genesisBlock) bc.insert(bc.genesisBlock)
@ -418,7 +417,7 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
} }
glog.V(logger.Info).Infof("exporting %d blocks...\n", last-first+1) log.Info(fmt.Sprintf("exporting %d blocks...\n", last-first+1))
for nr := first; nr <= last; nr++ { for nr := first; nr <= last; nr++ {
block := self.GetBlockByNumber(nr) block := self.GetBlockByNumber(nr)
@ -446,10 +445,10 @@ func (bc *BlockChain) insert(block *types.Block) {
// Add the block to the canonical chain number scheme and mark as the head // Add the block to the canonical chain number scheme and mark as the head
if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil {
glog.Fatalf("failed to insert block number: %v", err) log.Crit(fmt.Sprintf("failed to insert block number: %v", err))
} }
if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil { if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to insert head block hash: %v", err) log.Crit(fmt.Sprintf("failed to insert head block hash: %v", err))
} }
bc.currentBlock = block bc.currentBlock = block
@ -458,7 +457,7 @@ func (bc *BlockChain) insert(block *types.Block) {
bc.hc.SetCurrentHeader(block.Header()) bc.hc.SetCurrentHeader(block.Header())
if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil { if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil {
glog.Fatalf("failed to insert head fast block hash: %v", err) log.Crit(fmt.Sprintf("failed to insert head fast block hash: %v", err))
} }
bc.currentFastBlock = block bc.currentFastBlock = block
} }
@ -590,7 +589,7 @@ func (bc *BlockChain) Stop() {
bc.wg.Wait() bc.wg.Wait()
glog.V(logger.Info).Infoln("Chain manager stopped") log.Info(fmt.Sprint("Chain manager stopped"))
} }
func (self *BlockChain) procFutureBlocks() { func (self *BlockChain) procFutureBlocks() {
@ -687,7 +686,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(),
blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4])
glog.V(logger.Error).Info(failure.Error()) log.Error(fmt.Sprint(failure.Error()))
return 0, failure return 0, failure
} }
} }
@ -735,31 +734,31 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil { if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil {
errs[index] = fmt.Errorf("failed to write block body: %v", err) errs[index] = fmt.Errorf("failed to write block body: %v", err)
atomic.AddInt32(&failed, 1) atomic.AddInt32(&failed, 1)
glog.Fatal(errs[index]) log.Crit(fmt.Sprint(errs[index]))
return return
} }
if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil { if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write block receipts: %v", err) errs[index] = fmt.Errorf("failed to write block receipts: %v", err)
atomic.AddInt32(&failed, 1) atomic.AddInt32(&failed, 1)
glog.Fatal(errs[index]) log.Crit(fmt.Sprint(errs[index]))
return return
} }
if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil { if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil {
errs[index] = fmt.Errorf("failed to write log blooms: %v", err) errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
atomic.AddInt32(&failed, 1) atomic.AddInt32(&failed, 1)
glog.Fatal(errs[index]) log.Crit(fmt.Sprint(errs[index]))
return return
} }
if err := WriteTransactions(self.chainDb, block); err != nil { if err := WriteTransactions(self.chainDb, block); err != nil {
errs[index] = fmt.Errorf("failed to write individual transactions: %v", err) errs[index] = fmt.Errorf("failed to write individual transactions: %v", err)
atomic.AddInt32(&failed, 1) atomic.AddInt32(&failed, 1)
glog.Fatal(errs[index]) log.Crit(fmt.Sprint(errs[index]))
return return
} }
if err := WriteReceipts(self.chainDb, receipts); err != nil { if err := WriteReceipts(self.chainDb, receipts); err != nil {
errs[index] = fmt.Errorf("failed to write individual receipts: %v", err) errs[index] = fmt.Errorf("failed to write individual receipts: %v", err)
atomic.AddInt32(&failed, 1) atomic.AddInt32(&failed, 1)
glog.Fatal(errs[index]) log.Crit(fmt.Sprint(errs[index]))
return return
} }
atomic.AddInt32(&stats.processed, 1) atomic.AddInt32(&stats.processed, 1)
@ -785,7 +784,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
} }
} }
if atomic.LoadInt32(&self.procInterrupt) == 1 { if atomic.LoadInt32(&self.procInterrupt) == 1 {
glog.V(logger.Debug).Infoln("premature abort during receipt chain processing") log.Debug(fmt.Sprint("premature abort during receipt chain processing"))
return 0, nil return 0, nil
} }
// Update the head fast sync block if better // Update the head fast sync block if better
@ -793,7 +792,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
head := blockChain[len(errs)-1] head := blockChain[len(errs)-1]
if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 { if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 {
if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil { if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil {
glog.Fatalf("failed to update head fast block hash: %v", err) log.Crit(fmt.Sprintf("failed to update head fast block hash: %v", err))
} }
self.currentFastBlock = head self.currentFastBlock = head
} }
@ -806,7 +805,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain
if stats.ignored > 0 { if stats.ignored > 0 {
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored) ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
} }
glog.V(logger.Info).Infof("imported %4d receipts in %9v. #%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored) log.Info(fmt.Sprintf("imported %4d receipts in %9v. #%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored))
return 0, nil return 0, nil
} }
@ -830,10 +829,10 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err
// Irrelevant of the canonical status, write the block itself to the database // Irrelevant of the canonical status, write the block itself to the database
if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil { if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
glog.Fatalf("failed to write block total difficulty: %v", err) log.Crit(fmt.Sprintf("failed to write block total difficulty: %v", err))
} }
if err := WriteBlock(self.chainDb, block); err != nil { if err := WriteBlock(self.chainDb, block); err != nil {
glog.Fatalf("failed to write block contents: %v", err) log.Crit(fmt.Sprintf("failed to write block contents: %v", err))
} }
// If the total difficulty is higher than our known, add it to the canonical chain // If the total difficulty is higher than our known, add it to the canonical chain
@ -867,7 +866,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
i-1, chain[i-1].NumberU64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4]) i-1, chain[i-1].NumberU64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4])
glog.V(logger.Error).Info(failure.Error()) log.Error(fmt.Sprint(failure.Error()))
return 0, failure return 0, failure
} }
} }
@ -894,7 +893,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
for i, block := range chain { for i, block := range chain {
if atomic.LoadInt32(&self.procInterrupt) == 1 { if atomic.LoadInt32(&self.procInterrupt) == 1 {
glog.V(logger.Debug).Infoln("Premature abort during block chain processing") log.Debug(fmt.Sprint("Premature abort during block chain processing"))
break break
} }
bstart := time.Now() bstart := time.Now()
@ -991,9 +990,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
switch status { switch status {
case CanonStatTy: case CanonStatTy:
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles())) return fmt.Sprintf("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles()))
} }})
blockInsertTimer.UpdateSince(bstart) blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs}) events = append(events, ChainEvent{block, block.Hash(), logs})
@ -1014,9 +1013,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
return i, err return i, err
} }
case SideStatTy: case SideStatTy:
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string {
glog.Infof("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles())) return fmt.Sprintf("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles()))
} }})
blockInsertTimer.UpdateSince(bstart) blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainSideEvent{block}) events = append(events, ChainSideEvent{block})
@ -1025,10 +1024,8 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
} }
stats.processed++ stats.processed++
if glog.V(logger.Info) { stats.usedGas += usedGas.Uint64()
stats.usedGas += usedGas.Uint64() stats.report(chain, i)
stats.report(chain, i)
}
} }
go self.postChainEvents(events, coalescedLogs) go self.postChainEvents(events, coalescedLogs)
@ -1070,7 +1067,7 @@ func (st *insertStats) report(chain []*types.Block, index int) {
} else { } else {
hashes = fmt.Sprintf("%x…", end.Hash().Bytes()[:4]) hashes = fmt.Sprintf("%x…", end.Hash().Bytes()[:4])
} }
glog.Infof("imported %4d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra) log.Info(fmt.Sprintf("imported %4d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra))
*st = insertStats{startTime: now, lastIndex: index} *st = insertStats{startTime: now, lastIndex: index}
} }
@ -1150,21 +1147,24 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
return fmt.Errorf("Invalid new chain") return fmt.Errorf("Invalid new chain")
} }
} }
// Ensure the user sees large reorgs
logFn := log.Debug
if len(oldChain) > 63 {
logFn = log.Warn
}
logFn("", "msg", log.Lazy{Fn: func() string {
oldLen, newLen := len(oldChain), len(newChain)
newLast, newFirst := newChain[0], newChain[newLen-1]
oldLast, oldFirst := oldChain[0], oldChain[oldLen-1]
if oldLen := len(oldChain); oldLen > 63 || glog.V(logger.Debug) { return fmt.Sprintf("Chain split detected after #%v [%x…]. Reorganising chain (-%v +%v blocks), rejecting #%v-#%v [%x…/%x…] in favour of #%v-#%v [%x…/%x…]",
newLen := len(newChain)
newLast := newChain[0]
newFirst := newChain[newLen-1]
oldLast := oldChain[0]
oldFirst := oldChain[oldLen-1]
glog.Infof("Chain split detected after #%v [%x…]. Reorganising chain (-%v +%v blocks), rejecting #%v-#%v [%x…/%x…] in favour of #%v-#%v [%x…/%x…]",
commonBlock.Number(), commonBlock.Hash().Bytes()[:4], commonBlock.Number(), commonBlock.Hash().Bytes()[:4],
oldLen, newLen, oldLen, newLen,
oldFirst.Number(), oldLast.Number(), oldFirst.Number(), oldLast.Number(),
oldFirst.Hash().Bytes()[:4], oldLast.Hash().Bytes()[:4], oldFirst.Hash().Bytes()[:4], oldLast.Hash().Bytes()[:4],
newFirst.Number(), newLast.Number(), newFirst.Number(), newLast.Number(),
newFirst.Hash().Bytes()[:4], newLast.Hash().Bytes()[:4]) newFirst.Hash().Bytes()[:4], newLast.Hash().Bytes()[:4])
} }})
var addedTxs types.Transactions var addedTxs types.Transactions
// insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly // insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly
@ -1271,12 +1271,12 @@ func (bc *BlockChain) addBadBlock(block *types.Block) {
// reportBlock logs a bad block error. // reportBlock logs a bad block error.
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) {
bc.addBadBlock(block) bc.addBadBlock(block)
if glog.V(logger.Error) { log.Error("", "msg", log.Lazy{Fn: func() string {
var receiptString string var receiptString string
for _, receipt := range receipts { for _, receipt := range receipts {
receiptString += fmt.Sprintf("\t%v\n", receipt) receiptString += fmt.Sprintf("\t%v\n", receipt)
} }
glog.Errorf(` return fmt.Sprintf(`
########## BAD BLOCK ######### ########## BAD BLOCK #########
Chain config: %v Chain config: %v
@ -1287,7 +1287,7 @@ Hash: 0x%x
Error: %v Error: %v
############################## ##############################
`, bc.config, block.Number(), block.Hash(), receiptString, err) `, bc.config, block.Number(), block.Hash(), receiptString, err)
} }})
} }
// InsertHeaderChain attempts to insert the given header chain in to the local // InsertHeaderChain attempts to insert the given header chain in to the local

@ -28,8 +28,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -107,7 +106,7 @@ func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
} }
header := new(types.Header) header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil { if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
glog.Fatalf("failed to decode block header: %v", err) log.Crit(fmt.Sprintf("failed to decode block header: %v", err))
} }
return header.Number.Uint64() return header.Number.Uint64()
} }
@ -167,7 +166,7 @@ func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header
} }
header := new(types.Header) header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil { if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err) log.Error(fmt.Sprintf("invalid block header RLP for hash %x: %v", hash, err))
return nil return nil
} }
return header return header
@ -191,7 +190,7 @@ func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
} }
body := new(types.Body) body := new(types.Body)
if err := rlp.Decode(bytes.NewReader(data), body); err != nil { if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err) log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err))
return nil return nil
} }
return body return body
@ -209,7 +208,7 @@ func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
} }
td := new(big.Int) td := new(big.Int)
if err := rlp.Decode(bytes.NewReader(data), td); err != nil { if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err) log.Error(fmt.Sprintf("invalid block total difficulty RLP for hash %x: %v", hash, err))
return nil return nil
} }
return td return td
@ -247,7 +246,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.
} }
storageReceipts := []*types.ReceiptForStorage{} storageReceipts := []*types.ReceiptForStorage{}
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil { if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err) log.Error(fmt.Sprintf("invalid receipt array RLP for hash %x: %v", hash, err))
return nil return nil
} }
receipts := make(types.Receipts, len(storageReceipts)) receipts := make(types.Receipts, len(storageReceipts))
@ -294,7 +293,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
var receipt types.ReceiptForStorage var receipt types.ReceiptForStorage
err := rlp.DecodeBytes(data, &receipt) err := rlp.DecodeBytes(data, &receipt)
if err != nil { if err != nil {
glog.V(logger.Debug).Infoln("GetReceipt err:", err) log.Debug(fmt.Sprint("GetReceipt err:", err))
} }
return (*types.Receipt)(&receipt) return (*types.Receipt)(&receipt)
} }
@ -303,7 +302,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt {
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error { func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error {
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...) key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
if err := db.Put(key, hash.Bytes()); err != nil { if err := db.Put(key, hash.Bytes()); err != nil {
glog.Fatalf("failed to store number to hash mapping into database: %v", err) log.Crit(fmt.Sprintf("failed to store number to hash mapping into database: %v", err))
} }
return nil return nil
} }
@ -311,7 +310,7 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro
// WriteHeadHeaderHash stores the head header's hash. // WriteHeadHeaderHash stores the head header's hash.
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error { func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last header's hash into database: %v", err) log.Crit(fmt.Sprintf("failed to store last header's hash into database: %v", err))
} }
return nil return nil
} }
@ -319,7 +318,7 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
// WriteHeadBlockHash stores the head block's hash. // WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error { func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil { if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last block's hash into database: %v", err) log.Crit(fmt.Sprintf("failed to store last block's hash into database: %v", err))
} }
return nil return nil
} }
@ -327,7 +326,7 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
// WriteHeadFastBlockHash stores the fast head block's hash. // WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error { func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
if err := db.Put(headFastKey, hash.Bytes()); err != nil { if err := db.Put(headFastKey, hash.Bytes()); err != nil {
glog.Fatalf("failed to store last fast block's hash into database: %v", err) log.Crit(fmt.Sprintf("failed to store last fast block's hash into database: %v", err))
} }
return nil return nil
} }
@ -343,13 +342,13 @@ func WriteHeader(db ethdb.Database, header *types.Header) error {
encNum := encodeBlockNumber(num) encNum := encodeBlockNumber(num)
key := append(blockHashPrefix, hash...) key := append(blockHashPrefix, hash...)
if err := db.Put(key, encNum); err != nil { if err := db.Put(key, encNum); err != nil {
glog.Fatalf("failed to store hash to number mapping into database: %v", err) log.Crit(fmt.Sprintf("failed to store hash to number mapping into database: %v", err))
} }
key = append(append(headerPrefix, encNum...), hash...) key = append(append(headerPrefix, encNum...), hash...)
if err := db.Put(key, data); err != nil { if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store header into database: %v", err) log.Crit(fmt.Sprintf("failed to store header into database: %v", err))
} }
glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, hash[:4]) log.Debug(fmt.Sprintf("stored header #%v [%x…]", header.Number, hash[:4]))
return nil return nil
} }
@ -366,9 +365,9 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B
func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error { func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error {
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, rlp); err != nil { if err := db.Put(key, rlp); err != nil {
glog.Fatalf("failed to store block body into database: %v", err) log.Crit(fmt.Sprintf("failed to store block body into database: %v", err))
} }
glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4]) log.Debug(fmt.Sprintf("stored block body [%x…]", hash.Bytes()[:4]))
return nil return nil
} }
@ -380,9 +379,9 @@ func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) er
} }
key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...) key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)
if err := db.Put(key, data); err != nil { if err := db.Put(key, data); err != nil {
glog.Fatalf("failed to store block total difficulty into database: %v", err) log.Crit(fmt.Sprintf("failed to store block total difficulty into database: %v", err))
} }
glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td) log.Debug(fmt.Sprintf("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td))
return nil return nil
} }
@ -415,9 +414,9 @@ func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, rece
// Store the flattened receipt slice // Store the flattened receipt slice
key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, bytes); err != nil { if err := db.Put(key, bytes); err != nil {
glog.Fatalf("failed to store block receipts into database: %v", err) log.Crit(fmt.Sprintf("failed to store block receipts into database: %v", err))
} }
glog.V(logger.Debug).Infof("stored block receipts [%x…]", hash.Bytes()[:4]) log.Debug(fmt.Sprintf("stored block receipts [%x…]", hash.Bytes()[:4]))
return nil return nil
} }
@ -458,7 +457,7 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error {
} }
// Write the scheduled data into the database // Write the scheduled data into the database
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
glog.Fatalf("failed to store transactions into database: %v", err) log.Crit(fmt.Sprintf("failed to store transactions into database: %v", err))
} }
return nil return nil
} }
@ -490,7 +489,7 @@ func WriteReceipts(db ethdb.Database, receipts types.Receipts) error {
} }
// Write the scheduled data into the database // Write the scheduled data into the database
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
glog.Fatalf("failed to store receipts into database: %v", err) log.Crit(fmt.Sprintf("failed to store receipts into database: %v", err))
} }
return nil return nil
} }
@ -552,7 +551,7 @@ func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block {
} }
var block types.StorageBlock var block types.StorageBlock
if err := rlp.Decode(bytes.NewReader(data), &block); err != nil { if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err) log.Error(fmt.Sprintf("invalid block RLP for hash %x: %v", hash, err))
return nil return nil
} }
return (*types.Block)(&block) return (*types.Block)(&block)
@ -623,7 +622,7 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
return fmt.Errorf("preimage write fail for block %d: %v", number, err) return fmt.Errorf("preimage write fail for block %d: %v", number, err)
} }
glog.V(logger.Debug).Infof("%d preimages in block %d, including %d new", len(preimages), number, hitCount) log.Debug(fmt.Sprintf("%d preimages in block %d, including %d new", len(preimages), number, hitCount))
} }
return nil return nil
} }

@ -31,8 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
@ -92,7 +91,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block,
}, nil, nil, nil) }, nil, nil, nil)
if block := GetBlock(chainDb, block.Hash(), block.NumberU64()); block != nil { if block := GetBlock(chainDb, block.Hash(), block.NumberU64()); block != nil {
glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number") log.Info(fmt.Sprint("Genesis block already in chain. Writing canonical number"))
err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64())
if err != nil { if err != nil {
return nil, err return nil, err

@ -30,8 +30,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/pow"
"github.com/hashicorp/golang-lru" "github.com/hashicorp/golang-lru"
@ -102,7 +101,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValid
if err != nil { if err != nil {
return nil, err return nil, err
} }
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block") log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
hc.genesisHeader = genesisBlock.Header() hc.genesisHeader = genesisBlock.Header()
} }
@ -155,10 +154,10 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
// Irrelevant of the canonical status, write the td and header to the database // Irrelevant of the canonical status, write the td and header to the database
if err := hc.WriteTd(hash, number, externTd); err != nil { if err := hc.WriteTd(hash, number, externTd); err != nil {
glog.Fatalf("failed to write header total difficulty: %v", err) log.Crit(fmt.Sprintf("failed to write header total difficulty: %v", err))
} }
if err := WriteHeader(hc.chainDb, header); err != nil { if err := WriteHeader(hc.chainDb, header); err != nil {
glog.Fatalf("failed to write header contents: %v", err) log.Crit(fmt.Sprintf("failed to write header contents: %v", err))
} }
// If the total difficulty is higher than our known, add it to the canonical chain // If the total difficulty is higher than our known, add it to the canonical chain
@ -189,10 +188,10 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
// Extend the canonical chain with the new header // Extend the canonical chain with the new header
if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil { if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil {
glog.Fatalf("failed to insert header number: %v", err) log.Crit(fmt.Sprintf("failed to insert header number: %v", err))
} }
if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil { if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
glog.Fatalf("failed to insert head header hash: %v", err) log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
} }
hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header) hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
@ -231,7 +230,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])",
i-1, chain[i-1].Number.Uint64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].Number.Uint64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash.Bytes()[:4]) i-1, chain[i-1].Number.Uint64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].Number.Uint64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash.Bytes()[:4])
glog.V(logger.Error).Info(failure.Error()) log.Error(fmt.Sprint(failure.Error()))
return 0, failure return 0, failure
} }
} }
@ -317,7 +316,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
for i, header := range chain { for i, header := range chain {
// Short circuit insertion if shutting down // Short circuit insertion if shutting down
if hc.procInterrupt() { if hc.procInterrupt() {
glog.V(logger.Debug).Infoln("premature abort during header chain processing") log.Debug(fmt.Sprint("premature abort during header chain processing"))
break break
} }
hash := header.Hash() hash := header.Hash()
@ -339,7 +338,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w
if stats.ignored > 0 { if stats.ignored > 0 {
ignored = fmt.Sprintf(" (%d ignored)", stats.ignored) ignored = fmt.Sprintf(" (%d ignored)", stats.ignored)
} }
glog.V(logger.Info).Infof("imported %4d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4]) log.Info(fmt.Sprintf("imported %4d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4]))
return 0, nil return 0, nil
} }
@ -446,7 +445,7 @@ func (hc *HeaderChain) CurrentHeader() *types.Header {
// SetCurrentHeader sets the current head header of the canonical chain. // SetCurrentHeader sets the current head header of the canonical chain.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) { func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil { if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
glog.Fatalf("failed to insert head header hash: %v", err) log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err))
} }
hc.currentHeader = head hc.currentHeader = head
hc.currentHeaderHash = head.Hash() hc.currentHeaderHash = head.Hash()
@ -489,7 +488,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
hc.currentHeaderHash = hc.currentHeader.Hash() hc.currentHeaderHash = hc.currentHeader.Hash()
if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil { if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
glog.Fatalf("failed to reset head header hash: %v", err) log.Crit(fmt.Sprintf("failed to reset head header hash: %v", err))
} }
} }

@ -24,8 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
@ -135,9 +134,9 @@ func (self *stateObject) markSuicided() {
self.onDirty(self.Address()) self.onDirty(self.Address())
self.onDirty = nil self.onDirty = nil
} }
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("%x: #%d %v X\n", self.Address(), self.Nonce(), self.Balance()) return fmt.Sprintf("%x: #%d %v X\n", self.Address(), self.Nonce(), self.Balance())
} }})
} }
func (c *stateObject) touch() { func (c *stateObject) touch() {
@ -253,9 +252,9 @@ func (c *stateObject) AddBalance(amount *big.Int) {
} }
c.SetBalance(new(big.Int).Add(c.Balance(), amount)) c.SetBalance(new(big.Int).Add(c.Balance(), amount))
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("%x: #%d %v (+ %v)\n", c.Address(), c.Nonce(), c.Balance(), amount) return fmt.Sprintf("%x: #%d %v (+ %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
} }})
} }
// SubBalance removes amount from c's balance. // SubBalance removes amount from c's balance.
@ -266,9 +265,9 @@ func (c *stateObject) SubBalance(amount *big.Int) {
} }
c.SetBalance(new(big.Int).Sub(c.Balance(), amount)) c.SetBalance(new(big.Int).Sub(c.Balance(), amount))
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.Nonce(), c.Balance(), amount) return fmt.Sprintf("%x: #%d %v (- %v)\n", c.Address(), c.Nonce(), c.Balance(), amount)
} }})
} }
func (self *stateObject) SetBalance(amount *big.Int) { func (self *stateObject) SetBalance(amount *big.Int) {

@ -27,8 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
lru "github.com/hashicorp/golang-lru" lru "github.com/hashicorp/golang-lru"
@ -411,7 +410,7 @@ func (self *StateDB) getStateObject(addr common.Address) (stateObject *stateObje
} }
var data Account var data Account
if err := rlp.DecodeBytes(enc, &data); err != nil { if err := rlp.DecodeBytes(enc, &data); err != nil {
glog.Errorf("can't decode object at %x: %v", addr[:], err) log.Error(fmt.Sprintf("can't decode object at %x: %v", addr[:], err))
return nil return nil
} }
// Insert into the live set. // Insert into the live set.
@ -446,9 +445,9 @@ func (self *StateDB) createObject(addr common.Address) (newobj, prev *stateObjec
newobj = newObject(self, addr, Account{}, self.MarkStateObjectDirty) newobj = newObject(self, addr, Account{}, self.MarkStateObjectDirty)
newobj.setNonce(0) // sets the object to dirty newobj.setNonce(0) // sets the object to dirty
if prev == nil { if prev == nil {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("(+) %x\n", addr) return fmt.Sprintf("(+) %x\n", addr)
} }})
self.journal = append(self.journal, createObjectChange{account: &addr}) self.journal = append(self.journal, createObjectChange{account: &addr})
} else { } else {
self.journal = append(self.journal, resetObjectChange{prev: prev}) self.journal = append(self.journal, resetObjectChange{prev: prev})
@ -617,7 +616,7 @@ func (s *StateDB) CommitBatch(deleteEmptyObjects bool) (root common.Hash, batch
batch = s.db.NewBatch() batch = s.db.NewBatch()
root, _ = s.commit(batch, deleteEmptyObjects) root, _ = s.commit(batch, deleteEmptyObjects)
glog.V(logger.Debug).Infof("Trie cache stats: %d misses, %d unloads", trie.CacheMisses(), trie.CacheUnloads()) log.Debug(fmt.Sprintf("Trie cache stats: %d misses, %d unloads", trie.CacheMisses(), trie.CacheUnloads()))
return root, batch return root, batch
} }

@ -17,14 +17,14 @@
package core package core
import ( import (
"fmt"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
@ -122,7 +122,7 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, gp *GasPool, s
receipt.Logs = statedb.GetLogs(tx.Hash()) receipt.Logs = statedb.GetLogs(tx.Hash())
receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
glog.V(logger.Debug).Infoln(receipt) log.Debug(fmt.Sprint(receipt))
return receipt, gas, err return receipt, gas, err
} }

@ -18,12 +18,12 @@ package core
import ( import (
"errors" "errors"
"fmt"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
@ -255,7 +255,7 @@ func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *b
ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value) ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value)
} }
if vmerr != nil { if vmerr != nil {
glog.V(logger.Debug).Infoln("vm returned with error:", err) log.Debug(fmt.Sprint("vm returned with error:", err))
// The only possible consensus-error would be if there wasn't // The only possible consensus-error would be if there wasn't
// sufficient balance to make the transfer happen. The first // sufficient balance to make the transfer happen. The first
// balance transfer may never fail. // balance transfer may never fail.

@ -28,8 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"gopkg.in/karalabe/cookiejar.v2/collections/prque" "gopkg.in/karalabe/cookiejar.v2/collections/prque"
@ -163,12 +162,12 @@ func (pool *TxPool) eventLoop() {
func (pool *TxPool) resetState() { func (pool *TxPool) resetState() {
currentState, err := pool.currentState() currentState, err := pool.currentState()
if err != nil { if err != nil {
glog.V(logger.Error).Infof("Failed to get current state: %v", err) log.Error(fmt.Sprintf("Failed to get current state: %v", err))
return return
} }
managedState := state.ManageState(currentState) managedState := state.ManageState(currentState)
if err != nil { if err != nil {
glog.V(logger.Error).Infof("Failed to get managed state: %v", err) log.Error(fmt.Sprintf("Failed to get managed state: %v", err))
return return
} }
pool.pendingState = managedState pool.pendingState = managedState
@ -193,7 +192,7 @@ func (pool *TxPool) Stop() {
pool.events.Unsubscribe() pool.events.Unsubscribe()
close(pool.quit) close(pool.quit)
pool.wg.Wait() pool.wg.Wait()
glog.V(logger.Info).Infoln("Transaction pool stopped") log.Info(fmt.Sprint("Transaction pool stopped"))
} }
func (pool *TxPool) State() *state.ManagedState { func (pool *TxPool) State() *state.ManagedState {
@ -334,14 +333,14 @@ func (pool *TxPool) add(tx *types.Transaction) error {
pool.enqueueTx(hash, tx) pool.enqueueTx(hash, tx)
// Print a log message if low enough level is set // Print a log message if low enough level is set
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
rcpt := "[NEW_CONTRACT]" rcpt := "[NEW_CONTRACT]"
if to := tx.To(); to != nil { if to := tx.To(); to != nil {
rcpt = common.Bytes2Hex(to[:4]) rcpt = common.Bytes2Hex(to[:4])
} }
from, _ := types.Sender(pool.signer, tx) // from already verified during tx validation from, _ := types.Sender(pool.signer, tx) // from already verified during tx validation
glog.Infof("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value, hash) return fmt.Sprintf("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value(), hash)
} }})
return nil return nil
} }
@ -423,7 +422,7 @@ func (pool *TxPool) AddBatch(txs []*types.Transaction) error {
for _, tx := range txs { for _, tx := range txs {
if err := pool.add(tx); err != nil { if err := pool.add(tx); err != nil {
glog.V(logger.Debug).Infoln("tx error:", err) log.Debug(fmt.Sprint("tx error:", err))
} }
} }
@ -514,32 +513,32 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
for addr, list := range pool.queue { for addr, list := range pool.queue {
// Drop all transactions that are deemed too old (low nonce) // Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(state.GetNonce(addr)) { for _, tx := range list.Forward(state.GetNonce(addr)) {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("Removed old queued transaction: %v", tx) return fmt.Sprintf("Removed old queued transaction: %v", tx)
} }})
delete(pool.all, tx.Hash()) delete(pool.all, tx.Hash())
} }
// Drop all transactions that are too costly (low balance) // Drop all transactions that are too costly (low balance)
drops, _ := list.Filter(state.GetBalance(addr)) drops, _ := list.Filter(state.GetBalance(addr))
for _, tx := range drops { for _, tx := range drops {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("Removed unpayable queued transaction: %v", tx) return fmt.Sprintf("Removed unpayable queued transaction: %v", tx)
} }})
delete(pool.all, tx.Hash()) delete(pool.all, tx.Hash())
queuedNofundsCounter.Inc(1) queuedNofundsCounter.Inc(1)
} }
// Gather all executable transactions and promote them // Gather all executable transactions and promote them
for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) { for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("Promoting queued transaction: %v", tx) return fmt.Sprintf("Promoting queued transaction: %v", tx)
} }})
pool.promoteTx(addr, tx.Hash(), tx) pool.promoteTx(addr, tx.Hash(), tx)
} }
// Drop all transactions over the allowed limit // Drop all transactions over the allowed limit
for _, tx := range list.Cap(int(maxQueuedPerAccount)) { for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("Removed cap-exceeding queued transaction: %v", tx) return fmt.Sprintf("Removed cap-exceeding queued transaction: %v", tx)
} }})
delete(pool.all, tx.Hash()) delete(pool.all, tx.Hash())
queuedRLCounter.Inc(1) queuedRLCounter.Inc(1)
} }
@ -651,24 +650,24 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
// Drop all transactions that are deemed too old (low nonce) // Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(nonce) { for _, tx := range list.Forward(nonce) {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("Removed old pending transaction: %v", tx) return fmt.Sprintf("Removed old pending transaction: %v", tx)
} }})
delete(pool.all, tx.Hash()) delete(pool.all, tx.Hash())
} }
// Drop all transactions that are too costly (low balance), and queue any invalids back for later // Drop all transactions that are too costly (low balance), and queue any invalids back for later
drops, invalids := list.Filter(state.GetBalance(addr)) drops, invalids := list.Filter(state.GetBalance(addr))
for _, tx := range drops { for _, tx := range drops {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("Removed unpayable pending transaction: %v", tx) return fmt.Sprintf("Removed unpayable pending transaction: %v", tx)
} }})
delete(pool.all, tx.Hash()) delete(pool.all, tx.Hash())
pendingNofundsCounter.Inc(1) pendingNofundsCounter.Inc(1)
} }
for _, tx := range invalids { for _, tx := range invalids {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("Demoting pending transaction: %v", tx) return fmt.Sprintf("Demoting pending transaction: %v", tx)
} }})
pool.enqueueTx(tx.Hash(), tx) pool.enqueueTx(tx.Hash(), tx)
} }
// Delete the entire queue entry if it became empty. // Delete the entire queue entry if it became empty.

@ -18,11 +18,11 @@ package vm
import ( import (
"crypto/sha256" "crypto/sha256"
"fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"golang.org/x/crypto/ripemd160" "golang.org/x/crypto/ripemd160"
) )
@ -75,14 +75,14 @@ func (c *ecrecover) Run(in []byte) []byte {
// tighter sig s values in homestead only apply to tx sigs // tighter sig s values in homestead only apply to tx sigs
if common.Bytes2Big(in[32:63]).BitLen() > 0 || !crypto.ValidateSignatureValues(v, r, s, false) { if common.Bytes2Big(in[32:63]).BitLen() > 0 || !crypto.ValidateSignatureValues(v, r, s, false) {
glog.V(logger.Detail).Infof("ECRECOVER error: v, r or s value invalid") log.Trace(fmt.Sprintf("ECRECOVER error: v, r or s value invalid"))
return nil return nil
} }
// v needs to be at the end for libsecp256k1 // v needs to be at the end for libsecp256k1
pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v)) pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v))
// make sure the public key is a valid one // make sure the public key is a valid one
if err != nil { if err != nil {
glog.V(logger.Detail).Infoln("ECRECOVER error: ", err) log.Trace(fmt.Sprint("ECRECOVER error: ", err))
return nil return nil
} }

@ -25,8 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
@ -124,13 +123,13 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
} }
}() }()
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("evm running: %x\n", codehash[:4]) return fmt.Sprintf("evm running: %x\n", codehash[:4])
tstart := time.Now() }})
defer func() { tstart := time.Now()
glog.Infof("evm done: %x. time: %v\n", codehash[:4], time.Since(tstart)) defer log.Debug("", "msg", log.Lazy{Fn: func() string {
}() return fmt.Sprintf("evm done: %x. time: %v\n", codehash[:4], time.Since(tstart))
} }})
// The Interpreter main run loop (contextual). This loop runs until either an // The Interpreter main run loop (contextual). This loop runs until either an
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during // explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during

@ -16,11 +16,7 @@
package errs package errs
import ( import "fmt"
"fmt"
"github.com/ethereum/go-ethereum/logger/glog"
)
/* /*
Errors implements an error handler providing standardised errors for a package. Errors implements an error handler providing standardised errors for a package.
@ -80,9 +76,3 @@ func (self Error) Error() (message string) {
} }
return self.message return self.message
} }
func (self Error) Log(v glog.Verbose) {
if v {
v.Infoln(self)
}
}

@ -37,8 +37,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -113,7 +112,7 @@ func (s *PublicMinerAPI) GetWork() (work [3]string, err error) {
if work, err = s.agent.GetWork(); err == nil { if work, err = s.agent.GetWork(); err == nil {
return return
} }
glog.V(logger.Debug).Infof("%v", err) log.Debug(fmt.Sprintf("%v", err))
return work, fmt.Errorf("mining not ready") return work, fmt.Errorf("mining not ready")
} }

@ -40,8 +40,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
@ -184,7 +183,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
return nil, err return nil, err
} }
glog.V(logger.Info).Infof("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId) log.Info(fmt.Sprintf("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId))
if !config.SkipBcVersionCheck { if !config.SkipBcVersionCheck {
bcVersion := core.GetBlockChainVersion(chainDb) bcVersion := core.GetBlockChainVersion(chainDb)
@ -202,7 +201,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block") log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
} }
if config.ChainConfig == nil { if config.ChainConfig == nil {
@ -212,7 +211,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
eth.chainConfig = config.ChainConfig eth.chainConfig = config.ChainConfig
glog.V(logger.Info).Infoln("Chain config:", eth.chainConfig) log.Info(fmt.Sprint("Chain config:", eth.chainConfig))
eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.pow, eth.EventMux(), vm.Config{EnablePreimageRecording: config.EnablePreimageRecording}) eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.pow, eth.EventMux(), vm.Config{EnablePreimageRecording: config.EnablePreimageRecording})
if err != nil { if err != nil {
@ -273,7 +272,7 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
if err != nil { if err != nil {
return err return err
} }
glog.V(logger.Info).Infof("Successfully wrote custom genesis block: %x", block.Hash()) log.Info(fmt.Sprintf("Successfully wrote custom genesis block: %x", block.Hash()))
} }
// Load up a test setup if directly injected // Load up a test setup if directly injected
if config.TestGenesisState != nil { if config.TestGenesisState != nil {
@ -292,13 +291,13 @@ func SetupGenesisBlock(chainDb *ethdb.Database, config *Config) error {
func CreatePoW(config *Config) (pow.PoW, error) { func CreatePoW(config *Config) (pow.PoW, error) {
switch { switch {
case config.PowFake: case config.PowFake:
glog.V(logger.Info).Infof("ethash used in fake mode") log.Info(fmt.Sprintf("ethash used in fake mode"))
return pow.PoW(core.FakePow{}), nil return pow.PoW(core.FakePow{}), nil
case config.PowTest: case config.PowTest:
glog.V(logger.Info).Infof("ethash used in test mode") log.Info(fmt.Sprintf("ethash used in test mode"))
return ethash.NewForTesting() return ethash.NewForTesting()
case config.PowShared: case config.PowShared:
glog.V(logger.Info).Infof("ethash used in shared mode") log.Info(fmt.Sprintf("ethash used in shared mode"))
return ethash.NewShared(), nil return ethash.NewShared(), nil
default: default:
return ethash.New(), nil return ethash.New(), nil
@ -382,7 +381,7 @@ func (s *Ethereum) StartMining(threads int) error {
eb, err := s.Etherbase() eb, err := s.Etherbase()
if err != nil { if err != nil {
err = fmt.Errorf("Cannot start mining without etherbase address: %v", err) err = fmt.Errorf("Cannot start mining without etherbase address: %v", err)
glog.V(logger.Error).Infoln(err) log.Error(fmt.Sprint(err))
return err return err
} }
go s.miner.Start(eb, threads) go s.miner.Start(eb, threads)
@ -470,14 +469,14 @@ func (self *Ethereum) StartAutoDAG() {
return // already started return // already started
} }
go func() { go func() {
glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir) log.Info(fmt.Sprintf("Automatic pregeneration of ethash DAG ON (ethash dir: %s)", ethash.DefaultDir))
var nextEpoch uint64 var nextEpoch uint64
timer := time.After(0) timer := time.After(0)
self.autodagquit = make(chan bool) self.autodagquit = make(chan bool)
for { for {
select { select {
case <-timer: case <-timer:
glog.V(logger.Info).Infof("checking DAG (ethash dir: %s)", ethash.DefaultDir) log.Info(fmt.Sprintf("checking DAG (ethash dir: %s)", ethash.DefaultDir))
currentBlock := self.BlockChain().CurrentBlock().NumberU64() currentBlock := self.BlockChain().CurrentBlock().NumberU64()
thisEpoch := currentBlock / epochLength thisEpoch := currentBlock / epochLength
if nextEpoch <= thisEpoch { if nextEpoch <= thisEpoch {
@ -486,19 +485,19 @@ func (self *Ethereum) StartAutoDAG() {
previousDag, previousDagFull := dagFiles(thisEpoch - 1) previousDag, previousDagFull := dagFiles(thisEpoch - 1)
os.Remove(filepath.Join(ethash.DefaultDir, previousDag)) os.Remove(filepath.Join(ethash.DefaultDir, previousDag))
os.Remove(filepath.Join(ethash.DefaultDir, previousDagFull)) os.Remove(filepath.Join(ethash.DefaultDir, previousDagFull))
glog.V(logger.Info).Infof("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag) log.Info(fmt.Sprintf("removed DAG for epoch %d (%s)", thisEpoch-1, previousDag))
} }
nextEpoch = thisEpoch + 1 nextEpoch = thisEpoch + 1
dag, _ := dagFiles(nextEpoch) dag, _ := dagFiles(nextEpoch)
if _, err := os.Stat(dag); os.IsNotExist(err) { if _, err := os.Stat(dag); os.IsNotExist(err) {
glog.V(logger.Info).Infof("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag) log.Info(fmt.Sprintf("Pregenerating DAG for epoch %d (%s)", nextEpoch, dag))
err := ethash.MakeDAG(nextEpoch*epochLength, "") // "" -> ethash.DefaultDir err := ethash.MakeDAG(nextEpoch*epochLength, "") // "" -> ethash.DefaultDir
if err != nil { if err != nil {
glog.V(logger.Error).Infof("Error generating DAG for epoch %d (%s)", nextEpoch, dag) log.Error(fmt.Sprintf("Error generating DAG for epoch %d (%s)", nextEpoch, dag))
return return
} }
} else { } else {
glog.V(logger.Error).Infof("DAG for epoch %d (%s)", nextEpoch, dag) log.Error(fmt.Sprintf("DAG for epoch %d (%s)", nextEpoch, dag))
} }
} }
} }
@ -516,7 +515,7 @@ func (self *Ethereum) StopAutoDAG() {
close(self.autodagquit) close(self.autodagquit)
self.autodagquit = nil self.autodagquit = nil
} }
glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir) log.Info(fmt.Sprintf("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir))
} }
// dagFiles(epoch) returns the two alternative DAG filenames (not a path) // dagFiles(epoch) returns the two alternative DAG filenames (not a path)

@ -25,8 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -66,9 +65,9 @@ func sendBadBlockReport(block *types.Block, err error) {
client := http.Client{Timeout: 8 * time.Second} client := http.Client{Timeout: 8 * time.Second}
resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr)) resp, err := client.Post(badBlocksURL, "application/json", bytes.NewReader(jsonStr))
if err != nil { if err != nil {
glog.V(logger.Debug).Infoln(err) log.Debug(fmt.Sprint(err))
return return
} }
glog.V(logger.Debug).Infof("Bad Block Report posted (%d)", resp.StatusCode) log.Debug(fmt.Sprintf("Bad Block Report posted (%d)", resp.StatusCode))
resp.Body.Close() resp.Body.Close()
} }

@ -28,8 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -50,7 +49,7 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
return nil // empty database, nothing to do return nil // empty database, nothing to do
} }
glog.V(logger.Info).Infof("Upgrading chain database to use sequential keys") log.Info(fmt.Sprintf("Upgrading chain database to use sequential keys"))
stopChn := make(chan struct{}) stopChn := make(chan struct{})
stoppedChn := make(chan struct{}) stoppedChn := make(chan struct{})
@ -73,11 +72,11 @@ func upgradeSequentialKeys(db ethdb.Database) (stopFn func()) {
err, stopped = upgradeSequentialOrphanedReceipts(db, stopFn) err, stopped = upgradeSequentialOrphanedReceipts(db, stopFn)
} }
if err == nil && !stopped { if err == nil && !stopped {
glog.V(logger.Info).Infof("Database conversion successful") log.Info(fmt.Sprintf("Database conversion successful"))
db.Put(useSequentialKeys, []byte{42}) db.Put(useSequentialKeys, []byte{42})
} }
if err != nil { if err != nil {
glog.V(logger.Error).Infof("Database conversion failed: %v", err) log.Error(fmt.Sprintf("Database conversion failed: %v", err))
} }
close(stoppedChn) close(stoppedChn)
}() }()
@ -106,7 +105,7 @@ func upgradeSequentialCanonicalNumbers(db ethdb.Database, stopFn func() bool) (e
it.Release() it.Release()
it = db.(*ethdb.LDBDatabase).NewIterator() it = db.(*ethdb.LDBDatabase).NewIterator()
it.Seek(keyPtr) it.Seek(keyPtr)
glog.V(logger.Info).Infof("converting %d canonical numbers...", cnt) log.Info(fmt.Sprintf("converting %d canonical numbers...", cnt))
} }
number := big.NewInt(0).SetBytes(keyPtr[10:]).Uint64() number := big.NewInt(0).SetBytes(keyPtr[10:]).Uint64()
newKey := []byte("h12345678n") newKey := []byte("h12345678n")
@ -125,7 +124,7 @@ func upgradeSequentialCanonicalNumbers(db ethdb.Database, stopFn func() bool) (e
it.Next() it.Next()
} }
if cnt > 0 { if cnt > 0 {
glog.V(logger.Info).Infof("converted %d canonical numbers...", cnt) log.Info(fmt.Sprintf("converted %d canonical numbers...", cnt))
} }
return nil, false return nil, false
} }
@ -149,7 +148,7 @@ func upgradeSequentialBlocks(db ethdb.Database, stopFn func() bool) (error, bool
it.Release() it.Release()
it = db.(*ethdb.LDBDatabase).NewIterator() it = db.(*ethdb.LDBDatabase).NewIterator()
it.Seek(keyPtr) it.Seek(keyPtr)
glog.V(logger.Info).Infof("converting %d blocks...", cnt) log.Info(fmt.Sprintf("converting %d blocks...", cnt))
} }
// convert header, body, td and block receipts // convert header, body, td and block receipts
var keyPrefix [38]byte var keyPrefix [38]byte
@ -177,7 +176,7 @@ func upgradeSequentialBlocks(db ethdb.Database, stopFn func() bool) (error, bool
} }
} }
if cnt > 0 { if cnt > 0 {
glog.V(logger.Info).Infof("converted %d blocks...", cnt) log.Info(fmt.Sprintf("converted %d blocks...", cnt))
} }
return nil, false return nil, false
} }
@ -204,7 +203,7 @@ func upgradeSequentialOrphanedReceipts(db ethdb.Database, stopFn func() bool) (e
it.Next() it.Next()
} }
if cnt > 0 { if cnt > 0 {
glog.V(logger.Info).Infof("removed %d orphaned block receipts...", cnt) log.Info(fmt.Sprintf("removed %d orphaned block receipts...", cnt))
} }
return nil, false return nil, false
} }
@ -267,7 +266,7 @@ func upgradeChainDatabase(db ethdb.Database) error {
return nil return nil
} }
// At least some of the database is still the old format, upgrade (skip the head block!) // At least some of the database is still the old format, upgrade (skip the head block!)
glog.V(logger.Info).Info("Old database detected, upgrading...") log.Info(fmt.Sprint("Old database detected, upgrading..."))
if db, ok := db.(*ethdb.LDBDatabase); ok { if db, ok := db.(*ethdb.LDBDatabase); ok {
blockPrefix := []byte("block-hash-") blockPrefix := []byte("block-hash-")
@ -343,7 +342,7 @@ func addMipmapBloomBins(db ethdb.Database) (err error) {
} }
tstart := time.Now() tstart := time.Now()
glog.V(logger.Info).Infoln("upgrading db log bloom bins") log.Info(fmt.Sprint("upgrading db log bloom bins"))
for i := uint64(0); i <= latestBlock.NumberU64(); i++ { for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
hash := core.GetCanonicalHash(db, i) hash := core.GetCanonicalHash(db, i)
if (hash == common.Hash{}) { if (hash == common.Hash{}) {
@ -351,6 +350,6 @@ func addMipmapBloomBins(db ethdb.Database) (err error) {
} }
core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash, i)) core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash, i))
} }
glog.V(logger.Info).Infoln("upgrade completed in", time.Since(tstart)) log.Info(fmt.Sprint("upgrade completed in", time.Since(tstart)))
return nil return nil
} }

@ -33,8 +33,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
@ -249,9 +248,9 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn, getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn,
getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error { getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error {
glog.V(logger.Detail).Infoln("Registering peer", id) log.Trace(fmt.Sprint("Registering peer", id))
if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil { if err := d.peers.Register(newPeer(id, version, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)); err != nil {
glog.V(logger.Error).Infoln("Register failed:", err) log.Error(fmt.Sprint("Register failed:", err))
return err return err
} }
d.qosReduceConfidence() d.qosReduceConfidence()
@ -264,9 +263,9 @@ func (d *Downloader) RegisterPeer(id string, version int, currentHead currentHea
// the queue. // the queue.
func (d *Downloader) UnregisterPeer(id string) error { func (d *Downloader) UnregisterPeer(id string) error {
// Unregister the peer from the active peer set and revoke any fetch tasks // Unregister the peer from the active peer set and revoke any fetch tasks
glog.V(logger.Detail).Infoln("Unregistering peer", id) log.Trace(fmt.Sprint("Unregistering peer", id))
if err := d.peers.Unregister(id); err != nil { if err := d.peers.Unregister(id); err != nil {
glog.V(logger.Error).Infoln("Unregister failed:", err) log.Error(fmt.Sprint("Unregister failed:", err))
return err return err
} }
d.queue.Revoke(id) d.queue.Revoke(id)
@ -285,24 +284,24 @@ func (d *Downloader) UnregisterPeer(id string) error {
// Synchronise tries to sync up our local block chain with a remote peer, both // Synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries. // adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error { func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
glog.V(logger.Detail).Infof("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td) log.Trace(fmt.Sprintf("Attempting synchronisation: %v, head [%x…], TD %v", id, head[:4], td))
err := d.synchronise(id, head, td, mode) err := d.synchronise(id, head, td, mode)
switch err { switch err {
case nil: case nil:
glog.V(logger.Detail).Infof("Synchronisation completed") log.Trace(fmt.Sprintf("Synchronisation completed"))
case errBusy: case errBusy:
glog.V(logger.Detail).Infof("Synchronisation already in progress") log.Trace(fmt.Sprintf("Synchronisation already in progress"))
case errTimeout, errBadPeer, errStallingPeer, case errTimeout, errBadPeer, errStallingPeer,
errEmptyHeaderSet, errPeersUnavailable, errTooOld, errEmptyHeaderSet, errPeersUnavailable, errTooOld,
errInvalidAncestor, errInvalidChain: errInvalidAncestor, errInvalidChain:
glog.V(logger.Debug).Infof("Removing peer %v: %v", id, err) log.Debug(fmt.Sprintf("Removing peer %v: %v", id, err))
d.dropPeer(id) d.dropPeer(id)
default: default:
glog.V(logger.Warn).Infof("Synchronisation failed: %v", err) log.Warn(fmt.Sprintf("Synchronisation failed: %v", err))
} }
return err return err
} }
@ -323,7 +322,7 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
// Post a user notification of the sync (only once per session) // Post a user notification of the sync (only once per session)
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
glog.V(logger.Info).Infoln("Block synchronisation started") log.Info(fmt.Sprint("Block synchronisation started"))
} }
// Reset the queue, peer set and wake channels to clean any internal leftover state // Reset the queue, peer set and wake channels to clean any internal leftover state
d.queue.Reset() d.queue.Reset()
@ -388,9 +387,9 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
return errTooOld return errTooOld
} }
glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version) log.Debug(fmt.Sprintf("Synchronising with the network using: %s [eth/%d]", p.id, p.version))
defer func(start time.Time) { defer func(start time.Time) {
glog.V(logger.Debug).Infof("Synchronisation terminated after %v", time.Since(start)) log.Debug(fmt.Sprintf("Synchronisation terminated after %v", time.Since(start)))
}(time.Now()) }(time.Now())
// Look up the sync boundaries: the common ancestor and the target block // Look up the sync boundaries: the common ancestor and the target block
@ -438,7 +437,7 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err e
origin = 0 origin = 0
} }
} }
glog.V(logger.Debug).Infof("Fast syncing until pivot block #%d", pivot) log.Debug(fmt.Sprintf("Fast syncing until pivot block #%d", pivot))
} }
d.queue.Prepare(origin+1, d.mode, pivot, latest) d.queue.Prepare(origin+1, d.mode, pivot, latest)
if d.syncInitHook != nil { if d.syncInitHook != nil {
@ -523,7 +522,7 @@ func (d *Downloader) Terminate() {
// fetchHeight retrieves the head header of the remote peer to aid in estimating // fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take. // the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) { func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p) log.Debug(fmt.Sprintf("%v: retrieving remote chain height", p))
// Request the advertised remote head block and wait for the response // Request the advertised remote head block and wait for the response
head, _ := p.currentHead() head, _ := p.currentHead()
@ -538,19 +537,19 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
case packet := <-d.headerCh: case packet := <-d.headerCh:
// Discard anything not from the origin peer // Discard anything not from the origin peer
if packet.PeerId() != p.id { if packet.PeerId() != p.id {
glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId()) log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
break break
} }
// Make sure the peer actually gave something valid // Make sure the peer actually gave something valid
headers := packet.(*headerPack).headers headers := packet.(*headerPack).headers
if len(headers) != 1 { if len(headers) != 1 {
glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers)) log.Debug(fmt.Sprintf("%v: invalid number of head headers: %d != 1", p, len(headers)))
return nil, errBadPeer return nil, errBadPeer
} }
return headers[0], nil return headers[0], nil
case <-timeout: case <-timeout:
glog.V(logger.Debug).Infof("%v: head header timeout", p) log.Debug(fmt.Sprintf("%v: head header timeout", p))
return nil, errTimeout return nil, errTimeout
case <-d.bodyCh: case <-d.bodyCh:
@ -567,7 +566,7 @@ func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) {
// In the rare scenario when we ended up on a long reorganisation (i.e. none of // In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor. // the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) { func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
glog.V(logger.Debug).Infof("%v: looking for common ancestor (remote height %d)", p, height) log.Debug(fmt.Sprintf("%v: looking for common ancestor (remote height %d)", p, height))
// Figure out the valid ancestor range to prevent rewrite attacks // Figure out the valid ancestor range to prevent rewrite attacks
floor, ceil := int64(-1), d.headHeader().Number.Uint64() floor, ceil := int64(-1), d.headHeader().Number.Uint64()
@ -608,19 +607,19 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
case packet := <-d.headerCh: case packet := <-d.headerCh:
// Discard anything not from the origin peer // Discard anything not from the origin peer
if packet.PeerId() != p.id { if packet.PeerId() != p.id {
glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId()) log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packet.PeerId()))
break break
} }
// Make sure the peer actually gave something valid // Make sure the peer actually gave something valid
headers := packet.(*headerPack).headers headers := packet.(*headerPack).headers
if len(headers) == 0 { if len(headers) == 0 {
glog.V(logger.Warn).Infof("%v: empty head header set", p) log.Warn(fmt.Sprintf("%v: empty head header set", p))
return 0, errEmptyHeaderSet return 0, errEmptyHeaderSet
} }
// Make sure the peer's reply conforms to the request // Make sure the peer's reply conforms to the request
for i := 0; i < len(headers); i++ { for i := 0; i < len(headers); i++ {
if number := headers[i].Number.Int64(); number != from+int64(i)*16 { if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
glog.V(logger.Warn).Infof("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number) log.Warn(fmt.Sprintf("%v: head header set (item %d) broke chain ordering: requested %d, got %d", p, i, from+int64(i)*16, number))
return 0, errInvalidChain return 0, errInvalidChain
} }
} }
@ -637,7 +636,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
// If every header is known, even future ones, the peer straight out lied about its head // If every header is known, even future ones, the peer straight out lied about its head
if number > height && i == limit-1 { if number > height && i == limit-1 {
glog.V(logger.Warn).Infof("%v: lied about chain head: reported %d, found above %d", p, height, number) log.Warn(fmt.Sprintf("%v: lied about chain head: reported %d, found above %d", p, height, number))
return 0, errStallingPeer return 0, errStallingPeer
} }
break break
@ -645,7 +644,7 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
} }
case <-timeout: case <-timeout:
glog.V(logger.Debug).Infof("%v: head header timeout", p) log.Debug(fmt.Sprintf("%v: head header timeout", p))
return 0, errTimeout return 0, errTimeout
case <-d.bodyCh: case <-d.bodyCh:
@ -657,10 +656,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
// If the head fetch already found an ancestor, return // If the head fetch already found an ancestor, return
if !common.EmptyHash(hash) { if !common.EmptyHash(hash) {
if int64(number) <= floor { if int64(number) <= floor {
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor) log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, number, hash[:4], floor))
return 0, errInvalidAncestor return 0, errInvalidAncestor
} }
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, number, hash[:4]) log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, number, hash[:4]))
return number, nil return number, nil
} }
// Ancestor not found, we need to binary search over our chain // Ancestor not found, we need to binary search over our chain
@ -684,13 +683,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
case packer := <-d.headerCh: case packer := <-d.headerCh:
// Discard anything not from the origin peer // Discard anything not from the origin peer
if packer.PeerId() != p.id { if packer.PeerId() != p.id {
glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packer.PeerId()) log.Debug(fmt.Sprintf("Received headers from incorrect peer(%s)", packer.PeerId()))
break break
} }
// Make sure the peer actually gave something valid // Make sure the peer actually gave something valid
headers := packer.(*headerPack).headers headers := packer.(*headerPack).headers
if len(headers) != 1 { if len(headers) != 1 {
glog.V(logger.Debug).Infof("%v: invalid search header set (%d)", p, len(headers)) log.Debug(fmt.Sprintf("%v: invalid search header set (%d)", p, len(headers)))
return 0, errBadPeer return 0, errBadPeer
} }
arrived = true arrived = true
@ -702,13 +701,13 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
} }
header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists header := d.getHeader(headers[0].Hash()) // Independent of sync mode, header surely exists
if header.Number.Uint64() != check { if header.Number.Uint64() != check {
glog.V(logger.Debug).Infof("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check) log.Debug(fmt.Sprintf("%v: non requested header #%d [%x…], instead of #%d", p, header.Number, header.Hash().Bytes()[:4], check))
return 0, errBadPeer return 0, errBadPeer
} }
start = check start = check
case <-timeout: case <-timeout:
glog.V(logger.Debug).Infof("%v: search header timeout", p) log.Debug(fmt.Sprintf("%v: search header timeout", p))
return 0, errTimeout return 0, errTimeout
case <-d.bodyCh: case <-d.bodyCh:
@ -720,10 +719,10 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
} }
// Ensure valid ancestry and return // Ensure valid ancestry and return
if int64(start) <= floor { if int64(start) <= floor {
glog.V(logger.Warn).Infof("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor) log.Warn(fmt.Sprintf("%v: potential rewrite attack: #%d [%x…] <= #%d limit", p, start, hash[:4], floor))
return 0, errInvalidAncestor return 0, errInvalidAncestor
} }
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x…]", p, start, hash[:4]) log.Debug(fmt.Sprintf("%v: common ancestor: #%d [%x…]", p, start, hash[:4]))
return start, nil return start, nil
} }
@ -736,8 +735,8 @@ func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) {
// can fill in the skeleton - not even the origin peer - it's assumed invalid and // can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped. // the origin is dropped.
func (d *Downloader) fetchHeaders(p *peer, from uint64) error { func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
glog.V(logger.Debug).Infof("%v: directing header downloads from #%d", p, from) log.Debug(fmt.Sprintf("%v: directing header downloads from #%d", p, from))
defer glog.V(logger.Debug).Infof("%v: header download terminated", p) defer log.Debug(fmt.Sprintf("%v: header download terminated", p))
// Create a timeout timer, and the associated header fetcher // Create a timeout timer, and the associated header fetcher
skeleton := true // Skeleton assembly phase or finishing up skeleton := true // Skeleton assembly phase or finishing up
@ -751,10 +750,10 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
timeout.Reset(d.requestTTL()) timeout.Reset(d.requestTTL())
if skeleton { if skeleton {
glog.V(logger.Detail).Infof("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from) log.Trace(fmt.Sprintf("%v: fetching %d skeleton headers from #%d", p, MaxHeaderFetch, from))
go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
} else { } else {
glog.V(logger.Detail).Infof("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from) log.Trace(fmt.Sprintf("%v: fetching %d full headers from #%d", p, MaxHeaderFetch, from))
go p.getAbsHeaders(from, MaxHeaderFetch, 0, false) go p.getAbsHeaders(from, MaxHeaderFetch, 0, false)
} }
} }
@ -769,7 +768,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
case packet := <-d.headerCh: case packet := <-d.headerCh:
// Make sure the active peer is giving us the skeleton headers // Make sure the active peer is giving us the skeleton headers
if packet.PeerId() != p.id { if packet.PeerId() != p.id {
glog.V(logger.Debug).Infof("Received skeleton headers from incorrect peer (%s)", packet.PeerId()) log.Debug(fmt.Sprintf("Received skeleton headers from incorrect peer (%s)", packet.PeerId()))
break break
} }
headerReqTimer.UpdateSince(request) headerReqTimer.UpdateSince(request)
@ -783,7 +782,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
} }
// If no more headers are inbound, notify the content fetchers and return // If no more headers are inbound, notify the content fetchers and return
if packet.Items() == 0 { if packet.Items() == 0 {
glog.V(logger.Debug).Infof("%v: no available headers", p) log.Debug(fmt.Sprintf("%v: no available headers", p))
select { select {
case d.headerProcCh <- nil: case d.headerProcCh <- nil:
return nil return nil
@ -797,7 +796,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
if skeleton { if skeleton {
filled, proced, err := d.fillHeaderSkeleton(from, headers) filled, proced, err := d.fillHeaderSkeleton(from, headers)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("%v: skeleton chain invalid: %v", p, err) log.Debug(fmt.Sprintf("%v: skeleton chain invalid: %v", p, err))
return errInvalidChain return errInvalidChain
} }
headers = filled[proced:] headers = filled[proced:]
@ -805,7 +804,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
} }
// Insert all the new headers and fetch the next batch // Insert all the new headers and fetch the next batch
if len(headers) > 0 { if len(headers) > 0 {
glog.V(logger.Detail).Infof("%v: schedule %d headers from #%d", p, len(headers), from) log.Trace(fmt.Sprintf("%v: schedule %d headers from #%d", p, len(headers), from))
select { select {
case d.headerProcCh <- headers: case d.headerProcCh <- headers:
case <-d.cancelCh: case <-d.cancelCh:
@ -817,7 +816,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
case <-timeout.C: case <-timeout.C:
// Header retrieval timed out, consider the peer bad and drop // Header retrieval timed out, consider the peer bad and drop
glog.V(logger.Debug).Infof("%v: header request timed out", p) log.Debug(fmt.Sprintf("%v: header request timed out", p))
headerTimeoutMeter.Mark(1) headerTimeoutMeter.Mark(1)
d.dropPeer(p.id) d.dropPeer(p.id)
@ -847,7 +846,7 @@ func (d *Downloader) fetchHeaders(p *peer, from uint64) error {
// The method returs the entire filled skeleton and also the number of headers // The method returs the entire filled skeleton and also the number of headers
// already forwarded for processing. // already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
glog.V(logger.Debug).Infof("Filling up skeleton from #%d", from) log.Debug(fmt.Sprintf("Filling up skeleton from #%d", from))
d.queue.ScheduleSkeleton(from, skeleton) d.queue.ScheduleSkeleton(from, skeleton)
var ( var (
@ -868,7 +867,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve, d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header") nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "Header")
glog.V(logger.Debug).Infof("Skeleton fill terminated: %v", err) log.Debug(fmt.Sprintf("Skeleton fill terminated: %v", err))
filled, proced := d.queue.RetrieveHeaders() filled, proced := d.queue.RetrieveHeaders()
return filled, proced, err return filled, proced, err
@ -878,7 +877,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
// available peers, reserving a chunk of blocks for each, waiting for delivery // available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts. // and also periodically checking for timeouts.
func (d *Downloader) fetchBodies(from uint64) error { func (d *Downloader) fetchBodies(from uint64) error {
glog.V(logger.Debug).Infof("Downloading block bodies from #%d", from) log.Debug(fmt.Sprintf("Downloading block bodies from #%d", from))
var ( var (
deliver = func(packet dataPack) (int, error) { deliver = func(packet dataPack) (int, error) {
@ -894,7 +893,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies, d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body") d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "Body")
glog.V(logger.Debug).Infof("Block body download terminated: %v", err) log.Debug(fmt.Sprintf("Block body download terminated: %v", err))
return err return err
} }
@ -902,7 +901,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
// available peers, reserving a chunk of receipts for each, waiting for delivery // available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts. // and also periodically checking for timeouts.
func (d *Downloader) fetchReceipts(from uint64) error { func (d *Downloader) fetchReceipts(from uint64) error {
glog.V(logger.Debug).Infof("Downloading receipts from #%d", from) log.Debug(fmt.Sprintf("Downloading receipts from #%d", from))
var ( var (
deliver = func(packet dataPack) (int, error) { deliver = func(packet dataPack) (int, error) {
@ -918,7 +917,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts, d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt") d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "Receipt")
glog.V(logger.Debug).Infof("Receipt download terminated: %v", err) log.Debug(fmt.Sprintf("Receipt download terminated: %v", err))
return err return err
} }
@ -926,7 +925,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
// available peers, reserving a chunk of nodes for each, waiting for delivery and // available peers, reserving a chunk of nodes for each, waiting for delivery and
// also periodically checking for timeouts. // also periodically checking for timeouts.
func (d *Downloader) fetchNodeData() error { func (d *Downloader) fetchNodeData() error {
glog.V(logger.Debug).Infof("Downloading node state data") log.Debug(fmt.Sprintf("Downloading node state data"))
var ( var (
deliver = func(packet dataPack) (int, error) { deliver = func(packet dataPack) (int, error) {
@ -934,12 +933,12 @@ func (d *Downloader) fetchNodeData() error {
return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(delivered int, progressed bool, err error) { return d.queue.DeliverNodeData(packet.PeerId(), packet.(*statePack).states, func(delivered int, progressed bool, err error) {
// If the peer returned old-requested data, forgive // If the peer returned old-requested data, forgive
if err == trie.ErrNotRequested { if err == trie.ErrNotRequested {
glog.V(logger.Debug).Infof("peer %s: replied to stale state request, forgiving", packet.PeerId()) log.Debug(fmt.Sprintf("peer %s: replied to stale state request, forgiving", packet.PeerId()))
return return
} }
if err != nil { if err != nil {
// If the node data processing failed, the root hash is very wrong, abort // If the node data processing failed, the root hash is very wrong, abort
glog.V(logger.Error).Infof("peer %d: state processing failed: %v", packet.PeerId(), err) log.Error(fmt.Sprintf("peer %d: state processing failed: %v", packet.PeerId(), err))
d.cancel() d.cancel()
return return
} }
@ -958,12 +957,12 @@ func (d *Downloader) fetchNodeData() error {
// If real database progress was made, reset any fast-sync pivot failure // If real database progress was made, reset any fast-sync pivot failure
if progressed && atomic.LoadUint32(&d.fsPivotFails) > 1 { if progressed && atomic.LoadUint32(&d.fsPivotFails) > 1 {
glog.V(logger.Debug).Infof("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails)) log.Debug(fmt.Sprintf("fast-sync progressed, resetting fail counter from %d", atomic.LoadUint32(&d.fsPivotFails)))
atomic.StoreUint32(&d.fsPivotFails, 1) // Don't ever reset to 0, as that will unlock the pivot block atomic.StoreUint32(&d.fsPivotFails, 1) // Don't ever reset to 0, as that will unlock the pivot block
} }
// Log a message to the user and return // Log a message to the user and return
if delivered > 0 { if delivered > 0 {
glog.V(logger.Info).Infof("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending) log.Info(fmt.Sprintf("imported %3d state entries in %9v: processed %d, pending at least %d", delivered, common.PrettyDuration(time.Since(start)), syncStatsStateDone, pending))
} }
}) })
} }
@ -980,7 +979,7 @@ func (d *Downloader) fetchNodeData() error {
d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch, d.queue.PendingNodeData, d.queue.InFlightNodeData, throttle, reserve, nil, fetch,
d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State") d.queue.CancelNodeData, capacity, d.peers.NodeDataIdlePeers, setIdle, "State")
glog.V(logger.Debug).Infof("Node state data download terminated: %v", err) log.Debug(fmt.Sprintf("Node state data download terminated: %v", err))
return err return err
} }
@ -1045,11 +1044,11 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
// Issue a log to the user to see what's going on // Issue a log to the user to see what's going on
switch { switch {
case err == nil && packet.Items() == 0: case err == nil && packet.Items() == 0:
glog.V(logger.Detail).Infof("%s: no %s delivered", peer, strings.ToLower(kind)) log.Trace(fmt.Sprintf("%s: no %s delivered", peer, strings.ToLower(kind)))
case err == nil: case err == nil:
glog.V(logger.Detail).Infof("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind)) log.Trace(fmt.Sprintf("%s: delivered %s %s(s)", peer, packet.Stats(), strings.ToLower(kind)))
default: default:
glog.V(logger.Detail).Infof("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err) log.Trace(fmt.Sprintf("%s: %s delivery failed: %v", peer, strings.ToLower(kind), err))
} }
} }
// Blocks assembled, try to update the progress // Blocks assembled, try to update the progress
@ -1092,10 +1091,10 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
// and latency of a peer separately, which requires pushing the measures capacity a bit and seeing // and latency of a peer separately, which requires pushing the measures capacity a bit and seeing
// how response times reacts, to it always requests one more than the minimum (i.e. min 2). // how response times reacts, to it always requests one more than the minimum (i.e. min 2).
if fails > 2 { if fails > 2 {
glog.V(logger.Detail).Infof("%s: %s delivery timeout", peer, strings.ToLower(kind)) log.Trace(fmt.Sprintf("%s: %s delivery timeout", peer, strings.ToLower(kind)))
setIdle(peer, 0) setIdle(peer, 0)
} else { } else {
glog.V(logger.Debug).Infof("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind)) log.Debug(fmt.Sprintf("%s: stalling %s delivery, dropping", peer, strings.ToLower(kind)))
d.dropPeer(pid) d.dropPeer(pid)
} }
} }
@ -1103,7 +1102,7 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
// If there's nothing more to fetch, wait or terminate // If there's nothing more to fetch, wait or terminate
if pending() == 0 { if pending() == 0 {
if !inFlight() && finished { if !inFlight() && finished {
glog.V(logger.Debug).Infof("%s fetching completed", kind) log.Debug(fmt.Sprintf("%s fetching completed", kind))
return nil return nil
} }
break break
@ -1131,15 +1130,15 @@ func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliv
if request == nil { if request == nil {
continue continue
} }
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string {
if request.From > 0 { if request.From > 0 {
glog.Infof("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From) return fmt.Sprintf("%s: requesting %s(s) from #%d", peer, strings.ToLower(kind), request.From)
} else if len(request.Headers) > 0 { } else if len(request.Headers) > 0 {
glog.Infof("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number) return fmt.Sprintf("%s: requesting %d %s(s), first at #%d", peer, len(request.Headers), strings.ToLower(kind), request.Headers[0].Number)
} else { } else {
glog.Infof("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind)) return fmt.Sprintf("%s: requesting %d %s(s)", peer, len(request.Hashes), strings.ToLower(kind))
} }
} }})
// Fetch the chunk and make sure any errors return the hashes to the queue // Fetch the chunk and make sure any errors return the hashes to the queue
if fetchHook != nil { if fetchHook != nil {
fetchHook(request.Headers) fetchHook(request.Headers)
@ -1194,8 +1193,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
if d.headBlock != nil { if d.headBlock != nil {
curBlock = d.headBlock().Number() curBlock = d.headBlock().Number()
} }
glog.V(logger.Warn).Infof("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)", log.Warn(fmt.Sprintf("Rolled back %d headers (LH: %d->%d, FB: %d->%d, LB: %d->%d)",
len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock) len(hashes), lastHeader, d.headHeader().Number, lastFastBlock, curFastBlock, lastBlock, curBlock))
// If we're already past the pivot point, this could be an attack, thread carefully // If we're already past the pivot point, this could be an attack, thread carefully
if rollback[len(rollback)-1].Number.Uint64() > pivot { if rollback[len(rollback)-1].Number.Uint64() > pivot {
@ -1203,7 +1202,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
if atomic.LoadUint32(&d.fsPivotFails) == 0 { if atomic.LoadUint32(&d.fsPivotFails) == 0 {
for _, header := range rollback { for _, header := range rollback {
if header.Number.Uint64() == pivot { if header.Number.Uint64() == pivot {
glog.V(logger.Warn).Infof("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4]) log.Warn(fmt.Sprintf("Fast-sync critical section failure, locked pivot to header #%d [%x…]", pivot, header.Hash().Bytes()[:4]))
d.fsPivotLock = header d.fsPivotLock = header
} }
} }
@ -1299,7 +1298,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
if n > 0 { if n > 0 {
rollback = append(rollback, chunk[:n]...) rollback = append(rollback, chunk[:n]...)
} }
glog.V(logger.Debug).Infof("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err) log.Debug(fmt.Sprintf("invalid header #%d [%x…]: %v", chunk[n].Number, chunk[n].Hash().Bytes()[:4], err))
return errInvalidChain return errInvalidChain
} }
// All verifications passed, store newly found uncertain headers // All verifications passed, store newly found uncertain headers
@ -1311,7 +1310,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in // If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot { if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() { if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
glog.V(logger.Warn).Infof("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4]) log.Warn(fmt.Sprintf("Pivot doesn't match locked in version: have #%v [%x…], want #%v [%x…]", pivot.Number, pivot.Hash().Bytes()[:4], d.fsPivotLock.Number, d.fsPivotLock.Hash().Bytes()[:4]))
return errInvalidChain return errInvalidChain
} }
} }
@ -1328,7 +1327,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
// Otherwise insert the headers for content retrieval // Otherwise insert the headers for content retrieval
inserts := d.queue.Schedule(chunk, origin) inserts := d.queue.Schedule(chunk, origin)
if len(inserts) != len(chunk) { if len(inserts) != len(chunk) {
glog.V(logger.Debug).Infof("stale headers") log.Debug(fmt.Sprintf("stale headers"))
return errBadPeer return errBadPeer
} }
} }
@ -1359,10 +1358,10 @@ func (d *Downloader) processContent() error {
d.chainInsertHook(results) d.chainInsertHook(results)
} }
// Actually import the blocks // Actually import the blocks
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
first, last := results[0].Header, results[len(results)-1].Header first, last := results[0].Header, results[len(results)-1].Header
glog.Infof("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4]) return fmt.Sprintf("Inserting chain with %d items (#%d [%x…] - #%d [%x…])", len(results), first.Number, first.Hash().Bytes()[:4], last.Number, last.Hash().Bytes()[:4])
} }})
for len(results) != 0 { for len(results) != 0 {
// Check for any termination requests // Check for any termination requests
select { select {
@ -1396,14 +1395,14 @@ func (d *Downloader) processContent() error {
case len(receipts) > 0: case len(receipts) > 0:
index, err = d.insertReceipts(blocks, receipts) index, err = d.insertReceipts(blocks, receipts)
if err == nil && blocks[len(blocks)-1].NumberU64() == pivot { if err == nil && blocks[len(blocks)-1].NumberU64() == pivot {
glog.V(logger.Debug).Infof("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4]) log.Debug(fmt.Sprintf("Committing block #%d [%x…] as the new head", blocks[len(blocks)-1].Number(), blocks[len(blocks)-1].Hash().Bytes()[:4]))
index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash()) index, err = len(blocks)-1, d.commitHeadBlock(blocks[len(blocks)-1].Hash())
} }
default: default:
index, err = d.insertBlocks(blocks) index, err = d.insertBlocks(blocks)
} }
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err) log.Debug(fmt.Sprintf("Result #%d [%x…] processing failed: %v", results[index].Header.Number, results[index].Header.Hash().Bytes()[:4], err))
return errInvalidChain return errInvalidChain
} }
// Shift the results to the next batch // Shift the results to the next batch
@ -1471,7 +1470,7 @@ func (d *Downloader) qosTuner() {
atomic.StoreUint64(&d.rttConfidence, conf) atomic.StoreUint64(&d.rttConfidence, conf)
// Log the new QoS values and sleep until the next RTT // Log the new QoS values and sleep until the next RTT
glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()) log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
select { select {
case <-d.quitCh: case <-d.quitCh:
return return
@ -1501,7 +1500,7 @@ func (d *Downloader) qosReduceConfidence() {
atomic.StoreUint64(&d.rttConfidence, conf) atomic.StoreUint64(&d.rttConfidence, conf)
rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate)) rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()) log.Debug(fmt.Sprintf("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, d.requestTTL()))
} }
// requestRTT returns the current target round trip time for a download request // requestRTT returns the current target round trip time for a download request

@ -30,8 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"gopkg.in/karalabe/cookiejar.v2/collections/prque" "gopkg.in/karalabe/cookiejar.v2/collections/prque"
@ -365,20 +364,20 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
// Make sure chain order is honoured and preserved throughout // Make sure chain order is honoured and preserved throughout
hash := header.Hash() hash := header.Hash()
if header.Number == nil || header.Number.Uint64() != from { if header.Number == nil || header.Number.Uint64() != from {
glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from) log.Warn(fmt.Sprintf("Header #%v [%x…] broke chain ordering, expected %d", header.Number, hash[:4], from))
break break
} }
if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash { if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
glog.V(logger.Warn).Infof("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4]) log.Warn(fmt.Sprintf("Header #%v [%x…] broke chain ancestry", header.Number, hash[:4]))
break break
} }
// Make sure no duplicate requests are executed // Make sure no duplicate requests are executed
if _, ok := q.blockTaskPool[hash]; ok { if _, ok := q.blockTaskPool[hash]; ok {
glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4]) log.Warn(fmt.Sprintf("Header #%d [%x…] already scheduled for block fetch", header.Number.Uint64(), hash[:4]))
continue continue
} }
if _, ok := q.receiptTaskPool[hash]; ok { if _, ok := q.receiptTaskPool[hash]; ok {
glog.V(logger.Warn).Infof("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4]) log.Warn(fmt.Sprintf("Header #%d [%x…] already scheduled for receipt fetch", header.Number.Uint64(), hash[:4]))
continue continue
} }
// Queue the header for content retrieval // Queue the header for content retrieval
@ -392,7 +391,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
} }
if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot { if q.mode == FastSync && header.Number.Uint64() == q.fastSyncPivot {
// Pivoting point of the fast sync, switch the state retrieval to this // Pivoting point of the fast sync, switch the state retrieval to this
glog.V(logger.Debug).Infof("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4]) log.Debug(fmt.Sprintf("Switching state downloads to %d [%x…]", header.Number.Uint64(), header.Hash().Bytes()[:4]))
q.stateTaskIndex = 0 q.stateTaskIndex = 0
q.stateTaskPool = make(map[common.Hash]int) q.stateTaskPool = make(map[common.Hash]int)
@ -873,10 +872,10 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
accepted := len(headers) == MaxHeaderFetch accepted := len(headers) == MaxHeaderFetch
if accepted { if accepted {
if headers[0].Number.Uint64() != request.From { if headers[0].Number.Uint64() != request.From {
glog.V(logger.Detail).Infof("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From) log.Trace(fmt.Sprintf("Peer %s: first header #%v [%x…] broke chain ordering, expected %d", id, headers[0].Number, headers[0].Hash().Bytes()[:4], request.From))
accepted = false accepted = false
} else if headers[len(headers)-1].Hash() != target { } else if headers[len(headers)-1].Hash() != target {
glog.V(logger.Detail).Infof("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4]) log.Trace(fmt.Sprintf("Peer %s: last header #%v [%x…] broke skeleton structure, expected %x", id, headers[len(headers)-1].Number, headers[len(headers)-1].Hash().Bytes()[:4], target[:4]))
accepted = false accepted = false
} }
} }
@ -884,12 +883,12 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
for i, header := range headers[1:] { for i, header := range headers[1:] {
hash := header.Hash() hash := header.Hash()
if want := request.From + 1 + uint64(i); header.Number.Uint64() != want { if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want) log.Warn(fmt.Sprintf("Peer %s: header #%v [%x…] broke chain ordering, expected %d", id, header.Number, hash[:4], want))
accepted = false accepted = false
break break
} }
if headers[i].Hash() != header.ParentHash { if headers[i].Hash() != header.ParentHash {
glog.V(logger.Warn).Infof("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4]) log.Warn(fmt.Sprintf("Peer %s: header #%v [%x…] broke chain ancestry", id, header.Number, hash[:4]))
accepted = false accepted = false
break break
} }
@ -897,7 +896,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
} }
// If the batch of headers wasn't accepted, mark as unavailable // If the batch of headers wasn't accepted, mark as unavailable
if !accepted { if !accepted {
glog.V(logger.Detail).Infof("Peer %s: skeleton filling from header #%d not accepted", id, request.From) log.Trace(fmt.Sprintf("Peer %s: skeleton filling from header #%d not accepted", id, request.From))
miss := q.headerPeerMiss[id] miss := q.headerPeerMiss[id]
if miss == nil { if miss == nil {
@ -924,7 +923,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
select { select {
case headerProcCh <- process: case headerProcCh <- process:
glog.V(logger.Detail).Infof("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number) log.Trace(fmt.Sprintf("%s: pre-scheduled %d headers from #%v", id, len(process), process[0].Number))
q.headerProced += len(process) q.headerProced += len(process)
default: default:
} }

@ -26,8 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"gopkg.in/karalabe/cookiejar.v2/collections/prque" "gopkg.in/karalabe/cookiejar.v2/collections/prque"
) )
@ -221,7 +220,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher, // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently. // returning those that should be handled differently.
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header { func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
glog.V(logger.Detail).Infof("[eth/62] filtering %d headers", len(headers)) log.Trace(fmt.Sprintf("[eth/62] filtering %d headers", len(headers)))
// Send the filter channel to the fetcher // Send the filter channel to the fetcher
filter := make(chan *headerFilterTask) filter := make(chan *headerFilterTask)
@ -249,7 +248,7 @@ func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*type
// FilterBodies extracts all the block bodies that were explicitly requested by // FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently. // the fetcher, returning those that should be handled differently.
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) { func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
glog.V(logger.Detail).Infof("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles)) log.Trace(fmt.Sprintf("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles)))
// Send the filter channel to the fetcher // Send the filter channel to the fetcher
filter := make(chan *bodyFilterTask) filter := make(chan *bodyFilterTask)
@ -324,14 +323,14 @@ func (f *Fetcher) loop() {
count := f.announces[notification.origin] + 1 count := f.announces[notification.origin] + 1
if count > hashLimit { if count > hashLimit {
glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit) log.Debug(fmt.Sprintf("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit))
propAnnounceDOSMeter.Mark(1) propAnnounceDOSMeter.Mark(1)
break break
} }
// If we have a valid block number, check that it's potentially useful // If we have a valid block number, check that it's potentially useful
if notification.number > 0 { if notification.number > 0 {
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist) log.Debug(fmt.Sprintf("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist))
propAnnounceDropMeter.Mark(1) propAnnounceDropMeter.Mark(1)
break break
} }
@ -381,13 +380,15 @@ func (f *Fetcher) loop() {
} }
// Send out all block header requests // Send out all block header requests
for peer, hashes := range request { for peer, hashes := range request {
if glog.V(logger.Detail) && len(hashes) > 0 { if len(hashes) > 0 {
list := "[" log.Trace("", "msg", log.Lazy{Fn: func() string {
for _, hash := range hashes { list := "["
list += fmt.Sprintf("%x…, ", hash[:4]) for _, hash := range hashes {
} list += fmt.Sprintf("%x…, ", hash[:4])
list = list[:len(list)-2] + "]" }
glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list) list = list[:len(list)-2] + "]"
return fmt.Sprintf("[eth/62] Peer %s: fetching headers %s", peer, list)
}})
} }
// Create a closure of the fetch and schedule in on a new thread // Create a closure of the fetch and schedule in on a new thread
fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
@ -421,14 +422,16 @@ func (f *Fetcher) loop() {
} }
// Send out all block body requests // Send out all block body requests
for peer, hashes := range request { for peer, hashes := range request {
if glog.V(logger.Detail) && len(hashes) > 0 { if len(hashes) > 0 {
list := "[" log.Trace("", "msg", log.Lazy{Fn: func() string {
for _, hash := range hashes { list := "["
list += fmt.Sprintf("%x…, ", hash[:4]) for _, hash := range hashes {
} list += fmt.Sprintf("%x…, ", hash[:4])
list = list[:len(list)-2] + "]" }
list = list[:len(list)-2] + "]"
glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching bodies %s", peer, list) return fmt.Sprintf("[eth/62] Peer %s: fetching bodies %s", peer, list)
}})
} }
// Create a closure of the fetch and schedule in on a new thread // Create a closure of the fetch and schedule in on a new thread
if f.completingHook != nil { if f.completingHook != nil {
@ -462,7 +465,7 @@ func (f *Fetcher) loop() {
if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil { if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
// If the delivered header does not match the promised number, drop the announcer // If the delivered header does not match the promised number, drop the announcer
if header.Number.Uint64() != announce.number { if header.Number.Uint64() != announce.number {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64()) log.Trace(fmt.Sprintf("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64()))
f.dropPeer(announce.origin) f.dropPeer(announce.origin)
f.forgetHash(hash) f.forgetHash(hash)
continue continue
@ -474,7 +477,7 @@ func (f *Fetcher) loop() {
// If the block is empty (header only), short circuit into the final import queue // If the block is empty (header only), short circuit into the final import queue
if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) { if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]) log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))
block := types.NewBlockWithHeader(header) block := types.NewBlockWithHeader(header)
block.ReceivedAt = task.time block.ReceivedAt = task.time
@ -486,7 +489,7 @@ func (f *Fetcher) loop() {
// Otherwise add to the list of blocks needing completion // Otherwise add to the list of blocks needing completion
incomplete = append(incomplete, announce) incomplete = append(incomplete, announce)
} else { } else {
glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]) log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))
f.forgetHash(hash) f.forgetHash(hash)
} }
} else { } else {
@ -617,14 +620,14 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
// Ensure the peer isn't DOSing us // Ensure the peer isn't DOSing us
count := f.queues[peer] + 1 count := f.queues[peer] + 1
if count > blockLimit { if count > blockLimit {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit) log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit))
propBroadcastDOSMeter.Mark(1) propBroadcastDOSMeter.Mark(1)
f.forgetHash(hash) f.forgetHash(hash)
return return
} }
// Discard any past or too distant blocks // Discard any past or too distant blocks
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist) log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist))
propBroadcastDropMeter.Mark(1) propBroadcastDropMeter.Mark(1)
f.forgetHash(hash) f.forgetHash(hash)
return return
@ -641,9 +644,9 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
if f.queueChangeHook != nil { if f.queueChangeHook != nil {
f.queueChangeHook(op.block.Hash(), true) f.queueChangeHook(op.block.Hash(), true)
} }
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size()) return fmt.Sprintf("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
} }})
} }
} }
@ -654,14 +657,14 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
hash := block.Hash() hash := block.Hash()
// Run the import on a new thread // Run the import on a new thread
glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4]) log.Debug(fmt.Sprintf("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4]))
go func() { go func() {
defer func() { f.done <- hash }() defer func() { f.done <- hash }()
// If the parent's unknown, abort insertion // If the parent's unknown, abort insertion
parent := f.getBlock(block.ParentHash()) parent := f.getBlock(block.ParentHash())
if parent == nil { if parent == nil {
glog.V(logger.Debug).Infof("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4]) log.Debug(fmt.Sprintf("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4]))
return return
} }
// Quickly validate the header and propagate the block if it passes // Quickly validate the header and propagate the block if it passes
@ -676,13 +679,13 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
default: default:
// Something went very wrong, drop the peer // Something went very wrong, drop the peer
glog.V(logger.Debug).Infof("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err) log.Debug(fmt.Sprintf("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err))
f.dropPeer(peer) f.dropPeer(peer)
return return
} }
// Run the actual import and log any issues // Run the actual import and log any issues
if _, err := f.insertChain(types.Blocks{block}); err != nil { if _, err := f.insertChain(types.Blocks{block}); err != nil {
glog.V(logger.Warn).Infof("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err) log.Warn(fmt.Sprintf("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err))
return return
} }
// If import succeeded, broadcast the block // If import succeeded, broadcast the block

@ -17,6 +17,7 @@
package gasprice package gasprice
import ( import (
"fmt"
"math/big" "math/big"
"math/rand" "math/rand"
"sync" "sync"
@ -25,8 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
const ( const (
@ -176,7 +176,7 @@ func (self *GasPriceOracle) processBlock(block *types.Block) {
self.lastBase = newBase self.lastBase = newBase
self.lastBaseMutex.Unlock() self.lastBaseMutex.Unlock()
glog.V(logger.Detail).Infof("Processed block #%v, base price is %v\n", i, newBase.Int64()) log.Trace(fmt.Sprintf("Processed block #%v, base price is %v\n", i, newBase.Int64()))
} }
// returns the lowers possible price with which a tx was or could have been included // returns the lowers possible price with which a tx was or could have been included

@ -33,8 +33,7 @@ import (
"github.com/ethereum/go-ethereum/eth/fetcher" "github.com/ethereum/go-ethereum/eth/fetcher"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -116,7 +115,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
} }
// Figure out whether to allow fast sync or not // Figure out whether to allow fast sync or not
if fastSync && blockchain.CurrentBlock().NumberU64() > 0 { if fastSync && blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("blockchain not empty, fast sync disabled") log.Info(fmt.Sprintf("blockchain not empty, fast sync disabled"))
fastSync = false fastSync = false
} }
if fastSync { if fastSync {
@ -179,7 +178,7 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer) manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 { if blockchain.Genesis().Hash().Hex() == defaultGenesisHash && networkId == 1 {
glog.V(logger.Debug).Infoln("Bad Block Reporting is enabled") log.Debug(fmt.Sprint("Bad Block Reporting is enabled"))
manager.badBlockReportingEnabled = true manager.badBlockReportingEnabled = true
} }
@ -200,12 +199,12 @@ func (pm *ProtocolManager) removePeer(id string) {
if peer == nil { if peer == nil {
return return
} }
glog.V(logger.Debug).Infoln("Removing peer", id) log.Debug(fmt.Sprint("Removing peer", id))
// Unregister the peer from the downloader and Ethereum peer set // Unregister the peer from the downloader and Ethereum peer set
pm.downloader.UnregisterPeer(id) pm.downloader.UnregisterPeer(id)
if err := pm.peers.Unregister(id); err != nil { if err := pm.peers.Unregister(id); err != nil {
glog.V(logger.Error).Infoln("Removal failed:", err) log.Error(fmt.Sprint("Removal failed:", err))
} }
// Hard disconnect at the networking layer // Hard disconnect at the networking layer
if peer != nil { if peer != nil {
@ -227,7 +226,7 @@ func (pm *ProtocolManager) Start() {
} }
func (pm *ProtocolManager) Stop() { func (pm *ProtocolManager) Stop() {
glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...") log.Info(fmt.Sprint("Stopping ethereum protocol handler..."))
pm.txSub.Unsubscribe() // quits txBroadcastLoop pm.txSub.Unsubscribe() // quits txBroadcastLoop
pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
@ -248,7 +247,7 @@ func (pm *ProtocolManager) Stop() {
// Wait for all peer handler goroutines and the loops to come down. // Wait for all peer handler goroutines and the loops to come down.
pm.wg.Wait() pm.wg.Wait()
glog.V(logger.Info).Infoln("Ethereum protocol handler stopped") log.Info(fmt.Sprint("Ethereum protocol handler stopped"))
} }
func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
@ -262,21 +261,21 @@ func (pm *ProtocolManager) handle(p *peer) error {
return p2p.DiscTooManyPeers return p2p.DiscTooManyPeers
} }
glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name()) log.Debug(fmt.Sprintf("%v: peer connected [%s]", p, p.Name()))
// Execute the Ethereum handshake // Execute the Ethereum handshake
td, head, genesis := pm.blockchain.Status() td, head, genesis := pm.blockchain.Status()
if err := p.Handshake(pm.networkId, td, head, genesis); err != nil { if err := p.Handshake(pm.networkId, td, head, genesis); err != nil {
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err) log.Debug(fmt.Sprintf("%v: handshake failed: %v", p, err))
return err return err
} }
if rw, ok := p.rw.(*meteredMsgReadWriter); ok { if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version) rw.Init(p.version)
} }
// Register the peer locally // Register the peer locally
glog.V(logger.Detail).Infof("%v: adding peer", p) log.Trace(fmt.Sprintf("%v: adding peer", p))
if err := pm.peers.Register(p); err != nil { if err := pm.peers.Register(p); err != nil {
glog.V(logger.Error).Infof("%v: addition failed: %v", p, err) log.Error(fmt.Sprintf("%v: addition failed: %v", p, err))
return err return err
} }
defer pm.removePeer(p.id) defer pm.removePeer(p.id)
@ -297,7 +296,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
} }
// Start a timer to disconnect if the peer doesn't reply in time // Start a timer to disconnect if the peer doesn't reply in time
p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() { p.forkDrop = time.AfterFunc(daoChallengeTimeout, func() {
glog.V(logger.Debug).Infof("%v: timed out DAO fork-check, dropping", p) log.Debug(fmt.Sprintf("%v: timed out DAO fork-check, dropping", p))
pm.removePeer(p.id) pm.removePeer(p.id)
}) })
// Make sure it's cleaned up if the peer dies off // Make sure it's cleaned up if the peer dies off
@ -311,7 +310,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
// main loop. handle incoming messages. // main loop. handle incoming messages.
for { for {
if err := pm.handleMsg(p); err != nil { if err := pm.handleMsg(p); err != nil {
glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err) log.Debug(fmt.Sprintf("%v: message handling failed: %v", p, err))
return err return err
} }
} }
@ -387,7 +386,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
) )
if next <= current { if next <= current {
infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ")
glog.V(logger.Warn).Infof("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos) log.Warn(fmt.Sprintf("%v: GetBlockHeaders skip overflow attack (current %v, skip %v, next %v)\nMalicious peer infos: %s", p, current, query.Skip, next, infos))
unknown = true unknown = true
} else { } else {
if header := pm.blockchain.GetHeaderByNumber(next); header != nil { if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
@ -435,7 +434,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
// If we're seemingly on the same chain, disable the drop timer // If we're seemingly on the same chain, disable the drop timer
if verifyDAO { if verifyDAO {
glog.V(logger.Debug).Infof("%v: seems to be on the same side of the DAO fork", p) log.Debug(fmt.Sprintf("%v: seems to be on the same side of the DAO fork", p))
p.forkDrop.Stop() p.forkDrop.Stop()
p.forkDrop = nil p.forkDrop = nil
return nil return nil
@ -452,10 +451,10 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Validate the header and either drop the peer or continue // Validate the header and either drop the peer or continue
if err := core.ValidateDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil { if err := core.ValidateDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
glog.V(logger.Debug).Infof("%v: verified to be on the other side of the DAO fork, dropping", p) log.Debug(fmt.Sprintf("%v: verified to be on the other side of the DAO fork, dropping", p))
return err return err
} }
glog.V(logger.Debug).Infof("%v: verified to be on the same side of the DAO fork", p) log.Debug(fmt.Sprintf("%v: verified to be on the same side of the DAO fork", p))
return nil return nil
} }
// Irrelevant of the fork checks, send the header to the fetcher just in case // Irrelevant of the fork checks, send the header to the fetcher just in case
@ -464,7 +463,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if len(headers) > 0 || !filter { if len(headers) > 0 || !filter {
err := pm.downloader.DeliverHeaders(p.id, headers) err := pm.downloader.DeliverHeaders(p.id, headers)
if err != nil { if err != nil {
glog.V(logger.Debug).Infoln(err) log.Debug(fmt.Sprint(err))
} }
} }
@ -517,7 +516,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if len(trasactions) > 0 || len(uncles) > 0 || !filter { if len(trasactions) > 0 || len(uncles) > 0 || !filter {
err := pm.downloader.DeliverBodies(p.id, trasactions, uncles) err := pm.downloader.DeliverBodies(p.id, trasactions, uncles)
if err != nil { if err != nil {
glog.V(logger.Debug).Infoln(err) log.Debug(fmt.Sprint(err))
} }
} }
@ -556,7 +555,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
// Deliver all to the downloader // Deliver all to the downloader
if err := pm.downloader.DeliverNodeData(p.id, data); err != nil { if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
glog.V(logger.Debug).Infof("failed to deliver node state data: %v", err) log.Debug(fmt.Sprintf("failed to deliver node state data: %v", err))
} }
case p.version >= eth63 && msg.Code == GetReceiptsMsg: case p.version >= eth63 && msg.Code == GetReceiptsMsg:
@ -587,7 +586,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
// If known, encode and queue for response packet // If known, encode and queue for response packet
if encoded, err := rlp.EncodeToBytes(results); err != nil { if encoded, err := rlp.EncodeToBytes(results); err != nil {
glog.V(logger.Error).Infof("failed to encode receipt: %v", err) log.Error(fmt.Sprintf("failed to encode receipt: %v", err))
} else { } else {
receipts = append(receipts, encoded) receipts = append(receipts, encoded)
bytes += len(encoded) bytes += len(encoded)
@ -603,7 +602,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
// Deliver all to the downloader // Deliver all to the downloader
if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil { if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
glog.V(logger.Debug).Infof("failed to deliver receipts: %v", err) log.Debug(fmt.Sprintf("failed to deliver receipts: %v", err))
} }
case msg.Code == NewBlockHashesMsg: case msg.Code == NewBlockHashesMsg:
@ -696,7 +695,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil { if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)) td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
} else { } else {
glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4]) log.Error(fmt.Sprintf("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4]))
return return
} }
// Send the block to a subset of our peers // Send the block to a subset of our peers
@ -704,14 +703,14 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
for _, peer := range transfer { for _, peer := range transfer {
peer.SendNewBlock(block, td) peer.SendNewBlock(block, td)
} }
glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt)) log.Trace(fmt.Sprintf("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt)))
} }
// Otherwise if the block is indeed in out own chain, announce it // Otherwise if the block is indeed in out own chain, announce it
if pm.blockchain.HasBlock(hash) { if pm.blockchain.HasBlock(hash) {
for _, peer := range peers { for _, peer := range peers {
peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()}) peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
} }
glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt)) log.Trace(fmt.Sprintf("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt)))
} }
} }
@ -724,7 +723,7 @@ func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction)
for _, peer := range peers { for _, peer := range peers {
peer.SendTransactions(types.Transactions{tx}) peer.SendTransactions(types.Transactions{tx})
} }
glog.V(logger.Detail).Infoln("broadcast tx to", len(peers), "peers") log.Trace(fmt.Sprint("broadcast tx to", len(peers), "peers"))
} }
// Mined broadcast loop // Mined broadcast loop

@ -25,8 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"gopkg.in/fatih/set.v0" "gopkg.in/fatih/set.v0"
@ -192,41 +191,41 @@ func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error {
// RequestHeaders is a wrapper around the header query functions to fetch a // RequestHeaders is a wrapper around the header query functions to fetch a
// single header. It is used solely by the fetcher. // single header. It is used solely by the fetcher.
func (p *peer) RequestOneHeader(hash common.Hash) error { func (p *peer) RequestOneHeader(hash common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching a single header: %x", p, hash) log.Debug(fmt.Sprintf("%v fetching a single header: %x", p, hash))
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false}) return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false})
} }
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the // RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block. // specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse) log.Debug(fmt.Sprintf("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse))
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
} }
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the // RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block. // specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse) log.Debug(fmt.Sprintf("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse))
return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
} }
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes // RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified. // specified.
func (p *peer) RequestBodies(hashes []common.Hash) error { func (p *peer) RequestBodies(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes)) log.Debug(fmt.Sprintf("%v fetching %d block bodies", p, len(hashes)))
return p2p.Send(p.rw, GetBlockBodiesMsg, hashes) return p2p.Send(p.rw, GetBlockBodiesMsg, hashes)
} }
// RequestNodeData fetches a batch of arbitrary data from a node's known state // RequestNodeData fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes. // data, corresponding to the specified hashes.
func (p *peer) RequestNodeData(hashes []common.Hash) error { func (p *peer) RequestNodeData(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(hashes)) log.Debug(fmt.Sprintf("%v fetching %v state data", p, len(hashes)))
return p2p.Send(p.rw, GetNodeDataMsg, hashes) return p2p.Send(p.rw, GetNodeDataMsg, hashes)
} }
// RequestReceipts fetches a batch of transaction receipts from a remote node. // RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(hashes []common.Hash) error { func (p *peer) RequestReceipts(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes)) log.Debug(fmt.Sprintf("%v fetching %v receipts", p, len(hashes)))
return p2p.Send(p.rw, GetReceiptsMsg, hashes) return p2p.Send(p.rw, GetReceiptsMsg, hashes)
} }

@ -30,8 +30,7 @@ import (
) )
func init() { func init() {
// glog.SetToStderr(true) // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat())))
// glog.SetV(6)
} }
var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")

@ -17,6 +17,7 @@
package eth package eth
import ( import (
"fmt"
"math/rand" "math/rand"
"sync/atomic" "sync/atomic"
"time" "time"
@ -24,8 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
) )
@ -87,7 +87,7 @@ func (pm *ProtocolManager) txsyncLoop() {
delete(pending, s.p.ID()) delete(pending, s.p.ID())
} }
// Send the pack in the background. // Send the pack in the background.
glog.V(logger.Detail).Infof("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size) log.Trace(fmt.Sprintf("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size))
sending = true sending = true
go func() { done <- pack.p.SendTransactions(pack.txs) }() go func() { done <- pack.p.SendTransactions(pack.txs) }()
} }
@ -117,7 +117,7 @@ func (pm *ProtocolManager) txsyncLoop() {
sending = false sending = false
// Stop tracking peers that cause send failures. // Stop tracking peers that cause send failures.
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("%v: tx send failed: %v", pack.p.Peer, err) log.Debug(fmt.Sprintf("%v: tx send failed: %v", pack.p.Peer, err))
delete(pending, pack.p.ID()) delete(pending, pack.p.ID())
} }
// Schedule the next send. // Schedule the next send.
@ -187,7 +187,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
if atomic.LoadUint32(&pm.fastSync) == 1 { if atomic.LoadUint32(&pm.fastSync) == 1 {
// Disable fast sync if we indeed have something in our chain // Disable fast sync if we indeed have something in our chain
if pm.blockchain.CurrentBlock().NumberU64() > 0 { if pm.blockchain.CurrentBlock().NumberU64() > 0 {
glog.V(logger.Info).Infof("fast sync complete, auto disabling") log.Info(fmt.Sprintf("fast sync complete, auto disabling"))
atomic.StoreUint32(&pm.fastSync, 0) atomic.StoreUint32(&pm.fastSync, 0)
} }
} }

@ -17,14 +17,14 @@
package ethdb package ethdb
import ( import (
"fmt"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/errors"
@ -80,7 +80,7 @@ func NewLDBDatabase(file string, cache int, handles int) (*LDBDatabase, error) {
if handles < 16 { if handles < 16 {
handles = 16 handles = 16
} }
glog.V(logger.Info).Infof("Allotted %dMB cache and %d file handles to %s", cache, handles, file) log.Info(fmt.Sprintf("Allotted %dMB cache and %d file handles to %s", cache, handles, file))
// Open the db and recover any potential corruptions // Open the db and recover any potential corruptions
db, err := leveldb.OpenFile(file, &opt.Options{ db, err := leveldb.OpenFile(file, &opt.Options{
@ -167,16 +167,14 @@ func (self *LDBDatabase) Close() {
errc := make(chan error) errc := make(chan error)
self.quitChan <- errc self.quitChan <- errc
if err := <-errc; err != nil { if err := <-errc; err != nil {
glog.V(logger.Error).Infof("metrics failure in '%s': %v\n", self.fn, err) log.Error(fmt.Sprintf("metrics failure in '%s': %v\n", self.fn, err))
} }
} }
err := self.db.Close() err := self.db.Close()
if glog.V(logger.Error) { if err == nil {
if err == nil { log.Info(fmt.Sprint("closed db:", self.fn))
glog.Infoln("closed db:", self.fn) } else {
} else { log.Error(fmt.Sprintf("error closing db %s: %v", self.fn, err))
glog.Errorf("error closing db %s: %v", self.fn, err)
}
} }
} }
@ -231,7 +229,7 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
// Retrieve the database stats // Retrieve the database stats
stats, err := self.db.GetProperty("leveldb.stats") stats, err := self.db.GetProperty("leveldb.stats")
if err != nil { if err != nil {
glog.V(logger.Error).Infof("failed to read database stats: %v", err) log.Error(fmt.Sprintf("failed to read database stats: %v", err))
return return
} }
// Find the compaction table, skip the header // Find the compaction table, skip the header
@ -240,7 +238,7 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
lines = lines[1:] lines = lines[1:]
} }
if len(lines) <= 3 { if len(lines) <= 3 {
glog.V(logger.Error).Infof("compaction table not found") log.Error(fmt.Sprintf("compaction table not found"))
return return
} }
lines = lines[3:] lines = lines[3:]
@ -256,7 +254,7 @@ func (self *LDBDatabase) meter(refresh time.Duration) {
} }
for idx, counter := range parts[3:] { for idx, counter := range parts[3:] {
if value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64); err != nil { if value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64); err != nil {
glog.V(logger.Error).Infof("compaction entry parsing failed: %v", err) log.Error(fmt.Sprintf("compaction entry parsing failed: %v", err))
return return
} else { } else {
counters[i%2][idx] += value counters[i%2][idx] += value

@ -34,8 +34,7 @@ import (
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
@ -96,13 +95,13 @@ func (s *Service) Start(server *p2p.Server) error {
s.server = server s.server = server
go s.loop() go s.loop()
glog.V(logger.Info).Infoln("Stats daemon started") log.Info(fmt.Sprint("Stats daemon started"))
return nil return nil
} }
// Stop implements node.Service, terminating the monitoring and reporting daemon. // Stop implements node.Service, terminating the monitoring and reporting daemon.
func (s *Service) Stop() error { func (s *Service) Stop() error {
glog.V(logger.Info).Infoln("Stats daemon stopped") log.Info(fmt.Sprint("Stats daemon stopped"))
return nil return nil
} }
@ -131,7 +130,7 @@ func (s *Service) loop() {
} }
conn, err := websocket.Dial(url, "", "http://localhost/") conn, err := websocket.Dial(url, "", "http://localhost/")
if err != nil { if err != nil {
glog.V(logger.Warn).Infof("Stats server unreachable: %v", err) log.Warn(fmt.Sprintf("Stats server unreachable: %v", err))
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
continue continue
} }
@ -139,7 +138,7 @@ func (s *Service) loop() {
out := json.NewEncoder(conn) out := json.NewEncoder(conn)
if err = s.login(in, out); err != nil { if err = s.login(in, out); err != nil {
glog.V(logger.Warn).Infof("Stats login failed: %v", err) log.Warn(fmt.Sprintf("Stats login failed: %v", err))
conn.Close() conn.Close()
time.Sleep(10 * time.Second) time.Sleep(10 * time.Second)
continue continue
@ -148,12 +147,12 @@ func (s *Service) loop() {
// Send the initial stats so our node looks decent from the get go // Send the initial stats so our node looks decent from the get go
if err = s.report(out); err != nil { if err = s.report(out); err != nil {
glog.V(logger.Warn).Infof("Initial stats report failed: %v", err) log.Warn(fmt.Sprintf("Initial stats report failed: %v", err))
conn.Close() conn.Close()
continue continue
} }
if err = s.reportHistory(out, nil); err != nil { if err = s.reportHistory(out, nil); err != nil {
glog.V(logger.Warn).Infof("History report failed: %v", err) log.Warn(fmt.Sprintf("History report failed: %v", err))
conn.Close() conn.Close()
continue continue
} }
@ -164,11 +163,11 @@ func (s *Service) loop() {
select { select {
case <-fullReport.C: case <-fullReport.C:
if err = s.report(out); err != nil { if err = s.report(out); err != nil {
glog.V(logger.Warn).Infof("Full stats report failed: %v", err) log.Warn(fmt.Sprintf("Full stats report failed: %v", err))
} }
case list := <-s.histCh: case list := <-s.histCh:
if err = s.reportHistory(out, list); err != nil { if err = s.reportHistory(out, list); err != nil {
glog.V(logger.Warn).Infof("Block history report failed: %v", err) log.Warn(fmt.Sprintf("Block history report failed: %v", err))
} }
case head, ok := <-headSub.Chan(): case head, ok := <-headSub.Chan():
if !ok { // node stopped if !ok { // node stopped
@ -176,10 +175,10 @@ func (s *Service) loop() {
return return
} }
if err = s.reportBlock(out, head.Data.(core.ChainHeadEvent).Block); err != nil { if err = s.reportBlock(out, head.Data.(core.ChainHeadEvent).Block); err != nil {
glog.V(logger.Warn).Infof("Block stats report failed: %v", err) log.Warn(fmt.Sprintf("Block stats report failed: %v", err))
} }
if err = s.reportPending(out); err != nil { if err = s.reportPending(out); err != nil {
glog.V(logger.Warn).Infof("Post-block transaction stats report failed: %v", err) log.Warn(fmt.Sprintf("Post-block transaction stats report failed: %v", err))
} }
case _, ok := <-txSub.Chan(): case _, ok := <-txSub.Chan():
if !ok { // node stopped if !ok { // node stopped
@ -195,7 +194,7 @@ func (s *Service) loop() {
} }
} }
if err = s.reportPending(out); err != nil { if err = s.reportPending(out); err != nil {
glog.V(logger.Warn).Infof("Transaction stats report failed: %v", err) log.Warn(fmt.Sprintf("Transaction stats report failed: %v", err))
} }
} }
} }
@ -216,16 +215,16 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
// Retrieve the next generic network packet and bail out on error // Retrieve the next generic network packet and bail out on error
var msg map[string][]interface{} var msg map[string][]interface{}
if err := in.Decode(&msg); err != nil { if err := in.Decode(&msg); err != nil {
glog.V(logger.Warn).Infof("Failed to decode stats server message: %v", err) log.Warn(fmt.Sprintf("Failed to decode stats server message: %v", err))
return return
} }
if len(msg["emit"]) == 0 { if len(msg["emit"]) == 0 {
glog.V(logger.Warn).Infof("Stats server sent non-broadcast: %v", msg) log.Warn(fmt.Sprintf("Stats server sent non-broadcast: %v", msg))
return return
} }
command, ok := msg["emit"][0].(string) command, ok := msg["emit"][0].(string)
if !ok { if !ok {
glog.V(logger.Warn).Infof("Invalid stats server message type: %v", msg["emit"][0]) log.Warn(fmt.Sprintf("Invalid stats server message type: %v", msg["emit"][0]))
return return
} }
// If the message is a ping reply, deliver (someone must be listening!) // If the message is a ping reply, deliver (someone must be listening!)
@ -236,7 +235,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
continue continue
default: default:
// Ping routine dead, abort // Ping routine dead, abort
glog.V(logger.Warn).Infof("Stats server pinger seems to have died") log.Warn(fmt.Sprintf("Stats server pinger seems to have died"))
return return
} }
} }
@ -245,12 +244,12 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
// Make sure the request is valid and doesn't crash us // Make sure the request is valid and doesn't crash us
request, ok := msg["emit"][1].(map[string]interface{}) request, ok := msg["emit"][1].(map[string]interface{})
if !ok { if !ok {
glog.V(logger.Warn).Infof("Invalid history request: %v", msg["emit"][1]) log.Warn(fmt.Sprintf("Invalid history request: %v", msg["emit"][1]))
return return
} }
list, ok := request["list"].([]interface{}) list, ok := request["list"].([]interface{})
if !ok { if !ok {
glog.V(logger.Warn).Infof("Invalid history block list: %v", request["list"]) log.Warn(fmt.Sprintf("Invalid history block list: %v", request["list"]))
return return
} }
// Convert the block number list to an integer list // Convert the block number list to an integer list
@ -258,7 +257,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
for i, num := range list { for i, num := range list {
n, ok := num.(float64) n, ok := num.(float64)
if !ok { if !ok {
glog.V(logger.Warn).Infof("Invalid history block number: %v", num) log.Warn(fmt.Sprintf("Invalid history block number: %v", num))
return return
} }
numbers[i] = uint64(n) numbers[i] = uint64(n)
@ -270,7 +269,7 @@ func (s *Service) readLoop(conn *websocket.Conn, in *json.Decoder) {
} }
} }
// Report anything else and continue // Report anything else and continue
glog.V(logger.Info).Infof("Unknown stats message: %v", msg) log.Info(fmt.Sprintf("Unknown stats message: %v", msg))
} }
} }

@ -22,6 +22,7 @@ package debug
import ( import (
"errors" "errors"
"fmt"
"io" "io"
"os" "os"
"os/user" "os/user"
@ -33,8 +34,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
// Handler is the global debugging handler. // Handler is the global debugging handler.
@ -51,23 +51,22 @@ type HandlerT struct {
traceFile string traceFile string
} }
// Verbosity sets the glog verbosity ceiling. // Verbosity sets the log verbosity ceiling. The verbosity of individual packages
// The verbosity of individual packages and source files // and source files can be raised using Vmodule.
// can be raised using Vmodule.
func (*HandlerT) Verbosity(level int) { func (*HandlerT) Verbosity(level int) {
glog.SetV(level) glogger.Verbosity(log.Lvl(level))
} }
// Vmodule sets the glog verbosity pattern. See package // Vmodule sets the log verbosity pattern. See package log for details on the
// glog for details on pattern syntax. // pattern syntax.
func (*HandlerT) Vmodule(pattern string) error { func (*HandlerT) Vmodule(pattern string) error {
return glog.GetVModule().Set(pattern) return glogger.Vmodule(pattern)
} }
// BacktraceAt sets the glog backtrace location. // BacktraceAt sets the log backtrace location. See package log for details on
// See package glog for details on pattern syntax. // the pattern syntax.
func (*HandlerT) BacktraceAt(location string) error { func (*HandlerT) BacktraceAt(location string) error {
return glog.GetTraceLocation().Set(location) return glogger.BacktraceAt(location)
} }
// MemStats returns detailed runtime memory statistics. // MemStats returns detailed runtime memory statistics.
@ -112,7 +111,7 @@ func (h *HandlerT) StartCPUProfile(file string) error {
} }
h.cpuW = f h.cpuW = f
h.cpuFile = file h.cpuFile = file
glog.V(logger.Info).Infoln("CPU profiling started, writing to", h.cpuFile) log.Info(fmt.Sprint("CPU profiling started, writing to", h.cpuFile))
return nil return nil
} }
@ -124,7 +123,7 @@ func (h *HandlerT) StopCPUProfile() error {
if h.cpuW == nil { if h.cpuW == nil {
return errors.New("CPU profiling not in progress") return errors.New("CPU profiling not in progress")
} }
glog.V(logger.Info).Infoln("done writing CPU profile to", h.cpuFile) log.Info(fmt.Sprint("done writing CPU profile to", h.cpuFile))
h.cpuW.Close() h.cpuW.Close()
h.cpuW = nil h.cpuW = nil
h.cpuFile = "" h.cpuFile = ""
@ -180,7 +179,7 @@ func (*HandlerT) Stacks() string {
func writeProfile(name, file string) error { func writeProfile(name, file string) error {
p := pprof.Lookup(name) p := pprof.Lookup(name)
glog.V(logger.Info).Infof("writing %d %s profile records to %s", p.Count(), name, file) log.Info(fmt.Sprintf("writing %d %s profile records to %s", p.Count(), name, file))
f, err := os.Create(expandHome(file)) f, err := os.Create(expandHome(file))
if err != nil { if err != nil {
return err return err

@ -20,28 +20,28 @@ import (
"fmt" "fmt"
"net/http" "net/http"
_ "net/http/pprof" _ "net/http/pprof"
"os"
"runtime" "runtime"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )
var ( var (
verbosityFlag = cli.GenericFlag{ verbosityFlag = cli.IntFlag{
Name: "verbosity", Name: "verbosity",
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=core, 5=debug, 6=detail", Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=core, 5=debug, 6=detail",
Value: glog.GetVerbosity(), Value: 3,
} }
vmoduleFlag = cli.GenericFlag{ vmoduleFlag = cli.StringFlag{
Name: "vmodule", Name: "vmodule",
Usage: "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=6,p2p=5)", Usage: "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=6,p2p=5)",
Value: glog.GetVModule(), Value: "",
} }
backtraceAtFlag = cli.GenericFlag{ backtraceAtFlag = cli.StringFlag{
Name: "backtrace", Name: "backtrace",
Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\")", Usage: "Request a stack trace at a specific logging statement (e.g. \"block.go:271\")",
Value: glog.GetTraceLocation(), Value: "",
} }
pprofFlag = cli.BoolFlag{ pprofFlag = cli.BoolFlag{
Name: "pprof", Name: "pprof",
@ -83,12 +83,16 @@ var Flags = []cli.Flag{
memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag, memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag,
} }
var glogger = log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat()))
// Setup initializes profiling and logging based on the CLI flags. // Setup initializes profiling and logging based on the CLI flags.
// It should be called as early as possible in the program. // It should be called as early as possible in the program.
func Setup(ctx *cli.Context) error { func Setup(ctx *cli.Context) error {
// logging // logging
glog.CopyStandardLogTo("INFO") glogger.Verbosity(log.Lvl(ctx.GlobalInt(verbosityFlag.Name)))
glog.SetToStderr(true) glogger.Vmodule(ctx.GlobalString(vmoduleFlag.Name))
glogger.BacktraceAt(ctx.GlobalString(backtraceAtFlag.Name))
log.Root().SetHandler(glogger)
// profiling, tracing // profiling, tracing
runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name) runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name)
@ -108,8 +112,8 @@ func Setup(ctx *cli.Context) error {
if ctx.GlobalBool(pprofFlag.Name) { if ctx.GlobalBool(pprofFlag.Name) {
address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name)) address := fmt.Sprintf("%s:%d", ctx.GlobalString(pprofAddrFlag.Name), ctx.GlobalInt(pprofPortFlag.Name))
go func() { go func() {
glog.V(logger.Info).Infof("starting pprof server at http://%s/debug/pprof", address) log.Info(fmt.Sprintf("starting pprof server at http://%s/debug/pprof", address))
glog.Errorln(http.ListenAndServe(address, nil)) log.Error(fmt.Sprint(http.ListenAndServe(address, nil)))
}() }()
} }
return nil return nil

@ -20,11 +20,11 @@ package debug
import ( import (
"errors" "errors"
"fmt"
"os" "os"
"runtime/trace" "runtime/trace"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
// StartGoTrace turns on tracing, writing to the given file. // StartGoTrace turns on tracing, writing to the given file.
@ -44,7 +44,7 @@ func (h *HandlerT) StartGoTrace(file string) error {
} }
h.traceW = f h.traceW = f
h.traceFile = file h.traceFile = file
glog.V(logger.Info).Infoln("trace started, writing to", h.traceFile) log.Info(fmt.Sprint("trace started, writing to", h.traceFile))
return nil return nil
} }
@ -56,7 +56,7 @@ func (h *HandlerT) StopGoTrace() error {
if h.traceW == nil { if h.traceW == nil {
return errors.New("trace not in progress") return errors.New("trace not in progress")
} }
glog.V(logger.Info).Infoln("done writing trace to", h.traceFile) log.Info(fmt.Sprint("done writing trace to", h.traceFile))
h.traceW.Close() h.traceW.Close()
h.traceW = nil h.traceW = nil
h.traceFile = "" h.traceFile = ""

@ -36,8 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
@ -475,7 +474,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context,
if block != nil { if block != nil {
uncles := block.Uncles() uncles := block.Uncles()
if index >= hexutil.Uint(len(uncles)) { if index >= hexutil.Uint(len(uncles)) {
glog.V(logger.Debug).Infof("uncle block on index %d not found for block #%d", index, blockNr) log.Debug(fmt.Sprintf("uncle block on index %d not found for block #%d", index, blockNr))
return nil, nil return nil, nil
} }
block = types.NewBlockWithHeader(uncles[index]) block = types.NewBlockWithHeader(uncles[index])
@ -491,7 +490,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, b
if block != nil { if block != nil {
uncles := block.Uncles() uncles := block.Uncles()
if index >= hexutil.Uint(len(uncles)) { if index >= hexutil.Uint(len(uncles)) {
glog.V(logger.Debug).Infof("uncle block on index %d not found for block %s", index, blockHash.Hex()) log.Debug(fmt.Sprintf("uncle block on index %d not found for block %s", index, blockHash.Hex()))
return nil, nil return nil, nil
} }
block = types.NewBlockWithHeader(uncles[index]) block = types.NewBlockWithHeader(uncles[index])
@ -577,7 +576,7 @@ type CallArgs struct {
} }
func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config) ([]byte, *big.Int, error) { func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config) ([]byte, *big.Int, error) {
defer func(start time.Time) { glog.V(logger.Debug).Infof("call took %v", time.Since(start)) }(time.Now()) defer func(start time.Time) { log.Debug(fmt.Sprintf("call took %v", time.Since(start))) }(time.Now())
state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr) state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
if state == nil || err != nil { if state == nil || err != nil {
@ -1003,7 +1002,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txH
var err error var err error
if tx, isPending, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil { if tx, isPending, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
glog.V(logger.Debug).Infof("%v\n", err) log.Debug(fmt.Sprintf("%v\n", err))
return nil, nil return nil, nil
} else if tx == nil { } else if tx == nil {
return nil, nil return nil, nil
@ -1015,7 +1014,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, txH
blockHash, _, _, err := getTransactionBlockData(s.b.ChainDb(), txHash) blockHash, _, _, err := getTransactionBlockData(s.b.ChainDb(), txHash)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("%v\n", err) log.Debug(fmt.Sprintf("%v\n", err))
return nil, nil return nil, nil
} }
@ -1032,7 +1031,7 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context,
var err error var err error
if tx, _, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil { if tx, _, err = getTransaction(s.b.ChainDb(), s.b, txHash); err != nil {
glog.V(logger.Debug).Infof("%v\n", err) log.Debug(fmt.Sprintf("%v\n", err))
return nil, nil return nil, nil
} else if tx == nil { } else if tx == nil {
return nil, nil return nil, nil
@ -1045,19 +1044,19 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context,
func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) { func (s *PublicTransactionPoolAPI) GetTransactionReceipt(txHash common.Hash) (map[string]interface{}, error) {
receipt := core.GetReceipt(s.b.ChainDb(), txHash) receipt := core.GetReceipt(s.b.ChainDb(), txHash)
if receipt == nil { if receipt == nil {
glog.V(logger.Debug).Infof("receipt not found for transaction %s", txHash.Hex()) log.Debug(fmt.Sprintf("receipt not found for transaction %s", txHash.Hex()))
return nil, nil return nil, nil
} }
tx, _, err := getTransaction(s.b.ChainDb(), s.b, txHash) tx, _, err := getTransaction(s.b.ChainDb(), s.b, txHash)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("%v\n", err) log.Debug(fmt.Sprintf("%v\n", err))
return nil, nil return nil, nil
} }
txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), txHash) txBlock, blockIndex, index, err := getTransactionBlockData(s.b.ChainDb(), txHash)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("%v\n", err) log.Debug(fmt.Sprintf("%v\n", err))
return nil, nil return nil, nil
} }
@ -1160,9 +1159,9 @@ func submitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number()) signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number())
from, _ := types.Sender(signer, tx) from, _ := types.Sender(signer, tx)
addr := crypto.CreateAddress(from, tx.Nonce()) addr := crypto.CreateAddress(from, tx.Nonce())
glog.V(logger.Info).Infof("Tx(%s) created: %s\n", tx.Hash().Hex(), addr.Hex()) log.Info(fmt.Sprintf("Tx(%s) created: %s\n", tx.Hash().Hex(), addr.Hex()))
} else { } else {
glog.V(logger.Info).Infof("Tx(%s) to: %s\n", tx.Hash().Hex(), tx.To().Hex()) log.Info(fmt.Sprintf("Tx(%s) to: %s\n", tx.Hash().Hex(), tx.To().Hex()))
} }
return tx.Hash(), nil return tx.Hash(), nil
} }
@ -1214,9 +1213,9 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encod
return "", err return "", err
} }
addr := crypto.CreateAddress(from, tx.Nonce()) addr := crypto.CreateAddress(from, tx.Nonce())
glog.V(logger.Info).Infof("Tx(%x) created: %x\n", tx.Hash(), addr) log.Info(fmt.Sprintf("Tx(%x) created: %x\n", tx.Hash(), addr))
} else { } else {
glog.V(logger.Info).Infof("Tx(%x) to: %x\n", tx.Hash(), tx.To()) log.Info(fmt.Sprintf("Tx(%x) to: %x\n", tx.Hash(), tx.To()))
} }
return tx.Hash().Hex(), nil return tx.Hash().Hex(), nil
@ -1421,10 +1420,10 @@ func (api *PrivateDebugAPI) ChaindbCompact() error {
return fmt.Errorf("chaindbCompact does not work for memory databases") return fmt.Errorf("chaindbCompact does not work for memory databases")
} }
for b := byte(0); b < 255; b++ { for b := byte(0); b < 255; b++ {
glog.V(logger.Info).Infof("compacting chain DB range 0x%0.2X-0x%0.2X", b, b+1) log.Info(fmt.Sprintf("compacting chain DB range 0x%0.2X-0x%0.2X", b, b+1))
err := ldb.LDB().CompactRange(util.Range{Start: []byte{b}, Limit: []byte{b + 1}}) err := ldb.LDB().CompactRange(util.Range{Start: []byte{b}, Limit: []byte{b + 1}})
if err != nil { if err != nil {
glog.Errorf("compaction error: %v", err) log.Error(fmt.Sprintf("compaction error: %v", err))
return err return err
} }
} }

@ -36,8 +36,7 @@ import (
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
@ -188,7 +187,7 @@ func (s *LightEthereum) Protocols() []p2p.Protocol {
// Start implements node.Service, starting all internal goroutines needed by the // Start implements node.Service, starting all internal goroutines needed by the
// Ethereum protocol implementation. // Ethereum protocol implementation.
func (s *LightEthereum) Start(srvr *p2p.Server) error { func (s *LightEthereum) Start(srvr *p2p.Server) error {
glog.V(logger.Info).Infof("WARNING: light client mode is an experimental feature") log.Info(fmt.Sprintf("WARNING: light client mode is an experimental feature"))
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.netVersionId) s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.netVersionId)
s.protocolManager.Start(srvr) s.protocolManager.Start(srvr)
return nil return nil

@ -18,6 +18,7 @@
package les package les
import ( import (
"fmt"
"math/big" "math/big"
"sync" "sync"
"time" "time"
@ -27,8 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
const ( const (
@ -174,7 +174,7 @@ func (f *lightFetcher) syncLoop() {
f.reqMu.Unlock() f.reqMu.Unlock()
if ok { if ok {
f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true) f.pm.serverPool.adjustResponseTime(req.peer.poolEntry, time.Duration(mclock.Now()-req.sent), true)
glog.V(logger.Debug).Infof("hard timeout by peer %v", req.peer.id) log.Debug(fmt.Sprintf("hard timeout by peer %v", req.peer.id))
go f.pm.removePeer(req.peer.id) go f.pm.removePeer(req.peer.id)
} }
case resp := <-f.deliverChn: case resp := <-f.deliverChn:
@ -192,13 +192,13 @@ func (f *lightFetcher) syncLoop() {
} }
f.lock.Lock() f.lock.Lock()
if !ok || !(f.syncing || f.processResponse(req, resp)) { if !ok || !(f.syncing || f.processResponse(req, resp)) {
glog.V(logger.Debug).Infof("failed processing response by peer %v", resp.peer.id) log.Debug(fmt.Sprintf("failed processing response by peer %v", resp.peer.id))
go f.pm.removePeer(resp.peer.id) go f.pm.removePeer(resp.peer.id)
} }
f.lock.Unlock() f.lock.Unlock()
case p := <-f.syncDone: case p := <-f.syncDone:
f.lock.Lock() f.lock.Lock()
glog.V(logger.Debug).Infof("done synchronising with peer %v", p.id) log.Debug(fmt.Sprintf("done synchronising with peer %v", p.id))
f.checkSyncedHeaders(p) f.checkSyncedHeaders(p)
f.syncing = false f.syncing = false
f.lock.Unlock() f.lock.Unlock()
@ -239,17 +239,17 @@ func (f *lightFetcher) removePeer(p *peer) {
func (f *lightFetcher) announce(p *peer, head *announceData) { func (f *lightFetcher) announce(p *peer, head *announceData) {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
glog.V(logger.Debug).Infof("received announce from peer %v #%d %016x reorg: %d", p.id, head.Number, head.Hash[:8], head.ReorgDepth) log.Debug(fmt.Sprintf("received announce from peer %v #%d %016x reorg: %d", p.id, head.Number, head.Hash[:8], head.ReorgDepth))
fp := f.peers[p] fp := f.peers[p]
if fp == nil { if fp == nil {
glog.V(logger.Debug).Infof("announce: unknown peer") log.Debug(fmt.Sprintf("announce: unknown peer"))
return return
} }
if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 { if fp.lastAnnounced != nil && head.Td.Cmp(fp.lastAnnounced.td) <= 0 {
// announced tds should be strictly monotonic // announced tds should be strictly monotonic
glog.V(logger.Debug).Infof("non-monotonic Td from peer %v", p.id) log.Debug(fmt.Sprintf("non-monotonic Td from peer %v", p.id))
go f.pm.removePeer(p.id) go f.pm.removePeer(p.id)
return return
} }
@ -355,14 +355,14 @@ func (f *lightFetcher) peerHasBlock(p *peer, hash common.Hash, number uint64) bo
func (f *lightFetcher) request(p *peer, reqID uint64, n *fetcherTreeNode, amount uint64) (uint64, bool) { func (f *lightFetcher) request(p *peer, reqID uint64, n *fetcherTreeNode, amount uint64) (uint64, bool) {
fp := f.peers[p] fp := f.peers[p]
if fp == nil { if fp == nil {
glog.V(logger.Debug).Infof("request: unknown peer") log.Debug(fmt.Sprintf("request: unknown peer"))
p.fcServer.DeassignRequest(reqID) p.fcServer.DeassignRequest(reqID)
return 0, false return 0, false
} }
if fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root) { if fp.bestConfirmed == nil || fp.root == nil || !f.checkKnownNode(p, fp.root) {
f.syncing = true f.syncing = true
go func() { go func() {
glog.V(logger.Debug).Infof("synchronising with peer %v", p.id) log.Debug(fmt.Sprintf("synchronising with peer %v", p.id))
f.pm.synchronise(p) f.pm.synchronise(p)
f.syncDone <- p f.syncDone <- p
}() }()
@ -457,7 +457,7 @@ func (f *lightFetcher) deliverHeaders(peer *peer, reqID uint64, headers []*types
// processResponse processes header download request responses, returns true if successful // processResponse processes header download request responses, returns true if successful
func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool { func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) bool {
if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash { if uint64(len(resp.headers)) != req.amount || resp.headers[0].Hash() != req.hash {
glog.V(logger.Debug).Infof("response mismatch %v %016x != %v %016x", len(resp.headers), resp.headers[0].Hash().Bytes()[:8], req.amount, req.hash[:8]) log.Debug(fmt.Sprintf("response mismatch %v %016x != %v %016x", len(resp.headers), resp.headers[0].Hash().Bytes()[:8], req.amount, req.hash[:8]))
return false return false
} }
headers := make([]*types.Header, req.amount) headers := make([]*types.Header, req.amount)
@ -468,14 +468,14 @@ func (f *lightFetcher) processResponse(req fetchRequest, resp fetchResponse) boo
if err == core.BlockFutureErr { if err == core.BlockFutureErr {
return true return true
} }
glog.V(logger.Debug).Infof("InsertHeaderChain error: %v", err) log.Debug(fmt.Sprintf("InsertHeaderChain error: %v", err))
return false return false
} }
tds := make([]*big.Int, len(headers)) tds := make([]*big.Int, len(headers))
for i, header := range headers { for i, header := range headers {
td := f.chain.GetTd(header.Hash(), header.Number.Uint64()) td := f.chain.GetTd(header.Hash(), header.Number.Uint64())
if td == nil { if td == nil {
glog.V(logger.Debug).Infof("TD not found for header %v of %v", i+1, len(headers)) log.Debug(fmt.Sprintf("TD not found for header %v of %v", i+1, len(headers)))
return false return false
} }
tds[i] = td tds[i] = td
@ -490,7 +490,7 @@ func (f *lightFetcher) newHeaders(headers []*types.Header, tds []*big.Int) {
var maxTd *big.Int var maxTd *big.Int
for p, fp := range f.peers { for p, fp := range f.peers {
if !f.checkAnnouncedHeaders(fp, headers, tds) { if !f.checkAnnouncedHeaders(fp, headers, tds) {
glog.V(logger.Debug).Infof("announce inconsistency by peer %v", p.id) log.Debug(fmt.Sprintf("announce inconsistency by peer %v", p.id))
go f.pm.removePeer(p.id) go f.pm.removePeer(p.id)
} }
if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) { if fp.confirmedTd != nil && (maxTd == nil || maxTd.Cmp(fp.confirmedTd) > 0) {
@ -576,7 +576,7 @@ func (f *lightFetcher) checkAnnouncedHeaders(fp *fetcherPeerInfo, headers []*typ
func (f *lightFetcher) checkSyncedHeaders(p *peer) { func (f *lightFetcher) checkSyncedHeaders(p *peer) {
fp := f.peers[p] fp := f.peers[p]
if fp == nil { if fp == nil {
glog.V(logger.Debug).Infof("checkSyncedHeaders: unknown peer") log.Debug(fmt.Sprintf("checkSyncedHeaders: unknown peer"))
return return
} }
n := fp.lastAnnounced n := fp.lastAnnounced
@ -589,7 +589,7 @@ func (f *lightFetcher) checkSyncedHeaders(p *peer) {
} }
// now n is the latest downloaded header after syncing // now n is the latest downloaded header after syncing
if n == nil { if n == nil {
glog.V(logger.Debug).Infof("synchronisation failed with peer %v", p.id) log.Debug(fmt.Sprintf("synchronisation failed with peer %v", p.id))
go f.pm.removePeer(p.id) go f.pm.removePeer(p.id)
} else { } else {
header := f.chain.GetHeader(n.hash, n.number) header := f.chain.GetHeader(n.hash, n.number)
@ -610,12 +610,12 @@ func (f *lightFetcher) checkKnownNode(p *peer, n *fetcherTreeNode) bool {
fp := f.peers[p] fp := f.peers[p]
if fp == nil { if fp == nil {
glog.V(logger.Debug).Infof("checkKnownNode: unknown peer") log.Debug(fmt.Sprintf("checkKnownNode: unknown peer"))
return false return false
} }
header := f.chain.GetHeader(n.hash, n.number) header := f.chain.GetHeader(n.hash, n.number)
if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) { if !f.checkAnnouncedHeaders(fp, []*types.Header{header}, []*big.Int{td}) {
glog.V(logger.Debug).Infof("announce inconsistency by peer %v", p.id) log.Debug(fmt.Sprintf("announce inconsistency by peer %v", p.id))
go f.pm.removePeer(p.id) go f.pm.removePeer(p.id)
} }
if fp.confirmedTd != nil { if fp.confirmedTd != nil {
@ -700,7 +700,7 @@ func (f *lightFetcher) checkUpdateStats(p *peer, newEntry *updateStatsEntry) {
now := mclock.Now() now := mclock.Now()
fp := f.peers[p] fp := f.peers[p]
if fp == nil { if fp == nil {
glog.V(logger.Debug).Infof("checkUpdateStats: unknown peer") log.Debug(fmt.Sprintf("checkUpdateStats: unknown peer"))
return return
} }
if newEntry != nil && fp.firstUpdateStats == nil { if newEntry != nil && fp.firstUpdateStats == nil {

@ -34,8 +34,7 @@ import (
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/discv5"
@ -199,7 +198,7 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, network
} }
if lightSync { if lightSync {
glog.V(logger.Debug).Infof("LES: create downloader") log.Debug(fmt.Sprintf("LES: create downloader"))
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash, manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, blockchain.HasHeader, nil, blockchain.GetHeaderByHash,
nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash, nil, blockchain.CurrentHeader, nil, nil, nil, blockchain.GetTdByHash,
blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer) blockchain.InsertHeaderChain, nil, nil, blockchain.Rollback, removePeer)
@ -230,12 +229,12 @@ func (pm *ProtocolManager) removePeer(id string) {
if err == errNotRegistered { if err == errNotRegistered {
return return
} }
glog.V(logger.Error).Infoln("Removal failed:", err) log.Error(fmt.Sprint("Removal failed:", err))
} }
glog.V(logger.Debug).Infoln("Removing peer", id) log.Debug(fmt.Sprint("Removing peer", id))
// Unregister the peer from the downloader and Ethereum peer set // Unregister the peer from the downloader and Ethereum peer set
glog.V(logger.Debug).Infof("LES: unregister peer %v", id) log.Debug(fmt.Sprintf("LES: unregister peer %v", id))
if pm.lightSync { if pm.lightSync {
pm.downloader.UnregisterPeer(id) pm.downloader.UnregisterPeer(id)
if pm.txrelay != nil { if pm.txrelay != nil {
@ -268,9 +267,9 @@ func (pm *ProtocolManager) Start(srvr *p2p.Server) {
} else { } else {
if topicDisc != nil { if topicDisc != nil {
go func() { go func() {
glog.V(logger.Info).Infoln("Starting registering topic", string(lesTopic)) log.Info(fmt.Sprint("Starting registering topic", string(lesTopic)))
topicDisc.RegisterTopic(lesTopic, pm.quitSync) topicDisc.RegisterTopic(lesTopic, pm.quitSync)
glog.V(logger.Info).Infoln("Stopped registering topic", string(lesTopic)) log.Info(fmt.Sprint("Stopped registering topic", string(lesTopic)))
}() }()
} }
go func() { go func() {
@ -283,7 +282,7 @@ func (pm *ProtocolManager) Start(srvr *p2p.Server) {
func (pm *ProtocolManager) Stop() { func (pm *ProtocolManager) Stop() {
// Showing a log message. During download / process this could actually // Showing a log message. During download / process this could actually
// take between 5 to 10 seconds and therefor feedback is required. // take between 5 to 10 seconds and therefor feedback is required.
glog.V(logger.Info).Infoln("Stopping light ethereum protocol handler...") log.Info(fmt.Sprint("Stopping light ethereum protocol handler..."))
// Quit the sync loop. // Quit the sync loop.
// After this send has completed, no new peers will be accepted. // After this send has completed, no new peers will be accepted.
@ -300,7 +299,7 @@ func (pm *ProtocolManager) Stop() {
// Wait for any process action // Wait for any process action
pm.wg.Wait() pm.wg.Wait()
glog.V(logger.Info).Infoln("Light ethereum protocol handler stopped") log.Info(fmt.Sprint("Light ethereum protocol handler stopped"))
} }
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
@ -310,22 +309,22 @@ func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter
// handle is the callback invoked to manage the life cycle of a les peer. When // handle is the callback invoked to manage the life cycle of a les peer. When
// this function terminates, the peer is disconnected. // this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error { func (pm *ProtocolManager) handle(p *peer) error {
glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name()) log.Debug(fmt.Sprintf("%v: peer connected [%s]", p, p.Name()))
// Execute the LES handshake // Execute the LES handshake
td, head, genesis := pm.blockchain.Status() td, head, genesis := pm.blockchain.Status()
headNum := core.GetBlockNumber(pm.chainDb, head) headNum := core.GetBlockNumber(pm.chainDb, head)
if err := p.Handshake(td, head, headNum, genesis, pm.server); err != nil { if err := p.Handshake(td, head, headNum, genesis, pm.server); err != nil {
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err) log.Debug(fmt.Sprintf("%v: handshake failed: %v", p, err))
return err return err
} }
if rw, ok := p.rw.(*meteredMsgReadWriter); ok { if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version) rw.Init(p.version)
} }
// Register the peer locally // Register the peer locally
glog.V(logger.Detail).Infof("%v: adding peer", p) log.Trace(fmt.Sprintf("%v: adding peer", p))
if err := pm.peers.Register(p); err != nil { if err := pm.peers.Register(p); err != nil {
glog.V(logger.Error).Infof("%v: addition failed: %v", p, err) log.Error(fmt.Sprintf("%v: addition failed: %v", p, err))
return err return err
} }
defer func() { defer func() {
@ -336,7 +335,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
}() }()
// Register the peer in the downloader. If the downloader considers it banned, we disconnect // Register the peer in the downloader. If the downloader considers it banned, we disconnect
glog.V(logger.Debug).Infof("LES: register peer %v", p.id) log.Debug(fmt.Sprintf("LES: register peer %v", p.id))
if pm.lightSync { if pm.lightSync {
requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error { requestHeadersByHash := func(origin common.Hash, amount int, skip int, reverse bool) error {
reqID := getNextReqID() reqID := getNextReqID()
@ -390,7 +389,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
// main loop. handle incoming messages. // main loop. handle incoming messages.
for { for {
if err := pm.handleMsg(p); err != nil { if err := pm.handleMsg(p); err != nil {
glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err) log.Debug(fmt.Sprintf("%v: message handling failed: %v", p, err))
return err return err
} }
} }
@ -407,7 +406,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return err return err
} }
glog.V(logger.Debug).Infoln("msg:", msg.Code, msg.Size) log.Debug(fmt.Sprint("msg:", msg.Code, msg.Size))
costs := p.fcCosts[msg.Code] costs := p.fcCosts[msg.Code]
reject := func(reqCnt, maxCnt uint64) bool { reject := func(reqCnt, maxCnt uint64) bool {
@ -420,7 +419,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
cost = pm.server.defParams.BufLimit cost = pm.server.defParams.BufLimit
} }
if cost > bufValue { if cost > bufValue {
glog.V(logger.Error).Infof("Request from %v came %v too early", p.id, time.Duration((cost-bufValue)*1000000/pm.server.defParams.MinRecharge)) log.Error(fmt.Sprintf("Request from %v came %v too early", p.id, time.Duration((cost-bufValue)*1000000/pm.server.defParams.MinRecharge)))
return true return true
} }
return false return false
@ -436,25 +435,25 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Handle the message depending on its contents // Handle the message depending on its contents
switch msg.Code { switch msg.Code {
case StatusMsg: case StatusMsg:
glog.V(logger.Debug).Infof("<=== StatusMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== StatusMsg from peer %v", p.id))
// Status messages should never arrive after the handshake // Status messages should never arrive after the handshake
return errResp(ErrExtraStatusMsg, "uncontrolled status message") return errResp(ErrExtraStatusMsg, "uncontrolled status message")
// Block header query, collect the requested headers and reply // Block header query, collect the requested headers and reply
case AnnounceMsg: case AnnounceMsg:
glog.V(logger.Debug).Infoln("<=== AnnounceMsg from peer %v:", p.id) log.Debug(fmt.Sprint("<=== AnnounceMsg from peer %v:", p.id))
var req announceData var req announceData
if err := msg.Decode(&req); err != nil { if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err) return errResp(ErrDecode, "%v: %v", msg, err)
} }
glog.V(logger.Detail).Infoln("AnnounceMsg:", req.Number, req.Hash, req.Td, req.ReorgDepth) log.Trace(fmt.Sprint("AnnounceMsg:", req.Number, req.Hash, req.Td, req.ReorgDepth))
if pm.fetcher != nil { if pm.fetcher != nil {
pm.fetcher.announce(p, &req) pm.fetcher.announce(p, &req)
} }
case GetBlockHeadersMsg: case GetBlockHeadersMsg:
glog.V(logger.Debug).Infof("<=== GetBlockHeadersMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== GetBlockHeadersMsg from peer %v", p.id))
// Decode the complex header query // Decode the complex header query
var req struct { var req struct {
ReqID uint64 ReqID uint64
@ -539,7 +538,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrUnexpectedResponse, "") return errResp(ErrUnexpectedResponse, "")
} }
glog.V(logger.Debug).Infof("<=== BlockHeadersMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== BlockHeadersMsg from peer %v", p.id))
// A batch of headers arrived to one of our previous requests // A batch of headers arrived to one of our previous requests
var resp struct { var resp struct {
ReqID, BV uint64 ReqID, BV uint64
@ -554,12 +553,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} else { } else {
err := pm.downloader.DeliverHeaders(p.id, resp.Headers) err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
if err != nil { if err != nil {
glog.V(logger.Debug).Infoln(err) log.Debug(fmt.Sprint(err))
} }
} }
case GetBlockBodiesMsg: case GetBlockBodiesMsg:
glog.V(logger.Debug).Infof("<=== GetBlockBodiesMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== GetBlockBodiesMsg from peer %v", p.id))
// Decode the retrieval message // Decode the retrieval message
var req struct { var req struct {
ReqID uint64 ReqID uint64
@ -596,7 +595,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrUnexpectedResponse, "") return errResp(ErrUnexpectedResponse, "")
} }
glog.V(logger.Debug).Infof("<=== BlockBodiesMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== BlockBodiesMsg from peer %v", p.id))
// A batch of block bodies arrived to one of our previous requests // A batch of block bodies arrived to one of our previous requests
var resp struct { var resp struct {
ReqID, BV uint64 ReqID, BV uint64
@ -613,7 +612,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
case GetCodeMsg: case GetCodeMsg:
glog.V(logger.Debug).Infof("<=== GetCodeMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== GetCodeMsg from peer %v", p.id))
// Decode the retrieval message // Decode the retrieval message
var req struct { var req struct {
ReqID uint64 ReqID uint64
@ -657,7 +656,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrUnexpectedResponse, "") return errResp(ErrUnexpectedResponse, "")
} }
glog.V(logger.Debug).Infof("<=== CodeMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== CodeMsg from peer %v", p.id))
// A batch of node state data arrived to one of our previous requests // A batch of node state data arrived to one of our previous requests
var resp struct { var resp struct {
ReqID, BV uint64 ReqID, BV uint64
@ -674,7 +673,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
case GetReceiptsMsg: case GetReceiptsMsg:
glog.V(logger.Debug).Infof("<=== GetReceiptsMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== GetReceiptsMsg from peer %v", p.id))
// Decode the retrieval message // Decode the retrieval message
var req struct { var req struct {
ReqID uint64 ReqID uint64
@ -705,7 +704,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
// If known, encode and queue for response packet // If known, encode and queue for response packet
if encoded, err := rlp.EncodeToBytes(results); err != nil { if encoded, err := rlp.EncodeToBytes(results); err != nil {
glog.V(logger.Error).Infof("failed to encode receipt: %v", err) log.Error(fmt.Sprintf("failed to encode receipt: %v", err))
} else { } else {
receipts = append(receipts, encoded) receipts = append(receipts, encoded)
bytes += len(encoded) bytes += len(encoded)
@ -720,7 +719,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrUnexpectedResponse, "") return errResp(ErrUnexpectedResponse, "")
} }
glog.V(logger.Debug).Infof("<=== ReceiptsMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== ReceiptsMsg from peer %v", p.id))
// A batch of receipts arrived to one of our previous requests // A batch of receipts arrived to one of our previous requests
var resp struct { var resp struct {
ReqID, BV uint64 ReqID, BV uint64
@ -737,7 +736,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
case GetProofsMsg: case GetProofsMsg:
glog.V(logger.Debug).Infof("<=== GetProofsMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== GetProofsMsg from peer %v", p.id))
// Decode the retrieval message // Decode the retrieval message
var req struct { var req struct {
ReqID uint64 ReqID uint64
@ -787,7 +786,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrUnexpectedResponse, "") return errResp(ErrUnexpectedResponse, "")
} }
glog.V(logger.Debug).Infof("<=== ProofsMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== ProofsMsg from peer %v", p.id))
// A batch of merkle proofs arrived to one of our previous requests // A batch of merkle proofs arrived to one of our previous requests
var resp struct { var resp struct {
ReqID, BV uint64 ReqID, BV uint64
@ -804,7 +803,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
case GetHeaderProofsMsg: case GetHeaderProofsMsg:
glog.V(logger.Debug).Infof("<=== GetHeaderProofsMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== GetHeaderProofsMsg from peer %v", p.id))
// Decode the retrieval message // Decode the retrieval message
var req struct { var req struct {
ReqID uint64 ReqID uint64
@ -848,7 +847,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
return errResp(ErrUnexpectedResponse, "") return errResp(ErrUnexpectedResponse, "")
} }
glog.V(logger.Debug).Infof("<=== HeaderProofsMsg from peer %v", p.id) log.Debug(fmt.Sprintf("<=== HeaderProofsMsg from peer %v", p.id))
var resp struct { var resp struct {
ReqID, BV uint64 ReqID, BV uint64
Data []ChtResp Data []ChtResp
@ -885,7 +884,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost) pm.server.fcCostStats.update(msg.Code, uint64(reqCnt), rcost)
default: default:
glog.V(logger.Debug).Infof("<=== unknown message with code %d from peer %v", msg.Code, p.id) log.Debug(fmt.Sprintf("<=== unknown message with code %d from peer %v", msg.Code, p.id))
return errResp(ErrInvalidMsgCode, "%v", msg.Code) return errResp(ErrInvalidMsgCode, "%v", msg.Code)
} }

@ -19,14 +19,14 @@ package les
import ( import (
"crypto/rand" "crypto/rand"
"encoding/binary" "encoding/binary"
"fmt"
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -151,7 +151,7 @@ func (self *LesOdr) requestPeer(req *sentReq, peer *peer, delivered, timeout cha
select { select {
case <-delivered: case <-delivered:
case <-time.After(hardRequestTimeout): case <-time.After(hardRequestTimeout):
glog.V(logger.Debug).Infof("ODR hard request timeout from peer %v", peer.id) log.Debug(fmt.Sprintf("ODR hard request timeout from peer %v", peer.id))
go self.removePeer(peer.id) go self.removePeer(peer.id)
case <-self.stop: case <-self.stop:
return return
@ -237,7 +237,7 @@ func (self *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err err
// retrieved from network, store in db // retrieved from network, store in db
req.StoreResult(self.db) req.StoreResult(self.db)
} else { } else {
glog.V(logger.Debug).Infof("networkRequest err = %v", err) log.Debug(fmt.Sprintf("networkRequest err = %v", err))
} }
return return
} }

@ -21,6 +21,7 @@ package les
import ( import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"fmt"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -28,8 +29,7 @@ import (
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
@ -74,7 +74,7 @@ func (self *BlockRequest) CanSend(peer *peer) bool {
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *BlockRequest) Request(reqID uint64, peer *peer) error { func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id) log.Debug(fmt.Sprintf("ODR: requesting body of block %08x from peer %v", self.Hash[:4], peer.id))
return peer.RequestBodies(reqID, self.GetCost(peer), []common.Hash{self.Hash}) return peer.RequestBodies(reqID, self.GetCost(peer), []common.Hash{self.Hash})
} }
@ -82,39 +82,39 @@ func (self *BlockRequest) Request(reqID uint64, peer *peer) error {
// returns true and stores results in memory if the message was a valid reply // returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest) // to the request (implementation of LesOdrRequest)
func (self *BlockRequest) Valid(db ethdb.Database, msg *Msg) bool { func (self *BlockRequest) Valid(db ethdb.Database, msg *Msg) bool {
glog.V(logger.Debug).Infof("ODR: validating body of block %08x", self.Hash[:4]) log.Debug(fmt.Sprintf("ODR: validating body of block %08x", self.Hash[:4]))
if msg.MsgType != MsgBlockBodies { if msg.MsgType != MsgBlockBodies {
glog.V(logger.Debug).Infof("ODR: invalid message type") log.Debug(fmt.Sprintf("ODR: invalid message type"))
return false return false
} }
bodies := msg.Obj.([]*types.Body) bodies := msg.Obj.([]*types.Body)
if len(bodies) != 1 { if len(bodies) != 1 {
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(bodies)) log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(bodies)))
return false return false
} }
body := bodies[0] body := bodies[0]
header := core.GetHeader(db, self.Hash, self.Number) header := core.GetHeader(db, self.Hash, self.Number)
if header == nil { if header == nil {
glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4]) log.Debug(fmt.Sprintf("ODR: header not found for block %08x", self.Hash[:4]))
return false return false
} }
txHash := types.DeriveSha(types.Transactions(body.Transactions)) txHash := types.DeriveSha(types.Transactions(body.Transactions))
if header.TxHash != txHash { if header.TxHash != txHash {
glog.V(logger.Debug).Infof("ODR: header.TxHash %08x does not match received txHash %08x", header.TxHash[:4], txHash[:4]) log.Debug(fmt.Sprintf("ODR: header.TxHash %08x does not match received txHash %08x", header.TxHash[:4], txHash[:4]))
return false return false
} }
uncleHash := types.CalcUncleHash(body.Uncles) uncleHash := types.CalcUncleHash(body.Uncles)
if header.UncleHash != uncleHash { if header.UncleHash != uncleHash {
glog.V(logger.Debug).Infof("ODR: header.UncleHash %08x does not match received uncleHash %08x", header.UncleHash[:4], uncleHash[:4]) log.Debug(fmt.Sprintf("ODR: header.UncleHash %08x does not match received uncleHash %08x", header.UncleHash[:4], uncleHash[:4]))
return false return false
} }
data, err := rlp.EncodeToBytes(body) data, err := rlp.EncodeToBytes(body)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("ODR: body RLP encode error: %v", err) log.Debug(fmt.Sprintf("ODR: body RLP encode error: %v", err))
return false return false
} }
self.Rlp = data self.Rlp = data
glog.V(logger.Debug).Infof("ODR: validation successful") log.Debug(fmt.Sprintf("ODR: validation successful"))
return true return true
} }
@ -134,7 +134,7 @@ func (self *ReceiptsRequest) CanSend(peer *peer) bool {
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error { func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id) log.Debug(fmt.Sprintf("ODR: requesting receipts for block %08x from peer %v", self.Hash[:4], peer.id))
return peer.RequestReceipts(reqID, self.GetCost(peer), []common.Hash{self.Hash}) return peer.RequestReceipts(reqID, self.GetCost(peer), []common.Hash{self.Hash})
} }
@ -142,28 +142,28 @@ func (self *ReceiptsRequest) Request(reqID uint64, peer *peer) error {
// returns true and stores results in memory if the message was a valid reply // returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest) // to the request (implementation of LesOdrRequest)
func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool { func (self *ReceiptsRequest) Valid(db ethdb.Database, msg *Msg) bool {
glog.V(logger.Debug).Infof("ODR: validating receipts for block %08x", self.Hash[:4]) log.Debug(fmt.Sprintf("ODR: validating receipts for block %08x", self.Hash[:4]))
if msg.MsgType != MsgReceipts { if msg.MsgType != MsgReceipts {
glog.V(logger.Debug).Infof("ODR: invalid message type") log.Debug(fmt.Sprintf("ODR: invalid message type"))
return false return false
} }
receipts := msg.Obj.([]types.Receipts) receipts := msg.Obj.([]types.Receipts)
if len(receipts) != 1 { if len(receipts) != 1 {
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(receipts)) log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(receipts)))
return false return false
} }
hash := types.DeriveSha(receipts[0]) hash := types.DeriveSha(receipts[0])
header := core.GetHeader(db, self.Hash, self.Number) header := core.GetHeader(db, self.Hash, self.Number)
if header == nil { if header == nil {
glog.V(logger.Debug).Infof("ODR: header not found for block %08x", self.Hash[:4]) log.Debug(fmt.Sprintf("ODR: header not found for block %08x", self.Hash[:4]))
return false return false
} }
if !bytes.Equal(header.ReceiptHash[:], hash[:]) { if !bytes.Equal(header.ReceiptHash[:], hash[:]) {
glog.V(logger.Debug).Infof("ODR: header receipts hash %08x does not match calculated RLP hash %08x", header.ReceiptHash[:4], hash[:4]) log.Debug(fmt.Sprintf("ODR: header receipts hash %08x does not match calculated RLP hash %08x", header.ReceiptHash[:4], hash[:4]))
return false return false
} }
self.Receipts = receipts[0] self.Receipts = receipts[0]
glog.V(logger.Debug).Infof("ODR: validation successful") log.Debug(fmt.Sprintf("ODR: validation successful"))
return true return true
} }
@ -189,7 +189,7 @@ func (self *TrieRequest) CanSend(peer *peer) bool {
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *TrieRequest) Request(reqID uint64, peer *peer) error { func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id) log.Debug(fmt.Sprintf("ODR: requesting trie root %08x key %08x from peer %v", self.Id.Root[:4], self.Key[:4], peer.id))
req := &ProofReq{ req := &ProofReq{
BHash: self.Id.BlockHash, BHash: self.Id.BlockHash,
AccKey: self.Id.AccKey, AccKey: self.Id.AccKey,
@ -202,24 +202,24 @@ func (self *TrieRequest) Request(reqID uint64, peer *peer) error {
// returns true and stores results in memory if the message was a valid reply // returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest) // to the request (implementation of LesOdrRequest)
func (self *TrieRequest) Valid(db ethdb.Database, msg *Msg) bool { func (self *TrieRequest) Valid(db ethdb.Database, msg *Msg) bool {
glog.V(logger.Debug).Infof("ODR: validating trie root %08x key %08x", self.Id.Root[:4], self.Key[:4]) log.Debug(fmt.Sprintf("ODR: validating trie root %08x key %08x", self.Id.Root[:4], self.Key[:4]))
if msg.MsgType != MsgProofs { if msg.MsgType != MsgProofs {
glog.V(logger.Debug).Infof("ODR: invalid message type") log.Debug(fmt.Sprintf("ODR: invalid message type"))
return false return false
} }
proofs := msg.Obj.([][]rlp.RawValue) proofs := msg.Obj.([][]rlp.RawValue)
if len(proofs) != 1 { if len(proofs) != 1 {
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs)) log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(proofs)))
return false return false
} }
_, err := trie.VerifyProof(self.Id.Root, self.Key, proofs[0]) _, err := trie.VerifyProof(self.Id.Root, self.Key, proofs[0])
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("ODR: merkle proof verification error: %v", err) log.Debug(fmt.Sprintf("ODR: merkle proof verification error: %v", err))
return false return false
} }
self.Proof = proofs[0] self.Proof = proofs[0]
glog.V(logger.Debug).Infof("ODR: validation successful") log.Debug(fmt.Sprintf("ODR: validation successful"))
return true return true
} }
@ -244,7 +244,7 @@ func (self *CodeRequest) CanSend(peer *peer) bool {
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *CodeRequest) Request(reqID uint64, peer *peer) error { func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id) log.Debug(fmt.Sprintf("ODR: requesting node data for hash %08x from peer %v", self.Hash[:4], peer.id))
req := &CodeReq{ req := &CodeReq{
BHash: self.Id.BlockHash, BHash: self.Id.BlockHash,
AccKey: self.Id.AccKey, AccKey: self.Id.AccKey,
@ -256,23 +256,23 @@ func (self *CodeRequest) Request(reqID uint64, peer *peer) error {
// returns true and stores results in memory if the message was a valid reply // returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest) // to the request (implementation of LesOdrRequest)
func (self *CodeRequest) Valid(db ethdb.Database, msg *Msg) bool { func (self *CodeRequest) Valid(db ethdb.Database, msg *Msg) bool {
glog.V(logger.Debug).Infof("ODR: validating node data for hash %08x", self.Hash[:4]) log.Debug(fmt.Sprintf("ODR: validating node data for hash %08x", self.Hash[:4]))
if msg.MsgType != MsgCode { if msg.MsgType != MsgCode {
glog.V(logger.Debug).Infof("ODR: invalid message type") log.Debug(fmt.Sprintf("ODR: invalid message type"))
return false return false
} }
reply := msg.Obj.([][]byte) reply := msg.Obj.([][]byte)
if len(reply) != 1 { if len(reply) != 1 {
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(reply)) log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(reply)))
return false return false
} }
data := reply[0] data := reply[0]
if hash := crypto.Keccak256Hash(data); self.Hash != hash { if hash := crypto.Keccak256Hash(data); self.Hash != hash {
glog.V(logger.Debug).Infof("ODR: requested hash %08x does not match received data hash %08x", self.Hash[:4], hash[:4]) log.Debug(fmt.Sprintf("ODR: requested hash %08x does not match received data hash %08x", self.Hash[:4], hash[:4]))
return false return false
} }
self.Data = data self.Data = data
glog.V(logger.Debug).Infof("ODR: validation successful") log.Debug(fmt.Sprintf("ODR: validation successful"))
return true return true
} }
@ -304,7 +304,7 @@ func (self *ChtRequest) CanSend(peer *peer) bool {
// Request sends an ODR request to the LES network (implementation of LesOdrRequest) // Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (self *ChtRequest) Request(reqID uint64, peer *peer) error { func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
glog.V(logger.Debug).Infof("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id) log.Debug(fmt.Sprintf("ODR: requesting CHT #%d block #%d from peer %v", self.ChtNum, self.BlockNum, peer.id))
req := &ChtReq{ req := &ChtReq{
ChtNum: self.ChtNum, ChtNum: self.ChtNum,
BlockNum: self.BlockNum, BlockNum: self.BlockNum,
@ -316,15 +316,15 @@ func (self *ChtRequest) Request(reqID uint64, peer *peer) error {
// returns true and stores results in memory if the message was a valid reply // returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest) // to the request (implementation of LesOdrRequest)
func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool { func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool {
glog.V(logger.Debug).Infof("ODR: validating CHT #%d block #%d", self.ChtNum, self.BlockNum) log.Debug(fmt.Sprintf("ODR: validating CHT #%d block #%d", self.ChtNum, self.BlockNum))
if msg.MsgType != MsgHeaderProofs { if msg.MsgType != MsgHeaderProofs {
glog.V(logger.Debug).Infof("ODR: invalid message type") log.Debug(fmt.Sprintf("ODR: invalid message type"))
return false return false
} }
proofs := msg.Obj.([]ChtResp) proofs := msg.Obj.([]ChtResp)
if len(proofs) != 1 { if len(proofs) != 1 {
glog.V(logger.Debug).Infof("ODR: invalid number of entries: %d", len(proofs)) log.Debug(fmt.Sprintf("ODR: invalid number of entries: %d", len(proofs)))
return false return false
} }
proof := proofs[0] proof := proofs[0]
@ -332,22 +332,22 @@ func (self *ChtRequest) Valid(db ethdb.Database, msg *Msg) bool {
binary.BigEndian.PutUint64(encNumber[:], self.BlockNum) binary.BigEndian.PutUint64(encNumber[:], self.BlockNum)
value, err := trie.VerifyProof(self.ChtRoot, encNumber[:], proof.Proof) value, err := trie.VerifyProof(self.ChtRoot, encNumber[:], proof.Proof)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("ODR: CHT merkle proof verification error: %v", err) log.Debug(fmt.Sprintf("ODR: CHT merkle proof verification error: %v", err))
return false return false
} }
var node light.ChtNode var node light.ChtNode
if err := rlp.DecodeBytes(value, &node); err != nil { if err := rlp.DecodeBytes(value, &node); err != nil {
glog.V(logger.Debug).Infof("ODR: error decoding CHT node: %v", err) log.Debug(fmt.Sprintf("ODR: error decoding CHT node: %v", err))
return false return false
} }
if node.Hash != proof.Header.Hash() { if node.Hash != proof.Header.Hash() {
glog.V(logger.Debug).Infof("ODR: CHT header hash does not match") log.Debug(fmt.Sprintf("ODR: CHT header hash does not match"))
return false return false
} }
self.Proof = proof.Proof self.Proof = proof.Proof
self.Header = proof.Header self.Header = proof.Header
self.Td = node.Td self.Td = node.Td
glog.V(logger.Debug).Infof("ODR: validation successful") log.Debug(fmt.Sprintf("ODR: validation successful"))
return true return true
} }

@ -27,8 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les/flowcontrol" "github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -196,51 +195,51 @@ func (p *peer) SendHeaderProofs(reqID, bv uint64, proofs []ChtResp) error {
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the // RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block. // specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error { func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
glog.V(logger.Debug).Infof("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse) log.Debug(fmt.Sprintf("%v fetching %d headers from %x, skipping %d (reverse = %v)", p, amount, origin[:4], skip, reverse))
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
} }
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the // RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block. // specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error { func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
glog.V(logger.Debug).Infof("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse) log.Debug(fmt.Sprintf("%v fetching %d headers from #%d, skipping %d (reverse = %v)", p, amount, origin, skip, reverse))
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
} }
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes // RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified. // specified.
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error { func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %d block bodies", p, len(hashes)) log.Debug(fmt.Sprintf("%v fetching %d block bodies", p, len(hashes)))
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes) return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
} }
// RequestCode fetches a batch of arbitrary data from a node's known state // RequestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes. // data, corresponding to the specified hashes.
func (p *peer) RequestCode(reqID, cost uint64, reqs []*CodeReq) error { func (p *peer) RequestCode(reqID, cost uint64, reqs []*CodeReq) error {
glog.V(logger.Debug).Infof("%v fetching %v state data", p, len(reqs)) log.Debug(fmt.Sprintf("%v fetching %v state data", p, len(reqs)))
return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs) return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
} }
// RequestReceipts fetches a batch of transaction receipts from a remote node. // RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error { func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
glog.V(logger.Debug).Infof("%v fetching %v receipts", p, len(hashes)) log.Debug(fmt.Sprintf("%v fetching %v receipts", p, len(hashes)))
return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes) return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
} }
// RequestProofs fetches a batch of merkle proofs from a remote node. // RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []*ProofReq) error { func (p *peer) RequestProofs(reqID, cost uint64, reqs []*ProofReq) error {
glog.V(logger.Debug).Infof("%v fetching %v proofs", p, len(reqs)) log.Debug(fmt.Sprintf("%v fetching %v proofs", p, len(reqs)))
return sendRequest(p.rw, GetProofsMsg, reqID, cost, reqs) return sendRequest(p.rw, GetProofsMsg, reqID, cost, reqs)
} }
// RequestHeaderProofs fetches a batch of header merkle proofs from a remote node. // RequestHeaderProofs fetches a batch of header merkle proofs from a remote node.
func (p *peer) RequestHeaderProofs(reqID, cost uint64, reqs []*ChtReq) error { func (p *peer) RequestHeaderProofs(reqID, cost uint64, reqs []*ChtReq) error {
glog.V(logger.Debug).Infof("%v fetching %v header proofs", p, len(reqs)) log.Debug(fmt.Sprintf("%v fetching %v header proofs", p, len(reqs)))
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs) return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
} }
func (p *peer) SendTxs(cost uint64, txs types.Transactions) error { func (p *peer) SendTxs(cost uint64, txs types.Transactions) error {
glog.V(logger.Debug).Infof("%v relaying %v txs", p, len(txs)) log.Debug(fmt.Sprintf("%v relaying %v txs", p, len(txs)))
reqID := getNextReqID() reqID := getNextReqID()
p.fcServer.MustAssignRequest(reqID) p.fcServer.MustAssignRequest(reqID)
p.fcServer.SendRequest(reqID, cost) p.fcServer.SendRequest(reqID, cost)

@ -19,6 +19,7 @@ package les
import ( import (
"encoding/binary" "encoding/binary"
"fmt"
"math" "math"
"sync" "sync"
"time" "time"
@ -30,8 +31,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/flowcontrol" "github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
@ -292,7 +292,7 @@ func (pm *ProtocolManager) blockLoop() {
lastHead = header lastHead = header
lastBroadcastTd = td lastBroadcastTd = td
glog.V(logger.Debug).Infoln("===> ", number, hash, td, reorg) log.Debug(fmt.Sprint("===> ", number, hash, td, reorg))
announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg} announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
for _, p := range peers { for _, p := range peers {
@ -396,7 +396,7 @@ func makeCht(db ethdb.Database) bool {
} else { } else {
lastChtNum++ lastChtNum++
glog.V(logger.Detail).Infof("cht: %d %064x", lastChtNum, root) log.Trace(fmt.Sprintf("cht: %d %064x", lastChtNum, root))
storeChtRoot(db, lastChtNum, root) storeChtRoot(db, lastChtNum, root)
var data [8]byte var data [8]byte

@ -18,6 +18,7 @@
package les package les
import ( import (
"fmt"
"io" "io"
"math" "math"
"math/rand" "math/rand"
@ -28,8 +29,7 @@ import (
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/discv5"
@ -162,7 +162,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
if entry == nil { if entry == nil {
entry = pool.findOrNewNode(p.ID(), ip, port) entry = pool.findOrNewNode(p.ID(), ip, port)
} }
glog.V(logger.Debug).Infof("connecting to %v, state: %v", p.id, entry.state) log.Debug(fmt.Sprintf("connecting to %v, state: %v", p.id, entry.state))
if entry.state == psConnected || entry.state == psRegistered { if entry.state == psConnected || entry.state == psRegistered {
return nil return nil
} }
@ -184,7 +184,7 @@ func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry {
// registered should be called after a successful handshake // registered should be called after a successful handshake
func (pool *serverPool) registered(entry *poolEntry) { func (pool *serverPool) registered(entry *poolEntry) {
glog.V(logger.Debug).Infof("registered %v", entry.id.String()) log.Debug(fmt.Sprintf("registered %v", entry.id.String()))
pool.lock.Lock() pool.lock.Lock()
defer pool.lock.Unlock() defer pool.lock.Unlock()
@ -202,7 +202,7 @@ func (pool *serverPool) registered(entry *poolEntry) {
// can be updated optionally (not updated if no registration happened, in this case // can be updated optionally (not updated if no registration happened, in this case
// only connection statistics are updated, just like in case of timeout) // only connection statistics are updated, just like in case of timeout)
func (pool *serverPool) disconnect(entry *poolEntry) { func (pool *serverPool) disconnect(entry *poolEntry) {
glog.V(logger.Debug).Infof("disconnected %v", entry.id.String()) log.Debug(fmt.Sprintf("disconnected %v", entry.id.String()))
pool.lock.Lock() pool.lock.Lock()
defer pool.lock.Unlock() defer pool.lock.Unlock()
@ -418,7 +418,7 @@ func (pool *serverPool) findOrNewNode(id discover.NodeID, ip net.IP, port uint16
now := mclock.Now() now := mclock.Now()
entry := pool.entries[id] entry := pool.entries[id]
if entry == nil { if entry == nil {
glog.V(logger.Debug).Infof("discovered %v", id.String()) log.Debug(fmt.Sprintf("discovered %v", id.String()))
entry = &poolEntry{ entry = &poolEntry{
id: id, id: id,
addr: make(map[string]*poolEntryAddress), addr: make(map[string]*poolEntryAddress),
@ -459,11 +459,11 @@ func (pool *serverPool) loadNodes() {
var list []*poolEntry var list []*poolEntry
err = rlp.DecodeBytes(enc, &list) err = rlp.DecodeBytes(enc, &list)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("node list decode error: %v", err) log.Debug(fmt.Sprintf("node list decode error: %v", err))
return return
} }
for _, e := range list { for _, e := range list {
glog.V(logger.Debug).Infof("loaded server stats %016x fails: %v connStats: %v / %v delayStats: %v / %v responseStats: %v / %v timeoutStats: %v / %v", e.id[0:8], e.lastConnected.fails, e.connectStats.avg, e.connectStats.weight, time.Duration(e.delayStats.avg), e.delayStats.weight, time.Duration(e.responseStats.avg), e.responseStats.weight, e.timeoutStats.avg, e.timeoutStats.weight) log.Debug(fmt.Sprintf("loaded server stats %016x fails: %v connStats: %v / %v delayStats: %v / %v responseStats: %v / %v timeoutStats: %v / %v", e.id[0:8], e.lastConnected.fails, e.connectStats.avg, e.connectStats.weight, time.Duration(e.delayStats.avg), e.delayStats.weight, time.Duration(e.responseStats.avg), e.responseStats.weight, e.timeoutStats.avg, e.timeoutStats.weight))
pool.entries[e.id] = e pool.entries[e.id] = e
pool.knownQueue.setLatest(e) pool.knownQueue.setLatest(e)
pool.knownSelect.update((*knownEntry)(e)) pool.knownSelect.update((*knownEntry)(e))
@ -568,7 +568,7 @@ func (pool *serverPool) dial(entry *poolEntry, knownSelected bool) {
pool.newSelected++ pool.newSelected++
} }
addr := entry.addrSelect.choose().(*poolEntryAddress) addr := entry.addrSelect.choose().(*poolEntryAddress)
glog.V(logger.Debug).Infof("dialing %v out of %v, known: %v", entry.id.String()+"@"+addr.strKey(), len(entry.addr), knownSelected) log.Debug(fmt.Sprintf("dialing %v out of %v, known: %v", entry.id.String()+"@"+addr.strKey(), len(entry.addr), knownSelected))
entry.dialed = addr entry.dialed = addr
go func() { go func() {
pool.server.AddPeer(discover.NewNode(entry.id, addr.ip, addr.port, addr.port)) pool.server.AddPeer(discover.NewNode(entry.id, addr.ip, addr.port, addr.port))
@ -589,7 +589,7 @@ func (pool *serverPool) checkDialTimeout(entry *poolEntry) {
if entry.state != psDialed { if entry.state != psDialed {
return return
} }
glog.V(logger.Debug).Infof("timeout %v", entry.id.String()+"@"+entry.dialed.strKey()) log.Debug(fmt.Sprintf("timeout %v", entry.id.String()+"@"+entry.dialed.strKey()))
entry.state = psNotConnected entry.state = psNotConnected
if entry.knownSelected { if entry.knownSelected {
pool.knownSelected-- pool.knownSelected--

@ -17,6 +17,7 @@
package light package light
import ( import (
"fmt"
"math/big" "math/big"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -27,8 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -101,7 +101,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
if err != nil { if err != nil {
return nil, err return nil, err
} }
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block") log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block"))
} }
if bc.genesisBlock.Hash() == (common.Hash{212, 229, 103, 64, 248, 118, 174, 248, 192, 16, 184, 106, 64, 213, 245, 103, 69, 161, 24, 208, 144, 106, 52, 230, 154, 236, 140, 13, 177, 203, 143, 163}) { if bc.genesisBlock.Hash() == (common.Hash{212, 229, 103, 64, 248, 118, 174, 248, 192, 16, 184, 106, 64, 213, 245, 103, 69, 161, 24, 208, 144, 106, 52, 230, 154, 236, 140, 13, 177, 203, 143, 163}) {
@ -117,7 +117,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
Root: common.HexToHash("c035076523faf514038f619715de404a65398c51899b5dccca9c05b00bc79315"), Root: common.HexToHash("c035076523faf514038f619715de404a65398c51899b5dccca9c05b00bc79315"),
}) })
} }
glog.V(logger.Info).Infoln("Added trusted CHT for mainnet") log.Info(fmt.Sprint("Added trusted CHT for mainnet"))
} else { } else {
if bc.genesisBlock.Hash() == (common.Hash{12, 215, 134, 162, 66, 93, 22, 241, 82, 198, 88, 49, 108, 66, 62, 108, 225, 24, 30, 21, 195, 41, 88, 38, 215, 201, 144, 76, 186, 156, 227, 3}) { if bc.genesisBlock.Hash() == (common.Hash{12, 215, 134, 162, 66, 93, 22, 241, 82, 198, 88, 49, 108, 66, 62, 108, 225, 24, 30, 21, 195, 41, 88, 38, 215, 201, 144, 76, 186, 156, 227, 3}) {
// add trusted CHT for testnet // add trusted CHT for testnet
@ -125,7 +125,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
Number: 452, Number: 452,
Root: common.HexToHash("511da2c88e32b14cf4a4e62f7fcbb297139faebc260a4ab5eb43cce6edcba324"), Root: common.HexToHash("511da2c88e32b14cf4a4e62f7fcbb297139faebc260a4ab5eb43cce6edcba324"),
}) })
glog.V(logger.Info).Infoln("Added trusted CHT for testnet") log.Info(fmt.Sprint("Added trusted CHT for testnet"))
} else { } else {
DeleteTrustedCht(bc.chainDb) DeleteTrustedCht(bc.chainDb)
} }
@ -137,9 +137,9 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash := range core.BadHashes { for hash := range core.BadHashes {
if header := bc.GetHeaderByHash(hash); header != nil { if header := bc.GetHeaderByHash(hash); header != nil {
glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]) log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]))
bc.SetHead(header.Number.Uint64() - 1) bc.SetHead(header.Number.Uint64() - 1)
glog.V(logger.Error).Infoln("Chain rewind was successful, resuming normal operation") log.Error(fmt.Sprint("Chain rewind was successful, resuming normal operation"))
} }
} }
return bc, nil return bc, nil
@ -169,7 +169,7 @@ func (self *LightChain) loadLastState() error {
// Issue a status log and return // Issue a status log and return
header := self.hc.CurrentHeader() header := self.hc.CurrentHeader()
headerTd := self.GetTd(header.Hash(), header.Number.Uint64()) headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
glog.V(logger.Info).Infof("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd) log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", self.hc.CurrentHeader().Number, self.hc.CurrentHeader().Hash().Bytes()[:4], headerTd))
return nil return nil
} }
@ -246,10 +246,10 @@ func (bc *LightChain) ResetWithGenesisBlock(genesis *types.Block) {
// Prepare the genesis block and reinitialise the chain // Prepare the genesis block and reinitialise the chain
if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { if err := core.WriteTd(bc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
glog.Fatalf("failed to write genesis block TD: %v", err) log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err))
} }
if err := core.WriteBlock(bc.chainDb, genesis); err != nil { if err := core.WriteBlock(bc.chainDb, genesis); err != nil {
glog.Fatalf("failed to write genesis block: %v", err) log.Crit(fmt.Sprintf("failed to write genesis block: %v", err))
} }
bc.genesisBlock = genesis bc.genesisBlock = genesis
bc.hc.SetGenesis(bc.genesisBlock.Header()) bc.hc.SetGenesis(bc.genesisBlock.Header())
@ -346,7 +346,7 @@ func (bc *LightChain) Stop() {
bc.wg.Wait() bc.wg.Wait()
glog.V(logger.Info).Infoln("Chain manager stopped") log.Info(fmt.Sprint("Chain manager stopped"))
} }
// Rollback is designed to remove a chain of links from the database that aren't // Rollback is designed to remove a chain of links from the database that aren't
@ -406,15 +406,15 @@ func (self *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int)
switch status { switch status {
case core.CanonStatTy: case core.CanonStatTy:
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4]) return fmt.Sprintf("[%v] inserted header #%d (%x...).\n", time.Now().UnixNano(), header.Number, header.Hash().Bytes()[0:4])
} }})
events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()}) events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()})
case core.SideStatTy: case core.SideStatTy:
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string {
glog.Infof("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4]) return fmt.Sprintf("inserted forked header #%d (TD=%v) (%x...).\n", header.Number, header.Difficulty, header.Hash().Bytes()[0:4])
} }})
events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)}) events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)})
case core.SplitStatTy: case core.SplitStatTy:

@ -19,6 +19,7 @@ package light
import ( import (
"bytes" "bytes"
"errors" "errors"
"fmt"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -26,8 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -149,7 +149,7 @@ func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint6
} }
body := new(types.Body) body := new(types.Body)
if err := rlp.Decode(bytes.NewReader(data), body); err != nil { if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err) log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err))
return nil, err return nil, err
} }
return body, nil return body, nil

@ -17,12 +17,12 @@
package light package light
import ( import (
"fmt"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -239,9 +239,9 @@ func (self *LightState) GetOrNewStateObject(ctx context.Context, addr common.Add
// newStateObject creates a state object whether it exists in the state or not // newStateObject creates a state object whether it exists in the state or not
func (self *LightState) newStateObject(addr common.Address) *StateObject { func (self *LightState) newStateObject(addr common.Address) *StateObject {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("(+) %x\n", addr) return fmt.Sprintf("(+) %x\n", addr)
} }})
stateObject := NewStateObject(addr, self.odr) stateObject := NewStateObject(addr, self.odr)
self.stateObjects[addr.Str()] = stateObject self.stateObjects[addr.Str()] = stateObject

@ -23,8 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -109,9 +108,9 @@ func (self *StateObject) MarkForDeletion() {
self.remove = true self.remove = true
self.dirty = true self.dirty = true
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("%x: #%d %v X\n", self.Address(), self.nonce, self.balance) return fmt.Sprintf("%x: #%d %v X\n", self.Address(), self.nonce, self.balance)
} }})
} }
// getAddr gets the storage value at the given address from the trie // getAddr gets the storage value at the given address from the trie
@ -158,18 +157,18 @@ func (self *StateObject) SetState(k, value common.Hash) {
func (c *StateObject) AddBalance(amount *big.Int) { func (c *StateObject) AddBalance(amount *big.Int) {
c.SetBalance(new(big.Int).Add(c.balance, amount)) c.SetBalance(new(big.Int).Add(c.balance, amount))
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("%x: #%d %v (+ %v)\n", c.Address(), c.nonce, c.balance, amount) return fmt.Sprintf("%x: #%d %v (+ %v)\n", c.Address(), c.nonce, c.balance, amount)
} }})
} }
// SubBalance subtracts the given amount from the account balance // SubBalance subtracts the given amount from the account balance
func (c *StateObject) SubBalance(amount *big.Int) { func (c *StateObject) SubBalance(amount *big.Int) {
c.SetBalance(new(big.Int).Sub(c.balance, amount)) c.SetBalance(new(big.Int).Sub(c.balance, amount))
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Infof("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount) return fmt.Sprintf("%x: #%d %v (- %v)\n", c.Address(), c.nonce, c.balance, amount)
} }})
} }
// SetBalance sets the account balance to the given amount // SetBalance sets the account balance to the given amount

@ -26,8 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"golang.org/x/net/context" "golang.org/x/net/context"
@ -321,7 +320,7 @@ func (pool *TxPool) eventLoop() {
func (pool *TxPool) Stop() { func (pool *TxPool) Stop() {
close(pool.quit) close(pool.quit)
pool.events.Unsubscribe() pool.events.Unsubscribe()
glog.V(logger.Info).Infoln("Transaction pool stopped") log.Info(fmt.Sprint("Transaction pool stopped"))
} }
// Stats returns the number of currently pending (locally created) transactions // Stats returns the number of currently pending (locally created) transactions
@ -417,7 +416,7 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
go self.eventMux.Post(core.TxPreEvent{Tx: tx}) go self.eventMux.Post(core.TxPreEvent{Tx: tx})
} }
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
var toname string var toname string
if to := tx.To(); to != nil { if to := tx.To(); to != nil {
toname = common.Bytes2Hex(to[:4]) toname = common.Bytes2Hex(to[:4])
@ -428,8 +427,8 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error {
// verified in ValidateTransaction. // verified in ValidateTransaction.
f, _ := types.Sender(self.signer, tx) f, _ := types.Sender(self.signer, tx)
from := common.Bytes2Hex(f[:4]) from := common.Bytes2Hex(f[:4])
glog.Infof("(t) %x => %s (%v) %x\n", from, toname, tx.Value, hash) return fmt.Sprintf("(t) %x => %s (%v) %x\n", from, toname, tx.Value(), hash)
} }})
return nil return nil
} }
@ -464,11 +463,11 @@ func (self *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) {
for _, tx := range txs { for _, tx := range txs {
if err := self.add(ctx, tx); err != nil { if err := self.add(ctx, tx); err != nil {
glog.V(logger.Debug).Infoln("tx error:", err) log.Debug(fmt.Sprint("tx error:", err))
} else { } else {
sendTx = append(sendTx, tx) sendTx = append(sendTx, tx)
h := tx.Hash() h := tx.Hash()
glog.V(logger.Debug).Infof("tx %x\n", h[:4]) log.Debug(fmt.Sprintf("tx %x\n", h[:4]))
} }
} }

@ -23,7 +23,7 @@ func init() {
} }
root = &logger{[]interface{}{}, new(swapHandler)} root = &logger{[]interface{}{}, new(swapHandler)}
root.SetHandler(StdoutHandler) root.SetHandler(LvlFilterHandler(LvlInfo, StdoutHandler))
} }
// New returns a new logger with the given context. // New returns a new logger with the given context.

@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,44 +0,0 @@
glog
====
Leveled execution logs for Go.
This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
http://code.google.com/p/google-glog
By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
Through the -vmodule flag, the package also provides fine-grained
control over logging at the file level.
The comment from glog.go introduces the ideas:
Package glog implements logging analogous to the Google-internal
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
Error, Fatal, plus formatting variants such as Infof. It
also provides V-style logging controlled by the -v and
-vmodule=file=2 flags.
Basic examples:
glog.Info("Prepare to repel boarders")
glog.Fatalf("Initialization failed: %s", err)
See the documentation for the V function for an explanation
of these examples:
if glog.V(2) {
glog.Info("Starting transaction...")
}
glog.V(2).Infoln("Processed", nItems, "elements")
The repository contains an open source version of the log package
used inside Google. The master copy of the source lives inside
Google, not here. The code in this repo is for export only and is not itself
under development. Feature requests will be ignored.
Send bug reports to golang-nuts@googlegroups.com.

File diff suppressed because it is too large Load Diff

@ -1,128 +0,0 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// File I/O for logs.
package glog
import (
"errors"
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"sync"
"time"
)
// MaxSize is the maximum size of a log file in bytes.
var MaxSize uint64 = 1024 * 1024 * 1800
// logDirs lists the candidate directories for new log files.
var logDirs []string
// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
//var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
var logDir *string = new(string)
// SetLogDir overrides the directory in which new log files are created.
// It must be called before the first log file is opened, because
// createLogDirs runs only once (guarded by onceLogDirs in create).
func SetLogDir(str string) {
	*logDir = str
}
// createLogDirs populates logDirs with the candidate directories for new
// log files: the user-supplied override (if set via SetLogDir) first,
// then the system temporary directory as a fallback.
func createLogDirs() {
	if *logDir != "" {
		logDirs = append(logDirs, *logDir)
	}
	logDirs = append(logDirs, os.TempDir())
}
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
// init derives the host and user names that are embedded in log file
// names, keeping the "unknownhost"/"unknownuser" placeholders when they
// cannot be determined.
func init() {
	h, err := os.Hostname()
	if err == nil {
		host = shortHostname(h)
	}
	current, err := user.Current()
	if err == nil {
		userName = current.Username
	}
	// Sanitize userName since it may contain filepath separators on Windows.
	userName = strings.Replace(userName, `\`, "_", -1)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
	// Everything up to (but excluding) the first '.' is the short name;
	// a name without a '.' (including the empty string) passes through.
	return strings.SplitN(hostname, ".", 2)[0]
}
// logName returns a new log file name containing tag, with start time t, and
// the name for the symlink for tag.
// The name encodes program, host, user, tag, timestamp and pid so that
// successive or concurrent runs never collide on the same file name.
func logName(tag string, t time.Time) (name, link string) {
	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
		program,
		host,
		userName,
		tag,
		t.Year(),
		t.Month(),
		t.Day(),
		t.Hour(),
		t.Minute(),
		t.Second(),
		pid)
	// The symlink omits the timestamp/pid so it always points at the
	// most recent file for this tag.
	return name, program + "." + tag
}
var onceLogDirs sync.Once
// create creates a new log file and returns the file and its filename, which
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
	// Candidate directories are computed once per process.
	onceLogDirs.Do(createLogDirs)
	if len(logDirs) == 0 {
		return nil, "", errors.New("log: no log dirs")
	}
	name, link := logName(tag, t)
	var lastErr error
	// Try each candidate directory in order; the first successful
	// os.Create wins. Only the last failure is reported.
	for _, dir := range logDirs {
		fname := filepath.Join(dir, name)
		f, err := os.Create(fname)
		if err == nil {
			// Refresh the per-tag symlink; failures are deliberately
			// ignored (the log file itself is what matters).
			symlink := filepath.Join(dir, link)
			os.Remove(symlink)        // ignore err
			os.Symlink(name, symlink) // ignore err
			return f, fname, nil
		}
		lastErr = err
	}
	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}

@ -1,436 +0,0 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package glog
import (
"bytes"
"fmt"
stdLog "log"
"path/filepath"
"runtime"
"strconv"
"strings"
"testing"
"time"
)
// Test that shortHostname works as advertised.
// Empty and dot-free inputs must pass through unchanged.
func TestShortHostname(t *testing.T) {
	for hostname, expect := range map[string]string{
		"":                "",
		"host":            "host",
		"host.google.com": "host",
	} {
		if got := shortHostname(hostname); expect != got {
			t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got)
		}
	}
}
// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter, letting
// tests capture log output in memory.
type flushBuffer struct {
	bytes.Buffer
}

// Flush is a no-op; the buffer is purely in-memory.
func (f *flushBuffer) Flush() error {
	return nil
}

// Sync is a no-op; there is no underlying file descriptor.
func (f *flushBuffer) Sync() error {
	return nil
}
// swap sets the log writers and returns the old array.
// It locks l.mu so the exchange is atomic with respect to concurrent
// logging.
func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) {
	l.mu.Lock()
	defer l.mu.Unlock()
	old = l.file
	for i, w := range writers {
		// Write through the receiver, not the package-level `logging`
		// singleton; the original wrote logging.file[i], which only
		// worked because every caller happened to pass l == &logging.
		l.file[i] = w
	}
	return
}
// newBuffers sets the log writers to all new byte buffers and returns the old array.
// One fresh flushBuffer is installed per severity level.
func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter {
	return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)})
}
// contents returns the specified log value as a string.
// The type assertion assumes newBuffers has installed flushBuffers first.
func contents(s severity) string {
	return logging.file[s].(*flushBuffer).String()
}
// contains reports whether the string is contained in the log.
// The *testing.T parameter is unused but kept for call-site uniformity.
func contains(s severity, str string, t *testing.T) bool {
	return strings.Contains(contents(s), str)
}
// setFlags configures the logging flags how the test expects them:
// output is captured in buffers rather than written to stderr.
func setFlags() {
	logging.toStderr = false
}
// Test that Info works as advertised.
func TestInfo(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	Info("test")
	// "I" is the severity character prefixed to every info line.
	if !contains(infoLog, "I", t) {
		t.Errorf("Info has wrong character: %q", contents(infoLog))
	}
	if !contains(infoLog, "test", t) {
		t.Error("Info failed")
	}
}
// TestInfoDepth checks that the depth argument of InfoDepth attributes
// the log line to the correct caller frame, by comparing the file:line
// embedded in each header against runtime.Caller.
func TestInfoDepth(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())

	f := func() { InfoDepth(1, "depth-test1") }

	// The next three lines must stay together
	_, _, wantLine, _ := runtime.Caller(0)
	InfoDepth(0, "depth-test0")
	f()

	msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n")
	if len(msgs) != 2 {
		t.Fatalf("Got %d lines, expected 2", len(msgs))
	}

	for i, m := range msgs {
		if !strings.HasPrefix(m, "I") {
			t.Errorf("InfoDepth[%d] has wrong character: %q", i, m)
		}
		w := fmt.Sprintf("depth-test%d", i)
		if !strings.Contains(m, w) {
			t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m)
		}

		// pull out the line number (between : and ])
		msg := m[strings.LastIndex(m, ":")+1:]
		x := strings.Index(msg, "]")
		if x < 0 {
			t.Errorf("InfoDepth[%d]: missing ']': %q", i, m)
			continue
		}
		line, err := strconv.Atoi(msg[:x])
		if err != nil {
			t.Errorf("InfoDepth[%d]: bad line number: %q", i, m)
			continue
		}
		// Both calls must resolve to consecutive lines after Caller(0).
		wantLine++
		if wantLine != line {
			t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine)
		}
	}
}
// init redirects the standard library's log output into the INFO log so
// that TestStandardLog can observe it.
func init() {
	CopyStandardLogTo("INFO")
}
// Test that CopyStandardLogTo panics on bad input.
func TestCopyStandardLogToPanic(t *testing.T) {
	defer func() {
		// The recovered value must be a string mentioning the bad name.
		if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") {
			t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s)
		}
	}()
	CopyStandardLogTo("LOG")
}
// Test that using the standard log package logs to INFO.
// Relies on the init above having called CopyStandardLogTo("INFO").
func TestStandardLog(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	stdLog.Print("test")
	if !contains(infoLog, "I", t) {
		t.Errorf("Info has wrong character: %q", contents(infoLog))
	}
	if !contains(infoLog, "test", t) {
		t.Error("Info failed")
	}
}
// Test that the header has the correct format.
// timeNow and pid are package-level state; both are stubbed to fixed
// values and restored on exit so later tests are unaffected (the
// original restored timeNow but leaked pid = 1234).
func TestHeader(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	defer func(previous func() time.Time) { timeNow = previous }(timeNow)
	timeNow = func() time.Time {
		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
	}
	defer func(previous int) { pid = previous }(pid)
	pid = 1234
	Info("test")
	var line int
	format := "I0102 15:04:05.067890 logger/glog/glog_test.go:%d] test\n"
	n, err := fmt.Sscanf(contents(infoLog), format, &line)
	if n != 1 || err != nil {
		t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
	}
	// Scanf treats multiple spaces as equivalent to a single space,
	// so check for correct space-padding also.
	want := fmt.Sprintf(format, line)
	if contents(infoLog) != want {
		t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want)
	}
}
// Test that an Error log goes to Warning and Info.
// Even in the Info log, the source character will be E, so the data should
// all be identical.
func TestError(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	Error("test")
	if !contains(errorLog, "E", t) {
		t.Errorf("Error has wrong character: %q", contents(errorLog))
	}
	if !contains(errorLog, "test", t) {
		t.Error("Error failed")
	}
	// The lower-severity logs must contain the exact same line.
	str := contents(errorLog)
	if !contains(warningLog, str, t) {
		t.Error("Warning failed")
	}
	if !contains(infoLog, str, t) {
		t.Error("Info failed")
	}
}
// Test that a Warning log goes to Info.
// Even in the Info log, the source character will be W, so the data should
// all be identical.
func TestWarning(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	Warning("test")
	if !contains(warningLog, "W", t) {
		t.Errorf("Warning has wrong character: %q", contents(warningLog))
	}
	if !contains(warningLog, "test", t) {
		t.Error("Warning failed")
	}
	str := contents(warningLog)
	if !contains(infoLog, str, t) {
		t.Error("Info failed")
	}
}
// Test that a V log goes to Info.
func TestV(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	// Raise global verbosity to 2 for the duration of the test.
	logging.verbosity.Set("2")
	defer logging.verbosity.Set("0")
	V(2).Info("test")
	if !contains(infoLog, "I", t) {
		t.Errorf("Info has wrong character: %q", contents(infoLog))
	}
	if !contains(infoLog, "test", t) {
		t.Error("Info failed")
	}
}
// Test that a vmodule enables a log in this file.
func TestVmoduleOn(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	// Enable verbosity 2 for this file only; reset on exit.
	logging.vmodule.Set("glog_test.go=2")
	defer logging.vmodule.Set("")
	// V must report enabled up to and including level 2, but not 3.
	if !V(1) {
		t.Error("V not enabled for 1")
	}
	if !V(2) {
		t.Error("V not enabled for 2")
	}
	if V(3) {
		t.Error("V enabled for 3")
	}
	V(2).Info("test")
	if !contains(infoLog, "I", t) {
		t.Errorf("Info has wrong character: %q", contents(infoLog))
	}
	if !contains(infoLog, "test", t) {
		t.Error("Info failed")
	}
}
// Test that a vmodule of another file does not enable a log in this file.
func TestVmoduleOff(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	logging.vmodule.Set("notthisfile=2")
	defer logging.vmodule.Set("")
	// No level should be enabled here.
	for i := 1; i <= 3; i++ {
		if V(Level(i)) {
			t.Errorf("V enabled for %d", i)
		}
	}
	V(2).Info("test")
	if contents(infoLog) != "" {
		t.Error("V logged incorrectly")
	}
}
// patternTests maps -vmodule file patterns to the regular expressions
// compileModulePattern is expected to produce from them.
var patternTests = []struct{ input, want string }{
	{"foo/bar/x.go", ".*/foo/bar/x\\.go$"},
	{"foo/*/x.go", ".*/foo(/.*)?/x\\.go$"},
	{"foo/*", ".*/foo(/.*)?/[^/]+\\.go$"},
}
// TestCompileModulePattern checks that vmodule patterns compile to the
// expected regular expressions (see patternTests above).
func TestCompileModulePattern(t *testing.T) {
	for _, test := range patternTests {
		re, err := compileModulePattern(test.input)
		if err != nil {
			t.Fatalf("%s: %v", test.input, err)
		}
		if re.String() != test.want {
			t.Errorf("mismatch for %q: got %q, want %q", test.input, re.String(), test.want)
		}
	}
}
// vGlobs are patterns that match/don't match this file at V=2.
// The map value records whether V(2) should be enabled under that pattern.
var vGlobs = map[string]bool{
	// Easy to test the numeric match here.
	"glog_test.go=1": false, // If -vmodule sets V to 1, V(2) will fail.
	"glog_test.go=2": true,
	"glog_test.go=3": true, // If -vmodule sets V to 1, V(3) will succeed.
	// Import path prefix matching
	"logger/glog=1": false,
	"logger/glog=2": true,
	"logger/glog=3": true,
	// Import path glob matching
	"logger/*=1": false,
	"logger/*=2": true,
	"logger/*=3": true,
	// These all use 2 and check the patterns.
	"*=2": true,
}
// testVmoduleGlob is the per-pattern helper for TestVmoduleGlob: it
// installs pat as the vmodule spec and checks whether V(2) matches.
func testVmoduleGlob(pat string, match bool, t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	defer logging.vmodule.Set("")
	logging.vmodule.Set(pat)
	if V(2) != Verbose(match) {
		t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match)
	}
}
// Test that a vmodule globbing works as advertised, driven by the
// vGlobs table above.
func TestVmoduleGlob(t *testing.T) {
	for glob, match := range vGlobs {
		testVmoduleGlob(glob, match, t)
	}
}
// TestRollover verifies that writing past MaxSize rotates the INFO log
// to a new file and resets the byte counter. It stubs logExitFunc so a
// rollover failure is captured in err instead of exiting the process.
func TestRollover(t *testing.T) {
	setFlags()
	var err error
	defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
	logExitFunc = func(e error) {
		err = e
	}
	// Shrink MaxSize so a single large write forces rotation; restore it.
	defer func(previous uint64) { MaxSize = previous }(MaxSize)
	MaxSize = 512

	Info("x") // Be sure we have a file.
	info, ok := logging.file[infoLog].(*syncBuffer)
	if !ok {
		t.Fatal("info wasn't created")
	}
	if err != nil {
		t.Fatalf("info has initial error: %v", err)
	}
	fname0 := info.file.Name()
	Info(strings.Repeat("x", int(MaxSize))) // force a rollover
	if err != nil {
		t.Fatalf("info has error after big write: %v", err)
	}

	// Make sure the next log file gets a file name with a different
	// time stamp.
	//
	// TODO: determine whether we need to support subsecond log
	// rotation. C++ does not appear to handle this case (nor does it
	// handle Daylight Savings Time properly).
	time.Sleep(1 * time.Second)

	Info("x") // create a new file
	if err != nil {
		t.Fatalf("error after rotation: %v", err)
	}
	fname1 := info.file.Name()
	if fname0 == fname1 {
		t.Errorf("info.f.Name did not change: %v", fname0)
	}
	if info.nbytes >= MaxSize {
		t.Errorf("file size was not reset: %d", info.nbytes)
	}
}
// TestLogBacktraceAt verifies that setting traceLocation to a file:line
// makes a log call at that location emit a stack trace. The file:line
// must appear at least twice in the output: once in the log header and
// once in the trace itself.
func TestLogBacktraceAt(t *testing.T) {
	setFlags()
	defer logging.swap(logging.newBuffers())
	// The peculiar style of this code simplifies line counting and maintenance of the
	// tracing block below.
	var infoLine string
	setTraceLocation := func(file string, line int, ok bool, delta int) {
		if !ok {
			t.Fatal("could not get file:line")
		}
		_, file = filepath.Split(file)
		infoLine = fmt.Sprintf("%s:%d", file, line+delta)
		err := logging.traceLocation.Set(infoLine)
		if err != nil {
			t.Fatal("error setting log_backtrace_at: ", err)
		}
	}
	{
		// Start of tracing block. These lines know about each other's relative position.
		_, file, line, ok := runtime.Caller(0)
		setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls.
		Info("we want a stack trace here")
	}
	numAppearances := strings.Count(contents(infoLog), infoLine)
	if numAppearances < 2 {
		// Need 2 appearances, one in the log header and one in the trace:
		//   log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
		//   ...
		//   github.com/glog/glog_test.go:280 (0x41ba91)
		//   ...
		// We could be more precise but that would require knowing the details
		// of the traceback format, which may not be dependable.
		t.Fatal("got no trace back; log is ", contents(infoLog))
	}
}
// BenchmarkHeader measures the cost of formatting a single log header,
// returning each buffer to the free list so allocation is not counted.
func BenchmarkHeader(b *testing.B) {
	for i := 0; i < b.N; i++ {
		buf, _, _ := logging.header(infoLog, 0)
		logging.putBuffer(buf)
	}
}

@ -1,27 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package logger
// Log verbosity levels, ordered from least to most verbose (Error = 1
// through Detail = 5). Ridiculousness is an out-of-band maximum level.
const (
	Error = iota + 1
	Warn
	Info
	Debug
	Detail
	Ridiculousness = 100
)

@ -18,13 +18,13 @@
package metrics package metrics
import ( import (
"fmt"
"os" "os"
"runtime" "runtime"
"strings" "strings"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/rcrowley/go-metrics/exp" "github.com/rcrowley/go-metrics/exp"
) )
@ -41,7 +41,7 @@ var Enabled = false
func init() { func init() {
for _, arg := range os.Args { for _, arg := range os.Args {
if strings.TrimLeft(arg, "-") == MetricsEnabledFlag { if strings.TrimLeft(arg, "-") == MetricsEnabledFlag {
glog.V(logger.Info).Infof("Enabling metrics collection") log.Info(fmt.Sprintf("Enabling metrics collection"))
Enabled = true Enabled = true
} }
} }
@ -102,7 +102,7 @@ func CollectProcessMetrics(refresh time.Duration) {
diskWrites = metrics.GetOrRegisterMeter("system/disk/writecount", metrics.DefaultRegistry) diskWrites = metrics.GetOrRegisterMeter("system/disk/writecount", metrics.DefaultRegistry)
diskWriteBytes = metrics.GetOrRegisterMeter("system/disk/writedata", metrics.DefaultRegistry) diskWriteBytes = metrics.GetOrRegisterMeter("system/disk/writedata", metrics.DefaultRegistry)
} else { } else {
glog.V(logger.Debug).Infof("failed to read disk metrics: %v", err) log.Debug(fmt.Sprintf("failed to read disk metrics: %v", err))
} }
// Iterate loading the different stats and updating the meters // Iterate loading the different stats and updating the meters
for i := 1; ; i++ { for i := 1; ; i++ {

@ -17,14 +17,14 @@
package miner package miner
import ( import (
"fmt"
"sync" "sync"
"sync/atomic" "sync/atomic"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/pow"
) )
@ -108,7 +108,7 @@ done:
} }
func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) { func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
glog.V(logger.Debug).Infof("(re)started agent[%d]. mining...\n", self.index) log.Debug(fmt.Sprintf("(re)started agent[%d]. mining...\n", self.index))
// Mine // Mine
nonce, mixDigest := self.pow.Search(work.Block, stop, self.index) nonce, mixDigest := self.pow.Search(work.Block, stop, self.index)

@ -30,8 +30,7 @@ import (
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/pow"
) )
@ -87,7 +86,7 @@ out:
if self.Mining() { if self.Mining() {
self.Stop() self.Stop()
atomic.StoreInt32(&self.shouldStart, 1) atomic.StoreInt32(&self.shouldStart, 1)
glog.V(logger.Info).Infoln("Mining operation aborted due to sync operation") log.Info(fmt.Sprint("Mining operation aborted due to sync operation"))
} }
case downloader.DoneEvent, downloader.FailedEvent: case downloader.DoneEvent, downloader.FailedEvent:
shouldStart := atomic.LoadInt32(&self.shouldStart) == 1 shouldStart := atomic.LoadInt32(&self.shouldStart) == 1
@ -124,7 +123,7 @@ func (self *Miner) Start(coinbase common.Address, threads int) {
self.threads = threads self.threads = threads
if atomic.LoadInt32(&self.canStart) == 0 { if atomic.LoadInt32(&self.canStart) == 0 {
glog.V(logger.Info).Infoln("Can not start mining operation due to network sync (starts when finished)") log.Info(fmt.Sprint("Can not start mining operation due to network sync (starts when finished)"))
return return
} }
atomic.StoreInt32(&self.mining, 1) atomic.StoreInt32(&self.mining, 1)
@ -133,7 +132,7 @@ func (self *Miner) Start(coinbase common.Address, threads int) {
self.worker.register(NewCpuAgent(i, self.pow)) self.worker.register(NewCpuAgent(i, self.pow))
} }
glog.V(logger.Info).Infof("Starting mining operation (CPU=%d TOT=%d)\n", threads, len(self.worker.agents)) log.Info(fmt.Sprintf("Starting mining operation (CPU=%d TOT=%d)\n", threads, len(self.worker.agents)))
self.worker.start() self.worker.start()
self.worker.commitNewWork() self.worker.commitNewWork()
} }

@ -18,6 +18,7 @@ package miner
import ( import (
"errors" "errors"
"fmt"
"math/big" "math/big"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -26,8 +27,7 @@ import (
"github.com/ethereum/ethash" "github.com/ethereum/ethash"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/pow"
) )
@ -140,13 +140,13 @@ func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.
// Make sure the work submitted is present // Make sure the work submitted is present
work := a.work[hash] work := a.work[hash]
if work == nil { if work == nil {
glog.V(logger.Info).Infof("Work was submitted for %x but no pending work found", hash) log.Info(fmt.Sprintf("Work was submitted for %x but no pending work found", hash))
return false return false
} }
// Make sure the PoW solutions is indeed valid // Make sure the PoW solutions is indeed valid
block := work.Block.WithMiningResult(nonce, mixDigest) block := work.Block.WithMiningResult(nonce, mixDigest)
if !a.pow.Verify(block) { if !a.pow.Verify(block) {
glog.V(logger.Warn).Infof("Invalid PoW submitted for %x", hash) log.Warn(fmt.Sprintf("Invalid PoW submitted for %x", hash))
return false return false
} }
// Solutions seems to be valid, return to the miner and notify acceptance // Solutions seems to be valid, return to the miner and notify acceptance

@ -18,12 +18,12 @@ package miner
import ( import (
"container/ring" "container/ring"
"fmt"
"sync" "sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
// headerRetriever is used by the unconfirmed block set to verify whether a previously // headerRetriever is used by the unconfirmed block set to verify whether a previously
@ -80,7 +80,7 @@ func (set *unconfirmedBlocks) Insert(index uint64, hash common.Hash) {
set.blocks.Move(-1).Link(item) set.blocks.Move(-1).Link(item)
} }
// Display a log for the user to notify of a new mined block unconfirmed // Display a log for the user to notify of a new mined block unconfirmed
glog.V(logger.Info).Infof("🔨 mined potential block #%d [%x…], waiting for %d blocks to confirm", index, hash.Bytes()[:4], set.depth) log.Info(fmt.Sprintf("🔨 mined potential block #%d [%x…], waiting for %d blocks to confirm", index, hash.Bytes()[:4], set.depth))
} }
// Shift drops all unconfirmed blocks from the set which exceed the unconfirmed sets depth // Shift drops all unconfirmed blocks from the set which exceed the unconfirmed sets depth
@ -100,11 +100,11 @@ func (set *unconfirmedBlocks) Shift(height uint64) {
header := set.chain.GetHeaderByNumber(next.index) header := set.chain.GetHeaderByNumber(next.index)
switch { switch {
case header == nil: case header == nil:
glog.V(logger.Warn).Infof("failed to retrieve header of mined block #%d [%x…]", next.index, next.hash.Bytes()[:4]) log.Warn(fmt.Sprintf("failed to retrieve header of mined block #%d [%x…]", next.index, next.hash.Bytes()[:4]))
case header.Hash() == next.hash: case header.Hash() == next.hash:
glog.V(logger.Info).Infof("🔗 mined block #%d [%x…] reached canonical chain", next.index, next.hash.Bytes()[:4]) log.Info(fmt.Sprintf("🔗 mined block #%d [%x…] reached canonical chain", next.index, next.hash.Bytes()[:4]))
default: default:
glog.V(logger.Info).Infof("⑂ mined block #%d [%x…] became a side fork", next.index, next.hash.Bytes()[:4]) log.Info(fmt.Sprintf("⑂ mined block #%d [%x…] became a side fork", next.index, next.hash.Bytes()[:4]))
} }
// Drop the block out of the ring // Drop the block out of the ring
if set.blocks.Value == set.blocks.Next().Value { if set.blocks.Value == set.blocks.Next().Value {

@ -32,8 +32,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/pow" "github.com/ethereum/go-ethereum/pow"
"gopkg.in/fatih/set.v0" "gopkg.in/fatih/set.v0"
@ -278,7 +277,7 @@ func (self *worker) wait() {
if self.fullValidation { if self.fullValidation {
if _, err := self.chain.InsertChain(types.Blocks{block}); err != nil { if _, err := self.chain.InsertChain(types.Blocks{block}); err != nil {
glog.V(logger.Error).Infoln("mining err", err) log.Error(fmt.Sprint("mining err", err))
continue continue
} }
go self.mux.Post(core.NewMinedBlockEvent{Block: block}) go self.mux.Post(core.NewMinedBlockEvent{Block: block})
@ -286,19 +285,19 @@ func (self *worker) wait() {
work.state.Commit(self.config.IsEIP158(block.Number())) work.state.Commit(self.config.IsEIP158(block.Number()))
parent := self.chain.GetBlock(block.ParentHash(), block.NumberU64()-1) parent := self.chain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil { if parent == nil {
glog.V(logger.Error).Infoln("Invalid block found during mining") log.Error(fmt.Sprint("Invalid block found during mining"))
continue continue
} }
auxValidator := self.eth.BlockChain().AuxValidator() auxValidator := self.eth.BlockChain().AuxValidator()
if err := core.ValidateHeader(self.config, auxValidator, block.Header(), parent.Header(), true, false); err != nil && err != core.BlockFutureErr { if err := core.ValidateHeader(self.config, auxValidator, block.Header(), parent.Header(), true, false); err != nil && err != core.BlockFutureErr {
glog.V(logger.Error).Infoln("Invalid header on mined block:", err) log.Error(fmt.Sprint("Invalid header on mined block:", err))
continue continue
} }
stat, err := self.chain.WriteBlock(block) stat, err := self.chain.WriteBlock(block)
if err != nil { if err != nil {
glog.V(logger.Error).Infoln("error writing block to chain", err) log.Error(fmt.Sprint("error writing block to chain", err))
continue continue
} }
@ -334,7 +333,7 @@ func (self *worker) wait() {
self.mux.Post(logs) self.mux.Post(logs)
} }
if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil { if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
glog.V(logger.Warn).Infoln("error writing block receipts:", err) log.Warn(fmt.Sprint("error writing block receipts:", err))
} }
}(block, work.state.Logs(), work.receipts) }(block, work.state.Logs(), work.receipts)
} }
@ -427,7 +426,7 @@ func (self *worker) commitNewWork() {
// this will ensure we're not going off too far in the future // this will ensure we're not going off too far in the future
if now := time.Now().Unix(); tstamp > now+4 { if now := time.Now().Unix(); tstamp > now+4 {
wait := time.Duration(tstamp-now) * time.Second wait := time.Duration(tstamp-now) * time.Second
glog.V(logger.Info).Infoln("We are too far in the future. Waiting for", wait) log.Info(fmt.Sprint("We are too far in the future. Waiting for", wait))
time.Sleep(wait) time.Sleep(wait)
} }
@ -458,7 +457,7 @@ func (self *worker) commitNewWork() {
// Could potentially happen if starting to mine in an odd state. // Could potentially happen if starting to mine in an odd state.
err := self.makeCurrent(parent, header) err := self.makeCurrent(parent, header)
if err != nil { if err != nil {
glog.V(logger.Info).Infoln("Could not create new env for mining, retrying on next block.") log.Info(fmt.Sprint("Could not create new env for mining, retrying on next block."))
return return
} }
// Create the current work task and check any fork transitions needed // Create the current work task and check any fork transitions needed
@ -469,7 +468,7 @@ func (self *worker) commitNewWork() {
pending, err := self.eth.TxPool().Pending() pending, err := self.eth.TxPool().Pending()
if err != nil { if err != nil {
glog.Errorf("Could not fetch pending transactions: %v", err) log.Error(fmt.Sprintf("Could not fetch pending transactions: %v", err))
return return
} }
@ -489,13 +488,12 @@ func (self *worker) commitNewWork() {
break break
} }
if err := self.commitUncle(work, uncle.Header()); err != nil { if err := self.commitUncle(work, uncle.Header()); err != nil {
if glog.V(logger.Ridiculousness) { log.Trace(fmt.Sprintf("Bad uncle found and will be removed (%x)\n", hash[:4]))
glog.V(logger.Detail).Infof("Bad uncle found and will be removed (%x)\n", hash[:4]) log.Trace(fmt.Sprint(uncle))
glog.V(logger.Detail).Infoln(uncle)
}
badUncles = append(badUncles, hash) badUncles = append(badUncles, hash)
} else { } else {
glog.V(logger.Debug).Infof("committing %x as uncle\n", hash[:4]) log.Debug(fmt.Sprintf("committing %x as uncle\n", hash[:4]))
uncles = append(uncles, uncle.Header()) uncles = append(uncles, uncle.Header())
} }
} }
@ -514,7 +512,7 @@ func (self *worker) commitNewWork() {
// We only care about logging if we're actually mining. // We only care about logging if we're actually mining.
if atomic.LoadInt32(&self.mining) == 1 { if atomic.LoadInt32(&self.mining) == 1 {
glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart)) log.Info(fmt.Sprintf("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart)))
self.unconfirmed.Shift(work.Block.NumberU64() - 1) self.unconfirmed.Shift(work.Block.NumberU64() - 1)
} }
self.push(work) self.push(work)
@ -554,7 +552,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
// Check whether the tx is replay protected. If we're not in the EIP155 hf // Check whether the tx is replay protected. If we're not in the EIP155 hf
// phase, start ignoring the sender until we do. // phase, start ignoring the sender until we do.
if tx.Protected() && !env.config.IsEIP155(env.header.Number) { if tx.Protected() && !env.config.IsEIP155(env.header.Number) {
glog.V(logger.Detail).Infof("Transaction (%x) is replay protected, but we haven't yet hardforked. Transaction will be ignored until we hardfork.\n", tx.Hash()) log.Trace(fmt.Sprintf("Transaction (%x) is replay protected, but we haven't yet hardforked. Transaction will be ignored until we hardfork.\n", tx.Hash()))
txs.Pop() txs.Pop()
continue continue
@ -563,7 +561,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
// Ignore any transactions (and accounts subsequently) with low gas limits // Ignore any transactions (and accounts subsequently) with low gas limits
if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) { if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
// Pop the current low-priced transaction without shifting in the next from the account // Pop the current low-priced transaction without shifting in the next from the account
glog.V(logger.Info).Infof("Transaction (%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4]) log.Info(fmt.Sprintf("Transaction (%x) below gas price (tx=%v ask=%v). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], common.CurrencyToString(tx.GasPrice()), common.CurrencyToString(gasPrice), from[:4]))
env.lowGasTxs = append(env.lowGasTxs, tx) env.lowGasTxs = append(env.lowGasTxs, tx)
txs.Pop() txs.Pop()
@ -577,12 +575,12 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
switch { switch {
case core.IsGasLimitErr(err): case core.IsGasLimitErr(err):
// Pop the current out-of-gas transaction without shifting in the next from the account // Pop the current out-of-gas transaction without shifting in the next from the account
glog.V(logger.Detail).Infof("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4]) log.Trace(fmt.Sprintf("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4]))
txs.Pop() txs.Pop()
case err != nil: case err != nil:
// Pop the current failed transaction without shifting in the next from the account // Pop the current failed transaction without shifting in the next from the account
glog.V(logger.Detail).Infof("Transaction (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err) log.Trace(fmt.Sprintf("Transaction (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err))
env.failedTxs = append(env.failedTxs, tx) env.failedTxs = append(env.failedTxs, tx)
txs.Pop() txs.Pop()

@ -19,16 +19,15 @@
package geth package geth
import ( import (
"os"
"runtime" "runtime"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
func init() { func init() {
// Initialize the logger // Initialize the logger
glog.SetV(logger.Info) log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat())))
glog.SetToStderr(true)
// Initialize the goroutine count // Initialize the goroutine count
runtime.GOMAXPROCS(runtime.NumCPU()) runtime.GOMAXPROCS(runtime.NumCPU())

@ -17,10 +17,12 @@
package geth package geth
import ( import (
"github.com/ethereum/go-ethereum/logger/glog" "os"
"github.com/ethereum/go-ethereum/log"
) )
// SetVerbosity sets the global verbosity level (between 0 and 6 - see logger/verbosity.go). // SetVerbosity sets the global verbosity level (between 0 and 6 - see logger/verbosity.go).
func SetVerbosity(level int) { func SetVerbosity(level int) {
glog.SetV(level) log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(level), log.StreamHandler(os.Stderr, log.TerminalFormat())))
} }

@ -31,8 +31,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/usbwallet" "github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
@ -334,7 +333,7 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
if c.DataDir == "" { if c.DataDir == "" {
key, err := crypto.GenerateKey() key, err := crypto.GenerateKey()
if err != nil { if err != nil {
glog.Fatalf("Failed to generate ephemeral node key: %v", err) log.Crit(fmt.Sprintf("Failed to generate ephemeral node key: %v", err))
} }
return key return key
} }
@ -346,16 +345,16 @@ func (c *Config) NodeKey() *ecdsa.PrivateKey {
// No persistent key found, generate and store a new one. // No persistent key found, generate and store a new one.
key, err := crypto.GenerateKey() key, err := crypto.GenerateKey()
if err != nil { if err != nil {
glog.Fatalf("Failed to generate node key: %v", err) log.Crit(fmt.Sprintf("Failed to generate node key: %v", err))
} }
instanceDir := filepath.Join(c.DataDir, c.name()) instanceDir := filepath.Join(c.DataDir, c.name())
if err := os.MkdirAll(instanceDir, 0700); err != nil { if err := os.MkdirAll(instanceDir, 0700); err != nil {
glog.V(logger.Error).Infof("Failed to persist node key: %v", err) log.Error(fmt.Sprintf("Failed to persist node key: %v", err))
return key return key
} }
keyfile = filepath.Join(instanceDir, datadirPrivateKey) keyfile = filepath.Join(instanceDir, datadirPrivateKey)
if err := crypto.SaveECDSA(keyfile, key); err != nil { if err := crypto.SaveECDSA(keyfile, key); err != nil {
glog.V(logger.Error).Infof("Failed to persist node key: %v", err) log.Error(fmt.Sprintf("Failed to persist node key: %v", err))
} }
return key return key
} }
@ -383,7 +382,7 @@ func (c *Config) parsePersistentNodes(path string) []*discover.Node {
// Load the nodes from the config file. // Load the nodes from the config file.
var nodelist []string var nodelist []string
if err := common.LoadJSON(path, &nodelist); err != nil { if err := common.LoadJSON(path, &nodelist); err != nil {
glog.V(logger.Error).Infof("Can't load node file %s: %v", path, err) log.Error(fmt.Sprintf("Can't load node file %s: %v", path, err))
return nil return nil
} }
// Interpret the list as a discovery node array // Interpret the list as a discovery node array
@ -394,7 +393,7 @@ func (c *Config) parsePersistentNodes(path string) []*discover.Node {
} }
node, err := discover.ParseNode(url) node, err := discover.ParseNode(url)
if err != nil { if err != nil {
glog.V(logger.Error).Infof("Node URL %s: %v\n", url, err) log.Error(fmt.Sprintf("Node URL %s: %v\n", url, err))
continue continue
} }
nodes = append(nodes, node) nodes = append(nodes, node)
@ -442,7 +441,7 @@ func makeAccountManager(conf *Config) (*accounts.Manager, string, error) {
keystore.NewKeyStore(keydir, scryptN, scryptP), keystore.NewKeyStore(keydir, scryptN, scryptP),
} }
if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil { if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil {
glog.V(logger.Warn).Infof("Failed to start Ledger hub, disabling: %v", err) log.Warn(fmt.Sprintf("Failed to start Ledger hub, disabling: %v", err))
} else { } else {
backends = append(backends, ledgerhub) backends = append(backends, ledgerhub)
} }

@ -18,6 +18,7 @@ package node
import ( import (
"errors" "errors"
"fmt"
"net" "net"
"os" "os"
"path/filepath" "path/filepath"
@ -30,8 +31,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
"github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/storage"
@ -173,7 +173,7 @@ func (n *Node) Start() error {
MaxPendingPeers: n.config.MaxPendingPeers, MaxPendingPeers: n.config.MaxPendingPeers,
} }
running := &p2p.Server{Config: n.serverConfig} running := &p2p.Server{Config: n.serverConfig}
glog.V(logger.Info).Infoln("instance:", n.serverConfig.Name) log.Info(fmt.Sprint("instance:", n.serverConfig.Name))
// Otherwise copy and specialize the P2P configuration // Otherwise copy and specialize the P2P configuration
services := make(map[reflect.Type]Service) services := make(map[reflect.Type]Service)
@ -301,7 +301,7 @@ func (n *Node) startInProc(apis []rpc.API) error {
if err := handler.RegisterName(api.Namespace, api.Service); err != nil { if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
return err return err
} }
glog.V(logger.Debug).Infof("InProc registered %T under '%s'", api.Service, api.Namespace) log.Debug(fmt.Sprintf("InProc registered %T under '%s'", api.Service, api.Namespace))
} }
n.inprocHandler = handler n.inprocHandler = handler
return nil return nil
@ -327,7 +327,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
if err := handler.RegisterName(api.Namespace, api.Service); err != nil { if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
return err return err
} }
glog.V(logger.Debug).Infof("IPC registered %T under '%s'", api.Service, api.Namespace) log.Debug(fmt.Sprintf("IPC registered %T under '%s'", api.Service, api.Namespace))
} }
// All APIs registered, start the IPC listener // All APIs registered, start the IPC listener
var ( var (
@ -338,7 +338,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
return err return err
} }
go func() { go func() {
glog.V(logger.Info).Infof("IPC endpoint opened: %s", n.ipcEndpoint) log.Info(fmt.Sprintf("IPC endpoint opened: %s", n.ipcEndpoint))
for { for {
conn, err := listener.Accept() conn, err := listener.Accept()
@ -351,7 +351,7 @@ func (n *Node) startIPC(apis []rpc.API) error {
return return
} }
// Not closed, just some error; report and continue // Not closed, just some error; report and continue
glog.V(logger.Error).Infof("IPC accept failed: %v", err) log.Error(fmt.Sprintf("IPC accept failed: %v", err))
continue continue
} }
go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions) go handler.ServeCodec(rpc.NewJSONCodec(conn), rpc.OptionMethodInvocation|rpc.OptionSubscriptions)
@ -370,7 +370,7 @@ func (n *Node) stopIPC() {
n.ipcListener.Close() n.ipcListener.Close()
n.ipcListener = nil n.ipcListener = nil
glog.V(logger.Info).Infof("IPC endpoint closed: %s", n.ipcEndpoint) log.Info(fmt.Sprintf("IPC endpoint closed: %s", n.ipcEndpoint))
} }
if n.ipcHandler != nil { if n.ipcHandler != nil {
n.ipcHandler.Stop() n.ipcHandler.Stop()
@ -396,7 +396,7 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
if err := handler.RegisterName(api.Namespace, api.Service); err != nil { if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
return err return err
} }
glog.V(logger.Debug).Infof("HTTP registered %T under '%s'", api.Service, api.Namespace) log.Debug(fmt.Sprintf("HTTP registered %T under '%s'", api.Service, api.Namespace))
} }
} }
// All APIs registered, start the HTTP listener // All APIs registered, start the HTTP listener
@ -408,7 +408,7 @@ func (n *Node) startHTTP(endpoint string, apis []rpc.API, modules []string, cors
return err return err
} }
go rpc.NewHTTPServer(cors, handler).Serve(listener) go rpc.NewHTTPServer(cors, handler).Serve(listener)
glog.V(logger.Info).Infof("HTTP endpoint opened: http://%s", endpoint) log.Info(fmt.Sprintf("HTTP endpoint opened: http://%s", endpoint))
// All listeners booted successfully // All listeners booted successfully
n.httpEndpoint = endpoint n.httpEndpoint = endpoint
@ -424,7 +424,7 @@ func (n *Node) stopHTTP() {
n.httpListener.Close() n.httpListener.Close()
n.httpListener = nil n.httpListener = nil
glog.V(logger.Info).Infof("HTTP endpoint closed: http://%s", n.httpEndpoint) log.Info(fmt.Sprintf("HTTP endpoint closed: http://%s", n.httpEndpoint))
} }
if n.httpHandler != nil { if n.httpHandler != nil {
n.httpHandler.Stop() n.httpHandler.Stop()
@ -450,7 +450,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
if err := handler.RegisterName(api.Namespace, api.Service); err != nil { if err := handler.RegisterName(api.Namespace, api.Service); err != nil {
return err return err
} }
glog.V(logger.Debug).Infof("WebSocket registered %T under '%s'", api.Service, api.Namespace) log.Debug(fmt.Sprintf("WebSocket registered %T under '%s'", api.Service, api.Namespace))
} }
} }
// All APIs registered, start the HTTP listener // All APIs registered, start the HTTP listener
@ -462,7 +462,7 @@ func (n *Node) startWS(endpoint string, apis []rpc.API, modules []string, wsOrig
return err return err
} }
go rpc.NewWSServer(wsOrigins, handler).Serve(listener) go rpc.NewWSServer(wsOrigins, handler).Serve(listener)
glog.V(logger.Info).Infof("WebSocket endpoint opened: ws://%s", endpoint) log.Info(fmt.Sprintf("WebSocket endpoint opened: ws://%s", endpoint))
// All listeners booted successfully // All listeners booted successfully
n.wsEndpoint = endpoint n.wsEndpoint = endpoint
@ -478,7 +478,7 @@ func (n *Node) stopWS() {
n.wsListener.Close() n.wsListener.Close()
n.wsListener = nil n.wsListener = nil
glog.V(logger.Info).Infof("WebSocket endpoint closed: ws://%s", n.wsEndpoint) log.Info(fmt.Sprintf("WebSocket endpoint closed: ws://%s", n.wsEndpoint))
} }
if n.wsHandler != nil { if n.wsHandler != nil {
n.wsHandler.Stop() n.wsHandler.Stop()

@ -24,8 +24,7 @@ import (
"net" "net"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
) )
@ -134,7 +133,7 @@ func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now
var newtasks []task var newtasks []task
addDial := func(flag connFlag, n *discover.Node) bool { addDial := func(flag connFlag, n *discover.Node) bool {
if err := s.checkDial(n, peers); err != nil { if err := s.checkDial(n, peers); err != nil {
glog.V(logger.Debug).Infof("skipping dial candidate %x@%v:%d: %v", n.ID[:8], n.IP, n.TCP, err) log.Debug(fmt.Sprintf("skipping dial candidate %x@%v:%d: %v", n.ID[:8], n.IP, n.TCP, err))
return false return false
} }
s.dialing[n.ID] = flag s.dialing[n.ID] = flag
@ -163,7 +162,7 @@ func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now
err := s.checkDial(t.dest, peers) err := s.checkDial(t.dest, peers)
switch err { switch err {
case errNotWhitelisted, errSelf: case errNotWhitelisted, errSelf:
glog.V(logger.Debug).Infof("removing static dial candidate %x@%v:%d: %v", t.dest.ID[:8], t.dest.IP, t.dest.TCP, err) log.Debug(fmt.Sprintf("removing static dial candidate %x@%v:%d: %v", t.dest.ID[:8], t.dest.IP, t.dest.TCP, err))
delete(s.static, t.dest.ID) delete(s.static, t.dest.ID)
case nil: case nil:
s.dialing[id] = t.flags s.dialing[id] = t.flags
@ -267,7 +266,7 @@ func (t *dialTask) Do(srv *Server) {
// The backoff delay resets when the node is found. // The backoff delay resets when the node is found.
func (t *dialTask) resolve(srv *Server) bool { func (t *dialTask) resolve(srv *Server) bool {
if srv.ntab == nil { if srv.ntab == nil {
glog.V(logger.Debug).Infof("can't resolve node %x: discovery is disabled", t.dest.ID[:6]) log.Debug(fmt.Sprintf("can't resolve node %x: discovery is disabled", t.dest.ID[:6]))
return false return false
} }
if t.resolveDelay == 0 { if t.resolveDelay == 0 {
@ -283,23 +282,23 @@ func (t *dialTask) resolve(srv *Server) bool {
if t.resolveDelay > maxResolveDelay { if t.resolveDelay > maxResolveDelay {
t.resolveDelay = maxResolveDelay t.resolveDelay = maxResolveDelay
} }
glog.V(logger.Debug).Infof("resolving node %x failed (new delay: %v)", t.dest.ID[:6], t.resolveDelay) log.Debug(fmt.Sprintf("resolving node %x failed (new delay: %v)", t.dest.ID[:6], t.resolveDelay))
return false return false
} }
// The node was found. // The node was found.
t.resolveDelay = initialResolveDelay t.resolveDelay = initialResolveDelay
t.dest = resolved t.dest = resolved
glog.V(logger.Debug).Infof("resolved node %x: %v:%d", t.dest.ID[:6], t.dest.IP, t.dest.TCP) log.Debug(fmt.Sprintf("resolved node %x: %v:%d", t.dest.ID[:6], t.dest.IP, t.dest.TCP))
return true return true
} }
// dial performs the actual connection attempt. // dial performs the actual connection attempt.
func (t *dialTask) dial(srv *Server, dest *discover.Node) bool { func (t *dialTask) dial(srv *Server, dest *discover.Node) bool {
addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)} addr := &net.TCPAddr{IP: dest.IP, Port: int(dest.TCP)}
glog.V(logger.Debug).Infof("dial tcp %v (%x)", addr, dest.ID[:6]) log.Debug(fmt.Sprintf("dial tcp %v (%x)", addr, dest.ID[:6]))
fd, err := srv.Dialer.Dial("tcp", addr.String()) fd, err := srv.Dialer.Dial("tcp", addr.String())
if err != nil { if err != nil {
glog.V(logger.Detail).Infof("%v", err) log.Trace(fmt.Sprintf("%v", err))
return false return false
} }
mfd := newMeteredConn(fd, false) mfd := newMeteredConn(fd, false)

@ -23,13 +23,13 @@ import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"encoding/binary" "encoding/binary"
"fmt"
"os" "os"
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/errors"
@ -180,12 +180,12 @@ func (db *nodeDB) storeInt64(key []byte, n int64) error {
func (db *nodeDB) node(id NodeID) *Node { func (db *nodeDB) node(id NodeID) *Node {
blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil) blob, err := db.lvl.Get(makeKey(id, nodeDBDiscoverRoot), nil)
if err != nil { if err != nil {
glog.V(logger.Detail).Infof("failed to retrieve node %v: %v", id, err) log.Trace(fmt.Sprintf("failed to retrieve node %v: %v", id, err))
return nil return nil
} }
node := new(Node) node := new(Node)
if err := rlp.DecodeBytes(blob, node); err != nil { if err := rlp.DecodeBytes(blob, node); err != nil {
glog.V(logger.Warn).Infof("failed to decode node RLP: %v", err) log.Warn(fmt.Sprintf("failed to decode node RLP: %v", err))
return nil return nil
} }
node.sha = crypto.Keccak256Hash(node.ID[:]) node.sha = crypto.Keccak256Hash(node.ID[:])
@ -233,7 +233,7 @@ func (db *nodeDB) expirer() {
select { select {
case <-tick: case <-tick:
if err := db.expireNodes(); err != nil { if err := db.expireNodes(); err != nil {
glog.V(logger.Error).Infof("Failed to expire nodedb items: %v", err) log.Error(fmt.Sprintf("Failed to expire nodedb items: %v", err))
} }
case <-db.quit: case <-db.quit:
@ -352,9 +352,7 @@ func nextNode(it iterator.Iterator) *Node {
} }
var n Node var n Node
if err := rlp.DecodeBytes(it.Value(), &n); err != nil { if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
if glog.V(logger.Warn) { log.Warn(fmt.Sprintf("invalid node %x: %v", id, err))
glog.Errorf("invalid node %x: %v", id, err)
}
continue continue
} }
return &n return &n

@ -26,8 +26,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
const ( const (
@ -55,12 +54,12 @@ func checkClockDrift() {
howtofix := fmt.Sprintf("Please enable network time synchronisation in system settings") howtofix := fmt.Sprintf("Please enable network time synchronisation in system settings")
separator := strings.Repeat("-", len(warning)) separator := strings.Repeat("-", len(warning))
glog.V(logger.Warn).Info(separator) log.Warn(fmt.Sprint(separator))
glog.V(logger.Warn).Info(warning) log.Warn(fmt.Sprint(warning))
glog.V(logger.Warn).Info(howtofix) log.Warn(fmt.Sprint(howtofix))
glog.V(logger.Warn).Info(separator) log.Warn(fmt.Sprint(separator))
} else { } else {
glog.V(logger.Debug).Infof("Sanity NTP check reported %v drift, all ok", drift) log.Debug(fmt.Sprintf("Sanity NTP check reported %v drift, all ok", drift))
} }
} }

@ -34,8 +34,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
const ( const (
@ -278,10 +277,10 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
// Bump the failure counter to detect and evacuate non-bonded entries // Bump the failure counter to detect and evacuate non-bonded entries
fails := tab.db.findFails(n.ID) + 1 fails := tab.db.findFails(n.ID) + 1
tab.db.updateFindFails(n.ID, fails) tab.db.updateFindFails(n.ID, fails)
glog.V(logger.Detail).Infof("Bumping failures for %x: %d", n.ID[:8], fails) log.Trace(fmt.Sprintf("Bumping failures for %x: %d", n.ID[:8], fails))
if fails >= maxFindnodeFailures { if fails >= maxFindnodeFailures {
glog.V(logger.Detail).Infof("Evacuating node %x: %d findnode failures", n.ID[:8], fails) log.Trace(fmt.Sprintf("Evacuating node %x: %d findnode failures", n.ID[:8], fails))
tab.delete(n) tab.delete(n)
} }
} }
@ -384,14 +383,15 @@ func (tab *Table) doRefresh(done chan struct{}) {
// (hopefully) still alive. // (hopefully) still alive.
seeds := tab.db.querySeeds(seedCount, seedMaxAge) seeds := tab.db.querySeeds(seedCount, seedMaxAge)
seeds = tab.bondall(append(seeds, tab.nursery...)) seeds = tab.bondall(append(seeds, tab.nursery...))
if glog.V(logger.Debug) {
if len(seeds) == 0 { if len(seeds) == 0 {
glog.Infof("no seed nodes found") log.Debug(fmt.Sprintf("no seed nodes found"))
} }
for _, n := range seeds { for _, n := range seeds {
log.Debug("", "msg", log.Lazy{Fn: func() string {
age := time.Since(tab.db.lastPong(n.ID)) age := time.Since(tab.db.lastPong(n.ID))
glog.Infof("seed node (age %v): %v", age, n) return fmt.Sprintf("seed node (age %v): %v", age, n)
} }})
} }
tab.mutex.Lock() tab.mutex.Lock()
tab.stuff(seeds) tab.stuff(seeds)
@ -470,7 +470,7 @@ func (tab *Table) bond(pinged bool, id NodeID, addr *net.UDPAddr, tcpPort uint16
var result error var result error
age := time.Since(tab.db.lastPong(id)) age := time.Since(tab.db.lastPong(id))
if node == nil || fails > 0 || age > nodeDBNodeExpiration { if node == nil || fails > 0 || age > nodeDBNodeExpiration {
glog.V(logger.Detail).Infof("Bonding %x: known=%t, fails=%d age=%v", id[:8], node != nil, fails, age) log.Trace(fmt.Sprintf("Bonding %x: known=%t, fails=%d age=%v", id[:8], node != nil, fails, age))
tab.bondmu.Lock() tab.bondmu.Lock()
w := tab.bonding[id] w := tab.bonding[id]

@ -26,8 +26,7 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -224,7 +223,7 @@ func ListenUDP(priv *ecdsa.PrivateKey, laddr string, natm nat.Interface, nodeDBP
if err != nil { if err != nil {
return nil, err return nil, err
} }
glog.V(logger.Info).Infoln("Listening,", tab.self) log.Info(fmt.Sprint("Listening,", tab.self))
return tab, nil return tab, nil
} }
@ -294,7 +293,7 @@ func (t *udp) findnode(toid NodeID, toaddr *net.UDPAddr, target NodeID) ([]*Node
nreceived++ nreceived++
n, err := t.nodeFromRPC(toaddr, rn) n, err := t.nodeFromRPC(toaddr, rn)
if err != nil { if err != nil {
glog.V(logger.Detail).Infof("invalid neighbor node (%v) from %v: %v", rn.IP, toaddr, err) log.Trace(fmt.Sprintf("invalid neighbor node (%v) from %v: %v", rn.IP, toaddr, err))
continue continue
} }
nodes = append(nodes, n) nodes = append(nodes, n)
@ -464,9 +463,9 @@ func (t *udp) send(toaddr *net.UDPAddr, ptype byte, req interface{}) error {
if err != nil { if err != nil {
return err return err
} }
glog.V(logger.Detail).Infof(">>> %v %T", toaddr, req) log.Trace(fmt.Sprintf(">>> %v %T", toaddr, req))
if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil { if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
glog.V(logger.Detail).Infoln("UDP send failed:", err) log.Trace(fmt.Sprint("UDP send failed:", err))
} }
return err return err
} }
@ -476,13 +475,13 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) ([]byte,
b.Write(headSpace) b.Write(headSpace)
b.WriteByte(ptype) b.WriteByte(ptype)
if err := rlp.Encode(b, req); err != nil { if err := rlp.Encode(b, req); err != nil {
glog.V(logger.Error).Infoln("error encoding packet:", err) log.Error(fmt.Sprint("error encoding packet:", err))
return nil, err return nil, err
} }
packet := b.Bytes() packet := b.Bytes()
sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv) sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
if err != nil { if err != nil {
glog.V(logger.Error).Infoln("could not sign packet:", err) log.Error(fmt.Sprint("could not sign packet:", err))
return nil, err return nil, err
} }
copy(packet[macSize:], sig) copy(packet[macSize:], sig)
@ -504,11 +503,11 @@ func (t *udp) readLoop() {
nbytes, from, err := t.conn.ReadFromUDP(buf) nbytes, from, err := t.conn.ReadFromUDP(buf)
if netutil.IsTemporaryError(err) { if netutil.IsTemporaryError(err) {
// Ignore temporary read errors. // Ignore temporary read errors.
glog.V(logger.Debug).Infof("Temporary read error: %v", err) log.Debug(fmt.Sprintf("Temporary read error: %v", err))
continue continue
} else if err != nil { } else if err != nil {
// Shut down the loop for permament errors. // Shut down the loop for permament errors.
glog.V(logger.Debug).Infof("Read error: %v", err) log.Debug(fmt.Sprintf("Read error: %v", err))
return return
} }
t.handlePacket(from, buf[:nbytes]) t.handlePacket(from, buf[:nbytes])
@ -518,14 +517,14 @@ func (t *udp) readLoop() {
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error { func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
packet, fromID, hash, err := decodePacket(buf) packet, fromID, hash, err := decodePacket(buf)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("Bad packet from %v: %v", from, err) log.Debug(fmt.Sprintf("Bad packet from %v: %v", from, err))
return err return err
} }
status := "ok" status := "ok"
if err = packet.handle(t, from, fromID, hash); err != nil { if err = packet.handle(t, from, fromID, hash); err != nil {
status = err.Error() status = err.Error()
} }
glog.V(logger.Detail).Infof("<<< %v %T: %s", from, packet, status) log.Trace(fmt.Sprintf("<<< %v %T: %s", from, packet, status))
return err return err
} }

@ -23,13 +23,13 @@ import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"encoding/binary" "encoding/binary"
"fmt"
"os" "os"
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/errors"
@ -192,7 +192,7 @@ func (db *nodeDB) fetchRLP(key []byte, val interface{}) error {
} }
err = rlp.DecodeBytes(blob, val) err = rlp.DecodeBytes(blob, val)
if err != nil { if err != nil {
glog.V(logger.Warn).Infof("key %x (%T) %v", key, val, err) log.Warn(fmt.Sprintf("key %x (%T) %v", key, val, err))
} }
return err return err
} }
@ -244,7 +244,7 @@ func (db *nodeDB) expirer() {
select { select {
case <-tick: case <-tick:
if err := db.expireNodes(); err != nil { if err := db.expireNodes(); err != nil {
glog.V(logger.Error).Infof("Failed to expire nodedb items: %v", err) log.Error(fmt.Sprintf("Failed to expire nodedb items: %v", err))
} }
case <-db.quit: case <-db.quit:
@ -396,9 +396,7 @@ func nextNode(it iterator.Iterator) *Node {
} }
var n Node var n Node
if err := rlp.DecodeBytes(it.Value(), &n); err != nil { if err := rlp.DecodeBytes(it.Value(), &n); err != nil {
if glog.V(logger.Warn) { log.Warn(fmt.Sprintf("invalid node %x: %v", id, err))
glog.Errorf("invalid node %x: %v", id, err)
}
continue continue
} }
return &n return &n

@ -28,8 +28,7 @@ import (
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3" "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -437,10 +436,10 @@ loop:
if err := net.handle(n, pkt.ev, &pkt); err != nil { if err := net.handle(n, pkt.ev, &pkt); err != nil {
status = err.Error() status = err.Error()
} }
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string {
glog.Infof("<<< (%d) %v from %x@%v: %v -> %v (%v)", return fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)",
net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status) net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status)
} }})
// TODO: persist state if n.state goes >= known, delete if it goes <= known // TODO: persist state if n.state goes >= known, delete if it goes <= known
// State transition timeouts. // State transition timeouts.
@ -456,10 +455,10 @@ loop:
if err := net.handle(timeout.node, timeout.ev, nil); err != nil { if err := net.handle(timeout.node, timeout.ev, nil); err != nil {
status = err.Error() status = err.Error()
} }
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string {
glog.Infof("--- (%d) %v for %x@%v: %v -> %v (%v)", return fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)",
net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status) net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status)
} }})
// Querying. // Querying.
case q := <-net.queryReq: case q := <-net.queryReq:
@ -655,7 +654,7 @@ loop:
} }
debugLog("loop stopped") debugLog("loop stopped")
glog.V(logger.Debug).Infof("shutting down") log.Debug(fmt.Sprintf("shutting down"))
if net.conn != nil { if net.conn != nil {
net.conn.Close() net.conn.Close()
} }
@ -685,20 +684,20 @@ func (net *Network) refresh(done chan<- struct{}) {
seeds = net.nursery seeds = net.nursery
} }
if len(seeds) == 0 { if len(seeds) == 0 {
glog.V(logger.Detail).Info("no seed nodes found") log.Trace(fmt.Sprint("no seed nodes found"))
close(done) close(done)
return return
} }
for _, n := range seeds { for _, n := range seeds {
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
var age string var age string
if net.db != nil { if net.db != nil {
age = time.Since(net.db.lastPong(n.ID)).String() age = time.Since(net.db.lastPong(n.ID)).String()
} else { } else {
age = "unknown" age = "unknown"
} }
glog.Infof("seed node (age %s): %v", age, n) return fmt.Sprintf("seed node (age %s): %v", age, n)
} }})
n = net.internNodeFromDB(n) n = net.internNodeFromDB(n)
if n.state == unknown { if n.state == unknown {
net.transition(n, verifyinit) net.transition(n, verifyinit)
@ -1254,7 +1253,7 @@ func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error {
for i, rn := range req.Nodes { for i, rn := range req.Nodes {
nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn) nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err) log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err))
continue continue
} }
nodes[i] = nn nodes[i] = nn

@ -26,8 +26,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
) )
const ( const (
@ -55,12 +54,12 @@ func checkClockDrift() {
howtofix := fmt.Sprintf("Please enable network time synchronisation in system settings") howtofix := fmt.Sprintf("Please enable network time synchronisation in system settings")
separator := strings.Repeat("-", len(warning)) separator := strings.Repeat("-", len(warning))
glog.V(logger.Warn).Info(separator) log.Warn(fmt.Sprint(separator))
glog.V(logger.Warn).Info(warning) log.Warn(fmt.Sprint(warning))
glog.V(logger.Warn).Info(howtofix) log.Warn(fmt.Sprint(howtofix))
glog.V(logger.Warn).Info(separator) log.Warn(fmt.Sprint(separator))
} else { } else {
glog.V(logger.Debug).Infof("Sanity NTP check reported %v drift, all ok", drift) log.Debug(fmt.Sprintf("Sanity NTP check reported %v drift, all ok", drift))
} }
} }

@ -65,10 +65,6 @@ func TestSimTopics(t *testing.T) {
if runWithPlaygroundTime(t) { if runWithPlaygroundTime(t) {
return return
} }
// glog.SetV(6)
// glog.SetToStderr(true)
sim := newSimulation() sim := newSimulation()
bootnode := sim.launchNode(false) bootnode := sim.launchNode(false)
@ -158,10 +154,6 @@ func TestSimTopicHierarchy(t *testing.T) {
if runWithPlaygroundTime(t) { if runWithPlaygroundTime(t) {
return return
} }
// glog.SetV(6)
// glog.SetToStderr(true)
sim := newSimulation() sim := newSimulation()
bootnode := sim.launchNode(false) bootnode := sim.launchNode(false)

@ -26,8 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
@ -348,9 +347,9 @@ func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req inter
//fmt.Println(err) //fmt.Println(err)
return hash, err return hash, err
} }
glog.V(logger.Detail).Infof(">>> %v to %x@%v", nodeEvent(ptype), toid[:8], toaddr) log.Trace(fmt.Sprintf(">>> %v to %x@%v", nodeEvent(ptype), toid[:8], toaddr))
if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil { if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
glog.V(logger.Detail).Infoln("UDP send failed:", err) log.Trace(fmt.Sprint("UDP send failed:", err))
} }
//fmt.Println(err) //fmt.Println(err)
return hash, err return hash, err
@ -364,13 +363,13 @@ func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (p, hash
b.Write(headSpace) b.Write(headSpace)
b.WriteByte(ptype) b.WriteByte(ptype)
if err := rlp.Encode(b, req); err != nil { if err := rlp.Encode(b, req); err != nil {
glog.V(logger.Error).Infoln("error encoding packet:", err) log.Error(fmt.Sprint("error encoding packet:", err))
return nil, nil, err return nil, nil, err
} }
packet := b.Bytes() packet := b.Bytes()
sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv) sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv)
if err != nil { if err != nil {
glog.V(logger.Error).Infoln("could not sign packet:", err) log.Error(fmt.Sprint("could not sign packet:", err))
return nil, nil, err return nil, nil, err
} }
copy(packet[macSize:], sig) copy(packet[macSize:], sig)
@ -393,11 +392,11 @@ func (t *udp) readLoop() {
nbytes, from, err := t.conn.ReadFromUDP(buf) nbytes, from, err := t.conn.ReadFromUDP(buf)
if netutil.IsTemporaryError(err) { if netutil.IsTemporaryError(err) {
// Ignore temporary read errors. // Ignore temporary read errors.
glog.V(logger.Debug).Infof("Temporary read error: %v", err) log.Debug(fmt.Sprintf("Temporary read error: %v", err))
continue continue
} else if err != nil { } else if err != nil {
// Shut down the loop for permament errors. // Shut down the loop for permament errors.
glog.V(logger.Debug).Infof("Read error: %v", err) log.Debug(fmt.Sprintf("Read error: %v", err))
return return
} }
t.handlePacket(from, buf[:nbytes]) t.handlePacket(from, buf[:nbytes])
@ -407,7 +406,7 @@ func (t *udp) readLoop() {
func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error { func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error {
pkt := ingressPacket{remoteAddr: from} pkt := ingressPacket{remoteAddr: from}
if err := decodePacket(buf, &pkt); err != nil { if err := decodePacket(buf, &pkt); err != nil {
glog.V(logger.Debug).Infof("Bad packet from %v: %v", from, err) log.Debug(fmt.Sprintf("Bad packet from %v: %v", from, err))
//fmt.Println("bad packet", err) //fmt.Println("bad packet", err)
return err return err
} }

@ -25,8 +25,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/jackpal/go-nat-pmp" "github.com/jackpal/go-nat-pmp"
) )
@ -102,13 +101,13 @@ func Map(m Interface, c chan struct{}, protocol string, extport, intport int, na
refresh := time.NewTimer(mapUpdateInterval) refresh := time.NewTimer(mapUpdateInterval)
defer func() { defer func() {
refresh.Stop() refresh.Stop()
glog.V(logger.Debug).Infof("deleting port mapping: %s %d -> %d (%s) using %s", protocol, extport, intport, name, m) log.Debug(fmt.Sprintf("deleting port mapping: %s %d -> %d (%s) using %s", protocol, extport, intport, name, m))
m.DeleteMapping(protocol, extport, intport) m.DeleteMapping(protocol, extport, intport)
}() }()
if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil { if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil {
glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v", protocol, intport, err) log.Debug(fmt.Sprintf("network port %s:%d could not be mapped: %v", protocol, intport, err))
} else { } else {
glog.V(logger.Info).Infof("mapped network port %s:%d -> %d (%s) using %s", protocol, extport, intport, name, m) log.Info(fmt.Sprintf("mapped network port %s:%d -> %d (%s) using %s", protocol, extport, intport, name, m))
} }
for { for {
select { select {
@ -117,9 +116,9 @@ func Map(m Interface, c chan struct{}, protocol string, extport, intport int, na
return return
} }
case <-refresh.C: case <-refresh.C:
glog.V(logger.Detail).Infof("refresh port mapping %s:%d -> %d (%s) using %s", protocol, extport, intport, name, m) log.Trace(fmt.Sprintf("refresh port mapping %s:%d -> %d (%s) using %s", protocol, extport, intport, name, m))
if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil { if err := m.AddMapping(protocol, extport, intport, name, mapTimeout); err != nil {
glog.V(logger.Debug).Infof("network port %s:%d could not be mapped: %v", protocol, intport, err) log.Debug(fmt.Sprintf("network port %s:%d could not be mapped: %v", protocol, intport, err))
} }
refresh.Reset(mapUpdateInterval) refresh.Reset(mapUpdateInterval)
} }

@ -25,8 +25,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -157,27 +156,27 @@ loop:
// A write finished. Allow the next write to start if // A write finished. Allow the next write to start if
// there was no error. // there was no error.
if err != nil { if err != nil {
glog.V(logger.Detail).Infof("%v: write error: %v", p, err) log.Trace(fmt.Sprintf("%v: write error: %v", p, err))
reason = DiscNetworkError reason = DiscNetworkError
break loop break loop
} }
writeStart <- struct{}{} writeStart <- struct{}{}
case err := <-readErr: case err := <-readErr:
if r, ok := err.(DiscReason); ok { if r, ok := err.(DiscReason); ok {
glog.V(logger.Debug).Infof("%v: remote requested disconnect: %v", p, r) log.Debug(fmt.Sprintf("%v: remote requested disconnect: %v", p, r))
requested = true requested = true
reason = r reason = r
} else { } else {
glog.V(logger.Detail).Infof("%v: read error: %v", p, err) log.Trace(fmt.Sprintf("%v: read error: %v", p, err))
reason = DiscNetworkError reason = DiscNetworkError
} }
break loop break loop
case err := <-p.protoErr: case err := <-p.protoErr:
reason = discReasonForError(err) reason = discReasonForError(err)
glog.V(logger.Debug).Infof("%v: protocol error: %v (%v)", p, err, reason) log.Debug(fmt.Sprintf("%v: protocol error: %v (%v)", p, err, reason))
break loop break loop
case reason = <-p.disc: case reason = <-p.disc:
glog.V(logger.Debug).Infof("%v: locally requested disconnect: %v", p, reason) log.Debug(fmt.Sprintf("%v: locally requested disconnect: %v", p, reason))
break loop break loop
} }
} }
@ -298,14 +297,14 @@ func (p *Peer) startProtocols(writeStart <-chan struct{}, writeErr chan<- error)
proto.closed = p.closed proto.closed = p.closed
proto.wstart = writeStart proto.wstart = writeStart
proto.werr = writeErr proto.werr = writeErr
glog.V(logger.Detail).Infof("%v: Starting protocol %s/%d", p, proto.Name, proto.Version) log.Trace(fmt.Sprintf("%v: Starting protocol %s/%d", p, proto.Name, proto.Version))
go func() { go func() {
err := proto.Run(p, proto) err := proto.Run(p, proto)
if err == nil { if err == nil {
glog.V(logger.Detail).Infof("%v: Protocol %s/%d returned", p, proto.Name, proto.Version) log.Trace(fmt.Sprintf("%v: Protocol %s/%d returned", p, proto.Name, proto.Version))
err = errors.New("protocol returned") err = errors.New("protocol returned")
} else if err != io.EOF { } else if err != io.EOF {
glog.V(logger.Detail).Infof("%v: Protocol %s/%d error: %v", p, proto.Name, proto.Version, err) log.Trace(fmt.Sprintf("%v: Protocol %s/%d error: %v", p, proto.Name, proto.Version, err))
} }
p.protoErr <- err p.protoErr <- err
p.wg.Done() p.wg.Done()

@ -25,8 +25,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/nat"
@ -337,7 +336,7 @@ func (srv *Server) Start() (err error) {
return errors.New("server already running") return errors.New("server already running")
} }
srv.running = true srv.running = true
glog.V(logger.Info).Infoln("Starting Server") log.Info(fmt.Sprint("Starting Server"))
// static fields // static fields
if srv.PrivateKey == nil { if srv.PrivateKey == nil {
@ -399,7 +398,7 @@ func (srv *Server) Start() (err error) {
} }
} }
if srv.NoDial && srv.ListenAddr == "" { if srv.NoDial && srv.ListenAddr == "" {
glog.V(logger.Warn).Infoln("I will be kind-of useless, neither dialing nor listening.") log.Warn(fmt.Sprint("I will be kind-of useless, neither dialing nor listening."))
} }
srv.loopWG.Add(1) srv.loopWG.Add(1)
@ -467,7 +466,7 @@ func (srv *Server) run(dialstate dialer) {
i := 0 i := 0
for ; len(runningTasks) < maxActiveDialTasks && i < len(ts); i++ { for ; len(runningTasks) < maxActiveDialTasks && i < len(ts); i++ {
t := ts[i] t := ts[i]
glog.V(logger.Detail).Infoln("new task:", t) log.Trace(fmt.Sprint("new task:", t))
go func() { t.Do(srv); taskdone <- t }() go func() { t.Do(srv); taskdone <- t }()
runningTasks = append(runningTasks, t) runningTasks = append(runningTasks, t)
} }
@ -490,19 +489,19 @@ running:
select { select {
case <-srv.quit: case <-srv.quit:
// The server was stopped. Run the cleanup logic. // The server was stopped. Run the cleanup logic.
glog.V(logger.Detail).Infoln("<-quit: spinning down") log.Trace(fmt.Sprint("<-quit: spinning down"))
break running break running
case n := <-srv.addstatic: case n := <-srv.addstatic:
// This channel is used by AddPeer to add to the // This channel is used by AddPeer to add to the
// ephemeral static peer list. Add it to the dialer, // ephemeral static peer list. Add it to the dialer,
// it will keep the node connected. // it will keep the node connected.
glog.V(logger.Detail).Infoln("<-addstatic:", n) log.Trace(fmt.Sprint("<-addstatic:", n))
dialstate.addStatic(n) dialstate.addStatic(n)
case n := <-srv.removestatic: case n := <-srv.removestatic:
// This channel is used by RemovePeer to send a // This channel is used by RemovePeer to send a
// disconnect request to a peer and begin the // disconnect request to a peer and begin the
// stop keeping the node connected // stop keeping the node connected
glog.V(logger.Detail).Infoln("<-removestatic:", n) log.Trace(fmt.Sprint("<-removestatic:", n))
dialstate.removeStatic(n) dialstate.removeStatic(n)
if p, ok := peers[n.ID]; ok { if p, ok := peers[n.ID]; ok {
p.Disconnect(DiscRequested) p.Disconnect(DiscRequested)
@ -515,7 +514,7 @@ running:
// A task got done. Tell dialstate about it so it // A task got done. Tell dialstate about it so it
// can update its state and remove it from the active // can update its state and remove it from the active
// tasks list. // tasks list.
glog.V(logger.Detail).Infoln("<-taskdone:", t) log.Trace(fmt.Sprint("<-taskdone:", t))
dialstate.taskDone(t, time.Now()) dialstate.taskDone(t, time.Now())
delTask(t) delTask(t)
case c := <-srv.posthandshake: case c := <-srv.posthandshake:
@ -525,16 +524,16 @@ running:
// Ensure that the trusted flag is set before checking against MaxPeers. // Ensure that the trusted flag is set before checking against MaxPeers.
c.flags |= trustedConn c.flags |= trustedConn
} }
glog.V(logger.Detail).Infoln("<-posthandshake:", c) log.Trace(fmt.Sprint("<-posthandshake:", c))
// TODO: track in-progress inbound node IDs (pre-Peer) to avoid dialing them. // TODO: track in-progress inbound node IDs (pre-Peer) to avoid dialing them.
c.cont <- srv.encHandshakeChecks(peers, c) c.cont <- srv.encHandshakeChecks(peers, c)
case c := <-srv.addpeer: case c := <-srv.addpeer:
// At this point the connection is past the protocol handshake. // At this point the connection is past the protocol handshake.
// Its capabilities are known and the remote identity is verified. // Its capabilities are known and the remote identity is verified.
glog.V(logger.Detail).Infoln("<-addpeer:", c) log.Trace(fmt.Sprint("<-addpeer:", c))
err := srv.protoHandshakeChecks(peers, c) err := srv.protoHandshakeChecks(peers, c)
if err != nil { if err != nil {
glog.V(logger.Detail).Infof("Not adding %v as peer: %v", c, err) log.Trace(fmt.Sprintf("Not adding %v as peer: %v", c, err))
} else { } else {
// The handshakes are done and it passed all checks. // The handshakes are done and it passed all checks.
p := newPeer(c, srv.Protocols) p := newPeer(c, srv.Protocols)
@ -547,7 +546,7 @@ running:
c.cont <- err c.cont <- err
case p := <-srv.delpeer: case p := <-srv.delpeer:
// A peer disconnected. // A peer disconnected.
glog.V(logger.Detail).Infoln("<-delpeer:", p) log.Trace(fmt.Sprint("<-delpeer:", p))
delete(peers, p.ID()) delete(peers, p.ID())
} }
} }
@ -566,10 +565,10 @@ running:
// Wait for peers to shut down. Pending connections and tasks are // Wait for peers to shut down. Pending connections and tasks are
// not handled here and will terminate soon-ish because srv.quit // not handled here and will terminate soon-ish because srv.quit
// is closed. // is closed.
glog.V(logger.Detail).Infof("ignoring %d pending tasks at spindown", len(runningTasks)) log.Trace(fmt.Sprintf("ignoring %d pending tasks at spindown", len(runningTasks)))
for len(peers) > 0 { for len(peers) > 0 {
p := <-srv.delpeer p := <-srv.delpeer
glog.V(logger.Detail).Infoln("<-delpeer (spindown):", p) log.Trace(fmt.Sprint("<-delpeer (spindown):", p))
delete(peers, p.ID()) delete(peers, p.ID())
} }
} }
@ -605,7 +604,7 @@ type tempError interface {
// inbound connections. // inbound connections.
func (srv *Server) listenLoop() { func (srv *Server) listenLoop() {
defer srv.loopWG.Done() defer srv.loopWG.Done()
glog.V(logger.Info).Infoln("Listening on", srv.listener.Addr()) log.Info(fmt.Sprint("Listening on", srv.listener.Addr()))
// This channel acts as a semaphore limiting // This channel acts as a semaphore limiting
// active inbound connections that are lingering pre-handshake. // active inbound connections that are lingering pre-handshake.
@ -630,10 +629,10 @@ func (srv *Server) listenLoop() {
for { for {
fd, err = srv.listener.Accept() fd, err = srv.listener.Accept()
if tempErr, ok := err.(tempError); ok && tempErr.Temporary() { if tempErr, ok := err.(tempError); ok && tempErr.Temporary() {
glog.V(logger.Debug).Infof("Temporary read error: %v", err) log.Debug(fmt.Sprintf("Temporary read error: %v", err))
continue continue
} else if err != nil { } else if err != nil {
glog.V(logger.Debug).Infof("Read error: %v", err) log.Debug(fmt.Sprintf("Read error: %v", err))
return return
} }
break break
@ -642,7 +641,7 @@ func (srv *Server) listenLoop() {
// Reject connections that do not match NetRestrict. // Reject connections that do not match NetRestrict.
if srv.NetRestrict != nil { if srv.NetRestrict != nil {
if tcp, ok := fd.RemoteAddr().(*net.TCPAddr); ok && !srv.NetRestrict.Contains(tcp.IP) { if tcp, ok := fd.RemoteAddr().(*net.TCPAddr); ok && !srv.NetRestrict.Contains(tcp.IP) {
glog.V(logger.Debug).Infof("Rejected conn %v because it is not whitelisted in NetRestrict", fd.RemoteAddr()) log.Debug(fmt.Sprintf("Rejected conn %v because it is not whitelisted in NetRestrict", fd.RemoteAddr()))
fd.Close() fd.Close()
slots <- struct{}{} slots <- struct{}{}
continue continue
@ -650,7 +649,7 @@ func (srv *Server) listenLoop() {
} }
fd = newMeteredConn(fd, true) fd = newMeteredConn(fd, true)
glog.V(logger.Debug).Infof("Accepted conn %v", fd.RemoteAddr()) log.Debug(fmt.Sprintf("Accepted conn %v", fd.RemoteAddr()))
// Spawn the handler. It will give the slot back when the connection // Spawn the handler. It will give the slot back when the connection
// has been established. // has been established.
@ -677,36 +676,36 @@ func (srv *Server) setupConn(fd net.Conn, flags connFlag, dialDest *discover.Nod
// Run the encryption handshake. // Run the encryption handshake.
var err error var err error
if c.id, err = c.doEncHandshake(srv.PrivateKey, dialDest); err != nil { if c.id, err = c.doEncHandshake(srv.PrivateKey, dialDest); err != nil {
glog.V(logger.Debug).Infof("%v faild enc handshake: %v", c, err) log.Debug(fmt.Sprintf("%v faild enc handshake: %v", c, err))
c.close(err) c.close(err)
return return
} }
// For dialed connections, check that the remote public key matches. // For dialed connections, check that the remote public key matches.
if dialDest != nil && c.id != dialDest.ID { if dialDest != nil && c.id != dialDest.ID {
c.close(DiscUnexpectedIdentity) c.close(DiscUnexpectedIdentity)
glog.V(logger.Debug).Infof("%v dialed identity mismatch, want %x", c, dialDest.ID[:8]) log.Debug(fmt.Sprintf("%v dialed identity mismatch, want %x", c, dialDest.ID[:8]))
return return
} }
if err := srv.checkpoint(c, srv.posthandshake); err != nil { if err := srv.checkpoint(c, srv.posthandshake); err != nil {
glog.V(logger.Debug).Infof("%v failed checkpoint posthandshake: %v", c, err) log.Debug(fmt.Sprintf("%v failed checkpoint posthandshake: %v", c, err))
c.close(err) c.close(err)
return return
} }
// Run the protocol handshake // Run the protocol handshake
phs, err := c.doProtoHandshake(srv.ourHandshake) phs, err := c.doProtoHandshake(srv.ourHandshake)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("%v failed proto handshake: %v", c, err) log.Debug(fmt.Sprintf("%v failed proto handshake: %v", c, err))
c.close(err) c.close(err)
return return
} }
if phs.ID != c.id { if phs.ID != c.id {
glog.V(logger.Debug).Infof("%v wrong proto handshake identity: %x", c, phs.ID[:8]) log.Debug(fmt.Sprintf("%v wrong proto handshake identity: %x", c, phs.ID[:8]))
c.close(DiscUnexpectedIdentity) c.close(DiscUnexpectedIdentity)
return return
} }
c.caps, c.name = phs.Caps, phs.Name c.caps, c.name = phs.Caps, phs.Name
if err := srv.checkpoint(c, srv.addpeer); err != nil { if err := srv.checkpoint(c, srv.addpeer); err != nil {
glog.V(logger.Debug).Infof("%v failed checkpoint addpeer: %v", c, err) log.Debug(fmt.Sprintf("%v failed checkpoint addpeer: %v", c, err))
c.close(err) c.close(err)
return return
} }
@ -734,7 +733,7 @@ func (srv *Server) checkpoint(c *conn, stage chan<- *conn) error {
// it waits until the Peer logic returns and removes // it waits until the Peer logic returns and removes
// the peer. // the peer.
func (srv *Server) runPeer(p *Peer) { func (srv *Server) runPeer(p *Peer) {
glog.V(logger.Debug).Infof("Added %v\n", p) log.Debug(fmt.Sprintf("Added %v", p))
if srv.newPeerHook != nil { if srv.newPeerHook != nil {
srv.newPeerHook(p) srv.newPeerHook(p)
@ -744,7 +743,7 @@ func (srv *Server) runPeer(p *Peer) {
// before returning, so this send should not select on srv.quit. // before returning, so this send should not select on srv.quit.
srv.delpeer <- p srv.delpeer <- p
glog.V(logger.Debug).Infof("Removed %v (%v)\n", p, discreason) log.Debug(fmt.Sprintf("Removed %v (%v)", p, discreason))
} }
// NodeInfo represents a short summary of the information known about the host. // NodeInfo represents a short summary of the information known about the host.

@ -31,8 +31,7 @@ import (
) )
func init() { func init() {
// glog.SetV(6) // log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StreamHandler(os.Stderr, log.TerminalFormat())))
// glog.SetToStderr(true)
} }
type testTransport struct { type testTransport struct {

@ -30,8 +30,7 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/logger/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@ -408,9 +407,9 @@ func (c *Client) newMessage(method string, paramsIn ...interface{}) (*jsonrpcMes
func (c *Client) send(ctx context.Context, op *requestOp, msg interface{}) error { func (c *Client) send(ctx context.Context, op *requestOp, msg interface{}) error {
select { select {
case c.requestOp <- op: case c.requestOp <- op:
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string {
glog.Info("sending ", msg) return fmt.Sprint("sending ", msg)
} }})
err := c.write(ctx, msg) err := c.write(ctx, msg)
c.sendDone <- err c.sendDone <- err
return err return err
@ -445,7 +444,7 @@ func (c *Client) write(ctx context.Context, msg interface{}) error {
func (c *Client) reconnect(ctx context.Context) error { func (c *Client) reconnect(ctx context.Context) error {
newconn, err := c.connectFunc(ctx) newconn, err := c.connectFunc(ctx)
if err != nil { if err != nil {
glog.V(logger.Detail).Infof("reconnect failed: %v", err) log.Trace(fmt.Sprintf("reconnect failed: %v", err))
return err return err
} }
select { select {
@ -496,31 +495,31 @@ func (c *Client) dispatch(conn net.Conn) {
for _, msg := range batch { for _, msg := range batch {
switch { switch {
case msg.isNotification(): case msg.isNotification():
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string {
glog.Info("<-readResp: notification ", msg) return fmt.Sprint("<-readResp: notification ", msg)
} }})
c.handleNotification(msg) c.handleNotification(msg)
case msg.isResponse(): case msg.isResponse():
if glog.V(logger.Detail) { log.Trace("", "msg", log.Lazy{Fn: func() string {
glog.Info("<-readResp: response ", msg) return fmt.Sprint("<-readResp: response ", msg)
} }})
c.handleResponse(msg) c.handleResponse(msg)
default: default:
if glog.V(logger.Debug) { log.Debug("", "msg", log.Lazy{Fn: func() string {
glog.Error("<-readResp: dropping weird message", msg) return fmt.Sprint("<-readResp: dropping weird message", msg)
} }})
// TODO: maybe close // TODO: maybe close
} }
} }
case err := <-c.readErr: case err := <-c.readErr:
glog.V(logger.Debug).Infof("<-readErr: %v", err) log.Debug(fmt.Sprintf("<-readErr: %v", err))
c.closeRequestOps(err) c.closeRequestOps(err)
conn.Close() conn.Close()
reading = false reading = false
case newconn := <-c.reconnected: case newconn := <-c.reconnected:
glog.V(logger.Debug).Infof("<-reconnected: (reading=%t) %v", reading, conn.RemoteAddr()) log.Debug(fmt.Sprintf("<-reconnected: (reading=%t) %v", reading, conn.RemoteAddr()))
if reading { if reading {
// Wait for the previous read loop to exit. This is a rare case. // Wait for the previous read loop to exit. This is a rare case.
conn.Close() conn.Close()
@ -577,7 +576,7 @@ func (c *Client) closeRequestOps(err error) {
func (c *Client) handleNotification(msg *jsonrpcMessage) { func (c *Client) handleNotification(msg *jsonrpcMessage) {
if msg.Method != notificationMethod { if msg.Method != notificationMethod {
glog.V(logger.Debug).Info("dropping non-subscription message: ", msg) log.Debug(fmt.Sprint("dropping non-subscription message: ", msg))
return return
} }
var subResult struct { var subResult struct {
@ -585,7 +584,7 @@ func (c *Client) handleNotification(msg *jsonrpcMessage) {
Result json.RawMessage `json:"result"` Result json.RawMessage `json:"result"`
} }
if err := json.Unmarshal(msg.Params, &subResult); err != nil { if err := json.Unmarshal(msg.Params, &subResult); err != nil {
glog.V(logger.Debug).Info("dropping invalid subscription message: ", msg) log.Debug(fmt.Sprint("dropping invalid subscription message: ", msg))
return return
} }
if c.subs[subResult.ID] != nil { if c.subs[subResult.ID] != nil {
@ -596,7 +595,7 @@ func (c *Client) handleNotification(msg *jsonrpcMessage) {
func (c *Client) handleResponse(msg *jsonrpcMessage) { func (c *Client) handleResponse(msg *jsonrpcMessage) {
op := c.respWait[string(msg.ID)] op := c.respWait[string(msg.ID)]
if op == nil { if op == nil {
glog.V(logger.Debug).Infof("unsolicited response %v", msg) log.Debug(fmt.Sprintf("unsolicited response %v", msg))
return return
} }
delete(c.respWait, string(msg.ID)) delete(c.respWait, string(msg.ID))

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save