build: upgrade to go 1.19 (#25726)

This changes the CI / release builds to use the latest Go version. It also
upgrades golangci-lint to a newer version compatible with Go 1.19.

In Go 1.19, godoc has gained official support for links and lists. The
syntax for code blocks in doc comments has changed and now requires a
leading tab character. gofmt adapts comments to the new syntax
automatically, so there are a lot of comment re-formatting changes in this
PR. We need to apply the new format in order to pass the CI lint stage with
Go 1.19.
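For illustration, here is roughly what a doc comment looks like once gofmt has rewritten it to the Go 1.19 conventions (bullet lists, link references, tab-indented code blocks). The package and identifiers below are hypothetical; this snippet is a minimal sketch and is not part of the diff:

// Package docdemo is a hypothetical example of a Go 1.19 doc comment.
//
// List items are written with an indented dash, and wrapped lines are
// aligned under the item text:
//
//   - first item, with a continuation line
//     aligned under the text
//   - second item
//
// Code blocks are indented; gofmt normalizes the indentation to a single
// leading tab after the comment marker:
//
//	sum := 1 + 2
//	fmt.Println(sum)
//
// Links are written as bracketed references, e.g. [Go doc comments],
// with the target declared at the end of the comment:
//
// [Go doc comments]: https://go.dev/doc/comment
package docdemo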

With the linter upgrade, I have decided to disable 'gosec' - it produces
too many false-positive warnings. The 'deadcode' and 'varcheck' linters
have also been removed because golangci-lint warns about them being
unmaintained. 'unused' provides similar coverage and we already have it
enabled, so we don't lose much with this change.
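As a rough sketch of why dropping 'deadcode' and 'varcheck' loses little (hypothetical file, not part of this PR): staticcheck's 'unused' check reports the same kinds of findings.

package example

// Hypothetical dead code. With 'deadcode' and 'varcheck' removed from
// .golangci.yml, the 'unused' linter (staticcheck check U1000) still
// reports both identifiers below, assuming nothing in the package
// references them.
var stalePrice = 42 // would previously have been flagged by varcheck

func legacyHelper() {} // would previously have been flagged by deadcode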
pull/25733/head
Felix Lange, committed by GitHub
parent 389021a5af
commit b628d72766
.golangci.yml | 17
.travis.yml | 20
accounts/abi/abi_test.go | 6
accounts/abi/bind/backends/simulated_test.go | 5
accounts/abi/reflect.go | 21
accounts/abi/utils.go | 5
accounts/accounts.go | 2
accounts/scwallet/wallet.go | 3
accounts/url.go | 1
accounts/usbwallet/ledger.go | 2
accounts/usbwallet/trezor.go | 6
build/checksums.txt | 70
build/ci.go | 5
cmd/evm/internal/t8ntool/transition.go | 1
cmd/p2psim/main.go | 1
common/hexutil/hexutil.go | 2
common/prque/lazyqueue.go | 1
consensus/beacon/consensus.go | 1
consensus/ethash/api.go | 1
consensus/ethash/sealer.go | 1
consensus/misc/dao.go | 7
core/beacon/types.go | 2
core/blockchain_test.go | 5
core/mkalloc.go | 2
core/state/snapshot/generate_test.go | 2
core/state_transition.go | 48
core/vm/gas_table.go | 53
core/vm/instructions.go | 26
crypto/secp256k1/curve.go | 1
crypto/secp256k1/libsecp256k1/contrib/dummy.go | 1
crypto/secp256k1/libsecp256k1/dummy.go | 1
crypto/secp256k1/libsecp256k1/include/dummy.go | 1
crypto/secp256k1/libsecp256k1/src/dummy.go | 1
crypto/secp256k1/libsecp256k1/src/modules/dummy.go | 1
crypto/secp256k1/libsecp256k1/src/modules/ecdh/dummy.go | 1
crypto/secp256k1/libsecp256k1/src/modules/recovery/dummy.go | 1
eth/catalyst/api.go | 22
eth/catalyst/api_test.go | 2
eth/downloader/downloader.go | 2
eth/downloader/queue.go | 1
eth/downloader/resultstore.go | 1
eth/gasprice/feehistory.go | 1
eth/protocols/snap/sync_test.go | 4
eth/tracers/native/4byte.go | 1
eth/tracers/native/tracer.go | 32
ethdb/leveldb/leveldb.go | 1
ethstats/ethstats.go | 16
internal/ethapi/api.go | 6
les/api.go | 2
les/catalyst/api.go | 22
les/downloader/downloader.go | 2
les/downloader/queue.go | 1
les/downloader/resultstore.go | 1
les/fetcher.go | 2
light/txpool.go | 17
log/doc.go | 36
log/format.go | 2
log/handler.go | 4
metrics/influxdb/influxdbv2.go | 8
mobile/big.go | 1
mobile/doc.go | 2
node/doc.go | 14
p2p/dial.go | 1
p2p/discover/v5wire/encoding_test.go | 1
p2p/dnsdisc/tree.go | 10
p2p/enr/enr.go | 2
p2p/message.go | 1
p2p/simulations/adapters/types.go | 7
params/denomination.go | 1
rlp/doc.go | 9
rpc/doc.go | 7
signer/core/apitypes/types.go | 2
tests/block_test_util.go | 3
tests/fuzzers/bls12381/precompile_fuzzer.go | 10
tests/fuzzers/difficulty/difficulty-fuzz.go | 8
tests/fuzzers/rangeproof/rangeproof-fuzzer.go | 9
tests/fuzzers/stacktrie/trie_fuzzer.go | 8
tests/fuzzers/trie/trie-fuzzer.go | 8
tests/init_test.go | 1
trie/stacktrie.go | 1
80 files changed

@ -12,7 +12,6 @@ run:
linters:
disable-all: true
enable:
- deadcode
- goconst
- goimports
- gosimple
@ -20,14 +19,12 @@ linters:
- ineffassign
- misspell
- unconvert
- varcheck
- typecheck
- unused
- staticcheck
- bidichk
- durationcheck
- exportloopref
- gosec
- whitespace
# - structcheck # lots of false positives
@ -45,11 +42,6 @@ linters-settings:
goconst:
min-len: 3 # minimum length of string constant
min-occurrences: 6 # minimum number of occurrences
gosec:
excludes:
- G404 # Use of weak random number generator - lots of FP
- G107 # Potential http request -- those are intentional
- G306 # G306: Expect WriteFile permissions to be 0600 or less
issues:
exclude-rules:
@ -58,16 +50,15 @@ issues:
- deadcode
- staticcheck
- path: internal/build/pgp.go
text: 'SA1019: package golang.org/x/crypto/openpgp is deprecated'
text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
- path: core/vm/contracts.go
text: 'SA1019: package golang.org/x/crypto/ripemd160 is deprecated'
text: 'SA1019: "golang.org/x/crypto/ripemd160" is deprecated: RIPEMD-160 is a legacy hash and should not be used for new applications.'
- path: accounts/usbwallet/trezor.go
text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
- path: accounts/usbwallet/trezor/
text: 'SA1019: package github.com/golang/protobuf/proto is deprecated'
text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
exclude:
- 'SA1019: event.TypeMux is deprecated: use Feed'
- 'SA1019: strings.Title is deprecated'
- 'SA1019: strings.Title has been deprecated since Go 1.18 and an alternative has been available since Go 1.0: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.'
- 'SA1029: should not use built-in type string as key for value'
- 'G306: Expect WriteFile permissions to be 0600 or less'

@ -16,7 +16,7 @@ jobs:
- stage: lint
os: linux
dist: bionic
go: 1.18.x
go: 1.19.x
env:
- lint
git:
@ -31,7 +31,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
go: 1.18.x
go: 1.19.x
env:
- docker
services:
@ -48,7 +48,7 @@ jobs:
os: linux
arch: arm64
dist: bionic
go: 1.18.x
go: 1.19.x
env:
- docker
services:
@ -65,7 +65,7 @@ jobs:
if: type = push
os: linux
dist: bionic
go: 1.18.x
go: 1.19.x
env:
- ubuntu-ppa
- GO111MODULE=on
@ -90,7 +90,7 @@ jobs:
os: linux
dist: bionic
sudo: required
go: 1.18.x
go: 1.19.x
env:
- azure-linux
- GO111MODULE=on
@ -162,7 +162,7 @@ jobs:
- stage: build
if: type = push
os: osx
go: 1.18.x
go: 1.19.x
env:
- azure-osx
- azure-ios
@ -194,7 +194,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
go: 1.18.x
go: 1.19.x
env:
- GO111MODULE=on
script:
@ -214,7 +214,7 @@ jobs:
- stage: build
os: linux
dist: bionic
go: 1.17.x
go: 1.18.x
env:
- GO111MODULE=on
script:
@ -225,7 +225,7 @@ jobs:
if: type = cron
os: linux
dist: bionic
go: 1.18.x
go: 1.19.x
env:
- azure-purge
- GO111MODULE=on
@ -239,7 +239,7 @@ jobs:
if: type = cron
os: linux
dist: bionic
go: 1.18.x
go: 1.19.x
env:
- GO111MODULE=on
script:

@ -165,6 +165,7 @@ func TestInvalidABI(t *testing.T) {
// TestConstructor tests a constructor function.
// The test is based on the following contract:
//
// contract TestConstructor {
// constructor(uint256 a, uint256 b) public{}
// }
@ -724,6 +725,7 @@ func TestBareEvents(t *testing.T) {
}
// TestUnpackEvent is based on this contract:
//
// contract T {
// event received(address sender, uint amount, bytes memo);
// event receivedAddr(address sender);
@ -732,7 +734,9 @@ func TestBareEvents(t *testing.T) {
// receivedAddr(msg.sender);
// }
// }
//
// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
//
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
func TestUnpackEvent(t *testing.T) {
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
@ -1078,6 +1082,7 @@ func TestDoubleDuplicateMethodNames(t *testing.T) {
// TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name
// conflict and that the second send event will be renamed send1.
// The test runs the abi of the following contract.
//
// contract DuplicateEvent {
// event send(uint256 a);
// event send0();
@ -1106,6 +1111,7 @@ func TestDoubleDuplicateEventNames(t *testing.T) {
// TestUnnamedEventParam checks that an event with unnamed parameters is
// correctly handled.
// The test runs the abi of the following contract.
//
// contract TestEvent {
// event send(uint256, uint256);
// }

@ -94,6 +94,7 @@ func TestSimulatedBackend(t *testing.T) {
var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
// the following is based on this contract:
//
// contract T {
// event received(address sender, uint amount, bytes memo);
// event receivedAddr(address sender);
@ -422,7 +423,8 @@ func TestEstimateGas(t *testing.T) {
function OOG() public { for (uint i = 0; ; i++) {}}
function Assert() public { assert(false);}
function Valid() public {}
}*/
}
*/
const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
const contractBin = "0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033"
@ -994,6 +996,7 @@ func TestCodeAt(t *testing.T) {
}
// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
//
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
func TestPendingAndCallContract(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)

@ -25,15 +25,18 @@ import (
)
// ConvertType converts an interface of a runtime type into a interface of the
// given type
// e.g. turn
// given type, e.g. turn this code:
//
// var fields []reflect.StructField
//
// fields = append(fields, reflect.StructField{
// Name: "X",
// Type: reflect.TypeOf(new(big.Int)),
// Tag: reflect.StructTag("json:\"" + "x" + "\""),
// }
// into
//
// into:
//
// type TupleT struct { X *big.Int }
func ConvertType(in interface{}, proto interface{}) interface{} {
protoType := reflect.TypeOf(proto)
@ -170,11 +173,13 @@ func setStruct(dst, src reflect.Value) error {
}
// mapArgNamesToStructFields maps a slice of argument names to struct fields.
// first round: for each Exportable field that contains a `abi:""` tag
// and this field name exists in the given argument name list, pair them together.
// second round: for each argument name that has not been already linked,
// find what variable is expected to be mapped into, if it exists and has not been
// used, pair them.
//
// first round: for each Exportable field that contains a `abi:""` tag and this field name
// exists in the given argument name list, pair them together.
//
// second round: for each argument name that has not been already linked, find what
// variable is expected to be mapped into, if it exists and has not been used, pair them.
//
// Note this function assumes the given value is a struct value.
func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
typ := value.Type()

@ -27,9 +27,8 @@ import "fmt"
// and struct definition) name will be converted to camelcase style which
// may eventually lead to name conflicts.
//
// Name conflicts are mostly resolved by adding number suffix.
// e.g. if the abi contains Methods send, send1
// ResolveNameConflict would return send2 for input send.
// Name conflicts are mostly resolved by adding number suffix. e.g. if the abi contains
// Methods "send" and "send1", ResolveNameConflict would return "send2" for input "send".
func ResolveNameConflict(rawName string, used func(string) bool) string {
name := rawName
ok := used(name)

@ -177,6 +177,7 @@ type Backend interface {
// safely used to calculate a signature from.
//
// The hash is calculated as
//
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
@ -189,6 +190,7 @@ func TextHash(data []byte) []byte {
// safely used to calculate a signature from.
//
// The hash is calculated as
//
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.

@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
}
// derivationPath fetches the wallet's current derivation path from the card.
//
//lint:ignore U1000 needs to be added to the console interface
func (s *Session) derivationPath() (accounts.DerivationPath, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
@ -994,6 +995,7 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
}
// keyExport contains information on an exported keypair.
//
//lint:ignore U1000 needs to be added to the console interface
type keyExport struct {
PublicKey []byte `asn1:"tag:0"`
@ -1001,6 +1003,7 @@ type keyExport struct {
}
// publicKey returns the public key for the current derivation path.
//
//lint:ignore U1000 needs to be added to the console interface
func (s *Session) publicKey() ([]byte, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)

@ -95,7 +95,6 @@ func (u *URL) UnmarshalJSON(input []byte) error {
// -1 if x < y
// 0 if x == y
// +1 if x > y
//
func (u URL) Cmp(url URL) int {
if u.Scheme == url.Scheme {
return strings.Compare(u.Path, url.Path)

@ -407,8 +407,6 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction
// domain hash | 32 bytes
// message hash | 32 bytes
//
//
//
// And the output data is:
//
// Description | Length

@ -84,14 +84,14 @@ func (w *trezorDriver) Status() (string, error) {
// Open implements usbwallet.driver, attempting to initialize the connection to
// the Trezor hardware wallet. Initializing the Trezor is a two or three phase operation:
// * The first phase is to initialize the connection and read the wallet's
// - The first phase is to initialize the connection and read the wallet's
// features. This phase is invoked if the provided passphrase is empty. The
// device will display the pinpad as a result and will return an appropriate
// error to notify the user that a second open phase is needed.
// * The second phase is to unlock access to the Trezor, which is done by the
// - The second phase is to unlock access to the Trezor, which is done by the
// user actually providing a passphrase mapping a keyboard keypad to the pin
// number of the user (shuffled according to the pinpad displayed).
// * If needed the device will ask for passphrase which will require calling
// - If needed the device will ask for passphrase which will require calling
// open again with the actual passphrase (3rd phase)
func (w *trezorDriver) Open(device io.ReadWriter, passphrase string) error {
w.device, w.failure = device, nil

@ -1,38 +1,38 @@
# This file contains sha256 checksums of optional build dependencies.
9920d3306a1ac536cdd2c796d6cb3c54bc559c226fc3cc39c32f1e0bd7f50d2a go1.18.5.src.tar.gz
828eeca8b5abea3e56921df8fa4b1101380a5ebcfee10acbc8ffe7ec0bf5876b go1.18.5.darwin-amd64.tar.gz
923a377c6fc9a2c789f5db61c24b8f64133f7889056897449891f256af34065f go1.18.5.darwin-arm64.tar.gz
c3d90264a706e2d88cfb44126dc6f0d008a48f00732e04bc377cea1a2b716a7c go1.18.5.freebsd-386.tar.gz
0de23843c568d388bc0f0e390a8966938cccaae0d74b698325f7175bac04e0c6 go1.18.5.freebsd-amd64.tar.gz
0c44f85d146c6f98c34e8ff436a42af22e90e36fe232d3d9d3101f23fd61362b go1.18.5.linux-386.tar.gz
9e5de37f9c49942c601b191ac5fba404b868bfc21d446d6960acc12283d6e5f2 go1.18.5.linux-amd64.tar.gz
006f6622718212363fa1ff004a6ab4d87bbbe772ec5631bab7cac10be346e4f1 go1.18.5.linux-arm64.tar.gz
d5ac34ac5f060a5274319aa04b7b11e41b123bd7887d64efb5f44ead236957af go1.18.5.linux-armv6l.tar.gz
2e37fb9c7cbaedd4e729492d658aa4cde821fc94117391a8105c13b25ca1c84b go1.18.5.linux-ppc64le.tar.gz
e3d536e7873639f85353e892444f83b14cb6670603961f215986ae8e28e8e07a go1.18.5.linux-s390x.tar.gz
7b3142ec0c5db991e7f73a231662a92429b90ee151fe47557acb566d8d9ae4d3 go1.18.5.windows-386.zip
73753620602d4b4469770040c53db55e5dd6af2ad07ecc18f71f164c3224eaad go1.18.5.windows-amd64.zip
4d154626affff12ef73ea1017af0e5b52dbc839ef92f6f9e76cf4f71278a5744 go1.18.5.windows-arm64.zip
27871baa490f3401414ad793fba49086f6c855b1c584385ed7771e1204c7e179 go1.19.1.src.tar.gz
b2828a2b05f0d2169afc74c11ed010775bf7cf0061822b275697b2f470495fb7 go1.19.1.darwin-amd64.tar.gz
e46aecce83a9289be16ce4ba9b8478a5b89b8aa0230171d5c6adbc0c66640548 go1.19.1.darwin-arm64.tar.gz
cfaca8c1d5784d2bc21e12d8893cfd2dc885a60db4c1a9a95e4ffc694d0925ce go1.19.1.freebsd-386.tar.gz
db5b8f232e12c655cc6cde6af1adf4d27d842541807802d747c86161e89efa0a go1.19.1.freebsd-amd64.tar.gz
9acc57342400c5b0c2da07b5b01b50da239dd4a7fad41a1fb56af8363ef4133f go1.19.1.linux-386.tar.gz
acc512fbab4f716a8f97a8b3fbaa9ddd39606a28be6c2515ef7c6c6311acffde go1.19.1.linux-amd64.tar.gz
49960821948b9c6b14041430890eccee58c76b52e2dbaafce971c3c38d43df9f go1.19.1.linux-arm64.tar.gz
efe93f5671621ee84ce5e262e1e21acbc72acefbaba360f21778abd083d4ad16 go1.19.1.linux-armv6l.tar.gz
4137984aa353de9c5ec1bd8fb3cd00a0624b75eafa3d4ec13d2f3f48261dba2e go1.19.1.linux-ppc64le.tar.gz
ca1005cc80a3dd726536b4c6ea5fef0318939351ff273eff420bd66a377c74eb go1.19.1.linux-s390x.tar.gz
bc7043e7a9a8d34aacd06f8c2f70e166d1d148f6800814cff790c45b9ab31cee go1.19.1.windows-386.zip
b33584c1d93b0e9c783de876b7aa99d3018bdeccd396aeb6d516a74e9d88d55f go1.19.1.windows-amd64.zip
d8cf3f04762fa7d5d9c82dfa15b5adaae2404463af3bc8dcd7f89837512501fe go1.19.1.windows-arm64.zip
658078aaaf7608693f37c4cf1380b2af418ab8b2d23fdb33e7e2d4339328590e golangci-lint-1.46.2-darwin-amd64.tar.gz
81f9b4afd62ec5e612ef8bc3b1d612a88b56ff289874831845cdad394427385f golangci-lint-1.46.2-darwin-arm64.tar.gz
943486e703e62ec55ecd90caeb22bcd39f8cc3962a93eec18c06b7bae12cb46f golangci-lint-1.46.2-freebsd-386.tar.gz
a75dd9ba7e08e8315c411697171db5375c0f6a1ece9e6fbeb9e9a4386822e17d golangci-lint-1.46.2-freebsd-amd64.tar.gz
83eedca1af72e8be055a1235177eb1b33524fbf08bec5730df2e6c3efade2b23 golangci-lint-1.46.2-freebsd-armv6.tar.gz
513d276c490de6f82baa01f9346d8d78b385f2ae97608f42f05d1f0f1314cd54 golangci-lint-1.46.2-freebsd-armv7.tar.gz
461a60016d516c69d406dc3e2d4957b722dbe684b7085dfac4802d0f84409e27 golangci-lint-1.46.2-linux-386.tar.gz
242cd4f2d6ac0556e315192e8555784d13da5d1874e51304711570769c4f2b9b golangci-lint-1.46.2-linux-amd64.tar.gz
ff5448ada2b3982581984d64b0dec614dba0a3ea4cab2d6a343c77927fc89f7e golangci-lint-1.46.2-linux-arm64.tar.gz
177f5210ef04aee282bfbc6ec519d36af5fb7d2b2c8d3f4ea5e59fdba71b0a27 golangci-lint-1.46.2-linux-armv6.tar.gz
10dd512a36ee978a1009edbca3ba3af410f0fda8df4d85f0e4793a24213870cc golangci-lint-1.46.2-linux-armv7.tar.gz
67779fa517c688c9db1090c3c456117d95c6b92979c623fe8cce8fb84251f21e golangci-lint-1.46.2-linux-mips64.tar.gz
c085f0f57bdccbb2c902a41b72ce210a3dfff16ca856789374745ab52004b6ee golangci-lint-1.46.2-linux-mips64le.tar.gz
abecef6421499248e58ed75d2938bc12b4b1f98b057f25060680b77bb51a881e golangci-lint-1.46.2-linux-ppc64le.tar.gz
134843a8f5c5c182c11979ea75f5866945d54757b2a04f3e5e04a0cf4fbf3a39 golangci-lint-1.46.2-linux-riscv64.tar.gz
9fe21a9476567aafe7a2e1a926b9641a39f920d4c0ea8eda9d968bc6136337f9 golangci-lint-1.46.2-linux-s390x.tar.gz
b48a421ec12a43f8fc8f977b9cf7d4a1ea1c4b97f803a238de7d3ce4ab23a84b golangci-lint-1.46.2-windows-386.zip
604acc1378a566abb0eac799362f3a37b7fcb5fa2268aeb2d5d954c829367301 golangci-lint-1.46.2-windows-amd64.zip
927def10db073da9687594072e6a3d9c891f67fa897105a2cfd715e018e7386c golangci-lint-1.46.2-windows-arm64.zip
729b76ed1d8b4e2612e38772b211503cb940e00a137bbaace1aa066f7c943737 golangci-lint-1.46.2-windows-armv6.zip
ea27c86d91e0b245ecbcfbf6cdb4ac0522d4bc6dca56bba02ea1bc77ad2917ac golangci-lint-1.46.2-windows-armv7.zip
20cd1215e0420db8cfa94a6cd3c9d325f7b39c07f2415a02d111568d8bc9e271 golangci-lint-1.49.0-darwin-amd64.tar.gz
cabb1a4c35fe1dadbe5a81550a00871281a331e7660cd85ae16e936a7f0f6cfc golangci-lint-1.49.0-darwin-arm64.tar.gz
f834c3b09580cf763b5d30b0c33c83cb13d7a822b5ed5d724143f121ffe28c97 golangci-lint-1.49.0-freebsd-386.tar.gz
4ca91c9f3aa79a71da441b7220a3e799365ff7a24caf9f04fcda12066c5ab0f7 golangci-lint-1.49.0-freebsd-amd64.tar.gz
37de789245248eea375d05080e11b4662a08762c353752575167611e65658454 golangci-lint-1.49.0-freebsd-armv6.tar.gz
3abed2bd3a8134b501fdc9cc9a0e60d616c86389e4fcdd1f79ceae7458974378 golangci-lint-1.49.0-freebsd-armv7.tar.gz
ef2860d90d83aee6713f697f23372cd93ac41a16439fdcb3c4ac86ba0f306860 golangci-lint-1.49.0-linux-386.tar.gz
5badc6e9fee2003621efa07e385910d9a88c89b38f6c35aded153193c5125178 golangci-lint-1.49.0-linux-amd64.tar.gz
b57ed03d29b8ca69be9925edd67ea305b6013cd5c97507d205fbe2979f71f2b5 golangci-lint-1.49.0-linux-arm64.tar.gz
4a41cff3af7f5304751f7bbf4ea617c14ebc1f88481a28a013e61b06d1f7102c golangci-lint-1.49.0-linux-armv6.tar.gz
14a9683af483ee7052dd0ce7d6140e0b502d6001bea3de606b8e7cce2c673539 golangci-lint-1.49.0-linux-armv7.tar.gz
33edf757bc2611204fdb40b212900866a57ded4eea62c1b19c10bfc375359afa golangci-lint-1.49.0-linux-mips64.tar.gz
280f7902f90d162566f1691a300663dd8db6e225e65384fe66b6fb2362e0b314 golangci-lint-1.49.0-linux-mips64le.tar.gz
103bcb7ce6c668e0a7e95e5c5355892d74f5d15391443430472e66d652906a15 golangci-lint-1.49.0-linux-ppc64le.tar.gz
4636ff9b01ddb18a2c1a953fc134207711b0a5d874d04ac66f915e9cfff0e8e0 golangci-lint-1.49.0-linux-riscv64.tar.gz
029e0844931a2d3edc771d67e17fe17928f04f80c1a9aa165160a543e8a7e8d4 golangci-lint-1.49.0-linux-s390x.tar.gz
e9cb6f691e62a4d8b28dd52d2eab57cca72acfd5083b3c5417a72d2eb64def09 golangci-lint-1.49.0-windows-386.zip
d058dfb0c7fbd73be70f285d3f8d4d424192fe9b19760ddbb0b2c4b743b8656c golangci-lint-1.49.0-windows-amd64.zip
c049d80297228db7065eabeac5114f77f04415dcd9b944e8d7c6426d9dd6e9dd golangci-lint-1.49.0-windows-arm64.zip
ec9164bab7134ddb94f51c17fd37c109b0801ecd5494b6c0e27ca7898fbd7469 golangci-lint-1.49.0-windows-armv6.zip
68fd9e880d98073f436c58b6f6d2c141881ef49b06ca31137bc19da4e4e3b996 golangci-lint-1.49.0-windows-armv7.zip

@ -36,7 +36,6 @@ Available commands are:
purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
For all commands, -n prevents execution of external programs (dry run mode).
*/
package main
@ -149,7 +148,7 @@ var (
// This is the version of go that will be downloaded by
//
// go run ci.go install -dlgo
dlgoVersion = "1.18.5"
dlgoVersion = "1.19.1"
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -347,7 +346,7 @@ func doLint(cmdline []string) {
// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
const version = "1.46.2"
const version = "1.49.0"
csdb := build.MustLoadChecksums("build/checksums.txt")
arch := runtime.GOARCH

@ -336,6 +336,7 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error {
// The transactions can have two forms, either
// 1. unsigned or
// 2. signed
//
// For (1), r, s, v, need so be zero, and the `secretKey` needs to be set.
// If so, we sign it here and now, with the given `secretKey`
// If the condition above is not met, then it's considered a signed transaction.

@ -33,7 +33,6 @@
//
// $ p2psim node connect node01 node02
// Connected node01 to node02
//
package main
import (

@ -18,7 +18,7 @@
Package hexutil implements hex encoding with 0x prefix.
This encoding is used by the Ethereum RPC API to transport binary data in JSON payloads.
Encoding Rules
# Encoding Rules
All hex data must have prefix "0x".

@ -29,6 +29,7 @@ import (
// - priority evaluates the actual priority of an item
// - maxPriority gives an upper estimate for the priority in any moment between
// now and the given absolute time
//
// If the upper estimate is exceeded then Update should be called for that item.
// A global Refresh function should also be called periodically.
type LazyQueue struct {

@ -228,6 +228,7 @@ func (beacon *Beacon) VerifyUncles(chain consensus.ChainReader, block *types.Blo
// - nonce is expected to be 0
// - unclehash is expected to be Hash(emptyHeader)
// to be the desired constants
//
// (b) we don't verify if a block is in the future anymore
// (c) the extradata is limited to 32 bytes
func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header) error {

@ -34,6 +34,7 @@ type API struct {
// GetWork returns a work package for external miner.
//
// The work package consists of 3 strings:
//
// result[0] - 32 bytes hex encoded current block header pow-hash
// result[1] - 32 bytes hex encoded seed hash used for DAG
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty

@ -339,6 +339,7 @@ func (s *remoteSealer) loop() {
// makeWork creates a work package for external miner.
//
// The work package consists of 3 strings:
//
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty

@ -40,9 +40,10 @@ var (
// ensure it conforms to DAO hard-fork rules.
//
// DAO hard-fork extension to the header validity:
// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range
// with the fork specific extra-data set
// b) if the node is pro-fork, require blocks in the specific range to have the
//
// - if the node is no-fork, do not accept blocks in the [fork, fork+10) range
// with the fork specific extra-data set.
// - if the node is pro-fork, require blocks in the specific range to have the
// unique extra-data set.
func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error {
// Short circuit validation if the node doesn't care about the DAO fork

@ -136,9 +136,11 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
// ExecutableDataToBlock constructs a block from executable data.
// It verifies that the following fields:
//
// len(extraData) <= 32
// uncleHash = emptyUncleHash
// difficulty = 0
//
// and that the blockhash of the constructed block matches the parameters.
func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
txs, err := decodeTransactions(params.Transactions)

@ -2027,8 +2027,10 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
// - C is canon chain, containing blocks [G..Cn..Cm]
// - The common ancestor Cc is pruned
// - The first block in S: Sn, is == Cn
//
// That is: the sidechain for import contains some blocks already present in canon chain.
// So the blocks are
// So the blocks are:
//
// [ Cn, Cn+1, Cc, Sn+3 ... Sm]
// ^ ^ ^ pruned
func TestPrunedImportSide(t *testing.T) {
@ -3282,7 +3284,6 @@ func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
// to the destructset in case something is created "onto" an existing item.
// We need to either roll back the snapDestructs, or not place it into snapDestructs
// in the first place.
//
func TestInitThenFailCreateContract(t *testing.T) {
var (
engine = ethash.NewFaker()

@ -18,12 +18,10 @@
// +build none
/*
The mkalloc tool creates the genesis allocation constants in genesis_alloc.go
It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples.
go run mkalloc.go genesis.json
*/
package main

@ -220,10 +220,12 @@ func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) {
// - miss in the beginning
// - miss in the middle
// - miss in the end
//
// - the contract(non-empty storage) has wrong storage slots
// - wrong slots in the beginning
// - wrong slots in the middle
// - wrong slots in the end
//
// - the contract(non-empty storage) has extra storage slots
// - extra slots in the beginning
// - extra slots in the middle

@ -31,23 +31,26 @@ import (
var emptyCodeHash = crypto.Keccak256Hash(nil)
/*
The State Transitioning Model
A state transition is a change made when a transaction is applied to the current world state
The state transitioning model does all the necessary work to work out a valid new state root.
1) Nonce handling
2) Pre pay gas
3) Create a new state object if the recipient is \0*32
4) Value transfer
== If contract creation ==
4a) Attempt to run transaction data
4b) If valid, use result as code for the new state object
== end ==
5) Run Script section
6) Derive new state root
*/
// The State Transitioning Model
//
// A state transition is a change made when a transaction is applied to the current world
// state. The state transitioning model does all the necessary work to work out a valid new
// state root.
//
// 1. Nonce handling
// 2. Pre pay gas
// 3. Create a new state object if the recipient is \0*32
// 4. Value transfer
//
// == If contract creation ==
//
// 4a. Attempt to run transaction data
// 4b. If valid, use result as code for the new state object
//
// == end ==
//
// 5. Run Script section
// 6. Derive new state root
type StateTransition struct {
gp *GasPool
msg Message
@ -262,13 +265,10 @@ func (st *StateTransition) preCheck() error {
// TransitionDb will transition the state by applying the current message and
// returning the evm execution result with following fields.
//
// - used gas:
// total gas used (including gas being refunded)
// - returndata:
// the returned data from evm
// - concrete execution error:
// various **EVM** error which aborts the execution,
// e.g. ErrOutOfGas, ErrExecutionReverted
// - used gas: total gas used (including gas being refunded)
// - returndata: the returned data from evm
// - concrete execution error: various EVM errors which abort the execution, e.g.
// ErrOutOfGas, ErrExecutionReverted
//
// However if any consensus issue encountered, return the error directly with
// nil evm execution result.

@ -117,20 +117,21 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
return params.SstoreResetGas, nil
}
}
// The new gas metering is based on net gas costs (EIP-1283):
//
// 1. If current value equals new value (this is a no-op), 200 gas is deducted.
// 2. If current value does not equal new value
// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context)
// 2.1.1. If original value is 0, 20000 gas is deducted.
// 2.1.2. Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter.
// 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses.
// 2.2.1. If original value is not 0
// 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0.
// 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter.
// 2.2.2. If original value equals new value (this storage slot is reset)
// 2.2.2.1. If original value is 0, add 19800 gas to refund counter.
// 2.2.2.2. Otherwise, add 4800 gas to refund counter.
// (1.) If current value equals new value (this is a no-op), 200 gas is deducted.
// (2.) If current value does not equal new value
// (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context)
// (2.1.1.) If original value is 0, 20000 gas is deducted.
// (2.1.2.) Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter.
// (2.2.) If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses.
// (2.2.1.) If original value is not 0
// (2.2.1.1.) If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0.
// (2.2.1.2.) If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter.
// (2.2.2.) If original value equals new value (this storage slot is reset)
// (2.2.2.1.) If original value is 0, add 19800 gas to refund counter.
// (2.2.2.2.) Otherwise, add 4800 gas to refund counter.
value := common.Hash(y.Bytes32())
if current == value { // noop (1)
return params.NetSstoreNoopGas, nil
@ -162,19 +163,21 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
return params.NetSstoreDirtyGas, nil
}
// 0. If *gasleft* is less than or equal to 2300, fail the current call.
// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted.
// 2. If current value does not equal new value:
// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context):
// 2.1.1. If original value is 0, SSTORE_SET_GAS (20K) gas is deducted.
// 2.1.2. Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter.
// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses:
// 2.2.1. If original value is not 0:
// 2.2.1.1. If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter.
// 2.2.1.2. If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter.
// 2.2.2. If original value equals new value (this storage slot is reset):
// 2.2.2.1. If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter.
// 2.2.2.2. Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter.
// Here come the EIP220 rules:
//
// (0.) If *gasleft* is less than or equal to 2300, fail the current call.
// (1.) If current value equals new value (this is a no-op), SLOAD_GAS is deducted.
// (2.) If current value does not equal new value:
// (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context):
// (2.1.1.) If original value is 0, SSTORE_SET_GAS (20K) gas is deducted.
// (2.1.2.) Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter.
// (2.2.) If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses:
// (2.2.1.) If original value is not 0:
// (2.2.1.1.) If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter.
// (2.2.1.2.) If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter.
// (2.2.2.) If original value equals new value (this storage slot is reset):
// (2.2.2.1.) If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter.
// (2.2.2.2.) Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter.
func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
// If we fail the minimum gas availability invariant, fail (0)
if contract.Gas <= params.SstoreSentryGasEIP2200 {

@ -392,29 +392,29 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
// opExtCodeHash returns the code hash of a specified account.
// There are several cases when the function is called, while we can relay everything
// to `state.GetCodeHash` function to ensure the correctness.
// (1) Caller tries to get the code hash of a normal contract account, state
//
// 1. Caller tries to get the code hash of a normal contract account, state
// should return the relative code hash and set it as the result.
//
// (2) Caller tries to get the code hash of a non-existent account, state should
// 2. Caller tries to get the code hash of a non-existent account, state should
// return common.Hash{} and zero will be set as the result.
//
// (3) Caller tries to get the code hash for an account without contract code,
// state should return emptyCodeHash(0xc5d246...) as the result.
// 3. Caller tries to get the code hash for an account without contract code, state
// should return emptyCodeHash(0xc5d246...) as the result.
//
// (4) Caller tries to get the code hash of a precompiled account, the result
// should be zero or emptyCodeHash.
// 4. Caller tries to get the code hash of a precompiled account, the result should be
// zero or emptyCodeHash.
//
// It is worth noting that in order to avoid unnecessary create and clean,
// all precompile accounts on mainnet have been transferred 1 wei, so the return
// here should be emptyCodeHash.
// If the precompile account is not transferred any amount on a private or
// It is worth noting that in order to avoid unnecessary create and clean, all precompile
// accounts on mainnet have been transferred 1 wei, so the return here should be
// emptyCodeHash. If the precompile account is not transferred any amount on a private or
// customized chain, the return value will be zero.
//
// (5) Caller tries to get the code hash for an account which is marked as suicided
// 5. Caller tries to get the code hash for an account which is marked as suicided
// in the current transaction, the code hash of this account should be returned.
//
// (6) Caller tries to get the code hash for an account which is marked as deleted,
// this account should be regarded as a non-existent account and zero should be returned.
// 6. Caller tries to get the code hash for an account which is marked as deleted, this
// account should be regarded as a non-existent account and zero should be returned.
func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
address := common.Address(slot.Bytes20())

@ -105,7 +105,6 @@ func (BitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool {
return x3.Cmp(y2) == 0
}
//TODO: double check if the function is okay
// affineFromJacobian reverses the Jacobian transform. See the comment at the
// top of the file.
func (BitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {

@ -1,3 +1,4 @@
//go:build dummy
// +build dummy
// Package c contains only a C file.

@ -1,3 +1,4 @@
//go:build dummy
// +build dummy
// Package c contains only a C file.

@ -1,3 +1,4 @@
//go:build dummy
// +build dummy
// Package c contains only a C file.

@ -1,3 +1,4 @@
//go:build dummy
// +build dummy
// Package c contains only a C file.

@ -1,3 +1,4 @@
//go:build dummy
// +build dummy
// Package c contains only a C file.

@ -1,3 +1,4 @@
//go:build dummy
// +build dummy
// Package c contains only a C file.

@ -1,3 +1,4 @@
//go:build dummy
// +build dummy
// Package c contains only a C file.

@ -142,15 +142,19 @@ func NewConsensusAPI(eth *eth.Ethereum) *ConsensusAPI {
}
// ForkchoiceUpdatedV1 has several responsibilities:
// If the method is called with an empty head block:
// we return success, which can be used to check if the engine API is enabled
// If the total difficulty was not reached:
// we return INVALID
// If the finalizedBlockHash is set:
// we check if we have the finalizedBlockHash in our db, if not we start a sync
// We try to set our blockchain to the headBlock
// If there are payloadAttributes:
// we try to assemble a block with the payloadAttributes and return its payloadID
//
// We try to set our blockchain to the headBlock.
//
// If the method is called with an empty head block: we return success, which can be used
// to check if the engine API is enabled.
//
// If the total difficulty was not reached: we return INVALID.
//
// If the finalizedBlockHash is set: we check if we have the finalizedBlockHash in our db,
// if not we start a sync.
//
// If there are payloadAttributes: we try to assemble a block with the payloadAttributes
// and return its payloadID.
func (api *ConsensusAPI) ForkchoiceUpdatedV1(update beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
api.forkchoiceLock.Lock()
defer api.forkchoiceLock.Unlock()

@ -519,7 +519,7 @@ func TestExchangeTransitionConfig(t *testing.T) {
TestNewPayloadOnInvalidChain sets up a valid chain and tries to feed blocks
from an invalid chain to test if latestValidHash (LVH) works correctly.
We set up the following chain where P1 ... Pn and P1'' are valid while
We set up the following chain where P1 ... Pn and P1 are valid while
P1' is invalid.
We expect
(1) The LVH to point to the current inserted payload if it was valid.

@ -741,9 +741,11 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
// common ancestor.
// It returns parameters to be used for peer.RequestHeadersByNumber:
//
// from - starting block number
// count - number of headers to request
// skip - number of headers to skip
//
// and also returns 'max', the last block which is expected to be returned by the remote peers,
// given the (from,count,skip)
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {

@ -480,6 +480,7 @@ func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bo
// to access the queue, so they already need a lock anyway.
//
// Returns:
//
// item - the fetchRequest
// progress - whether any progress was made
// throttle - if the caller should throttle for a while

@ -71,6 +71,7 @@ func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
// wants to reserve headers for fetching.
//
// It returns the following:
//
// stale - if true, this item is already passed, and should not be requested again
// throttled - if true, the store is at capacity, this particular header is not prio now
// item - the result to store data into

@ -212,6 +212,7 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, reqEnd rpc.BlockNum
// block, sorted in ascending order and weighted by gas used.
// - baseFee: base fee per gas in the given block
// - gasUsedRatio: gasUsed/gasLimit in the given block
//
// Note: baseFee includes the next block after the newest of the returned range, because this
// value can be derived from the newest block.
func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) {

@ -368,8 +368,8 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
return hashes, slots, proofs
}
// the createStorageRequestResponseAlwaysProve tests a cornercase, where it always
// supplies the proof for the last account, even if it is 'complete'.h
// createStorageRequestResponseAlwaysProve tests a cornercase, where the peer always
// supplies the proof for the last account, even if it is 'complete'.
func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {
var size uint64
max = max * 3 / 4

@ -37,6 +37,7 @@ func init() {
// a reversed signature can be matched against the size of the data.
//
// Example:
//
// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"})
// {
// 0x27dc297e-128: 1,

@ -14,24 +14,20 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
/*
Package native is a collection of tracers written in go.
In order to add a native tracer and have it compiled into the binary, a new
file needs to be added to this folder, containing an implementation of the
`eth.tracers.Tracer` interface.
Aside from implementing the tracer, it also needs to register itself, using the
`register` method -- and this needs to be done in the package initialization.
Example:
```golang
func init() {
register("noopTracerNative", newNoopTracer)
}
```
*/
// Package native is a collection of tracers written in go.
//
// In order to add a native tracer and have it compiled into the binary, a new
// file needs to be added to this folder, containing an implementation of the
// `eth.tracers.Tracer` interface.
//
// Aside from implementing the tracer, it also needs to register itself, using the
// `register` method -- and this needs to be done in the package initialization.
//
// Example:
//
// func init() {
// register("noopTracerNative", newNoopTracer)
// }
package native
import (

@ -266,6 +266,7 @@ func (db *Database) Path() string {
// the metrics subsystem.
//
// This is how a LevelDB stats table looks like (currently):
//
// Compactions
// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)
// -------+------------+---------------+---------------+---------------+---------------

@ -102,12 +102,16 @@ type Service struct {
// websocket.
//
// From Gorilla websocket docs:
// Connections support one concurrent reader and one concurrent writer.
// Applications are responsible for ensuring that no more than one goroutine calls the write methods
// - NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression, SetCompressionLevel
// concurrently and that no more than one goroutine calls the read methods
// - NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler
// concurrently.
//
// Connections support one concurrent reader and one concurrent writer. Applications are
// responsible for ensuring that
// - no more than one goroutine calls the write methods
// NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression,
// SetCompressionLevel concurrently; and
// - that no more than one goroutine calls the
// read methods NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler,
// SetPingHandler concurrently.
//
// The Close and WriteControl methods can be called concurrently with all other methods.
type connWrapper struct {
conn *websocket.Conn

@ -731,9 +731,9 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m
}
// GetBlockByNumber returns the requested canonical block.
// * When blockNr is -1 the chain head is returned.
// * When blockNr is -2 the pending chain head is returned.
// * When fullTx is true all transactions in the block are returned, otherwise
// - When blockNr is -1 the chain head is returned.
// - When blockNr is -2 the pending chain head is returned.
// - When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
block, err := s.b.BlockByNumber(ctx, number)

@ -366,6 +366,7 @@ func NewLightAPI(backend *lesCommons) *LightAPI {
// LatestCheckpoint returns the latest local checkpoint package.
//
// The checkpoint package consists of 4 strings:
//
// result[0], hex encoded latest section index
// result[1], 32 bytes hex encoded latest section head hash
// result[2], 32 bytes hex encoded latest section canonical hash trie root hash
@ -384,6 +385,7 @@ func (api *LightAPI) LatestCheckpoint() ([4]string, error) {
// GetLocalCheckpoint returns the specific local checkpoint package.
//
// The checkpoint package consists of 3 strings:
//
// result[0], 32 bytes hex encoded latest section head hash
// result[1], 32 bytes hex encoded latest section canonical hash trie root hash
// result[2], 32 bytes hex encoded latest section bloom trie root hash

@ -56,15 +56,19 @@ func NewConsensusAPI(les *les.LightEthereum) *ConsensusAPI {
}
// ForkchoiceUpdatedV1 has several responsibilities:
// If the method is called with an empty head block:
// we return success, which can be used to check if the catalyst mode is enabled
// If the total difficulty was not reached:
// we return INVALID
// If the finalizedBlockHash is set:
// we check if we have the finalizedBlockHash in our db, if not we start a sync
// We try to set our blockchain to the headBlock
// If there are payloadAttributes:
// we return an error since block creation is not supported in les mode
//
// We try to set our blockchain to the headBlock.
//
// If the method is called with an empty head block: we return success, which can be used
// to check if the catalyst mode is enabled.
//
// If the total difficulty was not reached: we return INVALID.
//
// If the finalizedBlockHash is set: we check if we have the finalizedBlockHash in our db,
// if not we start a sync.
//
// If there are payloadAttributes: we return an error since block creation is not
// supported in les mode.
func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads beacon.ForkchoiceStateV1, payloadAttributes *beacon.PayloadAttributesV1) (beacon.ForkChoiceResponse, error) {
if heads.HeadBlockHash == (common.Hash{}) {
log.Warn("Forkchoice requested update to zero hash")

@ -693,9 +693,11 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
// common ancestor.
// It returns parameters to be used for peer.RequestHeadersByNumber:
//
// from - starting block number
// count - number of headers to request
// skip - number of headers to skip
//
// and also returns 'max', the last block which is expected to be returned by the remote peers,
// given the (from,count,skip)
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {

@ -477,6 +477,7 @@ func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bo
// to access the queue, so they already need a lock anyway.
//
// Returns:
//
// item - the fetchRequest
// progress - whether any progress was made
// throttle - if the caller should throttle for a while

@ -71,6 +71,7 @@ func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {
// wants to reserve headers for fetching.
//
// It returns the following:
//
// stale - if true, this item is already passed, and should not be requested again
// throttled - if true, the store is at capacity, this particular header is not prio now
// item - the result to store data into

@ -242,7 +242,9 @@ func (f *lightFetcher) forEachPeer(check func(id enode.ID, p *fetcherPeer) bool)
}
// mainloop is the main event loop of the light fetcher, which is responsible for
//
// - announcement maintenance(ulc)
//
// If we are running in ultra light client mode, then all announcements from
// the trusted servers are maintained. If the same announcements from trusted
// servers reach the threshold, then the relevant header is requested for retrieval.

@ -71,15 +71,16 @@ type TxPool struct {
eip2718 bool // Fork indicator whether we are in the eip2718 stage.
}
// TxRelayBackend provides an interface to the mechanism that forwards transactions
// to the ETH network. The implementations of the functions should be non-blocking.
// TxRelayBackend provides an interface to the mechanism that forwards transactions to the
// ETH network. The implementations of the functions should be non-blocking.
//
// Send instructs backend to forward new transactions
// NewHead notifies backend about a new head after processed by the tx pool,
// including mined and rolled back transactions since the last event
// Discard notifies backend about transactions that should be discarded either
// because they have been replaced by a re-send or because they have been mined
// long ago and no rollback is expected
// Send instructs backend to forward new transactions NewHead notifies backend about a new
// head after processed by the tx pool, including mined and rolled back transactions since
// the last event.
//
// Discard notifies backend about transactions that should be discarded either because
// they have been replaced by a re-send or because they have been mined long ago and no
// rollback is expected.
type TxRelayBackend interface {
Send(txs types.Transactions)
NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)

@ -13,21 +13,19 @@ This will output a line that looks like:
lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9
Getting Started
# Getting Started
To get started, you'll want to import the library:
import log "github.com/inconshreveable/log15"
Now you're ready to start logging:
func main() {
log.Info("Program starting", "args", os.Args())
}
Convention
# Convention
Because recording a human-meaningful message is common and good practice, the first argument to every
logging method is the value to the *implicit* key 'msg'.
@ -46,8 +44,7 @@ If you really do favor your type-safety, you may choose to pass a log.Ctx instea
log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val})
Context loggers
# Context loggers
Frequently, you want to add context to a logger so that you can track actions associated with it. An http
request is a good example. You can easily create new loggers that have context that is automatically included
@ -62,8 +59,7 @@ This will output a log line that includes the path context that is attached to t
lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12
Handlers
# Handlers
The Handler interface defines where log lines are printed to and how they are formatted. Handler is a
single interface that is inspired by net/http's handler interface:
@ -72,7 +68,6 @@ single interface that is inspired by net/http's handler interface:
Log(r *Record) error
}
Handlers can filter records, format them, or dispatch to multiple other Handlers.
This package implements a number of Handlers for common logging patterns that are
easily composed to create flexible, custom logging structures.
@ -90,7 +85,7 @@ or above in JSON formatted output to the file /var/log/service.json
log.MatchFilterHandler("pkg", "app/rpc", log.StdoutHandler)
)
Logging File Names and Line Numbers
# Logging File Names and Line Numbers
This package implements three Handlers that add debugging information to the
context, CallerFileHandler, CallerFuncHandler and CallerStackHandler. Here's
@ -121,7 +116,7 @@ The "%+v" format instructs the handler to include the path of the source file
relative to the compile time GOPATH. The github.com/go-stack/stack package
documents the full list of formatting verbs and modifiers available.
Custom Handlers
# Custom Handlers
The Handler interface is so simple that it's also trivial to write your own. Let's create an
example handler which tries to write to one handler, but if that fails it falls back to
@ -146,7 +141,7 @@ fails you want to log those records to a file on disk.
This pattern is so useful that a generic version that handles an arbitrary number of Handlers
is included as part of this library called FailoverHandler.
Logging Expensive Operations
# Logging Expensive Operations
Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay
the price of computing them if you haven't turned up your logging level to a high level of detail.
@ -164,7 +159,7 @@ filters it out. Just wrap any function which takes no arguments with the log.Laz
If this message is not logged for any reason (like logging at the Error level), then
factorRSAKey is never evaluated.
Dynamic context values
# Dynamic context values
The same log.Lazy mechanism can be used to attach context to a logger which you want to be
evaluated when the message is logged, but not when the logger is created. For example, let's imagine
@ -191,14 +186,14 @@ current state no matter when the log message is written:
isAlive := func() bool { return p.alive }
player.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive})
Terminal Format
# Terminal Format
If log15 detects that stdout is a terminal, it will configure the default
handler for it (which is log.StdoutHandler) to use TerminalFormat. This format
logs records nicely for your terminal, including color-coded output based
on log level.
Error Handling
# Error Handling
Because log15 allows you to step around the type system, there are a few ways you can specify
invalid arguments to the logging functions. You could, for example, wrap something that is not
@ -216,7 +211,7 @@ are encouraged to return errors only if they fail to write their log records out
syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures
like the FailoverHandler.
Library Use
# Library Use
log15 is intended to be useful for library authors as a way to provide configurable logging to
users of their library. Best practice for use in a library is to always disable all output for your logger
@ -242,7 +237,7 @@ Users of your library may then enable it if they like:
yourlib.Log.SetHandler(handler)
}
Best practices attaching logger context
# Best practices attaching logger context
The ability to attach context to a logger is a powerful one. Where should you do it and why?
I favor embedding a Logger directly into any persistent object in my application and adding
@ -298,7 +293,7 @@ Tab example, we would prefer to set up our Logger like so:
Now we'll have a unique traceable identifier even across loading new urls, but
we'll still be able to see the tab's current url in the log messages.
Must
# Must
For all Handler functions which can return an error, there is a version of that
function which will return no error but panics on failure. They are all available
@ -307,7 +302,7 @@ on the Must object. For example:
log.Must.FileHandler("/path", log.JSONFormat())
log.Must.NetHandler("tcp", ":1234", log.JSONFormat())
Inspiration and Credit
# Inspiration and Credit
All of the following excellent projects inspired the design of this library:
@ -325,9 +320,8 @@ github.com/spacemonkeygo/spacelog
golang's stdlib, notably io and net/http
The Name
# The Name
https://xkcd.com/927/
*/
package log
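
Since the sections above only gained new headings, a small self-contained usage sketch may help tie them together: a level-filtered root handler plus a context logger (the connection value is made up):

	package main

	import (
		"os"

		"github.com/ethereum/go-ethereum/log"
	)

	func main() {
		// Drop everything below Info; print the rest to stderr in logfmt.
		log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo,
			log.StreamHandler(os.Stderr, log.LogfmtFormat())))

		// A context logger repeats its key/value pairs on every record.
		l := log.New("conn", "10.0.0.1:30303")
		l.Info("peer connected", "caps", 2)
	}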

@ -84,7 +84,6 @@ type TerminalStringer interface {
// Example:
//
// [DBUG] [May 16 20:58:45] remove route ns=haproxy addr=127.0.0.1:50002
//
func TerminalFormat(usecolor bool) Format {
return FormatFunc(func(r *Record) []byte {
var color = 0
@ -149,7 +148,6 @@ func TerminalFormat(usecolor bool) Format {
// format for key/value pairs.
//
// For more details see: http://godoc.org/github.com/kr/logfmt
//
func LogfmtFormat() Format {
return FormatFunc(func(r *Record) []byte {
common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}

@ -144,7 +144,6 @@ func CallerStackHandler(format string, h Handler) Handler {
// }
// return false
// }, h))
//
func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
return FuncHandler(func(r *Record) error {
if fn(r) {
@ -160,7 +159,6 @@ func FilterHandler(fn func(r *Record) bool, h Handler) Handler {
// from your ui package:
//
// log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler)
//
func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
return FilterHandler(func(r *Record) (pass bool) {
switch key {
@ -187,7 +185,6 @@ func MatchFilterHandler(key string, value interface{}, h Handler) Handler {
// log Error/Crit records:
//
// log.LvlFilterHandler(log.LvlError, log.StdoutHandler)
//
func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
return FilterHandler(func(r *Record) (pass bool) {
return r.Lvl <= maxLvl
@ -202,7 +199,6 @@ func LvlFilterHandler(maxLvl Lvl, h Handler) Handler {
// log.MultiHandler(
// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()),
// log.StderrHandler)
//
func MultiHandler(hs ...Handler) Handler {
return FuncHandler(func(r *Record) error {
for _, h := range hs {

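A sketch of composing the handlers touched in this hunk: MultiHandler fan-out plus MatchFilterHandler routing one subsystem's records to a file (the file path and the "pkg" value are illustrative):

	package main

	import (
		"os"

		"github.com/ethereum/go-ethereum/log"
	)

	func main() {
		// Everything goes to stderr; records carrying pkg=rpc are also
		// copied to their own log file.
		log.Root().SetHandler(log.MultiHandler(
			log.StreamHandler(os.Stderr, log.TerminalFormat(false)),
			log.MatchFilterHandler("pkg", "rpc",
				log.Must.FileHandler("/tmp/rpc.log", log.LogfmtFormat())),
		))
		log.New("pkg", "rpc").Warn("dropped connection")
	}
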
@ -1,11 +1,3 @@
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package influxdb
import (

@ -77,7 +77,6 @@ func (bi *BigInt) SetInt64(x int64) {
// -1 if x < 0
// 0 if x == 0
// +1 if x > 0
//
func (bi *BigInt) Sign() int {
return bi.bigint.Sign()
}

@ -20,7 +20,7 @@
// with pieces plucked from go-ethereum, rather to allow writing native dapps on
// mobile platforms. Keep this in mind when using or extending this package!
//
// API limitations
// # API limitations
//
// Since gomobile cannot bridge arbitrary types between Go and Android/iOS, the
// exposed APIs need to be manually wrapped into simplified types, with custom

@ -21,13 +21,11 @@ In the model exposed by this package, a node is a collection of services which u
resources to provide RPC APIs. Services can also offer devp2p protocols, which are wired
up to the devp2p network when the node instance is started.
Node Lifecycle
# Node Lifecycle
The Node object has a lifecycle consisting of three basic states, INITIALIZING, RUNNING
and CLOSED.
New()
@ -40,7 +38,6 @@ and CLOSED.
CLOSED Close()
Creating a Node allocates basic resources such as the data directory and returns the node
in its INITIALIZING state. Lifecycle objects, RPC APIs and peer-to-peer networking
protocols can be registered in this state. Basic operations such as opening a key-value
@ -58,8 +55,7 @@ objects and shuts down RPC and peer-to-peer networking.
You must always call Close on Node, even if the node was not started.
Resources Managed By Node
# Resources Managed By Node
All file-system resources used by a node instance are located in a directory called the
data directory. The location of each resource can be overridden through additional node
@ -83,8 +79,7 @@ without a data directory, databases are opened in memory instead.
Node also creates the shared store of encrypted Ethereum account keys. Services can access
the account manager through the service context.
Sharing Data Directory Among Instances
# Sharing Data Directory Among Instances
Multiple node instances can share a single data directory if they have distinct instance
names (set through the Name config option). Sharing behaviour depends on the type of
@ -102,8 +97,7 @@ create one database for each instance.
The account key store is shared among all node instances using the same data directory
unless its location is changed through the KeyStoreDir configuration option.
Data Directory Sharing Example
# Data Directory Sharing Example
In this example, two node instances named A and B are started with the same data
directory. Node instance A opens the database "db", node instance B opens the databases

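To make the INITIALIZING, RUNNING and CLOSED states above concrete, a minimal sketch of the lifecycle (instance name and data directory are made up):

	package main

	import "github.com/ethereum/go-ethereum/node"

	func main() {
		// INITIALIZING: allocates the data directory; services, RPC APIs
		// and protocols would be registered in this state.
		stack, err := node.New(&node.Config{Name: "a", DataDir: "/tmp/demo"})
		if err != nil {
			panic(err)
		}
		// Close must always be called, even if Start never runs.
		defer stack.Close()

		// RUNNING: starts lifecycles, RPC and peer-to-peer networking.
		if err := stack.Start(); err != nil {
			panic(err)
		}
	}
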
@ -90,7 +90,6 @@ var (
// - dynamic dials are created from node discovery results. The dialer
// continuously reads candidate nodes from its input iterator and attempts
// to create peer connections to nodes arriving through the iterator.
//
type dialScheduler struct {
dialConfig
setupFunc dialSetupFunc

@ -39,7 +39,6 @@ import (
// To regenerate discv5 test vectors, run
//
// go test -run TestVectors -write-test-vectors
//
var writeTestVectorsFlag = flag.Bool("write-test-vectors", false, "Overwrite discv5 test vectors in testdata/")
var (

@ -133,16 +133,16 @@ UDP Payload length:
- dns.txt.length 1
- dns.txt resp_data_size
So the total size is roughly a fixed overhead of `39`, and the size of the
query (domain name) and response.
The query size is, for example, FVY6INQ6LZ33WLCHO3BPR3FH6Y.snap.mainnet.ethdisco.net (52)
So the total size is roughly a fixed overhead of `39`, and the size of the query (domain
name) and response. The query size is, for example,
FVY6INQ6LZ33WLCHO3BPR3FH6Y.snap.mainnet.ethdisco.net (52)
We also have some static data in the response, such as `enrtree-branch:`, and potentially
splitting the response up with `" "`, leaving us with a size of roughly `400` that we need
to stay below.
The number `370` is used to have some margin for extra overhead (for example, the dns query
may be larger - more subdomains).
The number `370` is used to have some margin for extra overhead (for example, the dns
query may be larger - more subdomains).
*/
const (
hashAbbrevSize = 1 + 16*13/8 // Size of an encoded hash (plus comma)

@ -19,7 +19,7 @@
// stored in key/value pairs. To store and retrieve key/values in a record, use the Entry
// interface.
//
// Signature Handling
// # Signature Handling
//
// Records must be signed before transmitting them to another node.
//
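
A small sketch of the signing flow mentioned above, using the default v4 identity scheme from p2p/enode (the record entries are arbitrary examples):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/crypto"
		"github.com/ethereum/go-ethereum/p2p/enode"
		"github.com/ethereum/go-ethereum/p2p/enr"
	)

	func main() {
		var r enr.Record
		r.Set(enr.IP{127, 0, 0, 1}) // entries are key/value pairs
		r.Set(enr.UDP(30303))

		// Records must be signed before they can be transmitted.
		key, err := crypto.GenerateKey()
		if err != nil {
			panic(err)
		}
		if err := enode.SignV4(&r, key); err != nil {
			panic(err)
		}
		fmt.Println("sequence number:", r.Seq())
	}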

@ -112,7 +112,6 @@ func Send(w MsgWriter, msgcode uint64, data interface{}) error {
// the message payload will be an RLP list containing the items:
//
// [e1, e2, e3]
//
func SendItems(w MsgWriter, msgcode uint64, elems ...interface{}) error {
return Send(w, msgcode, elems)
}
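
The list semantics are easiest to see over a connected in-memory pipe; a minimal sketch (the message code and payload values are made up):

	package main

	import "github.com/ethereum/go-ethereum/p2p"

	func main() {
		rw1, rw2 := p2p.MsgPipe() // synchronous in-memory MsgReadWriter pair
		defer rw1.Close()
		defer rw2.Close()

		go func() {
			// Same wire encoding as:
			//   p2p.Send(rw1, 0x10, []interface{}{"e1", uint64(2), []byte{3}})
			p2p.SendItems(rw1, 0x10, "e1", uint64(2), []byte{3})
		}()

		msg, err := rw2.ReadMsg()
		if err != nil {
			panic(err)
		}
		msg.Discard() // payload is the RLP list ["e1", 2, 0x03]
	}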

@ -39,10 +39,9 @@ import (
// Node represents a node in a simulation network which is created by a
// NodeAdapter, for example:
//
// * SimNode - An in-memory node
// * ExecNode - A child process node
// * DockerNode - A Docker container node
//
// - SimNode, an in-memory node in the same process
// - ExecNode, a child process node
// - DockerNode, a node running in a Docker container
type Node interface {
// Addr returns the node's address (e.g. an Enode URL)
Addr() []byte

@ -20,7 +20,6 @@ package params
// Example: To get the wei value of an amount in 'gwei', use
//
// new(big.Int).Mul(value, big.NewInt(params.GWei))
//
const (
Wei = 1
GWei = 1e9
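
A worked version of the gwei-to-wei conversion shown above (the amount is arbitrary):

	package main

	import (
		"fmt"
		"math/big"

		"github.com/ethereum/go-ethereum/params"
	)

	func main() {
		tip := big.NewInt(2) // 2 gwei
		wei := new(big.Int).Mul(tip, big.NewInt(params.GWei))
		fmt.Println(wei) // 2000000000 wei
	}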

@ -27,8 +27,7 @@ value zero equivalent to the empty string).
RLP values are distinguished by a type tag. The type tag precedes the value in the input
stream and defines the size and kind of the bytes that follow.
Encoding Rules
# Encoding Rules
Package rlp uses reflection and encodes RLP based on the Go type of the value.
@ -58,8 +57,7 @@ An interface value encodes as the value contained in the interface.
Floating point numbers, maps, channels and functions are not supported.
Decoding Rules
# Decoding Rules
Decoding uses the following type-dependent rules:
@ -99,8 +97,7 @@ To decode into an interface value, one of these types is stored in the value:
Non-empty interface types are not supported when decoding.
Signed integers, floating point numbers, maps, channels and functions cannot be decoded into.
Struct Tags
# Struct Tags
As with other encoding packages, the "-" tag ignores fields.
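
A short sketch of an encode/decode round trip with a struct tag, following the rules summarized above (the struct is made up; the "-" tag excludes a field):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/rlp"
	)

	type item struct {
		Name   string
		Count  uint64
		Cached string `rlp:"-"` // ignored by both encoder and decoder
	}

	func main() {
		enc, err := rlp.EncodeToBytes(item{Name: "foo", Count: 3, Cached: "x"})
		if err != nil {
			panic(err)
		}
		var dec item
		if err := rlp.DecodeBytes(enc, &dec); err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", dec) // Cached comes back empty
	}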

@ -15,7 +15,6 @@
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
/*
Package rpc implements bi-directional JSON-RPC 2.0 on multiple transports.
It provides access to the exported methods of an object across a network or other I/O
@ -23,7 +22,7 @@ connection. After creating a server or client instance, objects can be registere
them visible as 'services'. Exported methods that follow specific conventions can be
called remotely. It also has support for the publish/subscribe pattern.
RPC Methods
# RPC Methods
Methods that satisfy the following criteria are made available for remote access:
@ -75,7 +74,7 @@ An example server which uses the JSON codec:
l, _ := net.ListenUnix("unix", &net.UnixAddr{Net: "unix", Name: "/tmp/calculator.sock"})
server.ServeListener(l)
Subscriptions
# Subscriptions
The package also supports the publish subscribe pattern through the use of subscriptions.
A method that is considered eligible for notifications must satisfy the following
@ -101,7 +100,7 @@ the client and server. The server will close the connection for any write error.
For more information about subscriptions, see https://github.com/ethereum/go-ethereum/wiki/RPC-PUB-SUB.
Reverse Calls
# Reverse Calls
In any method handler, an instance of rpc.Client can be accessed through the
ClientFromContext method. Using this client instance, server-to-client method calls can be

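A compact sketch of registering a service along the lines described in this hunk (the calculator service and the listen address are illustrative):

	package main

	import (
		"net"

		"github.com/ethereum/go-ethereum/rpc"
	)

	// CalculatorService exposes its exported, convention-following methods
	// under the "calculator" namespace, e.g. calculator_add.
	type CalculatorService struct{}

	func (s *CalculatorService) Add(a, b int) int { return a + b }

	func main() {
		server := rpc.NewServer()
		if err := server.RegisterName("calculator", new(CalculatorService)); err != nil {
			panic(err)
		}
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			panic(err)
		}
		server.ServeListener(l) // serves the JSON codec on every accepted connection
	}
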
@ -64,7 +64,7 @@ func (vs *ValidationMessages) Info(msg string) {
vs.Messages = append(vs.Messages, ValidationInfo{INFO, msg})
}
/// getWarnings returns an error with all messages of type WARN of above, or nil if no warnings were present
// getWarnings returns an error with all messages of type WARN or above, or nil if no warnings were present
func (v *ValidationMessages) GetWarnings() error {
var messages []string
for _, msg := range v.Messages {

@ -175,7 +175,8 @@ func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
}
}
/* See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II
/*
See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II
Whether a block is valid or not is a bit subtle: it's defined by the presence of the
blockHeader, transactions and uncleHeaders fields. If they are missing, the block is

@ -70,12 +70,14 @@ func checkInput(id byte, inputLen int) bool {
panic("programmer error")
}
// The fuzzer functions must return
// 1 if the fuzzer should increase priority of the
// The function must return
//
// - 1 if the fuzzer should increase priority of the
// given input during subsequent fuzzing (for example, the input is lexically
// correct and was parsed successfully);
// -1 if the input must not be added to corpus even if gives new coverage; and
// 0 otherwise
// - -1 if the input must not be added to the corpus even if it gives new coverage; and
// - 0 otherwise
//
// other values are reserved for future use.
func fuzz(id byte, data []byte) int {
// Even on bad input, it should not crash, so we still test the gas calc

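The same return convention is restated in each of the fuzzer hunks above and below; a self-contained sketch of it, using JSON decoding purely as a stand-in for the real fuzz targets:

	package fuzzer

	import "encoding/json"

	// Fuzz illustrates the go-fuzz return convention; JSON decoding here is
	// only a stand-in for the parsers actually being fuzzed.
	func Fuzz(data []byte) int {
		var v map[string]interface{}
		if err := json.Unmarshal(data, &v); err != nil {
			return 0 // neutral: the input did not parse
		}
		if len(v) == 0 {
			return -1 // keep out of the corpus even if it found new coverage
		}
		return 1 // parsed successfully: prioritize in subsequent fuzzing
	}
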
@ -67,11 +67,13 @@ func (f *fuzzer) readBool() bool {
}
// The function must return
// 1 if the fuzzer should increase priority of the
//
// - 1 if the fuzzer should increase priority of the
// given input during subsequent fuzzing (for example, the input is lexically
// correct and was parsed successfully);
// -1 if the input must not be added to corpus even if gives new coverage; and
// 0 otherwise
// - -1 if the input must not be added to the corpus even if it gives new coverage; and
// - 0 otherwise
//
// other values are reserved for future use.
func Fuzz(data []byte) int {
f := fuzzer{

@ -180,11 +180,14 @@ func (f *fuzzer) fuzz() int {
}
// The function must return
// 1 if the fuzzer should increase priority of the
//
// - 1 if the fuzzer should increase priority of the
// given input during subsequent fuzzing (for example, the input is lexically
// correct and was parsed successfully);
// -1 if the input must not be added to corpus even if gives new coverage; and
// 0 otherwise; other values are reserved for future use.
// - -1 if the input must not be added to the corpus even if it gives new coverage; and
// - 0 otherwise
//
// other values are reserved for future use.
func Fuzz(input []byte) int {
if len(input) < 100 {
return 0

@ -114,11 +114,13 @@ func (k kvs) Swap(i, j int) {
}
// The function must return
// 1 if the fuzzer should increase priority of the
//
// - 1 if the fuzzer should increase priority of the
// given input during subsequent fuzzing (for example, the input is lexically
// correct and was parsed successfully);
// -1 if the input must not be added to corpus even if gives new coverage; and
// 0 otherwise
// - -1 if the input must not be added to the corpus even if it gives new coverage; and
// - 0 otherwise
//
// other values are reserved for future use.
func Fuzz(data []byte) int {
f := fuzzer{

@ -119,11 +119,13 @@ func Generate(input []byte) randTest {
}
// The function must return
// 1 if the fuzzer should increase priority of the
//
// - 1 if the fuzzer should increase priority of the
// given input during subsequent fuzzing (for example, the input is lexically
// correct and was parsed successfully);
// -1 if the input must not be added to corpus even if gives new coverage; and
// 0 otherwise
// - -1 if the input must not be added to the corpus even if it gives new coverage; and
// - 0 otherwise
//
// other values are reserved for future use.
func Fuzz(input []byte) int {
program := Generate(input)

@ -116,6 +116,7 @@ func (tm *testMatcher) skipLoad(pattern string) {
}
// fails adds an expected failure for tests matching the pattern.
//
//nolint:unused
func (tm *testMatcher) fails(pattern string, reason string) {
if reason == "" {

@ -377,6 +377,7 @@ func (st *StackTrie) insert(key, value []byte) {
// 1. The rlp-encoded value was >= 32 bytes:
// - Then the 32-byte `hash` will be accessible in `st.val`.
// - And the 'st.type' will be 'hashedNode'
//
// 2. The rlp-encoded value was < 32 bytes
// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'.
// - And the 'st.type' will be 'hashedNode' AGAIN
