cmd, les, tests: remove light client code (#28586)

* cmd, les, tests: remove light client code

This commit removes the light client (LES) code.
Since the merge, the light client has been broken, and it is
hard to maintain alongside the normal client. We decided it
would be best to remove it for now, and maybe rework and
reintroduce it in the future.

* cmd, eth: remove some more mentions of light mode

* cmd: re-add flags and mark as deprecated

* cmd: warn the user about deprecated flags (sketched below)

* eth: better error message
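For context, the "re-add and warn" pattern referenced above can be sketched as follows. This is a hypothetical, minimal reconstruction using the `urfave/cli/v2` package that geth's command line is built on; the flag wiring and warning text are illustrative, not the PR's exact code.

```go
// Sketch: keep a removed flag parseable but inert, warning when it is set.
package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// A former LES flag, kept so that existing command lines do not break.
var lightServeFlag = &cli.IntFlag{
	Name:   "light.serve",
	Usage:  "Maximum percentage of time allowed for serving LES requests (deprecated)",
	Hidden: true, // stays parseable, but out of --help
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{lightServeFlag},
		Action: func(ctx *cli.Context) error {
			// Warn instead of failing hard when a deprecated flag is used.
			if ctx.IsSet(lightServeFlag.Name) {
				log.Printf("WARNING: --%s is deprecated and has no effect; the light client has been removed", lightServeFlag.Name)
			}
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```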
Author: Marius van der Wijden (committed by GitHub)
parent d76efbb9be, commit bdf5e388ca
88 changed files (changed line counts in parentheses):

1. cmd/faucet/README.md (52)
2. cmd/faucet/faucet.go (891)
3. cmd/faucet/faucet.html (233)
4. cmd/faucet/faucet_test.go (46)
5. cmd/geth/config.go (3)
6. cmd/geth/les_test.go (206)
7. cmd/geth/main.go (31)
8. cmd/geth/run_test.go (9)
9. cmd/utils/flags.go (128)
10. cmd/utils/flags_legacy.go (41)
11. eth/backend.go (2)
12. eth/ethconfig/config.go (10)
13. ethstats/ethstats.go (3)
14. les/api.go (349)
15. les/api_backend.go (337)
16. les/api_test.go (512)
17. les/benchmark.go (351)
18. les/bloombits.go (75)
19. les/client.go (377)
20. les/client_handler.go (309)
21. les/commons.go (99)
22. les/costtracker.go (517)
23. les/distributor.go (313)
24. les/distributor_test.go (189)
25. les/enr_entry.go (72)
26. les/flowcontrol/control.go (433)
27. les/flowcontrol/logger.go (65)
28. les/flowcontrol/manager.go (476)
29. les/flowcontrol/manager_test.go (130)
30. les/handler_test.go (754)
31. les/metrics.go (151)
32. les/odr.go (237)
33. les/odr_requests.go (537)
34. les/odr_test.go (458)
35. les/peer.go (1362)
36. les/peer_test.go (166)
37. les/protocol.go (327)
38. les/request_test.go (129)
39. les/retrieve.go (421)
40. les/server.go (281)
41. les/server_handler.go (436)
42. les/server_requests.go (566)
43. les/servingqueue.go (365)
44. les/state_accessor.go (80)
45. les/test_helper.go (626)
46. les/txrelay.go (179)
47. les/utils/exec_queue.go (105)
48. les/utils/exec_queue_test.go (60)
49. les/utils/expiredvalue.go (270)
50. les/utils/expiredvalue_test.go (195)
51. les/utils/limiter.go (398)
52. les/utils/limiter_test.go (206)
53. les/utils/timeutils.go (69)
54. les/utils/timeutils_test.go (47)
55. les/utils/weighted_select.go (183)
56. les/utils/weighted_select_test.go (68)
57. les/vflux/client/api.go (107)
58. les/vflux/client/fillset.go (107)
59. les/vflux/client/fillset_test.go (119)
60. les/vflux/client/queueiterator.go (123)
61. les/vflux/client/queueiterator_test.go (99)
62. les/vflux/client/requestbasket.go (285)
63. les/vflux/client/requestbasket_test.go (171)
64. les/vflux/client/serverpool.go (605)
65. les/vflux/client/serverpool_test.go (424)
66. les/vflux/client/timestats.go (237)
67. les/vflux/client/timestats_test.go (145)
68. les/vflux/client/valuetracker.go (506)
69. les/vflux/client/valuetracker_test.go (137)
70. les/vflux/client/wrsiterator.go (127)
71. les/vflux/client/wrsiterator_test.go (105)
72. les/vflux/requests.go (180)
73. les/vflux/server/balance.go (693)
74. les/vflux/server/balance_test.go (459)
75. les/vflux/server/balance_tracker.go (300)
76. les/vflux/server/clientdb.go (250)
77. les/vflux/server/clientdb_test.go (148)
78. les/vflux/server/clientpool.go (328)
79. les/vflux/server/clientpool_test.go (640)
80. les/vflux/server/metrics.go (35)
81. les/vflux/server/prioritypool.go (695)
82. les/vflux/server/prioritypool_test.go (237)
83. les/vflux/server/service.go (120)
84. les/vflux/server/status.go (59)
85. tests/fuzzers/les/les-fuzzer.go (411)
86. tests/fuzzers/les/les_test.go (25)
87. tests/fuzzers/vflux/clientpool-fuzzer.go (333)
88. tests/fuzzers/vflux/clientpool_test.go (25)

@@ -1,52 +0,0 @@
# Faucet
The `faucet` is a simplistic web application with the goal of distributing small amounts of Ether in private and test networks.
Users need to post their Ethereum addresses to fund in a Twitter status update or public Facebook post and share the link to the faucet. The faucet will in turn deduplicate user requests and send the Ether. After a funding round, the faucet prevents the same user from requesting again for a pre-configured amount of time, proportional to the amount of Ether requested.
## Operation
The `faucet` is a single binary app (everything included) with all configurations set via command line flags and a few files.
First things first, the `faucet` needs to connect to an Ethereum network, for which it needs the necessary genesis and network information. This is configured via the following flags:
- `-genesis` is a path to a file containing the network `genesis.json`; alternatively:
  - `-goerli` initializes the faucet with the Görli network config
  - `-sepolia` initializes the faucet with the Sepolia network config
- `-network` is the devp2p network ID used during connection
- `-bootnodes` is a list of `enode://` ids to join the network through
The `faucet` will use the `les` protocol to join the configured Ethereum network and will store its data in `$HOME/.faucet` (currently not configurable).
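The LES startup this paragraph refers to reduces to a few lines of configuration. The sketch below is condensed from the removed `newFaucet` in `cmd/faucet/faucet.go` (shown in full later in this diff), against the pre-removal go-ethereum APIs; genesis wiring and error handling are trimmed.

```go
// Condensed sketch of the faucet's light-client setup (pre-removal APIs).
package faucet

import (
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/les"
	"github.com/ethereum/go-ethereum/node"
)

// startLES registers a light Ethereum client on an existing node stack.
func startLES(stack *node.Node, network uint64) (*les.LightEthereum, error) {
	cfg := ethconfig.Defaults
	cfg.SyncMode = downloader.LightSync // the light mode this PR removes
	cfg.NetworkId = network             // from the -network flag
	return les.New(stack, &cfg)
}
```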
## Funding
To be able to distribute funds, the `faucet` needs access to an already funded Ethereum account. This can be configured via the following flags (sketched below):
- `-account.json` is a path to the Ethereum account's JSON key file
- `-account.pass` is a path to a text file with the decryption passphrase
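A condensed sketch of how the faucet unlocks its funding account from these two files, trimmed from the removed `faucet.go` further down in this diff; the file paths and key directory are placeholders.

```go
// Sketch: import and unlock the faucet's funding account from key + password files.
package main

import (
	"log"
	"os"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	blob, err := os.ReadFile("account.pass") // -account.pass (placeholder path)
	if err != nil {
		log.Fatal(err)
	}
	pass := strings.TrimSuffix(string(blob), "\n")

	ks := keystore.NewKeyStore("keys", keystore.StandardScryptN, keystore.StandardScryptP)
	key, err := os.ReadFile("account.json") // -account.json (placeholder path)
	if err != nil {
		log.Fatal(err)
	}
	acc, err := ks.Import(key, pass, pass)
	if err != nil && err != keystore.ErrAccountAlreadyExists {
		log.Fatal(err)
	}
	if err := ks.Unlock(acc, pass); err != nil {
		log.Fatal(err)
	}
	log.Println("faucet account ready:", acc.Address.Hex())
}
```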
The faucet is able to distribute various amounts of Ether in exchange for various timeouts. These can be configured via:
- `-faucet.amount` is the number of Ethers to send by default
- `-faucet.minutes` is the time to wait before allowing a rerequest
- `-faucet.tiers` is the number of funding tiers to support (each successive tier waits x3 the time and pays x2.5 the funds; see the sketch below)
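To make the multipliers concrete, here is a small self-contained sketch of the tier progression, assuming the defaults of 1 Ether, 1440 minutes and 3 tiers; it mirrors the tier loop in the removed `faucet.go`.

```go
// Sketch: tier i pays payout * 2.5^i Ether after a wait of minutes * 3^i.
// With the defaults this prints 1/1440, 2.5/4320 and 6.25/12960.
package main

import (
	"fmt"
	"math"
)

func main() {
	const payout, minutes, tiers = 1.0, 1440, 3
	for i := 0; i < tiers; i++ {
		amount := payout * math.Pow(2.5, float64(i))
		period := minutes * int(math.Pow(3, float64(i)))
		fmt.Printf("tier %d: %g Ether, wait %d minutes\n", i, amount, period)
	}
}
```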
## Sybil protection
To prevent the same user from exhausting funds in a loop, the `faucet` ties requests to social networks and captcha resolvers.
Captcha protection uses Google's invisible ReCaptcha, thus the `faucet` needs to run on a live domain. The domain needs to be registered in Google's systems to retrieve the captcha API token and secrets. After doing so, captcha protection may be enabled via the following flags (a sketch of the verification call follows):
- `-captcha.token` is the API token for ReCaptcha
- `-captcha.secret` is the API secret for ReCaptcha
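A minimal sketch of the server-side check these flags enable, mirroring the verification call in the removed `faucet.go`; the endpoint is Google's public `siteverify` API, and the secret and token values are placeholders.

```go
// Sketch: verify a client-supplied ReCaptcha token against Google's API.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func verifyCaptcha(secret, response string) (bool, error) {
	form := url.Values{}
	form.Add("secret", secret)     // -captcha.secret
	form.Add("response", response) // token posted by the browser widget
	res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
	if err != nil {
		return false, err
	}
	defer res.Body.Close()
	var result struct {
		Success bool `json:"success"`
	}
	if err := json.NewDecoder(res.Body).Decode(&result); err != nil {
		return false, err
	}
	return result.Success, nil
}

func main() {
	ok, err := verifyCaptcha("my-secret", "client-token") // placeholders
	fmt.Println(ok, err)
}
```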
Sybil protection via Twitter requires an API key as of 15th December, 2020. To obtain it, a Twitter user must be upgraded to developer status and a new Twitter App deployed with it. The app's `Bearer` token is required by the faucet to retrieve tweet data (see the sketch below):
- `-twitter.token` is the Bearer token for `v2` API access
- `-twitter.token.v1` is the Bearer token for `v1` API access
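A condensed sketch of the v2 tweet lookup the bearer token enables, trimmed from the removed `authTwitterWithTokenV2` below; the tweet ID and token are placeholders, and response parsing is omitted.

```go
// Sketch: fetch a tweet and its author via the Twitter v2 API.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func fetchTweet(tweetID, bearer string) (string, error) {
	url := fmt.Sprintf("https://api.twitter.com/2/tweets/%s?expansions=author_id&user.fields=profile_image_url", tweetID)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Authorization", "Bearer "+bearer) // -twitter.token
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	body, err := io.ReadAll(res.Body)
	return string(body), err
}

func main() {
	raw, err := fetchTweet("1234567890", "my-bearer-token") // placeholders
	fmt.Println(raw, err)
}
```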
Sybil protection via Facebook scrapes the website directly for post data and thus does not currently require an API configuration.
## Miscellaneous
Besides the above (mostly essential) CLI flags, there are a number that can be used to fine-tune the `faucet`'s operation. Please see `faucet --help` for a full list.

@@ -1,891 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// faucet is an Ether faucet backed by a light client.
package main
import (
"bytes"
"context"
_ "embed"
"encoding/json"
"errors"
"flag"
"fmt"
"html/template"
"io"
"math"
"math/big"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethstats"
"github.com/ethereum/go-ethereum/internal/version"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/params"
"github.com/gorilla/websocket"
)
var (
genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with")
apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
ethPortFlag = flag.Int("ethport", 30303, "Listener port for the devp2p connection")
bootFlag = flag.String("bootnodes", "", "Comma separated bootnode enode URLs to seed with")
netFlag = flag.Uint64("network", 0, "Network ID to use for the Ethereum protocol")
statsFlag = flag.String("ethstats", "", "Ethstats network monitoring auth string")
netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
payoutFlag = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
minutesFlag = flag.Int("faucet.minutes", 1440, "Number of minutes to wait between funding rounds")
tiersFlag = flag.Int("faucet.tiers", 3, "Number of funding tiers to enable (x3 time, x2.5 funds)")
accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")
captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication")
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
twitterTokenFlag = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API")
twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API")
goerliFlag = flag.Bool("goerli", false, "Initializes the faucet with Görli network config")
sepoliaFlag = flag.Bool("sepolia", false, "Initializes the faucet with Sepolia network config")
)
var (
ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
)
//go:embed faucet.html
var websiteTmpl string
func main() {
// Parse the flags and set up the logger to print everything requested
flag.Parse()
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*logFlag), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// Construct the payout tiers
amounts := make([]string, *tiersFlag)
periods := make([]string, *tiersFlag)
for i := 0; i < *tiersFlag; i++ {
// Calculate the amount for the next tier and format it
amount := float64(*payoutFlag) * math.Pow(2.5, float64(i))
amounts[i] = fmt.Sprintf("%s Ethers", strconv.FormatFloat(amount, 'f', -1, 64))
if amount == 1 {
amounts[i] = strings.TrimSuffix(amounts[i], "s")
}
// Calculate the period for the next tier and format it
period := *minutesFlag * int(math.Pow(3, float64(i)))
periods[i] = fmt.Sprintf("%d mins", period)
if period%60 == 0 {
period /= 60
periods[i] = fmt.Sprintf("%d hours", period)
if period%24 == 0 {
period /= 24
periods[i] = fmt.Sprintf("%d days", period)
}
}
if period == 1 {
periods[i] = strings.TrimSuffix(periods[i], "s")
}
}
website := new(bytes.Buffer)
err := template.Must(template.New("").Parse(websiteTmpl)).Execute(website, map[string]interface{}{
"Network": *netnameFlag,
"Amounts": amounts,
"Periods": periods,
"Recaptcha": *captchaToken,
"NoAuth": *noauthFlag,
})
if err != nil {
log.Crit("Failed to render the faucet template", "err", err)
}
// Load and parse the genesis block requested by the user
genesis, err := getGenesis(*genesisFlag, *goerliFlag, *sepoliaFlag)
if err != nil {
log.Crit("Failed to parse genesis config", "err", err)
}
// Convert the bootnodes to internal enode representations
var enodes []*enode.Node
for _, boot := range strings.Split(*bootFlag, ",") {
if url, err := enode.Parse(enode.ValidSchemes, boot); err == nil {
enodes = append(enodes, url)
} else {
log.Error("Failed to parse bootnode URL", "url", boot, "err", err)
}
}
// Load up the account key and decrypt its password
blob, err := os.ReadFile(*accPassFlag)
if err != nil {
log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err)
}
pass := strings.TrimSuffix(string(blob), "\n")
ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP)
if blob, err = os.ReadFile(*accJSONFlag); err != nil {
log.Crit("Failed to read account key contents", "file", *accJSONFlag, "err", err)
}
acc, err := ks.Import(blob, pass, pass)
if err != nil && err != keystore.ErrAccountAlreadyExists {
log.Crit("Failed to import faucet signer account", "err", err)
}
if err := ks.Unlock(acc, pass); err != nil {
log.Crit("Failed to unlock faucet signer account", "err", err)
}
// Assemble and start the faucet light service
faucet, err := newFaucet(genesis, *ethPortFlag, enodes, *netFlag, *statsFlag, ks, website.Bytes())
if err != nil {
log.Crit("Failed to start faucet", "err", err)
}
defer faucet.close()
if err := faucet.listenAndServe(*apiPortFlag); err != nil {
log.Crit("Failed to launch faucet API", "err", err)
}
}
// request represents an accepted funding request.
type request struct {
Avatar string `json:"avatar"` // Avatar URL to make the UI nicer
Account common.Address `json:"account"` // Ethereum address being funded
Time time.Time `json:"time"` // Timestamp when the request was accepted
Tx *types.Transaction `json:"tx"` // Transaction funding the account
}
// faucet represents a crypto faucet backed by an Ethereum light client.
type faucet struct {
config *params.ChainConfig // Chain configurations for signing
stack *node.Node // Ethereum protocol stack
client *ethclient.Client // Client connection to the Ethereum chain
index []byte // Index page to serve up on the web
keystore *keystore.KeyStore // Keystore containing the single signer
account accounts.Account // Account funding user faucet requests
head *types.Header // Current head header of the faucet
balance *big.Int // Current balance of the faucet
nonce uint64 // Current pending nonce of the faucet
price *big.Int // Current gas price to issue funds with
conns []*wsConn // Currently live websocket connections
timeouts map[string]time.Time // History of users and their funding timeouts
reqs []*request // Currently pending funding requests
update chan struct{} // Channel to signal request updates
lock sync.RWMutex // Lock protecting the faucet's internals
}
// wsConn wraps a websocket connection with a write mutex as the underlying
// websocket library does not synchronize access to the stream.
type wsConn struct {
conn *websocket.Conn
wlock sync.Mutex
}
func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
// Assemble the raw devp2p protocol stack
git, _ := version.VCS()
stack, err := node.New(&node.Config{
Name: "geth",
Version: params.VersionWithCommit(git.Commit, git.Date),
DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
P2P: p2p.Config{
NAT: nat.Any(),
NoDiscovery: true,
DiscoveryV5: true,
ListenAddr: fmt.Sprintf(":%d", port),
MaxPeers: 25,
BootstrapNodesV5: enodes,
},
})
if err != nil {
return nil, err
}
// Assemble the Ethereum light client protocol
cfg := ethconfig.Defaults
cfg.SyncMode = downloader.LightSync
cfg.NetworkId = network
cfg.Genesis = genesis
utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock().Hash())
lesBackend, err := les.New(stack, &cfg)
if err != nil {
return nil, fmt.Errorf("failed to register the Ethereum service: %w", err)
}
// Assemble the ethstats monitoring and reporting service
if stats != "" {
if err := ethstats.New(stack, lesBackend.ApiBackend, lesBackend.Engine(), stats); err != nil {
return nil, err
}
}
// Boot up the client and ensure it connects to bootnodes
if err := stack.Start(); err != nil {
return nil, err
}
for _, boot := range enodes {
old, err := enode.Parse(enode.ValidSchemes, boot.String())
if err == nil {
stack.Server().AddPeer(old)
}
}
// Attach to the client and retrieve any interesting metadata
api := stack.Attach()
client := ethclient.NewClient(api)
return &faucet{
config: genesis.Config,
stack: stack,
client: client,
index: index,
keystore: ks,
account: ks.Accounts()[0],
timeouts: make(map[string]time.Time),
update: make(chan struct{}, 1),
}, nil
}
// close terminates the Ethereum connection and tears down the faucet.
func (f *faucet) close() error {
return f.stack.Close()
}
// listenAndServe registers the HTTP handlers for the faucet and boots it up
// to service user funding requests.
func (f *faucet) listenAndServe(port int) error {
go f.loop()
http.HandleFunc("/", f.webHandler)
http.HandleFunc("/api", f.apiHandler)
return http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
}
// webHandler handles all non-api requests, simply flattening and returning the
// faucet website.
func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
w.Write(f.index)
}
// apiHandler handles requests for Ether grants and transaction statuses.
func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
upgrader := websocket.Upgrader{}
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
return
}
// Start tracking the connection and drop at the end
defer conn.Close()
f.lock.Lock()
wsconn := &wsConn{conn: conn}
f.conns = append(f.conns, wsconn)
f.lock.Unlock()
defer func() {
f.lock.Lock()
for i, c := range f.conns {
if c.conn == conn {
f.conns = append(f.conns[:i], f.conns[i+1:]...)
break
}
}
f.lock.Unlock()
}()
// Gather the initial stats from the network to report
var (
head *types.Header
balance *big.Int
nonce uint64
)
for head == nil || balance == nil {
// Retrieve the current stats cached by the faucet
f.lock.RLock()
if f.head != nil {
head = types.CopyHeader(f.head)
}
if f.balance != nil {
balance = new(big.Int).Set(f.balance)
}
nonce = f.nonce
f.lock.RUnlock()
if head == nil || balance == nil {
// Report the faucet offline until initial stats are ready
//lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(wsconn, errors.New("Faucet offline")); err != nil {
log.Warn("Failed to send faucet error to client", "err", err)
return
}
time.Sleep(3 * time.Second)
}
}
// Send over the initial stats and the latest header
f.lock.RLock()
reqs := f.reqs
f.lock.RUnlock()
if err = send(wsconn, map[string]interface{}{
"funds": new(big.Int).Div(balance, ether),
"funded": nonce,
"peers": f.stack.Server().PeerCount(),
"requests": reqs,
}, 3*time.Second); err != nil {
log.Warn("Failed to send initial stats to client", "err", err)
return
}
if err = send(wsconn, head, 3*time.Second); err != nil {
log.Warn("Failed to send initial header to client", "err", err)
return
}
// Keep reading requests from the websocket until the connection breaks
for {
// Fetch the next funding request and validate it against the supported auth providers
var msg struct {
URL string `json:"url"`
Tier uint `json:"tier"`
Captcha string `json:"captcha"`
}
if err = conn.ReadJSON(&msg); err != nil {
return
}
if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
if err = sendError(wsconn, errors.New("URL doesn't link to supported services")); err != nil {
log.Warn("Failed to send URL error to client", "err", err)
return
}
continue
}
if msg.Tier >= uint(*tiersFlag) {
//lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(wsconn, errors.New("Invalid funding tier requested")); err != nil {
log.Warn("Failed to send tier error to client", "err", err)
return
}
continue
}
log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)
// If captcha verifications are enabled, make sure we're not dealing with a robot
if *captchaToken != "" {
form := url.Values{}
form.Add("secret", *captchaSecret)
form.Add("response", msg.Captcha)
res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
if err != nil {
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send captcha post error to client", "err", err)
return
}
continue
}
var result struct {
Success bool `json:"success"`
Errors json.RawMessage `json:"error-codes"`
}
err = json.NewDecoder(res.Body).Decode(&result)
res.Body.Close()
if err != nil {
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send captcha decode error to client", "err", err)
return
}
continue
}
if !result.Success {
log.Warn("Captcha verification failed", "err", string(result.Errors))
//lint:ignore ST1005 it's funny and the robot won't mind
if err = sendError(wsconn, errors.New("Beep-bop, you're a robot!")); err != nil {
log.Warn("Failed to send captcha failure to client", "err", err)
return
}
continue
}
}
// Retrieve the Ethereum address to fund, the requesting user and a profile picture
var (
id string
username string
avatar string
address common.Address
)
switch {
case strings.HasPrefix(msg.URL, "https://twitter.com/"):
id, username, avatar, address, err = authTwitter(msg.URL, *twitterTokenV1Flag, *twitterTokenFlag)
case strings.HasPrefix(msg.URL, "https://www.facebook.com/"):
username, avatar, address, err = authFacebook(msg.URL)
id = username
case *noauthFlag:
username, avatar, address, err = authNoAuth(msg.URL)
id = username
default:
//lint:ignore ST1005 This error is to be displayed in the browser
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
}
if err != nil {
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send prefix error to client", "err", err)
return
}
continue
}
log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)
// Ensure the user didn't request funds too recently
f.lock.Lock()
var (
fund bool
timeout time.Time
)
if timeout = f.timeouts[id]; time.Now().After(timeout) {
// User wasn't funded recently, create the funding transaction
amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID)
if err != nil {
f.lock.Unlock()
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send transaction creation error to client", "err", err)
return
}
continue
}
// Submit the transaction and mark as funded if successful
if err := f.client.SendTransaction(context.Background(), signed); err != nil {
f.lock.Unlock()
if err = sendError(wsconn, err); err != nil {
log.Warn("Failed to send transaction transmission error to client", "err", err)
return
}
continue
}
f.reqs = append(f.reqs, &request{
Avatar: avatar,
Account: address,
Time: time.Now(),
Tx: signed,
})
timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute
grace := timeout / 288 // 24h timeout => 5m grace
f.timeouts[id] = time.Now().Add(timeout - grace)
fund = true
}
f.lock.Unlock()
// Send an error if too frequent funding, otherwise a success
if !fund {
if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
log.Warn("Failed to send funding error to client", "err", err)
return
}
continue
}
if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
log.Warn("Failed to send funding success to client", "err", err)
return
}
select {
case f.update <- struct{}{}:
default:
}
}
}
// refresh attempts to retrieve the latest header from the chain and extract the
// associated faucet balance and nonce for connectivity caching.
func (f *faucet) refresh(head *types.Header) error {
// Ensure a state update does not run for too long
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// If no header was specified, use the current chain head
var err error
if head == nil {
if head, err = f.client.HeaderByNumber(ctx, nil); err != nil {
return err
}
}
// Retrieve the balance, nonce and gas price from the current head
var (
balance *big.Int
nonce uint64
price *big.Int
)
if balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if nonce, err = f.client.NonceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if price, err = f.client.SuggestGasPrice(ctx); err != nil {
return err
}
// Everything succeeded, update the cached stats and eject old requests
f.lock.Lock()
f.head, f.balance = head, balance
f.price, f.nonce = price, nonce
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
f.reqs = f.reqs[1:]
}
f.lock.Unlock()
return nil
}
// loop keeps waiting for interesting events and pushes them out to connected
// websockets.
func (f *faucet) loop() {
// Wait for chain events and push them to clients
heads := make(chan *types.Header, 16)
sub, err := f.client.SubscribeNewHead(context.Background(), heads)
if err != nil {
log.Crit("Failed to subscribe to head events", "err", err)
}
defer sub.Unsubscribe()
// Start a goroutine to update the state from head notifications in the background
update := make(chan *types.Header)
go func() {
for head := range update {
// New chain head arrived, query the current stats and stream to clients
timestamp := time.Unix(int64(head.Time), 0)
if time.Since(timestamp) > time.Hour {
log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
continue
}
if err := f.refresh(head); err != nil {
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
continue
}
// Faucet state retrieved, update locally and send to clients
f.lock.RLock()
log.Info("Updated faucet state", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp), "balance", f.balance, "nonce", f.nonce, "price", f.price)
balance := new(big.Int).Div(f.balance, ether)
peers := f.stack.Server().PeerCount()
for _, conn := range f.conns {
if err := send(conn, map[string]interface{}{
"funds": balance,
"funded": f.nonce,
"peers": peers,
"requests": f.reqs,
}, time.Second); err != nil {
log.Warn("Failed to send stats to client", "err", err)
conn.conn.Close()
continue
}
if err := send(conn, head, time.Second); err != nil {
log.Warn("Failed to send header to client", "err", err)
conn.conn.Close()
}
}
f.lock.RUnlock()
}
}()
// Wait for various events and assign them to the appropriate background threads
for {
select {
case head := <-heads:
// New head arrived, send it for state update if there's none running
select {
case update <- head:
default:
}
case <-f.update:
// Pending requests updated, stream to clients
f.lock.RLock()
for _, conn := range f.conns {
if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
log.Warn("Failed to send requests to client", "err", err)
conn.conn.Close()
}
}
f.lock.RUnlock()
}
}
}
// send transmits a data packet to the remote end of the websocket, also
// setting a write deadline to prevent waiting forever on the node.
func send(conn *wsConn, value interface{}, timeout time.Duration) error {
if timeout == 0 {
timeout = 60 * time.Second
}
conn.wlock.Lock()
defer conn.wlock.Unlock()
conn.conn.SetWriteDeadline(time.Now().Add(timeout))
return conn.conn.WriteJSON(value)
}
// sendError transmits an error to the remote end of the websocket, also setting
// the write deadline to 1 second to prevent waiting forever.
func sendError(conn *wsConn, err error) error {
return send(conn, map[string]string{"error": err.Error()}, time.Second)
}
// sendSuccess transmits a success message to the remote end of the websocket, also
// setting the write deadline to 1 second to prevent waiting forever.
func sendSuccess(conn *wsConn, msg string) error {
return send(conn, map[string]string{"success": msg}, time.Second)
}
// authTwitter tries to authenticate a faucet request using Twitter posts, returning
// the uniqueness identifier (user id/username), username, avatar URL and Ethereum address to fund on success.
func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
// Strip any query parameters from the tweet id and ensure it's numeric
tweetID := strings.Split(parts[len(parts)-1], "?")[0]
if !regexp.MustCompile("^[0-9]+$").MatchString(tweetID) {
return "", "", "", common.Address{}, errors.New("Invalid Tweet URL")
}
// Twitter's API isn't really friendly with direct links.
// It is restricted to 300 queries / 15 minutes with an app API key.
// Anything more would require read-only authorization from the users, which we want to avoid.
// If a Twitter bearer token is provided, use the API, selecting the version
// the user would prefer (currently there's a limit of 1 v2 app / developer
// but unlimited v1.1 apps).
switch {
case tokenV1 != "":
return authTwitterWithTokenV1(tweetID, tokenV1)
case tokenV2 != "":
return authTwitterWithTokenV2(tweetID, tokenV2)
}
// Twitter API token isn't provided so we just load the public posts
// and scrape them for the Ethereum address and profile URL. We need to load
// the mobile page though since the main page loads tweet contents via JS.
url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1)
res, err := http.Get(url)
if err != nil {
return "", "", "", common.Address{}, err
}
defer res.Body.Close()
// Resolve the username from the final redirect, no intermediate junk
parts = strings.Split(res.Request.URL.String(), "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]
body, err := io.ReadAll(res.Body)
if err != nil {
return "", "", "", common.Address{}, err
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string
if parts = regexp.MustCompile(`src="([^"]+twimg\.com/profile_images[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
avatar = parts[1]
}
return username + "@twitter", username, avatar, address, nil
}
// authTwitterWithTokenV1 tries to authenticate a faucet request using Twitter's v1
// API, returning the user id, username, avatar URL and Ethereum address to fund on
// success.
func authTwitterWithTokenV1(tweetID string, token string) (string, string, string, common.Address, error) {
// Query the tweet details from Twitter
url := fmt.Sprintf("https://api.twitter.com/1.1/statuses/show.json?id=%s", tweetID)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return "", "", "", common.Address{}, err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", "", "", common.Address{}, err
}
defer res.Body.Close()
var result struct {
Text string `json:"text"`
User struct {
ID string `json:"id_str"`
Username string `json:"screen_name"`
Avatar string `json:"profile_image_url"`
} `json:"user"`
}
err = json.NewDecoder(res.Body).Decode(&result)
if err != nil {
return "", "", "", common.Address{}, err
}
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Text))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return result.User.ID + "@twitter", result.User.Username, result.User.Avatar, address, nil
}
// authTwitterWithTokenV2 tries to authenticate a faucet request using Twitter's v2
// API, returning the user id, username, avatar URL and Ethereum address to fund on
// success.
func authTwitterWithTokenV2(tweetID string, token string) (string, string, string, common.Address, error) {
// Query the tweet details from Twitter
url := fmt.Sprintf("https://api.twitter.com/2/tweets/%s?expansions=author_id&user.fields=profile_image_url", tweetID)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return "", "", "", common.Address{}, err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", "", "", common.Address{}, err
}
defer res.Body.Close()
var result struct {
Data struct {
AuthorID string `json:"author_id"`
Text string `json:"text"`
} `json:"data"`
Includes struct {
Users []struct {
ID string `json:"id"`
Username string `json:"username"`
Avatar string `json:"profile_image_url"`
} `json:"users"`
} `json:"includes"`
}
err = json.NewDecoder(res.Body).Decode(&result)
if err != nil {
return "", "", "", common.Address{}, err
}
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Data.Text))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return result.Data.AuthorID + "@twitter", result.Includes.Users[0].Username, result.Includes.Users[0].Avatar, address, nil
}
// authFacebook tries to authenticate a faucet request using Facebook posts,
// returning the username, avatar URL and Ethereum address to fund on success.
func authFacebook(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(strings.Split(url, "?")[0], "/")
if parts[len(parts)-1] == "" {
parts = parts[0 : len(parts)-1]
}
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
}
username := parts[len(parts)-3]
// Facebook's Graph API isn't really friendly with direct links. Still, we don't
// want to ask for read permissions from users, so we just load the public posts and
// scrape them for the Ethereum address and profile URL.
//
// Facebook recently changed their desktop webpage to use AJAX for loading post
// content, so switch over to the mobile site for now. Will probably end up having
// to use the API eventually.
crawl := strings.Replace(url, "www.facebook.com", "m.facebook.com", 1)
res, err := http.Get(crawl)
if err != nil {
return "", "", common.Address{}, err
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return "", "", common.Address{}, err
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund. Please check the post URL and verify that it can be viewed publicly.")
}
var avatar string
if parts = regexp.MustCompile(`src="([^"]+fbcdn\.net[^"]+)"`).FindStringSubmatch(string(body)); len(parts) == 2 {
avatar = parts[1]
}
return username + "@facebook", avatar, address, nil
}
// authNoAuth tries to interpret a faucet request as a plain Ethereum address,
// without actually performing any remote authentication. This mode is prone to
// Byzantine attack, so only ever use for truly private networks.
func authNoAuth(url string) (string, string, common.Address, error) {
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return address.Hex() + "@noauth", "", address, nil
}
// getGenesis returns a genesis based on input args
func getGenesis(genesisFlag string, goerliFlag bool, sepoliaFlag bool) (*core.Genesis, error) {
switch {
case genesisFlag != "":
var genesis core.Genesis
err := common.LoadJSON(genesisFlag, &genesis)
return &genesis, err
case goerliFlag:
return core.DefaultGoerliGenesisBlock(), nil
case sepoliaFlag:
return core.DefaultSepoliaGenesisBlock(), nil
default:
return nil, errors.New("no genesis flag provided")
}
}

@@ -1,233 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>{{.Network}}: Authenticated Faucet</title>
<link href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet" />
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-noty/2.4.1/packaged/jquery.noty.packaged.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/moment.js/2.18.0/moment.min.js"></script>
<style>
.vertical-center {
min-height: 100%;
min-height: 100vh;
display: flex;
align-items: center;
}
.progress {
position: relative;
}
.progress span {
position: absolute;
display: block;
width: 100%;
color: white;
}
pre {
padding: 6px;
margin: 0;
}
</style>
</head>
<body>
<div class="vertical-center">
<div class="container">
<div class="row" style="margin-bottom: 16px;">
<div class="col-lg-12">
<h1 style="text-align: center;"><i class="fa fa-bath" aria-hidden="true"></i> {{.Network}} Authenticated Faucet</h1>
</div>
</div>
<div class="row">
<div class="col-lg-8 col-lg-offset-2">
<div class="input-group">
<input id="url" name="url" type="text" class="form-control" placeholder="Social network URL containing your Ethereum address..."/>
<span class="input-group-btn">
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
<li><a style="text-align: center;" onclick="tier={{$idx}}; {{if $.Recaptcha}}grecaptcha.execute(){{else}}submit({{$idx}}){{end}}">{{$amount}} / {{index $.Periods $idx}}</a></li>{{end}}
</ul>
</span>
</div>{{if .Recaptcha}}
<div class="g-recaptcha" data-sitekey="{{.Recaptcha}}" data-callback="submit" data-size="invisible"></div>{{end}}
</div>
</div>
<div class="row" style="margin-top: 32px;">
<div class="col-lg-6 col-lg-offset-3">
<div class="panel panel-small panel-default">
<div class="panel-body" style="padding: 0; overflow: auto; max-height: 300px;">
<table id="requests" class="table table-condensed" style="margin: 0;"></table>
</div>
<div class="panel-footer">
<table style="width: 100%"><tr>
<td style="text-align: center;"><i class="fa fa-rss" aria-hidden="true"></i> <span id="peers"></span> peers</td>
<td style="text-align: center;"><i class="fa fa-database" aria-hidden="true"></i> <span id="block"></span> blocks</td>
<td style="text-align: center;"><i class="fa fa-heartbeat" aria-hidden="true"></i> <span id="funds"></span> Ethers</td>
<td style="text-align: center;"><i class="fa fa-university" aria-hidden="true"></i> <span id="funded"></span> funded</td>
</tr></table>
</div>
</div>
</div>
</div>
<div class="row" style="margin-top: 32px;">
<div class="col-lg-12">
<h3>How does this work?</h3>
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to common 3rd party social network accounts. Anyone having a Twitter or Facebook account may request funds within the permitted limits.</p>
<dl class="dl-horizontal">
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-twitter" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Twitter, make a <a href="https://twitter.com/intent/tweet?text=Requesting%20faucet%20funds%20into%200x0000000000000000000000000000000000000000%20on%20the%20%23{{.Network}}%20%23Ethereum%20test%20network." target="_about:blank">tweet</a> with your Ethereum address pasted into the contents (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://support.twitter.com/articles/80586" target="_about:blank">tweets URL</a> into the above input box and fire away!</dd>
<dt style="width: auto; margin-left: 40px;"><i class="fa fa-facebook" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds via Facebook, publish a new <strong>public</strong> post with your Ethereum address embedded into the content (surrounding text doesn't matter).<br/>Copy-paste the <a href="https://www.facebook.com/help/community/question/?id=282662498552845" target="_about:blank">posts URL</a> into the above input box and fire away!</dd>
{{if .NoAuth}}
<dt class="text-danger" style="width: auto; margin-left: 40px;"><i class="fa fa-unlock-alt" aria-hidden="true" style="font-size: 36px;"></i></dt>
<dd class="text-danger" style="margin-left: 88px; margin-bottom: 10px;"></i> To request funds <strong>without authentication</strong>, simply copy-paste your Ethereum address into the above input box (surrounding text doesn't matter) and fire away.<br/>This mode is susceptible to Byzantine attacks. Only use for debugging or private networks!</dd>
{{end}}
</dl>
<p>You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
</div>
</div>
</div>
</div>
<script>
// Global variables to hold the current status of the faucet
var attempt = 0;
var server;
var tier = 0;
var requests = [];
// Define a function that creates closures to drop old requests
var dropper = function(hash) {
return function() {
for (var i=0; i<requests.length; i++) {
if (requests[i].tx.hash == hash) {
requests.splice(i, 1);
break;
}
}
}
};
// Define the function that submits a funding request URL to the server
var submit = function({{if .Recaptcha}}captcha{{end}}) {
server.send(JSON.stringify({url: $("#url")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
grecaptcha.reset();{{end}}
};
// Define a method to reconnect upon server loss
var reconnect = function() {
server = new WebSocket(((window.location.protocol === "https:") ? "wss://" : "ws://") + window.location.host + "/api");
server.onmessage = function(event) {
var msg = JSON.parse(event.data);
if (msg === null) {
return;
}
if (msg.funds !== undefined) {
$("#funds").text(msg.funds);
}
if (msg.funded !== undefined) {
$("#funded").text(msg.funded);
}
if (msg.peers !== undefined) {
$("#peers").text(msg.peers);
}
if (msg.number !== undefined) {
$("#block").text(parseInt(msg.number, 16));
}
if (msg.error !== undefined) {
noty({layout: 'topCenter', text: msg.error, type: 'error', timeout: 5000, progressBar: true});
}
if (msg.success !== undefined) {
noty({layout: 'topCenter', text: msg.success, type: 'success', timeout: 5000, progressBar: true});
}
if (msg.requests !== undefined && msg.requests !== null) {
// Mark all previous requests missing as done
for (var i=0; i<requests.length; i++) {
if (msg.requests.length > 0 && msg.requests[0].tx.hash == requests[i].tx.hash) {
break;
}
if (requests[i].time != "") {
requests[i].time = "";
setTimeout(dropper(requests[i].tx.hash), 3000);
}
}
// Append any new requests into our local collection
var common = -1;
if (requests.length > 0) {
for (var i=0; i<msg.requests.length; i++) {
if (requests[requests.length-1].tx.hash == msg.requests[i].tx.hash) {
common = i;
break;
}
}
}
for (var i=common+1; i<msg.requests.length; i++) {
requests.push(msg.requests[i]);
}
// Iterate over our entire local collection and re-render the funding table
var content = "";
for (var i=requests.length-1; i >= 0; i--) {
var done = requests[i].time == "";
var elapsed = moment().unix()-moment(requests[i].time).unix();
content += "<tr id='" + requests[i].tx.hash + "'>";
content += " <td><div style=\"background: url('" + requests[i].avatar + "'); background-size: cover; width:32px; height: 32px; border-radius: 4px;\"></div></td>";
content += " <td><pre>" + requests[i].account + "</pre></td>";
content += " <td style=\"width: 100%; text-align: center; vertical-align: middle;\">";
if (done) {
content += " funded";
} else {
content += " <span id='time-" + i + "' class='timer'>" + moment.duration(-elapsed, 'seconds').humanize(true) + "</span>";
}
content += " <div class='progress' style='height: 4px; margin: 0;'>";
if (done) {
content += " <div class='progress-bar progress-bar-success' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
} else if (elapsed > 30) {
content += " <div class='progress-bar progress-bar-danger progress-bar-striped active' role='progressbar' aria-valuenow='30' style='width:100%;'></div>";
} else {
content += " <div class='progress-bar progress-bar-striped active' role='progressbar' aria-valuenow='" + elapsed + "' style='width:" + (elapsed * 100 / 30) + "%;'></div>";
}
content += " </div>";
content += " </td>";
content += "</tr>";
}
$("#requests").html("<tbody>" + content + "</tbody>");
}
}
server.onclose = function() { setTimeout(reconnect, 3000); };
}
// Start a UI updater to push the progress bars forward until they are done
setInterval(function() {
$('.progress-bar').each(function() {
var progress = Number($(this).attr('aria-valuenow')) + 1;
if (progress < 30) {
$(this).attr('aria-valuenow', progress);
$(this).css('width', (progress * 100 / 30) + '%');
} else if (progress == 30) {
$(this).css('width', '100%');
$(this).addClass("progress-bar-danger");
}
})
$('.timer').each(function() {
var index = Number($(this).attr('id').substring(5));
$(this).html(moment.duration(moment(requests[index].time).unix()-moment().unix(), 'seconds').humanize(true));
})
}, 1000);
// Establish a websocket connection to the API server
reconnect();
</script>{{if .Recaptcha}}
<script src="https://www.google.com/recaptcha/api.js" async defer></script>{{end}}
</body>
</html>

@@ -1,46 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"testing"
"github.com/ethereum/go-ethereum/common"
)
func TestFacebook(t *testing.T) {
t.Parallel()
// TODO: Remove facebook auth or implement facebook api, which seems to require an API key
t.Skipf("The facebook access is flaky, needs to be reimplemented or removed")
for _, tt := range []struct {
url string
want common.Address
}{
{
"https://www.facebook.com/fooz.gazonk/posts/2837228539847129",
common.HexToAddress("0xDeadDeaDDeaDbEefbEeFbEEfBeeFBeefBeeFbEEF"),
},
} {
_, _, gotAddress, err := authFacebook(tt.url)
if err != nil {
t.Fatal(err)
}
if gotAddress != tt.want {
t.Fatalf("address wrong, have %v want %v", gotAddress, tt.want)
}
}
}

@@ -35,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
@@ -222,7 +221,7 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
}
catalyst.RegisterSimulatedBeaconAPIs(stack, simBeacon)
stack.RegisterLifecycle(simBeacon)
} else if cfg.Eth.SyncMode != downloader.LightSync {
} else {
err := catalyst.Register(stack, eth)
if err != nil {
utils.Fatalf("failed to register catalyst service: %v", err)

@@ -1,206 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rpc"
)
type gethrpc struct {
name string
rpc *rpc.Client
geth *testgeth
nodeInfo *p2p.NodeInfo
}
func (g *gethrpc) killAndWait() {
g.geth.Kill()
g.geth.WaitExit()
}
func (g *gethrpc) callRPC(result interface{}, method string, args ...interface{}) {
if err := g.rpc.Call(&result, method, args...); err != nil {
g.geth.Fatalf("callRPC %v: %v", method, err)
}
}
func (g *gethrpc) addPeer(peer *gethrpc) {
g.geth.Logf("%v.addPeer(%v)", g.name, peer.name)
enode := peer.getNodeInfo().Enode
peerCh := make(chan *p2p.PeerEvent)
sub, err := g.rpc.Subscribe(context.Background(), "admin", peerCh, "peerEvents")
if err != nil {
g.geth.Fatalf("subscribe %v: %v", g.name, err)
}
defer sub.Unsubscribe()
g.callRPC(nil, "admin_addPeer", enode)
dur := 14 * time.Second
timeout := time.After(dur)
select {
case ev := <-peerCh:
g.geth.Logf("%v received event: type=%v, peer=%v", g.name, ev.Type, ev.Peer)
case err := <-sub.Err():
g.geth.Fatalf("%v sub error: %v", g.name, err)
case <-timeout:
g.geth.Error("timeout adding peer after", dur)
}
}
// Use this function instead of `g.nodeInfo` directly
func (g *gethrpc) getNodeInfo() *p2p.NodeInfo {
if g.nodeInfo != nil {
return g.nodeInfo
}
g.nodeInfo = &p2p.NodeInfo{}
g.callRPC(&g.nodeInfo, "admin_nodeInfo")
return g.nodeInfo
}
// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into
// account the set data folders as well as the designated platform we're currently
// running on.
func ipcEndpoint(ipcPath, datadir string) string {
// On windows we can only use plain top-level pipes
if runtime.GOOS == "windows" {
if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
return ipcPath
}
return `\\.\pipe\` + ipcPath
}
// Resolve names into the data directory full paths otherwise
if filepath.Base(ipcPath) == ipcPath {
if datadir == "" {
return filepath.Join(os.TempDir(), ipcPath)
}
return filepath.Join(datadir, ipcPath)
}
return ipcPath
}
// nextIPC ensures that each ipc pipe gets a unique name.
// On linux, it works well to use ipc pipes all over the filesystem (in datadirs),
// but Windows requires pipes to sit in "\\.\pipe\". Therefore, to run several
// nodes simultaneously, we need to distinguish between them, which we do by
// the pipe filename instead of folder.
var nextIPC atomic.Uint32
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
ipcName := fmt.Sprintf("geth-%d.ipc", nextIPC.Add(1))
args = append([]string{"--networkid=42", "--port=0", "--authrpc.port", "0", "--ipcpath", ipcName}, args...)
t.Logf("Starting %v with rpc: %v", name, args)
g := &gethrpc{
name: name,
geth: runGeth(t, args...),
}
ipcpath := ipcEndpoint(ipcName, g.geth.Datadir)
// We can't know exactly how long geth will take to start, so we try 10
// times over a 5 second period.
var err error
for i := 0; i < 10; i++ {
time.Sleep(500 * time.Millisecond)
if g.rpc, err = rpc.Dial(ipcpath); err == nil {
return g
}
}
t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err)
return nil
}
func initGeth(t *testing.T) string {
args := []string{"--networkid=42", "init", "./testdata/clique.json"}
t.Logf("Initializing geth: %v ", args)
g := runGeth(t, args...)
datadir := g.Datadir
g.WaitExit()
return datadir
}
func startLightServer(t *testing.T) *gethrpc {
datadir := initGeth(t)
t.Logf("Importing keys to geth")
runGeth(t, "account", "import", "--datadir", datadir, "--password", "./testdata/password.txt", "--lightkdf", "./testdata/key.prv").WaitExit()
account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--miner.etherbase=0x02f0d131f1f97aef08aec6e3291b957d9efe7105", "--mine", "--light.serve=100", "--light.maxpeers=1", "--discv4=false", "--nat=extip:127.0.0.1", "--verbosity=4")
return server
}
func startClient(t *testing.T, name string) *gethrpc {
datadir := initGeth(t)
return startGethWithIpc(t, name, "--datadir", datadir, "--discv4=false", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4")
}
func TestPriorityClient(t *testing.T) {
t.Parallel()
lightServer := startLightServer(t)
defer lightServer.killAndWait()
// Start client and add lightServer as peer
freeCli := startClient(t, "freeCli")
defer freeCli.killAndWait()
freeCli.addPeer(lightServer)
var peers []*p2p.PeerInfo
freeCli.callRPC(&peers, "admin_peers")
if len(peers) != 1 {
t.Errorf("Expected: # of client peers == 1, actual: %v", len(peers))
return
}
// Set up priority client, get its nodeID, increase its balance on the lightServer
prioCli := startClient(t, "prioCli")
defer prioCli.killAndWait()
// 3_000_000_000 once we move to Go 1.13
tokens := uint64(3000000000)
lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens)
prioCli.addPeer(lightServer)
// Check if priority client is actually syncing and the regular client got kicked out
prioCli.callRPC(&peers, "admin_peers")
if len(peers) != 1 {
t.Errorf("Expected: # of prio peers == 1, actual: %v", len(peers))
}
nodes := map[string]*gethrpc{
lightServer.getNodeInfo().ID: lightServer,
freeCli.getNodeInfo().ID: freeCli,
prioCli.getNodeInfo().ID: prioCli,
}
time.Sleep(1 * time.Second)
lightServer.callRPC(&peers, "admin_peers")
peersWithNames := make(map[string]string)
for _, p := range peers {
peersWithNames[nodes[p.ID].name] = p.ID
}
if _, freeClientFound := peersWithNames[freeCli.name]; freeClientFound {
t.Error("client is still a peer of lightServer", peersWithNames)
}
if _, prioClientFound := peersWithNames[prioCli.name]; !prioClientFound {
t.Error("prio client is not among lightServer peers", peersWithNames)
}
}

@@ -62,7 +62,7 @@ var (
utils.MinFreeDiskSpaceFlag,
utils.KeyStoreDirFlag,
utils.ExternalSignerFlag,
utils.NoUSBFlag,
utils.NoUSBFlag, // deprecated
utils.USBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideCancun,
@@ -87,24 +87,24 @@ var (
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
utils.SnapshotFlag,
utils.TxLookupLimitFlag,
utils.TxLookupLimitFlag, // deprecated
utils.TransactionHistoryFlag,
utils.StateHistoryFlag,
utils.LightServeFlag,
utils.LightIngressFlag,
utils.LightEgressFlag,
utils.LightMaxPeersFlag,
utils.LightNoPruneFlag,
utils.LightServeFlag, // deprecated
utils.LightIngressFlag, // deprecated
utils.LightEgressFlag, // deprecated
utils.LightMaxPeersFlag, // deprecated
utils.LightNoPruneFlag, // deprecated
utils.LightKDFFlag,
utils.LightNoSyncServeFlag,
utils.LightNoSyncServeFlag, // deprecated
utils.EthRequiredBlocksFlag,
utils.LegacyWhitelistFlag,
utils.LegacyWhitelistFlag, // deprecated
utils.BloomFilterSizeFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
utils.CacheTrieFlag,
utils.CacheTrieJournalFlag,
utils.CacheTrieRejournalFlag,
utils.CacheTrieJournalFlag, // deprecated
utils.CacheTrieRejournalFlag, // deprecated
utils.CacheGCFlag,
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
@@ -127,7 +127,7 @@ var (
utils.NoDiscoverFlag,
utils.DiscoveryV4Flag,
utils.DiscoveryV5Flag,
utils.LegacyDiscoveryV5Flag,
utils.LegacyDiscoveryV5Flag, // deprecated
utils.NetrestrictFlag,
utils.NodeKeyFileFlag,
utils.NodeKeyHexFlag,
@@ -306,7 +306,7 @@ func prepare(ctx *cli.Context) {
log.Info("Starting Geth on Ethereum mainnet...")
}
// If we're a full node on mainnet without --cache specified, bump default cache allowance
if ctx.String(utils.SyncModeFlag.Name) != "light" && !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
if !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
// Make sure we're not on any supported preconfigured testnet either
if !ctx.IsSet(utils.HoleskyFlag.Name) &&
!ctx.IsSet(utils.SepoliaFlag.Name) &&
@@ -317,11 +317,6 @@ func prepare(ctx *cli.Context) {
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096))
}
}
// If we're running a light client on any network, drop the cache to some meaningfully low amount
if ctx.String(utils.SyncModeFlag.Name) == "light" && !ctx.IsSet(utils.CacheFlag.Name) {
log.Info("Dropping default light client cache", "provided", ctx.Int(utils.CacheFlag.Name), "updated", 128)
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(128))
}
// Start metrics export if enabled
utils.SetupMetrics(ctx)

@@ -55,6 +55,15 @@ func TestMain(m *testing.M) {
os.Exit(m.Run())
}
func initGeth(t *testing.T) string {
args := []string{"--networkid=42", "init", "./testdata/clique.json"}
t.Logf("Initializing geth: %v ", args)
g := runGeth(t, args...)
datadir := g.Datadir
g.WaitExit()
return datadir
}
// spawns geth with the given command line args. If the args don't set --datadir, the
// child geth instance gets a temporary data directory.
func runGeth(t *testing.T, args ...string) *testgeth {

@@ -57,7 +57,6 @@ import (
"github.com/ethereum/go-ethereum/graphql"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/exp"
@@ -255,7 +254,7 @@ var (
}
SyncModeFlag = &flags.TextMarshalerFlag{
Name: "syncmode",
Usage: `Blockchain sync mode ("snap", "full" or "light")`,
Usage: `Blockchain sync mode ("snap" or "full")`,
Value: &defaultSyncMode,
Category: flags.StateCategory,
}
@@ -282,41 +281,6 @@ var (
Value: ethconfig.Defaults.TransactionHistory,
Category: flags.StateCategory,
}
// Light server and client settings
LightServeFlag = &cli.IntFlag{
Name: "light.serve",
Usage: "Maximum percentage of time allowed for serving LES requests (multi-threaded processing allows values over 100)",
Value: ethconfig.Defaults.LightServ,
Category: flags.LightCategory,
}
LightIngressFlag = &cli.IntFlag{
Name: "light.ingress",
Usage: "Incoming bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
Value: ethconfig.Defaults.LightIngress,
Category: flags.LightCategory,
}
LightEgressFlag = &cli.IntFlag{
Name: "light.egress",
Usage: "Outgoing bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)",
Value: ethconfig.Defaults.LightEgress,
Category: flags.LightCategory,
}
LightMaxPeersFlag = &cli.IntFlag{
Name: "light.maxpeers",
Usage: "Maximum number of light clients to serve, or light servers to attach to",
Value: ethconfig.Defaults.LightPeers,
Category: flags.LightCategory,
}
LightNoPruneFlag = &cli.BoolFlag{
Name: "light.nopruning",
Usage: "Disable ancient light chain data pruning",
Category: flags.LightCategory,
}
LightNoSyncServeFlag = &cli.BoolFlag{
Name: "light.nosyncserve",
Usage: "Enables serving light clients before syncing",
Category: flags.LightCategory,
}
// Transaction pool settings
TxPoolLocalsFlag = &cli.StringFlag{
Name: "txpool.locals",
@@ -1224,25 +1188,25 @@ func setIPC(ctx *cli.Context, cfg *node.Config) {
}
}
// setLes configures the les server and ultra light client settings from the command line flags.
// setLes shows the deprecation warnings for LES flags.
func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
if ctx.IsSet(LightServeFlag.Name) {
cfg.LightServ = ctx.Int(LightServeFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightServeFlag.Name)
}
if ctx.IsSet(LightIngressFlag.Name) {
cfg.LightIngress = ctx.Int(LightIngressFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightIngressFlag.Name)
}
if ctx.IsSet(LightEgressFlag.Name) {
cfg.LightEgress = ctx.Int(LightEgressFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightEgressFlag.Name)
}
if ctx.IsSet(LightMaxPeersFlag.Name) {
cfg.LightPeers = ctx.Int(LightMaxPeersFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightMaxPeersFlag.Name)
}
if ctx.IsSet(LightNoPruneFlag.Name) {
cfg.LightNoPrune = ctx.Bool(LightNoPruneFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoPruneFlag.Name)
}
if ctx.IsSet(LightNoSyncServeFlag.Name) {
cfg.LightNoSyncServe = ctx.Bool(LightNoSyncServeFlag.Name)
log.Warn("The light server has been deprecated, please remove this flag", "flag", LightNoSyncServeFlag.Name)
}
}
@@ -1340,58 +1304,24 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
setBootstrapNodes(ctx, cfg)
setBootstrapNodesV5(ctx, cfg)
lightClient := ctx.String(SyncModeFlag.Name) == "light"
lightServer := (ctx.Int(LightServeFlag.Name) != 0)
lightPeers := ctx.Int(LightMaxPeersFlag.Name)
if lightClient && !ctx.IsSet(LightMaxPeersFlag.Name) {
// dynamic default - for clients we use 1/10th of the default for servers
lightPeers /= 10
}
if ctx.IsSet(MaxPeersFlag.Name) {
cfg.MaxPeers = ctx.Int(MaxPeersFlag.Name)
if lightServer && !ctx.IsSet(LightMaxPeersFlag.Name) {
cfg.MaxPeers += lightPeers
}
} else {
if lightServer {
cfg.MaxPeers += lightPeers
}
if lightClient && ctx.IsSet(LightMaxPeersFlag.Name) && cfg.MaxPeers < lightPeers {
cfg.MaxPeers = lightPeers
}
}
if !(lightClient || lightServer) {
lightPeers = 0
}
ethPeers := cfg.MaxPeers - lightPeers
if lightClient {
ethPeers = 0
}
log.Info("Maximum peer count", "ETH", ethPeers, "LES", lightPeers, "total", cfg.MaxPeers)
ethPeers := cfg.MaxPeers
log.Info("Maximum peer count", "ETH", ethPeers, "total", cfg.MaxPeers)
if ctx.IsSet(MaxPendingPeersFlag.Name) {
cfg.MaxPendingPeers = ctx.Int(MaxPendingPeersFlag.Name)
}
if ctx.IsSet(NoDiscoverFlag.Name) || lightClient {
if ctx.IsSet(NoDiscoverFlag.Name) {
cfg.NoDiscovery = true
}
// Disallow --nodiscover when used in conjunction with light mode.
if (lightClient || lightServer) && ctx.Bool(NoDiscoverFlag.Name) {
Fatalf("Cannot use --" + NoDiscoverFlag.Name + " in light client or light server mode")
}
CheckExclusive(ctx, DiscoveryV4Flag, NoDiscoverFlag)
CheckExclusive(ctx, DiscoveryV5Flag, NoDiscoverFlag)
cfg.DiscoveryV4 = ctx.Bool(DiscoveryV4Flag.Name)
cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name)
// If we're running a light client or server, force enable the v5 peer discovery.
if lightClient || lightServer {
cfg.DiscoveryV5 = true
}
if netrestrict := ctx.String(NetrestrictFlag.Name); netrestrict != "" {
list, err := netutil.ParseNetlist(netrestrict)
if err != nil {
@@ -1496,12 +1426,7 @@ func SetDataDir(ctx *cli.Context, cfg *node.Config) {
}
}
func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) {
// If we are running the light client, apply a different set of
// settings for the gas oracle.
if light {
*cfg = ethconfig.LightClientGPO
}
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
if ctx.IsSet(GpoBlocksFlag.Name) {
cfg.Blocks = ctx.Int(GpoBlocksFlag.Name)
}
@@ -1650,12 +1575,11 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, GoerliFlag, SepoliaFlag, HoleskyFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
// Set configurations from CLI flags
setEtherbase(ctx, cfg)
setGPO(ctx, &cfg.GPO, ctx.String(SyncModeFlag.Name) == "light")
setGPO(ctx, &cfg.GPO)
setTxPool(ctx, &cfg.TxPool)
setMiner(ctx, &cfg.Miner)
setRequiredBlocks(ctx, cfg)
@@ -1734,9 +1658,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.TransactionHistory = 0
log.Warn("Disabled transaction unindexing for archive node")
}
if ctx.IsSet(LightServeFlag.Name) && cfg.TransactionHistory != 0 {
log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited")
}
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
}
@@ -1913,9 +1834,6 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
return // already set through flags/config
}
protocol := "all"
if cfg.SyncMode == downloader.LightSync {
protocol = "les"
}
if url := params.KnownDNSNetwork(genesis, protocol); url != "" {
cfg.EthDiscoveryURLs = []string{url}
cfg.SnapDiscoveryURLs = cfg.EthDiscoveryURLs
@@ -1923,27 +1841,12 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
}
// RegisterEthService adds an Ethereum client to the stack.
// The second return value is the full node instance, which may be nil if the
// node is running as a light client.
// The second return value is the full node instance.
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
if cfg.SyncMode == downloader.LightSync {
backend, err := les.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
return backend.ApiBackend, nil
}
backend, err := eth.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
if cfg.LightServ > 0 {
_, err := les.NewLesServer(stack, backend, cfg)
if err != nil {
Fatalf("Failed to create the LES server: %v", err)
}
}
stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
return backend.APIBackend, backend
}
@@ -1965,13 +1868,12 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst
// RegisterFilterAPI adds the eth log filtering RPC API to the node.
func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconfig.Config) *filters.FilterSystem {
isLightClient := ethcfg.SyncMode == downloader.LightSync
filterSystem := filters.NewFilterSystem(backend, filters.Config{
LogCacheSize: ethcfg.FilterLogCacheSize,
})
stack.RegisterAPIs([]rpc.API{{
Namespace: "eth",
Service: filters.NewFilterAPI(filterSystem, isLightClient),
Service: filters.NewFilterAPI(filterSystem, false),
}})
return filterSystem
}

@@ -39,6 +39,12 @@ var DeprecatedFlags = []cli.Flag{
CacheTrieRejournalFlag,
LegacyDiscoveryV5Flag,
TxLookupLimitFlag,
LightServeFlag,
LightIngressFlag,
LightEgressFlag,
LightMaxPeersFlag,
LightNoPruneFlag,
LightNoSyncServeFlag,
}
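// The LES flags above remain registered so existing command lines still parse;
// setLes in cmd/utils/flags.go now only emits a deprecation warning for each
// of them instead of configuring a light server.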
var (
@@ -77,6 +83,41 @@ var (
Value: ethconfig.Defaults.TransactionHistory,
Category: flags.DeprecatedCategory,
}
// Light server and client settings, Deprecated November 2023
LightServeFlag = &cli.IntFlag{
Name: "light.serve",
Usage: "Maximum percentage of time allowed for serving LES requests (deprecated)",
Value: ethconfig.Defaults.LightServ,
Category: flags.LightCategory,
}
LightIngressFlag = &cli.IntFlag{
Name: "light.ingress",
Usage: "Incoming bandwidth limit for serving light clients (deprecated)",
Value: ethconfig.Defaults.LightIngress,
Category: flags.LightCategory,
}
LightEgressFlag = &cli.IntFlag{
Name: "light.egress",
Usage: "Outgoing bandwidth limit for serving light clients (deprecated)",
Value: ethconfig.Defaults.LightEgress,
Category: flags.LightCategory,
}
LightMaxPeersFlag = &cli.IntFlag{
Name: "light.maxpeers",
Usage: "Maximum number of light clients to serve, or light servers to attach to (deprecated)",
Value: ethconfig.Defaults.LightPeers,
Category: flags.LightCategory,
}
LightNoPruneFlag = &cli.BoolFlag{
Name: "light.nopruning",
Usage: "Disable ancient light chain data pruning (deprecated)",
Category: flags.LightCategory,
}
LightNoSyncServeFlag = &cli.BoolFlag{
Name: "light.nosyncserve",
Usage: "Enables serving light clients before syncing (deprecated)",
Category: flags.LightCategory,
}
)
// showDeprecated displays deprecated flags that will be soon removed from the codebase.

@@ -108,7 +108,7 @@ type Ethereum struct {
func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Ensure configuration values are compatible and sane
if config.SyncMode == downloader.LightSync {
return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum")
return nil, errors.New("can't run eth.Ethereum in light sync mode, light mode has been deprecated")
}
if !config.SyncMode.IsValid() {
return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode)

@@ -46,16 +46,6 @@ var FullNodeGPO = gasprice.Config{
IgnorePrice: gasprice.DefaultIgnorePrice,
}
// LightClientGPO contains default gasprice oracle settings for light client.
var LightClientGPO = gasprice.Config{
Blocks: 2,
Percentile: 60,
MaxHeaderHistory: 300,
MaxBlockHistory: 5,
MaxPrice: gasprice.DefaultMaxPrice,
IgnorePrice: gasprice.DefaultIgnorePrice,
}
// Defaults contains default settings for use on the Ethereum main net.
var Defaults = Config{
SyncMode: downloader.SnapSync,

@@ -38,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/core/types"
ethproto "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
@@ -486,7 +485,7 @@ func (s *Service) login(conn *connWrapper) error {
if info := infos.Protocols["eth"]; info != nil {
network = fmt.Sprintf("%d", info.(*ethproto.NodeInfo).Network)
} else {
network = fmt.Sprintf("%d", infos.Protocols["les"].(*les.NodeInfo).Network)
return errors.New("no eth protocol available")
}
auth := &authMsg{
ID: s.node,

@@ -1,349 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"errors"
"fmt"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
vfs "github.com/ethereum/go-ethereum/les/vflux/server"
"github.com/ethereum/go-ethereum/p2p/enode"
)
var errUnknownBenchmarkType = errors.New("unknown benchmark type")
// LightServerAPI provides an API to access the LES light server.
type LightServerAPI struct {
server *LesServer
defaultPosFactors, defaultNegFactors vfs.PriceFactors
}
// NewLightServerAPI creates a new LES light server API.
func NewLightServerAPI(server *LesServer) *LightServerAPI {
return &LightServerAPI{
server: server,
defaultPosFactors: defaultPosFactors,
defaultNegFactors: defaultNegFactors,
}
}
// parseNode parses either an enode address or a raw hex node id
func parseNode(node string) (enode.ID, error) {
if id, err := enode.ParseID(node); err == nil {
return id, nil
}
if node, err := enode.Parse(enode.ValidSchemes, node); err == nil {
return node.ID(), nil
} else {
return enode.ID{}, err
}
}
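// Example (illustrative value, not a real node): the raw hex form below is
// handled by enode.ParseID, while full enode:// URLs go through enode.Parse.
//
//	id, err := parseNode("a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7")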
// ServerInfo returns global server parameters
func (api *LightServerAPI) ServerInfo() map[string]interface{} {
res := make(map[string]interface{})
res["minimumCapacity"] = api.server.minCapacity
res["maximumCapacity"] = api.server.maxCapacity
_, res["totalCapacity"] = api.server.clientPool.Limits()
_, res["totalConnectedCapacity"] = api.server.clientPool.Active()
res["priorityConnectedCapacity"] = 0 //TODO connect when token sale module is added
return res
}
// ClientInfo returns information about the clients listed in the ids list,
// or about all connected clients if the list is empty
func (api *LightServerAPI) ClientInfo(nodes []string) map[enode.ID]map[string]interface{} {
var ids []enode.ID
for _, node := range nodes {
if id, err := parseNode(node); err == nil {
ids = append(ids, id)
}
}
res := make(map[enode.ID]map[string]interface{})
if len(ids) == 0 {
ids = api.server.peers.ids()
}
for _, id := range ids {
if peer := api.server.peers.peer(id); peer != nil {
res[id] = api.clientInfo(peer, peer.balance)
} else {
api.server.clientPool.BalanceOperation(id, "", func(balance vfs.AtomicBalanceOperator) {
res[id] = api.clientInfo(nil, balance)
})
}
}
return res
}
// PriorityClientInfo returns information about clients with a positive balance
// in the given ID range (stop excluded). If stop is the zero ID, iteration stops
// only at the end of the ID space. maxCount limits the number of results returned.
// If the maxCount limit is hit but more potential results exist, the ID of the
// next potential result is included in the map with an empty structure assigned
// to it, so it can be used as the start of the next query.
func (api *LightServerAPI) PriorityClientInfo(start, stop enode.ID, maxCount int) map[enode.ID]map[string]interface{} {
res := make(map[enode.ID]map[string]interface{})
ids := api.server.clientPool.GetPosBalanceIDs(start, stop, maxCount+1)
if len(ids) > maxCount {
res[ids[maxCount]] = make(map[string]interface{})
ids = ids[:maxCount]
}
for _, id := range ids {
if peer := api.server.peers.peer(id); peer != nil {
res[id] = api.clientInfo(peer, peer.balance)
} else {
api.server.clientPool.BalanceOperation(id, "", func(balance vfs.AtomicBalanceOperator) {
res[id] = api.clientInfo(nil, balance)
})
}
}
return res
}
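// Sketch: consuming the pagination contract above from an RPC client. The
// entry carrying an empty map is the cursor for the next page. Assumes
// go-ethereum's rpc package (not imported in this file) and the standard
// les_ method namespace.
func listPriorityClients(server *rpc.Client, maxCount int) (map[enode.ID]map[string]interface{}, error) {
	all := make(map[enode.ID]map[string]interface{})
	var start, stop enode.ID // zero stop iterates to the end of the ID space
	for {
		var page map[enode.ID]map[string]interface{}
		if err := server.Call(&page, "les_priorityClientInfo", start, stop, maxCount); err != nil {
			return nil, err
		}
		var next *enode.ID
		for id, info := range page {
			if len(info) == 0 { // empty structure: ID of the next potential result
				cursor := id
				next = &cursor
				continue
			}
			all[id] = info
		}
		if next == nil {
			return all, nil
		}
		start = *next
	}
}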
// clientInfo creates a client info data structure
func (api *LightServerAPI) clientInfo(peer *clientPeer, balance vfs.ReadOnlyBalance) map[string]interface{} {
info := make(map[string]interface{})
pb, nb := balance.GetBalance()
info["isConnected"] = peer != nil
info["pricing/balance"] = pb
info["priority"] = pb != 0
// cb := api.server.clientPool.ndb.getCurrencyBalance(id)
// info["pricing/currency"] = cb.amount
if peer != nil {
info["connectionTime"] = float64(mclock.Now()-peer.connectedAt) / float64(time.Second)
info["capacity"] = peer.getCapacity()
info["pricing/negBalance"] = nb
}
return info
}
// setParams either sets the given parameters for a single connected client (if specified)
// or the default parameters applicable to clients connected in the future
func (api *LightServerAPI) setParams(params map[string]interface{}, client *clientPeer, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) {
defParams := client == nil
for name, value := range params {
errValue := func() error {
return fmt.Errorf("invalid value for parameter '%s'", name)
}
setFactor := func(v *float64) {
if val, ok := value.(float64); ok && val >= 0 {
*v = val / float64(time.Second)
updateFactors = true
} else {
err = errValue()
}
}
switch {
case name == "pricing/timeFactor":
setFactor(&posFactors.TimeFactor)
case name == "pricing/capacityFactor":
setFactor(&posFactors.CapacityFactor)
case name == "pricing/requestCostFactor":
setFactor(&posFactors.RequestFactor)
case name == "pricing/negative/timeFactor":
setFactor(&negFactors.TimeFactor)
case name == "pricing/negative/capacityFactor":
setFactor(&negFactors.CapacityFactor)
case name == "pricing/negative/requestCostFactor":
setFactor(&negFactors.RequestFactor)
case !defParams && name == "capacity":
if capacity, ok := value.(float64); ok && uint64(capacity) >= api.server.minCapacity {
_, err = api.server.clientPool.SetCapacity(client.Node(), uint64(capacity), 0, false)
// time factor recalculation is performed automatically by the balance tracker
} else {
err = errValue()
}
default:
if defParams {
err = fmt.Errorf("invalid default parameter '%s'", name)
} else {
err = fmt.Errorf("invalid client parameter '%s'", name)
}
}
if err != nil {
return
}
}
return
}
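// Sketch: a parameter map as setParams receives it from the RPC layer, where
// JSON numbers decode to float64. Factor values are given per second of
// service time and converted to per-nanosecond factors by setFactor above;
// the numbers are arbitrary.
//
//	params := map[string]interface{}{
//		"pricing/timeFactor":              0.0,
//		"pricing/capacityFactor":          1000.0,
//		"pricing/requestCostFactor":       1.0,
//		"pricing/negative/timeFactor":     0.0,
//		"pricing/negative/capacityFactor": 1000.0,
//	}
//	err := api.SetDefaultParams(params)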
// SetClientParams sets client parameters for all clients listed in the ids list
// or all connected clients if the list is empty
func (api *LightServerAPI) SetClientParams(nodes []string, params map[string]interface{}) error {
var err error
for _, node := range nodes {
var id enode.ID
if id, err = parseNode(node); err != nil {
return err
}
if peer := api.server.peers.peer(id); peer != nil {
posFactors, negFactors := peer.balance.GetPriceFactors()
update, e := api.setParams(params, peer, &posFactors, &negFactors)
if update {
peer.balance.SetPriceFactors(posFactors, negFactors)
}
if e != nil {
err = e
}
} else {
err = fmt.Errorf("client %064x is not connected", id)
}
}
return err
}
// SetDefaultParams sets the default parameters applicable to clients connected in the future
func (api *LightServerAPI) SetDefaultParams(params map[string]interface{}) error {
update, err := api.setParams(params, nil, &api.defaultPosFactors, &api.defaultNegFactors)
if update {
api.server.clientPool.SetDefaultFactors(api.defaultPosFactors, api.defaultNegFactors)
}
return err
}
// SetConnectedBias sets the connection bias, which is applied to already connected
// clients so that they are not kicked out too soon and all connected clients have
// enough time to request or sync some data.
// A negative `bias` is illegal and returns an error.
func (api *LightServerAPI) SetConnectedBias(bias time.Duration) error {
if bias < time.Duration(0) {
return fmt.Errorf("bias illegal: %v less than 0", bias)
}
api.server.clientPool.SetConnectedBias(bias)
return nil
}
// AddBalance adds the given amount to the balance of a client if possible and returns
// the balance before and after the operation
func (api *LightServerAPI) AddBalance(node string, amount int64) (balance [2]uint64, err error) {
var id enode.ID
if id, err = parseNode(node); err != nil {
return
}
api.server.clientPool.BalanceOperation(id, "", func(nb vfs.AtomicBalanceOperator) {
balance[0], balance[1], err = nb.AddBalance(amount)
})
return
}
// Benchmark runs a request performance benchmark with a given set of measurement setups
// in multiple passes specified by passCount. The measurement time for each setup in each
// pass is specified in milliseconds by length.
//
// Note: measurement time is adjusted for each pass depending on the previous ones.
// Therefore a controlled total measurement time is achievable in multiple passes.
func (api *LightServerAPI) Benchmark(setups []map[string]interface{}, passCount, length int) ([]map[string]interface{}, error) {
benchmarks := make([]requestBenchmark, len(setups))
for i, setup := range setups {
if t, ok := setup["type"].(string); ok {
getInt := func(field string, def int) int {
if value, ok := setup[field].(float64); ok {
return int(value)
}
return def
}
getBool := func(field string, def bool) bool {
if value, ok := setup[field].(bool); ok {
return value
}
return def
}
switch t {
case "header":
benchmarks[i] = &benchmarkBlockHeaders{
amount: getInt("amount", 1),
skip: getInt("skip", 1),
byHash: getBool("byHash", false),
reverse: getBool("reverse", false),
}
case "body":
benchmarks[i] = &benchmarkBodiesOrReceipts{receipts: false}
case "receipts":
benchmarks[i] = &benchmarkBodiesOrReceipts{receipts: true}
case "proof":
benchmarks[i] = &benchmarkProofsOrCode{code: false}
case "code":
benchmarks[i] = &benchmarkProofsOrCode{code: true}
case "cht":
benchmarks[i] = &benchmarkHelperTrie{
bloom: false,
reqCount: getInt("amount", 1),
}
case "bloom":
benchmarks[i] = &benchmarkHelperTrie{
bloom: true,
reqCount: getInt("amount", 1),
}
case "txSend":
benchmarks[i] = &benchmarkTxSend{}
case "txStatus":
benchmarks[i] = &benchmarkTxStatus{}
default:
return nil, errUnknownBenchmarkType
}
} else {
return nil, errUnknownBenchmarkType
}
}
rs := api.server.handler.runBenchmark(benchmarks, passCount, time.Millisecond*time.Duration(length))
result := make([]map[string]interface{}, len(setups))
for i, r := range rs {
res := make(map[string]interface{})
if r.err == nil {
res["totalCount"] = r.totalCount
res["avgTime"] = r.avgTime
res["maxInSize"] = r.maxInSize
res["maxOutSize"] = r.maxOutSize
} else {
res["error"] = r.err.Error()
}
result[i] = res
}
return result, nil
}
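// Sketch: invoking the benchmark over RPC. Numeric setup fields arrive as
// JSON numbers (float64), matching the getInt decoding above; this would run
// two passes with a 1000 ms measurement target per setup.
//
//	setups := []map[string]interface{}{
//		{"type": "header", "amount": 192, "skip": 0, "byHash": false, "reverse": false},
//		{"type": "receipts"},
//		{"type": "txStatus"},
//	}
//	var results []map[string]interface{}
//	err := server.Call(&results, "les_benchmark", setups, 2, 1000)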
// DebugAPI provides an API to debug LES light server functionality.
type DebugAPI struct {
server *LesServer
}
// NewDebugAPI creates a new LES light server debug API.
func NewDebugAPI(server *LesServer) *DebugAPI {
return &DebugAPI{
server: server,
}
}
// FreezeClient forces a temporary client freeze which normally happens when the server is overloaded
func (api *DebugAPI) FreezeClient(node string) error {
var (
id enode.ID
err error
)
if id, err = parseNode(node); err != nil {
return err
}
if peer := api.server.peers.peer(id); peer != nil {
peer.freeze()
return nil
} else {
return fmt.Errorf("client %064x is not connected", id[:])
}
}

@@ -1,337 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"context"
"errors"
"math/big"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
type LesApiBackend struct {
extRPCEnabled bool
allowUnprotectedTxs bool
eth *LightEthereum
gpo *gasprice.Oracle
}
func (b *LesApiBackend) ChainConfig() *params.ChainConfig {
return b.eth.chainConfig
}
func (b *LesApiBackend) CurrentBlock() *types.Header {
return b.eth.BlockChain().CurrentHeader()
}
func (b *LesApiBackend) SetHead(number uint64) {
b.eth.blockchain.SetHead(number)
}
func (b *LesApiBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {
// Return the current head header as the pending one, since there
// is no notion of pending in the light client. TODO(rjl493456442)
// unify the behavior of `HeaderByNumber` and `PendingBlockAndReceipts`.
if number == rpc.PendingBlockNumber {
return b.eth.blockchain.CurrentHeader(), nil
}
if number == rpc.LatestBlockNumber {
return b.eth.blockchain.CurrentHeader(), nil
}
return b.eth.blockchain.GetHeaderByNumberOdr(ctx, uint64(number))
}
func (b *LesApiBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) {
if blockNr, ok := blockNrOrHash.Number(); ok {
return b.HeaderByNumber(ctx, blockNr)
}
if hash, ok := blockNrOrHash.Hash(); ok {
header, err := b.HeaderByHash(ctx, hash)
if err != nil {
return nil, err
}
if header == nil {
return nil, errors.New("header for hash not found")
}
if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {
return nil, errors.New("hash is not currently canonical")
}
return header, nil
}
return nil, errors.New("invalid arguments; neither block nor hash specified")
}
func (b *LesApiBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
return b.eth.blockchain.GetHeaderByHash(hash), nil
}
func (b *LesApiBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {
header, err := b.HeaderByNumber(ctx, number)
if header == nil || err != nil {
return nil, err
}
return b.BlockByHash(ctx, header.Hash())
}
func (b *LesApiBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
return b.eth.blockchain.GetBlockByHash(ctx, hash)
}
func (b *LesApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) {
if blockNr, ok := blockNrOrHash.Number(); ok {
return b.BlockByNumber(ctx, blockNr)
}
if hash, ok := blockNrOrHash.Hash(); ok {
block, err := b.BlockByHash(ctx, hash)
if err != nil {
return nil, err
}
if block == nil {
return nil, errors.New("header found, but block body is missing")
}
if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(block.NumberU64()) != hash {
return nil, errors.New("hash is not currently canonical")
}
return block, nil
}
return nil, errors.New("invalid arguments; neither block nor hash specified")
}
func (b *LesApiBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) {
return light.GetBody(ctx, b.eth.odr, hash, uint64(number))
}
func (b *LesApiBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
return nil, nil
}
func (b *LesApiBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) {
header, err := b.HeaderByNumber(ctx, number)
if err != nil {
return nil, nil, err
}
if header == nil {
return nil, nil, errors.New("header not found")
}
return light.NewState(ctx, header, b.eth.odr), header, nil
}
func (b *LesApiBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) {
if blockNr, ok := blockNrOrHash.Number(); ok {
return b.StateAndHeaderByNumber(ctx, blockNr)
}
if hash, ok := blockNrOrHash.Hash(); ok {
header := b.eth.blockchain.GetHeaderByHash(hash)
if header == nil {
return nil, nil, errors.New("header for hash not found")
}
if blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {
return nil, nil, errors.New("hash is not currently canonical")
}
return light.NewState(ctx, header, b.eth.odr), header, nil
}
return nil, nil, errors.New("invalid arguments; neither block nor hash specified")
}
func (b *LesApiBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
if number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash); number != nil {
return light.GetBlockReceipts(ctx, b.eth.odr, hash, *number)
}
return nil, nil
}
func (b *LesApiBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
return light.GetBlockLogs(ctx, b.eth.odr, hash, number)
}
func (b *LesApiBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {
if number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash); number != nil {
return b.eth.blockchain.GetTdOdr(ctx, hash, *number)
}
return nil
}
func (b *LesApiBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) (*vm.EVM, func() error) {
if vmConfig == nil {
vmConfig = new(vm.Config)
}
txContext := core.NewEVMTxContext(msg)
context := core.NewEVMBlockContext(header, b.eth.blockchain, nil)
if blockCtx != nil {
context = *blockCtx
}
return vm.NewEVM(context, txContext, state, b.eth.chainConfig, *vmConfig), state.Error
}
func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
return b.eth.txPool.Add(ctx, signedTx)
}
func (b *LesApiBackend) RemoveTx(txHash common.Hash) {
b.eth.txPool.RemoveTx(txHash)
}
func (b *LesApiBackend) GetPoolTransactions() (types.Transactions, error) {
return b.eth.txPool.GetTransactions()
}
func (b *LesApiBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction {
return b.eth.txPool.GetTransaction(txHash)
}
func (b *LesApiBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
return light.GetTransaction(ctx, b.eth.odr, txHash)
}
func (b *LesApiBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) {
return b.eth.txPool.GetNonce(ctx, addr)
}
func (b *LesApiBackend) Stats() (pending int, queued int) {
return b.eth.txPool.Stats(), 0
}
func (b *LesApiBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
return b.eth.txPool.Content()
}
func (b *LesApiBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
return b.eth.txPool.ContentFrom(addr)
}
func (b *LesApiBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
return b.eth.txPool.SubscribeNewTxsEvent(ch)
}
func (b *LesApiBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return b.eth.blockchain.SubscribeChainEvent(ch)
}
func (b *LesApiBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {
return b.eth.blockchain.SubscribeChainHeadEvent(ch)
}
func (b *LesApiBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {
return b.eth.blockchain.SubscribeChainSideEvent(ch)
}
func (b *LesApiBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return b.eth.blockchain.SubscribeLogsEvent(ch)
}
func (b *LesApiBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit
return nil
})
}
func (b *LesApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
return b.eth.blockchain.SubscribeRemovedLogsEvent(ch)
}
func (b *LesApiBackend) SyncProgress() ethereum.SyncProgress {
return ethereum.SyncProgress{}
}
func (b *LesApiBackend) ProtocolVersion() int {
return b.eth.LesVersion() + 10000
}
func (b *LesApiBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) {
return b.gpo.SuggestTipCap(ctx)
}
func (b *LesApiBackend) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (firstBlock *big.Int, reward [][]*big.Int, baseFee []*big.Int, gasUsedRatio []float64, err error) {
return b.gpo.FeeHistory(ctx, blockCount, lastBlock, rewardPercentiles)
}
func (b *LesApiBackend) ChainDb() ethdb.Database {
return b.eth.chainDb
}
func (b *LesApiBackend) AccountManager() *accounts.Manager {
return b.eth.accountManager
}
func (b *LesApiBackend) ExtRPCEnabled() bool {
return b.extRPCEnabled
}
func (b *LesApiBackend) UnprotectedAllowed() bool {
return b.allowUnprotectedTxs
}
func (b *LesApiBackend) RPCGasCap() uint64 {
return b.eth.config.RPCGasCap
}
func (b *LesApiBackend) RPCEVMTimeout() time.Duration {
return b.eth.config.RPCEVMTimeout
}
func (b *LesApiBackend) RPCTxFeeCap() float64 {
return b.eth.config.RPCTxFeeCap
}
func (b *LesApiBackend) BloomStatus() (uint64, uint64) {
if b.eth.bloomIndexer == nil {
return 0, 0
}
sections, _, _ := b.eth.bloomIndexer.Sections()
return params.BloomBitsBlocksClient, sections
}
func (b *LesApiBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
for i := 0; i < bloomFilterThreads; i++ {
go session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.eth.bloomRequests)
}
}
func (b *LesApiBackend) Engine() consensus.Engine {
return b.eth.engine
}
func (b *LesApiBackend) CurrentHeader() *types.Header {
return b.eth.blockchain.CurrentHeader()
}
func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) {
return b.eth.stateAtBlock(ctx, block, reexec)
}
func (b *LesApiBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) {
return b.eth.stateAtTransaction(ctx, block, txIndex, reexec)
}

@@ -1,512 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"context"
crand "crypto/rand"
"errors"
"flag"
"math/rand"
"os"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
"github.com/mattn/go-colorable"
)
// Additional command line flags for the test binary.
var (
loglevel = flag.Int("loglevel", 0, "verbosity of logs")
simAdapter = flag.String("adapter", "exec", "type of simulation: sim|socket|exec|docker")
)
func TestMain(m *testing.M) {
flag.Parse()
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
// register the LES client and server services, which will run as devp2p
// protocols when using the exec adapter
adapters.RegisterLifecycles(services)
os.Exit(m.Run())
}
// This test is not meant to be a part of the automatic testing process because it
// runs for a long time and also requires a large database in order to do a meaningful
// request performance test. When testServerDataDir is empty, the test is skipped.
const (
testServerDataDir = "" // should always be empty on the master branch
testServerCapacity = 200
testMaxClients = 10
testTolerance = 0.1
minRelCap = 0.2
)
func TestCapacityAPI3(t *testing.T) {
testCapacityAPI(t, 3)
}
func TestCapacityAPI6(t *testing.T) {
testCapacityAPI(t, 6)
}
func TestCapacityAPI10(t *testing.T) {
testCapacityAPI(t, 10)
}
// testCapacityAPI runs an end-to-end simulation test connecting one server with
// a given number of clients. It sets different priority capacities to all clients
// except a randomly selected one which runs in free client mode. All clients send
// similar requests at the maximum allowed rate and the test verifies whether the
// ratio of processed requests is close enough to the ratio of assigned capacities.
// Running multiple rounds with different settings ensures that changing capacity
// while connected and going back and forth between free and priority mode with
// the supplied API calls is also thoroughly tested.
func testCapacityAPI(t *testing.T, clientCount int) {
// Skip test if no data dir specified
if testServerDataDir == "" {
return
}
for !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {
if len(servers) != 1 {
t.Fatalf("Invalid number of servers: %d", len(servers))
}
server := servers[0]
serverRpcClient, err := server.Client()
if err != nil {
t.Fatalf("Failed to obtain rpc client: %v", err)
}
headNum, headHash := getHead(ctx, t, serverRpcClient)
minCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)
testCap := totalCap * 3 / 4
t.Logf("Server testCap: %d minCap: %d head number: %d head hash: %064x\n", testCap, minCap, headNum, headHash)
reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
if minCap > reqMinCap {
t.Fatalf("Minimum client capacity (%d) bigger than required minimum for this test (%d)", minCap, reqMinCap)
}
freeIdx := rand.Intn(len(clients))
clientRpcClients := make([]*rpc.Client, len(clients))
for i, client := range clients {
var err error
clientRpcClients[i], err = client.Client()
if err != nil {
t.Fatalf("Failed to obtain rpc client: %v", err)
}
t.Log("connecting client", i)
if i != freeIdx {
setCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))
}
net.Connect(client.ID(), server.ID())
for {
select {
case <-ctx.Done():
t.Fatalf("Timeout")
default:
}
num, hash := getHead(ctx, t, clientRpcClients[i])
if num == headNum && hash == headHash {
t.Log("client", i, "synced")
break
}
time.Sleep(time.Millisecond * 200)
}
}
var wg sync.WaitGroup
stop := make(chan struct{})
reqCount := make([]atomic.Uint64, len(clientRpcClients))
// Send light requests like crazy.
for i, c := range clientRpcClients {
wg.Add(1)
i, c := i, c
go func() {
defer wg.Done()
queue := make(chan struct{}, 100)
reqCount[i].Store(0)
for {
select {
case queue <- struct{}{}:
select {
case <-stop:
return
case <-ctx.Done():
return
default:
wg.Add(1)
go func() {
ok := testRequest(ctx, t, c)
wg.Done()
<-queue
if ok {
if reqCount[i].Add(1)%10000 == 0 {
freezeClient(ctx, t, serverRpcClient, clients[i].ID())
}
}
}()
}
case <-stop:
return
case <-ctx.Done():
return
}
}
}()
}
processedSince := func(start []uint64) []uint64 {
res := make([]uint64, len(reqCount))
for i := range reqCount {
res[i] = reqCount[i].Load()
if start != nil {
res[i] -= start[i]
}
}
return res
}
weights := make([]float64, len(clients))
for c := 0; c < 5; c++ {
setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), minCap)
freeIdx = rand.Intn(len(clients))
var sum float64
for i := range clients {
if i == freeIdx {
weights[i] = 0
} else {
weights[i] = rand.Float64()*(1-minRelCap) + minRelCap
}
sum += weights[i]
}
for i, client := range clients {
weights[i] *= float64(testCap-minCap-100) / sum
capacity := uint64(weights[i])
if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
}
}
setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)
for i, client := range clients {
capacity := uint64(weights[i])
if i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {
setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
}
}
weights[freeIdx] = float64(minCap)
for i := range clients {
weights[i] /= float64(testCap)
}
time.Sleep(flowcontrol.DecParamDelay)
t.Log("Starting measurement")
t.Logf("Relative weights:")
for i := range clients {
t.Logf(" %f", weights[i])
}
t.Log()
start := processedSince(nil)
for {
select {
case <-ctx.Done():
t.Fatalf("Timeout")
default:
}
_, totalCap = getCapacityInfo(ctx, t, serverRpcClient)
if totalCap < testCap {
t.Log("Total capacity underrun")
close(stop)
wg.Wait()
return false
}
processed := processedSince(start)
var avg uint64
t.Logf("Processed")
for i, p := range processed {
t.Logf(" %d", p)
processed[i] = uint64(float64(p) / weights[i])
avg += processed[i]
}
avg /= uint64(len(processed))
if avg >= 10000 {
var maxDev float64
for _, p := range processed {
dev := float64(int64(p-avg)) / float64(avg)
t.Logf(" %7.4f", dev)
if dev < 0 {
dev = -dev
}
if dev > maxDev {
maxDev = dev
}
}
t.Logf(" max deviation: %f totalCap: %d\n", maxDev, totalCap)
if maxDev <= testTolerance {
t.Log("success")
break
}
} else {
t.Log()
}
time.Sleep(time.Millisecond * 200)
}
}
close(stop)
wg.Wait()
for i := range reqCount {
t.Log("client", i, "processed", reqCount[i].Load())
}
return true
}) {
t.Log("restarting test")
}
}
func getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) {
res := make(map[string]interface{})
if err := client.CallContext(ctx, &res, "eth_getBlockByNumber", "latest", false); err != nil {
t.Fatalf("Failed to obtain head block: %v", err)
}
numStr, ok := res["number"].(string)
if !ok {
t.Fatalf("RPC block number field invalid")
}
num, err := hexutil.DecodeUint64(numStr)
if err != nil {
t.Fatalf("Failed to decode RPC block number: %v", err)
}
hashStr, ok := res["hash"].(string)
if !ok {
t.Fatalf("RPC block number field invalid")
}
hash := common.HexToHash(hashStr)
return num, hash
}
func testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool {
var res string
var addr common.Address
crand.Read(addr[:])
c, cancel := context.WithTimeout(ctx, time.Second*12)
defer cancel()
err := client.CallContext(c, &res, "eth_getBalance", addr, "latest")
if err != nil {
t.Log("request error:", err)
}
return err == nil
}
func freezeClient(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) {
if err := server.CallContext(ctx, nil, "debug_freezeClient", clientID); err != nil {
t.Fatalf("Failed to freeze client: %v", err)
}
}
func setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, cap uint64) {
params := make(map[string]interface{})
params["capacity"] = cap
if err := server.CallContext(ctx, nil, "les_setClientParams", []enode.ID{clientID}, []string{}, params); err != nil {
t.Fatalf("Failed to set client capacity: %v", err)
}
}
func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 {
var res map[enode.ID]map[string]interface{}
if err := server.CallContext(ctx, &res, "les_clientInfo", []enode.ID{clientID}, []string{}); err != nil {
t.Fatalf("Failed to get client info: %v", err)
}
info, ok := res[clientID]
if !ok {
t.Fatalf("Missing client info")
}
v, ok := info["capacity"]
if !ok {
t.Fatalf("Missing field in client info: capacity")
}
vv, ok := v.(float64)
if !ok {
t.Fatalf("Failed to decode capacity field")
}
return uint64(vv)
}
func getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, totalCap uint64) {
var res map[string]interface{}
if err := server.CallContext(ctx, &res, "les_serverInfo"); err != nil {
t.Fatalf("Failed to query server info: %v", err)
}
decode := func(s string) uint64 {
v, ok := res[s]
if !ok {
t.Fatalf("Missing field in server info: %s", s)
}
vv, ok := v.(float64)
if !ok {
t.Fatalf("Failed to decode server info field: %s", s)
}
return uint64(vv)
}
minCap = decode("minimumCapacity")
totalCap = decode("totalCapacity")
return
}
var services = adapters.LifecycleConstructors{
"lesclient": newLesClientService,
"lesserver": newLesServerService,
}
func NewNetwork() (*simulations.Network, func(), error) {
adapter, adapterTeardown, err := NewAdapter(*simAdapter, services)
if err != nil {
return nil, adapterTeardown, err
}
defaultService := "streamer"
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
ID: "0",
DefaultService: defaultService,
})
teardown := func() {
adapterTeardown()
net.Shutdown()
}
return net, teardown, nil
}
func NewAdapter(adapterType string, services adapters.LifecycleConstructors) (adapter adapters.NodeAdapter, teardown func(), err error) {
teardown = func() {}
switch adapterType {
case "sim":
adapter = adapters.NewSimAdapter(services)
// case "socket":
// adapter = adapters.NewSocketAdapter(services)
case "exec":
baseDir, err0 := os.MkdirTemp("", "les-test")
if err0 != nil {
return nil, teardown, err0
}
teardown = func() { os.RemoveAll(baseDir) }
adapter = adapters.NewExecAdapter(baseDir)
/*case "docker":
adapter, err = adapters.NewDockerAdapter()
if err != nil {
return nil, teardown, err
}*/
default:
return nil, teardown, errors.New("adapter needs to be one of sim, socket, exec, docker")
}
return adapter, teardown, nil
}
func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool {
net, teardown, err := NewNetwork()
defer teardown()
if err != nil {
t.Fatalf("Failed to create network: %v", err)
}
timeout := 1800 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
servers := make([]*simulations.Node, serverCount)
clients := make([]*simulations.Node, clientCount)
for i := range clients {
clientconf := adapters.RandomNodeConfig()
clientconf.Lifecycles = []string{"lesclient"}
if len(clientDir) == clientCount {
clientconf.DataDir = clientDir[i]
}
client, err := net.NewNodeWithConfig(clientconf)
if err != nil {
t.Fatalf("Failed to create client: %v", err)
}
clients[i] = client
}
for i := range servers {
serverconf := adapters.RandomNodeConfig()
serverconf.Lifecycles = []string{"lesserver"}
if len(serverDir) == serverCount {
serverconf.DataDir = serverDir[i]
}
server, err := net.NewNodeWithConfig(serverconf)
if err != nil {
t.Fatalf("Failed to create server: %v", err)
}
servers[i] = server
}
for _, client := range clients {
if err := net.Start(client.ID()); err != nil {
t.Fatalf("Failed to start client node: %v", err)
}
}
for _, server := range servers {
if err := net.Start(server.ID()); err != nil {
t.Fatalf("Failed to start server node: %v", err)
}
}
return test(ctx, net, servers, clients)
}
func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
config := ethconfig.Defaults
config.SyncMode = downloader.LightSync
return New(stack, &config)
}
func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {
config := ethconfig.Defaults
config.SyncMode = downloader.FullSync
config.LightServ = testServerCapacity
config.LightPeers = testMaxClients
ethereum, err := eth.New(stack, &config)
if err != nil {
return nil, err
}
_, err = NewLesServer(stack, ethereum, &config)
if err != nil {
return nil, err
}
return ethereum, nil
}

@@ -1,351 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
crand "crypto/rand"
"encoding/binary"
"errors"
"math/big"
"math/rand"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
// requestBenchmark is an interface for different randomized request generators
type requestBenchmark interface {
// init initializes the generator for generating the given number of randomized requests
init(h *serverHandler, count int) error
// request initiates sending a single request to the given peer
request(peer *serverPeer, index int) error
}
// benchmarkBlockHeaders implements requestBenchmark
type benchmarkBlockHeaders struct {
amount, skip int
reverse, byHash bool
offset, randMax int64
hashes []common.Hash
}
func (b *benchmarkBlockHeaders) init(h *serverHandler, count int) error {
d := int64(b.amount-1) * int64(b.skip+1)
b.offset = 0
b.randMax = h.blockchain.CurrentHeader().Number.Int64() + 1 - d
if b.randMax < 0 {
return errors.New("chain is too short")
}
if b.reverse {
b.offset = d
}
if b.byHash {
b.hashes = make([]common.Hash, count)
for i := range b.hashes {
b.hashes[i] = rawdb.ReadCanonicalHash(h.chainDb, uint64(b.offset+rand.Int63n(b.randMax)))
}
}
return nil
}
func (b *benchmarkBlockHeaders) request(peer *serverPeer, index int) error {
if b.byHash {
return peer.requestHeadersByHash(0, b.hashes[index], b.amount, b.skip, b.reverse)
}
return peer.requestHeadersByNumber(0, uint64(b.offset+rand.Int63n(b.randMax)), b.amount, b.skip, b.reverse)
}
// benchmarkBodiesOrReceipts implements requestBenchmark
type benchmarkBodiesOrReceipts struct {
receipts bool
hashes []common.Hash
}
func (b *benchmarkBodiesOrReceipts) init(h *serverHandler, count int) error {
randMax := h.blockchain.CurrentHeader().Number.Int64() + 1
b.hashes = make([]common.Hash, count)
for i := range b.hashes {
b.hashes[i] = rawdb.ReadCanonicalHash(h.chainDb, uint64(rand.Int63n(randMax)))
}
return nil
}
func (b *benchmarkBodiesOrReceipts) request(peer *serverPeer, index int) error {
if b.receipts {
return peer.requestReceipts(0, []common.Hash{b.hashes[index]})
}
return peer.requestBodies(0, []common.Hash{b.hashes[index]})
}
// benchmarkProofsOrCode implements requestBenchmark
type benchmarkProofsOrCode struct {
code bool
headHash common.Hash
}
func (b *benchmarkProofsOrCode) init(h *serverHandler, count int) error {
b.headHash = h.blockchain.CurrentHeader().Hash()
return nil
}
func (b *benchmarkProofsOrCode) request(peer *serverPeer, index int) error {
key := make([]byte, 32)
crand.Read(key)
if b.code {
return peer.requestCode(0, []CodeReq{{BHash: b.headHash, AccountAddress: key}})
}
return peer.requestProofs(0, []ProofReq{{BHash: b.headHash, Key: key}})
}
// benchmarkHelperTrie implements requestBenchmark
type benchmarkHelperTrie struct {
bloom bool
reqCount int
sectionCount, headNum uint64
}
func (b *benchmarkHelperTrie) init(h *serverHandler, count int) error {
if b.bloom {
b.sectionCount, b.headNum, _ = h.server.bloomTrieIndexer.Sections()
} else {
b.sectionCount, _, _ = h.server.chtIndexer.Sections()
b.headNum = b.sectionCount*params.CHTFrequency - 1
}
if b.sectionCount == 0 {
return errors.New("no processed sections available")
}
return nil
}
func (b *benchmarkHelperTrie) request(peer *serverPeer, index int) error {
reqs := make([]HelperTrieReq, b.reqCount)
if b.bloom {
bitIdx := uint16(rand.Intn(2048))
for i := range reqs {
key := make([]byte, 10)
binary.BigEndian.PutUint16(key[:2], bitIdx)
binary.BigEndian.PutUint64(key[2:], uint64(rand.Int63n(int64(b.sectionCount))))
reqs[i] = HelperTrieReq{Type: htBloomBits, TrieIdx: b.sectionCount - 1, Key: key}
}
} else {
for i := range reqs {
key := make([]byte, 8)
binary.BigEndian.PutUint64(key[:], uint64(rand.Int63n(int64(b.headNum))))
reqs[i] = HelperTrieReq{Type: htCanonical, TrieIdx: b.sectionCount - 1, Key: key, AuxReq: htAuxHeader}
}
}
return peer.requestHelperTrieProofs(0, reqs)
}
// benchmarkTxSend implements requestBenchmark
type benchmarkTxSend struct {
txs types.Transactions
}
func (b *benchmarkTxSend) init(h *serverHandler, count int) error {
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
signer := types.LatestSigner(h.server.chainConfig)
b.txs = make(types.Transactions, count)
for i := range b.txs {
data := make([]byte, txSizeCostLimit)
crand.Read(data)
tx, err := types.SignTx(types.NewTransaction(0, addr, new(big.Int), 0, new(big.Int), data), signer, key)
if err != nil {
panic(err)
}
b.txs[i] = tx
}
return nil
}
func (b *benchmarkTxSend) request(peer *serverPeer, index int) error {
enc, _ := rlp.EncodeToBytes(types.Transactions{b.txs[index]})
return peer.sendTxs(0, 1, enc)
}
// benchmarkTxStatus implements requestBenchmark
type benchmarkTxStatus struct{}
func (b *benchmarkTxStatus) init(h *serverHandler, count int) error {
return nil
}
func (b *benchmarkTxStatus) request(peer *serverPeer, index int) error {
var hash common.Hash
crand.Read(hash[:])
return peer.requestTxStatus(0, []common.Hash{hash})
}
// benchmarkSetup stores measurement data for a single benchmark type
type benchmarkSetup struct {
req requestBenchmark
totalCount int
totalTime, avgTime time.Duration
maxInSize, maxOutSize uint32
err error
}
// runBenchmark runs a benchmark cycle for all benchmark types in the specified
// number of passes
func (h *serverHandler) runBenchmark(benchmarks []requestBenchmark, passCount int, targetTime time.Duration) []*benchmarkSetup {
setup := make([]*benchmarkSetup, len(benchmarks))
for i, b := range benchmarks {
setup[i] = &benchmarkSetup{req: b}
}
for i := 0; i < passCount; i++ {
log.Info("Running benchmark", "pass", i+1, "total", passCount)
todo := make([]*benchmarkSetup, len(benchmarks))
copy(todo, setup)
for len(todo) > 0 {
// select a random element
index := rand.Intn(len(todo))
next := todo[index]
todo[index] = todo[len(todo)-1]
todo = todo[:len(todo)-1]
if next.err == nil {
// calculate request count
count := 50
if next.totalTime > 0 {
count = int(uint64(next.totalCount) * uint64(targetTime) / uint64(next.totalTime))
}
if err := h.measure(next, count); err != nil {
next.err = err
}
}
}
}
log.Info("Benchmark completed")
for _, s := range setup {
if s.err == nil {
s.avgTime = s.totalTime / time.Duration(s.totalCount)
}
}
return setup
}
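// benchmarkCountExample is an illustrative sketch, not part of the original
// file: it reproduces the adaptive request-count formula from runBenchmark
// above with a hypothetical fixed serving time per request, showing how the
// per-pass count converges on targetTime/perReq.
func benchmarkCountExample(targetTime time.Duration) {
	perReq := 50 * time.Microsecond // assumed average serving time per request
	count, totalCount, totalTime := 50, 0, time.Duration(0)
	for pass := 1; pass <= 3; pass++ {
		totalCount += count
		totalTime += time.Duration(count) * perReq
		// same scaling as above: size the next batch to take roughly targetTime
		count = int(uint64(totalCount) * uint64(targetTime) / uint64(totalTime))
		log.Info("Benchmark count example", "pass", pass, "nextCount", count)
	}
}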
// meteredPipe implements p2p.MsgReadWriter and remembers the largest single
// message size sent through the pipe
type meteredPipe struct {
rw p2p.MsgReadWriter
maxSize uint32
}
func (m *meteredPipe) ReadMsg() (p2p.Msg, error) {
return m.rw.ReadMsg()
}
func (m *meteredPipe) WriteMsg(msg p2p.Msg) error {
if msg.Size > m.maxSize {
m.maxSize = msg.Size
}
return m.rw.WriteMsg(msg)
}
// measure runs a benchmark for a single type in a single pass, with the given
// number of requests
func (h *serverHandler) measure(setup *benchmarkSetup, count int) error {
clientPipe, serverPipe := p2p.MsgPipe()
clientMeteredPipe := &meteredPipe{rw: clientPipe}
serverMeteredPipe := &meteredPipe{rw: serverPipe}
var id enode.ID
crand.Read(id[:])
peer1 := newServerPeer(lpv2, NetworkId, false, p2p.NewPeer(id, "client", nil), clientMeteredPipe)
peer2 := newClientPeer(lpv2, NetworkId, p2p.NewPeer(id, "server", nil), serverMeteredPipe)
peer2.announceType = announceTypeNone
peer2.fcCosts = make(requestCostTable)
c := &requestCosts{}
for code := range requests {
peer2.fcCosts[code] = c
}
peer2.fcParams = flowcontrol.ServerParams{BufLimit: 1, MinRecharge: 1}
peer2.fcClient = flowcontrol.NewClientNode(h.server.fcManager, peer2.fcParams)
defer peer2.fcClient.Disconnect()
if err := setup.req.init(h, count); err != nil {
return err
}
errCh := make(chan error, 10)
start := mclock.Now()
go func() {
for i := 0; i < count; i++ {
if err := setup.req.request(peer1, i); err != nil {
errCh <- err
return
}
}
}()
go func() {
for i := 0; i < count; i++ {
if err := h.handleMsg(peer2, &sync.WaitGroup{}); err != nil {
errCh <- err
return
}
}
}()
go func() {
for i := 0; i < count; i++ {
msg, err := clientPipe.ReadMsg()
if err != nil {
errCh <- err
return
}
var decoded interface{} // decode into a throwaway value; avoids shadowing the loop counter
msg.Decode(&decoded)
}
// at this point we can be sure that the other two
// goroutines finished successfully too
close(errCh)
}()
select {
case err := <-errCh:
if err != nil {
return err
}
case <-h.closeCh:
clientPipe.Close()
serverPipe.Close()
return errors.New("benchmark cancelled")
}
setup.totalTime += time.Duration(mclock.Now() - start)
setup.totalCount += count
setup.maxInSize = clientMeteredPipe.maxSize
setup.maxOutSize = serverMeteredPipe.maxSize
clientPipe.Close()
serverPipe.Close()
return nil
}
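// meteredPipeExample is an illustrative sketch, not part of the original
// file: it exercises the meteredPipe wrapper above on its own, measuring the
// largest message written through one end of a p2p.MsgPipe.
func meteredPipeExample() (maxSize uint32, err error) {
	a, b := p2p.MsgPipe()
	defer a.Close()
	defer b.Close()
	metered := &meteredPipe{rw: a}
	go func() {
		// read and discard whatever arrives on the other end
		if msg, rerr := b.ReadMsg(); rerr == nil {
			msg.Discard()
		}
	}()
	if err = p2p.Send(metered, 0, []string{"hello", "benchmark"}); err != nil {
		return 0, err
	}
	return metered.maxSize, nil
}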

@ -1,75 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"time"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/light"
)
const (
// bloomServiceThreads is the number of goroutines used globally by an Ethereum
// instance to service bloombits lookups for all running filters.
bloomServiceThreads = 16
// bloomFilterThreads is the number of goroutines used locally per filter to
// multiplex requests onto the global servicing goroutines.
bloomFilterThreads = 3
// bloomRetrievalBatch is the maximum number of bloom bit retrievals to service
// in a single batch.
bloomRetrievalBatch = 16
// bloomRetrievalWait is the maximum time to wait for enough bloom bit requests
// to accumulate before requesting an entire batch (avoiding hysteresis).
bloomRetrievalWait = time.Microsecond * 100
)
// startBloomHandlers starts a batch of goroutines to accept bloom bit database
// retrievals from possibly a range of filters and serve the data to satisfy them.
func (eth *LightEthereum) startBloomHandlers(sectionSize uint64) {
for i := 0; i < bloomServiceThreads; i++ {
go func() {
defer eth.wg.Done()
for {
select {
case <-eth.closeCh:
return
case request := <-eth.bloomRequests:
task := <-request
task.Bitsets = make([][]byte, len(task.Sections))
compVectors, err := light.GetBloomBits(task.Context, eth.odr, task.Bit, task.Sections)
if err == nil {
for i := range task.Sections {
if blob, err := bitutil.DecompressBytes(compVectors[i], int(sectionSize/8)); err == nil {
task.Bitsets[i] = blob
} else {
task.Error = err
}
}
} else {
task.Error = err
}
request <- task
}
}
}()
}
}
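// bloomBitsRoundTripExample is an illustrative sketch, not part of the
// original file: the handlers above receive bloom bit vectors compressed with
// bitutil, so a sectionSize/8 byte vector round-trips like this.
func bloomBitsRoundTripExample(sectionSize uint64) error {
	vector := make([]byte, sectionSize/8) // one bit per block in the section
	vector[0] = 0x80                      // pretend the first block of the section matches
	comp := bitutil.CompressBytes(vector)
	if _, err := bitutil.DecompressBytes(comp, int(sectionSize/8)); err != nil {
		return err
	}
	return nil
}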

@ -1,377 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package les implements the Light Ethereum Subprotocol.
package les
import (
"errors"
"strings"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/shutdowncheck"
"github.com/ethereum/go-ethereum/les/vflux"
vfc "github.com/ethereum/go-ethereum/les/vflux/client"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
)
type LightEthereum struct {
lesCommons
peers *serverPeerSet
reqDist *requestDistributor
retriever *retrieveManager
odr *LesOdr
relay *lesTxRelay
handler *clientHandler
txPool *light.TxPool
blockchain *light.LightChain
serverPool *vfc.ServerPool
serverPoolIterator enode.Iterator
merger *consensus.Merger
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
bloomIndexer *core.ChainIndexer // Bloom indexer operating during block imports
ApiBackend *LesApiBackend
eventMux *event.TypeMux
engine consensus.Engine
accountManager *accounts.Manager
netRPCService *ethapi.NetAPI
p2pServer *p2p.Server
p2pConfig *p2p.Config
udpEnabled bool
shutdownTracker *shutdowncheck.ShutdownTracker // Tracks if and when the node has shutdown ungracefully
}
// New creates an instance of the light client.
func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
chainDb, err := stack.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "eth/db/chaindata/", false)
if err != nil {
return nil, err
}
lesDb, err := stack.OpenDatabase("les.client", 0, 0, "eth/db/lesclient/", false)
if err != nil {
return nil, err
}
var overrides core.ChainOverrides
if config.OverrideCancun != nil {
overrides.OverrideCancun = config.OverrideCancun
}
if config.OverrideVerkle != nil {
overrides.OverrideVerkle = config.OverrideVerkle
}
triedb := trie.NewDatabase(chainDb, trie.HashDefaults)
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, triedb, config.Genesis, &overrides)
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
return nil, genesisErr
}
engine, err := ethconfig.CreateConsensusEngine(chainConfig, chainDb)
if err != nil {
return nil, err
}
log.Info("")
log.Info(strings.Repeat("-", 153))
for _, line := range strings.Split(chainConfig.Description(), "\n") {
log.Info(line)
}
log.Info(strings.Repeat("-", 153))
log.Info("")
peers := newServerPeerSet()
merger := consensus.NewMerger(chainDb)
leth := &LightEthereum{
lesCommons: lesCommons{
genesis: genesisHash,
config: config,
chainConfig: chainConfig,
iConfig: light.DefaultClientIndexerConfig,
chainDb: chainDb,
lesDb: lesDb,
closeCh: make(chan struct{}),
},
peers: peers,
eventMux: stack.EventMux(),
reqDist: newRequestDistributor(peers, &mclock.System{}),
accountManager: stack.AccountManager(),
merger: merger,
engine: engine,
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations),
p2pServer: stack.Server(),
p2pConfig: &stack.Config().P2P,
udpEnabled: stack.Config().P2P.DiscoveryV5,
shutdownTracker: shutdowncheck.NewShutdownTracker(chainDb),
}
var prenegQuery vfc.QueryFunc
if leth.udpEnabled {
prenegQuery = leth.prenegQuery
}
leth.serverPool, leth.serverPoolIterator = vfc.NewServerPool(lesDb, []byte("serverpool:"), time.Second, prenegQuery, &mclock.System{}, nil, requestList)
leth.serverPool.AddMetrics(suggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge, sessionValueMeter, serverDialedMeter)
leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool.GetTimeout)
leth.relay = newLesTxRelay(peers, leth.retriever)
leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.peers, leth.retriever)
leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequency, params.HelperTrieConfirmations, config.LightNoPrune)
leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency, config.LightNoPrune)
leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)
// Note: NewLightChain adds the trusted checkpoint so it needs an ODR with
// indexers already set but not started yet
if leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine); err != nil {
return nil, err
}
leth.chainReader = leth.blockchain
leth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay)
// Note: AddChildIndexer starts the update process for the child
leth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer)
leth.chtIndexer.Start(leth.blockchain)
leth.bloomIndexer.Start(leth.blockchain)
// Rewind the chain in case of an incompatible config upgrade.
if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
log.Warn("Rewinding chain to upgrade configuration", "err", compat)
if compat.RewindToTime > 0 {
leth.blockchain.SetHeadWithTimestamp(compat.RewindToTime)
} else {
leth.blockchain.SetHead(compat.RewindToBlock)
}
rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)
}
leth.ApiBackend = &LesApiBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, leth, nil}
gpoParams := config.GPO
if gpoParams.Default == nil {
gpoParams.Default = config.Miner.GasPrice
}
leth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams)
leth.handler = newClientHandler(leth)
leth.netRPCService = ethapi.NewNetAPI(leth.p2pServer, leth.config.NetworkId)
// Register the backend on the node
stack.RegisterAPIs(leth.APIs())
stack.RegisterProtocols(leth.Protocols())
stack.RegisterLifecycle(leth)
// Successful startup; push a marker and check previous unclean shutdowns.
leth.shutdownTracker.MarkStartup()
return leth, nil
}
// VfluxRequest sends a batch of requests to the given node through discv5 UDP TalkRequest and returns the responses
func (s *LightEthereum) VfluxRequest(n *enode.Node, reqs vflux.Requests) vflux.Replies {
if !s.udpEnabled {
return nil
}
reqsEnc, _ := rlp.EncodeToBytes(&reqs)
repliesEnc, _ := s.p2pServer.DiscV5.TalkRequest(s.serverPool.DialNode(n), "vfx", reqsEnc)
var replies vflux.Replies
if len(repliesEnc) == 0 || rlp.DecodeBytes(repliesEnc, &replies) != nil {
return nil
}
return replies
}
// vfxVersion returns the version number of the "les" service subdomain of the vflux UDP
// service, as advertised in the ENR record
func (s *LightEthereum) vfxVersion(n *enode.Node) uint {
if n.Seq() == 0 {
var err error
if !s.udpEnabled {
return 0
}
if n, err = s.p2pServer.DiscV5.RequestENR(n); n != nil && err == nil && n.Seq() != 0 {
s.serverPool.Persist(n)
} else {
return 0
}
}
var les []rlp.RawValue
if err := n.Load(enr.WithEntry("les", &les)); err != nil || len(les) < 1 {
return 0
}
var version uint
rlp.DecodeBytes(les[0], &version) // Ignore additional fields (for forward compatibility).
return version
}
// prenegQuery sends a capacity query to the given server node to determine whether
// a connection slot is immediately available
func (s *LightEthereum) prenegQuery(n *enode.Node) int {
if s.vfxVersion(n) < 1 {
// UDP query not supported, always try TCP connection
return 1
}
var requests vflux.Requests
requests.Add("les", vflux.CapacityQueryName, vflux.CapacityQueryReq{
Bias: 180,
AddTokens: []vflux.IntOrInf{{}},
})
replies := s.VfluxRequest(n, requests)
var cqr vflux.CapacityQueryReply
if replies.Get(0, &cqr) != nil || len(cqr) != 1 { // Note: Get returns an error if replies is nil
return -1
}
if cqr[0] > 0 {
return 1
}
return 0
}
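// capacityQueryExample is an illustrative sketch, not part of the original
// file: it builds the same capacity query that prenegQuery sends and encodes
// it the way VfluxRequest does before the discv5 TalkRequest.
func capacityQueryExample() ([]byte, error) {
	var reqs vflux.Requests
	reqs.Add("les", vflux.CapacityQueryName, vflux.CapacityQueryReq{
		Bias:      180, // connection bias in seconds, as used above
		AddTokens: []vflux.IntOrInf{{}},
	})
	return rlp.EncodeToBytes(&reqs)
}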
type LightDummyAPI struct{}
// Etherbase is the address that mining rewards will be sent to
func (s *LightDummyAPI) Etherbase() (common.Address, error) {
return common.Address{}, errors.New("mining is not supported in light mode")
}
// Coinbase is the address that mining rewards will be sent to (alias for Etherbase)
func (s *LightDummyAPI) Coinbase() (common.Address, error) {
return common.Address{}, errors.New("mining is not supported in light mode")
}
// Hashrate returns the POW hashrate
func (s *LightDummyAPI) Hashrate() hexutil.Uint {
return 0
}
// Mining returns an indication of whether this node is currently mining.
func (s *LightDummyAPI) Mining() bool {
return false
}
// APIs returns the collection of RPC services the ethereum package offers.
// NOTE: some of these services probably need to be moved elsewhere.
func (s *LightEthereum) APIs() []rpc.API {
apis := ethapi.GetAPIs(s.ApiBackend)
apis = append(apis, s.engine.APIs(s.BlockChain().HeaderChain())...)
return append(apis, []rpc.API{
{
Namespace: "eth",
Service: &LightDummyAPI{},
}, {
Namespace: "net",
Service: s.netRPCService,
}, {
Namespace: "vflux",
Service: s.serverPool.API(),
},
}...)
}
func (s *LightEthereum) ResetWithGenesisBlock(gb *types.Block) {
s.blockchain.ResetWithGenesisBlock(gb)
}
func (s *LightEthereum) BlockChain() *light.LightChain { return s.blockchain }
func (s *LightEthereum) TxPool() *light.TxPool { return s.txPool }
func (s *LightEthereum) Engine() consensus.Engine { return s.engine }
func (s *LightEthereum) LesVersion() int { return int(ClientProtocolVersions[0]) }
func (s *LightEthereum) EventMux() *event.TypeMux { return s.eventMux }
func (s *LightEthereum) Merger() *consensus.Merger { return s.merger }
// Protocols returns all the currently configured network protocols to start.
func (s *LightEthereum) Protocols() []p2p.Protocol {
return s.makeProtocols(ClientProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
if p := s.peers.peer(id.String()); p != nil {
return p.Info()
}
return nil
}, s.serverPoolIterator)
}
// Start implements node.Lifecycle, starting all internal goroutines needed by the
// light ethereum protocol implementation.
func (s *LightEthereum) Start() error {
log.Warn("Light client mode is an experimental feature")
// Regularly update shutdown marker
s.shutdownTracker.Start()
if s.udpEnabled && s.p2pServer.DiscV5 == nil {
s.udpEnabled = false
log.Error("Discovery v5 is not initialized")
}
discovery, err := s.setupDiscovery()
if err != nil {
return err
}
s.serverPool.AddSource(discovery)
s.serverPool.Start()
// Start bloom request workers.
s.wg.Add(bloomServiceThreads)
s.startBloomHandlers(params.BloomBitsBlocksClient)
return nil
}
// Stop implements node.Lifecycle, terminating all internal goroutines used by the
// Ethereum protocol.
func (s *LightEthereum) Stop() error {
close(s.closeCh)
s.serverPool.Stop()
s.peers.close()
s.reqDist.close()
s.odr.Stop()
s.relay.Stop()
s.bloomIndexer.Close()
s.chtIndexer.Close()
s.blockchain.Stop()
s.handler.stop()
s.txPool.Stop()
s.engine.Close()
s.eventMux.Stop()
// Clean shutdown marker as the last thing before closing db
s.shutdownTracker.Stop()
s.chainDb.Close()
s.lesDb.Close()
s.wg.Wait()
log.Info("Light ethereum stopped")
return nil
}

@ -1,309 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/trie/trienode"
)
// clientHandler is responsible for receiving and processing all incoming server
// responses.
type clientHandler struct {
forkFilter forkid.Filter
backend *LightEthereum
closeCh chan struct{}
wg sync.WaitGroup // WaitGroup used to track all connected peers.
}
func newClientHandler(backend *LightEthereum) *clientHandler {
handler := &clientHandler{
forkFilter: forkid.NewFilter(backend.blockchain),
backend: backend,
closeCh: make(chan struct{}),
}
return handler
}
func (h *clientHandler) stop() {
close(h.closeCh)
h.wg.Wait()
}
// runPeer is the p2p protocol run function for the given version.
func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := newServerPeer(int(version), h.backend.config.NetworkId, false, p, newMeteredMsgWriter(rw, int(version)))
defer peer.close()
h.wg.Add(1)
defer h.wg.Done()
err := h.handle(peer, false)
return err
}
func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error {
if h.backend.peers.len() >= h.backend.config.LightPeers && !p.Peer.Info().Network.Trusted {
return p2p.DiscTooManyPeers
}
p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
// Execute the LES handshake
forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.BlockChain().Genesis(), h.backend.blockchain.CurrentHeader().Number.Uint64(), h.backend.blockchain.CurrentHeader().Time)
if err := p.Handshake(h.backend.blockchain.Genesis().Hash(), forkid, h.forkFilter); err != nil {
p.Log().Debug("Light Ethereum handshake failed", "err", err)
return err
}
// Register peer with the server pool
if h.backend.serverPool != nil {
if nvt, err := h.backend.serverPool.RegisterNode(p.Node()); err == nil {
p.setValueTracker(nvt)
p.updateVtParams()
defer func() {
p.setValueTracker(nil)
h.backend.serverPool.UnregisterNode(p.Node())
}()
} else {
return err
}
}
// Register the peer locally
if err := h.backend.peers.register(p); err != nil {
p.Log().Error("Light Ethereum peer registration failed", "err", err)
return err
}
serverConnectionGauge.Update(int64(h.backend.peers.len()))
connectedAt := mclock.Now()
defer func() {
h.backend.peers.unregister(p.id)
connectionTimer.Update(time.Duration(mclock.Now() - connectedAt))
serverConnectionGauge.Update(int64(h.backend.peers.len()))
}()
// Mark the peer starts to be served.
p.serving.Store(true)
defer p.serving.Store(false)
// Spawn a main loop to handle all incoming messages.
for {
if err := h.handleMsg(p); err != nil {
p.Log().Debug("Light Ethereum message handling failed", "err", err)
p.fcServer.DumpLogs()
return err
}
}
}
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (h *clientHandler) handleMsg(p *serverPeer) error {
// Read the next message from the remote peer, and ensure it's fully consumed
msg, err := p.rw.ReadMsg()
if err != nil {
return err
}
p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size)
if msg.Size > ProtocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
defer msg.Discard()
var deliverMsg *Msg
// Handle the message depending on its contents
switch {
case msg.Code == AnnounceMsg:
p.Log().Trace("Received announce message")
var req announceData
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
if err := req.sanityCheck(); err != nil {
return err
}
update, size := req.Update.decode()
if p.rejectUpdate(size) {
return errResp(ErrRequestRejected, "")
}
p.updateFlowControl(update)
p.updateVtParams()
if req.Hash != (common.Hash{}) {
if p.announceType == announceTypeNone {
return errResp(ErrUnexpectedResponse, "")
}
if p.announceType == announceTypeSigned {
if err := req.checkSignature(p.ID(), update); err != nil {
p.Log().Trace("Invalid announcement signature", "err", err)
return err
}
p.Log().Trace("Valid announcement signature")
}
p.Log().Trace("Announce message content", "number", req.Number, "hash", req.Hash, "td", req.Td, "reorg", req.ReorgDepth)
// Update peer head information first and then notify the announcement
p.updateHead(req.Hash, req.Number, req.Td)
}
case msg.Code == BlockHeadersMsg:
p.Log().Trace("Received block header response message")
var resp struct {
ReqID, BV uint64
Headers []*types.Header
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgBlockHeaders,
ReqID: resp.ReqID,
Obj: resp.Headers,
}
case msg.Code == BlockBodiesMsg:
p.Log().Trace("Received block bodies response")
var resp struct {
ReqID, BV uint64
Data []*types.Body
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgBlockBodies,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case msg.Code == CodeMsg:
p.Log().Trace("Received code response")
var resp struct {
ReqID, BV uint64
Data [][]byte
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgCode,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case msg.Code == ReceiptsMsg:
p.Log().Trace("Received receipts response")
var resp struct {
ReqID, BV uint64
Receipts []types.Receipts
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgReceipts,
ReqID: resp.ReqID,
Obj: resp.Receipts,
}
case msg.Code == ProofsV2Msg:
p.Log().Trace("Received les/2 proofs response")
var resp struct {
ReqID, BV uint64
Data trienode.ProofList
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgProofsV2,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case msg.Code == HelperTrieProofsMsg:
p.Log().Trace("Received helper trie proof response")
var resp struct {
ReqID, BV uint64
Data HelperTrieResps
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgHelperTrieProofs,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case msg.Code == TxStatusMsg:
p.Log().Trace("Received tx status response")
var resp struct {
ReqID, BV uint64
Status []light.TxStatus
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
p.answeredRequest(resp.ReqID)
deliverMsg = &Msg{
MsgType: MsgTxStatus,
ReqID: resp.ReqID,
Obj: resp.Status,
}
case msg.Code == StopMsg && p.version >= lpv3:
p.freeze()
h.backend.retriever.frozen(p)
p.Log().Debug("Service stopped")
case msg.Code == ResumeMsg && p.version >= lpv3:
var bv uint64
if err := msg.Decode(&bv); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ResumeFreeze(bv)
p.unfreeze()
p.Log().Debug("Service resumed")
default:
p.Log().Trace("Received invalid message", "code", msg.Code)
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
// Deliver the received response to retriever.
if deliverMsg != nil {
if err := h.backend.retriever.deliver(p, deliverMsg); err != nil {
if val := p.errCount.Add(1, mclock.Now()); val > maxResponseErrors {
return err
}
}
}
return nil
}
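// headersResponseExample is an illustrative sketch, not part of the original
// file (it assumes github.com/ethereum/go-ethereum/rlp is imported): every
// response handled above shares the { ReqID, BV, payload } wire shape, e.g.
// a block header response round-trips like this.
func headersResponseExample() error {
	type headersResp struct {
		ReqID, BV uint64
		Headers   []*types.Header
	}
	enc, err := rlp.EncodeToBytes(&headersResp{ReqID: 7, BV: 1000})
	if err != nil {
		return err
	}
	var dec headersResp
	return rlp.DecodeBytes(enc, &dec)
}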

@ -1,99 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
)
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}
type chainReader interface {
CurrentHeader() *types.Header
}
// lesCommons contains fields needed by both server and client.
type lesCommons struct {
genesis common.Hash
config *ethconfig.Config
chainConfig *params.ChainConfig
iConfig *light.IndexerConfig
chainDb, lesDb ethdb.Database
chainReader chainReader
chtIndexer, bloomTrieIndexer *core.ChainIndexer
closeCh chan struct{}
wg sync.WaitGroup
}
// NodeInfo represents a short summary of the Ethereum sub-protocol metadata
// known about the host peer.
type NodeInfo struct {
Network uint64 `json:"network"` // Ethereum network ID (1=Mainnet, 5=Goerli)
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain
Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block
}
// makeProtocols creates protocol descriptors for the given LES versions.
func (c *lesCommons) makeProtocols(versions []uint, runPeer func(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error, peerInfo func(id enode.ID) interface{}, dialCandidates enode.Iterator) []p2p.Protocol {
protos := make([]p2p.Protocol, len(versions))
for i, version := range versions {
version := version
protos[i] = p2p.Protocol{
Name: "les",
Version: version,
Length: ProtocolLengths[version],
NodeInfo: c.nodeInfo,
Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
return runPeer(version, peer, rw)
},
PeerInfo: peerInfo,
DialCandidates: dialCandidates,
}
}
return protos
}
// nodeInfo retrieves some protocol metadata about the running host node.
func (c *lesCommons) nodeInfo() interface{} {
head := c.chainReader.CurrentHeader()
hash := head.Hash()
return &NodeInfo{
Network: c.config.NetworkId,
Difficulty: rawdb.ReadTd(c.chainDb, hash, head.Number.Uint64()),
Genesis: c.genesis,
Config: c.chainConfig,
Head: hash,
}
}
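// versionCaptureExample is an illustrative sketch, not part of the original
// file: it shows why makeProtocols copies the loop variable (version :=
// version) before the Run closure captures it. With pre-Go 1.22 loop
// semantics, omitting the copy would make every closure return the last
// version.
func versionCaptureExample(versions []uint) []func() uint {
	runs := make([]func() uint, len(versions))
	for i, version := range versions {
		version := version // per-iteration copy, as in makeProtocols above
		runs[i] = func() uint { return version }
	}
	return runs
}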

@ -1,517 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"encoding/binary"
"math"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
const makeCostStats = false // collect request cost statistics during operation
var (
// average request cost estimates based on serving time
reqAvgTimeCost = requestCostTable{
GetBlockHeadersMsg: {150000, 30000},
GetBlockBodiesMsg: {0, 700000},
GetReceiptsMsg: {0, 1000000},
GetCodeMsg: {0, 450000},
GetProofsV2Msg: {0, 600000},
GetHelperTrieProofsMsg: {0, 1000000},
SendTxV2Msg: {0, 450000},
GetTxStatusMsg: {0, 250000},
}
// maximum incoming message size estimates
reqMaxInSize = requestCostTable{
GetBlockHeadersMsg: {40, 0},
GetBlockBodiesMsg: {0, 40},
GetReceiptsMsg: {0, 40},
GetCodeMsg: {0, 80},
GetProofsV2Msg: {0, 80},
GetHelperTrieProofsMsg: {0, 20},
SendTxV2Msg: {0, 16500},
GetTxStatusMsg: {0, 50},
}
// maximum outgoing message size estimates
reqMaxOutSize = requestCostTable{
GetBlockHeadersMsg: {0, 556},
GetBlockBodiesMsg: {0, 100000},
GetReceiptsMsg: {0, 200000},
GetCodeMsg: {0, 50000},
GetProofsV2Msg: {0, 4000},
GetHelperTrieProofsMsg: {0, 4000},
SendTxV2Msg: {0, 100},
GetTxStatusMsg: {0, 100},
}
// request amounts that have to fit into the minimum buffer size minBufferMultiplier times
minBufferReqAmount = map[uint64]uint64{
GetBlockHeadersMsg: 192,
GetBlockBodiesMsg: 1,
GetReceiptsMsg: 1,
GetCodeMsg: 1,
GetProofsV2Msg: 1,
GetHelperTrieProofsMsg: 16,
SendTxV2Msg: 8,
GetTxStatusMsg: 64,
}
minBufferMultiplier = 3
)
const (
maxCostFactor = 2 // ratio of maximum and average cost estimates
bufLimitRatio = 6000 // fixed bufLimit/MRR ratio
gfUsageThreshold = 0.5
gfUsageTC = time.Second
gfRaiseTC = time.Second * 200
gfDropTC = time.Second * 50
gfDbKey = "_globalCostFactorV6"
)
// costTracker is responsible for calculating costs and cost estimates on the
// server side. It continuously updates the global cost factor which is defined
// as the number of cost units per nanosecond of serving time in a single thread.
// It is based on statistics collected during serving requests in high-load periods
// and practically acts as a one-dimensional request price scaling factor over the
// pre-defined cost estimate table.
//
// The reason for dynamically maintaining the global factor on the server side is:
// the estimated time cost of each request is fixed (hardcoded), but the hardware
// running the server differs, so the actual serving time varies greatly between
// machines. Serving time on the same machine can also vary greatly under
// different request pressure.
//
// In order to limit resources more effectively, we apply the global factor to the
// serving time to make the result as close as possible to the estimated time cost,
// no matter whether the server is slow or fast. We also scale totalRecharge with the
// global factor so that a fast server can serve more requests than estimated and a
// slow server can reduce its request pressure.
//
// Instead of scaling the cost values, the real value of cost units is changed by
// applying the factor to the serving times. This is more convenient because the
// changes in the cost factor can be applied immediately without always notifying
// the clients about the changed cost tables.
type costTracker struct {
db ethdb.Database
stopCh chan chan struct{}
inSizeFactor float64
outSizeFactor float64
factor float64
utilTarget float64
minBufLimit uint64
gfLock sync.RWMutex
reqInfoCh chan reqInfo
totalRechargeCh chan uint64
stats map[uint64][]atomic.Uint64 // Used for testing purpose.
// TestHooks
testing bool // Disable real cost evaluation for testing purpose.
testCostList RequestCostList // Customized cost table for testing purpose.
}
// newCostTracker creates a cost tracker and loads the cost factor statistics from the database.
// It also returns the minimum capacity that can be assigned to any peer.
func newCostTracker(db ethdb.Database, config *ethconfig.Config) (*costTracker, uint64) {
utilTarget := float64(config.LightServ) * flowcontrol.FixedPointMultiplier / 100
ct := &costTracker{
db: db,
stopCh: make(chan chan struct{}),
reqInfoCh: make(chan reqInfo, 100),
utilTarget: utilTarget,
}
if config.LightIngress > 0 {
ct.inSizeFactor = utilTarget / float64(config.LightIngress)
}
if config.LightEgress > 0 {
ct.outSizeFactor = utilTarget / float64(config.LightEgress)
}
if makeCostStats {
ct.stats = make(map[uint64][]atomic.Uint64)
for code := range reqAvgTimeCost {
ct.stats[code] = make([]atomic.Uint64, 10)
}
}
ct.gfLoop()
costList := ct.makeCostList(ct.globalFactor() * 1.25)
for _, c := range costList {
amount := minBufferReqAmount[c.MsgCode]
cost := c.BaseCost + amount*c.ReqCost
if cost > ct.minBufLimit {
ct.minBufLimit = cost
}
}
ct.minBufLimit *= uint64(minBufferMultiplier)
return ct, (ct.minBufLimit-1)/bufLimitRatio + 1
}
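// minCapacityExample is an illustrative sketch, not part of the original
// file: it reproduces the arithmetic above with hypothetical worst-case
// request costs to show how the minimum assignable capacity falls out of the
// cost list.
func minCapacityExample() uint64 {
	costs := []uint64{
		150000 + 192*30000, // e.g. a full GetBlockHeadersMsg batch
		0 + 64*250000,      // e.g. a full GetTxStatusMsg batch
	}
	var minBufLimit uint64
	for _, cost := range costs {
		if cost > minBufLimit {
			minBufLimit = cost
		}
	}
	minBufLimit *= uint64(minBufferMultiplier)
	return (minBufLimit-1)/bufLimitRatio + 1 // minimum capacity per peer
}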
// stop stops the cost tracker and saves the cost factor statistics to the database
func (ct *costTracker) stop() {
stopCh := make(chan struct{})
ct.stopCh <- stopCh
<-stopCh
if makeCostStats {
ct.printStats()
}
}
// makeCostList returns upper cost estimates based on the hardcoded cost estimate
// tables and the optionally specified incoming/outgoing bandwidth limits
func (ct *costTracker) makeCostList(globalFactor float64) RequestCostList {
maxCost := func(avgTimeCost, inSize, outSize uint64) uint64 {
cost := avgTimeCost * maxCostFactor
inSizeCost := uint64(float64(inSize) * ct.inSizeFactor * globalFactor)
if inSizeCost > cost {
cost = inSizeCost
}
outSizeCost := uint64(float64(outSize) * ct.outSizeFactor * globalFactor)
if outSizeCost > cost {
cost = outSizeCost
}
return cost
}
var list RequestCostList
for code, data := range reqAvgTimeCost {
baseCost := maxCost(data.baseCost, reqMaxInSize[code].baseCost, reqMaxOutSize[code].baseCost)
reqCost := maxCost(data.reqCost, reqMaxInSize[code].reqCost, reqMaxOutSize[code].reqCost)
if ct.minBufLimit != 0 {
// if minBufLimit is set then always enforce maximum request cost <= minBufLimit
maxCost := baseCost + reqCost*minBufferReqAmount[code]
if maxCost > ct.minBufLimit {
mul := 0.999 * float64(ct.minBufLimit) / float64(maxCost)
baseCost = uint64(float64(baseCost) * mul)
reqCost = uint64(float64(reqCost) * mul)
}
}
list = append(list, requestCostListItem{
MsgCode: code,
BaseCost: baseCost,
ReqCost: reqCost,
})
}
return list
}
// reqInfo contains the estimated time cost and the actual request serving time
// which acts as a feed source to update factor maintained by costTracker.
type reqInfo struct {
// avgTimeCost is the estimated time cost corresponding to maxCostTable.
avgTimeCost float64
// servingTime is the CPU time corresponding to the actual processing of
// the request.
servingTime float64
// msgCode indicates the type of request.
msgCode uint64
}
// gfLoop starts an event loop which updates the global cost factor. The factor is
// calculated as a weighted average of the average estimate / serving time ratio.
// The applied weight equals the serving time if gfUsage is over a threshold,
// zero otherwise. gfUsage is the recent average serving time per time unit in
// an exponential moving window. This ensures that statistics are collected only
// under high-load circumstances where the measured serving times are relevant.
// The total recharge parameter of the flow control system, which controls the
// total allowed serving time per second but is denominated in cost units, should
// also be scaled with the cost factor and is also updated by this loop.
func (ct *costTracker) gfLoop() {
var (
factor, totalRecharge float64
gfLog, recentTime, recentAvg float64
lastUpdate, expUpdate = mclock.Now(), mclock.Now()
)
// Load historical cost factor statistics from the database.
data, _ := ct.db.Get([]byte(gfDbKey))
if len(data) == 8 {
gfLog = math.Float64frombits(binary.BigEndian.Uint64(data[:]))
}
ct.factor = math.Exp(gfLog)
factor, totalRecharge = ct.factor, ct.utilTarget*ct.factor
// In order to collect factor statistics only under high request pressure,
// the factor is only adjusted when recent usage is beyond the threshold.
threshold := gfUsageThreshold * float64(gfUsageTC) * ct.utilTarget / flowcontrol.FixedPointMultiplier
go func() {
saveCostFactor := func() {
var data [8]byte
binary.BigEndian.PutUint64(data[:], math.Float64bits(gfLog))
ct.db.Put([]byte(gfDbKey), data[:])
log.Debug("global cost factor saved", "value", factor)
}
saveTicker := time.NewTicker(time.Minute * 10)
defer saveTicker.Stop()
for {
select {
case r := <-ct.reqInfoCh:
relCost := int64(factor * r.servingTime * 100 / r.avgTimeCost) // Convert the value to a percentage form
// Record more metrics if we are debugging
if metrics.EnabledExpensive {
switch r.msgCode {
case GetBlockHeadersMsg:
relativeCostHeaderHistogram.Update(relCost)
case GetBlockBodiesMsg:
relativeCostBodyHistogram.Update(relCost)
case GetReceiptsMsg:
relativeCostReceiptHistogram.Update(relCost)
case GetCodeMsg:
relativeCostCodeHistogram.Update(relCost)
case GetProofsV2Msg:
relativeCostProofHistogram.Update(relCost)
case GetHelperTrieProofsMsg:
relativeCostHelperProofHistogram.Update(relCost)
case SendTxV2Msg:
relativeCostSendTxHistogram.Update(relCost)
case GetTxStatusMsg:
relativeCostTxStatusHistogram.Update(relCost)
}
}
// SendTxV2 and GetTxStatus requests are two special cases:
// all other requests only put pressure on the database, so their
// delay is relatively stable, while these two involve a txpool
// query, which is usually unstable.
//
// TODO(rjl493456442) fix this.
if r.msgCode == SendTxV2Msg || r.msgCode == GetTxStatusMsg {
continue
}
requestServedMeter.Mark(int64(r.servingTime))
requestServedTimer.Update(time.Duration(r.servingTime))
requestEstimatedMeter.Mark(int64(r.avgTimeCost / factor))
requestEstimatedTimer.Update(time.Duration(r.avgTimeCost / factor))
relativeCostHistogram.Update(relCost)
now := mclock.Now()
dt := float64(now - expUpdate)
expUpdate = now
exp := math.Exp(-dt / float64(gfUsageTC))
// calculate factor correction until now, based on previous values
var gfCorr float64
max := recentTime
if recentAvg > max {
max = recentAvg
}
// we apply continuous correction when MAX(recentTime, recentAvg) > threshold
if max > threshold {
// calculate correction time between last expUpdate and now
if max*exp >= threshold {
gfCorr = dt
} else {
gfCorr = math.Log(max/threshold) * float64(gfUsageTC)
}
// calculate log(factor) correction with the right direction and time constant
if recentTime > recentAvg {
// drop factor if actual serving times are larger than average estimates
gfCorr /= -float64(gfDropTC)
} else {
// raise factor if actual serving times are smaller than average estimates
gfCorr /= float64(gfRaiseTC)
}
}
// update recent cost values with current request
recentTime = recentTime*exp + r.servingTime
recentAvg = recentAvg*exp + r.avgTimeCost/factor
if gfCorr != 0 {
// Apply the correction to factor
gfLog += gfCorr
factor = math.Exp(gfLog)
// Notify outside modules the new factor and totalRecharge.
if time.Duration(now-lastUpdate) > time.Second {
totalRecharge, lastUpdate = ct.utilTarget*factor, now
ct.gfLock.Lock()
ct.factor = factor
ch := ct.totalRechargeCh
ct.gfLock.Unlock()
if ch != nil {
select {
case ct.totalRechargeCh <- uint64(totalRecharge):
default:
}
}
globalFactorGauge.Update(int64(1000 * factor))
log.Debug("global cost factor updated", "factor", factor)
}
}
recentServedGauge.Update(int64(recentTime))
recentEstimatedGauge.Update(int64(recentAvg))
case <-saveTicker.C:
saveCostFactor()
case stopCh := <-ct.stopCh:
saveCostFactor()
close(stopCh)
return
}
}
}()
}
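// usageWindowExample is an illustrative sketch, not part of the original
// file: it isolates the exponential moving window used by gfLoop for
// recentTime/recentAvg, feeding in hypothetical requests arriving dt apart,
// each with a fixed serving time.
func usageWindowExample(dt, servingTime time.Duration, requests int) float64 {
	var recent float64
	for i := 0; i < requests; i++ {
		// decay the window by exp(-dt/gfUsageTC), then add the new sample
		recent = recent*math.Exp(-float64(dt)/float64(gfUsageTC)) + float64(servingTime)
	}
	return recent
}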
// globalFactor returns the current value of the global cost factor
func (ct *costTracker) globalFactor() float64 {
ct.gfLock.RLock()
defer ct.gfLock.RUnlock()
return ct.factor
}
// totalRecharge returns the current total recharge parameter which is used by
// flowcontrol.ClientManager and is scaled by the global cost factor
func (ct *costTracker) totalRecharge() uint64 {
ct.gfLock.RLock()
defer ct.gfLock.RUnlock()
return uint64(ct.factor * ct.utilTarget)
}
// subscribeTotalRecharge returns all future updates to the total recharge value
// through a channel and also returns the current value
func (ct *costTracker) subscribeTotalRecharge(ch chan uint64) uint64 {
ct.gfLock.Lock()
defer ct.gfLock.Unlock()
ct.totalRechargeCh = ch
return uint64(ct.factor * ct.utilTarget)
}
// updateStats updates the global cost factor and (if enabled) the real cost vs.
// average estimate statistics
func (ct *costTracker) updateStats(code, amount, servingTime, realCost uint64) {
avg := reqAvgTimeCost[code]
avgTimeCost := avg.baseCost + amount*avg.reqCost
select {
case ct.reqInfoCh <- reqInfo{float64(avgTimeCost), float64(servingTime), code}:
default:
}
if makeCostStats {
realCost <<= 4
l := 0
for l < 9 && realCost > avgTimeCost {
l++
realCost >>= 1
}
ct.stats[code][l].Add(1)
}
}
// realCost calculates the final cost of a request based on actual serving time,
// incoming and outgoing message size
//
// Note: message size is only taken into account if bandwidth limitation is applied
// and the cost based on one of the message sizes is greater than the cost based on
// serving time. The maximum of the three costs is applied instead of their sum
// because the three limited resources (serving thread time, incoming and outgoing
// i/o bandwidth) can also be maxed out simultaneously.
func (ct *costTracker) realCost(servingTime uint64, inSize, outSize uint32) uint64 {
cost := float64(servingTime)
inSizeCost := float64(inSize) * ct.inSizeFactor
if inSizeCost > cost {
cost = inSizeCost
}
outSizeCost := float64(outSize) * ct.outSizeFactor
if outSizeCost > cost {
cost = outSizeCost
}
return uint64(cost * ct.globalFactor())
}
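// realCostExample is an illustrative sketch, not part of the original file:
// it re-derives the max-of-three rule above with hypothetical factors
// (inSizeFactor=50, outSizeFactor=100, globalFactor=1). A 1ms serving time
// dominates a 100 byte request and a 2000 byte reply: max(1e6, 5e3, 2e5).
func realCostExample() uint64 {
	const inSizeFactor, outSizeFactor, globalFactor = 50.0, 100.0, 1.0
	servingTime, inSize, outSize := uint64(1_000_000), uint32(100), uint32(2000)
	cost := float64(servingTime)
	if c := float64(inSize) * inSizeFactor; c > cost {
		cost = c
	}
	if c := float64(outSize) * outSizeFactor; c > cost {
		cost = c
	}
	return uint64(cost * globalFactor) // 1_000_000
}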
// printStats prints the distribution of real request cost relative to the average estimates
func (ct *costTracker) printStats() {
if ct.stats == nil {
return
}
for code, arr := range ct.stats {
log.Info("Request cost statistics", "code", code, "1/16", arr[0].Load(), "1/8", arr[1].Load(), "1/4", arr[2].Load(), "1/2", arr[3].Load(), "1", arr[4].Load(), "2", arr[5].Load(), "4", arr[6].Load(), "8", arr[7].Load(), "16", arr[8].Load(), ">16", arr[9].Load())
}
}
type (
// requestCostTable assigns a cost estimate function to each request type
// which is a linear function of the requested amount
// (cost = baseCost + reqCost * amount)
requestCostTable map[uint64]*requestCosts
requestCosts struct {
baseCost, reqCost uint64
}
// RequestCostList is a list representation of request costs which is used for
// database storage and communication through the network
RequestCostList []requestCostListItem
requestCostListItem struct {
MsgCode, BaseCost, ReqCost uint64
}
)
// getMaxCost calculates the estimated cost for a given request type and amount
func (table requestCostTable) getMaxCost(code, amount uint64) uint64 {
costs := table[code]
return costs.baseCost + amount*costs.reqCost
}
// decode converts a cost list to a cost table
func (list RequestCostList) decode(protocolLength uint64) requestCostTable {
table := make(requestCostTable)
for _, e := range list {
if e.MsgCode < protocolLength {
table[e.MsgCode] = &requestCosts{
baseCost: e.BaseCost,
reqCost: e.ReqCost,
}
}
}
return table
}
// testCostList returns a dummy request cost list used by tests
func testCostList(testCost uint64) RequestCostList {
cl := make(RequestCostList, len(reqAvgTimeCost))
var max uint64
for code := range reqAvgTimeCost {
if code > max {
max = code
}
}
i := 0
for code := uint64(0); code <= max; code++ {
if _, ok := reqAvgTimeCost[code]; ok {
cl[i].MsgCode = code
cl[i].BaseCost = testCost
cl[i].ReqCost = 0
i++
}
}
return cl
}
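// costListDecodeExample is an illustrative sketch, not part of the original
// file: it decodes a one-entry cost list into a table and evaluates the
// linear formula (cost = baseCost + amount*reqCost) for a hypothetical batch.
func costListDecodeExample() uint64 {
	list := RequestCostList{{MsgCode: GetBlockHeadersMsg, BaseCost: 150000, ReqCost: 30000}}
	table := list.decode(ProtocolLengths[lpv2])
	return table.getMaxCost(GetBlockHeadersMsg, 192) // 150000 + 192*30000
}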

@ -1,313 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"container/list"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/les/utils"
)
// requestDistributor implements a mechanism that distributes requests to
// suitable peers, obeying flow control rules and prioritizing them in creation
// order (even when a resend is necessary).
type requestDistributor struct {
clock mclock.Clock
reqQueue *list.List
lastReqOrder uint64
peers map[distPeer]struct{}
peerLock sync.RWMutex
loopChn chan struct{}
loopNextSent bool
lock sync.Mutex
closeCh chan struct{}
wg sync.WaitGroup
}
// distPeer is an LES server peer interface for the request distributor.
// waitBefore returns either the necessary waiting time before sending a request
// with the given upper estimated cost or the estimated remaining relative buffer
// value after sending such a request (in which case the request can be sent
// immediately). At least one of these values is always zero.
type distPeer interface {
waitBefore(uint64) (time.Duration, float64)
canQueue() bool
queueSend(f func()) bool
}
// distReq is the request abstraction used by the distributor. It is based on
// three callback functions:
// - getCost returns the upper estimate of the cost of sending the request to a given peer
// - canSend tells if the server peer is suitable to serve the request
// - request prepares sending the request to the given peer and returns a function that
// does the actual sending. Request order should be preserved, but the callback itself should not
// block until the request is sent because other peers might still be able to receive requests
// while one of them is blocking. Instead, the returned function is put in the peer's send queue.
type distReq struct {
getCost func(distPeer) uint64
canSend func(distPeer) bool
request func(distPeer) func()
reqOrder uint64
sentChn chan distPeer
element *list.Element
waitForPeers mclock.AbsTime
enterQueue mclock.AbsTime
}
// newRequestDistributor creates a new request distributor
func newRequestDistributor(peers *serverPeerSet, clock mclock.Clock) *requestDistributor {
d := &requestDistributor{
clock: clock,
reqQueue: list.New(),
loopChn: make(chan struct{}, 2),
closeCh: make(chan struct{}),
peers: make(map[distPeer]struct{}),
}
if peers != nil {
peers.subscribe(d)
}
d.wg.Add(1)
go d.loop()
return d
}
// registerPeer implements peerSetNotify
func (d *requestDistributor) registerPeer(p *serverPeer) {
d.peerLock.Lock()
d.peers[p] = struct{}{}
d.peerLock.Unlock()
}
// unregisterPeer implements peerSetNotify
func (d *requestDistributor) unregisterPeer(p *serverPeer) {
d.peerLock.Lock()
delete(d.peers, p)
d.peerLock.Unlock()
}
// registerTestPeer adds a new test peer
func (d *requestDistributor) registerTestPeer(p distPeer) {
d.peerLock.Lock()
d.peers[p] = struct{}{}
d.peerLock.Unlock()
}
var (
// distMaxWait is the maximum waiting time after which further necessary waiting
// times are recalculated based on new feedback from the servers
distMaxWait = time.Millisecond * 50
// waitForPeers is the time window in which a request does not fail even if it
// has no suitable peers to send to at the moment
waitForPeers = time.Second * 3
)
// main event loop
func (d *requestDistributor) loop() {
defer d.wg.Done()
for {
select {
case <-d.closeCh:
d.lock.Lock()
elem := d.reqQueue.Front()
for elem != nil {
req := elem.Value.(*distReq)
close(req.sentChn)
req.sentChn = nil
elem = elem.Next()
}
d.lock.Unlock()
return
case <-d.loopChn:
d.lock.Lock()
d.loopNextSent = false
loop:
for {
peer, req, wait := d.nextRequest()
if req != nil && wait == 0 {
chn := req.sentChn // save sentChn because remove sets it to nil
d.remove(req)
send := req.request(peer)
if send != nil {
peer.queueSend(send)
requestSendDelay.Update(time.Duration(d.clock.Now() - req.enterQueue))
}
chn <- peer
close(chn)
} else {
if wait == 0 {
// no request to send and nothing to wait for; the next
// queued request will wake up the loop
break loop
}
d.loopNextSent = true // a "next" signal has been sent, do not send another one until this one has been received
if wait > distMaxWait {
// waiting times may be reduced by incoming request replies; if the wait is too long, recalculate it periodically
wait = distMaxWait
}
go func() {
d.clock.Sleep(wait)
d.loopChn <- struct{}{}
}()
break loop
}
}
d.lock.Unlock()
}
}
}
// selectPeerItem represents a peer to be selected for a request by weightedRandomSelect
type selectPeerItem struct {
peer distPeer
req *distReq
weight uint64
}
func selectPeerWeight(i interface{}) uint64 {
return i.(selectPeerItem).weight
}
// nextRequest returns the next possible request from any peer, along with the
// associated peer and necessary waiting time
func (d *requestDistributor) nextRequest() (distPeer, *distReq, time.Duration) {
checkedPeers := make(map[distPeer]struct{})
elem := d.reqQueue.Front()
var (
bestWait time.Duration
sel *utils.WeightedRandomSelect
)
d.peerLock.RLock()
defer d.peerLock.RUnlock()
peerCount := len(d.peers)
for (len(checkedPeers) < peerCount || elem == d.reqQueue.Front()) && elem != nil {
req := elem.Value.(*distReq)
canSend := false
now := d.clock.Now()
if req.waitForPeers > now {
canSend = true
wait := time.Duration(req.waitForPeers - now)
if bestWait == 0 || wait < bestWait {
bestWait = wait
}
}
for peer := range d.peers {
if _, ok := checkedPeers[peer]; !ok && peer.canQueue() && req.canSend(peer) {
canSend = true
cost := req.getCost(peer)
wait, bufRemain := peer.waitBefore(cost)
if wait == 0 {
if sel == nil {
sel = utils.NewWeightedRandomSelect(selectPeerWeight)
}
sel.Update(selectPeerItem{peer: peer, req: req, weight: uint64(bufRemain*1000000) + 1})
} else {
if bestWait == 0 || wait < bestWait {
bestWait = wait
}
}
checkedPeers[peer] = struct{}{}
}
}
next := elem.Next()
if !canSend && elem == d.reqQueue.Front() {
close(req.sentChn)
d.remove(req)
}
elem = next
}
if sel != nil {
c := sel.Choose().(selectPeerItem)
return c.peer, c.req, 0
}
return nil, nil, bestWait
}
// queue adds a request to the distribution queue and returns a channel on which
// the receiving peer is sent once the request has been sent (i.e. the request
// callback has returned). If the request is cancelled or times out without
// suitable peers, the channel is closed without sending any peer reference on it.
func (d *requestDistributor) queue(r *distReq) chan distPeer {
d.lock.Lock()
defer d.lock.Unlock()
if r.reqOrder == 0 {
d.lastReqOrder++
r.reqOrder = d.lastReqOrder
r.waitForPeers = d.clock.Now().Add(waitForPeers)
}
// Assign the timestamp when the request is queued, no matter whether
// it is a new or a re-queued one.
r.enterQueue = d.clock.Now()
back := d.reqQueue.Back()
if back == nil || r.reqOrder > back.Value.(*distReq).reqOrder {
r.element = d.reqQueue.PushBack(r)
} else {
before := d.reqQueue.Front()
for before.Value.(*distReq).reqOrder < r.reqOrder {
before = before.Next()
}
r.element = d.reqQueue.InsertBefore(r, before)
}
if !d.loopNextSent {
d.loopNextSent = true
d.loopChn <- struct{}{}
}
r.sentChn = make(chan distPeer, 1)
return r.sentChn
}
// cancel removes a request from the queue if it has not been sent yet (returns
// false if it has been sent already). It is guaranteed that the callback functions
// will not be called after cancel returns.
func (d *requestDistributor) cancel(r *distReq) bool {
d.lock.Lock()
defer d.lock.Unlock()
if r.sentChn == nil {
return false
}
close(r.sentChn)
d.remove(r)
return true
}
// remove removes a request from the queue
func (d *requestDistributor) remove(r *distReq) {
r.sentChn = nil
if r.element != nil {
d.reqQueue.Remove(r.element)
r.element = nil
}
}
func (d *requestDistributor) close() {
close(d.closeCh)
d.wg.Wait()
}

@ -1,189 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"math/rand"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
)
type testDistReq struct {
cost, procTime, order uint64
canSendTo map[*testDistPeer]struct{}
}
func (r *testDistReq) getCost(dp distPeer) uint64 {
return r.cost
}
func (r *testDistReq) canSend(dp distPeer) bool {
_, ok := r.canSendTo[dp.(*testDistPeer)]
return ok
}
func (r *testDistReq) request(dp distPeer) func() {
return func() { dp.(*testDistPeer).send(r) }
}
type testDistPeer struct {
sent []*testDistReq
sumCost uint64
lock sync.RWMutex
}
func (p *testDistPeer) send(r *testDistReq) {
p.lock.Lock()
defer p.lock.Unlock()
p.sent = append(p.sent, r)
p.sumCost += r.cost
}
func (p *testDistPeer) worker(t *testing.T, checkOrder bool, stop chan struct{}) {
var last uint64
for {
wait := time.Millisecond
p.lock.Lock()
if len(p.sent) > 0 {
rq := p.sent[0]
wait = time.Duration(rq.procTime)
p.sumCost -= rq.cost
if checkOrder {
if rq.order <= last {
t.Errorf("Requests processed in wrong order")
}
last = rq.order
}
p.sent = p.sent[1:]
}
p.lock.Unlock()
select {
case <-stop:
return
case <-time.After(wait):
}
}
}
const (
testDistBufLimit = 10000000
testDistMaxCost = 1000000
testDistPeerCount = 2
testDistReqCount = 10
testDistMaxResendCount = 3
)
func (p *testDistPeer) waitBefore(cost uint64) (time.Duration, float64) {
p.lock.RLock()
sumCost := p.sumCost + cost
p.lock.RUnlock()
if sumCost < testDistBufLimit {
return 0, float64(testDistBufLimit-sumCost) / float64(testDistBufLimit)
}
return time.Duration(sumCost - testDistBufLimit), 0
}
func (p *testDistPeer) canQueue() bool {
return true
}
func (p *testDistPeer) queueSend(f func()) bool {
f()
return true
}
func TestRequestDistributor(t *testing.T) {
testRequestDistributor(t, false)
}
func TestRequestDistributorResend(t *testing.T) {
testRequestDistributor(t, true)
}
func testRequestDistributor(t *testing.T, resend bool) {
stop := make(chan struct{})
defer close(stop)
dist := newRequestDistributor(nil, &mclock.System{})
var peers [testDistPeerCount]*testDistPeer
for i := range peers {
peers[i] = &testDistPeer{}
go peers[i].worker(t, !resend, stop)
dist.registerTestPeer(peers[i])
}
// Disable the mechanism that makes us wait for a while even when there is
// no suitable peer to send the request to right now.
waitForPeers = 0
var wg sync.WaitGroup
for i := 1; i <= testDistReqCount; i++ {
cost := uint64(rand.Int63n(testDistMaxCost))
procTime := uint64(rand.Int63n(int64(cost + 1)))
rq := &testDistReq{
cost: cost,
procTime: procTime,
order: uint64(i),
canSendTo: make(map[*testDistPeer]struct{}),
}
for _, peer := range peers {
if rand.Intn(2) != 0 {
rq.canSendTo[peer] = struct{}{}
}
}
wg.Add(1)
req := &distReq{
getCost: rq.getCost,
canSend: rq.canSend,
request: rq.request,
}
chn := dist.queue(req)
go func() {
cnt := 1
if resend && len(rq.canSendTo) != 0 {
cnt = rand.Intn(testDistMaxResendCount) + 1
}
for i := 0; i < cnt; i++ {
if i != 0 {
chn = dist.queue(req)
}
p := <-chn
if p == nil {
if len(rq.canSendTo) != 0 {
t.Errorf("Request that could have been sent was dropped")
}
} else {
peer := p.(*testDistPeer)
if _, ok := rq.canSendTo[peer]; !ok {
t.Errorf("Request sent to wrong peer")
}
}
}
wg.Done()
}()
if rand.Intn(1000) == 0 {
time.Sleep(time.Duration(rand.Intn(5000000)))
}
}
wg.Wait()
}

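A worked example of the test peer's waitBefore arithmetic above, under the constants defined in this file. waitBeforeExample is hypothetical and assumes an fmt import; it is only meant to make the numbers concrete.

func waitBeforeExample() {
	p := &testDistPeer{sumCost: 3000000}
	wait, relBuf := p.waitBefore(1000000) // sum would reach 4,000,000 of 10,000,000
	fmt.Println(wait, relBuf)             // 0s 0.6: can send now, 60% buffer left
	p.sumCost = 12000000
	wait, relBuf = p.waitBefore(1000000) // sum would reach 13,000,000
	fmt.Println(wait, relBuf)            // 3ms 0: the wait grows linearly past the limit
}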
@@ -1,72 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/p2p/dnsdisc"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
// lesEntry is the "les" ENR entry. This is set for LES servers only.
type lesEntry struct {
// Ignore additional fields (for forward compatibility).
VfxVersion uint
Rest []rlp.RawValue `rlp:"tail"`
}
func (lesEntry) ENRKey() string { return "les" }
// ethEntry is the "eth" ENR entry. This is redeclared here to avoid depending on package eth.
type ethEntry struct {
ForkID forkid.ID
Tail []rlp.RawValue `rlp:"tail"`
}
func (ethEntry) ENRKey() string { return "eth" }
// setupDiscovery creates the node discovery source for the les protocol.
func (eth *LightEthereum) setupDiscovery() (enode.Iterator, error) {
it := enode.NewFairMix(0)
// Enable DNS discovery.
if len(eth.config.EthDiscoveryURLs) != 0 {
client := dnsdisc.NewClient(dnsdisc.Config{})
dns, err := client.NewIterator(eth.config.EthDiscoveryURLs...)
if err != nil {
return nil, err
}
it.AddSource(dns)
}
// Enable DHT.
if eth.udpEnabled {
it.AddSource(eth.p2pServer.DiscV5.RandomNodes())
}
forkFilter := forkid.NewFilter(eth.blockchain)
iterator := enode.Filter(it, func(n *enode.Node) bool { return nodeIsServer(forkFilter, n) })
return iterator, nil
}
// nodeIsServer checks whether n is an LES server node.
func nodeIsServer(forkFilter forkid.Filter, n *enode.Node) bool {
var les lesEntry
var eth ethEntry
return n.Load(&les) == nil && n.Load(&eth) == nil && forkFilter(eth.ForkID) == nil
}

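A small hedged sketch of how a discovered node record could be checked for the "les" entry, mirroring the nodeIsServer filter above; hasLesEntry is a hypothetical helper and assumes an *enode.Node obtained from discovery.

func hasLesEntry(n *enode.Node) bool {
	var entry lesEntry
	// Load decodes the entry under the "les" ENR key, if present.
	return n.Load(&entry) == nil
}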
@@ -1,433 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package flowcontrol implements a client-side flow control mechanism
package flowcontrol
import (
"fmt"
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/log"
)
const (
// fcTimeConst is the time constant applied for MinRecharge during linear
// buffer recharge period
fcTimeConst = time.Millisecond
// DecParamDelay is applied on the server side when decreasing capacity in
// order to avoid a buffer underrun error due to requests sent by the client
// before receiving the capacity update announcement
DecParamDelay = time.Second * 2
// keepLogs is the duration of keeping logs; logging is not used if zero
keepLogs = 0
)
// ServerParams are the flow control parameters specified by a server for a client
//
// Note: a server can assign different amounts of capacity to each client by giving
// different parameters to them.
type ServerParams struct {
BufLimit, MinRecharge uint64
}
// scheduledUpdate represents a delayed flow control parameter update
type scheduledUpdate struct {
time mclock.AbsTime
params ServerParams
}
// ClientNode is the flow control system's representation of a client
// (used in server mode only)
type ClientNode struct {
params ServerParams
bufValue int64
lastTime mclock.AbsTime
updateSchedule []scheduledUpdate
sumCost uint64 // sum of req costs received from this client
accepted map[uint64]uint64 // value = sumCost after accepting the given req
connected bool
lock sync.Mutex
cm *ClientManager
log *logger
cmNodeFields
}
// NewClientNode returns a new ClientNode
func NewClientNode(cm *ClientManager, params ServerParams) *ClientNode {
node := &ClientNode{
cm: cm,
params: params,
bufValue: int64(params.BufLimit),
lastTime: cm.clock.Now(),
accepted: make(map[uint64]uint64),
connected: true,
}
if keepLogs > 0 {
node.log = newLogger(keepLogs)
}
cm.connect(node)
return node
}
// Disconnect should be called when a client is disconnected
func (node *ClientNode) Disconnect() {
node.lock.Lock()
defer node.lock.Unlock()
node.connected = false
node.cm.disconnect(node)
}
// BufferStatus returns the current buffer value and limit
func (node *ClientNode) BufferStatus() (uint64, uint64) {
node.lock.Lock()
defer node.lock.Unlock()
if !node.connected {
return 0, 0
}
now := node.cm.clock.Now()
node.update(now)
node.cm.updateBuffer(node, 0, now)
bv := node.bufValue
if bv < 0 {
bv = 0
}
return uint64(bv), node.params.BufLimit
}
// OneTimeCost subtracts the given amount from the node's buffer.
//
// Note: this call can take the buffer into the negative region internally.
// In this case a zero buffer value is returned by exported calls and no requests
// are accepted.
func (node *ClientNode) OneTimeCost(cost uint64) {
node.lock.Lock()
defer node.lock.Unlock()
now := node.cm.clock.Now()
node.update(now)
node.bufValue -= int64(cost)
node.cm.updateBuffer(node, -int64(cost), now)
}
// Freeze notifies the client manager about a client freeze event in which case
// the total capacity allowance is slightly reduced.
func (node *ClientNode) Freeze() {
node.lock.Lock()
frozenCap := node.params.MinRecharge
node.lock.Unlock()
node.cm.reduceTotalCapacity(frozenCap)
}
// update recalculates the buffer value at a specified time while also performing
// scheduled flow control parameter updates if necessary
func (node *ClientNode) update(now mclock.AbsTime) {
for len(node.updateSchedule) > 0 && node.updateSchedule[0].time <= now {
node.recalcBV(node.updateSchedule[0].time)
node.updateParams(node.updateSchedule[0].params, now)
node.updateSchedule = node.updateSchedule[1:]
}
node.recalcBV(now)
}
// recalcBV recalculates the buffer value at a specified time
func (node *ClientNode) recalcBV(now mclock.AbsTime) {
dt := uint64(now - node.lastTime)
if now < node.lastTime {
dt = 0
}
node.bufValue += int64(node.params.MinRecharge * dt / uint64(fcTimeConst))
if node.bufValue > int64(node.params.BufLimit) {
node.bufValue = int64(node.params.BufLimit)
}
if node.log != nil {
node.log.add(now, fmt.Sprintf("updated bv=%d MRR=%d BufLimit=%d", node.bufValue, node.params.MinRecharge, node.params.BufLimit))
}
node.lastTime = now
}
// UpdateParams updates the flow control parameters of a client node
func (node *ClientNode) UpdateParams(params ServerParams) {
node.lock.Lock()
defer node.lock.Unlock()
now := node.cm.clock.Now()
node.update(now)
if params.MinRecharge >= node.params.MinRecharge {
node.updateSchedule = nil
node.updateParams(params, now)
} else {
for i, s := range node.updateSchedule {
if params.MinRecharge >= s.params.MinRecharge {
s.params = params
node.updateSchedule = node.updateSchedule[:i+1]
return
}
}
node.updateSchedule = append(node.updateSchedule, scheduledUpdate{time: now.Add(DecParamDelay), params: params})
}
}
// updateParams updates the flow control parameters of the node
func (node *ClientNode) updateParams(params ServerParams, now mclock.AbsTime) {
diff := int64(params.BufLimit - node.params.BufLimit)
if diff > 0 {
node.bufValue += diff
} else if node.bufValue > int64(params.BufLimit) {
node.bufValue = int64(params.BufLimit)
}
node.cm.updateParams(node, params, now)
}
// AcceptRequest returns whether a new request can be accepted and the missing
// buffer amount if it was rejected due to a buffer underrun. If accepted, maxCost
// is deducted from the flow control buffer.
func (node *ClientNode) AcceptRequest(reqID, index, maxCost uint64) (accepted bool, bufShort uint64, priority int64) {
node.lock.Lock()
defer node.lock.Unlock()
now := node.cm.clock.Now()
node.update(now)
if int64(maxCost) > node.bufValue {
if node.log != nil {
node.log.add(now, fmt.Sprintf("rejected reqID=%d bv=%d maxCost=%d", reqID, node.bufValue, maxCost))
node.log.dump(now)
}
return false, maxCost - uint64(node.bufValue), 0
}
node.bufValue -= int64(maxCost)
node.sumCost += maxCost
if node.log != nil {
node.log.add(now, fmt.Sprintf("accepted reqID=%d bv=%d maxCost=%d sumCost=%d", reqID, node.bufValue, maxCost, node.sumCost))
}
node.accepted[index] = node.sumCost
return true, 0, node.cm.accepted(node, maxCost, now)
}
// RequestProcessed should be called when the request has been processed
func (node *ClientNode) RequestProcessed(reqID, index, maxCost, realCost uint64) uint64 {
node.lock.Lock()
defer node.lock.Unlock()
now := node.cm.clock.Now()
node.update(now)
node.cm.processed(node, maxCost, realCost, now)
bv := node.bufValue + int64(node.sumCost-node.accepted[index])
if node.log != nil {
node.log.add(now, fmt.Sprintf("processed reqID=%d bv=%d maxCost=%d realCost=%d sumCost=%d oldSumCost=%d reportedBV=%d", reqID, node.bufValue, maxCost, realCost, node.sumCost, node.accepted[index], bv))
}
delete(node.accepted, index)
if bv < 0 {
return 0
}
return uint64(bv)
}
// ServerNode is the flow control system's representation of a server
// (used in client mode only)
type ServerNode struct {
clock mclock.Clock
bufEstimate uint64
bufRecharge bool
lastTime mclock.AbsTime
params ServerParams
sumCost uint64 // sum of req costs sent to this server
pending map[uint64]uint64 // value = sumCost after sending the given req
log *logger
lock sync.RWMutex
}
// NewServerNode returns a new ServerNode
func NewServerNode(params ServerParams, clock mclock.Clock) *ServerNode {
node := &ServerNode{
clock: clock,
bufEstimate: params.BufLimit,
bufRecharge: false,
lastTime: clock.Now(),
params: params,
pending: make(map[uint64]uint64),
}
if keepLogs > 0 {
node.log = newLogger(keepLogs)
}
return node
}
// UpdateParams updates the flow control parameters of the node
func (node *ServerNode) UpdateParams(params ServerParams) {
node.lock.Lock()
defer node.lock.Unlock()
node.recalcBLE(mclock.Now())
if params.BufLimit > node.params.BufLimit {
node.bufEstimate += params.BufLimit - node.params.BufLimit
} else {
if node.bufEstimate > params.BufLimit {
node.bufEstimate = params.BufLimit
}
}
node.params = params
}
// recalcBLE recalculates the lowest estimate for the client's buffer value at
// the given server at the specified time
func (node *ServerNode) recalcBLE(now mclock.AbsTime) {
if now < node.lastTime {
return
}
if node.bufRecharge {
dt := uint64(now - node.lastTime)
node.bufEstimate += node.params.MinRecharge * dt / uint64(fcTimeConst)
if node.bufEstimate >= node.params.BufLimit {
node.bufEstimate = node.params.BufLimit
node.bufRecharge = false
}
}
node.lastTime = now
if node.log != nil {
node.log.add(now, fmt.Sprintf("updated bufEst=%d MRR=%d BufLimit=%d", node.bufEstimate, node.params.MinRecharge, node.params.BufLimit))
}
}
// safetyMargin is added to the flow control waiting time when the estimated buffer value is low
const safetyMargin = time.Millisecond
// CanSend returns the minimum waiting time required before sending a request
// with the given maximum estimated cost. Second return value is the relative
// estimated buffer level after sending the request (divided by BufLimit).
func (node *ServerNode) CanSend(maxCost uint64) (time.Duration, float64) {
node.lock.RLock()
defer node.lock.RUnlock()
if node.params.BufLimit == 0 {
return time.Duration(math.MaxInt64), 0
}
now := node.clock.Now()
node.recalcBLE(now)
maxCost += uint64(safetyMargin) * node.params.MinRecharge / uint64(fcTimeConst)
if maxCost > node.params.BufLimit {
maxCost = node.params.BufLimit
}
if node.bufEstimate >= maxCost {
relBuf := float64(node.bufEstimate-maxCost) / float64(node.params.BufLimit)
if node.log != nil {
node.log.add(now, fmt.Sprintf("canSend bufEst=%d maxCost=%d true relBuf=%f", node.bufEstimate, maxCost, relBuf))
}
return 0, relBuf
}
timeLeft := time.Duration((maxCost - node.bufEstimate) * uint64(fcTimeConst) / node.params.MinRecharge)
if node.log != nil {
node.log.add(now, fmt.Sprintf("canSend bufEst=%d maxCost=%d false timeLeft=%v", node.bufEstimate, maxCost, timeLeft))
}
return timeLeft, 0
}
// QueuedRequest should be called when the request has been assigned to the given
// server node, before putting it in the send queue. It is mandatory that requests
// are sent in the same order as the QueuedRequest calls are made.
func (node *ServerNode) QueuedRequest(reqID, maxCost uint64) {
node.lock.Lock()
defer node.lock.Unlock()
now := node.clock.Now()
node.recalcBLE(now)
// Note: we do not know when requests actually arrive at the server, so
// bufRecharge is not turned on here if the buffer was full; in this case it
// is going to be turned on by the first reply's bufValue feedback
if node.bufEstimate >= maxCost {
node.bufEstimate -= maxCost
} else {
log.Error("Queued request with insufficient buffer estimate")
node.bufEstimate = 0
}
node.sumCost += maxCost
node.pending[reqID] = node.sumCost
if node.log != nil {
node.log.add(now, fmt.Sprintf("queued reqID=%d bufEst=%d maxCost=%d sumCost=%d", reqID, node.bufEstimate, maxCost, node.sumCost))
}
}
// ReceivedReply adjusts estimated buffer value according to the value included in
// the latest request reply.
func (node *ServerNode) ReceivedReply(reqID, bv uint64) {
node.lock.Lock()
defer node.lock.Unlock()
now := node.clock.Now()
node.recalcBLE(now)
if bv > node.params.BufLimit {
bv = node.params.BufLimit
}
sc, ok := node.pending[reqID]
if !ok {
return
}
delete(node.pending, reqID)
cc := node.sumCost - sc
newEstimate := uint64(0)
if bv > cc {
newEstimate = bv - cc
}
if newEstimate > node.bufEstimate {
// Note: we never reduce the buffer estimate based on the reported value; a
// lower report can only be caused by the delayed delivery of the latest reply.
// The lowest estimate based on the previous reply can still be considered valid.
node.bufEstimate = newEstimate
}
node.bufRecharge = node.bufEstimate < node.params.BufLimit
node.lastTime = now
if node.log != nil {
node.log.add(now, fmt.Sprintf("received reqID=%d bufEst=%d reportedBv=%d sumCost=%d oldSumCost=%d", reqID, node.bufEstimate, bv, node.sumCost, sc))
}
}
// ResumeFreeze clears all pending requests and sets the buffer estimate to the
// reported value after resuming from a frozen state
func (node *ServerNode) ResumeFreeze(bv uint64) {
node.lock.Lock()
defer node.lock.Unlock()
for reqID := range node.pending {
delete(node.pending, reqID)
}
now := node.clock.Now()
node.recalcBLE(now)
if bv > node.params.BufLimit {
bv = node.params.BufLimit
}
node.bufEstimate = bv
node.bufRecharge = node.bufEstimate < node.params.BufLimit
node.lastTime = now
if node.log != nil {
node.log.add(now, fmt.Sprintf("unfreeze bv=%d sumCost=%d", bv, node.sumCost))
}
}
// DumpLogs dumps the event log if logging is used
func (node *ServerNode) DumpLogs() {
node.lock.Lock()
defer node.lock.Unlock()
if node.log != nil {
node.log.dump(node.clock.Now())
}
}

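A hedged sketch of the client-side round trip these types support: check CanSend, reserve the estimated cost with QueuedRequest before sending, then feed the server-reported buffer value back through ReceivedReply. sendOneRequest is hypothetical and its values are illustrative only.

func sendOneRequest(node *ServerNode, reqID, maxCost uint64) {
	if wait, _ := node.CanSend(maxCost); wait > 0 {
		time.Sleep(wait) // simplistic; real code would reschedule instead
	}
	node.QueuedRequest(reqID, maxCost)
	// ... send the request over the wire and wait for the reply ...
	var reportedBV uint64 // the BV field carried in the reply message
	node.ReceivedReply(reqID, reportedBV)
}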
@@ -1,65 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package flowcontrol
import (
"fmt"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
)
// logger collects events in string format and discards events older than the
// "keep" parameter
type logger struct {
events map[uint64]logEvent
writePtr, delPtr uint64
keep time.Duration
}
// logEvent describes a single event
type logEvent struct {
time mclock.AbsTime
event string
}
// newLogger creates a new logger
func newLogger(keep time.Duration) *logger {
return &logger{
events: make(map[uint64]logEvent),
keep: keep,
}
}
// add adds a new event and discards old events if possible
func (l *logger) add(now mclock.AbsTime, event string) {
keepAfter := now - mclock.AbsTime(l.keep)
for l.delPtr < l.writePtr && l.events[l.delPtr].time <= keepAfter {
delete(l.events, l.delPtr)
l.delPtr++
}
l.events[l.writePtr] = logEvent{now, event}
l.writePtr++
}
// dump prints all stored events
func (l *logger) dump(now mclock.AbsTime) {
for i := l.delPtr; i < l.writePtr; i++ {
e := l.events[i]
fmt.Println(time.Duration(e.time-now), e.event)
}
}

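A minimal usage sketch for the event logger above, using the simulated clock so timestamps are deterministic; loggerExample is hypothetical (note that keepLogs is zero by default, so this logger is normally disabled).

func loggerExample() {
	clock := &mclock.Simulated{}
	l := newLogger(time.Second)
	l.add(clock.Now(), "connected")
	clock.Run(2 * time.Second)
	l.add(clock.Now(), "request accepted") // the first event has expired and is dropped
	l.dump(clock.Now())                    // prints only the second event
}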
@@ -1,476 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package flowcontrol
import (
"fmt"
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
)
// cmNodeFields are ClientNode fields used by the client manager
// Note: these fields are locked by the client manager's mutex
type cmNodeFields struct {
corrBufValue int64 // buffer value adjusted with the extra recharge amount
rcLastIntValue int64 // past recharge integrator value when corrBufValue was last updated
rcFullIntValue int64 // future recharge integrator value when corrBufValue will reach maximum
queueIndex int // position in the recharge queue (-1 if not queued)
}
// FixedPointMultiplier is applied to the recharge integrator and the recharge curve.
//
// Note: fixed point arithmetic is required for the integrator because it is a
// constantly increasing value that can wrap around the int64 limit (a behavior
// the priority queue also supports). A floating point value would gradually lose
// precision in this application.
// The recharge curve and all recharge values are encoded as fixed point because
// sumRecharge is frequently updated by adding or subtracting individual recharge
// values and perfect precision is required.
const FixedPointMultiplier = 1000000
var (
capacityDropFactor = 0.1
capacityRaiseTC = 1 / (3 * float64(time.Hour)) // time constant for raising the capacity factor
capacityRaiseThresholdRatio = 1.125 // total/connected capacity ratio threshold for raising the capacity factor
)
// ClientManager controls the capacity assigned to the clients of a server.
// Since ServerParams guarantee a safe lower estimate for processable requests
// even in case of all clients being active, ClientManager calculates a
// corrugated buffer value and usually allows a higher remaining buffer value
// to be returned with each reply.
type ClientManager struct {
clock mclock.Clock
lock sync.Mutex
stop chan chan struct{}
curve PieceWiseLinear
sumRecharge, totalRecharge, totalConnected uint64
logTotalCap, totalCapacity float64
logTotalCapRaiseLimit float64
minLogTotalCap, maxLogTotalCap float64
capacityRaiseThreshold uint64
capLastUpdate mclock.AbsTime
totalCapacityCh chan uint64
// recharge integrator is increasing in each moment with a rate of
// (totalRecharge / sumRecharge)*FixedPointMultiplier or 0 if sumRecharge==0
rcLastUpdate mclock.AbsTime // last time the recharge integrator was updated
rcLastIntValue int64 // last updated value of the recharge integrator
priorityOffset int64 // offset for prque priority values ensures that all priorities stay in the int64 range
// recharge queue is a priority queue with currently recharging client nodes
// as elements. The priority value is rcFullIntValue which allows to quickly
// determine which client will first finish recharge.
rcQueue *prque.Prque[int64, *ClientNode]
}
// NewClientManager returns a new client manager.
// Client manager enhances flow control performance by allowing client buffers
// to recharge quicker than the minimum guaranteed recharge rate if possible.
// The sum of all minimum recharge rates (sumRecharge) is updated each time
// a client starts or finishes buffer recharging. Then an adjusted total
// recharge rate is calculated using a piecewise linear recharge curve:
//
// totalRecharge = curve(sumRecharge)
// (totalRecharge >= sumRecharge is enforced)
//
// Then the "bonus" buffer recharge is distributed between currently recharging
// clients proportionally to their minimum recharge rates.
//
// Note: total recharge is proportional to the average number of parallel running
// serving threads. A recharge value of 1000000 corresponds to one thread in average.
// The maximum number of allowed serving threads should always be considerably
// higher than the targeted average number.
//
// Note 2: although it is possible to specify a curve allowing the total target
// recharge to start from zero sumRecharge, it makes sense to add a linear ramp
// starting from zero so that a single low-priority client cannot use up the
// entire server capacity, thus ensuring quick availability for others at any
// moment.
func NewClientManager(curve PieceWiseLinear, clock mclock.Clock) *ClientManager {
cm := &ClientManager{
clock: clock,
rcQueue: prque.New[int64, *ClientNode](func(a *ClientNode, i int) { a.queueIndex = i }),
capLastUpdate: clock.Now(),
stop: make(chan chan struct{}),
}
if curve != nil {
cm.SetRechargeCurve(curve)
}
go func() {
// regularly recalculate and update total capacity
for {
select {
case <-time.After(time.Minute):
cm.lock.Lock()
cm.updateTotalCapacity(cm.clock.Now(), true)
cm.lock.Unlock()
case stop := <-cm.stop:
close(stop)
return
}
}
}()
return cm
}
// Stop stops the client manager
func (cm *ClientManager) Stop() {
stop := make(chan struct{})
cm.stop <- stop
<-stop
}
// SetRechargeCurve updates the recharge curve
func (cm *ClientManager) SetRechargeCurve(curve PieceWiseLinear) {
cm.lock.Lock()
defer cm.lock.Unlock()
now := cm.clock.Now()
cm.updateRecharge(now)
cm.curve = curve
if len(curve) > 0 {
cm.totalRecharge = curve[len(curve)-1].Y
} else {
cm.totalRecharge = 0
}
}
// SetCapacityLimits sets a threshold value used for raising capFactor.
// If either the difference between the total allowed and connected capacity is
// less than this threshold, or their ratio is less than
// capacityRaiseThresholdRatio, then capFactor is allowed to rise slowly.
func (cm *ClientManager) SetCapacityLimits(min, max, raiseThreshold uint64) {
if min < 1 {
min = 1
}
cm.minLogTotalCap = math.Log(float64(min))
if max < 1 {
max = 1
}
cm.maxLogTotalCap = math.Log(float64(max))
cm.logTotalCap = cm.maxLogTotalCap
cm.capacityRaiseThreshold = raiseThreshold
cm.refreshCapacity()
}
// connect should be called when a client is connected, before passing it to any
// other ClientManager function
func (cm *ClientManager) connect(node *ClientNode) {
cm.lock.Lock()
defer cm.lock.Unlock()
now := cm.clock.Now()
cm.updateRecharge(now)
node.corrBufValue = int64(node.params.BufLimit)
node.rcLastIntValue = cm.rcLastIntValue
node.queueIndex = -1
cm.updateTotalCapacity(now, true)
cm.totalConnected += node.params.MinRecharge
cm.updateRaiseLimit()
}
// disconnect should be called when a client is disconnected
func (cm *ClientManager) disconnect(node *ClientNode) {
cm.lock.Lock()
defer cm.lock.Unlock()
now := cm.clock.Now()
cm.updateRecharge(cm.clock.Now())
cm.updateTotalCapacity(now, true)
cm.totalConnected -= node.params.MinRecharge
cm.updateRaiseLimit()
}
// accepted is called when a request with a given maximum cost is accepted.
// It returns a priority indicator for the request which is used to determine placement
// in the serving queue. Older requests have higher priority by default. If the client
// is almost out of buffer, request priority is reduced.
func (cm *ClientManager) accepted(node *ClientNode, maxCost uint64, now mclock.AbsTime) (priority int64) {
cm.lock.Lock()
defer cm.lock.Unlock()
cm.updateNodeRc(node, -int64(maxCost), &node.params, now)
rcTime := (node.params.BufLimit - uint64(node.corrBufValue)) * FixedPointMultiplier / node.params.MinRecharge
return -int64(now) - int64(rcTime)
}
// processed updates the client buffer according to actual request cost after
// serving has been finished.
//
// Note: processed should always be called for all accepted requests
func (cm *ClientManager) processed(node *ClientNode, maxCost, realCost uint64, now mclock.AbsTime) {
if realCost > maxCost {
realCost = maxCost
}
cm.updateBuffer(node, int64(maxCost-realCost), now)
}
// updateBuffer recalculates the corrected buffer value, adds the given value to it
// and updates the node's actual buffer value if possible
func (cm *ClientManager) updateBuffer(node *ClientNode, add int64, now mclock.AbsTime) {
cm.lock.Lock()
defer cm.lock.Unlock()
cm.updateNodeRc(node, add, &node.params, now)
if node.corrBufValue > node.bufValue {
if node.log != nil {
node.log.add(now, fmt.Sprintf("corrected bv=%d oldBv=%d", node.corrBufValue, node.bufValue))
}
node.bufValue = node.corrBufValue
}
}
// updateParams updates the flow control parameters of a client node
func (cm *ClientManager) updateParams(node *ClientNode, params ServerParams, now mclock.AbsTime) {
cm.lock.Lock()
defer cm.lock.Unlock()
cm.updateRecharge(now)
cm.updateTotalCapacity(now, true)
cm.totalConnected += params.MinRecharge - node.params.MinRecharge
cm.updateRaiseLimit()
cm.updateNodeRc(node, 0, &params, now)
}
// updateRaiseLimit recalculates the limiting value until which logTotalCap
// can be raised when no client freeze events occur
func (cm *ClientManager) updateRaiseLimit() {
if cm.capacityRaiseThreshold == 0 {
cm.logTotalCapRaiseLimit = 0
return
}
limit := float64(cm.totalConnected + cm.capacityRaiseThreshold)
limit2 := float64(cm.totalConnected) * capacityRaiseThresholdRatio
if limit2 > limit {
limit = limit2
}
if limit < 1 {
limit = 1
}
cm.logTotalCapRaiseLimit = math.Log(limit)
}
// updateRecharge updates the recharge integrator and checks the recharge queue
// for nodes with recently filled buffers
func (cm *ClientManager) updateRecharge(now mclock.AbsTime) {
lastUpdate := cm.rcLastUpdate
cm.rcLastUpdate = now
// updating is done in multiple steps if node buffers are filled and sumRecharge
// is decreased before the given target time
for cm.sumRecharge > 0 {
sumRecharge := cm.sumRecharge
if sumRecharge > cm.totalRecharge {
sumRecharge = cm.totalRecharge
}
bonusRatio := float64(1)
v := cm.curve.ValueAt(sumRecharge)
s := float64(sumRecharge)
if v > s && s > 0 {
bonusRatio = v / s
}
dt := now - lastUpdate
// fetch the client that finishes first
rcqNode := cm.rcQueue.PopItem() // if sumRecharge > 0 then the queue cannot be empty
// check whether it has already finished
dtNext := mclock.AbsTime(float64(rcqNode.rcFullIntValue-cm.rcLastIntValue) / bonusRatio)
if dt < dtNext {
// not finished yet, put it back, update integrator according
// to current bonusRatio and return
cm.addToQueue(rcqNode)
cm.rcLastIntValue += int64(bonusRatio * float64(dt))
return
}
lastUpdate += dtNext
// finished recharging, update corrBufValue and sumRecharge if necessary and do next step
if rcqNode.corrBufValue < int64(rcqNode.params.BufLimit) {
rcqNode.corrBufValue = int64(rcqNode.params.BufLimit)
cm.sumRecharge -= rcqNode.params.MinRecharge
}
cm.rcLastIntValue = rcqNode.rcFullIntValue
}
}
func (cm *ClientManager) addToQueue(node *ClientNode) {
if cm.priorityOffset-node.rcFullIntValue < -0x4000000000000000 {
cm.priorityOffset += 0x4000000000000000
// recreate priority queue with new offset to avoid overflow; should happen very rarely
newRcQueue := prque.New[int64, *ClientNode](func(a *ClientNode, i int) { a.queueIndex = i })
for cm.rcQueue.Size() > 0 {
n := cm.rcQueue.PopItem()
newRcQueue.Push(n, cm.priorityOffset-n.rcFullIntValue)
}
cm.rcQueue = newRcQueue
}
cm.rcQueue.Push(node, cm.priorityOffset-node.rcFullIntValue)
}
// updateNodeRc updates a node's corrBufValue and adds an external correction value.
// It also adds or removes the rcQueue entry and updates ServerParams and sumRecharge if necessary.
func (cm *ClientManager) updateNodeRc(node *ClientNode, bvc int64, params *ServerParams, now mclock.AbsTime) {
cm.updateRecharge(now)
wasFull := true
if node.corrBufValue != int64(node.params.BufLimit) {
wasFull = false
node.corrBufValue += (cm.rcLastIntValue - node.rcLastIntValue) * int64(node.params.MinRecharge) / FixedPointMultiplier
if node.corrBufValue > int64(node.params.BufLimit) {
node.corrBufValue = int64(node.params.BufLimit)
}
node.rcLastIntValue = cm.rcLastIntValue
}
node.corrBufValue += bvc
diff := int64(params.BufLimit - node.params.BufLimit)
if diff > 0 {
node.corrBufValue += diff
}
isFull := false
if node.corrBufValue >= int64(params.BufLimit) {
node.corrBufValue = int64(params.BufLimit)
isFull = true
}
if !wasFull {
cm.sumRecharge -= node.params.MinRecharge
}
if params != &node.params {
node.params = *params
}
if !isFull {
cm.sumRecharge += node.params.MinRecharge
if node.queueIndex != -1 {
cm.rcQueue.Remove(node.queueIndex)
}
node.rcLastIntValue = cm.rcLastIntValue
node.rcFullIntValue = cm.rcLastIntValue + (int64(node.params.BufLimit)-node.corrBufValue)*FixedPointMultiplier/int64(node.params.MinRecharge)
cm.addToQueue(node)
}
}
// reduceTotalCapacity reduces the total capacity allowance in case of a client freeze event
func (cm *ClientManager) reduceTotalCapacity(frozenCap uint64) {
cm.lock.Lock()
defer cm.lock.Unlock()
ratio := float64(1)
if frozenCap < cm.totalConnected {
ratio = float64(frozenCap) / float64(cm.totalConnected)
}
now := cm.clock.Now()
cm.updateTotalCapacity(now, false)
cm.logTotalCap -= capacityDropFactor * ratio
if cm.logTotalCap < cm.minLogTotalCap {
cm.logTotalCap = cm.minLogTotalCap
}
cm.updateTotalCapacity(now, true)
}
// updateTotalCapacity updates the total capacity factor. The capacity factor allows
// the total capacity of the system to go over the allowed total recharge value
// if clients go to frozen state sufficiently rarely.
// The capacity factor is dropped instantly by a small amount if a client is frozen.
// It is raised slowly (with a large time constant) if the total connected capacity
// is close to the total allowed amount and no clients are frozen.
func (cm *ClientManager) updateTotalCapacity(now mclock.AbsTime, refresh bool) {
dt := now - cm.capLastUpdate
cm.capLastUpdate = now
if cm.logTotalCap < cm.logTotalCapRaiseLimit {
cm.logTotalCap += capacityRaiseTC * float64(dt)
if cm.logTotalCap > cm.logTotalCapRaiseLimit {
cm.logTotalCap = cm.logTotalCapRaiseLimit
}
}
if cm.logTotalCap > cm.maxLogTotalCap {
cm.logTotalCap = cm.maxLogTotalCap
}
if refresh {
cm.refreshCapacity()
}
}
// refreshCapacity recalculates the total capacity value and sends an update to the subscription
// channel if the relative change of the value since the last update is more than 0.1 percent
func (cm *ClientManager) refreshCapacity() {
totalCapacity := math.Exp(cm.logTotalCap)
if totalCapacity >= cm.totalCapacity*0.999 && totalCapacity <= cm.totalCapacity*1.001 {
return
}
cm.totalCapacity = totalCapacity
if cm.totalCapacityCh != nil {
select {
case cm.totalCapacityCh <- uint64(cm.totalCapacity):
default:
}
}
}
// SubscribeTotalCapacity returns all future updates to the total capacity value
// through a channel and also returns the current value
func (cm *ClientManager) SubscribeTotalCapacity(ch chan uint64) uint64 {
cm.lock.Lock()
defer cm.lock.Unlock()
cm.totalCapacityCh = ch
return uint64(cm.totalCapacity)
}
// PieceWiseLinear is used to describe recharge curves
type PieceWiseLinear []struct{ X, Y uint64 }
// ValueAt returns the curve's value at a given point
func (pwl PieceWiseLinear) ValueAt(x uint64) float64 {
l := 0
h := len(pwl)
if h == 0 {
return 0
}
for h != l {
m := (l + h) / 2
if x > pwl[m].X {
l = m + 1
} else {
h = m
}
}
if l == 0 {
return float64(pwl[0].Y)
}
l--
if h == len(pwl) {
return float64(pwl[l].Y)
}
dx := pwl[h].X - pwl[l].X
if dx < 1 {
return float64(pwl[l].Y)
}
return float64(pwl[l].Y) + float64(pwl[h].Y-pwl[l].Y)*float64(x-pwl[l].X)/float64(dx)
}
// Valid returns true if the X coordinates of the curve points are monotonically non-decreasing
func (pwl PieceWiseLinear) Valid() bool {
var lastX uint64
for _, i := range pwl {
if i.X < lastX {
return false
}
lastX = i.X
}
return true
}

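A worked example for the PieceWiseLinear recharge curve above; curveExample is hypothetical and assumes an fmt import. Values between two curve points are linearly interpolated, and the curve is clamped to its end values outside the defined range.

func curveExample() {
	curve := PieceWiseLinear{{X: 0, Y: 0}, {X: 1000, Y: 2000}, {X: 3000, Y: 3000}}
	fmt.Println(curve.Valid())       // true: the X coordinates are non-decreasing
	fmt.Println(curve.ValueAt(500))  // 1000: halfway between (0,0) and (1000,2000)
	fmt.Println(curve.ValueAt(2000)) // 2500: halfway between (1000,2000) and (3000,3000)
	fmt.Println(curve.ValueAt(9999)) // 3000: clamped to the last point
}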
@@ -1,130 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package flowcontrol
import (
"math"
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
)
type testNode struct {
node *ClientNode
bufLimit, capacity uint64
waitUntil mclock.AbsTime
index, totalCost uint64
}
const (
testMaxCost = 1000000
testLength = 100000
)
// testConstantTotalCapacity simulates multiple request sender nodes and verifies
// whether the total amount of served requests matches the expected value based on
// the total capacity and the duration of the test.
// Some nodes are sending requests occasionally so that their buffer should regularly
// reach the maximum while other nodes (the "max capacity nodes") are sending at the
// maximum permitted rate. The max capacity nodes are changed multiple times during
// a single test.
func TestConstantTotalCapacity(t *testing.T) {
testConstantTotalCapacity(t, 10, 1, 0, false)
testConstantTotalCapacity(t, 10, 1, 1, false)
testConstantTotalCapacity(t, 30, 1, 0, false)
testConstantTotalCapacity(t, 30, 2, 3, false)
testConstantTotalCapacity(t, 100, 1, 0, false)
testConstantTotalCapacity(t, 100, 3, 5, false)
testConstantTotalCapacity(t, 100, 5, 10, false)
testConstantTotalCapacity(t, 100, 3, 5, true)
}
func testConstantTotalCapacity(t *testing.T, nodeCount, maxCapacityNodes, randomSend int, priorityOverflow bool) {
clock := &mclock.Simulated{}
nodes := make([]*testNode, nodeCount)
var totalCapacity uint64
for i := range nodes {
nodes[i] = &testNode{capacity: uint64(50000 + rand.Intn(100000))}
totalCapacity += nodes[i].capacity
}
m := NewClientManager(PieceWiseLinear{{0, totalCapacity}}, clock)
if priorityOverflow {
// provoke a situation where rcLastIntValue overflow needs to be handled
m.rcLastIntValue = math.MaxInt64 - 10000000000
}
for _, n := range nodes {
n.bufLimit = n.capacity * 6000
n.node = NewClientNode(m, ServerParams{BufLimit: n.bufLimit, MinRecharge: n.capacity})
}
maxNodes := make([]int, maxCapacityNodes)
for i := range maxNodes {
// we don't care if some indexes are selected multiple times
// in that case we have fewer max nodes
maxNodes[i] = rand.Intn(nodeCount)
}
var sendCount int
for i := 0; i < testLength; i++ {
now := clock.Now()
for _, idx := range maxNodes {
for nodes[idx].send(t, now) {
}
}
if rand.Intn(testLength) < maxCapacityNodes*3 {
maxNodes[rand.Intn(maxCapacityNodes)] = rand.Intn(nodeCount)
}
sendCount += randomSend
failCount := randomSend * 10
for sendCount > 0 && failCount > 0 {
if nodes[rand.Intn(nodeCount)].send(t, now) {
sendCount--
} else {
failCount--
}
}
clock.Run(time.Millisecond)
}
var totalCost uint64
for _, n := range nodes {
totalCost += n.totalCost
}
ratio := float64(totalCost) / float64(totalCapacity) / testLength
if ratio < 0.98 || ratio > 1.02 {
t.Errorf("totalCost/totalCapacity/testLength ratio incorrect (expected: 1, got: %f)", ratio)
}
}
func (n *testNode) send(t *testing.T, now mclock.AbsTime) bool {
if now < n.waitUntil {
return false
}
n.index++
if ok, _, _ := n.node.AcceptRequest(0, n.index, testMaxCost); !ok {
t.Fatalf("Rejected request after expected waiting time has passed")
}
rcost := uint64(rand.Int63n(testMaxCost))
bv := n.node.RequestProcessed(0, n.index, testMaxCost, rcost)
if bv < testMaxCost {
n.waitUntil = now + mclock.AbsTime((testMaxCost-bv)*1001000/n.capacity)
}
n.totalCost += rcost
return true
}

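For completeness, the server-side pattern the test above exercises, in compact form; serveOneRequest is hypothetical, uses arbitrary values, and assumes an fmt import. A request is accepted against the flow control buffer, served, and the real cost is reported back.

func serveOneRequest() {
	clock := &mclock.Simulated{}
	cm := NewClientManager(PieceWiseLinear{{X: 0, Y: 1000000}}, clock)
	defer cm.Stop()
	node := NewClientNode(cm, ServerParams{BufLimit: 6000000, MinRecharge: 1000})
	defer node.Disconnect()
	if ok, bufShort, _ := node.AcceptRequest(1, 1, 500000); !ok {
		fmt.Println("rejected, buffer short by", bufShort)
		return
	}
	// ... serve the request; realCost is measured while serving ...
	bv := node.RequestProcessed(1, 1, 500000, 300000)
	fmt.Println("buffer value to report back:", bv)
}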
@@ -1,754 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"encoding/binary"
"math/big"
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
)
func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {
type resp struct {
ReqID, BV uint64
Data interface{}
}
return p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})
}
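// For context (hedged): every LES reply carries the request ID and the
// server-reported flow control buffer value (BV) ahead of the payload; that
// BV is what the client feeds back into its buffer estimate. The struct
// below merely restates the framing checked by expectResponse and is not
// used elsewhere in this file.
type lesReplyFraming struct {
	ReqID uint64      // echoes the ID of the request being answered
	BV    uint64      // flow control buffer value reported by the server
	Data  interface{} // message-specific payload
}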
// Tests that block headers can be retrieved from a remote chain based on user queries.
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
func TestGetBlockHeadersLes3(t *testing.T) { testGetBlockHeaders(t, 3) }
func TestGetBlockHeadersLes4(t *testing.T) { testGetBlockHeaders(t, 4) }
func testGetBlockHeaders(t *testing.T, protocol int) {
netconfig := testnetConfig{
blocks: downloader.MaxHeaderFetch + 15,
protocol: protocol,
nopruning: true,
}
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
bc := server.handler.blockchain
// Create a "random" unknown hash for testing
var unknown common.Hash
for i := range unknown {
unknown[i] = byte(i)
}
// Create a batch of tests for various scenarios
limit := uint64(MaxHeaderFetch)
tests := []struct {
query *GetBlockHeadersData // The query to execute for header retrieval
expect []common.Hash // The hashes of the blocks whose headers are expected
}{
// A single random block should be retrievable by hash and number too
{
&GetBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},
[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
}, {
&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},
[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},
},
// Multiple headers should be retrievable in both directions
{
&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},
[]common.Hash{
bc.GetBlockByNumber(limit / 2).Hash(),
bc.GetBlockByNumber(limit/2 + 1).Hash(),
bc.GetBlockByNumber(limit/2 + 2).Hash(),
},
}, {
&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},
[]common.Hash{
bc.GetBlockByNumber(limit / 2).Hash(),
bc.GetBlockByNumber(limit/2 - 1).Hash(),
bc.GetBlockByNumber(limit/2 - 2).Hash(),
},
},
// Multiple headers with skip lists should be retrievable
{
&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},
[]common.Hash{
bc.GetBlockByNumber(limit / 2).Hash(),
bc.GetBlockByNumber(limit/2 + 4).Hash(),
bc.GetBlockByNumber(limit/2 + 8).Hash(),
},
}, {
&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
bc.GetBlockByNumber(limit / 2).Hash(),
bc.GetBlockByNumber(limit/2 - 4).Hash(),
bc.GetBlockByNumber(limit/2 - 8).Hash(),
},
},
// The chain endpoints should be retrievable
{
&GetBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},
[]common.Hash{bc.GetBlockByNumber(0).Hash()},
}, {
&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().Number.Uint64()}, Amount: 1},
[]common.Hash{bc.CurrentBlock().Hash()},
},
// Ensure protocol limits are honored
//{
// &GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true},
// []common.Hash{},
//},
// Check that requesting more than available is handled gracefully
{
&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3},
[]common.Hash{
bc.GetBlockByNumber(bc.CurrentBlock().Number.Uint64() - 4).Hash(),
bc.GetBlockByNumber(bc.CurrentBlock().Number.Uint64()).Hash(),
},
}, {
&GetBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},
[]common.Hash{
bc.GetBlockByNumber(4).Hash(),
bc.GetBlockByNumber(0).Hash(),
},
},
// Check that requesting more than available is handled gracefully, even if mid skip
{
&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3},
[]common.Hash{
bc.GetBlockByNumber(bc.CurrentBlock().Number.Uint64() - 4).Hash(),
bc.GetBlockByNumber(bc.CurrentBlock().Number.Uint64() - 1).Hash(),
},
}, {
&GetBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},
[]common.Hash{
bc.GetBlockByNumber(4).Hash(),
bc.GetBlockByNumber(1).Hash(),
},
},
// Check that non-existing headers aren't returned
{
&GetBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},
[]common.Hash{},
}, {
&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().Number.Uint64() + 1}, Amount: 1},
[]common.Hash{},
},
}
// Run each of the tests and verify the results against the chain
var reqID uint64
for i, tt := range tests {
// Collect the headers to expect in the response
var headers []*types.Header
for _, hash := range tt.expect {
headers = append(headers, bc.GetHeaderByHash(hash))
}
// Send the hash request and verify the response
reqID++
sendRequest(rawPeer.app, GetBlockHeadersMsg, reqID, tt.query)
if err := expectResponse(rawPeer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {
t.Errorf("test %d: headers mismatch: %v", i, err)
}
}
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
func TestGetBlockBodiesLes3(t *testing.T) { testGetBlockBodies(t, 3) }
func TestGetBlockBodiesLes4(t *testing.T) { testGetBlockBodies(t, 4) }
func testGetBlockBodies(t *testing.T, protocol int) {
netconfig := testnetConfig{
blocks: downloader.MaxHeaderFetch + 15,
protocol: protocol,
nopruning: true,
}
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
bc := server.handler.blockchain
// Create a batch of tests for various scenarios
limit := MaxBodyFetch
tests := []struct {
random int // Number of blocks to fetch randomly from the chain
explicit []common.Hash // Explicitly requested blocks
available []bool // Availability of explicitly requested blocks
expected int // Total number of existing blocks to expect
}{
{1, nil, nil, 1}, // A single random block should be retrievable
{10, nil, nil, 10}, // Multiple random blocks should be retrievable
{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable
//{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned
{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable
{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable
{0, []common.Hash{{}}, []bool{false}, 0}, // A non-existent block should not be returned
// Existing and non-existing blocks interleaved should not cause problems
{0, []common.Hash{
{},
bc.GetBlockByNumber(1).Hash(),
{},
bc.GetBlockByNumber(10).Hash(),
{},
bc.GetBlockByNumber(100).Hash(),
{},
}, []bool{false, true, false, true, false, true, false}, 3},
}
// Run each of the tests and verify the results against the chain
var reqID uint64
for i, tt := range tests {
// Collect the hashes to request, and the response to expect
var hashes []common.Hash
seen := make(map[int64]bool)
var bodies []*types.Body
for j := 0; j < tt.random; j++ {
for {
num := rand.Int63n(int64(bc.CurrentBlock().Number.Uint64()))
if !seen[num] {
seen[num] = true
block := bc.GetBlockByNumber(uint64(num))
hashes = append(hashes, block.Hash())
if len(bodies) < tt.expected {
bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
}
break
}
}
}
for j, hash := range tt.explicit {
hashes = append(hashes, hash)
if tt.available[j] && len(bodies) < tt.expected {
block := bc.GetBlockByHash(hash)
bodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})
}
}
reqID++
// Send the hash request and verify the response
sendRequest(rawPeer.app, GetBlockBodiesMsg, reqID, hashes)
if err := expectResponse(rawPeer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {
t.Errorf("test %d: bodies mismatch: %v", i, err)
}
}
}
// Tests that the contract codes can be retrieved based on account addresses.
func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
func TestGetCodeLes3(t *testing.T) { testGetCode(t, 3) }
func TestGetCodeLes4(t *testing.T) { testGetCode(t, 4) }
func testGetCode(t *testing.T, protocol int) {
// Assemble the test environment
netconfig := testnetConfig{
blocks: 4,
protocol: protocol,
nopruning: true,
}
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
bc := server.handler.blockchain
var codereqs []*CodeReq
var codes [][]byte
for i := uint64(0); i <= bc.CurrentBlock().Number.Uint64(); i++ {
header := bc.GetHeaderByNumber(i)
req := &CodeReq{
BHash: header.Hash(),
AccountAddress: testContractAddr[:],
}
codereqs = append(codereqs, req)
if i >= testContractDeployed {
codes = append(codes, testContractCodeDeployed)
}
}
sendRequest(rawPeer.app, GetCodeMsg, 42, codereqs)
if err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, codes); err != nil {
t.Errorf("codes mismatch: %v", err)
}
}
// Tests that stale contract code can't be retrieved based on account addresses.
func TestGetStaleCodeLes2(t *testing.T) { testGetStaleCode(t, 2) }
func TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) }
func TestGetStaleCodeLes4(t *testing.T) { testGetStaleCode(t, 4) }
func testGetStaleCode(t *testing.T, protocol int) {
netconfig := testnetConfig{
blocks: core.TriesInMemory + 4,
protocol: protocol,
nopruning: true,
}
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
bc := server.handler.blockchain
check := func(number uint64, expected [][]byte) {
req := &CodeReq{
BHash: bc.GetHeaderByNumber(number).Hash(),
AccountAddress: testContractAddr[:],
}
sendRequest(rawPeer.app, GetCodeMsg, 42, []*CodeReq{req})
if err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, expected); err != nil {
t.Errorf("codes mismatch: %v", err)
}
}
check(0, [][]byte{}) // Non-existent contract
check(testContractDeployed, [][]byte{}) // Stale contract
check(bc.CurrentHeader().Number.Uint64(), [][]byte{testContractCodeDeployed}) // Fresh contract
}
// Tests that the transaction receipts can be retrieved based on hashes.
func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
func TestGetReceiptLes3(t *testing.T) { testGetReceipt(t, 3) }
func TestGetReceiptLes4(t *testing.T) { testGetReceipt(t, 4) }
func testGetReceipt(t *testing.T, protocol int) {
// Assemble the test environment
netconfig := testnetConfig{
blocks: 4,
protocol: protocol,
nopruning: true,
}
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
bc := server.handler.blockchain
// Collect the hashes to request, and the response to expect
var receipts []types.Receipts
var hashes []common.Hash
for i := uint64(0); i <= bc.CurrentBlock().Number.Uint64(); i++ {
block := bc.GetBlockByNumber(i)
hashes = append(hashes, block.Hash())
receipts = append(receipts, rawdb.ReadReceipts(server.db, block.Hash(), block.NumberU64(), block.Time(), bc.Config()))
}
// Send the hash request and verify the response
sendRequest(rawPeer.app, GetReceiptsMsg, 42, hashes)
if err := expectResponse(rawPeer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {
t.Errorf("receipts mismatch: %v", err)
}
}
// Tests that trie merkle proofs can be retrieved
func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
func TestGetProofsLes3(t *testing.T) { testGetProofs(t, 3) }
func TestGetProofsLes4(t *testing.T) { testGetProofs(t, 4) }
func testGetProofs(t *testing.T, protocol int) {
// Assemble the test environment
netconfig := testnetConfig{
blocks: 4,
protocol: protocol,
nopruning: true,
}
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
bc := server.handler.blockchain
var proofreqs []ProofReq
proofsV2 := trienode.NewProofSet()
accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}
for i := uint64(0); i <= bc.CurrentBlock().Number.Uint64(); i++ {
header := bc.GetHeaderByNumber(i)
trie, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB())
for _, acc := range accounts {
req := ProofReq{
BHash: header.Hash(),
Key: crypto.Keccak256(acc[:]),
}
proofreqs = append(proofreqs, req)
trie.Prove(crypto.Keccak256(acc[:]), proofsV2)
}
}
// Send the proof request and verify the response
sendRequest(rawPeer.app, GetProofsV2Msg, 42, proofreqs)
if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.List()); err != nil {
t.Errorf("proofs mismatch: %v", err)
}
}
// Tests that stale merkle proofs can't be retrieved based on account addresses.
func TestGetStaleProofLes2(t *testing.T) { testGetStaleProof(t, 2) }
func TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) }
func TestGetStaleProofLes4(t *testing.T) { testGetStaleProof(t, 4) }
func testGetStaleProof(t *testing.T, protocol int) {
netconfig := testnetConfig{
blocks: core.TriesInMemory + 4,
protocol: protocol,
nopruning: true,
}
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
bc := server.handler.blockchain
check := func(number uint64, wantOK bool) {
var (
header = bc.GetHeaderByNumber(number)
account = crypto.Keccak256(userAddr1.Bytes())
)
req := &ProofReq{
BHash: header.Hash(),
Key: account,
}
sendRequest(rawPeer.app, GetProofsV2Msg, 42, []*ProofReq{req})
var expected []rlp.RawValue
if wantOK {
proofsV2 := trienode.NewProofSet()
t, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB())
t.Prove(account, proofsV2)
expected = proofsV2.List()
}
if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {
t.Errorf("codes mismatch: %v", err)
}
}
check(0, false) // Non-existent proof
check(2, false) // Stale proof
check(bc.CurrentHeader().Number.Uint64(), true) // Fresh proof
}
// Tests that CHT proofs can be correctly retrieved.
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
func TestGetCHTProofsLes3(t *testing.T) { testGetCHTProofs(t, 3) }
func TestGetCHTProofsLes4(t *testing.T) { testGetCHTProofs(t, 4) }
func testGetCHTProofs(t *testing.T, protocol int) {
var (
config = light.TestServerIndexerConfig
waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
for {
cs, _, _ := cIndexer.Sections()
if cs >= 1 {
break
}
time.Sleep(10 * time.Millisecond)
}
}
netconfig = testnetConfig{
blocks: int(config.ChtSize + config.ChtConfirms),
protocol: protocol,
indexFn: waitIndexers,
nopruning: true,
}
)
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
bc := server.handler.blockchain
// Assemble the proofs from the different protocols
header := bc.GetHeaderByNumber(config.ChtSize - 1)
headerRlp, _ := rlp.EncodeToBytes(header)
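// CHT keys are big-endian encoded block numbers.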
key := make([]byte, 8)
binary.BigEndian.PutUint64(key, config.ChtSize-1)
proofsV2 := HelperTrieResps{
AuxData: [][]byte{headerRlp},
}
root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
tr, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.ChtTablePrefix)), trie.HashDefaults))
tr.Prove(key, &proofsV2.Proofs)
// Assemble the requests for the different protocols
requestsV2 := []HelperTrieReq{{
Type: htCanonical,
TrieIdx: 0,
Key: key,
AuxReq: htAuxHeader,
}}
// Send the proof request and verify the response
sendRequest(rawPeer.app, GetHelperTrieProofsMsg, 42, requestsV2)
if err := expectResponse(rawPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
t.Errorf("proofs mismatch: %v", err)
}
}
// Tests that bloombits proofs can be correctly retrieved.
func TestGetBloombitsProofsLes2(t *testing.T) { testGetBloombitsProofs(t, 2) }
func TestGetBloombitsProofsLes3(t *testing.T) { testGetBloombitsProofs(t, 3) }
func TestGetBloombitsProofsLes4(t *testing.T) { testGetBloombitsProofs(t, 4) }
func testGetBloombitsProofs(t *testing.T, protocol int) {
var (
config = light.TestServerIndexerConfig
waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
for {
bts, _, _ := btIndexer.Sections()
if bts >= 1 {
break
}
time.Sleep(10 * time.Millisecond)
}
}
netconfig = testnetConfig{
blocks: int(config.BloomTrieSize + config.BloomTrieConfirms),
protocol: protocol,
indexFn: waitIndexers,
nopruning: true,
}
)
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
bc := server.handler.blockchain
// Request and verify each bit of the bloom bits proofs
for bit := 0; bit < 2048; bit++ {
// Assemble the request and proofs for the bloombits
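// Bloom trie keys are 10 bytes: a 2-byte bit index followed by an 8-byte section index.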
key := make([]byte, 10)
binary.BigEndian.PutUint16(key[:2], uint16(bit))
// Only the first bloom section has data.
binary.BigEndian.PutUint64(key[2:], 0)
requests := []HelperTrieReq{{
Type: htBloomBits,
TrieIdx: 0,
Key: key,
}}
var proofs HelperTrieResps
root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())
tr, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.BloomTrieTablePrefix)), trie.HashDefaults))
tr.Prove(key, &proofs.Proofs)
// Send the proof request and verify the response
sendRequest(rawPeer.app, GetHelperTrieProofsMsg, 42, requests)
if err := expectResponse(rawPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {
t.Errorf("bit %d: proofs mismatch: %v", bit, err)
}
}
}
func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, lpv2) }
func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, lpv3) }
func TestTransactionStatusLes4(t *testing.T) { testTransactionStatus(t, lpv4) }
func testTransactionStatus(t *testing.T, protocol int) {
netconfig := testnetConfig{
protocol: protocol,
nopruning: true,
}
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
server.handler.addTxsSync = true
chain := server.handler.blockchain
var reqID uint64
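// test either injects the transaction into the pool (send == true) or just
// queries its status, then matches the returned TxStatus against expStatus.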
test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {
t.Helper()
reqID++
if send {
sendRequest(rawPeer.app, SendTxV2Msg, reqID, types.Transactions{tx})
} else {
sendRequest(rawPeer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})
}
if err := expectResponse(rawPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {
t.Errorf("transaction status mismatch: %v", err)
}
}
signer := types.HomesteadSigner{}
// test error status by sending an underpriced transaction
tx0, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)
test(tx0, true, light.TxStatus{Status: txpool.TxStatusUnknown, Error: "transaction underpriced: tip needed 1, tip permitted 0"})
tx1, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
test(tx1, false, light.TxStatus{Status: txpool.TxStatusUnknown}) // query before sending, should be unknown
test(tx1, true, light.TxStatus{Status: txpool.TxStatusPending}) // send valid processable tx, should return pending
test(tx1, true, light.TxStatus{Status: txpool.TxStatusPending}) // adding it again should not return an error
tx2, _ := types.SignTx(types.NewTransaction(1, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
tx3, _ := types.SignTx(types.NewTransaction(2, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)
// send transactions in the wrong order, tx3 should be queued
test(tx3, true, light.TxStatus{Status: txpool.TxStatusQueued})
test(tx2, true, light.TxStatus{Status: txpool.TxStatusPending})
// query again, now tx3 should be pending too
test(tx3, false, light.TxStatus{Status: txpool.TxStatusPending})
// generate and add a block with tx1 and tx2 included
gchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 1, func(i int, block *core.BlockGen) {
block.AddTx(tx1)
block.AddTx(tx2)
})
if _, err := chain.InsertChain(gchain); err != nil {
panic(err)
}
// wait until TxPool processes the inserted block
for i := 0; i < 10; i++ {
if pending, _ := server.handler.txpool.Stats(); pending == 1 {
break
}
time.Sleep(100 * time.Millisecond)
}
if pending, _ := server.handler.txpool.Stats(); pending != 1 {
t.Fatalf("pending count mismatch: have %d, want 1", pending)
}
// Discard new block announcement
msg, _ := rawPeer.app.ReadMsg()
msg.Discard()
// check if their status is included now
block1hash := rawdb.ReadCanonicalHash(server.db, 1)
test(tx1, false, light.TxStatus{Status: txpool.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})
test(tx2, false, light.TxStatus{Status: txpool.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})
// create a reorg that rolls them back
gchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 2, func(i int, block *core.BlockGen) {})
if _, err := chain.InsertChain(gchain); err != nil {
panic(err)
}
// wait until TxPool processes the reorg
for i := 0; i < 10; i++ {
if pending, _ := server.handler.txpool.Stats(); pending == 3 {
break
}
time.Sleep(100 * time.Millisecond)
}
if pending, _ := server.handler.txpool.Stats(); pending != 3 {
t.Fatalf("pending count mismatch: have %d, want 3", pending)
}
// Discard new block announcement
msg, _ = rawPeer.app.ReadMsg()
msg.Discard()
// check if their status is pending again
test(tx1, false, light.TxStatus{Status: txpool.TxStatusPending})
test(tx2, false, light.TxStatus{Status: txpool.TxStatusPending})
}
func TestStopResumeLES3(t *testing.T) { testStopResume(t, lpv3) }
func TestStopResumeLES4(t *testing.T) { testStopResume(t, lpv4) }
func testStopResume(t *testing.T, protocol int) {
netconfig := testnetConfig{
protocol: protocol,
simClock: true,
nopruning: true,
}
server, _, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
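// Use a fixed cost table so that every request consumes exactly one tenth
// of the client's buffer allowance.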
server.handler.server.costTracker.testing = true
server.handler.server.costTracker.testCostList = testCostList(testBufLimit / 10)
rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol)
defer closePeer()
var (
reqID uint64
expBuf = testBufLimit
testCost = testBufLimit / 10
)
header := server.handler.blockchain.CurrentHeader()
req := func() {
reqID++
sendRequest(rawPeer.app, GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})
}
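// Alternate between draining the buffer with requests and letting it recharge,
// expecting StopMsg/ResumeMsg flow control signals at the boundaries.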
for i := 1; i <= 5; i++ {
// send requests while we still have enough buffer and expect a response
for expBuf >= testCost {
req()
expBuf -= testCost
if err := expectResponse(rawPeer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {
t.Errorf("expected response and failed: %v", err)
}
}
// send some more requests in excess and expect a single StopMsg
c := i
for c > 0 {
req()
c--
}
if err := p2p.ExpectMsg(rawPeer.app, StopMsg, nil); err != nil {
t.Errorf("expected StopMsg and failed: %v", err)
}
// wait until the buffer is recharged by half of the limit
wait := testBufLimit / testBufRecharge / 2
server.clock.(*mclock.Simulated).Run(time.Millisecond * time.Duration(wait))
// expect a ResumeMsg with the partially recharged buffer value
expBuf += testBufRecharge * wait
if err := p2p.ExpectMsg(rawPeer.app, ResumeMsg, expBuf); err != nil {
t.Errorf("expected ResumeMsg and failed: %v", err)
}
}
}

@ -1,151 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
)
var (
miscInPacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets/total", nil)
miscInTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic/total", nil)
miscInHeaderPacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets/header", nil)
miscInHeaderTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic/header", nil)
miscInBodyPacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets/body", nil)
miscInBodyTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic/body", nil)
miscInCodePacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets/code", nil)
miscInCodeTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic/code", nil)
miscInReceiptPacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets/receipt", nil)
miscInReceiptTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic/receipt", nil)
miscInTrieProofPacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets/proof", nil)
miscInTrieProofTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic/proof", nil)
miscInHelperTriePacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets/helperTrie", nil)
miscInHelperTrieTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic/helperTrie", nil)
miscInTxsPacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets/txs", nil)
miscInTxsTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic/txs", nil)
miscInTxStatusPacketsMeter = metrics.NewRegisteredMeter("les/misc/in/packets/txStatus", nil)
miscInTxStatusTrafficMeter = metrics.NewRegisteredMeter("les/misc/in/traffic/txStatus", nil)
miscOutPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets/total", nil)
miscOutTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic/total", nil)
miscOutHeaderPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets/header", nil)
miscOutHeaderTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic/header", nil)
miscOutBodyPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets/body", nil)
miscOutBodyTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic/body", nil)
miscOutCodePacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets/code", nil)
miscOutCodeTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic/code", nil)
miscOutReceiptPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets/receipt", nil)
miscOutReceiptTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic/receipt", nil)
miscOutTrieProofPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets/proof", nil)
miscOutTrieProofTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic/proof", nil)
miscOutHelperTriePacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets/helperTrie", nil)
miscOutHelperTrieTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic/helperTrie", nil)
miscOutTxsPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets/txs", nil)
miscOutTxsTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic/txs", nil)
miscOutTxStatusPacketsMeter = metrics.NewRegisteredMeter("les/misc/out/packets/txStatus", nil)
miscOutTxStatusTrafficMeter = metrics.NewRegisteredMeter("les/misc/out/traffic/txStatus", nil)
miscServingTimeHeaderTimer = metrics.NewRegisteredTimer("les/misc/serve/header", nil)
miscServingTimeBodyTimer = metrics.NewRegisteredTimer("les/misc/serve/body", nil)
miscServingTimeCodeTimer = metrics.NewRegisteredTimer("les/misc/serve/code", nil)
miscServingTimeReceiptTimer = metrics.NewRegisteredTimer("les/misc/serve/receipt", nil)
miscServingTimeTrieProofTimer = metrics.NewRegisteredTimer("les/misc/serve/proof", nil)
miscServingTimeHelperTrieTimer = metrics.NewRegisteredTimer("les/misc/serve/helperTrie", nil)
miscServingTimeTxTimer = metrics.NewRegisteredTimer("les/misc/serve/txs", nil)
miscServingTimeTxStatusTimer = metrics.NewRegisteredTimer("les/misc/serve/txStatus", nil)
connectionTimer = metrics.NewRegisteredTimer("les/connection/duration", nil)
serverConnectionGauge = metrics.NewRegisteredGauge("les/connection/server", nil)
totalCapacityGauge = metrics.NewRegisteredGauge("les/server/totalCapacity", nil)
totalRechargeGauge = metrics.NewRegisteredGauge("les/server/totalRecharge", nil)
blockProcessingTimer = metrics.NewRegisteredTimer("les/server/blockProcessingTime", nil)
requestServedMeter = metrics.NewRegisteredMeter("les/server/req/avgServedTime", nil)
requestServedTimer = metrics.NewRegisteredTimer("les/server/req/servedTime", nil)
requestEstimatedMeter = metrics.NewRegisteredMeter("les/server/req/avgEstimatedTime", nil)
requestEstimatedTimer = metrics.NewRegisteredTimer("les/server/req/estimatedTime", nil)
relativeCostHistogram = metrics.NewRegisteredHistogram("les/server/req/relative", nil, metrics.NewExpDecaySample(1028, 0.015))
relativeCostHeaderHistogram = metrics.NewRegisteredHistogram("les/server/req/relative/header", nil, metrics.NewExpDecaySample(1028, 0.015))
relativeCostBodyHistogram = metrics.NewRegisteredHistogram("les/server/req/relative/body", nil, metrics.NewExpDecaySample(1028, 0.015))
relativeCostReceiptHistogram = metrics.NewRegisteredHistogram("les/server/req/relative/receipt", nil, metrics.NewExpDecaySample(1028, 0.015))
relativeCostCodeHistogram = metrics.NewRegisteredHistogram("les/server/req/relative/code", nil, metrics.NewExpDecaySample(1028, 0.015))
relativeCostProofHistogram = metrics.NewRegisteredHistogram("les/server/req/relative/proof", nil, metrics.NewExpDecaySample(1028, 0.015))
relativeCostHelperProofHistogram = metrics.NewRegisteredHistogram("les/server/req/relative/helperTrie", nil, metrics.NewExpDecaySample(1028, 0.015))
relativeCostSendTxHistogram = metrics.NewRegisteredHistogram("les/server/req/relative/txs", nil, metrics.NewExpDecaySample(1028, 0.015))
relativeCostTxStatusHistogram = metrics.NewRegisteredHistogram("les/server/req/relative/txStatus", nil, metrics.NewExpDecaySample(1028, 0.015))
globalFactorGauge = metrics.NewRegisteredGauge("les/server/globalFactor", nil)
recentServedGauge = metrics.NewRegisteredGauge("les/server/recentRequestServed", nil)
recentEstimatedGauge = metrics.NewRegisteredGauge("les/server/recentRequestEstimated", nil)
sqServedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/served", nil)
sqQueuedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/queued", nil)
clientFreezeMeter = metrics.NewRegisteredMeter("les/server/clientEvent/freeze", nil)
clientErrorMeter = metrics.NewRegisteredMeter("les/server/clientEvent/error", nil)
requestRTT = metrics.NewRegisteredTimer("les/client/req/rtt", nil)
requestSendDelay = metrics.NewRegisteredTimer("les/client/req/sendDelay", nil)
serverSelectableGauge = metrics.NewRegisteredGauge("les/client/serverPool/selectable", nil)
serverDialedMeter = metrics.NewRegisteredMeter("les/client/serverPool/dialed", nil)
serverConnectedGauge = metrics.NewRegisteredGauge("les/client/serverPool/connected", nil)
sessionValueMeter = metrics.NewRegisteredMeter("les/client/serverPool/sessionValue", nil)
totalValueGauge = metrics.NewRegisteredGauge("les/client/serverPool/totalValue", nil)
suggestedTimeoutGauge = metrics.NewRegisteredGauge("les/client/serverPool/timeout", nil)
)
// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
// accumulating the above defined metrics based on the data stream contents.
type meteredMsgReadWriter struct {
p2p.MsgReadWriter // Wrapped message stream to meter
version int // Protocol version to select correct meters
}
// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the
// metrics system is disabled, this function returns the original object.
func newMeteredMsgWriter(rw p2p.MsgReadWriter, version int) p2p.MsgReadWriter {
if !metrics.Enabled {
return rw
}
return &meteredMsgReadWriter{MsgReadWriter: rw, version: version}
}
func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
// Read the message and short circuit in case of an error
msg, err := rw.MsgReadWriter.ReadMsg()
if err != nil {
return msg, err
}
// Account for the data traffic
packets, traffic := miscInPacketsMeter, miscInTrafficMeter
packets.Mark(1)
traffic.Mark(int64(msg.Size))
return msg, err
}
func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error {
// Account for the data traffic
packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter
packets.Mark(1)
traffic.Mark(int64(msg.Size))
// Send the packet to the p2p layer
return rw.MsgReadWriter.WriteMsg(msg)
}

@ -1,237 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"context"
"math/rand"
"sort"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
)
// LesOdr implements light.OdrBackend
type LesOdr struct {
db ethdb.Database
indexerConfig *light.IndexerConfig
chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer
peers *serverPeerSet
retriever *retrieveManager
stop chan struct{}
}
func NewLesOdr(db ethdb.Database, config *light.IndexerConfig, peers *serverPeerSet, retriever *retrieveManager) *LesOdr {
return &LesOdr{
db: db,
indexerConfig: config,
peers: peers,
retriever: retriever,
stop: make(chan struct{}),
}
}
// Stop cancels all pending retrievals
func (odr *LesOdr) Stop() {
close(odr.stop)
}
// Database returns the backing database
func (odr *LesOdr) Database() ethdb.Database {
return odr.db
}
// SetIndexers adds the necessary chain indexers to the ODR backend
func (odr *LesOdr) SetIndexers(chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer) {
odr.chtIndexer = chtIndexer
odr.bloomTrieIndexer = bloomTrieIndexer
odr.bloomIndexer = bloomIndexer
}
// ChtIndexer returns the CHT chain indexer
func (odr *LesOdr) ChtIndexer() *core.ChainIndexer {
return odr.chtIndexer
}
// BloomTrieIndexer returns the bloom trie chain indexer
func (odr *LesOdr) BloomTrieIndexer() *core.ChainIndexer {
return odr.bloomTrieIndexer
}
// BloomIndexer returns the bloombits chain indexer
func (odr *LesOdr) BloomIndexer() *core.ChainIndexer {
return odr.bloomIndexer
}
// IndexerConfig returns the indexer config.
func (odr *LesOdr) IndexerConfig() *light.IndexerConfig {
return odr.indexerConfig
}
const (
MsgBlockHeaders = iota
MsgBlockBodies
MsgCode
MsgReceipts
MsgProofsV2
MsgHelperTrieProofs
MsgTxStatus
)
// Msg encodes a LES message that delivers reply data for a request
type Msg struct {
MsgType int
ReqID uint64
Obj interface{}
}
// peerByTxHistory is a sort.Interface implementation which can sort
// the peerset by transaction history.
type peerByTxHistory []*serverPeer
func (h peerByTxHistory) Len() int { return len(h) }
func (h peerByTxHistory) Less(i, j int) bool {
if h[i].txHistory == txIndexUnlimited {
return false
}
if h[j].txHistory == txIndexUnlimited {
return true
}
return h[i].txHistory < h[j].txHistory
}
func (h peerByTxHistory) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
const (
maxTxStatusRetry      = 3 // The maximum number of retries for a tx status request.
maxTxStatusCandidates = 5 // The maximum number of les servers a tx status request will be sent to.
)
// RetrieveTxStatus retrieves the transaction status from the LES network.
// The LES protocol gives no guarantee that a mined transaction will always be
// retrieved: the transaction may be unindexed, or a malicious server may
// deliberately withhold its reply. Therefore, unretrieved (UNKNOWN)
// transactions are retried a limited number of times, giving a weak guarantee.
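//
// A minimal usage sketch (hypothetical caller, assuming an initialized *LesOdr):
//
//	req := &light.TxStatusRequest{Hashes: []common.Hash{txHash}}
//	if err := odr.RetrieveTxStatus(ctx, req); err == nil {
//		// req.Status[0] now holds the result, possibly still TxStatusUnknown
//	}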
func (odr *LesOdr) RetrieveTxStatus(ctx context.Context, req *light.TxStatusRequest) error {
// Sort according to the transaction history supported by the peer and
// select the peers with the longest histories.
var (
retries int
peers []*serverPeer
missing = len(req.Hashes)
result = make([]light.TxStatus, len(req.Hashes))
canSend = make(map[string]bool)
)
for _, peer := range odr.peers.allPeers() {
if peer.txHistory == txIndexDisabled {
continue
}
peers = append(peers, peer)
}
sort.Sort(sort.Reverse(peerByTxHistory(peers)))
for i := 0; i < maxTxStatusCandidates && i < len(peers); i++ {
canSend[peers[i].id] = true
}
// Send out the request and assemble the result.
for {
if retries >= maxTxStatusRetry || len(canSend) == 0 {
break
}
var (
// Deep copy the request, so that the partial result won't be mixed.
req = &TxStatusRequest{Hashes: req.Hashes}
id = rand.Uint64()
distreq = &distReq{
getCost: func(dp distPeer) uint64 { return req.GetCost(dp.(*serverPeer)) },
canSend: func(dp distPeer) bool { return canSend[dp.(*serverPeer).id] },
request: func(dp distPeer) func() {
p := dp.(*serverPeer)
p.fcServer.QueuedRequest(id, req.GetCost(p))
delete(canSend, p.id)
return func() { req.Request(id, p) }
},
}
)
if err := odr.retriever.retrieve(ctx, id, distreq, func(p distPeer, msg *Msg) error { return req.Validate(odr.db, msg) }, odr.stop); err != nil {
return err
}
// Collect the responses and assemble them into the final result.
// None of the responses are verifiable, so always pick the first
// one we get.
for index, status := range req.Status {
if result[index].Status != txpool.TxStatusUnknown {
continue
}
if status.Status == txpool.TxStatusUnknown {
continue
}
result[index], missing = status, missing-1
}
// Abort the procedure if all the status are retrieved
if missing == 0 {
break
}
retries++
}
req.Status = result
return nil
}
// Retrieve tries to fetch an object from the LES network. It's a common API
// for most of the LES requests except for TxStatusRequest, which needs the
// additional retry mechanism.
// If the network retrieval was successful, it stores the object in the local db.
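//
// A minimal usage sketch (hypothetical caller, assuming an initialized *LesOdr
// and a prepared request such as a *light.BlockRequest):
//
//	if err := odr.Retrieve(ctx, blockReq); err != nil {
//		return err // no peer could serve and validate the request in time
//	}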
func (odr *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err error) {
lreq := LesRequest(req)
reqID := rand.Uint64()
rq := &distReq{
getCost: func(dp distPeer) uint64 {
return lreq.GetCost(dp.(*serverPeer))
},
canSend: func(dp distPeer) bool {
p := dp.(*serverPeer)
if !p.onlyAnnounce {
return lreq.CanSend(p)
}
return false
},
request: func(dp distPeer) func() {
p := dp.(*serverPeer)
cost := lreq.GetCost(p)
p.fcServer.QueuedRequest(reqID, cost)
return func() { lreq.Request(reqID, p) }
},
}
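// Measure the request round-trip time, but only account it for successful retrievals.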
defer func(sent mclock.AbsTime) {
if err != nil {
return
}
requestRTT.Update(time.Duration(mclock.Now() - sent))
}(mclock.Now())
if err := odr.retriever.retrieve(ctx, reqID, rq, func(p distPeer, msg *Msg) error { return lreq.Validate(odr.db, msg) }, odr.stop); err != nil {
return err
}
req.StoreResult(odr.db)
return nil
}

@ -1,537 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"encoding/binary"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
)
var (
errInvalidMessageType = errors.New("invalid message type")
errInvalidEntryCount = errors.New("invalid number of response entries")
errHeaderUnavailable = errors.New("header unavailable")
errTxHashMismatch = errors.New("transaction hash mismatch")
errUncleHashMismatch = errors.New("uncle hash mismatch")
errReceiptHashMismatch = errors.New("receipt hash mismatch")
errDataHashMismatch = errors.New("data hash mismatch")
errCHTHashMismatch = errors.New("cht hash mismatch")
errCHTNumberMismatch = errors.New("cht number mismatch")
errUselessNodes = errors.New("useless nodes in merkle proof nodeset")
)
type LesOdrRequest interface {
GetCost(*serverPeer) uint64
CanSend(*serverPeer) bool
Request(uint64, *serverPeer) error
Validate(ethdb.Database, *Msg) error
}
func LesRequest(req light.OdrRequest) LesOdrRequest {
switch r := req.(type) {
case *light.BlockRequest:
return (*BlockRequest)(r)
case *light.ReceiptsRequest:
return (*ReceiptsRequest)(r)
case *light.TrieRequest:
return (*TrieRequest)(r)
case *light.CodeRequest:
return (*CodeRequest)(r)
case *light.ChtRequest:
return (*ChtRequest)(r)
case *light.BloomRequest:
return (*BloomRequest)(r)
case *light.TxStatusRequest:
return (*TxStatusRequest)(r)
default:
return nil
}
}
// BlockRequest is the ODR request type for block bodies
type BlockRequest light.BlockRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *BlockRequest) GetCost(peer *serverPeer) uint64 {
return peer.getRequestCost(GetBlockBodiesMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (r *BlockRequest) CanSend(peer *serverPeer) bool {
return peer.HasBlock(r.Hash, r.Number, false)
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *BlockRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting block body", "hash", r.Hash)
return peer.requestBodies(reqID, []common.Hash{r.Hash})
}
// Validate processes an ODR request reply message from the LES network
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (r *BlockRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating block body", "hash", r.Hash)
// Ensure we have a correct message with a single block body
if msg.MsgType != MsgBlockBodies {
return errInvalidMessageType
}
bodies := msg.Obj.([]*types.Body)
if len(bodies) != 1 {
return errInvalidEntryCount
}
body := bodies[0]
// Retrieve our stored header and validate block content against it
if r.Header == nil {
r.Header = rawdb.ReadHeader(db, r.Hash, r.Number)
}
if r.Header == nil {
return errHeaderUnavailable
}
if r.Header.TxHash != types.DeriveSha(types.Transactions(body.Transactions), trie.NewStackTrie(nil)) {
return errTxHashMismatch
}
if r.Header.UncleHash != types.CalcUncleHash(body.Uncles) {
return errUncleHashMismatch
}
// Validations passed, encode and store RLP
data, err := rlp.EncodeToBytes(body)
if err != nil {
return err
}
r.Rlp = data
return nil
}
// ReceiptsRequest is the ODR request type for block receipts by block hash
type ReceiptsRequest light.ReceiptsRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *ReceiptsRequest) GetCost(peer *serverPeer) uint64 {
return peer.getRequestCost(GetReceiptsMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (r *ReceiptsRequest) CanSend(peer *serverPeer) bool {
return peer.HasBlock(r.Hash, r.Number, false)
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *ReceiptsRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting block receipts", "hash", r.Hash)
return peer.requestReceipts(reqID, []common.Hash{r.Hash})
}
// Validate processes an ODR request reply message from the LES network
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (r *ReceiptsRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating block receipts", "hash", r.Hash)
// Ensure we have a correct message with a single block receipt
if msg.MsgType != MsgReceipts {
return errInvalidMessageType
}
receipts := msg.Obj.([]types.Receipts)
if len(receipts) != 1 {
return errInvalidEntryCount
}
receipt := receipts[0]
// Retrieve our stored header and validate receipt content against it
if r.Header == nil {
r.Header = rawdb.ReadHeader(db, r.Hash, r.Number)
}
if r.Header == nil {
return errHeaderUnavailable
}
if r.Header.ReceiptHash != types.DeriveSha(receipt, trie.NewStackTrie(nil)) {
return errReceiptHashMismatch
}
// Validations passed, store and return
r.Receipts = receipt
return nil
}
type ProofReq struct {
BHash common.Hash
AccountAddress, Key []byte
FromLevel uint
}
// TrieRequest is the ODR request type for state/storage trie entries, see LesOdrRequest interface
type TrieRequest light.TrieRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *TrieRequest) GetCost(peer *serverPeer) uint64 {
return peer.getRequestCost(GetProofsV2Msg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (r *TrieRequest) CanSend(peer *serverPeer) bool {
return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true)
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *TrieRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting trie proof", "root", r.Id.Root, "key", r.Key)
req := ProofReq{
BHash: r.Id.BlockHash,
AccountAddress: r.Id.AccountAddress,
Key: r.Key,
}
return peer.requestProofs(reqID, []ProofReq{req})
}
// Validate processes an ODR request reply message from the LES network
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating trie proof", "root", r.Id.Root, "key", r.Key)
if msg.MsgType != MsgProofsV2 {
return errInvalidMessageType
}
proofs := msg.Obj.(trienode.ProofList)
// Verify the proof and store if checks out
nodeSet := proofs.Set()
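// Wrap the node set in a read-tracing database so we can check afterwards
// that the peer sent no nodes beyond those needed for the proof.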
reads := &readTraceDB{db: nodeSet}
if _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
// check if all nodes have been read by VerifyProof
if len(reads.reads) != nodeSet.KeyCount() {
return errUselessNodes
}
r.Proof = nodeSet
return nil
}
type CodeReq struct {
BHash common.Hash
AccountAddress []byte
}
// CodeRequest is the ODR request type for node data (used for retrieving contract code), see LesOdrRequest interface
type CodeRequest light.CodeRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *CodeRequest) GetCost(peer *serverPeer) uint64 {
return peer.getRequestCost(GetCodeMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (r *CodeRequest) CanSend(peer *serverPeer) bool {
return peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true)
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *CodeRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting code data", "hash", r.Hash)
req := CodeReq{
BHash: r.Id.BlockHash,
AccountAddress: r.Id.AccountAddress,
}
return peer.requestCode(reqID, []CodeReq{req})
}
// Validate processes an ODR request reply message from the LES network
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (r *CodeRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating code data", "hash", r.Hash)
// Ensure we have a correct message with a single code element
if msg.MsgType != MsgCode {
return errInvalidMessageType
}
reply := msg.Obj.([][]byte)
if len(reply) != 1 {
return errInvalidEntryCount
}
data := reply[0]
// Verify the data and store if checks out
if hash := crypto.Keccak256Hash(data); r.Hash != hash {
return errDataHashMismatch
}
r.Data = data
return nil
}
const (
// helper trie type constants
htCanonical = iota // Canonical hash trie
htBloomBits // BloomBits trie
// helper trie auxiliary types
// htAuxNone = 1 ; deprecated value, previously used in les2/3.
htAuxHeader = 2 // applicable for htCanonical, requests for relevant headers
)
type HelperTrieReq struct {
Type uint
TrieIdx uint64
Key []byte
FromLevel, AuxReq uint
}
type HelperTrieResps struct { // describes all responses, not just a single one
Proofs trienode.ProofList
AuxData [][]byte
}
// ChtRequest is the ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface
type ChtRequest light.ChtRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *ChtRequest) GetCost(peer *serverPeer) uint64 {
return peer.getRequestCost(GetHelperTrieProofsMsg, 1)
}
// CanSend tells if a certain peer is suitable for serving the given request
func (r *ChtRequest) CanSend(peer *serverPeer) bool {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.headInfo.Number >= r.Config.ChtConfirms && r.ChtNum <= (peer.headInfo.Number-r.Config.ChtConfirms)/r.Config.ChtSize
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *ChtRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting CHT", "cht", r.ChtNum, "block", r.BlockNum)
var encNum [8]byte
binary.BigEndian.PutUint64(encNum[:], r.BlockNum)
req := HelperTrieReq{
Type: htCanonical,
TrieIdx: r.ChtNum,
Key: encNum[:],
AuxReq: htAuxHeader,
}
return peer.requestHelperTrieProofs(reqID, []HelperTrieReq{req})
}
// Validate processes an ODR request reply message from the LES network
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating CHT", "cht", r.ChtNum, "block", r.BlockNum)
if msg.MsgType != MsgHelperTrieProofs {
return errInvalidMessageType
}
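// The response must carry exactly one auxiliary entry: the RLP encoded
// header that the CHT entry commits to.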
resp := msg.Obj.(HelperTrieResps)
if len(resp.AuxData) != 1 {
return errInvalidEntryCount
}
nodeSet := resp.Proofs.Set()
headerEnc := resp.AuxData[0]
if len(headerEnc) == 0 {
return errHeaderUnavailable
}
header := new(types.Header)
if err := rlp.DecodeBytes(headerEnc, header); err != nil {
return errHeaderUnavailable
}
// Verify the CHT
var (
node light.ChtNode
encNumber [8]byte
)
binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
reads := &readTraceDB{db: nodeSet}
value, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
if err != nil {
return fmt.Errorf("merkle proof verification failed: %v", err)
}
if len(reads.reads) != nodeSet.KeyCount() {
return errUselessNodes
}
if err := rlp.DecodeBytes(value, &node); err != nil {
return err
}
if node.Hash != header.Hash() {
return errCHTHashMismatch
}
if r.BlockNum != header.Number.Uint64() {
return errCHTNumberMismatch
}
// Verifications passed, store and return
r.Header = header
r.Proof = nodeSet
r.Td = node.Td
return nil
}
type BloomReq struct {
BloomTrieNum, BitIdx, SectionIndex, FromLevel uint64
}
// BloomRequest is the ODR request type for retrieving bloom filter bits from the BloomBits trie, see LesOdrRequest interface
type BloomRequest light.BloomRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *BloomRequest) GetCost(peer *serverPeer) uint64 {
return peer.getRequestCost(GetHelperTrieProofsMsg, len(r.SectionIndexList))
}
// CanSend tells if a certain peer is suitable for serving the given request
func (r *BloomRequest) CanSend(peer *serverPeer) bool {
peer.lock.RLock()
defer peer.lock.RUnlock()
if peer.version < lpv2 {
return false
}
return peer.headInfo.Number >= r.Config.BloomTrieConfirms && r.BloomTrieNum <= (peer.headInfo.Number-r.Config.BloomTrieConfirms)/r.Config.BloomTrieSize
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *BloomRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList)
reqs := make([]HelperTrieReq, len(r.SectionIndexList))
var encNumber [10]byte
binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))
for i, sectionIdx := range r.SectionIndexList {
binary.BigEndian.PutUint64(encNumber[2:], sectionIdx)
reqs[i] = HelperTrieReq{
Type: htBloomBits,
TrieIdx: r.BloomTrieNum,
Key: common.CopyBytes(encNumber[:]),
}
}
return peer.requestHelperTrieProofs(reqID, reqs)
}
// Validate processes an ODR request reply message from the LES network
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating BloomBits", "bloomTrie", r.BloomTrieNum, "bitIdx", r.BitIdx, "sections", r.SectionIndexList)
// Ensure we have a correct message with a single proof element
if msg.MsgType != MsgHelperTrieProofs {
return errInvalidMessageType
}
resps := msg.Obj.(HelperTrieResps)
proofs := resps.Proofs
nodeSet := proofs.Set()
reads := &readTraceDB{db: nodeSet}
r.BloomBits = make([][]byte, len(r.SectionIndexList))
// Verify the proofs
var encNumber [10]byte
binary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))
for i, idx := range r.SectionIndexList {
binary.BigEndian.PutUint64(encNumber[2:], idx)
value, err := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads)
if err != nil {
return err
}
r.BloomBits[i] = value
}
if len(reads.reads) != nodeSet.KeyCount() {
return errUselessNodes
}
r.Proofs = nodeSet
return nil
}
// TxStatusRequest is the ODR request type for transaction status
type TxStatusRequest light.TxStatusRequest
// GetCost returns the cost of the given ODR request according to the serving
// peer's cost table (implementation of LesOdrRequest)
func (r *TxStatusRequest) GetCost(peer *serverPeer) uint64 {
return peer.getRequestCost(GetTxStatusMsg, len(r.Hashes))
}
// CanSend tells if a certain peer is suitable for serving the given request
func (r *TxStatusRequest) CanSend(peer *serverPeer) bool {
return peer.txHistory != txIndexDisabled
}
// Request sends an ODR request to the LES network (implementation of LesOdrRequest)
func (r *TxStatusRequest) Request(reqID uint64, peer *serverPeer) error {
peer.Log().Debug("Requesting transaction status", "count", len(r.Hashes))
return peer.requestTxStatus(reqID, r.Hashes)
}
// Validate processes an ODR request reply message from the LES network
// returns true and stores results in memory if the message was a valid reply
// to the request (implementation of LesOdrRequest)
func (r *TxStatusRequest) Validate(db ethdb.Database, msg *Msg) error {
log.Debug("Validating transaction status", "count", len(r.Hashes))
if msg.MsgType != MsgTxStatus {
return errInvalidMessageType
}
status := msg.Obj.([]light.TxStatus)
if len(status) != len(r.Hashes) {
return errInvalidEntryCount
}
r.Status = status
return nil
}
// readTraceDB stores the keys of database reads. We use this to check that received node
// sets contain only the trie nodes necessary to make proofs pass.
type readTraceDB struct {
db ethdb.KeyValueReader
reads map[string]struct{}
}
// Get returns a stored node, recording the key in the read set
func (db *readTraceDB) Get(k []byte) ([]byte, error) {
if db.reads == nil {
db.reads = make(map[string]struct{})
}
db.reads[string(k)] = struct{}{}
return db.db.Get(k)
}
// Has returns true if the node set contains the given key
func (db *readTraceDB) Has(key []byte) (bool, error) {
_, err := db.Get(key)
return err == nil, nil
}

@ -1,458 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
// Note: these tests are disabled for now because they cannot work with the old
// sync mechanism removed, but they will be useful again once the PoS ultralight
// mode is implemented
/*
import (
"bytes"
"context"
"crypto/rand"
"fmt"
"math/big"
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
type odrTestFn func(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte
func TestOdrGetBlockLes2(t *testing.T) { testOdr(t, 2, 1, true, odrGetBlock) }
func TestOdrGetBlockLes3(t *testing.T) { testOdr(t, 3, 1, true, odrGetBlock) }
func TestOdrGetBlockLes4(t *testing.T) { testOdr(t, 4, 1, true, odrGetBlock) }
func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
var block *types.Block
if bc != nil {
block = bc.GetBlockByHash(bhash)
} else {
block, _ = lc.GetBlockByHash(ctx, bhash)
}
if block == nil {
return nil
}
rlp, _ := rlp.EncodeToBytes(block)
return rlp
}
func TestOdrGetReceiptsLes2(t *testing.T) { testOdr(t, 2, 1, true, odrGetReceipts) }
func TestOdrGetReceiptsLes3(t *testing.T) { testOdr(t, 3, 1, true, odrGetReceipts) }
func TestOdrGetReceiptsLes4(t *testing.T) { testOdr(t, 4, 1, true, odrGetReceipts) }
func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
var receipts types.Receipts
if bc != nil {
if number := rawdb.ReadHeaderNumber(db, bhash); number != nil {
if header := rawdb.ReadHeader(db, bhash, *number); header != nil {
receipts = rawdb.ReadReceipts(db, bhash, *number, header.Time, config)
}
}
} else {
if number := rawdb.ReadHeaderNumber(db, bhash); number != nil {
receipts, _ = light.GetBlockReceipts(ctx, lc.Odr(), bhash, *number)
}
}
if receipts == nil {
return nil
}
rlp, _ := rlp.EncodeToBytes(receipts)
return rlp
}
func TestOdrAccountsLes2(t *testing.T) { testOdr(t, 2, 1, true, odrAccounts) }
func TestOdrAccountsLes3(t *testing.T) { testOdr(t, 3, 1, true, odrAccounts) }
func TestOdrAccountsLes4(t *testing.T) { testOdr(t, 4, 1, true, odrAccounts) }
func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678")
acc := []common.Address{bankAddr, userAddr1, userAddr2, dummyAddr}
var (
res []byte
st *state.StateDB
err error
)
for _, addr := range acc {
if bc != nil {
header := bc.GetHeaderByHash(bhash)
st, err = state.New(header.Root, bc.StateCache(), nil)
} else {
header := lc.GetHeaderByHash(bhash)
st = light.NewState(ctx, header, lc.Odr())
}
if err == nil {
bal := st.GetBalance(addr)
rlp, _ := rlp.EncodeToBytes(bal)
res = append(res, rlp...)
}
}
return res
}
func TestOdrContractCallLes2(t *testing.T) { testOdr(t, 2, 2, true, odrContractCall) }
func TestOdrContractCallLes3(t *testing.T) { testOdr(t, 3, 2, true, odrContractCall) }
func TestOdrContractCallLes4(t *testing.T) { testOdr(t, 4, 2, true, odrContractCall) }
func odrContractCall(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
data := common.Hex2Bytes("60CD26850000000000000000000000000000000000000000000000000000000000000000")
var res []byte
for i := 0; i < 3; i++ {
data[35] = byte(i)
if bc != nil {
header := bc.GetHeaderByHash(bhash)
statedb, err := state.New(header.Root, bc.StateCache(), nil)
if err == nil {
from := statedb.GetOrNewStateObject(bankAddr)
from.SetBalance(math.MaxBig256)
msg := &core.Message{
From: from.Address(),
To: &testContractAddr,
Value: new(big.Int),
GasLimit: 100000,
GasPrice: big.NewInt(params.InitialBaseFee),
GasFeeCap: big.NewInt(params.InitialBaseFee),
GasTipCap: new(big.Int),
Data: data,
SkipAccountChecks: true,
}
context := core.NewEVMBlockContext(header, bc, nil)
txContext := core.NewEVMTxContext(msg)
vmenv := vm.NewEVM(context, txContext, statedb, config, vm.Config{NoBaseFee: true})
//vmenv := core.NewEnv(statedb, config, bc, msg, header, vm.Config{})
gp := new(core.GasPool).AddGas(math.MaxUint64)
result, _ := core.ApplyMessage(vmenv, msg, gp)
res = append(res, result.Return()...)
}
} else {
header := lc.GetHeaderByHash(bhash)
state := light.NewState(ctx, header, lc.Odr())
state.SetBalance(bankAddr, math.MaxBig256)
msg := &core.Message{
From: bankAddr,
To: &testContractAddr,
Value: new(big.Int),
GasLimit: 100000,
GasPrice: big.NewInt(params.InitialBaseFee),
GasFeeCap: big.NewInt(params.InitialBaseFee),
GasTipCap: new(big.Int),
Data: data,
SkipAccountChecks: true,
}
context := core.NewEVMBlockContext(header, lc, nil)
txContext := core.NewEVMTxContext(msg)
vmenv := vm.NewEVM(context, txContext, state, config, vm.Config{NoBaseFee: true})
gp := new(core.GasPool).AddGas(math.MaxUint64)
result, _ := core.ApplyMessage(vmenv, msg, gp)
if state.Error() == nil {
res = append(res, result.Return()...)
}
}
}
return res
}
func TestOdrTxStatusLes2(t *testing.T) { testOdr(t, 2, 1, false, odrTxStatus) }
func TestOdrTxStatusLes3(t *testing.T) { testOdr(t, 3, 1, false, odrTxStatus) }
func TestOdrTxStatusLes4(t *testing.T) { testOdr(t, 4, 1, false, odrTxStatus) }
func odrTxStatus(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
var txs types.Transactions
if bc != nil {
block := bc.GetBlockByHash(bhash)
txs = block.Transactions()
} else {
if block, _ := lc.GetBlockByHash(ctx, bhash); block != nil {
btxs := block.Transactions()
txs = make(types.Transactions, len(btxs))
for i, tx := range btxs {
var err error
txs[i], _, _, _, err = light.GetTransaction(ctx, lc.Odr(), tx.Hash())
if err != nil {
return nil
}
}
}
}
rlp, _ := rlp.EncodeToBytes(txs)
return rlp
}
// testOdr tests odr requests whose validation is guaranteed by block headers.
func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn odrTestFn) {
// Assemble the test environment
netconfig := testnetConfig{
blocks: 4,
protocol: protocol,
connect: true,
nopruning: true,
}
server, client, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
// Ensure the client has synced all necessary data.
clientHead := client.handler.backend.blockchain.CurrentHeader()
if clientHead.Number.Uint64() != 4 {
t.Fatalf("Failed to sync the chain with server, head: %v", clientHead.Number.Uint64())
}
// Disable the mechanism that waits a short while for a request even
// when there is no suitable peer to send it to right now.
waitForPeers = 0
test := func(expFail uint64) {
// Mark this as a helper to put the failures at the correct lines
t.Helper()
for i := uint64(0); i <= server.handler.blockchain.CurrentHeader().Number.Uint64(); i++ {
bhash := rawdb.ReadCanonicalHash(server.db, i)
b1 := fn(light.NoOdr, server.db, server.handler.server.chainConfig, server.handler.blockchain, nil, bhash)
// Set the timeout to 1 second here to ensure there is enough time
// for Travis to perform the action.
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
b2 := fn(ctx, client.db, client.handler.backend.chainConfig, nil, client.handler.backend.blockchain, bhash)
cancel()
eq := bytes.Equal(b1, b2)
exp := i < expFail
if exp && !eq {
t.Fatalf("odr mismatch: have %x, want %x", b2, b1)
}
if !exp && eq {
t.Fatalf("unexpected odr match")
}
}
}
// expect retrievals to fail (except genesis block) without a les peer
client.handler.backend.peers.lock.Lock()
client.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return false }
client.handler.backend.peers.lock.Unlock()
test(expFail)
// expect all retrievals to pass
client.handler.backend.peers.lock.Lock()
client.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return true }
client.handler.backend.peers.lock.Unlock()
test(5)
// still expect all retrievals to pass, now data should be cached locally
if checkCached {
client.handler.backend.peers.unregister(client.peer.speer.id)
time.Sleep(time.Millisecond * 10) // ensure that all peerSetNotify callbacks are executed
test(5)
}
}
func TestGetTxStatusFromUnindexedPeersLES4(t *testing.T) { testGetTxStatusFromUnindexedPeers(t, lpv4) }
func testGetTxStatusFromUnindexedPeers(t *testing.T, protocol int) {
var (
blocks = 8
netconfig = testnetConfig{
blocks: blocks,
protocol: protocol,
nopruning: true,
}
)
server, client, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
// Iterate the chain, create the tx indexes locally
var (
testHash common.Hash
testStatus light.TxStatus
txs = make(map[common.Hash]*types.Transaction) // Transaction objects set
blockNumbers = make(map[common.Hash]uint64) // Transaction hash to block number mappings
blockHashes = make(map[common.Hash]common.Hash) // Transaction hash to block hash mappings
intraIndex = make(map[common.Hash]uint64) // Transaction intra-index in block
)
for number := uint64(1); number < server.backend.Blockchain().CurrentBlock().Number.Uint64(); number++ {
block := server.backend.Blockchain().GetBlockByNumber(number)
if block == nil {
t.Fatalf("Failed to retrieve block %d", number)
}
for index, tx := range block.Transactions() {
txs[tx.Hash()] = tx
blockNumbers[tx.Hash()] = number
blockHashes[tx.Hash()] = block.Hash()
intraIndex[tx.Hash()] = uint64(index)
if testHash == (common.Hash{}) {
testHash = tx.Hash()
testStatus = light.TxStatus{
Status: txpool.TxStatusIncluded,
Lookup: &rawdb.LegacyTxLookupEntry{
BlockHash: block.Hash(),
BlockIndex: block.NumberU64(),
Index: uint64(index),
},
}
}
}
}
// serveMsg processes incoming GetTxStatusMsg and sends the response back.
serveMsg := func(peer *testPeer, txLookup uint64) error {
msg, err := peer.app.ReadMsg()
if err != nil {
return err
}
if msg.Code != GetTxStatusMsg {
return fmt.Errorf("message code mismatch: got %d, expected %d", msg.Code, GetTxStatusMsg)
}
var r GetTxStatusPacket
if err := msg.Decode(&r); err != nil {
return err
}
stats := make([]light.TxStatus, len(r.Hashes))
for i, hash := range r.Hashes {
number, exist := blockNumbers[hash]
if !exist {
continue // Filter out unknown transactions
}
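// A peer with a limited txLookup only indexes transactions in its most
// recent txLookup blocks.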
min := uint64(blocks) - txLookup
if txLookup != txIndexUnlimited && (txLookup == txIndexDisabled || number < min) {
continue // Filter out unindexed transactions
}
stats[i].Status = txpool.TxStatusIncluded
stats[i].Lookup = &rawdb.LegacyTxLookupEntry{
BlockHash: blockHashes[hash],
BlockIndex: number,
Index: intraIndex[hash],
}
}
data, _ := rlp.EncodeToBytes(stats)
reply := &reply{peer.app, TxStatusMsg, r.ReqID, data}
reply.send(testBufLimit)
return nil
}
var testspecs = []struct {
peers int
txLookups []uint64
txs []common.Hash
results []light.TxStatus
}{
// Retrieve a mined transaction from an empty peerset
{
peers: 0,
txLookups: []uint64{},
txs: []common.Hash{testHash},
results: []light.TxStatus{{}},
},
// Retrieve an unknown transaction from fully indexed peers
{
peers: 3,
txLookups: []uint64{txIndexUnlimited, txIndexUnlimited, txIndexUnlimited},
txs: []common.Hash{randomHash()},
results: []light.TxStatus{{}},
},
// Retrieve a mined transaction from fully indexed peers
{
peers: 3,
txLookups: []uint64{txIndexUnlimited, txIndexUnlimited, txIndexUnlimited},
txs: []common.Hash{testHash},
results: []light.TxStatus{testStatus},
},
// Retrieve mixed transactions from fully indexed peers
{
peers: 3,
txLookups: []uint64{txIndexUnlimited, txIndexUnlimited, txIndexUnlimited},
txs: []common.Hash{randomHash(), testHash},
results: []light.TxStatus{{}, testStatus},
},
// Retrieve mixed transactions from unindexed peers (but the target is still available)
{
peers: 3,
txLookups: []uint64{uint64(blocks) - testStatus.Lookup.BlockIndex, uint64(blocks) - testStatus.Lookup.BlockIndex - 1, uint64(blocks) - testStatus.Lookup.BlockIndex - 2},
txs: []common.Hash{randomHash(), testHash},
results: []light.TxStatus{{}, testStatus},
},
// Retrieve mixed transactions from unindexed peers (but the target is not available)
{
peers: 3,
txLookups: []uint64{uint64(blocks) - testStatus.Lookup.BlockIndex - 1, uint64(blocks) - testStatus.Lookup.BlockIndex - 1, uint64(blocks) - testStatus.Lookup.BlockIndex - 2},
txs: []common.Hash{randomHash(), testHash},
results: []light.TxStatus{{}, {}},
},
}
for _, testspec := range testspecs {
// Create a bunch of server peers with different tx history
var (
closeFns []func()
)
for i := 0; i < testspec.peers; i++ {
peer, closePeer, _ := client.newRawPeer(t, fmt.Sprintf("server-%d", i), protocol, testspec.txLookups[i])
closeFns = append(closeFns, closePeer)
// Create a one-time goroutine for serving the message
go func(i int, peer *testPeer, lookup uint64) {
serveMsg(peer, lookup)
}(i, peer, testspec.txLookups[i])
}
// Send out the GetTxStatus requests, compare the result with
// expected value.
r := &light.TxStatusRequest{Hashes: testspec.txs}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
err := client.handler.backend.odr.RetrieveTxStatus(ctx, r)
if err != nil {
t.Errorf("Failed to retrieve tx status %v", err)
} else {
if !reflect.DeepEqual(testspec.results, r.Status) {
t.Errorf("Result mismatch, diff")
}
}
// Close all connected peers and start the next round
for _, closeFn := range closeFns {
closeFn()
}
}
}
// randomHash generates a random blob of data and returns it as a hash.
func randomHash() common.Hash {
var hash common.Hash
if n, err := rand.Read(hash[:]); n != common.HashLength || err != nil {
panic(err)
}
return hash
}
*/

File diff suppressed because it is too large

@@ -1,166 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"crypto/rand"
"errors"
"math/big"
"reflect"
"sort"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
)
type testServerPeerSub struct {
regCh chan *serverPeer
unregCh chan *serverPeer
}
func newTestServerPeerSub() *testServerPeerSub {
return &testServerPeerSub{
regCh: make(chan *serverPeer, 1),
unregCh: make(chan *serverPeer, 1),
}
}
func (t *testServerPeerSub) registerPeer(p *serverPeer) { t.regCh <- p }
func (t *testServerPeerSub) unregisterPeer(p *serverPeer) { t.unregCh <- p }
func TestPeerSubscription(t *testing.T) {
peers := newServerPeerSet()
defer peers.close()
checkIds := func(expect []string) {
given := peers.ids()
if len(given) == 0 && len(expect) == 0 {
return
}
sort.Strings(given)
sort.Strings(expect)
if !reflect.DeepEqual(given, expect) {
t.Fatalf("all peer ids mismatch, want %v, given %v", expect, given)
}
}
checkPeers := func(peerCh chan *serverPeer) {
select {
case <-peerCh:
case <-time.NewTimer(100 * time.Millisecond).C:
t.Fatalf("timeout, no event received")
}
select {
case <-peerCh:
t.Fatalf("unexpected event received")
case <-time.NewTimer(10 * time.Millisecond).C:
}
}
checkIds([]string{})
sub := newTestServerPeerSub()
peers.subscribe(sub)
// Generate a random id and create the peer
var id enode.ID
rand.Read(id[:])
peer := newServerPeer(2, NetworkId, false, p2p.NewPeer(id, "name", nil), nil)
peers.register(peer)
checkIds([]string{peer.id})
checkPeers(sub.regCh)
peers.unregister(peer.id)
checkIds([]string{})
checkPeers(sub.unregCh)
}
type fakeChain struct{}
func (f *fakeChain) Config() *params.ChainConfig { return params.MainnetChainConfig }
func (f *fakeChain) Genesis() *types.Block {
return core.DefaultGenesisBlock().ToBlock()
}
func (f *fakeChain) CurrentHeader() *types.Header { return &types.Header{Number: big.NewInt(10000000)} }
func TestHandshake(t *testing.T) {
// Create a message pipe to communicate through
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
var id enode.ID
rand.Read(id[:])
peer1 := newClientPeer(2, NetworkId, p2p.NewPeer(id, "name", nil), net)
peer2 := newServerPeer(2, NetworkId, true, p2p.NewPeer(id, "name", nil), app)
var (
errCh1 = make(chan error, 1)
errCh2 = make(chan error, 1)
td = big.NewInt(100)
head = common.HexToHash("deadbeef")
headNum = uint64(10)
genesis = common.HexToHash("cafebabe")
chain1, chain2 = &fakeChain{}, &fakeChain{}
forkID1 = forkid.NewID(chain1.Config(), chain1.Genesis(), chain1.CurrentHeader().Number.Uint64(), chain1.CurrentHeader().Time)
forkID2 = forkid.NewID(chain2.Config(), chain2.Genesis(), chain2.CurrentHeader().Number.Uint64(), chain2.CurrentHeader().Time)
filter1, filter2 = forkid.NewFilter(chain1), forkid.NewFilter(chain2)
)
go func() {
errCh1 <- peer1.handshake(td, head, headNum, genesis, forkID1, filter1, func(list *keyValueList) {
var announceType uint64 = announceTypeSigned
*list = (*list).add("announceType", announceType)
}, nil)
}()
go func() {
errCh2 <- peer2.handshake(td, head, headNum, genesis, forkID2, filter2, nil, func(recv keyValueMap) error {
var reqType uint64
err := recv.get("announceType", &reqType)
if err != nil {
return err
}
if reqType != announceTypeSigned {
return errors.New("expected announceTypeSigned")
}
return nil
})
}()
for i := 0; i < 2; i++ {
select {
case err := <-errCh1:
if err != nil {
t.Fatalf("handshake failed, %v", err)
}
case err := <-errCh2:
if err != nil {
t.Fatalf("handshake failed, %v", err)
}
case <-time.After(time.Second):
t.Fatalf("timeout")
}
}
}

@@ -1,327 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"crypto/ecdsa"
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
vfc "github.com/ethereum/go-ethereum/les/vflux/client"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
// Constants to match up protocol versions and messages
const (
lpv2 = 2
lpv3 = 3
lpv4 = 4
)
// Supported versions of the les protocol (first is primary)
var (
ClientProtocolVersions = []uint{lpv2, lpv3, lpv4}
ServerProtocolVersions = []uint{lpv2, lpv3, lpv4}
)
// ProtocolLengths is the number of implemented messages corresponding to each protocol version.
var ProtocolLengths = map[uint]uint64{lpv2: 22, lpv3: 24, lpv4: 24}
const (
NetworkId = 1
ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message
blockSafetyMargin = 4 // safety margin applied to block ranges specified relative to head block
txIndexUnlimited = 0 // this value in the "recentTxLookup" handshake field means the entire tx index history is served
txIndexDisabled = 1 // this value means tx index is not served at all
txIndexRecentOffset = 1 // txIndexRecentOffset + N in the handshake field means the tx index of the last N blocks is served
)
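// Illustrative sketch, not part of the original file: hypothetical helpers
// showing how the "recentTxLookup" handshake field defined above could be
// built and parsed. encodeTxLookup and decodeTxLookup are made-up names.
func encodeTxLookup(recentBlocks uint64, disabled bool) uint64 {
if disabled {
return txIndexDisabled
}
if recentBlocks == 0 {
return txIndexUnlimited // the entire tx index history is served
}
return txIndexRecentOffset + recentBlocks // only the last N blocks are served
}
func decodeTxLookup(field uint64) (recentBlocks uint64, disabled bool) {
switch field {
case txIndexUnlimited:
return 0, false // zero means "no limit" here
case txIndexDisabled:
return 0, true
default:
return field - txIndexRecentOffset, false
}
}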
// les protocol message codes
const (
// Protocol messages inherited from LPV1
StatusMsg = 0x00
AnnounceMsg = 0x01
GetBlockHeadersMsg = 0x02
BlockHeadersMsg = 0x03
GetBlockBodiesMsg = 0x04
BlockBodiesMsg = 0x05
GetReceiptsMsg = 0x06
ReceiptsMsg = 0x07
GetCodeMsg = 0x0a
CodeMsg = 0x0b
// Protocol messages introduced in LPV2
GetProofsV2Msg = 0x0f
ProofsV2Msg = 0x10
GetHelperTrieProofsMsg = 0x11
HelperTrieProofsMsg = 0x12
SendTxV2Msg = 0x13
GetTxStatusMsg = 0x14
TxStatusMsg = 0x15
// Protocol messages introduced in LPV3
StopMsg = 0x16
ResumeMsg = 0x17
)
// GetBlockHeadersData represents a block header query (the request ID is not included)
type GetBlockHeadersData struct {
Origin hashOrNumber // Block from which to retrieve headers
Amount uint64 // Maximum number of headers to retrieve
Skip uint64 // Blocks to skip between consecutive headers
Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
}
// GetBlockHeadersPacket represents a block header request
type GetBlockHeadersPacket struct {
ReqID uint64
Query GetBlockHeadersData
}
// GetBlockBodiesPacket represents a block body request
type GetBlockBodiesPacket struct {
ReqID uint64
Hashes []common.Hash
}
// GetCodePacket represents a contract code request
type GetCodePacket struct {
ReqID uint64
Reqs []CodeReq
}
// GetReceiptsPacket represents a block receipts request
type GetReceiptsPacket struct {
ReqID uint64
Hashes []common.Hash
}
// GetProofsPacket represents a proof request
type GetProofsPacket struct {
ReqID uint64
Reqs []ProofReq
}
// GetHelperTrieProofsPacket represents a helper trie proof request
type GetHelperTrieProofsPacket struct {
ReqID uint64
Reqs []HelperTrieReq
}
// SendTxPacket represents a transaction propagation request
type SendTxPacket struct {
ReqID uint64
Txs []*types.Transaction
}
// GetTxStatusPacket represents a transaction status query
type GetTxStatusPacket struct {
ReqID uint64
Hashes []common.Hash
}
type requestInfo struct {
name string
maxCount uint64
refBasketFirst, refBasketRest float64
}
// reqMapping maps an LES request to one or two vflux service vector entries.
// If rest != -1 and the request type is used with an amount larger than one,
// then the first item of the multi-request is mapped to first while the
// remaining items are mapped to rest.
type reqMapping struct {
first, rest int
}
var (
// requests describes the available LES request types and their initializing amounts
// in the vfc.ValueTracker reference basket. Initial values are estimates
// based on the same values as the server's default cost estimates (reqAvgTimeCost).
requests = map[uint64]requestInfo{
GetBlockHeadersMsg: {"GetBlockHeaders", MaxHeaderFetch, 10, 1000},
GetBlockBodiesMsg: {"GetBlockBodies", MaxBodyFetch, 1, 0},
GetReceiptsMsg: {"GetReceipts", MaxReceiptFetch, 1, 0},
GetCodeMsg: {"GetCode", MaxCodeFetch, 1, 0},
GetProofsV2Msg: {"GetProofsV2", MaxProofsFetch, 10, 0},
GetHelperTrieProofsMsg: {"GetHelperTrieProofs", MaxHelperTrieProofsFetch, 10, 100},
SendTxV2Msg: {"SendTxV2", MaxTxSend, 1, 0},
GetTxStatusMsg: {"GetTxStatus", MaxTxStatus, 10, 0},
}
requestList []vfc.RequestInfo
requestMapping map[uint32]reqMapping
)
// init creates a request list and mapping between protocol message codes and vflux
// service vector indices.
func init() {
requestMapping = make(map[uint32]reqMapping)
for code, req := range requests {
cost := reqAvgTimeCost[code]
rm := reqMapping{len(requestList), -1}
requestList = append(requestList, vfc.RequestInfo{
Name: req.name + ".first",
InitAmount: req.refBasketFirst,
InitValue: float64(cost.baseCost + cost.reqCost),
})
if req.refBasketRest != 0 {
rm.rest = len(requestList)
requestList = append(requestList, vfc.RequestInfo{
Name: req.name + ".rest",
InitAmount: req.refBasketRest,
InitValue: float64(cost.reqCost),
})
}
requestMapping[uint32(code)] = rm
}
}
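// For illustration (not part of the original file): with the table above,
// GetBlockHeadersMsg yields two service vector entries, "GetBlockHeaders.first"
// (InitAmount 10) and "GetBlockHeaders.rest" (InitAmount 1000), while
// GetBlockBodiesMsg (refBasketRest == 0) yields only "GetBlockBodies.first",
// leaving its reqMapping.rest at -1.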
type errCode int
const (
ErrMsgTooLarge = iota
ErrDecode
ErrInvalidMsgCode
ErrProtocolVersionMismatch
ErrNetworkIdMismatch
ErrGenesisBlockMismatch
ErrNoStatusMsg
ErrExtraStatusMsg
ErrSuspendedPeer
ErrUselessPeer
ErrRequestRejected
ErrUnexpectedResponse
ErrInvalidResponse
ErrTooManyTimeouts
ErrMissingKey
ErrForkIDRejected
)
func (e errCode) String() string {
return errorToString[int(e)]
}
// XXX change once legacy code is out
var errorToString = map[int]string{
ErrMsgTooLarge: "Message too long",
ErrDecode: "Invalid message",
ErrInvalidMsgCode: "Invalid message code",
ErrProtocolVersionMismatch: "Protocol version mismatch",
ErrNetworkIdMismatch: "NetworkId mismatch",
ErrGenesisBlockMismatch: "Genesis block mismatch",
ErrNoStatusMsg: "No status message",
ErrExtraStatusMsg: "Extra status message",
ErrSuspendedPeer: "Suspended peer",
ErrUselessPeer: "Useless peer",
ErrRequestRejected: "Request rejected",
ErrUnexpectedResponse: "Unexpected response",
ErrInvalidResponse: "Invalid response",
ErrTooManyTimeouts: "Too many request timeouts",
ErrMissingKey: "Key missing from list",
ErrForkIDRejected: "ForkID rejected",
}
// announceData is the network packet for the block announcements.
type announceData struct {
Hash common.Hash // Hash of one particular block being announced
Number uint64 // Number of one particular block being announced
Td *big.Int // Total difficulty of one particular block being announced
ReorgDepth uint64
Update keyValueList
}
// sanityCheck verifies that the values are reasonable, as a DoS protection
func (a *announceData) sanityCheck() error {
if tdlen := a.Td.BitLen(); tdlen > 100 {
return fmt.Errorf("too large block TD: bitlen %d", tdlen)
}
return nil
}
// sign adds a signature to the block announcement by the given privKey
func (a *announceData) sign(privKey *ecdsa.PrivateKey) {
rlp, _ := rlp.EncodeToBytes(blockInfo{a.Hash, a.Number, a.Td})
sig, _ := crypto.Sign(crypto.Keccak256(rlp), privKey)
a.Update = a.Update.add("sign", sig)
}
// checkSignature verifies if the block announcement has a valid signature by the given pubKey
func (a *announceData) checkSignature(id enode.ID, update keyValueMap) error {
var sig []byte
if err := update.get("sign", &sig); err != nil {
return err
}
rlp, _ := rlp.EncodeToBytes(blockInfo{a.Hash, a.Number, a.Td})
recPubkey, err := crypto.SigToPub(crypto.Keccak256(rlp), sig)
if err != nil {
return err
}
if id == enode.PubkeyToIDV4(recPubkey) {
return nil
}
return errors.New("wrong signature")
}
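// Illustrative round trip, not part of the original file: a server signs an
// announcement with its node key and a client verifies it against the
// server's enode ID. It assumes keyValueList.decode (defined in peer.go) to
// turn the announced key/value list back into a keyValueMap.
func exampleAnnounceSignature() error {
key, err := crypto.GenerateKey()
if err != nil {
return err
}
a := &announceData{Hash: common.HexToHash("deadbeef"), Number: 10, Td: big.NewInt(100)}
a.sign(key)
update, _ := a.Update.decode()
return a.checkSignature(enode.PubkeyToIDV4(&key.PublicKey), update)
}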
type blockInfo struct {
Hash common.Hash // Hash of one particular block being announced
Number uint64 // Number of one particular block being announced
Td *big.Int // Total difficulty of one particular block being announced
}
// hashOrNumber is a combined field for specifying an origin block.
type hashOrNumber struct {
Hash common.Hash // Block hash from which to retrieve headers (excludes Number)
Number uint64 // Block number from which to retrieve headers (excludes Hash)
}
// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the
// two contained union fields.
func (hn *hashOrNumber) EncodeRLP(w io.Writer) error {
if hn.Hash == (common.Hash{}) {
return rlp.Encode(w, hn.Number)
}
if hn.Number != 0 {
return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number)
}
return rlp.Encode(w, hn.Hash)
}
// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents
// into either a block hash or a block number.
func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error {
_, size, err := s.Kind()
switch {
case err != nil:
return err
case size == 32:
hn.Number = 0
return s.Decode(&hn.Hash)
case size <= 8:
hn.Hash = common.Hash{}
return s.Decode(&hn.Number)
default:
return fmt.Errorf("invalid input size %d for origin", size)
}
}
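// Illustrative round trip, not part of the original file, exercising the
// union encoding above: an up-to-8-byte RLP string decodes into Number while
// a 32-byte string decodes into Hash.
func exampleHashOrNumber() error {
enc, err := rlp.EncodeToBytes(&hashOrNumber{Number: 1234})
if err != nil {
return err
}
var dec hashOrNumber
if err := rlp.DecodeBytes(enc, &dec); err != nil {
return err
}
if dec.Number != 1234 || dec.Hash != (common.Hash{}) {
return errors.New("number round trip mismatch")
}
enc, err = rlp.EncodeToBytes(&hashOrNumber{Hash: common.HexToHash("01")})
if err != nil {
return err
}
return rlp.DecodeBytes(enc, &dec)
}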
// CodeData is the network response packet for a node data retrieval.
type CodeData []struct {
Value []byte
}

@@ -1,129 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
// Note: these tests are disabled now because they cannot work with the old
// sync mechanism removed, but they will be useful again once the PoS ultralight mode is implemented
/*
import (
"context"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/light"
)
var testBankSecureTrieKey = secAddr(bankAddr)
func secAddr(addr common.Address) []byte {
return crypto.Keccak256(addr[:])
}
type accessTestFn func(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest
func TestBlockAccessLes2(t *testing.T) { testAccess(t, 2, tfBlockAccess) }
func TestBlockAccessLes3(t *testing.T) { testAccess(t, 3, tfBlockAccess) }
func TestBlockAccessLes4(t *testing.T) { testAccess(t, 4, tfBlockAccess) }
func tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
return &light.BlockRequest{Hash: bhash, Number: number}
}
func TestReceiptsAccessLes2(t *testing.T) { testAccess(t, 2, tfReceiptsAccess) }
func TestReceiptsAccessLes3(t *testing.T) { testAccess(t, 3, tfReceiptsAccess) }
func TestReceiptsAccessLes4(t *testing.T) { testAccess(t, 4, tfReceiptsAccess) }
func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
return &light.ReceiptsRequest{Hash: bhash, Number: number}
}
func TestTrieEntryAccessLes2(t *testing.T) { testAccess(t, 2, tfTrieEntryAccess) }
func TestTrieEntryAccessLes3(t *testing.T) { testAccess(t, 3, tfTrieEntryAccess) }
func TestTrieEntryAccessLes4(t *testing.T) { testAccess(t, 4, tfTrieEntryAccess) }
func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
if number := rawdb.ReadHeaderNumber(db, bhash); number != nil {
return &light.TrieRequest{Id: light.StateTrieID(rawdb.ReadHeader(db, bhash, *number)), Key: testBankSecureTrieKey}
}
return nil
}
func TestCodeAccessLes2(t *testing.T) { testAccess(t, 2, tfCodeAccess) }
func TestCodeAccessLes3(t *testing.T) { testAccess(t, 3, tfCodeAccess) }
func TestCodeAccessLes4(t *testing.T) { testAccess(t, 4, tfCodeAccess) }
func tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrRequest {
number := rawdb.ReadHeaderNumber(db, bhash)
if number == nil {
return nil
}
header := rawdb.ReadHeader(db, bhash, *number)
if header.Number.Uint64() < testContractDeployed {
return nil
}
sti := light.StateTrieID(header)
ci := light.StorageTrieID(sti, testContractAddr, types.EmptyRootHash)
return &light.CodeRequest{Id: ci, Hash: crypto.Keccak256Hash(testContractCodeDeployed)}
}
func testAccess(t *testing.T, protocol int, fn accessTestFn) {
// Assemble the test environment
netconfig := testnetConfig{
blocks: 4,
protocol: protocol,
indexFn: nil,
connect: true,
nopruning: true,
}
server, client, tearDown := newClientServerEnv(t, netconfig)
defer tearDown()
// Ensure the client has synced all necessary data.
clientHead := client.handler.backend.blockchain.CurrentHeader()
if clientHead.Number.Uint64() != 4 {
t.Fatalf("Failed to sync the chain with server, head: %v", clientHead.Number.Uint64())
}
test := func(expFail uint64) {
for i := uint64(0); i <= server.handler.blockchain.CurrentHeader().Number.Uint64(); i++ {
bhash := rawdb.ReadCanonicalHash(server.db, i)
if req := fn(client.db, bhash, i); req != nil {
ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
err := client.handler.backend.odr.Retrieve(ctx, req)
cancel()
got := err == nil
exp := i < expFail
if exp && !got {
t.Errorf("object retrieval failed")
}
if !exp && got {
t.Errorf("unexpected object retrieval success")
}
}
}
}
test(5)
}
*/

@@ -1,421 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"context"
"errors"
"sync"
"time"
"github.com/ethereum/go-ethereum/light"
)
var (
retryQueue = time.Millisecond * 100
hardRequestTimeout = time.Second * 10
)
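// For illustration (not part of the original file): a request is first retried
// on a fresh peer once the dynamic soft timeout elapses (see tryRequest below),
// while a peer that stays silent for the full 10s hardRequestTimeout is
// considered failed and is dropped from the peer set.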
// retrieveManager is a layer on top of requestDistributor which takes care of
// matching replies by request ID and handles timeouts and resends if necessary.
type retrieveManager struct {
dist *requestDistributor
peers *serverPeerSet
softRequestTimeout func() time.Duration
lock sync.RWMutex
sentReqs map[uint64]*sentReq
}
// validatorFunc is a function that processes a reply message
type validatorFunc func(distPeer, *Msg) error
// sentReq represents a request sent and tracked by retrieveManager
type sentReq struct {
rm *retrieveManager
req *distReq
id uint64
validate validatorFunc
eventsCh chan reqPeerEvent
stopCh chan struct{}
stopped bool
err error
lock sync.RWMutex // protect access to sentTo map
sentTo map[distPeer]sentReqToPeer
lastReqQueued bool // last request has been queued but not sent
lastReqSentTo distPeer // if not nil then last request has been sent to given peer but not timed out
reqSrtoCount int // number of requests that reached soft (but not hard) timeout
}
// sentReqToPeer notifies the request-from-peer goroutine (tryRequest) about a response
// delivered by the given peer. Only one delivery is allowed per request per peer,
// after which delivered is set to true, the validity of the response is sent on the
// valid channel and no more responses are accepted.
type sentReqToPeer struct {
delivered, frozen bool
event chan int
}
// reqPeerEvent is sent by the request-from-peer goroutine (tryRequest) to the
// request state machine (retrieveLoop) through the eventsCh channel.
type reqPeerEvent struct {
event int
peer distPeer
}
const (
rpSent = iota // if peer == nil, not sent (no suitable peers)
rpSoftTimeout
rpHardTimeout
rpDeliveredValid
rpDeliveredInvalid
rpNotDelivered
)
// newRetrieveManager creates the retrieve manager
func newRetrieveManager(peers *serverPeerSet, dist *requestDistributor, srto func() time.Duration) *retrieveManager {
return &retrieveManager{
peers: peers,
dist: dist,
sentReqs: make(map[uint64]*sentReq),
softRequestTimeout: srto,
}
}
// retrieve sends a request (to multiple peers if necessary) and waits for an answer
// that is delivered through the deliver function and successfully validated by the
// validator callback. It returns when a valid answer is delivered or the context is
// cancelled.
func (rm *retrieveManager) retrieve(ctx context.Context, reqID uint64, req *distReq, val validatorFunc, shutdown chan struct{}) error {
sentReq := rm.sendReq(reqID, req, val)
select {
case <-sentReq.stopCh:
case <-ctx.Done():
sentReq.stop(ctx.Err())
case <-shutdown:
sentReq.stop(errors.New("client is shutting down"))
}
return sentReq.getError()
}
// sendReq starts a process that keeps trying to retrieve a valid answer for a
// request from any suitable peers until stopped or succeeded.
func (rm *retrieveManager) sendReq(reqID uint64, req *distReq, val validatorFunc) *sentReq {
r := &sentReq{
rm: rm,
req: req,
id: reqID,
sentTo: make(map[distPeer]sentReqToPeer),
stopCh: make(chan struct{}),
eventsCh: make(chan reqPeerEvent, 10),
validate: val,
}
canSend := req.canSend
req.canSend = func(p distPeer) bool {
// add an extra check to canSend: the request has not been sent to the same peer before
r.lock.RLock()
_, sent := r.sentTo[p]
r.lock.RUnlock()
return !sent && canSend(p)
}
request := req.request
req.request = func(p distPeer) func() {
// before actually sending the request, put an entry into the sentTo map
r.lock.Lock()
r.sentTo[p] = sentReqToPeer{delivered: false, frozen: false, event: make(chan int, 1)}
r.lock.Unlock()
return request(p)
}
rm.lock.Lock()
rm.sentReqs[reqID] = r
rm.lock.Unlock()
go r.retrieveLoop()
return r
}
// deliver is called by the LES protocol manager to deliver reply messages to waiting requests
func (rm *retrieveManager) deliver(peer distPeer, msg *Msg) error {
rm.lock.RLock()
req, ok := rm.sentReqs[msg.ReqID]
rm.lock.RUnlock()
if ok {
return req.deliver(peer, msg)
}
return errResp(ErrUnexpectedResponse, "reqID = %v", msg.ReqID)
}
// frozen is called by the LES protocol manager when a server has suspended its service and we
// should not expect an answer for the requests already sent there
func (rm *retrieveManager) frozen(peer distPeer) {
rm.lock.RLock()
defer rm.lock.RUnlock()
for _, req := range rm.sentReqs {
req.frozen(peer)
}
}
// reqStateFn represents a state of the retrieve loop state machine
type reqStateFn func() reqStateFn
// retrieveLoop is the retrieval state machine event loop
func (r *sentReq) retrieveLoop() {
go r.tryRequest()
r.lastReqQueued = true
state := r.stateRequesting
for state != nil {
state = state()
}
r.rm.lock.Lock()
delete(r.rm.sentReqs, r.id)
r.rm.lock.Unlock()
}
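// Illustrative sketch, not part of the original file: the func-returning-state
// pattern used by retrieveLoop above, reduced to a minimal standalone example.
// Each state function handles one step and returns the next state, or nil to
// terminate the loop.
func exampleStateLoop() {
remaining := 3
var countdown reqStateFn
countdown = func() reqStateFn {
if remaining == 0 {
return nil // terminal state, the loop exits
}
remaining--
return countdown
}
for state := countdown; state != nil; {
state = state()
}
}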
// stateRequesting: a request has been queued or sent recently; when it reaches soft timeout,
// a new request is sent to a new peer
func (r *sentReq) stateRequesting() reqStateFn {
select {
case ev := <-r.eventsCh:
r.update(ev)
switch ev.event {
case rpSent:
if ev.peer == nil {
// request send failed, no more suitable peers
if r.waiting() {
// we are already waiting for sent requests which may succeed so keep waiting
return r.stateNoMorePeers
}
// nothing to wait for, no more peers to ask, return with error
r.stop(light.ErrNoPeers)
// no need to go to stopped state because waiting() already returned false
return nil
}
case rpSoftTimeout:
// last request timed out, try asking a new peer
go r.tryRequest()
r.lastReqQueued = true
return r.stateRequesting
case rpDeliveredInvalid, rpNotDelivered:
// if it was the last sent request (set to nil by update) then start a new one
if !r.lastReqQueued && r.lastReqSentTo == nil {
go r.tryRequest()
r.lastReqQueued = true
}
return r.stateRequesting
case rpDeliveredValid:
r.stop(nil)
return r.stateStopped
}
return r.stateRequesting
case <-r.stopCh:
return r.stateStopped
}
}
// stateNoMorePeers: could not send more requests because no suitable peers are available.
// Peers may become suitable for a certain request later or new peers may appear so we
// keep trying.
func (r *sentReq) stateNoMorePeers() reqStateFn {
select {
case <-time.After(retryQueue):
go r.tryRequest()
r.lastReqQueued = true
return r.stateRequesting
case ev := <-r.eventsCh:
r.update(ev)
if ev.event == rpDeliveredValid {
r.stop(nil)
return r.stateStopped
}
if r.waiting() {
return r.stateNoMorePeers
}
r.stop(light.ErrNoPeers)
return nil
case <-r.stopCh:
return r.stateStopped
}
}
// stateStopped: request succeeded or cancelled, just waiting for some peers
// to either answer or time out hard
func (r *sentReq) stateStopped() reqStateFn {
for r.waiting() {
r.update(<-r.eventsCh)
}
return nil
}
// update updates the queued/sent flags and timed out peers counter according to the event
func (r *sentReq) update(ev reqPeerEvent) {
switch ev.event {
case rpSent:
r.lastReqQueued = false
r.lastReqSentTo = ev.peer
case rpSoftTimeout:
r.lastReqSentTo = nil
r.reqSrtoCount++
case rpHardTimeout:
r.reqSrtoCount--
case rpDeliveredValid, rpDeliveredInvalid, rpNotDelivered:
if ev.peer == r.lastReqSentTo {
r.lastReqSentTo = nil
} else {
r.reqSrtoCount--
}
}
}
// waiting returns true if the retrieval mechanism is waiting for an answer from
// any peer
func (r *sentReq) waiting() bool {
return r.lastReqQueued || r.lastReqSentTo != nil || r.reqSrtoCount > 0
}
// tryRequest tries to send the request to a new peer and waits for it to either
// succeed or time out if it has been sent. It also sends the appropriate reqPeerEvent
// messages to the request's event channel.
func (r *sentReq) tryRequest() {
sent := r.rm.dist.queue(r.req)
var p distPeer
select {
case p = <-sent:
case <-r.stopCh:
if r.rm.dist.cancel(r.req) {
p = nil
} else {
p = <-sent
}
}
r.eventsCh <- reqPeerEvent{rpSent, p}
if p == nil {
return
}
hrto := false
r.lock.RLock()
s, ok := r.sentTo[p]
r.lock.RUnlock()
if !ok {
panic(nil)
}
defer func() {
pp, ok := p.(*serverPeer)
if hrto && ok {
pp.Log().Debug("Request timed out hard")
if r.rm.peers != nil {
r.rm.peers.unregister(pp.id)
}
}
}()
select {
case event := <-s.event:
if event == rpNotDelivered {
r.lock.Lock()
delete(r.sentTo, p)
r.lock.Unlock()
}
r.eventsCh <- reqPeerEvent{event, p}
return
case <-time.After(r.rm.softRequestTimeout()):
r.eventsCh <- reqPeerEvent{rpSoftTimeout, p}
}
select {
case event := <-s.event:
if event == rpNotDelivered {
r.lock.Lock()
delete(r.sentTo, p)
r.lock.Unlock()
}
r.eventsCh <- reqPeerEvent{event, p}
case <-time.After(hardRequestTimeout):
hrto = true
r.eventsCh <- reqPeerEvent{rpHardTimeout, p}
}
}
// deliver a reply belonging to this request
func (r *sentReq) deliver(peer distPeer, msg *Msg) error {
r.lock.Lock()
defer r.lock.Unlock()
s, ok := r.sentTo[peer]
if !ok || s.delivered {
return errResp(ErrUnexpectedResponse, "reqID = %v", msg.ReqID)
}
if s.frozen {
return nil
}
valid := r.validate(peer, msg) == nil
r.sentTo[peer] = sentReqToPeer{delivered: true, frozen: false, event: s.event}
if valid {
s.event <- rpDeliveredValid
} else {
s.event <- rpDeliveredInvalid
}
if !valid {
return errResp(ErrInvalidResponse, "reqID = %v", msg.ReqID)
}
return nil
}
// frozen sends a "not delivered" event to the peer event channel belonging to the
// given peer if the request has been sent there, causing the state machine to not
// expect an answer and potentially even send the request to the same peer again
// when canSend allows it.
func (r *sentReq) frozen(peer distPeer) {
r.lock.Lock()
defer r.lock.Unlock()
s, ok := r.sentTo[peer]
if ok && !s.delivered && !s.frozen {
r.sentTo[peer] = sentReqToPeer{delivered: false, frozen: true, event: s.event}
s.event <- rpNotDelivered
}
}
// stop stops the retrieval process and sets an error code that will be returned
// by getError
func (r *sentReq) stop(err error) {
r.lock.Lock()
if !r.stopped {
r.stopped = true
r.err = err
close(r.stopCh)
}
r.lock.Unlock()
}
// getError returns any retrieval error (either internally generated or set by the
// stop function) after stopCh has been closed
func (r *sentReq) getError() error {
return r.err
}

@@ -1,281 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"crypto/ecdsa"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/flowcontrol"
vfs "github.com/ethereum/go-ethereum/les/vflux/server"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
var (
defaultPosFactors = vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}
defaultNegFactors = vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}
)
const defaultConnectedBias = time.Minute * 3
type ethBackend interface {
ArchiveMode() bool
BlockChain() *core.BlockChain
BloomIndexer() *core.ChainIndexer
ChainDb() ethdb.Database
Synced() bool
TxPool() *txpool.TxPool
}
type LesServer struct {
lesCommons
archiveMode bool // Flag whether the ethereum node runs in archive mode.
handler *serverHandler
peers *clientPeerSet
serverset *serverSet
vfluxServer *vfs.Server
privateKey *ecdsa.PrivateKey
// Flow control and capacity management
fcManager *flowcontrol.ClientManager
costTracker *costTracker
defParams flowcontrol.ServerParams
servingQueue *servingQueue
clientPool *vfs.ClientPool
minCapacity, maxCapacity uint64
threadsIdle int // Request serving thread count when the system is idle.
threadsBusy int // Request serving thread count when the system is busy (block insertion).
p2pSrv *p2p.Server
}
func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*LesServer, error) {
lesDb, err := node.OpenDatabase("les.server", 0, 0, "eth/db/lesserver/", false)
if err != nil {
return nil, err
}
// Calculate the number of threads used to service the light client
// requests based on the user-specified value.
threads := config.LightServ * 4 / 100
if threads < 4 {
threads = 4
}
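// For illustration (not in the original code): LightServ is a percentage of
// serving capacity, so LightServ=50 yields 50*4/100 = 2 and is clamped up to
// the 4-thread minimum, while LightServ=200 yields 8 threads.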
srv := &LesServer{
lesCommons: lesCommons{
genesis: e.BlockChain().Genesis().Hash(),
config: config,
chainConfig: e.BlockChain().Config(),
iConfig: light.DefaultServerIndexerConfig,
chainDb: e.ChainDb(),
lesDb: lesDb,
chainReader: e.BlockChain(),
chtIndexer: light.NewChtIndexer(e.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations, true),
bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency, true),
closeCh: make(chan struct{}),
},
archiveMode: e.ArchiveMode(),
peers: newClientPeerSet(),
serverset: newServerSet(),
vfluxServer: vfs.NewServer(time.Millisecond * 10),
fcManager: flowcontrol.NewClientManager(nil, &mclock.System{}),
servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100),
threadsBusy: config.LightServ/100 + 1,
threadsIdle: threads,
p2pSrv: node.Server(),
}
issync := e.Synced
if config.LightNoSyncServe {
issync = func() bool { return true }
}
srv.handler = newServerHandler(srv, e.BlockChain(), e.ChainDb(), e.TxPool(), issync)
srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config)
// Initialize the bloom trie indexer.
e.BloomIndexer().AddChildIndexer(srv.bloomTrieIndexer)
// Initialize server capacity management fields.
srv.defParams = flowcontrol.ServerParams{
BufLimit: srv.minCapacity * bufLimitRatio,
MinRecharge: srv.minCapacity,
}
// LES flow control tries to more or less guarantee the possibility for the
// clients to send a certain amount of requests at any time and get a quick
// response. Most of the clients want this guarantee but don't actually need
// to send requests most of the time. Our goal is to serve as many clients as
// possible while keeping the actually used server capacity within the limits.
totalRecharge := srv.costTracker.totalRecharge()
srv.maxCapacity = srv.minCapacity * uint64(srv.config.LightPeers)
if totalRecharge > srv.maxCapacity {
srv.maxCapacity = totalRecharge
}
srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2)
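// For illustration (not in the original code): with LightPeers=100 and a
// per-client minimum capacity m, maxCapacity starts at 100*m and is raised
// to totalRecharge when the cost tracker allows more, so the pool can always
// admit at least LightPeers minimum-capacity clients.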
srv.clientPool = vfs.NewClientPool(lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, issync)
srv.clientPool.Start()
srv.clientPool.SetDefaultFactors(defaultPosFactors, defaultNegFactors)
srv.vfluxServer.Register(srv.clientPool, "les", "Ethereum light client service")
srv.chtIndexer.Start(e.BlockChain())
node.RegisterProtocols(srv.Protocols())
node.RegisterAPIs(srv.APIs())
node.RegisterLifecycle(srv)
return srv, nil
}
func (s *LesServer) APIs() []rpc.API {
return []rpc.API{
{
Namespace: "les",
Service: NewLightServerAPI(s),
},
{
Namespace: "debug",
Service: NewDebugAPI(s),
},
}
}
func (s *LesServer) Protocols() []p2p.Protocol {
ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {
if p := s.peers.peer(id); p != nil {
return p.Info()
}
return nil
}, nil)
// Add "les" ENR entries.
for i := range ps {
ps[i].Attributes = []enr.Entry{&lesEntry{
VfxVersion: 1,
}}
}
return ps
}
// Start starts the LES server
func (s *LesServer) Start() error {
s.privateKey = s.p2pSrv.PrivateKey
s.peers.setSignerKey(s.privateKey)
s.handler.start()
s.wg.Add(1)
go s.capacityManagement()
if s.p2pSrv.DiscV5 != nil {
s.p2pSrv.DiscV5.RegisterTalkHandler("vfx", s.vfluxServer.ServeEncoded)
}
return nil
}
// Stop stops the LES service
func (s *LesServer) Stop() error {
close(s.closeCh)
s.clientPool.Stop()
if s.serverset != nil {
s.serverset.close()
}
s.peers.close()
s.fcManager.Stop()
s.costTracker.stop()
s.handler.stop()
s.servingQueue.stop()
if s.vfluxServer != nil {
s.vfluxServer.Stop()
}
// Note, bloom trie indexer is closed by parent bloombits indexer.
if s.chtIndexer != nil {
s.chtIndexer.Close()
}
if s.lesDb != nil {
s.lesDb.Close()
}
s.wg.Wait()
log.Info("Les server stopped")
return nil
}
// capacityManagement starts an event handler loop that updates the recharge curve of
// the client manager and adjusts the client pool's size according to the total
// capacity updates coming from the client manager
func (s *LesServer) capacityManagement() {
defer s.wg.Done()
processCh := make(chan bool, 100)
sub := s.handler.blockchain.SubscribeBlockProcessingEvent(processCh)
defer sub.Unsubscribe()
totalRechargeCh := make(chan uint64, 100)
totalRecharge := s.costTracker.subscribeTotalRecharge(totalRechargeCh)
totalCapacityCh := make(chan uint64, 100)
totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh)
s.clientPool.SetLimits(uint64(s.config.LightPeers), totalCapacity)
var (
busy bool
freePeers uint64
blockProcess mclock.AbsTime
)
updateRecharge := func() {
if busy {
s.servingQueue.setThreads(s.threadsBusy)
s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}})
} else {
s.servingQueue.setThreads(s.threadsIdle)
s.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 10, totalRecharge}, {totalRecharge, totalRecharge}})
}
}
updateRecharge()
for {
select {
case busy = <-processCh:
if busy {
blockProcess = mclock.Now()
} else {
blockProcessingTimer.Update(time.Duration(mclock.Now() - blockProcess))
}
updateRecharge()
case totalRecharge = <-totalRechargeCh:
totalRechargeGauge.Update(int64(totalRecharge))
updateRecharge()
case totalCapacity = <-totalCapacityCh:
totalCapacityGauge.Update(int64(totalCapacity))
newFreePeers := totalCapacity / s.minCapacity
if newFreePeers < freePeers && newFreePeers < uint64(s.config.LightPeers) {
log.Warn("Reduced free peer connections", "from", freePeers, "to", newFreePeers)
}
freePeers = newFreePeers
s.clientPool.SetLimits(uint64(s.config.LightPeers), totalCapacity)
case <-s.closeCh:
return
}
}
}

@@ -1,436 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"errors"
"fmt"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/trie"
)
const (
softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header
MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request
MaxBodyFetch = 32 // Amount of block bodies to be fetched per retrieval request
MaxReceiptFetch = 128 // Amount of transaction receipts to allow fetching per request
MaxCodeFetch = 64 // Amount of contract codes to allow fetching per request
MaxProofsFetch = 64 // Amount of merkle proofs to be fetched per retrieval request
MaxHelperTrieProofsFetch = 64 // Amount of helper tries to be fetched per retrieval request
MaxTxSend = 64 // Amount of transactions to be sent per request
MaxTxStatus = 256 // Amount of transactions to be queried per request
)
var (
errTooManyInvalidRequest = errors.New("too many invalid requests made")
)
// serverHandler is responsible for serving light clients and processing
// all incoming light requests.
type serverHandler struct {
forkFilter forkid.Filter
blockchain *core.BlockChain
chainDb ethdb.Database
txpool *txpool.TxPool
server *LesServer
closeCh chan struct{} // Channel used to exit all background routines of handler.
wg sync.WaitGroup // WaitGroup used to track all background routines of handler.
synced func() bool // Callback function used to determine whether local node is synced.
// Testing fields
addTxsSync bool
}
func newServerHandler(server *LesServer, blockchain *core.BlockChain, chainDb ethdb.Database, txpool *txpool.TxPool, synced func() bool) *serverHandler {
handler := &serverHandler{
forkFilter: forkid.NewFilter(blockchain),
server: server,
blockchain: blockchain,
chainDb: chainDb,
txpool: txpool,
closeCh: make(chan struct{}),
synced: synced,
}
return handler
}
// start starts the server handler.
func (h *serverHandler) start() {
h.wg.Add(1)
go h.broadcastLoop()
}
// stop stops the server handler.
func (h *serverHandler) stop() {
close(h.closeCh)
h.wg.Wait()
}
// runPeer is the p2p protocol run function for the given version.
func (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := newClientPeer(int(version), h.server.config.NetworkId, p, newMeteredMsgWriter(rw, int(version)))
defer peer.close()
h.wg.Add(1)
defer h.wg.Done()
return h.handle(peer)
}
func (h *serverHandler) handle(p *clientPeer) error {
p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
// Execute the LES handshake
var (
head = h.blockchain.CurrentHeader()
hash = head.Hash()
number = head.Number.Uint64()
td = h.blockchain.GetTd(hash, number)
forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis(), number, head.Time)
)
if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), forkID, h.forkFilter, h.server); err != nil {
p.Log().Debug("Light Ethereum handshake failed", "err", err)
return err
}
// Connected to another server, no messages expected, just wait for disconnection
if p.server {
if err := h.server.serverset.register(p); err != nil {
return err
}
_, err := p.rw.ReadMsg()
h.server.serverset.unregister(p)
return err
}
// Setup flow control mechanism for the peer
p.fcClient = flowcontrol.NewClientNode(h.server.fcManager, p.fcParams)
defer p.fcClient.Disconnect()
// Reject light clients if the server is not synced. Do this check here so
// that "non-synced" les-server peers are still allowed to keep the connection.
if !h.synced() {
p.Log().Debug("Light server not synced, rejecting peer")
return p2p.DiscRequested
}
// Register the peer into the peerset and clientpool
if err := h.server.peers.register(p); err != nil {
return err
}
if p.balance = h.server.clientPool.Register(p); p.balance == nil {
h.server.peers.unregister(p.ID())
p.Log().Debug("Client pool already closed")
return p2p.DiscRequested
}
p.connectedAt = mclock.Now()
var wg sync.WaitGroup // Wait group used to track all in-flight task routines.
defer func() {
wg.Wait() // Ensure all background task routines have exited.
h.server.clientPool.Unregister(p)
h.server.peers.unregister(p.ID())
p.balance = nil
connectionTimer.Update(time.Duration(mclock.Now() - p.connectedAt))
}()
// Mark the peer as being served.
p.serving.Store(true)
defer p.serving.Store(false)
// Spawn a main loop to handle all incoming messages.
for {
select {
case err := <-p.errCh:
p.Log().Debug("Failed to send light ethereum response", "err", err)
return err
default:
}
if err := h.handleMsg(p, &wg); err != nil {
p.Log().Debug("Light Ethereum message handling failed", "err", err)
return err
}
}
}
// beforeHandle performs a series of prechecks before handling the message.
func (h *serverHandler) beforeHandle(p *clientPeer, reqID, responseCount uint64, msg p2p.Msg, reqCnt uint64, maxCount uint64) (*servingTask, uint64) {
// Ensure that the request sent by the client peer is valid
inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0)
if reqCnt == 0 || reqCnt > maxCount {
p.fcClient.OneTimeCost(inSizeCost)
return nil, 0
}
// Ensure that the client peer complies with the flow control
// rules agreed by both sides.
if p.isFrozen() {
p.fcClient.OneTimeCost(inSizeCost)
return nil, 0
}
maxCost := p.fcCosts.getMaxCost(msg.Code, reqCnt)
accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost)
if !accepted {
p.freeze()
p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
p.fcClient.OneTimeCost(inSizeCost)
return nil, 0
}
// Create a multi-stage task, estimate the time it takes for the task to
// execute, and cache it in the request service queue.
factor := h.server.costTracker.globalFactor()
if factor < 0.001 {
p.Log().Error("Invalid global cost factor", "factor", factor)
factor = 1
}
maxTime := uint64(float64(maxCost) / factor)
task := h.server.servingQueue.newTask(p, maxTime, priority)
if !task.start() {
p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost)
return nil, 0
}
return task, maxCost
}
// afterHandle performs a series of operations after message handling,
// such as updating flow control data and sending the reply.
func (h *serverHandler) afterHandle(p *clientPeer, reqID, responseCount uint64, msg p2p.Msg, maxCost uint64, reqCnt uint64, task *servingTask, reply *reply) {
if reply != nil {
task.done()
}
p.responseLock.Lock()
defer p.responseLock.Unlock()
// Short circuit if the client is already frozen.
if p.isFrozen() {
realCost := h.server.costTracker.realCost(task.servingTime, msg.Size, 0)
p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
return
}
// Positive correction buffer value with real cost.
var replySize uint32
if reply != nil {
replySize = reply.size()
}
var realCost uint64
if h.server.costTracker.testing {
realCost = maxCost // Assign a fake cost for testing purpose
} else {
realCost = h.server.costTracker.realCost(task.servingTime, msg.Size, replySize)
if realCost > maxCost {
realCost = maxCost
}
}
bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
if reply != nil {
// Feed cost tracker request serving statistic.
h.server.costTracker.updateStats(msg.Code, reqCnt, task.servingTime, realCost)
// Reduce priority "balance" for the specific peer.
p.balance.RequestServed(realCost)
p.queueSend(func() {
if err := reply.send(bv); err != nil {
select {
case p.errCh <- err:
default:
}
}
})
}
}
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
// Read the next message from the remote peer, and ensure it's fully consumed
msg, err := p.rw.ReadMsg()
if err != nil {
return err
}
p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size)
// Discard large messages which exceed the limit.
if msg.Size > ProtocolMaxMsgSize {
clientErrorMeter.Mark(1)
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
defer msg.Discard()
// Look up the request handler table and ensure the message type is
// supported by the protocol.
req, ok := Les3[msg.Code]
if !ok {
p.Log().Trace("Received invalid message", "code", msg.Code)
clientErrorMeter.Mark(1)
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
p.Log().Trace("Received " + req.Name)
// Decode the p2p message, resolve the concrete handler for it.
serve, reqID, reqCnt, err := req.Handle(msg)
if err != nil {
clientErrorMeter.Mark(1)
return errResp(ErrDecode, "%v: %v", msg, err)
}
if metrics.EnabledExpensive {
req.InPacketsMeter.Mark(1)
req.InTrafficMeter.Mark(int64(msg.Size))
}
p.responseCount++
responseCount := p.responseCount
// First check that this client message complies with all rules before
// handling it, and return a processor if all checks pass.
task, maxCost := h.beforeHandle(p, reqID, responseCount, msg, reqCnt, req.MaxCount)
if task == nil {
return nil
}
wg.Add(1)
go func() {
defer wg.Done()
reply := serve(h, p, task.waitOrStop)
h.afterHandle(p, reqID, responseCount, msg, maxCost, reqCnt, task, reply)
if metrics.EnabledExpensive {
size := uint32(0)
if reply != nil {
size = reply.size()
}
req.OutPacketsMeter.Mark(1)
req.OutTrafficMeter.Mark(int64(size))
req.ServingTimeMeter.Update(time.Duration(task.servingTime))
}
}()
// If the client has made too many invalid requests (e.g. requesting
// non-existent data), reject it to prevent spam attacks.
if p.getInvalid() > maxRequestErrors {
clientErrorMeter.Mark(1)
return errTooManyInvalidRequest
}
return nil
}
// BlockChain implements serverBackend
func (h *serverHandler) BlockChain() *core.BlockChain {
return h.blockchain
}
// TxPool implements serverBackend
func (h *serverHandler) TxPool() *txpool.TxPool {
return h.txpool
}
// ArchiveMode implements serverBackend
func (h *serverHandler) ArchiveMode() bool {
return h.server.archiveMode
}
// AddTxsSync implements serverBackend
func (h *serverHandler) AddTxsSync() bool {
return h.addTxsSync
}
// getAccount retrieves an account from the state based on root.
func getAccount(triedb *trie.Database, root common.Hash, addr common.Address) (types.StateAccount, error) {
trie, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
if err != nil {
return types.StateAccount{}, err
}
acc, err := trie.GetAccount(addr)
if err != nil {
return types.StateAccount{}, err
}
if acc == nil {
return types.StateAccount{}, fmt.Errorf("account %#x is not present", addr)
}
return *acc, nil
}
// GetHelperTrie returns the post-processed trie for the given trie type and section index
func (h *serverHandler) GetHelperTrie(typ uint, index uint64) *trie.Trie {
var (
root common.Hash
prefix string
)
switch typ {
case htCanonical:
sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.ChtSize-1)
root, prefix = light.GetChtRoot(h.chainDb, index, sectionHead), string(rawdb.ChtTablePrefix)
case htBloomBits:
sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.BloomTrieSize-1)
root, prefix = light.GetBloomTrieRoot(h.chainDb, index, sectionHead), string(rawdb.BloomTrieTablePrefix)
}
if root == (common.Hash{}) {
return nil
}
triedb := trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix), trie.HashDefaults)
trie, _ := trie.New(trie.TrieID(root), triedb)
return trie
}
// broadcastLoop broadcasts new block information to all connected light
// clients. According to the agreement between client and server, the server
// should only broadcast a new announcement if its total difficulty is higher
// than the last one's. The server also adds a signature if the client
// requires it.
func (h *serverHandler) broadcastLoop() {
defer h.wg.Done()
headCh := make(chan core.ChainHeadEvent, 10)
headSub := h.blockchain.SubscribeChainHeadEvent(headCh)
defer headSub.Unsubscribe()
var (
lastHead = h.blockchain.CurrentHeader()
lastTd = common.Big0
)
for {
select {
case ev := <-headCh:
header := ev.Block.Header()
hash, number := header.Hash(), header.Number.Uint64()
td := h.blockchain.GetTd(hash, number)
if td == nil || td.Cmp(lastTd) <= 0 {
continue
}
var reorg uint64
if lastHead != nil {
// If a setHead has been performed, the common ancestor can be nil.
if ancestor := rawdb.FindCommonAncestor(h.chainDb, header, lastHead); ancestor != nil {
reorg = lastHead.Number.Uint64() - ancestor.Number.Uint64()
}
}
lastHead, lastTd = header, td
log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)
h.server.peers.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg})
case <-h.closeCh:
return
}
}
}

@@ -1,566 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"encoding/binary"
"encoding/json"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
)
// serverBackend defines the backend functions needed for serving LES requests
type serverBackend interface {
ArchiveMode() bool
AddTxsSync() bool
BlockChain() *core.BlockChain
TxPool() *txpool.TxPool
GetHelperTrie(typ uint, index uint64) *trie.Trie
}
// Decoder is implemented by the messages passed to the handler functions
type Decoder interface {
Decode(val interface{}) error
}
// RequestType is a static struct that describes an LES request type and references
// its handler function.
type RequestType struct {
Name string
MaxCount uint64
InPacketsMeter, InTrafficMeter, OutPacketsMeter, OutTrafficMeter metrics.Meter
ServingTimeMeter metrics.Timer
Handle func(msg Decoder) (serve serveRequestFn, reqID, amount uint64, err error)
}
// serveRequestFn is returned by the request handler functions after decoding the request.
// This function does the actual request serving using the supplied backend. waitOrStop is
// called between serving individual request items and may block if the serving process
// needs to be throttled. If it returns false then the process is terminated.
// The reply is not sent by this function yet. The flow control feedback value is supplied
// by the protocol handler when calling the send function of the returned reply struct.
type serveRequestFn func(backend serverBackend, peer *clientPeer, waitOrStop func() bool) *reply
// Les3 contains the request types supported by les/2 and les/3
var Les3 = map[uint64]RequestType{
GetBlockHeadersMsg: {
Name: "block header request",
MaxCount: MaxHeaderFetch,
InPacketsMeter: miscInHeaderPacketsMeter,
InTrafficMeter: miscInHeaderTrafficMeter,
OutPacketsMeter: miscOutHeaderPacketsMeter,
OutTrafficMeter: miscOutHeaderTrafficMeter,
ServingTimeMeter: miscServingTimeHeaderTimer,
Handle: handleGetBlockHeaders,
},
GetBlockBodiesMsg: {
Name: "block bodies request",
MaxCount: MaxBodyFetch,
InPacketsMeter: miscInBodyPacketsMeter,
InTrafficMeter: miscInBodyTrafficMeter,
OutPacketsMeter: miscOutBodyPacketsMeter,
OutTrafficMeter: miscOutBodyTrafficMeter,
ServingTimeMeter: miscServingTimeBodyTimer,
Handle: handleGetBlockBodies,
},
GetCodeMsg: {
Name: "code request",
MaxCount: MaxCodeFetch,
InPacketsMeter: miscInCodePacketsMeter,
InTrafficMeter: miscInCodeTrafficMeter,
OutPacketsMeter: miscOutCodePacketsMeter,
OutTrafficMeter: miscOutCodeTrafficMeter,
ServingTimeMeter: miscServingTimeCodeTimer,
Handle: handleGetCode,
},
GetReceiptsMsg: {
Name: "receipts request",
MaxCount: MaxReceiptFetch,
InPacketsMeter: miscInReceiptPacketsMeter,
InTrafficMeter: miscInReceiptTrafficMeter,
OutPacketsMeter: miscOutReceiptPacketsMeter,
OutTrafficMeter: miscOutReceiptTrafficMeter,
ServingTimeMeter: miscServingTimeReceiptTimer,
Handle: handleGetReceipts,
},
GetProofsV2Msg: {
Name: "les/2 proofs request",
MaxCount: MaxProofsFetch,
InPacketsMeter: miscInTrieProofPacketsMeter,
InTrafficMeter: miscInTrieProofTrafficMeter,
OutPacketsMeter: miscOutTrieProofPacketsMeter,
OutTrafficMeter: miscOutTrieProofTrafficMeter,
ServingTimeMeter: miscServingTimeTrieProofTimer,
Handle: handleGetProofs,
},
GetHelperTrieProofsMsg: {
Name: "helper trie proof request",
MaxCount: MaxHelperTrieProofsFetch,
InPacketsMeter: miscInHelperTriePacketsMeter,
InTrafficMeter: miscInHelperTrieTrafficMeter,
OutPacketsMeter: miscOutHelperTriePacketsMeter,
OutTrafficMeter: miscOutHelperTrieTrafficMeter,
ServingTimeMeter: miscServingTimeHelperTrieTimer,
Handle: handleGetHelperTrieProofs,
},
SendTxV2Msg: {
Name: "new transactions",
MaxCount: MaxTxSend,
InPacketsMeter: miscInTxsPacketsMeter,
InTrafficMeter: miscInTxsTrafficMeter,
OutPacketsMeter: miscOutTxsPacketsMeter,
OutTrafficMeter: miscOutTxsTrafficMeter,
ServingTimeMeter: miscServingTimeTxTimer,
Handle: handleSendTx,
},
GetTxStatusMsg: {
Name: "transaction status query request",
MaxCount: MaxTxStatus,
InPacketsMeter: miscInTxStatusPacketsMeter,
InTrafficMeter: miscInTxStatusTrafficMeter,
OutPacketsMeter: miscOutTxStatusPacketsMeter,
OutTrafficMeter: miscOutTxStatusTrafficMeter,
ServingTimeMeter: miscServingTimeTxStatusTimer,
Handle: handleGetTxStatus,
},
}
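// exampleServeRequest is an illustrative sketch added for documentation; it is
// not part of the original handler code. It shows how the protocol loop is
// assumed to drive a RequestType: decode the message once via Handle, validate
// the item count, then run the returned serveRequestFn. The waitOrStop callback
// never throttles here; the real handler schedules the task on the serving
// queue and sends the reply with flow control feedback attached.
func exampleServeRequest(code uint64, msg Decoder, backend serverBackend, peer *clientPeer) {
	req, ok := Les3[code]
	if !ok {
		log.Error("Unknown LES request code", "code", code)
		return
	}
	serve, reqID, amount, err := req.Handle(msg)
	if err != nil || amount > req.MaxCount {
		log.Error("Invalid LES request", "name", req.Name, "err", err)
		return
	}
	if reply := serve(backend, peer, func() bool { return true }); reply != nil {
		_ = reqID // the real code path sends the reply here, keyed by reqID
	}
}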
// handleGetBlockHeaders handles a block header request
func handleGetBlockHeaders(msg Decoder) (serveRequestFn, uint64, uint64, error) {
var r GetBlockHeadersPacket
if err := msg.Decode(&r); err != nil {
return nil, 0, 0, err
}
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
// Gather headers until the fetch or network limits are reached
var (
bc = backend.BlockChain()
hashMode = r.Query.Origin.Hash != (common.Hash{})
first = true
maxNonCanonical = uint64(100)
bytes common.StorageSize
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < int(r.Query.Amount) && bytes < softResponseLimit {
if !first && !waitOrStop() {
return nil
}
// Retrieve the next header satisfying the query
var origin *types.Header
if hashMode {
if first {
origin = bc.GetHeaderByHash(r.Query.Origin.Hash)
if origin != nil {
r.Query.Origin.Number = origin.Number.Uint64()
}
} else {
origin = bc.GetHeader(r.Query.Origin.Hash, r.Query.Origin.Number)
}
} else {
origin = bc.GetHeaderByNumber(r.Query.Origin.Number)
}
if origin == nil {
break
}
headers = append(headers, origin)
bytes += estHeaderRlpSize
// Advance to the next header of the query
switch {
case hashMode && r.Query.Reverse:
// Hash based traversal towards the genesis block
ancestor := r.Query.Skip + 1
if ancestor == 0 {
unknown = true
} else {
r.Query.Origin.Hash, r.Query.Origin.Number = bc.GetAncestor(r.Query.Origin.Hash, r.Query.Origin.Number, ancestor, &maxNonCanonical)
unknown = r.Query.Origin.Hash == common.Hash{}
}
case hashMode && !r.Query.Reverse:
// Hash based traversal towards the leaf block
var (
current = origin.Number.Uint64()
next = current + r.Query.Skip + 1
)
if next <= current {
infos, _ := json.Marshal(p.Peer.Info())
p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", r.Query.Skip, "next", next, "attacker", string(infos))
unknown = true
} else {
if header := bc.GetHeaderByNumber(next); header != nil {
nextHash := header.Hash()
expOldHash, _ := bc.GetAncestor(nextHash, next, r.Query.Skip+1, &maxNonCanonical)
if expOldHash == r.Query.Origin.Hash {
r.Query.Origin.Hash, r.Query.Origin.Number = nextHash, next
} else {
unknown = true
}
} else {
unknown = true
}
}
case r.Query.Reverse:
// Number based traversal towards the genesis block
if r.Query.Origin.Number >= r.Query.Skip+1 {
r.Query.Origin.Number -= r.Query.Skip + 1
} else {
unknown = true
}
case !r.Query.Reverse:
// Number based traversal towards the leaf block
r.Query.Origin.Number += r.Query.Skip + 1
}
first = false
}
return p.replyBlockHeaders(r.ReqID, headers)
}, r.ReqID, r.Query.Amount, nil
}
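// exampleHeaderQuery is illustrative only (not part of the original file). It
// builds a forward query starting at block 100 with Skip=4, which the handler
// above answers with headers 100, 105 and 110; with Reverse set it would walk
// toward genesis instead (100, 95, 90).
func exampleHeaderQuery(reqID uint64) GetBlockHeadersPacket {
	var r GetBlockHeadersPacket
	r.ReqID = reqID
	r.Query.Origin.Number = 100
	r.Query.Amount = 3
	r.Query.Skip = 4
	r.Query.Reverse = false
	return r
}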
// handleGetBlockBodies handles a block body request
func handleGetBlockBodies(msg Decoder) (serveRequestFn, uint64, uint64, error) {
var r GetBlockBodiesPacket
if err := msg.Decode(&r); err != nil {
return nil, 0, 0, err
}
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
var (
bytes int
bodies []rlp.RawValue
)
bc := backend.BlockChain()
for i, hash := range r.Hashes {
if i != 0 && !waitOrStop() {
return nil
}
if bytes >= softResponseLimit {
break
}
body := bc.GetBodyRLP(hash)
if body == nil {
p.bumpInvalid()
continue
}
bodies = append(bodies, body)
bytes += len(body)
}
return p.replyBlockBodiesRLP(r.ReqID, bodies)
}, r.ReqID, uint64(len(r.Hashes)), nil
}
// handleGetCode handles a contract code request
func handleGetCode(msg Decoder) (serveRequestFn, uint64, uint64, error) {
var r GetCodePacket
if err := msg.Decode(&r); err != nil {
return nil, 0, 0, err
}
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
var (
bytes int
data [][]byte
)
bc := backend.BlockChain()
for i, request := range r.Reqs {
if i != 0 && !waitOrStop() {
return nil
}
// Look up the root hash belonging to the request
header := bc.GetHeaderByHash(request.BHash)
if header == nil {
p.Log().Warn("Failed to retrieve associate header for code", "hash", request.BHash)
p.bumpInvalid()
continue
}
// Refuse to search for stale state data in the database, since looking up
// a non-existent key is rather expensive.
local := bc.CurrentHeader().Number.Uint64()
if !backend.ArchiveMode() && header.Number.Uint64()+core.TriesInMemory <= local {
p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local)
p.bumpInvalid()
continue
}
address := common.BytesToAddress(request.AccountAddress)
account, err := getAccount(bc.TrieDB(), header.Root, address)
if err != nil {
p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", address, "err", err)
p.bumpInvalid()
continue
}
code, err := bc.StateCache().ContractCode(address, common.BytesToHash(account.CodeHash))
if err != nil {
p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", address, "codehash", common.BytesToHash(account.CodeHash), "err", err)
continue
}
// Accumulate the code and abort if enough data was retrieved
data = append(data, code)
if bytes += len(code); bytes >= softResponseLimit {
break
}
}
return p.replyCode(r.ReqID, data)
}, r.ReqID, uint64(len(r.Reqs)), nil
}
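// Note (added for illustration): on a non-archive node the staleness check
// above means that, with core.TriesInMemory == 128, code requests referencing
// state more than 128 blocks behind the current head are refused.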
// handleGetReceipts handles a block receipts request
func handleGetReceipts(msg Decoder) (serveRequestFn, uint64, uint64, error) {
var r GetReceiptsPacket
if err := msg.Decode(&r); err != nil {
return nil, 0, 0, err
}
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
var (
bytes int
receipts []rlp.RawValue
)
bc := backend.BlockChain()
for i, hash := range r.Hashes {
if i != 0 && !waitOrStop() {
return nil
}
if bytes >= softResponseLimit {
break
}
// Retrieve the requested block's receipts, skipping if unknown to us
results := bc.GetReceiptsByHash(hash)
if results == nil {
if header := bc.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyReceiptsHash {
p.bumpInvalid()
continue
}
}
// If known, encode and queue for response packet
if encoded, err := rlp.EncodeToBytes(results); err != nil {
log.Error("Failed to encode receipt", "err", err)
} else {
receipts = append(receipts, encoded)
bytes += len(encoded)
}
}
return p.replyReceiptsRLP(r.ReqID, receipts)
}, r.ReqID, uint64(len(r.Hashes)), nil
}
// handleGetProofs handles a proof request
func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) {
var r GetProofsPacket
if err := msg.Decode(&r); err != nil {
return nil, 0, 0, err
}
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
var (
lastBHash common.Hash
root common.Hash
header *types.Header
err error
)
bc := backend.BlockChain()
nodes := trienode.NewProofSet()
for i, request := range r.Reqs {
if i != 0 && !waitOrStop() {
return nil
}
// Look up the root hash belonging to the request
if request.BHash != lastBHash {
root, lastBHash = common.Hash{}, request.BHash
if header = bc.GetHeaderByHash(request.BHash); header == nil {
p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash)
p.bumpInvalid()
continue
}
// Refuse to search for stale state data in the database, since looking up
// a non-existent key is rather expensive.
local := bc.CurrentHeader().Number.Uint64()
if !backend.ArchiveMode() && header.Number.Uint64()+core.TriesInMemory <= local {
p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local)
p.bumpInvalid()
continue
}
root = header.Root
}
// If a header lookup failed (non-existent), ignore subsequent requests for the same header
if root == (common.Hash{}) {
p.bumpInvalid()
continue
}
// Open the account or storage trie for the request
statedb := bc.StateCache()
var trie state.Trie
switch len(request.AccountAddress) {
case 0:
// No account key specified, open an account trie
trie, err = statedb.OpenTrie(root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err)
continue
}
default:
// Account key specified, open a storage trie
address := common.BytesToAddress(request.AccountAddress)
account, err := getAccount(bc.TrieDB(), root, address)
if err != nil {
p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", address, "err", err)
p.bumpInvalid()
continue
}
trie, err = statedb.OpenStorageTrie(root, address, account.Root, nil)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", address, "root", account.Root, "err", err)
continue
}
}
// Prove the user's request from the account or storage trie
if err := trie.Prove(request.Key, nodes); err != nil {
p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
continue
}
if nodes.DataSize() >= softResponseLimit {
break
}
}
return p.replyProofsV2(r.ReqID, nodes.List())
}, r.ReqID, uint64(len(r.Reqs)), nil
}
// handleGetHelperTrieProofs handles a helper trie proof request
func handleGetHelperTrieProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) {
var r GetHelperTrieProofsPacket
if err := msg.Decode(&r); err != nil {
return nil, 0, 0, err
}
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
var (
lastIdx uint64
lastType uint
auxTrie *trie.Trie
auxBytes int
auxData [][]byte
)
bc := backend.BlockChain()
nodes := trienode.NewProofSet()
for i, request := range r.Reqs {
if i != 0 && !waitOrStop() {
return nil
}
if auxTrie == nil || request.Type != lastType || request.TrieIdx != lastIdx {
lastType, lastIdx = request.Type, request.TrieIdx
auxTrie = backend.GetHelperTrie(request.Type, request.TrieIdx)
}
if auxTrie == nil {
return nil
}
// TODO(rjl493456442) short circuit if proving fails.
// The original client side code has a dirty hack to retrieve
// headers with no valid proof. Keep compatibility with the
// legacy les protocol and drop this hack once les/2 and les/3
// are no longer supported.
err := auxTrie.Prove(request.Key, nodes)
if p.version >= lpv4 && err != nil {
return nil
}
if request.Type == htCanonical && request.AuxReq == htAuxHeader && len(request.Key) == 8 {
header := bc.GetHeaderByNumber(binary.BigEndian.Uint64(request.Key))
data, err := rlp.EncodeToBytes(header)
if err != nil {
log.Error("Failed to encode header", "err", err)
return nil
}
auxData = append(auxData, data)
auxBytes += len(data)
}
if nodes.DataSize()+auxBytes >= softResponseLimit {
break
}
}
return p.replyHelperTrieProofs(r.ReqID, HelperTrieResps{Proofs: nodes.List(), AuxData: auxData})
}, r.ReqID, uint64(len(r.Reqs)), nil
}
// handleSendTx handles a transaction propagation request
func handleSendTx(msg Decoder) (serveRequestFn, uint64, uint64, error) {
var r SendTxPacket
if err := msg.Decode(&r); err != nil {
return nil, 0, 0, err
}
amount := uint64(len(r.Txs))
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
stats := make([]light.TxStatus, len(r.Txs))
for i, tx := range r.Txs {
if i != 0 && !waitOrStop() {
return nil
}
hash := tx.Hash()
stats[i] = txStatus(backend, hash)
if stats[i].Status == txpool.TxStatusUnknown {
if errs := backend.TxPool().Add([]*types.Transaction{tx}, false, backend.AddTxsSync()); errs[0] != nil {
stats[i].Error = errs[0].Error()
continue
}
stats[i] = txStatus(backend, hash)
}
}
return p.replyTxStatus(r.ReqID, stats)
}, r.ReqID, amount, nil
}
// handleGetTxStatus handles a transaction status query
func handleGetTxStatus(msg Decoder) (serveRequestFn, uint64, uint64, error) {
var r GetTxStatusPacket
if err := msg.Decode(&r); err != nil {
return nil, 0, 0, err
}
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {
stats := make([]light.TxStatus, len(r.Hashes))
for i, hash := range r.Hashes {
if i != 0 && !waitOrStop() {
return nil
}
stats[i] = txStatus(backend, hash)
}
return p.replyTxStatus(r.ReqID, stats)
}, r.ReqID, uint64(len(r.Hashes)), nil
}
// txStatus returns the status of a specified transaction.
func txStatus(b serverBackend, hash common.Hash) light.TxStatus {
var stat light.TxStatus
// Look up the transaction in the txpool first.
stat.Status = b.TxPool().Status(hash)
// If the transaction is unknown to the pool, try looking it up locally.
if stat.Status == txpool.TxStatusUnknown {
lookup := b.BlockChain().GetTransactionLookup(hash)
if lookup != nil {
stat.Status = txpool.TxStatusIncluded
stat.Lookup = lookup
}
}
return stat
}
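// isConfirmed is an illustrative helper (not part of the original file) showing
// how a caller is assumed to interpret txStatus: a transaction is confirmed once
// it has been included in a block, rather than merely pending in the pool.
func isConfirmed(b serverBackend, hash common.Hash) bool {
	return txStatus(b, hash).Status == txpool.TxStatusIncluded
}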

@@ -1,365 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"sync"
"sync/atomic"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
"golang.org/x/exp/slices"
)
// servingQueue allows running tasks in a limited number of threads and puts the
// waiting tasks in a priority queue
type servingQueue struct {
recentTime, queuedTime uint64
servingTimeDiff atomic.Uint64
burstLimit, burstDropLimit uint64
burstDecRate float64
lastUpdate mclock.AbsTime
queueAddCh, queueBestCh chan *servingTask
stopThreadCh, quit chan struct{}
setThreadsCh chan int
wg sync.WaitGroup
threadCount int // number of currently running threads
queue *prque.Prque[int64, *servingTask] // priority queue for waiting or suspended tasks
best *servingTask // the highest priority task (not included in the queue)
suspendBias int64 // priority bias against suspending an already running task
}
// servingTask represents a request serving task. Tasks can be implemented to
// run in multiple steps, allowing the serving queue to suspend execution between
// steps if higher priority tasks are entered. A task is created with newTask,
// begun with start, optionally throttled between steps with waitOrStop and
// finished with done. The priority field holds the scheduling priority: a
// greater value means higher priority; values can wrap around the int64 range.
type servingTask struct {
sq *servingQueue
servingTime, timeAdded, maxTime, expTime uint64
peer *clientPeer
priority int64
biasAdded bool
token runToken
tokenCh chan runToken
}
// runToken received by servingTask.start allows the task to run. Closing the
// channel in servingTask.done signals the thread controller to allow a new
// task to start running.
type runToken chan struct{}
// start blocks until the task can start and returns true if it is allowed to run.
// Returning false means that the task should be cancelled.
func (t *servingTask) start() bool {
if t.peer.isFrozen() {
return false
}
t.tokenCh = make(chan runToken, 1)
select {
case t.sq.queueAddCh <- t:
case <-t.sq.quit:
return false
}
select {
case t.token = <-t.tokenCh:
case <-t.sq.quit:
return false
}
if t.token == nil {
return false
}
t.servingTime -= uint64(mclock.Now())
return true
}
// done signals the thread controller about the task being finished and returns
// the total serving time of the task in nanoseconds.
func (t *servingTask) done() uint64 {
t.servingTime += uint64(mclock.Now())
close(t.token)
diff := t.servingTime - t.timeAdded
t.timeAdded = t.servingTime
if t.expTime > diff {
t.expTime -= diff
t.sq.servingTimeDiff.Add(t.expTime)
} else {
t.expTime = 0
}
return t.servingTime
}
// waitOrStop can be called during the execution of the task. It blocks if there
// is a higher priority task waiting (a bias is applied in favor of the currently
// running task). Returning true means that the execution can be resumed. False
// means the task should be cancelled.
func (t *servingTask) waitOrStop() bool {
t.done()
if !t.biasAdded {
t.priority += t.sq.suspendBias
t.biasAdded = true
}
return t.start()
}
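// exampleRunTask is an illustrative sketch (not part of the original file) of
// the intended task lifecycle: start the task, call waitOrStop between serving
// steps so that higher priority tasks can preempt it, and finish with done.
func exampleRunTask(sq *servingQueue, p *clientPeer) {
	task := sq.newTask(p, 1000000, 0) // maxTime: 1ms of expected serving time, in nanoseconds
	if !task.start() {
		return // cancelled before it was allowed to run
	}
	for step := 0; step < 3; step++ {
		// ... serve one request item here ...
		if step < 2 && !task.waitOrStop() {
			return // frozen or cancelled while suspended
		}
	}
	task.done()
}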
// newServingQueue returns a new servingQueue
func newServingQueue(suspendBias int64, utilTarget float64) *servingQueue {
sq := &servingQueue{
queue: prque.New[int64, *servingTask](nil),
suspendBias: suspendBias,
queueAddCh: make(chan *servingTask, 100),
queueBestCh: make(chan *servingTask),
stopThreadCh: make(chan struct{}),
quit: make(chan struct{}),
setThreadsCh: make(chan int, 10),
burstLimit: uint64(utilTarget * bufLimitRatio * 1200000),
burstDropLimit: uint64(utilTarget * bufLimitRatio * 1000000),
burstDecRate: utilTarget,
lastUpdate: mclock.Now(),
}
sq.wg.Add(2)
go sq.queueLoop()
go sq.threadCountLoop()
return sq
}
// newTask creates a new task with the given priority
func (sq *servingQueue) newTask(peer *clientPeer, maxTime uint64, priority int64) *servingTask {
return &servingTask{
sq: sq,
peer: peer,
maxTime: maxTime,
expTime: maxTime,
priority: priority,
}
}
// threadController is started in multiple goroutines and controls the execution
// of tasks. The number of active thread controllers equals the allowed number of
// concurrently running threads. It tries to fetch the highest priority queued
// task first. If there are no queued tasks waiting then it can directly catch
// run tokens from the token channel and allow the corresponding tasks to run
// without entering the priority queue.
func (sq *servingQueue) threadController() {
defer sq.wg.Done()
for {
token := make(runToken)
select {
case best := <-sq.queueBestCh:
best.tokenCh <- token
case <-sq.stopThreadCh:
return
case <-sq.quit:
return
}
select {
case <-sq.stopThreadCh:
return
case <-sq.quit:
return
case <-token:
}
}
}
// peerTasks lists the tasks received from a given peer when selecting peers to freeze
type peerTasks struct {
peer *clientPeer
list []*servingTask
sumTime uint64
priority float64
}
// freezePeers selects the peers with the worst priority queued tasks and freezes
// them until burstTime goes under burstDropLimit or all peers are frozen
func (sq *servingQueue) freezePeers() {
peerMap := make(map[*clientPeer]*peerTasks)
var peerList []*peerTasks
if sq.best != nil {
sq.queue.Push(sq.best, sq.best.priority)
}
sq.best = nil
for sq.queue.Size() > 0 {
task := sq.queue.PopItem()
tasks := peerMap[task.peer]
if tasks == nil {
bufValue, bufLimit := task.peer.fcClient.BufferStatus()
if bufLimit < 1 {
bufLimit = 1
}
tasks = &peerTasks{
peer: task.peer,
priority: float64(bufValue) / float64(bufLimit), // lower value comes first
}
peerMap[task.peer] = tasks
peerList = append(peerList, tasks)
}
tasks.list = append(tasks.list, task)
tasks.sumTime += task.expTime
}
slices.SortFunc(peerList, func(a, b *peerTasks) int {
if a.priority < b.priority {
return -1
}
if a.priority > b.priority {
return 1
}
return 0
})
drop := true
for _, tasks := range peerList {
if drop {
tasks.peer.freeze()
tasks.peer.fcClient.Freeze()
sq.queuedTime -= tasks.sumTime
sqQueuedGauge.Update(int64(sq.queuedTime))
clientFreezeMeter.Mark(1)
drop = sq.recentTime+sq.queuedTime > sq.burstDropLimit
for _, task := range tasks.list {
task.tokenCh <- nil
}
} else {
for _, task := range tasks.list {
sq.queue.Push(task, task.priority)
}
}
}
if sq.queue.Size() > 0 {
sq.best = sq.queue.PopItem()
}
}
// updateRecentTime recalculates the recent serving time value
func (sq *servingQueue) updateRecentTime() {
subTime := sq.servingTimeDiff.Swap(0)
now := mclock.Now()
dt := now - sq.lastUpdate
sq.lastUpdate = now
if dt > 0 {
subTime += uint64(float64(dt) * sq.burstDecRate)
}
if sq.recentTime > subTime {
sq.recentTime -= subTime
} else {
sq.recentTime = 0
}
}
// addTask inserts a task into the priority queue
func (sq *servingQueue) addTask(task *servingTask) {
if sq.best == nil {
sq.best = task
} else if task.priority-sq.best.priority > 0 {
sq.queue.Push(sq.best, sq.best.priority)
sq.best = task
} else {
sq.queue.Push(task, task.priority)
}
sq.updateRecentTime()
sq.queuedTime += task.expTime
sqServedGauge.Update(int64(sq.recentTime))
sqQueuedGauge.Update(int64(sq.queuedTime))
if sq.recentTime+sq.queuedTime > sq.burstLimit {
sq.freezePeers()
}
}
// queueLoop is an event loop running in a goroutine. It receives tasks from queueAddCh
// and always tries to send the highest priority task to queueBestCh. Successfully sent
// tasks are removed from the queue.
func (sq *servingQueue) queueLoop() {
defer sq.wg.Done()
for {
if sq.best != nil {
expTime := sq.best.expTime
select {
case task := <-sq.queueAddCh:
sq.addTask(task)
case sq.queueBestCh <- sq.best:
sq.updateRecentTime()
sq.queuedTime -= expTime
sq.recentTime += expTime
sqServedGauge.Update(int64(sq.recentTime))
sqQueuedGauge.Update(int64(sq.queuedTime))
if sq.queue.Size() == 0 {
sq.best = nil
} else {
sq.best = sq.queue.PopItem()
}
case <-sq.quit:
return
}
} else {
select {
case task := <-sq.queueAddCh:
sq.addTask(task)
case <-sq.quit:
return
}
}
}
}
// threadCountLoop is an event loop running in a goroutine. It adjusts the number
// of active thread controller goroutines.
func (sq *servingQueue) threadCountLoop() {
var threadCountTarget int
defer sq.wg.Done()
for {
for threadCountTarget > sq.threadCount {
sq.wg.Add(1)
go sq.threadController()
sq.threadCount++
}
if threadCountTarget < sq.threadCount {
select {
case threadCountTarget = <-sq.setThreadsCh:
case sq.stopThreadCh <- struct{}{}:
sq.threadCount--
case <-sq.quit:
return
}
} else {
select {
case threadCountTarget = <-sq.setThreadsCh:
case <-sq.quit:
return
}
}
}
}
// setThreads sets the allowed processing thread count, suspending tasks as soon as
// possible if necessary.
func (sq *servingQueue) setThreads(threadCount int) {
select {
case sq.setThreadsCh <- threadCount:
case <-sq.quit:
return
}
}
// stop stops task processing as soon as possible and shuts down the serving queue.
func (sq *servingQueue) stop() {
close(sq.quit)
sq.wg.Wait()
}

@@ -1,80 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"context"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/light"
)
// noopReleaser is returned in case there is no operation expected
// for releasing state.
var noopReleaser = tracers.StateReleaseFunc(func() {})
// stateAtBlock retrieves the state database associated with a certain block.
func (leth *LightEthereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, tracers.StateReleaseFunc, error) {
return light.NewState(ctx, block.Header(), leth.odr), noopReleaser, nil
}
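// Note (added for illustration): unlike the full node implementation, the light
// client variant never re-executes historical blocks; state is fetched on demand
// through the ODR backend, so the reexec parameter is effectively ignored and
// there are no resources to release afterwards (hence noopReleaser).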
// stateAtTransaction returns the execution environment of a certain transaction.
func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) {
// Short circuit if it's the genesis block.
if block.NumberU64() == 0 {
return nil, vm.BlockContext{}, nil, nil, errors.New("no transaction in genesis")
}
// Create the parent state database
parent, err := leth.blockchain.GetBlock(ctx, block.ParentHash(), block.NumberU64()-1)
if err != nil {
return nil, vm.BlockContext{}, nil, nil, err
}
statedb, release, err := leth.stateAtBlock(ctx, parent, reexec)
if err != nil {
return nil, vm.BlockContext{}, nil, nil, err
}
if txIndex == 0 && len(block.Transactions()) == 0 {
return nil, vm.BlockContext{}, statedb, release, nil
}
// Recompute transactions up to the target index.
signer := types.MakeSigner(leth.blockchain.Config(), block.Number(), block.Time())
for idx, tx := range block.Transactions() {
// Assemble the transaction call message and return if we've reached the requested transaction index
msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
txContext := core.NewEVMTxContext(msg)
context := core.NewEVMBlockContext(block.Header(), leth.blockchain, nil)
statedb.SetTxContext(tx.Hash(), idx)
if idx == txIndex {
return msg, context, statedb, release, nil
}
// Not yet the transaction we're searching for; execute it on top of the current state
vmenv := vm.NewEVM(context, txContext, statedb, leth.blockchain.Config(), vm.Config{})
if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err)
}
// Ensure any modifications are committed to the state
// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect
statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))
}
return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash())
}

@@ -1,626 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// This file contains some shared testing functionality, common to the multiple
// different files and modules being tested. Client-based and server-based
// networks can be created easily with the available APIs.
package les
import (
"context"
"crypto/rand"
"fmt"
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/les/flowcontrol"
vfs "github.com/ethereum/go-ethereum/les/vflux/server"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)
var (
bankKey, _ = crypto.GenerateKey()
bankAddr = crypto.PubkeyToAddress(bankKey.PublicKey)
bankFunds = big.NewInt(1_000_000_000_000_000_000)
userKey1, _ = crypto.GenerateKey()
userKey2, _ = crypto.GenerateKey()
userAddr1 = crypto.PubkeyToAddress(userKey1.PublicKey)
userAddr2 = crypto.PubkeyToAddress(userKey2.PublicKey)
testContractAddr common.Address
testContractCode = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056")
testContractCodeDeployed = testContractCode[16:]
testContractDeployed = uint64(2)
testEventEmitterCode = common.Hex2Bytes("60606040523415600e57600080fd5b7f57050ab73f6b9ebdd9f76b8d4997793f48cf956e965ee070551b9ca0bb71584e60405160405180910390a160358060476000396000f3006060604052600080fd00a165627a7a723058203f727efcad8b5811f8cb1fc2620ce5e8c63570d697aef968172de296ea3994140029")
// Checkpoint oracle relative fields
signerKey, _ = crypto.GenerateKey()
signerAddr = crypto.PubkeyToAddress(signerKey.PublicKey)
)
var (
// The token bucket buffer limit for testing purposes.
testBufLimit = uint64(1000000)
// The buffer recharging speed for testing purposes.
testBufRecharge = uint64(1000)
)
/*
contract test {
uint256[100] data;
function Put(uint256 addr, uint256 value) {
data[addr] = value;
}
function Get(uint256 addr) constant returns (uint256 value) {
return data[addr];
}
}
*/
// prepare pre-commits the specified number of customized blocks into the chain.
func prepare(n int, backend *backends.SimulatedBackend) {
var (
ctx = context.Background()
signer = types.HomesteadSigner{}
)
for i := 0; i < n; i++ {
switch i {
case 0:
// Builtin-block
// number: 1
// txs: 1
// bankUser transfers some ether to user1
nonce, _ := backend.PendingNonceAt(ctx, bankAddr)
tx, _ := types.SignTx(types.NewTransaction(nonce, userAddr1, big.NewInt(10_000_000_000_000_000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, bankKey)
backend.SendTransaction(ctx, tx)
case 1:
// Builtin-block
// number: 2
// txs: 4
bankNonce, _ := backend.PendingNonceAt(ctx, bankAddr)
userNonce1, _ := backend.PendingNonceAt(ctx, userAddr1)
// bankUser transfers more ether to user1
tx1, _ := types.SignTx(types.NewTransaction(bankNonce, userAddr1, big.NewInt(1_000_000_000_000_000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, bankKey)
backend.SendTransaction(ctx, tx1)
// user1 relays ether to user2
tx2, _ := types.SignTx(types.NewTransaction(userNonce1, userAddr2, big.NewInt(1_000_000_000_000_000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, userKey1)
backend.SendTransaction(ctx, tx2)
// user1 deploys a test contract
tx3, _ := types.SignTx(types.NewContractCreation(userNonce1+1, big.NewInt(0), 200000, big.NewInt(params.InitialBaseFee), testContractCode), signer, userKey1)
backend.SendTransaction(ctx, tx3)
testContractAddr = crypto.CreateAddress(userAddr1, userNonce1+1)
// user1 deploys an event contract
tx4, _ := types.SignTx(types.NewContractCreation(userNonce1+2, big.NewInt(0), 200000, big.NewInt(params.InitialBaseFee), testEventEmitterCode), signer, userKey1)
backend.SendTransaction(ctx, tx4)
case 2:
// Builtin-block
// number: 3
// txs: 2
// bankUser transfers some ether to the signer
bankNonce, _ := backend.PendingNonceAt(ctx, bankAddr)
tx1, _ := types.SignTx(types.NewTransaction(bankNonce, signerAddr, big.NewInt(1000000000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, bankKey)
backend.SendTransaction(ctx, tx1)
// invoke test contract
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001")
tx2, _ := types.SignTx(types.NewTransaction(bankNonce+1, testContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data), signer, bankKey)
backend.SendTransaction(ctx, tx2)
case 3:
// Builtin-block
// number: 4
// txs: 1
// invoke test contract
bankNonce, _ := backend.PendingNonceAt(ctx, bankAddr)
data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002")
tx, _ := types.SignTx(types.NewTransaction(bankNonce, testContractAddr, big.NewInt(0), 100000, big.NewInt(params.InitialBaseFee), data), signer, bankKey)
backend.SendTransaction(ctx, tx)
}
backend.Commit()
}
}
// testIndexers creates a set of indexers with the specified params for testing purposes.
func testIndexers(db ethdb.Database, odr light.OdrBackend, config *light.IndexerConfig, disablePruning bool) []*core.ChainIndexer {
var indexers [3]*core.ChainIndexer
indexers[0] = light.NewChtIndexer(db, odr, config.ChtSize, config.ChtConfirms, disablePruning)
indexers[1] = core.NewBloomIndexer(db, config.BloomSize, config.BloomConfirms)
indexers[2] = light.NewBloomTrieIndexer(db, odr, config.BloomSize, config.BloomTrieSize, disablePruning)
// Make the bloomTrieIndexer a child indexer of the bloom indexer.
indexers[1].AddChildIndexer(indexers[2])
return indexers[:]
}
func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, indexers []*core.ChainIndexer, db ethdb.Database, peers *serverPeerSet) (*clientHandler, func()) {
var (
evmux = new(event.TypeMux)
engine = ethash.NewFaker()
gspec = core.Genesis{
Config: params.AllEthashProtocolChanges,
Alloc: core.GenesisAlloc{bankAddr: {Balance: bankFunds}},
GasLimit: 100000000,
BaseFee: big.NewInt(params.InitialBaseFee),
}
)
genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
chain, _ := light.NewLightChain(odr, gspec.Config, engine)
client := &LightEthereum{
lesCommons: lesCommons{
genesis: genesis.Hash(),
config: &ethconfig.Config{LightPeers: 100, NetworkId: NetworkId},
chainConfig: params.AllEthashProtocolChanges,
iConfig: light.TestClientIndexerConfig,
chainDb: db,
chainReader: chain,
closeCh: make(chan struct{}),
},
peers: peers,
reqDist: odr.retriever.dist,
retriever: odr.retriever,
odr: odr,
engine: engine,
blockchain: chain,
eventMux: evmux,
merger: consensus.NewMerger(rawdb.NewMemoryDatabase()),
}
client.handler = newClientHandler(client)
return client.handler, func() {
client.handler.stop()
}
}
func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Database, clock mclock.Clock) (*serverHandler, *backends.SimulatedBackend, func()) {
var (
gspec = core.Genesis{
Config: params.AllEthashProtocolChanges,
Alloc: core.GenesisAlloc{bankAddr: {Balance: bankFunds}},
GasLimit: 100000000,
BaseFee: big.NewInt(params.InitialBaseFee),
}
)
genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
// Create a simulation backend and pre-commit several customized blocks to the database.
simulation := backends.NewSimulatedBackendWithDatabase(db, gspec.Alloc, 100000000)
prepare(blocks, simulation)
txpoolConfig := legacypool.DefaultConfig
txpoolConfig.Journal = ""
pool := legacypool.New(txpoolConfig, simulation.Blockchain())
txpool, _ := txpool.New(new(big.Int).SetUint64(txpoolConfig.PriceLimit), simulation.Blockchain(), []txpool.SubPool{pool})
server := &LesServer{
lesCommons: lesCommons{
genesis: genesis.Hash(),
config: &ethconfig.Config{LightPeers: 100, NetworkId: NetworkId},
chainConfig: params.AllEthashProtocolChanges,
iConfig: light.TestServerIndexerConfig,
chainDb: db,
chainReader: simulation.Blockchain(),
closeCh: make(chan struct{}),
},
peers: newClientPeerSet(),
servingQueue: newServingQueue(int64(time.Millisecond*10), 1),
defParams: flowcontrol.ServerParams{
BufLimit: testBufLimit,
MinRecharge: testBufRecharge,
},
fcManager: flowcontrol.NewClientManager(nil, clock),
}
server.costTracker, server.minCapacity = newCostTracker(db, server.config)
server.costTracker.testCostList = testCostList(0) // Disable flow control mechanism.
server.clientPool = vfs.NewClientPool(db, testBufRecharge, defaultConnectedBias, clock, alwaysTrueFn)
server.clientPool.Start()
server.clientPool.SetLimits(10000, 10000) // Assign enough capacity for clientpool
server.handler = newServerHandler(server, simulation.Blockchain(), db, txpool, func() bool { return true })
server.servingQueue.setThreads(4)
server.handler.start()
closer := func() { server.Stop() }
return server.handler, simulation, closer
}
func alwaysTrueFn() bool {
return true
}
// testPeer is a simulated peer to allow testing direct network calls.
type testPeer struct {
cpeer *clientPeer
speer *serverPeer
net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging
app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side
}
// handshakeWithServer executes the handshake with the remote server peer.
func (p *testPeer) handshakeWithServer(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID) {
// It only works for the simulated client peer
if p.cpeer == nil {
t.Fatal("handshake for client peer only")
}
var sendList keyValueList
sendList = sendList.add("protocolVersion", uint64(p.cpeer.version))
sendList = sendList.add("networkId", uint64(NetworkId))
sendList = sendList.add("headTd", td)
sendList = sendList.add("headHash", head)
sendList = sendList.add("headNum", headNum)
sendList = sendList.add("genesisHash", genesis)
if p.cpeer.version >= lpv4 {
sendList = sendList.add("forkID", &forkID)
}
if err := p2p.ExpectMsg(p.app, StatusMsg, nil); err != nil {
t.Fatalf("status recv: %v", err)
}
if err := p2p.Send(p.app, StatusMsg, &sendList); err != nil {
t.Fatalf("status send: %v", err)
}
}
// handshakeWithClient executes the handshake with the remote client peer.
// (used by temporarily disabled tests)
/*func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, costList RequestCostList, recentTxLookup uint64) {
// It only works for the simulated client peer
if p.speer == nil {
t.Fatal("handshake for server peer only")
}
var sendList keyValueList
sendList = sendList.add("protocolVersion", uint64(p.speer.version))
sendList = sendList.add("networkId", uint64(NetworkId))
sendList = sendList.add("headTd", td)
sendList = sendList.add("headHash", head)
sendList = sendList.add("headNum", headNum)
sendList = sendList.add("genesisHash", genesis)
sendList = sendList.add("serveHeaders", nil)
sendList = sendList.add("serveChainSince", uint64(0))
sendList = sendList.add("serveStateSince", uint64(0))
sendList = sendList.add("serveRecentState", uint64(core.TriesInMemory-4))
sendList = sendList.add("txRelay", nil)
sendList = sendList.add("flowControl/BL", testBufLimit)
sendList = sendList.add("flowControl/MRR", testBufRecharge)
sendList = sendList.add("flowControl/MRC", costList)
if p.speer.version >= lpv4 {
sendList = sendList.add("forkID", &forkID)
sendList = sendList.add("recentTxLookup", recentTxLookup)
}
if err := p2p.ExpectMsg(p.app, StatusMsg, nil); err != nil {
t.Fatalf("status recv: %v", err)
}
if err := p2p.Send(p.app, StatusMsg, &sendList); err != nil {
t.Fatalf("status send: %v", err)
}
}*/
// close terminates the local side of the peer, notifying the remote protocol
// manager of termination.
func (p *testPeer) close() {
p.app.Close()
}
func newTestPeerPair(name string, version int, server *serverHandler, client *clientHandler, noInitAnnounce bool) (*testPeer, *testPeer, error) {
// Create a message pipe to communicate through
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
var id enode.ID
rand.Read(id[:])
peer1 := newClientPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
peer2 := newServerPeer(version, NetworkId, false, p2p.NewPeer(id, name, nil), app)
// Start the peer on a new thread
errc1 := make(chan error, 1)
errc2 := make(chan error, 1)
go func() {
select {
case <-server.closeCh:
errc1 <- p2p.DiscQuitting
case errc1 <- server.handle(peer1):
}
}()
go func() {
select {
case <-client.closeCh:
errc2 <- p2p.DiscQuitting
case errc2 <- client.handle(peer2, noInitAnnounce):
}
}()
// Ensure the connection is established or exits when any error occurs
for {
select {
case err := <-errc1:
return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err)
case err := <-errc2:
return nil, nil, fmt.Errorf("failed to establish protocol connection %v", err)
default:
}
if peer1.serving.Load() && peer2.serving.Load() {
break
}
time.Sleep(50 * time.Millisecond)
}
return &testPeer{cpeer: peer1, net: net, app: app}, &testPeer{speer: peer2, net: app, app: net}, nil
}
type indexerCallback func(*core.ChainIndexer, *core.ChainIndexer, *core.ChainIndexer)
// testClient represents a client object for testing with necessary auxiliary fields.
type testClient struct {
clock mclock.Clock
db ethdb.Database
peer *testPeer
handler *clientHandler
chtIndexer *core.ChainIndexer
bloomIndexer *core.ChainIndexer
bloomTrieIndexer *core.ChainIndexer
}
// newRawPeer creates a new server peer, connects to the server and does the handshake.
// (used by temporarily disabled tests)
/*func (client *testClient) newRawPeer(t *testing.T, name string, version int, recentTxLookup uint64) (*testPeer, func(), <-chan error) {
// Create a message pipe to communicate through
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
var id enode.ID
rand.Read(id[:])
peer := newServerPeer(version, NetworkId, false, p2p.NewPeer(id, name, nil), net)
// Start the peer on a new thread
errCh := make(chan error, 1)
go func() {
select {
case <-client.handler.closeCh:
errCh <- p2p.DiscQuitting
case errCh <- client.handler.handle(peer, false):
}
}()
tp := &testPeer{
app: app,
net: net,
speer: peer,
}
var (
genesis = client.handler.backend.blockchain.Genesis()
head = client.handler.backend.blockchain.CurrentHeader()
td = client.handler.backend.blockchain.GetTd(head.Hash(), head.Number.Uint64())
)
forkID := forkid.NewID(client.handler.backend.blockchain.Config(), genesis.Hash(), head.Number.Uint64(), head.Time)
tp.handshakeWithClient(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID, testCostList(0), recentTxLookup) // disable flow control by default
// Ensure the connection is established or exits when any error occurs
for {
select {
case <-errCh:
return nil, nil, nil
default:
}
if peer.serving.Load() {
break
}
time.Sleep(50 * time.Millisecond)
}
closePeer := func() {
tp.speer.close()
tp.close()
}
return tp, closePeer, errCh
}*/
// testServer represents a server object for testing with necessary auxiliary fields.
type testServer struct {
clock mclock.Clock
backend *backends.SimulatedBackend
db ethdb.Database
peer *testPeer
handler *serverHandler
chtIndexer *core.ChainIndexer
bloomIndexer *core.ChainIndexer
bloomTrieIndexer *core.ChainIndexer
}
// newRawPeer creates a new client peer, connects to the server and does the handshake.
func (server *testServer) newRawPeer(t *testing.T, name string, version int) (*testPeer, func(), <-chan error) {
// Create a message pipe to communicate through
app, net := p2p.MsgPipe()
// Generate a random id and create the peer
var id enode.ID
rand.Read(id[:])
peer := newClientPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)
// Start the peer on a new thread
errCh := make(chan error, 1)
go func() {
select {
case <-server.handler.closeCh:
errCh <- p2p.DiscQuitting
case errCh <- server.handler.handle(peer):
}
}()
tp := &testPeer{
app: app,
net: net,
cpeer: peer,
}
var (
genesis = server.handler.blockchain.Genesis()
head = server.handler.blockchain.CurrentHeader()
td = server.handler.blockchain.GetTd(head.Hash(), head.Number.Uint64())
)
forkID := forkid.NewID(server.handler.blockchain.Config(), genesis, head.Number.Uint64(), head.Time)
tp.handshakeWithServer(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID)
// Ensure the connection is established or exits when any error occurs
for {
select {
case <-errCh:
return nil, nil, nil
default:
}
if peer.serving.Load() {
break
}
time.Sleep(50 * time.Millisecond)
}
closePeer := func() {
tp.cpeer.close()
tp.close()
}
return tp, closePeer, errCh
}
// testnetConfig wraps all the configurations for the test network.
type testnetConfig struct {
blocks int
protocol int
indexFn indexerCallback
simClock bool
connect bool
nopruning bool
}
func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testClient, func()) {
var (
sdb = rawdb.NewMemoryDatabase()
cdb = rawdb.NewMemoryDatabase()
speers = newServerPeerSet()
)
var clock mclock.Clock = &mclock.System{}
if config.simClock {
clock = &mclock.Simulated{}
}
dist := newRequestDistributor(speers, clock)
rm := newRetrieveManager(speers, dist, func() time.Duration { return time.Millisecond * 500 })
odr := NewLesOdr(cdb, light.TestClientIndexerConfig, speers, rm)
sindexers := testIndexers(sdb, nil, light.TestServerIndexerConfig, true)
cIndexers := testIndexers(cdb, odr, light.TestClientIndexerConfig, config.nopruning)
scIndexer, sbIndexer, sbtIndexer := sindexers[0], sindexers[1], sindexers[2]
ccIndexer, cbIndexer, cbtIndexer := cIndexers[0], cIndexers[1], cIndexers[2]
odr.SetIndexers(ccIndexer, cbIndexer, cbtIndexer)
server, b, serverClose := newTestServerHandler(config.blocks, sindexers, sdb, clock)
client, clientClose := newTestClientHandler(b, odr, cIndexers, cdb, speers)
scIndexer.Start(server.blockchain)
sbIndexer.Start(server.blockchain)
ccIndexer.Start(client.backend.blockchain)
cbIndexer.Start(client.backend.blockchain)
if config.indexFn != nil {
config.indexFn(scIndexer, sbIndexer, sbtIndexer)
}
var (
err error
speer, cpeer *testPeer
)
if config.connect {
done := make(chan struct{})
cpeer, speer, err = newTestPeerPair("peer", config.protocol, server, client, false)
if err != nil {
t.Fatalf("Failed to connect testing peers %v", err)
}
select {
case <-done:
case <-time.After(10 * time.Second):
t.Fatal("test peer did not connect and sync within 3s")
}
}
s := &testServer{
clock: clock,
backend: b,
db: sdb,
peer: cpeer,
handler: server,
chtIndexer: scIndexer,
bloomIndexer: sbIndexer,
bloomTrieIndexer: sbtIndexer,
}
c := &testClient{
clock: clock,
db: cdb,
peer: speer,
handler: client,
chtIndexer: ccIndexer,
bloomIndexer: cbIndexer,
bloomTrieIndexer: cbtIndexer,
}
teardown := func() {
if config.connect {
speer.close()
cpeer.close()
cpeer.cpeer.close()
speer.speer.close()
}
ccIndexer.Close()
cbIndexer.Close()
scIndexer.Close()
sbIndexer.Close()
dist.close()
serverClose()
b.Close()
clientClose()
}
return s, c, teardown
}
// NewFuzzerPeer creates a client peer for test purposes, and also returns
// a function to close the peer: this is needed to avoid goroutine leaks in the
// exec queue.
func NewFuzzerPeer(version int) (p *clientPeer, closer func()) {
p = newClientPeer(version, 0, p2p.NewPeer(enode.ID{}, "", nil), nil)
return p, func() { p.peerCommons.close() }
}

@@ -1,179 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"context"
"math/rand"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
type lesTxRelay struct {
txSent map[common.Hash]*types.Transaction
txPending map[common.Hash]struct{}
peerList []*serverPeer
peerStartPos int
lock sync.Mutex
stop chan struct{}
retriever *retrieveManager
}
func newLesTxRelay(ps *serverPeerSet, retriever *retrieveManager) *lesTxRelay {
r := &lesTxRelay{
txSent: make(map[common.Hash]*types.Transaction),
txPending: make(map[common.Hash]struct{}),
retriever: retriever,
stop: make(chan struct{}),
}
ps.subscribe(r)
return r
}
func (ltrx *lesTxRelay) Stop() {
close(ltrx.stop)
}
func (ltrx *lesTxRelay) registerPeer(p *serverPeer) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
// Short circuit if the peer is announce-only.
if p.onlyAnnounce {
return
}
ltrx.peerList = append(ltrx.peerList, p)
}
func (ltrx *lesTxRelay) unregisterPeer(p *serverPeer) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
for i, peer := range ltrx.peerList {
if peer == p {
// Remove from the peer list
ltrx.peerList = append(ltrx.peerList[:i], ltrx.peerList[i+1:]...)
return
}
}
}
// send sends a list of transactions to at most a given number of peers.
func (ltrx *lesTxRelay) send(txs types.Transactions, count int) {
sendTo := make(map[*serverPeer]types.Transactions)
ltrx.peerStartPos++ // rotate the starting position of the peer list
if ltrx.peerStartPos >= len(ltrx.peerList) {
ltrx.peerStartPos = 0
}
for _, tx := range txs {
hash := tx.Hash()
_, ok := ltrx.txSent[hash]
if !ok {
ltrx.txSent[hash] = tx
ltrx.txPending[hash] = struct{}{}
}
if len(ltrx.peerList) > 0 {
cnt := count
pos := ltrx.peerStartPos
for {
peer := ltrx.peerList[pos]
sendTo[peer] = append(sendTo[peer], tx)
cnt--
if cnt == 0 {
break // sent it to the desired number of peers
}
pos++
if pos == len(ltrx.peerList) {
pos = 0
}
if pos == ltrx.peerStartPos {
break // tried all available peers
}
}
}
}
for p, list := range sendTo {
pp := p
ll := list
enc, _ := rlp.EncodeToBytes(ll)
reqID := rand.Uint64()
rq := &distReq{
getCost: func(dp distPeer) uint64 {
peer := dp.(*serverPeer)
return peer.getTxRelayCost(len(ll), len(enc))
},
canSend: func(dp distPeer) bool {
return !dp.(*serverPeer).onlyAnnounce && dp.(*serverPeer) == pp
},
request: func(dp distPeer) func() {
peer := dp.(*serverPeer)
cost := peer.getTxRelayCost(len(ll), len(enc))
peer.fcServer.QueuedRequest(reqID, cost)
return func() { peer.sendTxs(reqID, len(ll), enc) }
},
}
go ltrx.retriever.retrieve(context.Background(), reqID, rq, func(p distPeer, msg *Msg) error { return nil }, ltrx.stop)
}
}
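// Note (added for illustration): peerStartPos is advanced on every call, so with
// peers [A, B, C] and count=2, successive sends start from rotating positions
// (B,C then C,A then A,B), spreading the relay load across the peer list.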
func (ltrx *lesTxRelay) Send(txs types.Transactions) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
ltrx.send(txs, 3)
}
func (ltrx *lesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
for _, hash := range mined {
delete(ltrx.txPending, hash)
}
for _, hash := range rollback {
ltrx.txPending[hash] = struct{}{}
}
if len(ltrx.txPending) > 0 {
txs := make(types.Transactions, len(ltrx.txPending))
i := 0
for hash := range ltrx.txPending {
txs[i] = ltrx.txSent[hash]
i++
}
ltrx.send(txs, 1)
}
}
func (ltrx *lesTxRelay) Discard(hashes []common.Hash) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
for _, hash := range hashes {
delete(ltrx.txSent, hash)
delete(ltrx.txPending, hash)
}
}

@@ -1,105 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import "sync"
// ExecQueue implements a queue that executes function calls in a single thread,
// in the same order as they have been queued.
type ExecQueue struct {
mu sync.Mutex
cond *sync.Cond
funcs []func()
closeWait chan struct{}
}
// NewExecQueue creates a new execution queue.
func NewExecQueue(capacity int) *ExecQueue {
q := &ExecQueue{funcs: make([]func(), 0, capacity)}
q.cond = sync.NewCond(&q.mu)
go q.loop()
return q
}
func (q *ExecQueue) loop() {
for f := q.waitNext(false); f != nil; f = q.waitNext(true) {
f()
}
close(q.closeWait)
}
func (q *ExecQueue) waitNext(drop bool) (f func()) {
q.mu.Lock()
if drop && len(q.funcs) > 0 {
// Remove the function that just executed. We do this here instead of when
// dequeuing so len(q.funcs) includes the function that is running.
q.funcs = append(q.funcs[:0], q.funcs[1:]...)
}
for !q.isClosed() {
if len(q.funcs) > 0 {
f = q.funcs[0]
break
}
q.cond.Wait()
}
q.mu.Unlock()
return f
}
func (q *ExecQueue) isClosed() bool {
return q.closeWait != nil
}
// CanQueue returns true if more function calls can be added to the execution queue.
func (q *ExecQueue) CanQueue() bool {
q.mu.Lock()
ok := !q.isClosed() && len(q.funcs) < cap(q.funcs)
q.mu.Unlock()
return ok
}
// Queue adds a function call to the execution queue. Returns true if successful.
func (q *ExecQueue) Queue(f func()) bool {
q.mu.Lock()
ok := !q.isClosed() && len(q.funcs) < cap(q.funcs)
if ok {
q.funcs = append(q.funcs, f)
q.cond.Signal()
}
q.mu.Unlock()
return ok
}
// Clear drops all queued functions.
func (q *ExecQueue) Clear() {
q.mu.Lock()
q.funcs = q.funcs[:0]
q.mu.Unlock()
}
// Quit stops the exec queue.
//
// Quit waits for the current execution to finish before returning.
func (q *ExecQueue) Quit() {
q.mu.Lock()
if !q.isClosed() {
q.closeWait = make(chan struct{})
q.cond.Signal()
}
q.mu.Unlock()
<-q.closeWait
}
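// exampleExecQueueUsage is illustrative only (not part of the original file):
// calls queued on an ExecQueue run one at a time, in FIFO order, on a single
// background goroutine.
func exampleExecQueueUsage() {
	q := NewExecQueue(16)
	defer q.Quit()
	if q.CanQueue() {
		q.Queue(func() {
			// runs on the queue's goroutine, after all previously queued calls
		})
	}
}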

@@ -1,60 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import "testing"
func TestExecQueue(t *testing.T) {
var (
N = 10000
q = NewExecQueue(N)
counter int
execd = make(chan int)
testexit = make(chan struct{})
)
defer q.Quit()
defer close(testexit)
check := func(state string, wantOK bool) {
c := counter
counter++
qf := func() {
select {
case execd <- c:
case <-testexit:
}
}
if q.CanQueue() != wantOK {
t.Fatalf("CanQueue() == %t for %s", !wantOK, state)
}
if q.Queue(qf) != wantOK {
t.Fatalf("Queue() == %t for %s", !wantOK, state)
}
}
for i := 0; i < N; i++ {
check("queue below cap", true)
}
check("full queue", false)
for i := 0; i < N; i++ {
if c := <-execd; c != i {
t.Fatal("execution out of order")
}
}
q.Quit()
check("closed queue", false)
}

@@ -1,270 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"math"
"sync"
"github.com/ethereum/go-ethereum/common/mclock"
)
// ExpiredValue is a scalar value that is continuously expired (decreased
// exponentially) based on the provided logarithmic expiration offset value.
//
// The formula for value calculation is: base*2^(exp-logOffset). In order to
// simplify the calculation of ExpiredValue, its value is expressed in the form
// of an exponent with a base of 2.
//
// There is also a trick that avoids a lot of calculations. In theory, when a value X
// decays over time and then a new value Y is added, the result should be
// X*2^(exp-logOffset)+Y, which is hard to represent in memory directly.
// The trick is to use inflation instead of exponential decay: at the moment of the
// addition the stored value becomes X*2^exp + Y*2^logOffset(t1), and the exponential
// decay is only applied when the value is actually read.
//
// e.g.
// t0: V = 100
// t1: add 30; the inflated stored value is 100 + 30/0.3 = 200, where 0.3 is the
//     decay coefficient at t1
// t2: read the value; the decay coefficient is now 0.2, so the result is 200*0.2 = 40
type ExpiredValue struct {
Base, Exp uint64 // rlp encoding works by default
}
// ExpirationFactor is calculated from logOffset. 1 <= Factor < 2 and Factor*2^Exp
// describes the multiplier applicable for additions and the divider for readouts.
// If logOffset changes slowly, caching this intermediate form for some time saves
// the expensive recalculation on every addition and readout.
// It is also useful for structures where multiple values are expired with the same
// Expirer.
type ExpirationFactor struct {
Exp uint64
Factor float64
}
// ExpFactor calculates ExpirationFactor based on logOffset
func ExpFactor(logOffset Fixed64) ExpirationFactor {
return ExpirationFactor{Exp: logOffset.ToUint64(), Factor: logOffset.Fraction().Pow2()}
}
// Value calculates the expired value based on a floating point base and integer
// power-of-2 exponent. This function should be used by multi-value expired structures.
func (e ExpirationFactor) Value(base float64, exp uint64) float64 {
return base / e.Factor * math.Pow(2, float64(int64(exp-e.Exp)))
}
// Value calculates the value at the given moment.
func (e ExpiredValue) Value(logOffset Fixed64) uint64 {
offset := Uint64ToFixed64(e.Exp) - logOffset
return uint64(float64(e.Base) * offset.Pow2())
}
// Add adds a signed value at the given moment
func (e *ExpiredValue) Add(amount int64, logOffset Fixed64) int64 {
integer, frac := logOffset.ToUint64(), logOffset.Fraction()
factor := frac.Pow2()
base := factor * float64(amount)
if integer < e.Exp {
base /= math.Pow(2, float64(e.Exp-integer))
}
if integer > e.Exp {
e.Base >>= (integer - e.Exp)
e.Exp = integer
}
if base >= 0 || uint64(-base) <= e.Base {
// The conversion from negative float64 to
// uint64 is undefined in golang, and doesn't
// work with ARMv8. More details at:
// https://github.com/golang/go/issues/43047
if base >= 0 {
e.Base += uint64(base)
} else {
e.Base -= uint64(-base)
}
return amount
}
net := int64(-float64(e.Base) / factor)
e.Base = 0
return net
}
// AddExp adds another ExpiredValue
func (e *ExpiredValue) AddExp(a ExpiredValue) {
if e.Exp > a.Exp {
a.Base >>= (e.Exp - a.Exp)
}
if e.Exp < a.Exp {
e.Base >>= (a.Exp - e.Exp)
e.Exp = a.Exp
}
e.Base += a.Base
}
// SubExp subtracts another ExpiredValue
func (e *ExpiredValue) SubExp(a ExpiredValue) {
if e.Exp > a.Exp {
a.Base >>= (e.Exp - a.Exp)
}
if e.Exp < a.Exp {
e.Base >>= (a.Exp - e.Exp)
e.Exp = a.Exp
}
if e.Base > a.Base {
e.Base -= a.Base
} else {
e.Base = 0
}
}
// IsZero returns true if the value is zero
func (e *ExpiredValue) IsZero() bool {
return e.Base == 0
}
// LinearExpiredValue is very similar to ExpiredValue in that the value
// continuously expires, but the difference is that it expires linearly.
type LinearExpiredValue struct {
Offset uint64 // The latest time offset
Val uint64 // The remaining value, can never be negative
Rate mclock.AbsTime `rlp:"-"` // Expiration rate (nanoseconds per expired unit), ignored by RLP
}
// Value calculates the value at the given moment. It assumes that the given
// timestamp is never less than the recorded one.
func (e LinearExpiredValue) Value(now mclock.AbsTime) uint64 {
offset := uint64(now / e.Rate)
if e.Offset < offset {
diff := offset - e.Offset
if e.Val >= diff {
e.Val -= diff
} else {
e.Val = 0
}
}
return e.Val
}
// Add adds a signed value at the given moment. It assumes that the given
// timestamp is never less than the recorded one.
func (e *LinearExpiredValue) Add(amount int64, now mclock.AbsTime) uint64 {
offset := uint64(now / e.Rate)
if e.Offset < offset {
diff := offset - e.Offset
if e.Val >= diff {
e.Val -= diff
} else {
e.Val = 0
}
e.Offset = offset
}
if amount < 0 && uint64(-amount) > e.Val {
e.Val = 0
} else {
e.Val = uint64(int64(e.Val) + amount)
}
return e.Val
}
// ValueExpirer controls value expiration rate
type ValueExpirer interface {
SetRate(now mclock.AbsTime, rate float64)
SetLogOffset(now mclock.AbsTime, logOffset Fixed64)
LogOffset(now mclock.AbsTime) Fixed64
}
// Expirer changes logOffset with a linear rate which can be changed during operation.
// It is not thread safe, if access by multiple goroutines is needed then it should be
// encapsulated into a locked structure.
// Note that if neither SetRate nor SetLogOffset are used during operation then LogOffset
// is thread safe.
type Expirer struct {
lock sync.RWMutex
logOffset Fixed64
rate float64
lastUpdate mclock.AbsTime
}
// SetRate changes the expiration rate which is the inverse of the time constant in
// nanoseconds.
func (e *Expirer) SetRate(now mclock.AbsTime, rate float64) {
e.lock.Lock()
defer e.lock.Unlock()
dt := now - e.lastUpdate
if dt > 0 {
e.logOffset += Fixed64(logToFixedFactor * float64(dt) * e.rate)
}
e.lastUpdate = now
e.rate = rate
}
// SetLogOffset sets logOffset instantly.
func (e *Expirer) SetLogOffset(now mclock.AbsTime, logOffset Fixed64) {
e.lock.Lock()
defer e.lock.Unlock()
e.lastUpdate = now
e.logOffset = logOffset
}
// LogOffset returns the current logarithmic offset.
func (e *Expirer) LogOffset(now mclock.AbsTime) Fixed64 {
e.lock.RLock()
defer e.lock.RUnlock()
dt := now - e.lastUpdate
if dt <= 0 {
return e.logOffset
}
return e.logOffset + Fixed64(logToFixedFactor*float64(dt)*e.rate)
}
// fixedFactor is the fixed point multiplier factor used by Fixed64.
const fixedFactor = 0x1000000
// Fixed64 implements 64-bit fixed point arithmetic functions.
type Fixed64 int64
// Uint64ToFixed64 converts uint64 integer to Fixed64 format.
func Uint64ToFixed64(f uint64) Fixed64 {
return Fixed64(f * fixedFactor)
}
// Float64ToFixed64 converts float64 to Fixed64 format.
func Float64ToFixed64(f float64) Fixed64 {
return Fixed64(f * fixedFactor)
}
// ToUint64 converts Fixed64 format to uint64.
func (f64 Fixed64) ToUint64() uint64 {
return uint64(f64) / fixedFactor
}
// Fraction returns the fractional part of a Fixed64 value.
func (f64 Fixed64) Fraction() Fixed64 {
return f64 % fixedFactor
}
var (
logToFixedFactor = float64(fixedFactor) / math.Log(2)
fixedToLogFactor = math.Log(2) / float64(fixedFactor)
)
// Pow2 returns the base 2 power of the fixed point value.
func (f64 Fixed64) Pow2() float64 {
return math.Exp(float64(f64) * fixedToLogFactor)
}
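
A worked example of the expiration arithmetic above (illustrative only; each unit
of logOffset corresponds to one halving period):

	var v ExpiredValue
	v.Add(128, Uint64ToFixed64(0))  // t=0: stored value is 128
	_ = v.Value(Uint64ToFixed64(1)) // t=1: 128 * 2^-1 = 64
	v.Add(64, Uint64ToFixed64(1))   // t=1: decayed 64 plus the new 64 = 128
	_ = v.Value(Uint64ToFixed64(2)) // t=2: 128 * 2^-1 = 64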

@ -1,195 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"testing"
"github.com/ethereum/go-ethereum/common/mclock"
)
func TestValueExpiration(t *testing.T) {
var cases = []struct {
input ExpiredValue
timeOffset Fixed64
expect uint64
}{
{ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 128},
{ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(1), 64},
{ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(2), 32},
{ExpiredValue{Base: 128, Exp: 2}, Uint64ToFixed64(2), 128},
{ExpiredValue{Base: 128, Exp: 2}, Uint64ToFixed64(3), 64},
}
for _, c := range cases {
if got := c.input.Value(c.timeOffset); got != c.expect {
t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, got)
}
}
}
func TestValueAddition(t *testing.T) {
var cases = []struct {
input ExpiredValue
addend int64
timeOffset Fixed64
expect uint64
expectNet int64
}{
// Addition
{ExpiredValue{Base: 128, Exp: 0}, 128, Uint64ToFixed64(0), 256, 128},
{ExpiredValue{Base: 128, Exp: 2}, 128, Uint64ToFixed64(0), 640, 128},
// Addition with offset
{ExpiredValue{Base: 128, Exp: 0}, 128, Uint64ToFixed64(1), 192, 128},
{ExpiredValue{Base: 128, Exp: 2}, 128, Uint64ToFixed64(1), 384, 128},
{ExpiredValue{Base: 128, Exp: 2}, 128, Uint64ToFixed64(3), 192, 128},
// Subtraction
{ExpiredValue{Base: 128, Exp: 0}, -64, Uint64ToFixed64(0), 64, -64},
{ExpiredValue{Base: 128, Exp: 0}, -128, Uint64ToFixed64(0), 0, -128},
{ExpiredValue{Base: 128, Exp: 0}, -192, Uint64ToFixed64(0), 0, -128},
// Subtraction with offset
{ExpiredValue{Base: 128, Exp: 0}, -64, Uint64ToFixed64(1), 0, -64},
{ExpiredValue{Base: 128, Exp: 0}, -128, Uint64ToFixed64(1), 0, -64},
{ExpiredValue{Base: 128, Exp: 2}, -128, Uint64ToFixed64(1), 128, -128},
{ExpiredValue{Base: 128, Exp: 2}, -128, Uint64ToFixed64(2), 0, -128},
}
for _, c := range cases {
if net := c.input.Add(c.addend, c.timeOffset); net != c.expectNet {
t.Fatalf("Net amount mismatch, want=%d, got=%d", c.expectNet, net)
}
if got := c.input.Value(c.timeOffset); got != c.expect {
t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, got)
}
}
}
func TestExpiredValueAddition(t *testing.T) {
var cases = []struct {
input ExpiredValue
another ExpiredValue
timeOffset Fixed64
expect uint64
}{
{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 256},
{ExpiredValue{Base: 128, Exp: 1}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 384},
{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 1}, Uint64ToFixed64(0), 384},
{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(1), 128},
}
for _, c := range cases {
c.input.AddExp(c.another)
if got := c.input.Value(c.timeOffset); got != c.expect {
t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, got)
}
}
}
func TestExpiredValueSubtraction(t *testing.T) {
var cases = []struct {
input ExpiredValue
another ExpiredValue
timeOffset Fixed64
expect uint64
}{
{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 0},
{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 1}, Uint64ToFixed64(0), 0},
{ExpiredValue{Base: 128, Exp: 1}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 128},
{ExpiredValue{Base: 128, Exp: 1}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(1), 64},
}
for _, c := range cases {
c.input.SubExp(c.another)
if got := c.input.Value(c.timeOffset); got != c.expect {
t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, got)
}
}
}
func TestLinearExpiredValue(t *testing.T) {
var cases = []struct {
value LinearExpiredValue
now mclock.AbsTime
expect uint64
}{
{LinearExpiredValue{
Offset: 0,
Val: 0,
Rate: mclock.AbsTime(1),
}, 0, 0},
{LinearExpiredValue{
Offset: 1,
Val: 1,
Rate: mclock.AbsTime(1),
}, 0, 1},
{LinearExpiredValue{
Offset: 1,
Val: 1,
Rate: mclock.AbsTime(1),
}, mclock.AbsTime(2), 0},
{LinearExpiredValue{
Offset: 1,
Val: 1,
Rate: mclock.AbsTime(1),
}, mclock.AbsTime(3), 0},
}
for _, c := range cases {
if value := c.value.Value(c.now); value != c.expect {
t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, value)
}
}
}
func TestLinearExpiredAddition(t *testing.T) {
var cases = []struct {
value LinearExpiredValue
amount int64
now mclock.AbsTime
expect uint64
}{
{LinearExpiredValue{
Offset: 0,
Val: 0,
Rate: mclock.AbsTime(1),
}, -1, 0, 0},
{LinearExpiredValue{
Offset: 1,
Val: 1,
Rate: mclock.AbsTime(1),
}, -1, 0, 0},
{LinearExpiredValue{
Offset: 1,
Val: 2,
Rate: mclock.AbsTime(1),
}, -1, mclock.AbsTime(2), 0},
{LinearExpiredValue{
Offset: 1,
Val: 2,
Rate: mclock.AbsTime(1),
}, -2, mclock.AbsTime(2), 0},
}
for _, c := range cases {
if value := c.value.Add(c.amount, c.now); value != c.expect {
t.Fatalf("Value mismatch, want=%d, got=%d", c.expect, value)
}
}
}

@ -1,398 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"sync"
"github.com/ethereum/go-ethereum/p2p/enode"
"golang.org/x/exp/slices"
)
const maxSelectionWeight = 1000000000 // maximum selection weight of each individual node/address group
// Limiter protects a network request serving mechanism from denial-of-service attacks.
// It limits the total amount of resources used for serving requests while ensuring that
// the most valuable connections always have a reasonable chance of being served.
type Limiter struct {
lock sync.Mutex
cond *sync.Cond
quit bool
nodes map[enode.ID]*nodeQueue
addresses map[string]*addressGroup
addressSelect, valueSelect *WeightedRandomSelect
maxValue float64
maxCost, sumCost, sumCostLimit uint
selectAddressNext bool
}
// nodeQueue represents queued requests coming from a single node ID
type nodeQueue struct {
queue []request // always nil if penaltyCost != 0
id enode.ID
address string
value float64
flatWeight, valueWeight uint64 // current selection weights in the address/value selectors
sumCost uint // summed cost of requests queued by the node
penaltyCost uint // cumulative cost of dropped requests since last processed request
groupIndex int
}
// addressGroup is a group of node IDs that have sent their last requests from the same
// network address
type addressGroup struct {
nodes []*nodeQueue
nodeSelect *WeightedRandomSelect
sumFlatWeight, groupWeight uint64
}
// request represents an incoming request scheduled for processing
type request struct {
process chan chan struct{}
cost uint
}
// flatWeight distributes weights equally between each active network address
func flatWeight(item interface{}) uint64 { return item.(*nodeQueue).flatWeight }
// add adds the node queue to the address group. It is the caller's responsibility to
// add the address group to the address map and the address selector if it wasn't
// there before.
func (ag *addressGroup) add(nq *nodeQueue) {
if nq.groupIndex != -1 {
panic("added node queue is already in an address group")
}
l := len(ag.nodes)
nq.groupIndex = l
ag.nodes = append(ag.nodes, nq)
ag.sumFlatWeight += nq.flatWeight
ag.groupWeight = ag.sumFlatWeight / uint64(l+1)
ag.nodeSelect.Update(ag.nodes[l])
}
// update updates the selection weight of the node queue inside the address group.
// It is the caller's responsibility to update the group's selection weight in the
// address selector.
func (ag *addressGroup) update(nq *nodeQueue, weight uint64) {
if nq.groupIndex == -1 || nq.groupIndex >= len(ag.nodes) || ag.nodes[nq.groupIndex] != nq {
panic("updated node queue is not in this address group")
}
ag.sumFlatWeight += weight - nq.flatWeight
nq.flatWeight = weight
ag.groupWeight = ag.sumFlatWeight / uint64(len(ag.nodes))
ag.nodeSelect.Update(nq)
}
// remove removes the node queue from the address group. It is the caller's responsibility
// to remove the address group from the address map if it is empty.
func (ag *addressGroup) remove(nq *nodeQueue) {
if nq.groupIndex == -1 || nq.groupIndex >= len(ag.nodes) || ag.nodes[nq.groupIndex] != nq {
panic("removed node queue is not in this address group")
}
l := len(ag.nodes) - 1
if nq.groupIndex != l {
ag.nodes[nq.groupIndex] = ag.nodes[l]
ag.nodes[nq.groupIndex].groupIndex = nq.groupIndex
}
nq.groupIndex = -1
ag.nodes = ag.nodes[:l]
ag.sumFlatWeight -= nq.flatWeight
if l >= 1 {
ag.groupWeight = ag.sumFlatWeight / uint64(l)
} else {
ag.groupWeight = 0
}
ag.nodeSelect.Remove(nq)
}
// choose selects one of the node queues belonging to the address group
func (ag *addressGroup) choose() *nodeQueue {
return ag.nodeSelect.Choose().(*nodeQueue)
}
// NewLimiter creates a new Limiter
func NewLimiter(sumCostLimit uint) *Limiter {
l := &Limiter{
addressSelect: NewWeightedRandomSelect(func(item interface{}) uint64 { return item.(*addressGroup).groupWeight }),
valueSelect: NewWeightedRandomSelect(func(item interface{}) uint64 { return item.(*nodeQueue).valueWeight }),
nodes: make(map[enode.ID]*nodeQueue),
addresses: make(map[string]*addressGroup),
sumCostLimit: sumCostLimit,
}
l.cond = sync.NewCond(&l.lock)
go l.processLoop()
return l
}
// selectionWeights calculates the selection weights of a node for both the address and
// the value selector. The selection weight depends on the next request cost or the
// summed cost of recently dropped requests.
func (l *Limiter) selectionWeights(reqCost uint, value float64) (flatWeight, valueWeight uint64) {
if value > l.maxValue {
l.maxValue = value
}
if value > 0 {
// normalize value to <= 1
value /= l.maxValue
}
if reqCost > l.maxCost {
l.maxCost = reqCost
}
relCost := float64(reqCost) / float64(l.maxCost)
var f float64
if relCost <= 0.001 {
f = 1
} else {
f = 0.001 / relCost
}
f *= maxSelectionWeight
flatWeight, valueWeight = uint64(f), uint64(f*value)
if flatWeight == 0 {
flatWeight = 1
}
return
}
// Add adds a new request to the node queue belonging to the given id. Value is the
// service value attributed to the requesting node. A higher value gives the request
// a better chance of being served quickly in case of heavy load or a DDoS attack.
// Cost is a rough estimate of the serving cost of the request; a lower cost also
// gives the request a better chance.
func (l *Limiter) Add(id enode.ID, address string, value float64, reqCost uint) chan chan struct{} {
l.lock.Lock()
defer l.lock.Unlock()
process := make(chan chan struct{}, 1)
if l.quit {
close(process)
return process
}
if reqCost == 0 {
reqCost = 1
}
if nq, ok := l.nodes[id]; ok {
if nq.queue != nil {
nq.queue = append(nq.queue, request{process, reqCost})
nq.sumCost += reqCost
nq.value = value
if address != nq.address {
// known id sending request from a new address, move to different address group
l.removeFromGroup(nq)
l.addToGroup(nq, address)
}
} else {
// already waiting on a penalty, just add to the penalty cost and drop the request
nq.penaltyCost += reqCost
l.update(nq)
close(process)
return process
}
} else {
nq := &nodeQueue{
queue: []request{{process, reqCost}},
id: id,
value: value,
sumCost: reqCost,
groupIndex: -1,
}
nq.flatWeight, nq.valueWeight = l.selectionWeights(reqCost, value)
if len(l.nodes) == 0 {
l.cond.Signal()
}
l.nodes[id] = nq
if nq.valueWeight != 0 {
l.valueSelect.Update(nq)
}
l.addToGroup(nq, address)
}
l.sumCost += reqCost
if l.sumCost > l.sumCostLimit {
l.dropRequests()
}
return process
}
// update updates the selection weights of the node queue
func (l *Limiter) update(nq *nodeQueue) {
var cost uint
if nq.queue != nil {
cost = nq.queue[0].cost
} else {
cost = nq.penaltyCost
}
flatWeight, valueWeight := l.selectionWeights(cost, nq.value)
ag := l.addresses[nq.address]
ag.update(nq, flatWeight)
l.addressSelect.Update(ag)
nq.valueWeight = valueWeight
l.valueSelect.Update(nq)
}
// addToGroup adds the node queue to the given address group. The group is created if
// it does not exist yet.
func (l *Limiter) addToGroup(nq *nodeQueue, address string) {
nq.address = address
ag := l.addresses[address]
if ag == nil {
ag = &addressGroup{nodeSelect: NewWeightedRandomSelect(flatWeight)}
l.addresses[address] = ag
}
ag.add(nq)
l.addressSelect.Update(ag)
}
// removeFromGroup removes the node queue from its address group
func (l *Limiter) removeFromGroup(nq *nodeQueue) {
ag := l.addresses[nq.address]
ag.remove(nq)
if len(ag.nodes) == 0 {
delete(l.addresses, nq.address)
}
l.addressSelect.Update(ag)
}
// remove removes the node queue from its address group, the nodes map and the value
// selector
func (l *Limiter) remove(nq *nodeQueue) {
l.removeFromGroup(nq)
if nq.valueWeight != 0 {
l.valueSelect.Remove(nq)
}
delete(l.nodes, nq.id)
}
// choose selects the next node queue to process.
func (l *Limiter) choose() *nodeQueue {
if l.valueSelect.IsEmpty() || l.selectAddressNext {
if ag, ok := l.addressSelect.Choose().(*addressGroup); ok {
l.selectAddressNext = false
return ag.choose()
}
}
nq, _ := l.valueSelect.Choose().(*nodeQueue)
l.selectAddressNext = true
return nq
}
// processLoop processes requests sequentially
func (l *Limiter) processLoop() {
l.lock.Lock()
defer l.lock.Unlock()
for {
if l.quit {
for _, nq := range l.nodes {
for _, request := range nq.queue {
close(request.process)
}
}
return
}
nq := l.choose()
if nq == nil {
l.cond.Wait()
continue
}
if nq.queue != nil {
request := nq.queue[0]
nq.queue = nq.queue[1:]
nq.sumCost -= request.cost
l.sumCost -= request.cost
l.lock.Unlock()
ch := make(chan struct{})
request.process <- ch
<-ch
l.lock.Lock()
if len(nq.queue) > 0 {
l.update(nq)
} else {
l.remove(nq)
}
} else {
// penalized queue removed, next request will be added to a clean queue
l.remove(nq)
}
}
}
// Stop stops the processing loop. All queued and future requests are rejected.
func (l *Limiter) Stop() {
l.lock.Lock()
defer l.lock.Unlock()
l.quit = true
l.cond.Signal()
}
type dropListItem struct {
nq *nodeQueue
priority float64
}
// dropRequests selects the nodes with the highest ratio of queued request cost to
// selection weight and drops their queued requests. The emptied node queues stay in
// the selectors with a low selection weight in order to penalize these nodes.
func (l *Limiter) dropRequests() {
var (
sumValue float64
list []dropListItem
)
for _, nq := range l.nodes {
sumValue += nq.value
}
for _, nq := range l.nodes {
if nq.sumCost == 0 {
continue
}
w := 1 / float64(len(l.addresses)*len(l.addresses[nq.address].nodes))
if sumValue > 0 {
w += nq.value / sumValue
}
list = append(list, dropListItem{
nq: nq,
priority: w / float64(nq.sumCost),
})
}
slices.SortFunc(list, func(a, b dropListItem) int {
if a.priority < b.priority {
return -1
}
if a.priority > b.priority {
return 1
}
return 0
})
for _, item := range list {
for _, request := range item.nq.queue {
close(request.process)
}
// make the queue penalized; no more requests are accepted until the node is
// selected based on the penalty cost which is the cumulative cost of all dropped
// requests. This ensures that sending excess requests is always penalized
// and incentivizes the sender to stop for a while if no replies are received.
item.nq.queue = nil
item.nq.penaltyCost = item.nq.sumCost
l.sumCost -= item.nq.sumCost // penalty costs are not counted in sumCost
item.nq.sumCost = 0
l.update(item.nq)
if l.sumCost <= l.sumCostLimit/2 {
return
}
}
}
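
A minimal serving-side sketch of the channel protocol above (illustrative only;
serveOne and its parameters are assumed helpers, not part of the removed file):

	func serveOne(l *Limiter, id enode.ID, addr string, value float64, cost uint) bool {
		ch := <-l.Add(id, addr, value, cost)
		if ch == nil {
			return false // dropped: the node is penalized or the limiter has stopped
		}
		// ... serve the request here ...
		close(ch) // let the processing loop move on to the next request
		return true
	}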

@ -1,206 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"crypto/rand"
"testing"
"github.com/ethereum/go-ethereum/p2p/enode"
)
const (
ltTolerance = 0.03
ltRounds = 7
)
type (
ltNode struct {
addr, id int
value, exp float64
cost uint
reqRate float64
reqMax, runCount int
lastTotalCost uint
served, dropped int
}
ltResult struct {
node *ltNode
ch chan struct{}
}
limTest struct {
limiter *Limiter
results chan ltResult
runCount int
expCost, totalCost uint
}
)
func (lt *limTest) request(n *ltNode) {
var (
address string
id enode.ID
)
if n.addr >= 0 {
address = string([]byte{byte(n.addr)})
} else {
var b [32]byte
rand.Read(b[:])
address = string(b[:])
}
if n.id >= 0 {
id = enode.ID{byte(n.id)}
} else {
rand.Read(id[:])
}
lt.runCount++
n.runCount++
cch := lt.limiter.Add(id, address, n.value, n.cost)
go func() {
lt.results <- ltResult{n, <-cch}
}()
}
func (lt *limTest) moreRequests(n *ltNode) {
maxStart := int(float64(lt.totalCost-n.lastTotalCost) * n.reqRate)
if maxStart != 0 {
n.lastTotalCost = lt.totalCost
}
for n.reqMax > n.runCount && maxStart > 0 {
lt.request(n)
maxStart--
}
}
func (lt *limTest) process() {
res := <-lt.results
lt.runCount--
res.node.runCount--
if res.ch != nil {
res.node.served++
if res.node.exp != 0 {
lt.expCost += res.node.cost
}
lt.totalCost += res.node.cost
close(res.ch)
} else {
res.node.dropped++
}
}
func TestLimiter(t *testing.T) {
limTests := [][]*ltNode{
{ // one id from an individual address and two ids from a shared address
{addr: 0, id: 0, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.5},
{addr: 1, id: 1, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25},
{addr: 1, id: 2, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25},
},
{ // varying request costs
{addr: 0, id: 0, value: 0, cost: 10, reqRate: 0.2, reqMax: 1, exp: 0.5},
{addr: 1, id: 1, value: 0, cost: 3, reqRate: 0.5, reqMax: 1, exp: 0.25},
{addr: 1, id: 2, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25},
},
{ // different request rate
{addr: 0, id: 0, value: 0, cost: 1, reqRate: 2, reqMax: 2, exp: 0.5},
{addr: 1, id: 1, value: 0, cost: 1, reqRate: 10, reqMax: 10, exp: 0.25},
{addr: 1, id: 2, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25},
},
{ // adding value
{addr: 0, id: 0, value: 3, cost: 1, reqRate: 1, reqMax: 1, exp: (0.5 + 0.3) / 2},
{addr: 1, id: 1, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25 / 2},
{addr: 1, id: 2, value: 7, cost: 1, reqRate: 1, reqMax: 1, exp: (0.25 + 0.7) / 2},
},
{ // DoS attack from a single address with a single id
{addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 3, id: 3, value: 0, cost: 1, reqRate: 10, reqMax: 1000000000, exp: 0},
},
{ // DoS attack from a single address with different ids
{addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 3, id: -1, value: 0, cost: 1, reqRate: 1, reqMax: 1000000000, exp: 0},
},
{ // DDoS attack from different addresses with a single id
{addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: -1, id: 3, value: 0, cost: 1, reqRate: 1, reqMax: 1000000000, exp: 0},
},
{ // DDoS attack from different addresses with different ids
{addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},
{addr: -1, id: -1, value: 0, cost: 1, reqRate: 1, reqMax: 1000000000, exp: 0},
},
}
lt := &limTest{
limiter: NewLimiter(100),
results: make(chan ltResult),
}
for _, test := range limTests {
lt.expCost, lt.totalCost = 0, 0
iterCount := 10000
for j := 0; j < ltRounds; j++ {
// try to reach expected target range in multiple rounds with increasing iteration counts
last := j == ltRounds-1
for _, n := range test {
lt.request(n)
}
for i := 0; i < iterCount; i++ {
lt.process()
for _, n := range test {
lt.moreRequests(n)
}
}
for lt.runCount > 0 {
lt.process()
}
if spamRatio := 1 - float64(lt.expCost)/float64(lt.totalCost); spamRatio > 0.5*(1+ltTolerance) {
t.Errorf("Spam ratio too high (%f)", spamRatio)
}
fail, success := false, true
for _, n := range test {
if n.exp != 0 {
if n.dropped > 0 {
t.Errorf("Dropped %d requests of non-spam node", n.dropped)
fail = true
}
r := float64(n.served) * float64(n.cost) / float64(lt.expCost)
if r < n.exp*(1-ltTolerance) || r > n.exp*(1+ltTolerance) {
if last {
// print error only if the target is still not reached in the last round
t.Errorf("Request ratio (%f) does not match expected value (%f)", r, n.exp)
}
success = false
}
}
}
if fail || success {
break
}
// neither failed nor succeeded; try more iterations to reach probability targets
iterCount *= 2
}
}
lt.limiter.Stop()
}

@ -1,69 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
)
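// UpdateTimer throttles periodic updates: it records the time of the last
// accepted update and only invokes a callback again once at least a threshold
// amount of time has elapsed since then.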
type UpdateTimer struct {
clock mclock.Clock
lock sync.Mutex
last mclock.AbsTime
threshold time.Duration
}
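// NewUpdateTimer creates an update timer with the given minimum interval between
// accepted updates. It returns nil if the threshold is negative; a nil clock
// defaults to the system clock.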
func NewUpdateTimer(clock mclock.Clock, threshold time.Duration) *UpdateTimer {
// Negative update thresholds are not accepted.
if threshold < 0 {
return nil
}
// Don't panic for lazy users
if clock == nil {
clock = mclock.System{}
}
return &UpdateTimer{
clock: clock,
last: clock.Now(),
threshold: threshold,
}
}
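// Update invokes the callback with the time elapsed since the last accepted
// update if the threshold has been reached, using the current clock time. It
// returns true if the callback ran and reported success.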
func (t *UpdateTimer) Update(callback func(diff time.Duration) bool) bool {
return t.UpdateAt(t.clock.Now(), callback)
}
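// UpdateAt is like Update but takes an explicit timestamp. The callback receives
// the non-negative time elapsed since the last accepted update; if it returns
// true, the timer is reset to the given time.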
func (t *UpdateTimer) UpdateAt(at mclock.AbsTime, callback func(diff time.Duration) bool) bool {
t.lock.Lock()
defer t.lock.Unlock()
diff := time.Duration(at - t.last)
if diff < 0 {
diff = 0
}
if diff < t.threshold {
return false
}
if callback(diff) {
t.last = at
return true
}
return false
}

@ -1,47 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
)
func TestUpdateTimer(t *testing.T) {
timer := NewUpdateTimer(mclock.System{}, -1)
if timer != nil {
t.Fatalf("Create update timer with negative threshold")
}
sim := &mclock.Simulated{}
timer = NewUpdateTimer(sim, time.Second)
if updated := timer.Update(func(diff time.Duration) bool { return true }); updated {
t.Fatalf("Update the clock without reaching the threshold")
}
sim.Run(time.Second)
if updated := timer.Update(func(diff time.Duration) bool { return true }); !updated {
t.Fatalf("Doesn't update the clock when reaching the threshold")
}
if updated := timer.UpdateAt(sim.Now().Add(time.Second), func(diff time.Duration) bool { return true }); !updated {
t.Fatalf("Doesn't update the clock when reaching the threshold")
}
timer = NewUpdateTimer(sim, 0)
if updated := timer.Update(func(diff time.Duration) bool { return true }); !updated {
t.Fatalf("Doesn't update the clock without threshold limitaion")
}
}

@ -1,183 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"math"
"math/rand"
"github.com/ethereum/go-ethereum/log"
)
type (
// WeightedRandomSelect is capable of weighted random selection from a set of items
WeightedRandomSelect struct {
root *wrsNode
idx map[WrsItem]int
wfn WeightFn
}
WrsItem interface{}
WeightFn func(interface{}) uint64
)
// NewWeightedRandomSelect returns a new WeightedRandomSelect structure
func NewWeightedRandomSelect(wfn WeightFn) *WeightedRandomSelect {
return &WeightedRandomSelect{root: &wrsNode{maxItems: wrsBranches}, idx: make(map[WrsItem]int), wfn: wfn}
}
// Update updates an item's weight, adds it if it was non-existent or removes it if
// the new weight is zero. Note that explicitly updating decreasing weights is not necessary.
func (w *WeightedRandomSelect) Update(item WrsItem) {
w.setWeight(item, w.wfn(item))
}
// Remove removes an item from the set
func (w *WeightedRandomSelect) Remove(item WrsItem) {
w.setWeight(item, 0)
}
// IsEmpty returns true if the set is empty
func (w *WeightedRandomSelect) IsEmpty() bool {
return w.root.sumCost == 0
}
// setWeight sets an item's weight to a specific value (removes it if zero)
func (w *WeightedRandomSelect) setWeight(item WrsItem, weight uint64) {
if weight > math.MaxInt64-w.root.sumCost {
// old weight is still included in sumCost, remove and check again
w.setWeight(item, 0)
if weight > math.MaxInt64-w.root.sumCost {
log.Error("WeightedRandomSelect overflow", "sumCost", w.root.sumCost, "new weight", weight)
weight = math.MaxInt64 - w.root.sumCost
}
}
idx, ok := w.idx[item]
if ok {
w.root.setWeight(idx, weight)
if weight == 0 {
delete(w.idx, item)
}
} else {
if weight != 0 {
if w.root.itemCnt == w.root.maxItems {
// add a new level
newRoot := &wrsNode{sumCost: w.root.sumCost, itemCnt: w.root.itemCnt, level: w.root.level + 1, maxItems: w.root.maxItems * wrsBranches}
newRoot.items[0] = w.root
newRoot.weights[0] = w.root.sumCost
w.root = newRoot
}
w.idx[item] = w.root.insert(item, weight)
}
}
}
// Choose randomly selects an item from the set, with a chance proportional to its
// current weight. If the weight of the chosen element has decreased since the last
// stored value, it is returned with newWeight/oldWeight probability; otherwise its
// stored weight is updated and another selection is made.
func (w *WeightedRandomSelect) Choose() WrsItem {
for {
if w.root.sumCost == 0 {
return nil
}
val := uint64(rand.Int63n(int64(w.root.sumCost)))
choice, lastWeight := w.root.choose(val)
weight := w.wfn(choice)
if weight != lastWeight {
w.setWeight(choice, weight)
}
if weight >= lastWeight || uint64(rand.Int63n(int64(lastWeight))) < weight {
return choice
}
}
}
const wrsBranches = 8 // max number of branches in the wrsNode tree
// wrsNode is a node of a tree structure that can store WrsItems or further wrsNodes.
type wrsNode struct {
items [wrsBranches]interface{}
weights [wrsBranches]uint64
sumCost uint64
level, itemCnt, maxItems int
}
// insert recursively inserts a new item to the tree and returns the item index
func (n *wrsNode) insert(item WrsItem, weight uint64) int {
branch := 0
for n.items[branch] != nil && (n.level == 0 || n.items[branch].(*wrsNode).itemCnt == n.items[branch].(*wrsNode).maxItems) {
branch++
if branch == wrsBranches {
panic(nil)
}
}
n.itemCnt++
n.sumCost += weight
n.weights[branch] += weight
if n.level == 0 {
n.items[branch] = item
return branch
}
var subNode *wrsNode
if n.items[branch] == nil {
subNode = &wrsNode{maxItems: n.maxItems / wrsBranches, level: n.level - 1}
n.items[branch] = subNode
} else {
subNode = n.items[branch].(*wrsNode)
}
subIdx := subNode.insert(item, weight)
return subNode.maxItems*branch + subIdx
}
// setWeight updates the weight of a certain item (which should exist) and returns
// the change of the last weight value stored in the tree
func (n *wrsNode) setWeight(idx int, weight uint64) uint64 {
if n.level == 0 {
oldWeight := n.weights[idx]
n.weights[idx] = weight
diff := weight - oldWeight
n.sumCost += diff
if weight == 0 {
n.items[idx] = nil
n.itemCnt--
}
return diff
}
branchItems := n.maxItems / wrsBranches
branch := idx / branchItems
diff := n.items[branch].(*wrsNode).setWeight(idx-branch*branchItems, weight)
n.weights[branch] += diff
n.sumCost += diff
if weight == 0 {
n.itemCnt--
}
return diff
}
// choose recursively selects an item from the tree and returns it along with its weight
func (n *wrsNode) choose(val uint64) (WrsItem, uint64) {
for i, w := range n.weights {
if val < w {
if n.level == 0 {
return n.items[i].(WrsItem), n.weights[i]
}
return n.items[i].(*wrsNode).choose(val)
}
val -= w
}
panic(nil)
}
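
A minimal usage sketch of the selector above (illustrative only; wItem is an
assumed item type, not part of the removed file):

	type wItem struct{ weight uint64 }

	wfn := func(i interface{}) uint64 { return i.(*wItem).weight }
	s := NewWeightedRandomSelect(wfn)
	a, b := &wItem{weight: 1}, &wItem{weight: 3}
	s.Update(a)
	s.Update(b)
	c := s.Choose() // returns b about three times as often as a; nil when empty
	_ = c
	s.Remove(a)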

@ -1,68 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"math/rand"
"testing"
)
type testWrsItem struct {
idx int
widx *int
}
func testWeight(i interface{}) uint64 {
t := i.(*testWrsItem)
w := *t.widx
if w == -1 || w == t.idx {
return uint64(t.idx + 1)
}
return 0
}
func TestWeightedRandomSelect(t *testing.T) {
testFn := func(cnt int) {
s := NewWeightedRandomSelect(testWeight)
w := -1
list := make([]testWrsItem, cnt)
for i := range list {
list[i] = testWrsItem{idx: i, widx: &w}
s.Update(&list[i])
}
w = rand.Intn(cnt)
c := s.Choose()
if c == nil {
t.Errorf("expected item, got nil")
} else {
if c.(*testWrsItem).idx != w {
t.Errorf("expected another item")
}
}
w = -2
if s.Choose() != nil {
t.Errorf("expected nil, got item")
}
}
testFn(1)
testFn(10)
testFn(100)
testFn(1000)
testFn(10000)
testFn(100000)
testFn(1000000)
}

@ -1,107 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
)
// PrivateClientAPI implements the vflux client side API
type PrivateClientAPI struct {
vt *ValueTracker
}
// NewPrivateClientAPI creates a PrivateClientAPI
func NewPrivateClientAPI(vt *ValueTracker) *PrivateClientAPI {
return &PrivateClientAPI{vt}
}
// parseNodeStr converts either an enode address or a plain hex node id to enode.ID
func parseNodeStr(nodeStr string) (enode.ID, error) {
if id, err := enode.ParseID(nodeStr); err == nil {
return id, nil
}
if node, err := enode.Parse(enode.ValidSchemes, nodeStr); err == nil {
return node.ID(), nil
} else {
return enode.ID{}, err
}
}
// RequestStats returns the current contents of the reference request basket, with
// request values meaning average per request rather than total.
func (api *PrivateClientAPI) RequestStats() []RequestStatsItem {
return api.vt.RequestStats()
}
// Distribution returns a distribution as a series of (X, Y) chart coordinates,
// where the X axis is the response time in seconds while the Y axis is the amount of
// service value received with a response time close to the X coordinate.
// The distribution is optionally normalized to a sum of 1.
// If nodeStr == "" then the global distribution is returned, otherwise the individual
// distribution of the specified server node.
func (api *PrivateClientAPI) Distribution(nodeStr string, normalized bool) (RtDistribution, error) {
var expFactor utils.ExpirationFactor
if !normalized {
expFactor = utils.ExpFactor(api.vt.StatsExpirer().LogOffset(mclock.Now()))
}
if nodeStr == "" {
return api.vt.RtStats().Distribution(normalized, expFactor), nil
}
if id, err := parseNodeStr(nodeStr); err == nil {
return api.vt.GetNode(id).RtStats().Distribution(normalized, expFactor), nil
} else {
return RtDistribution{}, err
}
}
// Timeout suggests a timeout value based on either the global distribution or the
// distribution of the specified node. The parameter is the desired rate of timeouts
// assuming a similar distribution in the future.
// Note that the actual timeout should have a sensible minimum bound so that operating
// under ideal working conditions for a long time (for example, using a local server
// with very low response times) will not make it very hard for the system to accommodate
// longer response times in the future.
func (api *PrivateClientAPI) Timeout(nodeStr string, failRate float64) (float64, error) {
if nodeStr == "" {
return float64(api.vt.RtStats().Timeout(failRate)) / float64(time.Second), nil
}
if id, err := parseNodeStr(nodeStr); err == nil {
return float64(api.vt.GetNode(id).RtStats().Timeout(failRate)) / float64(time.Second), nil
} else {
return 0, err
}
}
// Value calculates the total service value provided either globally or by the specified
// server node, using a weight function based on the given timeout.
func (api *PrivateClientAPI) Value(nodeStr string, timeout float64) (float64, error) {
wt := TimeoutWeights(time.Duration(timeout * float64(time.Second)))
expFactor := utils.ExpFactor(api.vt.StatsExpirer().LogOffset(mclock.Now()))
if nodeStr == "" {
return api.vt.RtStats().Value(wt, expFactor), nil
}
if id, err := parseNodeStr(nodeStr); err == nil {
return api.vt.GetNode(id).RtStats().Value(wt, expFactor), nil
} else {
return 0, err
}
}

@ -1,107 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"sync"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
// FillSet tries to read nodes from an input iterator and add them to a node set by
// setting the specified node state flag(s) until the size of the set reaches the target.
// Note that other mechanisms (like other FillSet instances reading from different
// inputs) can also set the same flag(s); FillSet always counts the total number of
// nodes that have those flags.
type FillSet struct {
lock sync.Mutex
cond *sync.Cond
ns *nodestate.NodeStateMachine
input enode.Iterator
closed bool
flags nodestate.Flags
count, target int
}
// NewFillSet creates a new FillSet
func NewFillSet(ns *nodestate.NodeStateMachine, input enode.Iterator, flags nodestate.Flags) *FillSet {
fs := &FillSet{
ns: ns,
input: input,
flags: flags,
}
fs.cond = sync.NewCond(&fs.lock)
ns.SubscribeState(flags, func(n *enode.Node, oldState, newState nodestate.Flags) {
fs.lock.Lock()
if oldState.Equals(flags) {
fs.count--
}
if newState.Equals(flags) {
fs.count++
}
if fs.target > fs.count {
fs.cond.Signal()
}
fs.lock.Unlock()
})
go fs.readLoop()
return fs
}
// readLoop keeps reading nodes from the input and setting the specified flags for them
// whenever the node set size is under the current target
func (fs *FillSet) readLoop() {
for {
fs.lock.Lock()
for fs.target <= fs.count && !fs.closed {
fs.cond.Wait()
}
fs.lock.Unlock()
if !fs.input.Next() {
return
}
fs.ns.SetState(fs.input.Node(), fs.flags, nodestate.Flags{}, 0)
}
}
// SetTarget sets the current target for node set size. If the previous target was not
// reached and FillSet was still waiting for the next node from the input then the next
// incoming node will be added to the set regardless of the target. This ensures that
// all nodes coming from the input are eventually added to the set.
func (fs *FillSet) SetTarget(target int) {
fs.lock.Lock()
defer fs.lock.Unlock()
fs.target = target
if fs.target > fs.count {
fs.cond.Signal()
}
}
// Close shuts FillSet down and closes the input iterator
func (fs *FillSet) Close() {
fs.lock.Lock()
defer fs.lock.Unlock()
fs.closed = true
fs.input.Close()
fs.cond.Signal()
}
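
A minimal usage sketch of FillSet (illustrative only; ns, input and sfDialable are
assumed to exist: a configured NodeStateMachine, a node iterator and a registered
state flag):

	fs := NewFillSet(ns, input, sfDialable)
	fs.SetTarget(10) // keep at least 10 nodes flagged
	// ...
	fs.Close() // also closes the input iterator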

@ -1,119 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"crypto/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
type testIter struct {
waitCh chan struct{}
nodeCh chan *enode.Node
node *enode.Node
}
func (i *testIter) Next() bool {
if _, ok := <-i.waitCh; !ok {
return false
}
i.node = <-i.nodeCh
return true
}
func (i *testIter) Node() *enode.Node {
return i.node
}
func (i *testIter) Close() {
close(i.waitCh)
}
func (i *testIter) push() {
var id enode.ID
rand.Read(id[:])
i.nodeCh <- enode.SignNull(new(enr.Record), id)
}
func (i *testIter) waiting(timeout time.Duration) bool {
select {
case i.waitCh <- struct{}{}:
return true
case <-time.After(timeout):
return false
}
}
func TestFillSet(t *testing.T) {
t.Parallel()
ns := nodestate.NewNodeStateMachine(nil, nil, &mclock.Simulated{}, testSetup)
iter := &testIter{
waitCh: make(chan struct{}),
nodeCh: make(chan *enode.Node),
}
fs := NewFillSet(ns, iter, sfTest1)
ns.Start()
expWaiting := func(i int, push bool) {
for ; i > 0; i-- {
if !iter.waiting(time.Second * 10) {
t.Fatalf("FillSet not waiting for new nodes")
}
if push {
iter.push()
}
}
}
expNotWaiting := func() {
if iter.waiting(time.Millisecond * 100) {
t.Fatalf("FillSet unexpectedly waiting for new nodes")
}
}
expNotWaiting()
fs.SetTarget(3)
expWaiting(3, true)
expNotWaiting()
fs.SetTarget(100)
expWaiting(2, true)
expWaiting(1, false)
// lower the target before the previous one has been filled up
fs.SetTarget(0)
iter.push()
expNotWaiting()
fs.SetTarget(10)
expWaiting(4, true)
expNotWaiting()
// remove all previously set flags
ns.ForEach(sfTest1, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
ns.SetState(node, nodestate.Flags{}, sfTest1, 0)
})
// now expect FillSet to fill the set up again with 10 new nodes
expWaiting(10, true)
expNotWaiting()
fs.Close()
ns.Stop()
}

@ -1,123 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"sync"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
// QueueIterator returns nodes from the specified selectable set in the same order as
// they entered the set.
type QueueIterator struct {
lock sync.Mutex
cond *sync.Cond
ns *nodestate.NodeStateMachine
queue []*enode.Node
nextNode *enode.Node
waitCallback func(bool)
fifo, closed bool
}
// NewQueueIterator creates a new QueueIterator. Nodes are selectable if they have all the required
// and none of the disabled flags set. When a node is selected the selectedFlag is set which also
// disables further selectability until it is removed or times out.
func NewQueueIterator(ns *nodestate.NodeStateMachine, requireFlags, disableFlags nodestate.Flags, fifo bool, waitCallback func(bool)) *QueueIterator {
qi := &QueueIterator{
ns: ns,
fifo: fifo,
waitCallback: waitCallback,
}
qi.cond = sync.NewCond(&qi.lock)
ns.SubscribeState(requireFlags.Or(disableFlags), func(n *enode.Node, oldState, newState nodestate.Flags) {
oldMatch := oldState.HasAll(requireFlags) && oldState.HasNone(disableFlags)
newMatch := newState.HasAll(requireFlags) && newState.HasNone(disableFlags)
if newMatch == oldMatch {
return
}
qi.lock.Lock()
defer qi.lock.Unlock()
if newMatch {
qi.queue = append(qi.queue, n)
} else {
id := n.ID()
for i, qn := range qi.queue {
if qn.ID() == id {
copy(qi.queue[i:len(qi.queue)-1], qi.queue[i+1:])
qi.queue = qi.queue[:len(qi.queue)-1]
break
}
}
}
qi.cond.Signal()
})
return qi
}
// Next moves to the next selectable node.
func (qi *QueueIterator) Next() bool {
qi.lock.Lock()
if !qi.closed && len(qi.queue) == 0 {
if qi.waitCallback != nil {
qi.waitCallback(true)
}
for !qi.closed && len(qi.queue) == 0 {
qi.cond.Wait()
}
if qi.waitCallback != nil {
qi.waitCallback(false)
}
}
if qi.closed {
qi.nextNode = nil
qi.lock.Unlock()
return false
}
// Move to the next node in queue.
if qi.fifo {
qi.nextNode = qi.queue[0]
copy(qi.queue[:len(qi.queue)-1], qi.queue[1:])
qi.queue = qi.queue[:len(qi.queue)-1]
} else {
qi.nextNode = qi.queue[len(qi.queue)-1]
qi.queue = qi.queue[:len(qi.queue)-1]
}
qi.lock.Unlock()
return true
}
// Close ends the iterator.
func (qi *QueueIterator) Close() {
qi.lock.Lock()
qi.closed = true
qi.lock.Unlock()
qi.cond.Signal()
}
// Node returns the current node.
func (qi *QueueIterator) Node() *enode.Node {
qi.lock.Lock()
defer qi.lock.Unlock()
return qi.nextNode
}
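
A minimal usage sketch of QueueIterator (illustrative only; ns and the flags are
assumed to come from a configured NodeStateMachine setup, as in the test below):

	qi := NewQueueIterator(ns, sfCanDial, sfDialed, true, nil)
	for qi.Next() {
		node := qi.Node()
		// setting the selected flag disables further selection of this node
		ns.SetState(node, sfDialed, nodestate.Flags{}, 0)
	}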

@ -1,99 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
func testNode(i int) *enode.Node {
return enode.SignNull(new(enr.Record), testNodeID(i))
}
func TestQueueIteratorFIFO(t *testing.T) {
t.Parallel()
testQueueIterator(t, true)
}
func TestQueueIteratorLIFO(t *testing.T) {
t.Parallel()
testQueueIterator(t, false)
}
func testQueueIterator(t *testing.T, fifo bool) {
ns := nodestate.NewNodeStateMachine(nil, nil, &mclock.Simulated{}, testSetup)
qi := NewQueueIterator(ns, sfTest2, sfTest3.Or(sfTest4), fifo, nil)
ns.Start()
for i := 1; i <= iterTestNodeCount; i++ {
ns.SetState(testNode(i), sfTest1, nodestate.Flags{}, 0)
}
next := func() int {
ch := make(chan struct{})
go func() {
qi.Next()
close(ch)
}()
select {
case <-ch:
case <-time.After(time.Second * 5):
t.Fatalf("Iterator.Next() timeout")
}
node := qi.Node()
ns.SetState(node, sfTest4, nodestate.Flags{}, 0)
return testNodeIndex(node.ID())
}
exp := func(i int) {
n := next()
if n != i {
t.Errorf("Wrong item returned by iterator (expected %d, got %d)", i, n)
}
}
explist := func(list []int) {
for i := range list {
if fifo {
exp(list[i])
} else {
exp(list[len(list)-1-i])
}
}
}
ns.SetState(testNode(1), sfTest2, nodestate.Flags{}, 0)
ns.SetState(testNode(2), sfTest2, nodestate.Flags{}, 0)
ns.SetState(testNode(3), sfTest2, nodestate.Flags{}, 0)
explist([]int{1, 2, 3})
ns.SetState(testNode(4), sfTest2, nodestate.Flags{}, 0)
ns.SetState(testNode(5), sfTest2, nodestate.Flags{}, 0)
ns.SetState(testNode(6), sfTest2, nodestate.Flags{}, 0)
ns.SetState(testNode(5), sfTest3, nodestate.Flags{}, 0)
explist([]int{4, 6})
ns.SetState(testNode(1), nodestate.Flags{}, sfTest4, 0)
ns.SetState(testNode(2), nodestate.Flags{}, sfTest4, 0)
ns.SetState(testNode(3), nodestate.Flags{}, sfTest4, 0)
ns.SetState(testNode(2), sfTest3, nodestate.Flags{}, 0)
ns.SetState(testNode(2), nodestate.Flags{}, sfTest3, 0)
explist([]int{1, 3, 2})
ns.Stop()
}

@ -1,285 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"io"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/rlp"
)
const basketFactor = 1000000 // reference basket amount and value scale factor
// referenceBasket keeps track of global request usage statistics and the usual prices
// of each used request type relative to each other. The amounts in the basket are scaled
// up by basketFactor because of the exponential expiration of long-term statistical data.
// Values are scaled so that the sum of all amounts and the sum of all values are equal.
//
// reqValues represent the internal relative value estimates for each request type and are
// calculated as value / amount. The average reqValue of all used requests is 1.
// In other words: SUM(refBasket[type].amount * reqValue[type]) = SUM(refBasket[type].amount)
type referenceBasket struct {
basket requestBasket
reqValues []float64 // contents are read only, new slice is created for each update
}
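// Worked example (illustrative only): with two request types, amounts
// {4, 2} and values {2, 4}, the sums match (6 == 6) as normalization
// requires, and reqValues = value/amount = {0.5, 2}. The amount-weighted
// average is (4*0.5 + 2*2) / (4 + 2) == 1, matching the invariant above.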
// serverBasket collects served request amount and value statistics for a single server.
//
// Values are gradually transferred to the global reference basket with a long time
// constant so that each server basket represents long term usage and price statistics.
// When the transferred part is added to the reference basket the values are scaled so
// that their sum equals the total value calculated according to the previous reqValues.
// The ratio of request values coming from the server basket represent the pricing of
// the specific server and modify the global estimates with a weight proportional to
// the amount of service provided by the server.
type serverBasket struct {
basket requestBasket
rvFactor float64
}
type (
// requestBasket holds amounts and values for each request type.
// These values are exponentially expired (see utils.ExpiredValue). The power of 2
// exponent is applicable to all values within.
requestBasket struct {
items []basketItem
exp uint64
}
// basketItem holds amount and value for a single request type. Value is the total
// relative request value accumulated for served requests while amount is the counter
// for each request type.
// Note that these values are both scaled up by basketFactor because of the exponential
// expiration.
basketItem struct {
amount, value uint64
}
)
// setExp sets the power of 2 exponent of the structure, scaling base values (the amounts
// and request values) up or down if necessary.
func (b *requestBasket) setExp(exp uint64) {
if exp > b.exp {
shift := exp - b.exp
for i, item := range b.items {
item.amount >>= shift
item.value >>= shift
b.items[i] = item
}
b.exp = exp
}
if exp < b.exp {
shift := b.exp - exp
for i, item := range b.items {
item.amount <<= shift
item.value <<= shift
b.items[i] = item
}
b.exp = exp
}
}
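// Illustrative example: the represented quantity is base * 2^exp, so
// setExp rescales the stored base values to keep it constant. Raising exp
// from 3 to 5 shifts a stored amount of 40 right by two bits to 10, and
// 40 * 2^3 == 10 * 2^5 == 320 still holds (modulo truncated low bits).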
// init initializes a new server basket with the given service vector size (number of
// different request types)
func (s *serverBasket) init(size int) {
if s.basket.items == nil {
s.basket.items = make([]basketItem, size)
}
}
// add adds the given type and amount of requests to the basket. Cost is calculated
// according to the server's own cost table.
func (s *serverBasket) add(reqType, reqAmount uint32, reqCost uint64, expFactor utils.ExpirationFactor) {
s.basket.setExp(expFactor.Exp)
i := &s.basket.items[reqType]
i.amount += uint64(float64(uint64(reqAmount)*basketFactor) * expFactor.Factor)
i.value += uint64(float64(reqCost) * s.rvFactor * expFactor.Factor)
}
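// Illustrative example: with rvFactor == 2 and no expiration
// (utils.ExpirationFactor{Factor: 1}), add(0, 10, 500, noexp) increases
// items[0].amount by 10*basketFactor and items[0].value by 500*2 == 1000.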
// updateRvFactor updates the request value factor that scales server costs into the
// local value dimensions.
func (s *serverBasket) updateRvFactor(rvFactor float64) {
s.rvFactor = rvFactor
}
// transfer decreases amounts and values in the basket with the given ratio and
// moves the removed amounts into a new basket which is returned and can be added
// to the global reference basket.
func (s *serverBasket) transfer(ratio float64) requestBasket {
res := requestBasket{
items: make([]basketItem, len(s.basket.items)),
exp: s.basket.exp,
}
for i, v := range s.basket.items {
ta := uint64(float64(v.amount) * ratio)
tv := uint64(float64(v.value) * ratio)
if ta > v.amount {
ta = v.amount
}
if tv > v.value {
tv = v.value
}
s.basket.items[i] = basketItem{v.amount - ta, v.value - tv}
res.items[i] = basketItem{ta, tv}
}
return res
}
// init initializes the reference basket with the given service vector size (number of
// different request types)
func (r *referenceBasket) init(size int) {
r.reqValues = make([]float64, size)
r.normalize()
r.updateReqValues()
}
// add adds the transferred part of a server basket to the reference basket while scaling
// value amounts so that their sum equals the total value calculated according to the
// previous reqValues.
func (r *referenceBasket) add(newBasket requestBasket) {
r.basket.setExp(newBasket.exp)
// scale newBasket to match service unit value
var (
totalCost uint64
totalValue float64
)
for i, v := range newBasket.items {
totalCost += v.value
totalValue += float64(v.amount) * r.reqValues[i]
}
if totalCost > 0 {
// add to reference with scaled values
scaleValues := totalValue / float64(totalCost)
for i, v := range newBasket.items {
r.basket.items[i].amount += v.amount
r.basket.items[i].value += uint64(float64(v.value) * scaleValues)
}
}
r.updateReqValues()
}
// updateReqValues recalculates reqValues after adding transferred baskets. Note that
// values should be normalized first.
func (r *referenceBasket) updateReqValues() {
r.reqValues = make([]float64, len(r.reqValues))
for i, b := range r.basket.items {
if b.amount > 0 {
r.reqValues[i] = float64(b.value) / float64(b.amount)
} else {
r.reqValues[i] = 0
}
}
}
// normalize ensures that the sum of values equal the sum of amounts in the basket.
func (r *referenceBasket) normalize() {
var sumAmount, sumValue uint64
for _, b := range r.basket.items {
sumAmount += b.amount
sumValue += b.value
}
add := float64(int64(sumAmount-sumValue)) / float64(sumValue)
for i, b := range r.basket.items {
b.value += uint64(int64(float64(b.value) * add))
r.basket.items[i] = b
}
}
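// Illustrative example: with amounts {10, 20} and values {20, 40},
// sumAmount == 30 and sumValue == 60, so add == (30-60)/60 == -0.5; each
// value is halved and the new values {10, 20} again sum to sumAmount.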
// reqValueFactor calculates the request value factor applicable to the server with
// the given announced request cost list
func (r *referenceBasket) reqValueFactor(costList []uint64) float64 {
var (
totalCost float64
totalValue uint64
)
for i, b := range r.basket.items {
totalCost += float64(costList[i]) * float64(b.amount) // use floats to avoid overflow
totalValue += b.value
}
if totalCost < 1 {
return 0
}
return float64(totalValue) * basketFactor / totalCost
}
// EncodeRLP implements rlp.Encoder
func (b *basketItem) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{b.amount, b.value})
}
// DecodeRLP implements rlp.Decoder
func (b *basketItem) DecodeRLP(s *rlp.Stream) error {
var item struct {
Amount, Value uint64
}
if err := s.Decode(&item); err != nil {
return err
}
b.amount, b.value = item.Amount, item.Value
return nil
}
// EncodeRLP implements rlp.Encoder
func (r *requestBasket) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, []interface{}{r.items, r.exp})
}
// DecodeRLP implements rlp.Decoder
func (r *requestBasket) DecodeRLP(s *rlp.Stream) error {
var enc struct {
Items []basketItem
Exp uint64
}
if err := s.Decode(&enc); err != nil {
return err
}
r.items, r.exp = enc.Items, enc.Exp
return nil
}
// convertMapping converts a basket loaded from the database into the current format.
// If the available request types and their mapping into the service vector differ from
// the one used when saving the basket then this function reorders old fields and fills
// in previously unknown fields by scaling up amounts and values taken from the
// initialization basket.
func (r requestBasket) convertMapping(oldMapping, newMapping []string, initBasket requestBasket) requestBasket {
nameMap := make(map[string]int)
for i, name := range oldMapping {
nameMap[name] = i
}
rc := requestBasket{items: make([]basketItem, len(newMapping))}
var scale, oldScale, newScale float64
for i, name := range newMapping {
if ii, ok := nameMap[name]; ok {
rc.items[i] = r.items[ii]
oldScale += float64(initBasket.items[i].amount) * float64(initBasket.items[i].amount)
newScale += float64(rc.items[i].amount) * float64(initBasket.items[i].amount)
}
}
if oldScale > 1e-10 {
scale = newScale / oldScale
} else {
scale = 1
}
for i, name := range newMapping {
if _, ok := nameMap[name]; !ok {
rc.items[i].amount = uint64(float64(initBasket.items[i].amount) * scale)
rc.items[i].value = uint64(float64(initBasket.items[i].value) * scale)
}
}
return rc
}

@ -1,171 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/les/utils"
)
func checkU64(t *testing.T, name string, value, exp uint64) {
if value != exp {
t.Errorf("Incorrect value for %s: got %d, expected %d", name, value, exp)
}
}
func checkF64(t *testing.T, name string, value, exp, tol float64) {
if value < exp-tol || value > exp+tol {
t.Errorf("Incorrect value for %s: got %f, expected %f", name, value, exp)
}
}
func TestServerBasket(t *testing.T) {
t.Parallel()
var s serverBasket
s.init(2)
// add some requests with different request value factors
s.updateRvFactor(1)
noexp := utils.ExpirationFactor{Factor: 1}
s.add(0, 1000, 10000, noexp)
s.add(1, 3000, 60000, noexp)
s.updateRvFactor(10)
s.add(0, 4000, 4000, noexp)
s.add(1, 2000, 4000, noexp)
s.updateRvFactor(10)
// check basket contents directly
checkU64(t, "s.basket[0].amount", s.basket.items[0].amount, 5000*basketFactor)
checkU64(t, "s.basket[0].value", s.basket.items[0].value, 50000)
checkU64(t, "s.basket[1].amount", s.basket.items[1].amount, 5000*basketFactor)
checkU64(t, "s.basket[1].value", s.basket.items[1].value, 100000)
// transfer 50% of the contents of the basket
transfer1 := s.transfer(0.5)
checkU64(t, "transfer1[0].amount", transfer1.items[0].amount, 2500*basketFactor)
checkU64(t, "transfer1[0].value", transfer1.items[0].value, 25000)
checkU64(t, "transfer1[1].amount", transfer1.items[1].amount, 2500*basketFactor)
checkU64(t, "transfer1[1].value", transfer1.items[1].value, 50000)
// add more requests
s.updateRvFactor(100)
s.add(0, 1000, 100, noexp)
// transfer 25% of the contents of the basket
transfer2 := s.transfer(0.25)
checkU64(t, "transfer2[0].amount", transfer2.items[0].amount, (2500+1000)/4*basketFactor)
checkU64(t, "transfer2[0].value", transfer2.items[0].value, (25000+10000)/4)
checkU64(t, "transfer2[1].amount", transfer2.items[1].amount, 2500/4*basketFactor)
checkU64(t, "transfer2[1].value", transfer2.items[1].value, 50000/4)
}
func TestConvertMapping(t *testing.T) {
t.Parallel()
b := requestBasket{items: []basketItem{{3, 3}, {1, 1}, {2, 2}}}
oldMap := []string{"req3", "req1", "req2"}
newMap := []string{"req1", "req2", "req3", "req4"}
init := requestBasket{items: []basketItem{{2, 2}, {4, 4}, {6, 6}, {8, 8}}}
bc := b.convertMapping(oldMap, newMap, init)
checkU64(t, "bc[0].amount", bc.items[0].amount, 1)
checkU64(t, "bc[1].amount", bc.items[1].amount, 2)
checkU64(t, "bc[2].amount", bc.items[2].amount, 3)
checkU64(t, "bc[3].amount", bc.items[3].amount, 4) // 8 should be scaled down to 4
}
func TestReqValueFactor(t *testing.T) {
t.Parallel()
var ref referenceBasket
ref.basket = requestBasket{items: make([]basketItem, 4)}
for i := range ref.basket.items {
ref.basket.items[i].amount = uint64(i+1) * basketFactor
ref.basket.items[i].value = uint64(i+1) * basketFactor
}
ref.init(4)
rvf := ref.reqValueFactor([]uint64{1000, 2000, 3000, 4000})
// expected value is (1000000+2000000+3000000+4000000) / (1*1000+2*2000+3*3000+4*4000) = 10000000/30000 = 333.333
checkF64(t, "reqValueFactor", rvf, 333.333, 1)
}
func TestNormalize(t *testing.T) {
t.Parallel()
for cycle := 0; cycle < 100; cycle++ {
// Initialize data for testing
valueRange, lower := 1000000, 1000000
ref := referenceBasket{basket: requestBasket{items: make([]basketItem, 10)}}
for i := 0; i < 10; i++ {
ref.basket.items[i].amount = uint64(rand.Intn(valueRange) + lower)
ref.basket.items[i].value = uint64(rand.Intn(valueRange) + lower)
}
ref.normalize()
// Check whether SUM(amount) ~= SUM(value)
var sumAmount, sumValue uint64
for i := 0; i < 10; i++ {
sumAmount += ref.basket.items[i].amount
sumValue += ref.basket.items[i].value
}
var epsilon = 0.01
if float64(sumAmount)*(1+epsilon) < float64(sumValue) || float64(sumAmount)*(1-epsilon) > float64(sumValue) {
t.Fatalf("Failed to normalize sumAmount: %d sumValue: %d", sumAmount, sumValue)
}
}
}
func TestReqValueAdjustment(t *testing.T) {
t.Parallel()
var s1, s2 serverBasket
s1.init(3)
s2.init(3)
cost1 := []uint64{30000, 60000, 90000}
cost2 := []uint64{100000, 200000, 300000}
var ref referenceBasket
ref.basket = requestBasket{items: make([]basketItem, 3)}
for i := range ref.basket.items {
ref.basket.items[i].amount = 123 * basketFactor
ref.basket.items[i].value = 123 * basketFactor
}
ref.init(3)
// initial reqValues are expected to be {1, 1, 1}
checkF64(t, "reqValues[0]", ref.reqValues[0], 1, 0.01)
checkF64(t, "reqValues[1]", ref.reqValues[1], 1, 0.01)
checkF64(t, "reqValues[2]", ref.reqValues[2], 1, 0.01)
var logOffset utils.Fixed64
for period := 0; period < 1000; period++ {
exp := utils.ExpFactor(logOffset)
s1.updateRvFactor(ref.reqValueFactor(cost1))
s2.updateRvFactor(ref.reqValueFactor(cost2))
// throw in random requests into each basket using their internal pricing
for i := 0; i < 1000; i++ {
reqType, reqAmount := uint32(rand.Intn(3)), uint32(rand.Intn(10)+1)
reqCost := uint64(reqAmount) * cost1[reqType]
s1.add(reqType, reqAmount, reqCost, exp)
reqType, reqAmount = uint32(rand.Intn(3)), uint32(rand.Intn(10)+1)
reqCost = uint64(reqAmount) * cost2[reqType]
s2.add(reqType, reqAmount, reqCost, exp)
}
ref.add(s1.transfer(0.1))
ref.add(s2.transfer(0.1))
ref.normalize()
ref.updateReqValues()
logOffset += utils.Float64ToFixed64(0.1)
}
checkF64(t, "reqValues[0]", ref.reqValues[0], 0.5, 0.01)
checkF64(t, "reqValues[1]", ref.reqValues[1], 1, 0.01)
checkF64(t, "reqValues[2]", ref.reqValues[2], 1.5, 0.01)
}

@ -1,605 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"errors"
"math/rand"
"reflect"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/nodestate"
"github.com/ethereum/go-ethereum/rlp"
)
const (
minTimeout = time.Millisecond * 500 // minimum request timeout suggested by the server pool
timeoutRefresh = time.Second * 5 // recalculate timeout if older than this
dialCost = 10000 // cost of a TCP dial (used for known node selection weight calculation)
dialWaitStep = 1.5 // exponential multiplier of redial wait time when no value was provided by the server
queryCost = 500 // cost of a UDP pre-negotiation query
queryWaitStep = 1.02 // exponential multiplier of redial wait time when no value was provided by the server
waitThreshold = time.Hour * 2000 // drop node if waiting time is over the threshold
nodeWeightMul = 1000000 // multiplier constant for node weight calculation
nodeWeightThreshold = 100 // minimum weight for keeping a node in the known (valuable) set
minRedialWait = 10 // minimum redial wait time in seconds
preNegLimit = 5 // maximum number of simultaneous pre-negotiation queries
warnQueryFails = 20 // number of consecutive UDP query failures before we print a warning
maxQueryFails = 100 // number of consecutive UDP query failures when then chance of skipping a query reaches 50%
)
// ServerPool provides a node iterator for dial candidates. The output is a mix of newly discovered
// nodes, a weighted random selection of known (previously valuable) nodes and trusted/paid nodes.
type ServerPool struct {
clock mclock.Clock
unixTime func() int64
db ethdb.KeyValueStore
ns *nodestate.NodeStateMachine
vt *ValueTracker
mixer *enode.FairMix
mixSources []enode.Iterator
dialIterator enode.Iterator
validSchemes enr.IdentityScheme
trustedURLs []string
fillSet *FillSet
started, queryFails uint32
timeoutLock sync.RWMutex
timeout time.Duration
timeWeights ResponseTimeWeights
timeoutRefreshed mclock.AbsTime
suggestedTimeoutGauge, totalValueGauge metrics.Gauge
sessionValueMeter metrics.Meter
}
// nodeHistory keeps track of dial costs which determine node weight together with the
// service value calculated by ValueTracker.
type nodeHistory struct {
dialCost utils.ExpiredValue
redialWaitStart, redialWaitEnd int64 // unix time (seconds)
}
type nodeHistoryEnc struct {
DialCost utils.ExpiredValue
RedialWaitStart, RedialWaitEnd uint64
}
// QueryFunc sends a pre-negotiation query and blocks until a response arrives or timeout occurs.
// It returns 1 if the remote node has confirmed that connection is possible, 0 if not
// possible and -1 if no response arrived (timeout).
type QueryFunc func(*enode.Node) int
var (
clientSetup = &nodestate.Setup{Version: 2}
sfHasValue = clientSetup.NewPersistentFlag("hasValue")
sfQuery = clientSetup.NewFlag("query")
sfCanDial = clientSetup.NewFlag("canDial")
sfDialing = clientSetup.NewFlag("dialed")
sfWaitDialTimeout = clientSetup.NewFlag("dialTimeout")
sfConnected = clientSetup.NewFlag("connected")
sfRedialWait = clientSetup.NewFlag("redialWait")
sfAlwaysConnect = clientSetup.NewFlag("alwaysConnect")
sfDialProcess = nodestate.MergeFlags(sfQuery, sfCanDial, sfDialing, sfConnected, sfRedialWait)
sfiNodeHistory = clientSetup.NewPersistentField("nodeHistory", reflect.TypeOf(nodeHistory{}),
func(field interface{}) ([]byte, error) {
if n, ok := field.(nodeHistory); ok {
ne := nodeHistoryEnc{
DialCost: n.dialCost,
RedialWaitStart: uint64(n.redialWaitStart),
RedialWaitEnd: uint64(n.redialWaitEnd),
}
enc, err := rlp.EncodeToBytes(&ne)
return enc, err
}
return nil, errors.New("invalid field type")
},
func(enc []byte) (interface{}, error) {
var ne nodeHistoryEnc
err := rlp.DecodeBytes(enc, &ne)
n := nodeHistory{
dialCost: ne.DialCost,
redialWaitStart: int64(ne.RedialWaitStart),
redialWaitEnd: int64(ne.RedialWaitEnd),
}
return n, err
},
)
sfiNodeWeight = clientSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0)))
sfiConnectedStats = clientSetup.NewField("connectedStats", reflect.TypeOf(ResponseTimeStats{}))
sfiLocalAddress = clientSetup.NewPersistentField("localAddress", reflect.TypeOf(&enr.Record{}),
func(field interface{}) ([]byte, error) {
if enr, ok := field.(*enr.Record); ok {
enc, err := rlp.EncodeToBytes(enr)
return enc, err
}
return nil, errors.New("invalid field type")
},
func(enc []byte) (interface{}, error) {
var enr enr.Record
if err := rlp.DecodeBytes(enc, &enr); err != nil {
return nil, err
}
return &enr, nil
},
)
)
// NewServerPool creates a new server pool
func NewServerPool(db ethdb.KeyValueStore, dbKey []byte, mixTimeout time.Duration, query QueryFunc, clock mclock.Clock, trustedURLs []string, requestList []RequestInfo) (*ServerPool, enode.Iterator) {
s := &ServerPool{
db: db,
clock: clock,
unixTime: func() int64 { return time.Now().Unix() },
validSchemes: enode.ValidSchemes,
trustedURLs: trustedURLs,
vt: NewValueTracker(db, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)),
ns: nodestate.NewNodeStateMachine(db, []byte(string(dbKey)+"ns:"), clock, clientSetup),
}
s.recalTimeout()
s.mixer = enode.NewFairMix(mixTimeout)
knownSelector := NewWrsIterator(s.ns, sfHasValue, sfDialProcess, sfiNodeWeight)
alwaysConnect := NewQueueIterator(s.ns, sfAlwaysConnect, sfDialProcess, true, nil)
s.mixSources = append(s.mixSources, knownSelector)
s.mixSources = append(s.mixSources, alwaysConnect)
s.dialIterator = s.mixer
if query != nil {
s.dialIterator = s.addPreNegFilter(s.dialIterator, query)
}
s.ns.SubscribeState(nodestate.MergeFlags(sfWaitDialTimeout, sfConnected), func(n *enode.Node, oldState, newState nodestate.Flags) {
if oldState.Equals(sfWaitDialTimeout) && newState.IsEmpty() {
// dial timeout, no connection
s.setRedialWait(n, dialCost, dialWaitStep)
s.ns.SetStateSub(n, nodestate.Flags{}, sfDialing, 0)
}
})
return s, &serverPoolIterator{
dialIterator: s.dialIterator,
nextFn: func(node *enode.Node) {
s.ns.Operation(func() {
s.ns.SetStateSub(node, sfDialing, sfCanDial, 0)
s.ns.SetStateSub(node, sfWaitDialTimeout, nodestate.Flags{}, time.Second*10)
})
},
nodeFn: s.DialNode,
}
}
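// Usage sketch (illustrative; db, query, trustedURLs, requestList,
// discIter and dialCandidate are assumptions, not part of the original
// file):
//
//	pool, iter := NewServerPool(db, []byte("sp:"), time.Second, query,
//		&mclock.System{}, trustedURLs, requestList)
//	pool.AddSource(discIter) // optional discovery source, add before Start
//	pool.Start()
//	go func() {
//		for iter.Next() {
//			dialCandidate(iter.Node())
//		}
//	}()
//	// on shutdown:
//	iter.Close()
//	pool.Stop()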
type serverPoolIterator struct {
dialIterator enode.Iterator
nextFn func(*enode.Node)
nodeFn func(*enode.Node) *enode.Node
}
// Next implements enode.Iterator
func (s *serverPoolIterator) Next() bool {
if s.dialIterator.Next() {
s.nextFn(s.dialIterator.Node())
return true
}
return false
}
// Node implements enode.Iterator
func (s *serverPoolIterator) Node() *enode.Node {
return s.nodeFn(s.dialIterator.Node())
}
// Close implements enode.Iterator
func (s *serverPoolIterator) Close() {
s.dialIterator.Close()
}
// AddMetrics adds metrics to the server pool. Should be called before Start().
func (s *ServerPool) AddMetrics(
suggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge metrics.Gauge,
sessionValueMeter, serverDialedMeter metrics.Meter) {
s.suggestedTimeoutGauge = suggestedTimeoutGauge
s.totalValueGauge = totalValueGauge
s.sessionValueMeter = sessionValueMeter
if serverSelectableGauge != nil {
s.ns.AddLogMetrics(sfHasValue, sfDialProcess, "selectable", nil, nil, serverSelectableGauge)
}
if serverDialedMeter != nil {
s.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, "dialed", serverDialedMeter, nil, nil)
}
if serverConnectedGauge != nil {
s.ns.AddLogMetrics(sfConnected, nodestate.Flags{}, "connected", nil, nil, serverConnectedGauge)
}
}
// AddSource adds a node discovery source to the server pool (should be called before start)
func (s *ServerPool) AddSource(source enode.Iterator) {
if source != nil {
s.mixSources = append(s.mixSources, source)
}
}
// addPreNegFilter installs a node filter mechanism that performs a pre-negotiation query.
// Nodes that are filtered out and do not appear on the output iterator are put back
// into the redialWait state.
func (s *ServerPool) addPreNegFilter(input enode.Iterator, query QueryFunc) enode.Iterator {
s.fillSet = NewFillSet(s.ns, input, sfQuery)
s.ns.SubscribeState(sfDialProcess, func(n *enode.Node, oldState, newState nodestate.Flags) {
if !newState.Equals(sfQuery) {
if newState.HasAll(sfQuery) {
// remove query flag if the node is already somewhere in the dial process
s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)
}
return
}
fails := atomic.LoadUint32(&s.queryFails)
failMax := fails
if failMax > maxQueryFails {
failMax = maxQueryFails
}
if rand.Intn(maxQueryFails*2) < int(failMax) {
// skip pre-negotiation with increasing chance, max 50%
// this ensures that the client can operate even if UDP is not working at all
s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10)
// set canDial before resetting queried so that FillSet will not read more
// candidates unnecessarily
s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)
return
}
go func() {
q := query(n)
if q == -1 {
atomic.AddUint32(&s.queryFails, 1)
fails++
if fails%warnQueryFails == 0 {
// warn if a large number of consecutive queries have failed
log.Warn("UDP connection queries failed", "count", fails)
}
} else {
atomic.StoreUint32(&s.queryFails, 0)
}
s.ns.Operation(func() {
// we are no longer running in the operation that the callback belongs to, start a new one because of setRedialWait
if q == 1 {
s.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10)
} else {
s.setRedialWait(n, queryCost, queryWaitStep)
}
s.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)
})
}()
})
return NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) {
if waiting {
s.fillSet.SetTarget(preNegLimit)
} else {
s.fillSet.SetTarget(0)
}
})
}
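// Illustrative note on the skip probability above: with maxQueryFails ==
// 100, after 60 consecutive failures a query is skipped with probability
// 60/200 == 30%, saturating at 100/200 == 50% once fails >= maxQueryFails.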
// Start starts the server pool. Note that NodeStateMachine should be started first.
func (s *ServerPool) Start() {
s.ns.Start()
for _, iter := range s.mixSources {
// add sources to mixer at startup because the mixer instantly tries to read them
// which should only happen after NodeStateMachine has been started
s.mixer.AddSource(iter)
}
for _, url := range s.trustedURLs {
if node, err := enode.Parse(s.validSchemes, url); err == nil {
s.ns.SetState(node, sfAlwaysConnect, nodestate.Flags{}, 0)
} else {
log.Error("Invalid trusted server URL", "url", url, "error", err)
}
}
unixTime := s.unixTime()
s.ns.Operation(func() {
s.ns.ForEach(sfHasValue, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
s.calculateWeight(node)
if n, ok := s.ns.GetField(node, sfiNodeHistory).(nodeHistory); ok && n.redialWaitEnd > unixTime {
wait := n.redialWaitEnd - unixTime
lastWait := n.redialWaitEnd - n.redialWaitStart
if wait > lastWait {
// if the time until expiration is larger than the last suggested
// waiting time then the system clock was probably adjusted
wait = lastWait
}
s.ns.SetStateSub(node, sfRedialWait, nodestate.Flags{}, time.Duration(wait)*time.Second)
}
})
})
atomic.StoreUint32(&s.started, 1)
}
// Stop stops the server pool
func (s *ServerPool) Stop() {
if s.fillSet != nil {
s.fillSet.Close()
}
s.ns.Operation(func() {
s.ns.ForEach(sfConnected, nodestate.Flags{}, func(n *enode.Node, state nodestate.Flags) {
// recalculate weight of connected nodes in order to update hasValue flag if necessary
s.calculateWeight(n)
})
})
s.ns.Stop()
s.vt.Stop()
}
// RegisterNode implements serverPeerSubscriber
func (s *ServerPool) RegisterNode(node *enode.Node) (*NodeValueTracker, error) {
if atomic.LoadUint32(&s.started) == 0 {
return nil, errors.New("server pool not started yet")
}
nvt := s.vt.Register(node.ID())
s.ns.Operation(func() {
s.ns.SetStateSub(node, sfConnected, sfDialing.Or(sfWaitDialTimeout), 0)
s.ns.SetFieldSub(node, sfiConnectedStats, nvt.RtStats())
if node.IP().IsLoopback() {
s.ns.SetFieldSub(node, sfiLocalAddress, node.Record())
}
})
return nvt, nil
}
// UnregisterNode implements serverPeerSubscriber
func (s *ServerPool) UnregisterNode(node *enode.Node) {
s.ns.Operation(func() {
s.setRedialWait(node, dialCost, dialWaitStep)
s.ns.SetStateSub(node, nodestate.Flags{}, sfConnected, 0)
s.ns.SetFieldSub(node, sfiConnectedStats, nil)
})
s.vt.Unregister(node.ID())
}
// recalTimeout calculates the current recommended timeout. This value is used by
// the client as a "soft timeout" value. It also affects the service value calculation
// of individual nodes.
func (s *ServerPool) recalTimeout() {
// Use cached result if possible, avoid recalculating too frequently.
s.timeoutLock.RLock()
refreshed := s.timeoutRefreshed
s.timeoutLock.RUnlock()
now := s.clock.Now()
if refreshed != 0 && time.Duration(now-refreshed) < timeoutRefresh {
return
}
// Cached result is stale, recalculate a new one.
rts := s.vt.RtStats()
// Add a fake statistic here. It is an easy way to initialize with some
// conservative values when the database is new. As soon as we have a
// considerable amount of real stats this small value won't matter.
rts.Add(time.Second*2, 10, s.vt.StatsExpFactor())
// Use either 10% failure rate timeout or twice the median response time
// as the recommended timeout.
timeout := minTimeout
if t := rts.Timeout(0.1); t > timeout {
timeout = t
}
if t := rts.Timeout(0.5) * 2; t > timeout {
timeout = t
}
s.timeoutLock.Lock()
if s.timeout != timeout {
s.timeout = timeout
s.timeWeights = TimeoutWeights(s.timeout)
if s.suggestedTimeoutGauge != nil {
s.suggestedTimeoutGauge.Update(int64(s.timeout / time.Millisecond))
}
if s.totalValueGauge != nil {
s.totalValueGauge.Update(int64(rts.Value(s.timeWeights, s.vt.StatsExpFactor())))
}
}
s.timeoutRefreshed = now
s.timeoutLock.Unlock()
}
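// Illustrative example: if the distribution yields rts.Timeout(0.1) ==
// 300ms and a median rts.Timeout(0.5) == 400ms, the recommended timeout is
// max(minTimeout, 300ms, 2*400ms) == 800ms (minTimeout being 500ms).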
// GetTimeout returns the recommended request timeout.
func (s *ServerPool) GetTimeout() time.Duration {
s.recalTimeout()
s.timeoutLock.RLock()
defer s.timeoutLock.RUnlock()
return s.timeout
}
// getTimeoutAndWeight returns the recommended request timeout as well as the
// response time weight which is necessary to calculate service value.
func (s *ServerPool) getTimeoutAndWeight() (time.Duration, ResponseTimeWeights) {
s.recalTimeout()
s.timeoutLock.RLock()
defer s.timeoutLock.RUnlock()
return s.timeout, s.timeWeights
}
// addDialCost adds the given amount of dial cost to the node history and returns the current
// amount of total dial cost
func (s *ServerPool) addDialCost(n *nodeHistory, amount int64) uint64 {
logOffset := s.vt.StatsExpirer().LogOffset(s.clock.Now())
if amount > 0 {
n.dialCost.Add(amount, logOffset)
}
totalDialCost := n.dialCost.Value(logOffset)
if totalDialCost < dialCost {
totalDialCost = dialCost
}
return totalDialCost
}
// serviceValue returns the service value accumulated in this session and in total
func (s *ServerPool) serviceValue(node *enode.Node) (sessionValue, totalValue float64) {
nvt := s.vt.GetNode(node.ID())
if nvt == nil {
return 0, 0
}
currentStats := nvt.RtStats()
_, timeWeights := s.getTimeoutAndWeight()
expFactor := s.vt.StatsExpFactor()
totalValue = currentStats.Value(timeWeights, expFactor)
if connStats, ok := s.ns.GetField(node, sfiConnectedStats).(ResponseTimeStats); ok {
diff := currentStats
diff.SubStats(&connStats)
sessionValue = diff.Value(timeWeights, expFactor)
if s.sessionValueMeter != nil {
s.sessionValueMeter.Mark(int64(sessionValue))
}
}
return
}
// updateWeight calculates the node weight and updates the nodeWeight field and the
// hasValue flag. It also saves the node state if necessary.
// Note: this function should run inside a NodeStateMachine operation
func (s *ServerPool) updateWeight(node *enode.Node, totalValue float64, totalDialCost uint64) {
weight := uint64(totalValue * nodeWeightMul / float64(totalDialCost))
if weight >= nodeWeightThreshold {
s.ns.SetStateSub(node, sfHasValue, nodestate.Flags{}, 0)
s.ns.SetFieldSub(node, sfiNodeWeight, weight)
} else {
s.ns.SetStateSub(node, nodestate.Flags{}, sfHasValue, 0)
s.ns.SetFieldSub(node, sfiNodeWeight, nil)
s.ns.SetFieldSub(node, sfiNodeHistory, nil)
s.ns.SetFieldSub(node, sfiLocalAddress, nil)
}
s.ns.Persist(node) // saved if node history or hasValue changed
}
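// Illustrative example: totalValue == 5 and totalDialCost == 20000 give
// weight == 5 * nodeWeightMul / 20000 == 250, above nodeWeightThreshold
// (100), so the node keeps its hasValue flag and stays selectable.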
// setRedialWait calculates and sets the redialWait timeout based on the service value
// and dial cost accumulated during the last session/attempt and in total.
// The waiting time is raised exponentially if no service value has been received in order
// to prevent dialing an unresponsive node frequently for a very long time just because it
// was useful in the past. It can still be occasionally dialed though and once it provides
// a significant amount of service value again its waiting time is quickly reduced or reset
// to the minimum.
// Note: node weight is also recalculated and updated by this function.
// Note 2: this function should run inside a NodeStateMachine operation
func (s *ServerPool) setRedialWait(node *enode.Node, addDialCost int64, waitStep float64) {
n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)
sessionValue, totalValue := s.serviceValue(node)
totalDialCost := s.addDialCost(&n, addDialCost)
// if the current dial session has yielded at least the average value/dial cost ratio
// then the waiting time should be reset to the minimum. If the session value
// is below average but still positive then timeout is limited to the ratio of
// average / current service value multiplied by the minimum timeout. If the attempt
// was unsuccessful then timeout is raised exponentially without limitation.
// Note: dialCost is used in the formula below even if dial was not attempted at all
// because the pre-negotiation query did not return a positive result. In this case
// the ratio has no meaning anyway and waitFactor is always raised, though in smaller
// steps because queries are cheaper and therefore we can allow more failed attempts.
unixTime := s.unixTime()
plannedTimeout := float64(n.redialWaitEnd - n.redialWaitStart) // last planned redialWait timeout
var actualWait float64 // actual waiting time elapsed
if unixTime > n.redialWaitEnd {
// the planned timeout has elapsed
actualWait = plannedTimeout
} else {
// if the node was redialed earlier then we do not raise the planned timeout
// exponentially because that could lead to the timeout rising very high in
// a short amount of time
// Note that in case of an early redial actualWait also includes the dial
// timeout or connection time of the last attempt but it still serves its
// purpose of preventing the timeout rising quicker than linearly as a function
// of total time elapsed without a successful connection.
actualWait = float64(unixTime - n.redialWaitStart)
}
// raise timeout exponentially if the last planned timeout has elapsed
// (use at least the last planned timeout otherwise)
nextTimeout := actualWait * waitStep
if plannedTimeout > nextTimeout {
nextTimeout = plannedTimeout
}
// we reduce the waiting time if the server has provided service value during the
// connection (but never under the minimum)
a := totalValue * dialCost * float64(minRedialWait)
b := float64(totalDialCost) * sessionValue
if a < b*nextTimeout {
nextTimeout = a / b
}
if nextTimeout < minRedialWait {
nextTimeout = minRedialWait
}
wait := time.Duration(float64(time.Second) * nextTimeout)
if wait < waitThreshold {
n.redialWaitStart = unixTime
n.redialWaitEnd = unixTime + int64(nextTimeout)
s.ns.SetFieldSub(node, sfiNodeHistory, n)
s.ns.SetStateSub(node, sfRedialWait, nodestate.Flags{}, wait)
s.updateWeight(node, totalValue, totalDialCost)
} else {
// discard known node statistics if waiting time is very long because the node
// hasn't been responsive for a very long time
s.ns.SetFieldSub(node, sfiNodeHistory, nil)
s.ns.SetFieldSub(node, sfiNodeWeight, nil)
s.ns.SetStateSub(node, nodestate.Flags{}, sfHasValue, 0)
}
}
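// Illustrative example: a failed dial (sessionValue == 0) whose last
// planned 40s timeout fully elapsed is raised to 40 * dialWaitStep == 60s,
// while a failed pre-negotiation query only grows it by queryWaitStep to
// ~40.8s; a session earning at least the average value/dial-cost ratio
// pushes the wait back down toward minRedialWait (10s).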
// calculateWeight calculates and sets the node weight without altering the node history.
// This function should be called during startup and shutdown only, otherwise setRedialWait
// will keep the weights updated as the underlying statistics are adjusted.
// Note: this function should run inside a NodeStateMachine operation
func (s *ServerPool) calculateWeight(node *enode.Node) {
n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)
_, totalValue := s.serviceValue(node)
totalDialCost := s.addDialCost(&n, 0)
s.updateWeight(node, totalValue, totalDialCost)
}
// API returns the vflux client API
func (s *ServerPool) API() *PrivateClientAPI {
return NewPrivateClientAPI(s.vt)
}
type dummyIdentity enode.ID
func (id dummyIdentity) Verify(r *enr.Record, sig []byte) error { return nil }
func (id dummyIdentity) NodeAddr(r *enr.Record) []byte { return id[:] }
// DialNode replaces the given enode with a locally generated one containing the ENR
// stored in the sfiLocalAddress field if present. This workaround ensures that nodes
// on the local network can be dialed at the local address if a connection has been
// successfully established previously.
// Note that NodeStateMachine always remembers the enode with the latest version of
// the remote signed ENR. ENR filtering should be performed on that version while
// DialNode should be used for dialing the node over TCP or UDP.
func (s *ServerPool) DialNode(n *enode.Node) *enode.Node {
if enr, ok := s.ns.GetField(n, sfiLocalAddress).(*enr.Record); ok {
n, _ := enode.New(dummyIdentity(n.ID()), enr)
return n
}
return n
}
// Persist immediately stores the state of a node in the node database
func (s *ServerPool) Persist(n *enode.Node) {
s.ns.Persist(n)
}

@ -1,424 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"math/rand"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
)
const (
spTestNodes = 1000
spTestTarget = 5
spTestLength = 10000
spMinTotal = 40000
spMaxTotal = 50000
)
func testNodeID(i int) enode.ID {
return enode.ID{42, byte(i % 256), byte(i / 256)}
}
func testNodeIndex(id enode.ID) int {
if id[0] != 42 {
return -1
}
return int(id[1]) + int(id[2])*256
}
type ServerPoolTest struct {
db ethdb.KeyValueStore
clock *mclock.Simulated
quit chan chan struct{}
preNeg, preNegFail bool
sp *ServerPool
spi enode.Iterator
input enode.Iterator
testNodes []spTestNode
trusted []string
waitCount, waitEnded int32
// preNegLock protects the cycle counter, testNodes list and its connected field
// (accessed from both the main thread and the preNeg callback)
preNegLock sync.Mutex
queryWg *sync.WaitGroup // a new wait group is created each time the simulation is started
stopping bool // stopping avoids calling queryWg.Add after queryWg.Wait
cycle, conn, servedConn int
serviceCycles, dialCount int
disconnect map[int][]int
}
type spTestNode struct {
connectCycles, waitCycles int
nextConnCycle, totalConn int
connected, service bool
node *enode.Node
}
func newServerPoolTest(preNeg, preNegFail bool) *ServerPoolTest {
nodes := make([]*enode.Node, spTestNodes)
for i := range nodes {
nodes[i] = enode.SignNull(&enr.Record{}, testNodeID(i))
}
return &ServerPoolTest{
clock: &mclock.Simulated{},
db: memorydb.New(),
input: enode.CycleNodes(nodes),
testNodes: make([]spTestNode, spTestNodes),
preNeg: preNeg,
preNegFail: preNegFail,
}
}
func (s *ServerPoolTest) beginWait() {
// ensure that dialIterator and the maximal number of pre-neg queries are not all stuck in a waiting state
for atomic.AddInt32(&s.waitCount, 1) > preNegLimit {
atomic.AddInt32(&s.waitCount, -1)
s.clock.Run(time.Second)
}
}
func (s *ServerPoolTest) endWait() {
atomic.AddInt32(&s.waitCount, -1)
atomic.AddInt32(&s.waitEnded, 1)
}
func (s *ServerPoolTest) addTrusted(i int) {
s.trusted = append(s.trusted, enode.SignNull(&enr.Record{}, testNodeID(i)).String())
}
func (s *ServerPoolTest) start() {
var testQuery QueryFunc
s.queryWg = new(sync.WaitGroup)
if s.preNeg {
testQuery = func(node *enode.Node) int {
s.preNegLock.Lock()
if s.stopping {
s.preNegLock.Unlock()
return 0
}
s.queryWg.Add(1)
idx := testNodeIndex(node.ID())
n := &s.testNodes[idx]
canConnect := !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle
s.preNegLock.Unlock()
defer s.queryWg.Done()
if s.preNegFail {
// simulate a scenario where UDP queries never work
s.beginWait()
s.clock.Sleep(time.Second * 5)
s.endWait()
return -1
}
switch idx % 3 {
case 0:
// pre-neg returns true only if connection is possible
if canConnect {
return 1
}
return 0
case 1:
// pre-neg returns true but connection might still fail
return 1
case 2:
// pre-neg returns true if connection is possible, otherwise timeout (node unresponsive)
if canConnect {
return 1
}
s.beginWait()
s.clock.Sleep(time.Second * 5)
s.endWait()
return -1
}
return -1
}
}
requestList := make([]RequestInfo, testReqTypes)
for i := range requestList {
requestList[i] = RequestInfo{Name: "testreq" + strconv.Itoa(i), InitAmount: 1, InitValue: 1}
}
s.sp, s.spi = NewServerPool(s.db, []byte("sp:"), 0, testQuery, s.clock, s.trusted, requestList)
s.sp.AddSource(s.input)
s.sp.validSchemes = enode.ValidSchemesForTesting
s.sp.unixTime = func() int64 { return int64(s.clock.Now()) / int64(time.Second) }
s.disconnect = make(map[int][]int)
s.sp.Start()
s.quit = make(chan chan struct{})
go func() {
last := int32(-1)
for {
select {
case <-time.After(time.Millisecond * 100):
c := atomic.LoadInt32(&s.waitEnded)
if c == last {
// advance clock if test is stuck (might happen in rare cases)
s.clock.Run(time.Second)
}
last = c
case quit := <-s.quit:
close(quit)
return
}
}
}()
}
func (s *ServerPoolTest) stop() {
// disable further queries and wait if one is currently running
s.preNegLock.Lock()
s.stopping = true
s.preNegLock.Unlock()
s.queryWg.Wait()
quit := make(chan struct{})
s.quit <- quit
<-quit
s.sp.Stop()
s.spi.Close()
s.preNegLock.Lock()
s.stopping = false
s.preNegLock.Unlock()
for i := range s.testNodes {
n := &s.testNodes[i]
if n.connected {
n.totalConn += s.cycle
}
n.connected = false
n.node = nil
n.nextConnCycle = 0
}
s.conn, s.servedConn = 0, 0
}
func (s *ServerPoolTest) run() {
for count := spTestLength; count > 0; count-- {
if dcList := s.disconnect[s.cycle]; dcList != nil {
for _, idx := range dcList {
n := &s.testNodes[idx]
s.sp.UnregisterNode(n.node)
n.totalConn += s.cycle
s.preNegLock.Lock()
n.connected = false
s.preNegLock.Unlock()
n.node = nil
s.conn--
if n.service {
s.servedConn--
}
n.nextConnCycle = s.cycle + n.waitCycles
}
delete(s.disconnect, s.cycle)
}
if s.conn < spTestTarget {
s.dialCount++
s.beginWait()
s.spi.Next()
s.endWait()
dial := s.spi.Node()
id := dial.ID()
idx := testNodeIndex(id)
n := &s.testNodes[idx]
if !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle {
s.conn++
if n.service {
s.servedConn++
}
n.totalConn -= s.cycle
s.preNegLock.Lock()
n.connected = true
s.preNegLock.Unlock()
dc := s.cycle + n.connectCycles
s.disconnect[dc] = append(s.disconnect[dc], idx)
n.node = dial
nv, _ := s.sp.RegisterNode(n.node)
if n.service {
nv.Served([]ServedRequest{{ReqType: 0, Amount: 100}}, 0)
}
}
}
s.serviceCycles += s.servedConn
s.clock.Run(time.Second)
s.preNegLock.Lock()
s.cycle++
s.preNegLock.Unlock()
}
}
func (s *ServerPoolTest) setNodes(count, conn, wait int, service, trusted bool) (res []int) {
for ; count > 0; count-- {
idx := rand.Intn(spTestNodes)
for s.testNodes[idx].connectCycles != 0 || s.testNodes[idx].connected {
idx = rand.Intn(spTestNodes)
}
res = append(res, idx)
s.preNegLock.Lock()
s.testNodes[idx] = spTestNode{
connectCycles: conn,
waitCycles: wait,
service: service,
}
s.preNegLock.Unlock()
if trusted {
s.addTrusted(idx)
}
}
return
}
func (s *ServerPoolTest) resetNodes() {
for i, n := range s.testNodes {
if n.connected {
n.totalConn += s.cycle
s.sp.UnregisterNode(n.node)
}
s.preNegLock.Lock()
s.testNodes[i] = spTestNode{totalConn: n.totalConn}
s.preNegLock.Unlock()
}
s.conn, s.servedConn = 0, 0
s.disconnect = make(map[int][]int)
s.trusted = nil
}
func (s *ServerPoolTest) checkNodes(t *testing.T, nodes []int) {
var sum int
for _, idx := range nodes {
n := &s.testNodes[idx]
if n.connected {
n.totalConn += s.cycle
}
sum += n.totalConn
n.totalConn = 0
if n.connected {
n.totalConn -= s.cycle
}
}
if sum < spMinTotal || sum > spMaxTotal {
t.Errorf("Total connection amount %d outside expected range %d to %d", sum, spMinTotal, spMaxTotal)
}
}
func TestServerPool(t *testing.T) {
t.Parallel()
testServerPool(t, false, false)
}
func TestServerPoolWithPreNeg(t *testing.T) {
t.Parallel()
testServerPool(t, true, false)
}
func TestServerPoolWithPreNegFail(t *testing.T) {
t.Parallel()
testServerPool(t, true, true)
}
func testServerPool(t *testing.T, preNeg, fail bool) {
s := newServerPoolTest(preNeg, fail)
nodes := s.setNodes(100, 200, 200, true, false)
s.setNodes(100, 20, 20, false, false)
s.start()
s.run()
s.stop()
s.checkNodes(t, nodes)
}
func TestServerPoolChangedNodes(t *testing.T) {
t.Parallel()
testServerPoolChangedNodes(t, false)
}
func TestServerPoolChangedNodesWithPreNeg(t *testing.T) {
t.Parallel()
testServerPoolChangedNodes(t, true)
}
func testServerPoolChangedNodes(t *testing.T, preNeg bool) {
s := newServerPoolTest(preNeg, false)
nodes := s.setNodes(100, 200, 200, true, false)
s.setNodes(100, 20, 20, false, false)
s.start()
s.run()
s.checkNodes(t, nodes)
for i := 0; i < 3; i++ {
s.resetNodes()
nodes := s.setNodes(100, 200, 200, true, false)
s.setNodes(100, 20, 20, false, false)
s.run()
s.checkNodes(t, nodes)
}
s.stop()
}
func TestServerPoolRestartNoDiscovery(t *testing.T) {
t.Parallel()
testServerPoolRestartNoDiscovery(t, false)
}
func TestServerPoolRestartNoDiscoveryWithPreNeg(t *testing.T) {
t.Parallel()
testServerPoolRestartNoDiscovery(t, true)
}
func testServerPoolRestartNoDiscovery(t *testing.T, preNeg bool) {
s := newServerPoolTest(preNeg, false)
nodes := s.setNodes(100, 200, 200, true, false)
s.setNodes(100, 20, 20, false, false)
s.start()
s.run()
s.stop()
s.checkNodes(t, nodes)
s.input = nil
s.start()
s.run()
s.stop()
s.checkNodes(t, nodes)
}
func TestServerPoolTrustedNoDiscovery(t *testing.T) {
t.Parallel()
testServerPoolTrustedNoDiscovery(t, false)
}
func TestServerPoolTrustedNoDiscoveryWithPreNeg(t *testing.T) {
t.Parallel()
testServerPoolTrustedNoDiscovery(t, true)
}
func testServerPoolTrustedNoDiscovery(t *testing.T, preNeg bool) {
s := newServerPoolTest(preNeg, false)
trusted := s.setNodes(200, 200, 200, true, true)
s.input = nil
s.start()
s.run()
s.stop()
s.checkNodes(t, trusted)
}

@ -1,237 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"io"
"math"
"time"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/rlp"
)
const (
minResponseTime = time.Millisecond * 50
maxResponseTime = time.Second * 10
timeStatLength = 32
weightScaleFactor = 1000000
)
// ResponseTimeStats is the response time distribution of a set of answered requests,
// weighted with request value, either served by a single server or aggregated for
// multiple servers.
// It is a fixed-length (timeStatLength) distribution vector with linear interpolation.
// The X axis (the time values) is not linear; it should be transformed with
// TimeToStatScale and StatScaleToTime.
type (
ResponseTimeStats struct {
stats [timeStatLength]uint64
exp uint64
}
ResponseTimeWeights [timeStatLength]float64
)
var timeStatsLogFactor = (timeStatLength - 1) / (math.Log(float64(maxResponseTime)/float64(minResponseTime)) + 1)
// TimeToStatScale converts a response time to a distribution vector index. The index
// is represented by a float64 so that linear interpolation can be applied.
func TimeToStatScale(d time.Duration) float64 {
if d < 0 {
return 0
}
r := float64(d) / float64(minResponseTime)
if r > 1 {
r = math.Log(r) + 1
}
r *= timeStatsLogFactor
if r > timeStatLength-1 {
return timeStatLength - 1
}
return r
}
// StatScaleToTime converts a distribution vector index to a response time. The index
// is represented by a float64 so that linear interpolation can be applied.
func StatScaleToTime(r float64) time.Duration {
r /= timeStatsLogFactor
if r > 1 {
r = math.Exp(r - 1)
}
return time.Duration(r * float64(minResponseTime))
}
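// Illustrative numbers: timeStatsLogFactor is ~4.92 here, so
// TimeToStatScale(minResponseTime) is ~4.92 (r == 1, the log branch is not
// taken) and TimeToStatScale(maxResponseTime) caps at the last index 31;
// durations below minResponseTime map linearly onto the first buckets.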
// TimeoutWeights calculates the weight function used for calculating service value
// based on the response time distribution of the received service.
// It is based on the request timeout value of the system. It consists of a half cosine
// function starting with 1, crossing zero at timeout and reaching -1 at 2*timeout.
// After 2*timeout the weight is constant -1.
func TimeoutWeights(timeout time.Duration) (res ResponseTimeWeights) {
for i := range res {
t := StatScaleToTime(float64(i))
if t < 2*timeout {
res[i] = math.Cos(math.Pi / 2 * float64(t) / float64(timeout))
} else {
res[i] = -1
}
}
return
}
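// Illustrative values: with timeout == 1s the weight is cos(0) == 1 at
// t == 0, cos(pi/4) ~= 0.71 at 500ms, 0 at 1s, and -1 from 2s onwards.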
// EncodeRLP implements rlp.Encoder
func (rt *ResponseTimeStats) EncodeRLP(w io.Writer) error {
enc := struct {
Stats [timeStatLength]uint64
Exp uint64
}{rt.stats, rt.exp}
return rlp.Encode(w, &enc)
}
// DecodeRLP implements rlp.Decoder
func (rt *ResponseTimeStats) DecodeRLP(s *rlp.Stream) error {
var enc struct {
Stats [timeStatLength]uint64
Exp uint64
}
if err := s.Decode(&enc); err != nil {
return err
}
rt.stats, rt.exp = enc.Stats, enc.Exp
return nil
}
// Add adds a new response time with the given weight to the distribution.
func (rt *ResponseTimeStats) Add(respTime time.Duration, weight float64, expFactor utils.ExpirationFactor) {
rt.setExp(expFactor.Exp)
weight *= expFactor.Factor * weightScaleFactor
r := TimeToStatScale(respTime)
i := int(r)
r -= float64(i)
rt.stats[i] += uint64(weight * (1 - r))
if i < timeStatLength-1 {
rt.stats[i+1] += uint64(weight * r)
}
}
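// Illustrative example: if TimeToStatScale(respTime) == 6.25, weight 1
// with no expiration is split 75%/25% between buckets 6 and 7, adding
// 750000 and 250000 respectively (both scaled by weightScaleFactor).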
// setExp sets the power of 2 exponent of the structure, scaling base values (the vector
// itself) up or down if necessary.
func (rt *ResponseTimeStats) setExp(exp uint64) {
if exp > rt.exp {
shift := exp - rt.exp
for i, v := range rt.stats {
rt.stats[i] = v >> shift
}
rt.exp = exp
}
if exp < rt.exp {
shift := rt.exp - exp
for i, v := range rt.stats {
rt.stats[i] = v << shift
}
rt.exp = exp
}
}
// Value calculates the total service value based on the given distribution, using the
// specified weight function.
func (rt ResponseTimeStats) Value(weights ResponseTimeWeights, expFactor utils.ExpirationFactor) float64 {
var v float64
for i, s := range rt.stats {
v += float64(s) * weights[i]
}
if v < 0 {
return 0
}
return expFactor.Value(v, rt.exp) / weightScaleFactor
}
// AddStats adds the given ResponseTimeStats to the current one.
func (rt *ResponseTimeStats) AddStats(s *ResponseTimeStats) {
rt.setExp(s.exp)
for i, v := range s.stats {
rt.stats[i] += v
}
}
// SubStats subtracts the given ResponseTimeStats from the current one.
func (rt *ResponseTimeStats) SubStats(s *ResponseTimeStats) {
rt.setExp(s.exp)
for i, v := range s.stats {
if v < rt.stats[i] {
rt.stats[i] -= v
} else {
rt.stats[i] = 0
}
}
}
// Timeout suggests a timeout value based on the previous distribution. The parameter
// is the desired rate of timeouts assuming a similar distribution in the future.
// Note that the actual timeout should have a sensible minimum bound so that operating
// under ideal working conditions for a long time (for example, using a local server
// with very low response times) will not make it very hard for the system to accommodate
// longer response times in the future.
func (rt ResponseTimeStats) Timeout(failRatio float64) time.Duration {
var sum uint64
for _, v := range rt.stats {
sum += v
}
s := uint64(float64(sum) * failRatio)
i := timeStatLength - 1
for i > 0 && s >= rt.stats[i] {
s -= rt.stats[i]
i--
}
r := float64(i) + 0.5
if rt.stats[i] > 0 {
r -= float64(s) / float64(rt.stats[i])
}
if r < 0 {
r = 0
}
th := StatScaleToTime(r)
if th > maxResponseTime {
th = maxResponseTime
}
return th
}
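// Illustrative example: if all accumulated weight sits in a single bucket
// k, Timeout(0.1) stops at i == k with 10% of the weight remaining, so
// r == k + 0.5 - 0.1 == k + 0.4, a timeout just above the response time
// that bucket k represents.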
// RtDistribution represents a distribution as a series of (X, Y) chart coordinates,
// where the X axis is the response time in seconds while the Y axis is the amount of
// service value received with a response time close to the X coordinate.
type RtDistribution [timeStatLength][2]float64
// Distribution returns a RtDistribution, optionally normalized to a sum of 1.
func (rt ResponseTimeStats) Distribution(normalized bool, expFactor utils.ExpirationFactor) (res RtDistribution) {
var mul float64
if normalized {
var sum uint64
for _, v := range rt.stats {
sum += v
}
if sum > 0 {
mul = 1 / float64(sum)
}
} else {
mul = expFactor.Value(float64(1)/weightScaleFactor, rt.exp)
}
for i, v := range rt.stats {
res[i][0] = float64(StatScaleToTime(float64(i))) / float64(time.Second)
res[i][1] = float64(v) * mul
}
return
}

@ -1,145 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"math"
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/les/utils"
)
func TestTransition(t *testing.T) {
t.Parallel()
var epsilon = 0.01
var cases = []time.Duration{
time.Millisecond, minResponseTime,
time.Second, time.Second * 5, maxResponseTime,
}
for _, c := range cases {
got := StatScaleToTime(TimeToStatScale(c))
if float64(got)*(1+epsilon) < float64(c) || float64(got)*(1-epsilon) > float64(c) {
t.Fatalf("Failed to transition back")
}
}
// If the time is too large (exceeds the max response time), the round trip should cap at maxResponseTime.
got := StatScaleToTime(TimeToStatScale(2 * maxResponseTime))
if float64(got)*(1+epsilon) < float64(maxResponseTime) || float64(got)*(1-epsilon) > float64(maxResponseTime) {
t.Fatalf("Failed to transition back")
}
}
var maxResponseWeights = TimeoutWeights(maxResponseTime)
func TestValue(t *testing.T) {
t.Parallel()
noexp := utils.ExpirationFactor{Factor: 1}
for i := 0; i < 1000; i++ {
max := minResponseTime + time.Duration(rand.Int63n(int64(maxResponseTime-minResponseTime)))
min := minResponseTime + time.Duration(rand.Int63n(int64(max-minResponseTime)))
timeout := max/2 + time.Duration(rand.Int63n(int64(maxResponseTime-max/2)))
s := makeRangeStats(min, max, 1000, noexp)
value := s.Value(TimeoutWeights(timeout), noexp)
// calculate the average weight (the average of the given range of the half cosine
// weight function).
minx := math.Pi / 2 * float64(min) / float64(timeout)
maxx := math.Pi / 2 * float64(max) / float64(timeout)
avgWeight := (math.Sin(maxx) - math.Sin(minx)) / (maxx - minx)
expv := 1000 * avgWeight
if expv < 0 {
expv = 0
}
if value < expv-10 || value > expv+10 {
t.Errorf("Value failed (expected %v, got %v)", expv, value)
}
}
}
func TestAddSubExpire(t *testing.T) {
t.Parallel()
var (
sum1, sum2 ResponseTimeStats
sum1ValueExp, sum2ValueExp float64
logOffset utils.Fixed64
)
for i := 0; i < 1000; i++ {
exp := utils.ExpFactor(logOffset)
max := minResponseTime + time.Duration(rand.Int63n(int64(maxResponseTime-minResponseTime)))
min := minResponseTime + time.Duration(rand.Int63n(int64(max-minResponseTime)))
s := makeRangeStats(min, max, 1000, exp)
value := s.Value(maxResponseWeights, exp)
sum1.AddStats(&s)
sum1ValueExp += value
if rand.Intn(2) == 1 {
sum2.AddStats(&s)
sum2ValueExp += value
}
logOffset += utils.Float64ToFixed64(0.001 / math.Log(2))
sum1ValueExp -= sum1ValueExp * 0.001
sum2ValueExp -= sum2ValueExp * 0.001
}
exp := utils.ExpFactor(logOffset)
sum1Value := sum1.Value(maxResponseWeights, exp)
if sum1Value < sum1ValueExp*0.99 || sum1Value > sum1ValueExp*1.01 {
t.Errorf("sum1Value failed (expected %v, got %v)", sum1ValueExp, sum1Value)
}
sum2Value := sum2.Value(maxResponseWeights, exp)
if sum2Value < sum2ValueExp*0.99 || sum2Value > sum2ValueExp*1.01 {
t.Errorf("sum2Value failed (expected %v, got %v)", sum2ValueExp, sum2Value)
}
diff := sum1
diff.SubStats(&sum2)
diffValue := diff.Value(maxResponseWeights, exp)
diffValueExp := sum1ValueExp - sum2ValueExp
if diffValue < diffValueExp*0.99 || diffValue > diffValueExp*1.01 {
t.Errorf("diffValue failed (expected %v, got %v)", diffValueExp, diffValue)
}
}
func TestTimeout(t *testing.T) {
t.Parallel()
testTimeoutRange(t, 0, time.Second)
testTimeoutRange(t, time.Second, time.Second*2)
testTimeoutRange(t, time.Second, maxResponseTime)
}
func testTimeoutRange(t *testing.T, min, max time.Duration) {
s := makeRangeStats(min, max, 1000, utils.ExpirationFactor{Factor: 1})
for i := 2; i < 9; i++ {
to := s.Timeout(float64(i) / 10)
exp := max - (max-min)*time.Duration(i)/10
tol := (max - min) / 50
if to < exp-tol || to > exp+tol {
t.Errorf("Timeout failed (expected %v, got %v)", exp, to)
}
}
}
func makeRangeStats(min, max time.Duration, amount float64, exp utils.ExpirationFactor) ResponseTimeStats {
var s ResponseTimeStats
amount /= 1000
for i := 0; i < 1000; i++ {
s.Add(min+(max-min)*time.Duration(i)/999, amount, exp)
}
return s
}

@ -1,506 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"bytes"
"fmt"
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
const (
vtVersion = 1 // database encoding format for ValueTracker
nvtVersion = 1 // database encoding format for NodeValueTracker
)
var (
vtKey = []byte("vt:")
vtNodeKey = []byte("vtNode:")
)
// NodeValueTracker collects service value statistics for a specific server node
type NodeValueTracker struct {
lock sync.Mutex
vt *ValueTracker
rtStats, lastRtStats ResponseTimeStats
lastTransfer mclock.AbsTime
basket serverBasket
reqCosts []uint64
reqValues []float64
}
// UpdateCosts updates the node value tracker's request cost table
func (nv *NodeValueTracker) UpdateCosts(reqCosts []uint64) {
nv.vt.lock.Lock()
defer nv.vt.lock.Unlock()
nv.updateCosts(reqCosts, nv.vt.refBasket.reqValues, nv.vt.refBasket.reqValueFactor(reqCosts))
}
// updateCosts updates the request cost table of the server. The request value factor
// is also updated based on the given cost table and the current reference basket.
// Note that the contents of the referenced reqValues slice will not change; a new
// reference is passed if the values are updated by ValueTracker.
func (nv *NodeValueTracker) updateCosts(reqCosts []uint64, reqValues []float64, rvFactor float64) {
nv.lock.Lock()
defer nv.lock.Unlock()
nv.reqCosts = reqCosts
nv.reqValues = reqValues
nv.basket.updateRvFactor(rvFactor)
}
// transferStats returns request basket and response time statistics that should be
// added to the global statistics. The contents of the server's own request basket are
// gradually transferred to the main reference basket and removed from the server basket
// with the specified transfer rate.
// The response time statistics are retained at both places and therefore the global
// distribution is always the sum of the individual server distributions.
func (nv *NodeValueTracker) transferStats(now mclock.AbsTime, transferRate float64) (requestBasket, ResponseTimeStats) {
nv.lock.Lock()
defer nv.lock.Unlock()
dt := now - nv.lastTransfer
nv.lastTransfer = now
if dt < 0 {
dt = 0
}
recentRtStats := nv.rtStats
recentRtStats.SubStats(&nv.lastRtStats)
nv.lastRtStats = nv.rtStats
return nv.basket.transfer(-math.Expm1(-transferRate * float64(dt))), recentRtStats
}
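// Worked example (informal): with transfer rate r and elapsed time dt the
// transferred fraction is -expm1(-r*dt) = 1 - e^(-r*dt); for r = 1/hour and
// dt = 1 hour roughly 63.2% of the server basket moves to the reference basket.
// ServedRequest pairs a request type index with the number of requests of
// that type served in a single reply.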
type ServedRequest struct {
ReqType, Amount uint32
}
// Served adds a served request to the node's statistics. An actual request may be composed
// of one or more request types (service vector indices).
func (nv *NodeValueTracker) Served(reqs []ServedRequest, respTime time.Duration) {
nv.vt.statsExpLock.RLock()
expFactor := nv.vt.statsExpFactor
nv.vt.statsExpLock.RUnlock()
nv.lock.Lock()
defer nv.lock.Unlock()
var value float64
for _, r := range reqs {
nv.basket.add(r.ReqType, r.Amount, nv.reqCosts[r.ReqType]*uint64(r.Amount), expFactor)
value += nv.reqValues[r.ReqType] * float64(r.Amount)
}
nv.rtStats.Add(respTime, value, expFactor)
}
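// Usage sketch (hypothetical values): report one request of type 0 and two of
// type 1, answered together within 150ms.
//
//	nv.Served([]ServedRequest{{ReqType: 0, Amount: 1}, {ReqType: 1, Amount: 2}}, 150*time.Millisecond)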
// RtStats returns the node's own response time distribution statistics
func (nv *NodeValueTracker) RtStats() ResponseTimeStats {
nv.lock.Lock()
defer nv.lock.Unlock()
return nv.rtStats
}
// ValueTracker coordinates service value calculation for individual servers and updates
// global statistics
type ValueTracker struct {
clock mclock.Clock
lock sync.Mutex
quit chan chan struct{}
db ethdb.KeyValueStore
connected map[enode.ID]*NodeValueTracker
reqTypeCount int
refBasket referenceBasket
mappings [][]string
currentMapping int
initRefBasket requestBasket
rtStats ResponseTimeStats
transferRate float64
statsExpLock sync.RWMutex
statsExpRate, offlineExpRate float64
statsExpirer utils.Expirer
statsExpFactor utils.ExpirationFactor
}
type valueTrackerEncV1 struct {
Mappings [][]string
RefBasketMapping uint
RefBasket requestBasket
RtStats ResponseTimeStats
ExpOffset, SavedAt uint64
}
type nodeValueTrackerEncV1 struct {
RtStats ResponseTimeStats
ServerBasketMapping uint
ServerBasket requestBasket
}
// RequestInfo is an initializer structure for the service vector.
type RequestInfo struct {
// Name identifies the request type and is used for re-mapping the service vector if necessary
Name string
// InitAmount and InitValue are used to initialize the reference basket
InitAmount, InitValue float64
}
// NewValueTracker creates a new ValueTracker and loads its previously saved state from
// the database if possible.
func NewValueTracker(db ethdb.KeyValueStore, clock mclock.Clock, reqInfo []RequestInfo, updatePeriod time.Duration, transferRate, statsExpRate, offlineExpRate float64) *ValueTracker {
now := clock.Now()
initRefBasket := requestBasket{items: make([]basketItem, len(reqInfo))}
mapping := make([]string, len(reqInfo))
var sumAmount, sumValue float64
for _, req := range reqInfo {
sumAmount += req.InitAmount
sumValue += req.InitAmount * req.InitValue
}
scaleValues := sumAmount * basketFactor / sumValue
for i, req := range reqInfo {
mapping[i] = req.Name
initRefBasket.items[i].amount = uint64(req.InitAmount * basketFactor)
initRefBasket.items[i].value = uint64(req.InitAmount * req.InitValue * scaleValues)
}
vt := &ValueTracker{
clock: clock,
connected: make(map[enode.ID]*NodeValueTracker),
quit: make(chan chan struct{}),
db: db,
reqTypeCount: len(initRefBasket.items),
initRefBasket: initRefBasket,
transferRate: transferRate,
statsExpRate: statsExpRate,
offlineExpRate: offlineExpRate,
}
if vt.loadFromDb(mapping) != nil {
// previous state not saved or invalid, init with default values
vt.refBasket.basket = initRefBasket
vt.mappings = [][]string{mapping}
vt.currentMapping = 0
}
vt.statsExpirer.SetRate(now, statsExpRate)
vt.refBasket.init(vt.reqTypeCount)
vt.periodicUpdate()
go func() {
for {
select {
case <-clock.After(updatePeriod):
vt.lock.Lock()
vt.periodicUpdate()
vt.lock.Unlock()
case quit := <-vt.quit:
close(quit)
return
}
}
}()
return vt
}
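// Construction sketch (hypothetical parameters): track two request types with
// a one-minute update period, a one-hour basket transfer time constant and no
// statistics expiration. db is any open ethdb.KeyValueStore (e.g. memorydb.New()).
//
//	reqInfo := []RequestInfo{
//		{Name: "headers", InitAmount: 10, InitValue: 1},
//		{Name: "bodies", InitAmount: 1, InitValue: 5},
//	}
//	vt := NewValueTracker(db, &mclock.System{}, reqInfo, time.Minute, 1/float64(time.Hour), 0, 0)
//	defer vt.Stop()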
// StatsExpirer returns the statistics expirer so that other values can be expired
// with the same rate as the service value statistics.
func (vt *ValueTracker) StatsExpirer() *utils.Expirer {
return &vt.statsExpirer
}
// StatsExpFactor returns the current expiration factor so that other values can be expired
// with the same rate as the service value statistics.
func (vt *ValueTracker) StatsExpFactor() utils.ExpirationFactor {
vt.statsExpLock.RLock()
defer vt.statsExpLock.RUnlock()
return vt.statsExpFactor
}
// loadFromDb loads the value tracker's state from the database and converts saved
// request basket index mapping if it does not match the specified index to name mapping.
func (vt *ValueTracker) loadFromDb(mapping []string) error {
enc, err := vt.db.Get(vtKey)
if err != nil {
return err
}
r := bytes.NewReader(enc)
var version uint
if err := rlp.Decode(r, &version); err != nil {
log.Error("Decoding value tracker state failed", "err", err)
return err
}
if version != vtVersion {
log.Error("Unknown ValueTracker version", "stored", version, "current", nvtVersion)
return fmt.Errorf("unknown ValueTracker version %d (current version is %d)", version, vtVersion)
}
var vte valueTrackerEncV1
if err := rlp.Decode(r, &vte); err != nil {
log.Error("Decoding value tracker state failed", "err", err)
return err
}
logOffset := utils.Fixed64(vte.ExpOffset)
dt := time.Now().UnixNano() - int64(vte.SavedAt)
if dt > 0 {
logOffset += utils.Float64ToFixed64(float64(dt) * vt.offlineExpRate / math.Log(2))
}
vt.statsExpirer.SetLogOffset(vt.clock.Now(), logOffset)
vt.rtStats = vte.RtStats
vt.mappings = vte.Mappings
vt.currentMapping = -1
loop:
for i, m := range vt.mappings {
if len(m) != len(mapping) {
continue loop
}
for j, s := range mapping {
if m[j] != s {
continue loop
}
}
vt.currentMapping = i
break
}
if vt.currentMapping == -1 {
vt.currentMapping = len(vt.mappings)
vt.mappings = append(vt.mappings, mapping)
}
if int(vte.RefBasketMapping) == vt.currentMapping {
vt.refBasket.basket = vte.RefBasket
} else {
if vte.RefBasketMapping >= uint(len(vt.mappings)) {
log.Error("Unknown request basket mapping", "stored", vte.RefBasketMapping, "current", vt.currentMapping)
return fmt.Errorf("unknown request basket mapping %d (current version is %d)", vte.RefBasketMapping, vt.currentMapping)
}
vt.refBasket.basket = vte.RefBasket.convertMapping(vt.mappings[vte.RefBasketMapping], mapping, vt.initRefBasket)
}
return nil
}
// saveToDb saves the value tracker's state to the database
func (vt *ValueTracker) saveToDb() {
vte := valueTrackerEncV1{
Mappings: vt.mappings,
RefBasketMapping: uint(vt.currentMapping),
RefBasket: vt.refBasket.basket,
RtStats: vt.rtStats,
ExpOffset: uint64(vt.statsExpirer.LogOffset(vt.clock.Now())),
SavedAt: uint64(time.Now().UnixNano()),
}
enc1, err := rlp.EncodeToBytes(uint(vtVersion))
if err != nil {
log.Error("Encoding value tracker state failed", "err", err)
return
}
enc2, err := rlp.EncodeToBytes(&vte)
if err != nil {
log.Error("Encoding value tracker state failed", "err", err)
return
}
if err := vt.db.Put(vtKey, append(enc1, enc2...)); err != nil {
log.Error("Saving value tracker state failed", "err", err)
}
}
// Stop saves the value tracker's state and each loaded node's individual state and
// returns after shutting the internal goroutines down.
func (vt *ValueTracker) Stop() {
quit := make(chan struct{})
vt.quit <- quit
<-quit
vt.lock.Lock()
vt.periodicUpdate()
for id, nv := range vt.connected {
vt.saveNode(id, nv)
}
vt.connected = nil
vt.saveToDb()
vt.lock.Unlock()
}
// Register adds a server node to the value tracker
func (vt *ValueTracker) Register(id enode.ID) *NodeValueTracker {
vt.lock.Lock()
defer vt.lock.Unlock()
if vt.connected == nil {
// ValueTracker has already been stopped
return nil
}
nv := vt.loadOrNewNode(id)
reqTypeCount := len(vt.refBasket.reqValues)
nv.reqCosts = make([]uint64, reqTypeCount)
nv.lastTransfer = vt.clock.Now()
nv.reqValues = vt.refBasket.reqValues
nv.basket.init(reqTypeCount)
vt.connected[id] = nv
return nv
}
// Unregister removes a server node from the value tracker
func (vt *ValueTracker) Unregister(id enode.ID) {
vt.lock.Lock()
defer vt.lock.Unlock()
if nv := vt.connected[id]; nv != nil {
vt.saveNode(id, nv)
delete(vt.connected, id)
}
}
// GetNode returns an individual server node's value tracker. If it did not exist before
// then a new node is created.
func (vt *ValueTracker) GetNode(id enode.ID) *NodeValueTracker {
vt.lock.Lock()
defer vt.lock.Unlock()
return vt.loadOrNewNode(id)
}
// loadOrNewNode returns an individual server node's value tracker. If it did not exist before
// then a new node is created.
func (vt *ValueTracker) loadOrNewNode(id enode.ID) *NodeValueTracker {
if nv, ok := vt.connected[id]; ok {
return nv
}
nv := &NodeValueTracker{vt: vt, lastTransfer: vt.clock.Now()}
enc, err := vt.db.Get(append(vtNodeKey, id[:]...))
if err != nil {
return nv
}
r := bytes.NewReader(enc)
var version uint
if err := rlp.Decode(r, &version); err != nil {
log.Error("Failed to decode node value tracker", "id", id, "err", err)
return nv
}
if version != nvtVersion {
log.Error("Unknown NodeValueTracker version", "stored", version, "current", nvtVersion)
return nv
}
var nve nodeValueTrackerEncV1
if err := rlp.Decode(r, &nve); err != nil {
log.Error("Failed to decode node value tracker", "id", id, "err", err)
return nv
}
nv.rtStats = nve.RtStats
nv.lastRtStats = nve.RtStats
if int(nve.ServerBasketMapping) == vt.currentMapping {
nv.basket.basket = nve.ServerBasket
} else {
if nve.ServerBasketMapping >= uint(len(vt.mappings)) {
log.Error("Unknown request basket mapping", "stored", nve.ServerBasketMapping, "current", vt.currentMapping)
return nv
}
nv.basket.basket = nve.ServerBasket.convertMapping(vt.mappings[nve.ServerBasketMapping], vt.mappings[vt.currentMapping], vt.initRefBasket)
}
return nv
}
// saveNode saves a server node's value tracker to the database
func (vt *ValueTracker) saveNode(id enode.ID, nv *NodeValueTracker) {
recentRtStats := nv.rtStats
recentRtStats.SubStats(&nv.lastRtStats)
vt.rtStats.AddStats(&recentRtStats)
nv.lastRtStats = nv.rtStats
nve := nodeValueTrackerEncV1{
RtStats: nv.rtStats,
ServerBasketMapping: uint(vt.currentMapping),
ServerBasket: nv.basket.basket,
}
enc1, err := rlp.EncodeToBytes(uint(nvtVersion))
if err != nil {
log.Error("Failed to encode service value information", "id", id, "err", err)
return
}
enc2, err := rlp.EncodeToBytes(&nve)
if err != nil {
log.Error("Failed to encode service value information", "id", id, "err", err)
return
}
if err := vt.db.Put(append(vtNodeKey, id[:]...), append(enc1, enc2...)); err != nil {
log.Error("Failed to save service value information", "id", id, "err", err)
}
}
// RtStats returns the global response time distribution statistics
func (vt *ValueTracker) RtStats() ResponseTimeStats {
vt.lock.Lock()
defer vt.lock.Unlock()
vt.periodicUpdate()
return vt.rtStats
}
// periodicUpdate transfers individual node data to the global statistics, normalizes
// the reference basket and updates request values. The global state is also saved to
// the database with each update.
func (vt *ValueTracker) periodicUpdate() {
now := vt.clock.Now()
vt.statsExpLock.Lock()
vt.statsExpFactor = utils.ExpFactor(vt.statsExpirer.LogOffset(now))
vt.statsExpLock.Unlock()
for _, nv := range vt.connected {
basket, rtStats := nv.transferStats(now, vt.transferRate)
vt.refBasket.add(basket)
vt.rtStats.AddStats(&rtStats)
}
vt.refBasket.normalize()
vt.refBasket.updateReqValues()
for _, nv := range vt.connected {
nv.updateCosts(nv.reqCosts, vt.refBasket.reqValues, vt.refBasket.reqValueFactor(nv.reqCosts))
}
vt.saveToDb()
}
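// RequestStatsItem describes one request type in the reference basket: its
// name, the expiration-adjusted request amount and the average value per request.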
type RequestStatsItem struct {
Name string
ReqAmount, ReqValue float64
}
// RequestStats returns the current contents of the reference request basket, with
// request values meaning average per request rather than total.
func (vt *ValueTracker) RequestStats() []RequestStatsItem {
vt.statsExpLock.RLock()
expFactor := vt.statsExpFactor
vt.statsExpLock.RUnlock()
vt.lock.Lock()
defer vt.lock.Unlock()
vt.periodicUpdate()
res := make([]RequestStatsItem, len(vt.refBasket.basket.items))
for i, item := range vt.refBasket.basket.items {
res[i].Name = vt.mappings[vt.currentMapping][i]
res[i].ReqAmount = expFactor.Value(float64(item.amount)/basketFactor, vt.refBasket.basket.exp)
res[i].ReqValue = vt.refBasket.reqValues[i]
}
return res
}
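// Usage sketch: dump the current reference basket contents.
//
//	for _, item := range vt.RequestStats() {
//		// item.ReqValue is the average service value per request of this type
//		_, _, _ = item.Name, item.ReqAmount, item.ReqValue
//	}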

@ -1,137 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"math"
"math/rand"
"strconv"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/les/utils"
)
const (
testReqTypes = 3
testNodeCount = 5
testReqCount = 10000
testRounds = 10
)
func TestValueTracker(t *testing.T) {
t.Parallel()
db := memorydb.New()
clock := &mclock.Simulated{}
requestList := make([]RequestInfo, testReqTypes)
relPrices := make([]float64, testReqTypes)
totalAmount := make([]uint64, testReqTypes)
for i := range requestList {
requestList[i] = RequestInfo{Name: "testreq" + strconv.Itoa(i), InitAmount: 1, InitValue: 1}
totalAmount[i] = 1
relPrices[i] = rand.Float64() + 0.1
}
nodes := make([]*NodeValueTracker, testNodeCount)
for round := 0; round < testRounds; round++ {
makeRequests := round < testRounds-2
useExpiration := round == testRounds-1
var expRate float64
if useExpiration {
expRate = math.Log(2) / float64(time.Hour*100)
}
vt := NewValueTracker(db, clock, requestList, time.Minute, 1/float64(time.Hour), expRate, expRate)
updateCosts := func(i int) {
costList := make([]uint64, testReqTypes)
baseCost := rand.Float64()*10000000 + 100000
for j := range costList {
costList[j] = uint64(baseCost * relPrices[j])
}
nodes[i].UpdateCosts(costList)
}
for i := range nodes {
nodes[i] = vt.Register(enode.ID{byte(i)})
updateCosts(i)
}
if makeRequests {
for i := 0; i < testReqCount; i++ {
reqType := rand.Intn(testReqTypes)
reqAmount := rand.Intn(10) + 1
node := rand.Intn(testNodeCount)
respTime := time.Duration((rand.Float64() + 1) * float64(time.Second) * float64(node+1) / testNodeCount)
totalAmount[reqType] += uint64(reqAmount)
nodes[node].Served([]ServedRequest{{uint32(reqType), uint32(reqAmount)}}, respTime)
clock.Run(time.Second)
}
} else {
clock.Run(time.Hour * 100)
if useExpiration {
for i, a := range totalAmount {
totalAmount[i] = a / 2
}
}
}
vt.Stop()
var sumrp, sumrv float64
for i, rp := range relPrices {
sumrp += rp
sumrv += vt.refBasket.reqValues[i]
}
for i, rp := range relPrices {
ratio := vt.refBasket.reqValues[i] * sumrp / (rp * sumrv)
if ratio < 0.99 || ratio > 1.01 {
t.Errorf("reqValues (%v) does not match relPrices (%v)", vt.refBasket.reqValues, relPrices)
break
}
}
exp := utils.ExpFactor(vt.StatsExpirer().LogOffset(clock.Now()))
basketAmount := make([]uint64, testReqTypes)
for i, bi := range vt.refBasket.basket.items {
basketAmount[i] += uint64(exp.Value(float64(bi.amount), vt.refBasket.basket.exp))
}
if makeRequests {
// if requests were made in this round then the node baskets may still hold
// amounts that have not been transferred to the reference basket yet, so
// count them in as well
for _, node := range nodes {
for i, bi := range node.basket.basket.items {
basketAmount[i] += uint64(exp.Value(float64(bi.amount), node.basket.basket.exp))
}
}
}
for i, a := range basketAmount {
amount := a / basketFactor
if amount+10 < totalAmount[i] || amount > totalAmount[i]+10 {
t.Errorf("totalAmount[%d] mismatch in round %d (expected %d, got %d)", i, round, totalAmount[i], amount)
}
}
var sumValue float64
for _, node := range nodes {
s := node.RtStats()
sumValue += s.Value(maxResponseWeights, exp)
}
s := vt.RtStats()
mainValue := s.Value(maxResponseWeights, exp)
if sumValue < mainValue-10 || sumValue > mainValue+10 {
t.Errorf("Main rtStats value does not match sum of node rtStats values in round %d (main %v, sum %v)", round, mainValue, sumValue)
}
}
}

@ -1,127 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"sync"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
// WrsIterator returns nodes from the specified selectable set with a weighted random
// selection. Selection weights are provided by a callback function.
type WrsIterator struct {
lock sync.Mutex
cond *sync.Cond
ns *nodestate.NodeStateMachine
wrs *utils.WeightedRandomSelect
nextNode *enode.Node
closed bool
}
// NewWrsIterator creates a new WrsIterator. Nodes are selectable if they have all the required
// and none of the disabled flags set. When a node is selected the selectedFlag is set which also
// disables further selectability until it is removed or times out.
func NewWrsIterator(ns *nodestate.NodeStateMachine, requireFlags, disableFlags nodestate.Flags, weightField nodestate.Field) *WrsIterator {
wfn := func(i interface{}) uint64 {
n := ns.GetNode(i.(enode.ID))
if n == nil {
return 0
}
wt, _ := ns.GetField(n, weightField).(uint64)
return wt
}
w := &WrsIterator{
ns: ns,
wrs: utils.NewWeightedRandomSelect(wfn),
}
w.cond = sync.NewCond(&w.lock)
ns.SubscribeField(weightField, func(n *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
if state.HasAll(requireFlags) && state.HasNone(disableFlags) {
w.lock.Lock()
w.wrs.Update(n.ID())
w.lock.Unlock()
w.cond.Signal()
}
})
ns.SubscribeState(requireFlags.Or(disableFlags), func(n *enode.Node, oldState, newState nodestate.Flags) {
oldMatch := oldState.HasAll(requireFlags) && oldState.HasNone(disableFlags)
newMatch := newState.HasAll(requireFlags) && newState.HasNone(disableFlags)
if newMatch == oldMatch {
return
}
w.lock.Lock()
if newMatch {
w.wrs.Update(n.ID())
} else {
w.wrs.Remove(n.ID())
}
w.lock.Unlock()
w.cond.Signal()
})
return w
}
// Next selects the next node.
func (w *WrsIterator) Next() bool {
w.nextNode = w.chooseNode()
return w.nextNode != nil
}
func (w *WrsIterator) chooseNode() *enode.Node {
w.lock.Lock()
defer w.lock.Unlock()
for {
for !w.closed && w.wrs.IsEmpty() {
w.cond.Wait()
}
if w.closed {
return nil
}
// Choose the next node at random. Even though w.wrs is guaranteed
// non-empty here, Choose might return nil if all items have weight
// zero.
if c := w.wrs.Choose(); c != nil {
id := c.(enode.ID)
w.wrs.Remove(id)
return w.ns.GetNode(id)
}
}
}
// Close ends the iterator.
func (w *WrsIterator) Close() {
w.lock.Lock()
w.closed = true
w.lock.Unlock()
w.cond.Signal()
}
// Node returns the current node.
func (w *WrsIterator) Node() *enode.Node {
w.lock.Lock()
defer w.lock.Unlock()
return w.nextNode
}
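// Consumption sketch: Next blocks until a selectable node is available and
// returns false only after Close is called (typically from another goroutine).
//
//	for it.Next() {
//		n := it.Node()
//		// dial or inspect n; selecting it sets the selectedFlag, which
//		// disables further selection until the flag is removed or times out
//		_ = n
//	}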

@ -1,105 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
var (
testSetup = &nodestate.Setup{}
sfTest1 = testSetup.NewFlag("test1")
sfTest2 = testSetup.NewFlag("test2")
sfTest3 = testSetup.NewFlag("test3")
sfTest4 = testSetup.NewFlag("test4")
sfiTestWeight = testSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0)))
)
const iterTestNodeCount = 6
func TestWrsIterator(t *testing.T) {
t.Parallel()
ns := nodestate.NewNodeStateMachine(nil, nil, &mclock.Simulated{}, testSetup)
w := NewWrsIterator(ns, sfTest2, sfTest3.Or(sfTest4), sfiTestWeight)
ns.Start()
for i := 1; i <= iterTestNodeCount; i++ {
ns.SetState(testNode(i), sfTest1, nodestate.Flags{}, 0)
ns.SetField(testNode(i), sfiTestWeight, uint64(1))
}
next := func() int {
ch := make(chan struct{})
go func() {
w.Next()
close(ch)
}()
select {
case <-ch:
case <-time.After(time.Second * 5):
t.Fatalf("Iterator.Next() timeout")
}
node := w.Node()
ns.SetState(node, sfTest4, nodestate.Flags{}, 0)
return testNodeIndex(node.ID())
}
set := make(map[int]bool)
expset := func() {
for len(set) > 0 {
n := next()
if !set[n] {
t.Errorf("Item returned by iterator not in the expected set (got %d)", n)
}
delete(set, n)
}
}
ns.SetState(testNode(1), sfTest2, nodestate.Flags{}, 0)
ns.SetState(testNode(2), sfTest2, nodestate.Flags{}, 0)
ns.SetState(testNode(3), sfTest2, nodestate.Flags{}, 0)
set[1] = true
set[2] = true
set[3] = true
expset()
ns.SetState(testNode(4), sfTest2, nodestate.Flags{}, 0)
ns.SetState(testNode(5), sfTest2.Or(sfTest3), nodestate.Flags{}, 0)
ns.SetState(testNode(6), sfTest2, nodestate.Flags{}, 0)
set[4] = true
set[6] = true
expset()
ns.SetField(testNode(2), sfiTestWeight, uint64(0))
ns.SetState(testNode(1), nodestate.Flags{}, sfTest4, 0)
ns.SetState(testNode(2), nodestate.Flags{}, sfTest4, 0)
ns.SetState(testNode(3), nodestate.Flags{}, sfTest4, 0)
set[1] = true
set[3] = true
expset()
ns.SetField(testNode(2), sfiTestWeight, uint64(1))
ns.SetState(testNode(2), nodestate.Flags{}, sfTest2, 0)
ns.SetState(testNode(1), nodestate.Flags{}, sfTest4, 0)
ns.SetState(testNode(2), sfTest2, sfTest4, 0)
ns.SetState(testNode(3), nodestate.Flags{}, sfTest4, 0)
set[1] = true
set[2] = true
set[3] = true
expset()
ns.Stop()
}

@ -1,180 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vflux
import (
"errors"
"math"
"math/big"
"github.com/ethereum/go-ethereum/rlp"
)
var ErrNoReply = errors.New("no reply for given request")
const (
MaxRequestLength = 16 // max number of individual requests in a batch
CapacityQueryName = "cq"
CapacityQueryMaxLen = 16
)
type (
// Request describes a single vflux request inside a batch. Service and request
// type are identified by strings, parameters are RLP encoded.
Request struct {
Service, Name string
Params []byte
}
// Requests are a batch of vflux requests
Requests []Request
// Replies are the replies to a batch of requests
Replies [][]byte
// CapacityQueryReq is the encoding format of the capacity query
CapacityQueryReq struct {
Bias uint64 // seconds
AddTokens []IntOrInf
}
// CapacityQueryReply is the encoding format of the response to the capacity query
CapacityQueryReply []uint64
)
// Add encodes and adds a new request to the batch
func (r *Requests) Add(service, name string, val interface{}) (int, error) {
enc, err := rlp.EncodeToBytes(val)
if err != nil {
return -1, err
}
*r = append(*r, Request{
Service: service,
Name: name,
Params: enc,
})
return len(*r) - 1, nil
}
// Get decodes the reply to the i-th request in the batch
func (r Replies) Get(i int, val interface{}) error {
if i < 0 || i >= len(r) {
return ErrNoReply
}
return rlp.DecodeBytes(r[i], val)
}
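// Round-trip sketch (serviceID stands for whatever identifier the server
// registered; hypothetical here): encode a capacity query into a batch and
// decode the reply using the index returned by Add.
//
//	var reqs Requests
//	idx, _ := reqs.Add(serviceID, CapacityQueryName, CapacityQueryReq{Bias: 5})
//	// ... exchange reqs for replies over the wire ...
//	var reply CapacityQueryReply
//	err := replies.Get(idx, &reply)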
const (
IntNonNegative = iota
IntNegative
IntPlusInf
IntMinusInf
)
// IntOrInf is the encoding format for arbitrary length signed integers that can also
// hold the values of +Inf or -Inf
type IntOrInf struct {
Type uint8
Value big.Int
}
// BigInt returns the value as a big.Int or panics if the value is infinity
func (i *IntOrInf) BigInt() *big.Int {
switch i.Type {
case IntNonNegative:
return new(big.Int).Set(&i.Value)
case IntNegative:
return new(big.Int).Neg(&i.Value)
case IntPlusInf:
panic(nil) // caller should check Inf() before trying to convert to big.Int
case IntMinusInf:
panic(nil)
}
return &big.Int{} // invalid type decodes to 0 value
}
// Inf returns 1 if the value is +Inf, -1 if it is -Inf, 0 otherwise
func (i *IntOrInf) Inf() int {
switch i.Type {
case IntPlusInf:
return 1
case IntMinusInf:
return -1
}
return 0 // invalid type decodes to 0 value
}
// Int64 clamps the value to the [MinInt64, MaxInt64] range (mapping +Inf and -Inf to the respective extremes) and returns it as an int64
func (i *IntOrInf) Int64() int64 {
switch i.Type {
case IntNonNegative:
if i.Value.IsInt64() {
return i.Value.Int64()
} else {
return math.MaxInt64
}
case IntNegative:
if i.Value.IsInt64() {
return -i.Value.Int64()
} else {
return math.MinInt64
}
case IntPlusInf:
return math.MaxInt64
case IntMinusInf:
return math.MinInt64
}
return 0 // invalid type decodes to 0 value
}
// SetBigInt sets the value to the given big.Int
func (i *IntOrInf) SetBigInt(v *big.Int) {
if v.Sign() >= 0 {
i.Type = IntNonNegative
i.Value.Set(v)
} else {
i.Type = IntNegative
i.Value.Neg(v)
}
}
// SetInt64 sets the value to the given int64. Note that MaxInt64 translates to +Inf
// while MinInt64 translates to -Inf.
func (i *IntOrInf) SetInt64(v int64) {
if v >= 0 {
if v == math.MaxInt64 {
i.Type = IntPlusInf
} else {
i.Type = IntNonNegative
i.Value.SetInt64(v)
}
} else {
if v == math.MinInt64 {
i.Type = IntMinusInf
} else {
i.Type = IntNegative
i.Value.SetInt64(-v)
}
}
}
// SetInf sets the value to +Inf or -Inf
func (i *IntOrInf) SetInf(sign int) {
if sign == 1 {
i.Type = IntPlusInf
} else {
i.Type = IntMinusInf
}
}
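// Round-trip sketch: MaxInt64 and MinInt64 deliberately map to the infinities.
//
//	var v IntOrInf
//	v.SetInt64(math.MaxInt64)
//	_ = v.Inf() // 1
//	v.SetInt64(-42)
//	_ = v.Int64() // -42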

@ -1,693 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"errors"
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
var errBalanceOverflow = errors.New("balance overflow")
const maxBalance = math.MaxInt64 // maximum allowed balance value
const (
balanceCallbackUpdate = iota // called when priority drops below the last minimum estimate
balanceCallbackZero // called when priority drops to zero (positive balance exhausted)
balanceCallbackCount // total number of balance callbacks
)
// PriceFactors determine the pricing policy (may apply either to positive or
// negative balances which may have different factors).
// - TimeFactor is cost unit per nanosecond of connection time
// - CapacityFactor is cost unit per nanosecond of connection time per 1000000 capacity
// - RequestFactor is cost unit per request "realCost" unit
type PriceFactors struct {
TimeFactor, CapacityFactor, RequestFactor float64
}
// connectionPrice returns the price of connection per nanosecond at the given capacity
// and the estimated average request cost.
func (p PriceFactors) connectionPrice(cap uint64, avgReqCost float64) float64 {
return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 + p.RequestFactor*avgReqCost
}
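// Worked example (hypothetical factors): with TimeFactor=1, CapacityFactor=2,
// RequestFactor=0.5, cap=1000000 and avgReqCost=10 the price per nanosecond is
// 1 + 1000000*2/1000000 + 0.5*10 = 8.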
type (
// nodePriority interface provides current and estimated future priorities on demand
nodePriority interface {
// priority should return the current priority of the node (higher is better)
priority(cap uint64) int64
// estimatePriority should return a lower estimate for the minimum of the node priority
// value starting from the current moment until the given time. If the priority goes
// under the returned estimate before the specified moment then it is the caller's
// responsibility to signal with updateFlag.
estimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64
}
// ReadOnlyBalance provides read-only operations on the node balance
ReadOnlyBalance interface {
nodePriority
GetBalance() (uint64, uint64)
GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue)
GetPriceFactors() (posFactor, negFactor PriceFactors)
}
// ConnectedBalance provides operations permitted on connected nodes (non-read-only
// operations are not permitted inside a BalanceOperation)
ConnectedBalance interface {
ReadOnlyBalance
SetPriceFactors(posFactor, negFactor PriceFactors)
RequestServed(cost uint64) uint64
}
// AtomicBalanceOperator provides operations permitted in an atomic BalanceOperation
AtomicBalanceOperator interface {
ReadOnlyBalance
AddBalance(amount int64) (uint64, uint64, error)
SetBalance(pos, neg uint64) error
}
)
// nodeBalance keeps track of the positive and negative balances of a connected
// client and calculates actual and projected future priority values.
// Implements nodePriority interface.
type nodeBalance struct {
bt *balanceTracker
lock sync.RWMutex
node *enode.Node
connAddress string
active, hasPriority, setFlags bool
capacity uint64
balance balance
posFactor, negFactor PriceFactors
sumReqCost uint64
lastUpdate, nextUpdate, initTime mclock.AbsTime
updateEvent mclock.Timer
// since only a limited and fixed number of callbacks are needed, they are
// stored in a fixed size array ordered by priority threshold.
callbacks [balanceCallbackCount]balanceCallback
// callbackIndex maps balanceCallback constants to callbacks array indexes (-1 if not active)
callbackIndex [balanceCallbackCount]int
callbackCount int // number of active callbacks
}
// balance represents a pair of positive and negative balances
type balance struct {
pos, neg utils.ExpiredValue
posExp, negExp utils.ValueExpirer
}
// posValue returns the value of positive balance at a given timestamp.
func (b balance) posValue(now mclock.AbsTime) uint64 {
return b.pos.Value(b.posExp.LogOffset(now))
}
// negValue returns the value of negative balance at a given timestamp.
func (b balance) negValue(now mclock.AbsTime) uint64 {
return b.neg.Value(b.negExp.LogOffset(now))
}
// addValue adds the given amount to the balance. On success it returns the
// original value, the updated value and the net change that was applied.
// An error is returned if the amount is too large and the balance would overflow.
func (b *balance) addValue(now mclock.AbsTime, amount int64, pos bool, force bool) (uint64, uint64, int64, error) {
var (
val utils.ExpiredValue
offset utils.Fixed64
)
if pos {
offset, val = b.posExp.LogOffset(now), b.pos
} else {
offset, val = b.negExp.LogOffset(now), b.neg
}
old := val.Value(offset)
if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) {
if !force {
return old, 0, 0, errBalanceOverflow
}
val = utils.ExpiredValue{}
amount = maxBalance
}
net := val.Add(amount, offset)
if pos {
b.pos = val
} else {
b.neg = val
}
return old, val.Value(offset), net, nil
}
// setValue sets the internal balance amounts to the given values. Returns an
// error if either value exceeds the maximum allowed balance.
func (b *balance) setValue(now mclock.AbsTime, pos uint64, neg uint64) error {
if pos > maxBalance || neg > maxBalance {
return errBalanceOverflow
}
var pb, nb utils.ExpiredValue
pb.Add(int64(pos), b.posExp.LogOffset(now))
nb.Add(int64(neg), b.negExp.LogOffset(now))
b.pos = pb
b.neg = nb
return nil
}
// balanceCallback represents a single callback that is activated when client priority
// reaches the given threshold
type balanceCallback struct {
id int
threshold int64
callback func()
}
// GetBalance returns the current positive and negative balance.
func (n *nodeBalance) GetBalance() (uint64, uint64) {
n.lock.Lock()
defer n.lock.Unlock()
now := n.bt.clock.Now()
n.updateBalance(now)
return n.balance.posValue(now), n.balance.negValue(now)
}
// GetRawBalance returns the current positive and negative balance
// in the raw (expired value) format.
func (n *nodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) {
n.lock.Lock()
defer n.lock.Unlock()
now := n.bt.clock.Now()
n.updateBalance(now)
return n.balance.pos, n.balance.neg
}
// AddBalance adds the given amount to the positive balance and returns the balance
// before and after the operation. Exceeding maxBalance results in an error (balance is
// unchanged) while adding a negative amount higher than the current balance results in
// zero balance.
// Note: this function should run inside a NodeStateMachine operation
func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) {
var (
err error
old, new uint64
now = n.bt.clock.Now()
callbacks []func()
setPriority bool
)
// Operation with holding the lock
n.bt.updateTotalBalance(n, func() bool {
n.updateBalance(now)
if old, new, _, err = n.balance.addValue(now, amount, true, false); err != nil {
return false
}
callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus()
n.storeBalance(true, false)
return true
})
if err != nil {
return old, old, err
}
// Operation without holding the lock
for _, cb := range callbacks {
cb()
}
if n.setFlags {
if setPriority {
n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0)
}
// Note: priority flag is automatically removed by the zero priority callback if necessary
n.signalPriorityUpdate()
}
return old, new, nil
}
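// Behavior sketch (hypothetical values): starting from a positive balance of
// 100, AddBalance(-500) drains it to zero rather than going negative, while
// AddBalance(maxBalance) on a non-zero balance returns errBalanceOverflow and
// leaves the balance unchanged.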
// SetBalance sets the positive and negative balance to the given values
// Note: this function should run inside a NodeStateMachine operation
func (n *nodeBalance) SetBalance(pos, neg uint64) error {
var (
now = n.bt.clock.Now()
callbacks []func()
setPriority bool
)
// Operation with holding the lock
n.bt.updateTotalBalance(n, func() bool {
n.updateBalance(now)
if err := n.balance.setValue(now, pos, neg); err != nil {
return false
}
callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus()
n.storeBalance(true, true)
return true
})
// Operation without holding the lock
for _, cb := range callbacks {
cb()
}
if n.setFlags {
if setPriority {
n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0)
}
// Note: priority flag is automatically removed by the zero priority callback if necessary
n.signalPriorityUpdate()
}
return nil
}
// RequestServed should be called after serving a request for the given peer
func (n *nodeBalance) RequestServed(cost uint64) (newBalance uint64) {
n.lock.Lock()
var (
check bool
fcost = float64(cost)
now = n.bt.clock.Now()
)
n.updateBalance(now)
if !n.balance.pos.IsZero() {
posCost := -int64(fcost * n.posFactor.RequestFactor)
if posCost == 0 {
fcost = 0
newBalance = n.balance.posValue(now)
} else {
var net int64
_, newBalance, net, _ = n.balance.addValue(now, posCost, true, false)
if posCost == net {
fcost = 0
} else {
fcost *= 1 - float64(net)/float64(posCost)
}
check = true
}
}
if fcost > 0 && n.negFactor.RequestFactor != 0 {
n.balance.addValue(now, int64(fcost*n.negFactor.RequestFactor), false, false)
check = true
}
n.sumReqCost += cost
var callbacks []func()
if check {
callbacks = n.checkCallbacks(now)
}
n.lock.Unlock()
if callbacks != nil {
n.bt.ns.Operation(func() {
for _, cb := range callbacks {
cb()
}
})
}
return
}
// priority returns the actual priority based on the current balance
func (n *nodeBalance) priority(capacity uint64) int64 {
n.lock.Lock()
defer n.lock.Unlock()
now := n.bt.clock.Now()
n.updateBalance(now)
return n.balanceToPriority(now, n.balance, capacity)
}
// estimatePriority gives a lower estimate for the priority at a given time in the future.
// An average request cost per time is assumed that is twice the average cost per time
// in the current session.
// If update is true then a priority callback is added that turns updateFlag on and off
// in case the priority goes below the estimated minimum.
func (n *nodeBalance) estimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 {
n.lock.Lock()
defer n.lock.Unlock()
now := n.bt.clock.Now()
n.updateBalance(now)
b := n.balance // copy the balance
if addBalance != 0 {
b.addValue(now, addBalance, true, true)
}
if future > 0 {
var avgReqCost float64
dt := time.Duration(n.lastUpdate - n.initTime)
if dt > time.Second {
avgReqCost = float64(n.sumReqCost) * 2 / float64(dt)
}
b = n.reducedBalance(b, now, future, capacity, avgReqCost)
}
if bias > 0 {
b = n.reducedBalance(b, now.Add(future), bias, capacity, 0)
}
pri := n.balanceToPriority(now, b, capacity)
// Ensure that biased estimates are always lower than actual priorities, even if
// the bias is very small.
// This ensures that two nodes will not ping-pong update signals forever if both of
// them have zero estimated priority drop in the projected future.
current := n.balanceToPriority(now, n.balance, capacity)
if pri >= current {
pri = current - 1
}
if update {
n.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate)
}
return pri
}
// SetPriceFactors sets the price factors. TimeFactor is the price of a nanosecond of
// connection while RequestFactor is the price of a request cost unit.
func (n *nodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) {
n.lock.Lock()
now := n.bt.clock.Now()
n.updateBalance(now)
n.posFactor, n.negFactor = posFactor, negFactor
callbacks := n.checkCallbacks(now)
n.lock.Unlock()
if callbacks != nil {
n.bt.ns.Operation(func() {
for _, cb := range callbacks {
cb()
}
})
}
}
// GetPriceFactors returns the price factors
func (n *nodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) {
n.lock.Lock()
defer n.lock.Unlock()
return n.posFactor, n.negFactor
}
// activate starts time/capacity cost deduction.
func (n *nodeBalance) activate() {
n.bt.updateTotalBalance(n, func() bool {
if n.active {
return false
}
n.active = true
n.lastUpdate = n.bt.clock.Now()
return true
})
}
// deactivate stops time/capacity cost deduction and saves the balances in the database
func (n *nodeBalance) deactivate() {
n.bt.updateTotalBalance(n, func() bool {
if !n.active {
return false
}
n.updateBalance(n.bt.clock.Now())
if n.updateEvent != nil {
n.updateEvent.Stop()
n.updateEvent = nil
}
n.storeBalance(true, true)
n.active = false
return true
})
}
// updateBalance updates balance based on the time factor
func (n *nodeBalance) updateBalance(now mclock.AbsTime) {
if n.active && now > n.lastUpdate {
n.balance = n.reducedBalance(n.balance, n.lastUpdate, time.Duration(now-n.lastUpdate), n.capacity, 0)
n.lastUpdate = now
}
}
// storeBalance stores the positive and/or negative balance of the node in the database
func (n *nodeBalance) storeBalance(pos, neg bool) {
if pos {
n.bt.storeBalance(n.node.ID().Bytes(), false, n.balance.pos)
}
if neg {
n.bt.storeBalance([]byte(n.connAddress), true, n.balance.neg)
}
}
// addCallback sets up a one-time callback to be called when priority reaches
// the threshold. If it has already reached the threshold the callback is called
// immediately.
// Note: should be called while n.lock is held
// Note 2: the callback function runs inside a NodeStateMachine operation
func (n *nodeBalance) addCallback(id int, threshold int64, callback func()) {
n.removeCallback(id)
idx := 0
for idx < n.callbackCount && threshold > n.callbacks[idx].threshold {
idx++
}
for i := n.callbackCount - 1; i >= idx; i-- {
n.callbackIndex[n.callbacks[i].id]++
n.callbacks[i+1] = n.callbacks[i]
}
n.callbackCount++
n.callbackIndex[id] = idx
n.callbacks[idx] = balanceCallback{id, threshold, callback}
now := n.bt.clock.Now()
n.updateBalance(now)
n.scheduleCheck(now)
}
// removeCallback removes the given callback and returns true if it was active
// Note: should be called while n.lock is held
func (n *nodeBalance) removeCallback(id int) bool {
idx := n.callbackIndex[id]
if idx == -1 {
return false
}
n.callbackIndex[id] = -1
for i := idx; i < n.callbackCount-1; i++ {
n.callbackIndex[n.callbacks[i+1].id]--
n.callbacks[i] = n.callbacks[i+1]
}
n.callbackCount--
return true
}
// checkCallbacks checks whether the threshold of any of the active callbacks
// has been reached and returns the triggered callbacks.
// Note: checkCallbacks assumes that the balance has been recently updated.
func (n *nodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) {
if n.callbackCount == 0 || n.capacity == 0 {
return
}
pri := n.balanceToPriority(now, n.balance, n.capacity)
for n.callbackCount != 0 && n.callbacks[n.callbackCount-1].threshold >= pri {
n.callbackCount--
n.callbackIndex[n.callbacks[n.callbackCount].id] = -1
callbacks = append(callbacks, n.callbacks[n.callbackCount].callback)
}
n.scheduleCheck(now)
return
}
// scheduleCheck sets up or updates a scheduled event to ensure that it will be called
// again just after the next threshold has been reached.
func (n *nodeBalance) scheduleCheck(now mclock.AbsTime) {
if n.callbackCount != 0 {
d, ok := n.timeUntil(n.callbacks[n.callbackCount-1].threshold)
if !ok {
n.nextUpdate = 0
n.updateAfter(0)
return
}
if n.nextUpdate == 0 || n.nextUpdate > now.Add(d) {
if d > time.Second {
// Note: if the scheduled update is not in the very near future then we
// schedule it a bit earlier. This way we may need to update a few extra
// times but don't have to reschedule every time a processed request brings
// the expected firing time a little bit closer.
d = ((d - time.Second) * 7 / 8) + time.Second
}
n.nextUpdate = now.Add(d)
n.updateAfter(d)
}
} else {
n.nextUpdate = 0
n.updateAfter(0)
}
}
// updateAfter schedules a balance update and callback check in the future
func (n *nodeBalance) updateAfter(dt time.Duration) {
if n.updateEvent == nil || n.updateEvent.Stop() {
if dt == 0 {
n.updateEvent = nil
} else {
n.updateEvent = n.bt.clock.AfterFunc(dt, func() {
var callbacks []func()
n.lock.Lock()
if n.callbackCount != 0 {
now := n.bt.clock.Now()
n.updateBalance(now)
callbacks = n.checkCallbacks(now)
}
n.lock.Unlock()
if callbacks != nil {
n.bt.ns.Operation(func() {
for _, cb := range callbacks {
cb()
}
})
}
})
}
}
}
// balanceExhausted should be called when the positive balance is exhausted (priority goes to zero/negative)
// Note: this function should run inside a NodeStateMachine operation
func (n *nodeBalance) balanceExhausted() {
n.lock.Lock()
n.storeBalance(true, false)
n.hasPriority = false
n.lock.Unlock()
if n.setFlags {
n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.setup.priorityFlag, 0)
}
}
// checkPriorityStatus checks whether the node has gained priority status and sets the priority
// callback and flag if necessary. It assumes that the balance has been recently updated.
// Note that the priority flag has to be set by the caller after the mutex has been released.
func (n *nodeBalance) checkPriorityStatus() bool {
if !n.hasPriority && !n.balance.pos.IsZero() {
n.hasPriority = true
n.addCallback(balanceCallbackZero, 0, func() { n.balanceExhausted() })
return true
}
return false
}
// signalPriorityUpdate signals that the priority fell below the previous minimum estimate
// Note: this function should run inside a NodeStateMachine operation
func (n *nodeBalance) signalPriorityUpdate() {
n.bt.ns.SetStateSub(n.node, n.bt.setup.updateFlag, nodestate.Flags{}, 0)
n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.setup.updateFlag, 0)
}
// setCapacity updates the capacity value used for priority calculation
// Note: capacity should never be zero
// Note 2: this function should run inside a NodeStateMachine operation
func (n *nodeBalance) setCapacity(capacity uint64) {
n.lock.Lock()
now := n.bt.clock.Now()
n.updateBalance(now)
n.capacity = capacity
callbacks := n.checkCallbacks(now)
n.lock.Unlock()
for _, cb := range callbacks {
cb()
}
}
// balanceToPriority converts a balance to a priority value. Lower priority means
// first to disconnect. Positive balance translates to positive priority. If positive
// balance is zero then negative balance translates to a negative priority.
func (n *nodeBalance) balanceToPriority(now mclock.AbsTime, b balance, capacity uint64) int64 {
pos := b.posValue(now)
if pos > 0 {
return int64(pos / capacity)
}
return -int64(b.negValue(now))
}
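// Worked example: a positive balance of 2,000,000 at capacity 1000 yields
// priority 2000; zero positive and 500 negative balance yields priority -500.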
// priorityToBalance converts a target priority to a requested balance value.
// If the priority is negative, then minimal negative balance is returned;
// otherwise the minimal positive balance is returned.
func (n *nodeBalance) priorityToBalance(priority int64, capacity uint64) (uint64, uint64) {
if priority > 0 {
return uint64(priority) * capacity, 0
}
return 0, uint64(-priority)
}
// reducedBalance estimates the reduced balance at a given time in the future based
// on the given balance, the time factor and an estimated average request cost per time ratio
func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance {
// since the costs are applied continuously during the dt time period we calculate
// the expiration offset at the middle of the period
var (
at = start.Add(dt / 2)
dtf = float64(dt)
)
if !b.pos.IsZero() {
factor := n.posFactor.connectionPrice(capacity, avgReqCost)
diff := -int64(dtf * factor)
_, _, net, _ := b.addValue(at, diff, true, false)
if net == diff {
dtf = 0
} else {
dtf += float64(net) / factor
}
}
if dtf > 0 {
factor := n.negFactor.connectionPrice(capacity, avgReqCost)
b.addValue(at, int64(dtf*factor), false, false)
}
return b
}
// timeUntil calculates the remaining time needed to reach a given priority level
// assuming that no requests are processed until then. If the given level is never
// reached then (0, false) is returned. If it has already been reached then (0, true)
// is returned.
// Note: the function assumes that the balance has been recently updated and
// calculates the time starting from the last update.
func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) {
var (
now = n.bt.clock.Now()
pos = n.balance.posValue(now)
targetPos, targetNeg = n.priorityToBalance(priority, n.capacity)
diffTime float64
)
if pos > 0 {
timePrice := n.posFactor.connectionPrice(n.capacity, 0)
if timePrice < 1e-100 {
return 0, false
}
if targetPos > 0 {
if targetPos > pos {
return 0, true
}
diffTime = float64(pos-targetPos) / timePrice
return time.Duration(diffTime), true
} else {
diffTime = float64(pos) / timePrice
}
} else {
if targetPos > 0 {
return 0, true
}
}
neg := n.balance.negValue(now)
if targetNeg > neg {
timePrice := n.negFactor.connectionPrice(n.capacity, 0)
if timePrice < 1e-100 {
return 0, false
}
diffTime += float64(targetNeg-neg) / timePrice
}
return time.Duration(diffTime), true
}
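// Worked example (hypothetical factors): with a positive balance worth one
// minute of connection time, TimeFactor=1, CapacityFactor=0 and a target
// priority of 0, timePrice is 1 cost unit per nanosecond, so diffTime =
// pos/timePrice and timeUntil returns roughly one minute.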

@ -1,459 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"math"
"math/rand"
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
type zeroExpirer struct{}
func (z zeroExpirer) SetRate(now mclock.AbsTime, rate float64) {}
func (z zeroExpirer) SetLogOffset(now mclock.AbsTime, logOffset utils.Fixed64) {}
func (z zeroExpirer) LogOffset(now mclock.AbsTime) utils.Fixed64 { return 0 }
type balanceTestClient struct{}
func (client balanceTestClient) FreeClientId() string { return "" }
type balanceTestSetup struct {
clock *mclock.Simulated
db ethdb.KeyValueStore
ns *nodestate.NodeStateMachine
setup *serverSetup
bt *balanceTracker
}
func newBalanceTestSetup(db ethdb.KeyValueStore, posExp, negExp utils.ValueExpirer) *balanceTestSetup {
// Initialize and customize the setup for the balance testing
clock := &mclock.Simulated{}
setup := newServerSetup()
setup.clientField = setup.setup.NewField("balanceTestClient", reflect.TypeOf(balanceTestClient{}))
ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
if posExp == nil {
posExp = zeroExpirer{}
}
if negExp == nil {
negExp = zeroExpirer{}
}
if db == nil {
db = memorydb.New()
}
bt := newBalanceTracker(ns, setup, db, clock, posExp, negExp)
ns.Start()
return &balanceTestSetup{
clock: clock,
db: db,
ns: ns,
setup: setup,
bt: bt,
}
}
func (b *balanceTestSetup) newNode(capacity uint64) *nodeBalance {
node := enode.SignNull(&enr.Record{}, enode.ID{})
b.ns.SetField(node, b.setup.clientField, balanceTestClient{})
if capacity != 0 {
b.ns.SetField(node, b.setup.capacityField, capacity)
}
n, _ := b.ns.GetField(node, b.setup.balanceField).(*nodeBalance)
return n
}
func (b *balanceTestSetup) setBalance(node *nodeBalance, pos, neg uint64) (err error) {
b.bt.BalanceOperation(node.node.ID(), node.connAddress, func(balance AtomicBalanceOperator) {
err = balance.SetBalance(pos, neg)
})
return
}
func (b *balanceTestSetup) addBalance(node *nodeBalance, add int64) (old, new uint64, err error) {
b.bt.BalanceOperation(node.node.ID(), node.connAddress, func(balance AtomicBalanceOperator) {
old, new, err = balance.AddBalance(add)
})
return
}
func (b *balanceTestSetup) stop() {
b.bt.stop()
b.ns.Stop()
}
func TestAddBalance(t *testing.T) {
t.Parallel()
b := newBalanceTestSetup(nil, nil, nil)
defer b.stop()
node := b.newNode(1000)
var inputs = []struct {
delta int64
expect [2]uint64
total uint64
expectErr bool
}{
{100, [2]uint64{0, 100}, 100, false},
{-100, [2]uint64{100, 0}, 0, false},
{-100, [2]uint64{0, 0}, 0, false},
{1, [2]uint64{0, 1}, 1, false},
{maxBalance, [2]uint64{0, 0}, 0, true},
}
for _, i := range inputs {
old, new, err := b.addBalance(node, i.delta)
if i.expectErr {
if err == nil {
t.Fatalf("Expect get error but nil")
}
continue
} else if err != nil {
t.Fatalf("Expect get no error but %v", err)
}
if old != i.expect[0] || new != i.expect[1] {
t.Fatalf("Positive balance mismatch, got %v -> %v", old, new)
}
if b.bt.TotalTokenAmount() != i.total {
t.Fatalf("Total positive balance mismatch, want %v, got %v", i.total, b.bt.TotalTokenAmount())
}
}
}
func TestSetBalance(t *testing.T) {
t.Parallel()
b := newBalanceTestSetup(nil, nil, nil)
defer b.stop()
node := b.newNode(1000)
var inputs = []struct {
pos, neg uint64
}{
{1000, 0},
{0, 1000},
{1000, 1000},
}
for _, i := range inputs {
b.setBalance(node, i.pos, i.neg)
pos, neg := node.GetBalance()
if pos != i.pos {
t.Fatalf("Positive balance mismatch, want %v, got %v", i.pos, pos)
}
if neg != i.neg {
t.Fatalf("Negative balance mismatch, want %v, got %v", i.neg, neg)
}
}
}
func TestBalanceTimeCost(t *testing.T) {
t.Parallel()
b := newBalanceTestSetup(nil, nil, nil)
defer b.stop()
node := b.newNode(1000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
b.setBalance(node, uint64(time.Minute), 0) // 1 minute time allowance
var inputs = []struct {
runTime time.Duration
expPos uint64
expNeg uint64
}{
{time.Second, uint64(time.Second * 59), 0},
{0, uint64(time.Second * 59), 0},
{time.Second * 59, 0, 0},
{time.Second, 0, uint64(time.Second)},
}
for _, i := range inputs {
b.clock.Run(i.runTime)
if pos, _ := node.GetBalance(); pos != i.expPos {
t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
}
if _, neg := node.GetBalance(); neg != i.expNeg {
t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
}
}
b.setBalance(node, uint64(time.Minute), 0) // Refill 1 minute time allowance
for _, i := range inputs {
b.clock.Run(i.runTime)
if pos, _ := node.GetBalance(); pos != i.expPos {
t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
}
if _, neg := node.GetBalance(); neg != i.expNeg {
t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
}
}
}
func TestBalanceReqCost(t *testing.T) {
t.Parallel()
b := newBalanceTestSetup(nil, nil, nil)
defer b.stop()
node := b.newNode(1000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
b.setBalance(node, uint64(time.Minute), 0) // 1 minute of serving time allowance
var inputs = []struct {
reqCost uint64
expPos uint64
expNeg uint64
}{
{uint64(time.Second), uint64(time.Second * 59), 0},
{0, uint64(time.Second * 59), 0},
{uint64(time.Second * 59), 0, 0},
{uint64(time.Second), 0, uint64(time.Second)},
}
for _, i := range inputs {
node.RequestServed(i.reqCost)
if pos, _ := node.GetBalance(); pos != i.expPos {
t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos)
}
if _, neg := node.GetBalance(); neg != i.expNeg {
t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg)
}
}
}
func TestBalanceToPriority(t *testing.T) {
t.Parallel()
b := newBalanceTestSetup(nil, nil, nil)
defer b.stop()
node := b.newNode(1000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
var inputs = []struct {
pos uint64
neg uint64
priority int64
}{
{1000, 0, 1},
{2000, 0, 2}, // Higher balance, higher priority value
{0, 0, 0},
{0, 1000, -1000},
}
for _, i := range inputs {
b.setBalance(node, i.pos, i.neg)
priority := node.priority(1000)
if priority != i.priority {
t.Fatalf("priority mismatch, want %v, got %v", i.priority, priority)
}
}
}
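// Editor's sketch (not part of the removed file): the table above implies the
// priority rule: with a positive balance the priority is pos/capacity,
// otherwise it is the negated negative balance. A minimal model of that rule:
func examplePriorityModel(pos, neg, capacity uint64) int64 {
if pos > 0 {
return int64(pos / capacity)
}
return -int64(neg)
}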
func TestEstimatedPriority(t *testing.T) {
t.Parallel()
b := newBalanceTestSetup(nil, nil, nil)
defer b.stop()
node := b.newNode(1000000000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
b.setBalance(node, uint64(time.Minute), 0)
var inputs = []struct {
runTime time.Duration // time cost
futureTime time.Duration // diff of future time
reqCost uint64 // single request cost
priority int64 // expected estimated priority
}{
{time.Second, time.Second, 0, 58},
{0, time.Second, 0, 58},
// 2 seconds time cost, 1 second estimated time cost, 10^9 request cost,
// 10^9 estimated request cost per second.
{time.Second, time.Second, 1000000000, 55},
// 3 seconds time cost, 3 second estimated time cost, 10^9*2 request cost,
// 4*10^9 estimated request cost.
{time.Second, 3 * time.Second, 1000000000, 48},
// All positive balance is used up
{time.Second * 55, 0, 0, -1},
// 1 minute estimated time cost, 4/58 * 10^9 estimated request cost per sec.
{0, time.Minute, 0, -int64(time.Minute) - int64(time.Second)*120/29},
}
for _, i := range inputs {
b.clock.Run(i.runTime)
node.RequestServed(i.reqCost)
priority := node.estimatePriority(1000000000, 0, i.futureTime, 0, false)
if priority != i.priority {
t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority, priority)
}
}
}
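// Editor's note (worked example for the first input row above, not in the
// original file): the node starts with a 60s positive balance at capacity
// 10^9. Running the clock for 1s costs 1s of balance (59s left) and looking
// 1s into the future costs one more second, leaving 58e9 ns of allowance;
// divided by the capacity of 10^9 this gives the expected priority of 58.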
func TestPositiveBalanceCounting(t *testing.T) {
t.Parallel()
b := newBalanceTestSetup(nil, nil, nil)
defer b.stop()
var nodes []*nodeBalance
for i := 0; i < 100; i += 1 {
node := b.newNode(1000000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
nodes = append(nodes, node)
}
// Allocate service token
var sum uint64
for i := 0; i < 100; i += 1 {
amount := int64(rand.Intn(100) + 100)
b.addBalance(nodes[i], amount)
sum += uint64(amount)
}
if b.bt.TotalTokenAmount() != sum {
t.Fatalf("Invalid token amount")
}
// Change client status
for i := 0; i < 100; i += 1 {
if rand.Intn(2) == 0 {
b.ns.SetField(nodes[i].node, b.setup.capacityField, uint64(1))
}
}
if b.bt.TotalTokenAmount() != sum {
t.Fatalf("Invalid token amount")
}
for i := 0; i < 100; i += 1 {
if rand.Intn(2) == 0 {
b.ns.SetField(nodes[i].node, b.setup.capacityField, uint64(1))
}
}
if b.bt.TotalTokenAmount() != sum {
t.Fatalf("Invalid token amount")
}
}
func TestCallbackChecking(t *testing.T) {
t.Parallel()
b := newBalanceTestSetup(nil, nil, nil)
defer b.stop()
node := b.newNode(1000000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
var inputs = []struct {
priority int64
expDiff time.Duration
}{
{500, time.Millisecond * 500},
{0, time.Second},
{-int64(time.Second), 2 * time.Second},
}
b.setBalance(node, uint64(time.Second), 0)
for _, i := range inputs {
diff, _ := node.timeUntil(i.priority)
if diff != i.expDiff {
t.Fatalf("Time difference mismatch, want %v, got %v", i.expDiff, diff)
}
}
}
func TestCallback(t *testing.T) {
t.Parallel()
b := newBalanceTestSetup(nil, nil, nil)
defer b.stop()
node := b.newNode(1000)
node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})
callCh := make(chan struct{}, 1)
b.setBalance(node, uint64(time.Minute), 0)
node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })
b.clock.Run(time.Minute)
select {
case <-callCh:
case <-time.NewTimer(time.Second).C:
t.Fatalf("Callback hasn't been called yet")
}
b.setBalance(node, uint64(time.Minute), 0)
node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })
node.removeCallback(balanceCallbackZero)
b.clock.Run(time.Minute)
select {
case <-callCh:
t.Fatalf("Callback shouldn't be called")
case <-time.NewTimer(time.Millisecond * 100).C:
}
}
func TestBalancePersistence(t *testing.T) {
t.Parallel()
posExp := &utils.Expirer{}
negExp := &utils.Expirer{}
posExp.SetRate(0, math.Log(2)/float64(time.Hour*2)) // halves every two hours
negExp.SetRate(0, math.Log(2)/float64(time.Hour)) // halves every hour
setup := newBalanceTestSetup(nil, posExp, negExp)
exp := func(balance *nodeBalance, expPos, expNeg uint64) {
pos, neg := balance.GetBalance()
if pos != expPos {
t.Fatalf("Positive balance incorrect, want %v, got %v", expPos, pos)
}
if neg != expNeg {
t.Fatalf("Positive balance incorrect, want %v, got %v", expPos, pos)
}
}
expTotal := func(expTotal uint64) {
total := setup.bt.TotalTokenAmount()
if total != expTotal {
t.Fatalf("Total token amount incorrect, want %v, got %v", expTotal, total)
}
}
expTotal(0)
balance := setup.newNode(0)
expTotal(0)
setup.setBalance(balance, 16000000000, 16000000000)
exp(balance, 16000000000, 16000000000)
expTotal(16000000000)
setup.clock.Run(time.Hour * 2)
exp(balance, 8000000000, 4000000000)
expTotal(8000000000)
setup.stop()
// Test the functionalities after restart
setup = newBalanceTestSetup(setup.db, posExp, negExp)
expTotal(8000000000)
balance = setup.newNode(0)
exp(balance, 8000000000, 4000000000)
expTotal(8000000000)
setup.clock.Run(time.Hour * 2)
exp(balance, 4000000000, 1000000000)
expTotal(4000000000)
setup.stop()
}
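// Editor's sketch (not part of the removed file) of the decay math exercised
// above: SetRate takes a rate in ln-units per nanosecond, so a rate of
// ln(2)/(2h) halves a balance every two hours and 16e9 tokens become 8e9
// after two hours, matching the expectations in the test.
func exampleDecay(balance, rate float64, elapsed time.Duration) float64 {
return balance * math.Exp(-rate*float64(elapsed))
}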

@@ -1,300 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
const (
posThreshold = 1000000 // minimum positive balance that is persisted in the database
negThreshold = 1000000 // minimum negative balance that is persisted in the database
persistExpirationRefresh = time.Minute * 5 // refresh period of the token expiration persistence
)
// balanceTracker tracks positive and negative balances for connected nodes.
// After clientField is set externally, a nodeBalance is created and previous
// balance values are loaded from the database. Both balances are exponentially expired
// values. Costs are deducted from the positive balance if present, otherwise added to
// the negative balance. If the capacity is non-zero then a time cost is applied
// continuously while individual request costs are applied immediately.
// The two balances are translated into a single priority value that also depends
// on the actual capacity.
type balanceTracker struct {
setup *serverSetup
clock mclock.Clock
lock sync.Mutex
ns *nodestate.NodeStateMachine
ndb *nodeDB
posExp, negExp utils.ValueExpirer
posExpTC, negExpTC uint64
defaultPosFactors, defaultNegFactors PriceFactors
active, inactive utils.ExpiredValue
balanceTimer *utils.UpdateTimer
quit chan struct{}
}
// newBalanceTracker creates a new balanceTracker
func newBalanceTracker(ns *nodestate.NodeStateMachine, setup *serverSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *balanceTracker {
ndb := newNodeDB(db, clock)
bt := &balanceTracker{
ns: ns,
setup: setup,
ndb: ndb,
clock: clock,
posExp: posExp,
negExp: negExp,
balanceTimer: utils.NewUpdateTimer(clock, time.Second*10),
quit: make(chan struct{}),
}
posOffset, negOffset := bt.ndb.getExpiration()
posExp.SetLogOffset(clock.Now(), posOffset)
negExp.SetLogOffset(clock.Now(), negOffset)
// Load all persisted balance entries of priority nodes,
// calculate the total number of issued service tokens.
bt.ndb.forEachBalance(false, func(id enode.ID, balance utils.ExpiredValue) bool {
bt.inactive.AddExp(balance)
return true
})
ns.SubscribeField(bt.setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
n, _ := ns.GetField(node, bt.setup.balanceField).(*nodeBalance)
if n == nil {
return
}
ov, _ := oldValue.(uint64)
nv, _ := newValue.(uint64)
if ov == 0 && nv != 0 {
n.activate()
}
if nv != 0 {
n.setCapacity(nv)
}
if ov != 0 && nv == 0 {
n.deactivate()
}
})
ns.SubscribeField(bt.setup.clientField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
type peer interface {
FreeClientId() string
}
if newValue != nil {
n := bt.newNodeBalance(node, newValue.(peer).FreeClientId(), true)
bt.lock.Lock()
n.SetPriceFactors(bt.defaultPosFactors, bt.defaultNegFactors)
bt.lock.Unlock()
ns.SetFieldSub(node, bt.setup.balanceField, n)
} else {
ns.SetStateSub(node, nodestate.Flags{}, bt.setup.priorityFlag, 0)
if b, _ := ns.GetField(node, bt.setup.balanceField).(*nodeBalance); b != nil {
b.deactivate()
}
ns.SetFieldSub(node, bt.setup.balanceField, nil)
}
})
// The positive and negative balances of clients are stored in database
// and both of these decay exponentially over time. Delete them if the
// value is small enough.
bt.ndb.evictCallBack = bt.canDropBalance
go func() {
for {
select {
case <-clock.After(persistExpirationRefresh):
now := clock.Now()
bt.ndb.setExpiration(posExp.LogOffset(now), negExp.LogOffset(now))
case <-bt.quit:
return
}
}
}()
return bt
}
// Stop saves expiration offset and unsaved node balances and shuts balanceTracker down
func (bt *balanceTracker) stop() {
now := bt.clock.Now()
bt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now))
close(bt.quit)
bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
if n, ok := bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance); ok {
n.lock.Lock()
n.storeBalance(true, true)
n.lock.Unlock()
bt.ns.SetField(node, bt.setup.balanceField, nil)
}
})
bt.ndb.close()
}
// TotalTokenAmount returns the current total amount of service tokens in existence
func (bt *balanceTracker) TotalTokenAmount() uint64 {
bt.lock.Lock()
defer bt.lock.Unlock()
bt.balanceTimer.Update(func(_ time.Duration) bool {
bt.active = utils.ExpiredValue{}
bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
if n, ok := bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance); ok && n.active {
pos, _ := n.GetRawBalance()
bt.active.AddExp(pos)
}
})
return true
})
total := bt.active
total.AddExp(bt.inactive)
return total.Value(bt.posExp.LogOffset(bt.clock.Now()))
}
// GetPosBalanceIDs lists node IDs with an associated positive balance
func (bt *balanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {
return bt.ndb.getPosBalanceIDs(start, stop, maxCount)
}
// SetDefaultFactors sets the default price factors applied to subsequently connected clients
func (bt *balanceTracker) SetDefaultFactors(posFactors, negFactors PriceFactors) {
bt.lock.Lock()
bt.defaultPosFactors = posFactors
bt.defaultNegFactors = negFactors
bt.lock.Unlock()
}
// SetExpirationTCs sets positive and negative token expiration time constants.
// Specified in seconds, 0 means infinite (no expiration).
func (bt *balanceTracker) SetExpirationTCs(pos, neg uint64) {
bt.lock.Lock()
defer bt.lock.Unlock()
bt.posExpTC, bt.negExpTC = pos, neg
now := bt.clock.Now()
if pos > 0 {
bt.posExp.SetRate(now, 1/float64(pos*uint64(time.Second)))
} else {
bt.posExp.SetRate(now, 0)
}
if neg > 0 {
bt.negExp.SetRate(now, 1/float64(neg*uint64(time.Second)))
} else {
bt.negExp.SetRate(now, 0)
}
}
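// Editor's note (worked example, not in the original file): with pos = 3600
// the rate above becomes 1/(3600s in nanoseconds), i.e. the positive balance
// decays by a factor of e roughly every hour; the value is an e-folding time
// constant rather than a half-life.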
// GetExpirationTCs returns the current positive and negative token expiration
// time constants
func (bt *balanceTracker) GetExpirationTCs() (pos, neg uint64) {
bt.lock.Lock()
defer bt.lock.Unlock()
return bt.posExpTC, bt.negExpTC
}
// BalanceOperation allows atomic operations on the balance of a node regardless of whether
// it is currently connected or not
func (bt *balanceTracker) BalanceOperation(id enode.ID, connAddress string, cb func(AtomicBalanceOperator)) {
bt.ns.Operation(func() {
var nb *nodeBalance
if node := bt.ns.GetNode(id); node != nil {
nb, _ = bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance)
}
if nb == nil {
node := enode.SignNull(&enr.Record{}, id)
nb = bt.newNodeBalance(node, connAddress, false)
}
cb(nb)
})
}
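// Editor's note: a hedged usage sketch (not part of the removed file); this is
// how callers credit a node atomically whether or not it is connected:
//
//	bt.BalanceOperation(id, connAddress, func(op AtomicBalanceOperator) {
//		if _, _, err := op.AddBalance(int64(time.Minute)); err != nil {
//			log.Warn("Balance top-up failed", "err", err)
//		}
//	})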
// newNodeBalance loads balances from the database and creates a nodeBalance instance
// for the given node. It also sets the priorityFlag and adds balanceCallbackZero if
// the node has a positive balance.
// Note: this function should run inside a NodeStateMachine operation
func (bt *balanceTracker) newNodeBalance(node *enode.Node, connAddress string, setFlags bool) *nodeBalance {
pb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false)
nb := bt.ndb.getOrNewBalance([]byte(connAddress), true)
n := &nodeBalance{
bt: bt,
node: node,
setFlags: setFlags,
connAddress: connAddress,
balance: balance{pos: pb, neg: nb, posExp: bt.posExp, negExp: bt.negExp},
initTime: bt.clock.Now(),
lastUpdate: bt.clock.Now(),
}
for i := range n.callbackIndex {
n.callbackIndex[i] = -1
}
if setFlags && n.checkPriorityStatus() {
n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0)
}
return n
}
// storeBalance stores either a positive or a negative balance in the database
func (bt *balanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) {
if bt.canDropBalance(bt.clock.Now(), neg, value) {
bt.ndb.delBalance(id, neg) // balance is small enough, drop it directly.
} else {
bt.ndb.setBalance(id, neg, value)
}
}
// canDropBalance tells whether a positive or negative balance is below the threshold
// and therefore can be dropped from the database
func (bt *balanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
if neg {
return b.Value(bt.negExp.LogOffset(now)) <= negThreshold
}
return b.Value(bt.posExp.LogOffset(now)) <= posThreshold
}
// updateTotalBalance adjusts the total balance after executing given callback.
func (bt *balanceTracker) updateTotalBalance(n *nodeBalance, callback func() bool) {
bt.lock.Lock()
defer bt.lock.Unlock()
n.lock.Lock()
defer n.lock.Unlock()
original, active := n.balance.pos, n.active
if !callback() {
return
}
if active {
bt.active.SubExp(original)
} else {
bt.inactive.SubExp(original)
}
if n.active {
bt.active.AddExp(n.balance.pos)
} else {
bt.inactive.AddExp(n.balance.pos)
}
}

@@ -1,250 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"bytes"
"encoding/binary"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
const (
balanceCacheLimit = 8192 // the maximum number of cached items in service token balance queue
// nodeDBVersion is the version identifier of the node data in db
//
// Changelog:
// Version 0 => 1
// * Replace `lastTotal` with `meta` in positive balance
//
// Version 1 => 2
// * Positive and negative balance encodings are changed:
// * Cumulative time is replaced with expiration
nodeDBVersion = 2
// dbCleanupCycle is the cycle of db for useless data cleanup
dbCleanupCycle = time.Hour
)
var (
positiveBalancePrefix = []byte("pb:") // dbVersion(uint16 big endian) + positiveBalancePrefix + id -> balance
negativeBalancePrefix = []byte("nb:") // dbVersion(uint16 big endian) + negativeBalancePrefix + ip -> balance
expirationKey = []byte("expiration:") // dbVersion(uint16 big endian) + expirationKey -> posExp, negExp
)
type nodeDB struct {
db ethdb.KeyValueStore
cache *lru.Cache[string, utils.ExpiredValue]
auxbuf []byte // 37-byte auxiliary buffer for key encoding
verbuf [2]byte // 2-byte auxiliary buffer for db version
evictCallBack func(mclock.AbsTime, bool, utils.ExpiredValue) bool // Callback to determine whether the balance can be evicted.
clock mclock.Clock
closeCh chan struct{}
cleanupHook func() // Test hook used for testing
}
func newNodeDB(db ethdb.KeyValueStore, clock mclock.Clock) *nodeDB {
ndb := &nodeDB{
db: db,
cache: lru.NewCache[string, utils.ExpiredValue](balanceCacheLimit),
auxbuf: make([]byte, 37),
clock: clock,
closeCh: make(chan struct{}),
}
binary.BigEndian.PutUint16(ndb.verbuf[:], uint16(nodeDBVersion))
go ndb.expirer()
return ndb
}
func (db *nodeDB) close() {
close(db.closeCh)
}
func (db *nodeDB) getPrefix(neg bool) []byte {
prefix := positiveBalancePrefix
if neg {
prefix = negativeBalancePrefix
}
return append(db.verbuf[:], prefix...)
}
func (db *nodeDB) key(id []byte, neg bool) []byte {
prefix := positiveBalancePrefix
if neg {
prefix = negativeBalancePrefix
}
if len(prefix)+len(db.verbuf)+len(id) > len(db.auxbuf) {
db.auxbuf = append(db.auxbuf, make([]byte, len(prefix)+len(db.verbuf)+len(id)-len(db.auxbuf))...)
}
copy(db.auxbuf[:len(db.verbuf)], db.verbuf[:])
copy(db.auxbuf[len(db.verbuf):len(db.verbuf)+len(prefix)], prefix)
copy(db.auxbuf[len(prefix)+len(db.verbuf):len(prefix)+len(db.verbuf)+len(id)], id)
return db.auxbuf[:len(prefix)+len(db.verbuf)+len(id)]
}
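// Editor's sketch (not part of the removed file): the key layout produced by
// key() above is the 2-byte big-endian version, the prefix ("pb:" or "nb:"),
// then the raw id bytes. The same key built without the shared auxbuf:
func exampleKey(id []byte, neg bool) []byte {
prefix := positiveBalancePrefix
if neg {
prefix = negativeBalancePrefix
}
key := make([]byte, 2, 2+len(prefix)+len(id))
binary.BigEndian.PutUint16(key, nodeDBVersion)
return append(append(key, prefix...), id...)
}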
func (db *nodeDB) getExpiration() (utils.Fixed64, utils.Fixed64) {
blob, err := db.db.Get(append(db.verbuf[:], expirationKey...))
if err != nil || len(blob) != 16 {
return 0, 0
}
return utils.Fixed64(binary.BigEndian.Uint64(blob[:8])), utils.Fixed64(binary.BigEndian.Uint64(blob[8:16]))
}
func (db *nodeDB) setExpiration(pos, neg utils.Fixed64) {
var buff [16]byte
binary.BigEndian.PutUint64(buff[:8], uint64(pos))
binary.BigEndian.PutUint64(buff[8:16], uint64(neg))
db.db.Put(append(db.verbuf[:], expirationKey...), buff[:16])
}
func (db *nodeDB) getOrNewBalance(id []byte, neg bool) utils.ExpiredValue {
key := db.key(id, neg)
item, exist := db.cache.Get(string(key))
if exist {
return item
}
var b utils.ExpiredValue
enc, err := db.db.Get(key)
if err != nil || len(enc) == 0 {
return b
}
if err := rlp.DecodeBytes(enc, &b); err != nil {
log.Crit("Failed to decode positive balance", "err", err)
}
db.cache.Add(string(key), b)
return b
}
func (db *nodeDB) setBalance(id []byte, neg bool, b utils.ExpiredValue) {
key := db.key(id, neg)
enc, err := rlp.EncodeToBytes(&(b))
if err != nil {
log.Crit("Failed to encode positive balance", "err", err)
}
db.db.Put(key, enc)
db.cache.Add(string(key), b)
}
func (db *nodeDB) delBalance(id []byte, neg bool) {
key := db.key(id, neg)
db.db.Delete(key)
db.cache.Remove(string(key))
}
// getPosBalanceIDs returns a lexicographically ordered list of IDs of accounts
// with a positive balance
func (db *nodeDB) getPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {
if maxCount <= 0 {
return
}
prefix := db.getPrefix(false)
keylen := len(prefix) + len(enode.ID{})
it := db.db.NewIterator(prefix, start.Bytes())
defer it.Release()
for it.Next() {
var id enode.ID
if len(it.Key()) != keylen {
return
}
copy(id[:], it.Key()[keylen-len(id):])
if bytes.Compare(id.Bytes(), stop.Bytes()) >= 0 {
return
}
result = append(result, id)
if len(result) == maxCount {
return
}
}
return
}
// forEachBalance iterates all balances and passes values to callback.
func (db *nodeDB) forEachBalance(neg bool, callback func(id enode.ID, balance utils.ExpiredValue) bool) {
prefix := db.getPrefix(neg)
keylen := len(prefix) + len(enode.ID{})
it := db.db.NewIterator(prefix, nil)
defer it.Release()
for it.Next() {
var id enode.ID
if len(it.Key()) != keylen {
return
}
copy(id[:], it.Key()[keylen-len(id):])
var b utils.ExpiredValue
if err := rlp.DecodeBytes(it.Value(), &b); err != nil {
continue
}
if !callback(id, b) {
return
}
}
}
func (db *nodeDB) expirer() {
for {
select {
case <-db.clock.After(dbCleanupCycle):
db.expireNodes()
case <-db.closeCh:
return
}
}
}
// expireNodes iterates the whole node db and checks whether the
// token balances can be deleted.
func (db *nodeDB) expireNodes() {
var (
visited int
deleted int
start = time.Now()
)
for _, neg := range []bool{false, true} {
iter := db.db.NewIterator(db.getPrefix(neg), nil)
for iter.Next() {
visited++
var balance utils.ExpiredValue
if err := rlp.DecodeBytes(iter.Value(), &balance); err != nil {
log.Crit("Failed to decode negative balance", "err", err)
}
if db.evictCallBack != nil && db.evictCallBack(db.clock.Now(), neg, balance) {
deleted++
db.db.Delete(iter.Key())
}
}
}
// Invoke testing hook if it's not nil.
if db.cleanupHook != nil {
db.cleanupHook()
}
log.Debug("Expire nodes", "visited", visited, "deleted", deleted, "elapsed", common.PrettyDuration(time.Since(start)))
}

@@ -1,148 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/p2p/enode"
)
func expval(v uint64) utils.ExpiredValue {
return utils.ExpiredValue{Base: v}
}
func TestNodeDB(t *testing.T) {
t.Parallel()
ndb := newNodeDB(rawdb.NewMemoryDatabase(), mclock.System{})
defer ndb.close()
var cases = []struct {
id enode.ID
ip string
balance utils.ExpiredValue
positive bool
}{
{enode.ID{0x00, 0x01, 0x02}, "", expval(100), true},
{enode.ID{0x00, 0x01, 0x02}, "", expval(200), true},
{enode.ID{}, "127.0.0.1", expval(100), false},
{enode.ID{}, "127.0.0.1", expval(200), false},
}
for _, c := range cases {
if c.positive {
ndb.setBalance(c.id.Bytes(), false, c.balance)
if pb := ndb.getOrNewBalance(c.id.Bytes(), false); !reflect.DeepEqual(pb, c.balance) {
t.Fatalf("Positive balance mismatch, want %v, got %v", c.balance, pb)
}
} else {
ndb.setBalance([]byte(c.ip), true, c.balance)
if nb := ndb.getOrNewBalance([]byte(c.ip), true); !reflect.DeepEqual(nb, c.balance) {
t.Fatalf("Negative balance mismatch, want %v, got %v", c.balance, nb)
}
}
}
for _, c := range cases {
if c.positive {
ndb.delBalance(c.id.Bytes(), false)
if pb := ndb.getOrNewBalance(c.id.Bytes(), false); !reflect.DeepEqual(pb, utils.ExpiredValue{}) {
t.Fatalf("Positive balance mismatch, want %v, got %v", utils.ExpiredValue{}, pb)
}
} else {
ndb.delBalance([]byte(c.ip), true)
if nb := ndb.getOrNewBalance([]byte(c.ip), true); !reflect.DeepEqual(nb, utils.ExpiredValue{}) {
t.Fatalf("Negative balance mismatch, want %v, got %v", utils.ExpiredValue{}, nb)
}
}
}
posExp, negExp := utils.Fixed64(1000), utils.Fixed64(2000)
ndb.setExpiration(posExp, negExp)
if pos, neg := ndb.getExpiration(); pos != posExp || neg != negExp {
t.Fatalf("Expiration mismatch, want %v / %v, got %v / %v", posExp, negExp, pos, neg)
}
/* curBalance := currencyBalance{typ: "ETH", amount: 10000}
ndb.setCurrencyBalance(enode.ID{0x01, 0x02}, curBalance)
if got := ndb.getCurrencyBalance(enode.ID{0x01, 0x02}); !reflect.DeepEqual(got, curBalance) {
t.Fatalf("Currency balance mismatch, want %v, got %v", curBalance, got)
}*/
}
func TestNodeDBExpiration(t *testing.T) {
t.Parallel()
var (
iterated int
done = make(chan struct{}, 1)
)
callback := func(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {
iterated++
return true
}
clock := &mclock.Simulated{}
ndb := newNodeDB(rawdb.NewMemoryDatabase(), clock)
defer ndb.close()
ndb.evictCallBack = callback
ndb.cleanupHook = func() { done <- struct{}{} }
var cases = []struct {
id []byte
neg bool
balance utils.ExpiredValue
}{
{[]byte{0x01, 0x02}, false, expval(1)},
{[]byte{0x03, 0x04}, false, expval(1)},
{[]byte{0x05, 0x06}, false, expval(1)},
{[]byte{0x07, 0x08}, false, expval(1)},
{[]byte("127.0.0.1"), true, expval(1)},
{[]byte("127.0.0.2"), true, expval(1)},
{[]byte("127.0.0.3"), true, expval(1)},
{[]byte("127.0.0.4"), true, expval(1)},
}
for _, c := range cases {
ndb.setBalance(c.id, c.neg, c.balance)
}
clock.WaitForTimers(1)
clock.Run(time.Hour + time.Minute)
select {
case <-done:
case <-time.NewTimer(time.Second).C:
t.Fatalf("timeout")
}
if iterated != 8 {
t.Fatalf("Failed to evict useless balances, want %v, got %d", 8, iterated)
}
for _, c := range cases {
ndb.setBalance(c.id, c.neg, c.balance)
}
clock.WaitForTimers(1)
clock.Run(time.Hour + time.Minute)
select {
case <-done:
case <-time.NewTimer(time.Second).C:
t.Fatalf("timeout")
}
if iterated != 16 {
t.Fatalf("Failed to evict useless balances, want %v, got %d", 16, iterated)
}
}

@@ -1,328 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"errors"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/les/vflux"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nodestate"
"github.com/ethereum/go-ethereum/rlp"
)
var (
ErrNotConnected = errors.New("client not connected")
ErrNoPriority = errors.New("priority too low to raise capacity")
ErrCantFindMaximum = errors.New("unable to find maximum allowed capacity")
)
// ClientPool implements a client database that assigns a priority to each client
// based on a positive and negative balance. Positive balance is externally assigned
// to prioritized clients and is decreased with connection time and processed
// requests (unless the price factors are zero). If the positive balance is zero
// then negative balance is accumulated.
//
// Balance tracking and priority calculation for connected clients is done by
// balanceTracker. PriorityQueue ensures that clients with the lowest positive or
// highest negative balance get evicted when the total capacity allowance is full
// and new clients with a better balance want to connect.
//
// Already connected nodes receive a small bias in their favor in order to avoid
// accepting and instantly kicking out clients. In theory, we try to ensure that
// each client can have several minutes of connection time.
//
// Balances of disconnected clients are stored in nodeDB including positive balance
// and negative balance. Both positive balance and negative balance will decrease
// exponentially. If the balance is low enough, then the record will be dropped.
type ClientPool struct {
*priorityPool
*balanceTracker
setup *serverSetup
clock mclock.Clock
ns *nodestate.NodeStateMachine
synced func() bool
lock sync.RWMutex
connectedBias time.Duration
minCap uint64 // the minimal capacity value allowed for any client
capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation
}
// clientPeer represents a peer in the client pool. None of the callbacks should block.
type clientPeer interface {
Node() *enode.Node
FreeClientId() string // unique id for non-priority clients (typically a prefix of the network address)
InactiveAllowance() time.Duration // disconnection timeout for inactive non-priority peers
UpdateCapacity(newCap uint64, requested bool) // signals a capacity update (requested is true if it is a result of a SetCapacity call on the given peer)
Disconnect() // initiates disconnection (Unregister should always be called)
}
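// Editor's note: a minimal conforming implementation (sketch only; compare
// poolTestPeer in the tests further below):
//
//	type peer struct{ node *enode.Node }
//
//	func (p *peer) Node() *enode.Node                { return p.node }
//	func (p *peer) FreeClientId() string             { return p.node.IP().String() }
//	func (p *peer) InactiveAllowance() time.Duration { return 0 }
//	func (p *peer) UpdateCapacity(newCap uint64, requested bool) {}
//	func (p *peer) Disconnect()                      {}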
// NewClientPool creates a new client pool
func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock, synced func() bool) *ClientPool {
setup := newServerSetup()
ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
cp := &ClientPool{
priorityPool: newPriorityPool(ns, setup, clock, minCap, connectedBias, 4, 100),
balanceTracker: newBalanceTracker(ns, setup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}),
setup: setup,
ns: ns,
clock: clock,
minCap: minCap,
connectedBias: connectedBias,
synced: synced,
}
ns.SubscribeState(nodestate.MergeFlags(setup.activeFlag, setup.inactiveFlag, setup.priorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
if newState.Equals(setup.inactiveFlag) {
// set timeout for non-priority inactive client
var timeout time.Duration
if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
timeout = c.InactiveAllowance()
}
ns.AddTimeout(node, setup.inactiveFlag, timeout)
}
if oldState.Equals(setup.inactiveFlag) && newState.Equals(setup.inactiveFlag.Or(setup.priorityFlag)) {
ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout
}
if newState.Equals(setup.activeFlag) {
// active with no priority; limit capacity to minCap
cap, _ := ns.GetField(node, setup.capacityField).(uint64)
if cap > minCap {
cp.requestCapacity(node, minCap, minCap, 0)
}
}
if newState.Equals(nodestate.Flags{}) {
if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
c.Disconnect()
}
}
})
ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
newCap, _ := newValue.(uint64)
c.UpdateCapacity(newCap, node == cp.capReqNode)
}
})
// add metrics
cp.ns.SubscribeState(nodestate.MergeFlags(cp.setup.activeFlag, cp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
if oldState.IsEmpty() && !newState.IsEmpty() {
clientConnectedMeter.Mark(1)
}
if !oldState.IsEmpty() && newState.IsEmpty() {
clientDisconnectedMeter.Mark(1)
}
if oldState.HasNone(cp.setup.activeFlag) && newState.HasAll(cp.setup.activeFlag) {
clientActivatedMeter.Mark(1)
}
if oldState.HasAll(cp.setup.activeFlag) && newState.HasNone(cp.setup.activeFlag) {
clientDeactivatedMeter.Mark(1)
}
activeCount, activeCap := cp.Active()
totalActiveCountGauge.Update(int64(activeCount))
totalActiveCapacityGauge.Update(int64(activeCap))
totalInactiveCountGauge.Update(int64(cp.Inactive()))
})
return cp
}
// Start starts the client pool. Should be called before Register/Unregister.
func (cp *ClientPool) Start() {
cp.ns.Start()
}
// Stop shuts the client pool down. The clientPeer interface callbacks will not be called
// after Stop. Register calls will return nil.
func (cp *ClientPool) Stop() {
cp.balanceTracker.stop()
cp.ns.Stop()
}
// Register registers the peer into the client pool. If the peer has insufficient
// priority and remains inactive for longer than the allowed timeout then it will be
// disconnected by calling the Disconnect function of the clientPeer interface.
func (cp *ClientPool) Register(peer clientPeer) ConnectedBalance {
cp.ns.SetField(peer.Node(), cp.setup.clientField, peerWrapper{peer})
balance, _ := cp.ns.GetField(peer.Node(), cp.setup.balanceField).(*nodeBalance)
return balance
}
// Unregister removes the peer from the client pool
func (cp *ClientPool) Unregister(peer clientPeer) {
cp.ns.SetField(peer.Node(), cp.setup.clientField, nil)
}
// SetConnectedBias sets the connection bias, which is applied to already connected clients
// so that they are not kicked out right after connecting and have enough time
// to request or sync some data.
func (cp *ClientPool) SetConnectedBias(bias time.Duration) {
cp.lock.Lock()
cp.connectedBias = bias
cp.setActiveBias(bias)
cp.lock.Unlock()
}
// SetCapacity sets the assigned capacity of a connected client
func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Duration, requested bool) (capacity uint64, err error) {
cp.lock.RLock()
if cp.connectedBias > bias {
bias = cp.connectedBias
}
cp.lock.RUnlock()
cp.ns.Operation(func() {
balance, _ := cp.ns.GetField(node, cp.setup.balanceField).(*nodeBalance)
if balance == nil {
err = ErrNotConnected
return
}
capacity, _ = cp.ns.GetField(node, cp.setup.capacityField).(uint64)
if capacity == 0 {
// if the client is inactive then it has insufficient priority for the minimal capacity
// (will be activated automatically with minCap when possible)
return
}
if reqCap < cp.minCap {
// can't request less than minCap; switching between 0 (inactive state) and minCap is
// performed by the server automatically as soon as necessary/possible
reqCap = cp.minCap
}
if reqCap > cp.minCap && cp.ns.GetState(node).HasNone(cp.setup.priorityFlag) {
err = ErrNoPriority
return
}
if reqCap == capacity {
return
}
if requested {
// mark the requested node so that the UpdateCapacity callback can signal
// whether the update is the direct result of a SetCapacity call on the given node
cp.capReqNode = node
defer func() {
cp.capReqNode = nil
}()
}
var minTarget, maxTarget uint64
if reqCap > capacity {
// Estimate maximum available capacity at the current priority level and request
// the estimated amount.
// Note: requestCapacity could find the highest available capacity between the
// current and the requested capacity but it could cost a lot of iterations with
// fine step adjustment if the requested capacity is very high. By doing a quick
// estimation of the maximum available capacity based on the capacity curve we
// can limit the number of required iterations.
curve := cp.getCapacityCurve().exclude(node.ID())
maxTarget = curve.maxCapacity(func(capacity uint64) int64 {
return balance.estimatePriority(capacity, 0, 0, bias, false)
})
if maxTarget < reqCap {
return
}
maxTarget = reqCap
// Specify a narrow target range that allows a limited number of fine step
// iterations
minTarget = maxTarget - maxTarget/20
if minTarget < capacity {
minTarget = capacity
}
} else {
minTarget, maxTarget = reqCap, reqCap
}
if newCap := cp.requestCapacity(node, minTarget, maxTarget, bias); newCap >= minTarget && newCap <= maxTarget {
capacity = newCap
return
}
// we should be able to find the maximum allowed capacity in a few iterations
log.Error("Unable to find maximum allowed capacity")
err = ErrCantFindMaximum
})
return
}
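// Editor's note: a hedged usage sketch (not part of the removed file; it
// mirrors testPriorityConnect in the tests further below, where
// defaultConnectedBias is defined):
//
//	if newCap, err := pool.SetCapacity(node, 10, defaultConnectedBias, true); err != nil {
//		// ErrNotConnected, ErrNoPriority or ErrCantFindMaximum
//	} else {
//		_ = newCap // the granted capacity, at least minCap
//	}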
// serveCapQuery serves a vflux capacity query. It receives multiple token amount values
// and a bias time value. For each given token amount it calculates the maximum achievable
// capacity in case the amount is added to the balance.
func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte {
var req vflux.CapacityQueryReq
if rlp.DecodeBytes(data, &req) != nil {
return nil
}
if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen {
return nil
}
result := make(vflux.CapacityQueryReply, len(req.AddTokens))
if !cp.synced() {
capacityQueryZeroMeter.Mark(1)
reply, _ := rlp.EncodeToBytes(&result)
return reply
}
bias := time.Second * time.Duration(req.Bias)
cp.lock.RLock()
if cp.connectedBias > bias {
bias = cp.connectedBias
}
cp.lock.RUnlock()
// use capacityCurve to answer request for multiple newly bought token amounts
curve := cp.getCapacityCurve().exclude(id)
cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) {
pb, _ := balance.GetBalance()
for i, addTokens := range req.AddTokens {
add := addTokens.Int64()
result[i] = curve.maxCapacity(func(capacity uint64) int64 {
return balance.estimatePriority(capacity, add, 0, bias, false) / int64(capacity)
})
if add <= 0 && uint64(-add) >= pb && result[i] > cp.minCap {
result[i] = cp.minCap
}
if result[i] < cp.minCap {
result[i] = 0
}
}
})
// add first result to metrics (don't care about priority client multi-queries yet)
if result[0] == 0 {
capacityQueryZeroMeter.Mark(1)
} else {
capacityQueryNonZeroMeter.Mark(1)
}
reply, _ := rlp.EncodeToBytes(&result)
return reply
}
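// Editor's note: a hedged sketch (not part of the removed file) of how a vflux
// capacity query reaches this code. The vflux service dispatcher hands Handle
// (below) the raw RLP payload, and a nil reply signals an unknown or malformed
// request:
//
//	reply := pool.Handle(peerID, freeID, vflux.CapacityQueryName, payload)
//	if reply == nil {
//		// unknown request name or undecodable payload
//	}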
// Handle implements Service
func (cp *ClientPool) Handle(id enode.ID, address string, name string, data []byte) []byte {
switch name {
case vflux.CapacityQueryName:
return cp.serveCapQuery(id, address, data)
default:
return nil
}
}

@@ -1,640 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
const defaultConnectedBias = time.Minute * 3
func TestClientPoolL10C100Free(t *testing.T) {
t.Parallel()
testClientPool(t, 10, 100, 0, true)
}
func TestClientPoolL40C200Free(t *testing.T) {
t.Parallel()
testClientPool(t, 40, 200, 0, true)
}
func TestClientPoolL100C300Free(t *testing.T) {
t.Parallel()
testClientPool(t, 100, 300, 0, true)
}
func TestClientPoolL10C100P4(t *testing.T) {
t.Parallel()
testClientPool(t, 10, 100, 4, false)
}
func TestClientPoolL40C200P30(t *testing.T) {
t.Parallel()
testClientPool(t, 40, 200, 30, false)
}
func TestClientPoolL100C300P20(t *testing.T) {
t.Parallel()
testClientPool(t, 100, 300, 20, false)
}
const testClientPoolTicks = 100000
type poolTestPeer struct {
node *enode.Node
index int
disconnCh chan int
cap uint64
inactiveAllowed bool
}
func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {
return &poolTestPeer{
index: i,
disconnCh: disconnCh,
node: enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),
}
}
func (i *poolTestPeer) Node() *enode.Node {
return i.node
}
func (i *poolTestPeer) FreeClientId() string {
return fmt.Sprintf("addr #%d", i.index)
}
func (i *poolTestPeer) InactiveAllowance() time.Duration {
if i.inactiveAllowed {
return time.Second * 10
}
return 0
}
func (i *poolTestPeer) UpdateCapacity(capacity uint64, requested bool) {
i.cap = capacity
}
func (i *poolTestPeer) Disconnect() {
if i.disconnCh == nil {
return
}
id := i.node.ID()
i.disconnCh <- int(id[0]) + int(id[1])<<8
}
func getBalance(pool *ClientPool, p *poolTestPeer) (pos, neg uint64) {
pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb AtomicBalanceOperator) {
pos, neg = nb.GetBalance()
})
return
}
func addBalance(pool *ClientPool, id enode.ID, amount int64) {
pool.BalanceOperation(id, "", func(nb AtomicBalanceOperator) {
nb.AddBalance(amount)
})
}
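// checkDiff reports whether a and b differ by more than about 0.1% of their
// mean, with a minimum tolerance of 1.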
func checkDiff(a, b uint64) bool {
maxDiff := (a + b) / 2000
if maxDiff < 1 {
maxDiff = 1
}
return a > b+maxDiff || b > a+maxDiff
}
func connect(pool *ClientPool, peer *poolTestPeer) uint64 {
pool.Register(peer)
return peer.cap
}
func disconnect(pool *ClientPool, peer *poolTestPeer) {
pool.Unregister(peer)
}
func alwaysTrueFn() bool {
return true
}
func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) {
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
connected = make([]bool, clientCount)
connTicks = make([]int, clientCount)
disconnCh = make(chan int, clientCount)
pool = NewClientPool(db, 1, 0, &clock, alwaysTrueFn)
)
pool.Start()
pool.SetExpirationTCs(0, 1000)
pool.SetLimits(uint64(activeLimit), uint64(activeLimit))
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
// pool should accept new peers up to its connected limit
for i := 0; i < activeLimit; i++ {
if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
connected[i] = true
} else {
t.Fatalf("Test peer #%d rejected", i)
}
}
// randomly connect and disconnect peers, expect to have a similar total connection time at the end
for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {
clock.Run(1 * time.Second)
if tickCounter == testClientPoolTicks/4 {
// give a positive balance to some of the peers
amount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period
for i := 0; i < paidCount; i++ {
addBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)
}
}
i := rand.Intn(clientCount)
if connected[i] {
if randomDisconnect {
disconnect(pool, newPoolTestPeer(i, disconnCh))
connected[i] = false
connTicks[i] += tickCounter
}
} else {
if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 {
connected[i] = true
connTicks[i] -= tickCounter
} else {
disconnect(pool, newPoolTestPeer(i, disconnCh))
}
}
pollDisconnects:
for {
select {
case i := <-disconnCh:
disconnect(pool, newPoolTestPeer(i, disconnCh))
if connected[i] {
connTicks[i] += tickCounter
connected[i] = false
}
default:
break pollDisconnects
}
}
}
expTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)
expMin := expTicks - expTicks/5
expMax := expTicks + expTicks/5
paidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2
paidMin := paidTicks - paidTicks/5
paidMax := paidTicks + paidTicks/5
// check if the total connected time of peers are all in the expected range
for i, c := range connected {
if c {
connTicks[i] += testClientPoolTicks
}
min, max := expMin, expMax
if i < paidCount {
// expect a higher amount for clients with a positive balance
min, max = paidMin, paidMax
}
if connTicks[i] < min || connTicks[i] > max {
t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max)
}
}
pool.Stop()
}
func testPriorityConnect(t *testing.T, pool *ClientPool, p *poolTestPeer, cap uint64, expSuccess bool) {
if cap := connect(pool, p); cap == 0 {
if expSuccess {
t.Fatalf("Failed to connect paid client")
} else {
return
}
}
if newCap, _ := pool.SetCapacity(p.node, cap, defaultConnectedBias, true); newCap != cap {
if expSuccess {
t.Fatalf("Failed to raise capacity of paid client")
} else {
return
}
}
if !expSuccess {
t.Fatalf("Should reject high capacity paid client")
}
}
func TestConnectPaidClient(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetLimits(10, uint64(10))
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
// Add balance for an external client and mark it as paid client
addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)
}
func TestConnectPaidClientToSmallPool(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
// Add balance for an external client and mark it as paid client
addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))
// connect a fat paid client to pool, should reject it.
testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)
}
func TestConnectPaidClientToFullPool(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))
connect(pool, newPoolTestPeer(i, nil))
}
addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client
if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
t.Fatalf("Low balance paid client should be rejected")
}
clock.Run(time.Second)
addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client
if cap := connect(pool, newPoolTestPeer(12, nil)); cap == 0 {
t.Fatalf("High balance paid client should be accepted")
}
}
func TestPaidClientKickedOut(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
kickedCh = make(chan int, 100)
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
pool.SetExpirationTCs(0, 0)
defer pool.Stop()
pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance
connect(pool, newPoolTestPeer(i, kickedCh))
clock.Run(time.Millisecond)
}
clock.Run(defaultConnectedBias + time.Second*11)
if cap := connect(pool, newPoolTestPeer(11, kickedCh)); cap == 0 {
t.Fatalf("Free client should be accepted")
}
clock.Run(0)
select {
case id := <-kickedCh:
if id != 0 {
t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id)
}
default:
t.Fatalf("timeout")
}
}
func TestConnectFreeClient(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetLimits(10, uint64(10))
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
if cap := connect(pool, newPoolTestPeer(0, nil)); cap == 0 {
t.Fatalf("Failed to connect free client")
}
testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)
}
func TestConnectFreeClientToFullPool(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
connect(pool, newPoolTestPeer(i, nil))
}
if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 {
t.Fatalf("New free client should be rejected")
}
clock.Run(time.Minute)
if cap := connect(pool, newPoolTestPeer(12, nil)); cap != 0 {
t.Fatalf("New free client should be rejected")
}
clock.Run(time.Millisecond)
clock.Run(4 * time.Minute)
if cap := connect(pool, newPoolTestPeer(13, nil)); cap == 0 {
t.Fatalf("Old client connects more than 5min should be kicked")
}
}
func TestFreeClientKickedOut(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
kicked = make(chan int, 100)
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
connect(pool, newPoolTestPeer(i, kicked))
clock.Run(time.Millisecond)
}
if cap := connect(pool, newPoolTestPeer(10, kicked)); cap != 0 {
t.Fatalf("New free client should be rejected")
}
clock.Run(0)
select {
case <-kicked:
default:
t.Fatalf("timeout")
}
disconnect(pool, newPoolTestPeer(10, kicked))
clock.Run(5 * time.Minute)
for i := 0; i < 10; i++ {
connect(pool, newPoolTestPeer(i+10, kicked))
}
clock.Run(0)
for i := 0; i < 10; i++ {
select {
case id := <-kicked:
if id >= 10 {
t.Fatalf("Old client should be kicked, now got: %d", id)
}
default:
t.Fatalf("timeout")
}
}
}
func TestPositiveBalanceCalculation(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
kicked = make(chan int, 10)
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))
testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)
clock.Run(time.Minute)
disconnect(pool, newPoolTestPeer(0, kicked))
pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
if checkDiff(pb, uint64(time.Minute*2)) {
t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb)
}
}
func TestDowngradePriorityClient(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
kicked = make(chan int, 10)
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})
p := newPoolTestPeer(0, kicked)
addBalance(pool, p.node.ID(), int64(time.Minute))
testPriorityConnect(t, pool, p, 10, true)
if p.cap != 10 {
t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap)
}
clock.Run(time.Minute) // All positive balance should be used up.
time.Sleep(300 * time.Millisecond) // Ensure the callback is called
if p.cap != 1 {
t.Fatalf("The capcacity of peer should be downgraded, got: %d", p.cap)
}
pb, _ := getBalance(pool, newPoolTestPeer(0, kicked))
if pb != 0 {
t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb)
}
addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))
pb, _ = getBalance(pool, newPoolTestPeer(0, kicked))
if checkDiff(pb, uint64(time.Minute)) {
t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb)
}
}
func TestNegativeBalanceCalculation(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetExpirationTCs(0, 3600)
pool.SetLimits(10, uint64(10)) // Total capacity limit is 10
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})
for i := 0; i < 10; i++ {
connect(pool, newPoolTestPeer(i, nil))
}
clock.Run(time.Second)
for i := 0; i < 10; i++ {
disconnect(pool, newPoolTestPeer(i, nil))
_, nb := getBalance(pool, newPoolTestPeer(i, nil))
if nb != 0 {
t.Fatalf("Short connection shouldn't be recorded")
}
}
for i := 0; i < 10; i++ {
connect(pool, newPoolTestPeer(i, nil))
}
clock.Run(time.Minute)
for i := 0; i < 10; i++ {
disconnect(pool, newPoolTestPeer(i, nil))
_, nb := getBalance(pool, newPoolTestPeer(i, nil))
exp := uint64(time.Minute) / 1000
exp -= exp / 120 // correct for negative balance expiration
if checkDiff(nb, exp) {
t.Fatalf("Negative balance mismatch, want %v, got %v", exp, nb)
}
}
}
func TestInactiveClient(t *testing.T) {
t.Parallel()
var (
clock mclock.Simulated
db = rawdb.NewMemoryDatabase()
)
pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn)
pool.Start()
defer pool.Stop()
pool.SetLimits(2, uint64(2))
p1 := newPoolTestPeer(1, nil)
p1.inactiveAllowed = true
p2 := newPoolTestPeer(2, nil)
p2.inactiveAllowed = true
p3 := newPoolTestPeer(3, nil)
p3.inactiveAllowed = true
addBalance(pool, p1.node.ID(), 1000*int64(time.Second))
addBalance(pool, p3.node.ID(), 2000*int64(time.Second))
// p1: 1000 p2: 0 p3: 2000
p1.cap = connect(pool, p1)
if p1.cap != 1 {
t.Fatalf("Failed to connect peer #1")
}
p2.cap = connect(pool, p2)
if p2.cap != 1 {
t.Fatalf("Failed to connect peer #2")
}
p3.cap = connect(pool, p3)
if p3.cap != 1 {
t.Fatalf("Failed to connect peer #3")
}
if p2.cap != 0 {
t.Fatalf("Failed to deactivate peer #2")
}
addBalance(pool, p2.node.ID(), 3000*int64(time.Second))
// p1: 1000 p2: 3000 p3: 2000
if p2.cap != 1 {
t.Fatalf("Failed to activate peer #2")
}
if p1.cap != 0 {
t.Fatalf("Failed to deactivate peer #1")
}
addBalance(pool, p2.node.ID(), -2500*int64(time.Second))
// p1: 1000 p2: 500 p3: 2000
if p1.cap != 1 {
t.Fatalf("Failed to activate peer #1")
}
if p2.cap != 0 {
t.Fatalf("Failed to deactivate peer #2")
}
pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})
p4 := newPoolTestPeer(4, nil)
addBalance(pool, p4.node.ID(), 1500*int64(time.Second))
// p1: 1000 p2: 500 p3: 2000 p4: 1500
p4.cap = connect(pool, p4)
if p4.cap != 1 {
t.Fatalf("Failed to activate peer #4")
}
if p1.cap != 0 {
t.Fatalf("Failed to deactivate peer #1")
}
clock.Run(time.Second * 600)
// manually trigger a check to avoid a long real-time wait
pool.ns.SetState(p1.node, pool.setup.updateFlag, nodestate.Flags{}, 0)
pool.ns.SetState(p1.node, nodestate.Flags{}, pool.setup.updateFlag, 0)
// p1: 1000 p2: 500 p3: 2000 p4: 900
if p1.cap != 1 {
t.Fatalf("Failed to activate peer #1")
}
if p4.cap != 0 {
t.Fatalf("Failed to deactivate peer #4")
}
disconnect(pool, p2)
disconnect(pool, p4)
addBalance(pool, p1.node.ID(), -1000*int64(time.Second))
if p1.cap != 1 {
t.Fatalf("Should not deactivate peer #1")
}
if p2.cap != 0 {
t.Fatalf("Should not activate peer #2")
}
}

@@ -1,35 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"github.com/ethereum/go-ethereum/metrics"
)
var (
totalActiveCapacityGauge = metrics.NewRegisteredGauge("vflux/server/active/capacity", nil)
totalActiveCountGauge = metrics.NewRegisteredGauge("vflux/server/active/count", nil)
totalInactiveCountGauge = metrics.NewRegisteredGauge("vflux/server/inactive/count", nil)
clientConnectedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/connected", nil)
clientActivatedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/activated", nil)
clientDeactivatedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/deactivated", nil)
clientDisconnectedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/disconnected", nil)
capacityQueryZeroMeter = metrics.NewRegisteredMeter("vflux/server/capQueryZero", nil)
capacityQueryNonZeroMeter = metrics.NewRegisteredMeter("vflux/server/capQueryNonZero", nil)
)

@@ -1,695 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
const (
lazyQueueRefresh = time.Second * 10 // refresh period of the active queue
)
// priorityPool handles a set of nodes where each node has a capacity (a scalar value)
// and a priority (which can change over time and can also depend on the capacity).
// A node is active if it has at least the necessary minimal amount of capacity while
// inactive nodes have 0 capacity (values between 0 and the minimum are not allowed).
// The pool ensures that the number and total capacity of all active nodes are limited
// and the highest priority nodes are active at all times (limits can be changed
// during operation with immediate effect).
//
// When activating clients a priority bias is applied in favor of the already active
// nodes in order to avoid nodes quickly alternating between active and inactive states
// when their priorities are close to each other. The bias is specified in terms of
// duration (time) because priorities are expected to usually get lower over time and
// therefore a future minimum prediction (see EstMinPriority) should monotonically
// decrease with the specified time parameter.
// This time bias can be interpreted as minimum expected active time at the given
// capacity (if the threshold priority stays the same).
//
// Nodes in the pool always have either inactiveFlag or activeFlag set. A new node is
// added to the pool by externally setting inactiveFlag. priorityPool can switch a node
// between inactiveFlag and activeFlag at any time. Nodes can be removed from the pool
// by externally resetting both flags. activeFlag should not be set externally.
//
// The highest priority nodes in "inactive" state are moved to "active" state as soon as
// the minimum capacity can be granted for them. The capacity of lower priority active
// nodes is reduced or they are demoted to "inactive" state if their priority is
// insufficient even at minimal capacity.
type priorityPool struct {
setup *serverSetup
ns *nodestate.NodeStateMachine
clock mclock.Clock
lock sync.Mutex
maxCount, maxCap uint64
minCap uint64
activeBias time.Duration
capacityStepDiv, fineStepDiv uint64
// The snapshot of priority pool for query.
cachedCurve *capacityCurve
ccUpdatedAt mclock.AbsTime
ccUpdateForced bool
// Runtime status of the priority pool, representing the
// temporary state if tempState is not empty
tempState []*ppNodeInfo
activeCount, activeCap uint64
activeQueue *prque.LazyQueue[int64, *ppNodeInfo]
inactiveQueue *prque.Prque[int64, *ppNodeInfo]
}
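// Illustrative sketch of the externally driven lifecycle described above (assumed
// usage, mirroring nodestate calls made elsewhere in this package):
//
//	ns.SetState(node, setup.inactiveFlag, nodestate.Flags{}, 0) // add a node to the pool
//	// ... the pool may now move the node between inactiveFlag and activeFlag ...
//	ns.SetState(node, nodestate.Flags{}, setup.inactiveFlag.Or(setup.activeFlag), 0) // remove it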
// ppNodeInfo is the internal node descriptor of priorityPool
type ppNodeInfo struct {
nodePriority nodePriority
node *enode.Node
connected bool
capacity uint64 // only changed when temporary state is committed
activeIndex, inactiveIndex int
tempState bool // should only be true while the priorityPool lock is held
tempCapacity uint64 // equals capacity when tempState is false
// the following fields only affect the temporary state and they are set to their
// default value when leaving the temp state
minTarget, stepDiv uint64
bias time.Duration
}
// newPriorityPool creates a new priorityPool
func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv, fineStepDiv uint64) *priorityPool {
pp := &priorityPool{
setup: setup,
ns: ns,
clock: clock,
inactiveQueue: prque.New[int64, *ppNodeInfo](inactiveSetIndex),
minCap: minCap,
activeBias: activeBias,
capacityStepDiv: capacityStepDiv,
fineStepDiv: fineStepDiv,
}
if pp.activeBias < time.Duration(1) {
pp.activeBias = time.Duration(1)
}
pp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh)
ns.SubscribeField(pp.setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
if newValue != nil {
c := &ppNodeInfo{
node: node,
nodePriority: newValue.(nodePriority),
activeIndex: -1,
inactiveIndex: -1,
}
ns.SetFieldSub(node, pp.setup.queueField, c)
ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0)
} else {
ns.SetStateSub(node, nodestate.Flags{}, pp.setup.activeFlag.Or(pp.setup.inactiveFlag), 0)
if n, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); n != nil {
pp.disconnectNode(n)
}
ns.SetFieldSub(node, pp.setup.capacityField, nil)
ns.SetFieldSub(node, pp.setup.queueField, nil)
}
})
ns.SubscribeState(pp.setup.activeFlag.Or(pp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
if c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); c != nil {
if oldState.IsEmpty() {
pp.connectNode(c)
}
if newState.IsEmpty() {
pp.disconnectNode(c)
}
}
})
ns.SubscribeState(pp.setup.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
if !newState.IsEmpty() {
pp.updatePriority(node)
}
})
return pp
}
// requestCapacity tries to set the capacity of a connected node to the highest possible
// value inside the given target range. If maxTarget is not reachable then the capacity is
// iteratively reduced in fine steps based on the fineStepDiv parameter until minTarget is reached.
// The function returns the new capacity if successful and the original capacity otherwise.
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget uint64, bias time.Duration) uint64 {
pp.lock.Lock()
pp.activeQueue.Refresh()
if minTarget < pp.minCap {
minTarget = pp.minCap
}
if maxTarget < minTarget {
maxTarget = minTarget
}
if bias < pp.activeBias {
bias = pp.activeBias
}
c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
if c == nil {
log.Error("requestCapacity called for unknown node", "id", node.ID())
pp.lock.Unlock()
return 0
}
pp.setTempState(c)
if maxTarget > c.capacity {
pp.setTempStepDiv(c, pp.fineStepDiv)
pp.setTempBias(c, bias)
}
pp.setTempCapacity(c, maxTarget)
c.minTarget = minTarget
pp.removeFromQueues(c)
pp.activeQueue.Push(c)
pp.enforceLimits()
updates := pp.finalizeChanges(c.tempCapacity >= minTarget && c.tempCapacity <= maxTarget && c.tempCapacity != c.capacity)
pp.lock.Unlock()
pp.updateFlags(updates)
return c.capacity
}
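// Minimal usage sketch (assumed, mirroring the priority pool tests): the call must
// run inside a NodeStateMachine operation, and the caller compares the returned
// capacity against the requested range to see whether the node now holds it:
//
//	ns.Operation(func() {
//		granted := pp.requestCapacity(node, minTarget, maxTarget, 0)
//		if granted >= minTarget && granted <= maxTarget {
//			// the node now holds a capacity within the requested range
//		}
//	})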
// SetLimits sets the maximum number and total capacity of simultaneously active nodes
func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) {
pp.lock.Lock()
pp.activeQueue.Refresh()
inc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)
dec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)
pp.maxCount, pp.maxCap = maxCount, maxCap
var updates []capUpdate
if dec {
pp.enforceLimits()
updates = pp.finalizeChanges(true)
}
if inc {
updates = append(updates, pp.tryActivate(false)...)
}
pp.lock.Unlock()
pp.ns.Operation(func() { pp.updateFlags(updates) })
}
// setActiveBias sets the bias applied when trying to activate inactive nodes
func (pp *priorityPool) setActiveBias(bias time.Duration) {
pp.lock.Lock()
pp.activeBias = bias
if pp.activeBias < time.Duration(1) {
pp.activeBias = time.Duration(1)
}
updates := pp.tryActivate(false)
pp.lock.Unlock()
pp.ns.Operation(func() { pp.updateFlags(updates) })
}
// Active returns the number and total capacity of currently active nodes
func (pp *priorityPool) Active() (uint64, uint64) {
pp.lock.Lock()
defer pp.lock.Unlock()
return pp.activeCount, pp.activeCap
}
// Inactive returns the number of currently inactive nodes
func (pp *priorityPool) Inactive() int {
pp.lock.Lock()
defer pp.lock.Unlock()
return pp.inactiveQueue.Size()
}
// Limits returns the maximum allowed number and total capacity of active nodes
func (pp *priorityPool) Limits() (uint64, uint64) {
pp.lock.Lock()
defer pp.lock.Unlock()
return pp.maxCount, pp.maxCap
}
// inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue
func inactiveSetIndex(a *ppNodeInfo, index int) {
a.inactiveIndex = index
}
// activeSetIndex callback updates ppNodeInfo item index in activeQueue
func activeSetIndex(a *ppNodeInfo, index int) {
a.activeIndex = index
}
// invertPriority inverts a priority value. The active queue uses inverted priorities
// because the node on the top is the first to be deactivated.
func invertPriority(p int64) int64 {
if p == math.MinInt64 {
return math.MaxInt64
}
return -p
}
// activePriority callback returns actual priority of ppNodeInfo item in activeQueue
func activePriority(c *ppNodeInfo) int64 {
if c.bias == 0 {
return invertPriority(c.nodePriority.priority(c.tempCapacity))
} else {
return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, 0, c.bias, true))
}
}
// activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue
func (pp *priorityPool) activeMaxPriority(c *ppNodeInfo, until mclock.AbsTime) int64 {
future := time.Duration(until - pp.clock.Now())
if future < 0 {
future = 0
}
return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, future, c.bias, false))
}
// inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue
func (pp *priorityPool) inactivePriority(p *ppNodeInfo) int64 {
return p.nodePriority.priority(pp.minCap)
}
// removeFromQueues removes the node from the active/inactive queues
func (pp *priorityPool) removeFromQueues(c *ppNodeInfo) {
if c.activeIndex >= 0 {
pp.activeQueue.Remove(c.activeIndex)
}
if c.inactiveIndex >= 0 {
pp.inactiveQueue.Remove(c.inactiveIndex)
}
}
// connectNode is called when a new node has been added to the pool (inactiveFlag set)
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) connectNode(c *ppNodeInfo) {
pp.lock.Lock()
pp.activeQueue.Refresh()
if c.connected {
pp.lock.Unlock()
return
}
c.connected = true
pp.inactiveQueue.Push(c, pp.inactivePriority(c))
updates := pp.tryActivate(false)
pp.lock.Unlock()
pp.updateFlags(updates)
}
// disconnectNode is called when a node has been removed from the pool (both inactiveFlag
// and activeFlag reset)
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) disconnectNode(c *ppNodeInfo) {
pp.lock.Lock()
pp.activeQueue.Refresh()
if !c.connected {
pp.lock.Unlock()
return
}
c.connected = false
pp.removeFromQueues(c)
var updates []capUpdate
if c.capacity != 0 {
pp.setTempState(c)
pp.setTempCapacity(c, 0)
updates = pp.tryActivate(true)
}
pp.lock.Unlock()
pp.updateFlags(updates)
}
// setTempState internally puts a node in a temporary state that can either be reverted
// or confirmed later. This temporary state allows changing the capacity of a node and
// moving it between the active and inactive queue. activeFlag/inactiveFlag and
// capacityField are not changed while the changes are still temporary.
func (pp *priorityPool) setTempState(c *ppNodeInfo) {
if c.tempState {
return
}
c.tempState = true
if c.tempCapacity != c.capacity { // should never happen
log.Error("tempCapacity != capacity when entering tempState")
}
// Assign all the defaults to the temp state.
c.minTarget = pp.minCap
c.stepDiv = pp.capacityStepDiv
c.bias = 0
pp.tempState = append(pp.tempState, c)
}
// unsetTempState revokes the temp status of the node and resets all internal
// fields to their default values.
func (pp *priorityPool) unsetTempState(c *ppNodeInfo) {
if !c.tempState {
return
}
c.tempState = false
if c.tempCapacity != c.capacity { // should never happen
log.Error("tempCapacity != capacity when leaving tempState")
}
c.minTarget = pp.minCap
c.stepDiv = pp.capacityStepDiv
c.bias = 0
}
// setTempCapacity changes the capacity of a node in the temporary state and adjusts
// activeCap and activeCount accordingly. Since this change is performed in the temporary
// state it should be called after setTempState and before finalizeChanges.
func (pp *priorityPool) setTempCapacity(c *ppNodeInfo, cap uint64) {
if !c.tempState { // should never happen
log.Error("Node is not in temporary state")
return
}
pp.activeCap += cap - c.tempCapacity
if c.tempCapacity == 0 {
pp.activeCount++
}
if cap == 0 {
pp.activeCount--
}
c.tempCapacity = cap
}
// setTempBias changes the connection bias of a node in the temporary state.
func (pp *priorityPool) setTempBias(c *ppNodeInfo, bias time.Duration) {
if !c.tempState { // should never happen
log.Error("Node is not in temporary state")
return
}
c.bias = bias
}
// setTempStepDiv changes the capacity divisor of a node in the temporary state.
func (pp *priorityPool) setTempStepDiv(c *ppNodeInfo, stepDiv uint64) {
if !c.tempState { // should never happen
log.Error("Node is not in temporary state")
return
}
c.stepDiv = stepDiv
}
// enforceLimits enforces active node count and total capacity limits. It returns the
// lowest active node priority. Note that this function is performed on the temporary
// internal state.
func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) {
if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount {
return nil, math.MinInt64
}
var (
lastNode *ppNodeInfo
maxActivePriority int64
)
pp.activeQueue.MultiPop(func(c *ppNodeInfo, priority int64) bool {
lastNode = c
pp.setTempState(c)
maxActivePriority = priority
if c.tempCapacity == c.minTarget || pp.activeCount > pp.maxCount {
pp.setTempCapacity(c, 0)
} else {
sub := c.tempCapacity / c.stepDiv
if sub == 0 {
sub = 1
}
if c.tempCapacity-sub < c.minTarget {
sub = c.tempCapacity - c.minTarget
}
pp.setTempCapacity(c, c.tempCapacity-sub)
pp.activeQueue.Push(c)
}
return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount
})
return lastNode, invertPriority(maxActivePriority)
}
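// Worked example (illustrative numbers only): with stepDiv = 100 a node holding
// tempCapacity 10000 is reduced by 10000/100 = 100 on the first pass, then by
// slightly smaller steps as tempCapacity shrinks, clamped so it never undershoots
// minTarget. Once tempCapacity has reached minTarget, the next pass sets it to 0,
// deactivating the node entirely.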
// finalizeChanges either commits or reverts temporary changes. The necessary capacity
// field and according flag updates are not performed here but returned in a list because
// they should be performed while the mutex is not held.
func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) {
for _, c := range pp.tempState {
// always remove and push back in order to update biased priority
pp.removeFromQueues(c)
oldCapacity := c.capacity
if commit {
c.capacity = c.tempCapacity
} else {
pp.setTempCapacity(c, c.capacity) // revert activeCount/activeCap
}
pp.unsetTempState(c)
if c.connected {
if c.capacity != 0 {
pp.activeQueue.Push(c)
} else {
pp.inactiveQueue.Push(c, pp.inactivePriority(c))
}
if c.capacity != oldCapacity {
updates = append(updates, capUpdate{c.node, oldCapacity, c.capacity})
}
}
}
pp.tempState = nil
if commit {
pp.ccUpdateForced = true
}
return
}
// capUpdate describes a capacityField and activeFlag/inactiveFlag update
type capUpdate struct {
node *enode.Node
oldCap, newCap uint64
}
// updateFlags performs capacityField and activeFlag/inactiveFlag updates while the
// pool mutex is not held
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) updateFlags(updates []capUpdate) {
for _, f := range updates {
if f.oldCap == 0 {
pp.ns.SetStateSub(f.node, pp.setup.activeFlag, pp.setup.inactiveFlag, 0)
}
if f.newCap == 0 {
pp.ns.SetStateSub(f.node, pp.setup.inactiveFlag, pp.setup.activeFlag, 0)
pp.ns.SetFieldSub(f.node, pp.setup.capacityField, nil)
} else {
pp.ns.SetFieldSub(f.node, pp.setup.capacityField, f.newCap)
}
}
}
// tryActivate tries to activate inactive nodes if possible
func (pp *priorityPool) tryActivate(commit bool) []capUpdate {
for pp.inactiveQueue.Size() > 0 {
c := pp.inactiveQueue.PopItem()
pp.setTempState(c)
pp.setTempBias(c, pp.activeBias)
pp.setTempCapacity(c, pp.minCap)
pp.activeQueue.Push(c)
pp.enforceLimits()
if c.tempCapacity > 0 {
commit = true
pp.setTempBias(c, 0)
} else {
break
}
}
pp.ccUpdateForced = true
return pp.finalizeChanges(commit)
}
// updatePriority gets the current priority value of the given node from the nodePriority
// interface and performs the necessary changes. It is triggered by updateFlag.
// Note: this function should run inside a NodeStateMachine operation
func (pp *priorityPool) updatePriority(node *enode.Node) {
pp.lock.Lock()
pp.activeQueue.Refresh()
c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
if c == nil || !c.connected {
pp.lock.Unlock()
return
}
pp.removeFromQueues(c)
if c.capacity != 0 {
pp.activeQueue.Push(c)
} else {
pp.inactiveQueue.Push(c, pp.inactivePriority(c))
}
updates := pp.tryActivate(false)
pp.lock.Unlock()
pp.updateFlags(updates)
}
// capacityCurve is a snapshot of the priority pool contents in a format that can efficiently
// estimate how much capacity could be granted to a given node at a given priority level.
type capacityCurve struct {
points []curvePoint // curve points sorted in descending order of priority
index map[enode.ID][]int // curve point indexes belonging to each node
excludeList []int // curve point indexes of excluded node
excludeFirst bool // true if activeCount == maxCount
}
type curvePoint struct {
freeCap uint64 // available capacity and node count at the current priority level
nextPri int64 // next priority level where more capacity will be available
}
// getCapacityCurve returns a new or recently cached capacityCurve based on the contents of the pool
func (pp *priorityPool) getCapacityCurve() *capacityCurve {
pp.lock.Lock()
defer pp.lock.Unlock()
now := pp.clock.Now()
dt := time.Duration(now - pp.ccUpdatedAt)
if !pp.ccUpdateForced && pp.cachedCurve != nil && dt < time.Second*10 {
return pp.cachedCurve
}
pp.ccUpdateForced = false
pp.ccUpdatedAt = now
curve := &capacityCurve{
index: make(map[enode.ID][]int),
}
pp.cachedCurve = curve
var excludeID enode.ID
excludeFirst := pp.maxCount == pp.activeCount
// reduce node capacities or remove nodes until nothing is left in the queue;
// record the available capacity and the necessary priority after each step
lastPri := int64(math.MinInt64)
for pp.activeCap > 0 {
cp := curvePoint{}
if pp.activeCap > pp.maxCap {
log.Error("Active capacity is greater than allowed maximum", "active", pp.activeCap, "maximum", pp.maxCap)
} else {
cp.freeCap = pp.maxCap - pp.activeCap
}
// temporarily increase activeCap to enforce reducing or removing a node capacity
tempCap := cp.freeCap + 1
pp.activeCap += tempCap
var next *ppNodeInfo
// enforceLimits removes the lowest priority node if it has minimal capacity,
// otherwise reduces its capacity
next, cp.nextPri = pp.enforceLimits()
if cp.nextPri < lastPri {
// enforce monotonicity which may be broken by continuously changing priorities
cp.nextPri = lastPri
} else {
lastPri = cp.nextPri
}
pp.activeCap -= tempCap
if next == nil {
log.Error("getCapacityCurve: cannot remove next element from the priority queue")
break
}
id := next.node.ID()
if excludeFirst {
// if the node count limit is already reached then mark the node with the
// lowest priority for exclusion
curve.excludeFirst = true
excludeID = id
excludeFirst = false
}
// multiple curve points and therefore multiple indexes may belong to a node
// if it was removed in multiple steps (if its capacity was more than the minimum)
curve.index[id] = append(curve.index[id], len(curve.points))
curve.points = append(curve.points, cp)
}
// restore original state of the queue
pp.finalizeChanges(false)
curve.points = append(curve.points, curvePoint{
freeCap: pp.maxCap,
nextPri: math.MaxInt64,
})
if curve.excludeFirst {
curve.excludeList = curve.index[excludeID]
}
return curve
}
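// Reading the curve (illustrative): points run from highest to lowest priority.
// Each curvePoint effectively states "freeCap total capacity is available to a
// requester whose priority at that capacity exceeds nextPri". The first point's
// freeCap is whatever is unused right now (maxCap - activeCap); later points add
// back the capacity of the lowest-priority nodes as they are notionally squeezed
// out one step at a time.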
// exclude returns a capacityCurve with the given node excluded from the original curve
func (cc *capacityCurve) exclude(id enode.ID) *capacityCurve {
if excludeList, ok := cc.index[id]; ok {
// return a new version of the curve (only one excluded node can be selected)
// Note: if the first node was excluded by default (excludeFirst == true) then
// we can forget about that and exclude the node with the given id instead.
return &capacityCurve{
points: cc.points,
index: cc.index,
excludeList: excludeList,
}
}
return cc
}
func (cc *capacityCurve) getPoint(i int) curvePoint {
cp := cc.points[i]
if i == 0 && cc.excludeFirst {
cp.freeCap = 0
return cp
}
for ii := len(cc.excludeList) - 1; ii >= 0; ii-- {
ei := cc.excludeList[ii]
if ei < i {
break
}
e1, e2 := cc.points[ei], cc.points[ei+1]
cp.freeCap += e2.freeCap - e1.freeCap
}
return cp
}
// maxCapacity calculates the maximum capacity available for a node with a given
// (monotonically decreasing) priority vs. capacity function. Note that if the requesting
// node is already in the pool then it should be excluded from the curve in order to get
// the correct result.
func (cc *capacityCurve) maxCapacity(priority func(cap uint64) int64) uint64 {
min, max := 0, len(cc.points)-1 // the curve always has at least one point
for min < max {
mid := (min + max) / 2
cp := cc.getPoint(mid)
if cp.freeCap == 0 || priority(cp.freeCap) > cp.nextPri {
min = mid + 1
} else {
max = mid
}
}
cp2 := cc.getPoint(min)
if cp2.freeCap == 0 || min == 0 {
return cp2.freeCap
}
cp1 := cc.getPoint(min - 1)
if priority(cp2.freeCap) > cp1.nextPri {
return cp2.freeCap
}
minc, maxc := cp1.freeCap, cp2.freeCap-1
for minc < maxc {
midc := (minc + maxc + 1) / 2
if midc == 0 || priority(midc) > cp1.nextPri {
minc = midc
} else {
maxc = midc - 1
}
}
return maxc
}
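// Example query (mirrors the capacity curve tests in this package): for a client
// whose priority is purely balance-based, priority(cap) = balance/cap, so
//
//	cap := curve.maxCapacity(func(cap uint64) int64 { return int64(balance / cap) })
//
// yields the largest capacity at which such a client could still stay in the pool.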

@@ -1,237 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"math/rand"
"reflect"
"testing"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
const (
testCapacityStepDiv = 100
testCapacityToleranceDiv = 10
testMinCap = 100
)
type ppTestClient struct {
node *enode.Node
balance, cap uint64
}
func (c *ppTestClient) priority(cap uint64) int64 {
return int64(c.balance / cap)
}
func (c *ppTestClient) estimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 {
return int64(c.balance / cap)
}
func TestPriorityPool(t *testing.T) {
t.Parallel()
clock := &mclock.Simulated{}
setup := newServerSetup()
setup.balanceField = setup.setup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{}))
ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
if n := ns.GetField(node, setup.balanceField); n != nil {
c := n.(*ppTestClient)
c.cap = newValue.(uint64)
}
})
pp := newPriorityPool(ns, setup, clock, testMinCap, 0, testCapacityStepDiv, testCapacityStepDiv)
ns.Start()
pp.SetLimits(100, 1000000)
clients := make([]*ppTestClient, 100)
raise := func(c *ppTestClient) {
for {
var ok bool
ns.Operation(func() {
newCap := c.cap + c.cap/testCapacityStepDiv
ok = pp.requestCapacity(c.node, newCap, newCap, 0) == newCap
})
if !ok {
return
}
}
}
var sumBalance uint64
check := func(c *ppTestClient) {
expCap := 1000000 * c.balance / sumBalance
capTol := expCap / testCapacityToleranceDiv
if c.cap < expCap-capTol || c.cap > expCap+capTol {
t.Errorf("Wrong node capacity (expected %d, got %d)", expCap, c.cap)
}
}
for i := range clients {
c := &ppTestClient{
node: enode.SignNull(&enr.Record{}, enode.ID{byte(i)}),
balance: 100000000000,
cap: 1000,
}
sumBalance += c.balance
clients[i] = c
ns.SetField(c.node, setup.balanceField, c)
ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0)
raise(c)
check(c)
}
for count := 0; count < 100; count++ {
c := clients[rand.Intn(len(clients))]
oldBalance := c.balance
c.balance = uint64(rand.Int63n(100000000000) + 100000000000)
sumBalance += c.balance - oldBalance
pp.ns.SetState(c.node, setup.updateFlag, nodestate.Flags{}, 0)
pp.ns.SetState(c.node, nodestate.Flags{}, setup.updateFlag, 0)
if c.balance > oldBalance {
raise(c)
} else {
for _, c := range clients {
raise(c)
}
}
// check whether capacities are proportional to balances
for _, c := range clients {
check(c)
}
if count%10 == 0 {
// test available capacity calculation with capacity curve
c = clients[rand.Intn(len(clients))]
curve := pp.getCapacityCurve().exclude(c.node.ID())
add := uint64(rand.Int63n(10000000000000))
c.balance += add
sumBalance += add
expCap := curve.maxCapacity(func(cap uint64) int64 {
return int64(c.balance / cap)
})
var ok bool
expFail := expCap + 10
if expFail < testMinCap {
expFail = testMinCap
}
ns.Operation(func() {
ok = pp.requestCapacity(c.node, expFail, expFail, 0) == expFail
})
if ok {
t.Errorf("Request for more than expected available capacity succeeded")
}
if expCap >= testMinCap {
ns.Operation(func() {
ok = pp.requestCapacity(c.node, expCap, expCap, 0) == expCap
})
if !ok {
t.Errorf("Request for expected available capacity failed")
}
}
c.balance -= add
sumBalance -= add
pp.ns.SetState(c.node, setup.updateFlag, nodestate.Flags{}, 0)
pp.ns.SetState(c.node, nodestate.Flags{}, setup.updateFlag, 0)
for _, c := range clients {
raise(c)
}
}
}
ns.Stop()
}
func TestCapacityCurve(t *testing.T) {
t.Parallel()
clock := &mclock.Simulated{}
setup := newServerSetup()
setup.balanceField = setup.setup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{}))
ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
pp := newPriorityPool(ns, setup, clock, 400000, 0, 2, 2)
ns.Start()
pp.SetLimits(10, 10000000)
clients := make([]*ppTestClient, 10)
for i := range clients {
c := &ppTestClient{
node: enode.SignNull(&enr.Record{}, enode.ID{byte(i)}),
balance: 100000000000 * uint64(i+1),
cap: 1000000,
}
clients[i] = c
ns.SetField(c.node, setup.balanceField, c)
ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0)
ns.Operation(func() {
pp.requestCapacity(c.node, c.cap, c.cap, 0)
})
}
curve := pp.getCapacityCurve()
check := func(balance, expCap uint64) {
cap := curve.maxCapacity(func(cap uint64) int64 {
return int64(balance / cap)
})
var fail bool
if cap == 0 || expCap == 0 {
fail = cap != expCap
} else {
pri := balance / cap
expPri := balance / expCap
fail = pri != expPri && pri != expPri+1
}
if fail {
t.Errorf("Incorrect capacity for %d balance (got %d, expected %d)", balance, cap, expCap)
}
}
check(0, 0)
check(10000000000, 100000)
check(50000000000, 500000)
check(100000000000, 1000000)
check(200000000000, 1000000)
check(300000000000, 1500000)
check(450000000000, 1500000)
check(600000000000, 2000000)
check(800000000000, 2000000)
check(1000000000000, 2500000)
pp.SetLimits(11, 10000000)
curve = pp.getCapacityCurve()
check(0, 0)
check(10000000000, 100000)
check(50000000000, 500000)
check(150000000000, 750000)
check(200000000000, 1000000)
check(220000000000, 1100000)
check(275000000000, 1100000)
check(375000000000, 1500000)
check(450000000000, 1500000)
check(600000000000, 2000000)
check(800000000000, 2000000)
check(1000000000000, 2500000)
ns.Stop()
}

@@ -1,120 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"net"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/les/utils"
"github.com/ethereum/go-ethereum/les/vflux"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
type (
// Server serves vflux requests
Server struct {
limiter *utils.Limiter
lock sync.Mutex
services map[string]*serviceEntry
delayPerRequest time.Duration
}
// Service is a service registered at the Server and identified by a string id
Service interface {
Handle(id enode.ID, address string, name string, data []byte) []byte // never called concurrently
}
serviceEntry struct {
id, desc string
backend Service
}
)
// NewServer creates a new Server
func NewServer(delayPerRequest time.Duration) *Server {
return &Server{
limiter: utils.NewLimiter(1000),
delayPerRequest: delayPerRequest,
services: make(map[string]*serviceEntry),
}
}
// Register registers a Service
func (s *Server) Register(b Service, id, desc string) {
srv := &serviceEntry{backend: b, id: id, desc: desc}
if strings.Contains(srv.id, ":") {
// srv.id + ":" will be used as a service database prefix
log.Error("Service ID contains ':'", "id", srv.id)
return
}
s.lock.Lock()
s.services[srv.id] = srv
s.lock.Unlock()
}
// Serve serves a vflux request batch
// Note: requests are served by the Handle functions of the registered services. Serve
// may be called concurrently but the Handle functions are called sequentially and
// therefore thread safety is guaranteed.
func (s *Server) Serve(id enode.ID, address string, requests vflux.Requests) vflux.Replies {
reqLen := uint(len(requests))
if reqLen == 0 || reqLen > vflux.MaxRequestLength {
return nil
}
// Note: the value parameter will be supplied by the token sale module (total amount paid)
ch := <-s.limiter.Add(id, address, 0, reqLen)
if ch == nil {
return nil
}
// Note: the limiter ensures that the following section is not running concurrently,
// the lock only protects against contention caused by new service registration
s.lock.Lock()
results := make(vflux.Replies, len(requests))
for i, req := range requests {
if service := s.services[req.Service]; service != nil {
results[i] = service.backend.Handle(id, address, req.Name, req.Params)
}
}
s.lock.Unlock()
time.Sleep(s.delayPerRequest * time.Duration(reqLen))
close(ch)
return results
}
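// Hedged usage sketch (myService, peerID and peerAddr are assumptions for
// illustration; myService is anything implementing the Service interface above):
//
//	srv := NewServer(10 * time.Millisecond)
//	srv.Register(myService{}, "myservice", "example service")
//	replies := srv.Serve(peerID, peerAddr, vflux.Requests{
//		{Service: "myservice", Name: "ping", Params: nil},
//	})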
// ServeEncoded serves an encoded vflux request batch and returns the encoded replies
func (s *Server) ServeEncoded(id enode.ID, addr *net.UDPAddr, req []byte) []byte {
var requests vflux.Requests
if err := rlp.DecodeBytes(req, &requests); err != nil {
return nil
}
results := s.Serve(id, addr.String(), requests)
if results == nil {
return nil
}
res, _ := rlp.EncodeToBytes(&results)
return res
}
// Stop shuts down the server
func (s *Server) Stop() {
s.limiter.Stop()
}

@@ -1,59 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package server
import (
"reflect"
"github.com/ethereum/go-ethereum/p2p/nodestate"
)
type peerWrapper struct{ clientPeer } // the NodeStateMachine type system needs this wrapper
// serverSetup is a wrapper of the node state machine setup, which contains
// all the created flags and fields used in the vflux server side.
type serverSetup struct {
setup *nodestate.Setup
clientField nodestate.Field // Field contains the client peer handler
// Flags and fields controlled by balance tracker. BalanceTracker
// is responsible for setting/deleting these flags or fields.
priorityFlag nodestate.Flags // Flag is set if the node has a positive balance
updateFlag nodestate.Flags // Flag is set whenever the node balance is changed(priority changed)
balanceField nodestate.Field // Field contains the client balance for priority calculation
// Flags and fields controlled by priority queue. Priority queue
// is responsible for setting/deleting these flags or fields.
activeFlag nodestate.Flags // Flag is set if the node is active
inactiveFlag nodestate.Flags // Flag is set if the node is inactive
capacityField nodestate.Field // Field contains the capacity of the node
queueField nodestate.Field // Field contains the information in the priority queue
}
// newServerSetup initializes the setup for state machine and returns the flags/fields group.
func newServerSetup() *serverSetup {
setup := &serverSetup{setup: &nodestate.Setup{}}
setup.clientField = setup.setup.NewField("client", reflect.TypeOf(peerWrapper{}))
setup.priorityFlag = setup.setup.NewFlag("priority")
setup.updateFlag = setup.setup.NewFlag("update")
setup.balanceField = setup.setup.NewField("balance", reflect.TypeOf(&nodeBalance{}))
setup.activeFlag = setup.setup.NewFlag("active")
setup.inactiveFlag = setup.setup.NewFlag("inactive")
setup.capacityField = setup.setup.NewField("capacity", reflect.TypeOf(uint64(0)))
setup.queueField = setup.setup.NewField("queue", reflect.TypeOf(&ppNodeInfo{}))
return setup
}
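// Illustrative wiring (assumed, mirroring newPriorityPool earlier in this diff):
// the components never call each other directly; they react to these shared flags:
//
//	ns.SubscribeState(setup.activeFlag.Or(setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
//		// react to nodes entering or leaving the pool
//	})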

@@ -1,411 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"bytes"
"encoding/binary"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
l "github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
var (
bankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
bankAddr = crypto.PubkeyToAddress(bankKey.PublicKey)
bankFunds = new(big.Int).Mul(big.NewInt(100), big.NewInt(params.Ether))
testChainLen = 256
testContractCode = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056")
chain *core.BlockChain
addresses []common.Address
txHashes []common.Hash
chtTrie *trie.Trie
bloomTrie *trie.Trie
chtKeys [][]byte
bloomKeys [][]byte
)
func makechain() (bc *core.BlockChain, addresses []common.Address, txHashes []common.Hash) {
gspec := &core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{bankAddr: {Balance: bankFunds}},
GasLimit: 100000000,
}
signer := types.HomesteadSigner{}
_, blocks, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), testChainLen,
func(i int, gen *core.BlockGen) {
var (
tx *types.Transaction
addr common.Address
)
nonce := uint64(i)
if i%4 == 0 {
tx, _ = types.SignTx(types.NewContractCreation(nonce, big.NewInt(0), 200000, big.NewInt(params.GWei), testContractCode), signer, bankKey)
addr = crypto.CreateAddress(bankAddr, nonce)
} else {
addr = common.BigToAddress(big.NewInt(int64(i)))
tx, _ = types.SignTx(types.NewTransaction(nonce, addr, big.NewInt(10000), params.TxGas, big.NewInt(params.GWei), nil), signer, bankKey)
}
gen.AddTx(tx)
addresses = append(addresses, addr)
txHashes = append(txHashes, tx.Hash())
})
bc, _ = core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
if _, err := bc.InsertChain(blocks); err != nil {
panic(err)
}
return
}
func makeTries() (chtTrie *trie.Trie, bloomTrie *trie.Trie, chtKeys, bloomKeys [][]byte) {
chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), trie.HashDefaults))
bloomTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), trie.HashDefaults))
for i := 0; i < testChainLen; i++ {
// The element in CHT is <big-endian block number> -> <block hash>
key := make([]byte, 8)
binary.BigEndian.PutUint64(key, uint64(i+1))
chtTrie.MustUpdate(key, []byte{0x1, 0xf})
chtKeys = append(chtKeys, key)
// The element in Bloom trie is <2 byte bit index> + <big-endian block number> -> bloom
key2 := make([]byte, 10)
binary.BigEndian.PutUint64(key2[2:], uint64(i+1))
bloomTrie.MustUpdate(key2, []byte{0x2, 0xe})
bloomKeys = append(bloomKeys, key2)
}
return
}
func init() {
chain, addresses, txHashes = makechain()
chtTrie, bloomTrie, chtKeys, bloomKeys = makeTries()
}
type fuzzer struct {
chain *core.BlockChain
pool *txpool.TxPool
chainLen int
addresses []common.Address
txs []common.Hash
nonce uint64
chtKeys [][]byte
bloomKeys [][]byte
chtTrie *trie.Trie
bloomTrie *trie.Trie
input io.Reader
exhausted bool
}
func newFuzzer(input []byte) *fuzzer {
pool := legacypool.New(legacypool.DefaultConfig, chain)
txpool, _ := txpool.New(new(big.Int).SetUint64(legacypool.DefaultConfig.PriceLimit), chain, []txpool.SubPool{pool})
return &fuzzer{
chain: chain,
chainLen: testChainLen,
addresses: addresses,
txs: txHashes,
chtTrie: chtTrie,
bloomTrie: bloomTrie,
chtKeys: chtKeys,
bloomKeys: bloomKeys,
nonce: uint64(len(txHashes)),
pool: txpool,
input: bytes.NewReader(input),
}
}
func (f *fuzzer) read(size int) []byte {
out := make([]byte, size)
if _, err := f.input.Read(out); err != nil {
f.exhausted = true
}
return out
}
func (f *fuzzer) randomByte() byte {
d := f.read(1)
return d[0]
}
func (f *fuzzer) randomBool() bool {
d := f.read(1)
return d[0]&1 == 1
}
func (f *fuzzer) randomInt(max int) int {
if max == 0 {
return 0
}
if max <= 256 {
return int(f.randomByte()) % max
}
var a uint16
if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil {
f.exhausted = true
}
return int(a % uint16(max))
}
func (f *fuzzer) randomX(max int) uint64 {
var a uint16
if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil {
f.exhausted = true
}
if a < 0x8000 {
return uint64(a%uint16(max+1)) - 1
}
return (uint64(1)<<(a%64+1) - 1) & (uint64(a) * 343897772345826595)
}
func (f *fuzzer) randomBlockHash() common.Hash {
h := f.chain.GetCanonicalHash(uint64(f.randomInt(3 * f.chainLen)))
if h != (common.Hash{}) {
return h
}
return common.BytesToHash(f.read(common.HashLength))
}
func (f *fuzzer) randomAddress() []byte {
i := f.randomInt(3 * len(f.addresses))
if i < len(f.addresses) {
return f.addresses[i].Bytes()
}
return f.read(common.AddressLength)
}
func (f *fuzzer) randomCHTTrieKey() []byte {
i := f.randomInt(3 * len(f.chtKeys))
if i < len(f.chtKeys) {
return f.chtKeys[i]
}
return f.read(8)
}
func (f *fuzzer) randomBloomTrieKey() []byte {
i := f.randomInt(3 * len(f.bloomKeys))
if i < len(f.bloomKeys) {
return f.bloomKeys[i]
}
return f.read(10)
}
func (f *fuzzer) randomTxHash() common.Hash {
i := f.randomInt(3 * len(f.txs))
if i < len(f.txs) {
return f.txs[i]
}
return common.BytesToHash(f.read(common.HashLength))
}
func (f *fuzzer) BlockChain() *core.BlockChain {
return f.chain
}
func (f *fuzzer) TxPool() *txpool.TxPool {
return f.pool
}
func (f *fuzzer) ArchiveMode() bool {
return false
}
func (f *fuzzer) AddTxsSync() bool {
return false
}
func (f *fuzzer) GetHelperTrie(typ uint, index uint64) *trie.Trie {
if typ == 0 {
return f.chtTrie
} else if typ == 1 {
return f.bloomTrie
}
return nil
}
type dummyMsg struct {
data []byte
}
func (d dummyMsg) Decode(val interface{}) error {
return rlp.DecodeBytes(d.data, val)
}
func (f *fuzzer) doFuzz(msgCode uint64, packet interface{}) {
enc, err := rlp.EncodeToBytes(packet)
if err != nil {
panic(err)
}
version := f.randomInt(3) + 2 // [LES2, LES3, LES4]
peer, closeFn := l.NewFuzzerPeer(version)
defer closeFn()
fn, _, _, err := l.Les3[msgCode].Handle(dummyMsg{enc})
if err != nil {
panic(err)
}
fn(f, peer, func() bool { return true })
}
func fuzz(input []byte) int {
// We expect some large inputs
if len(input) < 100 {
return -1
}
f := newFuzzer(input)
if f.exhausted {
return -1
}
for !f.exhausted {
switch f.randomInt(8) {
case 0:
req := &l.GetBlockHeadersPacket{
Query: l.GetBlockHeadersData{
Amount: f.randomX(l.MaxHeaderFetch + 1),
Skip: f.randomX(10),
Reverse: f.randomBool(),
},
}
if f.randomBool() {
req.Query.Origin.Hash = f.randomBlockHash()
} else {
req.Query.Origin.Number = uint64(f.randomInt(f.chainLen * 2))
}
f.doFuzz(l.GetBlockHeadersMsg, req)
case 1:
req := &l.GetBlockBodiesPacket{Hashes: make([]common.Hash, f.randomInt(l.MaxBodyFetch+1))}
for i := range req.Hashes {
req.Hashes[i] = f.randomBlockHash()
}
f.doFuzz(l.GetBlockBodiesMsg, req)
case 2:
req := &l.GetCodePacket{Reqs: make([]l.CodeReq, f.randomInt(l.MaxCodeFetch+1))}
for i := range req.Reqs {
req.Reqs[i] = l.CodeReq{
BHash: f.randomBlockHash(),
AccountAddress: f.randomAddress(),
}
}
f.doFuzz(l.GetCodeMsg, req)
case 3:
req := &l.GetReceiptsPacket{Hashes: make([]common.Hash, f.randomInt(l.MaxReceiptFetch+1))}
for i := range req.Hashes {
req.Hashes[i] = f.randomBlockHash()
}
f.doFuzz(l.GetReceiptsMsg, req)
case 4:
req := &l.GetProofsPacket{Reqs: make([]l.ProofReq, f.randomInt(l.MaxProofsFetch+1))}
for i := range req.Reqs {
if f.randomBool() {
req.Reqs[i] = l.ProofReq{
BHash: f.randomBlockHash(),
AccountAddress: f.randomAddress(),
Key: f.randomAddress(),
FromLevel: uint(f.randomX(3)),
}
} else {
req.Reqs[i] = l.ProofReq{
BHash: f.randomBlockHash(),
Key: f.randomAddress(),
FromLevel: uint(f.randomX(3)),
}
}
}
f.doFuzz(l.GetProofsV2Msg, req)
case 5:
req := &l.GetHelperTrieProofsPacket{Reqs: make([]l.HelperTrieReq, f.randomInt(l.MaxHelperTrieProofsFetch+1))}
for i := range req.Reqs {
switch f.randomInt(3) {
case 0:
// Canonical hash trie
req.Reqs[i] = l.HelperTrieReq{
Type: 0,
TrieIdx: f.randomX(3),
Key: f.randomCHTTrieKey(),
FromLevel: uint(f.randomX(3)),
AuxReq: uint(2),
}
case 1:
// Bloom trie
req.Reqs[i] = l.HelperTrieReq{
Type: 1,
TrieIdx: f.randomX(3),
Key: f.randomBloomTrieKey(),
FromLevel: uint(f.randomX(3)),
AuxReq: 0,
}
default:
// Random trie
req.Reqs[i] = l.HelperTrieReq{
Type: 2,
TrieIdx: f.randomX(3),
Key: f.randomCHTTrieKey(),
FromLevel: uint(f.randomX(3)),
AuxReq: 0,
}
}
}
f.doFuzz(l.GetHelperTrieProofsMsg, req)
case 6:
req := &l.SendTxPacket{Txs: make([]*types.Transaction, f.randomInt(l.MaxTxSend+1))}
signer := types.HomesteadSigner{}
for i := range req.Txs {
var nonce uint64
if f.randomBool() {
nonce = uint64(f.randomByte())
} else {
nonce = f.nonce
f.nonce += 1
}
req.Txs[i], _ = types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(10000), params.TxGas, big.NewInt(1000000000*int64(f.randomByte())), nil), signer, bankKey)
}
f.doFuzz(l.SendTxV2Msg, req)
case 7:
req := &l.GetTxStatusPacket{Hashes: make([]common.Hash, f.randomInt(l.MaxTxStatus+1))}
for i := range req.Hashes {
req.Hashes[i] = f.randomTxHash()
}
f.doFuzz(l.GetTxStatusMsg, req)
}
}
return 0
}

@@ -1,25 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import "testing"
func Fuzz(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzz(data)
})
}

@@ -1,333 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vflux
import (
"bytes"
"encoding/binary"
"io"
"math"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/les/vflux"
vfs "github.com/ethereum/go-ethereum/les/vflux/server"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rlp"
)
var (
debugMode = false
doLog = func(msg string, ctx ...interface{}) {
if !debugMode {
return
}
log.Info(msg, ctx...)
}
)
type fuzzer struct {
peers [256]*clientPeer
disconnectList []*clientPeer
input io.Reader
exhausted bool
activeCount, activeCap uint64
maxCount, maxCap uint64
}
type clientPeer struct {
fuzzer *fuzzer
node *enode.Node
freeID string
timeout time.Duration
balance vfs.ConnectedBalance
capacity uint64
}
func (p *clientPeer) Node() *enode.Node {
return p.node
}
func (p *clientPeer) FreeClientId() string {
return p.freeID
}
func (p *clientPeer) InactiveAllowance() time.Duration {
return p.timeout
}
func (p *clientPeer) UpdateCapacity(newCap uint64, requested bool) {
origin, originTotal := p.capacity, p.fuzzer.activeCap
p.fuzzer.activeCap -= p.capacity
if p.capacity != 0 {
p.fuzzer.activeCount--
}
p.capacity = newCap
p.fuzzer.activeCap += p.capacity
if p.capacity != 0 {
p.fuzzer.activeCount++
}
doLog("Update capacity", "peer", p.node.ID(), "origin", origin, "cap", newCap, "origintotal", originTotal, "total", p.fuzzer.activeCap, "requested", requested)
}
func (p *clientPeer) Disconnect() {
origin, originTotal := p.capacity, p.fuzzer.activeCap
p.fuzzer.disconnectList = append(p.fuzzer.disconnectList, p)
p.fuzzer.activeCap -= p.capacity
if p.capacity != 0 {
p.fuzzer.activeCount--
}
p.capacity = 0
p.balance = nil
doLog("Disconnect", "peer", p.node.ID(), "origin", origin, "origintotal", originTotal, "total", p.fuzzer.activeCap)
}
func newFuzzer(input []byte) *fuzzer {
f := &fuzzer{
input: bytes.NewReader(input),
}
for i := range f.peers {
f.peers[i] = &clientPeer{
fuzzer: f,
node: enode.SignNull(new(enr.Record), enode.ID{byte(i)}),
freeID: string([]byte{byte(i)}),
timeout: f.randomDelay(),
}
}
return f
}
func (f *fuzzer) read(size int) []byte {
out := make([]byte, size)
if _, err := f.input.Read(out); err != nil {
f.exhausted = true
}
return out
}
func (f *fuzzer) randomByte() byte {
d := f.read(1)
return d[0]
}
func (f *fuzzer) randomBool() bool {
d := f.read(1)
return d[0]&1 == 1
}
func (f *fuzzer) randomInt(max int) int {
if max == 0 {
return 0
}
if max <= 256 {
return int(f.randomByte()) % max
}
var a uint16
if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil {
f.exhausted = true
}
return int(a % uint16(max))
}
func (f *fuzzer) randomTokenAmount(signed bool) int64 {
x := uint64(f.randomInt(65000))
x = x * x * x * x
if signed && (x&1) == 1 {
if x <= math.MaxInt64 {
return -int64(x)
}
return math.MinInt64
}
if x <= math.MaxInt64 {
return int64(x)
}
return math.MaxInt64
}
func (f *fuzzer) randomDelay() time.Duration {
delay := f.randomByte()
if delay < 128 {
return time.Duration(delay) * time.Second
}
return 0
}
func (f *fuzzer) randomFactors() vfs.PriceFactors {
return vfs.PriceFactors{
TimeFactor: float64(f.randomByte()) / 25500,
CapacityFactor: float64(f.randomByte()) / 255,
RequestFactor: float64(f.randomByte()) / 255,
}
}
func (f *fuzzer) connectedBalanceOp(balance vfs.ConnectedBalance, id enode.ID) {
switch f.randomInt(3) {
case 0:
cost := uint64(f.randomTokenAmount(false))
balance.RequestServed(cost)
doLog("Serve request cost", "id", id, "amount", cost)
case 1:
posFactor, negFactor := f.randomFactors(), f.randomFactors()
balance.SetPriceFactors(posFactor, negFactor)
doLog("Set price factor", "pos", posFactor, "neg", negFactor)
case 2:
balance.GetBalance()
balance.GetRawBalance()
balance.GetPriceFactors()
}
}
func (f *fuzzer) atomicBalanceOp(balance vfs.AtomicBalanceOperator, id enode.ID) {
switch f.randomInt(3) {
case 0:
amount := f.randomTokenAmount(true)
balance.AddBalance(amount)
doLog("Add balance", "id", id, "amount", amount)
case 1:
pos, neg := uint64(f.randomTokenAmount(false)), uint64(f.randomTokenAmount(false))
balance.SetBalance(pos, neg)
doLog("Set balance", "id", id, "pos", pos, "neg", neg)
case 2:
balance.GetBalance()
balance.GetRawBalance()
balance.GetPriceFactors()
}
}
func fuzzClientPool(input []byte) int {
if len(input) > 10000 {
return -1
}
f := newFuzzer(input)
if f.exhausted {
return 0
}
clock := &mclock.Simulated{}
db := memorydb.New()
pool := vfs.NewClientPool(db, 10, f.randomDelay(), clock, func() bool { return true })
pool.Start()
defer pool.Stop()
count := 0
for !f.exhausted && count < 1000 {
count++
switch f.randomInt(11) {
case 0:
i := int(f.randomByte())
f.peers[i].balance = pool.Register(f.peers[i])
doLog("Register peer", "id", f.peers[i].node.ID())
case 1:
i := int(f.randomByte())
f.peers[i].Disconnect()
doLog("Disconnect peer", "id", f.peers[i].node.ID())
case 2:
f.maxCount = uint64(f.randomByte())
f.maxCap = uint64(f.randomByte())
f.maxCap *= f.maxCap
count, cap := pool.Limits()
pool.SetLimits(f.maxCount, f.maxCap)
doLog("Set limits", "maxcount", f.maxCount, "maxcap", f.maxCap, "origincount", count, "oricap", cap)
case 3:
bias := f.randomDelay()
pool.SetConnectedBias(bias)
doLog("Set connection bias", "bias", bias)
case 4:
pos, neg := f.randomFactors(), f.randomFactors()
pool.SetDefaultFactors(pos, neg)
doLog("Set default factors", "pos", pos, "neg", neg)
case 5:
pos, neg := uint64(f.randomInt(50000)), uint64(f.randomInt(50000))
pool.SetExpirationTCs(pos, neg)
doLog("Set expiration constants", "pos", pos, "neg", neg)
case 6:
var (
index = f.randomByte()
reqCap = uint64(f.randomByte())
bias = f.randomDelay()
requested = f.randomBool()
)
pool.SetCapacity(f.peers[index].node, reqCap, bias, requested)
doLog("Set capacity", "id", f.peers[index].node.ID(), "reqcap", reqCap, "bias", bias, "requested", requested)
case 7:
index := f.randomByte()
if balance := f.peers[index].balance; balance != nil {
f.connectedBalanceOp(balance, f.peers[index].node.ID())
}
case 8:
index := f.randomByte()
pool.BalanceOperation(f.peers[index].node.ID(), f.peers[index].freeID, func(balance vfs.AtomicBalanceOperator) {
count := f.randomInt(4)
for i := 0; i < count; i++ {
f.atomicBalanceOp(balance, f.peers[index].node.ID())
}
})
case 9:
pool.TotalTokenAmount()
pool.GetExpirationTCs()
pool.Active()
pool.Limits()
pool.GetPosBalanceIDs(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].node.ID(), f.randomInt(100))
case 10:
req := vflux.CapacityQueryReq{
Bias: uint64(f.randomByte()),
AddTokens: make([]vflux.IntOrInf, f.randomInt(vflux.CapacityQueryMaxLen+1)),
}
for i := range req.AddTokens {
v := vflux.IntOrInf{Type: uint8(f.randomInt(4))}
if v.Type < 2 {
v.Value = *big.NewInt(f.randomTokenAmount(false))
}
req.AddTokens[i] = v
}
reqEnc, err := rlp.EncodeToBytes(&req)
if err != nil {
panic(err)
}
p := int(f.randomByte())
if p < len(reqEnc) {
reqEnc[p] = f.randomByte()
}
pool.Handle(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].freeID, vflux.CapacityQueryName, reqEnc)
}
for _, peer := range f.disconnectList {
pool.Unregister(peer)
doLog("Unregister peer", "id", peer.node.ID())
}
f.disconnectList = nil
if d := f.randomDelay(); d > 0 {
clock.Run(d)
}
doLog("Clientpool stats in fuzzer", "count", f.activeCap, "maxcount", f.maxCount, "cap", f.activeCap, "maxcap", f.maxCap)
activeCount, activeCap := pool.Active()
doLog("Clientpool stats in pool", "count", activeCount, "cap", activeCap)
if activeCount != f.activeCount || activeCap != f.activeCap {
panic(nil)
}
if f.activeCount > f.maxCount || f.activeCap > f.maxCap {
panic(nil)
}
}
return 0
}
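// The two panic(nil) checks above are the fuzzer's invariants: the pool's reported
// active count and capacity must match the fuzzer's shadow accounting, and must
// never exceed the configured limits; any divergence aborts the run and flags a bug.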

@@ -1,25 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vflux
import "testing"
func FuzzClientPool(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
fuzzClientPool(data)
})
}