mirror of https://github.com/ethereum/go-ethereum
les, les/flowcontrol: improved request serving and flow control (#18230)
This change - implements concurrent LES request serving even for a single peer. - replaces the request cost estimation method with a cost table based on benchmarks which gives much more consistent results. Until now the allowed number of light peers was just a guess which probably contributed a lot to the fluctuating quality of available service. Everything related to request cost is implemented in a single object, the 'cost tracker'. It uses a fixed cost table with a global 'correction factor'. Benchmark code is included and can be run at any time to adapt costs to low-level implementation changes. - reimplements flowcontrol.ClientManager in a cleaner and more efficient way, with added capabilities: There is now control over bandwidth, which allows using the flow control parameters for client prioritization. Target utilization over 100 percent is now supported to model concurrent request processing. Total serving bandwidth is reduced during block processing to prevent database contention. - implements an RPC API for the LES servers allowing server operators to assign priority bandwidth to certain clients and change prioritized status even while the client is connected. The new API is meant for cases where server operators charge for LES using an off-protocol mechanism. - adds a unit test for the new client manager. - adds an end-to-end test using the network simulator that tests bandwidth control functions through the new API.pull/19171/head
parent
c2b33a117f
commit
c2003ed63b
@ -0,0 +1,454 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package les |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/ethereum/go-ethereum/common/mclock" |
||||
"github.com/ethereum/go-ethereum/p2p/enode" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
) |
||||
|
||||
// Errors and tunables of the light server capacity API.
var (
	ErrMinCap               = errors.New("capacity too small")
	ErrTotalCap             = errors.New("total capacity exceeded")
	ErrUnknownBenchmarkType = errors.New("unknown benchmark type")

	// dropCapacityDelay is the grace period before a total capacity decrease
	// is actually enforced, giving operators time to adjust priority clients.
	dropCapacityDelay = time.Second // delay applied to decreasing capacity changes
)
||||
|
||||
// PrivateLightServerAPI provides an API to access the LES light server.
// It offers only methods that operate on public data that is freely available to anyone.
type PrivateLightServerAPI struct {
	server *LesServer // backing LES server whose priority client pool is managed
}
||||
|
||||
// NewPrivateLightServerAPI creates a new LES light server API.
|
||||
func NewPrivateLightServerAPI(server *LesServer) *PrivateLightServerAPI { |
||||
return &PrivateLightServerAPI{ |
||||
server: server, |
||||
} |
||||
} |
||||
|
||||
// TotalCapacity queries total available capacity for all clients.
//
// Note: this is the announced capacity, which may temporarily exceed the
// enforced one while a decrease is being applied with a delay.
func (api *PrivateLightServerAPI) TotalCapacity() hexutil.Uint64 {
	return hexutil.Uint64(api.server.priorityClientPool.totalCapacity())
}
||||
|
||||
// SubscribeTotalCapacity subscribes to changed total capacity events.
// If onlyUnderrun is true then notification is sent only if the total capacity
// drops under the total capacity of connected priority clients.
//
// Note: actually applying decreasing total capacity values is delayed while the
// notification is sent instantly. This allows lowering the capacity of a priority client
// or choosing which one to drop before the system drops some of them automatically.
func (api *PrivateLightServerAPI) SubscribeTotalCapacity(ctx context.Context, onlyUnderrun bool) (*rpc.Subscription, error) {
	notifier, supported := rpc.NotifierFromContext(ctx)
	if !supported {
		// Caller's transport cannot deliver notifications (e.g. plain HTTP).
		return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
	}
	rpcSub := notifier.CreateSubscription()
	api.server.priorityClientPool.subscribeTotalCapacity(&tcSubscription{notifier, rpcSub, onlyUnderrun})
	return rpcSub, nil
}
||||
|
||||
type (
	// tcSubscription represents a total capacity subscription
	tcSubscription struct {
		notifier     *rpc.Notifier     // notifier used to push events to the subscriber
		rpcSub       *rpc.Subscription // RPC subscription handle
		onlyUnderrun bool              // if true, notify only when priority capacity is underrun
	}
	// tcSubs is the set of active total capacity subscriptions.
	tcSubs map[*tcSubscription]struct{}
)
||||
|
||||
// send sends a changed total capacity event to the subscribers
|
||||
func (s tcSubs) send(tc uint64, underrun bool) { |
||||
for sub := range s { |
||||
select { |
||||
case <-sub.rpcSub.Err(): |
||||
delete(s, sub) |
||||
case <-sub.notifier.Closed(): |
||||
delete(s, sub) |
||||
default: |
||||
if underrun || !sub.onlyUnderrun { |
||||
sub.notifier.Notify(sub.rpcSub.ID, tc) |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
// MinimumCapacity queries minimum assignable capacity for a single client.
func (api *PrivateLightServerAPI) MinimumCapacity() hexutil.Uint64 {
	return hexutil.Uint64(minCapacity)
}
||||
|
||||
// FreeClientCapacity queries the capacity provided for free clients.
func (api *PrivateLightServerAPI) FreeClientCapacity() hexutil.Uint64 {
	return hexutil.Uint64(api.server.freeClientCap)
}
||||
|
||||
// SetClientCapacity sets the priority capacity assigned to a given client.
|
||||
// If the assigned capacity is bigger than zero then connection is always
|
||||
// guaranteed. The sum of capacity assigned to priority clients can not exceed
|
||||
// the total available capacity.
|
||||
//
|
||||
// Note: assigned capacity can be changed while the client is connected with
|
||||
// immediate effect.
|
||||
func (api *PrivateLightServerAPI) SetClientCapacity(id enode.ID, cap uint64) error { |
||||
if cap != 0 && cap < minCapacity { |
||||
return ErrMinCap |
||||
} |
||||
return api.server.priorityClientPool.setClientCapacity(id, cap) |
||||
} |
||||
|
||||
// GetClientCapacity returns the capacity assigned to a given client.
// Unknown clients report zero (the map lookup yields the zero-value record).
func (api *PrivateLightServerAPI) GetClientCapacity(id enode.ID) hexutil.Uint64 {
	api.server.priorityClientPool.lock.Lock()
	defer api.server.priorityClientPool.lock.Unlock()

	return hexutil.Uint64(api.server.priorityClientPool.clients[id].cap)
}
||||
|
||||
// clientPool is implemented by both the free and priority client pools.
type clientPool interface {
	peerSetNotify
	// setLimits updates the allowed peer count and total assignable capacity.
	setLimits(count int, totalCap uint64)
}
||||
|
||||
// priorityClientPool stores information about prioritized clients.
type priorityClientPool struct {
	lock    sync.Mutex                     // protects all fields below
	child   clientPool                     // pool receiving the capacity left over by priority clients
	ps      *peerSet                       // peer set used to force-disconnect clients
	clients map[enode.ID]priorityClientInfo // priority clients plus currently connected free clients

	totalCap, totalCapAnnounced      uint64 // enforced vs. announced total capacity
	totalConnectedCap, freeClientCap uint64 // capacity in use by connected priority clients; per-free-client capacity
	maxPeers, priorityCount          int    // peer limit and number of connected priority clients

	subs            tcSubs            // total capacity change subscribers
	updateSchedule  []scheduledUpdate // pending (delayed) capacity decreases
	scheduleCounter uint64            // id generator matching timers to schedule entries
}
||||
|
||||
// scheduledUpdate represents a delayed total capacity update.
type scheduledUpdate struct {
	time         mclock.AbsTime // when the update becomes due
	totalCap, id uint64         // target capacity and timer-matching id
}
||||
|
||||
// priorityClientInfo entries exist for all prioritized clients and currently connected non-priority clients.
type priorityClientInfo struct {
	cap       uint64 // zero for non-priority clients
	connected bool   // whether the client is currently connected
	peer      *peer  // peer handle, valid while connected
}
||||
|
||||
// newPriorityClientPool creates a new priority client pool
|
||||
func newPriorityClientPool(freeClientCap uint64, ps *peerSet, child clientPool) *priorityClientPool { |
||||
return &priorityClientPool{ |
||||
clients: make(map[enode.ID]priorityClientInfo), |
||||
freeClientCap: freeClientCap, |
||||
ps: ps, |
||||
child: child, |
||||
} |
||||
} |
||||
|
||||
// registerPeer is called when a new client is connected. If the client has no
// priority assigned then it is passed to the child pool which may either keep it
// or disconnect it.
//
// Note: priorityClientPool also stores a record about free clients while they are
// connected in order to be able to assign priority to them later.
func (v *priorityClientPool) registerPeer(p *peer) {
	v.lock.Lock()
	defer v.lock.Unlock()

	id := p.ID()
	c := v.clients[id]
	if c.connected {
		// Already registered; nothing to do.
		return
	}
	if c.cap == 0 && v.child != nil {
		// No priority capacity assigned: let the child (free) pool decide.
		v.child.registerPeer(p)
	}
	if c.cap != 0 && v.totalConnectedCap+c.cap > v.totalCap {
		// Admitting this priority client would exceed the enforced total
		// capacity; drop the connection instead.
		go v.ps.Unregister(p.id)
		return
	}

	c.connected = true
	c.peer = p
	v.clients[id] = c
	if c.cap != 0 {
		v.priorityCount++
		v.totalConnectedCap += c.cap
		if v.child != nil {
			// Shrink the child pool's share by what priority clients now use.
			v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
		}
		p.updateCapacity(c.cap)
	}
}
||||
|
||||
// unregisterPeer is called when a client is disconnected. If the client has no
// priority assigned then it is also removed from the child pool.
func (v *priorityClientPool) unregisterPeer(p *peer) {
	v.lock.Lock()
	defer v.lock.Unlock()

	id := p.ID()
	c := v.clients[id]
	if !c.connected {
		return
	}
	if c.cap != 0 {
		// Priority client: keep the record so the assigned capacity survives
		// reconnection, but release its share of the connected capacity.
		c.connected = false
		v.clients[id] = c
		v.priorityCount--
		v.totalConnectedCap -= c.cap
		if v.child != nil {
			// Return the freed capacity to the child (free) pool.
			v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
		}
	} else {
		// Free client: forget it entirely and notify the child pool.
		if v.child != nil {
			v.child.unregisterPeer(p)
		}
		delete(v.clients, id)
	}
}
||||
|
||||
// setLimits updates the allowed peer count and total capacity of the priority
|
||||
// client pool. Since the free client pool is a child of the priority pool the
|
||||
// remaining peer count and capacity is assigned to the free pool by calling its
|
||||
// own setLimits function.
|
||||
//
|
||||
// Note: a decreasing change of the total capacity is applied with a delay.
|
||||
func (v *priorityClientPool) setLimits(count int, totalCap uint64) { |
||||
v.lock.Lock() |
||||
defer v.lock.Unlock() |
||||
|
||||
v.totalCapAnnounced = totalCap |
||||
if totalCap > v.totalCap { |
||||
v.setLimitsNow(count, totalCap) |
||||
v.subs.send(totalCap, false) |
||||
return |
||||
} |
||||
v.setLimitsNow(count, v.totalCap) |
||||
if totalCap < v.totalCap { |
||||
v.subs.send(totalCap, totalCap < v.totalConnectedCap) |
||||
for i, s := range v.updateSchedule { |
||||
if totalCap >= s.totalCap { |
||||
s.totalCap = totalCap |
||||
v.updateSchedule = v.updateSchedule[:i+1] |
||||
return |
||||
} |
||||
} |
||||
v.updateSchedule = append(v.updateSchedule, scheduledUpdate{time: mclock.Now() + mclock.AbsTime(dropCapacityDelay), totalCap: totalCap}) |
||||
if len(v.updateSchedule) == 1 { |
||||
v.scheduleCounter++ |
||||
id := v.scheduleCounter |
||||
v.updateSchedule[0].id = id |
||||
time.AfterFunc(dropCapacityDelay, func() { v.checkUpdate(id) }) |
||||
} |
||||
} else { |
||||
v.updateSchedule = nil |
||||
} |
||||
} |
||||
|
||||
// checkUpdate performs the next scheduled update if possible and schedules
// the one after that.
func (v *priorityClientPool) checkUpdate(id uint64) {
	v.lock.Lock()
	defer v.lock.Unlock()

	// Ignore stale timers: the schedule may have been replaced since this
	// callback was armed, in which case the id no longer matches.
	if len(v.updateSchedule) == 0 || v.updateSchedule[0].id != id {
		return
	}
	v.setLimitsNow(v.maxPeers, v.updateSchedule[0].totalCap)
	v.updateSchedule = v.updateSchedule[1:]
	if len(v.updateSchedule) != 0 {
		// Arm a timer for the next pending update.
		v.scheduleCounter++
		id := v.scheduleCounter
		v.updateSchedule[0].id = id
		dt := time.Duration(v.updateSchedule[0].time - mclock.Now())
		time.AfterFunc(dt, func() { v.checkUpdate(id) })
	}
}
||||
|
||||
// setLimits updates the allowed peer count and total capacity immediately
|
||||
func (v *priorityClientPool) setLimitsNow(count int, totalCap uint64) { |
||||
if v.priorityCount > count || v.totalConnectedCap > totalCap { |
||||
for id, c := range v.clients { |
||||
if c.connected { |
||||
c.connected = false |
||||
v.totalConnectedCap -= c.cap |
||||
v.priorityCount-- |
||||
v.clients[id] = c |
||||
go v.ps.Unregister(c.peer.id) |
||||
if v.priorityCount <= count && v.totalConnectedCap <= totalCap { |
||||
break |
||||
} |
||||
} |
||||
} |
||||
} |
||||
v.maxPeers = count |
||||
v.totalCap = totalCap |
||||
if v.child != nil { |
||||
v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap) |
||||
} |
||||
} |
||||
|
||||
// totalCapacity queries total available capacity for all clients.
// Returns the announced value, which may differ from the currently enforced
// one while a delayed decrease is pending.
func (v *priorityClientPool) totalCapacity() uint64 {
	v.lock.Lock()
	defer v.lock.Unlock()

	return v.totalCapAnnounced
}
||||
|
||||
// subscribeTotalCapacity subscribes to changed total capacity events
|
||||
func (v *priorityClientPool) subscribeTotalCapacity(sub *tcSubscription) { |
||||
v.lock.Lock() |
||||
defer v.lock.Unlock() |
||||
|
||||
v.subs[sub] = struct{}{} |
||||
} |
||||
|
||||
// setClientCapacity sets the priority capacity assigned to a given client.
// Returns ErrTotalCap if the change would push connected priority capacity
// over the enforced total.
func (v *priorityClientPool) setClientCapacity(id enode.ID, cap uint64) error {
	v.lock.Lock()
	defer v.lock.Unlock()

	c := v.clients[id]
	if c.cap == cap {
		return nil
	}
	if c.connected {
		// Reject if the replacement would overflow total capacity
		// (the old c.cap is refunded on the right-hand side).
		if v.totalConnectedCap+cap > v.totalCap+c.cap {
			return ErrTotalCap
		}
		if c.cap == 0 {
			// Free -> priority transition: take the peer out of the child pool.
			if v.child != nil {
				v.child.unregisterPeer(c.peer)
			}
			v.priorityCount++
		}
		if cap == 0 {
			// Priority -> free transition.
			v.priorityCount--
		}
		v.totalConnectedCap += cap - c.cap
		if v.child != nil {
			// Rebalance the child pool's share.
			v.child.setLimits(v.maxPeers-v.priorityCount, v.totalCap-v.totalConnectedCap)
		}
		if cap == 0 {
			// Hand the peer back to the child pool with free client capacity.
			if v.child != nil {
				v.child.registerPeer(c.peer)
			}
			c.peer.updateCapacity(v.freeClientCap)
		} else {
			c.peer.updateCapacity(cap)
		}
	}
	if cap != 0 || c.connected {
		c.cap = cap
		v.clients[id] = c
	} else {
		// Neither priority nor connected: no reason to keep a record.
		delete(v.clients, id)
	}
	return nil
}
||||
|
||||
// Benchmark runs a request performance benchmark with a given set of measurement setups
// in multiple passes specified by passCount. The measurement time for each setup in each
// pass is specified in milliseconds by length.
//
// Note: measurement time is adjusted for each pass depending on the previous ones.
// Therefore a controlled total measurement time is achievable in multiple passes.
func (api *PrivateLightServerAPI) Benchmark(setups []map[string]interface{}, passCount, length int) ([]map[string]interface{}, error) {
	benchmarks := make([]requestBenchmark, len(setups))
	for i, setup := range setups {
		if t, ok := setup["type"].(string); ok {
			// JSON numbers arrive as float64; convert with a default fallback.
			getInt := func(field string, def int) int {
				if value, ok := setup[field].(float64); ok {
					return int(value)
				}
				return def
			}
			getBool := func(field string, def bool) bool {
				if value, ok := setup[field].(bool); ok {
					return value
				}
				return def
			}
			// Map the setup descriptor onto a concrete benchmark implementation.
			switch t {
			case "header":
				benchmarks[i] = &benchmarkBlockHeaders{
					amount:  getInt("amount", 1),
					skip:    getInt("skip", 1),
					byHash:  getBool("byHash", false),
					reverse: getBool("reverse", false),
				}
			case "body":
				benchmarks[i] = &benchmarkBodiesOrReceipts{receipts: false}
			case "receipts":
				benchmarks[i] = &benchmarkBodiesOrReceipts{receipts: true}
			case "proof":
				benchmarks[i] = &benchmarkProofsOrCode{code: false}
			case "code":
				benchmarks[i] = &benchmarkProofsOrCode{code: true}
			case "cht":
				benchmarks[i] = &benchmarkHelperTrie{
					bloom:    false,
					reqCount: getInt("amount", 1),
				}
			case "bloom":
				benchmarks[i] = &benchmarkHelperTrie{
					bloom:    true,
					reqCount: getInt("amount", 1),
				}
			case "txSend":
				benchmarks[i] = &benchmarkTxSend{}
			case "txStatus":
				benchmarks[i] = &benchmarkTxStatus{}
			default:
				return nil, ErrUnknownBenchmarkType
			}
		} else {
			return nil, ErrUnknownBenchmarkType
		}
	}
	rs := api.server.protocolManager.runBenchmark(benchmarks, passCount, time.Millisecond*time.Duration(length))
	// Convert the raw results into JSON-friendly maps.
	result := make([]map[string]interface{}, len(setups))
	for i, r := range rs {
		res := make(map[string]interface{})
		if r.err == nil {
			res["totalCount"] = r.totalCount
			res["avgTime"] = r.avgTime
			res["maxInSize"] = r.maxInSize
			res["maxOutSize"] = r.maxOutSize
		} else {
			res["error"] = r.err.Error()
		}
		result[i] = res
	}
	return result, nil
}
@ -0,0 +1,525 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package les |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"flag" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"math/rand" |
||||
"os" |
||||
"sync" |
||||
"sync/atomic" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/ethereum/go-ethereum/consensus/ethash" |
||||
"github.com/ethereum/go-ethereum/eth" |
||||
"github.com/ethereum/go-ethereum/eth/downloader" |
||||
"github.com/ethereum/go-ethereum/les/flowcontrol" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p/enode" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
colorable "github.com/mattn/go-colorable" |
||||
) |
||||
|
||||
/* |
||||
This test is not meant to be a part of the automatic testing process because it |
||||
runs for a long time and also requires a large database in order to do a meaningful |
||||
request performance test. When testServerDataDir is empty, the test is skipped. |
||||
*/ |
||||
|
||||
const (
	testServerDataDir  = "" // should always be empty on the master branch
	testServerCapacity = 200 // total server capacity used by the simulation
	testMaxClients     = 10  // upper bound on simulated clients
	testTolerance      = 0.1 // accepted relative deviation of processed-request ratios
	minRelCap          = 0.2 // minimum relative capacity assigned to a priority client
)
||||
|
||||
// TestCapacityAPI3 runs the end-to-end capacity test with 3 clients.
func TestCapacityAPI3(t *testing.T) {
	testCapacityAPI(t, 3)
}
||||
|
||||
// TestCapacityAPI6 runs the end-to-end capacity test with 6 clients.
func TestCapacityAPI6(t *testing.T) {
	testCapacityAPI(t, 6)
}
||||
|
||||
// TestCapacityAPI10 runs the end-to-end capacity test with 10 clients.
func TestCapacityAPI10(t *testing.T) {
	testCapacityAPI(t, 10)
}
||||
|
||||
// testCapacityAPI runs an end-to-end simulation test connecting one server with
// a given number of clients. It sets different priority capacities to all clients
// except a randomly selected one which runs in free client mode. All clients send
// similar requests at the maximum allowed rate and the test verifies whether the
// ratio of processed requests is close enough to the ratio of assigned capacities.
// Running multiple rounds with different settings ensures that changing capacity
// while connected and going back and forth between free and priority mode with
// the supplied API calls is also thoroughly tested.
func testCapacityAPI(t *testing.T, clientCount int) {
	if testServerDataDir == "" {
		// Skip test if no data dir specified
		return
	}

	// The simulation callback returns false on capacity underrun, in which
	// case the whole test is restarted.
	for !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {
		if len(servers) != 1 {
			t.Fatalf("Invalid number of servers: %d", len(servers))
		}
		server := servers[0]

		clientRpcClients := make([]*rpc.Client, len(clients))

		serverRpcClient, err := server.Client()
		if err != nil {
			t.Fatalf("Failed to obtain rpc client: %v", err)
		}
		// Query the server's view of the chain and capacity parameters.
		headNum, headHash := getHead(ctx, t, serverRpcClient)
		totalCap := getTotalCap(ctx, t, serverRpcClient)
		minCap := getMinCap(ctx, t, serverRpcClient)
		testCap := totalCap * 3 / 4
		fmt.Printf("Server testCap: %d minCap: %d head number: %d head hash: %064x\n", testCap, minCap, headNum, headHash)
		reqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))
		if minCap > reqMinCap {
			t.Fatalf("Minimum client capacity (%d) bigger than required minimum for this test (%d)", minCap, reqMinCap)
		}

		// One randomly chosen client runs in free mode.
		freeIdx := rand.Intn(len(clients))
		freeCap := getFreeCap(ctx, t, serverRpcClient)

		for i, client := range clients {
			var err error
			clientRpcClients[i], err = client.Client()
			if err != nil {
				t.Fatalf("Failed to obtain rpc client: %v", err)
			}

			fmt.Println("connecting client", i)
			if i != freeIdx {
				setCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))
			}
			net.Connect(client.ID(), server.ID())

			// Wait until the client has synced to the server's head.
			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}
				num, hash := getHead(ctx, t, clientRpcClients[i])
				if num == headNum && hash == headHash {
					fmt.Println("client", i, "synced")
					break
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		var wg sync.WaitGroup
		stop := make(chan struct{})

		reqCount := make([]uint64, len(clientRpcClients))

		// Per-client request generators: keep up to 100 requests in flight.
		for i, c := range clientRpcClients {
			wg.Add(1)
			i, c := i, c
			go func() {
				queue := make(chan struct{}, 100)
				var count uint64
				for {
					select {
					case queue <- struct{}{}:
						select {
						case <-stop:
							wg.Done()
							return
						case <-ctx.Done():
							wg.Done()
							return
						default:
							wg.Add(1)
							go func() {
								ok := testRequest(ctx, t, c)
								wg.Done()
								<-queue
								if ok {
									count++
									atomic.StoreUint64(&reqCount[i], count)
								}
							}()
						}
					case <-stop:
						wg.Done()
						return
					case <-ctx.Done():
						wg.Done()
						return
					}
				}
			}()
		}

		// processedSince snapshots per-client request counters, optionally
		// relative to an earlier snapshot.
		processedSince := func(start []uint64) []uint64 {
			res := make([]uint64, len(reqCount))
			for i := range reqCount {
				res[i] = atomic.LoadUint64(&reqCount[i])
				if start != nil {
					res[i] -= start[i]
				}
			}
			return res
		}

		weights := make([]float64, len(clients))
		for c := 0; c < 5; c++ {
			// Pick a new free client and assign random relative weights.
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), freeCap)
			freeIdx = rand.Intn(len(clients))
			var sum float64
			for i := range clients {
				if i == freeIdx {
					weights[i] = 0
				} else {
					weights[i] = rand.Float64()*(1-minRelCap) + minRelCap
				}
				sum += weights[i]
			}
			// Apply capacities in two passes (decreases first, then increases)
			// so the total is never exceeded while reassigning.
			for i, client := range clients {
				weights[i] *= float64(testCap-freeCap-100) / sum
				capacity := uint64(weights[i])
				if i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			setCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)
			for i, client := range clients {
				capacity := uint64(weights[i])
				if i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {
					setCapacity(ctx, t, serverRpcClient, client.ID(), capacity)
				}
			}
			weights[freeIdx] = float64(freeCap)
			for i := range clients {
				weights[i] /= float64(testCap)
			}

			// Wait for flow control parameters to settle before measuring.
			time.Sleep(flowcontrol.DecParamDelay)
			fmt.Println("Starting measurement")
			fmt.Printf("Relative weights:")
			for i := range clients {
				fmt.Printf(" %f", weights[i])
			}
			fmt.Println()
			start := processedSince(nil)
			for {
				select {
				case <-ctx.Done():
					t.Fatalf("Timeout")
				default:
				}

				totalCap = getTotalCap(ctx, t, serverRpcClient)
				if totalCap < testCap {
					// Server capacity dropped below what the test assumes;
					// abort this round and restart the whole test.
					fmt.Println("Total capacity underrun")
					close(stop)
					wg.Wait()
					return false
				}

				processed := processedSince(start)
				var avg uint64
				fmt.Printf("Processed")
				for i, p := range processed {
					fmt.Printf(" %d", p)
					processed[i] = uint64(float64(p) / weights[i])
					avg += processed[i]
				}
				avg /= uint64(len(processed))

				// Only evaluate deviation once enough samples accumulated.
				if avg >= 10000 {
					var maxDev float64
					for _, p := range processed {
						dev := float64(int64(p-avg)) / float64(avg)
						fmt.Printf(" %7.4f", dev)
						if dev < 0 {
							dev = -dev
						}
						if dev > maxDev {
							maxDev = dev
						}
					}
					fmt.Printf(" max deviation: %f totalCap: %d\n", maxDev, totalCap)
					if maxDev <= testTolerance {
						fmt.Println("success")
						break
					}
				} else {
					fmt.Println()
				}
				time.Sleep(time.Millisecond * 200)
			}
		}

		close(stop)
		wg.Wait()

		for i, count := range reqCount {
			fmt.Println("client", i, "processed", count)
		}
		return true
	}) {
		fmt.Println("restarting test")
	}
}
||||
|
||||
func getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) { |
||||
res := make(map[string]interface{}) |
||||
if err := client.CallContext(ctx, &res, "eth_getBlockByNumber", "latest", false); err != nil { |
||||
t.Fatalf("Failed to obtain head block: %v", err) |
||||
} |
||||
numStr, ok := res["number"].(string) |
||||
if !ok { |
||||
t.Fatalf("RPC block number field invalid") |
||||
} |
||||
num, err := hexutil.DecodeUint64(numStr) |
||||
if err != nil { |
||||
t.Fatalf("Failed to decode RPC block number: %v", err) |
||||
} |
||||
hashStr, ok := res["hash"].(string) |
||||
if !ok { |
||||
t.Fatalf("RPC block number field invalid") |
||||
} |
||||
hash := common.HexToHash(hashStr) |
||||
return num, hash |
||||
} |
||||
|
||||
func testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool { |
||||
//res := make(map[string]interface{})
|
||||
var res string |
||||
var addr common.Address |
||||
rand.Read(addr[:]) |
||||
c, _ := context.WithTimeout(ctx, time.Second*12) |
||||
// if err := client.CallContext(ctx, &res, "eth_getProof", addr, nil, "latest"); err != nil {
|
||||
err := client.CallContext(c, &res, "eth_getBalance", addr, "latest") |
||||
if err != nil { |
||||
fmt.Println("request error:", err) |
||||
} |
||||
return err == nil |
||||
} |
||||
|
||||
// setCapacity assigns the given priority capacity to a client through the
// server's les_setClientCapacity API, failing the test on error.
func setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, cap uint64) {
	if err := server.CallContext(ctx, nil, "les_setClientCapacity", clientID, cap); err != nil {
		t.Fatalf("Failed to set client capacity: %v", err)
	}
}
||||
|
||||
func getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 { |
||||
var s string |
||||
if err := server.CallContext(ctx, &s, "les_getClientCapacity", clientID); err != nil { |
||||
t.Fatalf("Failed to get client capacity: %v", err) |
||||
} |
||||
cap, err := hexutil.DecodeUint64(s) |
||||
if err != nil { |
||||
t.Fatalf("Failed to decode client capacity: %v", err) |
||||
} |
||||
return cap |
||||
} |
||||
|
||||
// getTotalCap queries the server's total available capacity via the
// les_totalCapacity API, failing the test on RPC or decoding errors.
func getTotalCap(ctx context.Context, t *testing.T, server *rpc.Client) uint64 {
	var s string
	if err := server.CallContext(ctx, &s, "les_totalCapacity"); err != nil {
		t.Fatalf("Failed to query total capacity: %v", err)
	}
	total, err := hexutil.DecodeUint64(s)
	if err != nil {
		t.Fatalf("Failed to decode total capacity: %v", err)
	}
	return total
}
||||
|
||||
// getMinCap queries the minimum assignable per-client capacity via the
// les_minimumCapacity API, failing the test on RPC or decoding errors.
func getMinCap(ctx context.Context, t *testing.T, server *rpc.Client) uint64 {
	var s string
	if err := server.CallContext(ctx, &s, "les_minimumCapacity"); err != nil {
		t.Fatalf("Failed to query minimum capacity: %v", err)
	}
	min, err := hexutil.DecodeUint64(s)
	if err != nil {
		t.Fatalf("Failed to decode minimum capacity: %v", err)
	}
	return min
}
||||
|
||||
// getFreeCap queries the capacity granted to free clients via the
// les_freeClientCapacity API, failing the test on RPC or decoding errors.
func getFreeCap(ctx context.Context, t *testing.T, server *rpc.Client) uint64 {
	var s string
	if err := server.CallContext(ctx, &s, "les_freeClientCapacity"); err != nil {
		t.Fatalf("Failed to query free client capacity: %v", err)
	}
	free, err := hexutil.DecodeUint64(s)
	if err != nil {
		t.Fatalf("Failed to decode free client capacity: %v", err)
	}
	return free
}
||||
|
||||
// init parses test flags, registers the simulation services and configures
// logging according to the -loglevel flag.
func init() {
	flag.Parse()
	// register the Delivery service which will run as a devp2p
	// protocol when using the exec adapter
	adapters.RegisterServices(services)

	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
||||
|
||||
// Command line flags controlling the simulation setup.
var (
	adapter  = flag.String("adapter", "exec", "type of simulation: sim|socket|exec|docker")
	loglevel = flag.Int("loglevel", 0, "verbosity of logs")
	nodes    = flag.Int("nodes", 0, "number of nodes")
)
||||
|
||||
// services maps simulation service names to their constructors; registered
// with the adapter framework in init.
var services = adapters.Services{
	"lesclient": newLesClientService,
	"lesserver": newLesServerService,
}
||||
|
||||
// NewNetwork creates a simulation network together with a teardown function
// that shuts down both the network and its adapter.
func NewNetwork() (*simulations.Network, func(), error) {
	adapter, adapterTeardown, err := NewAdapter(*adapter, services)
	if err != nil {
		return nil, adapterTeardown, err
	}
	defaultService := "streamer"
	net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
		ID:             "0",
		DefaultService: defaultService,
	})
	teardown := func() {
		adapterTeardown()
		net.Shutdown()
	}

	return net, teardown, nil
}
||||
|
||||
func NewAdapter(adapterType string, services adapters.Services) (adapter adapters.NodeAdapter, teardown func(), err error) { |
||||
teardown = func() {} |
||||
switch adapterType { |
||||
case "sim": |
||||
adapter = adapters.NewSimAdapter(services) |
||||
// case "socket":
|
||||
// adapter = adapters.NewSocketAdapter(services)
|
||||
case "exec": |
||||
baseDir, err0 := ioutil.TempDir("", "les-test") |
||||
if err0 != nil { |
||||
return nil, teardown, err0 |
||||
} |
||||
teardown = func() { os.RemoveAll(baseDir) } |
||||
adapter = adapters.NewExecAdapter(baseDir) |
||||
/*case "docker": |
||||
adapter, err = adapters.NewDockerAdapter() |
||||
if err != nil { |
||||
return nil, teardown, err |
||||
}*/ |
||||
default: |
||||
return nil, teardown, errors.New("adapter needs to be one of sim, socket, exec, docker") |
||||
} |
||||
return adapter, teardown, nil |
||||
} |
||||
|
||||
func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool { |
||||
net, teardown, err := NewNetwork() |
||||
defer teardown() |
||||
if err != nil { |
||||
t.Fatalf("Failed to create network: %v", err) |
||||
} |
||||
timeout := 1800 * time.Second |
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout) |
||||
defer cancel() |
||||
|
||||
servers := make([]*simulations.Node, serverCount) |
||||
clients := make([]*simulations.Node, clientCount) |
||||
|
||||
for i := range clients { |
||||
clientconf := adapters.RandomNodeConfig() |
||||
clientconf.Services = []string{"lesclient"} |
||||
if len(clientDir) == clientCount { |
||||
clientconf.DataDir = clientDir[i] |
||||
} |
||||
client, err := net.NewNodeWithConfig(clientconf) |
||||
if err != nil { |
||||
t.Fatalf("Failed to create client: %v", err) |
||||
} |
||||
clients[i] = client |
||||
} |
||||
|
||||
for i := range servers { |
||||
serverconf := adapters.RandomNodeConfig() |
||||
serverconf.Services = []string{"lesserver"} |
||||
if len(serverDir) == serverCount { |
||||
serverconf.DataDir = serverDir[i] |
||||
} |
||||
server, err := net.NewNodeWithConfig(serverconf) |
||||
if err != nil { |
||||
t.Fatalf("Failed to create server: %v", err) |
||||
} |
||||
servers[i] = server |
||||
} |
||||
|
||||
for _, client := range clients { |
||||
if err := net.Start(client.ID()); err != nil { |
||||
t.Fatalf("Failed to start client node: %v", err) |
||||
} |
||||
} |
||||
for _, server := range servers { |
||||
if err := net.Start(server.ID()); err != nil { |
||||
t.Fatalf("Failed to start server node: %v", err) |
||||
} |
||||
} |
||||
|
||||
return test(ctx, net, servers, clients) |
||||
} |
||||
|
||||
func newLesClientService(ctx *adapters.ServiceContext) (node.Service, error) { |
||||
config := eth.DefaultConfig |
||||
config.SyncMode = downloader.LightSync |
||||
config.Ethash.PowMode = ethash.ModeFake |
||||
return New(ctx.NodeContext, &config) |
||||
} |
||||
|
||||
func newLesServerService(ctx *adapters.ServiceContext) (node.Service, error) { |
||||
config := eth.DefaultConfig |
||||
config.SyncMode = downloader.FullSync |
||||
config.LightServ = testServerCapacity |
||||
config.LightPeers = testMaxClients |
||||
ethereum, err := eth.New(ctx.NodeContext, &config) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
server, err := NewLesServer(ethereum, &config) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
ethereum.AddLesServer(server) |
||||
return ethereum, nil |
||||
} |
@ -0,0 +1,353 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package les |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"fmt" |
||||
"math/big" |
||||
"math/rand" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/mclock" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/les/flowcontrol" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
"github.com/ethereum/go-ethereum/p2p/enode" |
||||
"github.com/ethereum/go-ethereum/params" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
) |
||||
|
||||
// requestBenchmark is an interface for different randomized request generators
// used to measure per-request serving cost.
type requestBenchmark interface {
	// init initializes the generator for generating the given number of randomized requests
	init(pm *ProtocolManager, count int) error
	// request initiates sending a single request to the given peer
	request(peer *peer, index int) error
}
||||
|
||||
// benchmarkBlockHeaders implements requestBenchmark
type benchmarkBlockHeaders struct {
	amount, skip    int           // number of headers per request and gap between them
	reverse, byHash bool          // retrieval direction; whether to request by hash
	offset, randMax int64         // random start-number range computed by init
	hashes          []common.Hash // pre-generated canonical hashes (byHash mode only)
}
||||
|
||||
func (b *benchmarkBlockHeaders) init(pm *ProtocolManager, count int) error { |
||||
d := int64(b.amount-1) * int64(b.skip+1) |
||||
b.offset = 0 |
||||
b.randMax = pm.blockchain.CurrentHeader().Number.Int64() + 1 - d |
||||
if b.randMax < 0 { |
||||
return fmt.Errorf("chain is too short") |
||||
} |
||||
if b.reverse { |
||||
b.offset = d |
||||
} |
||||
if b.byHash { |
||||
b.hashes = make([]common.Hash, count) |
||||
for i := range b.hashes { |
||||
b.hashes[i] = rawdb.ReadCanonicalHash(pm.chainDb, uint64(b.offset+rand.Int63n(b.randMax))) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (b *benchmarkBlockHeaders) request(peer *peer, index int) error { |
||||
if b.byHash { |
||||
return peer.RequestHeadersByHash(0, 0, b.hashes[index], b.amount, b.skip, b.reverse) |
||||
} else { |
||||
return peer.RequestHeadersByNumber(0, 0, uint64(b.offset+rand.Int63n(b.randMax)), b.amount, b.skip, b.reverse) |
||||
} |
||||
} |
||||
|
||||
// benchmarkBodiesOrReceipts implements requestBenchmark
type benchmarkBodiesOrReceipts struct {
	receipts bool          // request receipts instead of block bodies
	hashes   []common.Hash // pre-generated random canonical hashes
}
||||
|
||||
func (b *benchmarkBodiesOrReceipts) init(pm *ProtocolManager, count int) error { |
||||
randMax := pm.blockchain.CurrentHeader().Number.Int64() + 1 |
||||
b.hashes = make([]common.Hash, count) |
||||
for i := range b.hashes { |
||||
b.hashes[i] = rawdb.ReadCanonicalHash(pm.chainDb, uint64(rand.Int63n(randMax))) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (b *benchmarkBodiesOrReceipts) request(peer *peer, index int) error { |
||||
if b.receipts { |
||||
return peer.RequestReceipts(0, 0, []common.Hash{b.hashes[index]}) |
||||
} else { |
||||
return peer.RequestBodies(0, 0, []common.Hash{b.hashes[index]}) |
||||
} |
||||
} |
||||
|
||||
// benchmarkProofsOrCode implements requestBenchmark
type benchmarkProofsOrCode struct {
	code     bool        // request contract code instead of Merkle proofs
	headHash common.Hash // chain head hash recorded by init
}
||||
|
||||
// init records the current chain head hash, against which all proof/code
// requests are made.
func (b *benchmarkProofsOrCode) init(pm *ProtocolManager, count int) error {
	b.headHash = pm.blockchain.CurrentHeader().Hash()
	return nil
}
||||
|
||||
func (b *benchmarkProofsOrCode) request(peer *peer, index int) error { |
||||
key := make([]byte, 32) |
||||
rand.Read(key) |
||||
if b.code { |
||||
return peer.RequestCode(0, 0, []CodeReq{{BHash: b.headHash, AccKey: key}}) |
||||
} else { |
||||
return peer.RequestProofs(0, 0, []ProofReq{{BHash: b.headHash, Key: key}}) |
||||
} |
||||
} |
||||
|
||||
// benchmarkHelperTrie implements requestBenchmark
type benchmarkHelperTrie struct {
	bloom                 bool   // query the bloom trie instead of the CHT
	reqCount              int    // number of trie requests bundled into one message
	sectionCount, headNum uint64 // processed section count and head number, set by init
}
||||
|
||||
// init determines the number of processed helper trie sections and the
// corresponding head number for either the bloom trie or the CHT.
func (b *benchmarkHelperTrie) init(pm *ProtocolManager, count int) error {
	if b.bloom {
		b.sectionCount, b.headNum, _ = pm.server.bloomTrieIndexer.Sections()
	} else {
		b.sectionCount, _, _ = pm.server.chtIndexer.Sections()
		// convert server-side section count to client-side section size
		b.sectionCount /= (params.CHTFrequencyClient / params.CHTFrequencyServer)
		b.headNum = b.sectionCount*params.CHTFrequencyClient - 1
	}
	if b.sectionCount == 0 {
		return fmt.Errorf("no processed sections available")
	}
	return nil
}
||||
|
||||
func (b *benchmarkHelperTrie) request(peer *peer, index int) error { |
||||
reqs := make([]HelperTrieReq, b.reqCount) |
||||
|
||||
if b.bloom { |
||||
bitIdx := uint16(rand.Intn(2048)) |
||||
for i := range reqs { |
||||
key := make([]byte, 10) |
||||
binary.BigEndian.PutUint16(key[:2], bitIdx) |
||||
binary.BigEndian.PutUint64(key[2:], uint64(rand.Int63n(int64(b.sectionCount)))) |
||||
reqs[i] = HelperTrieReq{Type: htBloomBits, TrieIdx: b.sectionCount - 1, Key: key} |
||||
} |
||||
} else { |
||||
for i := range reqs { |
||||
key := make([]byte, 8) |
||||
binary.BigEndian.PutUint64(key[:], uint64(rand.Int63n(int64(b.headNum)))) |
||||
reqs[i] = HelperTrieReq{Type: htCanonical, TrieIdx: b.sectionCount - 1, Key: key, AuxReq: auxHeader} |
||||
} |
||||
} |
||||
|
||||
return peer.RequestHelperTrieProofs(0, 0, reqs) |
||||
} |
||||
|
||||
// benchmarkTxSend implements requestBenchmark
type benchmarkTxSend struct {
	txs types.Transactions // pre-signed transactions, one per request
}
||||
|
||||
func (b *benchmarkTxSend) init(pm *ProtocolManager, count int) error { |
||||
key, _ := crypto.GenerateKey() |
||||
addr := crypto.PubkeyToAddress(key.PublicKey) |
||||
signer := types.NewEIP155Signer(big.NewInt(18)) |
||||
b.txs = make(types.Transactions, count) |
||||
|
||||
for i := range b.txs { |
||||
data := make([]byte, txSizeCostLimit) |
||||
rand.Read(data) |
||||
tx, err := types.SignTx(types.NewTransaction(0, addr, new(big.Int), 0, new(big.Int), data), signer, key) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
b.txs[i] = tx |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// request RLP-encodes the pre-signed transaction at the given index and sends
// it to the peer.
func (b *benchmarkTxSend) request(peer *peer, index int) error {
	enc, _ := rlp.EncodeToBytes(types.Transactions{b.txs[index]})
	return peer.SendTxs(0, 0, enc)
}
||||
|
||||
// benchmarkTxStatus implements requestBenchmark by querying the status of
// random (and therefore most likely unknown) transaction hashes.
type benchmarkTxStatus struct{}
||||
|
||||
// init is a no-op; transaction status requests need no precomputed data.
func (b *benchmarkTxStatus) init(pm *ProtocolManager, count int) error {
	return nil
}
||||
|
||||
// request queries the status of a single random transaction hash.
func (b *benchmarkTxStatus) request(peer *peer, index int) error {
	var hash common.Hash
	rand.Read(hash[:])
	return peer.RequestTxStatus(0, 0, []common.Hash{hash})
}
||||
|
||||
// benchmarkSetup stores measurement data for a single benchmark type
type benchmarkSetup struct {
	req                   requestBenchmark // the request generator being measured
	totalCount            int              // total number of requests issued so far
	totalTime, avgTime    time.Duration    // cumulative and per-request serving time
	maxInSize, maxOutSize uint32           // largest single message seen in each direction
	err                   error            // first error encountered; skips further passes
}
||||
|
||||
// runBenchmark runs a benchmark cycle for all benchmark types in the specified
|
||||
// number of passes
|
||||
func (pm *ProtocolManager) runBenchmark(benchmarks []requestBenchmark, passCount int, targetTime time.Duration) []*benchmarkSetup { |
||||
setup := make([]*benchmarkSetup, len(benchmarks)) |
||||
for i, b := range benchmarks { |
||||
setup[i] = &benchmarkSetup{req: b} |
||||
} |
||||
for i := 0; i < passCount; i++ { |
||||
log.Info("Running benchmark", "pass", i+1, "total", passCount) |
||||
todo := make([]*benchmarkSetup, len(benchmarks)) |
||||
copy(todo, setup) |
||||
for len(todo) > 0 { |
||||
// select a random element
|
||||
index := rand.Intn(len(todo)) |
||||
next := todo[index] |
||||
todo[index] = todo[len(todo)-1] |
||||
todo = todo[:len(todo)-1] |
||||
|
||||
if next.err == nil { |
||||
// calculate request count
|
||||
count := 50 |
||||
if next.totalTime > 0 { |
||||
count = int(uint64(next.totalCount) * uint64(targetTime) / uint64(next.totalTime)) |
||||
} |
||||
if err := pm.measure(next, count); err != nil { |
||||
next.err = err |
||||
} |
||||
} |
||||
} |
||||
} |
||||
log.Info("Benchmark completed") |
||||
|
||||
for _, s := range setup { |
||||
if s.err == nil { |
||||
s.avgTime = s.totalTime / time.Duration(s.totalCount) |
||||
} |
||||
} |
||||
return setup |
||||
} |
||||
|
||||
// meteredPipe implements p2p.MsgReadWriter and remembers the largest single
// message size sent through the pipe
type meteredPipe struct {
	rw      p2p.MsgReadWriter // the wrapped pipe end
	maxSize uint32            // largest message size written so far
}
||||
|
||||
// ReadMsg delegates to the wrapped pipe without metering; only written
// message sizes are tracked.
func (m *meteredPipe) ReadMsg() (p2p.Msg, error) {
	return m.rw.ReadMsg()
}
||||
|
||||
func (m *meteredPipe) WriteMsg(msg p2p.Msg) error { |
||||
if msg.Size > m.maxSize { |
||||
m.maxSize = msg.Size |
||||
} |
||||
return m.rw.WriteMsg(msg) |
||||
} |
||||
|
||||
// measure runs a benchmark for a single type in a single pass, with the given
|
||||
// number of requests
|
||||
func (pm *ProtocolManager) measure(setup *benchmarkSetup, count int) error { |
||||
clientPipe, serverPipe := p2p.MsgPipe() |
||||
clientMeteredPipe := &meteredPipe{rw: clientPipe} |
||||
serverMeteredPipe := &meteredPipe{rw: serverPipe} |
||||
var id enode.ID |
||||
rand.Read(id[:]) |
||||
clientPeer := pm.newPeer(lpv2, NetworkId, p2p.NewPeer(id, "client", nil), clientMeteredPipe) |
||||
serverPeer := pm.newPeer(lpv2, NetworkId, p2p.NewPeer(id, "server", nil), serverMeteredPipe) |
||||
serverPeer.sendQueue = newExecQueue(count) |
||||
serverPeer.announceType = announceTypeNone |
||||
serverPeer.fcCosts = make(requestCostTable) |
||||
c := &requestCosts{} |
||||
for code := range requests { |
||||
serverPeer.fcCosts[code] = c |
||||
} |
||||
serverPeer.fcParams = flowcontrol.ServerParams{BufLimit: 1, MinRecharge: 1} |
||||
serverPeer.fcClient = flowcontrol.NewClientNode(pm.server.fcManager, serverPeer.fcParams) |
||||
defer serverPeer.fcClient.Disconnect() |
||||
|
||||
if err := setup.req.init(pm, count); err != nil { |
||||
return err |
||||
} |
||||
|
||||
errCh := make(chan error, 10) |
||||
start := mclock.Now() |
||||
|
||||
go func() { |
||||
for i := 0; i < count; i++ { |
||||
if err := setup.req.request(clientPeer, i); err != nil { |
||||
errCh <- err |
||||
return |
||||
} |
||||
} |
||||
}() |
||||
go func() { |
||||
for i := 0; i < count; i++ { |
||||
if err := pm.handleMsg(serverPeer); err != nil { |
||||
errCh <- err |
||||
return |
||||
} |
||||
} |
||||
}() |
||||
go func() { |
||||
for i := 0; i < count; i++ { |
||||
msg, err := clientPipe.ReadMsg() |
||||
if err != nil { |
||||
errCh <- err |
||||
return |
||||
} |
||||
var i interface{} |
||||
msg.Decode(&i) |
||||
} |
||||
// at this point we can be sure that the other two
|
||||
// goroutines finished successfully too
|
||||
close(errCh) |
||||
}() |
||||
select { |
||||
case err := <-errCh: |
||||
if err != nil { |
||||
return err |
||||
} |
||||
case <-pm.quitSync: |
||||
clientPipe.Close() |
||||
serverPipe.Close() |
||||
return fmt.Errorf("Benchmark cancelled") |
||||
} |
||||
|
||||
setup.totalTime += time.Duration(mclock.Now() - start) |
||||
setup.totalCount += count |
||||
setup.maxInSize = clientMeteredPipe.maxSize |
||||
setup.maxOutSize = serverMeteredPipe.maxSize |
||||
clientPipe.Close() |
||||
serverPipe.Close() |
||||
return nil |
||||
} |
@ -0,0 +1,388 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package les |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"math" |
||||
"sync" |
||||
"sync/atomic" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common/mclock" |
||||
"github.com/ethereum/go-ethereum/eth" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/les/flowcontrol" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
// makeCostStats enables collecting real-cost vs. average-estimate statistics
// while serving requests (printed by printStats on shutdown).
const makeCostStats = false // make request cost statistics during operation
|
||||
|
||||
var (
	// reqAvgTimeCost holds average request cost estimates based on serving time,
	// as {baseCost, reqCost} pairs (cost = baseCost + reqCost*amount).
	reqAvgTimeCost = requestCostTable{
		GetBlockHeadersMsg:     {150000, 30000},
		GetBlockBodiesMsg:      {0, 700000},
		GetReceiptsMsg:         {0, 1000000},
		GetCodeMsg:             {0, 450000},
		GetProofsV1Msg:         {0, 600000},
		GetProofsV2Msg:         {0, 600000},
		GetHeaderProofsMsg:     {0, 1000000},
		GetHelperTrieProofsMsg: {0, 1000000},
		SendTxMsg:              {0, 450000},
		SendTxV2Msg:            {0, 450000},
		GetTxStatusMsg:         {0, 250000},
	}
	// reqMaxInSize holds maximum incoming message size estimates in bytes.
	reqMaxInSize = requestCostTable{
		GetBlockHeadersMsg:     {40, 0},
		GetBlockBodiesMsg:      {0, 40},
		GetReceiptsMsg:         {0, 40},
		GetCodeMsg:             {0, 80},
		GetProofsV1Msg:         {0, 80},
		GetProofsV2Msg:         {0, 80},
		GetHeaderProofsMsg:     {0, 20},
		GetHelperTrieProofsMsg: {0, 20},
		SendTxMsg:              {0, 66000},
		SendTxV2Msg:            {0, 66000},
		GetTxStatusMsg:         {0, 50},
	}
	// reqMaxOutSize holds maximum outgoing message size estimates in bytes.
	reqMaxOutSize = requestCostTable{
		GetBlockHeadersMsg:     {0, 556},
		GetBlockBodiesMsg:      {0, 100000},
		GetReceiptsMsg:         {0, 200000},
		GetCodeMsg:             {0, 50000},
		GetProofsV1Msg:         {0, 4000},
		GetProofsV2Msg:         {0, 4000},
		GetHeaderProofsMsg:     {0, 4000},
		GetHelperTrieProofsMsg: {0, 4000},
		SendTxMsg:              {0, 0},
		SendTxV2Msg:            {0, 100},
		GetTxStatusMsg:         {0, 100},
	}
	// minBufLimit is the minimum buffer limit allowed for a client.
	minBufLimit = uint64(50000000 * maxCostFactor)
	// minCapacity is the minimum capacity allowed for a client.
	minCapacity = (minBufLimit-1)/bufLimitRatio + 1
)
||||
|
||||
const (
	// maxCostFactor is the ratio of maximum and average cost estimates.
	maxCostFactor = 2
	// gfInitWeight is the initial (and minimum) statistic weight; with a fresh
	// database gfSum == gfWeight == gfInitWeight, so the factor starts at 1.
	gfInitWeight = time.Second * 10
	// gfMaxWeight caps the accumulated statistic weight so that old samples
	// gradually fade out.
	gfMaxWeight = time.Hour
	// gfUsageThreshold gates statistic collection: samples are only recorded
	// while recent usage exceeds this fraction of the target utilization.
	gfUsageThreshold = 0.5
	// gfUsageTC is the time constant of the exponential usage average.
	gfUsageTC = time.Second
	// gfDbKey is the database key under which the factor statistics persist.
	gfDbKey = "_globalCostFactor"
)
||||
|
||||
// costTracker is responsible for calculating costs and cost estimates on the
// server side. It continuously updates the global cost factor which is defined
// as the number of cost units per nanosecond of serving time in a single thread.
// It is based on statistics collected during serving requests in high-load periods
// and practically acts as a one-dimension request price scaling factor over the
// pre-defined cost estimate table. Instead of scaling the cost values, the real
// value of cost units is changed by applying the factor to the serving times. This
// is more convenient because the changes in the cost factor can be applied immediately
// without always notifying the clients about the changed cost tables.
type costTracker struct {
	db     ethdb.Database     // backing store for persisting the factor statistics
	stopCh chan chan struct{} // stop requests; the inner channel is closed once saved

	inSizeFactor, outSizeFactor float64 // cost units per byte of in/out traffic (0 when no bandwidth limit is set)
	gf, utilTarget              float64 // global cost factor and target utilization

	gfUpdateCh      chan gfUpdate // serving-time samples fed to the gfLoop goroutine
	gfLock          sync.RWMutex  // protects gf and totalRechargeCh
	totalRechargeCh chan uint64   // receives updated total recharge values; may be nil

	stats map[uint64][]uint64 // real-cost distribution per request code (only when makeCostStats)
}
||||
|
||||
// newCostTracker creates a cost tracker and loads the cost factor statistics from the database
|
||||
func newCostTracker(db ethdb.Database, config *eth.Config) *costTracker { |
||||
utilTarget := float64(config.LightServ) * flowcontrol.FixedPointMultiplier / 100 |
||||
ct := &costTracker{ |
||||
db: db, |
||||
stopCh: make(chan chan struct{}), |
||||
utilTarget: utilTarget, |
||||
} |
||||
if config.LightBandwidthIn > 0 { |
||||
ct.inSizeFactor = utilTarget / float64(config.LightBandwidthIn) |
||||
} |
||||
if config.LightBandwidthOut > 0 { |
||||
ct.outSizeFactor = utilTarget / float64(config.LightBandwidthOut) |
||||
} |
||||
if makeCostStats { |
||||
ct.stats = make(map[uint64][]uint64) |
||||
for code := range reqAvgTimeCost { |
||||
ct.stats[code] = make([]uint64, 10) |
||||
} |
||||
} |
||||
ct.gfLoop() |
||||
return ct |
||||
} |
||||
|
||||
// stop stops the cost tracker and saves the cost factor statistics to the database
|
||||
func (ct *costTracker) stop() { |
||||
stopCh := make(chan struct{}) |
||||
ct.stopCh <- stopCh |
||||
<-stopCh |
||||
if makeCostStats { |
||||
ct.printStats() |
||||
} |
||||
} |
||||
|
||||
// makeCostList returns upper cost estimates based on the hardcoded cost estimate
|
||||
// tables and the optionally specified incoming/outgoing bandwidth limits
|
||||
func (ct *costTracker) makeCostList() RequestCostList { |
||||
maxCost := func(avgTime, inSize, outSize uint64) uint64 { |
||||
globalFactor := ct.globalFactor() |
||||
|
||||
cost := avgTime * maxCostFactor |
||||
inSizeCost := uint64(float64(inSize) * ct.inSizeFactor * globalFactor * maxCostFactor) |
||||
if inSizeCost > cost { |
||||
cost = inSizeCost |
||||
} |
||||
outSizeCost := uint64(float64(outSize) * ct.outSizeFactor * globalFactor * maxCostFactor) |
||||
if outSizeCost > cost { |
||||
cost = outSizeCost |
||||
} |
||||
return cost |
||||
} |
||||
var list RequestCostList |
||||
for code, data := range reqAvgTimeCost { |
||||
list = append(list, requestCostListItem{ |
||||
MsgCode: code, |
||||
BaseCost: maxCost(data.baseCost, reqMaxInSize[code].baseCost, reqMaxOutSize[code].baseCost), |
||||
ReqCost: maxCost(data.reqCost, reqMaxInSize[code].reqCost, reqMaxOutSize[code].reqCost), |
||||
}) |
||||
} |
||||
return list |
||||
} |
||||
|
||||
// gfUpdate is a single global-factor sample: the table-based average estimate
// and the measured serving time for one request, both in float form.
type gfUpdate struct {
	avgTime, servingTime float64
}
||||
|
||||
// gfLoop starts an event loop which updates the global cost factor which is
// calculated as a weighted average of the average estimate / serving time ratio.
// The applied weight equals the serving time if gfUsage is over a threshold,
// zero otherwise. gfUsage is the recent average serving time per time unit in
// an exponential moving window. This ensures that statistics are collected only
// under high-load circumstances where the measured serving times are relevant.
// The total recharge parameter of the flow control system which controls the
// total allowed serving time per second but nominated in cost units, should
// also be scaled with the cost factor and is also updated by this loop.
func (ct *costTracker) gfLoop() {
	var gfUsage, gfSum, gfWeight float64
	lastUpdate := mclock.Now()
	expUpdate := lastUpdate

	// restore persisted factor statistics (sum and weight) if present
	data, _ := ct.db.Get([]byte(gfDbKey))
	if len(data) == 16 {
		gfSum = math.Float64frombits(binary.BigEndian.Uint64(data[0:8]))
		gfWeight = math.Float64frombits(binary.BigEndian.Uint64(data[8:16]))
	}
	// enforce a minimum weight so a fresh database starts from factor 1
	if gfWeight < float64(gfInitWeight) {
		gfSum = float64(gfInitWeight)
		gfWeight = float64(gfInitWeight)
	}
	gf := gfSum / gfWeight
	ct.gf = gf
	ct.gfUpdateCh = make(chan gfUpdate, 100)

	go func() {
		for {
			select {
			case r := <-ct.gfUpdateCh:
				now := mclock.Now()
				// usage sample: the larger of the factor-scaled real serving
				// time and the table-based estimate
				max := r.servingTime * gf
				if r.avgTime > max {
					max = r.avgTime
				}
				// decay the exponential usage average and fold in the sample
				dt := float64(now - expUpdate)
				expUpdate = now
				gfUsage = gfUsage*math.Exp(-dt/float64(gfUsageTC)) + max*1000000/float64(gfUsageTC)

				// only collect statistics while recent load is high enough
				if gfUsage >= gfUsageThreshold*ct.utilTarget*gf {
					gfSum += r.avgTime
					gfWeight += r.servingTime
					// recompute and publish the factor at most once per second
					if time.Duration(now-lastUpdate) > time.Second {
						gf = gfSum / gfWeight
						if gfWeight >= float64(gfMaxWeight) {
							// cap the weight so old samples fade out over time
							gfSum = gf * float64(gfMaxWeight)
							gfWeight = float64(gfMaxWeight)
						}
						lastUpdate = now
						ct.gfLock.Lock()
						ct.gf = gf
						ch := ct.totalRechargeCh
						ct.gfLock.Unlock()
						if ch != nil {
							// non-blocking notification of the new total recharge
							select {
							case ct.totalRechargeCh <- uint64(ct.utilTarget * gf):
							default:
							}
						}
						log.Debug("global cost factor updated", "gf", gf, "weight", time.Duration(gfWeight))
					}
				}
			case stopCh := <-ct.stopCh:
				// persist the statistics before shutting down
				var data [16]byte
				binary.BigEndian.PutUint64(data[0:8], math.Float64bits(gfSum))
				binary.BigEndian.PutUint64(data[8:16], math.Float64bits(gfWeight))
				ct.db.Put([]byte(gfDbKey), data[:])
				log.Debug("global cost factor saved", "sum", time.Duration(gfSum), "weight", time.Duration(gfWeight))
				close(stopCh)
				return
			}
		}
	}()
}
||||
|
||||
// globalFactor returns the current value of the global cost factor
|
||||
func (ct *costTracker) globalFactor() float64 { |
||||
ct.gfLock.RLock() |
||||
defer ct.gfLock.RUnlock() |
||||
|
||||
return ct.gf |
||||
} |
||||
|
||||
// totalRecharge returns the current total recharge parameter which is used by
|
||||
// flowcontrol.ClientManager and is scaled by the global cost factor
|
||||
func (ct *costTracker) totalRecharge() uint64 { |
||||
ct.gfLock.RLock() |
||||
defer ct.gfLock.RUnlock() |
||||
|
||||
return uint64(ct.gf * ct.utilTarget) |
||||
} |
||||
|
||||
// subscribeTotalRecharge returns all future updates to the total recharge value
|
||||
// through a channel and also returns the current value
|
||||
func (ct *costTracker) subscribeTotalRecharge(ch chan uint64) uint64 { |
||||
ct.gfLock.Lock() |
||||
defer ct.gfLock.Unlock() |
||||
|
||||
ct.totalRechargeCh = ch |
||||
return uint64(ct.gf * ct.utilTarget) |
||||
} |
||||
|
||||
// updateStats updates the global cost factor and (if enabled) the real cost vs.
|
||||
// average estimate statistics
|
||||
func (ct *costTracker) updateStats(code, amount, servingTime, realCost uint64) { |
||||
avg := reqAvgTimeCost[code] |
||||
avgTime := avg.baseCost + amount*avg.reqCost |
||||
select { |
||||
case ct.gfUpdateCh <- gfUpdate{float64(avgTime), float64(servingTime)}: |
||||
default: |
||||
} |
||||
if makeCostStats { |
||||
realCost <<= 4 |
||||
l := 0 |
||||
for l < 9 && realCost > avgTime { |
||||
l++ |
||||
realCost >>= 1 |
||||
} |
||||
atomic.AddUint64(&ct.stats[code][l], 1) |
||||
} |
||||
} |
||||
|
||||
// realCost calculates the final cost of a request based on actual serving time,
|
||||
// incoming and outgoing message size
|
||||
//
|
||||
// Note: message size is only taken into account if bandwidth limitation is applied
|
||||
// and the cost based on either message size is greater than the cost based on
|
||||
// serving time. A maximum of the three costs is applied instead of their sum
|
||||
// because the three limited resources (serving thread time and i/o bandwidth) can
|
||||
// also be maxed out simultaneously.
|
||||
func (ct *costTracker) realCost(servingTime uint64, inSize, outSize uint32) uint64 { |
||||
cost := float64(servingTime) |
||||
inSizeCost := float64(inSize) * ct.inSizeFactor |
||||
if inSizeCost > cost { |
||||
cost = inSizeCost |
||||
} |
||||
outSizeCost := float64(outSize) * ct.outSizeFactor |
||||
if outSizeCost > cost { |
||||
cost = outSizeCost |
||||
} |
||||
return uint64(cost * ct.globalFactor()) |
||||
} |
||||
|
||||
// printStats prints the distribution of real request cost relative to the
// average estimates, one log line per request code. Buckets range from 1/16
// of the estimate to more than 16 times the estimate. It is a no-op unless
// statistics collection was enabled via makeCostStats.
func (ct *costTracker) printStats() {
	if ct.stats == nil {
		return
	}
	for code, arr := range ct.stats {
		log.Info("Request cost statistics", "code", code, "1/16", arr[0], "1/8", arr[1], "1/4", arr[2], "1/2", arr[3], "1", arr[4], "2", arr[5], "4", arr[6], "8", arr[7], "16", arr[8], ">16", arr[9])
	}
}
||||
|
||||
type (
	// requestCostTable assigns a cost estimate function to each request type
	// which is a linear function of the requested amount
	// (cost = baseCost + reqCost * amount)
	requestCostTable map[uint64]*requestCosts
	// requestCosts holds the two coefficients of a linear cost function.
	requestCosts struct {
		baseCost, reqCost uint64
	}

	// RequestCostList is a list representation of request costs which is used for
	// database storage and communication through the network
	RequestCostList []requestCostListItem
	// requestCostListItem is a single (message code, cost coefficients) entry.
	requestCostListItem struct {
		MsgCode, BaseCost, ReqCost uint64
	}
)
||||
|
||||
// getCost calculates the estimated cost for a given request type and amount
// (cost = baseCost + reqCost * amount).
// NOTE(review): a message code missing from the table yields a nil entry and
// panics here — callers appear to pass known codes only; confirm.
func (table requestCostTable) getCost(code, amount uint64) uint64 {
	costs := table[code]
	return costs.baseCost + amount*costs.reqCost
}
||||
|
||||
// decode converts a cost list to a cost table
|
||||
func (list RequestCostList) decode() requestCostTable { |
||||
table := make(requestCostTable) |
||||
for _, e := range list { |
||||
table[e.MsgCode] = &requestCosts{ |
||||
baseCost: e.BaseCost, |
||||
reqCost: e.ReqCost, |
||||
} |
||||
} |
||||
return table |
||||
} |
||||
|
||||
// testCostList returns a dummy request cost list used by tests
|
||||
func testCostList() RequestCostList { |
||||
cl := make(RequestCostList, len(reqAvgTimeCost)) |
||||
var max uint64 |
||||
for code := range reqAvgTimeCost { |
||||
if code > max { |
||||
max = code |
||||
} |
||||
} |
||||
i := 0 |
||||
for code := uint64(0); code <= max; code++ { |
||||
if _, ok := reqAvgTimeCost[code]; ok { |
||||
cl[i].MsgCode = code |
||||
cl[i].BaseCost = 0 |
||||
cl[i].ReqCost = 0 |
||||
i++ |
||||
} |
||||
} |
||||
return cl |
||||
} |
@ -0,0 +1,65 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package flowcontrol |
||||
|
||||
import ( |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common/mclock" |
||||
) |
||||
|
||||
// logger collects events in string format and discards events older than the
// "keep" parameter
type logger struct {
	events           map[uint64]logEvent // retained events keyed by sequence number
	writePtr, delPtr uint64              // next sequence to write / oldest retained sequence
	keep             time.Duration       // retention window for old events
}
||||
|
||||
// logEvent describes a single event
type logEvent struct {
	time  mclock.AbsTime // timestamp of the event
	event string         // human-readable event description
}
||||
|
||||
// newLogger creates a new logger
|
||||
func newLogger(keep time.Duration) *logger { |
||||
return &logger{ |
||||
events: make(map[uint64]logEvent), |
||||
keep: keep, |
||||
} |
||||
} |
||||
|
||||
// add adds a new event and discards old events if possible
|
||||
func (l *logger) add(now mclock.AbsTime, event string) { |
||||
keepAfter := now - mclock.AbsTime(l.keep) |
||||
for l.delPtr < l.writePtr && l.events[l.delPtr].time <= keepAfter { |
||||
delete(l.events, l.delPtr) |
||||
l.delPtr++ |
||||
} |
||||
l.events[l.writePtr] = logEvent{now, event} |
||||
l.writePtr++ |
||||
} |
||||
|
||||
// dump prints all stored events
|
||||
func (l *logger) dump(now mclock.AbsTime) { |
||||
for i := l.delPtr; i < l.writePtr; i++ { |
||||
e := l.events[i] |
||||
fmt.Println(time.Duration(e.time-now), e.event) |
||||
} |
||||
} |
@ -0,0 +1,123 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package flowcontrol |
||||
|
||||
import ( |
||||
"math/rand" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common/mclock" |
||||
) |
||||
|
||||
// testNode models a single simulated request sender in the client manager test.
type testNode struct {
	node               *ClientNode    // server-side flow control state for this client
	bufLimit, capacity uint64         // buffer limit and recharge rate assigned to the node
	waitUntil          mclock.AbsTime // earliest time the next request is expected to be accepted
	index, totalCost   uint64         // running request counter and accumulated served cost
}
||||
|
||||
const (
	testMaxCost = 1000000 // maximum announced cost of a single simulated request
	testLength  = 100000  // number of simulated iterations (one per millisecond of simulated time)
)
||||
|
||||
// testConstantTotalCapacity simulates multiple request sender nodes and verifies
|
||||
// whether the total amount of served requests matches the expected value based on
|
||||
// the total capacity and the duration of the test.
|
||||
// Some nodes are sending requests occasionally so that their buffer should regularly
|
||||
// reach the maximum while other nodes (the "max capacity nodes") are sending at the
|
||||
// maximum permitted rate. The max capacity nodes are changed multiple times during
|
||||
// a single test.
|
||||
func TestConstantTotalCapacity(t *testing.T) { |
||||
testConstantTotalCapacity(t, 10, 1, 0) |
||||
testConstantTotalCapacity(t, 10, 1, 1) |
||||
testConstantTotalCapacity(t, 30, 1, 0) |
||||
testConstantTotalCapacity(t, 30, 2, 3) |
||||
testConstantTotalCapacity(t, 100, 1, 0) |
||||
testConstantTotalCapacity(t, 100, 3, 5) |
||||
testConstantTotalCapacity(t, 100, 5, 10) |
||||
} |
||||
|
||||
// testConstantTotalCapacity runs a single simulation: nodeCount clients with
// random capacities share a client manager whose total capacity equals the sum
// of the individual capacities. maxCapacityNodes clients send at the maximum
// permitted rate; randomSend extra requests per iteration go to random nodes.
// At the end, the total served cost must match totalCapacity*testLength within 2%.
func testConstantTotalCapacity(t *testing.T, nodeCount, maxCapacityNodes, randomSend int) {
	clock := &mclock.Simulated{}
	nodes := make([]*testNode, nodeCount)
	var totalCapacity uint64
	// assign random capacities and compute the aggregate
	for i := range nodes {
		nodes[i] = &testNode{capacity: uint64(50000 + rand.Intn(100000))}
		totalCapacity += nodes[i].capacity
	}
	// the manager's capacity curve is a single constant segment at totalCapacity
	m := NewClientManager(PieceWiseLinear{{0, totalCapacity}}, clock)
	for _, n := range nodes {
		n.bufLimit = n.capacity * 6000 //uint64(2000+rand.Intn(10000))
		n.node = NewClientNode(m, ServerParams{BufLimit: n.bufLimit, MinRecharge: n.capacity})
	}
	maxNodes := make([]int, maxCapacityNodes)
	for i := range maxNodes {
		// we don't care if some indexes are selected multiple times
		// in that case we have fewer max nodes
		maxNodes[i] = rand.Intn(nodeCount)
	}

	for i := 0; i < testLength; i++ {
		now := clock.Now()
		// max capacity nodes send as many requests as their buffer allows
		for _, idx := range maxNodes {
			for nodes[idx].send(t, now) {
			}
		}
		// occasionally replace one of the max capacity nodes with a random one
		if rand.Intn(testLength) < maxCapacityNodes*3 {
			maxNodes[rand.Intn(maxCapacityNodes)] = rand.Intn(nodeCount)
		}

		// send randomSend successful requests from randomly picked nodes;
		// rejected attempts (node still waiting) do not count
		sendCount := randomSend
		for sendCount > 0 {
			if nodes[rand.Intn(nodeCount)].send(t, now) {
				sendCount--
			}
		}

		clock.Run(time.Millisecond)
	}

	// verify that the served cost matches the configured total capacity
	var totalCost uint64
	for _, n := range nodes {
		totalCost += n.totalCost
	}
	ratio := float64(totalCost) / float64(totalCapacity) / testLength
	if ratio < 0.98 || ratio > 1.02 {
		t.Errorf("totalCost/totalCapacity/testLength ratio incorrect (expected: 1, got: %f)", ratio)
	}

}
||||
|
||||
// send attempts to send a single request from the node. It returns false (and
// does nothing) while the node is still in its mandatory waiting period;
// otherwise it sends a request of random cost, which must be accepted, and
// schedules the next permitted send time based on the returned buffer value.
func (n *testNode) send(t *testing.T, now mclock.AbsTime) bool {
	if now < n.waitUntil {
		return false
	}
	n.index++
	if ok, _, _ := n.node.AcceptRequest(0, n.index, testMaxCost); !ok {
		t.Fatalf("Rejected request after expected waiting time has passed")
	}
	// actual served cost is random, but at most the announced testMaxCost
	rcost := uint64(rand.Int63n(testMaxCost))
	bv := n.node.RequestProcessed(0, n.index, testMaxCost, rcost)
	if bv < testMaxCost {
		// wait until the buffer has recharged enough for the next max-cost request;
		// 1001000 is presumably ns-per-ms with a 0.1% safety margin — TODO confirm
		// against the recharge units used by ClientNode
		n.waitUntil = now + mclock.AbsTime((testMaxCost-bv)*1001000/n.capacity)
	}
	//n.waitUntil = now + mclock.AbsTime(float64(testMaxCost)*1001000/float64(n.capacity)*(1-float64(bv)/float64(n.bufLimit)))
	n.totalCost += rcost
	return true
}
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,261 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package les |
||||
|
||||
import ( |
||||
"sync" |
||||
|
||||
"github.com/ethereum/go-ethereum/common/mclock" |
||||
"github.com/ethereum/go-ethereum/common/prque" |
||||
) |
||||
|
||||
// servingQueue allows running tasks in a limited number of threads and puts the
// waiting tasks in a priority queue
type servingQueue struct {
	tokenCh                 chan runToken     // idle thread controllers offer run tokens here when no queued task waits
	queueAddCh, queueBestCh chan *servingTask // task submission channel / delivery channel for the best queued task
	stopThreadCh, quit      chan struct{}     // signal to stop one thread controller / shut down the whole queue
	setThreadsCh            chan int          // receives new target thread counts

	wg          sync.WaitGroup // tracks queueLoop, threadCountLoop and all thread controllers
	threadCount int            // number of currently running threads
	queue       *prque.Prque   // priority queue for waiting or suspended tasks
	best        *servingTask   // the highest priority task (not included in the queue)
	suspendBias int64          // priority bias against suspending an already running task
}
||||
|
||||
// servingTask represents a request serving task. Tasks can be implemented to
// run in multiple steps, allowing the serving queue to suspend execution between
// steps if higher priority tasks are entered. The creator of the task should
// set the priority field; a greater value means higher priority and values can
// wrap around the int64 range. Execution is driven through start, waitOrStop
// and done.
type servingTask struct {
	sq          *servingQueue // queue this task belongs to
	servingTime uint64        // accumulated serving time in ns; while running it holds total minus the start timestamp
	priority    int64         // scheduling priority; may wrap around int64
	biasAdded   bool          // set once suspendBias has been applied after the first suspension
	token       runToken      // token held while the task is allowed to run
	tokenCh     chan runToken // receives the run token when the task is granted a slot from the queue
}
||||
|
||||
// runToken received by servingTask.start allows the task to run. Closing the
// channel by servingTask.stop signals the thread controller to allow a new task
// to start running.
type runToken chan struct{}
||||
|
||||
// start blocks until the task can start and returns true if it is allowed to run.
// Returning false means that the task should be cancelled.
func (t *servingTask) start() bool {
	select {
	// fast path: an idle thread controller is offering a token right now
	case t.token = <-t.sq.tokenCh:
	default:
		// no idle thread; enter the priority queue and wait for a token
		t.tokenCh = make(chan runToken, 1)
		select {
		case t.sq.queueAddCh <- t:
		case <-t.sq.quit:
			return false
		}
		select {
		case t.token = <-t.tokenCh:
		case <-t.sq.quit:
			return false
		}
	}
	// a nil token means the task was not granted a run slot
	if t.token == nil {
		return false
	}
	// store the negated start time; done() adds the end time to yield the total
	t.servingTime -= uint64(mclock.Now())
	return true
}
||||
|
||||
// done signals the thread controller about the task being finished and returns
// the total serving time of the task in nanoseconds.
func (t *servingTask) done() uint64 {
	// servingTime held (accumulated - startTime); adding "now" makes it the real total
	t.servingTime += uint64(mclock.Now())
	// closing the token lets the owning thread controller admit the next task
	close(t.token)
	return t.servingTime
}
||||
|
||||
// waitOrStop can be called during the execution of the task. It blocks if there
|
||||
// is a higher priority task waiting (a bias is applied in favor of the currently
|
||||
// running task). Returning true means that the execution can be resumed. False
|
||||
// means the task should be cancelled.
|
||||
func (t *servingTask) waitOrStop() bool { |
||||
t.done() |
||||
if !t.biasAdded { |
||||
t.priority += t.sq.suspendBias |
||||
t.biasAdded = true |
||||
} |
||||
return t.start() |
||||
} |
||||
|
||||
// newServingQueue returns a new servingQueue
|
||||
func newServingQueue(suspendBias int64) *servingQueue { |
||||
sq := &servingQueue{ |
||||
queue: prque.New(nil), |
||||
suspendBias: suspendBias, |
||||
tokenCh: make(chan runToken), |
||||
queueAddCh: make(chan *servingTask, 100), |
||||
queueBestCh: make(chan *servingTask), |
||||
stopThreadCh: make(chan struct{}), |
||||
quit: make(chan struct{}), |
||||
setThreadsCh: make(chan int, 10), |
||||
} |
||||
sq.wg.Add(2) |
||||
go sq.queueLoop() |
||||
go sq.threadCountLoop() |
||||
return sq |
||||
} |
||||
|
||||
// newTask creates a new task with the given priority
|
||||
func (sq *servingQueue) newTask(priority int64) *servingTask { |
||||
return &servingTask{ |
||||
sq: sq, |
||||
priority: priority, |
||||
} |
||||
} |
||||
|
||||
// threadController is started in multiple goroutines and controls the execution
// of tasks. The number of active thread controllers equals the allowed number of
// concurrently running threads. It tries to fetch the highest priority queued
// task first. If there are no queued tasks waiting then it can directly catch
// run tokens from the token channel and allow the corresponding tasks to run
// without entering the priority queue.
func (sq *servingQueue) threadController() {
	for {
		token := make(runToken)
		// the non-blocking outer select gives queued tasks strict preference
		// over tasks arriving directly on tokenCh
		select {
		case best := <-sq.queueBestCh:
			best.tokenCh <- token
		default:
			select {
			case best := <-sq.queueBestCh:
				best.tokenCh <- token
			case sq.tokenCh <- token:
			case <-sq.stopThreadCh:
				sq.wg.Done()
				return
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		}
		// wait until the running task closes the token (task finished or suspended)
		<-token
		// honor pending stop/quit requests before admitting the next task
		select {
		case <-sq.stopThreadCh:
			sq.wg.Done()
			return
		case <-sq.quit:
			sq.wg.Done()
			return
		default:
		}
	}
}
||||
|
||||
// addTask inserts a task into the priority queue
|
||||
func (sq *servingQueue) addTask(task *servingTask) { |
||||
if sq.best == nil { |
||||
sq.best = task |
||||
} else if task.priority > sq.best.priority { |
||||
sq.queue.Push(sq.best, sq.best.priority) |
||||
sq.best = task |
||||
return |
||||
} else { |
||||
sq.queue.Push(task, task.priority) |
||||
} |
||||
} |
||||
|
||||
// queueLoop is an event loop running in a goroutine. It receives tasks from queueAddCh
// and always tries to send the highest priority task to queueBestCh. Successfully sent
// tasks are removed from the queue.
func (sq *servingQueue) queueLoop() {
	for {
		if sq.best != nil {
			// a best task exists: offer it to thread controllers while still
			// accepting new submissions
			select {
			case task := <-sq.queueAddCh:
				sq.addTask(task)
			case sq.queueBestCh <- sq.best:
				// best was handed out; promote the next queued task (if any)
				if sq.queue.Size() == 0 {
					sq.best = nil
				} else {
					sq.best, _ = sq.queue.PopItem().(*servingTask)
				}
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		} else {
			// nothing queued: only wait for submissions or shutdown
			select {
			case task := <-sq.queueAddCh:
				sq.addTask(task)
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		}
	}
}
||||
|
||||
// threadCountLoop is an event loop running in a goroutine. It adjusts the number
// of active thread controller goroutines.
func (sq *servingQueue) threadCountLoop() {
	var threadCountTarget int
	for {
		// spin up controllers until the target is reached
		for threadCountTarget > sq.threadCount {
			sq.wg.Add(1)
			go sq.threadController()
			sq.threadCount++
		}
		if threadCountTarget < sq.threadCount {
			// too many controllers: ask one to stop, but keep accepting new
			// targets in the meantime
			select {
			case threadCountTarget = <-sq.setThreadsCh:
			case sq.stopThreadCh <- struct{}{}:
				sq.threadCount--
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		} else {
			// at target: wait for a new target or shutdown
			select {
			case threadCountTarget = <-sq.setThreadsCh:
			case <-sq.quit:
				sq.wg.Done()
				return
			}
		}
	}
}
||||
|
||||
// setThreads sets the allowed processing thread count, suspending tasks as soon as
|
||||
// possible if necessary.
|
||||
func (sq *servingQueue) setThreads(threadCount int) { |
||||
select { |
||||
case sq.setThreadsCh <- threadCount: |
||||
case <-sq.quit: |
||||
return |
||||
} |
||||
} |
||||
|
||||
// stop stops task processing as soon as possible and shuts down the serving queue.
func (sq *servingQueue) stop() {
	// broadcast shutdown to both event loops and all blocked tasks
	close(sq.quit)
	// wait until queueLoop, threadCountLoop and every thread controller exit
	sq.wg.Wait()
}
Loading…
Reference in new issue