forked from mirror/go-ethereum
les: move client pool to les/vflux/server (#22495)
* les: move client pool to les/vflux/server
* les/vflux/server: un-expose NodeBalance, remove unused fn, fix bugs
* tests/fuzzers/vflux: add ClientPool fuzzer
* les/vflux/server: fixed balance tests
* les: rebase fix
* les/vflux/server: fixed more bugs
* les/vflux/server: unexported NodeStateMachine fields and flags
* les/vflux/server: unexport all internal components and functions
* les/vflux/server: fixed priorityPool test
* les/vflux/server: polish balance
* les/vflux/server: fixed mutex locking error
* les/vflux/server: priorityPool bug fixed
* common/prque: make Prque wrap-around priority handling optional
* les/vflux/server: rename funcs, small optimizations
* les/vflux/server: fixed timeUntil
* les/vflux/server: separated balance.posValue and negValue
* les/vflux/server: polish setup
* les/vflux/server: enforce capacity curve monotonicity
* les/vflux/server: simplified requestCapacity
* les/vflux/server: requestCapacity with target range, no iterations in SetCapacity
* les/vflux/server: minor changes
* les/vflux/server: moved default factors to balanceTracker
* les/vflux/server: set inactiveFlag in priorityPool
* les/vflux/server: moved related metrics to vfs package
* les/vflux/client: make priorityPool temp state logic cleaner
* les/vflux/server: changed log.Crit to log.Error
* add vflux fuzzer to oss-fuzz

Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
parent e275b1a293
commit 2d89fe0883
@@ -1,453 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package les

import (
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/les/utils"
	"github.com/ethereum/go-ethereum/les/vflux"
	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
	"github.com/ethereum/go-ethereum/rlp"
)

const (
	defaultNegExpTC = 3600 // default time constant (in seconds) for exponentially reducing negative balance

	// defaultConnectedBias is applied to already connected clients so that
	// they won't be kicked out very soon and we can ensure all connected
	// clients have enough time to request or sync some data.
	//
	// todo(rjl493456442) make it configurable. It can be the option of
	// free trial time!
	defaultConnectedBias = time.Minute * 3
	inactiveTimeout      = time.Second * 10
)
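
defaultNegExpTC above is the time constant of an exponential decay: after 3600 seconds a negative balance has shrunk to 1/e (about 37%) of its starting value. A minimal sketch of that semantics (plain exponential decay; the real code goes through utils.Expirer):

package main

import (
	"fmt"
	"math"
)

func main() {
	const negExpTC = 3600.0 // seconds, mirrors defaultNegExpTC
	balance := 1000.0       // arbitrary negative-balance units
	for _, t := range []float64{0, 3600, 7200, 14400} {
		// after each time constant the remaining balance is multiplied by 1/e
		fmt.Printf("t=%6.0fs balance=%7.1f\n", t, balance*math.Exp(-t/negExpTC))
	}
}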

// clientPool implements a client database that assigns a priority to each client
// based on a positive and negative balance. Positive balance is externally assigned
// to prioritized clients and is decreased with connection time and processed
// requests (unless the price factors are zero). If the positive balance is zero
// then negative balance is accumulated.
//
// Balance tracking and priority calculation for connected clients is done by
// balanceTracker. activeQueue ensures that clients with the lowest positive or
// highest negative balance get evicted when the total capacity allowance is full
// and new clients with a better balance want to connect.
//
// Already connected nodes receive a small bias in their favor in order to avoid
// accepting and instantly kicking out clients. In theory, we try to ensure that
// each client can have several minutes of connection time.
//
// Balances of disconnected clients are stored in nodeDB, including both positive
// and negative balances. Both decrease exponentially; once a balance is low
// enough, the record is dropped.
type clientPool struct {
	vfs.BalanceTrackerSetup
	vfs.PriorityPoolSetup
	lock       sync.Mutex
	clock      mclock.Clock
	closed     bool
	removePeer func(enode.ID)
	synced     func() bool
	ns         *nodestate.NodeStateMachine
	pp         *vfs.PriorityPool
	bt         *vfs.BalanceTracker

	defaultPosFactors, defaultNegFactors vfs.PriceFactors
	posExpTC, negExpTC                   uint64
	minCap                               uint64 // the minimal capacity value allowed for any client
	connectedBias                        time.Duration
	capLimit                             uint64
}

// clientPoolPeer represents a client peer in the pool.
// Positive balances are assigned to the node key while negative balances are
// assigned to the freeClientId. Currently the network IP address without port is
// used because clients have only limited access to IP addresses, while new node
// keys can easily be generated, so it would be useless to assign a negative
// value to them.
type clientPoolPeer interface {
	Node() *enode.Node
	freeClientId() string
	updateCapacity(uint64)
	freeze()
	allowInactive() bool
}

// clientInfo defines all information required by the client pool.
type clientInfo struct {
	node                *enode.Node
	address             string
	peer                clientPoolPeer
	connected, priority bool
	connectedAt         mclock.AbsTime
	balance             *vfs.NodeBalance
}

// newClientPool creates a new client pool
func newClientPool(ns *nodestate.NodeStateMachine, lesDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID), synced func() bool) *clientPool {
	pool := &clientPool{
		ns:                  ns,
		BalanceTrackerSetup: balanceTrackerSetup,
		PriorityPoolSetup:   priorityPoolSetup,
		clock:               clock,
		minCap:              minCap,
		connectedBias:       connectedBias,
		removePeer:          removePeer,
		synced:              synced,
	}
	pool.bt = vfs.NewBalanceTracker(ns, balanceTrackerSetup, lesDb, clock, &utils.Expirer{}, &utils.Expirer{})
	pool.pp = vfs.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4)

	// set default expiration constants used by tests
	// Note: server overwrites this if token sale is active
	pool.bt.SetExpirationTCs(0, defaultNegExpTC)

	ns.SubscribeState(pool.InactiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if newState.Equals(pool.InactiveFlag) {
			ns.AddTimeout(node, pool.InactiveFlag, inactiveTimeout)
		}
		if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.InactiveFlag.Or(pool.PriorityFlag)) {
			ns.SetStateSub(node, pool.InactiveFlag, nodestate.Flags{}, 0) // remove timeout
		}
	})

	ns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
		if c == nil {
			return
		}
		c.priority = newState.HasAll(pool.PriorityFlag)
		if newState.Equals(pool.ActiveFlag) {
			cap, _ := ns.GetField(node, pool.CapacityField).(uint64)
			if cap > minCap {
				pool.pp.RequestCapacity(node, minCap, 0, true)
			}
		}
	})

	ns.SubscribeState(pool.InactiveFlag.Or(pool.ActiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if oldState.IsEmpty() {
			clientConnectedMeter.Mark(1)
			log.Debug("Client connected", "id", node.ID())
		}
		if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.ActiveFlag) {
			clientActivatedMeter.Mark(1)
			log.Debug("Client activated", "id", node.ID())
		}
		if oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) {
			clientDeactivatedMeter.Mark(1)
			log.Debug("Client deactivated", "id", node.ID())
			c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
			if c == nil || !c.peer.allowInactive() {
				pool.removePeer(node.ID())
			}
		}
		if newState.IsEmpty() {
			clientDisconnectedMeter.Mark(1)
			log.Debug("Client disconnected", "id", node.ID())
			pool.removePeer(node.ID())
		}
	})

	var totalConnected uint64
	ns.SubscribeField(pool.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		oldCap, _ := oldValue.(uint64)
		newCap, _ := newValue.(uint64)
		totalConnected += newCap - oldCap
		totalConnectedGauge.Update(int64(totalConnected))
		c, _ := ns.GetField(node, clientInfoField).(*clientInfo)
		if c != nil {
			c.peer.updateCapacity(newCap)
		}
	})
	return pool
}

// stop shuts the client pool down
func (f *clientPool) stop() {
	f.lock.Lock()
	f.closed = true
	f.lock.Unlock()
	f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
		// enforces saving all balances in BalanceTracker
		f.disconnectNode(node)
	})
	f.bt.Stop()
}

// connect should be called after a successful handshake. If the connection was
// rejected, there is no need to call disconnect.
func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) {
	f.lock.Lock()
	defer f.lock.Unlock()

	// Short circuit if clientPool is already closed.
	if f.closed {
		return 0, fmt.Errorf("client pool is already closed")
	}
	// Dedup connected peers.
	node, freeID := peer.Node(), peer.freeClientId()
	if f.ns.GetField(node, clientInfoField) != nil {
		log.Debug("Client already connected", "address", freeID, "id", node.ID().String())
		return 0, fmt.Errorf("client already connected address=%s id=%s", freeID, node.ID().String())
	}
	now := f.clock.Now()
	c := &clientInfo{
		node:        node,
		address:     freeID,
		peer:        peer,
		connected:   true,
		connectedAt: now,
	}
	f.ns.SetField(node, clientInfoField, c)
	f.ns.SetField(node, connAddressField, freeID)
	if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil {
		f.disconnect(peer)
		return 0, nil
	}
	c.balance.SetPriceFactors(f.defaultPosFactors, f.defaultNegFactors)

	f.ns.SetState(node, f.InactiveFlag, nodestate.Flags{}, 0)
	var allowed bool
	f.ns.Operation(func() {
		_, allowed = f.pp.RequestCapacity(node, f.minCap, f.connectedBias, true)
	})
	if allowed {
		return f.minCap, nil
	}
	if !peer.allowInactive() {
		f.disconnect(peer)
	}
	return 0, nil
}

// setConnectedBias sets the connection bias, which is applied to already connected
// clients so that they won't be kicked out very soon and we can ensure all
// connected clients have enough time to request or sync some data.
func (f *clientPool) setConnectedBias(bias time.Duration) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.connectedBias = bias
	f.pp.SetActiveBias(bias)
}

// disconnect should be called when a connection is terminated. If the disconnection
// was initiated by the pool itself using disconnectFn then calling disconnect is
// not necessary but permitted.
func (f *clientPool) disconnect(p clientPoolPeer) {
	f.disconnectNode(p.Node())
}

// disconnectNode removes node fields and flags related to connected status
func (f *clientPool) disconnectNode(node *enode.Node) {
	f.ns.SetField(node, connAddressField, nil)
	f.ns.SetField(node, clientInfoField, nil)
}

// setDefaultFactors sets the default price factors applied to subsequently connected clients
func (f *clientPool) setDefaultFactors(posFactors, negFactors vfs.PriceFactors) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.defaultPosFactors = posFactors
	f.defaultNegFactors = negFactors
}

// capacityInfo returns the total capacity allowance, the total capacity of connected
// clients and the total capacity of connected and prioritized clients
func (f *clientPool) capacityInfo() (uint64, uint64, uint64) {
	f.lock.Lock()
	defer f.lock.Unlock()

	// total priority active cap will be supported when the token issuer module is added
	_, activeCap := f.pp.Active()
	return f.capLimit, activeCap, 0
}

// setLimits sets the maximum number and total capacity of connected clients,
// dropping some of them if necessary.
func (f *clientPool) setLimits(totalConn int, totalCap uint64) {
	f.lock.Lock()
	defer f.lock.Unlock()

	f.capLimit = totalCap
	f.pp.SetLimits(uint64(totalConn), totalCap)
}

// setCapacity sets the assigned capacity of a connected client
func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) {
	c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
	if c == nil {
		if setCap {
			return 0, fmt.Errorf("client %064x is not connected", node.ID())
		}
		c = &clientInfo{node: node}
		f.ns.SetField(node, clientInfoField, c)
		f.ns.SetField(node, connAddressField, freeID)
		if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil {
			log.Error("BalanceField is missing", "node", node.ID())
			return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID())
		}
		defer func() {
			f.ns.SetField(node, connAddressField, nil)
			f.ns.SetField(node, clientInfoField, nil)
		}()
	}
	var (
		minPriority int64
		allowed     bool
	)
	f.ns.Operation(func() {
		if !setCap || c.priority {
			// check clientInfo.priority inside Operation to ensure thread safety
			minPriority, allowed = f.pp.RequestCapacity(node, capacity, bias, setCap)
		}
	})
	if allowed {
		return 0, nil
	}
	missing := c.balance.PosBalanceMissing(minPriority, capacity, bias)
	if missing < 1 {
		// ensure that we never return 0 missing and insufficient priority error
		missing = 1
	}
	return missing, errNoPriority
}

// setCapacityLocked acquires f.lock and then calls setCapacity; use it when the
// caller does not already hold the lock.
func (f *clientPool) setCapacityLocked(node *enode.Node, freeID string, capacity uint64, minConnTime time.Duration, setCap bool) (uint64, error) {
	f.lock.Lock()
	defer f.lock.Unlock()

	return f.setCapacity(node, freeID, capacity, minConnTime, setCap)
}

// forClients calls the supplied callback for either the listed node IDs or all connected
// nodes. It passes a valid clientInfo to the callback and ensures that the necessary
// fields and flags are set in order for BalanceTracker and PriorityPool to work even if
// the node is not connected.
func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {
	f.lock.Lock()
	defer f.lock.Unlock()

	if len(ids) == 0 {
		f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {
			c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
			if c != nil {
				cb(c)
			}
		})
	} else {
		for _, id := range ids {
			node := f.ns.GetNode(id)
			if node == nil {
				node = enode.SignNull(&enr.Record{}, id)
			}
			c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
			if c != nil {
				cb(c)
			} else {
				c = &clientInfo{node: node}
				f.ns.SetField(node, clientInfoField, c)
				f.ns.SetField(node, connAddressField, "")
				if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance != nil {
					cb(c)
				} else {
					log.Error("BalanceField is missing")
				}
				f.ns.SetField(node, connAddressField, nil)
				f.ns.SetField(node, clientInfoField, nil)
			}
		}
	}
}

// serveCapQuery serves a vflux capacity query. It receives multiple token amount values
// and a bias time value. For each given token amount it calculates the maximum achievable
// capacity in case the amount is added to the balance.
func (f *clientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte {
	var req vflux.CapacityQueryReq
	if rlp.DecodeBytes(data, &req) != nil {
		return nil
	}
	if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen {
		return nil
	}
	result := make(vflux.CapacityQueryReply, len(req.AddTokens))
	if !f.synced() {
		capacityQueryZeroMeter.Mark(1)
		reply, _ := rlp.EncodeToBytes(&result)
		return reply
	}

	node := f.ns.GetNode(id)
	if node == nil {
		node = enode.SignNull(&enr.Record{}, id)
	}
	c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)
	if c == nil {
		c = &clientInfo{node: node}
		f.ns.SetField(node, clientInfoField, c)
		f.ns.SetField(node, connAddressField, freeID)
		defer func() {
			f.ns.SetField(node, connAddressField, nil)
			f.ns.SetField(node, clientInfoField, nil)
		}()
		if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil {
			log.Error("BalanceField is missing", "node", node.ID())
			return nil
		}
	}
	// use vfs.CapacityCurve to answer request for multiple newly bought token amounts
	curve := f.pp.GetCapacityCurve().Exclude(id)
	bias := time.Second * time.Duration(req.Bias)
	if f.connectedBias > bias {
		bias = f.connectedBias
	}
	pb, _ := c.balance.GetBalance()
	for i, addTokens := range req.AddTokens {
		add := addTokens.Int64()
		result[i] = curve.MaxCapacity(func(capacity uint64) int64 {
			return c.balance.EstimatePriority(capacity, add, 0, bias, false) / int64(capacity)
		})
		if add <= 0 && uint64(-add) >= pb && result[i] > f.minCap {
			result[i] = f.minCap
		}
		if result[i] < f.minCap {
			result[i] = 0
		}
	}
	// add first result to metrics (don't care about priority client multi-queries yet)
	if result[0] == 0 {
		capacityQueryZeroMeter.Mark(1)
	} else {
		capacityQueryNonZeroMeter.Mark(1)
	}
	reply, _ := rlp.EncodeToBytes(&result)
	return reply
}
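
curve.MaxCapacity above searches the capacity curve for the highest capacity at which the supplied per-capacity priority estimate is still acceptable; the commit message's "enforce capacity curve monotonicity" is what makes such a search well defined. A hypothetical, much-simplified picture of the idea, assuming a monotonically decreasing priority function and a plain non-negative acceptance criterion (the real capacityCurve consults precomputed curve sections instead of bisecting):

// maxCapacitySketch is a simplified, hypothetical stand-in for the search
// performed by curve.MaxCapacity: bisect for the largest capacity whose
// estimated priority is still non-negative.
func maxCapacitySketch(priority func(cap uint64) int64, min, max uint64) uint64 {
	for min < max {
		mid := (min + max + 1) / 2
		if priority(mid) >= 0 {
			min = mid // still affordable, try higher
		} else {
			max = mid - 1 // too expensive, try lower
		}
	}
	return min
}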
@@ -0,0 +1,335 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"errors"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/les/utils"
	"github.com/ethereum/go-ethereum/les/vflux"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
	"github.com/ethereum/go-ethereum/rlp"
)

var (
	ErrNotConnected    = errors.New("client not connected")
	ErrNoPriority      = errors.New("priority too low to raise capacity")
	ErrCantFindMaximum = errors.New("unable to find maximum allowed capacity")
)

// ClientPool implements a client database that assigns a priority to each client
// based on a positive and negative balance. Positive balance is externally assigned
// to prioritized clients and is decreased with connection time and processed
// requests (unless the price factors are zero). If the positive balance is zero
// then negative balance is accumulated.
//
// Balance tracking and priority calculation for connected clients is done by
// balanceTracker. PriorityQueue ensures that clients with the lowest positive or
// highest negative balance get evicted when the total capacity allowance is full
// and new clients with a better balance want to connect.
//
// Already connected nodes receive a small bias in their favor in order to avoid
// accepting and instantly kicking out clients. In theory, we try to ensure that
// each client can have several minutes of connection time.
//
// Balances of disconnected clients are stored in nodeDB, including both positive
// and negative balances. Both decrease exponentially; once a balance is low
// enough, the record is dropped.
type ClientPool struct {
	*priorityPool
	*balanceTracker

	setup  *serverSetup
	clock  mclock.Clock
	closed bool
	ns     *nodestate.NodeStateMachine
	synced func() bool

	lock          sync.RWMutex
	connectedBias time.Duration

	minCap     uint64      // the minimal capacity value allowed for any client
	capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation
}

// clientPeer represents a peer in the client pool. None of the callbacks should block.
type clientPeer interface {
	Node() *enode.Node
	FreeClientId() string                         // unique id for non-priority clients (typically a prefix of the network address)
	InactiveAllowance() time.Duration             // disconnection timeout for inactive non-priority peers
	UpdateCapacity(newCap uint64, requested bool) // signals a capacity update (requested is true if it is a result of a SetCapacity call on the given peer)
	Disconnect()                                  // initiates disconnection (Unregister should always be called)
}
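
For illustration, a minimal clientPeer implementation could look like the hypothetical demoPeer below (the LES server and the fuzzer at the end of this diff provide the real implementations):

// demoPeer is a hypothetical clientPeer used only to illustrate the interface.
type demoPeer struct {
	node *enode.Node
	addr string // network address without port; doubles as the free client id
	cap  uint64
}

func (p *demoPeer) Node() *enode.Node                            { return p.node }
func (p *demoPeer) FreeClientId() string                         { return p.addr }
func (p *demoPeer) InactiveAllowance() time.Duration             { return 10 * time.Second }
func (p *demoPeer) UpdateCapacity(newCap uint64, requested bool) { p.cap = newCap }
func (p *demoPeer) Disconnect() {
	// tear down the network connection here; Unregister should still be called
}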

// NewClientPool creates a new client pool
func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock, synced func() bool) *ClientPool {
	setup := newServerSetup()
	ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
	cp := &ClientPool{
		priorityPool:   newPriorityPool(ns, setup, clock, minCap, connectedBias, 4, 100),
		balanceTracker: newBalanceTracker(ns, setup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}),
		setup:          setup,
		ns:             ns,
		clock:          clock,
		minCap:         minCap,
		connectedBias:  connectedBias,
		synced:         synced,
	}

	ns.SubscribeState(nodestate.MergeFlags(setup.activeFlag, setup.inactiveFlag, setup.priorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if newState.Equals(setup.inactiveFlag) {
			// set timeout for non-priority inactive client
			var timeout time.Duration
			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
				timeout = c.InactiveAllowance()
			}
			if timeout > 0 {
				ns.AddTimeout(node, setup.inactiveFlag, timeout)
			} else {
				// Note: if capacity is immediately available then priorityPool will set the active
				// flag simultaneously with removing the inactive flag and therefore this will not
				// initiate disconnection
				ns.SetStateSub(node, nodestate.Flags{}, setup.inactiveFlag, 0)
			}
		}
		if oldState.Equals(setup.inactiveFlag) && newState.Equals(setup.inactiveFlag.Or(setup.priorityFlag)) {
			ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout
		}
		if newState.Equals(setup.activeFlag) {
			// active with no priority; limit capacity to minCap
			cap, _ := ns.GetField(node, setup.capacityField).(uint64)
			if cap > minCap {
				cp.requestCapacity(node, minCap, minCap, 0)
			}
		}
		if newState.Equals(nodestate.Flags{}) {
			if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
				c.Disconnect()
			}
		}
	})

	ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
		if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok {
			newCap, _ := newValue.(uint64)
			c.UpdateCapacity(newCap, node == cp.capReqNode)
		}
	})

	// add metrics
	cp.ns.SubscribeState(nodestate.MergeFlags(cp.setup.activeFlag, cp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
		if oldState.IsEmpty() && !newState.IsEmpty() {
			clientConnectedMeter.Mark(1)
		}
		if !oldState.IsEmpty() && newState.IsEmpty() {
			clientDisconnectedMeter.Mark(1)
		}
		if oldState.HasNone(cp.setup.activeFlag) && newState.HasAll(cp.setup.activeFlag) {
			clientActivatedMeter.Mark(1)
		}
		if oldState.HasAll(cp.setup.activeFlag) && newState.HasNone(cp.setup.activeFlag) {
			clientDeactivatedMeter.Mark(1)
		}
		_, connected := cp.Active()
		totalConnectedGauge.Update(int64(connected))
	})
	return cp
}

// Start starts the client pool. Should be called before Register/Unregister.
func (cp *ClientPool) Start() {
	cp.ns.Start()
}

// Stop shuts the client pool down. The clientPeer interface callbacks will not be called
// after Stop. Register calls will return nil.
func (cp *ClientPool) Stop() {
	cp.balanceTracker.stop()
	cp.ns.Stop()
}

// Register registers the peer into the client pool. If the peer has insufficient
// priority and remains inactive for longer than the allowed timeout then it will be
// disconnected by calling the Disconnect function of the clientPeer interface.
func (cp *ClientPool) Register(peer clientPeer) ConnectedBalance {
	cp.ns.SetField(peer.Node(), cp.setup.clientField, peerWrapper{peer})
	balance, _ := cp.ns.GetField(peer.Node(), cp.setup.balanceField).(*nodeBalance)
	return balance
}

// Unregister removes the peer from the client pool
func (cp *ClientPool) Unregister(peer clientPeer) {
	cp.ns.SetField(peer.Node(), cp.setup.clientField, nil)
}
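
Taken together with NewClientPool, a typical lifecycle might look like the sketch below. balanceDb, posFactors, negFactors and peer are placeholders; SetLimits, SetDefaultFactors and SetExpirationTCs come from the embedded priorityPool/balanceTracker, as exercised by the fuzzer later in this diff:

pool := NewClientPool(balanceDb, 10, time.Minute*3, mclock.System{}, func() bool { return true })
pool.Start()
pool.SetLimits(100, 10000)                     // max active client count, max total capacity
pool.SetDefaultFactors(posFactors, negFactors) // pricing for newly registered peers

balance := pool.Register(peer) // ConnectedBalance handle; nil once the pool is stopped
_ = balance                    // query/charge the peer's balance while it is connected
pool.Unregister(peer)
pool.Stop()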

// SetConnectedBias sets the connection bias, which is applied to already connected
// clients so that they won't be kicked out very soon and we can ensure all
// connected clients have enough time to request or sync some data.
func (cp *ClientPool) SetConnectedBias(bias time.Duration) {
	cp.lock.Lock()
	cp.connectedBias = bias
	cp.setActiveBias(bias)
	cp.lock.Unlock()
}

// SetCapacity sets the assigned capacity of a connected client
func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Duration, requested bool) (capacity uint64, err error) {
	cp.lock.RLock()
	if cp.connectedBias > bias {
		bias = cp.connectedBias
	}
	cp.lock.RUnlock()

	cp.ns.Operation(func() {
		balance, _ := cp.ns.GetField(node, cp.setup.balanceField).(*nodeBalance)
		if balance == nil {
			err = ErrNotConnected
			return
		}
		capacity, _ = cp.ns.GetField(node, cp.setup.capacityField).(uint64)
		if capacity == 0 {
			// if the client is inactive then it has insufficient priority for the minimal capacity
			// (will be activated automatically with minCap when possible)
			return
		}
		if reqCap < cp.minCap {
			// can't request less than minCap; switching between 0 (inactive state) and minCap is
			// performed by the server automatically as soon as necessary/possible
			reqCap = cp.minCap
		}
		if reqCap > cp.minCap && cp.ns.GetState(node).HasNone(cp.setup.priorityFlag) {
			err = ErrNoPriority
			return
		}
		if reqCap == capacity {
			return
		}
		if requested {
			// mark the requested node so that the UpdateCapacity callback can signal
			// whether the update is the direct result of a SetCapacity call on the given node
			cp.capReqNode = node
			defer func() {
				cp.capReqNode = nil
			}()
		}

		var minTarget, maxTarget uint64
		if reqCap > capacity {
			// Estimate maximum available capacity at the current priority level and request
			// the estimated amount.
			// Note: requestCapacity could find the highest available capacity between the
			// current and the requested capacity but it could cost a lot of iterations with
			// fine step adjustment if the requested capacity is very high. By doing a quick
			// estimation of the maximum available capacity based on the capacity curve we
			// can limit the number of required iterations.
			curve := cp.getCapacityCurve().exclude(node.ID())
			maxTarget = curve.maxCapacity(func(capacity uint64) int64 {
				return balance.estimatePriority(capacity, 0, 0, bias, false)
			})
			if maxTarget <= capacity {
				return
			}
			if maxTarget > reqCap {
				maxTarget = reqCap
			}
			// Specify a narrow target range that allows a limited number of fine step
			// iterations
			minTarget = maxTarget - maxTarget/20
			if minTarget < capacity {
				minTarget = capacity
			}
		} else {
			minTarget, maxTarget = reqCap, reqCap
		}
		if newCap := cp.requestCapacity(node, minTarget, maxTarget, bias); newCap >= minTarget && newCap <= maxTarget {
			capacity = newCap
			return
		}
		// we should be able to find the maximum allowed capacity in a few iterations
		log.Error("Unable to find maximum allowed capacity")
		err = ErrCantFindMaximum
	})
	return
}
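
The narrowing step above is worth a worked example: if the curve estimates maxTarget = 1000 at the current priority, then minTarget = 1000 - 1000/20 = 950, so requestCapacity only needs to settle the final value inside a ~5% window instead of iterating from the current capacity all the way up (currentCap below stands for the client's present capacity):

// Worked example of the target-range narrowing in SetCapacity.
maxTarget := uint64(1000)             // curve estimate at current priority
minTarget := maxTarget - maxTarget/20 // 950; at most a 5% window remains
if minTarget < currentCap {           // never aim below the current capacity
	minTarget = currentCap
}
// requestCapacity(node, minTarget, maxTarget, bias) then resolves within [950, 1000]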

// serveCapQuery serves a vflux capacity query. It receives multiple token amount values
// and a bias time value. For each given token amount it calculates the maximum achievable
// capacity in case the amount is added to the balance.
func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte {
	var req vflux.CapacityQueryReq
	if rlp.DecodeBytes(data, &req) != nil {
		return nil
	}
	if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen {
		return nil
	}
	result := make(vflux.CapacityQueryReply, len(req.AddTokens))
	if !cp.synced() {
		capacityQueryZeroMeter.Mark(1)
		reply, _ := rlp.EncodeToBytes(&result)
		return reply
	}

	bias := time.Second * time.Duration(req.Bias)
	cp.lock.RLock()
	if cp.connectedBias > bias {
		bias = cp.connectedBias
	}
	cp.lock.RUnlock()

	// use capacityCurve to answer request for multiple newly bought token amounts
	curve := cp.getCapacityCurve().exclude(id)
	cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) {
		pb, _ := balance.GetBalance()
		for i, addTokens := range req.AddTokens {
			add := addTokens.Int64()
			result[i] = curve.maxCapacity(func(capacity uint64) int64 {
				return balance.estimatePriority(capacity, add, 0, bias, false) / int64(capacity)
			})
			if add <= 0 && uint64(-add) >= pb && result[i] > cp.minCap {
				result[i] = cp.minCap
			}
			if result[i] < cp.minCap {
				result[i] = 0
			}
		}
	})
	// add first result to metrics (don't care about priority client multi-queries yet)
	if result[0] == 0 {
		capacityQueryZeroMeter.Mark(1)
	} else {
		capacityQueryNonZeroMeter.Mark(1)
	}
	reply, _ := rlp.EncodeToBytes(&result)
	return reply
}

// Handle implements Service
func (cp *ClientPool) Handle(id enode.ID, address string, name string, data []byte) []byte {
	switch name {
	case vflux.CapacityQueryName:
		return cp.serveCapQuery(id, address, data)
	default:
		return nil
	}
}
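
For reference, a capacity query can be driven end to end through Handle roughly as follows (a sketch mirroring the fuzzer's request construction later in this diff; pool, nodeID and freeID are placeholders, and Type 0 is taken to be a finite token amount as in the fuzzer):

req := vflux.CapacityQueryReq{
	Bias:      10, // seconds, compared against the pool's connected bias
	AddTokens: []vflux.IntOrInf{{Type: 0, Value: *big.NewInt(1000000)}},
}
reqEnc, err := rlp.EncodeToBytes(&req)
if err != nil {
	panic(err)
}
replyEnc := pool.Handle(nodeID, freeID, vflux.CapacityQueryName, reqEnc)

var reply vflux.CapacityQueryReply
if err := rlp.DecodeBytes(replyEnc, &reply); err == nil {
	fmt.Println("achievable capacity with added tokens:", reply[0])
}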
@@ -0,0 +1,33 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"github.com/ethereum/go-ethereum/metrics"
)

var (
	totalConnectedGauge = metrics.NewRegisteredGauge("vflux/server/totalConnected", nil)

	clientConnectedMeter    = metrics.NewRegisteredMeter("vflux/server/clientEvent/connected", nil)
	clientActivatedMeter    = metrics.NewRegisteredMeter("vflux/server/clientEvent/activated", nil)
	clientDeactivatedMeter  = metrics.NewRegisteredMeter("vflux/server/clientEvent/deactivated", nil)
	clientDisconnectedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/disconnected", nil)

	capacityQueryZeroMeter    = metrics.NewRegisteredMeter("vflux/server/capQueryZero", nil)
	capacityQueryNonZeroMeter = metrics.NewRegisteredMeter("vflux/server/capQueryNonZero", nil)
)
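
Because all of these pass nil as the registry, they register on metrics.DefaultRegistry; a sketch of dumping them at runtime, assuming the usual go-metrics style interfaces of go-ethereum's metrics package:

metrics.DefaultRegistry.Each(func(name string, i interface{}) {
	switch m := i.(type) {
	case metrics.Gauge:
		fmt.Println(name, "=", m.Snapshot().Value())
	case metrics.Meter:
		fmt.Println(name, "count =", m.Snapshot().Count())
	}
})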
@@ -0,0 +1,59 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package server

import (
	"reflect"

	"github.com/ethereum/go-ethereum/p2p/nodestate"
)

type peerWrapper struct{ clientPeer } // the NodeStateMachine type system needs this wrapper

// serverSetup is a wrapper of the node state machine setup, which contains
// all the created flags and fields used on the vflux server side.
type serverSetup struct {
	setup       *nodestate.Setup
	clientField nodestate.Field // Field contains the client peer handler

	// Flags and fields controlled by the balance tracker. balanceTracker
	// is responsible for setting/deleting these flags or fields.
	priorityFlag nodestate.Flags // Flag is set if the node has a positive balance
	updateFlag   nodestate.Flags // Flag is set whenever the node balance is changed (priority changed)
	balanceField nodestate.Field // Field contains the client balance for priority calculation

	// Flags and fields controlled by the priority queue. The priority queue
	// is responsible for setting/deleting these flags or fields.
	activeFlag    nodestate.Flags // Flag is set if the node is active
	inactiveFlag  nodestate.Flags // Flag is set if the node is inactive
	capacityField nodestate.Field // Field contains the capacity of the node
	queueField    nodestate.Field // Field contains the information in the priority queue
}

// newServerSetup initializes the setup for the state machine and returns the flags/fields group.
func newServerSetup() *serverSetup {
	setup := &serverSetup{setup: &nodestate.Setup{}}
	setup.clientField = setup.setup.NewField("client", reflect.TypeOf(peerWrapper{}))
	setup.priorityFlag = setup.setup.NewFlag("priority")
	setup.updateFlag = setup.setup.NewFlag("update")
	setup.balanceField = setup.setup.NewField("balance", reflect.TypeOf(&nodeBalance{}))
	setup.activeFlag = setup.setup.NewFlag("active")
	setup.inactiveFlag = setup.setup.NewFlag("inactive")
	setup.capacityField = setup.setup.NewField("capacity", reflect.TypeOf(uint64(0)))
	setup.queueField = setup.setup.NewField("queue", reflect.TypeOf(&ppNodeInfo{}))
	return setup
}
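
These flags and fields are what NewClientPool subscribes to; as a sketch, watching activation transitions on the same state machine would look like this (clock is a placeholder mclock.Clock, and the log import is assumed):

setup := newServerSetup()
ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup)
ns.SubscribeState(setup.activeFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {
	if oldState.HasNone(setup.activeFlag) && newState.HasAll(setup.activeFlag) {
		log.Info("Client activated", "id", node.ID())
	}
})
ns.Start()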
@@ -0,0 +1,289 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package vflux

import (
	"bytes"
	"encoding/binary"
	"io"
	"math"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/les/vflux"
	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/rlp"
)

type fuzzer struct {
	peers                  [256]*clientPeer
	disconnectList         []*clientPeer
	input                  io.Reader
	exhausted              bool
	activeCount, activeCap uint64
	maxCount, maxCap       uint64
}

type clientPeer struct {
	fuzzer  *fuzzer
	node    *enode.Node
	freeID  string
	timeout time.Duration

	balance  vfs.ConnectedBalance
	capacity uint64
}

func (p *clientPeer) Node() *enode.Node {
	return p.node
}

func (p *clientPeer) FreeClientId() string {
	return p.freeID
}

func (p *clientPeer) InactiveAllowance() time.Duration {
	return p.timeout
}

func (p *clientPeer) UpdateCapacity(newCap uint64, requested bool) {
	p.fuzzer.activeCap -= p.capacity
	if p.capacity != 0 {
		p.fuzzer.activeCount--
	}
	p.capacity = newCap
	p.fuzzer.activeCap += p.capacity
	if p.capacity != 0 {
		p.fuzzer.activeCount++
	}
}

func (p *clientPeer) Disconnect() {
	p.fuzzer.disconnectList = append(p.fuzzer.disconnectList, p)
	p.fuzzer.activeCap -= p.capacity
	if p.capacity != 0 {
		p.fuzzer.activeCount--
	}
	p.capacity = 0
	p.balance = nil
}

func newFuzzer(input []byte) *fuzzer {
	f := &fuzzer{
		input: bytes.NewReader(input),
	}
	for i := range f.peers {
		f.peers[i] = &clientPeer{
			fuzzer:  f,
			node:    enode.SignNull(new(enr.Record), enode.ID{byte(i)}),
			freeID:  string([]byte{byte(i)}),
			timeout: f.randomDelay(),
		}
	}
	return f
}

func (f *fuzzer) read(size int) []byte {
	out := make([]byte, size)
	if _, err := f.input.Read(out); err != nil {
		f.exhausted = true
	}
	return out
}

func (f *fuzzer) randomByte() byte {
	d := f.read(1)
	return d[0]
}

func (f *fuzzer) randomBool() bool {
	d := f.read(1)
	return d[0]&1 == 1
}

func (f *fuzzer) randomInt(max int) int {
	if max == 0 {
		return 0
	}
	if max <= 256 {
		return int(f.randomByte()) % max
	}
	var a uint16
	if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil {
		f.exhausted = true
	}
	return int(a % uint16(max))
}

func (f *fuzzer) randomTokenAmount(signed bool) int64 {
	x := uint64(f.randomInt(65000))
	x = x * x * x * x

	if signed && (x&1) == 1 {
		if x <= math.MaxInt64 {
			return -int64(x)
		}
		return math.MinInt64
	}
	if x <= math.MaxInt64 {
		return int64(x)
	}
	return math.MaxInt64
}

func (f *fuzzer) randomDelay() time.Duration {
	delay := f.randomByte()
	if delay < 128 {
		return time.Duration(delay) * time.Second
	}
	return 0
}

func (f *fuzzer) randomFactors() vfs.PriceFactors {
	return vfs.PriceFactors{
		TimeFactor:     float64(f.randomByte()) / 25500,
		CapacityFactor: float64(f.randomByte()) / 255,
		RequestFactor:  float64(f.randomByte()) / 255,
	}
}

func (f *fuzzer) connectedBalanceOp(balance vfs.ConnectedBalance) {
	switch f.randomInt(3) {
	case 0:
		balance.RequestServed(uint64(f.randomTokenAmount(false)))
	case 1:
		balance.SetPriceFactors(f.randomFactors(), f.randomFactors())
	case 2:
		balance.GetBalance()
		balance.GetRawBalance()
		balance.GetPriceFactors()
	}
}

func (f *fuzzer) atomicBalanceOp(balance vfs.AtomicBalanceOperator) {
	switch f.randomInt(3) {
	case 0:
		balance.AddBalance(f.randomTokenAmount(true))
	case 1:
		balance.SetBalance(uint64(f.randomTokenAmount(false)), uint64(f.randomTokenAmount(false)))
	case 2:
		balance.GetBalance()
		balance.GetRawBalance()
		balance.GetPriceFactors()
	}
}

func FuzzClientPool(input []byte) int {
	if len(input) > 10000 {
		return -1
	}
	f := newFuzzer(input)
	if f.exhausted {
		return 0
	}
	clock := &mclock.Simulated{}
	db := memorydb.New()
	pool := vfs.NewClientPool(db, 10, f.randomDelay(), clock, func() bool { return true })
	pool.Start()
	defer pool.Stop()

	count := 0
	for !f.exhausted && count < 1000 {
		count++
		switch f.randomInt(11) {
		case 0:
			i := int(f.randomByte())
			f.peers[i].balance = pool.Register(f.peers[i])
		case 1:
			i := int(f.randomByte())
			f.peers[i].Disconnect()
		case 2:
			f.maxCount = uint64(f.randomByte())
			f.maxCap = uint64(f.randomByte())
			f.maxCap *= f.maxCap
			pool.SetLimits(f.maxCount, f.maxCap)
		case 3:
			pool.SetConnectedBias(f.randomDelay())
		case 4:
			pool.SetDefaultFactors(f.randomFactors(), f.randomFactors())
		case 5:
			pool.SetExpirationTCs(uint64(f.randomInt(50000)), uint64(f.randomInt(50000)))
		case 6:
			if _, err := pool.SetCapacity(f.peers[f.randomByte()].node, uint64(f.randomByte()), f.randomDelay(), f.randomBool()); err == vfs.ErrCantFindMaximum {
				panic(nil)
			}
		case 7:
			if balance := f.peers[f.randomByte()].balance; balance != nil {
				f.connectedBalanceOp(balance)
			}
		case 8:
			pool.BalanceOperation(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].freeID, func(balance vfs.AtomicBalanceOperator) {
				count := f.randomInt(4)
				for i := 0; i < count; i++ {
					f.atomicBalanceOp(balance)
				}
			})
		case 9:
			pool.TotalTokenAmount()
			pool.GetExpirationTCs()
			pool.Active()
			pool.Limits()
			pool.GetPosBalanceIDs(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].node.ID(), f.randomInt(100))
		case 10:
			req := vflux.CapacityQueryReq{
				Bias:      uint64(f.randomByte()),
				AddTokens: make([]vflux.IntOrInf, f.randomInt(vflux.CapacityQueryMaxLen+1)),
			}
			for i := range req.AddTokens {
				v := vflux.IntOrInf{Type: uint8(f.randomInt(4))}
				if v.Type < 2 {
					v.Value = *big.NewInt(f.randomTokenAmount(false))
				}
				req.AddTokens[i] = v
			}
			reqEnc, err := rlp.EncodeToBytes(&req)
			if err != nil {
				panic(err)
			}
			p := int(f.randomByte())
			if p < len(reqEnc) {
				reqEnc[p] = f.randomByte()
			}
			pool.Handle(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].freeID, vflux.CapacityQueryName, reqEnc)
		}

		for _, peer := range f.disconnectList {
			pool.Unregister(peer)
		}
		f.disconnectList = nil
		if d := f.randomDelay(); d > 0 {
			clock.Run(d)
		}
		//fmt.Println(f.activeCount, f.maxCount, f.activeCap, f.maxCap)
		if activeCount, activeCap := pool.Active(); activeCount != f.activeCount || activeCap != f.activeCap {
			panic(nil)
		}
		if f.activeCount > f.maxCount || f.activeCap > f.maxCap {
			panic(nil)
		}
	}
	return 0
}
@@ -0,0 +1,41 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/ethereum/go-ethereum/tests/fuzzers/vflux"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintf(os.Stderr, "Usage: debug <file>\n")
		fmt.Fprintf(os.Stderr, "Example\n")
		fmt.Fprintf(os.Stderr, "	$ debug ../crashers/4bbef6857c733a87ecf6fd8b9e7238f65eb9862a\n")
		os.Exit(1)
	}
	crasher := os.Args[1]
	data, err := ioutil.ReadFile(crasher)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error loading crasher %v: %v", crasher, err)
		os.Exit(1)
	}
	vflux.FuzzClientPool(data)
}