les: clean up server handler (#22357)

pull/22368/head
gary rong 4 years ago committed by GitHub
parent 8f03e3b107
commit 3ecfdccd9a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 185
      les/server_handler.go
  2. 16
      les/server_requests.go

@ -204,6 +204,90 @@ func (h *serverHandler) handle(p *clientPeer) error {
} }
} }
// beforeHandle runs a series of prechecks before a request message is
// handled: request-count validation, frozen-peer rejection and flow-control
// admission. On success it returns the serving task created for the request
// together with the prepaid maximum cost; on any rejection it charges the
// client the one-time message cost and returns (nil, 0), in which case the
// caller must not process the request.
func (h *serverHandler) beforeHandle(p *clientPeer, reqID, responseCount uint64, msg p2p.Msg, reqCnt uint64, maxCount uint64) (*servingTask, uint64) {
	// Ensure that the request sent by client peer is valid
	inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0)
	if reqCnt == 0 || reqCnt > maxCount {
		p.fcClient.OneTimeCost(inSizeCost)
		return nil, 0
	}
	// Ensure that the client peer complies with the flow control
	// rules agreed by both sides.
	if p.isFrozen() {
		p.fcClient.OneTimeCost(inSizeCost)
		return nil, 0
	}
	maxCost := p.fcCosts.getMaxCost(msg.Code, reqCnt)
	accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost)
	if !accepted {
		// The client exhausted its flow-control buffer; freeze it so it
		// backs off until the buffer recharges.
		p.freeze()
		p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
		p.fcClient.OneTimeCost(inSizeCost)
		return nil, 0
	}
	// Create a multi-stage task, estimate the time it takes for the task to
	// execute, and cache it in the request service queue.
	factor := h.server.costTracker.globalFactor()
	if factor < 0.001 {
		// Log the bogus value BEFORE clamping it, otherwise the error
		// always reports factor=1 and the actual invalid value is lost.
		p.Log().Error("Invalid global cost factor", "factor", factor)
		factor = 1
	}
	maxTime := uint64(float64(maxCost) / factor)
	task := h.server.servingQueue.newTask(p, maxTime, priority)
	if !task.start() {
		// The serving queue refused the task; refund down to the pure
		// message-size cost so the client is not charged for serving.
		p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost)
		return nil, 0
	}
	return task, maxCost
}
// afterHandle performs the post-processing for a served request: it marks
// the serving task as finished, settles the flow-control accounting with
// the client and, when a reply was produced, feeds the cost tracker
// statistics, charges the peer's priority balance and queues the reply
// for sending.
func (h *serverHandler) afterHandle(p *clientPeer, reqID, responseCount uint64, msg p2p.Msg, maxCost uint64, reqCnt uint64, task *servingTask, reply *reply) {
	if reply != nil {
		task.done()
	}
	p.responseLock.Lock()
	defer p.responseLock.Unlock()

	// A frozen client is only charged the real serving cost; no reply is sent.
	if p.isFrozen() {
		p.fcClient.RequestProcessed(reqID, responseCount, maxCost, h.server.costTracker.realCost(task.servingTime, msg.Size, 0))
		return
	}
	// Positive correction buffer value with real cost.
	var outSize uint32
	if reply != nil {
		outSize = reply.size()
	}
	cost := maxCost // fake cost for testing mode; also the upper bound below
	if !h.server.costTracker.testing {
		if cost = h.server.costTracker.realCost(task.servingTime, msg.Size, outSize); cost > maxCost {
			cost = maxCost
		}
	}
	bufValue := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, cost)
	if reply == nil {
		return
	}
	// Feed cost tracker request serving statistic.
	h.server.costTracker.updateStats(msg.Code, reqCnt, task.servingTime, cost)
	// Reduce priority "balance" for the specific peer.
	p.balance.RequestServed(cost)
	p.queueSend(func() {
		if err := reply.send(bufValue); err != nil {
			// Surface the send error without blocking if one is pending.
			select {
			case p.errCh <- err:
			default:
			}
		}
	})
}
// handleMsg is invoked whenever an inbound message is received from a remote // handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error. // peer. The remote connection is torn down upon returning any error.
func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
@ -221,9 +305,8 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
} }
defer msg.Discard() defer msg.Discard()
p.responseCount++ // Lookup the request handler table, ensure it's supported
responseCount := p.responseCount // message type by the protocol.
req, ok := Les3[msg.Code] req, ok := Les3[msg.Code]
if !ok { if !ok {
p.Log().Trace("Received invalid message", "code", msg.Code) p.Log().Trace("Received invalid message", "code", msg.Code)
@ -232,98 +315,42 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {
} }
p.Log().Trace("Received " + req.Name) p.Log().Trace("Received " + req.Name)
// Decode the p2p message, resolve the concrete handler for it.
serve, reqID, reqCnt, err := req.Handle(msg) serve, reqID, reqCnt, err := req.Handle(msg)
if err != nil { if err != nil {
clientErrorMeter.Mark(1) clientErrorMeter.Mark(1)
return errResp(ErrDecode, "%v: %v", msg, err) return errResp(ErrDecode, "%v: %v", msg, err)
} }
if metrics.EnabledExpensive { if metrics.EnabledExpensive {
req.InPacketsMeter.Mark(1) req.InPacketsMeter.Mark(1)
req.InTrafficMeter.Mark(int64(msg.Size)) req.InTrafficMeter.Mark(int64(msg.Size))
} }
p.responseCount++
responseCount := p.responseCount
// Short circuit if the peer is already frozen or the request is invalid. // First check this client message complies all rules before
inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0) // handling it and return a processor if all checks are passed.
if p.isFrozen() || reqCnt == 0 || reqCnt > req.MaxCount { task, maxCost := h.beforeHandle(p, reqID, responseCount, msg, reqCnt, req.MaxCount)
p.fcClient.OneTimeCost(inSizeCost) if task == nil {
return nil
}
// Prepaid max cost units before request been serving.
maxCost := p.fcCosts.getMaxCost(msg.Code, reqCnt)
accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost)
if !accepted {
p.freeze()
p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
p.fcClient.OneTimeCost(inSizeCost)
return nil return nil
} }
// Create a multi-stage task, estimate the time it takes for the task to wg.Add(1)
// execute, and cache it in the request service queue. go func() {
factor := h.server.costTracker.globalFactor() defer wg.Done()
if factor < 0.001 {
factor = 1
p.Log().Error("Invalid global cost factor", "factor", factor)
}
maxTime := uint64(float64(maxCost) / factor)
task := h.server.servingQueue.newTask(p, maxTime, priority)
if task.start() {
wg.Add(1)
go func() {
defer wg.Done()
reply := serve(h, p, task.waitOrStop)
if reply != nil {
task.done()
}
p.responseLock.Lock() reply := serve(h, p, task.waitOrStop)
defer p.responseLock.Unlock() h.afterHandle(p, reqID, responseCount, msg, maxCost, reqCnt, task, reply)
// Short circuit if the client is already frozen. if metrics.EnabledExpensive {
if p.isFrozen() { size := uint32(0)
realCost := h.server.costTracker.realCost(task.servingTime, msg.Size, 0)
p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
return
}
// Positive correction buffer value with real cost.
var replySize uint32
if reply != nil { if reply != nil {
replySize = reply.size() size = reply.size()
}
var realCost uint64
if h.server.costTracker.testing {
realCost = maxCost // Assign a fake cost for testing purpose
} else {
realCost = h.server.costTracker.realCost(task.servingTime, msg.Size, replySize)
if realCost > maxCost {
realCost = maxCost
}
} }
bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost) req.OutPacketsMeter.Mark(1)
if reply != nil { req.OutTrafficMeter.Mark(int64(size))
// Feed cost tracker request serving statistic. req.ServingTimeMeter.Update(time.Duration(task.servingTime))
h.server.costTracker.updateStats(msg.Code, reqCnt, task.servingTime, realCost) }
// Reduce priority "balance" for the specific peer. }()
p.balance.RequestServed(realCost)
p.queueSend(func() {
if err := reply.send(bv); err != nil {
select {
case p.errCh <- err:
default:
}
}
})
if metrics.EnabledExpensive {
req.OutPacketsMeter.Mark(1)
req.OutTrafficMeter.Mark(int64(replySize))
req.ServingTimeMeter.Update(time.Duration(task.servingTime))
}
}
}()
} else {
p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost)
}
// If the client has made too much invalid request(e.g. request a non-existent data), // If the client has made too much invalid request(e.g. request a non-existent data),
// reject them to prevent SPAM attack. // reject them to prevent SPAM attack.
if p.getInvalid() > maxRequestErrors { if p.getInvalid() > maxRequestErrors {

@ -65,7 +65,7 @@ type serveRequestFn func(backend serverBackend, peer *clientPeer, waitOrStop fun
// Les3 contains the request types supported by les/2 and les/3 // Les3 contains the request types supported by les/2 and les/3
var Les3 = map[uint64]RequestType{ var Les3 = map[uint64]RequestType{
GetBlockHeadersMsg: RequestType{ GetBlockHeadersMsg: {
Name: "block header request", Name: "block header request",
MaxCount: MaxHeaderFetch, MaxCount: MaxHeaderFetch,
InPacketsMeter: miscInHeaderPacketsMeter, InPacketsMeter: miscInHeaderPacketsMeter,
@ -75,7 +75,7 @@ var Les3 = map[uint64]RequestType{
ServingTimeMeter: miscServingTimeHeaderTimer, ServingTimeMeter: miscServingTimeHeaderTimer,
Handle: handleGetBlockHeaders, Handle: handleGetBlockHeaders,
}, },
GetBlockBodiesMsg: RequestType{ GetBlockBodiesMsg: {
Name: "block bodies request", Name: "block bodies request",
MaxCount: MaxBodyFetch, MaxCount: MaxBodyFetch,
InPacketsMeter: miscInBodyPacketsMeter, InPacketsMeter: miscInBodyPacketsMeter,
@ -85,7 +85,7 @@ var Les3 = map[uint64]RequestType{
ServingTimeMeter: miscServingTimeBodyTimer, ServingTimeMeter: miscServingTimeBodyTimer,
Handle: handleGetBlockBodies, Handle: handleGetBlockBodies,
}, },
GetCodeMsg: RequestType{ GetCodeMsg: {
Name: "code request", Name: "code request",
MaxCount: MaxCodeFetch, MaxCount: MaxCodeFetch,
InPacketsMeter: miscInCodePacketsMeter, InPacketsMeter: miscInCodePacketsMeter,
@ -95,7 +95,7 @@ var Les3 = map[uint64]RequestType{
ServingTimeMeter: miscServingTimeCodeTimer, ServingTimeMeter: miscServingTimeCodeTimer,
Handle: handleGetCode, Handle: handleGetCode,
}, },
GetReceiptsMsg: RequestType{ GetReceiptsMsg: {
Name: "receipts request", Name: "receipts request",
MaxCount: MaxReceiptFetch, MaxCount: MaxReceiptFetch,
InPacketsMeter: miscInReceiptPacketsMeter, InPacketsMeter: miscInReceiptPacketsMeter,
@ -105,7 +105,7 @@ var Les3 = map[uint64]RequestType{
ServingTimeMeter: miscServingTimeReceiptTimer, ServingTimeMeter: miscServingTimeReceiptTimer,
Handle: handleGetReceipts, Handle: handleGetReceipts,
}, },
GetProofsV2Msg: RequestType{ GetProofsV2Msg: {
Name: "les/2 proofs request", Name: "les/2 proofs request",
MaxCount: MaxProofsFetch, MaxCount: MaxProofsFetch,
InPacketsMeter: miscInTrieProofPacketsMeter, InPacketsMeter: miscInTrieProofPacketsMeter,
@ -115,7 +115,7 @@ var Les3 = map[uint64]RequestType{
ServingTimeMeter: miscServingTimeTrieProofTimer, ServingTimeMeter: miscServingTimeTrieProofTimer,
Handle: handleGetProofs, Handle: handleGetProofs,
}, },
GetHelperTrieProofsMsg: RequestType{ GetHelperTrieProofsMsg: {
Name: "helper trie proof request", Name: "helper trie proof request",
MaxCount: MaxHelperTrieProofsFetch, MaxCount: MaxHelperTrieProofsFetch,
InPacketsMeter: miscInHelperTriePacketsMeter, InPacketsMeter: miscInHelperTriePacketsMeter,
@ -125,7 +125,7 @@ var Les3 = map[uint64]RequestType{
ServingTimeMeter: miscServingTimeHelperTrieTimer, ServingTimeMeter: miscServingTimeHelperTrieTimer,
Handle: handleGetHelperTrieProofs, Handle: handleGetHelperTrieProofs,
}, },
SendTxV2Msg: RequestType{ SendTxV2Msg: {
Name: "new transactions", Name: "new transactions",
MaxCount: MaxTxSend, MaxCount: MaxTxSend,
InPacketsMeter: miscInTxsPacketsMeter, InPacketsMeter: miscInTxsPacketsMeter,
@ -135,7 +135,7 @@ var Les3 = map[uint64]RequestType{
ServingTimeMeter: miscServingTimeTxTimer, ServingTimeMeter: miscServingTimeTxTimer,
Handle: handleSendTx, Handle: handleSendTx,
}, },
GetTxStatusMsg: RequestType{ GetTxStatusMsg: {
Name: "transaction status query request", Name: "transaction status query request",
MaxCount: MaxTxStatus, MaxCount: MaxTxStatus,
InPacketsMeter: miscInTxStatusPacketsMeter, InPacketsMeter: miscInTxStatusPacketsMeter,

Loading…
Cancel
Save