swarm/pss: fixing race condition (#18487)

Branch: pull/18502/head
Author: gluk256 (6 years ago), committed by Anton Evangelatov
Parent: 15b9b39e6c
Commit: 105008b6a1
Changed file: swarm/pss/pss.go (20 lines changed)

@@ -340,6 +340,7 @@ func (p *Pss) Register(topic *Topic, hndlr *handler) func() {
 	}
 	return func() { p.deregister(topic, hndlr) }
 }
+
 func (p *Pss) deregister(topic *Topic, hndlr *handler) {
 	p.handlersMu.Lock()
 	defer p.handlersMu.Unlock()
@@ -362,13 +363,6 @@ func (p *Pss) deregister(topic *Topic, hndlr *handler) {
 	delete(handlers, hndlr)
 }
 
-// get all registered handlers for respective topics
-func (p *Pss) getHandlers(topic Topic) map[*handler]bool {
-	p.handlersMu.RLock()
-	defer p.handlersMu.RUnlock()
-	return p.handlers[topic]
-}
-
 // Filters incoming messages for processing or forwarding.
 // Check if address partially matches
 // If yes, it CAN be for us, and we process it
@@ -427,7 +421,6 @@ func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error {
 		}
 	}
 	return nil
-
 }
 
 // Entry point to processing a message for which the current node can be the intended recipient.
@@ -472,13 +465,22 @@ func (p *Pss) process(pssmsg *PssMsg, raw bool, prox bool) error {
 	p.executeHandlers(psstopic, payload, from, raw, prox, asymmetric, keyid)
 	return nil
 }
 
+// copy all registered handlers for respective topic in order to avoid data race or deadlock
+func (p *Pss) getHandlers(topic Topic) (ret []*handler) {
+	p.handlersMu.RLock()
+	defer p.handlersMu.RUnlock()
+	for k := range p.handlers[topic] {
+		ret = append(ret, k)
+	}
+	return ret
+}
+
 func (p *Pss) executeHandlers(topic Topic, payload []byte, from PssAddress, raw bool, prox bool, asymmetric bool, keyid string) {
 	handlers := p.getHandlers(topic)
 	peer := p2p.NewPeer(enode.ID{}, fmt.Sprintf("%x", from), []p2p.Cap{})
-	for h := range handlers {
+	for _, h := range handlers {
 		if !h.caps.raw && raw {
 			log.Warn("norawhandler")
 			continue
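Why this fixes the race: the old getHandlers returned the live map[*handler]bool guarded by handlersMu, but released the read lock before callers iterated it, so executeHandlers could range over the map while Register or deregister mutated it concurrently. The new version copies the handlers into a fresh slice while the read lock is held, so callers never touch shared state after the lock is released; the loop in executeHandlers changes from for h := range handlers (which yields map keys) to for _, h := range handlers (which yields slice values) to match. Below is a minimal, self-contained sketch of the pattern; the registry and handler types and all names are hypothetical stand-ins, not the actual Pss API.

	// Sketch of the bug and the fix. The registry/handler types and names
	// are hypothetical; only the locking pattern mirrors the commit above.
	package main

	import (
		"fmt"
		"sync"
	)

	type handler struct{ name string }

	type registry struct {
		mu       sync.RWMutex
		handlers map[string]map[*handler]bool
	}

	// racy (pre-fix shape): returns the live map; callers iterate it after
	// the read lock is released, racing with concurrent register writes.
	func (r *registry) getHandlersRacy(topic string) map[*handler]bool {
		r.mu.RLock()
		defer r.mu.RUnlock()
		return r.handlers[topic]
	}

	// fixed (post-fix shape): copies the handlers into a fresh slice while
	// the read lock is held, so the caller never touches the shared map.
	func (r *registry) getHandlers(topic string) (ret []*handler) {
		r.mu.RLock()
		defer r.mu.RUnlock()
		for h := range r.handlers[topic] {
			ret = append(ret, h)
		}
		return ret
	}

	func (r *registry) register(topic string, h *handler) {
		r.mu.Lock()
		defer r.mu.Unlock()
		if r.handlers[topic] == nil {
			r.handlers[topic] = make(map[*handler]bool)
		}
		r.handlers[topic][h] = true
	}

	func main() {
		r := &registry{handlers: make(map[string]map[*handler]bool)}
		r.register("topic", &handler{name: "h0"})

		var wg sync.WaitGroup
		wg.Add(2)
		go func() { // writer: keeps mutating the shared map
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				r.register("topic", &handler{name: fmt.Sprintf("h%d", i)})
			}
		}()
		go func() { // reader: iterating getHandlersRacy here would race
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				for _, h := range r.getHandlers("topic") {
					_ = h.name
				}
			}
		}()
		wg.Wait()
		fmt.Println("done; swap in getHandlersRacy and run with -race to see the report")
	}

Running the sketch with go run -race (after switching the reader to getHandlersRacy) produces a data-race report of the kind this commit addresses. The copying accessor trades one small allocation per dispatch for lock-free iteration; a handler registered after the copy is taken is simply missed by that one dispatch, which is an acceptable consistency trade-off here.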