eth/protocols/snap, p2p/msgrate: support overriding max TTL

Péter Szilágyi 4 years ago
parent 7b5a6f357d
commit 14c2574d4a
eth/protocols/snap/sync_test.go | 21
p2p/msgrate/msgrate.go          |  9
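
This commit replaces the package-level requestTimeout variable that the snap sync tests used to mutate with a per-instance OverrideTTLLimit field on msgrate.Trackers. A minimal sketch of the new knob, using only identifiers visible in this diff (the main wrapper and the logger construction are illustrative, not part of the change):

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/msgrate"
)

func main() {
	// NewTrackers seeds OverrideTTLLimit with the package default, so
	// production callers see the same timeout cap as before this change.
	rates := msgrate.NewTrackers(log.New())
	fmt.Println(rates.TargetTimeout()) // capped at the default TTL limit

	// Tests can shrink the ceiling to make timeouts fire quickly.
	rates.OverrideTTLLimit = time.Millisecond
	fmt.Println(rates.TargetTimeout()) // now at most one millisecond
}

Since NewTrackers seeds the field with the default ttlLimit, nothing changes for production callers unless they explicitly assign it.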

@@ -794,15 +794,8 @@ func TestMultiSyncManyUseless(t *testing.T) {
 	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 
-/*
 // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all
 func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
-	// We're setting the timeout to very low, to increase the chance of the timeout
-	// being triggered. This was previously a cause of panic, when a response
-	// arrived simultaneously as a timeout was triggered.
-	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
-	requestTimeout = time.Millisecond
-
 	var (
 		once   sync.Once
 		cancel = make(chan struct{})
@@ -839,6 +832,11 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
 		mkSource("noStorage", true, false, true),
 		mkSource("noTrie", true, true, false),
 	)
+	// We're setting the timeout to very low, to increase the chance of the timeout
+	// being triggered. This was previously a cause of panic, when a response
+	// arrived simultaneously as a timeout was triggered.
+	syncer.rates.OverrideTTLLimit = time.Millisecond
+
 	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
@@ -849,10 +847,6 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
 // TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all
 func TestMultiSyncManyUnresponsive(t *testing.T) {
-	// We're setting the timeout to very low, to make the test run a bit faster
-	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
-	requestTimeout = time.Millisecond
-
 	var (
 		once   sync.Once
 		cancel = make(chan struct{})
@@ -889,13 +883,16 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
 		mkSource("noStorage", true, false, true),
 		mkSource("noTrie", true, true, false),
 	)
+	// We're setting the timeout to very low, to make the test run a bit faster
+	syncer.rates.OverrideTTLLimit = time.Millisecond
+
 	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
 	close(done)
 	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
-}*/
+}
 
 func checkStall(t *testing.T, term func()) chan struct{} {
 	testDone := make(chan struct{})
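
Moving the override onto the Trackers instance also retires the save-and-restore dance around the old package variable: a test that bails out with t.Fatalf can no longer leak a one-millisecond timeout into later tests, and parallel tests cannot race on shared state. A hedged sketch of that isolation (hypothetical test, assuming the msgrate, log, time and testing imports):

func TestOverrideTTLLimitIsPerInstance(t *testing.T) {
	// Two independent trackers: shortening one cap must not affect the other.
	a := msgrate.NewTrackers(log.New())
	b := msgrate.NewTrackers(log.New())

	a.OverrideTTLLimit = time.Millisecond
	if got := a.TargetTimeout(); got > time.Millisecond {
		t.Fatalf("override not applied: %v", got)
	}
	if got := b.TargetTimeout(); got <= time.Millisecond {
		t.Fatalf("override leaked across instances: %v", got)
	}
}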

@@ -226,6 +226,10 @@ type Trackers struct {
 	// run every now and again.
 	tuned time.Time
 
+	// The fields below can be used to override certain default values. Their
+	// purpose is to allow quicker tests. Don't use them in production.
+	OverrideTTLLimit time.Duration
+
 	log  log.Logger
 	lock sync.RWMutex
 }
@@ -237,6 +241,7 @@ func NewTrackers(log log.Logger) *Trackers {
 		roundtrip:        rttMaxEstimate,
 		confidence:       1,
 		tuned:            time.Now(),
+		OverrideTTLLimit: ttlLimit,
 		log:              log,
 	}
 }
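
Seeding OverrideTTLLimit with ttlLimit in the constructor is load-bearing: targetTimeout below compares against the field directly, so a zero value would clamp every computed timeout down to zero and expire all requests instantly. A tiny sketch of the failure mode the seeding avoids (values are illustrative):

// With OverrideTTLLimit left at its zero value, any positive timeout
// satisfies the comparison and gets clamped:
var limit time.Duration        // zero value a raw struct literal would leave
timeout := 3 * time.Second     // stand-in for the scaled roundtrip estimate
if timeout > limit {
	timeout = limit // collapses to 0, so every request would expire at once
}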
@@ -369,8 +374,8 @@ func (t *Trackers) TargetTimeout() time.Duration {
 // during QoS tuning.
 func (t *Trackers) targetTimeout() time.Duration {
 	timeout := time.Duration(ttlScaling * float64(t.roundtrip) / t.confidence)
-	if timeout > ttlLimit {
-		timeout = ttlLimit
+	if timeout > t.OverrideTTLLimit {
+		timeout = t.OverrideTTLLimit
 	}
 	return timeout
 }
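
For intuition on the clamp: targetTimeout scales the tracked roundtrip estimate by ttlScaling, divides by the confidence, then caps the result at the override. A worked example with assumed figures (ttlScaling's real value and the tracked estimates live elsewhere in this file; the numbers below are made up):

const ttlScaling = 3                // assumed scaling factor, for illustration
roundtrip := 500 * time.Millisecond // assumed tracked roundtrip estimate
confidence := 0.5                   // assumed confidence in the estimate

timeout := time.Duration(ttlScaling * float64(roundtrip) / confidence) // 3s raw
if timeout > time.Millisecond { // with OverrideTTLLimit = time.Millisecond
	timeout = time.Millisecond // unresponsive peers are detected almost instantly
}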
