eth/protocols/snap, p2p/msgrate: support overriding max TTL

Péter Szilágyi 4 years ago
parent 7b5a6f357d
commit 14c2574d4a
 eth/protocols/snap/sync_test.go | 21
 p2p/msgrate/msgrate.go          | 19

eth/protocols/snap/sync_test.go

@@ -794,15 +794,8 @@ func TestMultiSyncManyUseless(t *testing.T) {
 	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
 }
 
-/*
 // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all
 func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
-	// We're setting the timeout to very low, to increase the chance of the timeout
-	// being triggered. This was previously a cause of panic, when a response
-	// arrived simultaneously as a timeout was triggered.
-	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
-	requestTimeout = time.Millisecond
-
 	var (
 		once   sync.Once
 		cancel = make(chan struct{})
@@ -839,6 +832,11 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
 		mkSource("noStorage", true, false, true),
 		mkSource("noTrie", true, true, false),
 	)
+	// We're setting the timeout to very low, to increase the chance of the timeout
+	// being triggered. This was previously a cause of panic, when a response
+	// arrived simultaneously as a timeout was triggered.
+	syncer.rates.OverrideTTLLimit = time.Millisecond
+
 	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
@@ -849,10 +847,6 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
 
 // TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all
 func TestMultiSyncManyUnresponsive(t *testing.T) {
-	// We're setting the timeout to very low, to make the test run a bit faster
-	defer func(old time.Duration) { requestTimeout = old }(requestTimeout)
-	requestTimeout = time.Millisecond
-
 	var (
 		once   sync.Once
 		cancel = make(chan struct{})
@@ -889,13 +883,16 @@ func TestMultiSyncManyUnresponsive(t *testing.T) {
 		mkSource("noStorage", true, false, true),
 		mkSource("noTrie", true, true, false),
 	)
+	// We're setting the timeout to very low, to make the test run a bit faster
+	syncer.rates.OverrideTTLLimit = time.Millisecond
+
 	done := checkStall(t, term)
 	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
 		t.Fatalf("sync failed: %v", err)
 	}
 	close(done)
 	verifyTrie(syncer.db, sourceAccountTrie.Hash(), t)
-}*/
+}
 
 func checkStall(t *testing.T, term func()) chan struct{} {
 	testDone := make(chan struct{})
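
The test changes all follow one pattern: instead of mutating the package-level requestTimeout (shared state visible to every test in the package), each test now lowers the TTL cap on its own syncer's rate tracker. A minimal, self-contained sketch of that pattern against the msgrate API shown in the hunks below (the logger context is an arbitrary choice for illustration):

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/msgrate"
)

func main() {
	// Each syncer owns its own Trackers instance, so lowering the cap here
	// only affects this instance; other tests keep the default ttlLimit.
	rates := msgrate.NewTrackers(log.New("proto", "snap"))
	rates.OverrideTTLLimit = time.Millisecond

	// Timeouts derived from this tracker are now capped at 1ms, which is
	// what the low-timeout and unresponsive-peer tests rely on.
	fmt.Println(rates.TargetTimeout())
}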

p2p/msgrate/msgrate.go

@@ -226,6 +226,10 @@ type Trackers struct {
 	// run every now and again.
 	tuned time.Time
 
+	// The fields below can be used to override certain default values. Their
+	// purpose is to allow quicker tests. Don't use them in production.
+	OverrideTTLLimit time.Duration
+
 	log  log.Logger
 	lock sync.RWMutex
 }
@@ -233,11 +237,12 @@ type Trackers struct {
 
 // NewTrackers creates an empty set of trackers to be filled with peers.
 func NewTrackers(log log.Logger) *Trackers {
 	return &Trackers{
-		trackers:   make(map[string]*Tracker),
-		roundtrip:  rttMaxEstimate,
-		confidence: 1,
-		tuned:      time.Now(),
-		log:        log,
+		trackers:         make(map[string]*Tracker),
+		roundtrip:        rttMaxEstimate,
+		confidence:       1,
+		tuned:            time.Now(),
+		OverrideTTLLimit: ttlLimit,
+		log:              log,
 	}
 }
@@ -369,8 +374,8 @@ func (t *Trackers) TargetTimeout() time.Duration {
 // during QoS tuning.
 func (t *Trackers) targetTimeout() time.Duration {
 	timeout := time.Duration(ttlScaling * float64(t.roundtrip) / t.confidence)
-	if timeout > ttlLimit {
-		timeout = ttlLimit
+	if timeout > t.OverrideTTLLimit {
+		timeout = t.OverrideTTLLimit
 	}
 	return timeout
 }
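
For reference, the capping logic is simple: the tracked round-trip time is scaled, divided by the confidence, and the result is clamped to the per-instance limit rather than the package constant ttlLimit. A standalone sketch of that arithmetic (the ttlScaling value of 3 and the sample RTT/confidence figures are illustrative assumptions, not values taken from this commit):

package main

import (
	"fmt"
	"time"
)

// targetTimeout mirrors the clamping in Trackers.targetTimeout: scale the
// tracked RTT, divide by the confidence, and cap the result at the limit.
func targetTimeout(roundtrip time.Duration, confidence float64, limit time.Duration) time.Duration {
	const ttlScaling = 3 // illustrative scaling factor, not taken from this diff

	timeout := time.Duration(ttlScaling * float64(roundtrip) / confidence)
	if timeout > limit {
		timeout = limit
	}
	return timeout
}

func main() {
	// With a generous limit the scaled RTT wins: 3 * 200ms / 0.5 = 1.2s.
	fmt.Println(targetTimeout(200*time.Millisecond, 0.5, time.Minute))
	// A test that sets OverrideTTLLimit to a millisecond gets the clamp instead.
	fmt.Println(targetTimeout(200*time.Millisecond, 0.5, time.Millisecond))
}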
