@@ -719,6 +719,12 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
 		txns = append(txns, transaction(origin+i, big.NewInt(100000), key2))
 	}
 	pool2.AddBatch(txns)
 
+	if err := pool2.validateInternals(); err != nil {
+		t.Error(err)
+	}
+	if err := pool1.validateInternals(); err != nil {
+		t.Error(err)
+	}
 	// Ensure the batch optimization honors the same pool mechanics
 	if len(pool1.pending) != len(pool2.pending) {
@@ -769,6 +775,9 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
 	// Import the batch and verify that limits have been enforced
 	pool.AddBatch(txs)
+	if err := pool.validateInternals(); err != nil {
+		t.Error(err)
+	}
 	pending := 0
 	for _, list := range pool.pending {
 		pending += list.Len()
@@ -778,6 +787,42 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
 	}
 }
 
+// Tests that if transactions start being capped, transactions are also removed from 'all'
+func TestTransactionCapClearsFromAll(t *testing.T) {
+	// Reduce the queue limits to shorten test time
+	defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
+	DefaultTxPoolConfig.AccountSlots = 2
+	DefaultTxPoolConfig.AccountQueue = 2
+	DefaultTxPoolConfig.GlobalSlots = 8
+
+	// Create the pool to test the limit enforcement with
+	db, _ := ethdb.NewMemDatabase()
+	statedb, _ := state.New(common.Hash{}, db)
+
+	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
+	pool.resetState()
+
+	// Create a test account and fund it
+	state, _ := pool.currentState()
+
+	key, _ := crypto.GenerateKey()
+	addr := crypto.PubkeyToAddress(key.PublicKey)
+	state.AddBalance(addr, big.NewInt(1000000))
+
+	txs := types.Transactions{}
+	nonce := uint64(0)
+	for j := 0; j < int(DefaultTxPoolConfig.GlobalSlots)*2; j++ {
+		tx := transaction(nonce, big.NewInt(100000), key)
+		txs = append(txs, tx)
+		nonce++
+	}
+	// Import the batch and verify that limits have been enforced
+	pool.AddBatch(txs)
+	if err := pool.validateInternals(); err != nil {
+		t.Error(err)
+	}
+}
+
 // Tests that if the transaction count belonging to multiple accounts go above
 // some hard threshold, if they are under the minimum guaranteed slot count then
 // the transactions are still kept.
@@ -815,6 +860,10 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
 	// Import the batch and verify that limits have been enforced
 	pool.AddBatch(txs)
+	if err := pool.validateInternals(); err != nil {
+		t.Error(err)
+	}
+
 	for addr, list := range pool.pending {
 		if list.Len() != int(DefaultTxPoolConfig.AccountSlots) {
 			t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), DefaultTxPoolConfig.AccountSlots)
@@ -860,6 +909,10 @@ func TestTransactionPoolRepricing(t *testing.T) {
 	// Import the batch and that both pending and queued transactions match up
 	pool.AddBatch(txs)
+	if err := pool.validateInternals(); err != nil {
+		t.Error(err)
+	}
+
 	pending, queued := pool.stats()
 	if pending != 4 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
@@ -894,6 +947,10 @@ func TestTransactionPoolRepricing(t *testing.T) {
 	if pending, _ = pool.stats(); pending != 3 {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
 	}
+
+	if err := pool.validateInternals(); err != nil {
+		t.Error(err)
+	}
 }
 
 // Tests that when the pool reaches its global transaction limit, underpriced
@@ -937,6 +994,9 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
 	// Import the batch and that both pending and queued transactions match up
 	pool.AddBatch(txs)
+	if err := pool.validateInternals(); err != nil {
+		t.Error(err)
+	}
 	pending, queued := pool.stats()
 	if pending != 3 {
@@ -980,6 +1040,9 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
 	if queued != 2 {
 		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
 	}
+	if err := pool.validateInternals(); err != nil {
+		t.Error(err)
+	}
 }
 
 // Tests that the pool rejects replacement transactions that don't meet the minimum
@@ -1041,6 +1104,9 @@ func TestTransactionReplacement(t *testing.T) {
 	if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(threshold+1), key)); err != nil {
 		t.Fatalf("failed to replace original queued transaction: %v", err)
 	}
+	if err := pool.validateInternals(); err != nil {
+		t.Error(err)
+	}
 }
 
 // Benchmarks the speed of validating the contents of the pending queue of the
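
Note: the patch calls pool.validateInternals() after every batch import but does not itself show that helper. The standalone program below is only an illustrative sketch, assuming the helper cross-checks the per-account pending/queued indexes against the pool's global 'all' lookup (the property the new TestTransactionCapClearsFromAll relies on); the miniPool type and its fields are hypothetical and are not the pool's actual code.

// Illustrative sketch only: a minimal model of an internal-consistency check
// of the kind validateInternals is assumed to perform.
package main

import "fmt"

// miniPool is a hypothetical, stripped-down stand-in for the transaction pool.
type miniPool struct {
	pending map[string][]uint64 // account -> executable nonces
	queue   map[string][]uint64 // account -> future (queued) nonces
	all     map[string]bool     // "account/nonce" -> tracked, the global lookup
}

// validateInternals verifies that every pending/queued entry is present in
// 'all' and that 'all' contains nothing else (e.g. leftovers after capping).
func (p *miniPool) validateInternals() error {
	tracked := 0
	for _, index := range []map[string][]uint64{p.pending, p.queue} {
		for addr, nonces := range index {
			for _, n := range nonces {
				if !p.all[fmt.Sprintf("%s/%d", addr, n)] {
					return fmt.Errorf("tx %s/%d missing from 'all'", addr, n)
				}
				tracked++
			}
		}
	}
	if tracked != len(p.all) {
		return fmt.Errorf("'all' size mismatch: have %d, want %d", len(p.all), tracked)
	}
	return nil
}

func main() {
	p := &miniPool{
		pending: map[string][]uint64{"0xaa": {0, 1}},
		queue:   map[string][]uint64{"0xbb": {5}},
		all:     map[string]bool{"0xaa/0": true, "0xaa/1": true, "0xbb/5": true},
	}
	fmt.Println("consistent pool:", p.validateInternals())

	// Simulate a capping bug: drop a pending tx but leave it in 'all'.
	p.pending["0xaa"] = p.pending["0xaa"][:1]
	fmt.Println("after bad cap:", p.validateInternals())
}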