
Commit

adjusting tests
michalkucharczyk committed Nov 12, 2024
1 parent f10590f commit 7b96ca3
Showing 3 changed files with 51 additions and 77 deletions.
substrate/client/transaction-pool/tests/fatp.rs (4 changes: 2 additions & 2 deletions)
@@ -2267,12 +2267,12 @@ fn fatp_avoid_stuck_transaction() {

assert_pool_status!(header06.hash(), &pool, 0, 0);

let event = finalized_block_event(&pool, header03.hash(), header06.hash());
let header07 = api.push_block(7, vec![], true);
let event = finalized_block_event(&pool, header03.hash(), header07.hash());
block_on(pool.maintain(event));

let xt4i_events = futures::executor::block_on_stream(xt4i_watcher).collect::<Vec<_>>();
log::debug!("xt4i_events: {:#?}", xt4i_events);

assert_eq!(xt4i_events, vec![TransactionStatus::Future, TransactionStatus::Dropped]);
assert_eq!(pool.mempool_len(), (0, 0));
}
substrate/client/transaction-pool/tests/fatp_limits.rs (96 changes: 34 additions & 62 deletions)
@@ -642,12 +642,8 @@ fn fatp_limits_future_size_works() {
assert_eq!(pool.mempool_len().0, 3);
}

// this test is bad by design: xt0, xt1 cannot be dropped by the pool after finalizing 02 - there
// can be a new fork where xt0, xt1 will be valid.
// todo: do we need this test?
#[test]
#[ignore]
fn fatp_limits_watcher_xxx() {
fn fatp_limits_watcher_ready_transactions_are_not_droped_when_view_is_dropped() {
sp_tracing::try_init_simple();

let builder = TestPoolBuilder::new();
@@ -670,26 +666,32 @@ fn fatp_limits_watcher_xxx() {
let xt4 = uxt(Eve, 600);
let xt5 = uxt(Ferdie, 700);

let xt0_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
let xt1_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();
let _xt0_watcher =
block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt0.clone())).unwrap();
let _xt1_watcher =
block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt1.clone())).unwrap();

assert_pool_status!(header01.hash(), &pool, 2, 0);
assert_eq!(pool.mempool_len().1, 2);

let header02 = api.push_block_with_parent(header01.hash(), vec![], true);
block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()), header02.hash())));

let xt2_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap();
let _xt2_watcher =
block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt2.clone())).unwrap();
let _xt3_watcher =
block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt3.clone())).unwrap();

assert_pool_status!(header02.hash(), &pool, 2, 0);
assert_eq!(pool.mempool_len().1, 4);

let header03 = api.push_block_with_parent(header02.hash(), vec![], true);
block_on(pool.maintain(new_best_block_event(&pool, Some(header02.hash()), header03.hash())));

let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap();
let xt5_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap();
let _xt4_watcher =
block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt4.clone())).unwrap();
let _xt5_watcher =
block_on(pool.submit_and_watch(invalid_hash(), SOURCE, xt5.clone())).unwrap();

assert_pool_status!(header03.hash(), &pool, 2, 0);
assert_eq!(pool.mempool_len().1, 6);
@@ -714,63 +716,32 @@ fn fatp_limits_watcher_xxx() {

block_on(pool.maintain(finalized_block_event(&pool, header01.hash(), header02.hash())));
assert!(!pool.status_all().contains_key(&header02.hash()));

//view 01 was dropped
assert!(pool.ready_at(header01.hash()).now_or_never().is_none());
//todo: can we do better? We don't have an API to check if the event was processed internally.
let mut counter = 0;
while pool.mempool_len().1 != 4 {
sleep(std::time::Duration::from_millis(1));
counter = counter + 1;
if counter > 20 {
assert!(false, "timeout");
}
}
assert_eq!(pool.mempool_len().1, 4);
assert_eq!(pool.mempool_len().1, 6);

block_on(pool.maintain(finalized_block_event(&pool, header02.hash(), header03.hash())));
log::info!("status: {:?}", pool.status_all());

//no revalidation has happened yet, all txs are kept
assert_eq!(pool.mempool_len().1, 6);

//view 03 is still there
assert!(!pool.status_all().contains_key(&header03.hash()));

//view 02 was dropped
assert!(pool.ready_at(header02.hash()).now_or_never().is_none());

//
//
//
//

// assert_pool_status!(header02e.hash(), &pool, 0, 0);
//
// let header02f = api.push_block_with_parent(header01.hash(), vec![], true);
// block_on(pool.maintain(new_best_block_event(&pool, Some(header01.hash()),
// header02f.hash()))); assert_pool_status!(header02f.hash(), &pool, 2, 0);
// assert_ready_iterator!(header02f.hash(), pool, [xt1, xt2]);
//
// let xt3_watcher = block_on(pool.submit_and_watch(invalid_hash(), SOURCE,
// xt3.clone())).unwrap(); let xt4_watcher = block_on(pool.submit_and_watch(invalid_hash(),
// SOURCE, xt4.clone())).unwrap(); let result5 = block_on(pool.submit_and_watch(invalid_hash(),
// SOURCE, xt5.clone())).map(|_| ());
//
// //xt5 hits internal mempool limit
// assert!(matches!(result5.unwrap_err().0, TxPoolError::ImmediatelyDropped));
//
// assert_pool_status!(header02e.hash(), &pool, 2, 0);
// assert_ready_iterator!(header02e.hash(), pool, [xt3, xt4]);
// assert_eq!(pool.mempool_len().1, 4);
//
// let xt1_status = futures::executor::block_on_stream(xt1_watcher).take(2).collect::<Vec<_>>();
// assert_eq!(
// xt1_status,
// vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 1))]
// );
//
// let xt2_status = futures::executor::block_on_stream(xt2_watcher).take(2).collect::<Vec<_>>();
// assert_eq!(
// xt2_status,
// vec![TransactionStatus::Ready, TransactionStatus::InBlock((header02e.hash(), 2))]
// );
//
// let xt3_status = futures::executor::block_on_stream(xt3_watcher).take(1).collect::<Vec<_>>();
// assert_eq!(xt3_status, vec![TransactionStatus::Ready]);
// let xt4_status = futures::executor::block_on_stream(xt4_watcher).take(1).collect::<Vec<_>>();
// assert_eq!(xt4_status, vec![TransactionStatus::Ready]);
let mut prev_header = header03;
for n in 5..=11 {
let header = api.push_block(n, vec![], true);
let event = finalized_block_event(&pool, prev_header.hash(), header.hash());
block_on(pool.maintain(event));
prev_header = header;
}

//now revalidation has happened, all txs are dropped
assert_eq!(pool.mempool_len().1, 0);
}

macro_rules! assert_future_iterator {
@@ -867,6 +838,7 @@ fn fatp_limits_watcher_future_transactions_are_droped_when_view_is_dropped() {
(xt4, xt4_watcher),
(xt5, xt5_watcher),
];

let future_hashes = pool.futures().iter().map(|t| t.hash).collect::<Vec<_>>();
to_be_checked.retain(|x| !future_hashes.contains(&api.hash_and_length(&x.0).0));

substrate/client/transaction-pool/tests/pool.rs (28 changes: 15 additions & 13 deletions)
@@ -80,12 +80,14 @@ fn create_basic_pool(test_api: TestApi) -> BasicPool<TestApi, Block> {
create_basic_pool_with_genesis(Arc::from(test_api)).0
}

const TSOURCE: TimedTransactionSource =
TimedTransactionSource { source: TransactionSource::External, timestamp: None };
const SOURCE: TransactionSource = TransactionSource::External;

#[test]
fn submission_should_work() {
let (pool, api) = pool();
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into()))
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into()))
.unwrap();

let pending: Vec<_> = pool
@@ -99,9 +101,9 @@ fn submission_should_work() {
#[test]
fn multiple_submission_should_work() {
let (pool, api) = pool();
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into()))
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into()))
.unwrap();
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into()))
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into()))
.unwrap();

let pending: Vec<_> = pool
@@ -116,7 +118,7 @@ fn multiple_submission_should_work() {
fn early_nonce_should_be_culled() {
sp_tracing::try_init_simple();
let (pool, api) = pool();
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 208).into()))
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 208).into()))
.unwrap();

log::debug!("-> {:?}", pool.validated_pool().status());
@@ -132,7 +134,7 @@ fn early_nonce_should_be_culled() {
fn late_nonce_should_be_queued() {
let (pool, api) = pool();

block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into()))
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into()))
.unwrap();
let pending: Vec<_> = pool
.validated_pool()
Expand All @@ -141,7 +143,7 @@ fn late_nonce_should_be_queued() {
.collect();
assert_eq!(pending, Vec::<Nonce>::new());

block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into()))
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into()))
.unwrap();
let pending: Vec<_> = pool
.validated_pool()
Expand All @@ -155,9 +157,9 @@ fn late_nonce_should_be_queued() {
fn prune_tags_should_work() {
let (pool, api) = pool();
let hash209 =
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 209).into()))
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 209).into()))
.unwrap();
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt(Alice, 210).into()))
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt(Alice, 210).into()))
.unwrap();

let pending: Vec<_> = pool
@@ -183,9 +185,9 @@ fn should_ban_invalid_transactions() {
let (pool, api) = pool();
let uxt = Arc::from(uxt(Alice, 209));
let hash =
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap();
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap();
pool.validated_pool().remove_invalid(&[hash]);
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err();
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err();

// when
let pending: Vec<_> = pool
@@ -196,7 +198,7 @@ fn should_ban_invalid_transactions() {
assert_eq!(pending, Vec::<Nonce>::new());

// then
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, uxt.clone())).unwrap_err();
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, uxt.clone())).unwrap_err();
}

#[test]
@@ -224,7 +226,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() {
}));
let pool = Pool::new(Default::default(), true.into(), api.clone());
let xt0 = Arc::from(uxt(Alice, 209));
block_on(pool.submit_one(&api.expect_hash_and_number(0), SOURCE, xt0.clone()))
block_on(pool.submit_one(&api.expect_hash_and_number(0), TSOURCE, xt0.clone()))
.expect("1. Imported");
assert_eq!(pool.validated_pool().status().ready, 1);
assert_eq!(api.validation_requests().len(), 1);
@@ -242,7 +244,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() {
api.increment_nonce(Alice.into());
api.push_block(2, Vec::new(), true);
let xt1 = uxt(Alice, 211);
block_on(pool.submit_one(&api.expect_hash_and_number(2), SOURCE, xt1.clone().into()))
block_on(pool.submit_one(&api.expect_hash_and_number(2), TSOURCE, xt1.clone().into()))
.expect("2. Imported");
assert_eq!(api.validation_requests().len(), 3);
assert_eq!(pool.validated_pool().status().ready, 1);
