diff --git a/reqpool/src/lib.rs b/reqpool/src/lib.rs
index 58140e23..e2502116 100644
--- a/reqpool/src/lib.rs
+++ b/reqpool/src/lib.rs
@@ -1,7 +1,6 @@
 mod config;
 mod macros;
-mod memory_pool;
-#[cfg(test)]
+#[cfg(any(test, feature = "enable-mock"))]
 mod mock;
 mod redis_pool;
 mod request;
diff --git a/reqpool/src/memory_pool.rs b/reqpool/src/memory_pool.rs
deleted file mode 100644
index 410da5d1..00000000
--- a/reqpool/src/memory_pool.rs
+++ /dev/null
@@ -1,115 +0,0 @@
-// use std::collections::HashMap;
-
-// use chrono::Utc;
-
-// use crate::{
-//     request::{RequestEntity, RequestKey, Status, StatusWithContext},
-//     traits::{Pool, PoolWithTrace},
-// };
-
-// #[derive(Debug, Clone)]
-// pub struct MemoryPool {
-//     /// The live requests in the pool
-//     pending: HashMap<RequestKey, (RequestEntity, StatusWithContext)>,
-//     /// The trace of requests
-//     trace: Vec<(RequestKey, RequestEntity, StatusWithContext)>,
-// }
-
-// impl Pool for MemoryPool {
-//     type Config = ();
-
-//     fn new(_config: Self::Config) -> Self {
-//         Self {
-//             lives: HashMap::new(),
-//             trace: Vec::new(),
-//         }
-//     }
-
-//     fn add(&mut self, request_key: RequestKey, request_entity: RequestEntity) {
-//         let status = StatusWithContext::new(Status::Registered, Utc::now());
-
-//         let old = self.lives.insert(
-//             request_key.clone(),
-//             (request_entity.clone(), status.clone()),
-//         );
-
-//         if let Some((_, old_status)) = old {
-//             tracing::error!(
-//                 "MemoryPool.add: request key already exists, {request_key:?}, old status: {old_status:?}"
-//             );
-//         } else {
-//             tracing::info!("MemoryPool.add, {request_key:?}, status: {status:?}");
-//         }
-
-//         self.trace.push((request_key, request_entity, status));
-//     }
-
-//     fn remove(&mut self, request_key: &RequestKey) {
-//         match self.lives.remove(request_key) {
-//             Some((_, status)) => {
-//                 tracing::info!("MemoryPool.remove, {request_key:?}, status: {status:?}");
-//             }
-//             None => {
-//                 tracing::error!("MemoryPool.remove: request key not found, {request_key:?}");
-//             }
-//         }
-//     }
-
-//     fn get(&self, request_key: &RequestKey) -> Option<(RequestEntity, StatusWithContext)> {
-//         self.lives.get(request_key).cloned()
-//     }
-
-//     fn get_status(&self, request_key: &RequestKey) -> Option<StatusWithContext> {
-//         self.lives
-//             .get(request_key)
-//             .map(|(_, status)| status.clone())
-//     }
-
-//     fn update_status(&mut self, request_key: &RequestKey, status: StatusWithContext) {
-//         match self.lives.remove(request_key) {
-//             Some((entity, old_status)) => {
-//                 tracing::info!(
-//                     "MemoryPool.update_status, {request_key:?}, old status: {old_status:?}, new status: {status:?}"
-//                 );
-//                 self.lives
-//                     .insert(request_key.clone(), (entity.clone(), status.clone()));
-//                 self.trace.push((request_key.clone(), entity, status));
-//             }
-//             None => {
-//                 tracing::error!(
-//                     "MemoryPool.update_status: request key not found, discard it, {request_key:?}"
-//                 );
-//             }
-//         }
-//     }
-// }
-
-// impl PoolWithTrace for MemoryPool {
-//     fn get_all_live(&self) -> Vec<(RequestKey, RequestEntity, StatusWithContext)> {
-//         self.lives
-//             .iter()
-//             .map(|(k, v)| (k.clone(), v.0.clone(), v.1.clone()))
-//             .collect()
-//     }
-
-//     fn get_all_trace(&self) -> Vec<(RequestKey, RequestEntity, StatusWithContext)> {
-//         self.trace.clone()
-//     }
-
-//     fn trace(
-//         &self,
-//         request_key: &RequestKey,
-//     ) -> (
-//         Option<(RequestEntity, StatusWithContext)>,
-//         Vec<(RequestKey, RequestEntity, StatusWithContext)>,
-//     ) {
-//         let live = self.lives.get(request_key).cloned();
-//         let traces = self
-//             .trace
-//             .iter()
-//             .filter(|(k, _, _)| k == request_key)
-//             .cloned()
-//             .collect();
-//         (live, traces)
-//     }
-// }
diff --git a/reqpool/src/mock.rs b/reqpool/src/mock.rs
index f819cd4b..d1c8692a 100644
--- a/reqpool/src/mock.rs
+++ b/reqpool/src/mock.rs
@@ -12,6 +12,9 @@ type GlobalStorage = Mutex<HashMap<String, SingleStorage>>;
 
 lazy_static! {
     // #{redis_url => single_storage}
+    //
+    // We use redis_url to distinguish different redis databases for tests, to prevent
+    // data race problems when running multiple tests.
     static ref GLOBAL_STORAGE: GlobalStorage = Mutex::new(HashMap::new());
 }
 
diff --git a/reqpool/src/redis_pool.rs b/reqpool/src/redis_pool.rs
index e8c3c9b8..5f75ec73 100644
--- a/reqpool/src/redis_pool.rs
+++ b/reqpool/src/redis_pool.rs
@@ -2,11 +2,12 @@ use crate::{
     impl_display_using_json_pretty, proof_key_to_hack_request_key, RedisPoolConfig,
     RequestEntity, RequestKey, StatusWithContext,
 };
+use backoff::{exponential::ExponentialBackoff, SystemClock};
 use raiko_lib::prover::{IdStore, IdWrite, ProofKey, ProverError, ProverResult};
 use raiko_redis_derive::RedisValue;
-#[allow(unused_imports)]
 use redis::{Client, Commands, RedisResult};
 use serde::{Deserialize, Serialize};
+use std::time::Duration;
 
 #[derive(Debug, Clone)]
 pub struct Pool {
@@ -136,19 +137,20 @@ impl Pool {
         Ok(Self { client, config })
     }
 
-    #[cfg(test)]
+    #[cfg(any(test, feature = "enable-mock"))]
     pub(crate) fn conn(&mut self) -> Result<crate::mock::MockRedisConnection, redis::RedisError> {
-        let _ = self.client;
         Ok(crate::mock::MockRedisConnection::new(
             self.config.redis_url.clone(),
         ))
     }
 
-    #[cfg(not(test))]
+    #[cfg(not(any(test, feature = "enable-mock")))]
     fn conn(&mut self) -> Result<redis::Connection, redis::RedisError> {
-        use backoff::{exponential::ExponentialBackoff, SystemClock};
-        use std::time::Duration;
+        self.redis_conn()
+    }
 
+    #[allow(dead_code)]
+    fn redis_conn(&mut self) -> Result<redis::Connection, redis::RedisError> {
         let backoff: ExponentialBackoff<SystemClock> = ExponentialBackoff {
             initial_interval: Duration::from_secs(10),
             max_interval: Duration::from_secs(60),