diff --git a/core/lib/config/src/configs/da_client/eigen.rs b/core/lib/config/src/configs/da_client/eigen.rs
index cd036e407427..b7723e2271a6 100644
--- a/core/lib/config/src/configs/da_client/eigen.rs
+++ b/core/lib/config/src/configs/da_client/eigen.rs
@@ -1,7 +1,7 @@
 use serde::Deserialize;
 use zksync_basic_types::secrets::PrivateKey;
 /// Configuration for the EigenDA remote disperser client.
-#[derive(Clone, Debug, PartialEq, Deserialize, Default)]
+#[derive(Clone, Debug, PartialEq, Deserialize)]
 pub struct EigenConfig {
     /// URL of the Disperser RPC server
     pub disperser_rpc: String,
@@ -24,6 +24,22 @@ pub struct EigenConfig {
     pub chain_id: u64,
 }
 
+impl Default for EigenConfig {
+    fn default() -> Self {
+        Self {
+            disperser_rpc: "https://disperser-holesky.eigenda.xyz:443".to_string(),
+            settlement_layer_confirmation_depth: 0,
+            eigenda_eth_rpc: Some("https://ethereum-holesky-rpc.publicnode.com".to_string()),
+            eigenda_svc_manager_address: "0xD4A7E1Bd8015057293f0D0A557088c286942e84b".to_string(),
+            wait_for_finalization: false,
+            authenticated: false,
+            g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(),
+            g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(),
+            chain_id: 19000,
+        }
+    }
+}
+
 #[derive(Clone, Debug, PartialEq)]
 pub struct EigenSecrets {
     pub private_key: PrivateKey,
diff --git a/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql b/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql
new file mode 100644
index 000000000000..938d2e09de44
--- /dev/null
+++ b/core/lib/dal/migrations/20241812144402_create_index_data_availability.sql
@@ -0,0 +1 @@
+CREATE INDEX idx_blob_id_l1_batch_number ON data_availability (blob_id, l1_batch_number);
diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml
index f12042b12576..efc588faa081 100644
--- a/core/node/da_clients/Cargo.toml
+++ b/core/node/da_clients/Cargo.toml
@@ -62,7 +62,9 @@ ethabi.workspace = true
 rust-kzg-bn254.workspace = true
 ark-bn254.workspace = true
 num-bigint.workspace = true
-serial_test.workspace = true
 zksync_web3_decl.workspace = true
 zksync_eth_client.workspace = true
 url.workspace = true
+
+[dev-dependencies]
+serial_test.workspace = true
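Note on the config change above: `EigenConfig` now ships hand-written defaults that point at EigenDA's public Holesky endpoints instead of deriving empty values. A minimal usage sketch, assuming only the struct and the `zksync_config::EigenConfig` re-export visible elsewhere in this diff; the override values are illustrative, not real endpoints:

```rust
use zksync_config::EigenConfig;

// Hypothetical override: spell out only the fields that differ and let
// struct-update syntax fall back to the Holesky defaults added above.
fn custom_config() -> EigenConfig {
    EigenConfig {
        disperser_rpc: "https://disperser.example.com:443".to_string(), // illustrative
        chain_id: 1,                                                    // illustrative
        ..EigenConfig::default()
    }
}
```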
diff --git a/core/node/da_clients/src/eigen/client.rs b/core/node/da_clients/src/eigen/client.rs
index ef4baa1c2ad0..5baee9475e92 100644
--- a/core/node/da_clients/src/eigen/client.rs
+++ b/core/node/da_clients/src/eigen/client.rs
@@ -13,21 +13,23 @@ use super::sdk::RawEigenClient;
 use crate::utils::to_retriable_da_error;
 
 #[async_trait]
-pub trait GetBlobData: Clone + std::fmt::Debug + Send + Sync {
+pub trait GetBlobData: std::fmt::Debug + Send + Sync {
     async fn get_blob_data(&self, input: &str) -> anyhow::Result<Option<Vec<u8>>>;
+
+    fn clone_boxed(&self) -> Box<dyn GetBlobData>;
 }
 
 /// EigenClient is a client for the Eigen DA service.
 #[derive(Debug, Clone)]
-pub struct EigenClient<T: GetBlobData> {
-    pub(crate) client: Arc<RawEigenClient<T>>,
+pub struct EigenClient {
+    pub(crate) client: Arc<RawEigenClient>,
 }
 
-impl<T: GetBlobData> EigenClient<T> {
+impl EigenClient {
     pub async fn new(
         config: EigenConfig,
         secrets: EigenSecrets,
-        get_blob_data: Box<T>,
+        get_blob_data: Box<dyn GetBlobData>,
     ) -> anyhow::Result<Self> {
         let private_key = SecretKey::from_str(secrets.private_key.0.expose_secret().as_str())
             .map_err(|e| anyhow::anyhow!("Failed to parse private key: {}", e))?;
@@ -40,7 +42,7 @@ impl<T: GetBlobData> EigenClient<T> {
 }
 
 #[async_trait]
-impl<T: GetBlobData> DataAvailabilityClient for EigenClient<T> {
+impl DataAvailabilityClient for EigenClient {
     async fn dispatch_blob(
         &self,
         _: u32, // batch number
@@ -75,6 +77,6 @@ impl<T: GetBlobData> DataAvailabilityClient for EigenClient<T> {
     }
 
     fn blob_size_limit(&self) -> Option<usize> {
-        Some(RawEigenClient::<T>::blob_size_limit())
+        Some(RawEigenClient::blob_size_limit())
     }
 }
diff --git a/core/node/da_clients/src/eigen/client_tests.rs b/core/node/da_clients/src/eigen/client_tests.rs
index 99cd81cf2797..4fbbd5c36761 100644
--- a/core/node/da_clients/src/eigen/client_tests.rs
+++ b/core/node/da_clients/src/eigen/client_tests.rs
@@ -17,7 +17,7 @@ mod tests {
 
     use crate::eigen::{blob_info::BlobInfo, EigenClient, GetBlobData};
 
-    impl<T: GetBlobData> EigenClient<T> {
+    impl EigenClient {
         pub async fn get_blob_data(
             &self,
             blob_id: BlobInfo,
@@ -32,8 +32,8 @@ mod tests {
     const STATUS_QUERY_TIMEOUT: u64 = 1800000; // 30 minutes
     const STATUS_QUERY_INTERVAL: u64 = 5; // 5 ms
 
-    async fn get_blob_info<T: GetBlobData>(
-        client: &EigenClient<T>,
+    async fn get_blob_info(
+        client: &EigenClient,
         result: &DispatchResponse,
     ) -> anyhow::Result<BlobInfo> {
         let blob_info = (|| async {
@@ -62,6 +62,10 @@ mod tests {
         async fn get_blob_data(&self, _input: &'_ str) -> anyhow::Result<Option<Vec<u8>>> {
             Ok(None)
         }
+
+        fn clone_boxed(&self) -> Box<dyn GetBlobData> {
+            Box::new(self.clone())
+        }
     }
 
     #[ignore = "depends on external RPC"]
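The trait change above drops the `Clone` supertrait because `Box<dyn GetBlobData>` requires `GetBlobData` to be object-safe, and `Clone::clone` returns `Self` by value. `clone_boxed` is the usual workaround; a self-contained sketch of the pattern (the async method is omitted, and both the implementor and the extra `Clone for Box<...>` impl are illustrations, not part of this diff):

```rust
use std::fmt::Debug;

// Object-safe clone: the trait hands back a boxed copy instead of `Self`.
trait GetBlobData: Debug + Send + Sync {
    fn clone_boxed(&self) -> Box<dyn GetBlobData>;
}

// Optional companion impl: lets `Box<dyn GetBlobData>` itself be cloned,
// so holders could keep deriving `Clone`.
impl Clone for Box<dyn GetBlobData> {
    fn clone(&self) -> Self {
        self.as_ref().clone_boxed()
    }
}

#[derive(Debug, Clone)]
struct StaticBlobData; // hypothetical implementor

impl GetBlobData for StaticBlobData {
    fn clone_boxed(&self) -> Box<dyn GetBlobData> {
        Box::new(self.clone())
    }
}
```

The diff takes the other route below: `RawEigenClient` loses `derive(Clone)` and hand-writes a `Clone` impl that calls `clone_boxed` on its boxed field.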
diff --git a/core/node/da_clients/src/eigen/sdk.rs b/core/node/da_clients/src/eigen/sdk.rs
index c1e52e42ffc9..3a3b1202690c 100644
--- a/core/node/da_clients/src/eigen/sdk.rs
+++ b/core/node/da_clients/src/eigen/sdk.rs
@@ -7,6 +7,7 @@ use tonic::{
     transport::{Channel, ClientTlsConfig, Endpoint},
     Streaming,
 };
+use url::Url;
 use zksync_config::EigenConfig;
 use zksync_da_client::types::DAError;
 use zksync_eth_client::clients::PKSigningClient;
@@ -30,24 +31,36 @@ use crate::eigen::{
     verifier::VerificationError,
 };
 
-#[derive(Debug, Clone)]
-pub(crate) struct RawEigenClient<T: GetBlobData> {
+#[derive(Debug)]
+pub(crate) struct RawEigenClient {
     client: Arc<Mutex<DisperserClient<Channel>>>,
     private_key: SecretKey,
     pub config: EigenConfig,
    verifier: Verifier,
-    get_blob_data: Box<T>,
+    get_blob_data: Box<dyn GetBlobData>,
+}
+
+impl Clone for RawEigenClient {
+    fn clone(&self) -> Self {
+        Self {
+            client: self.client.clone(),
+            private_key: self.private_key,
+            config: self.config.clone(),
+            verifier: self.verifier.clone(),
+            get_blob_data: self.get_blob_data.clone_boxed(),
+        }
+    }
 }
 
 pub(crate) const DATA_CHUNK_SIZE: usize = 32;
 
-impl<T: GetBlobData> RawEigenClient<T> {
+impl RawEigenClient {
     const BLOB_SIZE_LIMIT: usize = 1024 * 1024 * 2; // 2 MB
 
     pub async fn new(
         private_key: SecretKey,
         config: EigenConfig,
-        get_blob_data: Box<T>,
+        get_blob_data: Box<dyn GetBlobData>,
     ) -> anyhow::Result<Self> {
         let endpoint =
             Endpoint::from_str(config.disperser_rpc.as_str())?.tls_config(ClientTlsConfig::new())?;
@@ -60,8 +73,8 @@ impl<T: GetBlobData> RawEigenClient<T> {
             .ok_or(anyhow::anyhow!("EigenDA ETH RPC not set"))?,
             svc_manager_addr: Address::from_str(&config.eigenda_svc_manager_address)?,
             max_blob_size: Self::BLOB_SIZE_LIMIT as u32,
-            g1_url: config.g1_url.clone(),
-            g2_url: config.g2_url.clone(),
+            g1_url: Url::parse(&config.g1_url)?,
+            g2_url: Url::parse(&config.g2_url)?,
             settlement_layer_confirmation_depth: config.settlement_layer_confirmation_depth,
             private_key: hex::encode(private_key.secret_bytes()),
             chain_id: config.chain_id,
diff --git a/core/node/da_clients/src/eigen/verifier.rs b/core/node/da_clients/src/eigen/verifier.rs
index 1e0aeab06a6c..b6d196df47bc 100644
--- a/core/node/da_clients/src/eigen/verifier.rs
+++ b/core/node/da_clients/src/eigen/verifier.rs
@@ -1,9 +1,9 @@
-use std::{collections::HashMap, fs::File, io::copy, path::Path};
+use std::{collections::HashMap, path::Path};
 
 use ark_bn254::{Fq, G1Affine};
 use ethabi::{encode, ParamType, Token};
 use rust_kzg_bn254::{blob::Blob, kzg::Kzg, polynomial::PolynomialFormat};
-use tiny_keccak::{Hasher, Keccak};
+use tokio::{fs::File, io::AsyncWriteExt};
 use url::Url;
 use zksync_basic_types::web3::CallRequest;
 use zksync_eth_client::{clients::PKSigningClient, EnrichedClientResult};
@@ -70,8 +70,8 @@ pub struct VerifierConfig {
     pub rpc_url: String,
     pub svc_manager_addr: Address,
     pub max_blob_size: u32,
-    pub g1_url: String,
-    pub g2_url: String,
+    pub g1_url: Url,
+    pub g2_url: Url,
     pub settlement_layer_confirmation_depth: u32,
     pub private_key: String,
     pub chain_id: u64,
@@ -104,8 +104,7 @@ impl Verifier {
     pub const G2POINT: &'static str = "g2.point.powerOf2";
     pub const POINT_SIZE: u32 = 32;
 
-    async fn save_point(url: String, point: String) -> Result<(), VerificationError> {
-        let url = Url::parse(&url).map_err(|_| VerificationError::LinkError)?;
+    async fn save_point(url: Url, point: String) -> Result<(), VerificationError> {
         let response = reqwest::get(url)
             .await
             .map_err(|_| VerificationError::LinkError)?;
@@ -114,17 +113,19 @@ impl Verifier {
         }
         let path = format!("./{}", point);
         let path = Path::new(&path);
-        let mut file = File::create(path).map_err(|_| VerificationError::LinkError)?;
+        let mut file = File::create(path).await.map_err(|_| VerificationError::LinkError)?;
         let content = response
             .bytes()
             .await
             .map_err(|_| VerificationError::LinkError)?;
-        copy(&mut content.as_ref(), &mut file).map_err(|_| VerificationError::LinkError)?;
+        file.write_all(&content)
+            .await
+            .map_err(|_| VerificationError::LinkError)?;
         Ok(())
     }
 
-    async fn save_points(url_g1: String, url_g2: String) -> Result<String, VerificationError> {
-        Self::save_point(url_g1.clone(), Self::G1POINT.to_string()).await?;
-        Self::save_point(url_g2.clone(), Self::G2POINT.to_string()).await?;
+    async fn save_points(url_g1: Url, url_g2: Url) -> Result<String, VerificationError> {
+        Self::save_point(url_g1, Self::G1POINT.to_string()).await?;
+        Self::save_point(url_g2, Self::G2POINT.to_string()).await?;
         Ok(".".to_string())
     }
 
@@ -134,15 +135,20 @@ impl Verifier {
     ) -> Result<Self, VerificationError> {
         let srs_points_to_load = cfg.max_blob_size / Self::POINT_SIZE;
         let path = Self::save_points(cfg.clone().g1_url, cfg.clone().g2_url).await?;
-        let kzg = Kzg::setup(
-            &format!("{}/{}", path, Self::G1POINT),
-            "",
-            &format!("{}/{}", path, Self::G2POINT),
-            Self::SRSORDER,
-            srs_points_to_load,
-            "".to_string(),
-        );
-        let kzg = kzg.map_err(|e| {
+        let kzg_handle = tokio::task::spawn_blocking(move || {
+            Kzg::setup(
+                &format!("{}/{}", path, Self::G1POINT),
+                "",
+                &format!("{}/{}", path, Self::G2POINT),
+                Self::SRSORDER,
+                srs_points_to_load,
+                "".to_string(),
+            )
+        });
+        let kzg = kzg_handle.await.map_err(|e| {
+            tracing::error!("Failed to setup KZG: {:?}", e);
+            VerificationError::KzgError
+        })?.map_err(|e| {
             tracing::error!("Failed to setup KZG: {:?}", e);
             VerificationError::KzgError
         })?;
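`Kzg::setup` reads and processes the downloaded SRS points synchronously, so calling it inline would stall the async runtime; `spawn_blocking` moves it onto Tokio's blocking thread pool at the cost of a second error layer (the `JoinError`). A minimal sketch of that shape, where `expensive_setup` is a stand-in for `Kzg::setup`:

```rust
// Stand-in for a CPU-bound, synchronous call such as Kzg::setup.
fn expensive_setup(points: u64) -> Result<u64, String> {
    Ok((0..points).sum())
}

async fn setup_off_the_runtime() -> anyhow::Result<u64> {
    let handle = tokio::task::spawn_blocking(|| expensive_setup(1 << 16));
    // The first `?` surfaces a JoinError (panic or cancellation); the
    // map_err converts the setup's own error. These are the same two
    // layers handled by the chained map_err calls in the hunk above.
    let value = handle.await?.map_err(|e| anyhow::anyhow!(e))?;
    Ok(value)
}
```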
@@ -185,9 +191,9 @@
         Ok(())
     }
 
-    pub fn hash_encode_blob_header(&self, blob_header: BlobHeader) -> Vec<u8> {
+    pub fn hash_encode_blob_header(&self, blob_header: &BlobHeader) -> Vec<u8> {
         let mut blob_quorums = vec![];
-        for quorum in blob_header.blob_quorum_params {
+        for quorum in &blob_header.blob_quorum_params {
             let quorum = Token::Tuple(vec![
                 Token::Uint(ethabi::Uint::from(quorum.quorum_number)),
                 Token::Uint(ethabi::Uint::from(quorum.adversary_threshold_percentage)),
@@ -206,12 +212,7 @@
         ]);
 
         let encoded = encode(&[blob_header]);
-
-        let mut keccak = Keccak::v256();
-        keccak.update(&encoded);
-        let mut hash = [0u8; 32];
-        keccak.finalize(&mut hash);
-        hash.to_vec()
+        web3::keccak256(&encoded).to_vec()
     }
 
     pub fn process_inclusion_proof(
@@ -226,23 +227,15 @@
         }
         let mut computed_hash = leaf.to_vec();
         for i in 0..proof.len() / 32 {
-            let mut combined = proof[i * 32..(i + 1) * 32]
-                .iter()
-                .chain(computed_hash.iter())
-                .cloned()
-                .collect::<Vec<u8>>();
+            let mut buffer = [0u8; 64];
             if index % 2 == 0 {
-                combined = computed_hash
-                    .iter()
-                    .chain(proof[i * 32..(i + 1) * 32].iter())
-                    .cloned()
-                    .collect::<Vec<u8>>();
-            };
-            let mut keccak = Keccak::v256();
-            keccak.update(&combined);
-            let mut hash = [0u8; 32];
-            keccak.finalize(&mut hash);
-            computed_hash = hash.to_vec();
+                buffer[..32].copy_from_slice(&computed_hash);
+                buffer[32..].copy_from_slice(&proof[i * 32..(i + 1) * 32]);
+            } else {
+                buffer[..32].copy_from_slice(&proof[i * 32..(i + 1) * 32]);
+                buffer[32..].copy_from_slice(&computed_hash);
+            }
+            computed_hash = web3::keccak256(&buffer).to_vec();
             index /= 2;
         }
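`process_inclusion_proof` now assembles each 64-byte parent preimage in a fixed buffer instead of allocating chained iterators. The ordering rule is unchanged: an even index means the current node is a left child, so it is hashed before the sibling; odd means right child, sibling first. A standalone sketch of one folding step, with the hash call injected so the snippet carries no dependency (the diff itself uses the `web3::keccak256` helper):

```rust
// One level of the Merkle walk from the hunk above.
fn fold_level(
    computed: [u8; 32],
    sibling: [u8; 32],
    index: u32,
    keccak: impl Fn(&[u8]) -> [u8; 32],
) -> [u8; 32] {
    let mut buffer = [0u8; 64];
    if index % 2 == 0 {
        // Left child: hash(current || sibling).
        buffer[..32].copy_from_slice(&computed);
        buffer[32..].copy_from_slice(&sibling);
    } else {
        // Right child: hash(sibling || current).
        buffer[..32].copy_from_slice(&sibling);
        buffer[32..].copy_from_slice(&computed);
    }
    keccak(&buffer)
}
```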
@@ -250,26 +243,23 @@
     }
 
     /// Verifies the certificate's batch root
-    pub fn verify_merkle_proof(&self, cert: BlobInfo) -> Result<(), VerificationError> {
-        let inclusion_proof = cert.blob_verification_proof.inclusion_proof;
-        let root = cert
+    pub fn verify_merkle_proof(&self, cert: &BlobInfo) -> Result<(), VerificationError> {
+        let inclusion_proof = &cert.blob_verification_proof.inclusion_proof;
+        let root = &cert
             .blob_verification_proof
             .batch_medatada
             .batch_header
             .batch_root;
         let blob_index = cert.blob_verification_proof.blob_index;
-        let blob_header = cert.blob_header;
+        let blob_header = &cert.blob_header;
 
         let blob_header_hash = self.hash_encode_blob_header(blob_header);
-        let mut keccak = Keccak::v256();
-        keccak.update(&blob_header_hash);
-        let mut leaf_hash = [0u8; 32];
-        keccak.finalize(&mut leaf_hash);
+        let leaf_hash = web3::keccak256(&blob_header_hash).to_vec();
 
         let generated_root =
-            self.process_inclusion_proof(&inclusion_proof, &leaf_hash, blob_index)?;
+            self.process_inclusion_proof(inclusion_proof, &leaf_hash, blob_index)?;
 
-        if generated_root != root {
+        if generated_root != *root {
             return Err(VerificationError::DifferentRoots);
         }
         Ok(())
@@ -277,39 +267,29 @@
 
     fn hash_batch_metadata(
         &self,
-        batch_header: BatchHeader,
-        signatory_record_hash: Vec<u8>,
+        batch_header: &BatchHeader,
+        signatory_record_hash: &[u8],
         confirmation_block_number: u32,
     ) -> Vec<u8> {
         let batch_header_token = Token::Tuple(vec![
-            Token::FixedBytes(batch_header.batch_root),
-            Token::Bytes(batch_header.quorum_numbers),
-            Token::Bytes(batch_header.quorum_signed_percentages),
+            Token::FixedBytes(batch_header.batch_root.clone()), // Clone only where necessary
+            Token::Bytes(batch_header.quorum_numbers.clone()),
+            Token::Bytes(batch_header.quorum_signed_percentages.clone()),
             Token::Uint(ethabi::Uint::from(batch_header.reference_block_number)),
         ]);
 
         let encoded = encode(&[batch_header_token]);
-
-        let mut keccak = Keccak::v256();
-        keccak.update(&encoded);
-        let mut header_hash = [0u8; 32];
-        keccak.finalize(&mut header_hash);
+        let header_hash = web3::keccak256(&encoded).to_vec();
 
         let hash_token = Token::Tuple(vec![
             Token::FixedBytes(header_hash.to_vec()),
-            Token::FixedBytes(signatory_record_hash),
+            Token::FixedBytes(signatory_record_hash.to_owned()), // Clone only if required
         ]);
 
         let mut hash_encoded = encode(&[hash_token]);
 
         hash_encoded.append(&mut confirmation_block_number.to_be_bytes().to_vec());
-
-        let mut keccak = Keccak::v256();
-        keccak.update(&hash_encoded);
-        let mut hash = [0u8; 32];
-        keccak.finalize(&mut hash);
-
-        hash.to_vec()
+        web3::keccak256(&hash_encoded).to_vec()
     }
 
     /// Retrieves the block to make the request to the service manager
@@ -322,15 +302,17 @@
             .map_err(|_| VerificationError::ServiceManagerError)?
             .as_u64();
 
-        if self.cfg.settlement_layer_confirmation_depth == 0 {
-            return Ok(latest);
-        }
-        Ok(latest - (self.cfg.settlement_layer_confirmation_depth as u64 - 1))
+        let depth = self
+            .cfg
+            .settlement_layer_confirmation_depth
+            .saturating_sub(1);
+        let block_to_return = latest.saturating_sub(depth as u64);
+        Ok(block_to_return)
     }
 
     async fn call_batch_id_to_metadata_hash(
         &self,
-        blob_info: BlobInfo,
+        blob_info: &BlobInfo,
     ) -> Result<Vec<u8>, VerificationError> {
         let context_block = self.get_context_block().await?;
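In `get_context_block`, the special case for a confirmation depth of zero collapses into saturating arithmetic: depths 0 and 1 both resolve to the latest block, and the subtraction can no longer underflow when the chain is shorter than the configured depth. A sketch spelling out the equivalence:

```rust
// Pure-function equivalent of the new arithmetic in get_context_block.
fn context_block(latest: u64, confirmation_depth: u32) -> u64 {
    let depth = confirmation_depth.saturating_sub(1);
    latest.saturating_sub(depth as u64)
}

#[test]
fn matches_the_old_branchy_version() {
    assert_eq!(context_block(100, 0), 100); // old code: early return
    assert_eq!(context_block(100, 1), 100); // old code: 100 - (1 - 1)
    assert_eq!(context_block(100, 5), 96);  // old code: 100 - (5 - 1)
    assert_eq!(context_block(2, 10), 0);    // old code would underflow
}
```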
@@ -361,21 +343,19 @@
     }
 
     /// Verifies the certificate batch hash
-    pub async fn verify_batch(&self, blob_info: BlobInfo) -> Result<(), VerificationError> {
-        let expected_hash = self
-            .call_batch_id_to_metadata_hash(blob_info.clone())
-            .await?;
+    pub async fn verify_batch(&self, blob_info: &BlobInfo) -> Result<(), VerificationError> {
+        let expected_hash = self.call_batch_id_to_metadata_hash(blob_info).await?;
 
         if expected_hash == vec![0u8; 32] {
             return Err(VerificationError::EmptyHash);
         }
 
         let actual_hash = self.hash_batch_metadata(
-            blob_info
+            &blob_info
                 .blob_verification_proof
                 .batch_medatada
                 .batch_header,
-            blob_info
+            &blob_info
                 .blob_verification_proof
                 .batch_medatada
                 .signatory_record_hash,
@@ -391,47 +371,17 @@
         Ok(())
     }
 
-    fn decode_bytes(&self, encoded: Vec<u8>) -> Result<Vec<u8>, String> {
-        // Ensure the input has at least 64 bytes (offset + length)
-        if encoded.len() < 64 {
-            return Err("Encoded data is too short".to_string());
-        }
-
-        // Read the offset (first 32 bytes)
-        let offset = {
-            let mut offset_bytes = [0u8; 32];
-            offset_bytes.copy_from_slice(&encoded[0..32]);
-            usize::from_be_bytes(
-                offset_bytes[24..32]
-                    .try_into()
-                    .map_err(|_| "Offset is too large")?,
-            )
-        };
-
-        // Check if offset is valid
-        if offset + 32 > encoded.len() {
-            return Err("Offset points outside the encoded data".to_string());
-        }
-
-        // Read the length (32 bytes at the offset position)
-        let length = {
-            let mut length_bytes = [0u8; 32];
-            length_bytes.copy_from_slice(&encoded[offset..offset + 32]);
-            usize::from_be_bytes(
-                length_bytes[24..32]
-                    .try_into()
-                    .map_err(|_| "Offset is too large")?,
-            )
-        };
-
-        // Check if the length is valid
-        if offset + 32 + length > encoded.len() {
-            return Err("Length extends beyond the encoded data".to_string());
+    fn decode_bytes(&self, encoded: Vec<u8>) -> Result<Vec<u8>, VerificationError> {
+        let output_type = [ParamType::Bytes];
+        let tokens: Vec<Token> = ethabi::decode(&output_type, &encoded)
+            .map_err(|_| VerificationError::ServiceManagerError)?;
+        let token = tokens
+            .first()
+            .ok_or(VerificationError::ServiceManagerError)?;
+        match token {
+            Token::Bytes(data) => Ok(data.to_vec()),
+            _ => Err(VerificationError::ServiceManagerError),
         }
-
-        // Extract the bytes data
-        let data = encoded[offset + 32..offset + 32 + length].to_vec();
-        Ok(data)
     }
 
     async fn get_quorum_adversary_threshold(
@@ -454,9 +404,7 @@
             .await
             .map_err(|_| VerificationError::ServiceManagerError)?;
 
-        let percentages = self
-            .decode_bytes(res.0.to_vec())
-            .map_err(|_| VerificationError::ServiceManagerError)?;
+        let percentages = self.decode_bytes(res.0.to_vec())?;
 
         if percentages.len() > quorum_number as usize {
             return Ok(percentages[quorum_number as usize]);
         }
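`decode_bytes` previously re-implemented ABI tail decoding by hand (offset word, length word, bounds checks) and reported errors as `String`s; delegating to `ethabi::decode` removes both. A minimal sketch of decoding a single ABI-encoded `bytes` return value, using the same `ethabi` items this file already imports:

```rust
use ethabi::{ParamType, Token};

// Decode the return data of a call whose signature ends in `returns (bytes)`.
// ethabi validates the offset, length, and padding internally.
fn decode_single_bytes(return_data: &[u8]) -> Result<Vec<u8>, ethabi::Error> {
    let mut tokens = ethabi::decode(&[ParamType::Bytes], return_data)?;
    match tokens.pop() {
        Some(Token::Bytes(data)) => Ok(data),
        _ => Err(ethabi::Error::InvalidData),
    }
}
```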
@@ -481,13 +429,12 @@
             .map_err(|_| VerificationError::ServiceManagerError)?;
 
         self.decode_bytes(res.0.to_vec())
-            .map_err(|_| VerificationError::ServiceManagerError)
     }
 
     /// Verifies that the certificate's blob quorum params are correct
-    pub async fn verify_security_params(&self, cert: BlobInfo) -> Result<(), VerificationError> {
-        let blob_header = cert.blob_header;
-        let batch_header = cert.blob_verification_proof.batch_medatada.batch_header;
+    pub async fn verify_security_params(&self, cert: &BlobInfo) -> Result<(), VerificationError> {
+        let blob_header = &cert.blob_header;
+        let batch_header = &cert.blob_verification_proof.batch_medatada.batch_header;
 
         let mut confirmed_quorums: HashMap<u32, bool> = HashMap::new();
         for i in 0..blob_header.blob_quorum_params.len() {
@@ -536,9 +483,9 @@
         &self,
         cert: BlobInfo,
     ) -> Result<(), VerificationError> {
-        self.verify_batch(cert.clone()).await?;
-        self.verify_merkle_proof(cert.clone())?;
-        self.verify_security_params(cert.clone()).await?;
+        self.verify_batch(&cert).await?;
+        self.verify_merkle_proof(&cert)?;
+        self.verify_security_params(&cert).await?;
         Ok(())
     }
 }
diff --git a/core/node/da_clients/src/eigen/verifier_tests.rs b/core/node/da_clients/src/eigen/verifier_tests.rs
index 2c03b2ba6405..35c78dd8d9d3 100644
--- a/core/node/da_clients/src/eigen/verifier_tests.rs
+++ b/core/node/da_clients/src/eigen/verifier_tests.rs
@@ -2,6 +2,7 @@ mod test {
     use std::{collections::HashMap, str::FromStr};
 
+    use url::Url;
     use zksync_eth_client::{clients::PKSigningClient, EnrichedClientResult};
     use zksync_types::{
         url::SensitiveUrl,
@@ -23,8 +24,8 @@ mod test {
             rpc_url: "https://ethereum-holesky-rpc.publicnode.com".to_string(),
             svc_manager_addr: Address::from_str("0xD4A7E1Bd8015057293f0D0A557088c286942e84b").unwrap(),
             max_blob_size: 2 * 1024 * 1024,
-            g1_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point".to_string(),
-            g2_url: "https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2".to_string(),
+            g1_url: Url::parse("https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g1.point").unwrap(),
+            g2_url: Url::parse("https://github.com/Layr-Labs/eigenda-proxy/raw/2fd70b99ef5bf137d7bbca3461cf9e1f2c899451/resources/g2.point.powerOf2").unwrap(),
             settlement_layer_confirmation_depth: 0,
             private_key: "0xd08aa7ae1bb5ddd46c3c2d8cdb5894ab9f54dec467233686ca42629e826ac4c6"
                 .to_string(),
@@ -210,7 +211,7 @@ mod test {
                 quorum_indexes: vec![0, 1],
             },
         };
-        let result = verifier.verify_merkle_proof(cert);
+        let result = verifier.verify_merkle_proof(&cert);
         assert!(result.is_ok());
     }
 
@@ -293,7 +294,7 @@ mod test {
                 quorum_indexes: vec![0, 1],
             },
         };
-        let result = verifier.verify_merkle_proof(cert);
+        let result = verifier.verify_merkle_proof(&cert);
         assert!(result.is_ok());
     }
 
@@ -330,7 +331,7 @@ mod test {
                 },
             ],
         };
-        let result = verifier.hash_encode_blob_header(blob_header);
+        let result = verifier.hash_encode_blob_header(&blob_header);
         let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393";
         assert_eq!(result, hex::decode(expected).unwrap());
     }
@@ -369,7 +370,7 @@ mod test {
                 },
             ],
         };
-        let result = verifier.hash_encode_blob_header(blob_header);
+        let result = verifier.hash_encode_blob_header(&blob_header);
         let expected = "ba4675a31c9bf6b2f7abfdcedd34b74645cb7332b35db39bff00ae8516a67393";
         assert_eq!(result, hex::decode(expected).unwrap());
     }
@@ -493,7 +494,7 @@ mod test {
                 quorum_indexes: vec![0, 1],
             },
         };
-        let result = verifier.verify_batch(cert).await;
+        let result = verifier.verify_batch(&cert).await;
         assert!(result.is_ok());
     }
 
@@ -601,7 +602,7 @@ mod test {
                 quorum_indexes: vec![0, 1],
             },
         };
-        let result = verifier.verify_batch(cert).await;
+        let result = verifier.verify_batch(&cert).await;
         assert!(result.is_ok());
     }
 
@@ -683,7 +684,7 @@ mod test {
                 quorum_indexes: vec![0, 1],
             },
         };
-        let result = verifier.verify_security_params(cert).await;
+        let result = verifier.verify_security_params(&cert).await;
         assert!(result.is_ok());
     }
 
@@ -808,7 +809,7 @@ mod test {
                 quorum_indexes: vec![0, 1],
             },
         };
-        let result = verifier.verify_security_params(cert).await;
+        let result = verifier.verify_security_params(&cert).await;
         assert!(result.is_ok());
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs
index ce3a4d52bdc4..515c2bb3d834 100644
--- a/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs
+++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen.rs
@@ -66,15 +66,16 @@ pub struct GetBlobFromDB {
 
 #[async_trait::async_trait]
 impl GetBlobData for GetBlobFromDB {
-    async fn get_blob_data(&self, input: &'_ str) -> anyhow::Result<Option<Vec<u8>>> {
-        let pool = self.pool.clone();
-        let input = input.to_string();
-        let mut conn = pool.connection_tagged("eigen_client").await?;
+    async fn get_blob_data(&self, input: &str) -> anyhow::Result<Option<Vec<u8>>> {
+        let mut conn = self.pool.connection_tagged("eigen_client").await?;
         let batch = conn
             .data_availability_dal()
             .get_blob_data_by_blob_id(input)
             .await?;
-        drop(conn);
         Ok(batch.map(|b| b.pubdata))
     }
+
+    fn clone_boxed(&self) -> Box<dyn GetBlobData> {
+        Box::new(self.clone())
+    }
 }
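Taken together, the object-safe `GetBlobData` lets the layer hand any implementor to `EigenClient::new` as a boxed trait object. A hypothetical wiring sketch (the types are the ones introduced in the hunks above; imports and the surrounding layer plumbing are elided and assumed):

```rust
// Hypothetical glue, not part of the diff.
async fn build_da_client(
    config: EigenConfig,   // zksync_config::EigenConfig
    secrets: EigenSecrets, // from the config crate change above
    db: GetBlobFromDB,     // the DB-backed implementor above
) -> anyhow::Result<EigenClient> {
    // Box<GetBlobFromDB> coerces to Box<dyn GetBlobData>; clone_boxed keeps
    // it cloneable inside RawEigenClient's hand-written Clone impl.
    EigenClient::new(config, secrets, Box::new(db)).await
}
```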