diff --git a/Cargo.lock b/Cargo.lock index 36e7025cd55d..189f5d50f430 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -947,14 +947,14 @@ dependencies = [ [[package]] name = "boojum" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#2771569baab9a59690d88cee6ba9b295c8a1e4c4" +source = "git+https://github.com/matter-labs/era-boojum.git?branch=main#84754b066959c8fdfb77edf730fc13ed87404907" dependencies = [ "arrayvec 0.7.3", "bincode", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "const_format", - "convert_case 0.4.0", - "crossbeam 0.7.3", + "convert_case 0.6.0", + "crossbeam 0.8.2", "crypto-bigint 0.5.3", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum.git?branch=main)", "derivative", @@ -969,8 +969,8 @@ dependencies = [ "rand 0.8.5", "rayon", "serde", - "sha2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "sha3 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.10.8", + "sha3 0.10.6", "smallvec", "unroll", ] @@ -1193,12 +1193,10 @@ dependencies = [ [[package]] name = "circuit_definitions" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#1934433af48520ac70dc3080b0d1feba877a07c5" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#43aeb53d7d9c909508a98f9fc140edff0e9d2357" dependencies = [ "crossbeam 0.8.2", "derivative", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper)", - "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2)", "serde", "snark_wrapper", "zk_evm 1.4.0", @@ -1752,7 +1750,7 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#3a21c8dee43c77604350fdf33c1615e25bf1dacd" +source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#dad50e7eb7462a3819af8d5209d6ca243395bf51" dependencies = [ "proc-macro-error", "proc-macro2 1.0.66", @@ -2082,7 +2080,7 @@ dependencies = [ "regex", "serde", "serde_json", - "sha3 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha3 0.10.8", "thiserror", "uint", ] @@ -2663,7 +2661,7 @@ dependencies = [ "rsa", "serde", "serde_json", - "sha2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.10.8", "thiserror", "time", "tokio", @@ -3532,7 +3530,7 @@ dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sha2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.10.8", ] [[package]] @@ -3999,7 +3997,7 @@ dependencies = [ "tracing", "vise", "zk_evm 1.3.1", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", "zksync_contracts", "zksync_eth_signer", "zksync_state", @@ -4681,7 +4679,7 @@ checksum = "745a452f8eb71e39ffd8ee32b3c5f51d03845f99786fa9b68db6ff509c505411" dependencies = [ "once_cell", "pest", - "sha2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.10.8", ] [[package]] @@ -5365,12 +5363,11 @@ dependencies = [ [[package]] name = "rescue_poseidon" version = "0.4.1" -source = "git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2#09b96e7e82dadac151d8d681f017cb6a16961801" +source = "git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2#c4a788471710bdb7aa0f59e8756b45ef93cdd2b2" dependencies = [ 
"addchain", "arrayvec 0.7.3", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "boojum", "byteorder", "derivative", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper)", @@ -5995,8 +5992,7 @@ dependencies = [ [[package]] name = "sha2" version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +source = "git+https://github.com/RustCrypto/hashes.git?rev=1731ced4a116d61ba9dc6ee6d0f38fb8102e357a#1731ced4a116d61ba9dc6ee6d0f38fb8102e357a" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -6005,8 +6001,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.6" -source = "git+https://github.com/RustCrypto/hashes.git?rev=1731ced4a116d61ba9dc6ee6d0f38fb8102e357a#1731ced4a116d61ba9dc6ee6d0f38fb8102e357a" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -6028,8 +6025,7 @@ dependencies = [ [[package]] name = "sha3" version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +source = "git+https://github.com/RustCrypto/hashes.git?rev=7a187e934c1f6c68e4b4e5cf37541b7a0d64d303#7a187e934c1f6c68e4b4e5cf37541b7a0d64d303" dependencies = [ "digest 0.10.7", "keccak", @@ -6037,8 +6033,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.6" -source = "git+https://github.com/RustCrypto/hashes.git?rev=7a187e934c1f6c68e4b4e5cf37541b7a0d64d303#7a187e934c1f6c68e4b4e5cf37541b7a0d64d303" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest 0.10.7", "keccak", @@ -6268,7 +6265,7 @@ dependencies = [ "serde", "serde_json", "sha-1 0.10.1", - "sha2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.10.8", "smallvec", "sqlformat", "sqlx-rt", @@ -6294,7 +6291,7 @@ dependencies = [ "quote 1.0.33", "serde", "serde_json", - "sha2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.10.8", "sqlx-core", "sqlx-rt", "syn 1.0.109", @@ -6445,7 +6442,7 @@ dependencies = [ [[package]] name = "sync_vm" version = "1.3.3" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#3a21c8dee43c77604350fdf33c1615e25bf1dacd" +source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#dad50e7eb7462a3819af8d5209d6ca243395bf51" dependencies = [ "arrayvec 0.7.3", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3)", @@ -6461,8 +6458,8 @@ dependencies = [ "rand 0.4.6", "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon)", "serde", - "sha2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "sha3 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.10.8", + "sha3 0.10.6", "smallvec", "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", "zkevm_opcode_defs 1.3.2", @@ -7214,7 +7211,7 @@ version = "0.1.0" dependencies = [ "multivm", "once_cell", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", "zksync_contracts", "zksync_state", 
"zksync_system_constants", @@ -7685,8 +7682,8 @@ dependencies = [ "num 0.4.0", "serde", "serde_json", - "sha2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1731ced4a116d61ba9dc6ee6d0f38fb8102e357a)", - "sha3 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=7a187e934c1f6c68e4b4e5cf37541b7a0d64d303)", + "sha2 0.10.6", + "sha3 0.10.6", "static_assertions", "zkevm_opcode_defs 1.3.1", ] @@ -7694,7 +7691,7 @@ dependencies = [ [[package]] name = "zk_evm" version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1#fe8215a7047d24430ad470cf15a19bedb4d6ba0b" +source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2#fbee20f5bac7d6ca3e22ae69b2077c510a07de4e" dependencies = [ "anyhow", "lazy_static", @@ -7709,7 +7706,7 @@ dependencies = [ [[package]] name = "zk_evm" version = "1.3.3" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#fe8215a7047d24430ad470cf15a19bedb4d6ba0b" +source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3#fbee20f5bac7d6ca3e22ae69b2077c510a07de4e" dependencies = [ "anyhow", "lazy_static", @@ -7724,7 +7721,7 @@ dependencies = [ [[package]] name = "zk_evm" version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.0#e33a5ded1b53e35d261fdb46e6d16f2c900b217f" +source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.0#dd76fc5badf2c05278a21b38015a7798fe2fe358" dependencies = [ "anyhow", "lazy_static", @@ -7759,7 +7756,7 @@ dependencies = [ "nom", "num-bigint 0.4.3", "num-traits", - "sha3 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha3 0.10.8", "smallvec", "structopt", "thiserror", @@ -7794,21 +7791,21 @@ dependencies = [ "bitflags 1.3.2", "ethereum-types 0.14.1", "lazy_static", - "sha2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.10.8", ] [[package]] name = "zkevm_opcode_defs" version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#c7ab62f4c60b27dfc690c3ab3efb5fff1ded1a25" +source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#dffacadeccdfdbff4bc124d44c595c4a6eae5013" dependencies = [ "bitflags 2.3.2", - "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", "ethereum-types 0.14.1", "k256", "lazy_static", - "sha2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", - "sha3 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sha2 0.10.6", + "sha3 0.10.6", ] [[package]] @@ -7841,7 +7838,7 @@ dependencies = [ [[package]] name = "zkevm_test_harness" version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#1934433af48520ac70dc3080b0d1feba877a07c5" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#43aeb53d7d9c909508a98f9fc140edff0e9d2357" dependencies = [ "bincode", "circuit_definitions", @@ -7852,11 +7849,9 @@ dependencies = [ "hex", "rand 0.4.6", "rayon", - "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2)", "serde", "serde_json", "smallvec", - "snark_wrapper", "structopt", "test-log", "tracing", @@ -8199,6 +8194,7 @@ dependencies = [ "tempfile", "thiserror", "tracing", + "tracing-subscriber", "vise", "zksync_crypto", "zksync_storage", @@ -8364,7 +8360,7 @@ dependencies = [ "strum", "thiserror", "tokio", - "zk_evm 
1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", "zkevm_test_harness 1.3.3", "zksync_basic_types", "zksync_contracts", @@ -8391,7 +8387,7 @@ dependencies = [ "tokio", "tracing", "vlog", - "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc2)", "zksync_basic_types", ] diff --git a/README.md b/README.md index 3edcdaa2df83..7eeba740c6b7 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ The following questions will be answered by the following resources: ## Policies -- [Security policy](.github/SECURITY.md) +- [Security policy](SECURITY.md) - [Contribution policy](CONTRIBUTING.md) ## License diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 9fcaf037055c..65ce8d073cef 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -191,6 +191,11 @@ pub struct OptionalENConfig { #[serde(default = "OptionalENConfig::default_merkle_tree_block_cache_size_mb")] merkle_tree_block_cache_size_mb: usize, + /// Byte capacity of memtables (recent, non-persisted changes to RocksDB). Setting this to a reasonably + /// large value (order of 512 MiB) is helpful for large DBs that experience write stalls. + #[serde(default = "OptionalENConfig::default_merkle_tree_memtable_capacity_mb")] + merkle_tree_memtable_capacity_mb: usize, + // Other config settings /// Port on which the Prometheus exporter server is listening. pub prometheus_port: Option, @@ -274,6 +279,10 @@ impl OptionalENConfig { 128 } + const fn default_merkle_tree_memtable_capacity_mb() -> usize { + 256 + } + const fn default_fee_history_limit() -> u64 { 1_024 } @@ -318,6 +327,11 @@ impl OptionalENConfig { self.merkle_tree_block_cache_size_mb * BYTES_IN_MEGABYTE } + /// Returns the memtable capacity for Merkle tree in bytes. 
+ pub fn merkle_tree_memtable_capacity(&self) -> usize { + self.merkle_tree_memtable_capacity_mb * BYTES_IN_MEGABYTE + } + pub fn api_namespaces(&self) -> Vec { self.api_namespaces .clone() diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 7f59f856ae9d..a3a0b855fb21 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -21,13 +21,11 @@ use zksync_core::{ }, reorg_detector::ReorgDetector, setup_sigint_handler, - state_keeper::{ - L1BatchExecutorBuilder, MainBatchExecutorBuilder, SealManager, ZkSyncStateKeeper, - }, + state_keeper::{L1BatchExecutorBuilder, MainBatchExecutorBuilder, ZkSyncStateKeeper}, sync_layer::{ batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, - fetcher::MainNodeFetcher, genesis::perform_genesis_if_needed, ActionQueue, - ExternalNodeSealer, SyncState, + fetcher::MainNodeFetcherCursor, genesis::perform_genesis_if_needed, ActionQueue, + MainNodeClient, SyncState, }, }; use zksync_dal::{connection::DbVariant, healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; @@ -52,14 +50,6 @@ async fn build_state_keeper( stop_receiver: watch::Receiver, chain_id: L2ChainId, ) -> ZkSyncStateKeeper { - let en_sealer = ExternalNodeSealer::new(action_queue.clone()); - let main_node_url = config.required.main_node_url().unwrap(); - let sealer = SealManager::custom( - None, - vec![en_sealer.clone().into_unconditional_batch_seal_criterion()], - vec![en_sealer.into_miniblock_seal_criterion()], - ); - // These config values are used on the main node, and depending on these values certain transactions can // be *rejected* (that is, not included into the block). However, external node only mirrors what the main // node has already executed, so we can safely set these values to the maximum possible values - if the main @@ -79,22 +69,22 @@ async fn build_state_keeper( config.optional.enum_index_migration_chunk_size, )); - let io = Box::new( - ExternalIO::new( - connection_pool, - action_queue, - sync_state, - main_node_url, - l2_erc20_bridge_addr, - validation_computational_gas_limit, - chain_id, - ) - .await, - ); - + let main_node_url = config.required.main_node_url().unwrap(); + let main_node_client = ::json_rpc(&main_node_url) + .expect("Failed creating JSON-RPC client for main node"); + let io = ExternalIO::new( + connection_pool, + action_queue, + sync_state, + Box::new(main_node_client), + l2_erc20_bridge_addr, + validation_computational_gas_limit, + chain_id, + ) + .await; io.recalculate_miniblock_hashes().await; - ZkSyncStateKeeper::new(stop_receiver, io, batch_executor_base, sealer) + ZkSyncStateKeeper::without_sealer(stop_receiver, Box::new(io), batch_executor_base) } async fn init_tasks( @@ -109,15 +99,15 @@ async fn init_tasks( .required .main_node_url() .expect("Main node URL is incorrect"); - let (stop_sender, stop_receiver) = watch::channel::(false); + let (stop_sender, stop_receiver) = watch::channel(false); let mut healthchecks: Vec> = Vec::new(); // Create components. 
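// Editor's note on the hunk below: `ActionQueue::new()` now returns a
// (sender, receiver) pair, much like an mpsc channel. `action_queue_sender`
// goes to the main-node fetcher (see `into_fetcher(...)` further down), while
// `action_queue` is consumed by the state keeper's `ExternalIO` inside
// `build_state_keeper`.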
let gas_adjuster = Arc::new(MainNodeGasPriceFetcher::new(&main_node_url)); let sync_state = SyncState::new(); - let action_queue = ActionQueue::new(); + let (action_queue_sender, action_queue) = ActionQueue::new(); let state_keeper = build_state_keeper( - action_queue.clone(), + action_queue, config.required.state_cache_path.clone(), &config, connection_pool.clone(), @@ -128,18 +118,25 @@ async fn init_tasks( ) .await; + let main_node_client = ::json_rpc(&main_node_url) + .context("Failed creating JSON-RPC client for main node")?; let singleton_pool_builder = ConnectionPool::singleton(DbVariant::Master); - let fetcher = MainNodeFetcher::new( - singleton_pool_builder + let fetcher_cursor = { + let pool = singleton_pool_builder .build() .await - .context("failed to build a connection pool for MainNodeFetcher")?, - &main_node_url, - action_queue.clone(), + .context("failed to build a connection pool for `MainNodeFetcher`")?; + let mut storage = pool.access_storage_tagged("sync_layer").await?; + MainNodeFetcherCursor::new(&mut storage) + .await + .context("failed to load `MainNodeFetcher` cursor from Postgres")? + }; + let fetcher = fetcher_cursor.into_fetcher( + Box::new(main_node_client), + action_queue_sender, sync_state.clone(), stop_receiver.clone(), - ) - .await; + ); let metadata_calculator = MetadataCalculator::new(&MetadataCalculatorConfig { db_path: &config.required.merkle_tree_path, @@ -148,6 +145,7 @@ async fn init_tasks( max_l1_batches_per_iter: config.optional.max_l1_batches_per_tree_iter, multi_get_chunk_size: config.optional.merkle_tree_multi_get_chunk_size, block_cache_capacity: config.optional.merkle_tree_block_cache_size(), + memtable_capacity: config.optional.merkle_tree_memtable_capacity(), }) .await; healthchecks.push(Box::new(metadata_calculator.tree_health_check())); @@ -236,7 +234,7 @@ async fn init_tasks( (tx_sender, vm_barrier, cache_update_handle) }; - let (http_api_handle, http_api_healthcheck) = + let http_server_handles = ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool.clone()) .http(config.required.http_port) .with_filter_limit(config.optional.filters_limit) @@ -247,9 +245,10 @@ async fn init_tasks( .with_sync_state(sync_state.clone()) .enable_api_namespaces(config.optional.api_namespaces()) .build(stop_receiver.clone()) - .await; + .await + .context("Failed initializing HTTP JSON-RPC server")?; - let (mut task_handles, ws_api_healthcheck) = + let ws_server_handles = ApiBuilder::jsonrpc_backend(config.clone().into(), connection_pool.clone()) .ws(config.required.ws_port) .with_filter_limit(config.optional.filters_limit) @@ -262,21 +261,24 @@ async fn init_tasks( .with_sync_state(sync_state) .enable_api_namespaces(config.optional.api_namespaces()) .build(stop_receiver.clone()) - .await; + .await + .context("Failed initializing WS JSON-RPC server")?; - healthchecks.push(Box::new(ws_api_healthcheck)); - healthchecks.push(Box::new(http_api_healthcheck)); + healthchecks.push(Box::new(ws_server_handles.health_check)); + healthchecks.push(Box::new(http_server_handles.health_check)); healthchecks.push(Box::new(ConnectionPoolHealthCheck::new(connection_pool))); let healthcheck_handle = HealthCheckHandle::spawn_server( ([0, 0, 0, 0], config.required.healthcheck_port).into(), healthchecks, ); + + let mut task_handles = vec![]; if let Some(port) = config.optional.prometheus_port { let prometheus_task = PrometheusExporterConfig::pull(port).run(stop_receiver.clone()); task_handles.push(tokio::spawn(prometheus_task)); } - - 
task_handles.extend(http_api_handle); + task_handles.extend(http_server_handles.tasks); + task_handles.extend(ws_server_handles.tasks); task_handles.extend(cache_update_handle); task_handles.extend([ sk_handle, @@ -389,10 +391,12 @@ async fn main() -> anyhow::Result<()> { tracing::info!("Main node URL is: {}", main_node_url); // Make sure that genesis is performed. + let main_node_client = <dyn MainNodeClient>::json_rpc(&main_node_url) + .context("Failed creating JSON-RPC client for main node")?; perform_genesis_if_needed( &mut connection_pool.access_storage().await.unwrap(), config.remote.l2_chain_id, - main_node_url.clone(), + &main_node_client, ) .await .context("Performing genesis failed")?; diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index ddc26f7bf35c..21559ba2adb2 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -34,6 +34,8 @@ pub struct ContractsConfig { pub fri_recursion_scheduler_level_vk_hash: H256, pub fri_recursion_node_level_vk_hash: H256, pub fri_recursion_leaf_level_vk_hash: H256, + pub governance_addr: Option<Address>
, + pub snark_wrapper_vk_hash: H256, } impl ContractsConfig { @@ -93,6 +95,10 @@ mod tests { fri_recursion_leaf_level_vk_hash: hash( "0x72167c43a46cf38875b267d67716edc4563861364a3c03ab7aee73498421e828", ), + governance_addr: None, + snark_wrapper_vk_hash: hash( + "0x4be443afd605a782b6e56d199df2460a025c81b3dea144e135bece83612563f2", + ), } } @@ -126,7 +132,7 @@ CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" CONTRACTS_FRI_RECURSION_SCHEDULER_LEVEL_VK_HASH="0x201d4c7d8e781d51a3bbd451a43a8f45240bb765b565ae6ce69192d918c3563d" CONTRACTS_FRI_RECURSION_NODE_LEVEL_VK_HASH="0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080" CONTRACTS_FRI_RECURSION_LEAF_LEVEL_VK_HASH="0x72167c43a46cf38875b267d67716edc4563861364a3c03ab7aee73498421e828" - +CONTRACTS_SNARK_WRAPPER_VK_HASH="0x4be443afd605a782b6e56d199df2460a025c81b3dea144e135bece83612563f2" "#; lock.set_env(config); diff --git a/core/lib/config/src/configs/database.rs b/core/lib/config/src/configs/database.rs index de0a59545dc7..142d862b52f3 100644 --- a/core/lib/config/src/configs/database.rs +++ b/core/lib/config/src/configs/database.rs @@ -38,6 +38,10 @@ pub struct MerkleTreeConfig { /// The default value is 128 MB. #[serde(default = "MerkleTreeConfig::default_block_cache_size_mb")] pub block_cache_size_mb: usize, + /// Byte capacity of memtables (recent, non-persisted changes to RocksDB). Setting this to a reasonably + /// large value (order of 512 MiB) is helpful for large DBs that experience write stalls. + #[serde(default = "MerkleTreeConfig::default_memtable_capacity_mb")] + pub memtable_capacity_mb: usize, /// Maximum number of L1 batches to be processed by the Merkle tree at a time. #[serde(default = "MerkleTreeConfig::default_max_l1_batches_per_iter")] pub max_l1_batches_per_iter: usize, @@ -51,6 +55,7 @@ impl Default for MerkleTreeConfig { mode: MerkleTreeMode::default(), multi_get_chunk_size: Self::default_multi_get_chunk_size(), block_cache_size_mb: Self::default_block_cache_size_mb(), + memtable_capacity_mb: Self::default_memtable_capacity_mb(), max_l1_batches_per_iter: Self::default_max_l1_batches_per_iter(), } } @@ -73,6 +78,10 @@ impl MerkleTreeConfig { 128 } + const fn default_memtable_capacity_mb() -> usize { + 256 + } + const fn default_max_l1_batches_per_iter() -> usize { 20 } @@ -81,6 +90,11 @@ impl MerkleTreeConfig { pub fn block_cache_size(&self) -> usize { self.block_cache_size_mb * super::BYTES_IN_MEGABYTE } + + /// Returns the memtable capacity in bytes. + pub fn memtable_capacity(&self) -> usize { + self.memtable_capacity_mb * super::BYTES_IN_MEGABYTE + } } /// Database configuration. 
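A note on the `memtable_capacity_mb` knob introduced above (in both `OptionalENConfig` and `MerkleTreeConfig`): it is stored as a plain megabyte count and converted to bytes before being handed to RocksDB. A minimal sketch of the conversion, assuming `BYTES_IN_MEGABYTE` is the binary megabyte constant used by the config crate:

// Assumed definition; the config crate's actual constant may differ.
const BYTES_IN_MEGABYTE: usize = 1_024 * 1_024;

// Mirrors `MerkleTreeConfig::memtable_capacity()` from the diff above.
fn memtable_capacity(memtable_capacity_mb: usize) -> usize {
    memtable_capacity_mb * BYTES_IN_MEGABYTE
}

// The default of 256 MB yields 268_435_456 bytes; raising the setting to the
// recommended order of 512 MiB doubles the memtable budget for large DBs that
// experience write stalls.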
diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 1fb12da55312..cdc5b8b0e6aa 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -24,6 +24,8 @@ pub enum ContractLanguage { Yul, } +const GOVERNANCE_CONTRACT_FILE: &str = + "contracts/ethereum/artifacts/cache/solpp-generated-contracts/governance/IGovernance.sol/IGovernance.json"; const ZKSYNC_CONTRACT_FILE: &str = "contracts/ethereum/artifacts/cache/solpp-generated-contracts/zksync/interfaces/IZkSync.sol/IZkSync.json"; const MULTICALL3_CONTRACT_FILE: &str = @@ -50,9 +52,19 @@ fn read_file_to_json_value(path: impl AsRef) -> serde_json::Value { .unwrap_or_else(|e| panic!("Failed to parse file {:?}: {}", path, e)) } +pub fn load_contract_if_present + std::fmt::Debug>(path: P) -> Option { + let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); + let path = Path::new(&zksync_home).join(path); + path.exists().then(|| { + serde_json::from_value(read_file_to_json_value(&path)["abi"].take()) + .unwrap_or_else(|e| panic!("Failed to parse contract abi from file {:?}: {}", path, e)) + }) +} + pub fn load_contract + std::fmt::Debug>(path: P) -> Contract { - serde_json::from_value(read_file_to_json_value(&path)["abi"].take()) - .unwrap_or_else(|e| panic!("Failed to parse contract abi from file {:?}: {}", path, e)) + load_contract_if_present(&path).unwrap_or_else(|| { + panic!("Failed to load contract from {:?}", path); + }) } pub fn load_sys_contract(contract_name: &str) -> Contract { @@ -69,6 +81,10 @@ pub fn read_contract_abi(path: impl AsRef) -> String { .to_string() } +pub fn governance_contract() -> Option { + load_contract_if_present(GOVERNANCE_CONTRACT_FILE) +} + pub fn zksync_contract() -> Contract { load_contract(ZKSYNC_CONTRACT_FILE) } diff --git a/core/lib/dal/migrations/20231019125310_storage-refunds.down.sql b/core/lib/dal/migrations/20231019125310_storage-refunds.down.sql new file mode 100644 index 000000000000..96c849f3db45 --- /dev/null +++ b/core/lib/dal/migrations/20231019125310_storage-refunds.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + DROP COLUMN IF EXISTS storage_refunds; diff --git a/core/lib/dal/migrations/20231019125310_storage-refunds.up.sql b/core/lib/dal/migrations/20231019125310_storage-refunds.up.sql new file mode 100644 index 000000000000..569c9c777470 --- /dev/null +++ b/core/lib/dal/migrations/20231019125310_storage-refunds.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + ADD COLUMN IF NOT EXISTS storage_refunds BIGINT[]; diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 9e67a3740029..6b47021e2f01 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -2712,38 +2712,6 @@ }, "query": "INSERT INTO eth_txs_history\n (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at, confirmed_at)\n VALUES ($1, 0, 0, $2, '\\x00', now(), now(), $3)\n RETURNING id" }, - "393345441797999e9f11b8b5ddce0b64356e1e167056d7f76ef6dfffd3534607": { - "describe": { - "columns": [ - { - "name": "name!", - "ordinal": 0, - "type_info": "Varchar" - }, - { - "name": "symbol!", - "ordinal": 1, - "type_info": "Varchar" - }, - { - "name": "decimals!", - "ordinal": 2, - "type_info": "Int4" - } - ], - "nullable": [ - null, - null, - null - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "\n SELECT\n COALESCE(token_list_name, name) as \"name!\",\n COALESCE(token_list_symbol, symbol) as \"symbol!\",\n COALESCE(token_list_decimals, decimals) as \"decimals!\"\n 
FROM tokens WHERE l2_address = $1\n " - }, "394bbd64939d47fda4e1545e2752b208901e872b7234a5c3af456bdf429a6074": { "describe": { "columns": [ @@ -3003,20 +2971,6 @@ }, "query": "DELETE FROM events WHERE miniblock_number > $1" }, - "3de5668eca2211f9701304e374100d45b359b1f7832d4a30b325fa679012c3e7": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Numeric", - "Timestamp" - ] - } - }, - "query": "UPDATE tokens SET market_volume = $2, market_volume_updated_at = $3, updated_at = now() WHERE l1_address = $1" - }, "3f6332706376ef4cadda96498872429b6ed28eca5402b03b1aa3b77b8262bccd": { "describe": { "columns": [], @@ -5446,38 +5400,6 @@ }, "query": "\n SELECT l1_batch_number, basic_circuits_blob_url, basic_circuits_inputs_blob_url FROM leaf_aggregation_witness_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND basic_circuits_blob_url is NOT NULL\n AND basic_circuits_inputs_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, - "73f0e672ff1a5e144b3034beb18271f1164e95029998d6750c6a8953f7344db5": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "Bool", - "Bytea", - "ByteaArray", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Numeric", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4" - ] - } - }, - "query": "INSERT INTO l1_batches (number, l1_tx_count, l2_tx_count, timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, created_at, updated_at ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, now(), now())" - }, "741b13b0a4769a30186c650a4a1b24855806a27ccd8d5a50594741842dde44ec": { "describe": { "columns": [ @@ -6422,6 +6344,39 @@ }, "query": "\n SELECT id, circuit_input_blob_url FROM prover_jobs\n WHERE status='successful' AND is_blob_cleaned=FALSE\n AND circuit_input_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, + "892ad2bed255401e020b4cf89c9e43e32c333dc6627e1e2d2535e13b73d1c508": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "status", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "attempts", + "ordinal": 2, + "type_info": "Int2" + } + ], + "nullable": [ + false, + false, + false + ], + "parameters": { + "Left": [ + "Interval", + "Int2" + ] + } + }, + "query": "\n UPDATE prover_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " + }, "8996a1794585dfe0f9c16a11e113831a63d5d944bc8061d7caa25ea33f12b19d": { "describe": { "columns": [ @@ -7691,29 +7646,6 @@ }, "query": "SELECT l1_batch_number FROM initial_writes WHERE hashed_key = $1" }, - "a9b7a880dbde4f7de5a6c2ff4009281527f2d01a547228981af3af2129ffb3f7": { - "describe": { - "columns": [ - { - "name": "count!", - "ordinal": 0, - "type_info": 
"Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [ - "Bytea", - "Numeric", - "Interval", - "Interval" - ] - } - }, - "query": "\n SELECT COUNT(*) as \"count!\" FROM tokens\n WHERE l2_address = $1 AND\n market_volume > $2 AND now() - market_volume_updated_at < $3 AND\n usd_price > 0 AND now() - usd_price_updated_at < $4\n " - }, "a9d96d6774af2637173d471f02995652cd4c131c05fdcb3d0e1644bcd1aa1809": { "describe": { "columns": [ @@ -7985,32 +7917,6 @@ }, "query": "\n UPDATE scheduler_witness_jobs\n SET aggregation_result_coords = $1,\n updated_at = now()\n WHERE l1_batch_number = $2\n " }, - "adc9ad2c944f9dacc28b5bd133aa37d9e8ea99eca1c5dfbeef37cda4b793f434": { - "describe": { - "columns": [ - { - "name": "market_volume", - "ordinal": 0, - "type_info": "Numeric" - }, - { - "name": "market_volume_updated_at", - "ordinal": 1, - "type_info": "Timestamp" - } - ], - "nullable": [ - true, - true - ], - "parameters": { - "Left": [ - "Bytea" - ] - } - }, - "query": "SELECT market_volume, market_volume_updated_at FROM tokens WHERE l2_address = $1" - }, "ae072f51b65d0b5212264be9a34027922e5aedef7e4741517ad8104bf5aa79e9": { "describe": { "columns": [], @@ -9184,6 +9090,26 @@ }, "query": "\n INSERT INTO call_traces (tx_hash, call_trace)\n SELECT u.tx_hash, u.call_trace\n FROM UNNEST($1::bytea[], $2::bytea[])\n AS u(tx_hash, call_trace)\n " }, + "c3724d96ed4e1c31dd575b911b254ed5a4af4d5b6ad1243c812b37ebde0f6090": { + "describe": { + "columns": [ + { + "name": "storage_refunds", + "ordinal": 0, + "type_info": "Int8Array" + } + ], + "nullable": [ + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT storage_refunds FROM l1_batches WHERE number = $1" + }, "c49a6925e9462cc85a6e1cc850f2e147e0a5d990efed56f27792698e6cf9ff0c": { "describe": { "columns": [ @@ -10056,6 +9982,39 @@ }, "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " }, + "dce6cfa8b3f4e3c93864d95a0c746a4df31a601cc22e59eb2c2fd747ecbb7c8a": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "Int8", + "Bool", + "Bytea", + "ByteaArray", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Numeric", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Int4", + "Int8Array" + ] + } + }, + "query": "INSERT INTO l1_batches (number, l1_tx_count, l2_tx_count, timestamp, is_finished, fee_account_address, l2_to_l1_logs, l2_to_l1_messages, bloom, priority_ops_onchain_data, predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, storage_refunds, created_at, updated_at ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, now(), now())" + }, "dd330bc075a163974c59ec55ecfddd769d05801963b3e0e840e7f11e7bc6d3e9": { "describe": { "columns": [ @@ -10301,39 +10260,6 @@ }, "query": "SELECT timestamp, virtual_blocks FROM miniblocks WHERE number BETWEEN $1 AND $2 ORDER BY number" }, - "e1ad7a51afef6bd7a95df3294f64b7b1bdc4c4fc7ae5c4195802177986f3e876": { - "describe": { - "columns": [ - { - "name": "id", - 
"ordinal": 0, - "type_info": "Int8" - }, - { - "name": "status", - "ordinal": 1, - "type_info": "Text" - }, - { - "name": "attempts", - "ordinal": 2, - "type_info": "Int2" - } - ], - "nullable": [ - false, - false, - false - ], - "parameters": { - "Left": [ - "Interval", - "Int2" - ] - } - }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now()\n WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2)\n OR (status = 'failed' AND attempts < $2)\n RETURNING id, status, attempts\n " - }, "e29d263f33257a37f391907b7ff588f416a0350b606f16f4779fa1d3bf4be08b": { "describe": { "columns": [ diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 2013eeaa4df4..7fa30c9e6ba7 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -241,6 +241,30 @@ impl BlocksDal<'_, '_> { Ok(Some(heap)) } + pub async fn get_storage_refunds( + &mut self, + number: L1BatchNumber, + ) -> anyhow::Result>> { + let Some(row) = sqlx::query!( + "SELECT storage_refunds FROM l1_batches WHERE number = $1", + number.0 as i64 + ) + .instrument("get_storage_refunds") + .report_latency() + .with_arg("number", &number) + .fetch_optional(self.storage.conn()) + .await? + else { + return Ok(None); + }; + let Some(storage_refunds) = row.storage_refunds else { + return Ok(None); + }; + + let storage_refunds: Vec<_> = storage_refunds.into_iter().map(|n| n as u32).collect(); + Ok(Some(storage_refunds)) + } + pub async fn get_events_queue( &mut self, number: L1BatchNumber, @@ -316,6 +340,7 @@ impl BlocksDal<'_, '_> { initial_bootloader_contents: &[(usize, U256)], predicted_block_gas: BlockGasCount, events_queue: &[LogQuery], + storage_refunds: &[u32], ) -> anyhow::Result<()> { let priority_onchain_data: Vec> = header .priority_ops_onchain_data @@ -338,6 +363,7 @@ impl BlocksDal<'_, '_> { .expect("failed to serialize used_contract_hashes to JSON value"); let base_fee_per_gas = BigDecimal::from_u64(header.base_fee_per_gas) .context("block.base_fee_per_gas should fit in u64")?; + let storage_refunds: Vec<_> = storage_refunds.iter().map(|n| *n as i64).collect(); let mut transaction = self.storage.start_transaction().await?; sqlx::query!( @@ -347,9 +373,9 @@ impl BlocksDal<'_, '_> { bloom, priority_ops_onchain_data, \ predicted_commit_gas_cost, predicted_prove_gas_cost, predicted_execute_gas_cost, \ initial_bootloader_heap_content, used_contract_hashes, base_fee_per_gas, \ - l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, \ + l1_gas_price, l2_fair_gas_price, bootloader_code_hash, default_aa_code_hash, protocol_version, storage_refunds, \ created_at, updated_at \ - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, now(), now())", + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, now(), now())", header.number.0 as i64, header.l1_tx_count as i32, header.l2_tx_count as i32, @@ -377,6 +403,7 @@ impl BlocksDal<'_, '_> { .default_aa .as_bytes(), header.protocol_version.map(|v| v as i32), + &storage_refunds, ) .execute(transaction.conn()) .await?; @@ -1471,7 +1498,7 @@ mod tests { header.l2_to_l1_messages.push(vec![33; 33]); conn.blocks_dal() - .insert_l1_batch(&header, &[], BlockGasCount::default(), &[]) + .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[]) .await .unwrap(); @@ -1519,7 +1546,7 @@ 
mod tests { execute: 10, }; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[]) + .insert_l1_batch(&header, &[], predicted_gas, &[], &[]) .await .unwrap(); @@ -1527,7 +1554,7 @@ mod tests { header.timestamp += 100; predicted_gas += predicted_gas; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[]) + .insert_l1_batch(&header, &[], predicted_gas, &[], &[]) .await .unwrap(); diff --git a/core/lib/dal/src/fri_prover_dal.rs b/core/lib/dal/src/fri_prover_dal.rs index 4cdad1c304bb..af1c218195cf 100644 --- a/core/lib/dal/src/fri_prover_dal.rs +++ b/core/lib/dal/src/fri_prover_dal.rs @@ -213,6 +213,7 @@ impl FriProverDal<'_, '_> { UPDATE prover_jobs_fri SET status = 'queued', attempts = attempts + 1, updated_at = now(), processing_started_at = now() WHERE (status = 'in_progress' AND processing_started_at <= now() - $1::interval AND attempts < $2) + OR (status = 'in_gpu_proof' AND processing_started_at <= now() - $1::interval AND attempts < $2) OR (status = 'failed' AND attempts < $2) RETURNING id, status, attempts ", diff --git a/core/lib/dal/src/models/storage_token.rs b/core/lib/dal/src/models/storage_token.rs index 1c1e3539f066..1cc42405fe2f 100644 --- a/core/lib/dal/src/models/storage_token.rs +++ b/core/lib/dal/src/models/storage_token.rs @@ -3,26 +3,9 @@ use sqlx::types::{ BigDecimal, }; -use zksync_types::tokens::{TokenMarketVolume, TokenMetadata, TokenPrice}; +use zksync_types::tokens::TokenPrice; use zksync_utils::big_decimal_to_ratio; -#[derive(Debug, Clone, sqlx::FromRow)] -pub struct StorageTokenMetadata { - pub name: String, - pub symbol: String, - pub decimals: i32, -} - -impl From for TokenMetadata { - fn from(metadata: StorageTokenMetadata) -> TokenMetadata { - TokenMetadata { - name: metadata.name, - symbol: metadata.symbol, - decimals: metadata.decimals as u8, - } - } -} - #[derive(Debug, Clone, sqlx::FromRow)] pub struct StorageTokenPrice { pub usd_price: Option, @@ -48,26 +31,3 @@ impl From for Option { } } } - -#[derive(Debug, Clone, sqlx::FromRow)] -pub struct StorageTokenMarketVolume { - pub market_volume: Option, - pub market_volume_updated_at: Option, -} - -impl From for Option { - fn from(market_volume: StorageTokenMarketVolume) -> Option { - market_volume - .market_volume - .as_ref() - .map(|volume| TokenMarketVolume { - market_volume: big_decimal_to_ratio(volume).unwrap(), - last_updated: DateTime::::from_naive_utc_and_offset( - market_volume - .market_volume_updated_at - .expect("If `market_volume` is Some then `updated_at` must be Some"), - Utc, - ), - }) - } -} diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index c41ad2b668d5..286cd6c2e8a8 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -548,7 +548,7 @@ mod tests { ); header.is_finished = true; conn.blocks_dal() - .insert_l1_batch(&header, &[], BlockGasCount::default(), &[]) + .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[]) .await .unwrap(); conn.blocks_dal() diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 78d1f934b80e..4a9e1ed99799 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -290,7 +290,7 @@ async fn test_duplicate_insert_prover_jobs(connection_pool: ConnectionPool) { ); storage .blocks_dal() - .insert_l1_batch(&header, &[], Default::default(), &[]) + .insert_l1_batch(&header, &[], Default::default(), &[], &[]) .await .unwrap(); @@ -352,7 +352,7 @@ async fn test_requeue_prover_jobs(connection_pool: 
ConnectionPool) { ); storage .blocks_dal() - .insert_l1_batch(&header, &[], Default::default(), &[]) + .insert_l1_batch(&header, &[], Default::default(), &[], &[]) .await .unwrap(); @@ -415,7 +415,7 @@ async fn test_move_leaf_aggregation_jobs_from_waiting_to_queued(connection_pool: ); storage .blocks_dal() - .insert_l1_batch(&header, &[], Default::default(), &[]) + .insert_l1_batch(&header, &[], Default::default(), &[], &[]) .await .unwrap(); @@ -495,7 +495,7 @@ async fn test_move_node_aggregation_jobs_from_waiting_to_queued(connection_pool: ); storage .blocks_dal() - .insert_l1_batch(&header, &[], Default::default(), &[]) + .insert_l1_batch(&header, &[], Default::default(), &[], &[]) .await .unwrap(); @@ -582,7 +582,7 @@ async fn test_move_scheduler_jobs_from_waiting_to_queued(connection_pool: Connec ); storage .blocks_dal() - .insert_l1_batch(&header, &[], Default::default(), &[]) + .insert_l1_batch(&header, &[], Default::default(), &[], &[]) .await .unwrap(); diff --git a/core/lib/dal/src/tokens_dal.rs b/core/lib/dal/src/tokens_dal.rs index 8b32b7140733..f7b64aed69ea 100644 --- a/core/lib/dal/src/tokens_dal.rs +++ b/core/lib/dal/src/tokens_dal.rs @@ -1,9 +1,8 @@ -use crate::models::storage_token::StorageTokenMarketVolume; use crate::StorageProcessor; use num::{rational::Ratio, BigUint}; use sqlx::types::chrono::Utc; use zksync_types::{ - tokens::{TokenInfo, TokenMarketVolume, TokenMetadata, TokenPrice}, + tokens::{TokenInfo, TokenMetadata, TokenPrice}, Address, MiniblockNumber, ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, }; @@ -158,41 +157,6 @@ impl TokensDal<'_, '_> { } } - pub async fn set_l1_token_market_volume( - &mut self, - l1_address: &Address, - market_volume: TokenMarketVolume, - ) { - { - sqlx::query!( - "UPDATE tokens SET market_volume = $2, market_volume_updated_at = $3, updated_at = now() WHERE l1_address = $1", - l1_address.as_bytes(), - ratio_to_big_decimal(&market_volume.market_volume, STORED_USD_PRICE_PRECISION), - market_volume.last_updated.naive_utc(), - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } - } - - pub async fn get_token_market_volume( - &mut self, - l2_address: &Address, - ) -> Option { - { - let storage_market_volume = sqlx::query_as!( - StorageTokenMarketVolume, - "SELECT market_volume, market_volume_updated_at FROM tokens WHERE l2_address = $1", - l2_address.as_bytes(), - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap(); - storage_market_volume.and_then(Into::into) - } - } - pub async fn rollback_tokens(&mut self, block_number: MiniblockNumber) { { sqlx::query!( diff --git a/core/lib/dal/src/tokens_web3_dal.rs b/core/lib/dal/src/tokens_web3_dal.rs index 58e9a7ec9a66..aa3674b6c3d8 100644 --- a/core/lib/dal/src/tokens_web3_dal.rs +++ b/core/lib/dal/src/tokens_web3_dal.rs @@ -1,16 +1,10 @@ -use crate::models::storage_token::{StorageTokenMetadata, StorageTokenPrice}; +use crate::models::storage_token::StorageTokenPrice; use crate::SqlxError; use crate::StorageProcessor; -use num::{rational::Ratio, BigUint}; -use sqlx::postgres::types::PgInterval; use zksync_types::{ tokens::{TokenInfo, TokenMetadata, TokenPrice}, Address, }; -use zksync_utils::ratio_to_big_decimal; - -// Precision of the USD price per token -pub(crate) const STORED_USD_PRICE_PRECISION: usize = 6; #[derive(Debug)] pub struct TokensWeb3Dal<'a, 'c> { @@ -43,45 +37,6 @@ impl TokensWeb3Dal<'_, '_> { } } - pub async fn is_token_actively_trading( - &mut self, - l2_token: &Address, - min_volume: &Ratio, - max_acceptable_volume_age_in_secs: u32, - 
max_acceptable_price_age_in_secs: u32, - ) -> Result { - { - let min_volume = ratio_to_big_decimal(min_volume, STORED_USD_PRICE_PRECISION); - let volume_pg_interval = PgInterval { - months: 0, - days: 0, - microseconds: (max_acceptable_volume_age_in_secs as i64) * 1000000, - }; - let price_pg_interval = PgInterval { - months: 0, - days: 0, - microseconds: (max_acceptable_price_age_in_secs as i64) * 1000000, - }; - let count = sqlx::query!( - r#" - SELECT COUNT(*) as "count!" FROM tokens - WHERE l2_address = $1 AND - market_volume > $2 AND now() - market_volume_updated_at < $3 AND - usd_price > 0 AND now() - usd_price_updated_at < $4 - "#, - l2_token.as_bytes(), - min_volume, - volume_pg_interval, - price_pg_interval - ) - .fetch_one(self.storage.conn()) - .await - .unwrap() - .count; - Ok(count == 1) - } - } - pub async fn get_token_price( &mut self, l2_address: &Address, @@ -98,27 +53,4 @@ impl TokensWeb3Dal<'_, '_> { Ok(storage_price.and_then(Into::into)) } } - - pub async fn get_token_metadata( - &mut self, - l2_address: &Address, - ) -> Result, SqlxError> { - { - let storage_token_metadata = sqlx::query_as!( - StorageTokenMetadata, - r#" - SELECT - COALESCE(token_list_name, name) as "name!", - COALESCE(token_list_symbol, symbol) as "symbol!", - COALESCE(token_list_decimals, decimals) as "decimals!" - FROM tokens WHERE l2_address = $1 - "#, - l2_address.as_bytes(), - ) - .fetch_optional(self.storage.conn()) - .await?; - - Ok(storage_token_metadata.map(Into::into)) - } - } } diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index aa91b57b7572..5fa35ced5e87 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -1,20 +1,20 @@ use bigdecimal::BigDecimal; -use std::collections::HashMap; -use std::fmt::{self, Debug}; -use std::iter::FromIterator; -use std::time::Duration; - use itertools::Itertools; -use sqlx::error; -use sqlx::types::chrono::NaiveDateTime; +use sqlx::{error, types::chrono::NaiveDateTime}; + +use std::{collections::HashMap, fmt, time::Duration}; -use zksync_types::tx::tx_execution_info::TxExecutionStatus; -use zksync_types::vm_trace::Call; use zksync_types::{ - block::MiniblockReexecuteData, fee::TransactionExecutionMetrics, get_nonce_key, l1::L1Tx, - l2::L2Tx, protocol_version::ProtocolUpgradeTx, tx::TransactionExecutionResult, - vm_trace::VmExecutionTrace, Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, - MiniblockNumber, Nonce, PriorityOpId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, + block::MiniblockReexecuteData, + fee::TransactionExecutionMetrics, + get_nonce_key, + l1::L1Tx, + l2::L2Tx, + protocol_version::ProtocolUpgradeTx, + tx::{tx_execution_info::TxExecutionStatus, TransactionExecutionResult}, + vm_trace::{Call, VmExecutionTrace}, + Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, MiniblockNumber, Nonce, + PriorityOpId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::{h256_to_u32, u256_to_big_decimal}; @@ -35,8 +35,14 @@ pub enum L2TxSubmissionResult { } impl fmt::Display for L2TxSubmissionResult { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str(match self { + Self::Added => "added", + Self::Replaced => "replaced", + Self::AlreadyExecuted => "already_executed", + Self::Duplicate => "duplicate", + Self::Proxied => "proxied", + }) } } @@ -378,11 +384,8 @@ impl TransactionsDal<'_, '_> { transactions: 
&[TransactionExecutionResult], ) { { - let hashes: Vec> = transactions - .iter() - .map(|tx| tx.hash.as_bytes().to_vec()) - .collect(); - let l1_batch_tx_indexes = Vec::from_iter(0..transactions.len() as i32); + let hashes: Vec<_> = transactions.iter().map(|tx| tx.hash.as_bytes()).collect(); + let l1_batch_tx_indexes: Vec<_> = (0..transactions.len() as i32).collect(); sqlx::query!( " UPDATE transactions @@ -398,7 +401,7 @@ impl TransactionsDal<'_, '_> { WHERE transactions.hash=data_table.hash ", &l1_batch_tx_indexes, - &hashes, + &hashes as &[&[u8]], block_number.0 as i64 ) .execute(self.storage.conn()) diff --git a/core/lib/merkle_tree/Cargo.toml b/core/lib/merkle_tree/Cargo.toml index 1204bdf6c945..b03fdeb697bd 100644 --- a/core/lib/merkle_tree/Cargo.toml +++ b/core/lib/merkle_tree/Cargo.toml @@ -32,3 +32,4 @@ serde = { version = "1", features = ["derive"] } serde_json = "1" serde_with = { version = "1", features = ["hex"] } tempfile = "3.0.2" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } diff --git a/core/lib/merkle_tree/examples/loadtest/main.rs b/core/lib/merkle_tree/examples/loadtest/main.rs index 75971fd26fbc..b598a579f6b4 100644 --- a/core/lib/merkle_tree/examples/loadtest/main.rs +++ b/core/lib/merkle_tree/examples/loadtest/main.rs @@ -6,6 +6,7 @@ use clap::Parser; use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; use tempfile::TempDir; +use tracing_subscriber::EnvFilter; use std::{ thread, @@ -16,7 +17,7 @@ use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ Database, HashTree, MerkleTree, MerkleTreePruner, PatchSet, RocksDBWrapper, TreeInstruction, }; -use zksync_storage::RocksDB; +use zksync_storage::{RocksDB, RocksDBOptions}; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; mod batch; @@ -66,8 +67,16 @@ struct Cli { } impl Cli { + fn init_logging() { + tracing_subscriber::fmt() + .pretty() + .with_env_filter(EnvFilter::from_default_env()) + .init(); + } + fn run(self) { - println!("Launched with options: {self:?}"); + Self::init_logging(); + tracing::info!("Launched with options: {self:?}"); let (mut mock_db, mut rocksdb); let mut _temp_dir = None; @@ -77,16 +86,19 @@ impl Cli { &mut mock_db } else { let dir = TempDir::new().expect("failed creating temp dir for RocksDB"); - println!( + tracing::info!( "Created temp dir for RocksDB: {}", dir.path().to_string_lossy() ); - rocksdb = if let Some(block_cache_capacity) = self.block_cache { - let db = RocksDB::with_cache(dir.path(), Some(block_cache_capacity)); - RocksDBWrapper::from(db) - } else { - RocksDBWrapper::new(dir.path()) - }; + let db = RocksDB::with_options( + dir.path(), + RocksDBOptions { + block_cache_capacity: self.block_cache, + ..RocksDBOptions::default() + }, + ); + rocksdb = RocksDBWrapper::from(db); + if let Some(chunk_size) = self.chunk_size { rocksdb.set_multi_get_chunk_size(chunk_size); } @@ -127,7 +139,7 @@ impl Cli { let updated_keys = Self::generate_keys(updated_indices.into_iter()); let kvs = new_keys.into_iter().chain(updated_keys).zip(values); - println!("Processing block #{version}"); + tracing::info!("Processing block #{version}"); let start = Instant::now(); let root_hash = if self.proofs { let reads = Self::generate_keys(read_indices.into_iter()) @@ -143,15 +155,15 @@ impl Cli { output.root_hash }; let elapsed = start.elapsed(); - println!("Processed block #{version} in {elapsed:?}, root hash = {root_hash:?}"); + tracing::info!("Processed block #{version} in {elapsed:?}, root hash = {root_hash:?}"); } - println!("Verifying 
tree consistency..."); + tracing::info!("Verifying tree consistency..."); let start = Instant::now(); tree.verify_consistency(self.commit_count - 1) .expect("tree consistency check failed"); let elapsed = start.elapsed(); - println!("Verified tree consistency in {elapsed:?}"); + tracing::info!("Verified tree consistency in {elapsed:?}"); if let Some((pruner_handle, pruner_thread)) = pruner_handles { pruner_handle.abort(); @@ -170,5 +182,5 @@ impl Cli { } fn main() { - Cli::parse().run() + Cli::parse().run(); } diff --git a/core/lib/merkle_tree/examples/recovery.rs b/core/lib/merkle_tree/examples/recovery.rs new file mode 100644 index 000000000000..207499da8b41 --- /dev/null +++ b/core/lib/merkle_tree/examples/recovery.rs @@ -0,0 +1,122 @@ +//! Tree recovery load test. + +use clap::Parser; +use rand::{rngs::StdRng, Rng, SeedableRng}; +use tempfile::TempDir; +use tracing_subscriber::EnvFilter; + +use std::time::Instant; + +use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_merkle_tree::{ + recovery::{MerkleTreeRecovery, RecoveryEntry}, + HashTree, Key, PatchSet, PruneDatabase, RocksDBWrapper, ValueHash, +}; +use zksync_storage::{RocksDB, RocksDBOptions}; + +/// CLI for load-testing Merkle tree recovery. +#[derive(Debug, Parser)] +struct Cli { + /// Number of updates to perform. + #[arg(name = "updates")] + update_count: u64, + /// Number of entries per update. + #[arg(name = "ops")] + writes_per_update: usize, + /// Use a no-op hashing function. + #[arg(name = "no-hash", long)] + no_hashing: bool, + /// Perform testing on in-memory DB rather than RocksDB (i.e., with focus on hashing logic). + #[arg(long = "in-memory", short = 'M')] + in_memory: bool, + /// Block cache capacity for RocksDB in bytes. + #[arg(long = "block-cache", conflicts_with = "in_memory")] + block_cache: Option, + /// Seed to use in the RNG for reproducibility. + #[arg(long = "rng-seed", default_value = "0")] + rng_seed: u64, +} + +impl Cli { + fn init_logging() { + tracing_subscriber::fmt() + .pretty() + .with_env_filter(EnvFilter::from_default_env()) + .init(); + } + + fn run(self) { + Self::init_logging(); + tracing::info!("Launched with options: {self:?}"); + + let (mut mock_db, mut rocksdb); + let mut _temp_dir = None; + let db: &mut dyn PruneDatabase = if self.in_memory { + mock_db = PatchSet::default(); + &mut mock_db + } else { + let dir = TempDir::new().expect("failed creating temp dir for RocksDB"); + tracing::info!( + "Created temp dir for RocksDB: {}", + dir.path().to_string_lossy() + ); + let db = RocksDB::with_options( + dir.path(), + RocksDBOptions { + block_cache_capacity: self.block_cache, + ..RocksDBOptions::default() + }, + ); + rocksdb = RocksDBWrapper::from(db); + _temp_dir = Some(dir); + &mut rocksdb + }; + + let hasher: &dyn HashTree = if self.no_hashing { &() } else { &Blake2Hasher }; + let mut rng = StdRng::seed_from_u64(self.rng_seed); + + let recovered_version = 123; + let key_step = + Key::MAX / (Key::from(self.update_count) * Key::from(self.writes_per_update)); + assert!(key_step > Key::from(u64::MAX)); + // ^ Total number of generated keys is <2^128. 
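// Worked example of the bound above (hypothetical CLI values): with 1_000
// updates and 10_000 writes per update, `key_step` is `Key::MAX / 10^7`,
// i.e. roughly 2^256 / 2^23 ≈ 2^233, comfortably above u64::MAX = 2^64 - 1,
// so the assertion holds. In the loop below, each key advances by
// `key_step - rng.gen::<u64>()`; the random summand is < 2^64 < `key_step`,
// so keys stay strictly increasing while remaining pseudo-randomly spread.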
+ + let mut last_key = Key::zero(); + let mut last_leaf_index = 0; + let mut recovery = MerkleTreeRecovery::with_hasher(db, recovered_version, hasher); + let recovery_started_at = Instant::now(); + for updated_idx in 0..self.update_count { + let started_at = Instant::now(); + let recovery_entries = (0..self.writes_per_update) + .map(|_| { + last_key += key_step - Key::from(rng.gen::()); + // ^ Increases the key by a random increment close to `key` step with some randomness. + last_leaf_index += 1; + RecoveryEntry { + key: last_key, + value: ValueHash::zero(), + leaf_index: last_leaf_index, + } + }) + .collect(); + recovery.extend(recovery_entries); + tracing::info!( + "Updated tree with recovery chunk #{updated_idx} in {:?}", + started_at.elapsed() + ); + } + + let tree = recovery.finalize(); + tracing::info!( + "Recovery finished in {:?}; verifying consistency...", + recovery_started_at.elapsed() + ); + let started_at = Instant::now(); + tree.verify_consistency(recovered_version).unwrap(); + tracing::info!("Verified consistency in {:?}", started_at.elapsed()); + } +} + +fn main() { + Cli::parse().run(); +} diff --git a/core/lib/merkle_tree/src/consistency.rs b/core/lib/merkle_tree/src/consistency.rs index 5a7e19202062..2cbe1691b39a 100644 --- a/core/lib/merkle_tree/src/consistency.rs +++ b/core/lib/merkle_tree/src/consistency.rs @@ -55,6 +55,14 @@ pub enum ConsistencyError { }, #[error("leaf with key {full_key} has same index {index} as another key")] DuplicateLeafIndex { index: u64, full_key: Key }, + #[error("internal node with key {key} does not have children")] + EmptyInternalNode { key: NodeKey }, + #[error( + "internal node with key {key} should have version {expected_version} (max among child ref versions)" + )] + KeyVersionMismatch { key: NodeKey, expected_version: u64 }, + #[error("root node should have version >={max_child_version} (max among child ref versions)")] + RootVersionMismatch { max_child_version: u64 }, } impl MerkleTree<'_, DB> @@ -109,6 +117,21 @@ where } Node::Internal(node) => { + let expected_version = node.child_refs().map(|child_ref| child_ref.version).max(); + let Some(expected_version) = expected_version else { + return Err(ConsistencyError::EmptyInternalNode { key }); + }; + if !key.is_empty() && expected_version != key.version { + return Err(ConsistencyError::KeyVersionMismatch { + key, + expected_version, + }); + } else if key.is_empty() && expected_version > key.version { + return Err(ConsistencyError::RootVersionMismatch { + max_child_version: expected_version, + }); + } + // `.into_par_iter()` below is the only place where `rayon`-based parallelism // is used in tree verification. let children: Vec<_> = node.children().collect(); @@ -239,7 +262,7 @@ mod tests { use std::num::NonZeroU64; use super::*; - use crate::PatchSet; + use crate::{types::InternalNode, PatchSet}; use zksync_types::{H256, U256}; const FIRST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); @@ -284,7 +307,7 @@ mod tests { #[test] fn missing_root_error() { let mut db = prepare_database(); - db.roots_mut().remove(&0); + db.remove_root(0); let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); assert_matches!(err, ConsistencyError::MissingRoot(0)); @@ -311,7 +334,7 @@ mod tests { fn leaf_count_mismatch_error() { let mut db = prepare_database(); - let root = db.roots_mut().get_mut(&0).unwrap(); + let root = db.root_mut(0).unwrap(); let Root::Filled { leaf_count, .. 
} = root else { panic!("unexpected root: {root:?}"); }; @@ -331,7 +354,7 @@ mod tests { fn hash_mismatch_error() { let mut db = prepare_database(); - let root = db.roots_mut().get_mut(&0).unwrap(); + let root = db.root_mut(0).unwrap(); let Root::Filled { node: Node::Internal(node), .. @@ -412,4 +435,62 @@ mod tests { let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); assert_matches!(err, ConsistencyError::DuplicateLeafIndex { index: 1, .. }); } + + #[test] + fn empty_internal_node_error() { + let mut db = prepare_database(); + let node_key = db.nodes_mut().find_map(|(key, node)| { + if let Node::Internal(node) = node { + *node = InternalNode::default(); + return Some(*key); + } + None + }); + let node_key = node_key.unwrap(); + + let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + assert_matches!(err, ConsistencyError::EmptyInternalNode { key } if key == node_key); + } + + #[test] + fn version_mismatch_error() { + let mut db = prepare_database(); + let node_key = db.nodes_mut().find_map(|(key, node)| { + if let Node::Internal(node) = node { + let (nibble, _) = node.children().next().unwrap(); + node.child_ref_mut(nibble).unwrap().version = 1; + return Some(*key); + } + None + }); + let node_key = node_key.unwrap(); + + let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + assert_matches!( + err, + ConsistencyError::KeyVersionMismatch { key, expected_version: 1 } if key == node_key + ); + } + + #[test] + fn root_version_mismatch_error() { + let mut db = prepare_database(); + let Some(Root::Filled { + node: Node::Internal(node), + .. + }) = db.root_mut(0) + else { + unreachable!(); + }; + let (nibble, _) = node.children().next().unwrap(); + node.child_ref_mut(nibble).unwrap().version = 42; + + let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + assert_matches!( + err, + ConsistencyError::RootVersionMismatch { + max_child_version: 42, + } + ); + } } diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index a3344d1d6704..07a9668a61ae 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -48,6 +48,7 @@ mod getters; mod hasher; mod metrics; mod pruning; +pub mod recovery; mod storage; mod types; mod utils; @@ -146,7 +147,7 @@ impl<'a, DB: Database> MerkleTree<'a, DB> { pub fn with_hasher(db: DB, hasher: &'a dyn HashTree) -> Self { let tags = db.manifest().and_then(|manifest| manifest.tags); if let Some(tags) = tags { - tags.assert_consistency(hasher); + tags.assert_consistency(hasher, false); } // If there are currently no tags in the tree, we consider that it fits // for backward compatibility. The tags will be added the next time the tree is saved. @@ -208,7 +209,7 @@ impl<'a, DB: Database> MerkleTree<'a, DB> { /// Returns information about the update such as the final tree hash. 
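     ///
     /// # Example
     ///
     /// A minimal usage sketch (types and calls as exercised by this crate's tests):
     ///
     /// ```
     /// # use zksync_merkle_tree::{Key, MerkleTree, PatchSet, ValueHash};
     /// let mut tree = MerkleTree::new(PatchSet::default());
     /// tree.extend(vec![(Key::from(1), ValueHash::zero())]);
     /// assert_eq!(tree.latest_version(), Some(0));
     /// ```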
pub fn extend(&mut self, key_value_pairs: Vec<(Key, ValueHash)>) -> BlockOutput { let next_version = self.db.manifest().unwrap_or_default().version_count; - let storage = Storage::new(&self.db, self.hasher, next_version); + let storage = Storage::new(&self.db, self.hasher, next_version, true); let (output, patch) = storage.extend(key_value_pairs); self.db.apply_patch(patch); output @@ -226,7 +227,7 @@ impl<'a, DB: Database> MerkleTree<'a, DB> { instructions: Vec<(Key, TreeInstruction)>, ) -> BlockOutputWithProofs { let next_version = self.db.manifest().unwrap_or_default().version_count; - let storage = Storage::new(&self.db, self.hasher, next_version); + let storage = Storage::new(&self.db, self.hasher, next_version, true); let (output, patch) = storage.extend_with_proofs(instructions); self.db.apply_patch(patch); output @@ -246,6 +247,7 @@ mod tests { architecture: "AR64MT".to_owned(), depth: 256, hasher: "blake2s256".to_string(), + is_recovering: false, }); MerkleTree::new(db); @@ -259,6 +261,7 @@ mod tests { architecture: "AR16MT".to_owned(), depth: 128, hasher: "blake2s256".to_string(), + is_recovering: false, }); MerkleTree::new(db); @@ -272,6 +275,7 @@ mod tests { architecture: "AR16MT".to_owned(), depth: 256, hasher: "sha256".to_string(), + is_recovering: false, }); MerkleTree::new(db); diff --git a/core/lib/merkle_tree/src/metrics.rs b/core/lib/merkle_tree/src/metrics.rs index 4e7b41bff9be..29bd58e599eb 100644 --- a/core/lib/merkle_tree/src/metrics.rs +++ b/core/lib/merkle_tree/src/metrics.rs @@ -7,7 +7,9 @@ use std::{ }; use crate::types::Nibbles; -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Global, Histogram, Metrics}; +use vise::{ + Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Global, Histogram, Metrics, Unit, +}; #[derive(Debug, Metrics)] #[metrics(prefix = "merkle_tree")] @@ -24,19 +26,20 @@ const BYTE_SIZE_BUCKETS: Buckets = Buckets::exponential(65_536.0..=16.0 * 1_024. #[derive(Debug, Metrics)] #[metrics(prefix = "merkle_tree_finalize_patch")] struct HashingMetrics { - /// Total amount of hashing input performed while processing a single block. - #[metrics(buckets = BYTE_SIZE_BUCKETS)] - hashed_bytes: Histogram, + /// Total amount of hashing input performed while processing a patch. + #[metrics(buckets = BYTE_SIZE_BUCKETS, unit = Unit::Bytes)] + hashed: Histogram, + /// Total time spent on hashing while processing a patch. + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + hashing_duration: Histogram, } -#[vise::register] -static HASHING_METRICS: Global = Global::new(); - /// Hashing-related statistics reported as metrics for each block of operations. 
#[derive(Debug, Default)] #[must_use = "hashing stats should be `report()`ed"] pub(crate) struct HashingStats { pub hashed_bytes: AtomicU64, + pub hashing_duration: Duration, } impl HashingStats { @@ -45,8 +48,14 @@ impl HashingStats { } pub fn report(self) { + #[vise::register] + static HASHING_METRICS: Global = Global::new(); + let hashed_bytes = self.hashed_bytes.into_inner(); - HASHING_METRICS.hashed_bytes.observe(hashed_bytes); + HASHING_METRICS.hashed.observe(hashed_bytes); + HASHING_METRICS + .hashing_duration + .observe(self.hashing_duration); } } @@ -96,7 +105,7 @@ struct TreeUpdateMetrics { static TREE_UPDATE_METRICS: Global = Global::new(); #[must_use = "tree updater stats should be `report()`ed"] -#[derive(Debug, Clone, Copy, Default)] +#[derive(Clone, Copy, Default)] pub(crate) struct TreeUpdaterStats { pub new_leaves: u64, pub new_internal_nodes: u64, @@ -110,6 +119,24 @@ pub(crate) struct TreeUpdaterStats { pub patch_reads: u64, } +impl fmt::Debug for TreeUpdaterStats { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("TreeUpdaterStats") + .field("new_leaves", &self.new_leaves) + .field("new_internal_nodes", &self.new_internal_nodes) + .field("moved_leaves", &self.moved_leaves) + .field("updated_leaves", &self.updated_leaves) + .field("avg_leaf_level", &self.avg_leaf_level()) + .field("max_leaf_level", &self.max_leaf_level) + .field("key_reads", &self.key_reads) + .field("missing_key_reads", &self.missing_key_reads) + .field("db_reads", &self.db_reads) + .field("patch_reads", &self.patch_reads) + .finish_non_exhaustive() + } +} + impl TreeUpdaterStats { pub(crate) fn update_leaf_levels(&mut self, nibble_count: usize) { let leaf_level = nibble_count as u64 * 4; @@ -118,20 +145,22 @@ impl TreeUpdaterStats { } #[allow(clippy::cast_precision_loss)] // Acceptable for metrics + fn avg_leaf_level(&self) -> f64 { + let touched_leaves = self.new_leaves + self.moved_leaves; + if touched_leaves > 0 { + self.leaf_level_sum as f64 / touched_leaves as f64 + } else { + 0.0 + } + } + pub(crate) fn report(self) { let metrics = &TREE_UPDATE_METRICS; metrics.new_leaves.observe(self.new_leaves); metrics.new_internal_nodes.observe(self.new_internal_nodes); metrics.moved_leaves.observe(self.moved_leaves); metrics.updated_leaves.observe(self.updated_leaves); - - let touched_leaves = self.new_leaves + self.moved_leaves; - let avg_leaf_level = if touched_leaves > 0 { - self.leaf_level_sum as f64 / touched_leaves as f64 - } else { - 0.0 - }; - metrics.avg_leaf_level.observe(avg_leaf_level); + metrics.avg_leaf_level.observe(self.avg_leaf_level()); metrics.max_leaf_level.observe(self.max_leaf_level); if self.key_reads > 0 { @@ -297,7 +326,7 @@ struct PruningMetrics { static PRUNING_METRICS: Global = Global::new(); #[derive(Debug)] -pub(crate) struct PruningStats { +pub struct PruningStats { pub target_retained_version: u64, pub pruned_key_count: usize, pub deleted_stale_key_versions: ops::Range, diff --git a/core/lib/merkle_tree/src/pruning.rs b/core/lib/merkle_tree/src/pruning.rs index 4bbcb8f0bcb5..bf60b8cf956b 100644 --- a/core/lib/merkle_tree/src/pruning.rs +++ b/core/lib/merkle_tree/src/pruning.rs @@ -100,8 +100,9 @@ impl MerkleTreePruner { latest_version.checked_sub(self.past_versions_to_keep) } + #[doc(hidden)] // Used in integration tests; logically private #[allow(clippy::range_plus_one)] // exclusive range is required by `PrunePatchSet` constructor - fn run_once(&mut self) -> Option { + pub fn run_once(&mut self) -> Option { let 
target_retained_version = self.target_retained_version()?; let min_stale_key_version = self.db.min_stale_key_version()?; let stale_key_new_versions = min_stale_key_version..=target_retained_version; @@ -211,8 +212,10 @@ mod tests { assert!(!stats.has_more_work()); // Check the `PatchSet` implementation of `PruneDatabase`. - assert_eq!(db.roots_mut().len(), 1); - assert!(db.roots_mut().contains_key(&4)); + for version in 0..4 { + assert!(db.root_mut(version).is_none()); + } + assert!(db.root_mut(4).is_some()); } #[test] diff --git a/core/lib/merkle_tree/src/recovery.rs b/core/lib/merkle_tree/src/recovery.rs new file mode 100644 index 000000000000..7e7450596d88 --- /dev/null +++ b/core/lib/merkle_tree/src/recovery.rs @@ -0,0 +1,279 @@ +//! Merkle tree recovery logic. +//! +//! # Overview +//! +//! **Recovery process** is responsible for restoring a Merkle tree from a snapshot. A snapshot +//! consists of all tree entries at a specific tree version. As a result of recovery, we create +//! a Merkle tree with the same entries as the snapshot. Any changes that are applied to the tree +//! afterwards will have the same outcome as if they were applied to the original tree. +//! +//! Importantly, a recovered tree is only *observably* identical to the original tree; it differs +//! in (currently unobservable) node versions. In a recovered tree, all nodes will initially have +//! the same version (the snapshot version), while in the original tree, node versions are distributed +//! from 0 to the snapshot version (both inclusive). +//! +//! Recovery process proceeds as follows: +//! +//! 1. Initialize a tree in the recovery mode. Until recovery is finished, the tree cannot be accessed +//! using ordinary [`MerkleTree`] APIs. +//! 2. Update the tree from a snapshot, which [is fed to the tree](MerkleTreeRecovery::extend()) +//! as [`RecoveryEntry`] chunks. Recovery entries must be ordered by increasing key. +//! 3. Finalize recovery using [`MerkleTreeRecovery::finalize()`]. To check integrity, you may compare +//! [`MerkleTreeRecovery::root_hash()`] to the reference value. +//! +//! The recovery process is tolerant to crashes and may be resumed from the middle. To find the latest +//! recovered key, you may use [`MerkleTreeRecovery::last_processed_key()`]. +//! +//! `RecoveryEntry` chunks are not validated during recovery. They can be authenticated using +//! [`TreeRangeDigest`](crate::TreeRangeDigest)s provided that the tree root hash is authenticated +//! using external means. +//! +//! # Implementation details +//! +//! We require `RecoveryEntry` ordering to simplify tracking the recovery progress. It also makes +//! node updates more efficient. Indeed, it suffices to load a leaf with the greatest key and its ancestors +//! before extending the tree; these nodes are guaranteed to be the *only* DB reads necessary +//! to insert new entries. + +use std::time::Instant; + +use crate::{ + hasher::HashTree, + storage::{PatchSet, PruneDatabase, PrunePatchSet, Storage}, + types::{Key, Manifest, Root, TreeTags, ValueHash}, + MerkleTree, +}; +use zksync_crypto::hasher::blake2::Blake2Hasher; + +/// Entry in a Merkle tree used during recovery. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct RecoveryEntry { + /// Entry key. + pub key: Key, + /// Entry value. + pub value: ValueHash, + /// Leaf index associated with the entry. It is **not** checked whether leaf indices are well-formed + /// during recovery (e.g., that they are unique). 
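+    /// (E.g., the recovery load test above assigns leaf indices sequentially, starting from 1.)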
+ pub leaf_index: u64, +} + +/// Handle to a Merkle tree during its recovery. +#[derive(Debug)] +pub struct MerkleTreeRecovery<'a, DB> { + db: DB, + hasher: &'a dyn HashTree, + recovered_version: u64, +} + +impl<'a, DB: PruneDatabase> MerkleTreeRecovery<'a, DB> { + /// Creates tree recovery with the default Blake2 hasher. + /// + /// # Panics + /// + /// Panics in the same situations as [`Self::with_hasher()`]. + pub fn new(db: DB, recovered_version: u64) -> Self { + Self::with_hasher(db, recovered_version, &Blake2Hasher) + } + + /// Loads a tree with the specified hasher. + /// + /// # Panics + /// + /// - Panics if the tree DB exists and it's not being recovered, or if it's being recovered + /// for a different tree version. + /// - Panics if the hasher or basic tree parameters (e.g., the tree depth) + /// do not match those of the tree loaded from the database. + pub fn with_hasher(mut db: DB, recovered_version: u64, hasher: &'a dyn HashTree) -> Self { + let manifest = db.manifest(); + let mut manifest = if let Some(manifest) = manifest { + if manifest.version_count > 0 { + let expected_version = manifest.version_count - 1; + assert_eq!( + recovered_version, + expected_version, + "Requested to recover tree version {recovered_version}, but it is currently being recovered \ + for version {expected_version}" + ); + } + manifest + } else { + Manifest { + version_count: recovered_version + 1, + tags: None, + } + }; + + manifest.version_count = recovered_version + 1; + if let Some(tags) = &manifest.tags { + tags.assert_consistency(hasher, true); + } else { + let mut tags = TreeTags::new(hasher); + tags.is_recovering = true; + manifest.tags = Some(tags); + } + db.apply_patch(PatchSet::from_manifest(manifest)); + + Self { + db, + hasher, + recovered_version, + } + } + + /// Returns the root hash of the recovered tree at this point. + pub fn root_hash(&self) -> ValueHash { + let root = self.db.root(self.recovered_version); + let Some(Root::Filled { node, .. }) = root else { + return self.hasher.empty_tree_hash(); + }; + node.hash(&mut self.hasher.into(), 0) + } + + /// Returns the last key processed during the recovery process. + pub fn last_processed_key(&self) -> Option { + let storage = Storage::new(&self.db, self.hasher, self.recovered_version, false); + storage.greatest_key() + } + + /// Extends a tree with a chunk of entries. + /// + /// Entries must be ordered by increasing `key`, and the key of the first entry must be greater + /// than [`Self::last_processed_key()`]. + /// + /// # Panics + /// + /// Panics if entry keys are not correctly ordered. + #[tracing::instrument( + level = "debug", + skip_all, + fields( + recovered_version = self.recovered_version, + entries.len = entries.len(), + %entries.key_range = entries_key_range(&entries), + ), + )] + pub fn extend(&mut self, entries: Vec) { + tracing::debug!("Started extending tree"); + + let started_at = Instant::now(); + let storage = Storage::new(&self.db, self.hasher, self.recovered_version, false); + let patch = storage.extend_during_recovery(entries); + tracing::debug!("Finished processing keys; took {:?}", started_at.elapsed()); + + let started_at = Instant::now(); + self.db.apply_patch(patch); + tracing::debug!("Finished persisting to DB; took {:?}", started_at.elapsed()); + } + + /// Finalizes the recovery process marking it as complete in the tree manifest. 
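+    ///
+    /// # Example
+    ///
+    /// A minimal end-to-end sketch, mirroring the unit tests below:
+    ///
+    /// ```
+    /// # use zksync_merkle_tree::{recovery::{MerkleTreeRecovery, RecoveryEntry}, Key, PatchSet, ValueHash};
+    /// let mut recovery = MerkleTreeRecovery::new(PatchSet::default(), 42);
+    /// recovery.extend(vec![RecoveryEntry {
+    ///     key: Key::from(123),
+    ///     value: ValueHash::repeat_byte(1),
+    ///     leaf_index: 1,
+    /// }]);
+    /// let tree = recovery.finalize();
+    /// assert_eq!(tree.latest_version(), Some(42));
+    /// ```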
+ #[tracing::instrument( + level = "debug", + skip_all, + fields(recovered_version = self.recovered_version), + )] + #[allow(clippy::missing_panics_doc, clippy::range_plus_one)] + pub fn finalize(mut self) -> MerkleTree<'a, DB> { + let mut manifest = self.db.manifest().unwrap(); + // ^ `unwrap()` is safe: manifest is inserted into the DB on creation + + let leaf_count = if let Some(root) = self.db.root(self.recovered_version) { + root.leaf_count() + } else { + // Marginal case: an empty tree is recovered (i.e., `extend()` was never called). + let patch = PatchSet::for_empty_root(manifest.clone(), self.recovered_version); + self.db.apply_patch(patch); + 0 + }; + tracing::debug!( + "Finalizing recovery of the Merkle tree with {leaf_count} key–value entries" + ); + + let started_at = Instant::now(); + let stale_keys = self.db.stale_keys(self.recovered_version); + let stale_keys_len = stale_keys.len(); + tracing::debug!("Pruning {stale_keys_len} accumulated stale keys"); + let prune_patch = PrunePatchSet::new( + stale_keys, + self.recovered_version..self.recovered_version + 1, + ); + self.db.prune(prune_patch); + tracing::debug!( + "Pruned {stale_keys_len} stale keys in {:?}", + started_at.elapsed() + ); + + manifest + .tags + .get_or_insert_with(|| TreeTags::new(self.hasher)) + .is_recovering = false; + self.db.apply_patch(PatchSet::from_manifest(manifest)); + tracing::debug!("Updated tree manifest to mark recovery as complete"); + + // We don't need additional integrity checks since they were performed in the constructor + MerkleTree { + db: self.db, + hasher: self.hasher, + } + } +} + +fn entries_key_range(entries: &[RecoveryEntry]) -> String { + let (Some(first), Some(last)) = (entries.first(), entries.last()) else { + return "(empty)".to_owned(); + }; + format!("{:0>64x}..={:0>64x}", first.key, last.key) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{hasher::HasherWithStats, types::LeafNode}; + + #[test] + #[should_panic(expected = "Tree is expected to be in the process of recovery")] + fn recovery_for_initialized_tree() { + let mut db = PatchSet::default(); + MerkleTreeRecovery::new(&mut db, 123).finalize(); + MerkleTreeRecovery::new(db, 123); + } + + #[test] + #[should_panic(expected = "Requested to recover tree version 42")] + fn recovery_for_different_version() { + let mut db = PatchSet::default(); + MerkleTreeRecovery::new(&mut db, 123); + MerkleTreeRecovery::new(&mut db, 42); + } + + #[test] + fn recovering_empty_tree() { + let tree = MerkleTreeRecovery::new(PatchSet::default(), 42).finalize(); + assert_eq!(tree.latest_version(), Some(42)); + assert_eq!(tree.root(42), Some(Root::Empty)); + } + + #[test] + fn recovering_tree_with_single_node() { + let mut recovery = MerkleTreeRecovery::new(PatchSet::default(), 42); + let recovery_entry = RecoveryEntry { + key: Key::from(123), + value: ValueHash::repeat_byte(1), + leaf_index: 1, + }; + recovery.extend(vec![recovery_entry]); + let tree = recovery.finalize(); + + assert_eq!(tree.latest_version(), Some(42)); + let mut hasher = HasherWithStats::from(&Blake2Hasher as &dyn HashTree); + assert_eq!( + tree.latest_root_hash(), + LeafNode::new( + recovery_entry.key, + recovery_entry.value, + recovery_entry.leaf_index + ) + .hash(&mut hasher, 0) + ); + tree.verify_consistency(42).unwrap(); + } +} diff --git a/core/lib/merkle_tree/src/storage/database.rs b/core/lib/merkle_tree/src/storage/database.rs index 0c1e494dc21e..4cefd52d9a21 100644 --- a/core/lib/merkle_tree/src/storage/database.rs +++ 
b/core/lib/merkle_tree/src/storage/database.rs
@@ -109,7 +109,10 @@ impl Database for PatchSet {
     }
 
     fn try_root(&self, version: u64) -> Result<Option<Root>, DeserializeError> {
-        Ok(self.roots.get(&version).cloned())
+        let Some(patch) = self.patches_by_version.get(&version) else {
+            return Ok(None);
+        };
+        Ok(patch.root.clone())
     }
 
     fn try_tree_node(
@@ -117,10 +120,8 @@
         key: &NodeKey,
         is_leaf: bool,
     ) -> Result<Option<Node>, DeserializeError> {
-        let node = self
-            .nodes_by_version
-            .get(&key.version)
-            .and_then(|nodes| nodes.get(key));
+        let patch_with_node = self.patches_by_version.get(&key.version);
+        let node = patch_with_node.and_then(|patch| patch.nodes.get(key));
         let Some(node) = node.cloned() else {
             return Ok(None);
         };
@@ -134,21 +135,44 @@
         Ok(Some(node))
     }
 
-    fn apply_patch(&mut self, other: PatchSet) {
+    fn apply_patch(&mut self, mut other: PatchSet) {
+        if let Some(other_updated_version) = other.updated_version {
+            if let Some(updated_version) = self.updated_version {
+                assert_eq!(
+                    other_updated_version, updated_version,
+                    "Cannot merge patches with different updated versions"
+                );
+
+                let patch = self.patches_by_version.get_mut(&updated_version).unwrap();
+                let other_patch = other.patches_by_version.remove(&updated_version).unwrap();
+                // ^ `unwrap()`s are safe by design.
+                patch.merge(other_patch);
+            } else {
+                assert!(
+                    self.patches_by_version.keys().all(|&ver| ver > other_updated_version),
+                    "Cannot update {self:?} from {other:?}; this would break the update version invariant \
+                     (the update version being lesser than all inserted versions)"
+                );
+                self.updated_version = Some(other_updated_version);
+            }
+        }
+
         let new_version_count = other.manifest.version_count;
         if new_version_count < self.manifest.version_count {
-            // Remove obsolete roots and nodes from the patch.
-            self.roots.retain(|&version, _| version < new_version_count);
-            self.nodes_by_version
-                .retain(|&version, _| version < new_version_count);
-            self.stale_keys_by_version
+            // Remove obsolete sub-patches from the patch.
+            self.patches_by_version
                 .retain(|&version, _| version < new_version_count);
         }
         self.manifest = other.manifest;
-        self.roots.extend(other.roots);
-        self.nodes_by_version.extend(other.nodes_by_version);
-        self.stale_keys_by_version
-            .extend(other.stale_keys_by_version);
+        self.patches_by_version.extend(other.patches_by_version);
+        for (version, stale_keys) in other.stale_keys_by_version {
+            self.stale_keys_by_version
+                .entry(version)
+                .or_default()
+                .extend(stale_keys);
+        }
+        // `PatchSet` invariants hold by construction: the updated version (if set) is still lower
+        // than all other versions by design.
     }
 }
 
@@ -170,9 +194,28 @@ impl<DB: Database> Patched<DB> {
     }
 
     pub(crate) fn patched_versions(&self) -> Vec<u64> {
-        self.patch
-            .as_ref()
-            .map_or_else(Vec::new, |patch| patch.roots.keys().copied().collect())
+        self.patch.as_ref().map_or_else(Vec::new, |patch| {
+            patch.patches_by_version.keys().copied().collect()
+        })
+    }
+
+    /// Returns the value from the patch and a flag whether this value is final (i.e., a DB lookup
+    /// is not required).
+    fn lookup_patch(&self, key: &NodeKey, is_leaf: bool) -> (Option<Node>, bool) {
+        let Some(patch) = &self.patch else {
+            return (None, false);
+        };
+        if patch.is_new_version(key.version) {
+            return (patch.tree_node(key, is_leaf), true);
+        }
+        let could_be_in_updated_patch = patch.updated_version == Some(key.version);
+        if could_be_in_updated_patch {
+            // Unlike with new versions, we must look both in the update patch and in the original DB.
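+            // E.g., if recovery rewrote version 9 in place, a node key with version 9 may
+            // point either at a node rewritten by the update patch or at an untouched node
+            // that only exists in the underlying DB; a patch miss is therefore not final.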
+ if let Some(node) = patch.tree_node(key, is_leaf) { + return (Some(node), true); + } + } + (None, false) } /// Provides readonly access to the wrapped DB. @@ -223,8 +266,9 @@ impl Database for Patched { fn try_root(&self, version: u64) -> Result, DeserializeError> { if let Some(patch) = &self.patch { - if patch.is_responsible_for_version(version) { - return Ok(patch.roots.get(&version).cloned()); + let has_root = patch.is_new_version(version) || patch.updated_version == Some(version); + if has_root { + return patch.try_root(version); } } self.inner.try_root(version) @@ -235,32 +279,41 @@ impl Database for Patched { key: &NodeKey, is_leaf: bool, ) -> Result, DeserializeError> { - let Some(patch) = &self.patch else { - return self.inner.try_tree_node(key, is_leaf); - }; - - if patch.is_responsible_for_version(key.version) { - patch.try_tree_node(key, is_leaf) // take use of debug assertions + let (patch_node, is_final) = self.lookup_patch(key, is_leaf); + if is_final { + Ok(patch_node) + } else if let Some(node) = patch_node { + Ok(Some(node)) } else { self.inner.try_tree_node(key, is_leaf) } } fn tree_nodes(&self, keys: &NodeKeys) -> Vec> { - let Some(patch) = &self.patch else { + if self.patch.is_none() { return self.inner.tree_nodes(keys); - }; + } - let mut is_in_patch = Vec::with_capacity(keys.len()); - let (patch_keys, db_keys): (Vec<_>, Vec<_>) = keys.iter().partition(|(key, _)| { - let flag = patch.is_responsible_for_version(key.version); - is_in_patch.push(flag); - flag - }); + let mut is_in_patch = vec![false; keys.len()]; + let mut patch_values = vec![]; + for (i, (key, is_leaf)) in keys.iter().enumerate() { + let (patch_node, is_final) = self.lookup_patch(key, *is_leaf); + if is_final { + patch_values.push(patch_node); + is_in_patch[i] = true; + } else if let Some(node) = patch_node { + patch_values.push(Some(node)); + is_in_patch[i] = true; + } + } + let db_keys: Vec<_> = keys + .iter() + .zip(&is_in_patch) + .filter_map(|(&key, &is_in_patch)| (!is_in_patch).then_some(key)) + .collect(); - let mut patch_values = patch.tree_nodes(&patch_keys).into_iter(); + let mut patch_values = patch_values.into_iter(); let mut db_values = self.inner.tree_nodes(&db_keys).into_iter(); - let values = is_in_patch.into_iter().map(|is_in_patch| { if is_in_patch { patch_values.next().unwrap() @@ -346,10 +399,13 @@ impl PruneDatabase for PatchSet { fn prune(&mut self, patch: PrunePatchSet) { for key in &patch.pruned_node_keys { + let Some(patch) = self.patches_by_version.get_mut(&key.version) else { + continue; + }; if key.is_empty() { - self.roots.remove(&key.version); - } else if let Some(nodes) = self.nodes_by_version.get_mut(&key.version) { - nodes.remove(key); + patch.root = None; + } else { + patch.nodes.remove(key); } } @@ -364,10 +420,97 @@ mod tests { use super::*; use crate::{ - storage::tests::{create_patch, generate_nodes, FIRST_KEY}, + storage::{ + tests::{create_patch, generate_nodes, FIRST_KEY}, + Operation, + }, types::{InternalNode, Nibbles}, }; + #[test] + fn patch_set_with_update() { + let manifest = Manifest::new(10, &()); + let old_root = Root::new(2, Node::Internal(InternalNode::default())); + let nodes = generate_nodes(9, &[1, 2]); + let mut patch = PatchSet::new( + manifest, + 9, + old_root.clone(), + nodes.clone(), + vec![], + Operation::Update, + ); + + for ver in (0..9).chain(10..20) { + assert!(patch.root(ver).is_none()); + } + assert_eq!(patch.root(9).unwrap(), old_root); + let (&node_key, expected_node) = nodes.iter().next().unwrap(); + let node = 
patch.tree_node(&node_key, true).unwrap(); + assert_eq!(node, *expected_node); + + let new_nodes = generate_nodes(10, &[3, 4]); + let manifest = Manifest::new(11, &()); + let new_root = Root::new(4, Node::Internal(InternalNode::default())); + let new_patch = PatchSet::new( + manifest, + 10, + new_root.clone(), + new_nodes.clone(), + vec![], + Operation::Insert, + ); + patch.apply_patch(new_patch); + + for ver in (0..9).chain(11..20) { + assert!(patch.root(ver).is_none()); + } + assert_eq!(patch.root(9).unwrap(), old_root); + assert_eq!(patch.root(10).unwrap(), new_root); + let (&node_key, expected_node) = nodes.iter().next().unwrap(); + let node = patch.tree_node(&node_key, true).unwrap(); + assert_eq!(node, *expected_node); + let (&node_key, expected_node) = new_nodes.iter().next().unwrap(); + let node = patch.tree_node(&node_key, true).unwrap(); + assert_eq!(node, *expected_node); + } + + #[test] + fn merging_two_update_patches() { + let manifest = Manifest::new(10, &()); + let old_root = Root::new(2, Node::Internal(InternalNode::default())); + let nodes = generate_nodes(9, &[1, 2]); + let mut patch = PatchSet::new( + manifest.clone(), + 9, + old_root, + nodes.clone(), + vec![], + Operation::Update, + ); + + let new_nodes = generate_nodes(9, &[3, 4]); + let new_root = Root::new(4, Node::Internal(InternalNode::default())); + let new_patch = PatchSet::new( + manifest, + 9, + new_root.clone(), + new_nodes.clone(), + vec![], + Operation::Update, + ); + patch.apply_patch(new_patch); + + for ver in (0..9).chain(10..20) { + assert!(patch.root(ver).is_none()); + } + assert_eq!(patch.root(9).unwrap(), new_root); + for (&node_key, expected_node) in nodes.iter().chain(&new_nodes) { + let node = patch.tree_node(&node_key, true).unwrap(); + assert_eq!(node, *expected_node); + } + } + #[test] fn requesting_nodes_in_patched_db() { let root = Root::new(2, Node::Internal(InternalNode::default())); @@ -431,4 +574,55 @@ mod tests { [Some(_), Some(_), None, None, None, None, Some(_), Some(_), Some(_)] ); } + + #[test] + fn patched_db_with_update_patch() { + let manifest = Manifest::new(10, &()); + let old_root = Root::new(2, Node::Internal(InternalNode::default())); + let nodes = generate_nodes(9, &[1, 2]); + let db = PatchSet::new( + manifest.clone(), + 9, + old_root.clone(), + nodes.clone(), + vec![], + Operation::Update, + ); + let mut patched = Patched::new(db); + + let new_nodes = generate_nodes(9, &[3, 4]); + let new_root = Root::new(4, Node::Internal(InternalNode::default())); + let new_patch = PatchSet::new( + manifest, + 9, + new_root.clone(), + new_nodes.clone(), + vec![], + Operation::Update, + ); + patched.apply_patch(new_patch); + + for ver in (0..9).chain(10..20) { + assert!(patched.root(ver).is_none()); + } + assert_eq!(patched.root(9).unwrap(), new_root); + for (&node_key, expected_node) in nodes.iter().chain(&new_nodes) { + let node = patched.tree_node(&node_key, true).unwrap(); + assert_eq!(node, *expected_node); + } + + let requested_keys: Vec<_> = nodes + .keys() + .chain(new_nodes.keys()) + .map(|&key| (key, true)) + .collect(); + let retrieved_nodes = patched.tree_nodes(&requested_keys); + assert_eq!(retrieved_nodes.len(), requested_keys.len()); + for ((key, _), node) in requested_keys.iter().zip(retrieved_nodes) { + assert_eq!( + node.unwrap(), + *nodes.get(key).unwrap_or_else(|| &new_nodes[key]) + ); + } + } } diff --git a/core/lib/merkle_tree/src/storage/mod.rs b/core/lib/merkle_tree/src/storage/mod.rs index a7553727467a..baea778cf93f 100644 --- 
a/core/lib/merkle_tree/src/storage/mod.rs +++ b/core/lib/merkle_tree/src/storage/mod.rs @@ -18,6 +18,7 @@ pub use self::{ use crate::{ hasher::HashTree, metrics::{TreeUpdaterStats, BLOCK_TIMINGS, GENERAL_METRICS}, + recovery::RecoveryEntry, types::{ BlockOutput, ChildRef, InternalNode, Key, LeafNode, Manifest, Nibbles, Node, Root, TreeLogEntry, TreeTags, ValueHash, @@ -25,6 +26,14 @@ use crate::{ utils::increment_counter, }; +/// Tree operation: either inserting a new version or updating an existing one (the latter is only +/// used during tree recovery). +#[derive(Debug, Clone, Copy)] +enum Operation { + Insert, + Update, +} + /// Mutable storage encapsulating AR16MT update logic. #[derive(Debug)] struct TreeUpdater { @@ -92,6 +101,14 @@ impl TreeUpdater { longest_prefixes } + /// Loads the greatest key from the database. + fn load_greatest_key(&mut self, db: &DB) -> Option<(LeafNode, Nibbles)> { + let (leaf, load_result) = self.patch_set.load_greatest_key(db)?; + self.metrics.db_reads += load_result.db_reads; + assert_eq!(load_result.longest_prefixes.len(), 1); + Some((leaf, load_result.longest_prefixes[0])) + } + /// Inserts or updates a value hash for the specified `key`. This implementation /// is almost verbatim the algorithm described in the Jellyfish Merkle tree white paper. /// The algorithm from the paper is as follows: @@ -120,7 +137,7 @@ impl TreeUpdater { parent_nibbles: &Nibbles, leaf_index_fn: impl FnOnce() -> u64, ) -> (TreeLogEntry, NewLeafData) { - let version = self.patch_set.version(); + let version = self.patch_set.root_version(); let traverse_outcome = self.patch_set.traverse(key, parent_nibbles); let (log, leaf_data) = match traverse_outcome { TraverseOutcome::LeafMatch(nibbles, mut leaf) => { @@ -132,12 +149,7 @@ impl TreeUpdater { } TraverseOutcome::LeafMismatch(nibbles, leaf) => { - if let Some((parent_nibbles, last_nibble)) = nibbles.split_last() { - self.patch_set - .child_ref_mut(&parent_nibbles, last_nibble) - .unwrap() - .is_leaf = false; - } + self.update_moved_leaf_ref(&nibbles); let mut nibble_idx = nibbles.nibble_count(); loop { @@ -203,15 +215,26 @@ impl TreeUpdater { // Traverse nodes up to the root level and update `ChildRef.version`. let mut cursor = traverse_outcome.position(); while let Some((parent_nibbles, last_nibble)) = cursor.split_last() { - self.patch_set + let child_ref = self + .patch_set .child_ref_mut(&parent_nibbles, last_nibble) - .unwrap() - .version = version; + .unwrap(); + child_ref.version = child_ref.version.max(version); cursor = parent_nibbles; } (log, leaf_data) } + + fn update_moved_leaf_ref(&mut self, leaf_nibbles: &Nibbles) { + if let Some((parent_nibbles, last_nibble)) = leaf_nibbles.split_last() { + let child_ref = self + .patch_set + .child_ref_mut(&parent_nibbles, last_nibble) + .unwrap(); + child_ref.is_leaf = false; + } + } } /// [`TreeUpdater`] together with a link to the database. @@ -221,22 +244,33 @@ pub(crate) struct Storage<'a, DB: ?Sized> { hasher: &'a dyn HashTree, manifest: Manifest, leaf_count: u64, + operation: Operation, updater: TreeUpdater, } impl<'a, DB: Database + ?Sized> Storage<'a, DB> { /// Creates storage for a new version of the tree. 
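+    ///
+    /// If `create_new_version` is set, the storage is based on tree version `version - 1`
+    /// and inserts a new version `version` (the normal update path); otherwise, it is based
+    /// on version `version` itself and updates it in place (only used during recovery).
+    /// A sketch of the two call shapes, assuming a DB with versions 0..=5:
+    ///
+    /// ```text
+    /// let insert = Storage::new(&db, hasher, 6, true);  // based on root 5, writes version 6
+    /// let update = Storage::new(&db, hasher, 5, false); // based on root 5, rewrites version 5
+    /// ```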
- pub fn new(db: &'a DB, hasher: &'a dyn HashTree, version: u64) -> Self { + pub fn new( + db: &'a DB, + hasher: &'a dyn HashTree, + version: u64, + create_new_version: bool, + ) -> Self { let mut manifest = db.manifest().unwrap_or_default(); if manifest.tags.is_none() { manifest.tags = Some(TreeTags::new(hasher)); } manifest.version_count = version + 1; - let root = if version == 0 { - Root::Empty + let base_version = if create_new_version { + version.checked_sub(1) + } else { + Some(version) + }; + let root = if let Some(base_version) = base_version { + db.root(base_version).unwrap_or(Root::Empty) } else { - db.root(version - 1).expect("no previous root") + Root::Empty }; Self { @@ -244,6 +278,11 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { hasher, manifest, leaf_count: root.leaf_count(), + operation: if create_new_version { + Operation::Insert + } else { + Operation::Update + }, updater: TreeUpdater::new(version, root), } } @@ -254,7 +293,8 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { let load_nodes_latency = BLOCK_TIMINGS.load_nodes.start(); let sorted_keys = SortedKeys::new(key_value_pairs.iter().map(|(key, _)| *key)); let parent_nibbles = self.updater.load_ancestors(&sorted_keys, self.db); - load_nodes_latency.observe(); + let load_nodes_latency = load_nodes_latency.observe(); + tracing::debug!("Load stage took {load_nodes_latency:?}"); let extend_patch_latency = BLOCK_TIMINGS.extend_patch.start(); let mut logs = Vec::with_capacity(key_value_pairs.len()); @@ -264,7 +304,8 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { }); logs.push(log); } - extend_patch_latency.observe(); + let extend_patch_latency = extend_patch_latency.observe(); + tracing::debug!("Tree traversal stage took {extend_patch_latency:?}"); let leaf_count = self.leaf_count; let (root_hash, patch) = self.finalize(); @@ -276,16 +317,64 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { (output, patch) } + pub fn greatest_key(mut self) -> Option { + Some(self.updater.load_greatest_key(self.db)?.0.full_key) + } + + pub fn extend_during_recovery(mut self, recovery_entries: Vec) -> PatchSet { + let (mut prev_key, mut prev_nibbles) = match self.updater.load_greatest_key(self.db) { + Some((leaf, nibbles)) => (Some(leaf.full_key), nibbles), + None => (None, Nibbles::EMPTY), + }; + + let extend_patch_latency = BLOCK_TIMINGS.extend_patch.start(); + for entry in recovery_entries { + if let Some(prev_key) = prev_key { + assert!( + entry.key > prev_key, + "Recovery entries must be ordered by increasing key (previous key: {prev_key:0>64x}, \ + offending entry: {entry:?})" + ); + } + prev_key = Some(entry.key); + + let key_nibbles = Nibbles::new(&entry.key, prev_nibbles.nibble_count()); + let parent_nibbles = prev_nibbles.common_prefix(&key_nibbles); + let (_, new_leaf) = + self.updater + .insert(entry.key, entry.value, &parent_nibbles, || entry.leaf_index); + prev_nibbles = new_leaf.nibbles; + self.leaf_count += 1; + } + let extend_patch_latency = extend_patch_latency.observe(); + tracing::debug!("Tree traversal stage took {extend_patch_latency:?}"); + + let (_, patch) = self.finalize(); + patch + } + fn finalize(self) -> (ValueHash, PatchSet) { + tracing::debug!( + "Finished updating tree; total leaf count: {}, stats: {:?}", + self.leaf_count, + self.updater.metrics + ); self.updater.metrics.report(); - let finalize_patch = BLOCK_TIMINGS.finalize_patch.start(); - let (root_hash, patch, stats) = - self.updater - .patch_set - .finalize(self.manifest, self.leaf_count, self.hasher); + let finalize_patch_latency = 
BLOCK_TIMINGS.finalize_patch.start();
+        let (root_hash, patch, stats) = self.updater.patch_set.finalize(
+            self.manifest,
+            self.leaf_count,
+            self.operation,
+            self.hasher,
+        );
         GENERAL_METRICS.leaf_count.set(self.leaf_count);
-        finalize_patch.observe();
+        let finalize_patch_latency = finalize_patch_latency.observe();
+        tracing::debug!(
+            "Tree finalization stage took {finalize_patch_latency:?}; hashed {:?}B in {:?}",
+            stats.hashed_bytes,
+            stats.hashing_duration
+        );
         stats.report();
 
         (root_hash, patch)
diff --git a/core/lib/merkle_tree/src/storage/patch.rs b/core/lib/merkle_tree/src/storage/patch.rs
index 1ba52fab5387..9e251bf01782 100644
--- a/core/lib/merkle_tree/src/storage/patch.rs
+++ b/core/lib/merkle_tree/src/storage/patch.rs
@@ -2,12 +2,16 @@
 
 use rayon::prelude::*;
 
-use std::collections::{hash_map::Entry, HashMap};
+use std::{
+    collections::{hash_map::Entry, HashMap},
+    iter,
+    time::Instant,
+};
 
 use crate::{
     hasher::{HashTree, HasherWithStats, MerklePath},
     metrics::HashingStats,
-    storage::{proofs::SUBTREE_COUNT, SortedKeys, TraverseOutcome},
+    storage::{proofs::SUBTREE_COUNT, Operation, SortedKeys, TraverseOutcome},
     types::{
         ChildRef, InternalNode, Key, LeafNode, Manifest, Nibbles, NibblesBytes, Node, NodeKey,
         Root, ValueHash, KEY_SIZE,
@@ -15,16 +19,32 @@
     },
     utils, Database,
 };
 
+/// Subset of a [`PatchSet`] corresponding to a specific version. All nodes in the subset
+/// have the same version.
+#[derive(Debug)]
+pub(super) struct PartialPatchSet {
+    pub root: Option<Root>,
+    // TODO (BFT-130): investigate most efficient ways to store key-value pairs:
+    // - `HashMap`s indexed by version
+    // - Full upper levels (i.e., `Vec<Option<Node>>`)
+    pub nodes: HashMap<NodeKey, Node>,
+}
+
+impl PartialPatchSet {
+    pub fn merge(&mut self, other: Self) {
+        self.root = other.root;
+        self.nodes.extend(other.nodes);
+    }
+}
+
 /// Raw set of database changes.
 #[derive(Debug, Default)]
-#[cfg_attr(test, derive(Clone))] // Used in tree consistency tests
 pub struct PatchSet {
     pub(super) manifest: Manifest,
-    pub(super) roots: HashMap<u64, Root>,
-    // TODO (BFT-130): investigate most efficient ways to store key-value pairs:
-    // - `HashMap`s indexed by version
-    // - Full upper levels (i.e., `Vec<Option<Node>>`)
-    pub(super) nodes_by_version: HashMap<u64, HashMap<NodeKey, Node>>,
+    pub(super) patches_by_version: HashMap<u64, PartialPatchSet>,
+    /// INVARIANT: If present, `patches_by_version` contains the corresponding version, and it
+    /// is smaller than all other keys in `patches_by_version`.
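+    /// For example, if a recovery chunk updates version 9 in place, `updated_version == Some(9)`,
+    /// and any versions inserted afterwards (10, 11, ...) are necessarily greater.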
+ pub(super) updated_version: Option, pub(super) stale_keys_by_version: HashMap>, } @@ -32,19 +52,26 @@ impl PatchSet { pub(crate) fn from_manifest(manifest: Manifest) -> Self { Self { manifest, - roots: HashMap::new(), - nodes_by_version: HashMap::new(), + patches_by_version: HashMap::new(), + updated_version: None, stale_keys_by_version: HashMap::new(), } } - pub(super) fn for_empty_root(manifest: Manifest, version: u64) -> Self { + pub(crate) fn for_empty_root(manifest: Manifest, version: u64) -> Self { let stale_keys = if let Some(prev_version) = version.checked_sub(1) { vec![Nibbles::EMPTY.with_version(prev_version)] } else { vec![] }; - Self::new(manifest, version, Root::Empty, HashMap::new(), stale_keys) + Self::new( + manifest, + version, + Root::Empty, + HashMap::new(), + stale_keys, + Operation::Insert, + ) } pub(super) fn new( @@ -53,29 +80,40 @@ impl PatchSet { root: Root, mut nodes: HashMap, mut stale_keys: Vec, + operation: Operation, ) -> Self { debug_assert_eq!(manifest.version_count, version + 1); + debug_assert!(nodes.keys().all(|key| key.version == version)); nodes.shrink_to_fit(); // We never insert into `nodes` later stale_keys.shrink_to_fit(); + let partial_patch = PartialPatchSet { + root: Some(root), + nodes, + }; + let updated_version = match &operation { + Operation::Insert => None, + Operation::Update => Some(version), + }; + Self { manifest, - roots: HashMap::from_iter([(version, root)]), - nodes_by_version: HashMap::from_iter([(version, nodes)]), - stale_keys_by_version: HashMap::from_iter([(version, stale_keys)]), + patches_by_version: HashMap::from([(version, partial_patch)]), + updated_version, + stale_keys_by_version: HashMap::from([(version, stale_keys)]), } } - pub(super) fn is_responsible_for_version(&self, version: u64) -> bool { + pub(super) fn is_new_version(&self, version: u64) -> bool { version >= self.manifest.version_count // this patch truncates `version` - || self.roots.contains_key(&version) + || (self.updated_version != Some(version) && self.patches_by_version.contains_key(&version)) } /// Calculates the number of hashes in `ChildRef`s copied from the previous versions /// of the tree. This allows to estimate redundancy of this `PatchSet`. 
pub(super) fn copied_hashes_count(&self) -> u64 { - let copied_hashes = self.nodes_by_version.iter().map(|(&version, nodes)| { - let copied_hashes = nodes.values().map(|node| { + let copied_hashes = self.patches_by_version.iter().map(|(&version, patch)| { + let copied_hashes = patch.nodes.values().map(|node| { let Node::Internal(node) = node else { return 0; }; @@ -94,17 +132,25 @@ impl PatchSet { &mut self.manifest } - pub(crate) fn roots_mut(&mut self) -> &mut HashMap { - &mut self.roots + pub(crate) fn root_mut(&mut self, version: u64) -> Option<&mut Root> { + let patch = self.patches_by_version.get_mut(&version)?; + patch.root.as_mut() + } + + pub(crate) fn remove_root(&mut self, version: u64) { + let patch = self.patches_by_version.get_mut(&version).unwrap(); + patch.root = None; } pub(crate) fn nodes_mut(&mut self) -> impl Iterator + '_ { - self.nodes_by_version.values_mut().flatten() + self.patches_by_version + .values_mut() + .flat_map(|patch| &mut patch.nodes) } pub(crate) fn remove_node(&mut self, key: &NodeKey) { - let nodes = self.nodes_by_version.get_mut(&key.version).unwrap(); - nodes.remove(key); + let patch = self.patches_by_version.get_mut(&key.version).unwrap(); + patch.nodes.remove(key); } } @@ -114,23 +160,13 @@ impl PatchSet { struct WorkingNode { inner: Node, prev_version: Option, - is_changed: bool, } impl WorkingNode { - fn unchanged(inner: Node, prev_version: u64) -> Self { - Self { - inner, - prev_version: Some(prev_version), - is_changed: false, - } - } - - fn changed(inner: Node, prev_version: Option) -> Self { + fn new(inner: Node, prev_version: Option) -> Self { Self { inner, prev_version, - is_changed: true, } } } @@ -148,7 +184,7 @@ pub(crate) struct LoadAncestorsResult { /// a Merkle tree. #[derive(Debug)] pub(crate) struct WorkingPatchSet { - version: u64, + root_version: u64, // Group changes by `nibble_count` (which is linearly tied to the tree depth: // `depth == nibble_count * 4`) so that we can compute hashes for all changed nodes // in a single traversal in `Self::finalize()`. @@ -156,23 +192,23 @@ pub(crate) struct WorkingPatchSet { } impl WorkingPatchSet { - pub fn new(version: u64, root: Root) -> Self { + pub fn new(root_version: u64, root: Root) -> Self { let changes_by_nibble_count = match root { Root::Filled { node, .. } => { - let root_node = WorkingNode::changed(node, version.checked_sub(1)); + let root_node = WorkingNode::new(node, root_version.checked_sub(1)); let root_level = [(*Nibbles::EMPTY.bytes(), root_node)]; vec![HashMap::from_iter(root_level)] } Root::Empty => Vec::new(), }; Self { - version, + root_version, changes_by_nibble_count, } } - pub fn version(&self) -> u64 { - self.version + pub fn root_version(&self) -> u64 { + self.root_version } pub fn get(&self, nibbles: &Nibbles) -> Option<&Node> { @@ -190,30 +226,20 @@ impl WorkingPatchSet { } let level = &mut self.changes_by_nibble_count[key.nibble_count()]; - // We use `Entry` API to ensure that `prev_version`is correctly retained + // We use `Entry` API to ensure that `prev_version` is correctly retained // in existing `WorkingNode`s. match level.entry(*key.bytes()) { Entry::Vacant(entry) => { - entry.insert(WorkingNode::changed(node, None)); + entry.insert(WorkingNode::new(node, None)); } Entry::Occupied(mut entry) => { entry.get_mut().inner = node; - entry.get_mut().is_changed = true; } } } /// Marks the retrieved node as changed. 
pub fn get_mut(&mut self, key: &Nibbles) -> Option<&mut Node> { - let level = self.changes_by_nibble_count.get_mut(key.nibble_count())?; - let node = level.get_mut(key.bytes())?; - node.is_changed = true; - Some(&mut node.inner) - } - - /// Analogue of [`Self::get_mut()`] that doesn't mark the node as changed. - /// This should only be used if the only updated part of the node is its cache. - pub fn get_mut_without_updating(&mut self, key: &Nibbles) -> Option<&mut Node> { let level = self.changes_by_nibble_count.get_mut(key.nibble_count())?; let node = level.get_mut(key.bytes())?; Some(&mut node.inner) @@ -235,10 +261,10 @@ impl WorkingPatchSet { /// The pushed nodes are not marked as changed, so this method should only be used /// if the nodes are loaded from DB. - pub fn push_level_from_db<'a>(&mut self, level: impl Iterator) { + fn push_level_from_db<'a>(&mut self, level: impl Iterator) { let level = level .map(|(key, node)| { - let node = WorkingNode::unchanged(node, key.version); + let node = WorkingNode::new(node, Some(key.version)); (*key.nibbles.bytes(), node) }) .collect(); @@ -254,7 +280,7 @@ impl WorkingPatchSet { let leaf = *leaf; let first_nibble = Nibbles::nibble(&leaf.full_key, 0); let mut internal_node = InternalNode::default(); - internal_node.insert_child_ref(first_nibble, ChildRef::leaf(self.version)); + internal_node.insert_child_ref(first_nibble, ChildRef::leaf(self.root_version)); self.insert(Nibbles::EMPTY, internal_node.clone().into()); self.insert(Nibbles::new(&leaf.full_key, 1), leaf.into()); internal_node @@ -270,7 +296,7 @@ impl WorkingPatchSet { /// Splits this patch set by the first nibble of the contained keys. pub fn split(self) -> [Self; SUBTREE_COUNT] { let mut parts = [(); SUBTREE_COUNT].map(|()| Self { - version: self.version, + root_version: self.root_version, changes_by_nibble_count: vec![HashMap::new(); self.changes_by_nibble_count.len()], }); @@ -293,7 +319,7 @@ impl WorkingPatchSet { } pub fn merge(&mut self, other: Self) { - debug_assert_eq!(self.version, other.version); + debug_assert_eq!(self.root_version, other.root_version); let other_len = other.changes_by_nibble_count.len(); if self.changes_by_nibble_count.len() < other_len { @@ -318,85 +344,118 @@ impl WorkingPatchSet { } } - fn remove_unchanged_nodes(&mut self) { - // Do not remove the root node in any case since it has special role in finalization. - for level in self.changes_by_nibble_count.iter_mut().skip(1) { - level.retain(|_, node| node.is_changed); - } - } - - fn stale_keys(&self) -> Vec { - let levels = self.changes_by_nibble_count.iter().enumerate(); - let stale_keys = levels.flat_map(|(nibble_count, level)| { - level.iter().filter_map(move |(nibbles, node)| { - node.prev_version.map(|prev_version| { - let nibbles = Nibbles::from_parts(*nibbles, nibble_count); - nibbles.with_version(prev_version) - }) - }) - }); - stale_keys.collect() - } - /// Computes hashes and serializes this changeset. 
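+    /// Nodes replaced by a newer version are recorded as stale keys. For `Operation::Update`
+    /// (used during recovery), every node in the change set is rewritten under the updated
+    /// version, while `Operation::Insert` only writes nodes whose version has changed.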
- pub fn finalize( - mut self, + pub(super) fn finalize( + self, manifest: Manifest, leaf_count: u64, + operation: Operation, hasher: &dyn HashTree, ) -> (ValueHash, PatchSet, HashingStats) { - self.remove_unchanged_nodes(); - let stale_keys = self.stale_keys(); - let metrics = HashingStats::default(); - + let mut stats = HashingStats::default(); + let (root_hash, patch) = self.finalize_inner( + manifest, + leaf_count, + operation, + |nibble_count, level_changes| { + let started_at = Instant::now(); + let tree_level = nibble_count * 4; + // `into_par_iter()` below uses `rayon` to parallelize hash computations. + let output = level_changes + .into_par_iter() + .map_init( + || hasher.with_stats(&stats), + |hasher, (nibbles, node)| { + let nibbles = Nibbles::from_parts(nibbles, nibble_count); + (nibbles, Some(node.inner.hash(hasher, tree_level)), node) + }, + ) + .collect::>(); + stats.hashing_duration += started_at.elapsed(); + output + }, + ); + let root_hash = root_hash.unwrap_or_else(|| hasher.empty_tree_hash()); + (root_hash, patch, stats) + } + + fn finalize_inner( + self, + manifest: Manifest, + leaf_count: u64, + operation: Operation, + mut map_level_changes: impl FnMut(usize, HashMap) -> I, + ) -> (Option, PatchSet) + where + I: IntoIterator, WorkingNode)>, + { let mut changes_by_nibble_count = self.changes_by_nibble_count; - if changes_by_nibble_count.is_empty() { + let len = changes_by_nibble_count.iter().map(HashMap::len).sum(); + if len == 0 { // The tree is empty and there is no root present. - let patch = PatchSet::for_empty_root(manifest, self.version); - return (hasher.empty_tree_hash(), patch, metrics); + return (None, PatchSet::for_empty_root(manifest, self.root_version)); } - let len = changes_by_nibble_count.iter().map(HashMap::len).sum(); let mut patched_nodes = HashMap::with_capacity(len); + let mut stale_keys = vec![]; // Compute hashes for the changed nodes with decreasing nibble count (i.e., topologically // sorted) and store the computed hash in the parent nodes. while let Some(level_changes) = changes_by_nibble_count.pop() { let nibble_count = changes_by_nibble_count.len(); - let tree_level = nibble_count * 4; - // `into_par_iter()` below uses `rayon` to parallelize hash computations. - let hashed_nodes: Vec<_> = level_changes - .into_par_iter() - .map_init( - || hasher.with_stats(&metrics), - |hasher, (nibbles, node)| { - let nibbles = Nibbles::from_parts(nibbles, nibble_count); - (nibbles, node.inner.hash(hasher, tree_level), node) - }, - ) - .collect(); + let hashed_nodes = map_level_changes(nibble_count, level_changes); for (nibbles, node_hash, node) in hashed_nodes { - if let Some(upper_level_changes) = changes_by_nibble_count.last_mut() { - let (parent_nibbles, last_nibble) = nibbles.split_last().unwrap(); - let parent = upper_level_changes.get_mut(parent_nibbles.bytes()).unwrap(); - let Node::Internal(parent) = &mut parent.inner else { - unreachable!("Node parent must be an internal node"); + let node_version = + if let Some(upper_level_changes) = changes_by_nibble_count.last_mut() { + let (parent_nibbles, last_nibble) = nibbles.split_last().unwrap(); + let parent = upper_level_changes.get_mut(parent_nibbles.bytes()).unwrap(); + let Node::Internal(parent) = &mut parent.inner else { + unreachable!("Node parent must be an internal node"); + }; + // ^ `unwrap()`s are safe by construction: the parent of any changed node + // is an `InternalNode` that must be in the change set as well. 
+ let self_ref = parent.child_ref_mut(last_nibble).unwrap(); + // ^ `unwrap()` is safe by construction: the parent node must reference + // the currently considered child. + if let Some(node_hash) = node_hash { + self_ref.hash = node_hash; + } + self_ref.version + } else { + // We're at the root node level. + if matches!(operation, Operation::Insert) { + // The root node is always replaced for inserts and is never replaced for updated. + if let Some(prev_version) = node.prev_version { + stale_keys.push(nibbles.with_version(prev_version)); + } + } + + let root = Root::new(leaf_count, node.inner); + let patch = PatchSet::new( + manifest, + self.root_version, + root, + patched_nodes, + stale_keys, + operation, + ); + return (node_hash, patch); }; - // ^ `unwrap()`s are safe by construction: the parent of any changed node - // is an `InternalNode` that must be in the change set as well. - let self_ref = parent.child_ref_mut(last_nibble).unwrap(); - // ^ `unwrap()` is safe by construction: the parent node must reference - // the currently considered child. - self_ref.hash = node_hash; - } else { - // We're at the root node level. - let root = Root::new(leaf_count, node.inner); - let patch = - PatchSet::new(manifest, self.version, root, patched_nodes, stale_keys); - return (node_hash, patch, metrics); - } - patched_nodes.insert(nibbles.with_version(self.version), node.inner); + let was_replaced = node + .prev_version + .map_or(true, |prev_version| prev_version < node_version); + if was_replaced { + if let Some(prev_version) = node.prev_version { + stale_keys.push(nibbles.with_version(prev_version)); + } + } + if was_replaced || matches!(operation, Operation::Update) { + // All nodes in the patch set are updated for the update operation, regardless + // of the version change. For insert operations, we only should update nodes + // with the changed version. + patched_nodes.insert(nibbles.with_version(node_version), node.inner); + } } } unreachable!("We should have returned when the root node was encountered above"); @@ -408,24 +467,19 @@ impl WorkingPatchSet { Some(node.inner) } - pub fn finalize_without_hashing(mut self, manifest: Manifest, leaf_count: u64) -> PatchSet { - self.remove_unchanged_nodes(); - let stale_keys = self.stale_keys(); - - let Some(root) = self.take_root() else { - return PatchSet::for_empty_root(manifest, self.version); - }; - let root = Root::new(leaf_count, root); - - let levels = self.changes_by_nibble_count.drain(1..); - let nodes = levels.enumerate().flat_map(|(i, level)| { - let nibble_count = i + 1; - level.into_iter().map(move |(nibbles, node)| { - let nibbles = Nibbles::from_parts(nibbles, nibble_count); - (nibbles.with_version(self.version), node.inner) - }) - }); - PatchSet::new(manifest, self.version, root, nodes.collect(), stale_keys) + pub fn finalize_without_hashing(self, manifest: Manifest, leaf_count: u64) -> PatchSet { + let (_, patch) = self.finalize_inner( + manifest, + leaf_count, + Operation::Insert, + |nibble_count, level_changes| { + level_changes.into_iter().map(move |(nibbles, node)| { + let nibbles = Nibbles::from_parts(nibbles, nibble_count); + (nibbles, None, node) + }) + }, + ); + patch } /// Loads ancestor nodes for all keys in `sorted_keys`. 
@@ -530,6 +584,36 @@ impl WorkingPatchSet { unreachable!("We must have encountered a leaf or missing node when traversing"); } + pub fn load_greatest_key( + &mut self, + db: &DB, + ) -> Option<(LeafNode, LoadAncestorsResult)> { + let mut nibbles = Nibbles::EMPTY; + let mut db_reads = 0; + let greatest_leaf = loop { + match self.get(&nibbles) { + None => return None, + Some(Node::Leaf(leaf)) => break *leaf, + Some(Node::Internal(node)) => { + let (next_nibble, child_ref) = node.last_child_ref(); + nibbles = nibbles.push(next_nibble).unwrap(); + // ^ `unwrap()` is safe; there can be no internal nodes on the bottommost tree level + let child_key = nibbles.with_version(child_ref.version); + let child_node = db.tree_node(&child_key, child_ref.is_leaf).unwrap(); + // ^ `unwrap()` is safe by construction + self.push_level_from_db(iter::once((&child_key, child_node))); + db_reads += 1; + } + } + }; + + let result = LoadAncestorsResult { + longest_prefixes: vec![nibbles], + db_reads, + }; + Some((greatest_leaf, result)) + } + /// Creates a Merkle proof for the specified `key`, which has given `parent_nibbles` /// in this patch set. `root_nibble_count` specifies to which level the proof needs to be constructed. pub(crate) fn create_proof( @@ -572,7 +656,7 @@ impl WorkingPatchSet { break; } - let parent = self.get_mut_without_updating(&parent_nibbles); + let parent = self.get_mut(&parent_nibbles); let Some(Node::Internal(parent)) = parent else { unreachable!() }; @@ -594,7 +678,10 @@ impl WorkingPatchSet { #[cfg(test)] mod tests { use super::*; - use crate::types::{Key, LeafNode}; + use crate::{ + storage::Storage, + types::{Key, LeafNode}, + }; fn patch_len(patch: &WorkingPatchSet) -> usize { patch.changes_by_nibble_count.iter().map(HashMap::len).sum() @@ -644,4 +731,50 @@ mod tests { } assert_eq!(patch_len(&merged), all_nibbles.len() + 1); } + + #[test] + fn loading_greatest_key() { + // Test empty DB. + let mut patch = WorkingPatchSet::new(0, Root::Empty); + let load_result = patch.load_greatest_key(&PatchSet::default()); + assert!(load_result.is_none()); + + // Test DB with a single entry. + let mut db = PatchSet::default(); + let key = Key::from(1234_u64); + let (_, patch) = Storage::new(&db, &(), 0, true).extend(vec![(key, ValueHash::zero())]); + db.apply_patch(patch); + + let mut patch = WorkingPatchSet::new(1, db.root(0).unwrap()); + let (greatest_leaf, load_result) = patch.load_greatest_key(&db).unwrap(); + assert_eq!(greatest_leaf.full_key, key); + assert_eq!(load_result.longest_prefixes.len(), 1); + assert_eq!(load_result.longest_prefixes[0].nibble_count(), 0); + assert_eq!(load_result.db_reads, 0); + + // Test DB with multiple entries. 
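+        // (Each additional level of internal nodes costs exactly one DB read when
+        // descending to the greatest key, as the `db_reads` assertions below check.)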
+        let other_key = Key::from_little_endian(&[0xa0; 32]);
+        let (_, patch) =
+            Storage::new(&db, &(), 1, true).extend(vec![(other_key, ValueHash::zero())]);
+        db.apply_patch(patch);
+
+        let mut patch = WorkingPatchSet::new(2, db.root(1).unwrap());
+        let (greatest_leaf, load_result) = patch.load_greatest_key(&db).unwrap();
+        assert_eq!(greatest_leaf.full_key, other_key);
+        assert_eq!(load_result.longest_prefixes.len(), 1);
+        assert_eq!(load_result.longest_prefixes[0].nibble_count(), 1);
+        assert_eq!(load_result.db_reads, 1);
+
+        let greater_key = Key::from_little_endian(&[0xaf; 32]);
+        let (_, patch) =
+            Storage::new(&db, &(), 2, true).extend(vec![(greater_key, ValueHash::zero())]);
+        db.apply_patch(patch);
+
+        let mut patch = WorkingPatchSet::new(3, db.root(2).unwrap());
+        let (greatest_leaf, load_result) = patch.load_greatest_key(&db).unwrap();
+        assert_eq!(greatest_leaf.full_key, greater_key);
+        assert_eq!(load_result.longest_prefixes.len(), 1);
+        assert_eq!(load_result.longest_prefixes[0].nibble_count(), 2);
+        assert_eq!(load_result.db_reads, 2);
+    }
 }
diff --git a/core/lib/merkle_tree/src/storage/proofs.rs b/core/lib/merkle_tree/src/storage/proofs.rs
index a9ad624225d1..9e2d172bd6bd 100644
--- a/core/lib/merkle_tree/src/storage/proofs.rs
+++ b/core/lib/merkle_tree/src/storage/proofs.rs
@@ -215,7 +215,7 @@ impl TreeUpdater {
         mut root: InternalNode,
         logs: Vec<(usize, TreeLogEntryWithProof)>,
     ) -> Vec<TreeLogEntryWithProof> {
-        let version = self.patch_set.version();
+        let version = self.patch_set.root_version();
         let mut root_hash = root.hash(hasher, 0);

         // Check the kind of each of subtrees. This is used later to ensure the correct
@@ -348,6 +348,11 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> {
         root: InternalNode,
         logs: Vec<(usize, TreeLogEntryWithProof)>,
     ) -> (BlockOutputWithProofs, PatchSet) {
+        tracing::debug!(
+            "Finished updating tree; total leaf count: {}, stats: {:?}",
+            self.leaf_count,
+            self.updater.metrics
+        );
         let logs = self.updater.finalize_logs(hasher, root, logs);
         self.updater.metrics.report();

@@ -499,7 +504,7 @@ mod tests {
     fn computing_leaf_indices() {
         let db = prepare_db();
         let (instructions, expected_indices) = get_instructions_and_leaf_indices();
-        let mut storage = Storage::new(&db, &(), 1);
+        let mut storage = Storage::new(&db, &(), 1, true);
         let sorted_keys = SortedKeys::new(instructions.iter().map(|(key, _)| *key));
         let parent_nibbles = storage.updater.load_ancestors(&sorted_keys, &db);

@@ -511,7 +516,7 @@ mod tests {
     fn prepare_db() -> PatchSet {
         let mut db = PatchSet::default();
         let (_, patch) =
-            Storage::new(&db, &(), 0).extend(vec![(byte_key(2), HASH), (byte_key(1), HASH)]);
+            Storage::new(&db, &(), 0, true).extend(vec![(byte_key(2), HASH), (byte_key(1), HASH)]);
         db.apply_patch(patch);
         db
     }
@@ -538,7 +543,7 @@ mod tests {
     fn extending_storage_with_proofs() {
         let db = prepare_db();
         let (instructions, expected_indices) = get_instructions_and_leaf_indices();
-        let storage = Storage::new(&db, &(), 1);
+        let storage = Storage::new(&db, &(), 1, true);
         let (block_output, _) = storage.extend_with_proofs(instructions);

         assert_eq!(block_output.leaf_count, 4);
@@ -557,7 +562,7 @@ mod tests {
     #[test]
     fn proofs_for_empty_storage() {
         let db = PatchSet::default();
-        let storage = Storage::new(&db, &(), 0);
+        let storage = Storage::new(&db, &(), 0, true);
         let instructions = vec![
             (byte_key(1), TreeInstruction::Read),
             (byte_key(2), TreeInstruction::Read),
@@ -571,6 +576,6 @@ mod tests {
             .all(|log| matches!(log.base, TreeLogEntry::ReadMissingKey));
         assert!(all_misses);

-        assert_matches!(patch.roots[&0], Root::Empty);
+        assert_matches!(patch.patches_by_version[&0].root, Some(Root::Empty));
     }
 }
diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs
index b9aca28fd285..6c6a3a18105e 100644
--- a/core/lib/merkle_tree/src/storage/rocksdb.rs
+++ b/core/lib/merkle_tree/src/storage/rocksdb.rs
@@ -35,6 +35,10 @@ impl NamedColumnFamily for MerkleTreeColumnFamily {
             Self::StaleKeys => "stale_keys",
         }
     }
+
+    fn requires_tuning(&self) -> bool {
+        matches!(self, Self::Tree)
+    }
 }

 /// Main [`Database`] implementation wrapping a [`RocksDB`] reference.
@@ -194,26 +198,29 @@ impl Database for RocksDBWrapper {
         patch.manifest.serialize(&mut node_bytes);
         write_batch.put_cf(tree_cf, Self::MANIFEST_KEY, &node_bytes);

-        for (root_version, root) in patch.roots {
-            node_bytes.clear();
-            let root_key = NodeKey::empty(root_version);
-            // Delete the key range corresponding to the entire new version. This removes
-            // potential garbage left after reverting the tree to a previous version.
-            let next_root_key = NodeKey::empty(root_version + 1);
-            let keys_to_delete = &*root_key.to_db_key()..&*next_root_key.to_db_key();
-            write_batch.delete_range_cf(tree_cf, keys_to_delete);
-
-            root.serialize(&mut node_bytes);
-            metrics.update_node_bytes(&Nibbles::EMPTY, &node_bytes);
-            write_batch.put_cf(tree_cf, &root_key.to_db_key(), &node_bytes);
-        }
-
-        let all_nodes = patch.nodes_by_version.into_values().flatten();
-        for (node_key, node) in all_nodes {
-            node_bytes.clear();
-            node.serialize(&mut node_bytes);
-            metrics.update_node_bytes(&node_key.nibbles, &node_bytes);
-            write_batch.put_cf(tree_cf, &node_key.to_db_key(), &node_bytes);
+        for (version, sub_patch) in patch.patches_by_version {
+            let is_update = patch.updated_version == Some(version);
+            let root_key = NodeKey::empty(version);
+            if !is_update {
+                // Delete the key range corresponding to the entire new version. This removes
+                // potential garbage left after reverting the tree to a previous version.
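+                // (When `updated_version` is set, e.g. while recovery chunks are applied
+                // repeatedly to the same version, this deletion is skipped; wiping the range
+                // would discard nodes persisted by earlier chunks for that version.)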
+                let next_root_key = NodeKey::empty(version + 1);
+                let keys_to_delete = &*root_key.to_db_key()..&*next_root_key.to_db_key();
+                write_batch.delete_range_cf(tree_cf, keys_to_delete);
+            }
+
+            if let Some(root) = sub_patch.root {
+                node_bytes.clear();
+                root.serialize(&mut node_bytes);
+                metrics.update_node_bytes(&Nibbles::EMPTY, &node_bytes);
+                write_batch.put_cf(tree_cf, &root_key.to_db_key(), &node_bytes);
+            }
+            for (node_key, node) in sub_patch.nodes {
+                node_bytes.clear();
+                node.serialize(&mut node_bytes);
+                metrics.update_node_bytes(&node_key.nibbles, &node_bytes);
+                write_batch.put_cf(tree_cf, &node_key.to_db_key(), &node_bytes);
+            }
         }

         let stale_keys_cf = MerkleTreeColumnFamily::StaleKeys;
diff --git a/core/lib/merkle_tree/src/storage/serialization.rs b/core/lib/merkle_tree/src/storage/serialization.rs
index 3751f619124f..15d67604cc04 100644
--- a/core/lib/merkle_tree/src/storage/serialization.rs
+++ b/core/lib/merkle_tree/src/storage/serialization.rs
@@ -200,6 +200,7 @@ impl TreeTags {
         let mut architecture = None;
         let mut hasher = None;
         let mut depth = None;
+        let mut is_recovering = false;

         for _ in 0..tag_count {
             let key = Self::deserialize_str(bytes)?;
@@ -216,6 +217,15 @@ impl TreeTags {
                     })?;
                     depth = Some(parsed);
                 }
+                "is_recovering" => {
+                    let parsed = value.parse::<bool>().map_err(|err| {
+                        DeserializeErrorKind::MalformedTag {
+                            name: "is_recovering",
+                            err: err.into(),
+                        }
+                    })?;
+                    is_recovering = parsed;
+                }
                 _ => return Err(DeserializeErrorKind::UnknownTag(key.to_owned()).into()),
             }
         }
@@ -223,6 +233,7 @@ impl TreeTags {
             architecture: architecture.ok_or(DeserializeErrorKind::MissingTag("architecture"))?,
             hasher: hasher.ok_or(DeserializeErrorKind::MissingTag("hasher"))?,
             depth: depth.ok_or(DeserializeErrorKind::MissingTag("depth"))?,
+            is_recovering,
         })
     }

@@ -244,13 +255,18 @@ impl TreeTags {
     }

     fn serialize(&self, buffer: &mut Vec<u8>) {
-        leb128::write::unsigned(buffer, 3).unwrap();
+        let entry_count = 3 + u64::from(self.is_recovering);
+        leb128::write::unsigned(buffer, entry_count).unwrap();
         Self::serialize_str(buffer, "architecture");
         Self::serialize_str(buffer, &self.architecture);
         Self::serialize_str(buffer, "depth");
         Self::serialize_str(buffer, &self.depth.to_string());
         Self::serialize_str(buffer, "hasher");
         Self::serialize_str(buffer, &self.hasher);
+        if self.is_recovering {
+            Self::serialize_str(buffer, "is_recovering");
+            Self::serialize_str(buffer, "true");
+        }
     }
 }

@@ -300,6 +316,24 @@ mod tests {
         assert_eq!(manifest_copy, manifest);
     }

+    #[test]
+    fn serializing_manifest_with_recovery_flag() {
+        let mut manifest = Manifest::new(42, &());
+        manifest.tags.as_mut().unwrap().is_recovering = true;
+        let mut buffer = vec![];
+        manifest.serialize(&mut buffer);
+        assert_eq!(buffer[0], 42); // version count
+        assert_eq!(buffer[1], 4); // number of tags
+        assert_eq!(
+            buffer[2..],
+            *b"\x0Carchitecture\x06AR16MT\x05depth\x03256\x06hasher\x08no_op256\x0Dis_recovering\x04true"
+        );
+        // ^ length-prefixed tag names and values
+
+        let manifest_copy = Manifest::deserialize(&buffer).unwrap();
+        assert_eq!(manifest_copy, manifest);
+    }
+
     #[test]
     fn manifest_serialization_errors() {
         let manifest = Manifest::new(42, &());
diff --git a/core/lib/merkle_tree/src/storage/tests.rs b/core/lib/merkle_tree/src/storage/tests.rs
index 64241c05b93f..02ec9d4c800d 100644
--- a/core/lib/merkle_tree/src/storage/tests.rs
+++ b/core/lib/merkle_tree/src/storage/tests.rs
@@ -12,6 +12,7 @@ use crate::{
     hasher::{HasherWithStats, MerklePath},
     types::{NodeKey, TreeInstruction, KEY_SIZE},
 };
+use zksync_crypto::hasher::blake2::Blake2Hasher;
 use zksync_types::{H256, U256};

 pub(super) const FIRST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]);
@@ -35,14 +36,21 @@ pub(super) fn create_patch(
     nodes: HashMap<NodeKey, Node>,
 ) -> PatchSet {
     let manifest = Manifest::new(latest_version + 1, &());
-    PatchSet::new(manifest, latest_version, root, nodes, vec![])
+    PatchSet::new(
+        manifest,
+        latest_version,
+        root,
+        nodes,
+        vec![],
+        Operation::Insert,
+    )
 }

 #[test]
 fn inserting_entries_in_empty_database() {
     let db = PatchSet::default();
     let mut updater = TreeUpdater::new(0, Root::Empty);
-    assert_eq!(updater.patch_set.version(), 0);
+    assert_eq!(updater.patch_set.root_version(), 0);
     assert!(updater.patch_set.get(&Nibbles::EMPTY).is_none());

     let sorted_keys = SortedKeys::new([FIRST_KEY, SECOND_KEY, THIRD_KEY].into_iter());
@@ -166,7 +174,7 @@ fn changing_child_ref_type() {
 #[test]
 fn inserting_node_in_non_empty_database() {
     let mut db = PatchSet::default();
-    let storage = Storage::new(&db, &(), 0);
+    let storage = Storage::new(&db, &(), 0, true);
     let kvs = vec![(FIRST_KEY, H256([1; 32])), (SECOND_KEY, H256([2; 32]))];
     let (_, patch) = storage.extend(kvs);
     db.apply_patch(patch);
@@ -220,7 +228,7 @@ fn inserting_node_in_non_empty_database() {
 #[test]
 fn inserting_node_in_non_empty_database_with_moved_key() {
     let mut db = PatchSet::default();
-    let storage = Storage::new(&db, &(), 0);
+    let storage = Storage::new(&db, &(), 0, true);
     let kvs = vec![(FIRST_KEY, H256([1; 32])), (THIRD_KEY, H256([3; 32]))];
     let (_, patch) = storage.extend(kvs);
     db.apply_patch(patch);
@@ -290,22 +298,22 @@ fn finalize_merkle_path(mut path: MerklePath, hasher: &HasherWithStats<'_>) -> V
 #[test]
 fn reading_keys_does_not_change_child_version() {
     let mut db = PatchSet::default();
-    let storage = Storage::new(&db, &(), 0);
+    let storage = Storage::new(&db, &(), 0, true);
     let kvs = vec![(FIRST_KEY, H256([0; 32])), (SECOND_KEY, H256([1; 32]))];
     let (_, patch) = storage.extend(kvs);
     db.apply_patch(patch);

-    let storage = Storage::new(&db, &(), 1);
+    let storage = Storage::new(&db, &(), 1, true);
     let instructions = vec![
         (FIRST_KEY, TreeInstruction::Read),
         (E_KEY, TreeInstruction::Write(H256([2; 32]))),
     ];
     let (_, patch) = storage.extend_with_proofs(instructions);

-    let Root::Filled {
+    let Some(Root::Filled {
         leaf_count,
         node: Node::Internal(node),
-    } = &patch.roots[&1]
+    }) = &patch.patches_by_version[&1].root
     else {
         panic!("unexpected root");
     };
@@ -317,15 +325,15 @@ fn reading_keys_does_not_change_child_version() {
 #[test]
 fn read_ops_are_not_reflected_in_patch() {
     let mut db = PatchSet::default();
-    let storage = Storage::new(&db, &(), 0);
+    let storage = Storage::new(&db, &(), 0, true);
     let kvs = vec![(FIRST_KEY, H256([0; 32])), (SECOND_KEY, H256([1; 32]))];
     let (_, patch) = storage.extend(kvs);
     db.apply_patch(patch);

-    let storage = Storage::new(&db, &(), 1);
+    let storage = Storage::new(&db, &(), 1, true);
     let instructions = vec![(FIRST_KEY, TreeInstruction::Read)];
     let (_, patch) = storage.extend_with_proofs(instructions);
-    assert!(patch.nodes_by_version[&1].is_empty());
+    assert!(patch.patches_by_version[&1].nodes.is_empty());
 }

 // This maps small indices to keys that differ in the starting nibbles.
@@ -339,7 +347,7 @@ fn test_read_instructions_do_not_lead_to_copied_nodes(writes_per_block: u64) {
     // Write some keys into the database.
     let mut key_count = writes_per_block;
     let mut database = PatchSet::default();
-    let storage = Storage::new(&database, &(), 0);
+    let storage = Storage::new(&database, &(), 0, true);
     let kvs = (0..key_count)
         .map(|i| (big_endian_key(i), H256::zero()))
         .collect();
@@ -360,7 +368,7 @@ fn test_read_instructions_do_not_lead_to_copied_nodes(writes_per_block: u64) {
         instructions.shuffle(&mut rng);
         key_count += writes_per_block;

-        let storage = Storage::new(&database, &(), 1);
+        let storage = Storage::new(&database, &(), 1, true);
         let (_, patch) = storage.extend_with_proofs(instructions);
         assert_no_copied_nodes(&database, &patch);
         database.apply_patch(patch);
@@ -368,13 +376,13 @@
 }

 fn assert_no_copied_nodes(database: &PatchSet, patch: &PatchSet) {
-    assert_eq!(patch.nodes_by_version.len(), 1);
+    assert_eq!(patch.patches_by_version.len(), 1);

-    let (&version, nodes) = patch.nodes_by_version.iter().next().unwrap();
-    for (key, node) in nodes {
+    let (&version, patch) = patch.patches_by_version.iter().next().unwrap();
+    for (key, node) in &patch.nodes {
         let prev_node = (0..version).rev().find_map(|v| {
             let prev_key = key.nibbles.with_version(v);
-            database.nodes_by_version[&v].get(&prev_key)
+            database.patches_by_version[&v].nodes.get(&prev_key)
         });
         if let Some(prev_node) = prev_node {
             assert_ne!(node, prev_node, "node at {key:?} is copied");
@@ -395,7 +403,7 @@ fn test_replaced_keys_are_correctly_tracked(writes_per_block: usize, with_proofs
     // Write some keys into the database.
     let mut database = PatchSet::default();
-    let storage = Storage::new(&database, &(), 0);
+    let storage = Storage::new(&database, &(), 0, true);
     let kvs = (0..100)
         .map(|i| (big_endian_key(i), H256::zero()))
         .collect();
@@ -411,7 +419,7 @@
             .into_iter()
             .map(|i| (big_endian_key(i), H256::zero()));

-        let storage = Storage::new(&database, &(), new_version);
+        let storage = Storage::new(&database, &(), new_version, true);
         let patch = if with_proofs {
             let instructions = updates.map(|(key, value)| (key, TreeInstruction::Write(value)));
             storage.extend_with_proofs(instructions.collect()).1
@@ -440,17 +448,18 @@ fn replaced_keys_are_correctly_tracked_with_proofs() {
 }

 fn assert_replaced_keys(db: &PatchSet, patch: &PatchSet) {
-    assert_eq!(patch.nodes_by_version.len(), 1);
-    let (&version, patch_nodes) = patch.nodes_by_version.iter().next().unwrap();
+    assert_eq!(patch.patches_by_version.len(), 1);
+    let (&version, sub_patch) = patch.patches_by_version.iter().next().unwrap();
     assert_eq!(patch.stale_keys_by_version.len(), 1);
     let replaced_keys = patch.stale_keys_by_version.values().next().unwrap();

-    let expected_replaced_keys = patch_nodes.keys().filter_map(|key| {
+    let expected_replaced_keys = sub_patch.nodes.keys().filter_map(|key| {
         (0..key.version).rev().find_map(|v| {
             let prev_key = key.nibbles.with_version(v);
             let contains_key = db
-                .nodes_by_version
+                .patches_by_version
                 .get(&prev_key.version)?
+                .nodes
                 .contains_key(&prev_key);
             contains_key.then_some(prev_key)
         })
@@ -469,18 +478,345 @@ fn tree_handles_keys_at_terminal_level() {
     let kvs = (0_u32..100)
         .map(|i| (Key::from(i), ValueHash::zero()))
         .collect();
-    let (_, patch) = Storage::new(&db, &(), 0).extend(kvs);
+    let (_, patch) = Storage::new(&db, &(), 0, true).extend(kvs);
     db.apply_patch(patch);

     // Overwrite a key and check that we don't panic.
     let new_kvs = vec![(Key::from(0), ValueHash::from_low_u64_be(1))];
-    let (_, patch) = Storage::new(&db, &(), 1).extend(new_kvs);
+    let (_, patch) = Storage::new(&db, &(), 1, true).extend(new_kvs);

-    assert_eq!(patch.roots[&1].leaf_count(), 100);
-    assert_eq!(patch.nodes_by_version[&1].len(), 2 * KEY_SIZE); // root is counted separately
-    for (key, node) in &patch.nodes_by_version[&1] {
+    assert_eq!(
+        patch.patches_by_version[&1]
+            .root
+            .as_ref()
+            .unwrap()
+            .leaf_count(),
+        100
+    );
+    assert_eq!(patch.patches_by_version[&1].nodes.len(), 2 * KEY_SIZE); // root is counted separately
+    for (key, node) in &patch.patches_by_version[&1].nodes {
         let is_terminal = key.nibbles.nibble_count() == 2 * KEY_SIZE;
         assert_eq!(is_terminal, matches!(node, Node::Leaf(_)));
     }
     assert_eq!(patch.stale_keys_by_version[&1].len(), 2 * KEY_SIZE + 1);
 }
+
+#[test]
+fn recovery_flattens_node_versions() {
+    let recovery_version = 100;
+    let recovery_entries = (0_u64..10).map(|i| RecoveryEntry {
+        key: Key::from(i) << 252, // the first key nibbles are distinct
+        value: ValueHash::zero(),
+        leaf_index: i + 1,
+    });
+    let patch = Storage::new(&PatchSet::default(), &(), recovery_version, false)
+        .extend_during_recovery(recovery_entries.collect());
+    assert_eq!(patch.patches_by_version.len(), 1);
+    let (updated_version, patch) = patch.patches_by_version.into_iter().next().unwrap();
+    assert_eq!(updated_version, recovery_version);
+
+    let root = patch.root.unwrap();
+    assert_eq!(root.leaf_count(), 10);
+    let Root::Filled {
+        node: Node::Internal(root_node),
+        ..
+    } = &root
+    else {
+        panic!("Unexpected root: {root:?}");
+    };
+    for nibble in 0..10 {
+        assert_eq!(
+            root_node.child_ref(nibble).unwrap().version,
+            recovery_version
+        );
+        let expected_key = Nibbles::single(nibble).with_version(recovery_version);
+        assert_matches!(patch.nodes[&expected_key], Node::Leaf { .. });
+    }
+}
+
+fn test_recovery_with_node_hierarchy(chunk_size: usize) {
+    let recovery_version = 100;
+    let recovery_entries = (0_u64..256).map(|i| RecoveryEntry {
+        key: Key::from(i) << 248, // the first two key nibbles are distinct
+        value: ValueHash::zero(),
+        leaf_index: i + 1,
+    });
+    let recovery_entries: Vec<_> = recovery_entries.collect();
+
+    let mut db = PatchSet::default();
+    for recovery_chunk in recovery_entries.chunks(chunk_size) {
+        let patch = Storage::new(&db, &(), recovery_version, false)
+            .extend_during_recovery(recovery_chunk.to_vec());
+        db.apply_patch(patch);
+    }
+    assert_eq!(db.updated_version, Some(recovery_version));
+    let patch = db.patches_by_version.remove(&recovery_version).unwrap();
+
+    let root = patch.root.unwrap();
+    assert_eq!(root.leaf_count(), 256);
+    let Root::Filled {
+        node: Node::Internal(root_node),
+        ..
+    } = &root
+    else {
+        panic!("Unexpected root: {root:?}");
+    };
+
+    for nibble in 0..16 {
+        let child_ref = root_node.child_ref(nibble).unwrap();
+        assert!(!child_ref.is_leaf);
+        assert_eq!(child_ref.version, recovery_version);
+
+        let internal_node_key = Nibbles::single(nibble).with_version(recovery_version);
+        let node = &patch.nodes[&internal_node_key];
+        let Node::Internal(node) = node else {
+            panic!("Unexpected upper-level node: {node:?}");
+        };
+        assert_eq!(node.child_count(), 16);
+
+        for (second_nibble, child_ref) in node.children() {
+            let i = nibble * 16 + second_nibble;
+            assert!(child_ref.is_leaf);
+            assert_eq!(child_ref.version, recovery_version);
+            let leaf_key = Nibbles::new(&(Key::from(i) << 248), 2).with_version(recovery_version);
+            assert_matches!(patch.nodes[&leaf_key], Node::Leaf { .. });
+        }
+    }
+}
+
+#[test]
+fn recovery_with_node_hierarchy() {
+    test_recovery_with_node_hierarchy(256); // single chunk
+    for chunk_size in [4, 5, 20, 69, 127, 128] {
+        println!("Testing recovery with chunk size {chunk_size}");
+        test_recovery_with_node_hierarchy(chunk_size);
+    }
+}
+
+fn test_recovery_with_deep_node_hierarchy(chunk_size: usize) {
+    let recovery_version = 1_000;
+    let recovery_entries = (0_u64..256).map(|i| RecoveryEntry {
+        key: Key::from(i), // the last two key nibbles are distinct
+        value: ValueHash::zero(),
+        leaf_index: i + 1,
+    });
+    let recovery_entries: Vec<_> = recovery_entries.collect();
+
+    let mut db = PatchSet::default();
+    for recovery_chunk in recovery_entries.chunks(chunk_size) {
+        let patch = Storage::new(&db, &(), recovery_version, false)
+            .extend_during_recovery(recovery_chunk.to_vec());
+        db.apply_patch(patch);
+    }
+    let mut patch = db.patches_by_version.remove(&recovery_version).unwrap();
+    // Manually remove all stale keys from the patch
+    assert_eq!(db.stale_keys_by_version.len(), 1);
+    for stale_key in &db.stale_keys_by_version[&recovery_version] {
+        assert!(
+            patch.nodes.remove(stale_key).is_some(),
+            "Stale key {stale_key} is missing"
+        );
+    }
+
+    let root = patch.root.unwrap();
+    assert_eq!(root.leaf_count(), 256);
+    let Root::Filled {
+        node: Node::Internal(root_node),
+        ..
+    } = &root
+    else {
+        panic!("Unexpected root: {root:?}");
+    };
+    assert_eq!(root_node.child_count(), 1);
+    let child_ref = root_node.child_ref(0).unwrap();
+    assert!(!child_ref.is_leaf);
+    assert_eq!(child_ref.version, recovery_version);
+
+    for (node_key, node) in patch.nodes {
+        assert_eq!(
+            node_key.version, recovery_version,
+            "Unexpected version for {node_key}"
+        );
+
+        let nibble_count = node_key.nibbles.nibble_count();
+        if nibble_count < 64 {
+            let Node::Internal(node) = node else {
+                panic!("Unexpected node at {node_key}: {node:?}");
+            };
+            assert_eq!(node.child_count(), if nibble_count < 62 { 1 } else { 16 });
+        } else {
+            assert_matches!(
+                node,
+                Node::Leaf(_),
+                "Unexpected node at {node_key}: {node:?}"
+            );
+        }
+    }
+}
+
+#[test]
+fn recovery_with_deep_node_hierarchy() {
+    test_recovery_with_deep_node_hierarchy(256);
+    for chunk_size in [5, 7, 20, 59, 127, 128] {
+        println!("Testing recovery with chunk size {chunk_size}");
+        test_recovery_with_deep_node_hierarchy(chunk_size);
+    }
+}
+
+#[test]
+fn recovery_workflow_with_multiple_stages() {
+    let mut db = PatchSet::default();
+    let recovery_version = 100;
+    let recovery_entries = (0_u64..100).map(|i| RecoveryEntry {
+        key: Key::from(i),
+        value: ValueHash::zero(),
+        leaf_index: i,
+    });
+    let patch = Storage::new(&db, &(), recovery_version, false)
+        .extend_during_recovery(recovery_entries.collect());
+    assert_eq!(patch.root(recovery_version).unwrap().leaf_count(), 100);
+    db.apply_patch(patch);
+
+    let more_recovery_entries = (100_u64..200).map(|i| RecoveryEntry {
+        key: Key::from(i),
+        value: ValueHash::zero(),
+        leaf_index: i,
+    });
+
+    let patch = Storage::new(&db, &(), recovery_version, false)
+        .extend_during_recovery(more_recovery_entries.collect());
+    assert_eq!(patch.root(recovery_version).unwrap().leaf_count(), 200);
+    db.apply_patch(patch);
+
+    // Check that all entries can be accessed
+    let storage = Storage::new(&db, &(), recovery_version + 1, true);
+    let instructions = (0_u32..200).map(|i| (Key::from(i), TreeInstruction::Read));
+    let (output, _) = storage.extend_with_proofs(instructions.collect());
+    assert_eq!(output.leaf_count, 200);
+    assert_eq!(output.logs.len(), 200);
+    assert!(output
+        .logs
+        .iter()
+        .all(|log| matches!(log.base, TreeLogEntry::Read { .. })));
+}
+
+fn test_recovery_pruning_equivalence(
+    chunk_size: usize,
+    recovery_chunk_size: usize,
+    hasher: &dyn HashTree,
+) {
+    const RNG_SEED: u64 = 123;
+
+    println!(
+        "Testing recovery–pruning equivalence (chunk size: {chunk_size}, recovery chunk size: \
+         {recovery_chunk_size})"
+    );
+
+    let mut rng = StdRng::seed_from_u64(RNG_SEED);
+    let kvs = (0..100).map(|i| {
+        (
+            U256([rng.gen(), rng.gen(), rng.gen(), rng.gen()]),
+            ValueHash::repeat_byte(i),
+        )
+    });
+    let kvs: Vec<_> = kvs.collect();
+
+    // Add `kvs` into the tree in several commits.
+    let mut db = PatchSet::default();
+    for (version, chunk) in kvs.chunks(chunk_size).enumerate() {
+        let (_, patch) = Storage::new(&db, hasher, version as u64, true).extend(chunk.to_vec());
+        db.apply_patch(patch);
+    }
+    // Collect all remaining nodes into a map and manually remove all stale keys.
+    let recovered_version = db.manifest.version_count - 1;
+    let mut root = db.root(recovered_version).unwrap();
+    let mut all_nodes: HashMap<_, _> = db
+        .patches_by_version
+        .into_values()
+        .flat_map(|sub_patch| sub_patch.nodes)
+        .collect();
+    for stale_key in db.stale_keys_by_version.values().flatten() {
+        all_nodes.remove(stale_key);
+    }
+
+    // Generate recovery entries.
+    let recovery_entries = all_nodes.values().filter_map(|node| {
+        if let Node::Leaf(leaf) = node {
+            return Some(RecoveryEntry {
+                key: leaf.full_key,
+                value: leaf.value_hash,
+                leaf_index: leaf.leaf_index,
+            });
+        }
+        None
+    });
+    let mut recovery_entries: Vec<_> = recovery_entries.collect();
+    assert_eq!(recovery_entries.len(), 100);
+    recovery_entries.sort_unstable_by_key(|entry| entry.key);
+
+    // Recover the tree.
+    let mut recovered_db = PatchSet::default();
+    for recovery_chunk in recovery_entries.chunks(recovery_chunk_size) {
+        let patch = Storage::new(&recovered_db, hasher, recovered_version, false)
+            .extend_during_recovery(recovery_chunk.to_vec());
+        recovered_db.apply_patch(patch);
+    }
+    let sub_patch = recovered_db
+        .patches_by_version
+        .remove(&recovered_version)
+        .unwrap();
+    let recovered_root = sub_patch.root.unwrap();
+    let mut all_recovered_nodes = sub_patch.nodes;
+    for stale_key in db.stale_keys_by_version.values().flatten() {
+        all_recovered_nodes.remove(stale_key);
+    }
+
+    // Nodes must be identical for the pruned and recovered trees, up to node versions
+    // (which recovery flattens to `recovered_version`; emulated for the pruned tree below).
+    if let Root::Filled {
+        node: Node::Internal(node),
+        ..
+    } = &mut root
+    {
+        for child_ref in node.child_refs_mut() {
+            child_ref.version = recovered_version;
+        }
+    }
+    assert_eq!(recovered_root, root);
+
+    let flattened_version_nodes: HashMap<_, _> = all_nodes
+        .into_iter()
+        .map(|(key, mut node)| {
+            if let Node::Internal(node) = &mut node {
+                for child_ref in node.child_refs_mut() {
+                    child_ref.version = recovered_version;
+                }
+            }
+            (key.nibbles.with_version(recovered_version), node)
+        })
+        .collect();
+    assert_eq!(all_recovered_nodes, flattened_version_nodes);
+}
+
+#[test]
+fn recovery_pruning_equivalence() {
+    for chunk_size in [3, 5, 7, 11, 21, 42, 99, 100] {
+        // No chunking during recovery (simple case).
+        test_recovery_pruning_equivalence(chunk_size, 100, &());
+        // Recovery is chunked (more complex case).
+        for recovery_chunk_size in [chunk_size, 1, 6, 19, 50, 73] {
+            test_recovery_pruning_equivalence(chunk_size, recovery_chunk_size, &());
+        }
+    }
+}
+
+#[test]
+fn recovery_pruning_equivalence_with_hashing() {
+    for chunk_size in [3, 7, 21, 42, 100] {
+        // No chunking during recovery (simple case).
+        test_recovery_pruning_equivalence(chunk_size, 100, &Blake2Hasher);
+        // Recovery is chunked (more complex case).
+        for recovery_chunk_size in [chunk_size, 1, 19, 73] {
+            test_recovery_pruning_equivalence(chunk_size, recovery_chunk_size, &Blake2Hasher);
+        }
+    }
+}
diff --git a/core/lib/merkle_tree/src/types/internal.rs b/core/lib/merkle_tree/src/types/internal.rs
index 86568da7f5de..5e875f6e28ac 100644
--- a/core/lib/merkle_tree/src/types/internal.rs
+++ b/core/lib/merkle_tree/src/types/internal.rs
@@ -25,6 +25,7 @@ pub(crate) struct TreeTags {
     pub architecture: String,
     pub depth: usize,
     pub hasher: String,
+    pub is_recovering: bool,
 }

 impl TreeTags {
@@ -35,10 +36,11 @@ impl TreeTags {
             architecture: Self::ARCHITECTURE.to_owned(),
             hasher: hasher.name().to_owned(),
             depth: TREE_DEPTH,
+            is_recovering: false,
         }
     }

-    pub fn assert_consistency(&self, hasher: &dyn HashTree) {
+    pub fn assert_consistency(&self, hasher: &dyn HashTree, expecting_recovery: bool) {
         assert_eq!(
             self.architecture,
             Self::ARCHITECTURE,
@@ -59,6 +61,18 @@ impl TreeTags {
             hasher.name(),
             self.hasher
         );
+
+        if expecting_recovery {
+            assert!(
+                self.is_recovering,
+                "Tree is expected to be in the process of recovery, but it is not"
+            );
+        } else {
+            assert!(
+                !self.is_recovering,
+                "Tree is being recovered; cannot access it until recovery finishes"
+            );
+        }
     }
 }

@@ -205,6 +219,26 @@ impl Nibbles {
         }
         Some(child)
     }
+
+    /// Returns nibbles that form a common prefix between these nibbles and the provided `other` nibbles.
+    pub fn common_prefix(mut self, other: &Self) -> Self {
+        for i in 0..(self.nibble_count + 1) / 2 {
+            let (this_byte, other_byte) = (self.bytes[i], other.bytes[i]);
+            if this_byte != other_byte {
+                // Check whether the first nibble matches.
+                if this_byte & 0xf0 == other_byte & 0xf0 {
+                    self.nibble_count = i * 2 + 1;
+                    self.bytes[i] &= 0xf0;
+                    self.bytes[(i + 1)..].fill(0);
+                } else {
+                    self.nibble_count = i * 2;
+                    self.bytes[i..].fill(0);
+                }
+                return self;
+            }
+        }
+        self
+    }
 }

 impl fmt::Display for Nibbles {
@@ -409,6 +443,16 @@ impl InternalNode {
         self.children.values()
     }

+    #[cfg(test)]
+    pub(crate) fn child_refs_mut(&mut self) -> impl Iterator<Item = &mut ChildRef> + '_ {
+        self.children.values_mut()
+    }
+
+    pub(crate) fn last_child_ref(&self) -> (u8, &ChildRef) {
+        self.children.last().unwrap()
+        // ^ `unwrap()` is safe by construction; no persisted internal node is empty
+    }
+
     pub(crate) fn child_hashes(&self) -> [Option<ValueHash>; Self::CHILD_COUNT as usize] {
         let mut hashes = [None; Self::CHILD_COUNT as usize];
         for (nibble, child_ref) in self.children.iter() {
@@ -561,6 +605,34 @@ mod tests {
         assert!(nibbles.push(0xb).is_none());
     }

+    #[test]
+    fn nibbles_prefix() {
+        let nibbles = Nibbles::new(&TEST_KEY, 6);
+        assert_eq!(nibbles.common_prefix(&nibbles), nibbles);
+
+        let nibbles = Nibbles::new(&TEST_KEY, 6);
+        let prefix = Nibbles::new(&TEST_KEY, 4);
+        assert_eq!(nibbles.common_prefix(&prefix), prefix);
+        assert_eq!(prefix.common_prefix(&nibbles), prefix);
+
+        let nibbles = Nibbles::new(&TEST_KEY, 7);
+        assert_eq!(nibbles.common_prefix(&prefix), prefix);
+        assert_eq!(prefix.common_prefix(&nibbles), prefix);
+
+        let nibbles = Nibbles::new(&TEST_KEY, 64);
+        let diverging_nibbles = Nibbles::new(&TEST_KEY, 4).push(0x1).unwrap();
+        assert_eq!(nibbles.common_prefix(&diverging_nibbles), prefix);
+
+        let diverging_nibbles = Nibbles::new(&TEST_KEY, 5).push(0x1).unwrap();
+        assert_eq!(
+            nibbles.common_prefix(&diverging_nibbles),
+            Nibbles::new(&TEST_KEY, 5)
+        );
+
+        let diverging_nibbles = Nibbles::from_parts([0xff; KEY_SIZE], 64);
+        assert_eq!(nibbles.common_prefix(&diverging_nibbles), Nibbles::EMPTY);
+    }
+
     #[test]
     fn node_key_serialization() {
         let nibbles = Nibbles::new(&TEST_KEY, 6);
diff --git a/core/lib/merkle_tree/src/utils.rs b/core/lib/merkle_tree/src/utils.rs
index 5faedf597162..9542b24bbd3c 100644
--- a/core/lib/merkle_tree/src/utils.rs
+++ b/core/lib/merkle_tree/src/utils.rs
@@ -63,6 +63,13 @@ impl SmallMap {
         Self::indices(self.bitmap).zip(&self.values)
     }

+    pub fn last(&self) -> Option<(u8, &V)> {
+        let greatest_set_bit = (u16::BITS - self.bitmap.leading_zeros()).checked_sub(1)?;
+        let greatest_set_bit = u8::try_from(greatest_set_bit).unwrap();
+        // ^ `unwrap()` is safe by construction: `greatest_set_bit <= 15`.
+        Some((greatest_set_bit, self.values.last()?))
+    }
+
     fn indices(bitmap: u16) -> impl Iterator<Item = u8> {
         (0..Self::CAPACITY).filter(move |&index| {
             let mask = 1 << u16::from(index);
@@ -74,6 +81,11 @@ impl SmallMap {
         self.values.iter()
     }

+    #[cfg(test)]
+    pub fn values_mut(&mut self) -> impl Iterator<Item = &mut V> + '_ {
+        self.values.iter_mut()
+    }
+
     pub fn get_mut(&mut self, index: u8) -> Option<&mut V> {
         assert!(index < Self::CAPACITY, "index is too large");
diff --git a/core/lib/merkle_tree/tests/integration/common.rs b/core/lib/merkle_tree/tests/integration/common.rs
index dff7c8ca012c..fd9e00855c20 100644
--- a/core/lib/merkle_tree/tests/integration/common.rs
+++ b/core/lib/merkle_tree/tests/integration/common.rs
@@ -1,5 +1,11 @@
 //! Shared functionality.

+use once_cell::sync::Lazy;
+
+use std::collections::HashMap;
+
+use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher};
+use zksync_merkle_tree::{HashTree, TreeInstruction};
 use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256};

 pub fn generate_key_value_pairs(indexes: impl Iterator<Item = u64>) -> Vec<(U256, H256)> {
@@ -11,3 +17,102 @@ pub fn generate_key_value_pairs(indexes: impl Iterator<Item = u64>) -> Vec<(U256
     });
     kvs.collect()
 }
+
+pub fn compute_tree_hash(kvs: impl Iterator<Item = (U256, H256)>) -> H256 {
+    let kvs_with_indices = kvs
+        .enumerate()
+        .map(|(i, (key, value))| (key, value, i as u64 + 1));
+    compute_tree_hash_with_indices(kvs_with_indices)
+}
+
+// The extended version of computations used in `InternalNode`.
+fn compute_tree_hash_with_indices(kvs: impl Iterator<Item = (U256, H256, u64)>) -> H256 {
+    let hasher = Blake2Hasher;
+    let mut empty_tree_hash = hasher.hash_bytes(&[0_u8; 40]);
+    let level = kvs.map(|(key, value, leaf_index)| {
+        let mut bytes = [0_u8; 40];
+        bytes[..8].copy_from_slice(&leaf_index.to_be_bytes());
+        bytes[8..].copy_from_slice(value.as_ref());
+        (key, hasher.hash_bytes(&bytes))
+    });
+    let mut level: Vec<(U256, H256)> = level.collect();
+    if level.is_empty() {
+        return hasher.empty_subtree_hash(256);
+    }
+    level.sort_unstable_by_key(|(key, _)| *key);
+
+    for _ in 0..256 {
+        let mut next_level = vec![];
+        let mut i = 0;
+        while i < level.len() {
+            let (pos, hash) = level[i];
+            let aggregate_hash = if pos.bit(0) {
+                // `pos` corresponds to a right branch of its parent
+                hasher.compress(&empty_tree_hash, &hash)
+            } else if let Some((next_pos, next_hash)) = level.get(i + 1) {
+                if pos + 1 == *next_pos {
+                    i += 1;
+                    hasher.compress(&hash, next_hash)
+                } else {
+                    hasher.compress(&hash, &empty_tree_hash)
+                }
+            } else {
+                hasher.compress(&hash, &empty_tree_hash)
+            };
+            next_level.push((pos >> 1, aggregate_hash));
+            i += 1;
+        }
+
+        level = next_level;
+        empty_tree_hash = hasher.compress(&empty_tree_hash, &empty_tree_hash);
+    }
+    level[0].1
+}
+
+// Computing the expected hash takes some time in the debug mode, so we memoize it.
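+// (`Lazy` from `once_cell` computes the value on first access and caches it for the
+// whole test binary, so only the first test to read `KVS_AND_HASH` pays this cost.)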
+pub static KVS_AND_HASH: Lazy<(Vec<(U256, H256)>, H256)> = Lazy::new(|| {
+    let kvs = generate_key_value_pairs(0..100);
+    let expected_hash = compute_tree_hash(kvs.iter().copied());
+    (kvs, expected_hash)
+});
+
+pub fn convert_to_writes(kvs: &[(U256, H256)]) -> Vec<(U256, TreeInstruction)> {
+    let kvs = kvs
+        .iter()
+        .map(|&(key, hash)| (key, TreeInstruction::Write(hash)));
+    kvs.collect()
+}
+
+/// Emulates leaf index assignment in a real Merkle tree.
+#[derive(Debug)]
+pub struct TreeMap(HashMap<U256, (H256, u64)>);
+
+impl TreeMap {
+    pub fn new(initial_entries: &[(U256, H256)]) -> Self {
+        let map = initial_entries
+            .iter()
+            .enumerate()
+            .map(|(i, (key, value))| (*key, (*value, i as u64 + 1)))
+            .collect();
+        Self(map)
+    }
+
+    pub fn extend(&mut self, kvs: &[(U256, H256)]) {
+        for &(key, new_value) in kvs {
+            if let Some((value, _)) = self.0.get_mut(&key) {
+                *value = new_value;
+            } else {
+                let leaf_index = self.0.len() as u64 + 1;
+                self.0.insert(key, (new_value, leaf_index));
+            }
+        }
+    }
+
+    pub fn root_hash(&self) -> H256 {
+        let entries = self
+            .0
+            .iter()
+            .map(|(key, (value, idx))| (*key, *value, *idx));
+        compute_tree_hash_with_indices(entries)
+    }
+}
diff --git a/core/lib/merkle_tree/tests/integration/main.rs b/core/lib/merkle_tree/tests/integration/main.rs
index bf3391df6ccf..a6afb1a58c7a 100644
--- a/core/lib/merkle_tree/tests/integration/main.rs
+++ b/core/lib/merkle_tree/tests/integration/main.rs
@@ -4,3 +4,4 @@ mod common;
 mod consistency;
 mod domain;
 mod merkle_tree;
+mod recovery;
diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs
index f94335390eea..ad9467b8e5f8 100644
--- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs
+++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs
@@ -1,69 +1,17 @@
 //! Tests not tied to the zksync domain.

-use once_cell::sync::Lazy;
 use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng};

 use std::{cmp, mem};

-use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher};
+use zksync_crypto::hasher::blake2::Blake2Hasher;
 use zksync_merkle_tree::{
     Database, HashTree, MerkleTree, PatchSet, Patched, TreeInstruction, TreeLogEntry,
     TreeRangeDigest,
 };
 use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256};

-use crate::common::generate_key_value_pairs;
-
-fn convert_to_writes(kvs: &[(U256, H256)]) -> Vec<(U256, TreeInstruction)> {
-    let kvs = kvs
-        .iter()
-        .map(|&(key, hash)| (key, TreeInstruction::Write(hash)));
-    kvs.collect()
-}
-
-// The extended version of computations used in `InternalNode`.
-fn compute_tree_hash(kvs: &[(U256, H256)]) -> H256 {
-    assert!(!kvs.is_empty());
-
-    let hasher = Blake2Hasher;
-    let mut empty_tree_hash = hasher.hash_bytes(&[0_u8; 40]);
-    let level = kvs.iter().enumerate().map(|(i, (key, value))| {
-        let leaf_index = i as u64 + 1;
-        let mut bytes = [0_u8; 40];
-        bytes[..8].copy_from_slice(&leaf_index.to_be_bytes());
-        bytes[8..].copy_from_slice(value.as_ref());
-        (*key, hasher.hash_bytes(&bytes))
-    });
-    let mut level: Vec<(U256, H256)> = level.collect();
-    level.sort_unstable_by_key(|(key, _)| *key);
-
-    for _ in 0..256 {
-        let mut next_level = vec![];
-        let mut i = 0;
-        while i < level.len() {
-            let (pos, hash) = level[i];
-            let aggregate_hash = if pos.bit(0) {
-                // `pos` corresponds to a right branch of its parent
-                hasher.compress(&empty_tree_hash, &hash)
-            } else if let Some((next_pos, next_hash)) = level.get(i + 1) {
-                if pos + 1 == *next_pos {
-                    i += 1;
-                    hasher.compress(&hash, next_hash)
-                } else {
-                    hasher.compress(&hash, &empty_tree_hash)
-                }
-            } else {
-                hasher.compress(&hash, &empty_tree_hash)
-            };
-            next_level.push((pos >> 1, aggregate_hash));
-            i += 1;
-        }
-
-        level = next_level;
-        empty_tree_hash = hasher.compress(&empty_tree_hash, &empty_tree_hash);
-    }
-    level[0].1
-}
+use crate::common::{compute_tree_hash, convert_to_writes, generate_key_value_pairs, KVS_AND_HASH};

 #[test]
 fn compute_tree_hash_works_correctly() {
@@ -76,7 +24,7 @@ fn compute_tree_hash_works_correctly() {
     let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap();
     let key = StorageKey::new(AccountTreeId::new(address), H256::zero());
     let key = key.hashed_key_u256();
-    let hash = compute_tree_hash(&[(key, H256([1; 32]))]);
+    let hash = compute_tree_hash([(key, H256([1; 32]))].into_iter());
     assert_eq!(hash, EXPECTED_HASH);
 }

@@ -87,7 +35,7 @@ fn root_hash_is_computed_correctly_on_empty_tree() {
         let mut tree = MerkleTree::new(PatchSet::default());
         let kvs = generate_key_value_pairs(0..kv_count);
-        let expected_hash = compute_tree_hash(&kvs);
+        let expected_hash = compute_tree_hash(kvs.iter().copied());
         let output = tree.extend(kvs);
         assert_eq!(output.root_hash, expected_hash);
     }
@@ -104,7 +52,7 @@ fn output_proofs_are_computed_correctly_on_empty_tree() {
         let mut tree = MerkleTree::new(PatchSet::default());
         let kvs = generate_key_value_pairs(0..kv_count);
-        let expected_hash = compute_tree_hash(&kvs);
+        let expected_hash = compute_tree_hash(kvs.iter().copied());
         let instructions = convert_to_writes(&kvs);
         let output = tree.extend_with_proofs(instructions.clone());
@@ -134,7 +82,7 @@ fn entry_proofs_are_computed_correctly_on_empty_tree() {
         let mut tree = MerkleTree::new(PatchSet::default());
         let kvs = generate_key_value_pairs(0..kv_count);
-        let expected_hash = compute_tree_hash(&kvs);
+        let expected_hash = compute_tree_hash(kvs.iter().copied());
         tree.extend(kvs.clone());

         let existing_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect();
@@ -182,7 +130,7 @@ fn proofs_are_computed_correctly_for_mixed_instructions() {
     let mut instructions: Vec<_> = reads.collect();
     // Overwrite all keys in the tree.
     let writes: Vec<_> = kvs.iter().map(|(key, _)| (*key, H256::zero())).collect();
-    let expected_hash = compute_tree_hash(&writes);
+    let expected_hash = compute_tree_hash(writes.iter().copied());
     instructions.extend(convert_to_writes(&writes));
     instructions.shuffle(&mut rng);

@@ -221,13 +169,6 @@ fn proofs_are_computed_correctly_for_missing_keys() {
     output.verify_proofs(&Blake2Hasher, empty_tree_hash, &instructions);
 }

-// Computing the expected hash takes some time in the debug mode, so we memoize it.
-static KVS_AND_HASH: Lazy<(Vec<(U256, H256)>, H256)> = Lazy::new(|| {
-    let kvs = generate_key_value_pairs(0..100);
-    let expected_hash = compute_tree_hash(&kvs);
-    (kvs, expected_hash)
-});
-
 fn test_intermediate_commits(db: &mut impl Database, chunk_size: usize) {
     let (kvs, expected_hash) = &*KVS_AND_HASH;
     let mut final_hash = H256::zero();
@@ -374,7 +315,7 @@ fn test_root_hash_computing_with_key_updates(db: impl Database) {
     let mut kvs = generate_key_value_pairs(0..50);
     let mut tree = MerkleTree::new(db);
-    let expected_hash = compute_tree_hash(&kvs);
+    let expected_hash = compute_tree_hash(kvs.iter().copied());
     let output = tree.extend(kvs.clone());
     assert_eq!(output.root_hash, expected_hash);

@@ -389,7 +330,7 @@ fn test_root_hash_computing_with_key_updates(db: impl Database) {
     let changed_kvs: Vec<_> = changed_kvs.collect();
     let new_kvs = generate_key_value_pairs(50..75);
     kvs.extend_from_slice(&new_kvs);
-    let expected_hash = compute_tree_hash(&kvs);
+    let expected_hash = compute_tree_hash(kvs.iter().copied());

     // We can merge `changed_kvs` and `new_kvs` in any way that preserves `new_kvs` ordering.
     // We'll do multiple ways (which also will effectively test DB rollbacks).
@@ -504,7 +445,7 @@ fn test_root_hash_equals_to_previous_implementation(db: &mut impl Database) {
     let values = (0..100).map(H256::from_low_u64_be);
     let kvs: Vec<_> = keys.zip(values).collect();
-    let expected_hash = compute_tree_hash(&kvs);
+    let expected_hash = compute_tree_hash(kvs.iter().copied());
     assert_eq!(expected_hash, PREV_IMPL_HASH);

     let mut tree = MerkleTree::new(db);
@@ -618,7 +559,7 @@ mod rocksdb {
     use std::collections::BTreeMap;

     use super::*;
-    use zksync_merkle_tree::{MerkleTreeColumnFamily, RocksDBWrapper};
+    use zksync_merkle_tree::{MerkleTreeColumnFamily, MerkleTreePruner, RocksDBWrapper};
     use zksync_storage::RocksDB;

     #[derive(Debug)]
@@ -683,14 +624,34 @@ mod rocksdb {
             let raw_db = db.into_inner();
             let snapshot_name = format!("db-snapshot-{chunk_size}-chunked-commits");
             insta::assert_yaml_snapshot!(snapshot_name, DatabaseSnapshot::new(&raw_db));
+            db = clean_db(raw_db);
+        }
+    }

-            // Clear the entire database instead of using `MerkleTree::truncate_versions()`
-            // so that it doesn't contain any junk that can influence snapshots.
-            let mut batch = raw_db.new_write_batch();
-            let cf = MerkleTreeColumnFamily::Tree;
-            batch.delete_range_cf(cf, (&[] as &[_])..&u64::MAX.to_be_bytes());
-            raw_db.write(batch).unwrap();
-            db = RocksDBWrapper::from(raw_db);
-        }
-    }
+    fn clean_db(raw_db: RocksDB) -> RocksDBWrapper {
+        // Clear the entire database instead of using `MerkleTree::truncate_versions()`
+        // so that it doesn't contain any junk that can influence snapshots.
+        let mut batch = raw_db.new_write_batch();
+        let cf = MerkleTreeColumnFamily::Tree;
+        batch.delete_range_cf(cf, (&[] as &[_])..&u64::MAX.to_be_bytes());
+        raw_db.write(batch).unwrap();
+        RocksDBWrapper::from(raw_db)
+    }
+
+    #[test]
+    fn snapshot_for_pruned_tree() {
+        let Harness { mut db, dir: _dir } = Harness::new();
+        for chunk_size in [3, 8, 21] {
+            test_intermediate_commits(&mut db, chunk_size);
+            let (mut pruner, _) = MerkleTreePruner::new(&mut db, 0);
+            pruner.run_once();
+
+            let raw_db = db.into_inner();
+            let snapshot_name = format!("db-snapshot-{chunk_size}-chunked-commits-pruned");
+            let db_snapshot = DatabaseSnapshot::new(&raw_db);
+            assert!(db_snapshot.stale_keys.is_empty());
+            insta::assert_yaml_snapshot!(snapshot_name, db_snapshot);
+            db = clean_db(raw_db);
+        }
+    }
diff --git a/core/lib/merkle_tree/tests/integration/recovery.rs b/core/lib/merkle_tree/tests/integration/recovery.rs
new file mode 100644
index 000000000000..fe89dded5c32
--- /dev/null
+++ b/core/lib/merkle_tree/tests/integration/recovery.rs
@@ -0,0 +1,141 @@
+//! Tests for tree recovery.
+
+use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng};
+use zksync_crypto::hasher::blake2::Blake2Hasher;
+
+use zksync_merkle_tree::{
+    recovery::{MerkleTreeRecovery, RecoveryEntry},
+    Database, MerkleTree, PatchSet, PruneDatabase, ValueHash,
+};
+
+use crate::common::{convert_to_writes, generate_key_value_pairs, TreeMap, KVS_AND_HASH};
+
+#[test]
+fn recovery_basics() {
+    let (kvs, expected_hash) = &*KVS_AND_HASH;
+    let recovery_entries = kvs
+        .iter()
+        .enumerate()
+        .map(|(i, &(key, value))| RecoveryEntry {
+            key,
+            value,
+            leaf_index: i as u64 + 1,
+        });
+    let mut recovery_entries: Vec<_> = recovery_entries.collect();
+    recovery_entries.sort_unstable_by_key(|entry| entry.key);
+    let greatest_key = recovery_entries[99].key;
+
+    let recovered_version = 123;
+    let mut recovery = MerkleTreeRecovery::new(PatchSet::default(), recovered_version);
+    recovery.extend(recovery_entries);
+
+    assert_eq!(recovery.last_processed_key(), Some(greatest_key));
+    assert_eq!(recovery.root_hash(), *expected_hash);
+
+    let tree = recovery.finalize();
+    tree.verify_consistency(recovered_version).unwrap();
+}
+
+fn test_recovery_in_chunks<DB: PruneDatabase>(mut create_db: impl FnMut() -> DB) {
+    let (kvs, expected_hash) = &*KVS_AND_HASH;
+    let recovery_entries = kvs
+        .iter()
+        .enumerate()
+        .map(|(i, &(key, value))| RecoveryEntry {
+            key,
+            value,
+            leaf_index: i as u64 + 1,
+        });
+    let mut recovery_entries: Vec<_> = recovery_entries.collect();
+    recovery_entries.sort_unstable_by_key(|entry| entry.key);
+    let greatest_key = recovery_entries[99].key;
+
+    let recovered_version = 123;
+    for chunk_size in [6, 10, 17, 42] {
+        let mut db = create_db();
+        let mut recovery = MerkleTreeRecovery::new(&mut db, recovered_version);
+        for (i, chunk) in recovery_entries.chunks(chunk_size).enumerate() {
+            recovery.extend(chunk.to_vec());
+            if i % 3 == 1 {
+                recovery = MerkleTreeRecovery::new(&mut db, recovered_version);
+                // ^ Simulate recovery interruption and restart
+            }
+        }
+
+        assert_eq!(recovery.last_processed_key(), Some(greatest_key));
+        assert_eq!(recovery.root_hash(), *expected_hash);
+
+        let mut tree = recovery.finalize();
+        tree.verify_consistency(recovered_version).unwrap();
+        // Check that new tree versions can be built and function as expected.
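+        // (Recovery materializes state only at `recovered_version`; the helper below
+        // also asserts that earlier versions expose no root hash.)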
+        test_tree_after_recovery(&mut tree, recovered_version, *expected_hash);
+    }
+}
+
+fn test_tree_after_recovery<DB: Database>(
+    tree: &mut MerkleTree<DB>,
+    recovered_version: u64,
+    root_hash: ValueHash,
+) {
+    const RNG_SEED: u64 = 765;
+    const CHUNK_SIZE: usize = 18;
+
+    assert_eq!(tree.latest_version(), Some(recovered_version));
+    assert_eq!(tree.root_hash(recovered_version), Some(root_hash));
+    for ver in 0..recovered_version {
+        assert_eq!(tree.root_hash(ver), None);
+    }
+
+    // Check adding new and updating existing entries in the tree.
+    let mut rng = StdRng::seed_from_u64(RNG_SEED);
+    let mut kvs = generate_key_value_pairs(100..=150);
+    let mut modified_kvs = generate_key_value_pairs(50..=100);
+    for (_, value) in &mut modified_kvs {
+        *value = ValueHash::repeat_byte(1);
+    }
+    kvs.extend(modified_kvs);
+    kvs.shuffle(&mut rng);
+
+    let mut tree_map = TreeMap::new(&KVS_AND_HASH.0);
+    let mut prev_root_hash = root_hash;
+    for (i, chunk) in kvs.chunks(CHUNK_SIZE).enumerate() {
+        tree_map.extend(chunk);
+
+        let new_root_hash = if i % 2 == 0 {
+            let output = tree.extend(chunk.to_vec());
+            output.root_hash
+        } else {
+            let instructions = convert_to_writes(chunk);
+            let output = tree.extend_with_proofs(instructions.clone());
+            output.verify_proofs(&Blake2Hasher, prev_root_hash, &instructions);
+            output.root_hash().unwrap()
+        };
+
+        assert_eq!(new_root_hash, tree_map.root_hash());
+        tree.verify_consistency(recovered_version + i as u64)
+            .unwrap();
+        prev_root_hash = new_root_hash;
+    }
+}
+
+#[test]
+fn recovery_in_chunks() {
+    test_recovery_in_chunks(PatchSet::default);
+}
+
+mod rocksdb {
+    use tempfile::TempDir;
+
+    use super::*;
+    use zksync_merkle_tree::RocksDBWrapper;
+
+    #[test]
+    fn recovery_in_chunks() {
+        let temp_dir = TempDir::new().unwrap();
+        let mut counter = 0;
+        test_recovery_in_chunks(|| {
+            counter += 1;
+            RocksDBWrapper::new(&temp_dir.path().join(counter.to_string()))
+        });
+    }
+}
diff --git a/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-21-chunked-commits-pruned.snap b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-21-chunked-commits-pruned.snap
new file mode 100644
index 000000000000..b8463049d616
--- /dev/null
+++ b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-21-chunked-commits-pruned.snap
@@ -0,0 +1,136 @@
+---
+source: core/lib/merkle_tree/tests/integration/merkle_tree.rs
+assertion_line: 658
+expression: "DatabaseSnapshot::new(&raw_db)"
+---
+tree:
+  "00": 05030c61726368697465637475726506415231364d5405646570746803323536066861736865720a626c616b653273323536
+  "00000000000000000200": 0059914596ed2e70745c44ef315747ea29aff72bce6757aca30a434d9bb70781000000000000000000000000000000000000000000000000000000000000000202
+  "00000000000000000204": 049dfddb9f03237fed8d902e350f0b0e9b85af93f49fe02f0d189c47cd7b122b000000000000000000000000000000000000000000000000000000000000000707
+  0000000000000000020e: 0edeb726b4588b4b373d38b7b43e3083c6e105c991e1812bdcce6c373ed98dd5000000000000000000000000000000000000000000000000000000000000000a0a
+  "00000000000000000229": 297faa5f83989e2a8b574589c3eeb5fdced0634fd75d662507604796dd4edd1b000000000000000000000000000000000000000000000000000000000000001515
+  0000000000000000022b: 2b5ffd54bff018009f93f7cd782e8922b24520eb99a100efe2c1a4ae6c1ca3f5000000000000000000000000000000000000000000000000000000000000000b0b
+  0000000000000000027c: 
7cabd5f57ff72670052939485c4533b01b26fd0e490c31bd48b5ac5002ff1f83000000000000000000000000000000000000000000000000000000000000000909 + 0000000000000000027e: 7ed4dea78574266e019059e5b5fd6f94ed1632bd4a643d1c51aa02974def5684000000000000000000000000000000000000000000000000000000000000000303 + 000000000000000002a5: a58e3a77937b9b747f70d4aee9e84992d7955e52e2de71dc9615df3d16b2b816000000000000000000000000000000000000000000000000000000000000001313 + 000000000000000002ab: ab314b8d202e718d011f9f90df81dd0a00dc4f2279da45121f6dec7257622776000000000000000000000000000000000000000000000000000000000000000f0f + 000000000000000002bb: bbd2fb6ed132cf2780a90802acaaa371de119dc89f636dbb647ccdff8b0dc056000000000000000000000000000000000000000000000000000000000000000d0d + 000000000000000002ea: eaa40f6cdd316711961300a0f242fdc226f42d4740c381f05200092eaf70b841000000000000000000000000000000000000000000000000000000000000001212 + 000000000000000002eb: ebdfe46967031db428c3b807c7f8d78f6a51e9ca5f0500ca6099d3d26b1d312a000000000000000000000000000000000000000000000000000000000000001010 + 000000000000000002ef: ef783cc720dbf74c747a155a8975b324d2f8fa80672969dc78fe6f12ea59d03f000000000000000000000000000000000000000000000000000000000000001111 + "00000000000000010205": 05bd0bebbb9c224a78e8d9b6876da7f75cc0d247ce0f6e99f8921fbf410a09a4000000000000000000000000000000000000000000000000000000000000002323 + "00000000000000010207": 07216d95abf0edac40a58f6ca27749af481a4ed3b33dd2d3f10efa814d6da0a7000000000000000000000000000000000000000000000000000000000000001818 + 0000000000000001022d: 2dd4a3d0d6d98198be8e16968d57cda367833b99cfd88c809ce3960ff8b41aba000000000000000000000000000000000000000000000000000000000000002929 + "00000000000000010238": 38ec53adc1cd8bc9f788a5986f73d4e29e2d98945c0aa1d6727be9f8baba4337000000000000000000000000000000000000000000000000000000000000000e0e + 0000000000000001023d: 3d8a8b22175714443f990b996dd26dc71cb53e030d1bf48844df12486e5d4f2a000000000000000000000000000000000000000000000000000000000000002a2a + "00000000000000010249": 49ac53849b70d666cc840b4add0baff56fa9ce7e27be2acb275d109a5994ff8d000000000000000000000000000000000000000000000000000000000000000c0c + 0000000000000001025d: 5daa965d9688be0ac629916a9920e45910c13d1fe45d257a9e17217f226dfb4d000000000000000000000000000000000000000000000000000000000000000101 + 0000000000000001025f: 5f8a83305cccf521595d4bdb1ec8b03be3e9e8e27438db250b9e89f09981799d000000000000000000000000000000000000000000000000000000000000002828 + 0000000000000001026d: 6dfea072e8e999ba0adb48fc1284af16cc1351d53e52b175f9dfe153602b4362000000000000000000000000000000000000000000000000000000000000001414 + "00000000000000010293": 9300ef4007f853d076758bff4c00d6c979113664a6c948d0d313daa379607fdd000000000000000000000000000000000000000000000000000000000000002020 + "00000000000000010299": 99fa0e7a995a9b3c03f9a186d9581dd08387846aade5384b8e85c9c7c193dfb3000000000000000000000000000000000000000000000000000000000000001b1b + 0000000000000001029e: 9e262dd28666dbdb4101e1c27a64deda0e6064360be4c53c89e7b2b35a311943000000000000000000000000000000000000000000000000000000000000002222 + 000000000000000102b3: b314fe2db0b91091038ce12e45838efdfc5c48fe1e68d4f6a4f990ba9e4324f2000000000000000000000000000000000000000000000000000000000000001f1f + 000000000000000102b6: b63eb46438ece57d3d58c40b8193308ffccd3eedec2d645a5ac181f7783aa9c5000000000000000000000000000000000000000000000000000000000000001c1c + 000000000000000102b8: b860ff2535a62373c8c7aa6a31b436cb16427464bde987fc0c35238e10663a56000000000000000000000000000000000000000000000000000000000000002424 + 
000000000000000102b9: 002800001632849641ed8fd7f10b2ae3ddaea90de793987e076a54b1d561f0bbc1fa4c65017236b2c6d1e76a12f08ef244830561fd8c3f4cb4eb1d180d9dd3885da41f45cf01 + 000000000000000102dc: dc3e793187abbabede6f6fc80c3ab3ac4a31019d0ef19f59aff10f614a7149fa000000000000000000000000000000000000000000000000000000000000001e1e + 000000000000000102dd: 00280000bf3a7497054eac25ada2fc8c4d315a3a240e6bbe6ecdd3f2fc0f82d8750984410157d9159f07e3d3466d61ab4c26d5b3ea993669fa9b133ee2c9c0a4b859af98b301 + "000000000000000102e8": 0800800005cd913895621437d7b5a41c4cc473ebe257221afa3d6e70297ceacdedfefb2a0137abb18273775dc4a8d30143a3bf7e2db1495570bec8a7876a34f4c18eb9d8eb01 + 000000000000000103b950: b9592813919e07b8df5fd836d1c6e3ac9a9eb7cb66b5ee8a1f4ee0ff3e0b2508000000000000000000000000000000000000000000000000000000000000001919 + 000000000000000103b960: b96e7e15bcbf96c67b1f26fa5ba80089388fbefad39968132c0791cb313d0157000000000000000000000000000000000000000000000000000000000000000404 + 000000000000000103dd50: dd55470824e0db2b94ea10ae29afd473f265bbe758854354b926846821fef91a000000000000000000000000000000000000000000000000000000000000000606 + 000000000000000103dd60: dd69263d904a01d711be57a6f4177bbf6745084517b2ac16468ab075cb88a962000000000000000000000000000000000000000000000000000000000000002727 + "000000000000000103e810": e81cc9e81f01ea7314bf83dc9b7fd942974427fe130fe80b3fb11eed3f80d4ed000000000000000000000000000000000000000000000000000000000000002525 + 000000000000000103e8b0: e8b8b981f358516ba6e7b76e0007bdecf3e97873abe468fbef40110d8206c8d8000000000000000000000000000000000000000000000000000000000000000505 + "00000000000000020201": 01ad1cfd47cdacf2f23714b10102071ea05f91e25ffcf10ac043964bb004b83f000000000000000000000000000000000000000000000000000000000000002c2c + "00000000000000020203": 03484fcec5bd9ccd1b448f7839fe71c8fb7d0e9e9f9076447daa552c89691f60000000000000000000000000000000000000000000000000000000000000003636 + "00000000000000020212": 121a1293f56198bb1639951b344a3ae26f5bca796569bb07d53ce98f4085bcbc000000000000000000000000000000000000000000000000000000000000000808 + "00000000000000020213": 1303216ea6a1a1b396f5fe43017e9869938a896901ed852d95079a1a7d7d57b5000000000000000000000000000000000000000000000000000000000000003434 + "00000000000000020219": 193fbc2a619e98d253554723ff80e1c2de926a8547f09ee1568e53db2b0fa0a2000000000000000000000000000000000000000000000000000000000000003535 + 0000000000000002022e: 2e54be9fbeefab49441556d63666897e33eedb3ff99ed9bffe4b88f9fd85ba11000000000000000000000000000000000000000000000000000000000000003f3f + "00000000000000020252": 5245608ce703d4a307b8c83f268dc60ac248dc26a891baa6116feff1e3cd8ba4000000000000000000000000000000000000000000000000000000000000003939 + "00000000000000020259": 5970e9fe810ebec7d53c18e51fe72b464e75851550b0c34a5e7ebabed8e6c7cc000000000000000000000000000000000000000000000000000000000000002d2d + "00000000000000020260": 00008800b450bcb1628ab1ce5b83630fce3da51e983f02c587d8eba576f264288f799bd2025bf094efc1e3fb61fa5a849f862e091e65738a95d239372e92191d2f5779752b02 + "00000000000000020262": 620975a9aee0d240d9f52ead3ff4f04b9043e260d67bb961fc16188cf1f63635000000000000000000000000000000000000000000000000000000000000003a3a + 0000000000000002029b: 9b420dfe14cdceb77e4464b2a841b6cf6221a29acf6bf363a556a13b72467404000000000000000000000000000000000000000000000000000000000000003d3d + 000000000000000202a0: a0aab27f895e1a504ee0862d8a90c2df63bcc4311409bf357a09d6ba4311d0f8000000000000000000000000000000000000000000000000000000000000003333 + 000000000000000202bd: 
bd11589aa8ca58a6c00ed1b1354d30b53279703644bed0a73e63513ad11fa5bf000000000000000000000000000000000000000000000000000000000000003737 + 000000000000000202c0: c0519bcddf9b31ea9fbdfe61998e503dca06026d059b990948d1e076657a191e000000000000000000000000000000000000000000000000000000000000002f2f + 000000000000000202c4: c4fbca1f402303e4bc0c85ed3817bc2ff549f665047388b1b40fab23553b2a9d000000000000000000000000000000000000000000000000000000000000001717 + 000000000000000202c7: c78049d38618be10de663e1ef60a5a34a9e635692bf9952c3afb837af3f66e1d000000000000000000000000000000000000000000000000000000000000003232 + 000000000000000202d9: d9898cd6484f797318b72190d1939771527071cac4c2e9da4e053e2b6e0dfe1a000000000000000000000000000000000000000000000000000000000000002b2b + "000000000000000202e2": e2adc6f93c6726bb6f0f56324add68457a28b691aa6e08951cab65ec6f37b54b000000000000000000000000000000000000000000000000000000000000003c3c + 000000000000000202f6: 0082000048902a08b0d7edf4969a608d61ec9911074fc5efb163bc207048d35be9ac0503021993dcbaa58bdd07fccd22a725289a2ab266ee265a3c0dc0eda3a48a154235a102 + "0000000000000002032560": 25659d69d133a28e77d07372508d9bebf0784d709c5f3bfaf1a4d53e22d4febb000000000000000000000000000000000000000000000000000000000000003838 + "0000000000000002032580": 2585f6a0139e0a71940d088e01221a4a5a6852220a894f135cf5c415c511f247000000000000000000000000000000000000000000000000000000000000003030 + "0000000000000002036090": 609f2dcc908d758e6b7dfb10333930babc58eb6ea82a7fb7e332371ba173d855000000000000000000000000000000000000000000000000000000000000002121 + 00000000000000020360b0: 60b22e6aff3665a5256eb5974025b20ba28c2b93ef016a66812f4c6e0c8ef5c4000000000000000000000000000000000000000000000000000000000000002e2e + 000000000000000203d8d0: d8d8c592cc9c74820b9ed9a20fb5709a0d9273f210fbfab39feb94c62f00d806000000000000000000000000000000000000000000000000000000000000003b3b + 000000000000000203d8e0: d8ecaa095dc3b2f77c057c770718c0affe6a9caa0104136143437cf30fb5688e000000000000000000000000000000000000000000000000000000000000003131 + 000000000000000203f640: f6479d69073315ce4df8b1234bd36fde45581f7b61749ba87374c8c71f0b0d72000000000000000000000000000000000000000000000000000000000000001616 + 000000000000000203f670: f673a7125608b9951ba643a43a2b7fc3a87af522e820cccfa65f977949a98c12000000000000000000000000000000000000000000000000000000000000003e3e + "00000000000000030100": 8a8a882257b5edb42fb6c1d47b66df6f451d1f0b07c707b265b47a0a445a49121bf17f09003564f8fd0ce402f3344ebabc39f73df9f5c2c5e7a6ef60d3c65bd3fa126e386b02663d14857c3a5444a050d9655e3827c4728e80d79d1292fd51f52c34b1e7457b0214c2aaac166403626fff92472cec22712462ccd14ca9d431b2cdcc75fe0912cd0026e57d3219488c374ae9bd6c9778ffd012f013335e68a58e614ab7b397b6b3db01941121db179d2296e2f1633f191f7f52dbb69956adb74e105be899b9ab90771c01eb7ced5baec3075725903e2e21b0e1457be9ff668e5e6d40e843117004e4acfa03ae37c191db8c160b309ddf0f969c234bb77fcdf25dfde6f541e0ddffbcfe0de2036f21fa6ab54c46ae0467b9a707bc397efcc6b392f748ee0417583a5a1eb326d6032c9ae2b72aadaed919d02086f2daedba2939863499c8c7a63e1eef851f57dc9900 + "00000000000000030130": 00000a28803101a64d85ff96891a1929b5410e7a7a0b6a60ff8bf3378008164e6c21ae82011d8a35b505566077f9e13da0d84b530d3092ab261a2d2abee46f7f28d74c8e20037d37b6b37acd4cf4045a574c36ae84c9a1dd5f69f307c91f0e72fb20bf0d4fa8013c4e03f48d247afced2623d420be80ad04a1a9e87e27d40dde7cea4f96a9a71803 + "00000000000000030180": 89069ac4c9e498cf8311cdc94afb4afac57857c4d11ff4cfff721be3f2f627b2000000000000000000000000000000000000000000000000000000000000004d4d + 000000000000000301a0: 
020882086b44da2a9dd1eaf1e18dad37d7e76005d4b035d5c928629d180c9e0d15554bfb02182fc339100ba14fc2bcbb0ae880c957f3ab0d069eb5f206dd4649004c3fb63100adf6e932d9d77affa57a5662c9976af696c390b173f13f9accdb45d2b3245898030d19ead21c451a82ac4ff7cd7b2f004013811ced4ca96177379ebf5ccc58352900ce290b979ca575bdc45250d691ce3ed7014ec2e83477608208bb75cdead7734903 + 000000000000000301b0: 8224860824620206f5ae3438e207a4bac199dfceedf205d17a5a7f5396b7ceef9eafd0a0032c0a644c823c91a5bea9558492e3b48fd18be253175f64636dbcc860eeee5865017213670135f5ec0a5dfe384bc455d0d377e1c023b39bb0dd1e27e2caaa49a81803c7d6f9eacac49c7dd8949482043c0c42aa0cc5450e38c51e623ed0327ebf094101f3a8e759576757a9c487914a62da083898a037e4aeaa1944c08fe9ff0cfe6a1601409271ef93d4db4c3128801b7e9ae6d181099ca7b8a849670cdc5106b8dad4b801332f352a2bf593288dda47c506db7903535e1900681d84c1bd7cc0ff2fd1548b0034638675b595022e38b43d20ae971594edf5e202eeea2a58bd30f692b2450e1502 + "00000000000000030209": 0959c7836901cba04c833b419ea0dfe0c205bb32a67214c6445b61d7b4a0639b000000000000000000000000000000000000000000000000000000000000004a4a + 0000000000000003020b: 0bebf05465e75ecbdaadb3e79813a435c75b125b2679361a673af1f16b0b8ac8000000000000000000000000000000000000000000000000000000000000004040 + 0000000000000003020c: 0cbf607275c4dca4df0f7ffbb643193ab7a9d7419c0f02e0c3ba79f398f81d46000000000000000000000000000000000000000000000000000000000000004c4c + "00000000000000030239": 39df927e1d5265409744353547dab1f8a27444b2fb917ee896983e046ba91163000000000000000000000000000000000000000000000000000000000000004f4f + 0000000000000003023e: 3e5f86ed85e39b54418692f15640a6bcaf76b7cb6f7b6548da28177df9232ec8000000000000000000000000000000000000000000000000000000000000005252 + "00000000000000030269": 69d819309016b97574c9f345296c54f1432e1a4fc9e48fcc3d3bc03bf4997fda000000000000000000000000000000000000000000000000000000000000004747 + "00000000000000030277": 7783908d59c75dfbcf114da6847cb8abd3be94593bb4267a9b1ebfb223b98ea5000000000000000000000000000000000000000000000000000000000000004b4b + "00000000000000030278": 78421e310f5e842ec956e626534d27008b85f06736ae1f67a1aaa7ed7f21c68f000000000000000000000000000000000000000000000000000000000000005353 + "00000000000000030294": 80000008170ede3e1920837d4865c7c6087bb6117d6083871d773aef1916f8bf1662faea03127afe9fa81b7edde8ad3c48f164abaf9de3ea3c04b5e7eec32bc2609a803c6903 + 0000000000000003029a: 9a559085a559a2e90b54fb02fdd4fd74ab5d604e41c4888c170cdc87c296408e000000000000000000000000000000000000000000000000000000000000004343 + 0000000000000003029f: 9f654242a774697de6c44df46fa9670371ac5999c0b14874dafefb8f4d60638a000000000000000000000000000000000000000000000000000000000000005454 + 000000000000000302a8: a8c56ac32ee75c657919ae3d71f58d15ac0eb2e9d2ff7b9c04b8fd0f96ed7448000000000000000000000000000000000000000000000000000000000000004949 + 000000000000000302ad: ad72cdcdcfb4f0632d055d5445137abf73f21c0dfac21add822c71e82bf2353e000000000000000000000000000000000000000000000000000000000000005151 + 000000000000000302b0: b0e5ace49939a244a59f3654a48da2d9b1c9eb3977925c61a3bac41bfdcfb4ab000000000000000000000000000000000000000000000000000000000000004141 + 000000000000000302b5: 00080200e421f147ed76b69035b1dc7a21a4896daf523abd3a05594f0b9ebe63488ee5700377ab4544f0f4d2663fcc4619ce5a1dd6020cb5f2a4751df29fbb57892276342203 + 000000000000000302c3: c3a199feeed885aebcd589011ee28fdab9550fa9fe53320e56803a6cd4aa0aab000000000000000000000000000000000000000000000000000000000000004242 + 000000000000000302c5: 
c54f0faa1e221486827abe3f900feb05956f20fc7f15f4e084bd673c489aada4000000000000000000000000000000000000000000000000000000000000004e4e + 000000000000000302d1: d1e5b3c0fc106542ae1485cb8b21e43c3c13630a6a3c20f8615c4b7cdb572490000000000000000000000000000000000000000000000000000000000000004444 + 000000000000000302d8: 000000a821be1d978695211e8e63aa6efccbdf85110a0879cc45f718ddbef1d8392221f80205d8b58aa36651d72ad489469fee23b99bdeab996c96f9aaa1fb691e52b0bdbe029e3d820be42fcb2ad42a93a18226f5eef9ae386b6e926111d97489670383cea103 + "0000000000000003039430": 943a91e688714abcd78bec435e5d1f6412e5132eb61ab16fb71947c26d6dff64000000000000000000000000000000000000000000000000000000000000005050 + 00000000000000030394d0: 94d67b778ebe500b3516f7d892d44cb281067e662cc1483d559cd0d73e00f177000000000000000000000000000000000000000000000000000000000000001d1d + 000000000000000303b550: b553466aa2101212ae2cd93d024063d8367439fdba7b959b730db13a4377f992000000000000000000000000000000000000000000000000000000000000004545 + 000000000000000303b580: b58afc68cab79b077af15ab3b543a110771b218b2c3dd3dc097812e86a89629c000000000000000000000000000000000000000000000000000000000000004848 + 000000000000000303d8f0: d8f4c0f3150c5cc8885c2c0aecbc6819e6689bf4338c70da4816d4cb66ed94f5000000000000000000000000000000000000000000000000000000000000004646 + "000000000000000400": 6455555655c0a8dcf08966f67ecba39300a8722ba44482d5e0b23380a9f213e8c337f590cc03227b208a9a19221548e782068f0fdb6fbd4617116bb152b668c3e9a674ea29c404b49d8c482b50bf07df57e2cc5268959d80fcb3f2da9f0f14d21532f57241f3170453b19451c176ee59d8e6f3c12b6adbd31add13bdea3cec23766e64033aaf8f4d037fa6517eea208326fd635d9ea0a7f08f465d46b631a275df33d13c95e4bf02c504a96003c38bcdb41045b60ec68b377fcd9be9095d5864500f657183cfe777d1e8046dec6c64316471d87e7d08b832fae1293c5bd921e6b05fa6fccc65e0b959119904c062fd7630074d60ddcf9cdfdabdb10356fa4383f98d61c9fdf92909bcfcb819048cedd6f68a9ef7189ace7c6d176c54bd862999e67e1b753e6f91050c255082790357aa7cbdb78645f6e96d2f01c675433421af4d024b10de397ed0c1c85b6cfe030492d380598feb12fe1a1d058ddf0866fe3b7a3ad8fa099914132421d807403033035cd30423af28fd201e24c41618d281a3d0cab380970b73da816a8fbe03117c16035ec4b7a0f7cc597669ca4a4d8d7419d400afd56116b898807f03cdc2ef70bf4e04df49b5b8b47705368b4d5e009e292ec1c92b10ef50501bbb191072bdd0350f3e04c4e5b97afa0931090e318bfaebecf8e2a337476e1a8c102dd970969c6688792f04ba8bc88682602128060614528f82d9a2cae1524e615f6a42cf99a99fec431e4504 + "00000000000000040110": a2000800424573be7b442152cfeb1c3601f99f3130c6f17c7c08b7aaa330c6d87a12515a045c82fb7448102c04e67a1975867232ec52a388958eb4e9e6baeee14c5293abe502503fe6ab38d5ac1c8b7bfe8b097205de90a84730a84de1738a85051d4a4a026302da7578e0017433b8084636a698ea93c593baedaf117de2b36e72020115414bbe02 + "00000000000000040120": 00048828f9c73b9300e34ac8e315a7bff566e806e513bcf859f73fed008bbc294cbd861704fa858129a21ac49dc254a945b31dfe16d6aa38f787508a37e17770d646efc8440054811a6dbdccae4f94ec1115b47b52a93363dbfaa0997714af738a41b673af93008f46a18e5863c4ada0e7cd39134e090bc980515a25e481ffc3763ff91c26e91401fdcebb4312eac9d2281efff9ac70c4693fe3a7e5d1be16a233845059ce1d9e3e02 + "00000000000000040140": 00820900ac90a14f9020bf1306cce40cb69603c44f376b19b3b89b164da4adad2522f3cd0444b03ea062f4472e00e34d5e0293dd01552dc38a7a87408fd085c93074aa6c4104a71618826a808ad48f199fd524a76e7dd86dbebc0f41833f7831fbc6841282d004af4063b99148e997961d12ee3dd264572aeb229046a8e5e9b225dc1dfacc72fc01 + "00000000000000040150": 
202008a8c9994c2caa7d57098169757caf7127daad05bbc20a0f2482c66e0bd3e0f2425802351e6086b7c92ade4db538680c6d267f479fccb2d4ce9b10062afdd17369911f04e6d61bcc44717172e1e2c6b2ff387448722c2fc17a17726189a0c93514e5dab4029fa3dba19b7c08af62edc9239b652fb5675abe268ec732eb467087969ba6084b01a64d06efc9a752b9453f768fd58528f039f4fc63aff3e8353f3268ecf15df977046fb85f4290b650931131cbbd4f910b772185cd8006f0b917801678a34b4fa79101 + "00000000000000040160": 2100880851a6e73460f30029943bf553f273b1a48ccd31437e3f83a25fb38dffb2d6ac28020724c3ee74e7f23cb76e9395e87d1e4bff63f1da7049a6eca441724797f3874b02cf2fe9fd420120d82d0609329ca9b5ffa91da307e1b7f51057b747c639a34af8034340fb29a51a13adbfe03a805124b70da17033d7707cfbb0dfcd54d24c1e63d404aaadd43d4d5793871733182fe8506eed15062b5875e420c8b7d22cba53ac378501 + "00000000000000040170": 0080822205a922b3f8195dca99196eacd675b9eb833f50040232bb1f618834baf5b38ba103167a1e11488fdfa9d5b12121dd9910adfbf852c209f3a577b967000745f350f603aacca0ce4f75eee32589c24eec3958eeccef822af3b15daaa432fe3e82a05cb804dab1eee1892b8092343e6487bc31fa80fb2dd7cbb9ed1c6fae878f83dded1c2d00060b08b1a2e058cb5ec3f70f413c5d5440ef7d6e542223d3784fbd6c72e8c68c00 + "00000000000000040190": 8005a8a07dd0df391d962fd4150f793cbc18c888b63ed02cf54a3d0db5cb3f2b29c36e24019fbced1659f7cd1a5c0af9ee189842e1692e65352c1b487f61f2d752f2a0b3d70359773d2c25f7cb39674f6ae2cbc3fbdd82d1ed6972a6aa3ed016269f3e55502404c817723a3d92295eccbaf6eba511b46779dbe67626cde023cf8cd37f33460654018ca3d9745931ef68100145097c22269521f4df6a11f5b2d3e3488216298a8a7603b99a204e2e9faa22aaf21289c9e4f0495f1a388969a5172bc620b50bfa30ddab026f5a9ea74107c8f4de693c261104144e5bc30d5c84fd24817d46e2335b58336701e16772b9714e64a1009a37324bbe60a52cbe8674e7964b8cf9982e83b33ff64103 + 000000000000000401c0: 828a2000c44c8b773c992751a9a68cf562a5d66a82ecb3e5b1f3e825583af4a4dc7188fb02f8e73c81b1ec28c666c9a504eb2be2aa8a2cc1cf0af2889447eea287118568bf032de02cba921be88fba820ac33a520ad265be4c4a290d864f0377884abf900ac002e8927e187971ca77095814caa6cc732835445a638edb782610240ae86388c788030ba1131ae7242bf1d1dfc8c0f32eb0a5cf80c6f85dab3d0d0c5355881ead0e86022e3b99b6d3c2022157528163ef99f3fb81204f8d3f32ff3bcd8fb918e409190904 + 000000000000000401d0: 08800906f4063981ea97ecdb06015df9bf4f6727c57efbf4b0646c3476deb36794324db103afd85ebff302f5bbba6058397c4ea0828d22a3778df3f112311c1d0accf7c708042b1948ebf5edfef56db3a187d77bc30e4b3200612376c93a909b907ff574c6e5037eae10ad154a3d3f012dcfce2fe0adf14890f879676eb80b621029c68d316ca302d82817436305281ec13d95108dab7f12d2efe6baeef0e20c192522e3fb9acfd20190931c85595b1e71a6693a36c92ac5c7859148c77e098b7c950389ab74c5cfcb01 + "000000000000000401e0": 2880a18043fe9188b8b985568354bcdd9250b431c4e714f6b42051be284857a8d2be1722045a0aecb3ba406c7d3e437d8ef8e625e2d77e062da840461ccf48c0c4308f403402299f131a3be0248d2e798e4ad2f600e15e0f33a6cd627aded2521f4ec15f997804f11b8ad362ee769da41a1cd9c52ad888b677eec1f20671001f8777faa9cf964a01fc7d7cf7cf7a7beefa4a24a16c1aee39a859559a0af0057b8ee2d08190b6f92000ac0b59ebc0d792db7fc244e10d924fca664b6f6e2ceec70264212ddc795c962f0012c7eac2be7db3066f6ac802ba777e9eb44c7a5cab845977367ad8b34df71bc500 + 000000000000000401f0: 881000005ee18ade69f359305864cf1d17d37a06c42736b877fafc52e64c3413655b96b104aaa833785c900c5124bf8ed8d919ee7eaa067d54c92198804d10e1cd19bffbc104815452859f52a833bb33a0648e223514255bf86b3b57a022a2cc9e761ce5b5d402 + "00000000000000040210": 10517f8d773163c41a3ad0446b196c056622643019f2f0f7b6ee0bd6374e224e000000000000000000000000000000000000000000000000000000000000005e5e + "00000000000000040225": 
0022020063a7ef2de46a7ccd3a1fa06a407e928949b80e72ddb6b3db95ec8715acd6f7f70475611c7ae282df1b3ad74bb8b08fc01ee7f4dd46036cf91eb514376122e4a62302592ab1d5500c2ed989fa665f501c67499af68a98a0090ea4c7e66cf53684fc0c02 + "00000000000000040244": 446c0030d10b7a6b6c2970f02f4505d71de7bbf95c0c99884c0c01467f53a8ab000000000000000000000000000000000000000000000000000000000000005858 + "00000000000000040247": 473c0e43a2c5fd95e1b6b6154e7a720a5d8d257790f508c601633aff2bd9e48d000000000000000000000000000000000000000000000000000000000000005f5f + "00000000000000040248": 0008002091380386034a68521194d26b451f80c900eed4fb13cb9e8913e3dfc5e592b00a0452d96f4eeda24e81dd90939b2748dacd8a765a938bde5574cc54544aa00d194d04 + "00000000000000040256": 56caf919aa7245d39566bd66e00d1a91b9894333d5d0384b6a2a12c6e66d95d1000000000000000000000000000000000000000000000000000000000000006161 + 0000000000000004025e: 5e3c1c034ab4b4eead015b30d1a272db741bb510d9e3d3f1abd099652302414f000000000000000000000000000000000000000000000000000000000000005c5c + 0000000000000004026b: 6b0fd3cfa6040f65f7276c040ccc7a4b246f90de0d99b1d3d43a2feac6c894bd000000000000000000000000000000000000000000000000000000000000006464 + 0000000000000004027b: 7b26af81f4c1ab7cdaa6fc0c4aec935004631a29ea1382ac3827df603e42a9db000000000000000000000000000000000000000000000000000000000000005b5b + "00000000000000040295": 002000086e520d01c9c38a65012be48c7d7a1ee5de3f33497cd4de2605f2998d6f8c0398045c96cbd1a7d9f8be9e808c5e59d525622725bc800f5ca3aeb5e06ef58f96b05c04 + 000000000000000402ca: ca19a24e1132928718d987a578a4ca038eab9602e8c4abf553bd7189459effef000000000000000000000000000000000000000000000000000000000000005656 + 000000000000000402d7: d753f729b5ccfe040f4f592b95b710c4e6f147bb9178880080f2d094becfad86000000000000000000000000000000000000000000000000000000000000005a5a + "000000000000000402e1": e13b9b67638cdad23dec7259168f9562d1dc889b2be70d752bc3dfe82bc6a712000000000000000000000000000000000000000000000000000000000000005757 + "000000000000000402e7": e7df81ef1c550b915e5e9060719aa92ac78a1fddd47c325b79435a7752f7e5b7000000000000000000000000000000000000000000000000000000000000005959 + 000000000000000402f1: f1a7fba0a3a8c6f5ea1249d0c556b27da90c998fe9d6dc565911b5638f66f5c4000000000000000000000000000000000000000000000000000000000000006363 + 000000000000000402f3: f38ccb0299ceb194f40be88e74d175c707832dc74a73b2087aa1027689cd40f4000000000000000000000000000000000000000000000000000000000000006060 + "0000000000000004032540": 25447bdfd63dd3e622671ad29dd6360a085ce6006ac74a6c0d6a3c279ffc3097000000000000000000000000000000000000000000000000000000000000005d5d + "0000000000000004034850": 4858ed2508d9a8a36c8c771fd9cdd71285d5c4dc624a1e63c0abd64cef699b03000000000000000000000000000000000000000000000000000000000000002626 + "00000000000000040348e0": 48e2e8edc61fc5bcddff86b114b5a870369db85287fe39ee8b9a63837552c315000000000000000000000000000000000000000000000000000000000000005555 + "0000000000000004039560": 956ba33c478bde6c11ea2a1f6105753d55271cad5ce7101a08b8547a234e74c6000000000000000000000000000000000000000000000000000000000000001a1a + 00000000000000040395d0: 95dbef93d0be459e34c6e6663551ebc31befb871daa6430a62329531bde4add7000000000000000000000000000000000000000000000000000000000000006262 +stale_keys: {} + diff --git a/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-3-chunked-commits-pruned.snap b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-3-chunked-commits-pruned.snap new file mode 100644 index 000000000000..608e61167cf3 --- 
/dev/null +++ b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-3-chunked-commits-pruned.snap @@ -0,0 +1,136 @@ +--- +source: core/lib/merkle_tree/tests/integration/merkle_tree.rs +assertion_line: 658 +expression: "DatabaseSnapshot::new(&raw_db)" +--- +tree: + "00": 22030c61726368697465637475726506415231364d5405646570746803323536066861736865720a626c616b653273323536 + "00000000000000020200": 0059914596ed2e70745c44ef315747ea29aff72bce6757aca30a434d9bb70781000000000000000000000000000000000000000000000000000000000000000202 + "00000000000000020204": 049dfddb9f03237fed8d902e350f0b0e9b85af93f49fe02f0d189c47cd7b122b000000000000000000000000000000000000000000000000000000000000000707 + 0000000000000002027c: 7cabd5f57ff72670052939485c4533b01b26fd0e490c31bd48b5ac5002ff1f83000000000000000000000000000000000000000000000000000000000000000909 + 0000000000000002027e: 7ed4dea78574266e019059e5b5fd6f94ed1632bd4a643d1c51aa02974def5684000000000000000000000000000000000000000000000000000000000000000303 + 0000000000000003020e: 0edeb726b4588b4b373d38b7b43e3083c6e105c991e1812bdcce6c373ed98dd5000000000000000000000000000000000000000000000000000000000000000a0a + 000000000000000402bb: bbd2fb6ed132cf2780a90802acaaa371de119dc89f636dbb647ccdff8b0dc056000000000000000000000000000000000000000000000000000000000000000d0d + 000000000000000502ea: eaa40f6cdd316711961300a0f242fdc226f42d4740c381f05200092eaf70b841000000000000000000000000000000000000000000000000000000000000001212 + 000000000000000502eb: ebdfe46967031db428c3b807c7f8d78f6a51e9ca5f0500ca6099d3d26b1d312a000000000000000000000000000000000000000000000000000000000000001010 + 000000000000000502ef: ef783cc720dbf74c747a155a8975b324d2f8fa80672969dc78fe6f12ea59d03f000000000000000000000000000000000000000000000000000000000000001111 + "00000000000000060229": 297faa5f83989e2a8b574589c3eeb5fdced0634fd75d662507604796dd4edd1b000000000000000000000000000000000000000000000000000000000000001515 + 0000000000000006022b: 2b5ffd54bff018009f93f7cd782e8922b24520eb99a100efe2c1a4ae6c1ca3f5000000000000000000000000000000000000000000000000000000000000000b0b + 000000000000000602a5: a58e3a77937b9b747f70d4aee9e84992d7955e52e2de71dc9615df3d16b2b816000000000000000000000000000000000000000000000000000000000000001313 + 000000000000000602ab: ab314b8d202e718d011f9f90df81dd0a00dc4f2279da45121f6dec7257622776000000000000000000000000000000000000000000000000000000000000000f0f + "00000000000000070207": 07216d95abf0edac40a58f6ca27749af481a4ed3b33dd2d3f10efa814d6da0a7000000000000000000000000000000000000000000000000000000000000001818 + "00000000000000080299": 99fa0e7a995a9b3c03f9a186d9581dd08387846aade5384b8e85c9c7c193dfb3000000000000000000000000000000000000000000000000000000000000001b1b + 000000000000000802b9: 002800001632849641ed8fd7f10b2ae3ddaea90de793987e076a54b1d561f0bbc1fa4c65087236b2c6d1e76a12f08ef244830561fd8c3f4cb4eb1d180d9dd3885da41f45cf08 + 000000000000000803b950: b9592813919e07b8df5fd836d1c6e3ac9a9eb7cb66b5ee8a1f4ee0ff3e0b2508000000000000000000000000000000000000000000000000000000000000001919 + 000000000000000803b960: b96e7e15bcbf96c67b1f26fa5ba80089388fbefad39968132c0791cb313d0157000000000000000000000000000000000000000000000000000000000000000404 + 000000000000000902b6: b63eb46438ece57d3d58c40b8193308ffccd3eedec2d645a5ac181f7783aa9c5000000000000000000000000000000000000000000000000000000000000001c1c + 000000000000000902dc: dc3e793187abbabede6f6fc80c3ab3ac4a31019d0ef19f59aff10f614a7149fa000000000000000000000000000000000000000000000000000000000000001e1e + 
000000000000000a026d: 6dfea072e8e999ba0adb48fc1284af16cc1351d53e52b175f9dfe153602b4362000000000000000000000000000000000000000000000000000000000000001414 + 000000000000000a0293: 9300ef4007f853d076758bff4c00d6c979113664a6c948d0d313daa379607fdd000000000000000000000000000000000000000000000000000000000000002020 + 000000000000000a02b3: b314fe2db0b91091038ce12e45838efdfc5c48fe1e68d4f6a4f990ba9e4324f2000000000000000000000000000000000000000000000000000000000000001f1f + 000000000000000b0205: 05bd0bebbb9c224a78e8d9b6876da7f75cc0d247ce0f6e99f8921fbf410a09a4000000000000000000000000000000000000000000000000000000000000002323 + 000000000000000b029e: 9e262dd28666dbdb4101e1c27a64deda0e6064360be4c53c89e7b2b35a311943000000000000000000000000000000000000000000000000000000000000002222 + 000000000000000b02b8: b860ff2535a62373c8c7aa6a31b436cb16427464bde987fc0c35238e10663a56000000000000000000000000000000000000000000000000000000000000002424 + 000000000000000c0249: 49ac53849b70d666cc840b4add0baff56fa9ce7e27be2acb275d109a5994ff8d000000000000000000000000000000000000000000000000000000000000000c0c + 000000000000000c02dd: 00280000bf3a7497054eac25ada2fc8c4d315a3a240e6bbe6ecdd3f2fc0f82d8750984410c57d9159f07e3d3466d61ab4c26d5b3ea993669fa9b133ee2c9c0a4b859af98b30c + 000000000000000c02e8: 0800800005cd913895621437d7b5a41c4cc473ebe257221afa3d6e70297ceacdedfefb2a0c37abb18273775dc4a8d30143a3bf7e2db1495570bec8a7876a34f4c18eb9d8eb0c + 000000000000000c03dd50: dd55470824e0db2b94ea10ae29afd473f265bbe758854354b926846821fef91a000000000000000000000000000000000000000000000000000000000000000606 + 000000000000000c03dd60: dd69263d904a01d711be57a6f4177bbf6745084517b2ac16468ab075cb88a962000000000000000000000000000000000000000000000000000000000000002727 + 000000000000000c03e810: e81cc9e81f01ea7314bf83dc9b7fd942974427fe130fe80b3fb11eed3f80d4ed000000000000000000000000000000000000000000000000000000000000002525 + 000000000000000c03e8b0: e8b8b981f358516ba6e7b76e0007bdecf3e97873abe468fbef40110d8206c8d8000000000000000000000000000000000000000000000000000000000000000505 + 000000000000000d022d: 2dd4a3d0d6d98198be8e16968d57cda367833b99cfd88c809ce3960ff8b41aba000000000000000000000000000000000000000000000000000000000000002929 + 000000000000000d0238: 38ec53adc1cd8bc9f788a5986f73d4e29e2d98945c0aa1d6727be9f8baba4337000000000000000000000000000000000000000000000000000000000000000e0e + 000000000000000d023d: 3d8a8b22175714443f990b996dd26dc71cb53e030d1bf48844df12486e5d4f2a000000000000000000000000000000000000000000000000000000000000002a2a + 000000000000000d025d: 5daa965d9688be0ac629916a9920e45910c13d1fe45d257a9e17217f226dfb4d000000000000000000000000000000000000000000000000000000000000000101 + 000000000000000d025f: 5f8a83305cccf521595d4bdb1ec8b03be3e9e8e27438db250b9e89f09981799d000000000000000000000000000000000000000000000000000000000000002828 + "000000000000000e0201": 01ad1cfd47cdacf2f23714b10102071ea05f91e25ffcf10ac043964bb004b83f000000000000000000000000000000000000000000000000000000000000002c2c + "000000000000000e0259": 5970e9fe810ebec7d53c18e51fe72b464e75851550b0c34a5e7ebabed8e6c7cc000000000000000000000000000000000000000000000000000000000000002d2d + 000000000000000e02d9: d9898cd6484f797318b72190d1939771527071cac4c2e9da4e053e2b6e0dfe1a000000000000000000000000000000000000000000000000000000000000002b2b + 000000000000000f0260: 00008800b450bcb1628ab1ce5b83630fce3da51e983f02c587d8eba576f264288f799bd20f5bf094efc1e3fb61fa5a849f862e091e65738a95d239372e92191d2f5779752b0f + 000000000000000f02c0: 
c0519bcddf9b31ea9fbdfe61998e503dca06026d059b990948d1e076657a191e000000000000000000000000000000000000000000000000000000000000002f2f + 000000000000000f02c4: c4fbca1f402303e4bc0c85ed3817bc2ff549f665047388b1b40fab23553b2a9d000000000000000000000000000000000000000000000000000000000000001717 + 000000000000000f036090: 609f2dcc908d758e6b7dfb10333930babc58eb6ea82a7fb7e332371ba173d855000000000000000000000000000000000000000000000000000000000000002121 + 000000000000000f0360b0: 60b22e6aff3665a5256eb5974025b20ba28c2b93ef016a66812f4c6e0c8ef5c4000000000000000000000000000000000000000000000000000000000000002e2e + 000000000000001002a0: a0aab27f895e1a504ee0862d8a90c2df63bcc4311409bf357a09d6ba4311d0f8000000000000000000000000000000000000000000000000000000000000003333 + 000000000000001002c7: c78049d38618be10de663e1ef60a5a34a9e635692bf9952c3afb837af3f66e1d000000000000000000000000000000000000000000000000000000000000003232 + "00000000000000110203": 03484fcec5bd9ccd1b448f7839fe71c8fb7d0e9e9f9076447daa552c89691f60000000000000000000000000000000000000000000000000000000000000003636 + "00000000000000110212": 121a1293f56198bb1639951b344a3ae26f5bca796569bb07d53ce98f4085bcbc000000000000000000000000000000000000000000000000000000000000000808 + "00000000000000110213": 1303216ea6a1a1b396f5fe43017e9869938a896901ed852d95079a1a7d7d57b5000000000000000000000000000000000000000000000000000000000000003434 + "00000000000000110219": 193fbc2a619e98d253554723ff80e1c2de926a8547f09ee1568e53db2b0fa0a2000000000000000000000000000000000000000000000000000000000000003535 + "00000000000000120252": 5245608ce703d4a307b8c83f268dc60ac248dc26a891baa6116feff1e3cd8ba4000000000000000000000000000000000000000000000000000000000000003939 + 000000000000001202bd: bd11589aa8ca58a6c00ed1b1354d30b53279703644bed0a73e63513ad11fa5bf000000000000000000000000000000000000000000000000000000000000003737 + "0000000000000012032560": 25659d69d133a28e77d07372508d9bebf0784d709c5f3bfaf1a4d53e22d4febb000000000000000000000000000000000000000000000000000000000000003838 + "0000000000000012032580": 2585f6a0139e0a71940d088e01221a4a5a6852220a894f135cf5c415c511f247000000000000000000000000000000000000000000000000000000000000003030 + "00000000000000130262": 620975a9aee0d240d9f52ead3ff4f04b9043e260d67bb961fc16188cf1f63635000000000000000000000000000000000000000000000000000000000000003a3a + "000000000000001302e2": e2adc6f93c6726bb6f0f56324add68457a28b691aa6e08951cab65ec6f37b54b000000000000000000000000000000000000000000000000000000000000003c3c + 000000000000001303d8d0: d8d8c592cc9c74820b9ed9a20fb5709a0d9273f210fbfab39feb94c62f00d806000000000000000000000000000000000000000000000000000000000000003b3b + 000000000000001303d8e0: d8ecaa095dc3b2f77c057c770718c0affe6a9caa0104136143437cf30fb5688e000000000000000000000000000000000000000000000000000000000000003131 + 0000000000000014022e: 2e54be9fbeefab49441556d63666897e33eedb3ff99ed9bffe4b88f9fd85ba11000000000000000000000000000000000000000000000000000000000000003f3f + 0000000000000014029b: 9b420dfe14cdceb77e4464b2a841b6cf6221a29acf6bf363a556a13b72467404000000000000000000000000000000000000000000000000000000000000003d3d + 000000000000001402f6: 0082000048902a08b0d7edf4969a608d61ec9911074fc5efb163bc207048d35be9ac0503141993dcbaa58bdd07fccd22a725289a2ab266ee265a3c0dc0eda3a48a154235a114 + 000000000000001403f640: f6479d69073315ce4df8b1234bd36fde45581f7b61749ba87374c8c71f0b0d72000000000000000000000000000000000000000000000000000000000000001616 + 000000000000001403f670: 
f673a7125608b9951ba643a43a2b7fc3a87af522e820cccfa65f977949a98c12000000000000000000000000000000000000000000000000000000000000003e3e + 0000000000000015020b: 0bebf05465e75ecbdaadb3e79813a435c75b125b2679361a673af1f16b0b8ac8000000000000000000000000000000000000000000000000000000000000004040 + 000000000000001502b0: b0e5ace49939a244a59f3654a48da2d9b1c9eb3977925c61a3bac41bfdcfb4ab000000000000000000000000000000000000000000000000000000000000004141 + 000000000000001502c3: c3a199feeed885aebcd589011ee28fdab9550fa9fe53320e56803a6cd4aa0aab000000000000000000000000000000000000000000000000000000000000004242 + 0000000000000016029a: 9a559085a559a2e90b54fb02fdd4fd74ab5d604e41c4888c170cdc87c296408e000000000000000000000000000000000000000000000000000000000000004343 + 000000000000001602d1: d1e5b3c0fc106542ae1485cb8b21e43c3c13630a6a3c20f8615c4b7cdb572490000000000000000000000000000000000000000000000000000000000000004444 + 000000000000001701b0: 8224860824620206f5ae3438e207a4bac199dfceedf205d17a5a7f5396b7ceef9eafd0a0152c0a644c823c91a5bea9558492e3b48fd18be253175f64636dbcc860eeee58650a7213670135f5ec0a5dfe384bc455d0d377e1c023b39bb0dd1e27e2caaa49a81817c7d6f9eacac49c7dd8949482043c0c42aa0cc5450e38c51e623ed0327ebf094109f3a8e759576757a9c487914a62da083898a037e4aeaa1944c08fe9ff0cfe6a160b409271ef93d4db4c3128801b7e9ae6d181099ca7b8a849670cdc5106b8dad4b808332f352a2bf593288dda47c506db7903535e1900681d84c1bd7cc0ff2fd1548b0434638675b595022e38b43d20ae971594edf5e202eeea2a58bd30f692b2450e1512 + "00000000000000170269": 69d819309016b97574c9f345296c54f1432e1a4fc9e48fcc3d3bc03bf4997fda000000000000000000000000000000000000000000000000000000000000004747 + 000000000000001702b5: 00080200e421f147ed76b69035b1dc7a21a4896daf523abd3a05594f0b9ebe63488ee5701777ab4544f0f4d2663fcc4619ce5a1dd6020cb5f2a4751df29fbb57892276342217 + 000000000000001702d8: 000000a821be1d978695211e8e63aa6efccbdf85110a0879cc45f718ddbef1d8392221f81305d8b58aa36651d72ad489469fee23b99bdeab996c96f9aaa1fb691e52b0bdbe139e3d820be42fcb2ad42a93a18226f5eef9ae386b6e926111d97489670383cea117 + 000000000000001703b550: b553466aa2101212ae2cd93d024063d8367439fdba7b959b730db13a4377f992000000000000000000000000000000000000000000000000000000000000004545 + 000000000000001703b580: b58afc68cab79b077af15ab3b543a110771b218b2c3dd3dc097812e86a89629c000000000000000000000000000000000000000000000000000000000000004848 + 000000000000001703d8f0: d8f4c0f3150c5cc8885c2c0aecbc6819e6689bf4338c70da4816d4cb66ed94f5000000000000000000000000000000000000000000000000000000000000004646 + "00000000000000180209": 0959c7836901cba04c833b419ea0dfe0c205bb32a67214c6445b61d7b4a0639b000000000000000000000000000000000000000000000000000000000000004a4a + "00000000000000180277": 7783908d59c75dfbcf114da6847cb8abd3be94593bb4267a9b1ebfb223b98ea5000000000000000000000000000000000000000000000000000000000000004b4b + 000000000000001802a8: a8c56ac32ee75c657919ae3d71f58d15ac0eb2e9d2ff7b9c04b8fd0f96ed7448000000000000000000000000000000000000000000000000000000000000004949 + "00000000000000190100": 
8a8a882257b5edb42fb6c1d47b66df6f451d1f0b07c707b265b47a0a445a49121bf17f09023564f8fd0ce402f3344ebabc39f73df9f5c2c5e7a6ef60d3c65bd3fa126e386b0e663d14857c3a5444a050d9655e3827c4728e80d79d1292fd51f52c34b1e7457b1114c2aaac166403626fff92472cec22712462ccd14ca9d431b2cdcc75fe0912cd0226e57d3219488c374ae9bd6c9778ffd012f013335e68a58e614ab7b397b6b3db0b941121db179d2296e2f1633f191f7f52dbb69956adb74e105be899b9ab90771c07eb7ced5baec3075725903e2e21b0e1457be9ff668e5e6d40e843117004e4acfa18ae37c191db8c160b309ddf0f969c234bb77fcdf25dfde6f541e0ddffbcfe0de2156f21fa6ab54c46ae0467b9a707bc397efcc6b392f748ee0417583a5a1eb326d6192c9ae2b72aadaed919d02086f2daedba2939863499c8c7a63e1eef851f57dc9903 + "00000000000000190180": 89069ac4c9e498cf8311cdc94afb4afac57857c4d11ff4cfff721be3f2f627b2000000000000000000000000000000000000000000000000000000000000004d4d + 0000000000000019020c: 0cbf607275c4dca4df0f7ffbb643193ab7a9d7419c0f02e0c3ba79f398f81d46000000000000000000000000000000000000000000000000000000000000004c4c + 000000000000001902c5: c54f0faa1e221486827abe3f900feb05956f20fc7f15f4e084bd673c489aada4000000000000000000000000000000000000000000000000000000000000004e4e + 000000000000001a01a0: 020882086b44da2a9dd1eaf1e18dad37d7e76005d4b035d5c928629d180c9e0d15554bfb10182fc339100ba14fc2bcbb0ae880c957f3ab0d069eb5f206dd4649004c3fb63106adf6e932d9d77affa57a5662c9976af696c390b173f13f9accdb45d2b3245898180d19ead21c451a82ac4ff7cd7b2f004013811ced4ca96177379ebf5ccc58352906ce290b979ca575bdc45250d691ce3ed7014ec2e83477608208bb75cdead773491a + 000000000000001a0239: 39df927e1d5265409744353547dab1f8a27444b2fb917ee896983e046ba91163000000000000000000000000000000000000000000000000000000000000004f4f + 000000000000001a0294: 80000008170ede3e1920837d4865c7c6087bb6117d6083871d773aef1916f8bf1662faea1a127afe9fa81b7edde8ad3c48f164abaf9de3ea3c04b5e7eec32bc2609a803c691a + 000000000000001a02ad: ad72cdcdcfb4f0632d055d5445137abf73f21c0dfac21add822c71e82bf2353e000000000000000000000000000000000000000000000000000000000000005151 + 000000000000001a039430: 943a91e688714abcd78bec435e5d1f6412e5132eb61ab16fb71947c26d6dff64000000000000000000000000000000000000000000000000000000000000005050 + 000000000000001a0394d0: 94d67b778ebe500b3516f7d892d44cb281067e662cc1483d559cd0d73e00f177000000000000000000000000000000000000000000000000000000000000001d1d + 000000000000001b0130: 00000a28803101a64d85ff96891a1929b5410e7a7a0b6a60ff8bf3378008164e6c21ae820d1d8a35b505566077f9e13da0d84b530d3092ab261a2d2abee46f7f28d74c8e201a7d37b6b37acd4cf4045a574c36ae84c9a1dd5f69f307c91f0e72fb20bf0d4fa80d3c4e03f48d247afced2623d420be80ad04a1a9e87e27d40dde7cea4f96a9a7181b + 000000000000001b023e: 3e5f86ed85e39b54418692f15640a6bcaf76b7cb6f7b6548da28177df9232ec8000000000000000000000000000000000000000000000000000000000000005252 + 000000000000001b0278: 78421e310f5e842ec956e626534d27008b85f06736ae1f67a1aaa7ed7f21c68f000000000000000000000000000000000000000000000000000000000000005353 + 000000000000001b029f: 9f654242a774697de6c44df46fa9670371ac5999c0b14874dafefb8f4d60638a000000000000000000000000000000000000000000000000000000000000005454 + 000000000000001c01c0: 828a2000c44c8b773c992751a9a68cf562a5d66a82ecb3e5b1f3e825583af4a4dc7188fb0ff8e73c81b1ec28c666c9a504eb2be2aa8a2cc1cf0af2889447eea287118568bf152de02cba921be88fba820ac33a520ad265be4c4a290d864f0377884abf900ac00fe8927e187971ca77095814caa6cc732835445a638edb782610240ae86388c788190ba1131ae7242bf1d1dfc8c0f32eb0a5cf80c6f85dab3d0d0c5355881ead0e86102e3b99b6d3c2022157528163ef99f3fb81204f8d3f32ff3bcd8fb918e40919091c + 000000000000001c0248: 
0008002091380386034a68521194d26b451f80c900eed4fb13cb9e8913e3dfc5e592b00a1c52d96f4eeda24e81dd90939b2748dacd8a765a938bde5574cc54544aa00d194d1c + 000000000000001c02ca: ca19a24e1132928718d987a578a4ca038eab9602e8c4abf553bd7189459effef000000000000000000000000000000000000000000000000000000000000005656 + 000000000000001c02e1: e13b9b67638cdad23dec7259168f9562d1dc889b2be70d752bc3dfe82bc6a712000000000000000000000000000000000000000000000000000000000000005757 + 000000000000001c034850: 4858ed2508d9a8a36c8c771fd9cdd71285d5c4dc624a1e63c0abd64cef699b03000000000000000000000000000000000000000000000000000000000000002626 + 000000000000001c0348e0: 48e2e8edc61fc5bcddff86b114b5a870369db85287fe39ee8b9a63837552c315000000000000000000000000000000000000000000000000000000000000005555 + 000000000000001d01d0: 08800906f4063981ea97ecdb06015df9bf4f6727c57efbf4b0646c3476deb36794324db116afd85ebff302f5bbba6058397c4ea0828d22a3778df3f112311c1d0accf7c7081d2b1948ebf5edfef56db3a187d77bc30e4b3200612376c93a909b907ff574c6e5177eae10ad154a3d3f012dcfce2fe0adf14890f879676eb80b621029c68d316ca30ed82817436305281ec13d95108dab7f12d2efe6baeef0e20c192522e3fb9acfd20990931c85595b1e71a6693a36c92ac5c7859148c77e098b7c950389ab74c5cfcb0c + 000000000000001d01e0: 2880a18043fe9188b8b985568354bcdd9250b431c4e714f6b42051be284857a8d2be17221c5a0aecb3ba406c7d3e437d8ef8e625e2d77e062da840461ccf48c0c4308f403413299f131a3be0248d2e798e4ad2f600e15e0f33a6cd627aded2521f4ec15f99781df11b8ad362ee769da41a1cd9c52ad888b677eec1f20671001f8777faa9cf964a0cfc7d7cf7cf7a7beefa4a24a16c1aee39a859559a0af0057b8ee2d08190b6f92005ac0b59ebc0d792db7fc244e10d924fca664b6f6e2ceec70264212ddc795c962f0512c7eac2be7db3066f6ac802ba777e9eb44c7a5cab845977367ad8b34df71bc505 + 000000000000001d0244: 446c0030d10b7a6b6c2970f02f4505d71de7bbf95c0c99884c0c01467f53a8ab000000000000000000000000000000000000000000000000000000000000005858 + 000000000000001d02d7: d753f729b5ccfe040f4f592b95b710c4e6f147bb9178880080f2d094becfad86000000000000000000000000000000000000000000000000000000000000005a5a + 000000000000001d02e7: e7df81ef1c550b915e5e9060719aa92ac78a1fddd47c325b79435a7752f7e5b7000000000000000000000000000000000000000000000000000000000000005959 + "000000000000001e0120": 00048828f9c73b9300e34ac8e315a7bff566e806e513bcf859f73fed008bbc294cbd86171efa858129a21ac49dc254a945b31dfe16d6aa38f787508a37e17770d646efc8440654811a6dbdccae4f94ec1115b47b52a93363dbfaa0997714af738a41b673af93068f46a18e5863c4ada0e7cd39134e090bc980515a25e481ffc3763ff91c26e9140dfdcebb4312eac9d2281efff9ac70c4693fe3a7e5d1be16a233845059ce1d9e3e14 + "000000000000001e0170": 0080822205a922b3f8195dca99196eacd675b9eb833f50040232bb1f618834baf5b38ba118167a1e11488fdfa9d5b12121dd9910adfbf852c209f3a577b967000745f350f61baacca0ce4f75eee32589c24eec3958eeccef822af3b15daaa432fe3e82a05cb81edab1eee1892b8092343e6487bc31fa80fb2dd7cbb9ed1c6fae878f83dded1c2d02060b08b1a2e058cb5ec3f70f413c5d5440ef7d6e542223d3784fbd6c72e8c68c02 + "000000000000001e0225": 0022020063a7ef2de46a7ccd3a1fa06a407e928949b80e72ddb6b3db95ec8715acd6f7f71e75611c7ae282df1b3ad74bb8b08fc01ee7f4dd46036cf91eb514376122e4a62312592ab1d5500c2ed989fa665f501c67499af68a98a0090ea4c7e66cf53684fc0c12 + 000000000000001e025e: 5e3c1c034ab4b4eead015b30d1a272db741bb510d9e3d3f1abd099652302414f000000000000000000000000000000000000000000000000000000000000005c5c + 000000000000001e027b: 7b26af81f4c1ab7cdaa6fc0c4aec935004631a29ea1382ac3827df603e42a9db000000000000000000000000000000000000000000000000000000000000005b5b + "000000000000001e032540": 
25447bdfd63dd3e622671ad29dd6360a085ce6006ac74a6c0d6a3c279ffc3097000000000000000000000000000000000000000000000000000000000000005d5d + 000000000000001f0110: a2000800424573be7b442152cfeb1c3601f99f3130c6f17c7c08b7aaa330c6d87a12515a1f5c82fb7448102c04e67a1975867232ec52a388958eb4e9e6baeee14c5293abe511503fe6ab38d5ac1c8b7bfe8b097205de90a84730a84de1738a85051d4a4a026311da7578e0017433b8084636a698ea93c593baedaf117de2b36e72020115414bbe11 + 000000000000001f0140: 00820900ac90a14f9020bf1306cce40cb69603c44f376b19b3b89b164da4adad2522f3cd1d44b03ea062f4472e00e34d5e0293dd01552dc38a7a87408fd085c93074aa6c411fa71618826a808ad48f199fd524a76e7dd86dbebc0f41833f7831fbc6841282d01caf4063b99148e997961d12ee3dd264572aeb229046a8e5e9b225dc1dfacc72fc0c + 000000000000001f0210: 10517f8d773163c41a3ad0446b196c056622643019f2f0f7b6ee0bd6374e224e000000000000000000000000000000000000000000000000000000000000005e5e + 000000000000001f0247: 473c0e43a2c5fd95e1b6b6154e7a720a5d8d257790f508c601633aff2bd9e48d000000000000000000000000000000000000000000000000000000000000005f5f + 000000000000001f02f3: f38ccb0299ceb194f40be88e74d175c707832dc74a73b2087aa1027689cd40f4000000000000000000000000000000000000000000000000000000000000006060 + "00000000000000200150": 202008a8c9994c2caa7d57098169757caf7127daad05bbc20a0f2482c66e0bd3e0f2425812351e6086b7c92ade4db538680c6d267f479fccb2d4ce9b10062afdd17369911f20e6d61bcc44717172e1e2c6b2ff387448722c2fc17a17726189a0c93514e5dab40e9fa3dba19b7c08af62edc9239b652fb5675abe268ec732eb467087969ba6084b0da64d06efc9a752b9453f768fd58528f039f4fc63aff3e8353f3268ecf15df9771e6fb85f4290b650931131cbbd4f910b772185cd8006f0b917801678a34b4fa7910d + "00000000000000200190": 8005a8a07dd0df391d962fd4150f793cbc18c888b63ed02cf54a3d0db5cb3f2b29c36e240a9fbced1659f7cd1a5c0af9ee189842e1692e65352c1b487f61f2d752f2a0b3d71a59773d2c25f7cb39674f6ae2cbc3fbdd82d1ed6972a6aa3ed016269f3e55502420c817723a3d92295eccbaf6eba511b46779dbe67626cde023cf8cd37f33460654088ca3d9745931ef68100145097c22269521f4df6a11f5b2d3e3488216298a8a7616b99a204e2e9faa22aaf21289c9e4f0495f1a388969a5172bc620b50bfa30ddab146f5a9ea74107c8f4de693c261104144e5bc30d5c84fd24817d46e2335b5833670be16772b9714e64a1009a37324bbe60a52cbe8674e7964b8cf9982e83b33ff6411b + 000000000000002001f0: 881000005ee18ade69f359305864cf1d17d37a06c42736b877fafc52e64c3413655b96b120aaa833785c900c5124bf8ed8d919ee7eaa067d54c92198804d10e1cd19bffbc11f815452859f52a833bb33a0648e223514255bf86b3b57a022a2cc9e761ce5b5d414 + "00000000000000200256": 56caf919aa7245d39566bd66e00d1a91b9894333d5d0384b6a2a12c6e66d95d1000000000000000000000000000000000000000000000000000000000000006161 + "00000000000000200295": 002000086e520d01c9c38a65012be48c7d7a1ee5de3f33497cd4de2605f2998d6f8c0398205c96cbd1a7d9f8be9e808c5e59d525622725bc800f5ca3aeb5e06ef58f96b05c20 + 000000000000002002f1: f1a7fba0a3a8c6f5ea1249d0c556b27da90c998fe9d6dc565911b5638f66f5c4000000000000000000000000000000000000000000000000000000000000006363 + "0000000000000020039560": 956ba33c478bde6c11ea2a1f6105753d55271cad5ce7101a08b8547a234e74c6000000000000000000000000000000000000000000000000000000000000001a1a + 00000000000000200395d0: 95dbef93d0be459e34c6e6663551ebc31befb871daa6430a62329531bde4add7000000000000000000000000000000000000000000000000000000000000006262 + "000000000000002100": 
6455555655c0a8dcf08966f67ecba39300a8722ba44482d5e0b23380a9f213e8c337f590cc19227b208a9a19221548e782068f0fdb6fbd4617116bb152b668c3e9a674ea29c41fb49d8c482b50bf07df57e2cc5268959d80fcb3f2da9f0f14d21532f57241f3171e53b19451c176ee59d8e6f3c12b6adbd31add13bdea3cec23766e64033aaf8f4d1b7fa6517eea208326fd635d9ea0a7f08f465d46b631a275df33d13c95e4bf02c51fa96003c38bcdb41045b60ec68b377fcd9be9095d5864500f657183cfe777d1e8206dec6c64316471d87e7d08b832fae1293c5bd921e6b05fa6fccc65e0b959119921c062fd7630074d60ddcf9cdfdabdb10356fa4383f98d61c9fdf92909bcfcb8191e8cedd6f68a9ef7189ace7c6d176c54bd862999e67e1b753e6f91050c255082791957aa7cbdb78645f6e96d2f01c675433421af4d024b10de397ed0c1c85b6cfe032092d380598feb12fe1a1d058ddf0866fe3b7a3ad8fa099914132421d8074030331a5cd30423af28fd201e24c41618d281a3d0cab380970b73da816a8fbe03117c16175ec4b7a0f7cc597669ca4a4d8d7419d400afd56116b898807f03cdc2ef70bf4e1cdf49b5b8b47705368b4d5e009e292ec1c92b10ef50501bbb191072bdd0350f3e1dc4e5b97afa0931090e318bfaebecf8e2a337476e1a8c102dd970969c6688792f1dba8bc88682602128060614528f82d9a2cae1524e615f6a42cf99a99fec431e4520 + "00000000000000210160": 2100880851a6e73460f30029943bf553f273b1a48ccd31437e3f83a25fb38dffb2d6ac280f0724c3ee74e7f23cb76e9395e87d1e4bff63f1da7049a6eca441724797f3874b13cf2fe9fd420120d82d0609329ca9b5ffa91da307e1b7f51057b747c639a34af8174340fb29a51a13adbfe03a805124b70da17033d7707cfbb0dfcd54d24c1e63d421aaadd43d4d5793871733182fe8506eed15062b5875e420c8b7d22cba53ac37850a + 0000000000000021026b: 6b0fd3cfa6040f65f7276c040ccc7a4b246f90de0d99b1d3d43a2feac6c894bd000000000000000000000000000000000000000000000000000000000000006464 +stale_keys: {} + diff --git a/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-8-chunked-commits-pruned.snap b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-8-chunked-commits-pruned.snap new file mode 100644 index 000000000000..e0b2b0f792b3 --- /dev/null +++ b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-8-chunked-commits-pruned.snap @@ -0,0 +1,136 @@ +--- +source: core/lib/merkle_tree/tests/integration/merkle_tree.rs +assertion_line: 658 +expression: "DatabaseSnapshot::new(&raw_db)" +--- +tree: + "00": 0d030c61726368697465637475726506415231364d5405646570746803323536066861736865720a626c616b653273323536 + "00000000000000000200": 0059914596ed2e70745c44ef315747ea29aff72bce6757aca30a434d9bb70781000000000000000000000000000000000000000000000000000000000000000202 + "00000000000000000204": 049dfddb9f03237fed8d902e350f0b0e9b85af93f49fe02f0d189c47cd7b122b000000000000000000000000000000000000000000000000000000000000000707 + 0000000000000001020e: 0edeb726b4588b4b373d38b7b43e3083c6e105c991e1812bdcce6c373ed98dd5000000000000000000000000000000000000000000000000000000000000000a0a + 0000000000000001027c: 7cabd5f57ff72670052939485c4533b01b26fd0e490c31bd48b5ac5002ff1f83000000000000000000000000000000000000000000000000000000000000000909 + 0000000000000001027e: 7ed4dea78574266e019059e5b5fd6f94ed1632bd4a643d1c51aa02974def5684000000000000000000000000000000000000000000000000000000000000000303 + 000000000000000102bb: bbd2fb6ed132cf2780a90802acaaa371de119dc89f636dbb647ccdff8b0dc056000000000000000000000000000000000000000000000000000000000000000d0d + 000000000000000102eb: ebdfe46967031db428c3b807c7f8d78f6a51e9ca5f0500ca6099d3d26b1d312a000000000000000000000000000000000000000000000000000000000000001010 + "00000000000000020207": 
07216d95abf0edac40a58f6ca27749af481a4ed3b33dd2d3f10efa814d6da0a7000000000000000000000000000000000000000000000000000000000000001818 + "00000000000000020229": 297faa5f83989e2a8b574589c3eeb5fdced0634fd75d662507604796dd4edd1b000000000000000000000000000000000000000000000000000000000000001515 + 0000000000000002022b: 2b5ffd54bff018009f93f7cd782e8922b24520eb99a100efe2c1a4ae6c1ca3f5000000000000000000000000000000000000000000000000000000000000000b0b + 000000000000000202a5: a58e3a77937b9b747f70d4aee9e84992d7955e52e2de71dc9615df3d16b2b816000000000000000000000000000000000000000000000000000000000000001313 + 000000000000000202ab: ab314b8d202e718d011f9f90df81dd0a00dc4f2279da45121f6dec7257622776000000000000000000000000000000000000000000000000000000000000000f0f + 000000000000000202ea: eaa40f6cdd316711961300a0f242fdc226f42d4740c381f05200092eaf70b841000000000000000000000000000000000000000000000000000000000000001212 + 000000000000000202ef: ef783cc720dbf74c747a155a8975b324d2f8fa80672969dc78fe6f12ea59d03f000000000000000000000000000000000000000000000000000000000000001111 + "00000000000000030293": 9300ef4007f853d076758bff4c00d6c979113664a6c948d0d313daa379607fdd000000000000000000000000000000000000000000000000000000000000002020 + "00000000000000030299": 99fa0e7a995a9b3c03f9a186d9581dd08387846aade5384b8e85c9c7c193dfb3000000000000000000000000000000000000000000000000000000000000001b1b + 000000000000000302b3: b314fe2db0b91091038ce12e45838efdfc5c48fe1e68d4f6a4f990ba9e4324f2000000000000000000000000000000000000000000000000000000000000001f1f + 000000000000000302b6: b63eb46438ece57d3d58c40b8193308ffccd3eedec2d645a5ac181f7783aa9c5000000000000000000000000000000000000000000000000000000000000001c1c + 000000000000000302b9: 002800001632849641ed8fd7f10b2ae3ddaea90de793987e076a54b1d561f0bbc1fa4c65037236b2c6d1e76a12f08ef244830561fd8c3f4cb4eb1d180d9dd3885da41f45cf03 + 000000000000000302dc: dc3e793187abbabede6f6fc80c3ab3ac4a31019d0ef19f59aff10f614a7149fa000000000000000000000000000000000000000000000000000000000000001e1e + 000000000000000303b950: b9592813919e07b8df5fd836d1c6e3ac9a9eb7cb66b5ee8a1f4ee0ff3e0b2508000000000000000000000000000000000000000000000000000000000000001919 + 000000000000000303b960: b96e7e15bcbf96c67b1f26fa5ba80089388fbefad39968132c0791cb313d0157000000000000000000000000000000000000000000000000000000000000000404 + "00000000000000040205": 05bd0bebbb9c224a78e8d9b6876da7f75cc0d247ce0f6e99f8921fbf410a09a4000000000000000000000000000000000000000000000000000000000000002323 + "00000000000000040249": 49ac53849b70d666cc840b4add0baff56fa9ce7e27be2acb275d109a5994ff8d000000000000000000000000000000000000000000000000000000000000000c0c + 0000000000000004025d: 5daa965d9688be0ac629916a9920e45910c13d1fe45d257a9e17217f226dfb4d000000000000000000000000000000000000000000000000000000000000000101 + 0000000000000004025f: 5f8a83305cccf521595d4bdb1ec8b03be3e9e8e27438db250b9e89f09981799d000000000000000000000000000000000000000000000000000000000000002828 + 0000000000000004026d: 6dfea072e8e999ba0adb48fc1284af16cc1351d53e52b175f9dfe153602b4362000000000000000000000000000000000000000000000000000000000000001414 + 0000000000000004029e: 9e262dd28666dbdb4101e1c27a64deda0e6064360be4c53c89e7b2b35a311943000000000000000000000000000000000000000000000000000000000000002222 + 000000000000000402b8: b860ff2535a62373c8c7aa6a31b436cb16427464bde987fc0c35238e10663a56000000000000000000000000000000000000000000000000000000000000002424 + 000000000000000402dd: 
00280000bf3a7497054eac25ada2fc8c4d315a3a240e6bbe6ecdd3f2fc0f82d8750984410457d9159f07e3d3466d61ab4c26d5b3ea993669fa9b133ee2c9c0a4b859af98b304 + "000000000000000402e8": 0800800005cd913895621437d7b5a41c4cc473ebe257221afa3d6e70297ceacdedfefb2a0437abb18273775dc4a8d30143a3bf7e2db1495570bec8a7876a34f4c18eb9d8eb04 + 000000000000000403dd50: dd55470824e0db2b94ea10ae29afd473f265bbe758854354b926846821fef91a000000000000000000000000000000000000000000000000000000000000000606 + 000000000000000403dd60: dd69263d904a01d711be57a6f4177bbf6745084517b2ac16468ab075cb88a962000000000000000000000000000000000000000000000000000000000000002727 + "000000000000000403e810": e81cc9e81f01ea7314bf83dc9b7fd942974427fe130fe80b3fb11eed3f80d4ed000000000000000000000000000000000000000000000000000000000000002525 + 000000000000000403e8b0: e8b8b981f358516ba6e7b76e0007bdecf3e97873abe468fbef40110d8206c8d8000000000000000000000000000000000000000000000000000000000000000505 + "00000000000000050201": 01ad1cfd47cdacf2f23714b10102071ea05f91e25ffcf10ac043964bb004b83f000000000000000000000000000000000000000000000000000000000000002c2c + 0000000000000005022d: 2dd4a3d0d6d98198be8e16968d57cda367833b99cfd88c809ce3960ff8b41aba000000000000000000000000000000000000000000000000000000000000002929 + "00000000000000050238": 38ec53adc1cd8bc9f788a5986f73d4e29e2d98945c0aa1d6727be9f8baba4337000000000000000000000000000000000000000000000000000000000000000e0e + 0000000000000005023d: 3d8a8b22175714443f990b996dd26dc71cb53e030d1bf48844df12486e5d4f2a000000000000000000000000000000000000000000000000000000000000002a2a + "00000000000000050259": 5970e9fe810ebec7d53c18e51fe72b464e75851550b0c34a5e7ebabed8e6c7cc000000000000000000000000000000000000000000000000000000000000002d2d + "00000000000000050260": 00008800b450bcb1628ab1ce5b83630fce3da51e983f02c587d8eba576f264288f799bd2055bf094efc1e3fb61fa5a849f862e091e65738a95d239372e92191d2f5779752b05 + 000000000000000502c0: c0519bcddf9b31ea9fbdfe61998e503dca06026d059b990948d1e076657a191e000000000000000000000000000000000000000000000000000000000000002f2f + 000000000000000502c4: c4fbca1f402303e4bc0c85ed3817bc2ff549f665047388b1b40fab23553b2a9d000000000000000000000000000000000000000000000000000000000000001717 + 000000000000000502d9: d9898cd6484f797318b72190d1939771527071cac4c2e9da4e053e2b6e0dfe1a000000000000000000000000000000000000000000000000000000000000002b2b + "0000000000000005036090": 609f2dcc908d758e6b7dfb10333930babc58eb6ea82a7fb7e332371ba173d855000000000000000000000000000000000000000000000000000000000000002121 + 00000000000000050360b0: 60b22e6aff3665a5256eb5974025b20ba28c2b93ef016a66812f4c6e0c8ef5c4000000000000000000000000000000000000000000000000000000000000002e2e + "00000000000000060203": 03484fcec5bd9ccd1b448f7839fe71c8fb7d0e9e9f9076447daa552c89691f60000000000000000000000000000000000000000000000000000000000000003636 + "00000000000000060212": 121a1293f56198bb1639951b344a3ae26f5bca796569bb07d53ce98f4085bcbc000000000000000000000000000000000000000000000000000000000000000808 + "00000000000000060213": 1303216ea6a1a1b396f5fe43017e9869938a896901ed852d95079a1a7d7d57b5000000000000000000000000000000000000000000000000000000000000003434 + "00000000000000060219": 193fbc2a619e98d253554723ff80e1c2de926a8547f09ee1568e53db2b0fa0a2000000000000000000000000000000000000000000000000000000000000003535 + 000000000000000602a0: a0aab27f895e1a504ee0862d8a90c2df63bcc4311409bf357a09d6ba4311d0f8000000000000000000000000000000000000000000000000000000000000003333 + 000000000000000602bd: 
bd11589aa8ca58a6c00ed1b1354d30b53279703644bed0a73e63513ad11fa5bf000000000000000000000000000000000000000000000000000000000000003737 + 000000000000000602c7: c78049d38618be10de663e1ef60a5a34a9e635692bf9952c3afb837af3f66e1d000000000000000000000000000000000000000000000000000000000000003232 + "0000000000000006032560": 25659d69d133a28e77d07372508d9bebf0784d709c5f3bfaf1a4d53e22d4febb000000000000000000000000000000000000000000000000000000000000003838 + "0000000000000006032580": 2585f6a0139e0a71940d088e01221a4a5a6852220a894f135cf5c415c511f247000000000000000000000000000000000000000000000000000000000000003030 + 0000000000000007020b: 0bebf05465e75ecbdaadb3e79813a435c75b125b2679361a673af1f16b0b8ac8000000000000000000000000000000000000000000000000000000000000004040 + 0000000000000007022e: 2e54be9fbeefab49441556d63666897e33eedb3ff99ed9bffe4b88f9fd85ba11000000000000000000000000000000000000000000000000000000000000003f3f + "00000000000000070252": 5245608ce703d4a307b8c83f268dc60ac248dc26a891baa6116feff1e3cd8ba4000000000000000000000000000000000000000000000000000000000000003939 + "00000000000000070262": 620975a9aee0d240d9f52ead3ff4f04b9043e260d67bb961fc16188cf1f63635000000000000000000000000000000000000000000000000000000000000003a3a + 0000000000000007029b: 9b420dfe14cdceb77e4464b2a841b6cf6221a29acf6bf363a556a13b72467404000000000000000000000000000000000000000000000000000000000000003d3d + "000000000000000702e2": e2adc6f93c6726bb6f0f56324add68457a28b691aa6e08951cab65ec6f37b54b000000000000000000000000000000000000000000000000000000000000003c3c + 000000000000000702f6: 0082000048902a08b0d7edf4969a608d61ec9911074fc5efb163bc207048d35be9ac0503071993dcbaa58bdd07fccd22a725289a2ab266ee265a3c0dc0eda3a48a154235a107 + 000000000000000703d8d0: d8d8c592cc9c74820b9ed9a20fb5709a0d9273f210fbfab39feb94c62f00d806000000000000000000000000000000000000000000000000000000000000003b3b + 000000000000000703d8e0: d8ecaa095dc3b2f77c057c770718c0affe6a9caa0104136143437cf30fb5688e000000000000000000000000000000000000000000000000000000000000003131 + 000000000000000703f640: f6479d69073315ce4df8b1234bd36fde45581f7b61749ba87374c8c71f0b0d72000000000000000000000000000000000000000000000000000000000000001616 + 000000000000000703f670: f673a7125608b9951ba643a43a2b7fc3a87af522e820cccfa65f977949a98c12000000000000000000000000000000000000000000000000000000000000003e3e + 000000000000000801b0: 8224860824620206f5ae3438e207a4bac199dfceedf205d17a5a7f5396b7ceef9eafd0a0082c0a644c823c91a5bea9558492e3b48fd18be253175f64636dbcc860eeee5865037213670135f5ec0a5dfe384bc455d0d377e1c023b39bb0dd1e27e2caaa49a81808c7d6f9eacac49c7dd8949482043c0c42aa0cc5450e38c51e623ed0327ebf094103f3a8e759576757a9c487914a62da083898a037e4aeaa1944c08fe9ff0cfe6a1604409271ef93d4db4c3128801b7e9ae6d181099ca7b8a849670cdc5106b8dad4b803332f352a2bf593288dda47c506db7903535e1900681d84c1bd7cc0ff2fd1548b0134638675b595022e38b43d20ae971594edf5e202eeea2a58bd30f692b2450e1506 + "00000000000000080269": 69d819309016b97574c9f345296c54f1432e1a4fc9e48fcc3d3bc03bf4997fda000000000000000000000000000000000000000000000000000000000000004747 + 0000000000000008029a: 9a559085a559a2e90b54fb02fdd4fd74ab5d604e41c4888c170cdc87c296408e000000000000000000000000000000000000000000000000000000000000004343 + 000000000000000802b0: b0e5ace49939a244a59f3654a48da2d9b1c9eb3977925c61a3bac41bfdcfb4ab000000000000000000000000000000000000000000000000000000000000004141 + 000000000000000802b5: 00080200e421f147ed76b69035b1dc7a21a4896daf523abd3a05594f0b9ebe63488ee5700877ab4544f0f4d2663fcc4619ce5a1dd6020cb5f2a4751df29fbb57892276342208 + 000000000000000802c3: 
c3a199feeed885aebcd589011ee28fdab9550fa9fe53320e56803a6cd4aa0aab000000000000000000000000000000000000000000000000000000000000004242 + 000000000000000802d1: d1e5b3c0fc106542ae1485cb8b21e43c3c13630a6a3c20f8615c4b7cdb572490000000000000000000000000000000000000000000000000000000000000004444 + 000000000000000802d8: 000000a821be1d978695211e8e63aa6efccbdf85110a0879cc45f718ddbef1d8392221f80705d8b58aa36651d72ad489469fee23b99bdeab996c96f9aaa1fb691e52b0bdbe079e3d820be42fcb2ad42a93a18226f5eef9ae386b6e926111d97489670383cea108 + 000000000000000803b550: b553466aa2101212ae2cd93d024063d8367439fdba7b959b730db13a4377f992000000000000000000000000000000000000000000000000000000000000004545 + 000000000000000803b580: b58afc68cab79b077af15ab3b543a110771b218b2c3dd3dc097812e86a89629c000000000000000000000000000000000000000000000000000000000000004848 + 000000000000000803d8f0: d8f4c0f3150c5cc8885c2c0aecbc6819e6689bf4338c70da4816d4cb66ed94f5000000000000000000000000000000000000000000000000000000000000004646 + "00000000000000090100": 8a8a882257b5edb42fb6c1d47b66df6f451d1f0b07c707b265b47a0a445a49121bf17f09003564f8fd0ce402f3344ebabc39f73df9f5c2c5e7a6ef60d3c65bd3fa126e386b05663d14857c3a5444a050d9655e3827c4728e80d79d1292fd51f52c34b1e7457b0614c2aaac166403626fff92472cec22712462ccd14ca9d431b2cdcc75fe0912cd0026e57d3219488c374ae9bd6c9778ffd012f013335e68a58e614ab7b397b6b3db04941121db179d2296e2f1633f191f7f52dbb69956adb74e105be899b9ab90771c02eb7ced5baec3075725903e2e21b0e1457be9ff668e5e6d40e843117004e4acfa09ae37c191db8c160b309ddf0f969c234bb77fcdf25dfde6f541e0ddffbcfe0de2076f21fa6ab54c46ae0467b9a707bc397efcc6b392f748ee0417583a5a1eb326d6092c9ae2b72aadaed919d02086f2daedba2939863499c8c7a63e1eef851f57dc9901 + "00000000000000090180": 89069ac4c9e498cf8311cdc94afb4afac57857c4d11ff4cfff721be3f2f627b2000000000000000000000000000000000000000000000000000000000000004d4d + "00000000000000090209": 0959c7836901cba04c833b419ea0dfe0c205bb32a67214c6445b61d7b4a0639b000000000000000000000000000000000000000000000000000000000000004a4a + 0000000000000009020c: 0cbf607275c4dca4df0f7ffbb643193ab7a9d7419c0f02e0c3ba79f398f81d46000000000000000000000000000000000000000000000000000000000000004c4c + "00000000000000090239": 39df927e1d5265409744353547dab1f8a27444b2fb917ee896983e046ba91163000000000000000000000000000000000000000000000000000000000000004f4f + "00000000000000090277": 7783908d59c75dfbcf114da6847cb8abd3be94593bb4267a9b1ebfb223b98ea5000000000000000000000000000000000000000000000000000000000000004b4b + "00000000000000090294": 80000008170ede3e1920837d4865c7c6087bb6117d6083871d773aef1916f8bf1662faea09127afe9fa81b7edde8ad3c48f164abaf9de3ea3c04b5e7eec32bc2609a803c6909 + 000000000000000902a8: a8c56ac32ee75c657919ae3d71f58d15ac0eb2e9d2ff7b9c04b8fd0f96ed7448000000000000000000000000000000000000000000000000000000000000004949 + 000000000000000902c5: c54f0faa1e221486827abe3f900feb05956f20fc7f15f4e084bd673c489aada4000000000000000000000000000000000000000000000000000000000000004e4e + "0000000000000009039430": 943a91e688714abcd78bec435e5d1f6412e5132eb61ab16fb71947c26d6dff64000000000000000000000000000000000000000000000000000000000000005050 + 00000000000000090394d0: 94d67b778ebe500b3516f7d892d44cb281067e662cc1483d559cd0d73e00f177000000000000000000000000000000000000000000000000000000000000001d1d + 000000000000000a0130: 
00000a28803101a64d85ff96891a1929b5410e7a7a0b6a60ff8bf3378008164e6c21ae82051d8a35b505566077f9e13da0d84b530d3092ab261a2d2abee46f7f28d74c8e20097d37b6b37acd4cf4045a574c36ae84c9a1dd5f69f307c91f0e72fb20bf0d4fa8053c4e03f48d247afced2623d420be80ad04a1a9e87e27d40dde7cea4f96a9a7180a + 000000000000000a01a0: 020882086b44da2a9dd1eaf1e18dad37d7e76005d4b035d5c928629d180c9e0d15554bfb06182fc339100ba14fc2bcbb0ae880c957f3ab0d069eb5f206dd4649004c3fb63102adf6e932d9d77affa57a5662c9976af696c390b173f13f9accdb45d2b3245898090d19ead21c451a82ac4ff7cd7b2f004013811ced4ca96177379ebf5ccc58352902ce290b979ca575bdc45250d691ce3ed7014ec2e83477608208bb75cdead773490a + 000000000000000a01c0: 828a2000c44c8b773c992751a9a68cf562a5d66a82ecb3e5b1f3e825583af4a4dc7188fb05f8e73c81b1ec28c666c9a504eb2be2aa8a2cc1cf0af2889447eea287118568bf082de02cba921be88fba820ac33a520ad265be4c4a290d864f0377884abf900ac005e8927e187971ca77095814caa6cc732835445a638edb782610240ae86388c788090ba1131ae7242bf1d1dfc8c0f32eb0a5cf80c6f85dab3d0d0c5355881ead0e86062e3b99b6d3c2022157528163ef99f3fb81204f8d3f32ff3bcd8fb918e40919090a + 000000000000000a023e: 3e5f86ed85e39b54418692f15640a6bcaf76b7cb6f7b6548da28177df9232ec8000000000000000000000000000000000000000000000000000000000000005252 + 000000000000000a0244: 446c0030d10b7a6b6c2970f02f4505d71de7bbf95c0c99884c0c01467f53a8ab000000000000000000000000000000000000000000000000000000000000005858 + 000000000000000a0248: 0008002091380386034a68521194d26b451f80c900eed4fb13cb9e8913e3dfc5e592b00a0a52d96f4eeda24e81dd90939b2748dacd8a765a938bde5574cc54544aa00d194d0a + 000000000000000a0278: 78421e310f5e842ec956e626534d27008b85f06736ae1f67a1aaa7ed7f21c68f000000000000000000000000000000000000000000000000000000000000005353 + 000000000000000a029f: 9f654242a774697de6c44df46fa9670371ac5999c0b14874dafefb8f4d60638a000000000000000000000000000000000000000000000000000000000000005454 + 000000000000000a02ad: ad72cdcdcfb4f0632d055d5445137abf73f21c0dfac21add822c71e82bf2353e000000000000000000000000000000000000000000000000000000000000005151 + 000000000000000a02ca: ca19a24e1132928718d987a578a4ca038eab9602e8c4abf553bd7189459effef000000000000000000000000000000000000000000000000000000000000005656 + 000000000000000a02e1: e13b9b67638cdad23dec7259168f9562d1dc889b2be70d752bc3dfe82bc6a712000000000000000000000000000000000000000000000000000000000000005757 + 000000000000000a034850: 4858ed2508d9a8a36c8c771fd9cdd71285d5c4dc624a1e63c0abd64cef699b03000000000000000000000000000000000000000000000000000000000000002626 + 000000000000000a0348e0: 48e2e8edc61fc5bcddff86b114b5a870369db85287fe39ee8b9a63837552c315000000000000000000000000000000000000000000000000000000000000005555 + 000000000000000b0110: a2000800424573be7b442152cfeb1c3601f99f3130c6f17c7c08b7aaa330c6d87a12515a0b5c82fb7448102c04e67a1975867232ec52a388958eb4e9e6baeee14c5293abe506503fe6ab38d5ac1c8b7bfe8b097205de90a84730a84de1738a85051d4a4a026306da7578e0017433b8084636a698ea93c593baedaf117de2b36e72020115414bbe06 + 000000000000000b0120: 00048828f9c73b9300e34ac8e315a7bff566e806e513bcf859f73fed008bbc294cbd86170bfa858129a21ac49dc254a945b31dfe16d6aa38f787508a37e17770d646efc8440254811a6dbdccae4f94ec1115b47b52a93363dbfaa0997714af738a41b673af93028f46a18e5863c4ada0e7cd39134e090bc980515a25e481ffc3763ff91c26e91405fdcebb4312eac9d2281efff9ac70c4693fe3a7e5d1be16a233845059ce1d9e3e07 + 000000000000000b0140: 
00820900ac90a14f9020bf1306cce40cb69603c44f376b19b3b89b164da4adad2522f3cd0a44b03ea062f4472e00e34d5e0293dd01552dc38a7a87408fd085c93074aa6c410ba71618826a808ad48f199fd524a76e7dd86dbebc0f41833f7831fbc6841282d00aaf4063b99148e997961d12ee3dd264572aeb229046a8e5e9b225dc1dfacc72fc04 + 000000000000000b0170: 0080822205a922b3f8195dca99196eacd675b9eb833f50040232bb1f618834baf5b38ba109167a1e11488fdfa9d5b12121dd9910adfbf852c209f3a577b967000745f350f60aaacca0ce4f75eee32589c24eec3958eeccef822af3b15daaa432fe3e82a05cb80bdab1eee1892b8092343e6487bc31fa80fb2dd7cbb9ed1c6fae878f83dded1c2d01060b08b1a2e058cb5ec3f70f413c5d5440ef7d6e542223d3784fbd6c72e8c68c01 + 000000000000000b01d0: 08800906f4063981ea97ecdb06015df9bf4f6727c57efbf4b0646c3476deb36794324db108afd85ebff302f5bbba6058397c4ea0828d22a3778df3f112311c1d0accf7c7080b2b1948ebf5edfef56db3a187d77bc30e4b3200612376c93a909b907ff574c6e5087eae10ad154a3d3f012dcfce2fe0adf14890f879676eb80b621029c68d316ca305d82817436305281ec13d95108dab7f12d2efe6baeef0e20c192522e3fb9acfd20390931c85595b1e71a6693a36c92ac5c7859148c77e098b7c950389ab74c5cfcb04 + 000000000000000b01e0: 2880a18043fe9188b8b985568354bcdd9250b431c4e714f6b42051be284857a8d2be17220a5a0aecb3ba406c7d3e437d8ef8e625e2d77e062da840461ccf48c0c4308f403407299f131a3be0248d2e798e4ad2f600e15e0f33a6cd627aded2521f4ec15f99780bf11b8ad362ee769da41a1cd9c52ad888b677eec1f20671001f8777faa9cf964a04fc7d7cf7cf7a7beefa4a24a16c1aee39a859559a0af0057b8ee2d08190b6f92002ac0b59ebc0d792db7fc244e10d924fca664b6f6e2ceec70264212ddc795c962f0112c7eac2be7db3066f6ac802ba777e9eb44c7a5cab845977367ad8b34df71bc502 + 000000000000000b0210: 10517f8d773163c41a3ad0446b196c056622643019f2f0f7b6ee0bd6374e224e000000000000000000000000000000000000000000000000000000000000005e5e + 000000000000000b0225: 0022020063a7ef2de46a7ccd3a1fa06a407e928949b80e72ddb6b3db95ec8715acd6f7f70b75611c7ae282df1b3ad74bb8b08fc01ee7f4dd46036cf91eb514376122e4a62306592ab1d5500c2ed989fa665f501c67499af68a98a0090ea4c7e66cf53684fc0c06 + 000000000000000b0247: 473c0e43a2c5fd95e1b6b6154e7a720a5d8d257790f508c601633aff2bd9e48d000000000000000000000000000000000000000000000000000000000000005f5f + 000000000000000b025e: 5e3c1c034ab4b4eead015b30d1a272db741bb510d9e3d3f1abd099652302414f000000000000000000000000000000000000000000000000000000000000005c5c + 000000000000000b027b: 7b26af81f4c1ab7cdaa6fc0c4aec935004631a29ea1382ac3827df603e42a9db000000000000000000000000000000000000000000000000000000000000005b5b + 000000000000000b02d7: d753f729b5ccfe040f4f592b95b710c4e6f147bb9178880080f2d094becfad86000000000000000000000000000000000000000000000000000000000000005a5a + 000000000000000b02e7: e7df81ef1c550b915e5e9060719aa92ac78a1fddd47c325b79435a7752f7e5b7000000000000000000000000000000000000000000000000000000000000005959 + 000000000000000b02f3: f38ccb0299ceb194f40be88e74d175c707832dc74a73b2087aa1027689cd40f4000000000000000000000000000000000000000000000000000000000000006060 + 000000000000000b032540: 25447bdfd63dd3e622671ad29dd6360a085ce6006ac74a6c0d6a3c279ffc3097000000000000000000000000000000000000000000000000000000000000005d5d + 000000000000000c00: 
6455555655c0a8dcf08966f67ecba39300a8722ba44482d5e0b23380a9f213e8c337f590cc09227b208a9a19221548e782068f0fdb6fbd4617116bb152b668c3e9a674ea29c40bb49d8c482b50bf07df57e2cc5268959d80fcb3f2da9f0f14d21532f57241f3170b53b19451c176ee59d8e6f3c12b6adbd31add13bdea3cec23766e64033aaf8f4d0a7fa6517eea208326fd635d9ea0a7f08f465d46b631a275df33d13c95e4bf02c50ba96003c38bcdb41045b60ec68b377fcd9be9095d5864500f657183cfe777d1e80c6dec6c64316471d87e7d08b832fae1293c5bd921e6b05fa6fccc65e0b95911990cc062fd7630074d60ddcf9cdfdabdb10356fa4383f98d61c9fdf92909bcfcb8190b8cedd6f68a9ef7189ace7c6d176c54bd862999e67e1b753e6f91050c255082790957aa7cbdb78645f6e96d2f01c675433421af4d024b10de397ed0c1c85b6cfe030c92d380598feb12fe1a1d058ddf0866fe3b7a3ad8fa099914132421d8074030330a5cd30423af28fd201e24c41618d281a3d0cab380970b73da816a8fbe03117c16085ec4b7a0f7cc597669ca4a4d8d7419d400afd56116b898807f03cdc2ef70bf4e0adf49b5b8b47705368b4d5e009e292ec1c92b10ef50501bbb191072bdd0350f3e0bc4e5b97afa0931090e318bfaebecf8e2a337476e1a8c102dd970969c6688792f0bba8bc88682602128060614528f82d9a2cae1524e615f6a42cf99a99fec431e450c + 000000000000000c0150: 202008a8c9994c2caa7d57098169757caf7127daad05bbc20a0f2482c66e0bd3e0f2425807351e6086b7c92ade4db538680c6d267f479fccb2d4ce9b10062afdd17369911f0ce6d61bcc44717172e1e2c6b2ff387448722c2fc17a17726189a0c93514e5dab4059fa3dba19b7c08af62edc9239b652fb5675abe268ec732eb467087969ba6084b04a64d06efc9a752b9453f768fd58528f039f4fc63aff3e8353f3268ecf15df9770b6fb85f4290b650931131cbbd4f910b772185cd8006f0b917801678a34b4fa79104 + 000000000000000c0160: 2100880851a6e73460f30029943bf553f273b1a48ccd31437e3f83a25fb38dffb2d6ac28050724c3ee74e7f23cb76e9395e87d1e4bff63f1da7049a6eca441724797f3874b07cf2fe9fd420120d82d0609329ca9b5ffa91da307e1b7f51057b747c639a34af8084340fb29a51a13adbfe03a805124b70da17033d7707cfbb0dfcd54d24c1e63d40caaadd43d4d5793871733182fe8506eed15062b5875e420c8b7d22cba53ac378504 + 000000000000000c0190: 8005a8a07dd0df391d962fd4150f793cbc18c888b63ed02cf54a3d0db5cb3f2b29c36e24039fbced1659f7cd1a5c0af9ee189842e1692e65352c1b487f61f2d752f2a0b3d70959773d2c25f7cb39674f6ae2cbc3fbdd82d1ed6972a6aa3ed016269f3e5550240cc817723a3d92295eccbaf6eba511b46779dbe67626cde023cf8cd37f33460654038ca3d9745931ef68100145097c22269521f4df6a11f5b2d3e3488216298a8a7608b99a204e2e9faa22aaf21289c9e4f0495f1a388969a5172bc620b50bfa30ddab076f5a9ea74107c8f4de693c261104144e5bc30d5c84fd24817d46e2335b58336704e16772b9714e64a1009a37324bbe60a52cbe8674e7964b8cf9982e83b33ff6410a + 000000000000000c01f0: 881000005ee18ade69f359305864cf1d17d37a06c42736b877fafc52e64c3413655b96b10caaa833785c900c5124bf8ed8d919ee7eaa067d54c92198804d10e1cd19bffbc10b815452859f52a833bb33a0648e223514255bf86b3b57a022a2cc9e761ce5b5d407 + 000000000000000c0256: 56caf919aa7245d39566bd66e00d1a91b9894333d5d0384b6a2a12c6e66d95d1000000000000000000000000000000000000000000000000000000000000006161 + 000000000000000c026b: 6b0fd3cfa6040f65f7276c040ccc7a4b246f90de0d99b1d3d43a2feac6c894bd000000000000000000000000000000000000000000000000000000000000006464 + 000000000000000c0295: 002000086e520d01c9c38a65012be48c7d7a1ee5de3f33497cd4de2605f2998d6f8c03980c5c96cbd1a7d9f8be9e808c5e59d525622725bc800f5ca3aeb5e06ef58f96b05c0c + 000000000000000c02f1: f1a7fba0a3a8c6f5ea1249d0c556b27da90c998fe9d6dc565911b5638f66f5c4000000000000000000000000000000000000000000000000000000000000006363 + 000000000000000c039560: 956ba33c478bde6c11ea2a1f6105753d55271cad5ce7101a08b8547a234e74c6000000000000000000000000000000000000000000000000000000000000001a1a + 000000000000000c0395d0: 
95dbef93d0be459e34c6e6663551ebc31befb871daa6430a62329531bde4add7000000000000000000000000000000000000000000000000000000000000006262 +stale_keys: {} + diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index c6921d0d2718..3f0dee89f424 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -10,7 +10,7 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] [dependencies] -zk_evm_1_3_3 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", tag= "v1.3.3-rc1" } +zk_evm_1_3_3 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", tag= "v1.3.3-rc2" } zk_evm_1_3_1 = { package = "zk_evm", git = "https://github.com/matter-labs/era-zk_evm.git", tag= "v1.3.1-rc2" } zksync_types = { path = "../types" } diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index 40849948f083..e7d7939b6691 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -37,6 +37,7 @@ impl GlueFrom for crate::interface::FinishedL1B l2_to_l1_logs: value.full_result.l2_to_l1_logs, total_log_queries: value.full_result.total_log_queries, cycles_used: value.full_result.cycles_used, + storage_refunds: Vec::new(), }, final_bootloader_memory: None, } @@ -70,6 +71,7 @@ impl GlueFrom for crate::interface::FinishedL1B l2_to_l1_logs: value.full_result.l2_to_l1_logs, total_log_queries: value.full_result.total_log_queries, cycles_used: value.full_result.cycles_used, + storage_refunds: Vec::new(), }, final_bootloader_memory: None, } @@ -103,6 +105,7 @@ impl GlueFrom for crate::interface::Finished l2_to_l1_logs: value.full_result.l2_to_l1_logs, total_log_queries: value.full_result.total_log_queries, cycles_used: value.full_result.cycles_used, + storage_refunds: Vec::new(), }, final_bootloader_memory: None, } diff --git a/core/lib/multivm/src/interface/types/outputs/execution_state.rs b/core/lib/multivm/src/interface/types/outputs/execution_state.rs index 3ae36a179672..128db82ba22d 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_state.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_state.rs @@ -16,6 +16,8 @@ pub struct CurrentExecutionState { pub total_log_queries: usize, /// Number of cycles used by the VM. pub cycles_used: u32, + /// Refunds returned by `StorageOracle`. + pub storage_refunds: Vec<u32>, } /// Bootloader Memory of the VM. diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/precompile.rs index c856df786d88..0693fac6d60e 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/precompile.rs @@ -58,7 +58,7 @@ impl PrecompilesProcessor for PrecompilesProcesso ) -> Option<(Vec, Vec, PrecompileCyclesWitness)> { // In the next line we use the same `query.timestamp` as both // an operation in the history of precompiles processor and - // the time when this operation occured. + // the time when this operation occurred. // While slightly weird, it is done for consistency with other oracles // where operations and timestamp have different types.
self.timestamp_history diff --git a/core/lib/multivm/src/versions/vm_1_3_2/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/tests/bootloader.rs index 10e7620dd4b3..da9087afedd1 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/tests/bootloader.rs @@ -673,7 +673,7 @@ // }; // if test_info.should_rollback() { -// // Some error has occured, we should reject the transaction +// // Some error has occurred, we should reject the transaction // vm.rollback_to_latest_snapshot(); // // vm_state_before_tx. diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/precompile.rs index 96786cf24e02..5566595108be 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/precompile.rs @@ -58,7 +58,7 @@ impl PrecompilesProcessor for PrecompilesProcesso ) -> Option<(Vec, Vec, PrecompileCyclesWitness)> { // In the next line we use the same `query.timestamp` as both // an operation in the history of precompiles processor and - // the time when this operation occured. + // the time when this operation occurred. // While slightly weird, it is done for consistency with other oracles // where operations and timestamp have different types. self.timestamp_history diff --git a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs index 42f856865ffb..bcbce932cb47 100644 --- a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use crate::vm_latest::old_vm::history_recorder::{ AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, WithHistory, + HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, }; use crate::vm_latest::old_vm::oracles::OracleWithHistory; @@ -52,6 +52,9 @@ pub struct StorageOracle { // While formally it does not have to be rollbackable, we still do it to avoid memory bloat // for unused slots. pub(crate) initial_values: HistoryRecorder, H>, + + // Storage refunds that the oracle has returned in `estimate_refunds_for_write`. + pub(crate) returned_refunds: HistoryRecorder<Vec<u32>, H>, } impl OracleWithHistory for StorageOracle { @@ -61,6 +64,7 @@ impl OracleWithHistory for StorageOracle { self.pre_paid_changes.rollback_to_timestamp(timestamp); self.paid_changes.rollback_to_timestamp(timestamp); self.initial_values.rollback_to_timestamp(timestamp); + self.returned_refunds.rollback_to_timestamp(timestamp); } } @@ -72,6 +76,7 @@ impl StorageOracle { pre_paid_changes: Default::default(), paid_changes: Default::default(), initial_values: Default::default(), + returned_refunds: Default::default(), } } @@ -81,6 +86,7 @@ impl StorageOracle { self.pre_paid_changes.delete_history(); self.paid_changes.delete_history(); self.initial_values.delete_history(); + self.returned_refunds.delete_history(); } fn is_storage_key_free(&self, key: &StorageKey) -> bool { @@ -338,11 +344,17 @@ impl VmStorageOracle for StorageOracle { ) -> RefundType { let price_to_pay = self.value_update_price(partial_query); - RefundType::RepeatedWrite(RefundedAmounts { + let refund = RefundType::RepeatedWrite(RefundedAmounts { ergs: 0, // `INITIAL_STORAGE_WRITE_PUBDATA_BYTES` is the default amount of pubdata bytes the user pays for.
pubdata_bytes: (INITIAL_STORAGE_WRITE_PUBDATA_BYTES as u32) - price_to_pay, - }) + }); + self.returned_refunds.apply_historic_record( + VectorHistoryEvent::Push(refund.pubdata_refund()), + partial_query.timestamp, + ); + + refund } // Indicate a start of execution frame for rollback purposes diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs index 4bc279ddefbf..893efde27399 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs @@ -51,6 +51,7 @@ pub(crate) struct StorageOracleInnerState { pub(crate) pre_paid_changes: HistoryRecorder, H>, pub(crate) paid_changes: HistoryRecorder, H>, pub(crate) initial_values: HistoryRecorder, H>, + pub(crate) returned_refunds: HistoryRecorder<Vec<u32>, H>, } #[derive(Clone, PartialEq, Debug)] @@ -108,6 +109,7 @@ impl Vm { pre_paid_changes: self.state.storage.pre_paid_changes.clone(), paid_changes: self.state.storage.paid_changes.clone(), initial_values: self.state.storage.initial_values.clone(), + returned_refunds: self.state.storage.returned_refunds.clone(), }; let local_state = self.state.local_state.clone(); diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index c4c2fe94ee90..770f45c0c122 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -103,6 +103,7 @@ impl Vm { l2_to_l1_logs, total_log_queries, cycles_used: self.state.local_state.monotonic_cycle_counter, + storage_refunds: self.state.storage.returned_refunds.inner().clone(), } } diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_m5/oracles/precompile.rs index 853ce7d8cec0..137a1046d48d 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/precompile.rs @@ -61,7 +61,7 @@ impl PrecompilesProcessor for PrecompilesProcessorWithHistory ) -> Option<(Vec, Vec, PrecompileCyclesWitness)> { // In the next line we use the same `query.timestamp` as both // an operation in the history of precompiles processor and - // the time when this operation occured. + // the time when this operation occurred. // While slightly weird, it is done for consistency with other oracles // where operations and timestamp have different types. self.timestamp_history diff --git a/core/lib/multivm/src/versions/vm_m5/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_m5/tests/bootloader.rs index e3ef3f991b83..1034e8595936 100644 --- a/core/lib/multivm/src/versions/vm_m5/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m5/tests/bootloader.rs @@ -624,7 +624,7 @@ // }; // if test_info.should_rollback() { -// // Some error has occured, we should reject the transaction +// // Some error has occurred, we should reject the transaction // vm.rollback_to_latest_snapshot(); // // vm_state_before_tx.
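To make the rollback semantics of the new `returned_refunds` recorder concrete: each push is journaled together with the VM timestamp at which it happened, so a reverted execution frame can undo exactly the refunds it granted. Below is a minimal sketch of that journaling idea; the `RefundRecorder` name and the `main` driver are invented for illustration, while the real `HistoryRecorder<Vec<u32>, H>` lives in `old_vm/history_recorder.rs` and is generic over history modes and event types.

// Simplified stand-in for `HistoryRecorder<Vec<u32>, H>`: pushes are journaled
// with their timestamp so they can be undone on rollback.
#[derive(Debug, Default)]
struct RefundRecorder {
    inner: Vec<u32>,          // current refund values, oldest first
    history: Vec<(u32, u32)>, // (timestamp, refund) journal of applied pushes
}

impl RefundRecorder {
    fn push(&mut self, refund: u32, timestamp: u32) {
        self.history.push((timestamp, refund));
        self.inner.push(refund);
    }

    // Undoes every push recorded at or after `timestamp`, mirroring
    // `rollback_to_timestamp` on the real recorder.
    fn rollback_to_timestamp(&mut self, timestamp: u32) {
        while matches!(self.history.last(), Some(&(ts, _)) if ts >= timestamp) {
            self.history.pop();
            self.inner.pop();
        }
    }
}

fn main() {
    let mut refunds = RefundRecorder::default();
    refunds.push(100, 1); // refund granted at VM timestamp 1
    refunds.push(40, 2);  // refund granted at VM timestamp 2
    refunds.rollback_to_timestamp(2); // the frame at timestamp 2 reverted
    assert_eq!(refunds.inner, vec![100]);
}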
diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_m6/oracles/precompile.rs index bb7f9f568191..aff382614af0 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/precompile.rs @@ -58,7 +58,7 @@ impl PrecompilesProcessor for PrecompilesProcesso ) -> Option<(Vec, Vec, PrecompileCyclesWitness)> { // In the next line we use the same `query.timestamp` as both // an operation in the history of precompiles processor and - // the time when this operation occured. + // the time when this operation occurred. // While slightly weird, it is done for consistency with other oracles // where operations and timestamp have different types. self.timestamp_history diff --git a/core/lib/multivm/src/versions/vm_m6/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_m6/tests/bootloader.rs index 26fe03453225..be840e16a142 100644 --- a/core/lib/multivm/src/versions/vm_m6/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/tests/bootloader.rs @@ -626,7 +626,7 @@ // }; // if test_info.should_rollback() { -// // Some error has occured, we should reject the transaction +// // Some error has occurred, we should reject the transaction // vm.rollback_to_latest_snapshot(); // // vm_state_before_tx. diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/precompile.rs index 03f96ec9e622..11ddb26d03a1 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/precompile.rs @@ -60,7 +60,7 @@ impl PrecompilesProcessor for PrecompilesProcesso ) -> Option<(Vec, Vec, PrecompileCyclesWitness)> { // In the next line we use the same `query.timestamp` as both // an operation in the history of precompiles processor and - // the time when this operation occured. + // the time when this operation occurred. // While slightly weird, it is done for consistency with other oracles // where operations and timestamp have different types. self.timestamp_history diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 59165c6e485c..93ae111c27aa 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -102,6 +102,7 @@ impl Vm { l2_to_l1_logs, total_log_queries, cycles_used: self.state.local_state.monotonic_cycle_counter, + storage_refunds: Vec::new(), } } diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index 2e4a2281a863..b9a9d81fc547 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -106,7 +106,7 @@ pub(crate) async fn create_l1_batch( ); header.is_finished = true; conn.blocks_dal() - .insert_l1_batch(&header, &[], BlockGasCount::default(), &[]) + .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[]) .await .unwrap(); conn.blocks_dal() diff --git a/core/lib/storage/src/db.rs b/core/lib/storage/src/db.rs index c6b75c6a25ed..b2a3636f62f5 100644 --- a/core/lib/storage/src/db.rs +++ b/core/lib/storage/src/db.rs @@ -4,7 +4,7 @@ use rocksdb::{ }; use std::{ - collections::HashSet, + collections::{HashMap, HashSet}, ffi::CStr, fmt, marker::PhantomData, @@ -29,6 +29,12 @@ pub trait NamedColumnFamily: 'static + Copy { const ALL: &'static [Self]; /// Names a column family to access it in `RocksDB`.
Also used in metrics reporting. fn name(&self) -> &'static str; + + /// Returns whether this CF is so large that it's likely to require special configuration in terms + /// of compaction / memtables. + fn requires_tuning(&self) -> bool { + false + } } /// Thin typesafe wrapper around RocksDB `WriteBatch`. @@ -96,12 +102,39 @@ pub(crate) struct RocksDBInner { } impl RocksDBInner { - pub(crate) fn report_sizes(&self, metrics: &RocksdbSizeMetrics) { + pub(crate) fn collect_metrics(&self, metrics: &RocksdbSizeMetrics) { for &cf_name in &self.cf_names { let cf = self.db.cf_handle(cf_name).unwrap(); // ^ `unwrap()` is safe (CF existence is checked during DB initialization) let labels = RocksdbLabels::new(self.db_name, cf_name); + let writes_stopped = self.int_property(cf, properties::IS_WRITE_STOPPED); + let writes_stopped = writes_stopped == Some(1); + metrics.writes_stopped[&labels].set(writes_stopped.into()); + + let num_immutable_memtables = + self.int_property(cf, properties::NUM_IMMUTABLE_MEM_TABLE); + if let Some(num_immutable_memtables) = num_immutable_memtables { + metrics.immutable_mem_tables[&labels].set(num_immutable_memtables); + } + let num_level0_files = self.int_property(cf, &properties::num_files_at_level(0)); + if let Some(num_level0_files) = num_level0_files { + metrics.level0_files[&labels].set(num_level0_files); + } + let num_flushes = self.int_property(cf, properties::NUM_RUNNING_FLUSHES); + if let Some(num_flushes) = num_flushes { + metrics.running_flushes[&labels].set(num_flushes); + } + let num_compactions = self.int_property(cf, properties::NUM_RUNNING_COMPACTIONS); + if let Some(num_compactions) = num_compactions { + metrics.running_compactions[&labels].set(num_compactions); + } + let pending_compactions = + self.int_property(cf, properties::ESTIMATE_PENDING_COMPACTION_BYTES); + if let Some(pending_compactions) = pending_compactions { + metrics.pending_compactions[&labels].set(pending_compactions); + } + let live_data_size = self.int_property(cf, properties::ESTIMATE_LIVE_DATA_SIZE); if let Some(size) = live_data_size { metrics.live_data_size[&labels].set(size); @@ -139,26 +172,66 @@ impl RocksDBInner { } property } + + /// Waits until writes are not stopped for any of the CFs. Writes can stop immediately on DB initialization + /// if there are too many level-0 SST files; in this case, it may help to wait several seconds until + /// these files are compacted.
+ fn wait_for_writes_to_resume(&self) { + const RETRY_COUNT: usize = 10; + const RETRY_INTERVAL: Duration = Duration::from_secs(1); + + for retry in 0..RETRY_COUNT { + let cfs_with_stopped_writes = self.cf_names.iter().copied().filter(|cf_name| { + let cf = self.db.cf_handle(cf_name).unwrap(); + // ^ `unwrap()` is safe (CF existence is checked during DB initialization) + self.int_property(cf, properties::IS_WRITE_STOPPED) == Some(1) + }); + let cfs_with_stopped_writes: Vec<_> = cfs_with_stopped_writes.collect(); + if cfs_with_stopped_writes.is_empty() { + return; + } else { + tracing::info!( + "Writes are stopped for column families {cfs_with_stopped_writes:?} in DB `{}` \ + (retry: {retry}/{RETRY_COUNT})", + self.db_name + ); + thread::sleep(RETRY_INTERVAL); + } + } + + tracing::warn!( + "Exceeded {RETRY_COUNT} retries waiting for writes to resume in DB `{}`; \ + proceeding with stopped writes", + self.db_name + ); + } } #[derive(Debug, Clone, Copy)] struct StalledWritesRetries { max_batch_size: usize, retry_count: usize, - interval: Duration, + start_interval: Duration, + scale_factor: f64, } impl Default for StalledWritesRetries { fn default() -> Self { Self { max_batch_size: 128 << 20, // 128 MiB - retry_count: 3, - interval: Duration::from_millis(100), + retry_count: 10, + start_interval: Duration::from_millis(50), + scale_factor: 1.5, } } } impl StalledWritesRetries { + fn interval(&self, retry_index: usize) -> Duration { + self.start_interval + .mul_f64(self.scale_factor.powi(retry_index as i32)) + } + // **NB.** The error message may change between RocksDB versions! fn is_write_stall_error(error: &rocksdb::Error) -> bool { matches!(error.kind(), rocksdb::ErrorKind::ShutdownInProgress) @@ -166,6 +239,19 @@ impl StalledWritesRetries { } } +/// [`RocksDB`] options. +#[derive(Debug, Clone, Copy, Default)] +pub struct RocksDBOptions { + /// Byte capacity of the block cache (the main RocksDB cache for reads). If not set, default RocksDB + /// cache options will be used. + pub block_cache_capacity: Option<usize>, + /// Byte capacity of memtables (recent, non-persisted changes to RocksDB) set for large CFs + /// (as defined in [`NamedColumnFamily::requires_tuning()`]). + /// Setting this to a reasonably large value (order of 512 MiB) is helpful for large DBs that experience + /// write stalls. If not set, large CFs will not be configured specially. + pub large_memtable_capacity: Option<usize>, +} + /// Thin wrapper around a RocksDB instance. /// /// The wrapper is cheaply cloneable (internally, it wraps a DB instance in an [`Arc`]).
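Two notes on the changes above. First, the stalled-write retry interval now grows geometrically as start_interval * scale_factor^n, i.e. 50 ms, 75 ms, 112.5 ms, ..., which the new `retry_interval_computation` test further down in this file verifies. Second, to illustrate how a caller opts into the new memtable tuning, here is a hypothetical column-family definition against the API introduced in this diff; the `ExampleCF` enum, the capacity values, and the `open` helper are invented for the sketch, and `DB_NAME` is the trait constant referenced elsewhere in this file as `CF::DB_NAME`.

use std::path::Path;

use zksync_storage::db::NamedColumnFamily;
use zksync_storage::{RocksDB, RocksDBOptions};

#[derive(Debug, Clone, Copy)]
enum ExampleCF {
    State, // assume this CF is large and prone to write stalls
    Aux,
}

impl NamedColumnFamily for ExampleCF {
    const DB_NAME: &'static str = "example_db";
    const ALL: &'static [Self] = &[Self::State, Self::Aux];

    fn name(&self) -> &'static str {
        match self {
            Self::State => "state",
            Self::Aux => "aux",
        }
    }

    // Only the large CF opts into the special compaction/memtable configuration.
    fn requires_tuning(&self) -> bool {
        matches!(self, Self::State)
    }
}

fn open(path: &Path) -> RocksDB<ExampleCF> {
    RocksDB::with_options(
        path,
        RocksDBOptions {
            block_cache_capacity: Some(128 << 20),    // 128 MiB shared block cache
            large_memtable_capacity: Some(512 << 20), // 512 MiB memtables for tuned CFs only
        },
    )
}

With these options, `rocksdb_options` calls `optimize_level_style_compaction` only for CFs that report `requires_tuning() == true`, while the other CFs keep the default memtable configuration.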
@@ -179,13 +265,13 @@ pub struct RocksDB { impl RocksDB { pub fn new(path: &Path) -> Self { - Self::with_cache(path, None) + Self::with_options(path, RocksDBOptions::default()) } - pub fn with_cache(path: &Path, block_cache_capacity: Option) -> Self { - let caches = RocksDBCaches::new(block_cache_capacity); - let options = Self::rocksdb_options(None); - let existing_cfs = DB::list_cf(&options, path).unwrap_or_else(|err| { + pub fn with_options(path: &Path, options: RocksDBOptions) -> Self { + let caches = RocksDBCaches::new(options.block_cache_capacity); + let db_options = Self::rocksdb_options(None, None); + let existing_cfs = DB::list_cf(&db_options, path).unwrap_or_else(|err| { tracing::warn!( "Failed getting column families for RocksDB `{}` at `{}`, assuming CFs are empty; {err}", CF::DB_NAME, @@ -194,15 +280,18 @@ impl RocksDB { vec![] }); - let cf_names: HashSet<_> = CF::ALL.iter().map(|cf| cf.name()).collect(); + let cfs_and_options: HashMap<_, _> = CF::ALL + .iter() + .map(|cf| (cf.name(), cf.requires_tuning())) + .collect(); let obsolete_cfs: Vec<_> = existing_cfs .iter() .filter_map(|cf_name| { let cf_name = cf_name.as_str(); // The default CF is created on RocksDB instantiation in any case; it doesn't need // to be explicitly opened. - let is_obsolete = - cf_name != rocksdb::DEFAULT_COLUMN_FAMILY_NAME && !cf_names.contains(cf_name); + let is_obsolete = cf_name != rocksdb::DEFAULT_COLUMN_FAMILY_NAME + && !cfs_and_options.contains_key(cf_name); is_obsolete.then_some(cf_name) }) .collect(); @@ -216,18 +305,22 @@ impl RocksDB { } // Open obsolete CFs as well; RocksDB initialization will panic otherwise. - let all_cf_names = cf_names.iter().copied().chain(obsolete_cfs); - let cfs = all_cf_names.map(|cf_name| { + let cf_names = cfs_and_options.keys().copied().collect(); + let all_cfs_and_options = cfs_and_options + .into_iter() + .chain(obsolete_cfs.into_iter().map(|name| (name, false))); + let cfs = all_cfs_and_options.map(|(cf_name, requires_tuning)| { let mut block_based_options = BlockBasedOptions::default(); block_based_options.set_bloom_filter(10.0, false); if let Some(cache) = &caches.shared { block_based_options.set_block_cache(cache); } - let cf_options = Self::rocksdb_options(Some(block_based_options)); + let memtable_capacity = options.large_memtable_capacity.filter(|_| requires_tuning); + let cf_options = Self::rocksdb_options(memtable_capacity, Some(block_based_options)); ColumnFamilyDescriptor::new(cf_name, cf_options) }); - let db = DB::open_cf_descriptors(&options, path, cfs).expect("failed to init rocksdb"); + let db = DB::open_cf_descriptors(&db_options, path, cfs).expect("failed to init rocksdb"); let inner = Arc::new(RocksDBInner { db, db_name: CF::DB_NAME, @@ -237,6 +330,13 @@ impl RocksDB { }); RocksdbSizeMetrics::register(CF::DB_NAME, Arc::downgrade(&inner)); + tracing::info!( + "Initialized RocksDB `{}` at `{}` with {options:?}", + CF::DB_NAME, + path.display() + ); + + inner.wait_for_writes_to_resume(); Self { inner, sync_writes: false, @@ -253,16 +353,21 @@ impl RocksDB { self } - fn rocksdb_options(block_based_options: Option) -> Options { + fn rocksdb_options( + memtable_capacity: Option, + block_based_options: Option, + ) -> Options { let mut options = Options::default(); options.create_missing_column_families(true); options.create_if_missing(true); let num_cpus = num_cpus::get() as i32; options.increase_parallelism(num_cpus); + if let Some(memtable_capacity) = memtable_capacity { + options.optimize_level_style_compaction(memtable_capacity); + } // 
Settings below are taken as per PingCAP recommendations: // https://www.pingcap.com/blog/how-to-troubleshoot-rocksdb-write-stalls-in-tikv/ - options.set_max_write_buffer_number(5); let max_background_jobs = (num_cpus - 1).clamp(1, 8); options.set_max_background_jobs(max_background_jobs); @@ -323,14 +428,18 @@ impl RocksDB { match self.write_inner(raw_batch) { Ok(()) => return Ok(()), Err(err) => { - let should_retry = StalledWritesRetries::is_write_stall_error(&err) - && retry_count < retries.retry_count; - if should_retry { + let is_stalled_write = StalledWritesRetries::is_write_stall_error(&err); + if is_stalled_write { + METRICS.report_stalled_write(CF::DB_NAME); + } + + if is_stalled_write && retry_count < retries.retry_count { + let retry_interval = retries.interval(retry_count); tracing::warn!( - "Writes stalled when writing to DB `{}`; will retry after a delay", + "Writes stalled when writing to DB `{}`; will retry after {retry_interval:?}", CF::DB_NAME ); - thread::sleep(retries.interval); + thread::sleep(retry_interval); retry_count += 1; raw_batch = rocksdb::WriteBatch::from_data(&raw_batch_bytes); } else { @@ -454,6 +563,20 @@ mod tests { use super::*; + #[test] + fn retry_interval_computation() { + let retries = StalledWritesRetries::default(); + assert_close(retries.interval(0), Duration::from_millis(50)); + assert_close(retries.interval(1), Duration::from_millis(75)); + assert_close(retries.interval(2), Duration::from_micros(112_500)); + } + + fn assert_close(lhs: Duration, rhs: Duration) { + let lhs_millis = (lhs.as_secs_f64() * 1_000.0).round() as u64; + let rhs_millis = (rhs.as_secs_f64() * 1_000.0).round() as u64; + assert_eq!(lhs_millis, rhs_millis); + } + #[derive(Debug, Clone, Copy)] enum OldColumnFamilies { Default, diff --git a/core/lib/storage/src/lib.rs b/core/lib/storage/src/lib.rs index 8c86876b5861..6ea928309ff4 100644 --- a/core/lib/storage/src/lib.rs +++ b/core/lib/storage/src/lib.rs @@ -1,5 +1,5 @@ pub mod db; mod metrics; -pub use db::RocksDB; +pub use db::{RocksDB, RocksDBOptions}; pub use rocksdb; diff --git a/core/lib/storage/src/metrics.rs b/core/lib/storage/src/metrics.rs index 1ea4824a6921..a8f4fb1e7b4e 100644 --- a/core/lib/storage/src/metrics.rs +++ b/core/lib/storage/src/metrics.rs @@ -1,7 +1,7 @@ //! General-purpose RocksDB metrics. All metrics code in the crate should be in this module. use once_cell::sync::Lazy; -use vise::{Buckets, Collector, EncodeLabelSet, Family, Gauge, Histogram, Metrics}; +use vise::{Buckets, Collector, Counter, EncodeLabelSet, Family, Gauge, Histogram, Metrics, Unit}; use std::{ collections::HashMap, @@ -41,12 +41,18 @@ pub(crate) struct RocksdbMetrics { /// Size of a serialized `WriteBatch` written to a RocksDB instance. #[metrics(buckets = BYTE_SIZE_BUCKETS)] write_batch_size: Family>, + /// Number of stalled writes for a RocksDB instance. + write_stalled: Family, } impl RocksdbMetrics { pub(crate) fn report_batch_size(&self, db: &'static str, batch_size: usize) { self.write_batch_size[&db.into()].observe(batch_size); } + + pub(crate) fn report_stalled_write(&self, db: &'static str) { + self.write_stalled[&db.into()].inc(); + } } #[vise::register] @@ -56,6 +62,20 @@ pub(crate) static METRICS: vise::Global = vise::Global::new(); #[derive(Debug, Metrics)] #[metrics(prefix = "rocksdb")] pub(crate) struct RocksdbSizeMetrics { + /// Boolean gauge indicating whether writing to the column family is currently stopped. + pub writes_stopped: Family>, + /// Number of immutable memtables. Large value increases risks of write stalls. 
+ pub immutable_mem_tables: Family>, + /// Number of level-0 SST files. Large value increases risks of write stalls. + pub level0_files: Family>, + /// Number of memtable flushes running for the column family. + pub running_flushes: Family>, + /// Number of compactions running for the column family. + pub running_compactions: Family>, + /// Estimated number of bytes for pending compactions. + #[metrics(unit = Unit::Bytes)] + pub pending_compactions: Family>, + /// Estimated size of all live data in the column family of a RocksDB instance. pub live_data_size: Family>, /// Total size of all SST files in the column family of a RocksDB instance. @@ -93,7 +113,7 @@ impl RocksdbSizeMetrics { .expect("instances are poisoned") .retain(|_, instance| { if let Some(instance) = instance.upgrade() { - instance.report_sizes(&metrics); + instance.collect_metrics(&metrics); true } else { false diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index f4ef6e19db7c..5798605da478 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -19,7 +19,7 @@ zksync_mini_merkle_tree = { path = "../mini_merkle_tree" } # We need this import because we want DAL to be responsible for (de)serialization codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", branch = "dev" } zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" } -zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc1" } +zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } chrono = { version = "0.4", features = ["serde"] } num = { version = "0.3.1", features = ["serde"] } diff --git a/core/lib/types/src/protocol_version.rs b/core/lib/types/src/protocol_version.rs index afc4868785a9..8faa998636c2 100644 --- a/core/lib/types/src/protocol_version.rs +++ b/core/lib/types/src/protocol_version.rs @@ -156,6 +156,32 @@ pub struct L1VerifierConfig { pub recursion_scheduler_level_vk_hash: H256, } +/// Represents a call that was made during a governance operation. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Call { + /// The address to which the call will be made. + pub target: Address, + /// The amount of Ether (in wei) to be sent along with the call. + pub value: U256, + /// The calldata to be executed on the `target` address. + pub data: Vec<u8>, + /// Hash of the corresponding Ethereum transaction. + pub eth_hash: H256, + /// Block in which the Ethereum transaction was included. + pub eth_block: u64, +} + +/// Defines the structure of an operation that the Governance contract executed. +#[derive(Debug, Clone, Default)] +pub struct GovernanceOperation { + /// An array of `Call` structs, each representing a call to be made during the operation. + pub calls: Vec<Call>, + /// The hash of the predecessor operation that should be executed before this operation. + pub predecessor: H256, + /// The value used for creating unique operation hashes. + pub salt: H256, +} + /// Protocol upgrade proposal from L1. /// Most of the fields are optional, meaning that if a value is none /// then this field is not changed within an upgrade.
@@ -408,6 +434,106 @@ impl TryFrom for ProtocolUpgrade { } } +impl TryFrom<Log> for GovernanceOperation { + type Error = crate::ethabi::Error; + + fn try_from(event: Log) -> Result<Self, Self::Error> { + let call_param_type = ParamType::Tuple(vec![ + ParamType::Address, + ParamType::Uint(256), + ParamType::Bytes, + ]); + + let operation_param_type = ParamType::Tuple(vec![ + ParamType::Array(Box::new(call_param_type)), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ]); + // Decode data. + let mut decoded = decode(&[ParamType::Uint(256), operation_param_type], &event.data.0)?; + // Extract `GovernanceOperation` data. + let mut decoded_governance_operation = decoded.remove(1).into_tuple().unwrap(); + + let eth_hash = event + .transaction_hash + .expect("Event transaction hash is missing"); + let eth_block = event + .block_number + .expect("Event block number is missing") + .as_u64(); + + let calls = decoded_governance_operation.remove(0).into_array().unwrap(); + let predecessor = H256::from_slice( + &decoded_governance_operation + .remove(0) + .into_fixed_bytes() + .unwrap(), + ); + let salt = H256::from_slice( + &decoded_governance_operation + .remove(0) + .into_fixed_bytes() + .unwrap(), + ); + + let calls = calls + .into_iter() + .map(|call| { + let mut decoded_governance_operation = call.into_tuple().unwrap(); + + Call { + target: decoded_governance_operation + .remove(0) + .into_address() + .unwrap(), + value: decoded_governance_operation.remove(0).into_uint().unwrap(), + data: decoded_governance_operation.remove(0).into_bytes().unwrap(), + eth_hash, + eth_block, + } + }) + .collect(); + + Ok(Self { + calls, + predecessor, + salt, + }) + } +} + +impl TryFrom<Call> for ProtocolUpgrade { + type Error = crate::ethabi::Error; + + fn try_from(call: Call) -> Result<Self, Self::Error> { + // Reuses `ProtocolUpgrade::try_from`. + // `ProtocolUpgrade::try_from` only uses 3 log fields: `data`, `block_number`, `transaction_hash`. Others can be filled with dummy values. + // We build `data` as `call.data` without the first 4 bytes (the selector), + // appended with `bytes32(0)` for compatibility with the old event data.
+ let data = call + .data + .into_iter() + .skip(4) + .chain(encode(&[Token::FixedBytes(H256::zero().0.to_vec())])) + .collect::<Vec<u8>>() + .into(); + let log = Log { + address: Default::default(), + topics: Default::default(), + data, + block_hash: Default::default(), + block_number: Some(call.eth_block.into()), + transaction_hash: Some(call.eth_hash), + transaction_index: Default::default(), + log_index: Default::default(), + transaction_log_index: Default::default(), + log_type: Default::default(), + removed: Default::default(), + }; + ProtocolUpgrade::try_from(log) + } +} + #[derive(Debug, Clone, Default)] pub struct ProtocolVersion { /// Protocol version ID @@ -559,3 +685,46 @@ impl From for VmVersion { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn governance_operation_from_log() { + let call_token = Token::Tuple(vec![ + Token::Address(Address::random()), + Token::Uint(U256::zero()), + Token::Bytes(vec![1, 2, 3]), + ]); + let operation_token = Token::Tuple(vec![ + Token::Array(vec![call_token]), + Token::FixedBytes(H256::random().0.to_vec()), + Token::FixedBytes(H256::random().0.to_vec()), + ]); + let event_data = encode(&[Token::Uint(U256::zero()), operation_token]); + + let correct_log = Log { + address: Default::default(), + topics: Default::default(), + data: event_data.into(), + block_hash: Default::default(), + block_number: Some(1u64.into()), + transaction_hash: Some(H256::random()), + transaction_index: Default::default(), + log_index: Default::default(), + transaction_log_index: Default::default(), + log_type: Default::default(), + removed: Default::default(), + }; + let decoded_op: GovernanceOperation = correct_log.clone().try_into().unwrap(); + assert_eq!(decoded_op.calls.len(), 1); + + let mut incorrect_log = correct_log; + incorrect_log + .data + .0 + .truncate(incorrect_log.data.0.len() - 32); + assert!(TryInto::<GovernanceOperation>::try_into(incorrect_log).is_err()); + } +} diff --git a/core/lib/types/src/prover_server_api/mod.rs b/core/lib/types/src/prover_server_api/mod.rs index 84262b182c60..dc226f11d265 100644 --- a/core/lib/types/src/prover_server_api/mod.rs +++ b/core/lib/types/src/prover_server_api/mod.rs @@ -19,7 +19,7 @@ pub struct ProofGenerationDataRequest {} #[derive(Debug, Serialize, Deserialize)] pub enum ProofGenerationDataResponse { - Success(ProofGenerationData), + Success(Option<ProofGenerationData>), Error(String), } diff --git a/core/lib/types/src/tokens.rs b/core/lib/types/src/tokens.rs index 45d97fee2e32..c4b85cb0e21f 100644 --- a/core/lib/types/src/tokens.rs +++ b/core/lib/types/src/tokens.rs @@ -43,11 +43,3 @@ pub struct TokenPrice { pub usd_price: Ratio, pub last_updated: DateTime, } - -/// Token price known to the zkSync network.
-#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct TokenMarketVolume { - #[serde(with = "UnsignedRatioSerializeAsDecimal")] - pub market_volume: Ratio, - pub last_updated: DateTime, -} diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml index 99a1af6bdb6f..64b100d257ec 100644 --- a/core/lib/utils/Cargo.toml +++ b/core/lib/utils/Cargo.toml @@ -11,7 +11,7 @@ categories = ["cryptography"] [dependencies] zksync_basic_types = { path = "../../lib/basic_types" } -zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc1" } +zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } vlog = { path = "../../lib/vlog" } num = { version = "0.3.1", features = ["serde"] } diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs index f4728e05f9ec..6745decd21b9 100644 --- a/core/lib/zksync_core/src/api_server/web3/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/mod.rs @@ -4,10 +4,11 @@ use jsonrpc_core::MetaIoHandler; use jsonrpc_http_server::hyper; use jsonrpc_pubsub::PubSubHandler; use serde::Deserialize; -use tokio::sync::{watch, RwLock}; +use tokio::sync::{oneshot, watch, RwLock}; use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer}; use std::{net::SocketAddr, sync::Arc, time::Duration}; +use tokio::task::JoinHandle; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck}; @@ -39,6 +40,8 @@ mod metrics; pub mod namespaces; mod pubsub_notifier; pub mod state; +#[cfg(test)] +pub(crate) mod tests; use self::backend_jsonrpc::{ batch_limiter_middleware::{LimitMiddleware, Transport}, @@ -105,6 +108,14 @@ impl Namespace { ]; } +/// Handles to the initialized API server. +#[derive(Debug)] +pub struct ApiServerHandles { + pub local_addr: SocketAddr, + pub tasks: Vec<JoinHandle<anyhow::Result<()>>>, + pub health_check: ReactiveHealthCheck, +} + #[derive(Debug)] pub struct ApiBuilder { backend: ApiBackend, @@ -309,10 +320,7 @@ impl ApiBuilder { pub async fn build( mut self, stop_receiver: watch::Receiver<bool>, - ) -> ( - Vec<tokio::task::JoinHandle<anyhow::Result<()>>>, - ReactiveHealthCheck, - ) { + ) -> anyhow::Result<ApiServerHandles> { if self.filters_limit.is_none() { tracing::warn!("Filters limit is not set - unlimited filters are allowed"); } @@ -346,43 +354,17 @@ impl ApiBuilder { _ => {} } - // TODO (PLA-284): Pass `stop_receiver` into every implementation to properly
match (self.backend, self.transport.take()) { (ApiBackend::Jsonrpc, Some(ApiTransport::Http(addr))) => { - let (api_health_check, health_updater) = ReactiveHealthCheck::new("http_api"); - ( - vec![ - self.build_jsonrpc_http(addr, stop_receiver, health_updater) - .await, - ], - api_health_check, - ) + self.build_jsonrpc_http(addr, stop_receiver).await } (ApiBackend::Jsonrpc, Some(ApiTransport::WebSocket(addr))) => { - let (api_health_check, health_updater) = ReactiveHealthCheck::new("ws_api"); - ( - self.build_jsonrpc_ws(addr, stop_receiver, health_updater) - .await, - api_health_check, - ) + self.build_jsonrpc_ws(addr, stop_receiver).await } (ApiBackend::Jsonrpsee, Some(transport)) => { - let name = match &transport { - ApiTransport::Http(_) => "http_api", - ApiTransport::WebSocket(_) => "ws_api", - }; - let (api_health_check, health_updater) = ReactiveHealthCheck::new(name); - - ( - vec![ - self.build_jsonrpsee(transport, stop_receiver, health_updater) - .await, - ], - api_health_check, - ) + self.build_jsonrpsee(transport, stop_receiver).await } - (_, None) => panic!("ApiTransport is not specified"), + (_, None) => anyhow::bail!("ApiTransport is not specified"), } } @@ -390,8 +372,7 @@ impl ApiBuilder { mut self, addr: SocketAddr, mut stop_receiver: watch::Receiver, - health_updater: HealthUpdater, - ) -> tokio::task::JoinHandle> { + ) -> anyhow::Result { if self.batch_request_size_limit.is_some() { tracing::info!("`batch_request_size_limit` is not supported for HTTP `jsonrpc` backend, this value is ignored"); } @@ -399,31 +380,39 @@ impl ApiBuilder { tracing::info!("`response_body_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); } + let (health_check, health_updater) = ReactiveHealthCheck::new("http_api"); let vm_barrier = self.vm_barrier.take().unwrap(); + // ^ `unwrap()` is safe by construction + let runtime = tokio::runtime::Builder::new_multi_thread() .enable_all() .thread_name("jsonrpc-http-worker") .worker_threads(self.threads.unwrap()) .build() - .unwrap(); + .context("Failed creating Tokio runtime for `jsonrpc` API backend")?; let mut io_handler: MetaIoHandler<()> = MetaIoHandler::default(); self.extend_jsonrpc_methods(&mut io_handler).await; - tokio::task::spawn_blocking(move || { + let (local_addr_sender, local_addr) = oneshot::channel(); + let server_task = tokio::task::spawn_blocking(move || { let server = jsonrpc_http_server::ServerBuilder::new(io_handler) .threads(1) .event_loop_executor(runtime.handle().clone()) .start_http(&addr) .context("jsonrpc_http::Server::start_http")?; + local_addr_sender.send(*server.address()).ok(); let close_handle = server.close_handle(); let closing_vm_barrier = vm_barrier.clone(); runtime.handle().spawn(async move { - if stop_receiver.changed().await.is_ok() { - tracing::info!("Stop signal received, HTTP JSON-RPC server is shutting down"); - closing_vm_barrier.close(); - close_handle.close(); + if stop_receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for HTTP JSON-RPC server was dropped without sending a signal" + ); } + tracing::info!("Stop signal received, HTTP JSON-RPC server is shutting down"); + closing_vm_barrier.close(); + close_handle.close(); }); health_updater.update(HealthStatus::Ready.into()); @@ -433,6 +422,23 @@ impl ApiBuilder { runtime.block_on(Self::wait_for_vm(vm_barrier, "HTTP")); runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT); Ok(()) + }); + + let local_addr = match local_addr.await { + Ok(addr) => addr, + Err(_) => { + // If the local address was not 
transmitted, `server_task` must have failed. + let err = server_task + .await + .context("HTTP JSON-RPC server panicked")? + .unwrap_err(); + return Err(err); + } + }; + Ok(ApiServerHandles { + local_addr, + health_check, + tasks: vec![server_task], }) } @@ -482,13 +488,13 @@ impl ApiBuilder { mut self, addr: SocketAddr, mut stop_receiver: watch::Receiver, - health_updater: HealthUpdater, - ) -> Vec>> { + ) -> anyhow::Result { if self.response_body_size_limit.is_some() { tracing::info!("`response_body_size_limit` is not supported for `jsonrpc` backend, this value is ignored"); } - let websocket_requests_per_second_limit = self.websocket_requests_per_minute_limit; + let (health_check, health_updater) = ReactiveHealthCheck::new("ws_api"); + let websocket_requests_per_second_limit = self.websocket_requests_per_minute_limit; let batch_limiter_middleware = LimitMiddleware::new(Transport::Ws, self.batch_request_size_limit); @@ -497,14 +503,14 @@ impl ApiBuilder { .thread_name("jsonrpc-ws-worker") .worker_threads(self.threads.unwrap()) .build() - .unwrap(); // Constructing a runtime should always succeed. + .context("Failed creating Tokio runtime for `jsonrpc-ws` API backend")?; let max_connections = self.subscriptions_limit.unwrap_or(usize::MAX); let vm_barrier = self.vm_barrier.take().unwrap(); let io_handler: MetaIoHandler>, _> = MetaIoHandler::with_middleware(batch_limiter_middleware); let mut io_handler = PubSubHandler::new(io_handler); - let mut notify_handles = Vec::new(); + let mut tasks = Vec::new(); if self .namespaces @@ -513,8 +519,10 @@ impl ApiBuilder { .contains(&Namespace::Pubsub) { let pub_sub = EthSubscribe::new(runtime.handle().clone()); - let polling_interval = self.polling_interval.expect("Polling interval is not set"); - notify_handles.extend([ + let polling_interval = self + .polling_interval + .context("Polling interval is not set")?; + tasks.extend([ tokio::spawn(notify_blocks( pub_sub.active_block_subs.clone(), self.pool.clone(), @@ -538,7 +546,8 @@ impl ApiBuilder { } self.extend_jsonrpc_methods(&mut io_handler).await; - let server_handle = tokio::task::spawn_blocking(move || { + let (local_addr_sender, local_addr) = oneshot::channel(); + let server_task = tokio::task::spawn_blocking(move || { let server = jsonrpc_ws_server::ServerBuilder::with_meta_extractor( io_handler, move |context: &jsonrpc_ws_server::RequestContext| { @@ -552,18 +561,25 @@ impl ApiBuilder { .start(&addr) .context("jsonrpc_ws_server::Server::start()")?; + local_addr_sender.send(*server.addr()).ok(); + let close_handle = server.close_handle(); let closing_vm_barrier = vm_barrier.clone(); runtime.handle().spawn(async move { - if stop_receiver.changed().await.is_ok() { - tracing::info!("Stop signal received, WS JSON-RPC server is shutting down"); - closing_vm_barrier.close(); - close_handle.close(); + if stop_receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for WS JSON-RPC server was dropped without sending a signal" + ); } + tracing::info!("Stop signal received, WS JSON-RPC server is shutting down"); + closing_vm_barrier.close(); + close_handle.close(); }); health_updater.update(HealthStatus::Ready.into()); - server.wait().unwrap(); + server + .wait() + .context("WS JSON-RPC server encountered fatal error")?; drop(health_updater); tracing::info!("WS JSON-RPC server stopped"); runtime.block_on(Self::wait_for_vm(vm_barrier, "WS")); @@ -571,37 +587,46 @@ impl ApiBuilder { Ok(()) }); - notify_handles.push(server_handle); - notify_handles + let local_addr = match 
local_addr.await { + Ok(addr) => addr, + Err(_) => { + // If the local address was not transmitted, `server_task` must have failed. + let err = server_task + .await + .context("WS JSON-RPC server panicked")? + .unwrap_err(); + return Err(err); + } + }; + tasks.push(server_task); + + Ok(ApiServerHandles { + local_addr, + tasks, + health_check, + }) } async fn build_jsonrpsee( mut self, transport: ApiTransport, stop_receiver: watch::Receiver, - health_updater: HealthUpdater, - ) -> tokio::task::JoinHandle> { + ) -> anyhow::Result { if matches!(transport, ApiTransport::WebSocket(_)) { // TODO (SMA-1588): Implement `eth_subscribe` method for `jsonrpsee`. tracing::warn!( "`eth_subscribe` is not implemented for jsonrpsee backend, use jsonrpc instead" ); - if self.websocket_requests_per_minute_limit.is_some() { tracing::info!("`websocket_requests_per_second_limit` is not supported for `jsonrpsee` backend, this value is ignored"); } } - let runtime_thread_name = match transport { - ApiTransport::Http(_) => "jsonrpsee-http-worker", - ApiTransport::WebSocket(_) => "jsonrpsee-ws-worker", + let (runtime_thread_name, health_check_name) = match transport { + ApiTransport::Http(_) => ("jsonrpsee-http-worker", "http_api"), + ApiTransport::WebSocket(_) => ("jsonrpsee-ws-worker", "ws_api"), }; - let runtime = tokio::runtime::Builder::new_multi_thread() - .enable_all() - .thread_name(runtime_thread_name) - .worker_threads(self.threads.unwrap()) - .build() - .unwrap(); + let (health_check, health_updater) = ReactiveHealthCheck::new(health_check_name); let vm_barrier = self.vm_barrier.take().unwrap(); let batch_request_config = if let Some(limit) = self.batch_request_size_limit { BatchRequestConfig::Limit(limit as u32) @@ -613,14 +638,24 @@ impl ApiBuilder { .map(|limit| limit as u32) .unwrap_or(u32::MAX); + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name(runtime_thread_name) + .worker_threads(self.threads.unwrap()) + .build() + .with_context(|| { + format!("Failed creating Tokio runtime for {health_check_name} jsonrpsee server") + })?; let rpc = self.build_rpc_module().await; // Start the server in a separate tokio runtime from a dedicated thread. - tokio::task::spawn_blocking(move || { + let (local_addr_sender, local_addr) = oneshot::channel(); + let server_task = tokio::task::spawn_blocking(move || { let res = runtime.block_on(Self::run_jsonrpsee_server( rpc, transport, stop_receiver, + local_addr_sender, health_updater, vm_barrier, batch_request_config, @@ -628,6 +663,23 @@ impl ApiBuilder { )); runtime.shutdown_timeout(GRACEFUL_SHUTDOWN_TIMEOUT); res + }); + + let local_addr = match local_addr.await { + Ok(addr) => addr, + Err(_) => { + // If the local address was not transmitted, `server_task` must have failed. + let err = server_task + .await + .with_context(|| format!("{health_check_name} server panicked"))? 
+ .unwrap_err(); + return Err(err); + } + }; + Ok(ApiServerHandles { + local_addr, + health_check, + tasks: vec![server_task], }) } @@ -636,6 +688,7 @@ impl ApiBuilder { rpc: RpcModule<()>, transport: ApiTransport, mut stop_receiver: watch::Receiver, + local_addr_sender: oneshot::Sender<SocketAddr>, health_updater: HealthUpdater, vm_barrier: VmConcurrencyBarrier, batch_request_config: BatchRequestConfig, @@ -682,18 +735,26 @@ impl ApiBuilder { .build(addr) .await .with_context(|| format!("Failed building {transport_str} JSON-RPC server"))?; + let local_addr = server.local_addr().with_context(|| { + format!("Failed getting local address for {transport_str} JSON-RPC server") + })?; + local_addr_sender.send(local_addr).ok(); let server_handle = server.start(rpc); let close_handle = server_handle.clone(); let closing_vm_barrier = vm_barrier.clone(); tokio::spawn(async move { - if stop_receiver.changed().await.is_ok() { - tracing::info!( - "Stop signal received, {transport_str} JSON-RPC server is shutting down" + if stop_receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for {transport_str} JSON-RPC server was dropped \ + without sending a signal" ); - closing_vm_barrier.close(); - close_handle.stop().ok(); } + tracing::info!( + "Stop signal received, {transport_str} JSON-RPC server is shutting down" + ); + closing_vm_barrier.close(); + close_handle.stop().ok(); }); health_updater.update(HealthStatus::Ready.into()); diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index a5f133868b9b..c7cfbe4b7a10 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -18,7 +18,7 @@ use zksync_types::{ L1BatchNumber, MiniblockNumber, Transaction, L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, U64, }; -use zksync_utils::address_to_h256; +use zksync_utils::{address_to_h256, ratio_to_big_decimal_normalized}; use zksync_web3_decl::{ error::Web3Error, types::{Address, Filter, Log, Token, H256}, @@ -27,7 +27,6 @@ use zksync_web3_decl::{ use crate::api_server::web3::{ backend_jsonrpc::error::internal_error, metrics::API_METRICS, RpcState, }; -use crate::fee_ticker::{error::TickerError, FeeTicker, TokenPriceRequestType}; use crate::l1_gas_price::L1GasPriceProvider; #[derive(Debug)] @@ -178,6 +177,13 @@ impl ZksNamespace { pub async fn get_token_price_impl(&self, l2_token: Address) -> Result { const METHOD_NAME: &str = "get_token_price"; + /// Maximum number of digits after the decimal point in the USD price. + /// Used to convert `Ratio` to `BigDecimal`. + const USD_PRECISION: usize = 100; + /// Minimum number of digits after the decimal point in the USD price. + /// Used to convert `Ratio` to `BigDecimal`.
+ const MIN_PRECISION: usize = 2; + let method_latency = API_METRICS.start_call(METHOD_NAME); let token_price_result = { let mut storage = self @@ -186,20 +192,19 @@ impl ZksNamespace { .access_storage_tagged("api") .await .unwrap(); - let mut tokens_web3_dal = storage.tokens_web3_dal(); - FeeTicker::get_l2_token_price( - &mut tokens_web3_dal, - TokenPriceRequestType::USDForOneToken, - &l2_token, - ) - .await + storage.tokens_web3_dal().get_token_price(&l2_token).await }; let result = match token_price_result { - Ok(price) => Ok(price), - Err(TickerError::PriceNotTracked(_)) => Ok(BigDecimal::zero()), + Ok(Some(price)) => Ok(ratio_to_big_decimal_normalized( + &price.usd_price, + USD_PRECISION, + MIN_PRECISION, + )), + Ok(None) => Ok(BigDecimal::zero()), Err(err) => Err(internal_error(METHOD_NAME, err)), }; + method_latency.observe(); result } diff --git a/core/lib/zksync_core/src/api_server/web3/tests.rs b/core/lib/zksync_core/src/api_server/web3/tests.rs new file mode 100644 index 000000000000..6d6ac9389a61 --- /dev/null +++ b/core/lib/zksync_core/src/api_server/web3/tests.rs @@ -0,0 +1,145 @@ +use tokio::sync::watch; + +use std::{sync::Arc, time::Instant}; + +use db_test_macro::db_test; +use zksync_config::configs::{ + api::Web3JsonRpcConfig, + chain::{NetworkConfig, StateKeeperConfig}, + ContractsConfig, +}; +use zksync_dal::ConnectionPool; +use zksync_health_check::CheckHealth; +use zksync_state::PostgresStorageCaches; +use zksync_types::{L1BatchNumber, U64}; +use zksync_web3_decl::{ + jsonrpsee::http_client::HttpClient, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, +}; + +use super::*; +use crate::{ + api_server::tx_sender::TxSenderConfig, + genesis::{ensure_genesis_state, GenesisParams}, +}; + +const TEST_TIMEOUT: Duration = Duration::from_secs(5); +const POLL_INTERVAL: Duration = Duration::from_millis(50); + +/// Mock [`L1GasPriceProvider`] that returns a constant value. +struct MockL1GasPriceProvider(u64); + +impl L1GasPriceProvider for MockL1GasPriceProvider { + fn estimate_effective_gas_price(&self) -> u64 { + self.0 + } +} + +impl ApiServerHandles { + /// Waits until the server health check reports the ready state. 
+    pub(crate) async fn wait_until_ready(&self) {
+        let started_at = Instant::now();
+        loop {
+            assert!(
+                started_at.elapsed() <= TEST_TIMEOUT,
+                "Timed out waiting for API server"
+            );
+            let health = self.health_check.check_health().await;
+            if health.status().is_ready() {
+                break;
+            }
+            tokio::time::sleep(POLL_INTERVAL).await;
+        }
+    }
+
+    pub(crate) async fn shutdown(self) {
+        let stop_server = async {
+            for task in self.tasks {
+                task.await
+                    .expect("Server panicked")
+                    .expect("Server terminated with error");
+            }
+        };
+        tokio::time::timeout(TEST_TIMEOUT, stop_server)
+            .await
+            .unwrap();
+    }
+}
+
+pub(crate) async fn spawn_http_server(
+    network_config: &NetworkConfig,
+    pool: ConnectionPool,
+    stop_receiver: watch::Receiver<bool>,
+) -> ApiServerHandles {
+    let contracts_config = ContractsConfig::from_env().unwrap();
+    let web3_config = Web3JsonRpcConfig::from_env().unwrap();
+    let state_keeper_config = StateKeeperConfig::from_env().unwrap();
+    let api_config = InternalApiConfig::new(network_config, &web3_config, &contracts_config);
+    let tx_sender_config =
+        TxSenderConfig::new(&state_keeper_config, &web3_config, api_config.l2_chain_id);
+
+    let storage_caches = PostgresStorageCaches::new(1, 1);
+    let gas_adjuster = Arc::new(MockL1GasPriceProvider(1));
+    let (tx_sender, vm_barrier) = crate::build_tx_sender(
+        &tx_sender_config,
+        &web3_config,
+        &state_keeper_config,
+        pool.clone(),
+        pool.clone(),
+        gas_adjuster,
+        storage_caches,
+    )
+    .await;
+
+    ApiBuilder::jsonrpsee_backend(api_config, pool)
+        .http(0) // Assign random port
+        .with_threads(1)
+        .with_tx_sender(tx_sender, vm_barrier)
+        .enable_api_namespaces(Namespace::NON_DEBUG.to_vec())
+        .build(stop_receiver)
+        .await
+        .expect("Failed spawning JSON-RPC server")
+}
+
+#[db_test]
+async fn http_server_can_start(pool: ConnectionPool) {
+    let network_config = NetworkConfig::from_env().unwrap();
+    let mut storage = pool.access_storage().await.unwrap();
+    if storage.blocks_dal().is_genesis_needed().await.unwrap() {
+        ensure_genesis_state(
+            &mut storage,
+            network_config.zksync_network_id,
+            &GenesisParams::mock(),
+        )
+        .await
+        .unwrap();
+    }
+    drop(storage);
+
+    let (stop_sender, stop_receiver) = watch::channel(false);
+    let server_handles = spawn_http_server(&network_config, pool, stop_receiver).await;
+    server_handles.wait_until_ready().await;
+
+    test_http_server_methods(server_handles.local_addr).await;
+
+    stop_sender.send_replace(true);
+    server_handles.shutdown().await;
+}
+
+async fn test_http_server_methods(local_addr: SocketAddr) {
+    let client = <HttpClient>::builder()
+        .build(format!("http://{local_addr}/"))
+        .unwrap();
+    let block_number = client.get_block_number().await.unwrap();
+    assert_eq!(block_number, U64::from(0));
+
+    let l1_batch_number = client.get_l1_batch_number().await.unwrap();
+    assert_eq!(l1_batch_number, U64::from(0));
+
+    let genesis_l1_batch = client
+        .get_l1_batch_details(L1BatchNumber(0))
+        .await
+        .unwrap()
+        .unwrap();
+    assert!(genesis_l1_batch.base.root_hash.is_some());
+}
diff --git a/core/lib/zksync_core/src/data_fetchers/mod.rs b/core/lib/zksync_core/src/data_fetchers/mod.rs
index ce62a657d970..f04a80c315e8 100644
--- a/core/lib/zksync_core/src/data_fetchers/mod.rs
+++ b/core/lib/zksync_core/src/data_fetchers/mod.rs
@@ -17,7 +17,6 @@ use zksync_dal::ConnectionPool;
 pub mod error;
 pub mod token_list;
 pub mod token_price;
-pub mod token_trading_volume;

 pub fn run_data_fetchers(
     config: &FetcherConfig,
@@ -27,11 +26,9 @@
 ) -> Vec<JoinHandle<anyhow::Result<()>>> {
     let list_fetcher =
token_list::TokenListFetcher::new(config.clone(), network); let price_fetcher = token_price::TokenPriceFetcher::new(config.clone()); - let volume_fetcher = token_trading_volume::TradingVolumeFetcher::new(config.clone()); vec![ tokio::spawn(list_fetcher.run(pool.clone(), stop_receiver.clone())), tokio::spawn(price_fetcher.run(pool.clone(), stop_receiver.clone())), - tokio::spawn(volume_fetcher.run(pool, stop_receiver)), ] } diff --git a/core/lib/zksync_core/src/data_fetchers/token_trading_volume/mock.rs b/core/lib/zksync_core/src/data_fetchers/token_trading_volume/mock.rs deleted file mode 100644 index 8aab1c6bb502..000000000000 --- a/core/lib/zksync_core/src/data_fetchers/token_trading_volume/mock.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::collections::HashMap; - -use async_trait::async_trait; -use bigdecimal::FromPrimitive; -use chrono::Utc; -use num::{rational::Ratio, BigUint}; -use zksync_types::{tokens::TokenMarketVolume, Address}; - -use crate::data_fetchers::error::ApiFetchError; - -use super::FetcherImpl; - -#[derive(Debug, Clone)] -pub struct MockTradingVolumeFetcher {} - -impl MockTradingVolumeFetcher { - pub fn new() -> Self { - Self {} - } - - pub fn volume(&self, _token: &Address) -> TokenMarketVolume { - TokenMarketVolume { - market_volume: Ratio::from(BigUint::from_u64(1).unwrap()), // We don't use volume in the server anymore. - last_updated: Utc::now(), - } - } -} - -#[async_trait] -impl FetcherImpl for MockTradingVolumeFetcher { - async fn fetch_trading_volumes( - &self, - tokens: &[Address], - ) -> Result, ApiFetchError> { - let volumes: HashMap<_, _> = tokens - .iter() - .map(|token| (*token, self.volume(token))) - .collect(); - Ok(volumes) - } -} diff --git a/core/lib/zksync_core/src/data_fetchers/token_trading_volume/mod.rs b/core/lib/zksync_core/src/data_fetchers/token_trading_volume/mod.rs deleted file mode 100644 index c4cf1250135a..000000000000 --- a/core/lib/zksync_core/src/data_fetchers/token_trading_volume/mod.rs +++ /dev/null @@ -1,129 +0,0 @@ -//! Token trading volume fetcher loads the information about how good tokens are being traded on exchanges. -//! We need this information in order to either accept or deny paying fees in a certain tokens: -//! we are only interested in tokens that can be sold to cover expences for the network maintenance. - -use std::{collections::HashMap, time::Duration}; - -use async_trait::async_trait; -use tokio::sync::watch; - -use zksync_config::{configs::fetcher::TokenTradingVolumeSource, FetcherConfig}; -use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_types::{tokens::TokenMarketVolume, Address}; - -use super::error::{ApiFetchError, ErrorAnalyzer}; - -mod mock; -mod uniswap; - -#[async_trait] -pub trait FetcherImpl: std::fmt::Debug + Send + Sync { - /// Retrieves the list of known tokens. 
- async fn fetch_trading_volumes( - &self, - tokens: &[Address], - ) -> Result, ApiFetchError>; -} - -#[derive(Debug)] -pub struct TradingVolumeFetcher { - config: FetcherConfig, - fetcher: Box, - error_handler: ErrorAnalyzer, -} - -impl TradingVolumeFetcher { - fn create_fetcher(config: &FetcherConfig) -> Box { - let token_trading_volume_config = &config.token_trading_volume; - match token_trading_volume_config.source { - TokenTradingVolumeSource::Uniswap => { - Box::new(uniswap::UniswapTradingVolumeFetcher::new(config)) as Box - } - TokenTradingVolumeSource::Mock => { - Box::new(mock::MockTradingVolumeFetcher::new()) as Box - } - } - } - - pub fn new(config: FetcherConfig) -> Self { - let fetcher = Self::create_fetcher(&config); - let error_handler = ErrorAnalyzer::new("TradingVolumeFetcher"); - Self { - config, - fetcher, - error_handler, - } - } - - pub async fn run( - mut self, - pool: ConnectionPool, - stop_receiver: watch::Receiver, - ) -> anyhow::Result<()> { - let mut fetching_interval = - tokio::time::interval(self.config.token_trading_volume.fetching_interval()); - loop { - if *stop_receiver.borrow() { - tracing::info!("Stop signal received, trading_volume_fetcher is shutting down"); - break; - } - - fetching_interval.tick().await; - self.error_handler.update().await; - - let mut storage = pool.access_storage().await.unwrap(); - let known_l1_tokens = self.load_tokens(&mut storage).await; - - let trading_volumes = match self.fetch_trading_volumes(&known_l1_tokens).await { - Ok(volumes) => { - self.error_handler.reset(); - volumes - } - Err(err) => { - self.error_handler.process_error(err); - continue; - } - }; - - self.store_market_volumes(&mut storage, trading_volumes) - .await; - } - Ok(()) - } - - async fn fetch_trading_volumes( - &self, - addresses: &[Address], - ) -> Result, ApiFetchError> { - const AWAITING_TIMEOUT: Duration = Duration::from_secs(2); - - let fetch_future = self.fetcher.fetch_trading_volumes(addresses); - - tokio::time::timeout(AWAITING_TIMEOUT, fetch_future) - .await - .map_err(|_| ApiFetchError::RequestTimeout)? - } - - async fn store_market_volumes( - &self, - storage: &mut StorageProcessor<'_>, - tokens: HashMap, - ) { - let mut tokens_dal = storage.tokens_dal(); - for (token, volume) in tokens { - tokens_dal.set_l1_token_market_volume(&token, volume).await; - } - } - - /// Returns the list of tokens with known metadata (if token is not in the list we use, - /// it's very likely to not have required level of trading volume anyways). - async fn load_tokens(&self, storage: &mut StorageProcessor<'_>) -> Vec
{ - storage - .tokens_dal() - .get_well_known_token_addresses() - .await - .into_iter() - .map(|(l1_token, _)| l1_token) - .collect() - } -} diff --git a/core/lib/zksync_core/src/data_fetchers/token_trading_volume/uniswap.rs b/core/lib/zksync_core/src/data_fetchers/token_trading_volume/uniswap.rs deleted file mode 100644 index 4f4d2495d077..000000000000 --- a/core/lib/zksync_core/src/data_fetchers/token_trading_volume/uniswap.rs +++ /dev/null @@ -1,147 +0,0 @@ -use std::{collections::HashMap, str::FromStr}; - -use async_trait::async_trait; -use chrono::Utc; -use itertools::Itertools; -use num::{rational::Ratio, BigUint}; -use reqwest::{Client, Url}; -use serde::{Deserialize, Serialize}; - -use zksync_config::FetcherConfig; -use zksync_types::{tokens::TokenMarketVolume, Address}; -use zksync_utils::UnsignedRatioSerializeAsDecimal; - -use crate::data_fetchers::error::ApiFetchError; - -use super::FetcherImpl; - -#[derive(Debug, Clone)] -pub struct UniswapTradingVolumeFetcher { - client: Client, - addr: Url, -} - -impl UniswapTradingVolumeFetcher { - pub fn new(config: &FetcherConfig) -> Self { - Self { - client: Client::new(), - addr: Url::from_str(&config.token_trading_volume.url) - .expect("failed parse Uniswap URL"), - } - } -} - -#[async_trait] -impl FetcherImpl for UniswapTradingVolumeFetcher { - async fn fetch_trading_volumes( - &self, - tokens: &[Address], - ) -> Result, ApiFetchError> { - let comma_separated_token_addresses = tokens - .iter() - .map(|token_addr| format!("\"{:#x}\"", token_addr)) - .join(","); - - let query = format!( - "{{tokens(where:{{id_in:[{}]}}){{id, untrackedVolumeUSD}}}}", - comma_separated_token_addresses - ); - - let last_updated = Utc::now(); - - let raw_response = self - .client - .post(self.addr.clone()) - .json(&serde_json::json!({ - "query": query, - })) - .send() - .await - .map_err(|err| { - ApiFetchError::ApiUnavailable(format!("Uniswap API request failed: {}", err)) - })?; - - let response_status = raw_response.status(); - let response_text = raw_response.text().await.map_err(|err| { - ApiFetchError::Other(format!( - "Error: {} while while decoding response to text", - err - )) - })?; - - let response: GraphqlResponse = serde_json::from_str(&response_text).map_err(|err| { - ApiFetchError::UnexpectedJsonFormat(format!( - "Error: {} while decoding response: {} with status: {}", - err, response_text, response_status - )) - })?; - - let result = response - .data - .tokens - .into_iter() - .map(|token_response| { - ( - token_response.id, - TokenMarketVolume { - market_volume: token_response.untracked_volume_usd, - last_updated, - }, - ) - }) - .collect(); - - Ok(result) - } -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GraphqlResponse { - pub data: GraphqlTokensResponse, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct GraphqlTokensResponse { - pub tokens: Vec, -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct TokenResponse { - pub id: Address, - /// Total amount swapped all time in token pair stored in USD, no minimum liquidity threshold. - #[serde( - with = "UnsignedRatioSerializeAsDecimal", - rename = "untrackedVolumeUSD" - )] - pub untracked_volume_usd: Ratio, -} - -#[tokio::test] -#[ignore] // Remote API may be unavailable, so we ignore this test by default. 
-async fn test_fetch_uniswap_trading_volumes() {
-    let mut config = FetcherConfig::from_env().unwrap();
-    config.token_trading_volume.url =
-        "https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2".to_string();
-
-    let fetcher = UniswapTradingVolumeFetcher::new(&config);
-
-    let tokens = vec![
-        Address::from_str("6b175474e89094c44da98b954eedeac495271d0f").expect("DAI"),
-        Address::from_str("1f9840a85d5af5bf1d1762f925bdaddc4201f984").expect("UNI"),
-        Address::from_str("514910771af9ca656af840dff83e8264ecf986ca").expect("LINK"),
-    ];
-
-    let token_volumes = fetcher
-        .fetch_trading_volumes(&tokens)
-        .await
-        .expect("failed get tokens price");
-
-    assert_eq!(
-        token_volumes.len(),
-        tokens.len(),
-        "not all data was received"
-    );
-    for token_address in tokens {
-        assert!(token_volumes.get(&token_address).is_some());
-    }
-}
diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs
index 815450dd5965..d08c22f185bd 100644
--- a/core/lib/zksync_core/src/eth_sender/tests.rs
+++ b/core/lib/zksync_core/src/eth_sender/tests.rs
@@ -826,7 +826,7 @@ async fn insert_l1_batch(tester: &EthSenderTester, number: L1BatchNumber) -> L1B
         .storage()
         .await
         .blocks_dal()
-        .insert_l1_batch(&header, &[], Default::default(), &[])
+        .insert_l1_batch(&header, &[], Default::default(), &[], &[])
         .await
         .unwrap();
     tester
diff --git a/core/lib/zksync_core/src/eth_watch/client.rs b/core/lib/zksync_core/src/eth_watch/client.rs
index f8154f14dbc7..af38ac79ae7d 100644
--- a/core/lib/zksync_core/src/eth_watch/client.rs
+++ b/core/lib/zksync_core/src/eth_watch/client.rs
@@ -48,6 +48,9 @@ pub struct EthHttpQueryClient<E> {
     client: E,
     topics: Vec<H256>,
     zksync_contract_addr: Address,
+    /// Address of the `Governance` contract. It's optional because it is present only for post-boojum chains.
+    /// If the address is `Some`, the client will listen to events coming from it.
+    governance_address: Option<Address>,
     verifier_contract_abi: Contract,
     confirmations_for_eth_event: Option<u64>,
 }
@@ -56,13 +59,19 @@ impl<E: EthInterface> EthHttpQueryClient<E> {
     pub fn new(
         client: E,
         zksync_contract_addr: Address,
+        governance_address: Option<Address>,
         confirmations_for_eth_event: Option<u64>,
     ) -> Self {
-        tracing::debug!("New eth client, contract addr: {:x}", zksync_contract_addr);
+        tracing::debug!(
+            "New eth client, zkSync addr: {:x}, governance addr: {:?}",
+            zksync_contract_addr,
+            governance_address
+        );
         Self {
             client,
             topics: Vec::new(),
             zksync_contract_addr,
+            governance_address,
             verifier_contract_abi: verifier_contract(),
             confirmations_for_eth_event,
         }
     }
@@ -75,7 +84,13 @@ impl<E: EthInterface> EthHttpQueryClient<E> {
         topics: Vec<H256>,
     ) -> Result<Vec<Log>, Error> {
         let filter = FilterBuilder::default()
-            .address(vec![self.zksync_contract_addr])
+            .address(
+                [Some(self.zksync_contract_addr), self.governance_address]
+                    .iter()
+                    .flatten()
+                    .copied()
+                    .collect(),
+            )
             .from_block(from)
             .to_block(to)
             .topics(Some(topics), None, None, None)
@@ -88,10 +103,14 @@ impl<E: EthInterface> EthHttpQueryClient<E> {

 #[async_trait::async_trait]
 impl<E: EthInterface + Send + Sync + 'static> EthClient for EthHttpQueryClient<E> {
     async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result<H256, Error> {
-        let vk_token: Token = self
+        // This is here for backward compatibility with the old verifier:
+        // Legacy verifier returns the full verification key;
+        // New verifier returns the hash of the verification key.
+
+        let vk_hash = self
             .client
             .call_contract_function(
-                "get_verification_key",
+                "verificationKeyHash",
                 (),
                 None,
                 Default::default(),
@@ -99,8 +118,25 @@ impl<E: EthInterface + Send + Sync + 'static> EthClient for EthHttpQueryClient<E>
diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs b/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs
new file mode 100644
--- /dev/null
+++ b/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs
+#[derive(Debug)]
+pub struct GovernanceUpgradesEventProcessor {
+    diamond_proxy_address: Address,
+    last_seen_version_id: ProtocolVersionId,
+    upgrade_proposal_signature: H256,
+}
+
+impl GovernanceUpgradesEventProcessor {
+    pub fn new(
+        diamond_proxy_address: Address,
+        last_seen_version_id: ProtocolVersionId,
+        governance_contract: &Contract,
+    ) -> Self {
+        Self {
+            diamond_proxy_address,
+            last_seen_version_id,
+            upgrade_proposal_signature: governance_contract
+                .event("TransparentOperationScheduled")
+                .expect("TransparentOperationScheduled event is missing in abi")
+                .signature(),
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl<W: EthClient + Sync> EventProcessor<W> for GovernanceUpgradesEventProcessor {
+    async fn process_events(
+        &mut self,
+        storage: &mut StorageProcessor<'_>,
+        client: &W,
+        events: Vec<Log>,
+    ) -> Result<(), Error> {
+        let mut upgrades = Vec::new();
+        for event in events
+            .into_iter()
+            .filter(|event| event.topics[0] == self.upgrade_proposal_signature)
+        {
+            let governance_operation = GovernanceOperation::try_from(event)
+                .map_err(|err| Error::LogParse(format!("{:?}", err)))?;
+            // Some calls can target other contracts than Diamond proxy, skip them.
+            for call in governance_operation
+                .calls
+                .into_iter()
+                .filter(|call| call.target == self.diamond_proxy_address)
+            {
+                let upgrade = ProtocolUpgrade::try_from(call)
+                    .map_err(|err| Error::LogParse(format!("{:?}", err)))?;
+                // Scheduler VK is not present in proposal event. It is hardcoded in verifier contract.
+                let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address {
+                    Some(client.scheduler_vk_hash(address).await?)
+ } else { + None + }; + upgrades.push((upgrade, scheduler_vk_hash)); + } + } + + if upgrades.is_empty() { + return Ok(()); + } + + let ids_str: Vec<_> = upgrades + .iter() + .map(|(u, _)| format!("{}", u.id as u16)) + .collect(); + tracing::debug!("Received upgrades with ids: {}", ids_str.join(", ")); + + let new_upgrades: Vec<_> = upgrades + .into_iter() + .skip_while(|(v, _)| v.id as u16 <= self.last_seen_version_id as u16) + .collect(); + if new_upgrades.is_empty() { + return Ok(()); + } + + let last_id = new_upgrades.last().unwrap().0.id; + let stage_start = Instant::now(); + for (upgrade, scheduler_vk_hash) in new_upgrades { + let previous_version = storage + .protocol_versions_dal() + .load_previous_version(upgrade.id) + .await + .unwrap_or_else(|| { + panic!( + "Expected some version preceding {:?} be present in DB", + upgrade.id + ) + }); + let new_version = previous_version.apply_upgrade(upgrade, scheduler_vk_hash); + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(new_version) + .await; + } + metrics::histogram!("eth_watcher.poll_eth_node", stage_start.elapsed(), "stage" => "persist_upgrades"); + + self.last_seen_version_id = last_id; + + Ok(()) + } + + fn relevant_topic(&self) -> H256 { + self.upgrade_proposal_signature + } +} diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs b/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs index 70e1db9a3f14..84ea1eeb04cf 100644 --- a/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs +++ b/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs @@ -2,6 +2,7 @@ use crate::eth_watch::client::{Error, EthClient}; use zksync_dal::StorageProcessor; use zksync_types::{web3::types::Log, H256}; +pub mod governance_upgrades; pub mod priority_ops; pub mod upgrades; diff --git a/core/lib/zksync_core/src/eth_watch/mod.rs b/core/lib/zksync_core/src/eth_watch/mod.rs index d6494ae040d5..26faf89a3003 100644 --- a/core/lib/zksync_core/src/eth_watch/mod.rs +++ b/core/lib/zksync_core/src/eth_watch/mod.rs @@ -14,7 +14,8 @@ use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::EthInterface; use zksync_system_constants::PRIORITY_EXPIRATION; use zksync_types::{ - web3::types::BlockNumber as Web3BlockNumber, Address, PriorityOpId, ProtocolVersionId, + ethabi::Contract, web3::types::BlockNumber as Web3BlockNumber, Address, PriorityOpId, + ProtocolVersionId, }; mod client; @@ -26,6 +27,7 @@ mod tests; use self::{ client::{Error, EthClient, EthHttpQueryClient, RETRY_LIMIT}, event_processors::{ + governance_upgrades::GovernanceUpgradesEventProcessor, priority_ops::PriorityOpsEventProcessor, upgrades::UpgradesEventProcessor, EventProcessor, }, metrics::{PollStage, METRICS}, @@ -48,7 +50,13 @@ pub struct EthWatch { } impl EthWatch { - pub async fn new(mut client: W, pool: &ConnectionPool, poll_interval: Duration) -> Self { + pub async fn new( + diamond_proxy_address: Address, + governance_contract: Option, + mut client: W, + pool: &ConnectionPool, + poll_interval: Duration, + ) -> Self { let mut storage = pool.access_storage_tagged("eth_watch").await.unwrap(); let state = Self::initialize_state(&client, &mut storage).await; @@ -58,11 +66,20 @@ impl EthWatch { let priority_ops_processor = PriorityOpsEventProcessor::new(state.next_expected_priority_id); let upgrades_processor = UpgradesEventProcessor::new(state.last_seen_version_id); - let event_processors: Vec>> = vec![ + let mut event_processors: Vec>> = vec![ Box::new(priority_ops_processor), Box::new(upgrades_processor), ]; + if let 
Some(governance_contract) = governance_contract { + let governance_upgrades_processor = GovernanceUpgradesEventProcessor::new( + diamond_proxy_address, + state.last_seen_version_id, + &governance_contract, + ); + event_processors.push(Box::new(governance_upgrades_processor)) + } + let topics = event_processors .iter() .map(|p| p.relevant_topic()) @@ -174,16 +191,25 @@ pub async fn start_eth_watch( pool: ConnectionPool, eth_gateway: E, diamond_proxy_addr: Address, + governance: Option<(Contract, Address)>, stop_receiver: watch::Receiver, ) -> anyhow::Result>> { let eth_watch = ETHWatchConfig::from_env().context("ETHWatchConfig::from_env()")?; let eth_client = EthHttpQueryClient::new( eth_gateway, diamond_proxy_addr, + governance.as_ref().map(|(_, address)| *address), eth_watch.confirmations_for_eth_event, ); - let mut eth_watch = EthWatch::new(eth_client, &pool, eth_watch.poll_interval()).await; + let mut eth_watch = EthWatch::new( + diamond_proxy_addr, + governance.map(|(contract, _)| contract), + eth_client, + &pool, + eth_watch.poll_interval(), + ) + .await; Ok(tokio::spawn(async move { eth_watch.run(pool, stop_receiver).await diff --git a/core/lib/zksync_core/src/eth_watch/tests.rs b/core/lib/zksync_core/src/eth_watch/tests.rs index d9448efbfabe..01fb83b98c01 100644 --- a/core/lib/zksync_core/src/eth_watch/tests.rs +++ b/core/lib/zksync_core/src/eth_watch/tests.rs @@ -10,7 +10,7 @@ use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::protocol_version::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}; use zksync_types::web3::types::{Address, BlockNumber}; use zksync_types::{ - ethabi::{encode, Hash, Token}, + ethabi::{encode, Contract, Hash, Token}, l1::{L1Tx, OpProcessingType, PriorityQueueType}, web3::types::Log, Execute, L1TxCommonData, PriorityOpId, ProtocolUpgrade, ProtocolVersion, ProtocolVersionId, @@ -22,7 +22,8 @@ use crate::eth_watch::{client::EthClient, EthWatch}; struct FakeEthClientData { transactions: HashMap>, - upgrades: HashMap>, + diamond_upgrades: HashMap>, + governance_upgrades: HashMap>, last_finalized_block_number: u64, } @@ -30,7 +31,8 @@ impl FakeEthClientData { fn new() -> Self { Self { transactions: Default::default(), - upgrades: Default::default(), + diamond_upgrades: Default::default(), + governance_upgrades: Default::default(), last_finalized_block_number: 0, } } @@ -45,12 +47,21 @@ impl FakeEthClientData { } } - fn add_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + fn add_diamond_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { for (upgrade, eth_block) in upgrades { - self.upgrades + self.diamond_upgrades .entry(*eth_block) .or_default() - .push(upgrade_into_log(upgrade.clone(), *eth_block)); + .push(upgrade_into_diamond_proxy_log(upgrade.clone(), *eth_block)); + } + } + + fn add_governance_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + for (upgrade, eth_block) in upgrades { + self.governance_upgrades + .entry(*eth_block) + .or_default() + .push(upgrade_into_governor_log(upgrade.clone(), *eth_block)); } } @@ -75,8 +86,12 @@ impl FakeEthClient { self.inner.write().await.add_transactions(transactions); } - async fn add_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { - self.inner.write().await.add_upgrades(upgrades); + async fn add_diamond_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + self.inner.write().await.add_diamond_upgrades(upgrades); + } + + async fn add_governance_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + 
self.inner.write().await.add_governance_upgrades(upgrades); } async fn set_last_finalized_block_number(&mut self, number: u64) { @@ -113,7 +128,10 @@ impl EthClient for FakeEthClient { if let Some(ops) = self.inner.read().await.transactions.get(&number) { logs.extend_from_slice(ops); } - if let Some(ops) = self.inner.read().await.upgrades.get(&number) { + if let Some(ops) = self.inner.read().await.diamond_upgrades.get(&number) { + logs.extend_from_slice(ops); + } + if let Some(ops) = self.inner.read().await.governance_upgrades.get(&number) { logs.extend_from_slice(ops); } } @@ -190,6 +208,8 @@ async fn test_normal_operation_l1_txs(connection_pool: ConnectionPool) { let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( + Address::default(), + None, client.clone(), &connection_pool, std::time::Duration::from_nanos(1), @@ -235,6 +255,8 @@ async fn test_normal_operation_upgrades(connection_pool: ConnectionPool) { let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( + Address::default(), + None, client.clone(), &connection_pool, std::time::Duration::from_nanos(1), @@ -243,7 +265,7 @@ async fn test_normal_operation_upgrades(connection_pool: ConnectionPool) { let mut storage = connection_pool.access_test_storage().await; client - .add_upgrades(&[ + .add_diamond_upgrades(&[ ( ProtocolUpgrade { id: ProtocolVersionId::latest(), @@ -293,6 +315,8 @@ async fn test_gap_in_upgrades(connection_pool: ConnectionPool) { let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( + Address::default(), + None, client.clone(), &connection_pool, std::time::Duration::from_nanos(1), @@ -301,7 +325,7 @@ async fn test_gap_in_upgrades(connection_pool: ConnectionPool) { let mut storage = connection_pool.access_test_storage().await; client - .add_upgrades(&[( + .add_diamond_upgrades(&[( ProtocolUpgrade { id: ProtocolVersionId::next(), tx: None, @@ -323,6 +347,66 @@ async fn test_gap_in_upgrades(connection_pool: ConnectionPool) { assert_eq!(db_ids[1], next_version); } +#[db_test] +async fn test_normal_operation_governance_upgrades(connection_pool: ConnectionPool) { + setup_db(&connection_pool).await; + + let mut client = FakeEthClient::new(); + let mut watcher = EthWatch::new( + Address::default(), + Some(governance_contract()), + client.clone(), + &connection_pool, + std::time::Duration::from_nanos(1), + ) + .await; + + let mut storage = connection_pool.access_test_storage().await; + client + .add_governance_upgrades(&[ + ( + ProtocolUpgrade { + id: ProtocolVersionId::latest(), + tx: None, + ..Default::default() + }, + 10, + ), + ( + ProtocolUpgrade { + id: ProtocolVersionId::next(), + tx: Some(build_upgrade_tx(ProtocolVersionId::next(), 18)), + ..Default::default() + }, + 18, + ), + ]) + .await; + client.set_last_finalized_block_number(15).await; + // second upgrade will not be processed, as it has less than 5 confirmations + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_ids = storage.protocol_versions_dal().all_version_ids().await; + // there should be genesis version and just added version + assert_eq!(db_ids.len(), 2); + assert_eq!(db_ids[1], ProtocolVersionId::latest()); + + client.set_last_finalized_block_number(20).await; + // now the second upgrade will be processed + watcher.loop_iteration(&mut storage).await.unwrap(); + let db_ids = storage.protocol_versions_dal().all_version_ids().await; + assert_eq!(db_ids.len(), 3); + assert_eq!(db_ids[2], ProtocolVersionId::next()); + + // check that tx was saved with the last upgrade + let tx = storage + 
.protocol_versions_dal() + .get_protocol_upgrade_tx(ProtocolVersionId::next()) + .await + .unwrap(); + assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); +} + #[db_test] #[should_panic] async fn test_gap_in_single_batch(connection_pool: ConnectionPool) { @@ -330,6 +414,8 @@ async fn test_gap_in_single_batch(connection_pool: ConnectionPool) { let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( + Address::default(), + None, client.clone(), &connection_pool, std::time::Duration::from_nanos(1), @@ -357,6 +443,8 @@ async fn test_gap_between_batches(connection_pool: ConnectionPool) { let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( + Address::default(), + None, client.clone(), &connection_pool, std::time::Duration::from_nanos(1), @@ -389,6 +477,8 @@ async fn test_overlapping_batches(connection_pool: ConnectionPool) { let mut client = FakeEthClient::new(); let mut watcher = EthWatch::new( + Address::default(), + None, client.clone(), &connection_pool, std::time::Duration::from_nanos(1), @@ -490,7 +580,72 @@ fn tx_into_log(tx: L1Tx) -> Log { } } -fn upgrade_into_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { +fn upgrade_into_diamond_proxy_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { + let diamond_cut = upgrade_into_diamond_cut(upgrade); + let data = encode(&[diamond_cut, Token::FixedBytes(vec![0u8; 32])]); + Log { + address: Address::repeat_byte(0x1), + topics: vec![zksync_contract() + .event("ProposeTransparentUpgrade") + .expect("ProposeTransparentUpgrade event is missing in abi") + .signature()], + data: data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + } +} + +fn upgrade_into_governor_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { + let diamond_cut = upgrade_into_diamond_cut(upgrade); + let execute_upgrade_selector = zksync_contract() + .function("executeUpgrade") + .unwrap() + .short_signature(); + let diamond_upgrade_calldata = execute_upgrade_selector + .iter() + .copied() + .chain(encode(&[diamond_cut])) + .collect(); + let governance_call = Token::Tuple(vec![ + Token::Address(Default::default()), + Token::Uint(U256::default()), + Token::Bytes(diamond_upgrade_calldata), + ]); + let governance_operation = Token::Tuple(vec![ + Token::Array(vec![governance_call]), + Token::FixedBytes(vec![0u8; 32]), + Token::FixedBytes(vec![0u8; 32]), + ]); + let final_data = encode(&[Token::FixedBytes(vec![0u8; 32]), governance_operation]); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![ + governance_contract() + .event("TransparentOperationScheduled") + .expect("TransparentOperationScheduled event is missing in abi") + .signature(), + Default::default(), + ], + data: final_data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + } +} + +fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { let tx_data_token = if let Some(tx) = upgrade.tx { Token::Tuple(vec![ Token::Uint(0xfe.into()), @@ -592,7 +747,7 @@ fn upgrade_into_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { Token::Address(Default::default()), ]); - let 
final_token = Token::Tuple(vec![ + Token::Tuple(vec![ Token::Array(vec![]), Token::Address(Default::default()), Token::Bytes( @@ -601,25 +756,7 @@ fn upgrade_into_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { .chain(encode(&[upgrade_token])) .collect(), ), - ]); - - let data = encode(&[final_token, Token::FixedBytes(vec![0u8; 32])]); - Log { - address: Address::repeat_byte(0x1), - topics: vec![zksync_contract() - .event("ProposeTransparentUpgrade") - .expect("ProposeTransparentUpgrade event is missing in abi") - .signature()], - data: data.into(), - block_hash: Some(H256::repeat_byte(0x11)), - block_number: Some(eth_block.into()), - transaction_hash: Some(H256::random()), - transaction_index: Some(0u64.into()), - log_index: Some(0u64.into()), - transaction_log_index: Some(0u64.into()), - log_type: None, - removed: None, - } + ]) } async fn setup_db(connection_pool: &ConnectionPool) { @@ -633,3 +770,68 @@ async fn setup_db(connection_pool: &ConnectionPool) { }) .await; } + +fn governance_contract() -> Contract { + let json = r#"[ + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "_id", + "type": "bytes32" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "delay", + "type": "uint256" + }, + { + "components": [ + { + "components": [ + { + "internalType": "address", + "name": "target", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "internalType": "struct IGovernance.Call[]", + "name": "calls", + "type": "tuple[]" + }, + { + "internalType": "bytes32", + "name": "predecessor", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "salt", + "type": "bytes32" + } + ], + "indexed": false, + "internalType": "struct IGovernance.Operation", + "name": "_operation", + "type": "tuple" + } + ], + "name": "TransparentOperationScheduled", + "type": "event" + } + ]"#; + serde_json::from_str(json).unwrap() +} diff --git a/core/lib/zksync_core/src/fee_ticker/error.rs b/core/lib/zksync_core/src/fee_ticker/error.rs deleted file mode 100644 index a7e2dceaa8ee..000000000000 --- a/core/lib/zksync_core/src/fee_ticker/error.rs +++ /dev/null @@ -1,12 +0,0 @@ -use thiserror::Error; -use zksync_types::Address; - -#[derive(Debug, Error)] -pub enum TickerError { - #[error("Token {0:x} is not being tracked for its price")] - PriceNotTracked(Address), - #[error("Third-party API data is temporarily unavailable")] - ApiDataUnavailable, - #[error("Fee ticker internal error")] - InternalError, -} diff --git a/core/lib/zksync_core/src/fee_ticker/gas_price.rs b/core/lib/zksync_core/src/fee_ticker/gas_price.rs deleted file mode 100644 index 3c4cff6991f0..000000000000 --- a/core/lib/zksync_core/src/fee_ticker/gas_price.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! This module contains the logic used to calculate the price of 1 gas in Wei. - -use num::{rational::Ratio, BigUint}; - -/// Converts any token price in USD into one Wei price per USD. -pub fn token_price_to_wei_price_usd(token_price: &Ratio, decimals: u32) -> Ratio { - token_price / BigUint::from(10u32).pow(decimals) -} diff --git a/core/lib/zksync_core/src/fee_ticker/mod.rs b/core/lib/zksync_core/src/fee_ticker/mod.rs deleted file mode 100644 index 3589c9e30b33..000000000000 --- a/core/lib/zksync_core/src/fee_ticker/mod.rs +++ /dev/null @@ -1,92 +0,0 @@ -//! This module defines the price components of L2 transactions. 
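
The deleted `gas_price.rs` above boiled down to one formula: the USD price of a single base unit (wei) of a token is the per-token USD price divided by 10^decimals. A tiny self-contained illustration of that arithmetic (the token price and decimals are made up for the example):

```rust
use num::{rational::Ratio, BigUint};

// Same formula as the deleted helper: USD per wei = USD per token / 10^decimals.
fn token_price_to_wei_price_usd(token_price: &Ratio<BigUint>, decimals: u32) -> Ratio<BigUint> {
    token_price / BigUint::from(10u32).pow(decimals)
}

fn main() {
    // A hypothetical token priced at 4 USD with 18 decimals.
    let price = Ratio::from(BigUint::from(4u32));
    let per_wei = token_price_to_wei_price_usd(&price, 18);
    // One wei of it costs 4 / 10^18 USD, i.e. 1 / 250_000_000_000_000_000 after reduction.
    assert_eq!(
        per_wei,
        Ratio::new(BigUint::from(4u32), BigUint::from(10u32).pow(18))
    );
}
```
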
- -use core::fmt::Debug; - -use bigdecimal::BigDecimal; -use multivm::vm_latest::utils::fee::base_fee_to_gas_per_pubdata; -use num::{rational::Ratio, BigUint}; -use zksync_types::Address; -use zksync_utils::ratio_to_big_decimal_normalized; - -use self::error::TickerError; -use zksync_dal::tokens_web3_dal::TokensWeb3Dal; - -pub mod error; -mod gas_price; -pub mod types; - -/// Amount of possible symbols after the decimal dot in the USD. -/// Used to convert `Ratio` to `BigDecimal`. -pub const USD_PRECISION: usize = 100; - -/// Minimum amount of symbols after the decimal dot in the USD. -/// Used to convert `Ratio` to `BigDecimal`. -pub const MIN_PRECISION: usize = 2; - -#[derive(Debug, PartialEq, Eq)] -pub enum TokenPriceRequestType { - USDForOneWei, - USDForOneToken, -} - -#[derive(Debug, Default)] -pub struct FeeTicker; - -impl FeeTicker { - /// Returns the token price in USD. - pub async fn get_l2_token_price( - tokens_web3_dal: &mut TokensWeb3Dal<'_, '_>, - request_type: TokenPriceRequestType, - l2_token_addr: &Address, - ) -> Result { - Self::get_l2_token_price_inner(tokens_web3_dal, request_type, l2_token_addr) - .await - .map(|final_price| { - ratio_to_big_decimal_normalized(&final_price, USD_PRECISION, MIN_PRECISION) - }) - } - - /// Returns the acceptable `gas_per_pubdata_byte` based on the current gas price. - pub fn gas_per_pubdata_byte(gas_price_wei: u64, base_fee: u64) -> u32 { - base_fee_to_gas_per_pubdata(gas_price_wei, base_fee) as u32 - } - - async fn get_l2_token_price_inner( - tokens_web3_dal: &mut TokensWeb3Dal<'_, '_>, - request_type: TokenPriceRequestType, - l2_token_addr: &Address, - ) -> Result, TickerError> { - let token_price = tokens_web3_dal - .get_token_price(l2_token_addr) - .await - .map_err(|_| TickerError::InternalError)? - .ok_or(TickerError::PriceNotTracked(*l2_token_addr))? - .usd_price; - - let final_price = match request_type { - TokenPriceRequestType::USDForOneToken => token_price, - TokenPriceRequestType::USDForOneWei => { - let token_metadata = tokens_web3_dal - .get_token_metadata(l2_token_addr) - .await - .map_err(|_| TickerError::InternalError)? - .ok_or_else(|| { - // It's kinda not OK that we have a price for token, but no metadata. - // Not a reason for a panic, but surely highest possible report level. 
- tracing::error!( - "Token {:x} has price, but no stored metadata", - l2_token_addr - ); - TickerError::PriceNotTracked(*l2_token_addr) - })?; - - gas_price::token_price_to_wei_price_usd( - &token_price, - token_metadata.decimals as u32, - ) - } - }; - - Ok(final_price) - } -} diff --git a/core/lib/zksync_core/src/fee_ticker/types.rs b/core/lib/zksync_core/src/fee_ticker/types.rs deleted file mode 100644 index daedb6f4a6d5..000000000000 --- a/core/lib/zksync_core/src/fee_ticker/types.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[derive(Debug, PartialEq, Eq)] -pub enum TokenPriceRequestType { - USDForOneWei, - USDForOneToken, -} diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index 4fe31352e4b2..ec16bd4d17f9 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -34,6 +34,22 @@ pub struct GenesisParams { pub first_l1_verifier_config: L1VerifierConfig, } +impl GenesisParams { + #[cfg(test)] + pub(crate) fn mock() -> Self { + use zksync_types::system_contracts::get_system_smart_contracts; + + Self { + first_validator: Address::repeat_byte(0x01), + protocol_version: ProtocolVersionId::latest(), + base_system_contracts: BaseSystemContracts::load_from_disk(), + system_contracts: get_system_smart_contracts(), + first_l1_verifier_config: L1VerifierConfig::default(), + first_verifier_address: Address::zero(), + } + } +} + pub async fn ensure_genesis_state( storage: &mut StorageProcessor<'_>, zksync_chain_id: L2ChainId, @@ -292,7 +308,13 @@ pub(crate) async fn create_genesis_l1_batch( .await; transaction .blocks_dal() - .insert_l1_batch(&genesis_l1_batch_header, &[], BlockGasCount::default(), &[]) + .insert_l1_batch( + &genesis_l1_batch_header, + &[], + BlockGasCount::default(), + &[], + &[], + ) .await .unwrap(); transaction diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index fd5714d0436c..599415a4edf3 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -28,7 +28,7 @@ use zksync_config::{ ApiConfig, ContractsConfig, DBConfig, ETHClientConfig, ETHSenderConfig, FetcherConfig, ProverConfigs, }; -use zksync_contracts::BaseSystemContracts; +use zksync_contracts::{governance_contract, BaseSystemContracts}; use zksync_dal::{ connection::DbVariant, healthcheck::ConnectionPoolHealthCheck, ConnectionPool, StorageProcessor, }; @@ -53,7 +53,6 @@ pub mod consistency_checker; pub mod data_fetchers; pub mod eth_sender; pub mod eth_watch; -pub mod fee_ticker; pub mod gas_tracker; pub mod genesis; pub mod house_keeper; @@ -67,9 +66,8 @@ pub mod sync_layer; pub mod witness_generator; use crate::api_server::healthcheck::HealthCheckHandle; -use crate::api_server::tx_sender::TxSenderConfig; -use crate::api_server::tx_sender::{TxSender, TxSenderBuilder}; -use crate::api_server::web3::{state::InternalApiConfig, Namespace}; +use crate::api_server::tx_sender::{TxSender, TxSenderBuilder, TxSenderConfig}; +use crate::api_server::web3::{state::InternalApiConfig, ApiServerHandles, Namespace}; use crate::eth_sender::{Aggregator, EthTxManager}; use crate::house_keeper::fri_proof_compressor_job_retry_manager::FriProofCompressorJobRetryManager; use crate::house_keeper::fri_proof_compressor_queue_monitor::FriProofCompressorStatsReporter; @@ -392,12 +390,12 @@ pub async fn initialize_components( ); let started_at = Instant::now(); - tracing::info!("initializing HTTP API"); + tracing::info!("Initializing HTTP API"); let bounded_gas_adjuster = gas_adjuster .get_or_init_bounded() .await 
.context("gas_adjuster.get_or_init_bounded()")?; - let (futures, health_check) = run_http_api( + let server_handles = run_http_api( &tx_sender_config, &state_keeper_config, &internal_api_config, @@ -412,18 +410,22 @@ pub async fn initialize_components( ) .await .context("run_http_api")?; - task_futures.extend(futures); - healthchecks.push(Box::new(health_check)); + + task_futures.extend(server_handles.tasks); + healthchecks.push(Box::new(server_handles.health_check)); let elapsed = started_at.elapsed(); APP_METRICS.init_latency[&InitStage::HttpApi].set(elapsed); - tracing::info!("initialized HTTP API in {elapsed:?}"); + tracing::info!( + "Initialized HTTP API on {:?} in {elapsed:?}", + server_handles.local_addr + ); } if components.contains(&Component::WsApi) { let storage_caches = match storage_caches { Some(storage_caches) => storage_caches, None => build_storage_caches(&replica_connection_pool, &mut task_futures) - .context("build_Storage_caches()")?, + .context("build_storage_caches()")?, }; let started_at = Instant::now(); @@ -432,7 +434,7 @@ pub async fn initialize_components( .get_or_init_bounded() .await .context("gas_adjuster.get_or_init_bounded()")?; - let (futures, health_check) = run_ws_api( + let server_handles = run_ws_api( &tx_sender_config, &state_keeper_config, &internal_api_config, @@ -446,11 +448,15 @@ pub async fn initialize_components( ) .await .context("run_ws_api")?; - task_futures.extend(futures); - healthchecks.push(Box::new(health_check)); + + task_futures.extend(server_handles.tasks); + healthchecks.push(Box::new(server_handles.health_check)); let elapsed = started_at.elapsed(); APP_METRICS.init_latency[&InitStage::WsApi].set(elapsed); - tracing::info!("initialized WS API in {elapsed:?}"); + tracing::info!( + "initialized WS API on {:?} in {elapsed:?}", + server_handles.local_addr + ); } if components.contains(&Component::ContractVerificationApi) { @@ -500,11 +506,17 @@ pub async fn initialize_components( .build() .await .context("failed to build eth_watch_pool")?; + let governance = contracts_config.governance_addr.map(|addr| { + let contract = governance_contract() + .expect("Governance contract must be present if governance_addr is set in config"); + (contract, addr) + }); task_futures.push( start_eth_watch( eth_watch_pool, query_client.clone(), main_zksync_contract_address, + governance, stop_receiver.clone(), ) .await @@ -1102,7 +1114,7 @@ async fn run_http_api( with_debug_namespace: bool, with_logs_request_translator_enabled: bool, storage_caches: PostgresStorageCaches, -) -> anyhow::Result<(Vec>>, ReactiveHealthCheck)> { +) -> anyhow::Result { let (tx_sender, vm_barrier) = build_tx_sender( tx_sender_config, &api_config.web3_json_rpc, @@ -1137,7 +1149,7 @@ async fn run_http_api( if with_logs_request_translator_enabled { api_builder = api_builder.enable_request_translator(); } - Ok(api_builder.build(stop_receiver.clone()).await) + api_builder.build(stop_receiver).await } #[allow(clippy::too_many_arguments)] @@ -1152,7 +1164,7 @@ async fn run_ws_api( stop_receiver: watch::Receiver, storage_caches: PostgresStorageCaches, with_logs_request_translator_enabled: bool, -) -> anyhow::Result<(Vec>>, ReactiveHealthCheck)> { +) -> anyhow::Result { let (tx_sender, vm_barrier) = build_tx_sender( tx_sender_config, &api_config.web3_json_rpc, @@ -1189,7 +1201,7 @@ async fn run_ws_api( if with_logs_request_translator_enabled { api_builder = api_builder.enable_request_translator(); } - Ok(api_builder.build(stop_receiver.clone()).await) + 
api_builder.build(stop_receiver.clone()).await } async fn circuit_breakers_for_components( diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index 3330633f8c13..f1ab5e7a123a 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -18,7 +18,7 @@ use zksync_merkle_tree::{ domain::{TreeMetadata, ZkSyncTree, ZkSyncTreeReader}, Key, MerkleTreeColumnFamily, NoVersionError, TreeEntryWithProof, }; -use zksync_storage::RocksDB; +use zksync_storage::{RocksDB, RocksDBOptions}; use zksync_types::{block::L1BatchHeader, L1BatchNumber, StorageLog, H256}; use super::metrics::{LoadChangesStage, TreeUpdateStage, METRICS}; @@ -61,15 +61,16 @@ impl AsyncTree { mode: MerkleTreeMode, multi_get_chunk_size: usize, block_cache_capacity: usize, + memtable_capacity: usize, ) -> Self { tracing::info!( "Initializing Merkle tree at `{db_path}` with {multi_get_chunk_size} multi-get chunk size, \ - {block_cache_capacity}B block cache", + {block_cache_capacity}B block cache, {memtable_capacity}B memtable capacity", db_path = db_path.display() ); let mut tree = tokio::task::spawn_blocking(move || { - let db = Self::create_db(&db_path, block_cache_capacity); + let db = Self::create_db(&db_path, block_cache_capacity, memtable_capacity); match mode { MerkleTreeMode::Full => ZkSyncTree::new(db), MerkleTreeMode::Lightweight => ZkSyncTree::new_lightweight(db), @@ -85,8 +86,18 @@ impl AsyncTree { } } - fn create_db(path: &Path, block_cache_capacity: usize) -> RocksDB { - let db = RocksDB::with_cache(path, Some(block_cache_capacity)); + fn create_db( + path: &Path, + block_cache_capacity: usize, + memtable_capacity: usize, + ) -> RocksDB { + let db = RocksDB::with_options( + path, + RocksDBOptions { + block_cache_capacity: Some(block_cache_capacity), + large_memtable_capacity: Some(memtable_capacity), + }, + ); if cfg!(test) { // We need sync writes for the unit tests to execute reliably. With the default config, // some writes to RocksDB may occur, but not be visible to the test code. 
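
The tree now opens its RocksDB instance through `RocksDB::with_options`, so the block cache and the new memtable capacity can be tuned in one place. A minimal sketch of a call site mirroring `create_db` above (the column-family type parameter and the concrete capacities are illustrative, not recommendations):

```rust
use std::path::Path;

use zksync_merkle_tree::MerkleTreeColumnFamily;
use zksync_storage::{RocksDB, RocksDBOptions};

// Both capacities are `Option`s, so either knob can be left at the RocksDB default
// by passing `None`.
fn open_tree_db(path: &Path) -> RocksDB<MerkleTreeColumnFamily> {
    RocksDB::with_options(
        path,
        RocksDBOptions {
            block_cache_capacity: Some(128 << 20),    // 128 MiB block cache
            large_memtable_capacity: Some(512 << 20), // 512 MiB memtables for write-heavy workloads
        },
    )
}
```

Setting `large_memtable_capacity` follows the new `MetadataCalculatorConfig::memtable_capacity` knob introduced in this diff: larger memtables absorb write bursts before flushing, which is what mitigates write stalls.
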
@@ -327,13 +338,8 @@ mod tests { use tempfile::TempDir; use db_test_macro::db_test; - use zksync_contracts::BaseSystemContracts; use zksync_dal::ConnectionPool; - use zksync_types::{ - proofs::PrepareBasicCircuitsJob, protocol_version::L1VerifierConfig, - system_contracts::get_system_smart_contracts, Address, L2ChainId, ProtocolVersionId, - StorageKey, StorageLogKind, - }; + use zksync_types::{proofs::PrepareBasicCircuitsJob, L2ChainId, StorageKey, StorageLogKind}; use super::*; use crate::{ @@ -403,23 +409,12 @@ mod tests { } } - fn mock_genesis_params() -> GenesisParams { - GenesisParams { - first_validator: Address::repeat_byte(0x01), - protocol_version: ProtocolVersionId::latest(), - base_system_contracts: BaseSystemContracts::load_from_disk(), - system_contracts: get_system_smart_contracts(), - first_l1_verifier_config: L1VerifierConfig::default(), - first_verifier_address: Address::zero(), - } - } - #[db_test] async fn loaded_logs_equivalence_basics(pool: ConnectionPool) { ensure_genesis_state( &mut pool.access_storage().await.unwrap(), L2ChainId::from(270), - &mock_genesis_params(), + &GenesisParams::mock(), ) .await .unwrap(); @@ -441,7 +436,7 @@ mod tests { #[db_test] async fn loaded_logs_equivalence_with_zero_no_op_logs(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId::from(270), &mock_genesis_params()) + ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await .unwrap(); @@ -455,13 +450,23 @@ mod tests { extend_db_state(&mut storage, logs).await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let mut tree = - AsyncTree::new(temp_dir.path().to_owned(), MerkleTreeMode::Full, 500, 0).await; + let mut tree = create_tree(&temp_dir).await; for number in 0..3 { assert_log_equivalence(&mut storage, &mut tree, L1BatchNumber(number)).await; } } + async fn create_tree(temp_dir: &TempDir) -> AsyncTree { + AsyncTree::new( + temp_dir.path().to_owned(), + MerkleTreeMode::Full, + 500, + 0, + 16 << 20, // 16 MiB + ) + .await + } + async fn assert_log_equivalence( storage: &mut StorageProcessor<'_>, tree: &mut AsyncTree, @@ -519,7 +524,7 @@ mod tests { #[db_test] async fn loaded_logs_equivalence_with_non_zero_no_op_logs(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId::from(270), &mock_genesis_params()) + ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await .unwrap(); @@ -556,8 +561,7 @@ mod tests { extend_db_state(&mut storage, logs).await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let mut tree = - AsyncTree::new(temp_dir.path().to_owned(), MerkleTreeMode::Full, 500, 0).await; + let mut tree = create_tree(&temp_dir).await; for batch_number in 0..5 { assert_log_equivalence(&mut storage, &mut tree, L1BatchNumber(batch_number)).await; } @@ -566,7 +570,7 @@ mod tests { #[db_test] async fn loaded_logs_equivalence_with_protective_reads(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId::from(270), &mock_genesis_params()) + ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) .await .unwrap(); @@ -596,8 +600,7 @@ mod tests { assert_eq!(read_logs_count, 7); let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); - let mut tree = - AsyncTree::new(temp_dir.path().to_owned(), 
MerkleTreeMode::Full, 500, 0).await; + let mut tree = create_tree(&temp_dir).await; for batch_number in 0..3 { assert_log_equivalence(&mut storage, &mut tree, L1BatchNumber(batch_number)).await; } diff --git a/core/lib/zksync_core/src/metadata_calculator/mod.rs b/core/lib/zksync_core/src/metadata_calculator/mod.rs index d0359c754f29..296dcd7aabcb 100644 --- a/core/lib/zksync_core/src/metadata_calculator/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/mod.rs @@ -70,8 +70,11 @@ pub struct MetadataCalculatorConfig<'a> { /// Chunk size for multi-get operations. Can speed up loading data for the Merkle tree on some environments, /// but the effects vary wildly depending on the setup (e.g., the filesystem used). pub multi_get_chunk_size: usize, - /// Capacity of RocksDB block cache in bytes. Reasonable values range from ~100 MB to several GB. + /// Capacity of RocksDB block cache in bytes. Reasonable values range from ~100 MiB to several GB. pub block_cache_capacity: usize, + /// Capacity of RocksDB memtables. Can be set to a reasonably large value (order of 512 MiB) + /// to mitigate write stalls. + pub memtable_capacity: usize, } impl<'a> MetadataCalculatorConfig<'a> { @@ -87,6 +90,7 @@ impl<'a> MetadataCalculatorConfig<'a> { max_l1_batches_per_iter: db_config.merkle_tree.max_l1_batches_per_iter, multi_get_chunk_size: db_config.merkle_tree.multi_get_chunk_size, block_cache_capacity: db_config.merkle_tree.block_cache_size(), + memtable_capacity: db_config.merkle_tree.memtable_capacity(), } } } diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index b8ebc0e988e0..3062fc4a6161 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -15,10 +15,8 @@ use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_types::{ block::{miniblock_hash, BlockGasCount, L1BatchHeader, MiniblockHeader}, proofs::PrepareBasicCircuitsJob, - protocol_version::L1VerifierConfig, - system_contracts::get_system_smart_contracts, - AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, - StorageKey, StorageLog, H256, + AccountTreeId, Address, L1BatchNumber, L2ChainId, MiniblockNumber, StorageKey, StorageLog, + H256, }; use zksync_utils::u32_to_h256; @@ -282,7 +280,13 @@ async fn test_postgres_backup_recovery( // Re-insert the last batch without metadata immediately. 
storage .blocks_dal() - .insert_l1_batch(batch_without_metadata, &[], BlockGasCount::default(), &[]) + .insert_l1_batch( + batch_without_metadata, + &[], + BlockGasCount::default(), + &[], + &[], + ) .await .unwrap(); insert_initial_writes_for_batch(&mut storage, batch_without_metadata.number).await; @@ -307,7 +311,7 @@ async fn test_postgres_backup_recovery( for batch_header in &removed_batches { storage .blocks_dal() - .insert_l1_batch(batch_header, &[], BlockGasCount::default(), &[]) + .insert_l1_batch(batch_header, &[], BlockGasCount::default(), &[], &[]) .await .unwrap(); insert_initial_writes_for_batch(&mut storage, batch_header.number).await; @@ -397,27 +401,9 @@ async fn setup_calculator_with_options( let mut storage = pool.access_storage().await.unwrap(); if storage.blocks_dal().is_genesis_needed().await.unwrap() { - let chain_id = L2ChainId::from(270); - let protocol_version = ProtocolVersionId::latest(); - let base_system_contracts = BaseSystemContracts::load_from_disk(); - let system_contracts = get_system_smart_contracts(); - let first_validator = Address::repeat_byte(0x01); - let first_l1_verifier_config = L1VerifierConfig::default(); - let first_verifier_address = Address::zero(); - ensure_genesis_state( - &mut storage, - chain_id, - &GenesisParams { - first_validator, - protocol_version, - base_system_contracts, - system_contracts, - first_l1_verifier_config, - first_verifier_address, - }, - ) - .await - .unwrap(); + ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) + .await + .unwrap(); } metadata_calculator } @@ -520,7 +506,7 @@ pub(super) async fn extend_db_state( storage .blocks_dal() - .insert_l1_batch(&header, &[], BlockGasCount::default(), &[]) + .insert_l1_batch(&header, &[], BlockGasCount::default(), &[], &[]) .await .unwrap(); storage @@ -641,27 +627,9 @@ async fn remove_l1_batches( #[db_test] async fn deduplication_works_as_expected(pool: ConnectionPool) { let mut storage = pool.access_storage().await.unwrap(); - - let first_validator = Address::repeat_byte(0x01); - let protocol_version = ProtocolVersionId::latest(); - let base_system_contracts = BaseSystemContracts::load_from_disk(); - let system_contracts = get_system_smart_contracts(); - let first_l1_verifier_config = L1VerifierConfig::default(); - let first_verifier_address = Address::zero(); - ensure_genesis_state( - &mut storage, - L2ChainId::from(270), - &GenesisParams { - protocol_version, - first_validator, - base_system_contracts, - system_contracts, - first_l1_verifier_config, - first_verifier_address, - }, - ) - .await - .unwrap(); + ensure_genesis_state(&mut storage, L2ChainId::from(270), &GenesisParams::mock()) + .await + .unwrap(); let logs = gen_storage_logs(100..120, 1).pop().unwrap(); let hashed_keys: Vec<_> = logs.iter().map(|log| log.key.hashed_key()).collect(); diff --git a/core/lib/zksync_core/src/metadata_calculator/updater.rs b/core/lib/zksync_core/src/metadata_calculator/updater.rs index 0298ac7871b9..384ef1b2bdd4 100644 --- a/core/lib/zksync_core/src/metadata_calculator/updater.rs +++ b/core/lib/zksync_core/src/metadata_calculator/updater.rs @@ -44,6 +44,7 @@ impl TreeUpdater { mode, config.multi_get_chunk_size, config.block_cache_capacity, + config.memtable_capacity, ) .await; Self { diff --git a/core/lib/zksync_core/src/metrics.rs b/core/lib/zksync_core/src/metrics.rs index 11ec3bb74660..c098adda255a 100644 --- a/core/lib/zksync_core/src/metrics.rs +++ b/core/lib/zksync_core/src/metrics.rs @@ -180,6 +180,8 @@ pub(crate) struct ExternalNodeMetrics { pub 
sync_lag: Gauge, /// Number of the last L1 batch checked by the reorg detector or consistency checker. pub last_correct_batch: Family>, + /// Number of the last miniblock checked by the reorg detector or consistency checker. + pub last_correct_miniblock: Family>, } #[vise::register] diff --git a/core/lib/zksync_core/src/proof_data_handler/mod.rs b/core/lib/zksync_core/src/proof_data_handler/mod.rs index 03abde127156..2c3454662324 100644 --- a/core/lib/zksync_core/src/proof_data_handler/mod.rs +++ b/core/lib/zksync_core/src/proof_data_handler/mod.rs @@ -27,7 +27,7 @@ fn fri_l1_verifier_config_from_env() -> anyhow::Result { // The base layer commitment is not used in the FRI prover verification. recursion_circuits_set_vks_hash: H256::zero(), }, - recursion_scheduler_level_vk_hash: config.fri_recursion_scheduler_level_vk_hash, + recursion_scheduler_level_vk_hash: config.snark_wrapper_vk_hash, }) } diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index 2f49b54f68c9..a816074554cc 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -28,7 +28,6 @@ pub(crate) struct RequestProcessor { } pub(crate) enum RequestProcessorError { - NoPendingBatches, ObjectStore(ObjectStoreError), Sqlx(SqlxError), } @@ -36,10 +35,6 @@ pub(crate) enum RequestProcessorError { impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { - Self::NoPendingBatches => ( - StatusCode::NOT_FOUND, - "No pending batches to process".to_owned(), - ), RequestProcessorError::ObjectStore(err) => { tracing::error!("GCS error: {:?}", err); ( @@ -85,15 +80,19 @@ impl RequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - let l1_batch_number = self + let l1_batch_number_result = self .pool .access_storage() .await .unwrap() .proof_generation_dal() .get_next_block_to_be_proven(self.config.proof_generation_timeout()) - .await - .ok_or(RequestProcessorError::NoPendingBatches)?; + .await; + + let l1_batch_number = match l1_batch_number_result { + Some(number) => number, + None => return Ok(Json(ProofGenerationDataResponse::Success(None))), // no batches pending to be proven + }; let blob = self .blob_store @@ -122,7 +121,9 @@ impl RequestProcessor { l1_verifier_config, }; - Ok(Json(ProofGenerationDataResponse::Success(proof_gen_data))) + Ok(Json(ProofGenerationDataResponse::Success(Some( + proof_gen_data, + )))) } pub(crate) async fn submit_proof( diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs index 1d58c78ceb70..d413408fdd2f 100644 --- a/core/lib/zksync_core/src/reorg_detector/mod.rs +++ b/core/lib/zksync_core/src/reorg_detector/mod.rs @@ -1,11 +1,11 @@ use std::{future::Future, time::Duration}; use zksync_dal::ConnectionPool; -use zksync_types::L1BatchNumber; +use zksync_types::{L1BatchNumber, MiniblockNumber}; use zksync_web3_decl::{ jsonrpsee::core::Error as RpcError, jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, - namespaces::ZksNamespaceClient, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, RpcResult, }; @@ -41,6 +41,40 @@ impl ReorgDetector { Self { client, pool } } + /// Compares hashes of the given local miniblock and the same miniblock from main node. 
diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs
index 1d58c78ceb70..d413408fdd2f 100644
--- a/core/lib/zksync_core/src/reorg_detector/mod.rs
+++ b/core/lib/zksync_core/src/reorg_detector/mod.rs
@@ -1,11 +1,11 @@
 use std::{future::Future, time::Duration};

 use zksync_dal::ConnectionPool;
-use zksync_types::L1BatchNumber;
+use zksync_types::{L1BatchNumber, MiniblockNumber};
 use zksync_web3_decl::{
     jsonrpsee::core::Error as RpcError,
     jsonrpsee::http_client::{HttpClient, HttpClientBuilder},
-    namespaces::ZksNamespaceClient,
+    namespaces::{EthNamespaceClient, ZksNamespaceClient},
     RpcResult,
 };
@@ -41,6 +41,40 @@ impl ReorgDetector {
         Self { client, pool }
     }

+    /// Compares hashes of the given local miniblock and the same miniblock from the main node.
+    async fn miniblock_hashes_match(&self, miniblock_number: MiniblockNumber) -> RpcResult<bool> {
+        let local_hash = self
+            .pool
+            .access_storage()
+            .await
+            .unwrap()
+            .blocks_dal()
+            .get_miniblock_header(miniblock_number)
+            .await
+            .unwrap()
+            .unwrap_or_else(|| {
+                panic!(
+                    "Header does not exist for local miniblock #{}",
+                    miniblock_number
+                )
+            })
+            .hash;
+
+        let Some(hash) = self
+            .client
+            .get_block_by_number(miniblock_number.0.into(), false)
+            .await?
+            .map(|header| header.hash)
+        else {
+            // Due to reorg, locally we may be ahead of the main node.
+            // Lack of the hash on the main node is treated as a hash match;
+            // we need to wait for our knowledge of the main node to catch up.
+            return Ok(true);
+        };
+
+        Ok(hash == local_hash)
+    }
+
     /// Compares root hashes of the latest local batch and of the same batch from the main node.
     async fn root_hashes_match(&self, l1_batch_number: L1BatchNumber) -> RpcResult<bool> {
         // Unwrapping is fine since the caller always checks that these root hashes exist.
@@ -66,9 +100,9 @@
             .and_then(|b| b.base.root_hash)
         else {
             // Due to reorg, locally we may be ahead of the main node.
-            // Lack of the root hash on the main node is treated as a hash mismatch,
-            // so we can continue searching for the last correct block.
-            return Ok(false);
+            // Lack of the root hash on the main node is treated as a hash match;
+            // we need to wait for our knowledge of the main node to catch up.
+            return Ok(true);
         };
         Ok(hash == local_hash)
     }
@@ -100,36 +134,6 @@ impl ReorgDetector {
         }
     }

-    /// Checks if the external node is ahead of the main node *NOT* because of a reorg.
-    /// In such an event, we should not do anything.
-    ///
-    /// Theoretically, external node might calculate batch root hash before the main
-    /// node. Therefore, we need to be sure that we check a batch which has root hashes
-    /// both on the main node and on the external node.
-    async fn is_legally_ahead_of_main_node(
-        &self,
-        sealed_l1_batch_number: L1BatchNumber,
-    ) -> RpcResult<bool> {
-        // We must know the latest batch on the main node *before* we ask it for a root hash
-        // to prevent a race condition (asked for root hash, batch sealed on main node, we've got
-        // inconsistent results).
-        let last_main_node_l1_batch = self.client.get_l1_batch_number().await?;
-        let main_node_l1_batch_root_hash = self
-            .client
-            .get_l1_batch_details(sealed_l1_batch_number)
-            .await?
-            .and_then(|b| b.base.root_hash);
-
-        let en_ahead_for = sealed_l1_batch_number
-            .0
-            .checked_sub(last_main_node_l1_batch.as_u32());
-        // Theoretically it's possible that the EN would not only calculate the root hash, but also seal the batch
-        // quicker than the main node. So, we allow us to be at most one batch ahead of the main node.
-        // If the gap is bigger, it's certainly a reorg.
-        // Allowing the gap is safe: if reorg has happened, it'll be detected anyway in the future iterations.
-        Ok(main_node_l1_batch_root_hash.is_none() && en_ahead_for <= Some(1))
-    }
-
     async fn run_inner(&self) -> RpcResult<L1BatchNumber> {
         loop {
             let sealed_l1_batch_number = self
@@ -142,29 +146,50 @@
                 .await
                 .unwrap();

-            // If the main node has to catch up with us, we should not do anything just yet.
-            if self
-                .is_legally_ahead_of_main_node(sealed_l1_batch_number)
-                .await?
-            {
-                tracing::trace!(
-                    "Local state was updated ahead of the main node. Waiting for the main node to seal the batch"
-                );
-                tokio::time::sleep(SLEEP_INTERVAL).await;
-                continue;
-            }
+            let sealed_miniblock_number = self
+                .pool
+                .access_storage()
+                .await
+                .unwrap()
+                .blocks_dal()
+                .get_sealed_miniblock_number()
+                .await
+                .unwrap();

-            // At this point we're certain that if we detect a reorg, it's real.
-            tracing::trace!("Checking for reorgs - L1 batch #{sealed_l1_batch_number}");
-            if self.root_hashes_match(sealed_l1_batch_number).await? {
+            tracing::trace!(
+                "Checking for reorgs - L1 batch #{sealed_l1_batch_number}, \
+                 miniblock number #{sealed_miniblock_number}"
+            );
+
+            let root_hashes_match = self.root_hashes_match(sealed_l1_batch_number).await?;
+            let miniblock_hashes_match =
+                self.miniblock_hashes_match(sealed_miniblock_number).await?;
+
+            // The only event that triggers reorg detection and node rollback is a hash
+            // mismatch at the same block height, be it for miniblocks or batches.
+            //
+            // In other cases there is only a height mismatch, which means that one of
+            // the nodes needs to catch up; it is not certain that a reorg is actually
+            // taking place.
+            if root_hashes_match && miniblock_hashes_match {
                 EN_METRICS.last_correct_batch[&CheckerComponent::ReorgDetector]
                     .set(sealed_l1_batch_number.0.into());
+                EN_METRICS.last_correct_miniblock[&CheckerComponent::ReorgDetector]
+                    .set(sealed_miniblock_number.0.into());
                 tokio::time::sleep(SLEEP_INTERVAL).await;
             } else {
-                tracing::warn!(
-                    "Reorg detected: last state hash doesn't match the state hash from main node \
-                    (L1 batch #{sealed_l1_batch_number})"
-                );
+                if !root_hashes_match {
+                    tracing::warn!(
+                        "Reorg detected: last state hash doesn't match the state hash from \
+                         main node (L1 batch #{sealed_l1_batch_number})"
+                    );
+                }
+                if !miniblock_hashes_match {
+                    tracing::warn!(
+                        "Reorg detected: last miniblock hash doesn't match the miniblock hash \
+                         from main node (miniblock #{sealed_miniblock_number})"
+                    );
+                }
                 tracing::info!("Searching for the first diverged batch");
                 let last_correct_l1_batch = self.detect_reorg(sealed_l1_batch_number).await?;
                 tracing::info!(
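For context on the "Searching for the first diverged batch" step: conceptually it is a binary search for the last height at which local and main-node hashes still agree. A minimal sketch of that idea, under the assumption that matches are monotone (once diverged, always diverged); the `hashes_match` predicate is a hypothetical stand-in for the DB-plus-RPC comparison performed by `root_hashes_match`:

```rust
// Binary search for the last block number whose hashes still match.
// Assumes block `lo` is known to match and the match predicate is monotone.
fn last_correct_block(mut lo: u32, mut hi: u32, hashes_match: impl Fn(u32) -> bool) -> u32 {
    // Invariant: all blocks <= `lo` match; all blocks > `hi` diverge.
    while lo < hi {
        let mid = lo + (hi - lo + 1) / 2;
        if hashes_match(mid) {
            lo = mid; // everything up to `mid` is still consistent
        } else {
            hi = mid - 1; // divergence is at `mid` or earlier
        }
    }
    lo
}
```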
diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs
index 732f58846b87..b03676b9aada 100644
--- a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs
@@ -71,7 +71,7 @@ impl TxExecutionResult {
 #[async_trait]
 pub trait L1BatchExecutorBuilder: 'static + Send + Sync + fmt::Debug {
     async fn init_batch(
-        &self,
+        &mut self,
         l1_batch_params: L1BatchEnv,
         system_env: SystemEnv,
     ) -> BatchExecutorHandle;
@@ -112,7 +112,7 @@ impl MainBatchExecutorBuilder {
 #[async_trait]
 impl L1BatchExecutorBuilder for MainBatchExecutorBuilder {
     async fn init_batch(
-        &self,
+        &mut self,
         l1_batch_params: L1BatchEnv,
         system_env: SystemEnv,
     ) -> BatchExecutorHandle {
diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
index bb01c73a142b..fced87479283 100644
--- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
@@ -29,17 +29,16 @@ use crate::{
     extractors,
     io::{
         common::{l1_batch_params, load_pending_batch, poll_iters},
-        MiniblockSealerHandle, PendingBatchData, StateKeeperIO,
+        MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO,
     },
     mempool_actor::l2_tx_filter,
     metrics::KEEPER_METRICS,
+    seal_criteria::{IoSealCriteria, TimeoutSealer},
     updates::UpdatesManager,
     MempoolGuard,
 },
};

-use super::MiniblockParams;
-
 /// Mempool-based IO for the state keeper.
 /// Receives transactions from the database through the mempool filtering logic.
 /// Decides which batch parameters should be used for the new batch.
@@ -48,6 +47,7 @@
 pub(crate) struct MempoolIO<G> {
     mempool: MempoolGuard,
     pool: ConnectionPool,
+    timeout_sealer: TimeoutSealer,
     filter: L2TxFilter,
     current_miniblock_number: MiniblockNumber,
     miniblock_sealer_handle: MiniblockSealerHandle,
@@ -65,8 +65,25 @@
     virtual_blocks_per_miniblock: u32,
 }

+impl<G> IoSealCriteria for MempoolIO<G>
+where
+    G: L1GasPriceProvider + 'static + Send + Sync,
+{
+    fn should_seal_l1_batch_unconditionally(&mut self, manager: &UpdatesManager) -> bool {
+        self.timeout_sealer
+            .should_seal_l1_batch_unconditionally(manager)
+    }
+
+    fn should_seal_miniblock(&mut self, manager: &UpdatesManager) -> bool {
+        self.timeout_sealer.should_seal_miniblock(manager)
+    }
+}
+
 #[async_trait]
-impl<G: L1GasPriceProvider + 'static + Send + Sync> StateKeeperIO for MempoolIO<G> {
+impl<G> StateKeeperIO for MempoolIO<G>
+where
+    G: L1GasPriceProvider + 'static + Send + Sync,
+{
     fn current_l1_batch_number(&self) -> L1BatchNumber {
         self.current_l1_batch_number
     }
@@ -423,6 +440,7 @@ impl<G: L1GasPriceProvider> MempoolIO<G> {
         Self {
             mempool,
             pool,
+            timeout_sealer: TimeoutSealer::new(config),
             filter: L2TxFilter::default(),
             // ^ Will be initialized properly on the first newly opened batch
             current_l1_batch_number: last_sealed_l1_batch_header.number + 1,
diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs
index eab47bf31b9d..27ce52b83cef 100644
--- a/core/lib/zksync_core/src/state_keeper/io/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs
@@ -22,6 +22,7 @@ pub(crate) mod seal_logic;
 pub(crate) use self::mempool::MempoolIO;
 use super::{
     metrics::{MiniblockQueueStage, MINIBLOCK_METRICS},
+    seal_criteria::IoSealCriteria,
     updates::{MiniblockSealCommand, UpdatesManager},
 };
@@ -65,7 +66,7 @@ pub struct MiniblockParams {
 /// it's used to receive volatile parameters (such as batch parameters), and also it's used to perform
 /// mutable operations on the persistent state (e.g. persist executed batches).
 #[async_trait]
-pub trait StateKeeperIO: 'static + Send {
+pub trait StateKeeperIO: 'static + Send + IoSealCriteria {
     /// Returns the number of the currently processed L1 batch.
     fn current_l1_batch_number(&self) -> L1BatchNumber;
     /// Returns the number of the currently processed miniblock (aka L2 block).
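Since `StateKeeperIO` now inherits `IoSealCriteria`, every IO implementation supplies its own non-deterministic sealing rules. A rough sketch of a custom implementor (not from this diff; `SealAfterTwoTxs` is hypothetical, and the crate-internal types are assumed to be in scope):

```rust
// Hypothetical implementor, mirroring the trait surface introduced above;
// assumes `IoSealCriteria` and `UpdatesManager` are imported from this crate.
#[derive(Debug)]
struct SealAfterTwoTxs;

impl IoSealCriteria for SealAfterTwoTxs {
    fn should_seal_l1_batch_unconditionally(&mut self, _manager: &UpdatesManager) -> bool {
        // Never force-seal the batch; leave that to the `ConditionalSealer`.
        false
    }

    fn should_seal_miniblock(&mut self, manager: &UpdatesManager) -> bool {
        // Seal a miniblock once it has accumulated at least two executed transactions.
        manager.miniblock.executed_transactions.len() >= 2
    }
}
```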
diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs
index 7067d905b0d3..24b8a5bb2cfc 100644
--- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs
@@ -139,6 +139,7 @@ impl UpdatesManager {
                 &initial_bootloader_contents,
                 self.l1_batch.l1_gas_count,
                 &events_queue,
+                &finished_batch.final_execution_state.storage_refunds,
             )
             .await
             .unwrap();
diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs
index 57be16319ea4..d685e1fc81d9 100644
--- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs
@@ -160,7 +160,7 @@ impl Tester {
         let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap();
         storage
             .blocks_dal()
-            .insert_l1_batch(&batch_header, &[], Default::default(), &[])
+            .insert_l1_batch(&batch_header, &[], Default::default(), &[], &[])
             .await
             .unwrap();
         storage
diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs
index 60ed817ba691..c775892618bb 100644
--- a/core/lib/zksync_core/src/state_keeper/keeper.rs
+++ b/core/lib/zksync_core/src/state_keeper/keeper.rs
@@ -15,7 +15,7 @@ use super::{
     extractors,
     io::{MiniblockParams, PendingBatchData, StateKeeperIO},
     metrics::{AGGREGATION_METRICS, KEEPER_METRICS, L1_BATCH_METRICS},
-    seal_criteria::{SealData, SealManager, SealResolution},
+    seal_criteria::{ConditionalSealer, SealData, SealResolution},
     types::ExecutionMetricsForCriteria,
     updates::UpdatesManager,
 };
@@ -57,7 +57,7 @@ pub struct ZkSyncStateKeeper {
     stop_receiver: watch::Receiver<bool>,
     io: Box<dyn StateKeeperIO>,
     batch_executor_base: Box<dyn L1BatchExecutorBuilder>,
-    sealer: SealManager,
+    sealer: Option<ConditionalSealer>,
 }

 impl ZkSyncStateKeeper {
@@ -65,13 +65,26 @@
         stop_receiver: watch::Receiver<bool>,
         io: Box<dyn StateKeeperIO>,
         batch_executor_base: Box<dyn L1BatchExecutorBuilder>,
-        sealer: SealManager,
+        sealer: ConditionalSealer,
     ) -> Self {
-        ZkSyncStateKeeper {
+        Self {
             stop_receiver,
             io,
             batch_executor_base,
-            sealer,
+            sealer: Some(sealer),
+        }
+    }
+
+    pub fn without_sealer(
+        stop_receiver: watch::Receiver<bool>,
+        io: Box<dyn StateKeeperIO>,
+        batch_executor_base: Box<dyn L1BatchExecutorBuilder>,
+    ) -> Self {
+        Self {
+            stop_receiver,
+            io,
+            batch_executor_base,
+            sealer: None,
         }
     }
@@ -371,7 +384,7 @@ impl ZkSyncStateKeeper {
         while !self.is_canceled() {
             if self
-                .sealer
+                .io
                 .should_seal_l1_batch_unconditionally(updates_manager)
             {
                 tracing::debug!(
@@ -381,7 +394,7 @@
                 return Ok(());
             }

-            if self.sealer.should_seal_miniblock(updates_manager) {
+            if self.io.should_seal_miniblock(updates_manager) {
                 tracing::debug!(
                     "Miniblock #{} (L1 batch #{}) should be sealed as per sealing rules",
                     self.io.current_miniblock_number(),
@@ -632,13 +645,18 @@
                     + updates_manager.pending_txs_encoding_size(),
                 writes_metrics: block_writes_metrics,
             };
-            self.sealer.should_seal_l1_batch(
-                self.io.current_l1_batch_number().0,
-                updates_manager.batch_timestamp() as u128 * 1_000,
-                updates_manager.pending_executed_transactions_len() + 1,
-                &block_data,
-                &tx_data,
-            )
+
+            if let Some(sealer) = &self.sealer {
+                sealer.should_seal_l1_batch(
+                    self.io.current_l1_batch_number().0,
+                    updates_manager.batch_timestamp() as u128 * 1_000,
+                    updates_manager.pending_executed_transactions_len() + 1,
+                    &block_data,
+                    &tx_data,
+                )
+            } else {
+                SealResolution::NoSeal
+            }
         }
     };
     (resolution, exec_result)
diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs
b/core/lib/zksync_core/src/state_keeper/mod.rs index 898a6176bfe6..18401001b4de 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -17,16 +17,18 @@ mod mempool_actor; pub(crate) mod metrics; pub(crate) mod seal_criteria; #[cfg(test)] -mod tests; +pub(crate) mod tests; pub(crate) mod types; pub(crate) mod updates; pub use self::{ batch_executor::{L1BatchExecutorBuilder, MainBatchExecutorBuilder}, keeper::ZkSyncStateKeeper, - seal_criteria::SealManager, }; -pub(crate) use self::{io::MiniblockSealer, mempool_actor::MempoolFetcher, types::MempoolGuard}; +pub(crate) use self::{ + io::MiniblockSealer, mempool_actor::MempoolFetcher, seal_criteria::ConditionalSealer, + types::MempoolGuard, +}; use self::io::{MempoolIO, MiniblockSealerHandle}; use crate::l1_gas_price::L1GasPriceProvider; @@ -76,7 +78,7 @@ where ) .await; - let sealer = SealManager::new(state_keeper_config); + let sealer = ConditionalSealer::new(state_keeper_config); ZkSyncStateKeeper::new( stop_receiver, Box::new(io), diff --git a/core/lib/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs b/core/lib/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs index d9c7b1123755..21233c051bc7 100644 --- a/core/lib/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs +++ b/core/lib/zksync_core/src/state_keeper/seal_criteria/conditional_sealer.rs @@ -7,10 +7,13 @@ use zksync_config::configs::chain::StateKeeperConfig; use super::{criteria, SealCriterion, SealData, SealResolution, AGGREGATION_METRICS}; +/// Checks if an L1 batch should be sealed after executing a transaction. +/// +/// The checks are deterministic, i.e., should depend solely on execution metrics and [`StateKeeperConfig`]. +/// Non-deterministic seal criteria are expressed using [`IoSealCriteria`](super::IoSealCriteria). #[derive(Debug)] pub struct ConditionalSealer { config: StateKeeperConfig, - /// Primary sealers set that is used to check if batch should be sealed after executing a transaction. sealers: Vec>, } @@ -32,7 +35,7 @@ impl ConditionalSealer { None } - pub(super) fn new(config: StateKeeperConfig) -> Self { + pub(crate) fn new(config: StateKeeperConfig) -> Self { let sealers = Self::default_sealers(); Self { config, sealers } } @@ -45,7 +48,7 @@ impl ConditionalSealer { Self { config, sealers } } - pub(super) fn should_seal_l1_batch( + pub fn should_seal_l1_batch( &self, l1_batch_number: u32, block_open_timestamp_ms: u128, diff --git a/core/lib/zksync_core/src/state_keeper/seal_criteria/mod.rs b/core/lib/zksync_core/src/state_keeper/seal_criteria/mod.rs index ffc5bd4b4728..da410ebaf5f1 100644 --- a/core/lib/zksync_core/src/state_keeper/seal_criteria/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/seal_criteria/mod.rs @@ -118,141 +118,58 @@ pub(super) trait SealCriterion: fmt::Debug + Send + 'static { fn prom_criterion_name(&self) -> &'static str; } -/// Sealer function that returns a boolean. -pub type SealerFn = dyn Fn(&UpdatesManager) -> bool + Send; - -pub struct SealManager { - /// Conditional sealer, i.e. one that can decide whether the batch should be sealed after executing a tx. - /// Currently, it's expected to be `Some` on the main node and `None` on the external nodes, since external nodes - /// do not decide whether to seal the batch or not. - conditional_sealer: Option, - /// Unconditional batch sealer, i.e. one that can be used if we should seal the batch *without* executing a tx. 
- /// If any of the unconditional sealers returns `true`, the batch will be sealed. - /// - /// Note: only non-empty batch can be sealed. - unconditional_sealers: Vec>, - /// Miniblock sealer function used to determine if we should seal the miniblock. - /// If any of the miniblock sealers returns `true`, the miniblock will be sealed. - miniblock_sealers: Vec>, +/// I/O-dependent seal criteria. +pub trait IoSealCriteria { + /// Checks whether an L1 batch should be sealed unconditionally (i.e., regardless of metrics + /// related to transaction execution) given the provided `manager` state. + fn should_seal_l1_batch_unconditionally(&mut self, manager: &UpdatesManager) -> bool; + /// Checks whether a miniblock should be sealed given the provided `manager` state. + fn should_seal_miniblock(&mut self, manager: &UpdatesManager) -> bool; } -impl fmt::Debug for SealManager { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter - .debug_struct("SealManager") - .finish_non_exhaustive() - } +#[derive(Debug, Clone, Copy)] +pub(super) struct TimeoutSealer { + block_commit_deadline_ms: u64, + miniblock_commit_deadline_ms: u64, } -impl SealManager { - /// Creates a default pre-configured seal manager for the main node. - pub(super) fn new(config: StateKeeperConfig) -> Self { - let timeout_batch_sealer = Self::timeout_batch_sealer(config.block_commit_deadline_ms); - let timeout_miniblock_sealer = - Self::timeout_miniblock_sealer(config.miniblock_commit_deadline_ms); - // Currently, it's assumed that timeout is the only criterion for miniblock sealing. - // If this doesn't hold and some miniblocks are sealed in less than 1 second, - // then state keeper will be blocked waiting for the miniblock timestamp to be changed. - let miniblock_sealers = vec![timeout_miniblock_sealer]; - - let conditional_sealer = ConditionalSealer::new(config); - - Self::custom( - Some(conditional_sealer), - vec![timeout_batch_sealer], - miniblock_sealers, - ) - } - - /// Allows to create a seal manager object from externally-defined sealers. - pub fn custom( - conditional_sealer: Option, - unconditional_sealers: Vec>, - miniblock_sealers: Vec>, - ) -> Self { +impl TimeoutSealer { + pub fn new(config: &StateKeeperConfig) -> Self { Self { - conditional_sealer, - unconditional_sealers, - miniblock_sealers, + block_commit_deadline_ms: config.block_commit_deadline_ms, + miniblock_commit_deadline_ms: config.miniblock_commit_deadline_ms, } } +} - /// Creates a sealer function that would seal the batch because of the timeout. - fn timeout_batch_sealer(block_commit_deadline_ms: u64) -> Box { +impl IoSealCriteria for TimeoutSealer { + fn should_seal_l1_batch_unconditionally(&mut self, manager: &UpdatesManager) -> bool { const RULE_NAME: &str = "no_txs_timeout"; - Box::new(move |manager| { - // Verify timestamp - let should_seal_timeout = - millis_since(manager.batch_timestamp()) > block_commit_deadline_ms; - - if should_seal_timeout { - AGGREGATION_METRICS.inc_criterion(RULE_NAME); - tracing::debug!( - "Decided to seal L1 batch using rule `{RULE_NAME}`; batch timestamp: {}, \ - commit deadline: {block_commit_deadline_ms}ms", - extractors::display_timestamp(manager.batch_timestamp()) - ); - } - should_seal_timeout - }) - } - - /// Creates a sealer function that would seal the miniblock because of the timeout. - /// Will only trigger for the non-empty miniblocks. 
- fn timeout_miniblock_sealer(miniblock_commit_deadline_ms: u64) -> Box { - if miniblock_commit_deadline_ms < 1000 { - panic!("`miniblock_commit_deadline_ms` should be at least 1000, because miniblocks must have different timestamps"); + if manager.pending_executed_transactions_len() == 0 { + // Regardless of which sealers are provided, we never want to seal an empty batch. + return false; } - Box::new(move |manager| { - !manager.miniblock.executed_transactions.is_empty() - && millis_since(manager.miniblock.timestamp) > miniblock_commit_deadline_ms - }) - } - - pub(super) fn should_seal_l1_batch( - &self, - l1_batch_number: u32, - block_open_timestamp_ms: u128, - tx_count: usize, - block_data: &SealData, - tx_data: &SealData, - ) -> SealResolution { - if let Some(sealer) = &self.conditional_sealer { - sealer.should_seal_l1_batch( - l1_batch_number, - block_open_timestamp_ms, - tx_count, - block_data, - tx_data, - ) - } else { - SealResolution::NoSeal + let block_commit_deadline_ms = self.block_commit_deadline_ms; + // Verify timestamp + let should_seal_timeout = + millis_since(manager.batch_timestamp()) > block_commit_deadline_ms; + + if should_seal_timeout { + AGGREGATION_METRICS.inc_criterion(RULE_NAME); + tracing::debug!( + "Decided to seal L1 batch using rule `{RULE_NAME}`; batch timestamp: {}, \ + commit deadline: {block_commit_deadline_ms}ms", + extractors::display_timestamp(manager.batch_timestamp()) + ); } + should_seal_timeout } - pub(super) fn should_seal_l1_batch_unconditionally( - &self, - updates_manager: &UpdatesManager, - ) -> bool { - // Regardless of which sealers are provided, we never want to seal an empty batch. - updates_manager.pending_executed_transactions_len() != 0 - && self - .unconditional_sealers - .iter() - .any(|sealer| (sealer)(updates_manager)) - } - - pub(super) fn should_seal_miniblock(&self, updates_manager: &UpdatesManager) -> bool { - // Unlike with the L1 batch, we don't check the number of transactions in the miniblock, - // because we might want to seal the miniblock even if it's empty (e.g. on an external node, - // where we have to replicate the state of the main node, including the last (empty) miniblock of the batch). - // The check for the number of transactions is expected to be done, if relevant, in the `miniblock_sealer` - // directly. - self.miniblock_sealers - .iter() - .any(|sealer| (sealer)(updates_manager)) + fn should_seal_miniblock(&mut self, manager: &UpdatesManager) -> bool { + !manager.miniblock.executed_transactions.is_empty() + && millis_since(manager.miniblock.timestamp) > self.miniblock_commit_deadline_ms } } @@ -280,20 +197,23 @@ mod tests { /// This test mostly exists to make sure that we can't seal empty miniblocks on the main node. #[test] fn timeout_miniblock_sealer() { - let timeout_miniblock_sealer = SealManager::timeout_miniblock_sealer(10_000); + let mut timeout_miniblock_sealer = TimeoutSealer { + block_commit_deadline_ms: 10_000, + miniblock_commit_deadline_ms: 10_000, + }; let mut manager = create_updates_manager(); // Empty miniblock should not trigger. manager.miniblock.timestamp = seconds_since_epoch() - 10; assert!( - !timeout_miniblock_sealer(&manager), + !timeout_miniblock_sealer.should_seal_miniblock(&manager), "Empty miniblock shouldn't be sealed" ); // Non-empty miniblock should trigger. 
apply_tx_to_manager(&mut manager); assert!( - timeout_miniblock_sealer(&manager), + timeout_miniblock_sealer.should_seal_miniblock(&manager), "Non-empty miniblock with old timestamp should be sealed" ); @@ -302,7 +222,7 @@ mod tests { // by other tests). manager.miniblock.timestamp = seconds_since_epoch(); assert!( - !timeout_miniblock_sealer(&manager), + !timeout_miniblock_sealer.should_seal_miniblock(&manager), "Non-empty miniblock with too recent timestamp shouldn't be sealed" ); } diff --git a/core/lib/zksync_core/src/state_keeper/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/tests/mod.rs index 2ef9c6754722..a3e51af8a436 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/mod.rs @@ -9,8 +9,8 @@ use std::{ }; use multivm::interface::{ - CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, - TxExecutionMode, VmExecutionResultAndLogs, VmExecutionStatistics, + CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, Refunds, + SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmExecutionStatistics, }; use multivm::vm_latest::constants::BLOCK_GAS_LIMIT; use zksync_config::configs::chain::StateKeeperConfig; @@ -18,19 +18,19 @@ use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{ aggregated_operations::AggregatedActionType, - block::legacy_miniblock_hash, - block::miniblock_hash, - block::BlockGasCount, - block::MiniblockReexecuteData, + block::{legacy_miniblock_hash, miniblock_hash, BlockGasCount, MiniblockReexecuteData}, commitment::{L1BatchMetaParameters, L1BatchMetadata}, fee::Fee, l2::L2Tx, transaction_request::PaymasterParams, - tx::tx_execution_info::VmExecutionLogs, + tx::tx_execution_info::{ExecutionMetrics, VmExecutionLogs}, Address, L1BatchNumber, L2ChainId, LogQuery, MiniblockNumber, Nonce, ProtocolVersionId, StorageLogQuery, StorageLogQueryType, Timestamp, Transaction, H256, U256, }; +mod tester; + +pub(crate) use self::tester::TestBatchExecutorBuilder; use self::tester::{ bootloader_tip_out_of_gas, pending_batch_data, random_tx, rejected_exec, successful_exec, successful_exec_with_metrics, TestScenario, @@ -40,14 +40,12 @@ use crate::state_keeper::{ keeper::POLL_WAIT_DURATION, seal_criteria::{ criteria::{GasCriterion, SlotsCriterion}, - ConditionalSealer, SealManager, + ConditionalSealer, }, types::ExecutionMetricsForCriteria, updates::UpdatesManager, }; -mod tester; - pub(super) static BASE_SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); @@ -85,7 +83,7 @@ pub(super) fn default_l1_batch_env( } } -pub(super) fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { +pub(crate) fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { L1BatchMetadata { root_hash: H256::from_low_u64_be(number.into()), rollup_last_leaf_index: u64::from(number) + 20, @@ -112,9 +110,9 @@ pub(super) fn default_vm_block_result() -> FinishedL1Batch { FinishedL1Batch { block_tip_execution_result: VmExecutionResultAndLogs { result: ExecutionResult::Success { output: vec![] }, - logs: Default::default(), - statistics: Default::default(), - refunds: Default::default(), + logs: VmExecutionLogs::default(), + statistics: VmExecutionStatistics::default(), + refunds: Refunds::default(), }, final_execution_state: CurrentExecutionState { events: vec![], @@ -123,6 +121,7 @@ pub(super) fn default_vm_block_result() -> FinishedL1Batch { l2_to_l1_logs: vec![], 
total_log_queries: 0, cycles_used: 0, + storage_refunds: Vec::new(), }, final_bootloader_memory: Some(vec![]), } @@ -137,7 +136,7 @@ pub(super) fn create_updates_manager() -> UpdatesManager { ) } -pub(super) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> L2Tx { +pub(crate) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> L2Tx { let fee = Fee { gas_limit: 1000_u64.into(), max_fee_per_gas: fee_per_gas.into(), @@ -151,7 +150,7 @@ pub(super) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> L fee, U256::zero(), L2ChainId::from(271), - &H256::repeat_byte(0x11), + &H256::random(), None, PaymasterParams::default(), ) @@ -192,7 +191,7 @@ pub(super) fn create_execution_result( computational_gas_used: 0, total_log_queries, }, - refunds: Default::default(), + refunds: Refunds::default(), } } @@ -240,23 +239,12 @@ impl Query { async fn sealed_by_number_of_txs() { let config = StateKeeperConfig { transaction_slots: 2, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], - ); - - let scenario = TestScenario::new(); + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(SlotsCriterion)]); - scenario + TestScenario::new() + .seal_miniblock_when(|updates| updates.miniblock.executed_transactions.len() == 1) .next_tx("First tx", random_tx(1), successful_exec()) .miniblock_sealed("Miniblock 1") .next_tx("Second tx", random_tx(2), successful_exec()) @@ -272,19 +260,9 @@ async fn sealed_by_gas() { max_single_tx_gas: 62_002, reject_tx_at_gas_percentage: 1.0, close_block_at_gas_percentage: 0.5, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(GasCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], - ); + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(GasCriterion)]); let l1_gas_per_tx = BlockGasCount { commit: 1, // Both txs together with block_base_cost would bring it over the block 31_001 commit bound. 
@@ -293,10 +271,13 @@ async fn sealed_by_gas() { }; let execution_result = successful_exec_with_metrics(ExecutionMetricsForCriteria { l1_gas: l1_gas_per_tx, - execution_metrics: Default::default(), + execution_metrics: ExecutionMetrics::default(), }); TestScenario::new() + .seal_miniblock_when(|updates| { + updates.miniblock.executed_transactions.len() == 1 + }) .next_tx("First tx", random_tx(1), execution_result.clone()) .miniblock_sealed_with("Miniblock with a single tx", move |updates| { assert_eq!( @@ -328,18 +309,11 @@ async fn sealed_by_gas_then_by_num_tx() { reject_tx_at_gas_percentage: 1.0, close_block_at_gas_percentage: 0.5, transaction_slots: 3, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( + let sealer = ConditionalSealer::with_sealers( config, vec![Box::new(GasCriterion), Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], ); let execution_result = successful_exec_with_metrics(ExecutionMetricsForCriteria { @@ -348,11 +322,12 @@ async fn sealed_by_gas_then_by_num_tx() { prove: 0, execute: 0, }, - execution_metrics: Default::default(), + execution_metrics: ExecutionMetrics::default(), }); // 1st tx is sealed by gas sealer; 2nd, 3rd, & 4th are sealed by slots sealer. TestScenario::new() + .seal_miniblock_when(|updates| updates.miniblock.executed_transactions.len() == 1) .next_tx("First tx", random_tx(1), execution_result) .miniblock_sealed("Miniblock 1") .batch_sealed("Batch 1") @@ -371,24 +346,13 @@ async fn sealed_by_gas_then_by_num_tx() { async fn batch_sealed_before_miniblock_does() { let config = StateKeeperConfig { transaction_slots: 2, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 3 - })], - ); - - let scenario = TestScenario::new(); + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(SlotsCriterion)]); // Miniblock sealer will not return true before the batch is sealed because the batch only has 2 txs. 
- scenario + TestScenario::new() + .seal_miniblock_when(|updates| updates.miniblock.executed_transactions.len() == 3) .next_tx("First tx", random_tx(1), successful_exec()) .next_tx("Second tx", random_tx(2), successful_exec()) .miniblock_sealed_with("Miniblock with two txs", |updates| { @@ -403,54 +367,17 @@ async fn batch_sealed_before_miniblock_does() { .await; } -#[tokio::test] -async fn basic_flow() { - let config = StateKeeperConfig { - transaction_slots: 2, - ..Default::default() - }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], - ); - - TestScenario::new() - .next_tx("First tx", random_tx(1), successful_exec()) - .miniblock_sealed("Miniblock 1") - .next_tx("Second tx", random_tx(2), successful_exec()) - .miniblock_sealed("Miniblock 2") - .batch_sealed("Batch 1") - .run(sealer) - .await; -} - #[tokio::test] async fn rejected_tx() { let config = StateKeeperConfig { transaction_slots: 2, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], - ); + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(SlotsCriterion)]); let rejected_tx = random_tx(1); TestScenario::new() + .seal_miniblock_when(|updates| updates.miniblock.executed_transactions.len() == 1) .next_tx("Rejected tx", rejected_tx.clone(), rejected_exec()) .tx_rejected("Tx got rejected", rejected_tx, None) .next_tx("Successful tx", random_tx(2), successful_exec()) @@ -466,24 +393,15 @@ async fn rejected_tx() { async fn bootloader_tip_out_of_gas_flow() { let config = StateKeeperConfig { transaction_slots: 2, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], - ); + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(SlotsCriterion)]); let first_tx = random_tx(1); let bootloader_out_of_gas_tx = random_tx(2); let third_tx = random_tx(3); TestScenario::new() + .seal_miniblock_when(|updates| updates.miniblock.executed_transactions.len() == 1) .next_tx("First tx", first_tx, successful_exec()) .miniblock_sealed("Miniblock with 1st tx") .next_tx( @@ -513,19 +431,9 @@ async fn bootloader_tip_out_of_gas_flow() { async fn pending_batch_is_applied() { let config = StateKeeperConfig { transaction_slots: 3, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], - ); + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(SlotsCriterion)]); let pending_batch = pending_batch_data(vec![ MiniblockReexecuteData { @@ -546,6 +454,7 @@ async fn pending_batch_is_applied() { // We configured state 
keeper to use different system contract hashes, so it must seal the pending batch immediately. TestScenario::new() + .seal_miniblock_when(|updates| updates.miniblock.executed_transactions.len() == 1) .load_pending_batch(pending_batch) .next_tx("Final tx of batch", random_tx(3), successful_exec()) .miniblock_sealed_with("Miniblock with a single tx", |updates| { @@ -578,19 +487,14 @@ async fn unconditional_sealing() { let config = StateKeeperConfig { transaction_slots: 2, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(move |_| { - batch_seal_trigger_checker.load(Ordering::Relaxed) - })], - vec![Box::new(move |upd_manager| { - if upd_manager.pending_executed_transactions_len() != 0 + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(SlotsCriterion)]); + + TestScenario::new() + .seal_l1_batch_when(move |_| batch_seal_trigger_checker.load(Ordering::Relaxed)) + .seal_miniblock_when(move |manager| { + if manager.pending_executed_transactions_len() != 0 && start.elapsed() >= seal_miniblock_after { batch_seal_trigger.store(true, Ordering::Relaxed); @@ -598,10 +502,7 @@ async fn unconditional_sealing() { } else { false } - })], - ); - - TestScenario::new() + }) .next_tx("The only tx", random_tx(1), successful_exec()) .no_txs_until_next_action("We don't give transaction to wait for miniblock to be sealed") .miniblock_sealed("Miniblock is sealed with just one tx") @@ -616,19 +517,9 @@ async fn unconditional_sealing() { async fn miniblock_timestamp_after_pending_batch() { let config = StateKeeperConfig { transaction_slots: 2, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], - ); + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(SlotsCriterion)]); let pending_batch = pending_batch_data(vec![MiniblockReexecuteData { number: MiniblockNumber(1), @@ -639,6 +530,7 @@ async fn miniblock_timestamp_after_pending_batch() { }]); TestScenario::new() + .seal_miniblock_when(|updates| updates.miniblock.executed_transactions.len() == 1) .load_pending_batch(pending_batch) .next_tx( "First tx after pending batch", @@ -668,23 +560,12 @@ async fn time_is_monotonic() { let config = StateKeeperConfig { transaction_slots: 2, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], - ); - - let scenario = TestScenario::new(); + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(SlotsCriterion)]); - scenario + TestScenario::new() + .seal_miniblock_when(|updates| updates.miniblock.executed_transactions.len() == 1) .next_tx("First tx", random_tx(1), successful_exec()) .miniblock_sealed_with("Miniblock 1", move |updates| { let min_expected = timestamp_first_miniblock.load(Ordering::Relaxed); @@ -730,21 +611,12 @@ async fn time_is_monotonic() { async fn protocol_upgrade() { let config = 
StateKeeperConfig { transaction_slots: 2, - ..Default::default() + ..StateKeeperConfig::default() }; - let conditional_sealer = Some(ConditionalSealer::with_sealers( - config, - vec![Box::new(SlotsCriterion)], - )); - let sealer = SealManager::custom( - conditional_sealer, - vec![Box::new(|_| false)], - vec![Box::new(|updates| { - updates.miniblock.executed_transactions.len() == 1 - })], - ); + let sealer = ConditionalSealer::with_sealers(config, vec![Box::new(SlotsCriterion)]); TestScenario::new() + .seal_miniblock_when(|updates| updates.miniblock.executed_transactions.len() == 1) .next_tx("First tx", random_tx(1), successful_exec()) .miniblock_sealed("Miniblock 1") .increment_protocol_version("Increment protocol version") diff --git a/core/lib/zksync_core/src/state_keeper/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/tests/tester.rs index b238e02896cc..3595816309bc 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/tester.rs @@ -4,7 +4,7 @@ use tokio::sync::{mpsc, watch}; use std::{ collections::{HashMap, HashSet, VecDeque}, convert::TryInto, - sync::{Arc, RwLock}, + fmt, time::{Duration, Instant}, }; @@ -22,7 +22,7 @@ use zksync_types::{ use crate::state_keeper::{ batch_executor::{BatchExecutorHandle, Command, L1BatchExecutorBuilder, TxExecutionResult}, io::{MiniblockParams, PendingBatchData, StateKeeperIO}, - seal_criteria::SealManager, + seal_criteria::{ConditionalSealer, IoSealCriteria}, tests::{ create_l2_transaction, default_l1_batch_env, default_vm_block_result, BASE_SYSTEM_CONTRACTS, }, @@ -45,10 +45,23 @@ const FEE_ACCOUNT: Address = Address::repeat_byte(0x11); /// it would be easier for developer to find the problem. /// /// See any test in the `mod.rs` file to get a visual example. -#[derive(Debug)] pub(crate) struct TestScenario { actions: VecDeque, pending_batch: Option, + l1_batch_seal_fn: Box, + miniblock_seal_fn: Box, +} + +type SealFn = dyn FnMut(&UpdatesManager) -> bool + Send; + +impl fmt::Debug for TestScenario { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("TestScenario") + .field("actions", &self.actions) + .field("pending_batch", &self.pending_batch) + .finish_non_exhaustive() + } } impl TestScenario { @@ -56,6 +69,8 @@ impl TestScenario { Self { actions: VecDeque::new(), pending_batch: None, + l1_batch_seal_fn: Box::new(|_| false), + miniblock_seal_fn: Box::new(|_| false), } } @@ -146,35 +161,45 @@ impl TestScenario { /// Expects the batch to be sealed. /// Accepts a function that would be given access to the received batch seal params, which can implement /// additional assertions on the sealed batch. - pub(crate) fn batch_sealed_with< + pub(crate) fn batch_sealed_with(mut self, description: &'static str, f: F) -> Self + where F: FnOnce(&VmExecutionResultAndLogs, &UpdatesManager, &L1BatchEnv) + Send + 'static, - >( - mut self, - description: &'static str, - f: F, - ) -> Self { + { self.actions .push_back(ScenarioItem::BatchSeal(description, Some(Box::new(f)))); self } + pub(crate) fn seal_l1_batch_when(mut self, seal_fn: F) -> Self + where + F: FnMut(&UpdatesManager) -> bool + Send + 'static, + { + self.l1_batch_seal_fn = Box::new(seal_fn); + self + } + + pub(crate) fn seal_miniblock_when(mut self, seal_fn: F) -> Self + where + F: FnMut(&UpdatesManager) -> bool + Send + 'static, + { + self.miniblock_seal_fn = Box::new(seal_fn); + self + } + /// Launches the test. 
-    /// Provided `SealManager` is expected to be externally configured to adhere the written scenario logic.
-    pub(crate) async fn run(self, sealer: SealManager) {
+    /// The provided `ConditionalSealer` is expected to be externally configured to adhere to the written scenario logic.
+    pub(crate) async fn run(self, sealer: ConditionalSealer) {
         assert!(!self.actions.is_empty(), "Test scenario can't be empty");

         let batch_executor_base = TestBatchExecutorBuilder::new(&self);
-
         let (stop_sender, stop_receiver) = watch::channel(false);
         let io = TestIO::new(stop_sender, self);
-
         let sk = ZkSyncStateKeeper::new(
             stop_receiver,
             Box::new(io),
             Box::new(batch_executor_base),
             sealer,
         );
-
         let sk_thread = tokio::spawn(sk.run());

         // We must assume that *theoretically* state keeper may ignore the stop signal from IO once scenario is
@@ -343,13 +368,13 @@ impl std::fmt::Debug for ScenarioItem {

 type ExpectedTransactions = VecDeque<HashMap<H256, VecDeque<TxExecutionResult>>>;

-#[derive(Debug)]
+#[derive(Debug, Default)]
 pub(crate) struct TestBatchExecutorBuilder {
     /// Sequence of known transaction execution results per batch.
     /// We need to store txs for each batch separately, since the same transaction
     /// can be executed in several batches (e.g. after an `ExcludeAndSeal` rollback).
     /// When initializing each batch, we will `pop_front` known txs for the corresponding executor.
-    txs: Arc<RwLock<ExpectedTransactions>>,
+    txs: ExpectedTransactions,
     /// Set of transactions that would be rolled back at least once.
     rollback_set: HashSet<H256>,
 }
@@ -406,17 +431,23 @@ impl TestBatchExecutorBuilder {
         // for the initialization of the "next-to-last" batch.
         txs.push_back(HashMap::default());

-        Self {
-            txs: Arc::new(RwLock::new(txs)),
-            rollback_set,
-        }
+        Self { txs, rollback_set }
+    }
+
+    /// Adds successful transactions to be executed in a single L1 batch.
+    pub(crate) fn push_successful_transactions(&mut self, tx_hashes: &[H256]) {
+        let txs = tx_hashes
+            .iter()
+            .copied()
+            .map(|tx_hash| (tx_hash, VecDeque::from([successful_exec()])));
+        self.txs.push_back(txs.collect());
+    }
 }

 #[async_trait]
 impl L1BatchExecutorBuilder for TestBatchExecutorBuilder {
     async fn init_batch(
-        &self,
+        &mut self,
         _l1batch_params: L1BatchEnv,
         _system_env: SystemEnv,
     ) -> BatchExecutorHandle {
@@ -424,7 +455,7 @@
         let executor = TestBatchExecutor::new(
             commands_receiver,
-            self.txs.write().unwrap().pop_front().unwrap(),
+            self.txs.pop_front().unwrap(),
             self.rollback_set.clone(),
         );
         let handle = tokio::task::spawn_blocking(move || executor.run());
@@ -570,6 +601,16 @@ impl TestIO {
     }
 }

+impl IoSealCriteria for TestIO {
+    fn should_seal_l1_batch_unconditionally(&mut self, manager: &UpdatesManager) -> bool {
+        (self.scenario.l1_batch_seal_fn)(manager)
+    }
+
+    fn should_seal_miniblock(&mut self, manager: &UpdatesManager) -> bool {
+        (self.scenario.miniblock_seal_fn)(manager)
+    }
+}
+
 #[async_trait]
 impl StateKeeperIO for TestIO {
     fn current_l1_batch_number(&self) -> L1BatchNumber {
diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs
index ef7f7c588ebf..9bf4f45a732c 100644
--- a/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs
+++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs
@@ -179,7 +179,7 @@ impl BatchStatusUpdater {
         else {
             // We cannot recover from an external API inconsistency.
panic!( - "Node API is inconsistent: miniblock {} was reported to be a part of {} L1batch, \ + "Node API is inconsistent: miniblock {} was reported to be a part of {} L1 batch, \ but API has no information about this miniblock", start_miniblock, batch ); }; diff --git a/core/lib/zksync_core/src/sync_layer/cached_main_node_client.rs b/core/lib/zksync_core/src/sync_layer/cached_main_node_client.rs deleted file mode 100644 index 91202db2ffd1..000000000000 --- a/core/lib/zksync_core/src/sync_layer/cached_main_node_client.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::collections::HashMap; - -use zksync_types::{api::en::SyncBlock, MiniblockNumber, U64}; -use zksync_web3_decl::{ - jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, - namespaces::{EnNamespaceClient, EthNamespaceClient}, - RpcResult, -}; - -use super::metrics::{CachedMethod, FETCHER_METRICS}; - -/// Maximum number of concurrent requests to the main node. -const MAX_CONCURRENT_REQUESTS: usize = 100; - -/// This is a temporary implementation of a cache layer for the main node HTTP requests. -/// It was introduced to quickly develop a way to fetch data from the main node concurrently, -/// while not changing the logic of the fetcher itself. -/// It is intentionally designed in an "easy-to-inject, easy-to-remove" way, so that we can easily -/// switch it to a more performant implementation later. -/// -/// The main part of this structure's logic is the ability to concurrently populate the cache -/// of responses and then consume them in a non-concurrent way. -/// -/// Note: not every request is guaranted cached, only the ones that are used to build the action queue. -/// For example, if batch status updater requests a miniblock header long after it was processed by the main -/// fetcher routine, most likely it'll be a cache miss. -#[derive(Debug)] -pub(super) struct CachedMainNodeClient { - /// HTTP client. - client: HttpClient, - /// Earliest miniblock number that is not yet cached. - /// Used as a marker to refill the cache. - next_refill_at: MiniblockNumber, - blocks: HashMap, -} - -impl CachedMainNodeClient { - pub fn build_client(main_node_url: &str) -> Self { - let client = HttpClientBuilder::default() - .build(main_node_url) - .expect("Unable to create a main node client"); - Self { - client, - next_refill_at: MiniblockNumber(0), - blocks: Default::default(), - } - } - - /// Cached version of [`HttpClient::sync_l2_block`]. - pub async fn sync_l2_block(&self, miniblock: MiniblockNumber) -> RpcResult> { - let block = self.blocks.get(&miniblock).cloned(); - FETCHER_METRICS.cache_total[&CachedMethod::SyncL2Block].inc(); - match block { - Some(block) => { - FETCHER_METRICS.cache_hit[&CachedMethod::SyncL2Block].inc(); - Ok(Some(block)) - } - None => self.client.sync_l2_block(miniblock, true).await, - } - } - - /// Re-export of [`HttpClient::get_block_number`]. - /// Added to not expose the internal client. - pub async fn get_block_number(&self) -> RpcResult { - self.client.get_block_number().await - } - - /// Removes a miniblock data from the cache. - pub fn forget_miniblock(&mut self, miniblock: MiniblockNumber) { - self.blocks.remove(&miniblock); - } - - pub async fn populate_miniblocks_cache( - &mut self, - current_miniblock: MiniblockNumber, - last_miniblock: MiniblockNumber, - ) { - // This method may be invoked frequently, but in order to take advantage of the concurrent fetching, - // we only need to do it once in a while. 
If we'll do it too often, we'll end up adding 1 element to
-        // the cache at a time, which eliminates the cache's purpose.
-        if current_miniblock < self.next_refill_at {
-            return;
-        }
-        let populate_latency = FETCHER_METRICS.cache_populate.start();
-        let last_miniblock_to_fetch =
-            last_miniblock.min(current_miniblock + MAX_CONCURRENT_REQUESTS as u32);
-        let task_futures = (current_miniblock.0..last_miniblock_to_fetch.0)
-            .map(MiniblockNumber)
-            .filter(|&miniblock| {
-                // If the miniblock is already in the cache, we don't need to fetch it.
-                !self.has_miniblock(miniblock)
-            })
-            .map(|block_number| self.client.sync_l2_block(block_number, true));
-
-        let results = futures::future::join_all(task_futures).await;
-        for result in results {
-            if let Ok(Some(block)) = result {
-                self.next_refill_at = self.next_refill_at.max(block.number + 1);
-                self.blocks.insert(block.number, block);
-            } else {
-                // At the cache level, it's fine to just silence errors.
-                // The entry won't be included into the cache, and whoever uses the cache, will have to process
-                // a cache miss as they will.
-                FETCHER_METRICS.cache_errors.inc();
-            }
-        }
-        populate_latency.observe();
-    }
-
-    fn has_miniblock(&self, miniblock: MiniblockNumber) -> bool {
-        self.blocks.contains_key(&miniblock)
-    }
-}
diff --git a/core/lib/zksync_core/src/sync_layer/client.rs b/core/lib/zksync_core/src/sync_layer/client.rs
new file mode 100644
index 000000000000..148953572477
--- /dev/null
+++ b/core/lib/zksync_core/src/sync_layer/client.rs
@@ -0,0 +1,249 @@
+//! Client abstractions for syncing between the external node and the main node.
+
+use anyhow::Context as _;
+use async_trait::async_trait;
+
+use std::{collections::HashMap, convert::TryInto, fmt};
+
+use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode};
+use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS;
+use zksync_types::{
+    api::{self, en::SyncBlock},
+    get_code_key, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, U64,
+};
+use zksync_web3_decl::{
+    jsonrpsee::http_client::{HttpClient, HttpClientBuilder},
+    namespaces::{EnNamespaceClient, EthNamespaceClient, ZksNamespaceClient},
+};
+
+use super::metrics::{CachedMethod, FETCHER_METRICS};
+
+/// Maximum number of concurrent requests to the main node.
+const MAX_CONCURRENT_REQUESTS: usize = 100;
+
+/// Client abstracting connection to the main node.
+#[async_trait]
+pub trait MainNodeClient: 'static + Send + Sync + fmt::Debug {
+    async fn fetch_system_contract_by_hash(&self, hash: H256)
+        -> anyhow::Result<SystemContractCode>;
+
+    async fn fetch_base_system_contracts(
+        &self,
+        hashes: BaseSystemContractsHashes,
+    ) -> anyhow::Result<BaseSystemContracts> {
+        Ok(BaseSystemContracts {
+            bootloader: self
+                .fetch_system_contract_by_hash(hashes.bootloader)
+                .await?,
+            default_aa: self
+                .fetch_system_contract_by_hash(hashes.default_aa)
+                .await?,
+        })
+    }
+
+    async fn fetch_genesis_contract_bytecode(
+        &self,
+        address: Address,
+    ) -> anyhow::Result<Option<Vec<u8>>>;
+
+    async fn fetch_protocol_version(
+        &self,
+        protocol_version: ProtocolVersionId,
+    ) -> anyhow::Result<api::ProtocolVersion>;
+
+    async fn fetch_genesis_l1_batch_hash(&self) -> anyhow::Result<H256>;
+
+    async fn fetch_l2_block_number(&self) -> anyhow::Result<MiniblockNumber>;
+
+    async fn fetch_l2_block(
+        &self,
+        number: MiniblockNumber,
+        with_transactions: bool,
+    ) -> anyhow::Result<Option<SyncBlock>>;
+}
+
+impl dyn MainNodeClient {
+    /// Creates a client based on JSON-RPC.
+    pub fn json_rpc(url: &str) -> anyhow::Result<HttpClient> {
+        HttpClientBuilder::default().build(url).map_err(Into::into)
+    }
+}
+
+#[async_trait]
+impl MainNodeClient for HttpClient {
+    async fn fetch_system_contract_by_hash(
+        &self,
+        hash: H256,
+    ) -> anyhow::Result<SystemContractCode> {
+        let bytecode = self.get_bytecode_by_hash(hash).await?.with_context(|| {
+            format!(
+                "Base system contract bytecode is absent on the main node. Dependency hash: {hash:?}"
+            )
+        })?;
+        anyhow::ensure!(
+            hash == zksync_utils::bytecode::hash_bytecode(&bytecode),
+            "Got invalid base system contract bytecode from main node"
+        );
+        Ok(SystemContractCode {
+            code: zksync_utils::bytes_to_be_words(bytecode),
+            hash,
+        })
+    }
+
+    async fn fetch_genesis_contract_bytecode(
+        &self,
+        address: Address,
+    ) -> anyhow::Result<Option<Vec<u8>>> {
+        const GENESIS_BLOCK: api::BlockIdVariant =
+            api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(U64([0])));
+
+        let code_key = get_code_key(&address);
+        let code_hash = self
+            .get_storage_at(
+                ACCOUNT_CODE_STORAGE_ADDRESS,
+                zksync_utils::h256_to_u256(*code_key.key()),
+                Some(GENESIS_BLOCK),
+            )
+            .await
+            .context("Unable to query storage at genesis state")?;
+        self.get_bytecode_by_hash(code_hash)
+            .await
+            .context("Unable to query system contract bytecode")
+    }
+
+    async fn fetch_protocol_version(
+        &self,
+        protocol_version: ProtocolVersionId,
+    ) -> anyhow::Result<api::ProtocolVersion> {
+        Ok(self
+            .get_protocol_version(Some(protocol_version as u16))
+            .await?
+            .with_context(|| {
+                format!("Protocol version {protocol_version:?} must exist on main node")
+            })?)
+    }
+
+    async fn fetch_genesis_l1_batch_hash(&self) -> anyhow::Result<H256> {
+        let genesis_l1_batch = self
+            .get_l1_batch_details(L1BatchNumber(0))
+            .await
+            .context("couldn't get genesis block from the main node")?
+            .context("main node did not return a genesis block")?;
+        genesis_l1_batch
+            .base
+            .root_hash
+            .context("empty genesis block hash")
+    }
+
+    async fn fetch_l2_block_number(&self) -> anyhow::Result<MiniblockNumber> {
+        let U64([number]) = self.get_block_number().await?;
+        Ok(MiniblockNumber(number.try_into()?))
+    }
+
+    async fn fetch_l2_block(
+        &self,
+        number: MiniblockNumber,
+        with_transactions: bool,
+    ) -> anyhow::Result<Option<SyncBlock>> {
+        self.sync_l2_block(number, with_transactions)
+            .await
+            .map_err(Into::into)
+    }
+}
+
+/// This is a temporary implementation of a cache layer for the main node HTTP requests.
+/// It was introduced to quickly develop a way to fetch data from the main node concurrently,
+/// while not changing the logic of the fetcher itself.
+/// It is intentionally designed in an "easy-to-inject, easy-to-remove" way, so that we can easily
+/// switch it to a more performant implementation later.
+///
+/// The main part of this structure's logic is the ability to concurrently populate the cache
+/// of responses and then consume them in a non-concurrent way.
+///
+/// Note: not every request is guaranteed to be cached, only the ones that are used to build the action queue.
+/// For example, if the batch status updater requests a miniblock header long after it was processed by the main
+/// fetcher routine, most likely it'll be a cache miss.
+#[derive(Debug)]
+pub(super) struct CachingMainNodeClient {
+    client: Box<dyn MainNodeClient>,
+    /// Earliest miniblock number that is not yet cached. Used as a marker to refill the cache.
+    next_refill_at: MiniblockNumber,
+    blocks: HashMap<MiniblockNumber, SyncBlock>,
+}
+
+impl CachingMainNodeClient {
+    pub fn new(client: Box<dyn MainNodeClient>) -> Self {
+        Self {
+            client,
+            next_refill_at: MiniblockNumber(0),
+            blocks: Default::default(),
+        }
+    }
+
+    /// Cached version of [`HttpClient::sync_l2_block`].
+    pub async fn fetch_l2_block(
+        &self,
+        miniblock: MiniblockNumber,
+    ) -> anyhow::Result<Option<SyncBlock>> {
+        let block = self.blocks.get(&miniblock).cloned();
+        FETCHER_METRICS.cache_total[&CachedMethod::SyncL2Block].inc();
+        match block {
+            Some(block) => {
+                FETCHER_METRICS.cache_hit[&CachedMethod::SyncL2Block].inc();
+                Ok(Some(block))
+            }
+            None => self.client.fetch_l2_block(miniblock, true).await,
+        }
+    }
+
+    /// Re-export of [`MainNodeClient::fetch_l2_block_number()`]. Added to not expose the internal client.
+    pub async fn fetch_l2_block_number(&self) -> anyhow::Result<MiniblockNumber> {
+        self.client.fetch_l2_block_number().await
+    }
+
+    /// Removes a miniblock's data from the cache.
+    pub fn forget_miniblock(&mut self, miniblock: MiniblockNumber) {
+        self.blocks.remove(&miniblock);
+    }
+
+    pub async fn populate_miniblocks_cache(
+        &mut self,
+        current_miniblock: MiniblockNumber,
+        last_miniblock: MiniblockNumber,
+    ) {
+        // This method may be invoked frequently, but in order to take advantage of the concurrent fetching,
+        // we only need to do it once in a while. If we do it too often, we'll end up adding one element to
+        // the cache at a time, which defeats the cache's purpose.
+        if current_miniblock < self.next_refill_at {
+            return;
+        }
+        let populate_latency = FETCHER_METRICS.cache_populate.start();
+        let last_miniblock_to_fetch =
+            last_miniblock.min(current_miniblock + MAX_CONCURRENT_REQUESTS as u32);
+        let task_futures = (current_miniblock.0..last_miniblock_to_fetch.0)
+            .map(MiniblockNumber)
+            .filter(|&miniblock| {
+                // If the miniblock is already in the cache, we don't need to fetch it.
+                !self.has_miniblock(miniblock)
+            })
+            .map(|block_number| self.client.fetch_l2_block(block_number, true));
+
+        let results = futures::future::join_all(task_futures).await;
+        for result in results {
+            if let Ok(Some(block)) = result {
+                self.next_refill_at = self.next_refill_at.max(block.number + 1);
+                self.blocks.insert(block.number, block);
+            } else {
+                // At the cache level, it's fine to just silence errors.
+                // The entry won't be included into the cache, and whoever uses the cache
+                // will have to handle the cache miss themselves.
+                FETCHER_METRICS.cache_errors.inc();
+            }
+        }
+        populate_latency.observe();
+    }
+
+    fn has_miniblock(&self, miniblock: MiniblockNumber) -> bool {
+        self.blocks.contains_key(&miniblock)
+    }
+}
diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs
index 188f7bd42339..c451587ff025 100644
--- a/core/lib/zksync_core/src/sync_layer/external_io.rs
+++ b/core/lib/zksync_core/src/sync_layer/external_io.rs
@@ -18,10 +18,7 @@ use zksync_types::{
 use zksync_utils::{be_words_to_bytes, bytes_to_be_words};

 use super::{
-    genesis::{
-        fetch_protocol_version, fetch_sync_block_without_transactions,
-        fetch_system_contract_by_hash,
-    },
+    client::MainNodeClient,
     sync_action::{ActionQueue, SyncAction},
     SyncState,
 };
@@ -34,7 +31,7 @@ use crate::{
             MiniblockParams, PendingBatchData, StateKeeperIO,
         },
         metrics::{KEEPER_METRICS, L1_BATCH_METRICS},
-        seal_criteria::SealerFn,
+        seal_criteria::IoSealCriteria,
         updates::UpdatesManager,
     },
 };
@@ -42,48 +39,6 @@
 /// The interval between the action queue polling attempts for the new actions.
 const POLL_INTERVAL: Duration = Duration::from_millis(100);
 
-/// In the external node we don't actually decide whether we want to seal l1 batch or l2 block.
-/// We must replicate the state as it's present in the main node.
-/// This structure declares an "unconditional sealer" which would tell the state keeper to seal
-/// blocks/batches at the same point as in the main node.
-#[derive(Debug, Clone)]
-pub struct ExternalNodeSealer {
-    actions: ActionQueue,
-}
-
-impl ExternalNodeSealer {
-    pub fn new(actions: ActionQueue) -> Self {
-        Self { actions }
-    }
-
-    fn should_seal_miniblock(&self) -> bool {
-        let res = matches!(self.actions.peek_action(), Some(SyncAction::SealMiniblock));
-        if res {
-            tracing::info!("Sealing miniblock");
-        }
-        res
-    }
-
-    fn should_seal_batch(&self) -> bool {
-        let res = matches!(
-            self.actions.peek_action(),
-            Some(SyncAction::SealBatch { .. })
-        );
-        if res {
-            tracing::info!("Sealing the batch");
-        }
-        res
-    }
-
-    pub fn into_unconditional_batch_seal_criterion(self) -> Box<SealerFn> {
-        Box::new(move |_| self.should_seal_batch())
-    }
-
-    pub fn into_miniblock_seal_criterion(self) -> Box<SealerFn> {
-        Box::new(move |_| self.should_seal_miniblock())
-    }
-}
-
 /// ExternalIO is the IO abstraction for the state keeper that is used in the external node.
 /// It receives a sequence of actions from the fetcher via the action queue and propagates it
 /// into the state keeper.
@@ -98,7 +53,7 @@ pub struct ExternalIO {
     current_miniblock_number: MiniblockNumber,
     actions: ActionQueue,
     sync_state: SyncState,
-    main_node_url: String,
+    main_node_client: Box<dyn MainNodeClient>,
 
     /// Required to extract newly added tokens.
     l2_erc20_bridge_addr: Address,
@@ -112,7 +67,7 @@ impl ExternalIO {
         pool: ConnectionPool,
         actions: ActionQueue,
         sync_state: SyncState,
-        main_node_url: String,
+        main_node_client: Box<dyn MainNodeClient>,
         l2_erc20_bridge_addr: Address,
         validation_computational_gas_limit: u32,
         chain_id: L2ChainId,
@@ -144,7 +99,7 @@ impl ExternalIO {
             current_miniblock_number: last_miniblock_number + 1,
             actions,
             sync_state,
-            main_node_url,
+            main_node_client,
             l2_erc20_bridge_addr,
             validation_computational_gas_limit,
             chain_id,
@@ -236,7 +191,9 @@ impl ExternalIO {
         match base_system_contracts {
             Some(version) => version,
             None => {
-                let protocol_version = fetch_protocol_version(&self.main_node_url, id)
+                let protocol_version = self
+                    .main_node_client
+                    .fetch_protocol_version(id)
                     .await
                     .expect("Failed to fetch protocol version from the main node");
                 self.pool
@@ -258,11 +215,9 @@ impl ExternalIO {
         let bootloader = self
             .get_base_system_contract(protocol_version.base_system_contracts.bootloader)
             .await;
-
         let default_aa = self
             .get_base_system_contract(protocol_version.base_system_contracts.default_aa)
             .await;
-
         BaseSystemContracts {
             bootloader,
             default_aa,
@@ -287,9 +242,10 @@ impl ExternalIO {
                 hash,
             },
             None => {
-                let main_node_url = self.main_node_url.clone();
                 tracing::info!("Fetching base system contract bytecode from the main node");
-                let contract = fetch_system_contract_by_hash(&main_node_url, hash)
+                let contract = self
+                    .main_node_client
+                    .fetch_system_contract_by_hash(hash)
                     .await
                     .expect("Failed to fetch base system contract bytecode from the main node");
                 self.pool
@@ -308,6 +264,19 @@ impl ExternalIO {
     }
 }
 
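The closure-based sealers removed above are replaced by the `IoSealCriteria` trait, which `ExternalIO` implements directly below. The trait's actual definition lives in `state_keeper::seal_criteria` and is not part of this diff; inferred from the impl, its shape is roughly the following sketch.

```rust
// Sketch of the `IoSealCriteria` trait as inferred from the impl below; the
// real definition may carry additional items or documentation.
pub trait IoSealCriteria {
    /// Decides whether the current L1 batch must be sealed, bypassing all other criteria.
    fn should_seal_l1_batch_unconditionally(&mut self, manager: &UpdatesManager) -> bool;
    /// Decides whether the current miniblock must be sealed.
    fn should_seal_miniblock(&mut self, manager: &UpdatesManager) -> bool;
}
```

+impl IoSealCriteria for ExternalIO {
+    fn should_seal_l1_batch_unconditionally(&mut self, _manager: &UpdatesManager) -> bool {
+        matches!(
+            self.actions.peek_action(),
+            Some(SyncAction::SealBatch { .. })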
+        )
+    }
+
+    fn should_seal_miniblock(&mut self, _manager: &UpdatesManager) -> bool {
+        matches!(self.actions.peek_action(), Some(SyncAction::SealMiniblock))
+    }
+}
+
 #[async_trait]
 impl StateKeeperIO for ExternalIO {
     fn current_l1_batch_number(&self) -> L1BatchNumber {
@@ -350,14 +319,13 @@ impl StateKeeperIO for ExternalIO {
             .unwrap()?;
 
         if pending_miniblock_header.protocol_version.is_none() {
-            // Fetch protocol version ID for pending miniblocks to know which VM to use to reexecute them.
-            let sync_block = fetch_sync_block_without_transactions(
-                &self.main_node_url,
-                pending_miniblock_header.number,
-            )
-            .await
-            .expect("Failed to fetch block from the main node")
-            .expect("Block must exist");
+            // Fetch protocol version ID for pending miniblocks to know which VM to use to re-execute them.
+            let sync_block = self
+                .main_node_client
+                .fetch_l2_block(pending_miniblock_header.number, false)
+                .await
+                .expect("Failed to fetch block from the main node")
+                .expect("Block must exist");
             // Loading base system contracts will insert protocol version in the database if it's not present there.
             let _ = self
                 .load_base_system_contracts_by_version_id(sync_block.protocol_version)
@@ -440,7 +408,7 @@ impl StateKeeperIO for ExternalIO {
         _prev_miniblock_timestamp: u64,
     ) -> Option<MiniblockParams> {
         // Wait for the next miniblock to appear in the queue.
-        let actions = &self.actions;
+        let actions = &mut self.actions;
         for _ in 0..poll_iters(POLL_INTERVAL, max_wait) {
             match actions.peek_action() {
                 Some(SyncAction::Miniblock {
@@ -484,7 +452,7 @@ impl StateKeeperIO for ExternalIO {
     }
 
     async fn wait_for_next_tx(&mut self, max_wait: Duration) -> Option<Transaction> {
-        let actions = &self.actions;
+        let actions = &mut self.actions;
         tracing::debug!(
             "Waiting for the new tx, next action is {:?}",
             actions.peek_action()
@@ -538,10 +506,9 @@ impl StateKeeperIO for ExternalIO {
         let store_latency = L1_BATCH_METRICS.start_storing_on_en();
         // We don't store the transactions in the database until they're executed to not overcomplicate the state
         // recovery on restart. So we have to store them here.
- for tx in updates_manager.miniblock.executed_transactions.iter() { + for tx in &updates_manager.miniblock.executed_transactions { if let Ok(l1_tx) = L1Tx::try_from(tx.transaction.clone()) { let l1_block_number = L1BlockNumber(l1_tx.common_data.eth_block as u32); - transaction .transactions_dal() .insert_transaction_l1(l1_tx, l1_block_number) diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index d33b77c33172..02d8d3b11372 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -1,15 +1,16 @@ +use anyhow::Context as _; use tokio::sync::watch; use std::time::Duration; -use zksync_dal::ConnectionPool; +use zksync_dal::StorageProcessor; use zksync_types::{L1BatchNumber, MiniblockNumber, H256}; -use zksync_web3_decl::{jsonrpsee::core::Error as RpcError, RpcResult}; +use zksync_web3_decl::jsonrpsee::core::Error as RpcError; use super::{ - cached_main_node_client::CachedMainNodeClient, + client::{CachingMainNodeClient, MainNodeClient}, metrics::{FetchStage, L1BatchStage, FETCHER_METRICS}, - sync_action::{ActionQueue, SyncAction}, + sync_action::{ActionQueueSender, SyncAction}, SyncState, }; use crate::metrics::{TxStage, APP_METRICS}; @@ -17,45 +18,40 @@ use crate::metrics::{TxStage, APP_METRICS}; const DELAY_INTERVAL: Duration = Duration::from_millis(500); const RETRY_DELAY_INTERVAL: Duration = Duration::from_secs(5); -/// Structure responsible for fetching batches and miniblock data from the main node. +/// Cursor of [`MainNodeFetcher`]. #[derive(Debug)] -pub struct MainNodeFetcher { - client: CachedMainNodeClient, - current_l1_batch: L1BatchNumber, - current_miniblock: MiniblockNumber, - actions: ActionQueue, - sync_state: SyncState, - stop_receiver: watch::Receiver, +pub struct MainNodeFetcherCursor { + // Fields are public for testing purposes. + pub(super) miniblock: MiniblockNumber, + pub(super) l1_batch: L1BatchNumber, } -impl MainNodeFetcher { - pub async fn new( - pool: ConnectionPool, - main_node_url: &str, - actions: ActionQueue, - sync_state: SyncState, - stop_receiver: watch::Receiver, - ) -> Self { - let mut storage = pool.access_storage_tagged("sync_layer").await.unwrap(); +impl MainNodeFetcherCursor { + /// Loads the cursor + pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { let last_sealed_l1_batch_header = storage .blocks_dal() .get_newest_l1_batch_header() .await - .unwrap(); + .context("Failed getting newest L1 batch header")?; let last_miniblock_number = storage .blocks_dal() .get_sealed_miniblock_number() .await - .unwrap(); + .context("Failed getting sealed miniblock number")?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. - let was_new_batch_open = storage.blocks_dal().pending_batch_exists().await.unwrap(); + let was_new_batch_open = storage + .blocks_dal() + .pending_batch_exists() + .await + .context("Failed checking whether pending L1 batch exists")?; // Miniblocks are always fully processed. - let current_miniblock = last_miniblock_number + 1; + let miniblock = last_miniblock_number + 1; // Decide whether the next batch should be explicitly opened or not. - let current_l1_batch = if was_new_batch_open { + let l1_batch = if was_new_batch_open { // No `OpenBatch` action needed. 
last_sealed_l1_batch_header.number + 1 } else { @@ -63,23 +59,46 @@ impl MainNodeFetcher { last_sealed_l1_batch_header.number }; - let client = CachedMainNodeClient::build_client(main_node_url); + Ok(Self { + miniblock, + l1_batch, + }) + } - Self { - client, - current_l1_batch, - current_miniblock, + /// Builds a fetcher from this cursor. + pub fn into_fetcher( + self, + client: Box, + actions: ActionQueueSender, + sync_state: SyncState, + stop_receiver: watch::Receiver, + ) -> MainNodeFetcher { + MainNodeFetcher { + client: CachingMainNodeClient::new(client), + cursor: self, actions, sync_state, stop_receiver, } } +} +/// Structure responsible for fetching batches and miniblock data from the main node. +#[derive(Debug)] +pub struct MainNodeFetcher { + client: CachingMainNodeClient, + cursor: MainNodeFetcherCursor, + actions: ActionQueueSender, + sync_state: SyncState, + stop_receiver: watch::Receiver, +} + +impl MainNodeFetcher { pub async fn run(mut self) -> anyhow::Result<()> { tracing::info!( "Starting the fetcher routine. Initial miniblock: {}, initial l1 batch: {}", - self.current_miniblock, - self.current_l1_batch + self.cursor.miniblock, + self.cursor.l1_batch ); // Run the main routine and reconnect upon the network errors. loop { @@ -88,13 +107,16 @@ impl MainNodeFetcher { tracing::info!("Stop signal received, exiting the fetcher routine"); return Ok(()); } - Err(err @ RpcError::Transport(_) | err @ RpcError::RequestTimeout) => { - tracing::warn!("Following transport error occurred: {}", err); - tracing::info!("Trying again after a delay"); - tokio::time::sleep(RETRY_DELAY_INTERVAL).await; // TODO (BFT-100): Implement the fibonacci backoff. - } Err(err) => { - anyhow::bail!("Unexpected error in the fetcher: {}", err); + if let Some(err @ RpcError::Transport(_) | err @ RpcError::RequestTimeout) = + err.downcast_ref::() + { + tracing::warn!("Following transport error occurred: {err}"); + tracing::info!("Trying again after a delay"); + tokio::time::sleep(RETRY_DELAY_INTERVAL).await; // TODO (BFT-100): Implement the fibonacci backoff. + } else { + return Err(err.context("Unexpected error in the fetcher")); + } } } } @@ -104,20 +126,18 @@ impl MainNodeFetcher { *self.stop_receiver.borrow() } - async fn run_inner(&mut self) -> RpcResult<()> { + async fn run_inner(&mut self) -> anyhow::Result<()> { loop { if self.check_if_cancelled() { return Ok(()); } let mut progressed = false; - - let last_main_node_block = - MiniblockNumber(self.client.get_block_number().await?.as_u32()); + let last_main_node_block = self.client.fetch_l2_block_number().await?; self.sync_state.set_main_node_block(last_main_node_block); self.client - .populate_miniblocks_cache(self.current_miniblock, last_main_node_block) + .populate_miniblocks_cache(self.cursor.miniblock, last_main_node_block) .await; let has_action_capacity = self.actions.has_action_capacity(); if has_action_capacity { @@ -139,26 +159,26 @@ impl MainNodeFetcher { /// Tries to fetch the next miniblock and insert it to the sync queue. /// Returns `true` if a miniblock was processed and `false` otherwise. - async fn fetch_next_miniblock(&mut self) -> RpcResult { + async fn fetch_next_miniblock(&mut self) -> anyhow::Result { let total_latency = FETCHER_METRICS.fetch_next_miniblock.start(); let request_latency = FETCHER_METRICS.requests[&FetchStage::SyncL2Block].start(); - let Some(block) = self.client.sync_l2_block(self.current_miniblock).await? else { + let Some(block) = self.client.fetch_l2_block(self.cursor.miniblock).await? 
else { return Ok(false); }; // This will be fetched from cache. let prev_block = self .client - .sync_l2_block(self.current_miniblock - 1) + .fetch_l2_block(self.cursor.miniblock - 1) .await? .expect("Previous block must exist"); request_latency.observe(); let mut new_actions = Vec::new(); - if block.l1_batch_number != self.current_l1_batch { + if block.l1_batch_number != self.cursor.l1_batch { assert_eq!( block.l1_batch_number, - self.current_l1_batch.next(), + self.cursor.l1_batch.next(), "Unexpected batch number in the next received miniblock" ); @@ -181,7 +201,7 @@ impl MainNodeFetcher { prev_miniblock_hash: prev_block.hash.unwrap_or_else(H256::zero), }); FETCHER_METRICS.l1_batch[&L1BatchStage::Open].set(block.l1_batch_number.0.into()); - self.current_l1_batch += 1; + self.cursor.l1_batch += 1; } else { // New batch implicitly means a new miniblock, so we only need to push the miniblock action // if it's not a new batch. @@ -217,10 +237,10 @@ impl MainNodeFetcher { self.sync_state.get_main_node_block().max(block.number) ); // Forgetting only the previous one because we still need the current one in cache for the next iteration. - self.client - .forget_miniblock(MiniblockNumber(self.current_miniblock.0.saturating_sub(1))); - self.current_miniblock += 1; - self.actions.push_actions(new_actions); + let prev_miniblock_number = MiniblockNumber(self.cursor.miniblock.0.saturating_sub(1)); + self.client.forget_miniblock(prev_miniblock_number); + self.cursor.miniblock += 1; + self.actions.push_actions(new_actions).await; total_latency.observe(); Ok(true) diff --git a/core/lib/zksync_core/src/sync_layer/genesis.rs b/core/lib/zksync_core/src/sync_layer/genesis.rs index 5bcf67a2c66d..4f7501fb0c37 100644 --- a/core/lib/zksync_core/src/sync_layer/genesis.rs +++ b/core/lib/zksync_core/src/sync_layer/genesis.rs @@ -1,29 +1,25 @@ -use crate::genesis::{ensure_genesis_state, GenesisParams}; +use anyhow::Context as _; -use anyhow::Context; -use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::StorageProcessor; use zksync_types::{ - api, block::DeployedContract, get_code_key, protocol_version::L1VerifierConfig, + block::DeployedContract, protocol_version::L1VerifierConfig, system_contracts::get_system_smart_contracts, AccountTreeId, Address, L1BatchNumber, L2ChainId, - MiniblockNumber, ProtocolVersionId, ACCOUNT_CODE_STORAGE_ADDRESS, H256, U64, -}; -use zksync_utils::h256_to_u256; -use zksync_web3_decl::{ - jsonrpsee::{core::error::Error, http_client::HttpClientBuilder}, - namespaces::{EnNamespaceClient, EthNamespaceClient, ZksNamespaceClient}, + H256, }; +use super::client::MainNodeClient; +use crate::genesis::{ensure_genesis_state, GenesisParams}; + pub async fn perform_genesis_if_needed( storage: &mut StorageProcessor<'_>, zksync_chain_id: L2ChainId, - main_node_url: String, + client: &dyn MainNodeClient, ) -> anyhow::Result<()> { - let mut transaction = storage.start_transaction().await.unwrap(); + let mut transaction = storage.start_transaction().await?; // We want to check whether the genesis is needed before we create genesis params to not // make the node startup slower. - let genesis_block_hash = if transaction.blocks_dal().is_genesis_needed().await.unwrap() { - let genesis_params = create_genesis_params(&main_node_url).await?; + let genesis_block_hash = if transaction.blocks_dal().is_genesis_needed().await? 
{ + let genesis_params = create_genesis_params(client).await?; ensure_genesis_state(&mut transaction, zksync_chain_id, &genesis_params) .await .context("ensure_genesis_state")? @@ -31,21 +27,23 @@ pub async fn perform_genesis_if_needed( transaction .blocks_dal() .get_l1_batch_state_root(L1BatchNumber(0)) - .await - .unwrap() + .await? .context("genesis block hash is empty")? }; - validate_genesis_state(&main_node_url, genesis_block_hash).await; - transaction.commit().await.unwrap(); - + validate_genesis_state(client, genesis_block_hash).await?; + transaction.commit().await?; Ok(()) } -async fn create_genesis_params(main_node_url: &str) -> anyhow::Result { - let base_system_contracts_hashes = fetch_genesis_system_contracts(main_node_url) - .await - .context("Unable to fetch genesis system contracts hashes")?; +async fn create_genesis_params(client: &dyn MainNodeClient) -> anyhow::Result { + let genesis_miniblock = client + .fetch_l2_block(zksync_types::MiniblockNumber(0), false) + .await? + .context("No genesis block on the main node")?; + let first_validator = genesis_miniblock.operator_address; + let base_system_contracts_hashes = genesis_miniblock.base_system_contracts_hashes; + let protocol_version = genesis_miniblock.protocol_version; // Load the list of addresses that are known to contain system contracts at any point in time. // Not every of these addresses is guaranteed to be present in the genesis state, but we'll iterate through @@ -57,18 +55,10 @@ async fn create_genesis_params(main_node_url: &str) -> anyhow::Result anyhow::Result = Vec::with_capacity(system_contract_addresses.len()); - const GENESIS_BLOCK: api::BlockIdVariant = - api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(U64([0]))); + for system_contract_address in system_contract_addresses { - let code_key = get_code_key(&system_contract_address); - let code_hash = client - .get_storage_at( - ACCOUNT_CODE_STORAGE_ADDRESS, - h256_to_u256(*code_key.key()), - Some(GENESIS_BLOCK), - ) - .await - .context("Unable to query storage at genesis state")?; let Some(bytecode) = client - .get_bytecode_by_hash(code_hash) - .await - .context("Unable to query system contract bytecode")? + .fetch_genesis_contract_bytecode(system_contract_address) + .await? else { // It's OK for some of contracts to be absent. // If this is a bug, the genesis root hash won't match. @@ -107,12 +86,6 @@ async fn create_genesis_params(main_node_url: &str) -> anyhow::Result anyhow::Result Result { - let client = HttpClientBuilder::default().build(main_node_url).unwrap(); - let hashes = client - .sync_l2_block(zksync_types::MiniblockNumber(0), false) - .await? - .expect("No genesis block on the main node") - .base_system_contracts_hashes; - Ok(hashes) -} - -pub async fn fetch_system_contract_by_hash( - main_node_url: &str, - hash: H256, -) -> Result { - let client = HttpClientBuilder::default().build(main_node_url).unwrap(); - let bytecode = client.get_bytecode_by_hash(hash).await?.unwrap_or_else(|| { - panic!( - "Base system contract bytecode is absent on the main node. 
Dependency hash: {:?}", - hash - ) - }); - assert_eq!( - hash, - zksync_utils::bytecode::hash_bytecode(&bytecode), - "Got invalid base system contract bytecode from main node" +async fn validate_genesis_state( + client: &dyn MainNodeClient, + root_hash: H256, +) -> anyhow::Result<()> { + let genesis_l1_batch_hash = client.fetch_genesis_l1_batch_hash().await?; + anyhow::ensure!( + genesis_l1_batch_hash == root_hash, + "Genesis L1 batch root hash mismatch with main node: expected {root_hash}, got {genesis_l1_batch_hash}" ); - Ok(SystemContractCode { - code: zksync_utils::bytes_to_be_words(bytecode), - hash, - }) -} - -pub async fn fetch_base_system_contracts( - main_node_url: &str, - hashes: BaseSystemContractsHashes, -) -> Result { - Ok(BaseSystemContracts { - bootloader: fetch_system_contract_by_hash(main_node_url, hashes.bootloader).await?, - default_aa: fetch_system_contract_by_hash(main_node_url, hashes.default_aa).await?, - }) -} - -pub async fn fetch_protocol_version( - main_node_url: &str, - protocol_version: ProtocolVersionId, -) -> Result { - let client = HttpClientBuilder::default().build(main_node_url).unwrap(); - - Ok(client - .get_protocol_version(Some(protocol_version as u16)) - .await? - .expect("Protocol version must exist")) -} - -pub async fn fetch_sync_block_without_transactions( - main_node_url: &str, - miniblock_number: MiniblockNumber, -) -> Result, Error> { - let client = HttpClientBuilder::default().build(main_node_url).unwrap(); - client.sync_l2_block(miniblock_number, false).await + Ok(()) } diff --git a/core/lib/zksync_core/src/sync_layer/metrics.rs b/core/lib/zksync_core/src/sync_layer/metrics.rs index 2548ebd80be1..c3082c51052d 100644 --- a/core/lib/zksync_core/src/sync_layer/metrics.rs +++ b/core/lib/zksync_core/src/sync_layer/metrics.rs @@ -49,19 +49,10 @@ pub(super) struct FetcherMetrics { #[vise::register] pub(super) static FETCHER_METRICS: vise::Global = vise::Global::new(); -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "action", rename_all = "snake_case")] -pub(super) enum LockAction { - AcquireRead, - AcquireWrite, -} - #[derive(Debug, Metrics)] #[metrics(prefix = "external_node_action_queue")] pub(super) struct ActionQueueMetrics { pub action_queue_size: Gauge, - #[metrics(buckets = Buckets::LATENCIES)] - pub lock: Family>, } #[vise::register] diff --git a/core/lib/zksync_core/src/sync_layer/mod.rs b/core/lib/zksync_core/src/sync_layer/mod.rs index 3e9fc028e7f1..e216ef4f8c55 100644 --- a/core/lib/zksync_core/src/sync_layer/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/mod.rs @@ -1,14 +1,15 @@ pub mod batch_status_updater; -mod cached_main_node_client; +mod client; pub mod external_io; pub mod fetcher; pub mod genesis; mod metrics; pub(crate) mod sync_action; mod sync_state; +#[cfg(test)] +mod tests; pub use self::{ - external_io::{ExternalIO, ExternalNodeSealer}, - sync_action::ActionQueue, + client::MainNodeClient, external_io::ExternalIO, sync_action::ActionQueue, sync_state::SyncState, }; diff --git a/core/lib/zksync_core/src/sync_layer/sync_action.rs b/core/lib/zksync_core/src/sync_layer/sync_action.rs index 78a6c84ce887..977d03dd5329 100644 --- a/core/lib/zksync_core/src/sync_layer/sync_action.rs +++ b/core/lib/zksync_core/src/sync_layer/sync_action.rs @@ -1,51 +1,15 @@ -use std::{ - collections::VecDeque, - sync::{Arc, RwLock}, -}; +use tokio::sync::mpsc; use zksync_types::{Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, H256}; -use super::metrics::{LockAction, 
QUEUE_METRICS};
+use super::metrics::QUEUE_METRICS;
 
-/// Action queue is used to communicate between the fetcher and the rest of the external node
-/// by collecting the fetched data in memory until it gets processed by the different entities.
-///
-/// TODO (BFT-82): This structure right now expects no more than a single consumer. Using `peek/pop` pairs in
-/// two different threads may lead to a race condition.
-#[derive(Debug, Clone, Default)]
-pub struct ActionQueue {
-    inner: Arc<RwLock<ActionQueueInner>>,
-}
-
-impl ActionQueue {
-    pub fn new() -> Self {
-        Self::default()
-    }
+#[derive(Debug)]
+pub struct ActionQueueSender(mpsc::Sender<SyncAction>);
 
-    /// Removes the first action from the queue.
-    pub(crate) fn pop_action(&self) -> Option<SyncAction> {
-        self.write_lock().actions.pop_front().map(|action| {
-            QUEUE_METRICS.action_queue_size.dec_by(1);
-            action
-        })
-    }
-
-    /// Returns the first action from the queue without removing it.
-    pub(crate) fn peek_action(&self) -> Option<SyncAction> {
-        self.read_lock().actions.front().cloned()
-    }
-
-    /// Returns true if the queue has capacity for a new action.
-    /// Capacity is limited to avoid memory exhaustion.
+impl ActionQueueSender {
     pub(crate) fn has_action_capacity(&self) -> bool {
-        const ACTION_CAPACITY: usize = 32_768; // TODO: Make it configurable.
-
-        // Since the capacity is read before the action is pushed,
-        // it is possible that the capacity will be exceeded, since the fetcher will
-        // decompose received data into a sequence of actions.
-        // This is not a problem, since the size of decomposed action is much smaller
-        // than the configured capacity.
-        self.read_lock().actions.len() < ACTION_CAPACITY
+        self.0.capacity() > 0
     }
 
     /// Pushes a set of actions to the queue.
@@ -53,12 +17,14 @@ impl ActionQueue {
     /// Requires that the actions are in the correct order: starts with a new open batch/miniblock,
     /// followed by 0 or more transactions, have mandatory `SealMiniblock` and optional `SealBatch` at the end.
     /// Would panic if the order is incorrect.
-    pub(crate) fn push_actions(&self, actions: Vec<SyncAction>) {
-        // We need to enforce the ordering of actions to make sure that they can be processed.
-        Self::check_action_sequence(&actions).expect("Invalid sequence of actions.");
-        QUEUE_METRICS.action_queue_size.inc_by(actions.len());
-
-        self.write_lock().actions.extend(actions);
+    pub(crate) async fn push_actions(&self, actions: Vec<SyncAction>) {
+        Self::check_action_sequence(&actions).unwrap();
+        for action in actions {
+            self.0.send(action).await.expect("EN sync logic panicked");
+            QUEUE_METRICS
+                .action_queue_size
+                .set(self.0.max_capacity() - self.0.capacity());
+        }
     }
 
     /// Checks whether the action sequence is valid.
@@ -99,25 +65,50 @@ impl ActionQueue {
         }
         Ok(())
     }
+}
+
+/// Action queue is used to communicate between the fetcher and the rest of the external node
+/// by collecting the fetched data in memory until it gets processed by the different entities.
+#[derive(Debug)]
+pub struct ActionQueue {
+    receiver: mpsc::Receiver<SyncAction>,
+    peeked: Option<SyncAction>,
+}
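The switch from a locked `VecDeque` to a bounded `tokio::sync::mpsc` channel gives backpressure for free (`has_action_capacity()` becomes `capacity() > 0`), while the single-consumer peek/pop semantics are preserved by buffering at most one received action. A minimal standalone sketch of that pattern, with hypothetical names that are not part of the diff:

```rust
use tokio::sync::mpsc;

// `peek` buffers one item locally so that a subsequent `pop` (or another
// `peek`) observes the same element; `pop` drains the buffer first.
struct PeekableReceiver<T: Clone> {
    receiver: mpsc::Receiver<T>,
    peeked: Option<T>,
}

impl<T: Clone> PeekableReceiver<T> {
    fn peek(&mut self) -> Option<T> {
        if self.peeked.is_none() {
            self.peeked = self.receiver.try_recv().ok();
        }
        self.peeked.clone()
    }

    fn pop(&mut self) -> Option<T> {
        self.peeked.take().or_else(|| self.receiver.try_recv().ok())
    }
}
```

+
+impl ActionQueue {
+    pub fn new() -> (ActionQueueSender, Self) {
+        const ACTION_CAPACITY: usize = 32_768; // TODO: Make it configurable.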
-    fn read_lock(&self) -> std::sync::RwLockReadGuard<'_, ActionQueueInner> {
-        let latency = QUEUE_METRICS.lock[&LockAction::AcquireRead].start();
-        let lock = self.inner.read().unwrap();
-        latency.observe();
-        lock
+        let (sender, receiver) = mpsc::channel(ACTION_CAPACITY);
+        let sender = ActionQueueSender(sender);
+        let this = Self {
+            receiver,
+            peeked: None,
+        };
+        (sender, this)
     }
 
-    fn write_lock(&self) -> std::sync::RwLockWriteGuard<'_, ActionQueueInner> {
-        let latency = QUEUE_METRICS.lock[&LockAction::AcquireWrite].start();
-        let lock = self.inner.write().unwrap();
-        latency.observe();
-        lock
+    /// Removes the first action from the queue.
+    pub(crate) fn pop_action(&mut self) -> Option<SyncAction> {
+        if let Some(peeked) = self.peeked.take() {
+            QUEUE_METRICS.action_queue_size.dec_by(1);
+            return Some(peeked);
+        }
+        let action = self.receiver.try_recv().ok();
+        if action.is_some() {
+            QUEUE_METRICS.action_queue_size.dec_by(1);
+        }
+        action
     }
-}
 
-#[derive(Debug, Default)]
-struct ActionQueueInner {
-    actions: VecDeque<SyncAction>,
+    /// Returns the first action from the queue without removing it.
+    pub(crate) fn peek_action(&mut self) -> Option<SyncAction> {
+        if let Some(action) = &self.peeked {
+            return Some(action.clone());
+        }
+        self.peeked = self.receiver.try_recv().ok();
+        self.peeked.clone()
+    }
 }
 
 /// An instruction for the ExternalIO to request a certain action from the state keeper.
@@ -223,7 +214,7 @@ mod tests {
             vec![miniblock(), tx(), seal_batch()],
         ];
         for (idx, sequence) in test_vector.into_iter().enumerate() {
-            ActionQueue::check_action_sequence(&sequence)
+            ActionQueueSender::check_action_sequence(&sequence)
                .unwrap_or_else(|_| panic!("Valid sequence #{} failed", idx));
        }
    }
@@ -271,7 +262,7 @@ mod tests {
            (vec![seal_batch()], "Unexpected SealMiniblock/SealBatch"),
        ];
        for (idx, (sequence, expected_err)) in test_vector.into_iter().enumerate() {
-            let Err(err) = ActionQueue::check_action_sequence(&sequence) else {
+            let Err(err) = ActionQueueSender::check_action_sequence(&sequence) else {
                panic!(
                    "Invalid sequence passed the test. Sequence #{}, expected error: {}",
                    idx, expected_err
diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs
new file mode 100644
index 000000000000..02b68d7a8ed9
--- /dev/null
+++ b/core/lib/zksync_core/src/sync_layer/tests.rs
@@ -0,0 +1,650 @@
+//! High-level sync layer tests.
+
+use async_trait::async_trait;
+use tokio::{sync::watch, task::JoinHandle};
+
+use std::{
+    collections::{HashMap, VecDeque},
+    iter,
+    time::{Duration, Instant},
+};
+
+use db_test_macro::db_test;
+use zksync_config::configs::chain::NetworkConfig;
+use zksync_contracts::{BaseSystemContractsHashes, SystemContractCode};
+use zksync_dal::{ConnectionPool, StorageProcessor};
+use zksync_types::{
+    api, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256,
+};
+
+use super::{
+    fetcher::MainNodeFetcherCursor,
+    sync_action::{ActionQueueSender, SyncAction},
+    *,
+};
+use crate::{
+    api_server::web3::tests::spawn_http_server,
+    genesis::{ensure_genesis_state, GenesisParams},
+    state_keeper::{
+        tests::{create_l1_batch_metadata, create_l2_transaction, TestBatchExecutorBuilder},
+        ZkSyncStateKeeper,
+    },
+};
+
+const TEST_TIMEOUT: Duration = Duration::from_secs(10);
+const POLL_INTERVAL: Duration = Duration::from_millis(50);
+
+#[derive(Debug, Default)]
+struct MockMainNodeClient {
+    l2_blocks: Vec<api::en::SyncBlock>,
+}
+
+impl MockMainNodeClient {
+    /// `miniblock_count` doesn't include a fictive miniblock. Returns hashes of generated transactions.
+    fn push_l1_batch(&mut self, miniblock_count: u32) -> Vec<H256> {
+        let l1_batch_number = self
+            .l2_blocks
+            .last()
+            .map_or(L1BatchNumber(0), |block| block.l1_batch_number + 1);
+        let number_offset = self.l2_blocks.len() as u32;
+
+        let mut tx_hashes = vec![];
+        let l2_blocks = (0..=miniblock_count).map(|number| {
+            let is_fictive = number == miniblock_count;
+            let transactions = if is_fictive {
+                vec![]
+            } else {
+                let transaction = create_l2_transaction(10, 100);
+                tx_hashes.push(transaction.hash());
+                vec![transaction.into()]
+            };
+            let number = number + number_offset;
+
+            api::en::SyncBlock {
+                number: MiniblockNumber(number),
+                l1_batch_number,
+                last_in_batch: is_fictive,
+                timestamp: number.into(),
+                root_hash: Some(H256::repeat_byte(1)),
+                l1_gas_price: 2,
+                l2_fair_gas_price: 3,
+                base_system_contracts_hashes: BaseSystemContractsHashes::default(),
+                operator_address: Address::repeat_byte(2),
+                transactions: Some(transactions),
+                virtual_blocks: Some(!is_fictive as u32),
+                hash: Some(H256::repeat_byte(1)),
+                protocol_version: ProtocolVersionId::latest(),
+            }
+        });
+
+        self.l2_blocks.extend(l2_blocks);
+        tx_hashes
+    }
+}
+
+#[async_trait]
+impl MainNodeClient for MockMainNodeClient {
+    async fn fetch_system_contract_by_hash(
+        &self,
+        _hash: H256,
+    ) -> anyhow::Result<SystemContractCode> {
+        anyhow::bail!("Not implemented");
+    }
+
+    async fn fetch_genesis_contract_bytecode(
+        &self,
+        _address: Address,
+    ) -> anyhow::Result<Option<Vec<u8>>> {
+        anyhow::bail!("Not implemented");
+    }
+
+    async fn fetch_protocol_version(
+        &self,
+        _protocol_version: ProtocolVersionId,
+    ) -> anyhow::Result<api::ProtocolVersion> {
+        anyhow::bail!("Not implemented");
+    }
+
+    async fn fetch_genesis_l1_batch_hash(&self) -> anyhow::Result<H256> {
+        anyhow::bail!("Not implemented");
+    }
+
+    async fn fetch_l2_block_number(&self) -> anyhow::Result<MiniblockNumber> {
+        if let Some(number) = self.l2_blocks.len().checked_sub(1) {
+            Ok(MiniblockNumber(number as u32))
+        } else {
+            anyhow::bail!("Not implemented");
+        }
+    }
+
+    async fn fetch_l2_block(
+        &self,
+        number: MiniblockNumber,
+        with_transactions: bool,
+    ) -> anyhow::Result<Option<api::en::SyncBlock>> {
+        let Some(mut block) = self.l2_blocks.get(number.0 as usize).cloned() else {
+            return Ok(None);
+        };
+        if !with_transactions {
+            block.transactions = None;
+        }
+        Ok(Some(block))
+    }
+}
+
+fn open_l1_batch(number: u32, timestamp: u64, first_miniblock_number: u32) -> SyncAction {
+    SyncAction::OpenBatch {
+        number: L1BatchNumber(number),
+        timestamp,
+        l1_gas_price: 2,
+        l2_fair_gas_price: 3,
+        operator_address: Default::default(),
+        protocol_version: ProtocolVersionId::latest(),
+        first_miniblock_info: (MiniblockNumber(first_miniblock_number), 1),
+        prev_miniblock_hash: H256::default(),
+    }
+}
+
+#[derive(Debug)]
+struct StateKeeperHandles {
+    actions_sender: ActionQueueSender,
+    stop_sender: watch::Sender<bool>,
+    sync_state: SyncState,
+    task: JoinHandle<anyhow::Result<()>>,
+}
+
+impl StateKeeperHandles {
+    async fn wait(self, mut condition: impl FnMut(&SyncState) -> bool) {
+        let started_at = Instant::now();
+        loop {
+            assert!(
+                started_at.elapsed() <= TEST_TIMEOUT,
+                "Timed out waiting for miniblock to be sealed"
+            );
+            if self.task.is_finished() {
+                match self.task.await {
+                    Err(err) => panic!("State keeper panicked: {}", err),
+                    Ok(Err(err)) => panic!("State keeper finished with an error: {}", err),
+                    Ok(Ok(())) => unreachable!(),
+                }
+            }
+            if condition(&self.sync_state) {
+                break;
+            }
+            tokio::time::sleep(POLL_INTERVAL).await;
+        }
+
+        self.stop_sender.send_replace(true);
+        self.task.await.unwrap().unwrap();
+    }
+}
+
+async fn
ensure_genesis(storage: &mut StorageProcessor<'_>) {
+    if storage.blocks_dal().is_genesis_needed().await.unwrap() {
+        ensure_genesis_state(storage, L2ChainId::default(), &GenesisParams::mock())
+            .await
+            .unwrap();
+    }
+}
+
+/// `tx_hashes` are grouped by the L1 batch.
+async fn run_state_keeper(pool: ConnectionPool, tx_hashes: &[&[H256]]) -> StateKeeperHandles {
+    assert!(!tx_hashes.is_empty());
+    assert!(tx_hashes.iter().all(|tx_hashes| !tx_hashes.is_empty()));
+
+    ensure_genesis(&mut pool.access_storage().await.unwrap()).await;
+
+    let (actions_sender, actions) = ActionQueue::new();
+    let sync_state = SyncState::new();
+    let io = ExternalIO::new(
+        pool,
+        actions,
+        sync_state.clone(),
+        Box::<MockMainNodeClient>::default(),
+        Address::repeat_byte(1),
+        u32::MAX,
+        L2ChainId::default(),
+    )
+    .await;
+
+    let (stop_sender, stop_receiver) = watch::channel(false);
+    let mut batch_executor_base = TestBatchExecutorBuilder::default();
+    for &tx_hashes_in_l1_batch in tx_hashes {
+        batch_executor_base.push_successful_transactions(tx_hashes_in_l1_batch);
+    }
+
+    let state_keeper = ZkSyncStateKeeper::without_sealer(
+        stop_receiver,
+        Box::new(io),
+        Box::new(batch_executor_base),
+    );
+    StateKeeperHandles {
+        actions_sender,
+        stop_sender,
+        sync_state,
+        task: tokio::spawn(state_keeper.run()),
+    }
+}
+
+fn extract_tx_hashes<'a>(actions: impl IntoIterator<Item = &'a SyncAction>) -> Vec<H256> {
+    actions
+        .into_iter()
+        .filter_map(|action| {
+            if let SyncAction::Tx(tx) = action {
+                Some(tx.hash())
+            } else {
+                None
+            }
+        })
+        .collect()
+}
+
+#[db_test]
+async fn external_io_basics(pool: ConnectionPool) {
+    let open_l1_batch = open_l1_batch(1, 1, 1);
+    let tx = create_l2_transaction(10, 100);
+    let tx_hash = tx.hash();
+    let tx = SyncAction::Tx(Box::new(tx.into()));
+    let actions = vec![open_l1_batch, tx, SyncAction::SealMiniblock];
+
+    let state_keeper = run_state_keeper(pool.clone(), &[&extract_tx_hashes(&actions)]).await;
+    state_keeper.actions_sender.push_actions(actions).await;
+    // Wait until the miniblock is sealed.
+    state_keeper
+        .wait(|state| state.get_local_block() == MiniblockNumber(1))
+        .await;
+
+    // Check that the miniblock is persisted.
+ let mut storage = pool.access_storage().await.unwrap(); + let miniblock = storage + .blocks_dal() + .get_miniblock_header(MiniblockNumber(1)) + .await + .unwrap() + .expect("Miniblock #1 is not persisted"); + assert_eq!(miniblock.timestamp, 1); + assert_eq!(miniblock.l1_gas_price, 2); + assert_eq!(miniblock.l2_fair_gas_price, 3); + assert_eq!(miniblock.l1_tx_count, 0); + assert_eq!(miniblock.l2_tx_count, 1); + + let tx_receipt = storage + .transactions_web3_dal() + .get_transaction_receipt(tx_hash) + .await + .unwrap() + .expect("Transaction not persisted"); + assert_eq!(tx_receipt.block_number, Some(1.into())); + assert_eq!(tx_receipt.transaction_index, 0.into()); +} + +async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPool) -> Vec { + let open_l1_batch = open_l1_batch(1, 1, 1); + let txs = (0..5).map(|_| { + let tx = create_l2_transaction(10, 100); + SyncAction::Tx(Box::new(tx.into())) + }); + let first_miniblock_actions: Vec<_> = iter::once(open_l1_batch) + .chain(txs) + .chain([SyncAction::SealMiniblock]) + .collect(); + + let open_miniblock = SyncAction::Miniblock { + number: MiniblockNumber(2), + timestamp: 2, + virtual_blocks: 1, + }; + let more_txs = (0..3).map(|_| { + let tx = create_l2_transaction(10, 100); + SyncAction::Tx(Box::new(tx.into())) + }); + let second_miniblock_actions: Vec<_> = iter::once(open_miniblock) + .chain(more_txs) + .chain([SyncAction::SealMiniblock]) + .collect(); + + let tx_hashes = extract_tx_hashes( + first_miniblock_actions + .iter() + .chain(&second_miniblock_actions), + ); + let state_keeper = run_state_keeper(pool, &[&tx_hashes]).await; + state_keeper + .actions_sender + .push_actions(first_miniblock_actions) + .await; + state_keeper + .actions_sender + .push_actions(second_miniblock_actions) + .await; + // Wait until both miniblocks are sealed. + state_keeper + .wait(|state| state.get_local_block() == MiniblockNumber(2)) + .await; + tx_hashes +} + +#[db_test] +async fn external_io_with_multiple_miniblocks(pool: ConnectionPool) { + let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await; + assert_eq!(tx_hashes.len(), 8); + + // Check that both miniblocks are persisted. 
+ let tx_hashes_by_miniblock = [(1, &tx_hashes[..5]), (2, &tx_hashes[5..])]; + let mut storage = pool.access_storage().await.unwrap(); + for (number, expected_tx_hashes) in tx_hashes_by_miniblock { + let miniblock = storage + .blocks_dal() + .get_miniblock_header(MiniblockNumber(number)) + .await + .unwrap() + .unwrap_or_else(|| panic!("Miniblock #{} is not persisted", number)); + assert_eq!(miniblock.l2_tx_count, expected_tx_hashes.len() as u16); + assert_eq!(miniblock.timestamp, u64::from(number)); + + let sync_block = storage + .sync_dal() + .sync_block(MiniblockNumber(number), Address::repeat_byte(1), true) + .await + .unwrap() + .unwrap_or_else(|| panic!("Sync block #{} is not persisted", number)); + + let transactions = sync_block.transactions.unwrap(); + assert_eq!(transactions.len(), expected_tx_hashes.len()); + let tx_hashes: Vec<_> = transactions.iter().map(Transaction::hash).collect(); + assert_eq!(tx_hashes, expected_tx_hashes); + } + drop(storage); + + test_external_io_recovery(pool, tx_hashes).await; +} + +async fn test_external_io_recovery(pool: ConnectionPool, mut tx_hashes: Vec) { + let new_tx = create_l2_transaction(10, 100); + tx_hashes.push(new_tx.hash()); + let new_tx = SyncAction::Tx(Box::new(new_tx.into())); + + let state_keeper = run_state_keeper(pool.clone(), &[&tx_hashes]).await; + // Check that the state keeper state is restored. + assert_eq!( + state_keeper.sync_state.get_local_block(), + MiniblockNumber(2) + ); + + // Send new actions and wait until the new miniblock is sealed. + let open_miniblock = SyncAction::Miniblock { + number: MiniblockNumber(3), + timestamp: 3, + virtual_blocks: 1, + }; + let actions = vec![open_miniblock, new_tx, SyncAction::SealMiniblock]; + state_keeper.actions_sender.push_actions(actions).await; + state_keeper + .wait(|state| state.get_local_block() == MiniblockNumber(3)) + .await; + + let mut storage = pool.access_storage().await.unwrap(); + let miniblock = storage + .blocks_dal() + .get_miniblock_header(MiniblockNumber(3)) + .await + .unwrap() + .expect("Miniblock #3 is not persisted"); + assert_eq!(miniblock.l2_tx_count, 1); + assert_eq!(miniblock.timestamp, 3); +} + +async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: u32) { + loop { + let mut storage = pool.access_storage().await.unwrap(); + let last_l1_batch_number = storage + .blocks_dal() + .get_sealed_l1_batch_number() + .await + .unwrap(); + if last_l1_batch_number < L1BatchNumber(number) { + tokio::time::sleep(POLL_INTERVAL).await; + continue; + } + + let metadata = create_l1_batch_metadata(number); + storage + .blocks_dal() + .save_l1_batch_metadata(L1BatchNumber(1), &metadata, H256::zero()) + .await + .unwrap(); + break; + } +} + +#[db_test] +async fn external_io_with_multiple_l1_batches(pool: ConnectionPool) { + let l1_batch = open_l1_batch(1, 1, 1); + let first_tx = create_l2_transaction(10, 100); + let first_tx_hash = first_tx.hash(); + let first_tx = SyncAction::Tx(Box::new(first_tx.into())); + let first_l1_batch_actions = vec![l1_batch, first_tx, SyncAction::SealMiniblock]; + + let fictive_miniblock = SyncAction::Miniblock { + number: MiniblockNumber(2), + timestamp: 2, + virtual_blocks: 0, + }; + let seal_l1_batch = SyncAction::SealBatch { virtual_blocks: 0 }; + let fictive_miniblock_actions = vec![fictive_miniblock, seal_l1_batch]; + + let l1_batch = open_l1_batch(2, 3, 3); + let second_tx = create_l2_transaction(10, 100); + let second_tx_hash = second_tx.hash(); + let second_tx = SyncAction::Tx(Box::new(second_tx.into())); + let 
second_l1_batch_actions = vec![l1_batch, second_tx, SyncAction::SealMiniblock]; + + let state_keeper = run_state_keeper(pool.clone(), &[&[first_tx_hash], &[second_tx_hash]]).await; + state_keeper + .actions_sender + .push_actions(first_l1_batch_actions) + .await; + state_keeper + .actions_sender + .push_actions(fictive_miniblock_actions) + .await; + state_keeper + .actions_sender + .push_actions(second_l1_batch_actions) + .await; + + let hash_task = tokio::spawn(mock_l1_batch_hash_computation(pool.clone(), 1)); + // Wait until the miniblocks are sealed. + state_keeper + .wait(|state| state.get_local_block() == MiniblockNumber(3)) + .await; + hash_task.await.unwrap(); + + let mut storage = pool.access_storage().await.unwrap(); + let l1_batch_header = storage + .blocks_dal() + .get_l1_batch_header(L1BatchNumber(1)) + .await + .unwrap() + .expect("L1 batch #1 is not persisted"); + assert_eq!(l1_batch_header.timestamp, 1); + assert_eq!(l1_batch_header.l2_tx_count, 1); + + let (first_miniblock, last_miniblock) = storage + .blocks_dal() + .get_miniblock_range_of_l1_batch(L1BatchNumber(1)) + .await + .unwrap() + .expect("Miniblock range for L1 batch #1 is not persisted"); + assert_eq!(first_miniblock, MiniblockNumber(1)); + assert_eq!(last_miniblock, MiniblockNumber(2)); + + let fictive_miniblock = storage + .blocks_dal() + .get_miniblock_header(MiniblockNumber(2)) + .await + .unwrap() + .expect("Fictive miniblock #2 is not persisted"); + assert_eq!(fictive_miniblock.timestamp, 2); + assert_eq!(fictive_miniblock.l2_tx_count, 0); +} + +#[db_test] +async fn fetcher_basics(pool: ConnectionPool) { + let mut storage = pool.access_storage().await.unwrap(); + ensure_genesis(&mut storage).await; + let fetcher_cursor = MainNodeFetcherCursor::new(&mut storage).await.unwrap(); + assert_eq!(fetcher_cursor.l1_batch, L1BatchNumber(0)); + assert_eq!(fetcher_cursor.miniblock, MiniblockNumber(1)); + drop(storage); + + let mut mock_client = MockMainNodeClient::default(); + mock_client.push_l1_batch(0); + // ^ The genesis L1 batch will not be queried, so we're OK with filling it with non-authentic data + let mut tx_hashes = VecDeque::from(mock_client.push_l1_batch(1)); + tx_hashes.extend(mock_client.push_l1_batch(2)); + + let (actions_sender, mut actions) = ActionQueue::new(); + let (stop_sender, stop_receiver) = watch::channel(false); + let sync_state = SyncState::default(); + let fetcher = fetcher_cursor.into_fetcher( + Box::new(mock_client), + actions_sender, + sync_state.clone(), + stop_receiver, + ); + let fetcher_task = tokio::spawn(fetcher.run()); + + // Check that sync_state is updated. + while sync_state.get_main_node_block() < MiniblockNumber(5) { + tokio::time::sleep(POLL_INTERVAL).await; + } + + // Check generated actions. Some basic checks are performed by `ActionQueueSender`. + let mut current_l1_batch_number = L1BatchNumber(0); + let mut current_miniblock_number = MiniblockNumber(0); + let mut tx_count_in_miniblock = 0; + let started_at = Instant::now(); + loop { + assert!( + started_at.elapsed() <= TEST_TIMEOUT, + "Timed out waiting for fetcher" + ); + let Some(action) = actions.pop_action() else { + tokio::time::sleep(POLL_INTERVAL).await; + continue; + }; + match action { + SyncAction::OpenBatch { number, .. } => { + current_l1_batch_number += 1; + current_miniblock_number += 1; // First miniblock is implicitly opened + tx_count_in_miniblock = 0; + assert_eq!(number, current_l1_batch_number); + } + SyncAction::Miniblock { number, .. 
} => { + current_miniblock_number += 1; + tx_count_in_miniblock = 0; + assert_eq!(number, current_miniblock_number); + } + SyncAction::SealBatch { virtual_blocks } => { + assert_eq!(virtual_blocks, 0); + assert_eq!(tx_count_in_miniblock, 0); + if current_miniblock_number == MiniblockNumber(5) { + break; + } + } + SyncAction::Tx(tx) => { + assert_eq!(tx.hash(), tx_hashes.pop_front().unwrap()); + tx_count_in_miniblock += 1; + } + SyncAction::SealMiniblock => { + assert_eq!(tx_count_in_miniblock, 1); + } + } + } + + stop_sender.send_replace(true); + fetcher_task.await.unwrap().unwrap(); +} + +#[db_test] +async fn fetcher_with_real_server(pool: ConnectionPool) { + // Fill in transactions grouped in multiple miniblocks in the storage. + let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await; + let mut tx_hashes = VecDeque::from(tx_hashes); + + // Start the API server. + let network_config = NetworkConfig::from_env().unwrap(); + let (stop_sender, stop_receiver) = watch::channel(false); + let server_handles = + spawn_http_server(&network_config, pool.clone(), stop_receiver.clone()).await; + server_handles.wait_until_ready().await; + let server_addr = &server_handles.local_addr; + + // Start the fetcher connected to the API server. + let sync_state = SyncState::default(); + let (actions_sender, mut actions) = ActionQueue::new(); + let client = ::json_rpc(&format!("http://{server_addr}/")).unwrap(); + let fetcher_cursor = MainNodeFetcherCursor { + miniblock: MiniblockNumber(1), + l1_batch: L1BatchNumber(0), + }; + let fetcher = fetcher_cursor.into_fetcher( + Box::new(client), + actions_sender, + sync_state.clone(), + stop_receiver, + ); + let fetcher_task = tokio::spawn(fetcher.run()); + + // Check generated actions. + let mut current_miniblock_number = MiniblockNumber(0); + let mut tx_count_in_miniblock = 0; + let miniblock_number_to_tx_count = HashMap::from([(1, 5), (2, 3)]); + let started_at = Instant::now(); + loop { + assert!( + started_at.elapsed() <= TEST_TIMEOUT, + "Timed out waiting for fetcher actions" + ); + let Some(action) = actions.pop_action() else { + tokio::time::sleep(POLL_INTERVAL).await; + continue; + }; + match action { + SyncAction::OpenBatch { + number, + first_miniblock_info, + .. + } => { + assert_eq!(number, L1BatchNumber(1)); + current_miniblock_number += 1; // First miniblock is implicitly opened + tx_count_in_miniblock = 0; + assert_eq!(first_miniblock_info.0, current_miniblock_number); + } + SyncAction::SealBatch { .. } => unreachable!("L1 batches are not sealed in test"), + SyncAction::Miniblock { number, .. 
} => {
+                current_miniblock_number += 1;
+                tx_count_in_miniblock = 0;
+                assert_eq!(number, current_miniblock_number);
+            }
+            SyncAction::Tx(tx) => {
+                assert_eq!(tx.hash(), tx_hashes.pop_front().unwrap());
+                tx_count_in_miniblock += 1;
+            }
+            SyncAction::SealMiniblock => {
+                assert_eq!(
+                    tx_count_in_miniblock,
+                    miniblock_number_to_tx_count[&current_miniblock_number]
+                );
+                if current_miniblock_number == MiniblockNumber(2) {
+                    break;
+                }
+            }
+        }
+    }
+
+    stop_sender.send_replace(true);
+    fetcher_task.await.unwrap().unwrap();
+    server_handles.shutdown().await;
+}
diff --git a/core/lib/zksync_core/src/witness_generator/basic_circuits.rs b/core/lib/zksync_core/src/witness_generator/basic_circuits.rs
index 82f130583cbf..19a02e43a060 100644
--- a/core/lib/zksync_core/src/witness_generator/basic_circuits.rs
+++ b/core/lib/zksync_core/src/witness_generator/basic_circuits.rs
@@ -11,7 +11,7 @@ use std::{
 };
 
 use multivm::vm_latest::{
-    constants::MAX_CYCLES_FOR_TX, HistoryDisabled, SimpleMemory, StorageOracle,
+    constants::MAX_CYCLES_FOR_TX, HistoryDisabled, SimpleMemory, StorageOracle as VmStorageOracle,
 };
 use zksync_config::configs::witness_generator::BasicWitnessGeneratorDataSource;
 use zksync_config::configs::WitnessGeneratorConfig;
@@ -37,7 +37,7 @@ use zksync_utils::{bytes_to_chunks, expand_memory_contents, h256_to_u256, u256_t
 
 use super::{
     precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider,
-    utils::save_prover_input_artifacts, METRICS,
+    storage_oracle::StorageOracle, utils::save_prover_input_artifacts, METRICS,
 };
 
 pub struct BasicCircuitArtifacts {
@@ -380,17 +380,11 @@ pub async fn build_basic_circuits_witness_generator_input(
         .await
         .unwrap()
         .unwrap();
-    let (_, previous_block_timestamp) = connection
+    let (previous_block_hash, previous_block_timestamp) = connection
         .blocks_dal()
         .get_l1_batch_state_root_and_timestamp(l1_batch_number - 1)
         .await
         .unwrap()
-        .unwrap();
-    let previous_block_hash = connection
-        .blocks_dal()
-        .get_l1_batch_state_root(l1_batch_number - 1)
-        .await
-        .unwrap()
         .expect("cannot generate witness before the root hash is computed");
     BasicCircuitWitnessGeneratorInput {
         block_number: l1_batch_number,
@@ -464,6 +458,12 @@ pub async fn generate_witness(
         .await
         .unwrap()
         .expect("L1 batch should contain at least one miniblock");
+    let storage_refunds = connection
+        .blocks_dal()
+        .get_storage_refunds(input.block_number)
+        .await
+        .unwrap()
+        .unwrap();
     drop(connection);
 
     let rt_handle = tokio::runtime::Handle::current();
@@ -520,8 +520,9 @@ pub async fn generate_witness(
     let storage_view = StorageView::new(storage);
     let storage_view = storage_view.to_rc_ptr();
-    let storage_oracle: StorageOracle<StorageView<PostgresStorage<'_>>, HistoryDisabled> =
-        StorageOracle::new(storage_view);
+    let vm_storage_oracle: VmStorageOracle<StorageView<PostgresStorage<'_>>, HistoryDisabled> =
+        VmStorageOracle::new(storage_view);
+    let storage_oracle = StorageOracle::new(vm_storage_oracle, storage_refunds);
     let memory: SimpleMemory<HistoryDisabled> = SimpleMemory::default();
     let mut hasher = DefaultHasher::new();
     GEOMETRY_CONFIG.hash(&mut hasher);
diff --git a/core/lib/zksync_core/src/witness_generator/mod.rs b/core/lib/zksync_core/src/witness_generator/mod.rs
index 268e6073db58..10a7ff861bd8 100644
--- a/core/lib/zksync_core/src/witness_generator/mod.rs
+++ b/core/lib/zksync_core/src/witness_generator/mod.rs
@@ -49,6 +49,7 @@ pub mod leaf_aggregation;
 pub mod node_aggregation;
 mod precalculated_merkle_paths_provider;
 pub mod scheduler;
+mod storage_oracle;
 #[cfg(test)]
 mod tests;
 mod utils;
diff --git a/core/lib/zksync_core/src/witness_generator/storage_oracle.rs b/core/lib/zksync_core/src/witness_generator/storage_oracle.rs
new file mode 100644
index 000000000000..112b4eb5988c
--- /dev/null
+++ b/core/lib/zksync_core/src/witness_generator/storage_oracle.rs
@@ -0,0 +1,46 @@
+use zksync_types::zkevm_test_harness::zk_evm::abstractions::{
+    RefundType, RefundedAmounts, Storage,
+};
+use zksync_types::{LogQuery, Timestamp};
+
+#[derive(Debug)]
+pub(super) struct StorageOracle<T> {
+    inn: T,
+    storage_refunds: std::vec::IntoIter<u32>,
+}
+
+impl<T> StorageOracle<T> {
+    pub fn new(inn: T, storage_refunds: Vec<u32>) -> Self {
+        Self {
+            inn,
+            storage_refunds: storage_refunds.into_iter(),
+        }
+    }
+}
+
+impl<T: Storage> Storage for StorageOracle<T> {
+    fn estimate_refunds_for_write(
+        &mut self,
+        _monotonic_cycle_counter: u32,
+        _partial_query: &LogQuery,
+    ) -> RefundType {
+        let pubdata_bytes = self.storage_refunds.next().expect("Missing refund");
+        RefundType::RepeatedWrite(RefundedAmounts {
+            pubdata_bytes,
+            ergs: 0,
+        })
+    }
+
+    fn execute_partial_query(&mut self, monotonic_cycle_counter: u32, query: LogQuery) -> LogQuery {
+        self.inn
+            .execute_partial_query(monotonic_cycle_counter, query)
+    }
+
+    fn finish_frame(&mut self, timestamp: Timestamp, panicked: bool) {
+        self.inn.finish_frame(timestamp, panicked)
+    }
+
+    fn start_frame(&mut self, timestamp: Timestamp) {
+        self.inn.start_frame(timestamp)
+    }
+}
diff --git a/core/tests/vm-benchmark/harness/Cargo.toml b/core/tests/vm-benchmark/harness/Cargo.toml
index 8a08e11d03b6..071f49c6e94b 100644
--- a/core/tests/vm-benchmark/harness/Cargo.toml
+++ b/core/tests/vm-benchmark/harness/Cargo.toml
@@ -11,5 +11,5 @@ zksync_state = { path = "../../../lib/state" }
 zksync_utils = { path = "../../../lib/utils" }
 zksync_system_constants = { path = "../../../lib/constants" }
 zksync_contracts = { path = "../../../lib/contracts" }
-zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc1" }
+zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" }
 once_cell = "1.17"
diff --git a/docs/advanced/bytecode_compression.md b/docs/advanced/bytecode_compression.md
index 69b9195b0ce2..78cd37e5ca4b 100644
--- a/docs/advanced/bytecode_compression.md
+++ b/docs/advanced/bytecode_compression.md
@@ -8,7 +8,7 @@ reconstructed from L1 if needed).
 Given the want/need to cut down on space used, bytecode is compressed prior to being posted to L1. At a high level,
 bytecode is chunked into opcodes (which have a size of 8 bytes), each chunk is assigned a 2 byte index, and the newly
 formed byte sequence (indexes) is verified and sent to L1. This process is split into 2 different parts: (1)
-[the server side operator](https://github.com/matter-labs/zksync-2-dev/blob/main/core/lib/utils/src/bytecode.rs#L31)
+[the server side operator](https://github.com/matter-labs/zksync-era/blob/main/core/lib/utils/src/bytecode.rs#L31)
 handling the compression and (2)
 [the system contract](https://github.com/matter-labs/system-contracts/blob/main/contracts/BytecodeCompressor.sol)
 verifying that the compression is correct before sending to L1.
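To make the chunk-and-index scheme concrete, here is a toy sketch. It is not the actual encoder in `bytecode.rs`, which additionally enforces dictionary size limits and performs verification; it only illustrates the shape of the transformation described above.

```rust
use std::collections::HashMap;

// Split bytecode into 8-byte chunks, build a dictionary of unique chunks,
// and emit a 2-byte index per chunk.
fn compress(bytecode: &[u8]) -> (Vec<[u8; 8]>, Vec<u16>) {
    assert_eq!(bytecode.len() % 8, 0, "bytecode length must be a multiple of 8");
    let mut dictionary: Vec<[u8; 8]> = Vec::new();
    let mut positions: HashMap<[u8; 8], u16> = HashMap::new();
    let mut indexes = Vec::new();
    for chunk in bytecode.chunks_exact(8) {
        let chunk: [u8; 8] = chunk.try_into().unwrap();
        // Reuse the existing index for a repeated chunk; otherwise extend the dictionary.
        let index = *positions.entry(chunk).or_insert_with(|| {
            dictionary.push(chunk);
            (dictionary.len() - 1) as u16
        });
        indexes.push(index);
    }
    (dictionary, indexes)
}
```

diff --git a/docs/advanced/contracts.md b/docs/advanced/contracts.md
index ddbce2d423bf..c0d309bdec2c 100644
--- a/docs/advanced/contracts.md
+++ b/docs/advanced/contracts.md
@@ -59,7 +59,7 @@ If you look on your hardhat example, you'll notice that your `deploy.ts` is actu
 `hardhat-zksync-deploy` plugin.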
Which inside uses the zkSync's web3.js, that calls the contract deployer -[here](https://github.com/matter-labs/zksync-2-dev/blob/a2853871778cebe8f09faebe6f2f5c07d29b81f1/sdk/zksync-web3.js/src/contract.ts#L62) +[here](https://github.com/matter-labs/zksync-era/blob/main/sdk/zksync-web3.js/src/contract.ts#L44) ```typescript override getDeployTransaction(..) { diff --git a/docs/advanced/prover.md b/docs/advanced/prover.md index e20c8ddb8d6a..02e69c4d38e2 100644 --- a/docs/advanced/prover.md +++ b/docs/advanced/prover.md @@ -214,7 +214,7 @@ it can be used as 'a' or 'b' in the Select gate example above). ### CSVarLengthEncodable -Implements CircuitVarLengthEncodable - which allows encoding the struct into a vector of varaibles (think about it as +Implements CircuitVarLengthEncodable - which allows encoding the struct into a vector of variables (think about it as serializing to Bytes). ### Summary @@ -401,7 +401,7 @@ And we'll run it over all the operands: out_of_circuit_vm .cycle(&mut tracer) - .expect("cycle should finish succesfully"); + .expect("cycle should finish successfully"); } ``` diff --git a/docs/advanced/zk_intuition.md b/docs/advanced/zk_intuition.md index 16c32dd6762b..f29d03991eec 100644 --- a/docs/advanced/zk_intuition.md +++ b/docs/advanced/zk_intuition.md @@ -138,7 +138,7 @@ this guide was written, the latest version was 1.3.4, but there was also ongoing version 1.4.0. [witness_example]: - https://github.com/matter-labs/zkevm_test_harness/blob/0c17bc7baa4e0b64634d414942ef4200d8613bbd/src/witness/individual_circuits/decommit_code.rs#L23 + https://github.com/matter-labs/era-zkevm_test_harness/tree/main/src/witness/individual_circuits/decommit_code.rs#L24 [verifier]: https://github.com/matter-labs/zksync-2-contracts/blob/d9785355518edc7f686fb2c91ff7d1caced9f9b8/ethereum/contracts/zksync/Plonk4VerifierWithAccessToDNext.sol#L284 [bellman repo]: https://github.com/matter-labs/bellman diff --git a/etc/upgrades/1696936385-refunds-enhancement/mainnet2/transactions.json b/etc/upgrades/1696936385-refunds-enhancement/mainnet2/transactions.json new file mode 100644 index 000000000000..78314cf95f40 --- /dev/null +++ b/etc/upgrades/1696936385-refunds-enhancement/mainnet2/transactions.json @@ -0,0 +1,59 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x6530f010" + }, + "factoryDeps": [], + "newProtocolVersion": "16", + "newAllowList": "0x0000000000000000000000000000000000000000" + 
}, + "l1upgradeCalldata": "0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000004e0000000000000000000000000000000000000000000000000000000006530f01000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0x91Ca046daD8c3Db41f296267E1720d9C940f613d", + "protocolVersion": "16", + "diamondUpgradeProposalId": { + "type": "BigNumber", + "hex": "0x09" + }, + "upgradeTimestamp": "1697706000", + "proposeTransparentUpgradeCalldata": 
"0x8043760a00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000006000000000000000000000000091ca046dad8c3db41f296267e1720d9c940f613d0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005241ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000004e0000000000000000000000000000000000000000000000000000000006530f01000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "transparentUpgrade": { + "facetCuts": [], + "initAddress": "0x91Ca046daD8c3Db41f296267E1720d9C940f613d", + "initCalldata": 
"0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000004e0000000000000000000000000000000000000000000000000000000006530f01000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "executeUpgradeCalldata": 
"0x36d4eb8400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000091ca046dad8c3db41f296267e1720d9c940f613d0000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005241ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000004e0000000000000000000000000000000000000000000000000000000006530f01000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 5804890ac663..73ecf49890f2 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -95,9 +95,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] @@ -221,20 +221,20 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -405,7 +405,7 @@ dependencies = [ "lazycell", "log", "peeking_take_while", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "regex", "rustc-hash", @@ -426,12 +426,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "regex", "rustc-hash", "shlex", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -440,7 +440,7 @@ version = "0.68.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726e4313eb6ec35d2730258ad4e15b547ee75d6afaa1361a922e78e59b7d8078" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "cexpr", "clang-sys", "lazy_static", @@ -448,12 +448,12 @@ dependencies = [ "log", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "regex", "rustc-hash", "shlex", - "syn 2.0.37", + "syn 2.0.38", "which", ] @@ -483,9 +483,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "bitvec" @@ -531,6 +531,14 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = "git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e#1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "blake2-rfc_bellman_edition" version = "0.0.1" @@ -611,11 +619,11 @@ checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] name = "boojum" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum?branch=main#2771569baab9a59690d88cee6ba9b295c8a1e4c4" +source = "git+https://github.com/matter-labs/era-boojum?branch=main#84754b066959c8fdfb77edf730fc13ed87404907" dependencies = [ "arrayvec 0.7.4", "bincode", - "blake2 0.10.6", + "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "const_format", "convert_case 0.6.0", "crossbeam 0.8.2", @@ -633,7 +641,7 @@ dependencies = [ "rand 0.8.5", "rayon", "serde", - "sha2 0.10.6", + "sha2 0.10.8", "sha3 0.10.6", "smallvec", "tracing", @@ -643,7 +651,7 @@ dependencies = [ [[package]] name = "boojum-cuda" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum-cuda?branch=main#4326c66de077a159bdbfe9d2575cb248696f0df9" +source = 
"git+https://github.com/matter-labs/era-boojum-cuda?branch=main#ecf246a02d89954f277de16cbbe67ddc5ef386e1" dependencies = [ "boojum", "cmake", @@ -673,9 +681,9 @@ checksum = "ad152d03a2c813c80bb94fedbf3a3f02b28f793e39e7c214c8a0bcc196343de7" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" @@ -705,9 +713,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfa25e60aea747ec7e1124f238816749faa93759c6ff5b31f1ccdda137f4479" +checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" dependencies = [ "serde", ] @@ -816,12 +824,10 @@ dependencies = [ [[package]] name = "circuit_definitions" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#3cd647aa57fc2e1180bab53f7a3b61ec47502a46" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#43aeb53d7d9c909508a98f9fc140edff0e9d2357" dependencies = [ "crossbeam 0.8.2", "derivative", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper)", - "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2)", "serde", "snark_wrapper", "zk_evm 1.4.0", @@ -960,20 +966,20 @@ checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" [[package]] name = "const_format" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c990efc7a285731f9a4378d81aff2f0e85a2c8781a05ef0f8baa8dac54d0ff48" +checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.31" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e026b6ce194a874cb9cf32cd5772d1ef9767cc8fcb5765948d74f37a9d8b2bf6" +checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "unicode-xid 0.2.4", ] @@ -1285,10 +1291,10 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum?branch=main#2771569baab9a59690d88cee6ba9b295c8a1e4c4" +source = "git+https://github.com/matter-labs/era-boojum?branch=main#84754b066959c8fdfb77edf730fc13ed87404907" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "syn 1.0.109", ] @@ -1296,10 +1302,10 @@ dependencies = [ [[package]] name = "cs_derive" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#3a21c8dee43c77604350fdf33c1615e25bf1dacd" +source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#dad50e7eb7462a3819af8d5209d6ca243395bf51" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "serde", "syn 1.0.109", @@ -1327,9 +1333,9 @@ dependencies = [ [[package]] name = "cudart" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum-cuda?branch=main#4326c66de077a159bdbfe9d2575cb248696f0df9" +source = 
"git+https://github.com/matter-labs/era-boojum-cuda?branch=main#ecf246a02d89954f277de16cbbe67ddc5ef386e1" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "criterion", "cudart-sys", ] @@ -1337,7 +1343,7 @@ dependencies = [ [[package]] name = "cudart-sys" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum-cuda?branch=main#4326c66de077a159bdbfe9d2575cb248696f0df9" +source = "git+https://github.com/matter-labs/era-boojum-cuda?branch=main#ecf246a02d89954f277de16cbbe67ddc5ef386e1" dependencies = [ "bindgen 0.68.1", "serde_json", @@ -1361,7 +1367,7 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "strsim 0.10.0", "syn 1.0.109", @@ -1388,7 +1394,7 @@ dependencies = [ "hashbrown 0.14.1", "lock_api", "once_cell", - "parking_lot_core 0.9.8", + "parking_lot_core 0.9.9", ] [[package]] @@ -1424,10 +1430,11 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" dependencies = [ + "powerfmt", "serde", ] @@ -1437,7 +1444,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "syn 1.0.109", ] @@ -1449,7 +1456,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "rustc_version", "syn 1.0.109", @@ -1609,25 +1616,14 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add4f07d43996f76ef320709726a556a9d4f965d9410d8d0271132d2f8293480" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "errno-dragonfly", "libc", "windows-sys", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "error-chain" version = "0.12.4" @@ -1649,7 +1645,7 @@ dependencies = [ "regex", "serde", "serde_json", - "sha3 0.10.6", + "sha3 0.10.8", "thiserror", "uint", ] @@ -1704,7 +1700,7 @@ dependencies = [ "fixed-hash 0.8.0", "impl-rlp", "impl-serde 0.4.0", - "primitive-types 0.12.1", + "primitive-types 0.12.2", "uint", ] @@ -1761,7 +1757,7 @@ dependencies = [ "num-bigint 0.4.4", "num-integer", "num-traits", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "serde", "syn 1.0.109", @@ -1878,7 +1874,7 @@ dependencies = [ [[package]] name = "franklin-crypto" version = "0.0.5" -source = "git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper#b1f1677d9ece6a6ddcfbf394dc2db2bd8b792e6c" +source = "git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper#900332b8c2fe528b5008bb4e6bf2d3f206a9ae56" dependencies = [ "arr_macro", "bellman_ce 0.3.2 
(git+https://github.com/matter-labs/bellman?branch=snark-wrapper)", @@ -2008,9 +2004,9 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2149,7 +2145,7 @@ dependencies = [ "rsa", "serde", "serde_json", - "sha2 0.10.6", + "sha2 0.10.8", "thiserror", "time", "tokio", @@ -2488,16 +2484,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows", + "windows-core", ] [[package]] @@ -2576,7 +2572,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "syn 1.0.109", ] @@ -2659,9 +2655,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" dependencies = [ "libc", ] @@ -2713,7 +2709,7 @@ dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sha2 0.10.6", + "sha2 0.10.8", ] [[package]] @@ -2742,9 +2738,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.148" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libloading" @@ -2758,9 +2754,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "librocksdb-sys" @@ -2789,29 +2785,29 @@ dependencies = [ [[package]] name = "linkme" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f948366ad5bb46b5514ba7a7a80643726eef08b06632592699676748c8bc33b" +checksum = "91ed2ee9464ff9707af8e9ad834cffa4802f072caad90639c583dd3c62e6e608" dependencies = [ "linkme-impl", ] [[package]] name = "linkme-impl" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc28438cad73dcc90ff3466fc329a9252b1b8ba668eb0d5668ba97088cf4eef0" +checksum = "ba125974b109d512fccbc6c0244e7580143e460895dfd6ea7f8bbb692fd94396" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "linux-raw-sys" -version = "0.4.8" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "3852614a3bd9ca9804678ba6be5e3b8ce76dfc902cae004e3e0c44051b6e88db" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "local-ip-address" @@ -2827,9 +2823,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ "autocfg 1.1.0", "scopeguard", @@ -2940,9 +2936,9 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3017,6 +3013,26 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "multivm" +version = "0.1.0" +dependencies = [ + "anyhow", + "hex", + "itertools 0.10.5", + "once_cell", + "thiserror", + "tracing", + "vise", + "zk_evm 1.3.1", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", + "zksync_contracts", + "zksync_state", + "zksync_system_constants", + "zksync_types", + "zksync_utils", +] + [[package]] name = "native-tls" version = "0.2.11" @@ -3054,7 +3070,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" dependencies = [ "either", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "serde", "syn 1.0.109", @@ -3066,7 +3082,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "cfg-if 1.0.0", "libc", ] @@ -3202,7 +3218,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "syn 1.0.109", ] @@ -3265,9 +3281,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg 1.1.0", "libm", @@ -3299,9 +3315,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3337,7 +3353,7 @@ version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3352,9 +3368,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3486,7 +3502,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "syn 1.0.109", ] @@ -3498,7 +3514,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "syn 1.0.109", ] @@ -3521,7 +3537,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.8", + "parking_lot_core 0.9.9", ] [[package]] @@ -3540,13 +3556,13 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "smallvec", "windows-targets", ] @@ -3648,9 +3664,9 @@ checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3661,7 +3677,7 @@ checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" dependencies = [ "once_cell", "pest", - "sha2 0.10.6", + "sha2 0.10.8", ] [[package]] @@ -3679,9 +3695,9 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3768,6 +3784,12 @@ version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -3780,8 +3802,8 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ - "proc-macro2 1.0.67", - "syn 2.0.37", + "proc-macro2 1.0.69", + "syn 2.0.38", ] [[package]] @@ -3799,9 +3821,9 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash 0.8.0", "impl-codec 0.6.0", @@ -3827,7 +3849,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "syn 1.0.109", "version_check", @@ -3839,7 +3861,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "version_check", ] @@ -3861,9 +3883,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -3886,9 +3908,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3911,7 +3933,7 @@ checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.0", + "bitflags 2.4.1", "lazy_static", "num-traits", "rand 0.8.5", @@ -3993,7 +4015,7 @@ version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", ] [[package]] @@ -4261,6 +4283,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -4274,14 +4305,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.6" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebee201405406dbf528b8b672104ae6d6d63e6d118cb10e4d51abbc7b58044ff" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.9", - "regex-syntax 0.7.5", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -4295,13 +4326,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.9" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b23e92ee4318893fa3fe3e6fb365258efbfe6ac6ab30f090cdcbb7aa37efa9" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.5", + "regex-syntax 0.8.2", ] [[package]] @@ -4316,6 +4347,12 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +[[package]] +name = "regex-syntax" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" + [[package]] name = "reqwest" version = "0.11.22" @@ -4365,12 +4402,11 @@ dependencies = [ [[package]] name = "rescue_poseidon" version = "0.4.1" -source = "git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2#09b96e7e82dadac151d8d681f017cb6a16961801" +source = "git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2#c4a788471710bdb7aa0f59e8756b45ef93cdd2b2" dependencies = [ "addchain", "arrayvec 0.7.4", - "blake2 0.10.6", - "boojum", + "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", 
"byteorder", "derivative", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper)", @@ -4392,7 +4428,7 @@ source = "git+https://github.com/matter-labs/rescue-poseidon#d059b5042df5ed80e15 dependencies = [ "addchain", "arrayvec 0.7.4", - "blake2 0.10.6", + "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder", "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=dev)", "num-bigint 0.3.3", @@ -4511,11 +4547,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.17" +version = "0.38.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f25469e9ae0f3d0047ca8b93fc56843f38e6774f0914a107ff8b41be8be8e0b7" +checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", @@ -4712,9 +4748,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad977052201c6de01a8ef2aa3378c4bd23217a056337d1d6da40468d267a4fb0" +checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" dependencies = [ "serde", ] @@ -4829,22 +4865,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4888,7 +4924,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "syn 1.0.109", ] @@ -4948,8 +4984,18 @@ dependencies = [ [[package]] name = "sha2" version = "0.10.6" +source = "git+https://github.com/RustCrypto/hashes.git?rev=1731ced4a116d61ba9dc6ee6d0f38fb8102e357a#1731ced4a116d61ba9dc6ee6d0f38fb8102e357a" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha2" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -4971,8 +5017,17 @@ dependencies = [ [[package]] name = "sha3" version = "0.10.6" +source = "git+https://github.com/RustCrypto/hashes.git?rev=7a187e934c1f6c68e4b4e5cf37541b7a0d64d303#7a187e934c1f6c68e4b4e5cf37541b7a0d64d303" +dependencies = [ + "digest 0.10.7", + "keccak", +] + +[[package]] +name = "sha3" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" dependencies = [ "digest 0.10.7", 
"keccak", @@ -4990,10 +5045,10 @@ dependencies = [ [[package]] name = "shivini" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-shivini.git?branch=main#2c73a10ff0310e7f99a0d9a8cea043e697eaa5e4" +source = "git+https://github.com/matter-labs/era-shivini.git?branch=main#2a40d9376bdb92d563e1eacaddcbcfaedd09de89" dependencies = [ "bincode", - "blake2 0.10.6", + "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "boojum", "boojum-cuda", "circuit_definitions", @@ -5002,7 +5057,7 @@ dependencies = [ "hex", "rand 0.8.5", "serde", - "sha2 0.10.6", + "sha2 0.10.8", "smallvec", ] @@ -5085,11 +5140,9 @@ dependencies = [ [[package]] name = "snark_wrapper" version = "0.1.0" -source = "git+https://github.com/matter-labs/snark-wrapper.git?branch=main#450ea6c9f3ede11e149b86ad3a072e673f9846e7" +source = "git+https://github.com/matter-labs/snark-wrapper.git?branch=main#52f9ef98a7e6c86b405dd0ec42291dacf6e2bcb4" dependencies = [ - "boojum", "derivative", - "franklin-crypto 0.0.5 (git+https://github.com/matter-labs/franklin-crypto?branch=snark_wrapper)", "rand 0.4.6", "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2)", ] @@ -5209,7 +5262,7 @@ dependencies = [ "serde", "serde_json", "sha-1", - "sha2 0.10.6", + "sha2 0.10.8", "smallvec", "sqlformat", "sqlx-rt", @@ -5231,11 +5284,11 @@ dependencies = [ "heck 0.4.1", "hex", "once_cell", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "serde", "serde_json", - "sha2 0.10.6", + "sha2 0.10.8", "sqlx-core", "sqlx-rt", "syn 1.0.109", @@ -5308,7 +5361,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "syn 1.0.109", ] @@ -5329,7 +5382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "rustversion", "syn 1.0.109", @@ -5358,18 +5411,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.37" +version = "2.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", "unicode-ident", ] @@ -5377,7 +5430,7 @@ dependencies = [ [[package]] name = "sync_vm" version = "1.3.3" -source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#3a21c8dee43c77604350fdf33c1615e25bf1dacd" +source = "git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3#dad50e7eb7462a3819af8d5209d6ca243395bf51" dependencies = [ "arrayvec 0.7.4", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-sync_vm.git?branch=v1.3.3)", @@ -5393,11 +5446,11 @@ dependencies = [ "rand 0.4.6", "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon)", "serde", - "sha2 0.10.6", + "sha2 0.10.8", "sha3 0.10.6", "smallvec", - "zk_evm 1.3.3", - "zkevm_opcode_defs", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", + 
"zkevm_opcode_defs 1.3.2", ] [[package]] @@ -5457,13 +5510,13 @@ dependencies = [ [[package]] name = "test-log" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9601d162c1d77e62c1ea0bc8116cd1caf143ce3af947536c3c9052a1677fe0c" +checksum = "f66edd6b6cd810743c0c71e1d085e92b01ce6a72782032e3f794c8284fe4bcdd" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 1.0.109", + "syn 2.0.38", ] [[package]] @@ -5490,9 +5543,9 @@ version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -5507,12 +5560,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" +checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ "deranged", "itoa", + "powerfmt", "serde", "time-core", "time-macros", @@ -5578,9 +5632,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" dependencies = [ "backtrace", "bytes", @@ -5601,9 +5655,9 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -5687,11 +5741,10 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if 1.0.0", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -5699,20 +5752,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -5921,9 +5974,9 @@ checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" [[package]] name = "uuid" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" dependencies = [ "serde", ] @@ -5982,9 +6035,9 @@ name = "vise-macros" version = "0.1.0" source = "git+https://github.com/matter-labs/vise.git?rev=dd05139b76ab0843443ab3ff730174942c825dae#dd05139b76ab0843443ab3ff730174942c825dae" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -6022,25 +6075,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "vm" -version = "0.1.0" -dependencies = [ - "anyhow", - "hex", - "itertools 0.10.5", - "once_cell", - "thiserror", - "tracing", - "vise", - "zk_evm 1.3.3", - "zksync_config", - "zksync_contracts", - "zksync_state", - "zksync_types", - "zksync_utils", -] - [[package]] name = "wait-timeout" version = "0.2.0" @@ -6100,9 +6134,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -6134,9 +6168,9 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.67", + "proc-macro2 1.0.69", "quote 1.0.33", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6261,10 +6295,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows" -version = "0.48.0" +name = "windows-core" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ "windows-targets", ] @@ -6337,9 +6371,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.15" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" dependencies = [ "memchr", ] @@ -6375,6 +6409,38 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +[[package]] +name = "zk_evm" +version = "1.3.1" +source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.1-rc2#0a7c775932db4839ff6b7fb0db9bdb3583ab54c0" +dependencies = [ + "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", + "k256", + "lazy_static", + "num 0.4.1", + "serde", + "serde_json", + "sha2 0.10.6", + "sha3 0.10.6", + "static_assertions", + "zkevm_opcode_defs 1.3.1", +] + +[[package]] +name = "zk_evm" +version = "1.3.3" +source = "git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1#fe8215a7047d24430ad470cf15a19bedb4d6ba0b" +dependencies = [ + "anyhow", + "lazy_static", + "num 0.4.1", + "serde", + "serde_json", + "static_assertions", + "zk_evm_abstractions", + "zkevm_opcode_defs 1.3.2", +] + [[package]] name = "zk_evm" version = "1.3.3" @@ -6387,13 +6453,13 @@ dependencies = [ "serde_json", "static_assertions", "zk_evm_abstractions", - "zkevm_opcode_defs", + "zkevm_opcode_defs 1.3.2", ] 
[[package]] name = "zk_evm" version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.0#e33a5ded1b53e35d261fdb46e6d16f2c900b217f" +source = "git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.4.0#dd76fc5badf2c05278a21b38015a7798fe2fe358" dependencies = [ "anyhow", "lazy_static", @@ -6402,7 +6468,7 @@ dependencies = [ "serde_json", "static_assertions", "zk_evm_abstractions", - "zkevm_opcode_defs", + "zkevm_opcode_defs 1.3.2", ] [[package]] @@ -6413,7 +6479,7 @@ dependencies = [ "anyhow", "serde", "static_assertions", - "zkevm_opcode_defs", + "zkevm_opcode_defs 1.3.2", ] [[package]] @@ -6428,11 +6494,11 @@ dependencies = [ "nom", "num-bigint 0.4.4", "num-traits", - "sha3 0.10.6", + "sha3 0.10.8", "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs", + "zkevm_opcode_defs 1.3.2", ] [[package]] @@ -6452,16 +6518,27 @@ dependencies = [ "serde", "serde_json", "smallvec", - "zkevm_opcode_defs", + "zkevm_opcode_defs 1.3.2", +] + +[[package]] +name = "zkevm_opcode_defs" +version = "1.3.1" +source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.1#00d4ad2292bd55374a0fa10fe11686d7a109d8a0" +dependencies = [ + "bitflags 1.3.2", + "ethereum-types 0.14.1", + "lazy_static", + "sha2 0.10.8", ] [[package]] name = "zkevm_opcode_defs" version = "1.3.2" -source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#c7ab62f4c60b27dfc690c3ab3efb5fff1ded1a25" +source = "git+https://github.com/matter-labs/era-zkevm_opcode_defs.git?branch=v1.3.2#dffacadeccdfdbff4bc124d44c595c4a6eae5013" dependencies = [ - "bitflags 2.4.0", - "blake2 0.10.6", + "bitflags 2.4.1", + "blake2 0.10.6 (git+https://github.com/RustCrypto/hashes.git?rev=1f727ce37ff40fa0cce84eb8543a45bdd3ca4a4e)", "ethereum-types 0.14.1", "k256", "lazy_static", @@ -6492,14 +6569,14 @@ dependencies = [ "sync_vm", "test-log", "tracing", - "zk_evm 1.3.3", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?branch=v1.3.3)", "zkevm-assembly", ] [[package]] name = "zkevm_test_harness" version = "1.4.0" -source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#3cd647aa57fc2e1180bab53f7a3b61ec47502a46" +source = "git+https://github.com/matter-labs/era-zkevm_test_harness.git?branch=v1.4.0#43aeb53d7d9c909508a98f9fc140edff0e9d2357" dependencies = [ "bincode", "circuit_definitions", @@ -6508,12 +6585,11 @@ dependencies = [ "derivative", "env_logger 0.10.0", "hex", + "rand 0.4.6", "rayon", - "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2)", "serde", "serde_json", "smallvec", - "snark_wrapper", "structopt", "test-log", "tracing", @@ -6615,7 +6691,7 @@ name = "zksync_crypto" version = "0.1.0" dependencies = [ "base64 0.13.1", - "blake2 0.10.6", + "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "hex", "once_cell", "serde", @@ -6643,9 +6719,9 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_config", "zksync_contracts", "zksync_health_check", + "zksync_system_constants", "zksync_types", "zksync_utils", ] @@ -6923,11 +6999,28 @@ dependencies = [ "vise", ] +[[package]] +name = "zksync_system_constants" +version = "0.1.0" +dependencies = [ + "anyhow", + "bigdecimal", + "hex", + "num 0.3.1", + "once_cell", + "serde", + "serde_json", + "url", + "zksync_basic_types", + "zksync_contracts", + "zksync_utils", +] + [[package]] name = "zksync_types" version = "0.1.0" dependencies = [ - "blake2 0.10.6", + "blake2 0.10.6 
(registry+https://github.com/rust-lang/crates.io-index)", "chrono", "codegen 0.1.0", "ethereum-types 0.12.1", @@ -6942,12 +7035,12 @@ dependencies = [ "serde_with", "strum", "thiserror", - "zk_evm 1.3.3", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", "zkevm_test_harness 1.3.3", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_mini_merkle_tree", + "zksync_system_constants", "zksync_utils", ] @@ -6968,7 +7061,7 @@ dependencies = [ "tokio", "tracing", "vlog", - "zk_evm 1.3.3", + "zk_evm 1.3.3 (git+https://github.com/matter-labs/era-zk_evm.git?tag=v1.3.3-rc1)", "zksync_basic_types", ] @@ -7004,6 +7097,7 @@ dependencies = [ "futures 0.3.28", "hex", "metrics", + "multivm", "prometheus_exporter", "rand 0.8.5", "serde", @@ -7013,7 +7107,6 @@ dependencies = [ "tracing", "vk_setup_data_generator_server_fri", "vlog", - "vm", "zk_evm 1.4.0", "zkevm_test_harness 1.4.0", "zksync_config", @@ -7024,6 +7117,7 @@ dependencies = [ "zksync_prover_utils", "zksync_queued_job_processor", "zksync_state", + "zksync_system_constants", "zksync_types", "zksync_utils", ] diff --git a/prover/proof_fri_compressor/src/compressor.rs b/prover/proof_fri_compressor/src/compressor.rs index 43f7b2d008c6..df9faaff257a 100644 --- a/prover/proof_fri_compressor/src/compressor.rs +++ b/prover/proof_fri_compressor/src/compressor.rs @@ -3,7 +3,7 @@ use async_trait::async_trait; use std::time::Instant; use tokio::task::JoinHandle; -use zkevm_test_harness::proof_wrapper_utils::wrap_proof; +use zkevm_test_harness::proof_wrapper_utils::{wrap_proof, WrapperConfig}; use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; @@ -55,7 +55,9 @@ impl ProofCompressor { ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, ) .context("get_recursiver_layer_vk_for_circuit_type()")?; - let (wrapper_proof, _) = wrap_proof(proof, scheduler_vk, compression_mode); + let config = WrapperConfig::new(compression_mode); + + let (wrapper_proof, _) = wrap_proof(proof, scheduler_vk, config); let inner = wrapper_proof.into_inner(); // (Re)serialization should always succeed. // TODO: is that true here? 
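Note on the `wrap_proof` hunks (here in `compressor.rs`, and again in `vk_setup_data_generator_server_fri/src/main.rs` below): the harness API now takes a `WrapperConfig` built via `WrapperConfig::new(compression_mode)` instead of a bare `u8`. A self-contained toy sketch of that parameter-to-config refactor, where the struct and stub below only mirror the shape of the real `zkevm_test_harness` types and are not its actual definitions:

```rust
// Toy sketch of the parameter-to-config refactor above. These types are
// illustrative stand-ins that only mirror the shape of the real
// zkevm_test_harness API; they are not its actual definitions.
struct WrapperConfig {
    compression_mode: u8,
}

impl WrapperConfig {
    // Mirrors the `WrapperConfig::new(compression_mode)` call sites in the diff.
    fn new(compression_mode: u8) -> Self {
        Self { compression_mode }
    }
}

// Same three-argument shape as `wrap_proof(proof, scheduler_vk, config)`.
fn wrap_proof(proof: Vec<u8>, scheduler_vk: Vec<u8>, config: WrapperConfig) -> (Vec<u8>, Vec<u8>) {
    // The real function picks a compression chain based on the mode;
    // this stub just threads its inputs through.
    let _ = config.compression_mode;
    (proof, scheduler_vk)
}

fn main() {
    let config = WrapperConfig::new(1);
    let (wrapper_proof, _wrapper_vk) = wrap_proof(vec![0u8; 32], vec![0u8; 32], config);
    println!("wrapped proof: {} byte(s)", wrapper_proof.len());
}
```

Bundling the scalar into a config keeps future options additive rather than signature-breaking, which is presumably why both call sites changed in lockstep.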
diff --git a/prover/prover_fri_gateway/src/api_data_fetcher.rs b/prover/prover_fri_gateway/src/api_data_fetcher.rs
index 7b3a814837cf..a009f1783f21 100644
--- a/prover/prover_fri_gateway/src/api_data_fetcher.rs
+++ b/prover/prover_fri_gateway/src/api_data_fetcher.rs
@@ -33,6 +33,7 @@ impl PeriodicApiStruct {
         Resp: DeserializeOwned,
     {
         tracing::info!("Sending request to {}", endpoint);
+
         self.client
             .post(endpoint)
             .json(&request)
diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs
index e2ac2e42dd93..1f00c7f74299 100644
--- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs
+++ b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs
@@ -33,6 +33,7 @@ impl PeriodicApiStruct {
 
 impl PeriodicApi for PeriodicApiStruct {
     type JobId = ();
     type Response = ProofGenerationDataResponse;
+    const SERVICE_NAME: &'static str = "ProofGenDataFetcher";
 
     async fn get_next_request(&self) -> Option<(Self::JobId, ProofGenerationDataRequest)> {
@@ -49,7 +50,10 @@ impl PeriodicApi for PeriodicApiStruct {
 
     async fn handle_response(&self, _: (), response: Self::Response) {
         match response {
-            ProofGenerationDataResponse::Success(data) => {
+            ProofGenerationDataResponse::Success(None) => {
+                tracing::info!("There are currently no pending batches to be proven");
+            }
+            ProofGenerationDataResponse::Success(Some(data)) => {
                 tracing::info!("Received proof gen data for: {:?}", data.l1_batch_number);
                 self.save_proof_gen_data(data).await;
             }
diff --git a/prover/vk_setup_data_generator_server_fri/src/main.rs b/prover/vk_setup_data_generator_server_fri/src/main.rs
index 464a01fe8697..503a4c322c3d 100644
--- a/prover/vk_setup_data_generator_server_fri/src/main.rs
+++ b/prover/vk_setup_data_generator_server_fri/src/main.rs
@@ -6,9 +6,9 @@ use circuit_definitions::circuit_definitions::recursion_layer::{
 use zkevm_test_harness::compute_setups::{
     generate_base_layer_vks_and_proofs, generate_recursive_layer_vks_and_proofs,
 };
+use zkevm_test_harness::data_source::in_memory_data_source::InMemoryDataSource;
 use zkevm_test_harness::data_source::SetupDataSource;
-use zkevm_test_harness::in_memory_data_source::InMemoryDataSource;
-use zkevm_test_harness::proof_wrapper_utils::wrap_proof;
+use zkevm_test_harness::proof_wrapper_utils::{wrap_proof, WrapperConfig};
 use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType;
 use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType;
 use zksync_prover_fri_types::ProverServiceDataKey;
@@ -126,7 +126,9 @@ fn generate_snark_vk(
     scheduler_vk: ZkSyncRecursionLayerVerificationKey,
     compression_mode: u8,
 ) -> anyhow::Result<()> {
-    let (_, vk) = wrap_proof(proof, scheduler_vk, compression_mode);
+    let config = WrapperConfig::new(compression_mode);
+
+    let (_, vk) = wrap_proof(proof, scheduler_vk, config);
     save_snark_vk(vk).context("save_snark_vk")
 }
 
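The proof_gen_data_fetcher change above is behavioral, not just cosmetic: ProofGenerationDataResponse::Success now carries an Option, so an empty proving queue is reported explicitly instead of being conflated with received data. A self-contained sketch of that match logic (the enum and struct below are simplified stand-ins, not the real zksync_types definitions; the Error variant and the plain u32 batch number are assumptions for illustration):

    // Simplified stand-ins for the real API types (assumptions, for illustration).
    enum ProofGenerationDataResponse {
        Success(Option<ProofGenerationData>),
        Error(String),
    }

    struct ProofGenerationData {
        l1_batch_number: u32, // the real field is an L1BatchNumber newtype
    }

    fn handle_response(response: ProofGenerationDataResponse) {
        match response {
            // An empty queue is now a distinct, loggable outcome...
            ProofGenerationDataResponse::Success(None) => {
                println!("There are currently no pending batches to be proven");
            }
            // ...separate from actually receiving a batch to prove.
            ProofGenerationDataResponse::Success(Some(data)) => {
                println!("Received proof gen data for: {:?}", data.l1_batch_number);
            }
            ProofGenerationDataResponse::Error(err) => {
                eprintln!("proof generation data request failed: {err}");
            }
        }
    }

    fn main() {
        handle_response(ProofGenerationDataResponse::Success(None));
        handle_response(ProofGenerationDataResponse::Success(Some(
            ProofGenerationData { l1_batch_number: 42 },
        )));
    }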
diff --git a/prover/vk_setup_data_generator_server_fri/src/utils.rs b/prover/vk_setup_data_generator_server_fri/src/utils.rs
index 6910a6d270f1..a7780edf7cd2 100644
--- a/prover/vk_setup_data_generator_server_fri/src/utils.rs
+++ b/prover/vk_setup_data_generator_server_fri/src/utils.rs
@@ -52,7 +52,7 @@ use zkevm_test_harness::witness::full_block_artifact::{
 use zkevm_test_harness::witness::recursive_aggregation::compute_leaf_params;
 use zkevm_test_harness::witness::tree::{BinarySparseStorageTree, ZKSyncTestingTree};
 
-use zkevm_test_harness::in_memory_data_source::InMemoryDataSource;
+use zkevm_test_harness::data_source::in_memory_data_source::InMemoryDataSource;
 
 pub const CYCLE_LIMIT: usize = 20000;
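For reference, the import move that both main.rs above and utils.rs here make, shown in isolation (a sketch; the old path is taken from the removed lines in this diff):

    // Before this zkevm_test_harness bump, the type sat at the crate root:
    //
    //     use zkevm_test_harness::in_memory_data_source::InMemoryDataSource;
    //
    // After the bump it lives under the data_source module, next to the
    // SetupDataSource trait that main.rs imports alongside it.
    use zkevm_test_harness::data_source::in_memory_data_source::InMemoryDataSource;
    use zkevm_test_harness::data_source::SetupDataSource;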