Skip to content

Commit

Permalink
Merge branch 'feat_validium_pubdata_abstraction' of github.com:lambda…
Browse files Browse the repository at this point in the history
…class/zksync-era into feat_get_batch_pubdata_endpoint
  • Loading branch information
ilitteri committed Jan 30, 2024
2 parents 806edd2 + 6ca23ca commit f637707
Show file tree
Hide file tree
Showing 14 changed files with 603 additions and 188 deletions.
6 changes: 5 additions & 1 deletion core/bin/external_node/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool};
use zksync_health_check::CheckHealth;
use zksync_state::PostgresStorageCaches;
use zksync_storage::RocksDB;
use zksync_types::l1_batch_committer::RollupModeL1BatchCommitter;
use zksync_utils::wait_for_tasks::wait_for_tasks;

mod config;
Expand Down Expand Up @@ -228,7 +229,10 @@ async fn init_tasks(
.context("failed to build a tree_pool")?;
let tree_handle = task::spawn(metadata_calculator.run(tree_pool, tree_stop_receiver));

let consistency_checker_handle = tokio::spawn(consistency_checker.run(stop_receiver.clone()));
let l1_batch_committer = Arc::new(RollupModeL1BatchCommitter {});

let consistency_checker_handle =
tokio::spawn(consistency_checker.run(stop_receiver.clone(), l1_batch_committer));

let updater_handle = task::spawn(batch_status_updater.run(stop_receiver.clone()));
let sk_handle = task::spawn(state_keeper.run());
Expand Down
12 changes: 9 additions & 3 deletions core/lib/types/src/aggregated_operations.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use std::{fmt, ops, str::FromStr};
use std::{fmt, ops, str::FromStr, sync::Arc};

use codegen::serialize_proof;
use serde::{Deserialize, Serialize};
Expand All @@ -9,7 +9,9 @@ use zkevm_test_harness::{
};
use zksync_basic_types::{ethabi::Token, L1BatchNumber};

use crate::{commitment::L1BatchWithMetadata, ProtocolVersionId, U256};
use crate::{
commitment::L1BatchWithMetadata, l1_batch_committer::L1BatchCommitter, ProtocolVersionId, U256,
};

fn l1_batch_range_from_batches(
batches: &[L1BatchWithMetadata],
Expand All @@ -29,6 +31,7 @@ fn l1_batch_range_from_batches(
/// Aggregated `commitBatches` operation: a set of L1 batches to be committed on L1.
pub struct L1BatchCommitOperation {
    /// The last batch already committed on L1; its stored info is passed alongside
    /// the new batches' commit data (see `get_eth_tx_args`).
    pub last_committed_l1_batch: L1BatchWithMetadata,
    /// Batches whose commit data will be encoded and submitted.
    pub l1_batches: Vec<L1BatchWithMetadata>,
    /// Strategy for ABI-encoding each batch — rollup mode includes full pubdata,
    /// validium mode omits it.
    pub l1_batch_committer: Arc<dyn L1BatchCommitter>,
}

impl L1BatchCommitOperation {
Expand All @@ -37,7 +40,10 @@ impl L1BatchCommitOperation {
let l1_batches_to_commit = self
.l1_batches
.iter()
.map(L1BatchWithMetadata::l1_commit_data)
.map(|l1_batch_with_metadata| {
self.l1_batch_committer
.l1_commit_data(l1_batch_with_metadata)
})
.collect();

vec![stored_batch_info, Token::Array(l1_batches_to_commit)]
Expand Down
120 changes: 0 additions & 120 deletions core/lib/types/src/commitment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -157,126 +157,6 @@ impl L1BatchWithMetadata {
Token::FixedBytes(self.metadata.commitment.as_bytes().to_vec()),
])
}

    /// Encodes the L1Batch into CommitBatchInfo (see IExecutor.sol).
    ///
    /// The field layout differs between pre-boojum and boojum protocol versions.
    /// Token order is ABI-critical: it must match the L1 contract's struct exactly,
    /// so the two branches below must not be reordered.
    ///
    /// # Panics
    /// Panics if `header.protocol_version` is `None`, or — on the post-boojum
    /// branch — if `bootloader_initial_content_commitment` or
    /// `events_queue_commitment` is `None`.
    pub fn l1_commit_data(&self) -> Token {
        if self.header.protocol_version.unwrap().is_pre_boojum() {
            Token::Tuple(vec![
                // batch number
                Token::Uint(U256::from(self.header.number.0)),
                // batch timestamp
                Token::Uint(U256::from(self.header.timestamp)),
                // index of the last repeated storage write
                Token::Uint(U256::from(self.metadata.rollup_last_leaf_index)),
                // new Merkle state root
                Token::FixedBytes(self.metadata.merkle_root_hash.as_bytes().to_vec()),
                // number of L1 transactions in the batch
                Token::Uint(U256::from(self.header.l1_tx_count)),
                // L2 -> L1 Merkle root
                Token::FixedBytes(self.metadata.l2_l1_merkle_root.as_bytes().to_vec()),
                // hash of the priority operations' onchain data
                Token::FixedBytes(
                    self.header
                        .priority_ops_onchain_data_hash()
                        .as_bytes()
                        .to_vec(),
                ),
                // compressed initial (first-time) storage writes
                Token::Bytes(self.metadata.initial_writes_compressed.clone()),
                // compressed repeated storage writes
                Token::Bytes(self.metadata.repeated_writes_compressed.clone()),
                // compressed L2 -> L1 messages
                Token::Bytes(self.metadata.l2_l1_messages_compressed.clone()),
                // raw L2 -> L1 messages
                Token::Array(
                    self.header
                        .l2_to_l1_messages
                        .iter()
                        .map(|message| Token::Bytes(message.to_vec()))
                        .collect(),
                ),
                // factory dependency bytecodes
                Token::Array(
                    self.factory_deps
                        .iter()
                        .map(|bytecode| Token::Bytes(bytecode.to_vec()))
                        .collect(),
                ),
            ])
        } else {
            Token::Tuple(vec![
                // `batchNumber`
                Token::Uint(U256::from(self.header.number.0)),
                // `timestamp`
                Token::Uint(U256::from(self.header.timestamp)),
                // `indexRepeatedStorageChanges`
                Token::Uint(U256::from(self.metadata.rollup_last_leaf_index)),
                // `newStateRoot`
                Token::FixedBytes(self.metadata.merkle_root_hash.as_bytes().to_vec()),
                // `numberOfLayer1Txs`
                Token::Uint(U256::from(self.header.l1_tx_count)),
                // `priorityOperationsHash`
                Token::FixedBytes(
                    self.header
                        .priority_ops_onchain_data_hash()
                        .as_bytes()
                        .to_vec(),
                ),
                // `bootloaderHeapInitialContentsHash`
                Token::FixedBytes(
                    self.metadata
                        .bootloader_initial_content_commitment
                        .unwrap()
                        .as_bytes()
                        .to_vec(),
                ),
                // `eventsQueueStateHash`
                Token::FixedBytes(
                    self.metadata
                        .events_queue_commitment
                        .unwrap()
                        .as_bytes()
                        .to_vec(),
                ),
                // `systemLogs`
                Token::Bytes(self.metadata.l2_l1_messages_compressed.clone()),
                // `totalL2ToL1Pubdata` — falls back to reconstructing pubdata when the
                // header does not carry it precomputed
                Token::Bytes(
                    self.header
                        .pubdata_input
                        .clone()
                        .unwrap_or(self.construct_pubdata()),
                ),
            ])
        }
    }

pub fn l1_commit_data_size(&self) -> usize {
crate::ethabi::encode(&[Token::Array(vec![self.l1_commit_data()])]).len()
}

/// Packs all pubdata needed for batch commitment in boojum into one bytes array. The packing contains the
/// following: logs, messages, bytecodes, and compressed state diffs.
/// This data is currently part of calldata but will be submitted as part of the blob section post EIP-4844.
pub fn construct_pubdata(&self) -> Vec<u8> {
let mut res: Vec<u8> = vec![];
let validium_mode = std::env::var("VALIDIUM_MODE") == Ok("true".to_owned());

if !validium_mode {
// Process and Pack Logs
res.extend((self.header.l2_to_l1_logs.len() as u32).to_be_bytes());
for l2_to_l1_log in &self.header.l2_to_l1_logs {
res.extend(l2_to_l1_log.0.to_bytes());
}

// Process and Pack Messages
res.extend((self.header.l2_to_l1_messages.len() as u32).to_be_bytes());
for msg in &self.header.l2_to_l1_messages {
res.extend((msg.len() as u32).to_be_bytes());
res.extend(msg);
}

// Process and Pack Bytecodes
res.extend((self.factory_deps.len() as u32).to_be_bytes());
for bytecode in &self.factory_deps {
res.extend((bytecode.len() as u32).to_be_bytes());
res.extend(bytecode);
}

// Extend with Compressed StateDiffs
res.extend(&self.metadata.state_diffs_compressed);
}

res
}
}

impl SerializeCommitment for L2ToL1Log {
Expand Down
134 changes: 134 additions & 0 deletions core/lib/types/src/l1_batch_committer.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
use zksync_basic_types::{ethabi::Token, U256};

use crate::{commitment::L1BatchWithMetadata, utils};

/// Abstraction over how an L1 batch is ABI-encoded for the L1 commit call.
///
/// Implementations select the pubdata strategy (rollup vs. validium) while
/// sharing the size-estimation logic through the provided default method.
pub trait L1BatchCommitter
where
    Self: std::fmt::Debug + Send + Sync,
{
    /// Encodes a single batch into its commit-data token.
    fn l1_commit_data(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> Token;

    /// ABI-encoded byte size of the batch's commit data, wrapped in a
    /// one-element array as it would appear in calldata.
    fn l1_commit_data_size(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> usize {
        let commit_data = self.l1_commit_data(l1_batch_with_metadata);
        crate::ethabi::encode(&[Token::Array(vec![commit_data])]).len()
    }
}

/// Committer for rollup mode: the batch's full pubdata is included in the
/// commit data submitted to L1.
#[derive(Debug, Clone)]
pub struct RollupModeL1BatchCommitter {}

/// Committer for validium mode: pubdata is kept off-chain, so the commit data
/// omits the `totalL2ToL1Pubdata` field.
#[derive(Debug, Clone)]
pub struct ValidiumModeL1BatchCommitter {}

impl L1BatchCommitter for RollupModeL1BatchCommitter {
    /// Rollup-mode encoding: post-boojum batches include the full pubdata blob;
    /// pre-boojum batches use the legacy shared layout.
    ///
    /// # Panics
    /// Panics if `header.protocol_version` is `None`.
    fn l1_commit_data(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> Token {
        // Removed leftover `println!` debug output: this runs on every commit
        // and raw stdout is not the project's logging mechanism.
        let is_pre_boojum = l1_batch_with_metadata
            .header
            .protocol_version
            .unwrap()
            .is_pre_boojum();
        let commit_data = if is_pre_boojum {
            preboojum_l1_commit_data(l1_batch_with_metadata)
        } else {
            rollup_mode_l1_commit_data(l1_batch_with_metadata)
        };
        Token::Tuple(commit_data)
    }
}

impl L1BatchCommitter for ValidiumModeL1BatchCommitter {
    /// Validium-mode encoding: post-boojum batches omit the pubdata field;
    /// pre-boojum batches use the legacy shared layout.
    ///
    /// # Panics
    /// Panics if `header.protocol_version` is `None`.
    fn l1_commit_data(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> Token {
        // Removed leftover `println!` debug output: this runs on every commit
        // and raw stdout is not the project's logging mechanism.
        let is_pre_boojum = l1_batch_with_metadata
            .header
            .protocol_version
            .unwrap()
            .is_pre_boojum();
        let commit_data = if is_pre_boojum {
            preboojum_l1_commit_data(l1_batch_with_metadata)
        } else {
            validium_mode_l1_commit_data(l1_batch_with_metadata)
        };
        Token::Tuple(commit_data)
    }
}

/// Legacy (pre-boojum) commit-data layout, shared by rollup and validium committers.
///
/// Token order is ABI-critical and must match the L1 contract; do not reorder.
fn preboojum_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec<Token> {
    let header = &l1_batch_with_metadata.header;
    let metadata = &l1_batch_with_metadata.metadata;
    // Return the vec directly instead of binding it first (clippy: let_and_return).
    vec![
        // batch number
        Token::Uint(U256::from(header.number.0)),
        // batch timestamp
        Token::Uint(U256::from(header.timestamp)),
        // index of the last repeated storage write
        Token::Uint(U256::from(metadata.rollup_last_leaf_index)),
        // new Merkle state root
        Token::FixedBytes(metadata.merkle_root_hash.as_bytes().to_vec()),
        // number of L1 transactions in the batch
        Token::Uint(U256::from(header.l1_tx_count)),
        // L2 -> L1 Merkle root
        Token::FixedBytes(metadata.l2_l1_merkle_root.as_bytes().to_vec()),
        // hash of the priority operations' onchain data
        Token::FixedBytes(header.priority_ops_onchain_data_hash().as_bytes().to_vec()),
        // compressed initial (first-time) storage writes
        Token::Bytes(metadata.initial_writes_compressed.clone()),
        // compressed repeated storage writes
        Token::Bytes(metadata.repeated_writes_compressed.clone()),
        // compressed L2 -> L1 messages
        Token::Bytes(metadata.l2_l1_messages_compressed.clone()),
        // raw L2 -> L1 messages
        Token::Array(
            header
                .l2_to_l1_messages
                .iter()
                .map(|message| Token::Bytes(message.to_vec()))
                .collect(),
        ),
        // factory dependency bytecodes
        Token::Array(
            l1_batch_with_metadata
                .factory_deps
                .iter()
                .map(|bytecode| Token::Bytes(bytecode.to_vec()))
                .collect(),
        ),
    ]
}

/// Post-boojum commit-data layout WITHOUT the `totalL2ToL1Pubdata` field —
/// validium mode keeps pubdata off-chain. Rollup mode builds on top of this
/// (see [`rollup_mode_l1_commit_data`]).
///
/// Token order is ABI-critical and must match the L1 contract; do not reorder.
///
/// # Panics
/// Panics if `bootloader_initial_content_commitment` or
/// `events_queue_commitment` is `None`; both are expected to be present for
/// post-boojum batches (pre-boojum batches go through
/// [`preboojum_l1_commit_data`] instead).
fn validium_mode_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec<Token> {
    let header = &l1_batch_with_metadata.header;
    let metadata = &l1_batch_with_metadata.metadata;
    // Return the vec directly instead of binding it first (clippy: let_and_return).
    vec![
        // `batchNumber`
        Token::Uint(U256::from(header.number.0)),
        // `timestamp`
        Token::Uint(U256::from(header.timestamp)),
        // `indexRepeatedStorageChanges`
        Token::Uint(U256::from(metadata.rollup_last_leaf_index)),
        // `newStateRoot`
        Token::FixedBytes(metadata.merkle_root_hash.as_bytes().to_vec()),
        // `numberOfLayer1Txs`
        Token::Uint(U256::from(header.l1_tx_count)),
        // `priorityOperationsHash`
        Token::FixedBytes(header.priority_ops_onchain_data_hash().as_bytes().to_vec()),
        // `bootloaderHeapInitialContentsHash`
        Token::FixedBytes(
            metadata
                .bootloader_initial_content_commitment
                .expect("bootloader_initial_content_commitment is set for post-boojum batches")
                .as_bytes()
                .to_vec(),
        ),
        // `eventsQueueStateHash`
        Token::FixedBytes(
            metadata
                .events_queue_commitment
                .expect("events_queue_commitment is set for post-boojum batches")
                .as_bytes()
                .to_vec(),
        ),
        // `systemLogs`
        Token::Bytes(metadata.l2_l1_messages_compressed.clone()),
    ]
}

/// Post-boojum commit data for rollup mode: the validium-mode fields followed
/// by the full pubdata blob (`totalL2ToL1Pubdata`).
fn rollup_mode_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec<Token> {
    let pubdata = utils::construct_pubdata(l1_batch_with_metadata);
    validium_mode_l1_commit_data(l1_batch_with_metadata)
        .into_iter()
        .chain(std::iter::once(Token::Bytes(pubdata)))
        .collect()
}
2 changes: 2 additions & 0 deletions core/lib/types/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,8 @@ pub mod utils;
pub mod vk_transform;
pub mod vm_version;

pub mod l1_batch_committer;

/// Denotes the first byte of the special zkSync's EIP-712-signed transaction.
pub const EIP_712_TX_TYPE: u8 = 0x71;

Expand Down
36 changes: 34 additions & 2 deletions core/lib/types/src/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@ use zksync_basic_types::{Address, H256};
use zksync_utils::{address_to_h256, u256_to_h256};

use crate::{
system_contracts::DEPLOYMENT_NONCE_INCREMENT, web3::signing::keccak256, AccountTreeId,
StorageKey, L2_ETH_TOKEN_ADDRESS, U256,
commitment::L1BatchWithMetadata, system_contracts::DEPLOYMENT_NONCE_INCREMENT,
web3::signing::keccak256, AccountTreeId, StorageKey, L2_ETH_TOKEN_ADDRESS, U256,
};

/// Transforms the *full* account nonce into an *account* nonce.
Expand Down Expand Up @@ -77,6 +77,38 @@ pub fn deployed_address_create(sender: Address, deploy_nonce: U256) -> Address {
Address::from_slice(&keccak256(&bytes)[12..])
}

/// Packs all pubdata needed for batch commitment in boojum into one byte array:
/// L2 -> L1 logs, L2 -> L1 messages, factory-dependency bytecodes, and
/// compressed state diffs.
/// This data is currently part of calldata but will be submitted as part of the
/// blob section post EIP-4844.
pub fn construct_pubdata(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec<u8> {
    let header = &l1_batch_with_metadata.header;
    let mut pubdata: Vec<u8> = Vec::new();

    // L2 -> L1 logs: big-endian count, then each serialized log.
    pubdata.extend((header.l2_to_l1_logs.len() as u32).to_be_bytes());
    for log in &header.l2_to_l1_logs {
        pubdata.extend(log.0.to_bytes());
    }

    // L2 -> L1 messages: big-endian count, then length-prefixed payloads.
    pubdata.extend((header.l2_to_l1_messages.len() as u32).to_be_bytes());
    for message in &header.l2_to_l1_messages {
        pubdata.extend((message.len() as u32).to_be_bytes());
        pubdata.extend(message);
    }

    // Factory-dependency bytecodes: big-endian count, then length-prefixed bytecodes.
    let factory_deps = &l1_batch_with_metadata.factory_deps;
    pubdata.extend((factory_deps.len() as u32).to_be_bytes());
    for bytecode in factory_deps {
        pubdata.extend((bytecode.len() as u32).to_be_bytes());
        pubdata.extend(bytecode);
    }

    // Compressed state diffs come last, with no length prefix.
    pubdata.extend(&l1_batch_with_metadata.metadata.state_diffs_compressed);

    pubdata
}

#[cfg(test)]
mod tests {
use std::str::FromStr;
Expand Down
Loading

0 comments on commit f637707

Please sign in to comment.