diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml index 42e31983f4..3f55237582 100644 --- a/.github/actionlint.yaml +++ b/.github/actionlint.yaml @@ -4,6 +4,7 @@ self-hosted-runner: - buildjet-2vcpu-ubuntu-2204 - buildjet-4vcpu-ubuntu-2204 - buildjet-8vcpu-ubuntu-2204 + - buildjet-16vcpu-ubuntu-2204 # Configuration variables in array of strings defined in your repository or # organization. `null` means disabling configuration variables check. # Empty array means no configuration variable is allowed. diff --git a/.github/workflows/forester-build-and-push-to-docr.yml b/.github/workflows/forester-build-and-push-to-docr.yml index ac0bccd460..5c78970eb9 100644 --- a/.github/workflows/forester-build-and-push-to-docr.yml +++ b/.github/workflows/forester-build-and-push-to-docr.yml @@ -3,7 +3,7 @@ name: Docker Build and Push to DOCR on: push: branches: - - main + - "*" pull_request: branches: - "**" diff --git a/.github/workflows/forester-tests.yml b/.github/workflows/forester-tests.yml index b6387e5bfa..c7e51d84da 100644 --- a/.github/workflows/forester-tests.yml +++ b/.github/workflows/forester-tests.yml @@ -1,7 +1,8 @@ on: push: branches: - - main + - "*" + - jorrit/feat-bloomfilter-queues paths: - "forester/**" - "photon-api/**" diff --git a/.github/workflows/js.yml b/.github/workflows/js.yml index ff8f3b0f0d..fc4d2aa8af 100644 --- a/.github/workflows/js.yml +++ b/.github/workflows/js.yml @@ -1,7 +1,8 @@ on: push: branches: - - main + - "*" + - jorrit/feat-bloomfilter-queues pull_request: branches: - "*" diff --git a/.github/workflows/light-examples-tests.yml b/.github/workflows/light-examples-tests.yml index 65271c1c50..d1075db982 100644 --- a/.github/workflows/light-examples-tests.yml +++ b/.github/workflows/light-examples-tests.yml @@ -1,7 +1,7 @@ on: push: branches: - - main + - "*" paths: - "examples/**" - "macros/light-sdk-macros/**" diff --git a/.github/workflows/light-system-programs-tests.yml b/.github/workflows/light-system-programs-tests.yml index 050d06beab..6776adb48f 100644 --- a/.github/workflows/light-system-programs-tests.yml +++ b/.github/workflows/light-system-programs-tests.yml @@ -1,7 +1,7 @@ on: push: branches: - - main + - "*" paths: - "programs/**" - "test-programs/**" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3b04b16b77..6b4ada9339 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,7 +1,8 @@ on: push: branches: - - main + - "*" + - jorrit/feat-bloomfilter-queues pull_request: branches: - "*" diff --git a/.github/workflows/prover-test.yml b/.github/workflows/prover-test.yml index 852152d8c5..1602e4831d 100644 --- a/.github/workflows/prover-test.yml +++ b/.github/workflows/prover-test.yml @@ -2,7 +2,7 @@ name: Test gnark prover on: push: branches: - - main + - "*" - 'release/**' paths: - "light-prover/**" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5e4f2934c2..212f7abefe 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -3,7 +3,7 @@ name: Release on: push: branches: - - main + - "*" jobs: release: diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 2abfc1973f..f14f9db81e 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -1,7 +1,7 @@ on: push: branches: - - main + - "*" paths: - ".cargo/**" - "**/*.rs" diff --git a/Cargo.lock b/Cargo.lock index cc12cdf4b9..d4f0985330 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,17 +19,24 @@ dependencies = [ "aligned-sized", "anchor-lang", 
"bytemuck", + "light-bloom-filter", "light-bounded-vec", "light-concurrent-merkle-tree", "light-hash-set", "light-hasher", "light-heap", "light-indexed-merkle-tree", + "light-merkle-tree-reference", + "light-prover-client", "light-utils", + "light-verifier", "num-bigint 0.4.6", "num-traits", + "rand 0.8.5", + "serial_test", "solana-sdk", "solana-security-txt", + "tokio", ] [[package]] @@ -59,6 +66,7 @@ dependencies = [ "rand 0.8.5", "reqwest 0.11.27", "serde_json", + "serial_test", "solana-cli-output", "solana-program-test", "solana-sdk", @@ -960,6 +968,18 @@ dependencies = [ "typenum", ] +[[package]] +name = "bitvec" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] + [[package]] name = "blake2" version = "0.10.6" @@ -1491,6 +1511,7 @@ dependencies = [ "num-traits", "rand 0.8.5", "reqwest 0.11.27", + "serial_test", "solana-program-test", "solana-sdk", "spl-token", @@ -2216,6 +2237,12 @@ dependencies = [ "ieee754", ] +[[package]] +name = "fastmurmur3" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d7e9bc68be4cdabbb8938140b01a8b5bc1191937f2c7e7ecc2fcebbe2d749df" + [[package]] name = "fastrand" version = "2.1.1" @@ -2419,6 +2446,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673464e1e314dd67a0fd9544abc99e8eb28d0c7e3b69b033bcff9b2d00b87333" +[[package]] +name = "funty" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" + [[package]] name = "futures" version = "0.1.31" @@ -3484,6 +3517,20 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "light-bloom-filter" +version = "0.1.0" +dependencies = [ + "bitvec", + "fastmurmur3", + "light-utils", + "num-bigint 0.4.6", + "num-traits", + "rand 0.8.5", + "solana-program", + "thiserror", +] + [[package]] name = "light-bounded-vec" version = "1.1.0" @@ -5110,6 +5157,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + [[package]] name = "rand" version = "0.7.3" @@ -5332,6 +5385,7 @@ dependencies = [ "num-traits", "reqwest 0.11.27", "serde_json", + "serial_test", "solana-cli-output", "solana-program-test", "solana-sdk", @@ -8102,6 +8156,7 @@ dependencies = [ "quote", "reqwest 0.11.27", "serde_json", + "serial_test", "solana-cli-output", "solana-program-test", "solana-sdk", @@ -8133,6 +8188,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + [[package]] name = "tar" version = "0.4.41" @@ -9461,6 +9522,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + [[package]] name = "x509-parser" version = "0.14.0" diff --git a/Cargo.toml b/Cargo.toml index 03b788d30b..a3cac9a39e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,6 +102,9 @@ num-traits = "0.2.19" # HTTP client reqwest = "0.11.26" +# Testing 
+serial_test = "3.1.1" + [patch.crates-io] "solana-account-decoder" = { git = "https://github.com/lightprotocol/agave", branch = "v1.18.22-enforce-cpi-tracking" } "solana-accounts-db" = { git = "https://github.com/lightprotocol/agave", branch = "v1.18.22-enforce-cpi-tracking" } diff --git a/circuit-lib/light-prover-client/Cargo.toml b/circuit-lib/light-prover-client/Cargo.toml index eb43beb923..dfbf69f2ea 100644 --- a/circuit-lib/light-prover-client/Cargo.toml +++ b/circuit-lib/light-prover-client/Cargo.toml @@ -53,4 +53,4 @@ sysinfo = "0.31" borsh = ">=0.9, <0.11" [dev-dependencies] duct = "0.13.7" -serial_test = "3.1.1" \ No newline at end of file +serial_test = "3.1.1" diff --git a/circuit-lib/light-prover-client/src/batch_append_with_subtrees.rs b/circuit-lib/light-prover-client/src/batch_append_with_subtrees.rs index 99cc298264..551d14e1c5 100644 --- a/circuit-lib/light-prover-client/src/batch_append_with_subtrees.rs +++ b/circuit-lib/light-prover-client/src/batch_append_with_subtrees.rs @@ -47,7 +47,7 @@ pub fn get_batch_append_with_subtrees_inputs( let mut bigint_leaves = vec![]; let old_subtrees = sub_trees; let old_subtree_hashchain = calculate_hash_chain(&old_subtrees); - let mut merkle_tree = SparseMerkleTree::::new(sub_trees); + let mut merkle_tree = SparseMerkleTree::::new(sub_trees, next_index); let start_index = bigint_to_be_bytes_array::<32>(&BigUint::from_usize(next_index).unwrap()).unwrap(); for leaf in leaves.iter() { diff --git a/circuit-lib/light-prover-client/src/gnark/helpers.rs b/circuit-lib/light-prover-client/src/gnark/helpers.rs index 1071ba1459..d000cc913e 100644 --- a/circuit-lib/light-prover-client/src/gnark/helpers.rs +++ b/circuit-lib/light-prover-client/src/gnark/helpers.rs @@ -45,7 +45,8 @@ pub enum ProofType { Inclusion, NonInclusion, Combined, - BatchAppend, + BatchAppendWithSubtrees, + BatchAppendWithProofs, BatchUpdate, BatchAppendWithSubtreesTest, BatchUpdateTest, @@ -61,7 +62,8 @@ impl Display for ProofType { ProofType::Inclusion => "inclusion", ProofType::NonInclusion => "non-inclusion", ProofType::Combined => "combined", - ProofType::BatchAppend => "append", + ProofType::BatchAppendWithSubtrees => "append-with-subtrees", + ProofType::BatchAppendWithProofs => "append-with-proofs", ProofType::BatchUpdate => "update", ProofType::BatchAppendWithSubtreesTest => "append-with-subtrees-test", ProofType::BatchUpdateTest => "update-test", @@ -113,7 +115,7 @@ pub async fn spawn_prover(restart: bool, config: ProverConfig) { let _ = command.spawn().expect("Failed to start prover process"); - let health_result = health_check(20, 5).await; + let health_result = health_check(20, 30).await; if health_result { info!("Prover started successfully"); } else { diff --git a/circuit-lib/light-prover-client/src/lib.rs b/circuit-lib/light-prover-client/src/lib.rs index 1f376ed857..ea74d2745f 100644 --- a/circuit-lib/light-prover-client/src/lib.rs +++ b/circuit-lib/light-prover-client/src/lib.rs @@ -9,5 +9,6 @@ pub mod groth16_solana_verifier; pub mod helpers; pub mod inclusion; pub mod init_merkle_tree; +pub mod mock_batched_forester; pub mod non_inclusion; pub mod prove_utils; diff --git a/circuit-lib/light-prover-client/src/mock_batched_forester.rs b/circuit-lib/light-prover-client/src/mock_batched_forester.rs index 26b76c70cd..917508cf01 100644 --- a/circuit-lib/light-prover-client/src/mock_batched_forester.rs +++ b/circuit-lib/light-prover-client/src/mock_batched_forester.rs @@ -6,21 +6,21 @@ use light_utils::bigint::bigint_to_be_bytes_array; use reqwest::Client; use 
crate::{ - batch_append::calculate_hash_chain, - batch_append_2::get_batch_append2_inputs, + batch_append_with_proofs::get_batch_append_with_proofs_inputs, + batch_append_with_subtrees::calculate_hash_chain, batch_update::get_batch_update_inputs, gnark::{ - batch_append_2_json_formatter::BatchAppend2ProofInputsJson, + batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson, batch_update_json_formatter::update_inputs_string, constants::{PROVE_PATH, SERVER_ADDRESS}, proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, }, }; -// TODO: rename to MockBatchedForester +#[derive(Clone, Debug)] pub struct MockBatchedForester { pub merkle_tree: MerkleTree, - pub input_queue_leaves: Vec<[u8; 32]>, + pub input_queue_leaves: Vec<([u8; 32], usize)>, /// Indices of leaves in the Merkle tree which are active. pub output_queue_leaves: Vec<[u8; 32]>, pub active_leaves: Vec<[u8; 32]>, @@ -52,26 +52,38 @@ impl MockBatchedForester { pub async fn get_batched_append_proof( &mut self, account_next_index: usize, - leaves: Vec<[u8; 32]>, num_zkp_updates: u32, batch_size: u32, + leaves_hashchain: [u8; 32], + max_num_zkp_updates: u32, ) -> Result<(CompressedProof, [u8; 32]), Error> { + let leaves = self.output_queue_leaves.to_vec(); let start = num_zkp_updates as usize * batch_size as usize; let end = start + batch_size as usize; let leaves = leaves[start..end].to_vec(); - // let sub_trees = self.merkle_tree.get_subtrees().try_into().unwrap(); + // if the batch is complete, remove its leaves from the mock output queue + if num_zkp_updates == max_num_zkp_updates - 1 { + for _ in 0..max_num_zkp_updates * batch_size { + self.output_queue_leaves.remove(0); + } + } let local_leaves_hashchain = calculate_hash_chain(&leaves); + assert_eq!(leaves_hashchain, local_leaves_hashchain); let old_root = self.merkle_tree.root(); - let start_index = self.merkle_tree.get_next_index().saturating_sub(1); let mut old_leaves = vec![]; let mut merkle_proofs = vec![]; for i in account_next_index..account_next_index + batch_size as usize { - if account_next_index > i { - } else { - self.merkle_tree.append(&[0u8; 32]).unwrap(); + match self.merkle_tree.get_leaf(i) { + Ok(leaf) => { + old_leaves.push(leaf); + } + Err(_) => { + old_leaves.push([0u8; 32]); + if i <= self.merkle_tree.get_next_index() { + self.merkle_tree.append(&[0u8; 32]).unwrap(); + } + } } - let old_leaf = self.merkle_tree.get_leaf(i).unwrap(); - old_leaves.push(old_leaf); let proof = self.merkle_tree.get_proof_of_leaf(i, true).unwrap(); merkle_proofs.push(proof.to_vec()); } @@ -80,12 +92,12 @@ impl MockBatchedForester { for (i, leaf) in leaves.iter().enumerate() { if old_leaves[i] == [0u8; 32] { let index = account_next_index + i; - self.merkle_tree.update(&leaf, index).unwrap(); + self.merkle_tree.update(leaf, index).unwrap(); } } - let circuit_inputs = get_batch_append2_inputs::( + let circuit_inputs = get_batch_append_with_proofs_inputs::( old_root, - start_index as u32, + account_next_index as u32, leaves, local_leaves_hashchain, old_leaves, @@ -97,7 +109,7 @@ impl MockBatchedForester { self.merkle_tree.root() ); let client = Client::new(); - let inputs_json = BatchAppend2ProofInputsJson::from_inputs(&circuit_inputs).to_string(); + let inputs_json = BatchAppendWithProofsInputsJson::from_inputs(&circuit_inputs).to_string(); let response_result = client .post(&format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) @@ -136,15 +148,17 @@ impl MockBatchedForester { let mut nullifiers = Vec::new(); let mut tx_hashes = Vec::new(); let mut
old_leaves = Vec::new(); - for leaf in leaves.iter() { - let index = self.merkle_tree.get_leaf_index(leaf).unwrap(); - if self.merkle_tree.get_next_index() <= index { + for (leaf, index) in leaves.iter() { + let index = *index; + // + 2 because next_index is the last leaf index + 1 and we need to + // init the leaf at pos[index] + if self.merkle_tree.get_next_index() < index + 2 { old_leaves.push([0u8; 32]); } else { - old_leaves.push(leaf.clone()); + old_leaves.push(*leaf); } // Handle the case where we nullify a leaf which has not been inserted yet. - while self.merkle_tree.get_next_index() <= index { + while self.merkle_tree.get_next_index() < index + 2 { self.merkle_tree.append(&[0u8; 32]).unwrap(); } let proof = self.merkle_tree.get_proof_of_leaf(index, true).unwrap(); @@ -158,24 +172,17 @@ impl MockBatchedForester { .expect("No event for leaf found."); let index_bytes = index.to_be_bytes(); let nullifier = Poseidon::hashv(&[leaf, &index_bytes, &event.tx_hash]).unwrap(); - println!("leaf: {:?}", leaf); - println!("index: {:?}", index); - println!("index_bytes: {:?}", index_bytes); - println!("tx_hash: {:?}", event.tx_hash); - println!("nullifier: {:?}", nullifier); tx_hashes.push(event.tx_hash); nullifiers.push(nullifier); - self.merkle_tree.update(&nullifier, index).unwrap(); } // local_leaves_hashchain is only used for a test assertion. let local_nullifier_hashchain = calculate_hash_chain(&nullifiers); assert_eq!(leaves_hashchain, local_nullifier_hashchain); - // TODO: adapt update circuit to allow for non-zero updates let inputs = get_batch_update_inputs::( old_root, tx_hashes, - leaves, + leaves.iter().map(|(leaf, _)| *leaf).collect(), leaves_hashchain, old_leaves, merkle_proofs, diff --git a/circuit-lib/light-prover-client/tests/gnark.rs b/circuit-lib/light-prover-client/tests/gnark.rs index 6c40cfe1ff..c085193b14 100644 --- a/circuit-lib/light-prover-client/tests/gnark.rs +++ b/circuit-lib/light-prover-client/tests/gnark.rs @@ -259,7 +259,7 @@ async fn prove_batch_two_append() { let mut merkle_proofs = vec![]; for index in current_index..current_index + num_insertions { let proof = merkle_tree.get_proof_of_leaf(index, true).unwrap(); - let leaf = merkle_tree.get_leaf(index); + let leaf = merkle_tree.leaf(index); old_leaves.push(leaf); merkle_proofs.push(proof.to_vec()); } diff --git a/circuit-lib/verifier/src/lib.rs b/circuit-lib/verifier/src/lib.rs index 29ee2b43d3..e56bfa030e 100644 --- a/circuit-lib/verifier/src/lib.rs +++ b/circuit-lib/verifier/src/lib.rs @@ -18,6 +18,8 @@ pub enum VerifierError { CreateGroth16VerifierFailed, #[error("ProofVerificationFailed")] ProofVerificationFailed, + #[error("InvalidBatchSize: supported batch sizes are 1, 10, 100, 500, 1000")] + InvalidBatchSize, } #[cfg(feature = "solana")] @@ -30,6 +32,7 @@ impl From<VerifierError> for u32 { VerifierError::InvalidPublicInputsLength => 13004, VerifierError::CreateGroth16VerifierFailed => 13005, VerifierError::ProofVerificationFailed => 13006, + VerifierError::InvalidBatchSize => 13007, } } } @@ -42,7 +45,7 @@ impl From<VerifierError> for solana_program::program_error::ProgramError { } use VerifierError::*; -#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub struct CompressedProof { pub a: [u8; 32], pub b: [u8; 64], @@ -245,7 +248,7 @@ fn verify( } #[inline(never)] -pub fn verify_batch_append( +pub fn verify_batch_append_with_subtrees( batch_size: usize, public_input_hash: [u8; 32], compressed_proof: &CompressedProof, @@ -281,7 +284,7 @@ pub fn
verify_batch_append( } #[inline(never)] -pub fn verify_batch_append2( +pub fn verify_batch_append_with_proofs( batch_size: usize, public_input_hash: [u8; 32], compressed_proof: &CompressedProof, diff --git a/cli/src/utils/process.ts b/cli/src/utils/process.ts index 22490f2313..996601531e 100644 --- a/cli/src/utils/process.ts +++ b/cli/src/utils/process.ts @@ -198,7 +198,7 @@ export async function waitForServers( ({ port, path }) => `http-get://127.0.0.1:${port}${path}`, ), delay: 1000, - timeout: 25000, + timeout: 360_000, interval: 300, simultaneous: 2, validateStatus: function (status: number) { diff --git a/client/src/indexer/test_indexer.rs b/client/src/indexer/test_indexer.rs index 3ddd8a3da2..94a7565723 100644 --- a/client/src/indexer/test_indexer.rs +++ b/client/src/indexer/test_indexer.rs @@ -288,6 +288,7 @@ where let proof_json = deserialize_gnark_proof_json(&body).unwrap(); let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); + let root_indices = root_indices.iter().map(|x| Some(*x)).collect(); return ProofRpcResult { root_indices, address_root_indices, diff --git a/client/src/transaction_params.rs b/client/src/transaction_params.rs index 694f8c3d70..350fdaf1fc 100644 --- a/client/src/transaction_params.rs +++ b/client/src/transaction_params.rs @@ -34,3 +34,16 @@ impl Default for FeeConfig { } } } + +impl FeeConfig { + pub fn test_batched() -> Self { + Self { + // rollover fee plus additional lamports for the cpi account + state_merkle_tree_rollover: 8, + address_queue_rollover: 392, // not batched + network_fee: 5000, + address_network_fee: 5000, + solana_network_fee: 5000, + } + } +} diff --git a/examples/name-service/programs/name-service/tests/test.rs b/examples/name-service/programs/name-service/tests/test.rs index a597fe6766..568b3f50da 100644 --- a/examples/name-service/programs/name-service/tests/test.rs +++ b/examples/name-service/programs/name-service/tests/test.rs @@ -394,7 +394,7 @@ where inputs, proof: rpc_result.proof, merkle_context, - merkle_tree_root_index: rpc_result.root_indices[0], + merkle_tree_root_index: rpc_result.root_indices[0].unwrap(), address_merkle_context: *address_merkle_context, address_merkle_tree_root_index: 0, new_rdata: new_rdata.clone(), @@ -471,7 +471,7 @@ where inputs, proof: rpc_result.proof, merkle_context, - merkle_tree_root_index: rpc_result.root_indices[0], + merkle_tree_root_index: rpc_result.root_indices[0].unwrap(), address_merkle_context: *address_merkle_context, address_merkle_tree_root_index: 0, }; diff --git a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/sdk.rs b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/sdk.rs index e3d8b90c48..3d4cbd9d7d 100644 --- a/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/sdk.rs +++ b/examples/token-escrow/programs/token-escrow/src/escrow_with_compressed_pda/sdk.rs @@ -25,7 +25,7 @@ pub struct CreateCompressedPdaEscrowInstructionInputs<'a> { pub input_merkle_context: &'a [MerkleContext], pub output_compressed_account_merkle_tree_pubkeys: &'a [Pubkey], pub output_compressed_accounts: &'a [TokenTransferOutputData], - pub root_indices: &'a [u16], + pub root_indices: &'a [Option<u16>], pub proof: &'a Option<CompressedProof>, pub input_token_data: &'a [light_compressed_token::token_data::TokenData], pub input_compressed_accounts: &'a [CompressedAccount], @@ -137,7 +137,7 @@ pub struct CreateCompressedPdaWithdrawalInstructionInputs<'a> { pub
input_cpda_merkle_context: MerkleContext, pub output_compressed_account_merkle_tree_pubkeys: &'a [Pubkey], pub output_compressed_accounts: &'a [TokenTransferOutputData], - pub root_indices: &'a [u16], + pub root_indices: &'a [Option], pub proof: &'a Option, pub input_token_data: &'a [light_compressed_token::token_data::TokenData], pub input_compressed_accounts: &'a [CompressedAccount], @@ -201,7 +201,7 @@ pub fn create_withdrawal_instruction( new_lock_up_time: input_params.new_lock_up_time, address: input_params.address, merkle_context: merkle_context_packed[0], - root_index: input_params.root_indices[0], + root_index: input_params.root_indices[0].unwrap_or_default(), }; let instruction_data = crate::instruction::WithdrawCompressedTokensWithCompressedPda { proof: input_params.proof.clone().unwrap(), diff --git a/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/sdk.rs b/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/sdk.rs index 5593ddc8c8..d91aab031c 100644 --- a/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/sdk.rs +++ b/examples/token-escrow/programs/token-escrow/src/escrow_with_pda/sdk.rs @@ -29,7 +29,7 @@ pub struct CreateEscrowInstructionInputs<'a> { pub input_merkle_context: &'a [MerkleContext], pub output_compressed_account_merkle_tree_pubkeys: &'a [Pubkey], pub output_compressed_accounts: &'a [TokenTransferOutputData], - pub root_indices: &'a [u16], + pub root_indices: &'a [Option], pub proof: &'a Option, pub input_token_data: &'a [light_compressed_token::token_data::TokenData], pub input_compressed_accounts: &'a [CompressedAccount], diff --git a/examples/token-escrow/programs/token-escrow/tests/test.rs b/examples/token-escrow/programs/token-escrow/tests/test.rs index 9ccd2f23d5..77d6b7dac3 100644 --- a/examples/token-escrow/programs/token-escrow/tests/test.rs +++ b/examples/token-escrow/programs/token-escrow/tests/test.rs @@ -237,10 +237,12 @@ pub async fn perform_escrow( let rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&[input_compressed_account_hash]), - Some(&[compressed_input_account_with_context - .merkle_context - .merkle_tree_pubkey]), + Some(vec![input_compressed_account_hash]), + Some(vec![ + compressed_input_account_with_context + .merkle_context + .merkle_tree_pubkey, + ]), None, None, rpc, @@ -301,7 +303,8 @@ pub async fn perform_escrow_with_event( ) .await? .unwrap(); - test_indexer.add_compressed_accounts_with_token_data(&event.0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.0); Ok(()) } @@ -395,10 +398,12 @@ pub async fn perform_withdrawal( let rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&[input_compressed_account_hash]), - Some(&[compressed_input_account_with_context - .merkle_context - .merkle_tree_pubkey]), + Some(vec![input_compressed_account_hash]), + Some(vec![ + compressed_input_account_with_context + .merkle_context + .merkle_tree_pubkey, + ]), None, None, context, @@ -457,7 +462,8 @@ pub async fn perform_withdrawal_with_event( ) .await? 
.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(&event.0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.0); Ok(()) } diff --git a/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs b/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs index 7694b9f6a4..7fa24bf48b 100644 --- a/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs +++ b/examples/token-escrow/programs/token-escrow/tests/test_compressed_pda.rs @@ -52,7 +52,7 @@ async fn test_escrow_with_compressed_pda() { ); let mint = create_mint_helper(&mut rpc, &payer).await; let mut test_indexer = test_indexer.await; - + test_indexer.state_merkle_trees.remove(1); let amount = 10000u64; mint_tokens_helper( &mut rpc, @@ -203,7 +203,8 @@ pub async fn perform_escrow_with_event( }), ) .await?; - test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); Ok(()) } @@ -236,10 +237,12 @@ async fn create_escrow_ix( let rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&[input_compressed_account_hash]), - Some(&[compressed_input_account_with_context - .merkle_context - .merkle_tree_pubkey]), + Some(vec![input_compressed_account_hash]), + Some(vec![ + compressed_input_account_with_context + .merkle_context + .merkle_tree_pubkey, + ]), Some(&[address]), Some(vec![env.address_merkle_tree_pubkey]), context, @@ -371,7 +374,8 @@ pub async fn perform_withdrawal_with_event( None, ) .await?; - test_indexer.add_compressed_accounts_with_token_data(&event.unwrap().0); + let slot = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.unwrap().0); Ok(()) } @@ -450,8 +454,8 @@ pub async fn perform_withdrawal( // the compressed pda program executes the transaction let rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&[compressed_pda_hash, token_escrow_account_hash]), - Some(&[ + Some(vec![compressed_pda_hash, token_escrow_account_hash]), + Some(vec![ compressed_escrow_pda.merkle_context.merkle_tree_pubkey, token_escrow_account.merkle_context.merkle_tree_pubkey, ]), diff --git a/forester-utils/src/address_merkle_tree_config.rs b/forester-utils/src/address_merkle_tree_config.rs index 10ccaf1fa1..d7b0c08145 100644 --- a/forester-utils/src/address_merkle_tree_config.rs +++ b/forester-utils/src/address_merkle_tree_config.rs @@ -4,9 +4,11 @@ use crate::{ AccountZeroCopy, }; use account_compression::{ - AddressMerkleTreeAccount, AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, - QueueAccount, StateMerkleTreeAccount, StateMerkleTreeConfig, + batched_merkle_tree::BatchedMerkleTreeAccount, AddressMerkleTreeAccount, + AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, QueueAccount, + StateMerkleTreeAccount, StateMerkleTreeConfig, }; +use anchor_lang::Discriminator; use light_client::rpc::RpcConnection; use light_hasher::Poseidon; use num_traits::Zero; @@ -148,21 +150,42 @@ pub async fn state_tree_ready_for_rollover( rpc: &mut R, merkle_tree: Pubkey, ) -> bool { - let account = AccountZeroCopy::::new(rpc, merkle_tree).await; + let account = rpc.get_account(merkle_tree).await.unwrap().unwrap(); let rent_exemption = rpc - .get_minimum_balance_for_rent_exemption(account.account.data.len()) + .get_minimum_balance_for_rent_exemption(account.data.len()) .await .unwrap(); - let 
tree_meta_data = account.deserialized().metadata; - let tree = - get_concurrent_merkle_tree::(rpc, merkle_tree) + let discriminator = account.data[0..8].try_into().unwrap(); + let (next_index, tree_meta_data, height) = match discriminator { + StateMerkleTreeAccount::DISCRIMINATOR => { + let account = AccountZeroCopy::<StateMerkleTreeAccount>::new(rpc, merkle_tree).await; + + let tree_meta_data = account.deserialized().metadata; + let tree = get_concurrent_merkle_tree::( + rpc, + merkle_tree, + ) .await; + (tree.next_index(), tree_meta_data, 26) + } + BatchedMerkleTreeAccount::DISCRIMINATOR => { + let account = AccountZeroCopy::<BatchedMerkleTreeAccount>::new(rpc, merkle_tree).await; + + let tree_meta_data = account.deserialized(); + ( + tree_meta_data.next_index as usize, + tree_meta_data.metadata, + tree_meta_data.height, + ) + } + _ => panic!("Invalid discriminator"), + }; // rollover threshold is reached - tree.next_index() - >= ((1 << tree.height) * tree_meta_data.rollover_metadata.rollover_threshold / 100) as usize + + next_index >= ((1 << height) * tree_meta_data.rollover_metadata.rollover_threshold / 100) as usize // has sufficient funds for rollover - && account.account.lamports >= rent_exemption * 2 + && account.lamports >= rent_exemption * 2 // has not been rolled over && tree_meta_data.rollover_metadata.rolledover_slot == u64::MAX } diff --git a/forester-utils/src/forester_epoch.rs b/forester-utils/src/forester_epoch.rs index c50c489d5a..646e9a2372 100644 --- a/forester-utils/src/forester_epoch.rs +++ b/forester-utils/src/forester_epoch.rs @@ -83,6 +83,7 @@ impl TreeAccounts { pub enum TreeType { Address, State, + BatchedState, } impl Display for TreeType { @@ -90,6 +91,7 @@ impl Display for TreeType { match self { TreeType::Address => write!(f, "address"), TreeType::State => write!(f, "state"), + TreeType::BatchedState => write!(f, "batched state"), } } } diff --git a/forester-utils/src/indexer/mod.rs b/forester-utils/src/indexer/mod.rs index f69ee5dbbe..c1659a0a1f 100644 --- a/forester-utils/src/indexer/mod.rs +++ b/forester-utils/src/indexer/mod.rs @@ -24,10 +24,18 @@ pub struct TokenDataWithContext { pub compressed_account: CompressedAccountWithMerkleContext, } -#[derive(Debug)] +#[derive(Debug, Default)] +pub struct BatchedTreeProofRpcResult { + pub proof: Option<CompressedProof>, + // If none -> proof by index, else included in zkp + pub root_indices: Vec<Option<u16>>, + pub address_root_indices: Vec<u16>, +} + +#[derive(Debug, Default)] pub struct ProofRpcResult { pub proof: CompressedProof, - pub root_indices: Vec<u16>, + pub root_indices: Vec<Option<u16>>, pub address_root_indices: Vec<u16>, } @@ -49,6 +57,10 @@ pub struct StateMerkleTreeBundle { pub rollover_fee: i64, pub merkle_tree: Box>, pub accounts: StateMerkleTreeAccounts, + pub version: u64, + pub output_queue_elements: Vec<[u8; 32]>, + /// leaf index, leaf, tx hash + pub input_leaf_indices: Vec<(u32, [u8; 32], [u8; 32])>, } #[derive(Debug, Clone)] @@ -93,6 +105,7 @@ pub trait Indexer: Sync + Send + Debug + 'static { fn add_event_and_compressed_accounts( &mut self, + _slot: u64, _event: &PublicTransactionEvent, ) -> ( Vec, Vec, @@ -132,8 +145,8 @@ pub trait Indexer: Sync + Send + Debug + 'static { #[allow(async_fn_in_trait)] async fn create_proof_for_compressed_accounts( &mut self, - _compressed_accounts: Option<&[[u8; 32]]>, - _state_merkle_tree_pubkeys: Option<&[Pubkey]>, + _compressed_accounts: Option<Vec<[u8; 32]>>, + _state_merkle_tree_pubkeys: Option<Vec<Pubkey>>, _new_addresses: Option<&[[u8; 32]]>, _address_merkle_tree_pubkeys: Option<Vec<Pubkey>>, _rpc: &mut R, ) -> ProofRpcResult { unimplemented!()
} + #[allow(async_fn_in_trait)] + async fn create_proof_for_compressed_accounts2( + &mut self, + _compressed_accounts: Option>, + _state_merkle_tree_pubkeys: Option>, + _new_addresses: Option<&[[u8; 32]]>, + _address_merkle_tree_pubkeys: Option>, + _rpc: &mut R, + ) -> BatchedTreeProofRpcResult { + unimplemented!() + } + fn add_address_merkle_tree_accounts( &mut self, _merkle_tree_keypair: &Keypair, diff --git a/forester/src/epoch_manager.rs b/forester/src/epoch_manager.rs index d09dc03b95..9aa10b9c56 100644 --- a/forester/src/epoch_manager.rs +++ b/forester/src/epoch_manager.rs @@ -1041,6 +1041,10 @@ impl> EpochManager { ) .await } + _ => panic!( + "perform rollover: Invalid tree type {:?}", + tree_account.tree_type + ), }; match result { diff --git a/forester/src/forester_status.rs b/forester/src/forester_status.rs index 3d62790f2b..2a51f12292 100644 --- a/forester/src/forester_status.rs +++ b/forester/src/forester_status.rs @@ -162,6 +162,10 @@ pub async fn fetch_forester_status(args: &StatusArgs) { match tree.tree_type { TreeType::State => "State", TreeType::Address => "Address", + _ => panic!( + "is_tree_ready_for_rollover: Invalid tree type {:?}", + tree.tree_type + ), } ); let tree_info = get_tree_fullness(&mut rpc, tree.merkle_tree, tree.tree_type) diff --git a/forester/src/rollover/operations.rs b/forester/src/rollover/operations.rs index 4f14d067b9..b24c3edcfa 100644 --- a/forester/src/rollover/operations.rs +++ b/forester/src/rollover/operations.rs @@ -110,6 +110,7 @@ pub async fn get_tree_fullness( threshold, }) } + _ => panic!("get tree fullness: Invalid tree type {:?}", tree_type), } } @@ -134,6 +135,10 @@ pub async fn is_tree_ready_for_rollover( .await? .unwrap(), ), + _ => panic!( + "is_tree_ready_for_rollover: Invalid tree type {:?}", + tree_type + ), }; let is_already_rolled_over = match &account { @@ -154,6 +159,10 @@ pub async fn is_tree_ready_for_rollover( TreeType::Address => { Ok(tree_info.next_index >= tree_info.threshold && tree_info.next_index > 3) } + _ => panic!( + "is_tree_ready_for_rollover: Invalid tree type {:?}", + tree_type + ), } } @@ -193,6 +202,9 @@ pub async fn rollover_state_merkle_tree>( STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, )), + version: 1, + input_leaf_indices: vec![], + output_queue_elements: vec![], }; indexer.lock().await.add_state_bundle(state_bundle); Ok(()) diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index 18030a9d0b..4fe027dbab 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -113,6 +113,8 @@ async fn test_epoch_monitor_with_test_indexer_and_1_forester() { Some(0), ) .await; + // removing batched Merkle tree + env.indexer.state_merkle_trees.remove(1); let user_index = 0; let balance = env @@ -370,7 +372,8 @@ async fn test_epoch_monitor_with_2_foresters() { Some(0), ) .await; - + // removing batched Merkle tree + env.indexer.state_merkle_trees.remove(1); let user_index = 0; let balance = env .rpc @@ -669,9 +672,9 @@ async fn test_epoch_double_registration() { let config = Arc::new(config); - let indexer: TestIndexer = + let mut indexer: TestIndexer = TestIndexer::init_from_env(&config.payer_keypair, &env_accounts, None).await; - + indexer.state_merkle_trees.remove(1); let indexer = Arc::new(Mutex::new(indexer)); for _ in 0..10 { diff --git a/js/compressed-token/src/idl/light_compressed_token.ts b/js/compressed-token/src/idl/light_compressed_token.ts index c9fcbd8c9b..4384cefa52 100644 --- a/js/compressed-token/src/idl/light_compressed_token.ts +++ 
b/js/compressed-token/src/idl/light_compressed_token.ts @@ -736,38 +736,6 @@ export type LightCompressedToken = { }, ]; types: [ - { - name: 'AccessMetadata'; - type: { - kind: 'struct'; - fields: [ - { - name: 'owner'; - docs: ['Owner of the Merkle tree.']; - type: 'publicKey'; - }, - { - name: 'programOwner'; - docs: [ - 'Program owner of the Merkle tree. This will be used for program owned Merkle trees.', - ]; - type: 'publicKey'; - }, - { - name: 'forester'; - docs: [ - 'Optional privileged forester pubkey, can be set for custom Merkle trees', - 'without a network fee. Merkle trees without network fees are not', - 'forested by light foresters. The variable is not used in the account', - 'compression program but the registry program. The registry program', - 'implements access control to prevent contention during forester. The', - 'forester pubkey specified in this struct can bypass contention checks.', - ]; - type: 'publicKey'; - }, - ]; - }; - }, { name: 'AccountState'; type: { @@ -1158,34 +1126,6 @@ export type LightCompressedToken = { ]; }; }, - { - name: 'MerkleTreeMetadata'; - type: { - kind: 'struct'; - fields: [ - { - name: 'accessMetadata'; - type: { - defined: 'AccessMetadata'; - }; - }, - { - name: 'rolloverMetadata'; - type: { - defined: 'RolloverMetadata'; - }; - }, - { - name: 'associatedQueue'; - type: 'publicKey'; - }, - { - name: 'nextMerkleTree'; - type: 'publicKey'; - }, - ]; - }; - }, { name: 'MerkleTreeSequenceNumber'; type: { @@ -1439,63 +1379,6 @@ export type LightCompressedToken = { ]; }; }, - { - name: 'RolloverMetadata'; - type: { - kind: 'struct'; - fields: [ - { - name: 'index'; - docs: ['Unique index.']; - type: 'u64'; - }, - { - name: 'rolloverFee'; - docs: [ - 'This fee is used for rent for the next account.', - 'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over', - ]; - type: 'u64'; - }, - { - name: 'rolloverThreshold'; - docs: [ - 'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).', - ]; - type: 'u64'; - }, - { - name: 'networkFee'; - docs: ['Tip for maintaining the account.']; - type: 'u64'; - }, - { - name: 'rolledoverSlot'; - docs: [ - 'The slot when the account was rolled over, a rolled over account should not be written to.', - ]; - type: 'u64'; - }, - { - name: 'closeThreshold'; - docs: [ - 'If current slot is greater than rolledover_slot + close_threshold and', - "the account is empty it can be closed. 
No 'close' functionality has been", - 'implemented yet.', - ]; - type: 'u64'; - }, - { - name: 'additionalBytes'; - docs: [ - 'Placeholder for bytes of additional accounts which are tied to the', - 'Merkle trees operation and need to be rolled over as well.', - ]; - type: 'u64'; - }, - ]; - }; - }, { name: 'TokenData'; type: { @@ -1549,128 +1432,23 @@ export type LightCompressedToken = { errors: [ { code: 6000; - name: 'PublicKeyAmountMissmatch'; - msg: 'public keys and amounts must be of same length'; + name: 'SignerCheckFailed'; + msg: 'Signer check failed'; }, { code: 6001; - name: 'ComputeInputSumFailed'; - msg: 'ComputeInputSumFailed'; + name: 'CreateTransferInstructionFailed'; + msg: 'Create transfer instruction failed'; }, { code: 6002; - name: 'ComputeOutputSumFailed'; - msg: 'ComputeOutputSumFailed'; + name: 'AccountNotFound'; + msg: 'Account not found'; }, { code: 6003; - name: 'ComputeCompressSumFailed'; - msg: 'ComputeCompressSumFailed'; - }, - { - code: 6004; - name: 'ComputeDecompressSumFailed'; - msg: 'ComputeDecompressSumFailed'; - }, - { - code: 6005; - name: 'SumCheckFailed'; - msg: 'SumCheckFailed'; - }, - { - code: 6006; - name: 'DecompressRecipientUndefinedForDecompress'; - msg: 'DecompressRecipientUndefinedForDecompress'; - }, - { - code: 6007; - name: 'CompressedPdaUndefinedForDecompress'; - msg: 'CompressedPdaUndefinedForDecompress'; - }, - { - code: 6008; - name: 'DeCompressAmountUndefinedForDecompress'; - msg: 'DeCompressAmountUndefinedForDecompress'; - }, - { - code: 6009; - name: 'CompressedPdaUndefinedForCompress'; - msg: 'CompressedPdaUndefinedForCompress'; - }, - { - code: 6010; - name: 'DeCompressAmountUndefinedForCompress'; - msg: 'DeCompressAmountUndefinedForCompress'; - }, - { - code: 6011; - name: 'DelegateSignerCheckFailed'; - msg: 'DelegateSignerCheckFailed'; - }, - { - code: 6012; - name: 'MintTooLarge'; - msg: 'Minted amount greater than u64::MAX'; - }, - { - code: 6013; - name: 'SplTokenSupplyMismatch'; - msg: 'SplTokenSupplyMismatch'; - }, - { - code: 6014; - name: 'HeapMemoryCheckFailed'; - msg: 'HeapMemoryCheckFailed'; - }, - { - code: 6015; - name: 'InstructionNotCallable'; - msg: 'The instruction is not callable'; - }, - { - code: 6016; - name: 'ArithmeticUnderflow'; - msg: 'ArithmeticUnderflow'; - }, - { - code: 6017; - name: 'HashToFieldError'; - msg: 'HashToFieldError'; - }, - { - code: 6018; - name: 'InvalidAuthorityMint'; - msg: 'Expected the authority to be also a mint authority'; - }, - { - code: 6019; - name: 'InvalidFreezeAuthority'; - msg: 'Provided authority is not the freeze authority'; - }, - { - code: 6020; - name: 'InvalidDelegateIndex'; - }, - { - code: 6021; - name: 'TokenPoolPdaUndefined'; - }, - { - code: 6022; - name: 'IsTokenPoolPda'; - msg: 'Compress or decompress recipient is the same account as the token pool pda.'; - }, - { - code: 6023; - name: 'InvalidTokenPoolPda'; - }, - { - code: 6024; - name: 'NoInputTokenAccountsProvided'; - }, - { - code: 6025; - name: 'NoInputsProvided'; + name: 'SerializationError'; + msg: 'Serialization error'; }, ]; }; @@ -2412,38 +2190,6 @@ export const IDL: LightCompressedToken = { }, ], types: [ - { - name: 'AccessMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'owner', - docs: ['Owner of the Merkle tree.'], - type: 'publicKey', - }, - { - name: 'programOwner', - docs: [ - 'Program owner of the Merkle tree. 
This will be used for program owned Merkle trees.', - ], - type: 'publicKey', - }, - { - name: 'forester', - docs: [ - 'Optional privileged forester pubkey, can be set for custom Merkle trees', - 'without a network fee. Merkle trees without network fees are not', - 'forested by light foresters. The variable is not used in the account', - 'compression program but the registry program. The registry program', - 'implements access control to prevent contention during forester. The', - 'forester pubkey specified in this struct can bypass contention checks.', - ], - type: 'publicKey', - }, - ], - }, - }, { name: 'AccountState', type: { @@ -2838,34 +2584,6 @@ export const IDL: LightCompressedToken = { ], }, }, - { - name: 'MerkleTreeMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'accessMetadata', - type: { - defined: 'AccessMetadata', - }, - }, - { - name: 'rolloverMetadata', - type: { - defined: 'RolloverMetadata', - }, - }, - { - name: 'associatedQueue', - type: 'publicKey', - }, - { - name: 'nextMerkleTree', - type: 'publicKey', - }, - ], - }, - }, { name: 'MerkleTreeSequenceNumber', type: { @@ -3120,63 +2838,6 @@ export const IDL: LightCompressedToken = { ], }, }, - { - name: 'RolloverMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'index', - docs: ['Unique index.'], - type: 'u64', - }, - { - name: 'rolloverFee', - docs: [ - 'This fee is used for rent for the next account.', - 'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over', - ], - type: 'u64', - }, - { - name: 'rolloverThreshold', - docs: [ - 'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).', - ], - type: 'u64', - }, - { - name: 'networkFee', - docs: ['Tip for maintaining the account.'], - type: 'u64', - }, - { - name: 'rolledoverSlot', - docs: [ - 'The slot when the account was rolled over, a rolled over account should not be written to.', - ], - type: 'u64', - }, - { - name: 'closeThreshold', - docs: [ - 'If current slot is greater than rolledover_slot + close_threshold and', - "the account is empty it can be closed. 
No 'close' functionality has been", - 'implemented yet.', - ], - type: 'u64', - }, - { - name: 'additionalBytes', - docs: [ - 'Placeholder for bytes of additional accounts which are tied to the', - 'Merkle trees operation and need to be rolled over as well.', - ], - type: 'u64', - }, - ], - }, - }, { name: 'TokenData', type: { @@ -3230,128 +2891,23 @@ export const IDL: LightCompressedToken = { errors: [ { code: 6000, - name: 'PublicKeyAmountMissmatch', - msg: 'public keys and amounts must be of same length', + name: 'SignerCheckFailed', + msg: 'Signer check failed', }, { code: 6001, - name: 'ComputeInputSumFailed', - msg: 'ComputeInputSumFailed', + name: 'CreateTransferInstructionFailed', + msg: 'Create transfer instruction failed', }, { code: 6002, - name: 'ComputeOutputSumFailed', - msg: 'ComputeOutputSumFailed', + name: 'AccountNotFound', + msg: 'Account not found', }, { code: 6003, - name: 'ComputeCompressSumFailed', - msg: 'ComputeCompressSumFailed', - }, - { - code: 6004, - name: 'ComputeDecompressSumFailed', - msg: 'ComputeDecompressSumFailed', - }, - { - code: 6005, - name: 'SumCheckFailed', - msg: 'SumCheckFailed', - }, - { - code: 6006, - name: 'DecompressRecipientUndefinedForDecompress', - msg: 'DecompressRecipientUndefinedForDecompress', - }, - { - code: 6007, - name: 'CompressedPdaUndefinedForDecompress', - msg: 'CompressedPdaUndefinedForDecompress', - }, - { - code: 6008, - name: 'DeCompressAmountUndefinedForDecompress', - msg: 'DeCompressAmountUndefinedForDecompress', - }, - { - code: 6009, - name: 'CompressedPdaUndefinedForCompress', - msg: 'CompressedPdaUndefinedForCompress', - }, - { - code: 6010, - name: 'DeCompressAmountUndefinedForCompress', - msg: 'DeCompressAmountUndefinedForCompress', - }, - { - code: 6011, - name: 'DelegateSignerCheckFailed', - msg: 'DelegateSignerCheckFailed', - }, - { - code: 6012, - name: 'MintTooLarge', - msg: 'Minted amount greater than u64::MAX', - }, - { - code: 6013, - name: 'SplTokenSupplyMismatch', - msg: 'SplTokenSupplyMismatch', - }, - { - code: 6014, - name: 'HeapMemoryCheckFailed', - msg: 'HeapMemoryCheckFailed', - }, - { - code: 6015, - name: 'InstructionNotCallable', - msg: 'The instruction is not callable', - }, - { - code: 6016, - name: 'ArithmeticUnderflow', - msg: 'ArithmeticUnderflow', - }, - { - code: 6017, - name: 'HashToFieldError', - msg: 'HashToFieldError', - }, - { - code: 6018, - name: 'InvalidAuthorityMint', - msg: 'Expected the authority to be also a mint authority', - }, - { - code: 6019, - name: 'InvalidFreezeAuthority', - msg: 'Provided authority is not the freeze authority', - }, - { - code: 6020, - name: 'InvalidDelegateIndex', - }, - { - code: 6021, - name: 'TokenPoolPdaUndefined', - }, - { - code: 6022, - name: 'IsTokenPoolPda', - msg: 'Compress or decompress recipient is the same account as the token pool pda.', - }, - { - code: 6023, - name: 'InvalidTokenPoolPda', - }, - { - code: 6024, - name: 'NoInputTokenAccountsProvided', - }, - { - code: 6025, - name: 'NoInputsProvided', + name: 'SerializationError', + msg: 'Serialization error', }, ], }; diff --git a/js/stateless.js/src/idls/account_compression.ts b/js/stateless.js/src/idls/account_compression.ts index f660807456..b9782a3a13 100644 --- a/js/stateless.js/src/idls/account_compression.ts +++ b/js/stateless.js/src/idls/account_compression.ts @@ -84,8 +84,62 @@ export type AccountCompression = { }; value: '[11 , 188 , 15 , 192 , 187 , 71 , 202 , 47 , 116 , 196 , 17 , 46 , 148 , 171 , 19 , 207 , 163 , 198 , 52 , 229 , 220 , 23 , 234 , 203 , 3 , 205 , 26 , 35 , 
205 , 126 , 120 , 124 ,]'; }, + { + name: 'TEST_DEFAULT_BATCH_SIZE'; + type: 'u64'; + value: '50'; + }, + { + name: 'TEST_DEFAULT_ZKP_BATCH_SIZE'; + type: 'u64'; + value: '10'; + }, + { + name: 'DEFAULT_BATCH_SIZE'; + type: 'u64'; + value: '50000'; + }, + { + name: 'DEFAULT_ZKP_BATCH_SIZE'; + type: 'u64'; + value: '500'; + }, ]; instructions: [ + { + name: 'initializeBatchedStateMerkleTree'; + accounts: [ + { + name: 'authority'; + isMut: true; + isSigner: true; + }, + { + name: 'merkleTree'; + isMut: true; + isSigner: false; + }, + { + name: 'queue'; + isMut: true; + isSigner: false; + }, + { + name: 'registeredProgramPda'; + isMut: false; + isSigner: false; + isOptional: true; + }, + ]; + args: [ + { + name: 'params'; + type: { + defined: 'InitStateTreeAccountsInstructionData'; + }; + }, + ]; + }, { name: 'initializeAddressMerkleTreeAndQueue'; accounts: [ @@ -621,6 +675,28 @@ export type AccountCompression = { }; }; }, + { + name: 'leafIndices'; + type: { + vec: 'u32'; + }; + }, + { + name: 'txHash'; + type: { + option: { + array: ['u8', 32]; + }; + }; + }, + { + name: 'checkProofByIndex'; + type: { + option: { + vec: 'bool'; + }; + }; + }, ]; }, { @@ -668,6 +744,75 @@ export type AccountCompression = { ]; args: []; }, + { + name: 'batchNullify'; + accounts: [ + { + name: 'authority'; + isMut: false; + isSigner: true; + }, + { + name: 'registeredProgramPda'; + isMut: false; + isSigner: false; + isOptional: true; + }, + { + name: 'logWrapper'; + isMut: false; + isSigner: false; + }, + { + name: 'merkleTree'; + isMut: true; + isSigner: false; + }, + ]; + args: [ + { + name: 'data'; + type: 'bytes'; + }, + ]; + }, + { + name: 'batchAppend'; + accounts: [ + { + name: 'authority'; + isMut: false; + isSigner: true; + }, + { + name: 'registeredProgramPda'; + isMut: false; + isSigner: false; + isOptional: true; + }, + { + name: 'logWrapper'; + isMut: false; + isSigner: false; + }, + { + name: 'merkleTree'; + isMut: true; + isSigner: false; + }, + { + name: 'outputQueue'; + isMut: true; + isSigner: false; + }, + ]; + args: [ + { + name: 'data'; + type: 'bytes'; + }, + ]; + }, ]; accounts: [ { @@ -732,6 +877,149 @@ export type AccountCompression = { ]; }; }, + { + name: 'batchedMerkleTreeMetadata'; + type: { + kind: 'struct'; + fields: [ + { + name: 'accessMetadata'; + type: { + defined: 'AccessMetadata'; + }; + }, + { + name: 'rolloverMetadata'; + type: { + defined: 'RolloverMetadata'; + }; + }, + { + name: 'associatedOutputQueue'; + type: 'publicKey'; + }, + { + name: 'nextMerkleTree'; + type: 'publicKey'; + }, + { + name: 'treeType'; + type: 'u64'; + }, + ]; + }; + }, + { + name: 'batchedMerkleTreeAccount'; + type: { + kind: 'struct'; + fields: [ + { + name: 'metadata'; + type: { + defined: 'MerkleTreeMetadata'; + }; + }, + { + name: 'sequenceNumber'; + type: 'u64'; + }, + { + name: 'treeType'; + type: 'u64'; + }, + { + name: 'nextIndex'; + type: 'u64'; + }, + { + name: 'height'; + type: 'u32'; + }, + { + name: 'rootHistoryCapacity'; + type: 'u32'; + }, + { + name: 'queue'; + type: { + defined: 'BatchedQueue'; + }; + }, + ]; + }; + }, + { + name: 'batchedQueueAccount'; + docs: [ + 'Memory layout:', + '1. QueueMetadata', + '2. num_batches: u64', + '3. hash_chain hash bounded vec', + '3. for num_batches every 33 bytes is a bloom filter', + '3. 
(output queue) rest of account is bounded vec', + '', + 'One Batch account contains multiple batches.', + ]; + type: { + kind: 'struct'; + fields: [ + { + name: 'metadata'; + type: { + defined: 'QueueMetadata'; + }; + }, + { + name: 'queue'; + type: { + defined: 'BatchedQueue'; + }; + }, + { + name: 'nextIndex'; + docs: [ + 'Output queue requires next index to derive compressed account hashes.', + 'next_index in queue is ahead or equal to next index in the associated', + 'batched Merkle tree account.', + ]; + type: 'u64'; + }, + ]; + }; + }, + { + name: 'batchedQueue'; + type: { + kind: 'struct'; + fields: [ + { + name: 'numBatches'; + type: 'u64'; + }, + { + name: 'batchSize'; + type: 'u64'; + }, + { + name: 'zkpBatchSize'; + type: 'u64'; + }, + { + name: 'currentlyProcessingBatchIndex'; + type: 'u64'; + }, + { + name: 'nextFullBatchIndex'; + type: 'u64'; + }, + { + name: 'bloomFilterCapacity'; + type: 'u64'; + }, + ]; + }; + }, { name: 'groupAuthority'; type: { @@ -898,6 +1186,26 @@ export type AccountCompression = { }, ]; types: [ + { + name: 'ZeroOutLeafIndex'; + type: { + kind: 'struct'; + fields: [ + { + name: 'treeIndex'; + type: 'u8'; + }, + { + name: 'batchIndex'; + type: 'u8'; + }, + { + name: 'leafIndex'; + type: 'u16'; + }, + ]; + }; + }, { name: 'AddressMerkleTreeConfig'; type: { @@ -1009,15 +1317,147 @@ export type AccountCompression = { }; }, { - name: 'QueueType'; + name: 'InitStateTreeAccountsInstructionData'; type: { - kind: 'enum'; - variants: [ + kind: 'struct'; + fields: [ { - name: 'NullifierQueue'; + name: 'index'; + type: 'u64'; }, { - name: 'AddressQueue'; + name: 'programOwner'; + type: { + option: 'publicKey'; + }; + }, + { + name: 'forester'; + type: { + option: 'publicKey'; + }; + }, + { + name: 'additionalBytes'; + type: 'u64'; + }, + { + name: 'inputQueueBatchSize'; + type: 'u64'; + }, + { + name: 'outputQueueBatchSize'; + type: 'u64'; + }, + { + name: 'inputQueueZkpBatchSize'; + type: 'u64'; + }, + { + name: 'outputQueueZkpBatchSize'; + type: 'u64'; + }, + { + name: 'bloomFilterNumIters'; + type: 'u64'; + }, + { + name: 'bloomFilterCapacity'; + type: 'u64'; + }, + { + name: 'rootHistoryCapacity'; + type: 'u32'; + }, + { + name: 'networkFee'; + type: { + option: 'u64'; + }; + }, + { + name: 'rolloverThreshold'; + type: { + option: 'u64'; + }; + }, + { + name: 'closeThreshold'; + type: { + option: 'u64'; + }; + }, + { + name: 'inputQueueNumBatches'; + type: 'u64'; + }, + { + name: 'outputQueueNumBatches'; + type: 'u64'; + }, + { + name: 'height'; + type: 'u32'; + }, + ]; + }; + }, + { + name: 'BatchState'; + type: { + kind: 'enum'; + variants: [ + { + name: 'CanBeFilled'; + }, + { + name: 'Inserted'; + }, + { + name: 'ReadyToUpdateTree'; + }, + ]; + }; + }, + { + name: 'TreeType'; + type: { + kind: 'enum'; + variants: [ + { + name: 'State'; + }, + { + name: 'Address'; + }, + { + name: 'BatchedState'; + }, + { + name: 'BatchedAddress'; + }, + ]; + }; + }, + { + name: 'QueueType'; + type: { + kind: 'enum'; + variants: [ + { + name: 'NullifierQueue'; + }, + { + name: 'AddressQueue'; + }, + { + name: 'Input'; + }, + { + name: 'Address'; + }, + { + name: 'Output'; }, ]; }; @@ -1165,6 +1605,75 @@ export type AccountCompression = { code: 6026; name: 'ProofLengthMismatch'; }, + { + code: 6027; + name: 'InvalidCommitmentLength'; + msg: 'Invalid commitment length'; + }, + { + code: 6028; + name: 'BloomFilterFull'; + msg: 'BloomFilterFull'; + }, + { + code: 6029; + name: 'BatchInsertFailed'; + msg: 'BatchInsertFailed'; + }, + { + code: 6030; + name: 'BatchNotReady'; + 
msg: 'BatchNotReady'; + }, + { + code: 6031; + name: 'SizeMismatch'; + }, + { + code: 6032; + name: 'BatchAlreadyInserted'; + }, + { + code: 6033; + name: 'InvalidBloomFilterCapacity'; + }, + { + code: 6034; + name: 'InvalidCircuitBatchSize'; + }, + { + code: 6035; + name: 'InvalidDiscriminator'; + }, + { + code: 6036; + name: 'BatchSizeNotDivisibleByZkpBatchSize'; + msg: 'batch_size is not divisible by zkp_batch_size'; + }, + { + code: 6037; + name: 'InclusionProofByIndexFailed'; + }, + { + code: 6038; + name: 'TxHashUndefined'; + }, + { + code: 6039; + name: 'InputDeserializationFailed'; + }, + { + code: 6040; + name: 'InvalidBatch'; + }, + { + code: 6041; + name: 'LeafIndexNotInBatch'; + }, + { + code: 6042; + name: 'UnsupportedParameters'; + }, ]; }; @@ -1254,8 +1763,62 @@ export const IDL: AccountCompression = { }, value: '[11 , 188 , 15 , 192 , 187 , 71 , 202 , 47 , 116 , 196 , 17 , 46 , 148 , 171 , 19 , 207 , 163 , 198 , 52 , 229 , 220 , 23 , 234 , 203 , 3 , 205 , 26 , 35 , 205 , 126 , 120 , 124 ,]', }, + { + name: 'TEST_DEFAULT_BATCH_SIZE', + type: 'u64', + value: '50', + }, + { + name: 'TEST_DEFAULT_ZKP_BATCH_SIZE', + type: 'u64', + value: '10', + }, + { + name: 'DEFAULT_BATCH_SIZE', + type: 'u64', + value: '50000', + }, + { + name: 'DEFAULT_ZKP_BATCH_SIZE', + type: 'u64', + value: '500', + }, ], instructions: [ + { + name: 'initializeBatchedStateMerkleTree', + accounts: [ + { + name: 'authority', + isMut: true, + isSigner: true, + }, + { + name: 'merkleTree', + isMut: true, + isSigner: false, + }, + { + name: 'queue', + isMut: true, + isSigner: false, + }, + { + name: 'registeredProgramPda', + isMut: false, + isSigner: false, + isOptional: true, + }, + ], + args: [ + { + name: 'params', + type: { + defined: 'InitStateTreeAccountsInstructionData', + }, + }, + ], + }, { name: 'initializeAddressMerkleTreeAndQueue', accounts: [ @@ -1791,6 +2354,28 @@ export const IDL: AccountCompression = { }, }, }, + { + name: 'leafIndices', + type: { + vec: 'u32', + }, + }, + { + name: 'txHash', + type: { + option: { + array: ['u8', 32], + }, + }, + }, + { + name: 'checkProofByIndex', + type: { + option: { + vec: 'bool', + }, + }, + }, ], }, { @@ -1838,37 +2423,106 @@ export const IDL: AccountCompression = { ], args: [], }, - ], - accounts: [ { - name: 'registeredProgram', - type: { - kind: 'struct', - fields: [ - { - name: 'registeredProgramId', - type: 'publicKey', - }, - { - name: 'groupAuthorityPda', - type: 'publicKey', - }, - ], - }, + name: 'batchNullify', + accounts: [ + { + name: 'authority', + isMut: false, + isSigner: true, + }, + { + name: 'registeredProgramPda', + isMut: false, + isSigner: false, + isOptional: true, + }, + { + name: 'logWrapper', + isMut: false, + isSigner: false, + }, + { + name: 'merkleTree', + isMut: true, + isSigner: false, + }, + ], + args: [ + { + name: 'data', + type: 'bytes', + }, + ], }, { - name: 'accessMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'owner', - docs: ['Owner of the Merkle tree.'], - type: 'publicKey', - }, - { - name: 'programOwner', - docs: [ + name: 'batchAppend', + accounts: [ + { + name: 'authority', + isMut: false, + isSigner: true, + }, + { + name: 'registeredProgramPda', + isMut: false, + isSigner: false, + isOptional: true, + }, + { + name: 'logWrapper', + isMut: false, + isSigner: false, + }, + { + name: 'merkleTree', + isMut: true, + isSigner: false, + }, + { + name: 'outputQueue', + isMut: true, + isSigner: false, + }, + ], + args: [ + { + name: 'data', + type: 'bytes', + }, + ], + }, + ], + accounts: [ + { + name: 
'registeredProgram', + type: { + kind: 'struct', + fields: [ + { + name: 'registeredProgramId', + type: 'publicKey', + }, + { + name: 'groupAuthorityPda', + type: 'publicKey', + }, + ], + }, + }, + { + name: 'accessMetadata', + type: { + kind: 'struct', + fields: [ + { + name: 'owner', + docs: ['Owner of the Merkle tree.'], + type: 'publicKey', + }, + { + name: 'programOwner', + docs: [ 'Program owner of the Merkle tree. This will be used for program owned Merkle trees.', ], type: 'publicKey', @@ -1902,6 +2556,149 @@ export const IDL: AccountCompression = { ], }, }, + { + name: 'batchedMerkleTreeMetadata', + type: { + kind: 'struct', + fields: [ + { + name: 'accessMetadata', + type: { + defined: 'AccessMetadata', + }, + }, + { + name: 'rolloverMetadata', + type: { + defined: 'RolloverMetadata', + }, + }, + { + name: 'associatedOutputQueue', + type: 'publicKey', + }, + { + name: 'nextMerkleTree', + type: 'publicKey', + }, + { + name: 'treeType', + type: 'u64', + }, + ], + }, + }, + { + name: 'batchedMerkleTreeAccount', + type: { + kind: 'struct', + fields: [ + { + name: 'metadata', + type: { + defined: 'MerkleTreeMetadata', + }, + }, + { + name: 'sequenceNumber', + type: 'u64', + }, + { + name: 'treeType', + type: 'u64', + }, + { + name: 'nextIndex', + type: 'u64', + }, + { + name: 'height', + type: 'u32', + }, + { + name: 'rootHistoryCapacity', + type: 'u32', + }, + { + name: 'queue', + type: { + defined: 'BatchedQueue', + }, + }, + ], + }, + }, + { + name: 'batchedQueueAccount', + docs: [ + 'Memory layout:', + '1. QueueMetadata', + '2. num_batches: u64', + '3. hash_chain hash bounded vec', + '3. for num_batches every 33 bytes is a bloom filter', + '3. (output queue) rest of account is bounded vec', + '', + 'One Batch account contains multiple batches.', + ], + type: { + kind: 'struct', + fields: [ + { + name: 'metadata', + type: { + defined: 'QueueMetadata', + }, + }, + { + name: 'queue', + type: { + defined: 'BatchedQueue', + }, + }, + { + name: 'nextIndex', + docs: [ + 'Output queue requires next index to derive compressed account hashes.', + 'next_index in queue is ahead or equal to next index in the associated', + 'batched Merkle tree account.', + ], + type: 'u64', + }, + ], + }, + }, + { + name: 'batchedQueue', + type: { + kind: 'struct', + fields: [ + { + name: 'numBatches', + type: 'u64', + }, + { + name: 'batchSize', + type: 'u64', + }, + { + name: 'zkpBatchSize', + type: 'u64', + }, + { + name: 'currentlyProcessingBatchIndex', + type: 'u64', + }, + { + name: 'nextFullBatchIndex', + type: 'u64', + }, + { + name: 'bloomFilterCapacity', + type: 'u64', + }, + ], + }, + }, { name: 'groupAuthority', type: { @@ -2068,6 +2865,26 @@ export const IDL: AccountCompression = { }, ], types: [ + { + name: 'ZeroOutLeafIndex', + type: { + kind: 'struct', + fields: [ + { + name: 'treeIndex', + type: 'u8', + }, + { + name: 'batchIndex', + type: 'u8', + }, + { + name: 'leafIndex', + type: 'u16', + }, + ], + }, + }, { name: 'AddressMerkleTreeConfig', type: { @@ -2178,6 +2995,129 @@ export const IDL: AccountCompression = { ], }, }, + { + name: 'InitStateTreeAccountsInstructionData', + type: { + kind: 'struct', + fields: [ + { + name: 'index', + type: 'u64', + }, + { + name: 'programOwner', + type: { + option: 'publicKey', + }, + }, + { + name: 'forester', + type: { + option: 'publicKey', + }, + }, + { + name: 'additionalBytes', + type: 'u64', + }, + { + name: 'inputQueueBatchSize', + type: 'u64', + }, + { + name: 'outputQueueBatchSize', + type: 'u64', + }, + { + name: 'inputQueueZkpBatchSize', + type: 
'u64', + }, + { + name: 'outputQueueZkpBatchSize', + type: 'u64', + }, + { + name: 'bloomFilterNumIters', + type: 'u64', + }, + { + name: 'bloomFilterCapacity', + type: 'u64', + }, + { + name: 'rootHistoryCapacity', + type: 'u32', + }, + { + name: 'networkFee', + type: { + option: 'u64', + }, + }, + { + name: 'rolloverThreshold', + type: { + option: 'u64', + }, + }, + { + name: 'closeThreshold', + type: { + option: 'u64', + }, + }, + { + name: 'inputQueueNumBatches', + type: 'u64', + }, + { + name: 'outputQueueNumBatches', + type: 'u64', + }, + { + name: 'height', + type: 'u32', + }, + ], + }, + }, + { + name: 'BatchState', + type: { + kind: 'enum', + variants: [ + { + name: 'CanBeFilled', + }, + { + name: 'Inserted', + }, + { + name: 'ReadyToUpdateTree', + }, + ], + }, + }, + { + name: 'TreeType', + type: { + kind: 'enum', + variants: [ + { + name: 'State', + }, + { + name: 'Address', + }, + { + name: 'BatchedState', + }, + { + name: 'BatchedAddress', + }, + ], + }, + }, { name: 'QueueType', type: { @@ -2189,6 +3129,15 @@ export const IDL: AccountCompression = { { name: 'AddressQueue', }, + { + name: 'Input', + }, + { + name: 'Address', + }, + { + name: 'Output', + }, ], }, }, @@ -2335,5 +3284,74 @@ export const IDL: AccountCompression = { code: 6026, name: 'ProofLengthMismatch', }, + { + code: 6027, + name: 'InvalidCommitmentLength', + msg: 'Invalid commitment length', + }, + { + code: 6028, + name: 'BloomFilterFull', + msg: 'BloomFilterFull', + }, + { + code: 6029, + name: 'BatchInsertFailed', + msg: 'BatchInsertFailed', + }, + { + code: 6030, + name: 'BatchNotReady', + msg: 'BatchNotReady', + }, + { + code: 6031, + name: 'SizeMismatch', + }, + { + code: 6032, + name: 'BatchAlreadyInserted', + }, + { + code: 6033, + name: 'InvalidBloomFilterCapacity', + }, + { + code: 6034, + name: 'InvalidCircuitBatchSize', + }, + { + code: 6035, + name: 'InvalidDiscriminator', + }, + { + code: 6036, + name: 'BatchSizeNotDivisibleByZkpBatchSize', + msg: 'batch_size is not divisible by zkp_batch_size', + }, + { + code: 6037, + name: 'InclusionProofByIndexFailed', + }, + { + code: 6038, + name: 'TxHashUndefined', + }, + { + code: 6039, + name: 'InputDeserializationFailed', + }, + { + code: 6040, + name: 'InvalidBatch', + }, + { + code: 6041, + name: 'LeafIndexNotInBatch', + }, + { + code: 6042, + name: 'UnsupportedParameters', + }, ], }; diff --git a/js/stateless.js/src/idls/light_compressed_token.ts b/js/stateless.js/src/idls/light_compressed_token.ts index c9fcbd8c9b..4384cefa52 100644 --- a/js/stateless.js/src/idls/light_compressed_token.ts +++ b/js/stateless.js/src/idls/light_compressed_token.ts @@ -736,38 +736,6 @@ export type LightCompressedToken = { }, ]; types: [ - { - name: 'AccessMetadata'; - type: { - kind: 'struct'; - fields: [ - { - name: 'owner'; - docs: ['Owner of the Merkle tree.']; - type: 'publicKey'; - }, - { - name: 'programOwner'; - docs: [ - 'Program owner of the Merkle tree. This will be used for program owned Merkle trees.', - ]; - type: 'publicKey'; - }, - { - name: 'forester'; - docs: [ - 'Optional privileged forester pubkey, can be set for custom Merkle trees', - 'without a network fee. Merkle trees without network fees are not', - 'forested by light foresters. The variable is not used in the account', - 'compression program but the registry program. The registry program', - 'implements access control to prevent contention during forester. 
The', - 'forester pubkey specified in this struct can bypass contention checks.', - ]; - type: 'publicKey'; - }, - ]; - }; - }, { name: 'AccountState'; type: { @@ -1158,34 +1126,6 @@ export type LightCompressedToken = { ]; }; }, - { - name: 'MerkleTreeMetadata'; - type: { - kind: 'struct'; - fields: [ - { - name: 'accessMetadata'; - type: { - defined: 'AccessMetadata'; - }; - }, - { - name: 'rolloverMetadata'; - type: { - defined: 'RolloverMetadata'; - }; - }, - { - name: 'associatedQueue'; - type: 'publicKey'; - }, - { - name: 'nextMerkleTree'; - type: 'publicKey'; - }, - ]; - }; - }, { name: 'MerkleTreeSequenceNumber'; type: { @@ -1439,63 +1379,6 @@ export type LightCompressedToken = { ]; }; }, - { - name: 'RolloverMetadata'; - type: { - kind: 'struct'; - fields: [ - { - name: 'index'; - docs: ['Unique index.']; - type: 'u64'; - }, - { - name: 'rolloverFee'; - docs: [ - 'This fee is used for rent for the next account.', - 'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over', - ]; - type: 'u64'; - }, - { - name: 'rolloverThreshold'; - docs: [ - 'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).', - ]; - type: 'u64'; - }, - { - name: 'networkFee'; - docs: ['Tip for maintaining the account.']; - type: 'u64'; - }, - { - name: 'rolledoverSlot'; - docs: [ - 'The slot when the account was rolled over, a rolled over account should not be written to.', - ]; - type: 'u64'; - }, - { - name: 'closeThreshold'; - docs: [ - 'If current slot is greater than rolledover_slot + close_threshold and', - "the account is empty it can be closed. No 'close' functionality has been", - 'implemented yet.', - ]; - type: 'u64'; - }, - { - name: 'additionalBytes'; - docs: [ - 'Placeholder for bytes of additional accounts which are tied to the', - 'Merkle trees operation and need to be rolled over as well.', - ]; - type: 'u64'; - }, - ]; - }; - }, { name: 'TokenData'; type: { @@ -1549,128 +1432,23 @@ export type LightCompressedToken = { errors: [ { code: 6000; - name: 'PublicKeyAmountMissmatch'; - msg: 'public keys and amounts must be of same length'; + name: 'SignerCheckFailed'; + msg: 'Signer check failed'; }, { code: 6001; - name: 'ComputeInputSumFailed'; - msg: 'ComputeInputSumFailed'; + name: 'CreateTransferInstructionFailed'; + msg: 'Create transfer instruction failed'; }, { code: 6002; - name: 'ComputeOutputSumFailed'; - msg: 'ComputeOutputSumFailed'; + name: 'AccountNotFound'; + msg: 'Account not found'; }, { code: 6003; - name: 'ComputeCompressSumFailed'; - msg: 'ComputeCompressSumFailed'; - }, - { - code: 6004; - name: 'ComputeDecompressSumFailed'; - msg: 'ComputeDecompressSumFailed'; - }, - { - code: 6005; - name: 'SumCheckFailed'; - msg: 'SumCheckFailed'; - }, - { - code: 6006; - name: 'DecompressRecipientUndefinedForDecompress'; - msg: 'DecompressRecipientUndefinedForDecompress'; - }, - { - code: 6007; - name: 'CompressedPdaUndefinedForDecompress'; - msg: 'CompressedPdaUndefinedForDecompress'; - }, - { - code: 6008; - name: 'DeCompressAmountUndefinedForDecompress'; - msg: 'DeCompressAmountUndefinedForDecompress'; - }, - { - code: 6009; - name: 'CompressedPdaUndefinedForCompress'; - msg: 'CompressedPdaUndefinedForCompress'; - }, - { - code: 6010; - name: 'DeCompressAmountUndefinedForCompress'; - msg: 'DeCompressAmountUndefinedForCompress'; - }, - { - code: 6011; - name: 'DelegateSignerCheckFailed'; - msg: 'DelegateSignerCheckFailed'; - }, - { - code: 6012; - name: 'MintTooLarge'; - msg: 
'Minted amount greater than u64::MAX'; - }, - { - code: 6013; - name: 'SplTokenSupplyMismatch'; - msg: 'SplTokenSupplyMismatch'; - }, - { - code: 6014; - name: 'HeapMemoryCheckFailed'; - msg: 'HeapMemoryCheckFailed'; - }, - { - code: 6015; - name: 'InstructionNotCallable'; - msg: 'The instruction is not callable'; - }, - { - code: 6016; - name: 'ArithmeticUnderflow'; - msg: 'ArithmeticUnderflow'; - }, - { - code: 6017; - name: 'HashToFieldError'; - msg: 'HashToFieldError'; - }, - { - code: 6018; - name: 'InvalidAuthorityMint'; - msg: 'Expected the authority to be also a mint authority'; - }, - { - code: 6019; - name: 'InvalidFreezeAuthority'; - msg: 'Provided authority is not the freeze authority'; - }, - { - code: 6020; - name: 'InvalidDelegateIndex'; - }, - { - code: 6021; - name: 'TokenPoolPdaUndefined'; - }, - { - code: 6022; - name: 'IsTokenPoolPda'; - msg: 'Compress or decompress recipient is the same account as the token pool pda.'; - }, - { - code: 6023; - name: 'InvalidTokenPoolPda'; - }, - { - code: 6024; - name: 'NoInputTokenAccountsProvided'; - }, - { - code: 6025; - name: 'NoInputsProvided'; + name: 'SerializationError'; + msg: 'Serialization error'; }, ]; }; @@ -2412,38 +2190,6 @@ export const IDL: LightCompressedToken = { }, ], types: [ - { - name: 'AccessMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'owner', - docs: ['Owner of the Merkle tree.'], - type: 'publicKey', - }, - { - name: 'programOwner', - docs: [ - 'Program owner of the Merkle tree. This will be used for program owned Merkle trees.', - ], - type: 'publicKey', - }, - { - name: 'forester', - docs: [ - 'Optional privileged forester pubkey, can be set for custom Merkle trees', - 'without a network fee. Merkle trees without network fees are not', - 'forested by light foresters. The variable is not used in the account', - 'compression program but the registry program. The registry program', - 'implements access control to prevent contention during forester. 
The', - 'forester pubkey specified in this struct can bypass contention checks.', - ], - type: 'publicKey', - }, - ], - }, - }, { name: 'AccountState', type: { @@ -2838,34 +2584,6 @@ export const IDL: LightCompressedToken = { ], }, }, - { - name: 'MerkleTreeMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'accessMetadata', - type: { - defined: 'AccessMetadata', - }, - }, - { - name: 'rolloverMetadata', - type: { - defined: 'RolloverMetadata', - }, - }, - { - name: 'associatedQueue', - type: 'publicKey', - }, - { - name: 'nextMerkleTree', - type: 'publicKey', - }, - ], - }, - }, { name: 'MerkleTreeSequenceNumber', type: { @@ -3120,63 +2838,6 @@ export const IDL: LightCompressedToken = { ], }, }, - { - name: 'RolloverMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'index', - docs: ['Unique index.'], - type: 'u64', - }, - { - name: 'rolloverFee', - docs: [ - 'This fee is used for rent for the next account.', - 'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over', - ], - type: 'u64', - }, - { - name: 'rolloverThreshold', - docs: [ - 'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).', - ], - type: 'u64', - }, - { - name: 'networkFee', - docs: ['Tip for maintaining the account.'], - type: 'u64', - }, - { - name: 'rolledoverSlot', - docs: [ - 'The slot when the account was rolled over, a rolled over account should not be written to.', - ], - type: 'u64', - }, - { - name: 'closeThreshold', - docs: [ - 'If current slot is greater than rolledover_slot + close_threshold and', - "the account is empty it can be closed. No 'close' functionality has been", - 'implemented yet.', - ], - type: 'u64', - }, - { - name: 'additionalBytes', - docs: [ - 'Placeholder for bytes of additional accounts which are tied to the', - 'Merkle trees operation and need to be rolled over as well.', - ], - type: 'u64', - }, - ], - }, - }, { name: 'TokenData', type: { @@ -3230,128 +2891,23 @@ export const IDL: LightCompressedToken = { errors: [ { code: 6000, - name: 'PublicKeyAmountMissmatch', - msg: 'public keys and amounts must be of same length', + name: 'SignerCheckFailed', + msg: 'Signer check failed', }, { code: 6001, - name: 'ComputeInputSumFailed', - msg: 'ComputeInputSumFailed', + name: 'CreateTransferInstructionFailed', + msg: 'Create transfer instruction failed', }, { code: 6002, - name: 'ComputeOutputSumFailed', - msg: 'ComputeOutputSumFailed', + name: 'AccountNotFound', + msg: 'Account not found', }, { code: 6003, - name: 'ComputeCompressSumFailed', - msg: 'ComputeCompressSumFailed', - }, - { - code: 6004, - name: 'ComputeDecompressSumFailed', - msg: 'ComputeDecompressSumFailed', - }, - { - code: 6005, - name: 'SumCheckFailed', - msg: 'SumCheckFailed', - }, - { - code: 6006, - name: 'DecompressRecipientUndefinedForDecompress', - msg: 'DecompressRecipientUndefinedForDecompress', - }, - { - code: 6007, - name: 'CompressedPdaUndefinedForDecompress', - msg: 'CompressedPdaUndefinedForDecompress', - }, - { - code: 6008, - name: 'DeCompressAmountUndefinedForDecompress', - msg: 'DeCompressAmountUndefinedForDecompress', - }, - { - code: 6009, - name: 'CompressedPdaUndefinedForCompress', - msg: 'CompressedPdaUndefinedForCompress', - }, - { - code: 6010, - name: 'DeCompressAmountUndefinedForCompress', - msg: 'DeCompressAmountUndefinedForCompress', - }, - { - code: 6011, - name: 'DelegateSignerCheckFailed', - msg: 'DelegateSignerCheckFailed', - }, - { - code: 6012, - name: 
'MintTooLarge', - msg: 'Minted amount greater than u64::MAX', - }, - { - code: 6013, - name: 'SplTokenSupplyMismatch', - msg: 'SplTokenSupplyMismatch', - }, - { - code: 6014, - name: 'HeapMemoryCheckFailed', - msg: 'HeapMemoryCheckFailed', - }, - { - code: 6015, - name: 'InstructionNotCallable', - msg: 'The instruction is not callable', - }, - { - code: 6016, - name: 'ArithmeticUnderflow', - msg: 'ArithmeticUnderflow', - }, - { - code: 6017, - name: 'HashToFieldError', - msg: 'HashToFieldError', - }, - { - code: 6018, - name: 'InvalidAuthorityMint', - msg: 'Expected the authority to be also a mint authority', - }, - { - code: 6019, - name: 'InvalidFreezeAuthority', - msg: 'Provided authority is not the freeze authority', - }, - { - code: 6020, - name: 'InvalidDelegateIndex', - }, - { - code: 6021, - name: 'TokenPoolPdaUndefined', - }, - { - code: 6022, - name: 'IsTokenPoolPda', - msg: 'Compress or decompress recipient is the same account as the token pool pda.', - }, - { - code: 6023, - name: 'InvalidTokenPoolPda', - }, - { - code: 6024, - name: 'NoInputTokenAccountsProvided', - }, - { - code: 6025, - name: 'NoInputsProvided', + name: 'SerializationError', + msg: 'Serialization error', }, ], }; diff --git a/js/stateless.js/src/idls/light_registry.ts b/js/stateless.js/src/idls/light_registry.ts index 93d3942055..efed53308c 100644 --- a/js/stateless.js/src/idls/light_registry.ts +++ b/js/stateless.js/src/idls/light_registry.ts @@ -17,6 +17,11 @@ export type LightRegistry = { type: 'bytes'; value: '[97, 117, 116, 104, 111, 114, 105, 116, 121]'; }, + { + name: 'DEFAULT_WORK_V1'; + type: 'u64'; + value: '1'; + }, ]; instructions: [ { @@ -882,6 +887,177 @@ export type LightRegistry = { }, ]; }, + { + name: 'initializeBatchedStateMerkleTree'; + accounts: [ + { + name: 'authority'; + isMut: true; + isSigner: true; + }, + { + name: 'merkleTree'; + isMut: true; + isSigner: false; + }, + { + name: 'queue'; + isMut: true; + isSigner: false; + }, + { + name: 'registeredProgramPda'; + isMut: false; + isSigner: false; + }, + { + name: 'cpiAuthority'; + isMut: true; + isSigner: false; + }, + { + name: 'accountCompressionProgram'; + isMut: false; + isSigner: false; + }, + { + name: 'protocolConfigPda'; + isMut: false; + isSigner: false; + }, + { + name: 'cpiContextAccount'; + isMut: false; + isSigner: false; + isOptional: true; + }, + { + name: 'lightSystemProgram'; + isMut: false; + isSigner: false; + isOptional: true; + }, + ]; + args: [ + { + name: 'bump'; + type: 'u8'; + }, + { + name: 'params'; + type: { + defined: 'InitStateTreeAccountsInstructionData'; + }; + }, + ]; + }, + { + name: 'batchNullify'; + accounts: [ + { + name: 'registeredForesterPda'; + isMut: true; + isSigner: false; + isOptional: true; + }, + { + name: 'authority'; + isMut: false; + isSigner: true; + }, + { + name: 'cpiAuthority'; + isMut: false; + isSigner: false; + }, + { + name: 'registeredProgramPda'; + isMut: false; + isSigner: false; + }, + { + name: 'accountCompressionProgram'; + isMut: false; + isSigner: false; + }, + { + name: 'logWrapper'; + isMut: false; + isSigner: false; + }, + { + name: 'merkleTree'; + isMut: true; + isSigner: false; + }, + ]; + args: [ + { + name: 'bump'; + type: 'u8'; + }, + { + name: 'data'; + type: 'bytes'; + }, + ]; + }, + { + name: 'batchAppend'; + accounts: [ + { + name: 'registeredForesterPda'; + isMut: true; + isSigner: false; + isOptional: true; + }, + { + name: 'authority'; + isMut: false; + isSigner: true; + }, + { + name: 'cpiAuthority'; + isMut: false; + isSigner: false; + }, + { + name: 
'registeredProgramPda'; + isMut: false; + isSigner: false; + }, + { + name: 'accountCompressionProgram'; + isMut: false; + isSigner: false; + }, + { + name: 'logWrapper'; + isMut: false; + isSigner: false; + }, + { + name: 'merkleTree'; + isMut: true; + isSigner: false; + }, + { + name: 'outputQueue'; + isMut: true; + isSigner: false; + }, + ]; + args: [ + { + name: 'bump'; + type: 'u8'; + }, + { + name: 'data'; + type: 'bytes'; + }, + ]; + }, ]; accounts: [ { @@ -1310,6 +1486,11 @@ export const IDL: LightRegistry = { type: 'bytes', value: '[97, 117, 116, 104, 111, 114, 105, 116, 121]', }, + { + name: 'DEFAULT_WORK_V1', + type: 'u64', + value: '1', + }, ], instructions: [ { @@ -2175,6 +2356,177 @@ export const IDL: LightRegistry = { }, ], }, + { + name: 'initializeBatchedStateMerkleTree', + accounts: [ + { + name: 'authority', + isMut: true, + isSigner: true, + }, + { + name: 'merkleTree', + isMut: true, + isSigner: false, + }, + { + name: 'queue', + isMut: true, + isSigner: false, + }, + { + name: 'registeredProgramPda', + isMut: false, + isSigner: false, + }, + { + name: 'cpiAuthority', + isMut: true, + isSigner: false, + }, + { + name: 'accountCompressionProgram', + isMut: false, + isSigner: false, + }, + { + name: 'protocolConfigPda', + isMut: false, + isSigner: false, + }, + { + name: 'cpiContextAccount', + isMut: false, + isSigner: false, + isOptional: true, + }, + { + name: 'lightSystemProgram', + isMut: false, + isSigner: false, + isOptional: true, + }, + ], + args: [ + { + name: 'bump', + type: 'u8', + }, + { + name: 'params', + type: { + defined: 'InitStateTreeAccountsInstructionData', + }, + }, + ], + }, + { + name: 'batchNullify', + accounts: [ + { + name: 'registeredForesterPda', + isMut: true, + isSigner: false, + isOptional: true, + }, + { + name: 'authority', + isMut: false, + isSigner: true, + }, + { + name: 'cpiAuthority', + isMut: false, + isSigner: false, + }, + { + name: 'registeredProgramPda', + isMut: false, + isSigner: false, + }, + { + name: 'accountCompressionProgram', + isMut: false, + isSigner: false, + }, + { + name: 'logWrapper', + isMut: false, + isSigner: false, + }, + { + name: 'merkleTree', + isMut: true, + isSigner: false, + }, + ], + args: [ + { + name: 'bump', + type: 'u8', + }, + { + name: 'data', + type: 'bytes', + }, + ], + }, + { + name: 'batchAppend', + accounts: [ + { + name: 'registeredForesterPda', + isMut: true, + isSigner: false, + isOptional: true, + }, + { + name: 'authority', + isMut: false, + isSigner: true, + }, + { + name: 'cpiAuthority', + isMut: false, + isSigner: false, + }, + { + name: 'registeredProgramPda', + isMut: false, + isSigner: false, + }, + { + name: 'accountCompressionProgram', + isMut: false, + isSigner: false, + }, + { + name: 'logWrapper', + isMut: false, + isSigner: false, + }, + { + name: 'merkleTree', + isMut: true, + isSigner: false, + }, + { + name: 'outputQueue', + isMut: true, + isSigner: false, + }, + ], + args: [ + { + name: 'bump', + type: 'u8', + }, + { + name: 'data', + type: 'bytes', + }, + ], + }, ], accounts: [ { diff --git a/js/stateless.js/src/idls/light_system_program.ts b/js/stateless.js/src/idls/light_system_program.ts index 67f3cc1d74..650d0e45c8 100644 --- a/js/stateless.js/src/idls/light_system_program.ts +++ b/js/stateless.js/src/idls/light_system_program.ts @@ -271,23 +271,6 @@ export type LightSystemProgram = { }, ]; accounts: [ - { - name: 'stateMerkleTreeAccount'; - docs: [ - 'Concurrent state Merkle tree used for public compressed transactions.', - ]; - type: { - kind: 'struct'; - fields: [ 
- { - name: 'metadata'; - type: { - defined: 'MerkleTreeMetadata'; - }; - }, - ]; - }; - }, { name: 'cpiContextAccount'; docs: [ @@ -322,123 +305,6 @@ export type LightSystemProgram = { }, ]; types: [ - { - name: 'AccessMetadata'; - type: { - kind: 'struct'; - fields: [ - { - name: 'owner'; - docs: ['Owner of the Merkle tree.']; - type: 'publicKey'; - }, - { - name: 'programOwner'; - docs: [ - 'Program owner of the Merkle tree. This will be used for program owned Merkle trees.', - ]; - type: 'publicKey'; - }, - { - name: 'forester'; - docs: [ - 'Optional privileged forester pubkey, can be set for custom Merkle trees', - 'without a network fee. Merkle trees without network fees are not', - 'forested by light foresters. The variable is not used in the account', - 'compression program but the registry program. The registry program', - 'implements access control to prevent contention during forester. The', - 'forester pubkey specified in this struct can bypass contention checks.', - ]; - type: 'publicKey'; - }, - ]; - }; - }, - { - name: 'MerkleTreeMetadata'; - type: { - kind: 'struct'; - fields: [ - { - name: 'accessMetadata'; - type: { - defined: 'AccessMetadata'; - }; - }, - { - name: 'rolloverMetadata'; - type: { - defined: 'RolloverMetadata'; - }; - }, - { - name: 'associatedQueue'; - type: 'publicKey'; - }, - { - name: 'nextMerkleTree'; - type: 'publicKey'; - }, - ]; - }; - }, - { - name: 'RolloverMetadata'; - type: { - kind: 'struct'; - fields: [ - { - name: 'index'; - docs: ['Unique index.']; - type: 'u64'; - }, - { - name: 'rolloverFee'; - docs: [ - 'This fee is used for rent for the next account.', - 'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over', - ]; - type: 'u64'; - }, - { - name: 'rolloverThreshold'; - docs: [ - 'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).', - ]; - type: 'u64'; - }, - { - name: 'networkFee'; - docs: ['Tip for maintaining the account.']; - type: 'u64'; - }, - { - name: 'rolledoverSlot'; - docs: [ - 'The slot when the account was rolled over, a rolled over account should not be written to.', - ]; - type: 'u64'; - }, - { - name: 'closeThreshold'; - docs: [ - 'If current slot is greater than rolledover_slot + close_threshold and', - "the account is empty it can be closed. No 'close' functionality has been", - 'implemented yet.', - ]; - type: 'u64'; - }, - { - name: 'additionalBytes'; - docs: [ - 'Placeholder for bytes of additional accounts which are tied to the', - 'Merkle trees operation and need to be rolled over as well.', - ]; - type: 'u64'; - }, - ]; - }; - }, { name: 'InstructionDataInvoke'; type: { @@ -1336,23 +1202,6 @@ export const IDL: LightSystemProgram = { }, ], accounts: [ - { - name: 'stateMerkleTreeAccount', - docs: [ - 'Concurrent state Merkle tree used for public compressed transactions.', - ], - type: { - kind: 'struct', - fields: [ - { - name: 'metadata', - type: { - defined: 'MerkleTreeMetadata', - }, - }, - ], - }, - }, { name: 'cpiContextAccount', docs: [ @@ -1387,123 +1236,6 @@ export const IDL: LightSystemProgram = { }, ], types: [ - { - name: 'AccessMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'owner', - docs: ['Owner of the Merkle tree.'], - type: 'publicKey', - }, - { - name: 'programOwner', - docs: [ - 'Program owner of the Merkle tree. 
This will be used for program owned Merkle trees.', - ], - type: 'publicKey', - }, - { - name: 'forester', - docs: [ - 'Optional privileged forester pubkey, can be set for custom Merkle trees', - 'without a network fee. Merkle trees without network fees are not', - 'forested by light foresters. The variable is not used in the account', - 'compression program but the registry program. The registry program', - 'implements access control to prevent contention during forester. The', - 'forester pubkey specified in this struct can bypass contention checks.', - ], - type: 'publicKey', - }, - ], - }, - }, - { - name: 'MerkleTreeMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'accessMetadata', - type: { - defined: 'AccessMetadata', - }, - }, - { - name: 'rolloverMetadata', - type: { - defined: 'RolloverMetadata', - }, - }, - { - name: 'associatedQueue', - type: 'publicKey', - }, - { - name: 'nextMerkleTree', - type: 'publicKey', - }, - ], - }, - }, - { - name: 'RolloverMetadata', - type: { - kind: 'struct', - fields: [ - { - name: 'index', - docs: ['Unique index.'], - type: 'u64', - }, - { - name: 'rolloverFee', - docs: [ - 'This fee is used for rent for the next account.', - 'It accumulates in the account so that once the corresponding Merkle tree account is full it can be rolled over', - ], - type: 'u64', - }, - { - name: 'rolloverThreshold', - docs: [ - 'The threshold in percentage points when the account should be rolled over (95 corresponds to 95% filled).', - ], - type: 'u64', - }, - { - name: 'networkFee', - docs: ['Tip for maintaining the account.'], - type: 'u64', - }, - { - name: 'rolledoverSlot', - docs: [ - 'The slot when the account was rolled over, a rolled over account should not be written to.', - ], - type: 'u64', - }, - { - name: 'closeThreshold', - docs: [ - 'If current slot is greater than rolledover_slot + close_threshold and', - "the account is empty it can be closed. No 'close' functionality has been", - 'implemented yet.', - ], - type: 'u64', - }, - { - name: 'additionalBytes', - docs: [ - 'Placeholder for bytes of additional accounts which are tied to the', - 'Merkle trees operation and need to be rolled over as well.', - ], - type: 'u64', - }, - ], - }, - }, { name: 'InstructionDataInvoke', type: { diff --git a/js/stateless.js/tests/e2e/compress.test.ts b/js/stateless.js/tests/e2e/compress.test.ts index 44f689edd0..5b27405578 100644 --- a/js/stateless.js/tests/e2e/compress.test.ts +++ b/js/stateless.js/tests/e2e/compress.test.ts @@ -43,7 +43,8 @@ function txFees( : bn(0); /// Fee if the tx nullifies at least one input account - const networkInFee = tx.in ? STATE_MERKLE_TREE_NETWORK_FEE : bn(0); + const networkInFee = + tx.in || tx.out ? STATE_MERKLE_TREE_NETWORK_FEE : bn(0); /// Fee if the tx creates at least one address const networkAddressFee = tx.addr ? 
ADDRESS_TREE_NETWORK_FEE : bn(0); diff --git a/js/stateless.js/tests/e2e/test-rpc.test.ts b/js/stateless.js/tests/e2e/test-rpc.test.ts index 55f003b5c8..f16a7d834d 100644 --- a/js/stateless.js/tests/e2e/test-rpc.test.ts +++ b/js/stateless.js/tests/e2e/test-rpc.test.ts @@ -76,6 +76,7 @@ describe('test-rpc', () => { preCompressBalance - compressLamportsAmount - 5000 - + 5000 - STATE_MERKLE_TREE_ROLLOVER_FEE.toNumber(), ); }); diff --git a/light-prover/prover/circuit_builder.go b/light-prover/prover/circuit_builder.go index 86eaa2d39f..54b4823a66 100644 --- a/light-prover/prover/circuit_builder.go +++ b/light-prover/prover/circuit_builder.go @@ -8,12 +8,12 @@ import ( type CircuitType string const ( - CombinedCircuitType CircuitType = "combined" - InclusionCircuitType CircuitType = "inclusion" - NonInclusionCircuitType CircuitType = "non-inclusion" + CombinedCircuitType CircuitType = "combined" + InclusionCircuitType CircuitType = "inclusion" + NonInclusionCircuitType CircuitType = "non-inclusion" BatchAppendWithSubtreesCircuitType CircuitType = "append-with-subtrees" BatchAppendWithProofsCircuitType CircuitType = "append-with-proofs" - BatchUpdateCircuitType CircuitType = "update" + BatchUpdateCircuitType CircuitType = "update" ) func SetupCircuitV1(circuit CircuitType, inclusionTreeHeight uint32, inclusionNumberOfCompressedAccounts uint32, nonInclusionTreeHeight uint32, nonInclusionNumberOfCompressedAccounts uint32) (*ProvingSystemV1, error) { diff --git a/merkle-tree/bloom-filter/Cargo.toml b/merkle-tree/bloom-filter/Cargo.toml new file mode 100644 index 0000000000..ed4315767f --- /dev/null +++ b/merkle-tree/bloom-filter/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "light-bloom-filter" +version = "0.1.0" +description = "Experimental bloom filter." 
+repository = "https://github.com/Lightprotocol/light-protocol" +license = "Apache-2.0" +edition = "2021" + +[features] +solana = ["solana-program"] + +[dependencies] +bitvec = "1.0.1" +fastmurmur3 = "0.2.0" +num-bigint = "0.4" +num-traits = "0.2" +solana-program = { workspace = true, optional = true } +thiserror = "1.0" + +[dev-dependencies] +rand = "0.8" +light-utils = { path = "../../utils", version = "1.0.0" } diff --git a/merkle-tree/bloom-filter/src/lib.rs b/merkle-tree/bloom-filter/src/lib.rs new file mode 100644 index 0000000000..928906dbaa --- /dev/null +++ b/merkle-tree/bloom-filter/src/lib.rs @@ -0,0 +1,258 @@ +use std::f64::consts::LN_2; +use thiserror::Error; + +#[derive(Debug, Error, PartialEq)] +pub enum BloomFilterError { + #[error("Bloom filter is full")] + Full, + #[error("Invalid store capacity")] + InvalidStoreCapacity, +} + +#[cfg(feature = "solana")] +impl From for u32 { + fn from(e: BloomFilterError) -> u32 { + match e { + BloomFilterError::Full => 16001, + BloomFilterError::InvalidStoreCapacity => 16002, + } + } +} + +#[cfg(feature = "solana")] +impl From for solana_program::program_error::ProgramError { + fn from(e: BloomFilterError) -> Self { + solana_program::program_error::ProgramError::Custom(e.into()) + } +} + +#[derive(Debug)] +pub struct BloomFilter<'a> { + pub num_iters: usize, + pub capacity: u64, + pub store: &'a mut [u8], +} + +impl<'a> BloomFilter<'a> { + // TODO: find source for this + pub fn calculate_bloom_filter_size(n: usize, p: f64) -> usize { + let m = -((n as f64) * p.ln()) / (LN_2 * LN_2); + m.ceil() as usize + } + + pub fn calculate_optimal_hash_functions(n: usize, m: usize) -> usize { + let k = (m as f64 / n as f64) * LN_2; + k.ceil() as usize + } + + pub fn new( + num_iters: usize, + capacity: u64, + store: &'a mut [u8], + ) -> Result { + // Capacity is in bits while store is in bytes. 
+ if store.len() * 8 != capacity as usize { + return Err(BloomFilterError::InvalidStoreCapacity); + } + Ok(Self { + num_iters, + capacity, + store, + }) + } + + pub fn probe_index_fast_murmur(value_bytes: &[u8], iteration: usize, capacity: &u64) -> usize { + let iter_bytes = iteration.to_le_bytes(); + let base_hash = fastmurmur3::hash(value_bytes); + let mut combined_bytes = [0u8; 24]; + combined_bytes[..16].copy_from_slice(&base_hash.to_le_bytes()); + combined_bytes[16..].copy_from_slice(&iter_bytes); + + let combined_hash = fastmurmur3::hash(&combined_bytes); + (combined_hash % (*capacity as u128)) as usize + } + + pub fn insert(&mut self, value: &[u8; 32]) -> Result<(), BloomFilterError> { + if self._insert(value, true) { + Ok(()) + } else { + Err(BloomFilterError::Full) + } + } + + // TODO: reconsider &mut self + pub fn contains(&mut self, value: &[u8; 32]) -> bool { + !self._insert(value, false) + } + + fn _insert(&mut self, value: &[u8; 32], insert: bool) -> bool { + let mut all_bits_set = true; + use bitvec::prelude::*; + + let bits = BitSlice::::from_slice_mut(self.store); + for i in 0..self.num_iters { + let probe_index = Self::probe_index_fast_murmur(value, i, &(self.capacity)); + if bits[probe_index] { + continue; + } else if insert { + all_bits_set = false; + bits.set(probe_index, true); + } else if !bits[probe_index] && !insert { + return true; + } + } + !all_bits_set + } +} + +#[cfg(test)] +mod test { + use super::*; + use light_utils::bigint::bigint_to_be_bytes_array; + use num_bigint::{RandBigInt, ToBigUint}; + use rand::thread_rng; + + #[test] + fn test_insert_and_contains() -> Result<(), BloomFilterError> { + let capacity = 128_000 * 8; + let mut store = [0u8; 128_000]; + let mut bf = BloomFilter { + num_iters: 3, + capacity, + store: &mut store, + }; + + let value1 = [1u8; 32]; + let value2 = [2u8; 32]; + + bf.insert(&value1)?; + assert!(bf.contains(&value1)); + assert!(!bf.contains(&value2)); + + Ok(()) + } + + #[test] + fn short_rnd_test() { + let capacity = 500; + let bloom_filter_capacity = 20_000 * 8; + let optimal_hash_functions = 3; + rnd_test( + 1000, + capacity, + bloom_filter_capacity, + optimal_hash_functions, + false, + ); + } + + /// Bench results: + /// - 15310 CU for 10 insertions with 3 hash functions + /// - capacity 5000 0.000_000_000_1 with 15 hash functions seems to not + /// produce any collisions + #[ignore = "bench"] + #[test] + fn bench_bloom_filter() { + let capacity = 5000; + let bloom_filter_capacity = + BloomFilter::calculate_bloom_filter_size(capacity, 0.000_000_000_1); + let optimal_hash_functions = 15; + let iterations = 1_000_000; + rnd_test( + iterations, + capacity, + bloom_filter_capacity, + optimal_hash_functions, + true, + ); + } + + fn rnd_test( + num_iters: usize, + capacity: usize, + bloom_filter_capacity: usize, + optimal_hash_functions: usize, + bench: bool, + ) { + println!("Optimal hash functions: {}", optimal_hash_functions); + println!( + "Bloom filter capacity (kb): {}", + bloom_filter_capacity / 8 / 1_000 + ); + let mut num_total_txs = 0; + let mut rng = thread_rng(); + let mut failed_vec = Vec::new(); + for j in 0..num_iters { + let mut inserted_values = Vec::new(); + let mut store = vec![0; bloom_filter_capacity]; + let mut bf = BloomFilter { + num_iters: optimal_hash_functions, + capacity: bloom_filter_capacity as u64, + store: &mut store, + }; + if j == 0 { + println!("Bloom filter capacity: {}", bf.capacity); + println!("Bloom filter size: {}", bf.store.len()); + println!("Bloom filter size (kb): {}", bf.store.len() / 
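`calculate_bloom_filter_size` and `calculate_optimal_hash_functions` are the textbook Bloom filter sizing rules, m = ceil(-n * ln p / ln^2 2) bits and k = ceil((m / n) * ln 2) probes for n expected insertions at target false-positive rate p, and `probe_index_fast_murmur` derives the k probe positions by re-hashing the Murmur3 base hash with the iteration index. The sketch below wires them together end to end; it assumes the `light-bloom-filter` crate above is in scope, and the byte-boundary rounding is this example's own addition, since `new` requires `store.len() * 8 == capacity` exactly.

```rust
use light_bloom_filter::{BloomFilter, BloomFilterError};

fn main() -> Result<(), BloomFilterError> {
    let n = 5_000; // expected insertions
    let p = 1e-9; // target false-positive probability
    let m_bits = BloomFilter::calculate_bloom_filter_size(n, p);
    let k = BloomFilter::calculate_optimal_hash_functions(n, m_bits);

    // Round the capacity up to a whole number of bytes so the
    // bits-vs-bytes invariant in `new` holds.
    let capacity_bits = m_bits.div_ceil(8) * 8;
    let mut store = vec![0u8; capacity_bits / 8];
    let mut bf = BloomFilter::new(k, capacity_bits as u64, &mut store)?;

    let value = [42u8; 32];
    bf.insert(&value)?;
    assert!(bf.contains(&value));
    // Re-inserting the same value finds every probed bit already set,
    // which the filter reports as Full.
    assert_eq!(bf.insert(&value), Err(BloomFilterError::Full));
    Ok(())
}
```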
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use light_utils::bigint::bigint_to_be_bytes_array;
+    use num_bigint::{RandBigInt, ToBigUint};
+    use rand::thread_rng;
+
+    #[test]
+    fn test_insert_and_contains() -> Result<(), BloomFilterError> {
+        let capacity = 128_000 * 8;
+        let mut store = [0u8; 128_000];
+        let mut bf = BloomFilter {
+            num_iters: 3,
+            capacity,
+            store: &mut store,
+        };
+
+        let value1 = [1u8; 32];
+        let value2 = [2u8; 32];
+
+        bf.insert(&value1)?;
+        assert!(bf.contains(&value1));
+        assert!(!bf.contains(&value2));
+
+        Ok(())
+    }
+
+    #[test]
+    fn short_rnd_test() {
+        let capacity = 500;
+        let bloom_filter_capacity = 20_000 * 8;
+        let optimal_hash_functions = 3;
+        rnd_test(
+            1000,
+            capacity,
+            bloom_filter_capacity,
+            optimal_hash_functions,
+            false,
+        );
+    }
+
+    /// Bench results:
+    /// - 15310 CU for 10 insertions with 3 hash functions
+    /// - capacity 5000 at p = 0.000_000_000_1 with 15 hash functions seems to
+    ///   not produce any collisions
+    #[ignore = "bench"]
+    #[test]
+    fn bench_bloom_filter() {
+        let capacity = 5000;
+        let bloom_filter_capacity =
+            BloomFilter::calculate_bloom_filter_size(capacity, 0.000_000_000_1);
+        let optimal_hash_functions = 15;
+        let iterations = 1_000_000;
+        rnd_test(
+            iterations,
+            capacity,
+            bloom_filter_capacity,
+            optimal_hash_functions,
+            true,
+        );
+    }
+
+    fn rnd_test(
+        num_iters: usize,
+        capacity: usize,
+        bloom_filter_capacity: usize,
+        optimal_hash_functions: usize,
+        bench: bool,
+    ) {
+        println!("Optimal hash functions: {}", optimal_hash_functions);
+        println!(
+            "Bloom filter capacity (kb): {}",
+            bloom_filter_capacity / 8 / 1_000
+        );
+        let mut num_total_txs = 0;
+        let mut rng = thread_rng();
+        let mut failed_vec = Vec::new();
+        for j in 0..num_iters {
+            let mut inserted_values = Vec::new();
+            let mut store = vec![0; bloom_filter_capacity];
+            let mut bf = BloomFilter {
+                num_iters: optimal_hash_functions,
+                capacity: bloom_filter_capacity as u64,
+                store: &mut store,
+            };
+            if j == 0 {
+                println!("Bloom filter capacity: {}", bf.capacity);
+                println!("Bloom filter size: {}", bf.store.len());
+                // store.len() is already in bytes, so kilobytes need no /8.
+                println!("Bloom filter size (kb): {}", bf.store.len() / 1_000);
+                println!("num iters: {}", bf.num_iters);
+            }
+            for i in 0..capacity {
+                num_total_txs += 1;
+                let value = {
+                    let mut _value = 0u64.to_biguint().unwrap();
+                    while inserted_values.contains(&_value.clone()) {
+                        _value = rng.gen_biguint(254);
+                    }
+                    inserted_values.push(_value.clone());
+
+                    _value
+                };
+                let value: [u8; 32] = bigint_to_be_bytes_array(&value).unwrap();
+                match bf.insert(&value) {
+                    Ok(_) => {
+                        assert!(bf.contains(&value));
+                    }
+                    Err(_) => {
+                        println!("Failed to insert iter: {}", i);
+                        println!("total iter {}", j);
+                        println!("num_total_txs {}", num_total_txs);
+                        failed_vec.push(i);
+                    }
+                };
+                assert!(bf.contains(&value));
+                assert!(bf.insert(&value).is_err());
+            }
+        }
+        if bench {
+            println!("total num tx {}", num_total_txs);
+            let average = failed_vec.iter().sum::<usize>() as f64 / failed_vec.len() as f64;
+            println!("average failed insertions: {}", average);
+            println!(
+                "max failed insertions: {}",
+                failed_vec.iter().max().unwrap()
+            );
+            println!(
+                "min failed insertions: {}",
+                failed_vec.iter().min().unwrap()
+            );
+
+            let num_chunks = 10;
+            let chunk_size = num_iters / num_chunks;
+            failed_vec.sort();
+            for (i, chunk) in failed_vec.chunks(chunk_size).enumerate() {
+                let average = chunk.iter().sum::<usize>() as f64 / chunk.len() as f64;
+                println!("chunk: {} average failed insertions: {}", i, average);
+                println!(
+                    "chunk: {} max failed insertions: {}",
+                    i,
+                    chunk.iter().max().unwrap()
+                );
+                println!(
+                    "chunk: {} min failed insertions: {}",
+                    i,
+                    chunk.iter().min().unwrap()
+                );
+            }
+        }
+    }
+}
diff --git a/merkle-tree/bounded-vec/Cargo.toml b/merkle-tree/bounded-vec/Cargo.toml
index 29716562fb..84e79a353c 100644
--- a/merkle-tree/bounded-vec/Cargo.toml
+++ b/merkle-tree/bounded-vec/Cargo.toml
@@ -17,3 +17,4 @@ thiserror = "1.0"
 
 [dev-dependencies]
 rand = "0.8"
+bytemuck = { version = "1.17", features = ["derive"] }
diff --git a/merkle-tree/bounded-vec/src/lib.rs b/merkle-tree/bounded-vec/src/lib.rs
index 6aad71371c..5f24c6792a 100644
--- a/merkle-tree/bounded-vec/src/lib.rs
+++ b/merkle-tree/bounded-vec/src/lib.rs
@@ -1,12 +1,16 @@
+pub mod offset;
+
 use std::{
     alloc::{self, handle_alloc_error, Layout},
-    fmt, mem,
-    ops::{Index, IndexMut},
+    fmt,
+    mem::{self, size_of, ManuallyDrop},
+    ops::{Index, IndexMut, Sub},
     ptr::{self, NonNull},
     slice::{self, Iter, IterMut, SliceIndex},
 };
 
 use memoffset::span_of;
+use offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at};
 use thiserror::Error;
 
 #[derive(Debug, Error, PartialEq)]
@@ -17,6 +21,8 @@ pub enum BoundedVecError {
     ArraySize(usize, usize),
     #[error("The requested start index is out of bounds.")]
     IterFromOutOfBounds,
+    #[error("Memory allocated {0}, Memory required {1}")]
+    InsufficientMemoryAllocated(usize, usize),
 }
@@ -26,6 +32,7 @@ impl From<BoundedVecError> for u32 {
             BoundedVecError::Full => 8001,
             BoundedVecError::ArraySize(_, _) => 8002,
             BoundedVecError::IterFromOutOfBounds => 8003,
+            BoundedVecError::InsufficientMemoryAllocated(_, _) => 8004,
         }
     }
 }
@@ -143,6 +150,13 @@ where
         Self { metadata, data }
     }
 
+    #[inline]
+    pub fn clear(&mut self) {
+        unsafe {
+            (*self.metadata).length = 0;
+        }
+    }
+
     /// Creates a `BoundedVec` with the given `metadata`.
     ///
     /// # Safety
@@ -341,6 +355,96 @@ where
         }
         Ok(())
     }
+
+    pub fn deserialize(
+        account_data: &mut [u8],
+        start_offset: &mut usize,
+    ) -> Result<ManuallyDrop<BoundedVec<T>>, BoundedVecError> {
+        unsafe {
+            let meta_data_size = size_of::<BoundedVecMetadata>();
+            if account_data.len().saturating_sub(*start_offset) < meta_data_size {
+                return Err(BoundedVecError::InsufficientMemoryAllocated(
+                    account_data.len().saturating_sub(*start_offset),
+                    meta_data_size,
+                ));
+            }
+            let metadata: *mut BoundedVecMetadata = read_ptr_at(account_data, start_offset);
+            let full_vector_size = (*metadata).capacity() * size_of::<T>();
+            if account_data.len().saturating_sub(*start_offset) < full_vector_size {
+                return Err(BoundedVecError::InsufficientMemoryAllocated(
+                    account_data.len().saturating_sub(*start_offset),
+                    full_vector_size + meta_data_size,
+                ));
+            }
+            Ok(ManuallyDrop::new(BoundedVec::from_raw_parts(
+                metadata,
+                read_array_like_ptr_at(account_data, start_offset, (*metadata).capacity()),
+            )))
+        }
+    }
+
+    pub fn deserialize_multiple(
+        num: usize,
+        account_data: &mut [u8],
+        start_offset: &mut usize,
+    ) -> Result<Vec<ManuallyDrop<BoundedVec<T>>>, BoundedVecError> {
+        let mut value_vecs = Vec::with_capacity(num);
+        for _ in 0..num {
+            let vec = Self::deserialize(account_data, start_offset)?;
+            value_vecs.push(vec);
+        }
+        Ok(value_vecs)
+    }
+
+    pub fn init(
+        capacity: usize,
+        account_data: &mut [u8],
+        start_offset: &mut usize,
+        with_len: bool,
+    ) -> Result<ManuallyDrop<BoundedVec<T>>, BoundedVecError> {
+        let vector_size = capacity * size_of::<T>();
+        let full_vector_size = vector_size + size_of::<BoundedVecMetadata>();
+        if full_vector_size > account_data.len().saturating_sub(*start_offset) {
+            return Err(BoundedVecError::InsufficientMemoryAllocated(
+                account_data.len().saturating_sub(*start_offset),
+                full_vector_size,
+            ));
+        }
+        let meta: BoundedVecMetadata = if with_len {
+            BoundedVecMetadata::new_with_length(capacity, capacity)
+        } else {
+            BoundedVecMetadata::new(capacity)
+        };
+        write_at::<BoundedVecMetadata>(account_data, meta.to_le_bytes().as_slice(), start_offset);
+        let meta: *mut BoundedVecMetadata = unsafe {
+            read_ptr_at(
+                &*account_data,
+                &mut start_offset.sub(size_of::<BoundedVecMetadata>()),
+            )
+        };
+
+        Ok(unsafe {
+            ManuallyDrop::new(BoundedVec::from_raw_parts(
+                meta,
+                read_array_like_ptr_at(&*account_data, start_offset, capacity),
+            ))
+        })
+    }
+
+    pub fn init_multiple(
+        num: usize,
+        capacity: usize,
+        account_data: &mut [u8],
+        start_offset: &mut usize,
+        with_len: bool,
+    ) -> Result<Vec<ManuallyDrop<BoundedVec<T>>>, BoundedVecError> {
+        let mut value_vecs = Vec::with_capacity(num);
+        for _ in 0..num {
+            let vec = Self::init(capacity, account_data, start_offset, with_len)?;
+            value_vecs.push(vec);
+        }
+        Ok(value_vecs)
+    }
 }
 
 impl<T> Clone for BoundedVec<T>
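The `init`/`deserialize` pair added above gives `BoundedVec` a zero-copy account layout: a `BoundedVecMetadata` header at `start_offset`, immediately followed by `capacity * size_of::<T>()` bytes of elements, with the offset advanced past both so that several vectors can be packed into one buffer (which is all `init_multiple`/`deserialize_multiple` do). A minimal round trip, sketched under the assumption that this crate's new API is used exactly as added in the hunk above:

```rust
use std::mem::size_of;

use light_bounded_vec::{BoundedVec, BoundedVecError, BoundedVecMetadata};

fn main() -> Result<(), BoundedVecError> {
    // One vector of capacity 4: metadata header followed by the element array.
    let len = size_of::<BoundedVecMetadata>() + 4 * size_of::<u64>();
    let mut account_data = vec![0u8; len];

    let mut offset = 0;
    {
        // `init` writes a fresh metadata header at `offset`, then hands back a
        // ManuallyDrop wrapper over the buffer; nothing is heap-allocated.
        let mut vec = BoundedVec::<u64>::init(4, &mut account_data, &mut offset, false)?;
        vec.push(7).unwrap();
    } // Dropping the wrapper frees nothing; the data lives in account_data.

    // Re-reading the same region sees the pushed element.
    let mut offset = 0;
    let vec = BoundedVec::<u64>::deserialize(&mut account_data, &mut offset)?;
    assert_eq!(vec.len(), 1);
    assert_eq!(vec.get(0), Some(&7));
    Ok(())
}
```

The same layout convention explains `clear()` above: it only resets the length field in the header, so previously written element bytes remain in the account until overwritten.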
@@ -486,6 +590,15 @@ impl CyclicBoundedVecMetadata {
         }
     }
 
+    pub fn new_with_length(capacity: usize, length: usize) -> Self {
+        Self {
+            capacity,
+            length,
+            first_index: 0,
+            last_index: 0,
+        }
+    }
+
     pub fn new_with_indices(
         capacity: usize,
         length: usize,
@@ -528,6 +641,10 @@ impl CyclicBoundedVecMetadata {
     pub fn length(&self) -> usize {
         self.length
     }
+
+    pub fn get_first_index(&self) -> usize {
+        self.first_index
+    }
 }
 
 /// `CyclicBoundedVec` is a wrapper around [`Vec`](std::vec::Vec) which:
@@ -795,6 +912,77 @@ where
     pub fn last_mut(&mut self) -> Option<&mut T> {
         self.get_mut(self.last_index())
     }
+
+    pub fn init(
+        capacity: usize,
+        account_data: &mut [u8],
+        start_offset: &mut usize,
+        with_len: bool,
+    ) -> Result<ManuallyDrop<CyclicBoundedVec<T>>, BoundedVecError> {
+        let vector_size = capacity * size_of::<T>();
+
+        let full_vector_size = vector_size + size_of::<CyclicBoundedVecMetadata>();
+        if full_vector_size > account_data.len().saturating_sub(*start_offset) {
+            return Err(BoundedVecError::InsufficientMemoryAllocated(
+                account_data.len().saturating_sub(*start_offset),
+                full_vector_size,
+            ));
+        }
+        let meta: CyclicBoundedVecMetadata = if with_len {
+            CyclicBoundedVecMetadata::new_with_length(capacity, capacity)
+        } else {
+            CyclicBoundedVecMetadata::new(capacity)
+        };
+        write_at::<CyclicBoundedVecMetadata>(
+            account_data,
+            meta.to_le_bytes().as_slice(),
+            start_offset,
+        );
+        let meta: *mut CyclicBoundedVecMetadata = unsafe {
+            read_ptr_at(
+                &*account_data,
+                &mut start_offset.sub(size_of::<CyclicBoundedVecMetadata>()),
+            )
+        };
+        Ok(unsafe {
+            ManuallyDrop::new(CyclicBoundedVec::from_raw_parts(
+                meta,
+                read_array_like_ptr_at(&*account_data, start_offset, capacity),
+            ))
+        })
+    }
+
+    // TODO: pull ManuallyDrop into CyclicBoundedVec
+    pub fn deserialize(
+        account_data: &mut [u8],
+        start_offset: &mut usize,
+    ) -> Result<ManuallyDrop<CyclicBoundedVec<T>>, BoundedVecError> {
+        unsafe {
+            if account_data.len().saturating_sub(*start_offset)
+                < size_of::<CyclicBoundedVecMetadata>()
+            {
+                return Err(BoundedVecError::InsufficientMemoryAllocated(
+                    account_data.len().saturating_sub(*start_offset),
+                    size_of::<CyclicBoundedVecMetadata>(),
+                ));
+            }
+
+            let metadata: *mut CyclicBoundedVecMetadata = read_ptr_at(account_data, start_offset);
+
+            let full_vector_size = (*metadata).capacity() * size_of::<T>();
+            if account_data.len().saturating_sub(*start_offset) < full_vector_size {
+                return Err(BoundedVecError::InsufficientMemoryAllocated(
+                    account_data.len().saturating_sub(*start_offset),
+                    full_vector_size,
+                ));
+            }
+
+            Ok(ManuallyDrop::new(CyclicBoundedVec::from_raw_parts(
+                metadata,
+                read_array_like_ptr_at(account_data, start_offset, (*metadata).capacity()),
+            )))
+        }
+    }
 }
 
 impl<T> fmt::Debug for CyclicBoundedVec<T>
@@ -1716,4 +1904,180 @@ mod test {
         ]
     );
 }
+
+#[test]
+fn test_clear_pass() {
+    let mut vec = BoundedVec::with_capacity(5);
+    vec.push(1).unwrap();
+    vec.push(2).unwrap();
+    vec.clear();
+    assert_eq!(vec.len(), 0);
+    assert!(vec.get(0).is_none());
+    assert!(vec.get(1).is_none());
+}
+
+#[test]
+fn test_clear_fail() {
+    let mut vec = BoundedVec::with_capacity(5);
+    vec.push(1).unwrap();
+    assert_eq!(vec.get(0).unwrap(), &1);
+    vec.clear();
+    assert_eq!(vec.get(0), None);
+}
+
+#[test]
+fn test_deserialize_pass() {
+    let mut account_data = vec![0u8; 64];
+    let mut start_offset = 0;
+
+    // Initialize data with valid BoundedVec metadata and elements
+    BoundedVec::<u64>::init(4, &mut account_data, &mut start_offset, false).unwrap();
+    start_offset = 0;
+
+    // Deserialize the BoundedVec
+    let deserialized_vec = BoundedVec::<u64>::deserialize(&mut account_data, &mut start_offset)
+        .expect("Failed to deserialize BoundedVec");
+
+    assert_eq!(deserialized_vec.metadata().capacity(), 4);
+    assert_eq!(deserialized_vec.metadata().length(), 0);
+}
+
+#[test]
+fn test_deserialize_multiple_pass() {
+    let mut account_data = vec![0u8; 128];
+    let mut start_offset = 0;
+
+    // Initialize data for multiple BoundedVecs
+    BoundedVec::<u64>::init(4, &mut account_data, &mut start_offset, false).unwrap();
+    BoundedVec::<u64>::init(4, &mut account_data, &mut start_offset, false).unwrap();
+    start_offset = 0;
+
+    // Deserialize multiple BoundedVecs
+    let deserialized_vecs =
+        BoundedVec::<u64>::deserialize_multiple(2, &mut account_data, &mut start_offset)
+            .expect("Failed to deserialize multiple BoundedVecs");
+
+    assert_eq!(deserialized_vecs.len(), 2);
+}
+
+#[test]
+fn test_init_pass() {
+    let mut account_data = vec![0u8; 64];
+    let mut start_offset = 0;
+
+    // Initialize a BoundedVec with capacity 4
+    let mut vec = BoundedVec::<u64>::init(4, &mut account_data, &mut start_offset, false)
+        .expect("Failed to initialize BoundedVec");
+
+    assert_eq!(vec.metadata().capacity(), 4);
+    assert_eq!(vec.metadata().length(), 0);
+    for i in 0..4 {
+        assert!(vec.get(i).is_none());
+        vec.push(i as u64).unwrap();
+        assert_eq!(*vec.get(i).unwrap(), i as u64);
+        assert!(vec.metadata().length() == i + 1);
+    }
+}
+
+#[test]
+fn test_init_multiple_pass() {
+    let mut account_data = vec![0u8; 128];
+    let mut start_offset = 0;
+    let mut initialized_vecs =
+        BoundedVec::<u64>::init_multiple(2, 4, &mut account_data, &mut start_offset, false)
+            .expect("Failed to initialize multiple BoundedVecs");
+
+    assert_eq!(initialized_vecs.len(), 2);
+    assert_eq!(initialized_vecs[0].metadata().capacity(), 4);
+    assert_eq!(initialized_vecs[1].metadata().capacity(), 4);
+    assert_eq!(initialized_vecs[0].metadata().length(), 0);
+    assert_eq!(initialized_vecs[1].metadata().length(), 0);
+    for i in 0..4 {
+        for vec in initialized_vecs.iter_mut() {
+            assert!(vec.get(i).is_none());
+            vec.push(i as u64).unwrap();
+            assert_eq!(*vec.get(i).unwrap(), i as u64);
+            assert!(vec.metadata().length() == i + 1);
+        }
+    }
+}
+
+#[test]
+fn test_insufficient_memory_deserialize_metadata() {
+    let required_memory = mem::size_of::<BoundedVecMetadata>();
+    let mut account_data = vec![0u8; required_memory - 1];
+    let mut start_offset = 0;
+
+    let result = BoundedVec::<u64>::deserialize(&mut account_data, &mut start_offset);
+    assert!(matches!(
+        result,
+        Err(BoundedVecError::InsufficientMemoryAllocated(_, expected_memory))
+            if expected_memory == required_memory
+    ));
+}
+
+#[test]
+fn test_insufficient_memory_deserialize_full_vector() {
+    let required_memory = mem::size_of::<BoundedVecMetadata>() + 4 * mem::size_of::<u64>();
+    let mut account_data = vec![0u8; required_memory];
+    BoundedVec::<u64>::init(4, &mut account_data, &mut 0, false).unwrap();
+    let mut account_data = account_data[0..required_memory - 1].to_vec();
+    let mut start_offset = 0;
+
+    let result = BoundedVec::<u64>::deserialize(&mut account_data, &mut start_offset);
+    assert!(matches!(
+        result,
+        Err(BoundedVecError::InsufficientMemoryAllocated(_, expected_memory))
+            if expected_memory == required_memory
+    ));
+}
+
+#[test]
+fn test_insufficient_memory_init_single() {
+    let required_memory = mem::size_of::<BoundedVecMetadata>() + 4 * mem::size_of::<u64>();
+    let mut account_data = vec![0u8; required_memory - 1];
+    let mut start_offset = 0;
+    let result = BoundedVec::<u64>::init(4, &mut account_data, &mut start_offset, false);
+    assert!(matches!(
+        result,
+        Err(BoundedVecError::InsufficientMemoryAllocated(_, expected_memory))
+            if expected_memory == required_memory
+    ));
+}
+
+#[test]
+fn test_insufficient_memory_deserialize_multiple() {
+    let required_memory =
+        2 * (mem::size_of::<BoundedVecMetadata>() + 3 * mem::size_of::<u64>());
+    let mut account_data = vec![0u8; required_memory];
+    BoundedVec::<u64>::init_multiple(2, 3, &mut account_data, &mut 0, false).unwrap();
+    let mut account_data = account_data[0..required_memory - 1].to_vec();
+    let mut start_offset = 0;
+
+    let result =
+        BoundedVec::<u64>::deserialize_multiple(2, &mut account_data, &mut start_offset);
+    let required_memory_per_vec = required_memory / 2;
+    assert!(matches!(
+        result,
+        Err(BoundedVecError::InsufficientMemoryAllocated(_, expected_memory))
+            if expected_memory == required_memory_per_vec
+    ));
+}
+
+#[test]
+fn test_insufficient_memory_init_multiple() {
+    let required_memory =
+        2 * (mem::size_of::<BoundedVecMetadata>() + 3 * mem::size_of::<u64>());
+    let mut account_data = vec![0u8; required_memory - 1];
+
+    let result = BoundedVec::<u64>::init_multiple(2, 3, &mut account_data, &mut 0, false);
+    let required_memory_per_vec = required_memory / 2;
+    assert!(matches!(
+        result,
+        Err(BoundedVecError::InsufficientMemoryAllocated(
+            _,
+            expected_memory
+        )) if expected_memory == required_memory_per_vec
+    ));
+}
}
diff --git a/utils/src/offset/copy.rs b/merkle-tree/bounded-vec/src/offset/copy.rs
similarity index 99%
rename from utils/src/offset/copy.rs
rename to merkle-tree/bounded-vec/src/offset/copy.rs
index ee903e6661..acc94a760d 100644
--- a/utils/src/offset/copy.rs
+++ b/merkle-tree/bounded-vec/src/offset/copy.rs
@@ -1,8 +1,6 @@
 use std::{mem, ptr};
 
-use light_bounded_vec::{
-    BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecMetadata,
-};
+use crate::{BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecMetadata};
 
 /// Creates a copy of value of type `T` based on the provided `bytes` buffer.
 ///
diff --git a/utils/src/offset/mod.rs b/merkle-tree/bounded-vec/src/offset/mod.rs
similarity index 100%
rename from utils/src/offset/mod.rs
rename to merkle-tree/bounded-vec/src/offset/mod.rs
diff --git a/utils/src/offset/zero_copy.rs b/merkle-tree/bounded-vec/src/offset/zero_copy.rs
similarity index 100%
rename from utils/src/offset/zero_copy.rs
rename to merkle-tree/bounded-vec/src/offset/zero_copy.rs
diff --git a/merkle-tree/concurrent/Cargo.toml b/merkle-tree/concurrent/Cargo.toml
index aa9bc9fa76..dfba68e5e4 100644
--- a/merkle-tree/concurrent/Cargo.toml
+++ b/merkle-tree/concurrent/Cargo.toml
@@ -16,7 +16,7 @@ solana = [
 ]
 
 [dependencies]
 borsh = "0.10"
-bytemuck = "1.17"
+bytemuck = { version = "1.17", features = ["derive"] }
 light-bounded-vec = { path = "../bounded-vec", version = "1.1.0" }
 light-hasher = { path = "../hasher", version = "1.1.0" }
 light-utils = { version = "1.1.0", path = "../../utils" }
diff --git a/merkle-tree/concurrent/src/copy.rs b/merkle-tree/concurrent/src/copy.rs
index bac7df1942..12c3793b93 100644
--- a/merkle-tree/concurrent/src/copy.rs
+++ b/merkle-tree/concurrent/src/copy.rs
@@ -1,8 +1,10 @@
 use std::ops::Deref;
 
-use light_bounded_vec::{BoundedVecMetadata, CyclicBoundedVecMetadata};
+use light_bounded_vec::{
+    offset::copy::{read_bounded_vec_at, read_cyclic_bounded_vec_at, read_value_at},
+    BoundedVecMetadata, CyclicBoundedVecMetadata,
+};
 use light_hasher::Hasher;
-use light_utils::offset::copy::{read_bounded_vec_at, read_cyclic_bounded_vec_at, read_value_at};
 use memoffset::{offset_of, span_of};
 
 use crate::{errors::ConcurrentMerkleTreeError, ConcurrentMerkleTree};
diff --git a/merkle-tree/concurrent/src/zero_copy.rs b/merkle-tree/concurrent/src/zero_copy.rs
index a2e1103dd4..c928f2acd6 100644
--- a/merkle-tree/concurrent/src/zero_copy.rs
+++ b/merkle-tree/concurrent/src/zero_copy.rs
@@ -5,10 +5,10 @@ use std::{
 };
 
 use light_bounded_vec::{
+    offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at},
     BoundedVec, BoundedVecMetadata, CyclicBoundedVec, CyclicBoundedVecMetadata,
 };
 use light_hasher::Hasher;
-use light_utils::offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at};
 use memoffset::{offset_of, span_of};
 
 use crate::{errors::ConcurrentMerkleTreeError, ConcurrentMerkleTree};
diff --git a/merkle-tree/concurrent/tests/tests.rs b/merkle-tree/concurrent/tests/tests.rs
index 1cc9de5286..c1ce8b157c 100644
--- a/merkle-tree/concurrent/tests/tests.rs
+++ b/merkle-tree/concurrent/tests/tests.rs
@@ -776,7 +776,7 @@ where
         .unwrap();
     let changelog_index = merkle_tree.changelog_index();
 
-    let old_leaf = reference_tree.get_leaf(i);
+    let old_leaf = reference_tree.leaf(i);
     let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap();
 
     merkle_tree
@@ -799,7
+799,7 @@ where .unwrap(); let changelog_index = merkle_tree.changelog_index(); - let old_leaf = reference_tree.get_leaf(i); + let old_leaf = reference_tree.leaf(i); let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap(); merkle_tree @@ -820,7 +820,7 @@ where .unwrap(); let changelog_index = merkle_tree.changelog_index(); - let old_leaf = reference_tree.get_leaf(i); + let old_leaf = reference_tree.leaf(i); let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap(); merkle_tree @@ -1388,7 +1388,7 @@ async fn test_spl_compat() { let root = concurrent_mt.root(); let changelog_index = concurrent_mt.changelog_index(); - let old_leaf = reference_tree.get_leaf(0); + let old_leaf = reference_tree.leaf(0); let mut proof = reference_tree.get_proof_of_leaf(0, false).unwrap(); concurrent_mt @@ -1412,7 +1412,7 @@ async fn test_spl_compat() { let root = concurrent_mt.root(); let changelog_index = concurrent_mt.changelog_index(); - let old_leaf = reference_tree.get_leaf(i); + let old_leaf = reference_tree.leaf(i); let mut proof = reference_tree.get_proof_of_leaf(i, false).unwrap(); concurrent_mt @@ -1547,7 +1547,7 @@ where // Update random leaf. let leaf_index = rng.gen_range(0..reference_tree_1.leaves().len()); - let old_leaf = reference_tree_1.get_leaf(leaf_index); + let old_leaf = reference_tree_1.leaf(leaf_index); let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() @@ -1620,7 +1620,7 @@ where // Update random leaf. let leaf_index = rng.gen_range(0..reference_tree_1.leaves().len()); - let old_leaf = reference_tree_1.get_leaf(leaf_index); + let old_leaf = reference_tree_1.leaf(leaf_index); let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() @@ -2402,7 +2402,7 @@ fn test_subtree_updates() { Some(index) => { let change_log_index = con_mt.changelog_index(); let mut proof = ref_mt.get_proof_of_leaf(index, false).unwrap(); - let old_leaf = ref_mt.get_leaf(index); + let old_leaf = ref_mt.leaf(index); let current_root = con_mt.root(); spl_concurrent_mt .set_leaf( @@ -2432,7 +2432,7 @@ fn test_subtree_updates() { // test rightmost leaf edge case let change_log_index = con_mt.changelog_index(); let mut proof = ref_mt.get_proof_of_leaf(index, false).unwrap(); - let old_leaf = ref_mt.get_leaf(index); + let old_leaf = ref_mt.leaf(index); let current_root = con_mt.root(); spl_concurrent_mt .set_leaf( @@ -2950,7 +2950,7 @@ where // Update random leaf. let leaf_index = rng.gen_range(0..reference_tree.leaves().len()); - let old_leaf = reference_tree.get_leaf(leaf_index); + let old_leaf = reference_tree.leaf(leaf_index); let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() @@ -3194,7 +3194,7 @@ where } else { // Update random leaf. 
let leaf_index = rng.gen_range(1..reference_tree.leaves().len()); - let old_leaf = reference_tree.get_leaf(leaf_index); + let old_leaf = reference_tree.leaf(leaf_index); let new_leaf: [u8; 32] = Fr::rand(&mut rng) .into_bigint() .to_bytes_be() diff --git a/merkle-tree/indexed/src/copy.rs b/merkle-tree/indexed/src/copy.rs index f7465339a6..5ff95443de 100644 --- a/merkle-tree/indexed/src/copy.rs +++ b/merkle-tree/indexed/src/copy.rs @@ -1,12 +1,14 @@ use std::{fmt, marker::PhantomData, ops::Deref}; use crate::{errors::IndexedMerkleTreeError, IndexedMerkleTree}; -use light_bounded_vec::CyclicBoundedVecMetadata; +use light_bounded_vec::{ + offset::copy::{read_cyclic_bounded_vec_at, read_value_at}, + CyclicBoundedVecMetadata, +}; use light_concurrent_merkle_tree::{ copy::ConcurrentMerkleTreeCopy, errors::ConcurrentMerkleTreeError, }; use light_hasher::Hasher; -use light_utils::offset::copy::{read_cyclic_bounded_vec_at, read_value_at}; use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned}; #[derive(Debug)] diff --git a/merkle-tree/indexed/src/zero_copy.rs b/merkle-tree/indexed/src/zero_copy.rs index d0a4fddd57..372530644d 100644 --- a/merkle-tree/indexed/src/zero_copy.rs +++ b/merkle-tree/indexed/src/zero_copy.rs @@ -5,14 +5,16 @@ use std::{ ops::{Deref, DerefMut}, }; -use light_bounded_vec::{CyclicBoundedVec, CyclicBoundedVecMetadata}; +use light_bounded_vec::{ + offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at}, + CyclicBoundedVec, CyclicBoundedVecMetadata, +}; use light_concurrent_merkle_tree::{ errors::ConcurrentMerkleTreeError, zero_copy::{ConcurrentMerkleTreeZeroCopy, ConcurrentMerkleTreeZeroCopyMut}, ConcurrentMerkleTree, }; use light_hasher::Hasher; -use light_utils::offset::zero_copy::{read_array_like_ptr_at, read_ptr_at, write_at}; use num_traits::{CheckedAdd, CheckedSub, ToBytes, Unsigned}; use crate::{errors::IndexedMerkleTreeError, IndexedMerkleTree}; diff --git a/merkle-tree/indexed/tests/tests.rs b/merkle-tree/indexed/tests/tests.rs index 8da768a2cb..a796b3ff5e 100644 --- a/merkle-tree/indexed/tests/tests.rs +++ b/merkle-tree/indexed/tests/tests.rs @@ -96,7 +96,7 @@ where Ok(update) } -// TODO: unify these helpers with MockIndexer +// TODO: unify these helpers with MockBatchedForester /// A mock function which imitates a relayer endpoint for updating the /// nullifier Merkle tree. 
fn relayer_update( @@ -633,8 +633,8 @@ pub fn functional_non_inclusion_test() { assert_eq!(indexed_array_element_1.next_index, 0); assert_eq!(indexed_array_element_1.index, 1); - let leaf_0 = relayer_merkle_tree.merkle_tree.get_leaf(0); - let leaf_1 = relayer_merkle_tree.merkle_tree.get_leaf(1); + let leaf_0 = relayer_merkle_tree.merkle_tree.leaf(0); + let leaf_1 = relayer_merkle_tree.merkle_tree.leaf(1); assert_eq!( leaf_0, Poseidon::hashv(&[ diff --git a/merkle-tree/reference/src/lib.rs b/merkle-tree/reference/src/lib.rs index 8585311ce2..aa3ddbbfc7 100644 --- a/merkle-tree/reference/src/lib.rs +++ b/merkle-tree/reference/src/lib.rs @@ -210,7 +210,7 @@ where Ok(canopy) } - pub fn get_leaf(&self, leaf_index: usize) -> [u8; 32] { + pub fn leaf(&self, leaf_index: usize) -> [u8; 32] { self.layers[0] .get(leaf_index) .cloned() @@ -282,6 +282,17 @@ where } subtrees } + + pub fn get_next_index(&self) -> usize { + self.rightmost_index + 1 + } + + pub fn get_leaf(&self, index: usize) -> Result<[u8; 32], ReferenceMerkleTreeError> { + self.layers[0] + .get(index) + .cloned() + .ok_or(ReferenceMerkleTreeError::LeafDoesNotExist(index)) + } } #[cfg(test)] @@ -331,7 +342,7 @@ mod tests { #[test] fn test_subtrees() { let tree_depth = 4; - let mut tree = MerkleTree::::new(tree_depth, 0); // Replace TestHasher with your specific hasher. + let mut tree = MerkleTree::::new(tree_depth, 0); let subtrees = tree.get_subtrees(); for (i, subtree) in subtrees.iter().enumerate() { diff --git a/merkle-tree/reference/src/sparse_merkle_tree.rs b/merkle-tree/reference/src/sparse_merkle_tree.rs index 0258edf164..756007df2a 100644 --- a/merkle-tree/reference/src/sparse_merkle_tree.rs +++ b/merkle-tree/reference/src/sparse_merkle_tree.rs @@ -14,10 +14,10 @@ impl SparseMerkleTree where H: Hasher, { - pub fn new(subtrees: [[u8; 32]; HEIGHT]) -> Self { + pub fn new(subtrees: [[u8; 32]; HEIGHT], next_index: usize) -> Self { Self { subtrees, - next_index: 0, + next_index, root: [0u8; 32], _hasher: PhantomData, } @@ -81,16 +81,20 @@ mod test { #[test] fn test_sparse_merkle_tree() { - let height = 4; - let mut merkle_tree = SparseMerkleTree::::new_empty(); + let height = 10; + let mut merkle_tree = SparseMerkleTree::::new_empty(); let mut reference_merkle_tree = MerkleTree::::new(height, 0); - - let leaf = [1u8; 32]; - merkle_tree.append(leaf); - reference_merkle_tree.append(&leaf).unwrap(); - assert_eq!(merkle_tree.root(), reference_merkle_tree.root()); - let subtrees = merkle_tree.get_subtrees(); - let reference_subtrees = reference_merkle_tree.get_subtrees(); - assert_eq!(subtrees.to_vec(), reference_subtrees); + for i in 0..1 << height { + let mut leaf = [0u8; 32]; + leaf[24..].copy_from_slice(&(i as u64).to_be_bytes()); + println!("i: {}, leaf: {:?}", i, leaf); + merkle_tree.append(leaf); + reference_merkle_tree.append(&leaf).unwrap(); + assert_eq!(merkle_tree.root(), reference_merkle_tree.root()); + assert_eq!(merkle_tree.get_next_index(), i + 1); + let subtrees = merkle_tree.get_subtrees(); + let reference_subtrees = reference_merkle_tree.get_subtrees(); + assert_eq!(subtrees.to_vec(), reference_subtrees); + } } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c67842dcd4..9bd31afbcd 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -398,6 +398,10 @@ importers: specifier: ^1.6.0 version: 1.6.0(@types/node@22.5.5)(@vitest/browser@1.6.0)(terser@5.31.0) + hasher.rs/src/main/wasm: {} + + hasher.rs/src/main/wasm-simd: {} + js/compressed-token: dependencies: '@coral-xyz/anchor': diff --git 
a/programs/account-compression/Cargo.toml b/programs/account-compression/Cargo.toml index dc856e6dc1..d7c5011859 100644 --- a/programs/account-compression/Cargo.toml +++ b/programs/account-compression/Cargo.toml @@ -17,10 +17,10 @@ no-log-ix-name = [] cpi = ["no-entrypoint"] custom-heap = ["light-heap"] mem-profiling = [] -default = ["custom-heap"] +default = ["custom-heap", "test"] test-sbf = [] bench-sbf = [] - +test = [] [dependencies] @@ -33,10 +33,19 @@ light-hasher = { version = "1.1.0", path = "../../merkle-tree/hasher", features light-heap = { version = "1.1.0", path = "../../heap", optional = true } light-concurrent-merkle-tree = { version = "1.1.0", path = "../../merkle-tree/concurrent", features = ["solana"] } light-indexed-merkle-tree = { version = "1.1.0", path = "../../merkle-tree/indexed", features = ["solana"] } -light-utils = { version = "1.1.0", path = "../../utils" } +light-utils = { version = "1.0.0", path = "../../utils" } +light-bloom-filter = { version = "0.1.0", path = "../../merkle-tree/bloom-filter", features = ["solana"] } num-bigint = "0.4" num-traits = "0.2.19" solana-security-txt = "1.1.0" +light-verifier = { version = "1.0.0", path = "../../circuit-lib/verifier", features = ["solana"] } [target.'cfg(not(target_os = "solana"))'.dependencies] solana-sdk = { workspace = true } + +[dev-dependencies] +rand = "0.8.5" +light-prover-client = { version = "1.2.0", path = "../../circuit-lib/light-prover-client", features = ["gnark"] } +light-merkle-tree-reference = { version = "1.1.0", path = "../../merkle-tree/reference" } +tokio = { version = "1.16.1" } +serial_test = "3.1.1" diff --git a/programs/account-compression/src/errors.rs b/programs/account-compression/src/errors.rs index 35a07180c2..f70a8b4fae 100644 --- a/programs/account-compression/src/errors.rs +++ b/programs/account-compression/src/errors.rs @@ -55,4 +55,25 @@ pub enum AccountCompressionErrorCode { UnsupportedAdditionalBytes, InvalidGroup, ProofLengthMismatch, + #[msg("Invalid commitment length")] + InvalidCommitmentLength, + #[msg("BloomFilterFull")] + BloomFilterFull, + #[msg("BatchInsertFailed")] + BatchInsertFailed, + #[msg("BatchNotReady")] + BatchNotReady, + SizeMismatch, + BatchAlreadyInserted, + InvalidBloomFilterCapacity, + InvalidCircuitBatchSize, + InvalidDiscriminator, + #[msg("batch_size is not divisible by zkp_batch_size")] + BatchSizeNotDivisibleByZkpBatchSize, + InclusionProofByIndexFailed, + TxHashUndefined, + InputDeserializationFailed, + InvalidBatch, + LeafIndexNotInBatch, + UnsupportedParameters, } diff --git a/programs/account-compression/src/instructions/append_leaves.rs b/programs/account-compression/src/instructions/append_leaves.rs index 62622c250b..d20378475d 100644 --- a/programs/account-compression/src/instructions/append_leaves.rs +++ b/programs/account-compression/src/instructions/append_leaves.rs @@ -1,4 +1,5 @@ use crate::{ + batched_queue::{BatchedQueueAccount, ZeroCopyBatchedQueueAccount}, errors::AccountCompressionErrorCode, state::StateMerkleTreeAccount, state_merkle_tree_from_bytes_zero_copy_mut, @@ -10,7 +11,7 @@ use crate::{ }, RegisteredProgram, }; -use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey}; +use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey, Discriminator}; #[derive(Accounts)] pub struct AppendLeaves<'info> { @@ -35,6 +36,16 @@ impl GroupAccess for StateMerkleTreeAccount { } } +impl GroupAccess for BatchedQueueAccount { + fn get_owner(&self) -> &Pubkey { + &self.metadata.access_metadata.owner + } + + fn get_program_owner(&self) -> 
&Pubkey { + &self.metadata.access_metadata.program_owner + } +} + impl<'info> GroupAccounts<'info> for AppendLeaves<'info> { fn get_authority(&self) -> &Signer<'info> { &self.authority @@ -44,6 +55,13 @@ impl<'info> GroupAccounts<'info> for AppendLeaves<'info> { } } +#[derive(AnchorSerialize, AnchorDeserialize)] +pub struct ZeroOutLeafIndex { + pub tree_index: u8, + pub batch_index: u8, + pub leaf_index: u16, +} + pub fn process_append_leaves_to_merkle_trees<'a, 'b, 'c: 'info, 'info>( ctx: Context<'a, 'b, 'c, 'info, AppendLeaves<'info>>, leaves: Vec<(u8, [u8; 32])>, @@ -74,6 +92,11 @@ fn batch_append_leaves<'a, 'c: 'info, 'info>( let len = ctx.remaining_accounts.len(); for i in 0..len { let merkle_tree_acc_info = &ctx.remaining_accounts[i]; + //TODO: check whether copy from slice is more efficient + let merkle_tree_acc_discriminator: [u8; 8] = ctx.remaining_accounts[i].try_borrow_data()? + [0..8] + .try_into() + .unwrap(); let rollover_fee: u64 = { let start = match leaves.iter().position(|x| x.0 as usize == i) { Some(pos) => Ok(pos), @@ -86,41 +109,82 @@ fn batch_append_leaves<'a, 'c: 'info, 'info>( let batch_size = end - start; leaves_processed += batch_size; - let rollover_fee = { - let merkle_tree_account = - AccountLoader::::try_from(merkle_tree_acc_info) - .map_err(ProgramError::from)?; - - { - let merkle_tree_account = merkle_tree_account.load()?; - let rollover_fee = merkle_tree_account.metadata.rollover_metadata.rollover_fee - * batch_size as u64; - - check_signer_is_registered_or_authority::( - ctx, - &merkle_tree_account, - )?; - - rollover_fee - } - }; - - let mut merkle_tree = merkle_tree_acc_info.try_borrow_mut_data()?; - let mut merkle_tree = state_merkle_tree_from_bytes_zero_copy_mut(&mut merkle_tree)?; - - merkle_tree - .append_batch( + match merkle_tree_acc_discriminator { + StateMerkleTreeAccount::DISCRIMINATOR => append_v1( + ctx, + merkle_tree_acc_info, + batch_size, leaves[start..end] .iter() .map(|x| &x.1) .collect::>() .as_slice(), - ) - .map_err(ProgramError::from)?; - - rollover_fee + )?, + BatchedQueueAccount::DISCRIMINATOR => { + append_v2(ctx, merkle_tree_acc_info, batch_size, &leaves[start..end])? 
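The match above routes each append through the 8-byte Anchor discriminator at the start of the account data. A self-contained sketch of that rule with stand-in constants (the real values come from the accounts' Discriminator impls):

const STATE_MERKLE_TREE_DISCRIMINATOR: [u8; 8] = [1u8; 8]; // stand-in
const BATCHED_QUEUE_DISCRIMINATOR: [u8; 8] = [2u8; 8]; // stand-in

fn route_append(account_data: &[u8]) -> Result<&'static str, &'static str> {
    let discriminator: [u8; 8] = account_data
        .get(0..8)
        .and_then(|bytes| bytes.try_into().ok())
        .ok_or("account data shorter than a discriminator")?;
    match discriminator {
        STATE_MERKLE_TREE_DISCRIMINATOR => Ok("append_v1: concurrent state Merkle tree"),
        BATCHED_QUEUE_DISCRIMINATOR => Ok("append_v2: batched output queue"),
        _ => Err("AccountDiscriminatorMismatch"),
    }
}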
+ } + _ => return err!(anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch), + } }; transfer_lamports_cpi(&ctx.accounts.fee_payer, merkle_tree_acc_info, rollover_fee)?; } Ok(leaves_processed) } + +fn append_v1<'a, 'b, 'c: 'info, 'info>( + ctx: &Context<'a, 'b, 'c, 'info, AppendLeaves<'info>>, + merkle_tree_acc_info: &'info AccountInfo<'info>, + batch_size: usize, + leaves: &[&[u8; 32]], +) -> Result { + let rollover_fee = { + let merkle_tree_account = + AccountLoader::::try_from(merkle_tree_acc_info) + .map_err(ProgramError::from)?; + + { + let merkle_tree_account = merkle_tree_account.load()?; + let rollover_fee = + merkle_tree_account.metadata.rollover_metadata.rollover_fee * batch_size as u64; + + check_signer_is_registered_or_authority::( + ctx, + &merkle_tree_account, + )?; + + rollover_fee + } + }; + let mut merkle_tree = merkle_tree_acc_info.try_borrow_mut_data()?; + let mut merkle_tree = state_merkle_tree_from_bytes_zero_copy_mut(&mut merkle_tree)?; + merkle_tree + .append_batch(leaves) + .map_err(ProgramError::from)?; + Ok(rollover_fee) +} + +fn append_v2<'a, 'b, 'c: 'info, 'info>( + ctx: &Context<'a, 'b, 'c, 'info, AppendLeaves<'info>>, + merkle_tree_acc_info: &'info AccountInfo<'info>, + batch_size: usize, + leaves: &[(u8, [u8; 32])], +) -> Result { + let account_data = &mut merkle_tree_acc_info.try_borrow_mut_data()?; + let output_queue_zero_copy = &mut ZeroCopyBatchedQueueAccount::from_bytes_mut(account_data)?; + check_signer_is_registered_or_authority::( + ctx, + output_queue_zero_copy.get_account(), + )?; + + for (_, leaf) in leaves { + output_queue_zero_copy.insert_into_current_batch(leaf)?; + } + + let rollover_fee = output_queue_zero_copy + .get_account() + .metadata + .rollover_metadata + .rollover_fee + * batch_size as u64; + Ok(rollover_fee) +} diff --git a/programs/account-compression/src/instructions/batch_append.rs b/programs/account-compression/src/instructions/batch_append.rs new file mode 100644 index 0000000000..cddc9069f1 --- /dev/null +++ b/programs/account-compression/src/instructions/batch_append.rs @@ -0,0 +1,58 @@ +use crate::{ + batched_merkle_tree::{InstructionDataBatchAppendInputs, ZeroCopyBatchedMerkleTreeAccount}, + emit_indexer_event, + errors::AccountCompressionErrorCode, + utils::check_signer_is_registered_or_authority::{ + check_signer_is_registered_or_authority, GroupAccounts, + }, + RegisteredProgram, +}; +use anchor_lang::prelude::*; + +#[derive(Accounts)] +pub struct BatchAppend<'info> { + /// CHECK: should only be accessed by a registered program or owner. + pub authority: Signer<'info>, + pub registered_program_pda: Option>, + /// CHECK: when emitting event. + pub log_wrapper: UncheckedAccount<'info>, + /// CHECK: in from_bytes_mut. + #[account(mut)] + pub merkle_tree: AccountInfo<'info>, + /// CHECK: in from_bytes_mut. 
+ #[account(mut)] + pub output_queue: AccountInfo<'info>, +} + +impl<'info> GroupAccounts<'info> for BatchAppend<'info> { + fn get_authority(&self) -> &Signer<'info> { + &self.authority + } + fn get_registered_program_pda(&self) -> &Option> { + &self.registered_program_pda + } +} + +pub fn process_batch_append_leaves<'a, 'b, 'c: 'info, 'info>( + ctx: &'a Context<'a, 'b, 'c, 'info, BatchAppend<'info>>, + instruction_data: InstructionDataBatchAppendInputs, +) -> Result<()> { + let account_data = &mut ctx.accounts.merkle_tree.try_borrow_mut_data()?; + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data)?; + check_signer_is_registered_or_authority::( + ctx, + merkle_tree, + )?; + let associated_queue = merkle_tree.get_account().metadata.associated_queue; + if ctx.accounts.output_queue.key() != associated_queue { + return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); + } + + let output_queue_data = &mut ctx.accounts.output_queue.try_borrow_mut_data()?; + let event = merkle_tree.update_output_queue( + output_queue_data, + instruction_data, + ctx.accounts.merkle_tree.key().to_bytes(), + )?; + emit_indexer_event(event.try_to_vec()?, &ctx.accounts.log_wrapper) +} diff --git a/programs/account-compression/src/instructions/batch_nullify.rs b/programs/account-compression/src/instructions/batch_nullify.rs new file mode 100644 index 0000000000..ba21ca0aa7 --- /dev/null +++ b/programs/account-compression/src/instructions/batch_nullify.rs @@ -0,0 +1,45 @@ +use crate::{ + batched_merkle_tree::{InstructionDataBatchNullifyInputs, ZeroCopyBatchedMerkleTreeAccount}, + emit_indexer_event, + utils::check_signer_is_registered_or_authority::{ + check_signer_is_registered_or_authority, GroupAccounts, + }, + RegisteredProgram, +}; +use anchor_lang::prelude::*; + +#[derive(Accounts)] +pub struct BatchNullify<'info> { + /// CHECK: should only be accessed by a registered program or owner. + pub authority: Signer<'info>, + pub registered_program_pda: Option>, + /// CHECK: when emitting event. + pub log_wrapper: UncheckedAccount<'info>, + /// CHECK: in from_bytes_mut. 
+ #[account(mut)] + pub merkle_tree: AccountInfo<'info>, +} + +impl<'info> GroupAccounts<'info> for BatchNullify<'info> { + fn get_authority(&self) -> &Signer<'info> { + &self.authority + } + fn get_registered_program_pda(&self) -> &Option> { + &self.registered_program_pda + } +} + +pub fn process_batch_nullify<'a, 'b, 'c: 'info, 'info>( + ctx: &'a Context<'a, 'b, 'c, 'info, BatchNullify<'info>>, + instruction_data: InstructionDataBatchNullifyInputs, +) -> Result<()> { + let account_data = &mut ctx.accounts.merkle_tree.try_borrow_mut_data()?; + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data)?; + check_signer_is_registered_or_authority::( + ctx, + merkle_tree, + )?; + let event = merkle_tree + .update_input_queue(instruction_data, ctx.accounts.merkle_tree.key().to_bytes())?; + emit_indexer_event(event.try_to_vec()?, &ctx.accounts.log_wrapper) +} diff --git a/programs/account-compression/src/instructions/insert_into_queues.rs b/programs/account-compression/src/instructions/insert_into_queues.rs index 4d56b4c19c..a09300e6f6 100644 --- a/programs/account-compression/src/instructions/insert_into_queues.rs +++ b/programs/account-compression/src/instructions/insert_into_queues.rs @@ -1,4 +1,6 @@ use crate::{ + batched_merkle_tree::ZeroCopyBatchedMerkleTreeAccount, + batched_queue::{BatchedQueueAccount, ZeroCopyBatchedQueueAccount}, check_queue_type, errors::AccountCompressionErrorCode, state::queue::{queue_from_bytes_zero_copy_mut, QueueAccount}, @@ -10,7 +12,7 @@ use crate::{ }, QueueType, RegisteredProgram, }; -use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey, ZeroCopy}; +use anchor_lang::{prelude::*, solana_program::pubkey::Pubkey, Discriminator, ZeroCopy}; use num_bigint::BigUint; #[derive(Accounts)] @@ -30,90 +32,81 @@ pub struct InsertIntoQueues<'info> { pub fn process_insert_into_queues<'a, 'b, 'c: 'info, 'info, MerkleTreeAccount: Owner + ZeroCopy>( ctx: Context<'a, 'b, 'c, 'info, InsertIntoQueues<'info>>, elements: &'a [[u8; 32]], + indices: Vec, queue_type: QueueType, + tx_hash: Option<[u8; 32]>, ) -> Result<()> { if elements.is_empty() { return err!(AccountCompressionErrorCode::InputElementsEmpty); } - let expected_remaining_accounts = elements.len() * 2; - if expected_remaining_accounts != ctx.remaining_accounts.len() { - msg!( - "Number of remaining accounts does not match, expected {}, got {}", - expected_remaining_accounts, - ctx.remaining_accounts.len() - ); - return err!(crate::errors::AccountCompressionErrorCode::NumberOfLeavesMismatch); - } + light_heap::bench_sbf_start!("acp_create_queue_map"); let mut queue_map = QueueMap::new(); // Deduplicate tree and queue pairs. // So that we iterate over every pair only once, // and pay rollover fees only once. - for i in (0..ctx.remaining_accounts.len()).step_by(2) { - let queue: &AccountInfo<'info> = ctx.remaining_accounts.get(i).unwrap(); - let merkle_tree = ctx.remaining_accounts.get(i + 1).unwrap(); - let associated_merkle_tree = { - let queue = AccountLoader::::try_from(queue)?; - let queue = queue.load()?; - check_queue_type(&queue.metadata.queue_type, &queue_type)?; - queue.metadata.associated_merkle_tree - }; - - if merkle_tree.key() != associated_merkle_tree { - msg!( - "Queue account {:?} is not associated with any address Merkle tree. 
Provided accounts {:?}", - queue.key(), ctx.remaining_accounts); - return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); + let mut current_index = 0; + for (index, element) in elements.iter().enumerate() { + let current_account_discriminator = ctx + .remaining_accounts + .get(current_index) + .unwrap() + .try_borrow_data()?[0..8] + .try_into() + .unwrap(); + match current_account_discriminator { + QueueAccount::DISCRIMINATOR => add_queue_bundle_v1( + &mut current_index, + queue_type, + &mut queue_map, + element, + ctx.remaining_accounts, + )?, + BatchedQueueAccount::DISCRIMINATOR => add_queue_bundle_v2( + &mut current_index, + queue_type, + &mut queue_map, + element, + indices[index], + ctx.remaining_accounts, + )?, + _ => { + msg!( + "Invalid account discriminator {:?}", + current_account_discriminator + ); + return err!(anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch); + } } - - queue_map - .entry(queue.key()) - .or_insert_with(|| QueueBundle::new(queue, merkle_tree)) - .elements - .push(elements[i / 2]); + } + if current_index != ctx.remaining_accounts.len() { + msg!( + "Number of remaining accounts does not match, expected {}, got {}", + current_index, + ctx.remaining_accounts.len() + ); + return err!(crate::errors::AccountCompressionErrorCode::NumberOfLeavesMismatch); } light_heap::bench_sbf_end!("acp_create_queue_map"); for queue_bundle in queue_map.values() { - let rollover_fee: u64; - - let queue = AccountLoader::::try_from(queue_bundle.queue)?; - light_heap::bench_sbf_start!("acp_prep_insertion"); - { - let queue = queue.load()?; - check_signer_is_registered_or_authority::( - &ctx, &queue, - )?; - rollover_fee = - queue.metadata.rollover_metadata.rollover_fee * queue_bundle.elements.len() as u64; - } - { - let sequence_number = { - let merkle_tree = queue_bundle.merkle_tree.try_borrow_data()?; - let merkle_tree = state_merkle_tree_from_bytes_zero_copy(&merkle_tree)?; - merkle_tree.sequence_number() - }; - - let queue = queue.to_account_info(); - let mut queue = queue.try_borrow_mut_data()?; - let mut queue = unsafe { queue_from_bytes_zero_copy_mut(&mut queue).unwrap() }; - light_heap::bench_sbf_end!("acp_prep_insertion"); - light_heap::bench_sbf_start!("acp_insert_nf_into_queue"); - for element in queue_bundle.elements.iter() { - let element = BigUint::from_bytes_be(element.as_slice()); - queue - .insert(&element, sequence_number) - .map_err(ProgramError::from)?; + let rollover_fee = match queue_bundle.queue_type { + QueueType::NullifierQueue => process_queue_bundle_v1(&ctx, queue_bundle), + QueueType::AddressQueue => process_queue_bundle_v1(&ctx, queue_bundle), + QueueType::Input => process_queue_bundle_v2(&ctx, queue_bundle, &tx_hash), + _ => { + msg!("Queue type {:?} is not supported", queue_bundle.queue_type); + return err!(AccountCompressionErrorCode::InvalidQueueType); } - light_heap::bench_sbf_end!("acp_insert_nf_into_queue"); - } + }?; if rollover_fee > 0 { transfer_lamports_cpi( &ctx.accounts.fee_payer, - &queue_bundle.queue.to_account_info(), + // Queue account + &queue_bundle.accounts[1].to_account_info(), rollover_fee, )?; } @@ -121,3 +114,152 @@ pub fn process_insert_into_queues<'a, 'b, 'c: 'info, 'info, MerkleTreeAccount: O Ok(()) } + +fn process_queue_bundle_v1<'info>( + ctx: &Context<'_, '_, '_, 'info, InsertIntoQueues<'info>>, + queue_bundle: &QueueBundle<'_, '_>, +) -> Result { + let queue = AccountLoader::::try_from(queue_bundle.accounts[1])?; + light_heap::bench_sbf_start!("acp_prep_insertion"); + let rollover_fee = { + let 
queue = queue.load()?; + check_signer_is_registered_or_authority::<InsertIntoQueues, QueueAccount>(ctx, &queue)?; + + queue.metadata.rollover_metadata.rollover_fee * queue_bundle.elements.len() as u64 + }; + { + let sequence_number = { + let merkle_tree = queue_bundle.accounts[0].try_borrow_data()?; + let merkle_tree = state_merkle_tree_from_bytes_zero_copy(&merkle_tree)?; + merkle_tree.sequence_number() + }; + + let queue = queue.to_account_info(); + let mut queue = queue.try_borrow_mut_data()?; + let mut queue = unsafe { queue_from_bytes_zero_copy_mut(&mut queue).unwrap() }; + light_heap::bench_sbf_end!("acp_prep_insertion"); + light_heap::bench_sbf_start!("acp_insert_nf_into_queue"); + for element in queue_bundle.elements.iter() { + let element = BigUint::from_bytes_be(element.as_slice()); + queue + .insert(&element, sequence_number) + .map_err(ProgramError::from)?; + } + light_heap::bench_sbf_end!("acp_insert_nf_into_queue"); + } + Ok(rollover_fee) +} + +fn process_queue_bundle_v2<'info>( + ctx: &Context<'_, '_, '_, 'info, InsertIntoQueues<'info>>, + queue_bundle: &QueueBundle<'_, '_>, + tx_hash: &Option<[u8; 32]>, +) -> Result<u64> { + let account_data = &mut queue_bundle.accounts[1].try_borrow_mut_data()?; + let merkle_tree = &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data)?; + let output_queue_account_data = &mut queue_bundle.accounts[0].try_borrow_mut_data()?; + let output_queue = &mut ZeroCopyBatchedQueueAccount::from_bytes_mut(output_queue_account_data)?; + check_signer_is_registered_or_authority::<InsertIntoQueues, ZeroCopyBatchedMerkleTreeAccount>( + ctx, + merkle_tree, + )?; + let rollover_fee = merkle_tree + .get_account() + .metadata + .rollover_metadata + .rollover_fee + * queue_bundle.elements.len() as u64; + for (element, leaf_index) in queue_bundle + .elements + .iter() + .zip(queue_bundle.indices.iter()) + { + let tx_hash = tx_hash.ok_or(AccountCompressionErrorCode::TxHashUndefined)?; + light_heap::bench_sbf_start!("acp_insert_nf_into_queue_v2"); + // Check for every element whether the value is still in the output queue and zero out its leaf. + // Fails if the value is no longer in the queue.
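A toy model of that spend check, under the assumption that the leaf slot is zeroed on success and that a mismatch means the output was already nullified; the error names mirror the new AccountCompressionErrorCode variants, while the real implementation lives in ZeroCopyBatchedQueueAccount:

struct OutputBatch {
    start_index: u64,
    values: Vec<[u8; 32]>,
}

impl OutputBatch {
    fn prove_inclusion_by_index_and_zero_out_leaf(
        &mut self,
        leaf_index: u64,
        value: &[u8; 32],
    ) -> Result<(), &'static str> {
        let pos = leaf_index
            .checked_sub(self.start_index)
            .ok_or("LeafIndexNotInBatch")? as usize;
        let stored = self.values.get_mut(pos).ok_or("LeafIndexNotInBatch")?;
        if *stored != *value {
            return Err("InclusionProofByIndexFailed");
        }
        *stored = [0u8; 32]; // zero out so the same leaf cannot be spent twice
        Ok(())
    }
}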
+ output_queue.prove_inclusion_by_index_and_zero_out_leaf(*leaf_index as u64, element)?; + merkle_tree.insert_nullifier_into_current_batch(element, *leaf_index as u64, &tx_hash)?; + light_heap::bench_sbf_end!("acp_insert_nf_into_queue_v2"); + } + Ok(rollover_fee) +} + +fn add_queue_bundle_v1<'a, 'info>( + remaining_accounts_index: &mut usize, + queue_type: QueueType, + queue_map: &mut std::collections::HashMap>, + element: &'a [u8; 32], + remaining_accounts: &'info [AccountInfo<'info>], +) -> Result<()> { + let queue = remaining_accounts.get(*remaining_accounts_index).unwrap(); + let merkle_tree = remaining_accounts + .get(*remaining_accounts_index + 1) + .unwrap(); + let associated_merkle_tree = { + let queue = AccountLoader::::try_from(queue)?; + let queue = queue.load()?; + check_queue_type(&queue.metadata.queue_type, &queue_type)?; + queue.metadata.associated_merkle_tree + }; + if merkle_tree.key() != associated_merkle_tree { + msg!( + "Queue account {:?} is not associated with Merkle tree {:?}", + queue.key(), + merkle_tree.key() + ); + return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); + } + queue_map + .entry(queue.key()) + .or_insert_with(|| QueueBundle::new(queue_type, vec![merkle_tree, queue])) + .elements + .push(element); + *remaining_accounts_index += 2; + Ok(()) +} + +fn add_queue_bundle_v2<'a, 'info>( + remaining_accounts_index: &mut usize, + queue_type: QueueType, + queue_map: &mut std::collections::HashMap>, + element: &'a [u8; 32], + index: u32, + remaining_accounts: &'info [AccountInfo<'info>], +) -> Result<()> { + // TODO: add address support + if queue_type != QueueType::NullifierQueue { + msg!("Queue type Address is not supported for BatchedMerkleTreeAccount"); + return err!(AccountCompressionErrorCode::InvalidQueueType); + } + let output_queue = remaining_accounts.get(*remaining_accounts_index).unwrap(); + let merkle_tree = remaining_accounts + .get(*remaining_accounts_index + 1) + .unwrap(); + let output_queue_account = + ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue.try_borrow_mut_data()?)?; + let associated_merkle_tree = output_queue_account + .get_account() + .metadata + .associated_merkle_tree; + + if merkle_tree.key() != associated_merkle_tree { + msg!( + "Queue account {:?} is not associated with Merkle tree {:?}", + output_queue.key(), + merkle_tree.key() + ); + return err!(AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated); + } + queue_map + .entry(merkle_tree.key()) + .or_insert_with(|| QueueBundle::new(QueueType::Input, vec![output_queue, merkle_tree])) + .elements + .push(element); + queue_map + .entry(merkle_tree.key()) + .and_modify(|x| x.indices.push(index)); + *remaining_accounts_index += 2; + + Ok(()) +} diff --git a/programs/account-compression/src/instructions/intialize_batched_state_merkle_tree.rs b/programs/account-compression/src/instructions/intialize_batched_state_merkle_tree.rs new file mode 100644 index 0000000000..39c23fe76b --- /dev/null +++ b/programs/account-compression/src/instructions/intialize_batched_state_merkle_tree.rs @@ -0,0 +1,801 @@ +use anchor_lang::{prelude::*, Discriminator}; +use light_hasher::Hasher; +use light_utils::fee::compute_rollover_fee; + +use crate::{ + batched_merkle_tree::{ + get_merkle_tree_account_size, BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount, + }, + batched_queue::{ + assert_queue_inited, get_output_queue_account_size, BatchedQueue, BatchedQueueAccount, + ZeroCopyBatchedQueueAccount, + }, + errors::AccountCompressionErrorCode, + 
initialize_address_queue::check_rollover_fee_sufficient, + utils::{ + check_account::check_account_balance_is_rent_exempt, + check_signer_is_registered_or_authority::{ + check_signer_is_registered_or_authority, GroupAccounts, + }, + constants::{ + DEFAULT_BATCH_SIZE, DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, DEFAULT_ZKP_BATCH_SIZE, + TEST_DEFAULT_BATCH_SIZE, TEST_DEFAULT_ZKP_BATCH_SIZE, + }, + }, + AccessMetadata, MerkleTreeMetadata, QueueMetadata, QueueType, RegisteredProgram, + RolloverMetadata, +}; + +#[derive(Accounts)] +pub struct InitializeBatchedStateMerkleTreeAndQueue<'info> { + #[account(mut)] + pub authority: Signer<'info>, + #[account(zero)] + pub merkle_tree: AccountLoader<'info, BatchedMerkleTreeAccount>, + #[account(zero)] + pub queue: AccountLoader<'info, BatchedQueueAccount>, + pub registered_program_pda: Option>, +} + +impl<'info> GroupAccounts<'info> for InitializeBatchedStateMerkleTreeAndQueue<'info> { + fn get_authority(&self) -> &Signer<'info> { + &self.authority + } + fn get_registered_program_pda(&self) -> &Option> { + &self.registered_program_pda + } +} + +#[derive(Debug, PartialEq, Clone, Copy, AnchorDeserialize, AnchorSerialize)] +pub struct InitStateTreeAccountsInstructionData { + pub index: u64, + pub program_owner: Option, + pub forester: Option, + pub additional_bytes: u64, + pub input_queue_batch_size: u64, + pub output_queue_batch_size: u64, + pub input_queue_zkp_batch_size: u64, + pub output_queue_zkp_batch_size: u64, + pub bloom_filter_num_iters: u64, + pub bloom_filter_capacity: u64, + pub root_history_capacity: u32, + pub network_fee: Option, + pub rollover_threshold: Option, + pub close_threshold: Option, + pub input_queue_num_batches: u64, + pub output_queue_num_batches: u64, + pub height: u32, +} + +impl InitStateTreeAccountsInstructionData { + pub fn test_default() -> Self { + Self { + index: 0, + program_owner: None, + forester: None, + additional_bytes: DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, + bloom_filter_num_iters: 3, + input_queue_batch_size: TEST_DEFAULT_BATCH_SIZE, + output_queue_batch_size: TEST_DEFAULT_BATCH_SIZE, + input_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, + output_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, + input_queue_num_batches: 2, + output_queue_num_batches: 2, + height: 26, + root_history_capacity: 20, + bloom_filter_capacity: 20_000 * 8, + network_fee: Some(5000), + rollover_threshold: Some(95), + close_threshold: None, + } + } + + pub fn e2e_test_default() -> Self { + Self { + index: 0, + program_owner: None, + forester: None, + additional_bytes: DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, + bloom_filter_num_iters: 3, + input_queue_batch_size: 500, + output_queue_batch_size: 500, + input_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, + output_queue_zkp_batch_size: TEST_DEFAULT_ZKP_BATCH_SIZE, + input_queue_num_batches: 2, + output_queue_num_batches: 2, + height: 26, + root_history_capacity: 20, + bloom_filter_capacity: 20_000 * 8, + network_fee: Some(5000), + rollover_threshold: Some(95), + close_threshold: None, + } + } +} + +impl Default for InitStateTreeAccountsInstructionData { + fn default() -> Self { + Self { + index: 0, + program_owner: None, + forester: None, + additional_bytes: DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE, + bloom_filter_num_iters: 3, + input_queue_batch_size: DEFAULT_BATCH_SIZE, + output_queue_batch_size: DEFAULT_BATCH_SIZE, + input_queue_zkp_batch_size: DEFAULT_ZKP_BATCH_SIZE, + output_queue_zkp_batch_size: DEFAULT_ZKP_BATCH_SIZE, + input_queue_num_batches: 2, + output_queue_num_batches: 2, + height: 26, + 
root_history_capacity: (DEFAULT_BATCH_SIZE / DEFAULT_ZKP_BATCH_SIZE * 2) as u32, + bloom_filter_capacity: (DEFAULT_BATCH_SIZE + 1) * 8, + network_fee: Some(5000), + rollover_threshold: Some(95), + close_threshold: None, + } + } +} + +pub fn process_initialize_batched_state_merkle_tree<'info>( + ctx: Context<'_, '_, '_, 'info, InitializeBatchedStateMerkleTreeAndQueue<'info>>, + params: InitStateTreeAccountsInstructionData, +) -> Result<()> { + #[cfg(feature = "test")] + validate_batched_tree_params(params); + #[cfg(not(feature = "test"))] + { + if params != InitStateTreeAccountsInstructionData::default() { + return err!(AccountCompressionErrorCode::UnsupportedParameters); + } + } + + let owner = match ctx.accounts.registered_program_pda.as_ref() { + Some(registered_program_pda) => { + check_signer_is_registered_or_authority::< + InitializeBatchedStateMerkleTreeAndQueue, + RegisteredProgram, + >(&ctx, registered_program_pda)?; + registered_program_pda.group_authority_pda + } + None => ctx.accounts.authority.key(), + }; + + let output_queue_pubkey = ctx.accounts.queue.key(); + let queue_account_size = get_output_queue_account_size( + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.output_queue_num_batches, + ); + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + + let queue_rent = check_account_balance_is_rent_exempt( + &ctx.accounts.queue.to_account_info(), + queue_account_size, + )?; + + let mt_pubkey = ctx.accounts.merkle_tree.key(); + let merkle_tree_rent = check_account_balance_is_rent_exempt( + &ctx.accounts.merkle_tree.to_account_info(), + mt_account_size, + )?; + + let additional_bytes_rent = (Rent::get()?).minimum_balance(params.additional_bytes as usize); + + let output_queue_account_data: AccountInfo<'info> = ctx.accounts.queue.to_account_info(); + let queue_data = &mut output_queue_account_data.try_borrow_mut_data()?; + + let mt_account_info = ctx.accounts.merkle_tree.to_account_info(); + let mt_data = &mut mt_account_info.try_borrow_mut_data()?; + + init_batched_state_merkle_tree_accounts( + owner, + params, + queue_data, + output_queue_pubkey, + queue_rent, + mt_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + )?; + + Ok(()) +} + +pub fn bytes_to_struct_checked( + bytes: &mut [u8], +) -> Result<*mut T> { + if bytes.len() < std::mem::size_of::() { + return err!(AccountCompressionErrorCode::InvalidAccountSize); + } + + if INIT { + if bytes[0..8] != [0; 8] { + msg!("Discriminator bytes must be zero for initialization."); + return err!(AccountCompressionErrorCode::InvalidDiscriminator); + } + bytes[0..8].copy_from_slice(&T::DISCRIMINATOR); + } else if T::DISCRIMINATOR != bytes[0..8] { + msg!( + "Expected discriminator: {:?}, actual {:?} ", + T::DISCRIMINATOR, + bytes[0..8].to_vec() + ); + return err!(AccountCompressionErrorCode::InvalidDiscriminator); + } + + Ok(bytes[8..].as_mut_ptr() as *mut T) +} + +pub fn init_batched_state_merkle_tree_accounts( + owner: Pubkey, + params: InitStateTreeAccountsInstructionData, + output_queue_account_data: &mut [u8], + output_queue_pubkey: Pubkey, + queue_rent: u64, + mt_account_data: &mut [u8], + mt_pubkey: Pubkey, + merkle_tree_rent: u64, + additional_bytes_rent: u64, +) -> Result<()> { + let num_batches_input_queue = params.input_queue_num_batches; + let num_batches_output_queue = 
params.output_queue_num_batches; + let height = params.height; + + // Output queue + { + let rollover_fee = match params.rollover_threshold { + Some(rollover_threshold) => { + let rent = merkle_tree_rent + additional_bytes_rent + queue_rent; + let rollover_fee = compute_rollover_fee(rollover_threshold, height, rent) + .map_err(ProgramError::from)?; + check_rollover_fee_sufficient(rollover_fee, 0, rent, rollover_threshold, height)?; + rollover_fee + } + None => 0, + }; + msg!("Output queue rollover_fee: {}", rollover_fee); + let metadata = QueueMetadata { + next_queue: Pubkey::default(), + access_metadata: AccessMetadata::new(owner, params.program_owner, params.forester), + rollover_metadata: RolloverMetadata::new( + params.index, + rollover_fee, + params.rollover_threshold, + params.network_fee.unwrap_or_default(), + params.close_threshold, + Some(params.additional_bytes), + ), + queue_type: QueueType::Output as u64, + associated_merkle_tree: mt_pubkey, + }; + + ZeroCopyBatchedQueueAccount::init( + metadata, + num_batches_output_queue, + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + output_queue_account_data, + 0, + 0, + )?; + } + let metadata = MerkleTreeMetadata { + next_merkle_tree: Pubkey::default(), + access_metadata: crate::AccessMetadata::new(owner, params.program_owner, params.forester), + rollover_metadata: crate::RolloverMetadata::new( + params.index, + // Complete rollover fee is charged when creating an output + // compressed account by inserting it into the output queue. + 0, + params.rollover_threshold, + params.network_fee.unwrap_or_default(), + params.close_threshold, + None, + ), + associated_queue: output_queue_pubkey, + }; + msg!("Initializing mt_account."); + ZeroCopyBatchedMerkleTreeAccount::init( + metadata, + params.root_history_capacity, + num_batches_input_queue, + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + height, + mt_account_data, + params.bloom_filter_num_iters, + params.bloom_filter_capacity, + )?; + Ok(()) +} + +pub fn validate_batched_tree_params(params: InitStateTreeAccountsInstructionData) { + assert!(params.input_queue_batch_size > 0); + assert!(params.output_queue_batch_size > 0); + assert_eq!( + params.input_queue_batch_size % params.input_queue_zkp_batch_size, + 0, + "Input queue batch size must be divisible by input_queue_zkp_batch_size." + ); + assert_eq!( + params.output_queue_batch_size % params.output_queue_zkp_batch_size, + 0, + "Output queue batch size must be divisible by output_queue_zkp_batch_size." + ); + assert!( + match_circuit_size(params.input_queue_zkp_batch_size), + "Zkp batch size not supported. Supported sizes: 10, 100, 500, 1000" + ); + assert!( + match_circuit_size(params.output_queue_zkp_batch_size), + "Zkp batch size not supported. Supported sizes: 10, 100, 500, 1000" + ); + + assert!(params.bloom_filter_num_iters > 0); + assert!(params.bloom_filter_capacity > params.input_queue_batch_size * 8); + assert_eq!( + params.bloom_filter_capacity % 8, + 0, + "Bloom filter capacity must be divisible by 8."
+ ); + assert!(params.bloom_filter_capacity > 0); + assert!(params.root_history_capacity > 0); + assert!(params.input_queue_batch_size > 0); + assert_eq!(params.input_queue_num_batches, 2); + assert_eq!(params.output_queue_num_batches, 2); + assert_eq!(params.close_threshold, None); + assert_eq!(params.height, 26); +} + +pub fn match_circuit_size(size: u64) -> bool { + matches!(size, 10 | 100 | 500 | 1000) +} + +pub fn assert_mt_zero_copy_inited( + account_data: &mut [u8], + ref_account: BatchedMerkleTreeAccount, + num_iters: u64, +) { + let mut zero_copy_account = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data) + .expect("from_bytes_mut failed"); + let queue = zero_copy_account.get_account().queue; + let ref_queue = ref_account.queue; + let queue_type = QueueType::Input as u64; + let num_batches = ref_queue.num_batches as usize; + + assert_eq!( + *zero_copy_account.get_account(), + ref_account, + "metadata mismatch" + ); + + assert_eq!( + zero_copy_account.root_history.capacity(), + ref_account.root_history_capacity as usize, + "root_history_capacity mismatch" + ); + assert_eq!( + *zero_copy_account.root_history.get(0).unwrap(), + light_hasher::Poseidon::zero_bytes()[ref_account.height as usize], + "root_history not initialized" + ); + + assert_queue_inited( + queue, + ref_queue, + queue_type, + &mut zero_copy_account.value_vecs, + &mut zero_copy_account.bloom_filter_stores, + &mut zero_copy_account.batches, + num_batches, + num_iters, + ); +} + +pub fn get_output_queue_account_default( + owner: Pubkey, + program_owner: Option, + forester: Option, + rollover_threshold: Option, + index: u64, + batch_size: u64, + zkp_batch_size: u64, + additional_bytes: u64, + rent: u64, + associated_merkle_tree: Pubkey, + height: u32, + num_batches: u64, +) -> BatchedQueueAccount { + let rollover_fee = match rollover_threshold { + Some(rollover_threshold) => compute_rollover_fee(rollover_threshold, height, rent) + .map_err(ProgramError::from) + .unwrap(), + None => 0, + }; + let metadata = QueueMetadata { + next_queue: Pubkey::default(), + access_metadata: AccessMetadata { + owner, + program_owner: program_owner.unwrap_or_default(), + forester: forester.unwrap_or_default(), + }, + rollover_metadata: RolloverMetadata { + close_threshold: u64::MAX, + index, + rolledover_slot: u64::MAX, + rollover_threshold: rollover_threshold.unwrap_or(u64::MAX), + rollover_fee, + network_fee: 5000, + additional_bytes, + }, + queue_type: QueueType::Output as u64, + associated_merkle_tree, + }; + let queue = BatchedQueue::get_output_queue_default(batch_size, zkp_batch_size, num_batches); + BatchedQueueAccount { + metadata, + queue, + next_index: 0, + } +} + +#[cfg(test)] +pub mod tests { + + use light_bounded_vec::{BoundedVecMetadata, CyclicBoundedVecMetadata}; + use rand::{rngs::StdRng, Rng}; + + use crate::{ + batch::Batch, + batched_merkle_tree::{get_merkle_tree_account_size, get_merkle_tree_account_size_default}, + batched_queue::{ + assert_queue_zero_copy_inited, get_output_queue_account_size, + get_output_queue_account_size_default, BatchedQueue, + }, + }; + + use super::*; + + pub fn get_output_queue( + owner: Pubkey, + program_owner: Option, + forester: Option, + rollover_threshold: Option, + index: u64, + batch_size: u64, + zkp_batch_size: u64, + additional_bytes: u64, + rent: u64, + associated_merkle_tree: Pubkey, + network_fee: u64, + num_batches: u64, + height: u32, + ) -> BatchedQueueAccount { + let rollover_fee = match rollover_threshold { + Some(rollover_threshold) => { + let rollover_fee = 
compute_rollover_fee(rollover_threshold, height, rent) + .map_err(ProgramError::from) + .unwrap(); + rollover_fee + } + None => 0, + }; + let metadata = QueueMetadata { + next_queue: Pubkey::default(), + access_metadata: AccessMetadata { + owner, + program_owner: program_owner.unwrap_or_default(), + forester: forester.unwrap_or_default(), + }, + rollover_metadata: RolloverMetadata { + close_threshold: u64::MAX, + index, + rolledover_slot: u64::MAX, + rollover_threshold: rollover_threshold.unwrap_or(u64::MAX), + rollover_fee, + network_fee, + additional_bytes, + }, + queue_type: QueueType::Output as u64, + associated_merkle_tree, + }; + let queue = BatchedQueue::get_output_queue_default(batch_size, zkp_batch_size, num_batches); + BatchedQueueAccount { + metadata, + queue, + next_index: 0, + } + } + + #[test] + fn test_account_init() { + let owner = Pubkey::new_unique(); + + let queue_account_size = get_output_queue_account_size_default(); + + let mut output_queue_account_data = vec![0; queue_account_size]; + let output_queue_pubkey = Pubkey::new_unique(); + + let mt_account_size = get_merkle_tree_account_size_default(); + let mut mt_account_data = vec![0; mt_account_size]; + let mt_pubkey = Pubkey::new_unique(); + + let params = InitStateTreeAccountsInstructionData::test_default(); + + let merkle_tree_rent = 1_000_000_000; + let queue_rent = 1_000_000_000; + let additional_bytes_rent = 1000; + init_batched_state_merkle_tree_accounts( + owner, + params.clone(), + &mut output_queue_account_data, + output_queue_pubkey, + queue_rent, + &mut mt_account_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + ) + .unwrap(); + let ref_output_queue_account = get_output_queue_account_default( + owner, + None, + None, + params.rollover_threshold, + 0, + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.additional_bytes, + merkle_tree_rent + additional_bytes_rent + queue_rent, + mt_pubkey, + params.height, + params.output_queue_num_batches, + ); + assert_queue_zero_copy_inited( + output_queue_account_data.as_mut_slice(), + ref_output_queue_account, + 0, + ); + let ref_mt_account = BatchedMerkleTreeAccount::get_state_tree_default( + owner, + None, + None, + params.rollover_threshold, + 0, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + output_queue_pubkey, + params.height, + params.input_queue_num_batches, + ); + assert_mt_zero_copy_inited( + &mut mt_account_data, + ref_mt_account, + params.bloom_filter_num_iters, + ); + } + + #[test] + fn test_rnd_account_init() { + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + for _ in 0..10000 { + println!("next iter ------------------------------------"); + let owner = Pubkey::new_unique(); + + let program_owner = if rng.gen_bool(0.5) { + Some(Pubkey::new_unique()) + } else { + None + }; + let forester = if rng.gen_bool(0.5) { + Some(Pubkey::new_unique()) + } else { + None + }; + let input_queue_zkp_batch_size = rng.gen_range(1..1000); + let output_queue_zkp_batch_size = rng.gen_range(1..1000); + + let params = InitStateTreeAccountsInstructionData { + index: rng.gen_range(0..1000), + program_owner, + forester, + additional_bytes: rng.gen_range(0..1000), + bloom_filter_num_iters: rng.gen_range(0..4), + input_queue_batch_size: rng.gen_range(1..1000) * input_queue_zkp_batch_size, + output_queue_batch_size: rng.gen_range(1..1000) * output_queue_zkp_batch_size, + 
input_queue_zkp_batch_size, + output_queue_zkp_batch_size, + // 8 bits per byte, divisible by 8 for aligned memory + bloom_filter_capacity: rng.gen_range(0..100) * 8 * 8, + network_fee: Some(rng.gen_range(0..1000)), + rollover_threshold: Some(rng.gen_range(0..100)), + close_threshold: None, + root_history_capacity: rng.gen_range(1..1000), + input_queue_num_batches: rng.gen_range(1..4), + output_queue_num_batches: rng.gen_range(1..4), + height: rng.gen_range(1..32), + }; + let queue_account_size = get_output_queue_account_size( + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.output_queue_num_batches, + ); + + use std::mem::size_of; + { + let num_batches = params.output_queue_num_batches as usize; + let num_zkp_batches = + params.output_queue_batch_size / params.output_queue_zkp_batch_size; + let batch_size = size_of::() * num_batches + size_of::(); + let value_vec_size = (params.output_queue_batch_size as usize * 32 + + size_of::()) + * num_batches; + let hash_chain_store_size = + (num_zkp_batches as usize * 32 + size_of::()) * num_batches; + // Output queue + let ref_queue_account_size = + // metadata + BatchedQueueAccount::LEN + + batch_size + // 2 value vecs + + value_vec_size + // 2 hash chain stores + + hash_chain_store_size; + + assert_eq!(queue_account_size, ref_queue_account_size); + } + + let mut output_queue_account_data = vec![0; queue_account_size]; + let output_queue_pubkey = Pubkey::new_unique(); + + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + { + let num_zkp_batches = + params.input_queue_batch_size / params.input_queue_zkp_batch_size; + let num_batches = params.input_queue_num_batches as usize; + let batch_size = size_of::() * num_batches + size_of::(); + let bloom_filter_size = (params.bloom_filter_capacity as usize / 8 + + size_of::()) + * num_batches; + let hash_chain_store_size = + (num_zkp_batches as usize * 32 + size_of::()) * num_batches; + let root_history_size = params.root_history_capacity as usize * 32 + + size_of::(); + // Output queue + let ref_account_size = + // metadata + BatchedMerkleTreeAccount::LEN + + root_history_size + + batch_size + + bloom_filter_size + // 2 hash chain stores + + hash_chain_store_size; + assert_eq!(mt_account_size, ref_account_size); + } + let mut mt_account_data = vec![0; mt_account_size]; + let mt_pubkey = Pubkey::new_unique(); + + let merkle_tree_rent = rng.gen_range(0..10000000); + let queue_rent = rng.gen_range(0..10000000); + let additional_bytes_rent = rng.gen_range(0..10000000); + init_batched_state_merkle_tree_accounts( + owner, + params.clone(), + &mut output_queue_account_data, + output_queue_pubkey, + queue_rent, + &mut mt_account_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + ) + .unwrap(); + let ref_output_queue_account = get_output_queue( + owner, + program_owner, + forester, + params.rollover_threshold, + params.index, + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.additional_bytes, + merkle_tree_rent + additional_bytes_rent + queue_rent, + mt_pubkey, + params.network_fee.unwrap_or_default(), + params.output_queue_num_batches, + params.height, + ); + assert_queue_zero_copy_inited( + output_queue_account_data.as_mut_slice(), + ref_output_queue_account, + 0, + ); + let ref_mt_account = BatchedMerkleTreeAccount::get_state_tree_default( + 
owner, + program_owner, + forester, + params.rollover_threshold, + params.index, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + output_queue_pubkey, + params.height, + params.input_queue_num_batches, + ); + assert_mt_zero_copy_inited( + &mut mt_account_data, + ref_mt_account, + params.bloom_filter_num_iters, + ); + } + } + + /// Tests: + /// 1. functional init + /// 2. failing init again + /// 3. functional deserialize + /// 4. failing deserialize invalid data + /// 5. failing deserialize invalid discriminator + #[test] + fn test_bytes_to_struct() { + #[account] + #[derive(Debug, PartialEq, Copy)] + pub struct MyStruct { + pub data: u64, + } + let mut bytes = vec![0; 8 + std::mem::size_of::()]; + let mut empty_bytes = vec![0; 8 + std::mem::size_of::()]; + + // Test 1 functional init. + let inited_struct = bytes_to_struct_checked::(&mut bytes).unwrap(); + unsafe { + (*inited_struct).data = 1; + } + assert_eq!(bytes[0..8], MyStruct::DISCRIMINATOR); + assert_eq!(bytes[8..].to_vec(), vec![1, 0, 0, 0, 0, 0, 0, 0]); + // Test 2 failing init again. + assert_eq!( + bytes_to_struct_checked::(&mut bytes).unwrap_err(), + AccountCompressionErrorCode::InvalidDiscriminator.into() + ); + + // Test 3 functional deserialize. + let inited_struct = + unsafe { *bytes_to_struct_checked::(&mut bytes).unwrap() }; + assert_eq!(inited_struct, MyStruct { data: 1 }); + // Test 4 failing deserialize invalid data. + assert_eq!( + bytes_to_struct_checked::(&mut empty_bytes).unwrap_err(), + AccountCompressionErrorCode::InvalidDiscriminator.into() + ); + // Test 5 failing deserialize invalid discriminator. + bytes[0] = 0; + assert_eq!( + bytes_to_struct_checked::(&mut bytes).unwrap_err(), + AccountCompressionErrorCode::InvalidDiscriminator.into() + ); + } +} diff --git a/programs/account-compression/src/instructions/mod.rs b/programs/account-compression/src/instructions/mod.rs index 208e851d3c..10534c9a2c 100644 --- a/programs/account-compression/src/instructions/mod.rs +++ b/programs/account-compression/src/instructions/mod.rs @@ -33,3 +33,12 @@ pub use rollover_address_merkle_tree_and_queue::*; pub mod deregister_program; pub use deregister_program::*; + +pub mod intialize_batched_state_merkle_tree; +pub use intialize_batched_state_merkle_tree::*; + +pub mod batch_nullify; +pub use batch_nullify::*; + +pub mod batch_append; +pub use batch_append::*; diff --git a/programs/account-compression/src/instructions/nullify_leaves.rs b/programs/account-compression/src/instructions/nullify_leaves.rs index 53c193bf1d..b6947b1e22 100644 --- a/programs/account-compression/src/instructions/nullify_leaves.rs +++ b/programs/account-compression/src/instructions/nullify_leaves.rs @@ -38,7 +38,6 @@ impl<'info> GroupAccounts<'info> for NullifyLeaves<'info> { } } -// TODO: implement for multiple nullifiers got a stack frame error with a loop pub fn process_nullify_leaves<'a, 'b, 'c: 'info, 'info>( ctx: &'a Context<'a, 'b, 'c, 'info, NullifyLeaves<'info>>, change_log_indices: &'a [u64], diff --git a/programs/account-compression/src/lib.rs b/programs/account-compression/src/lib.rs index a352d24946..29e97f0d29 100644 --- a/programs/account-compression/src/lib.rs +++ b/programs/account-compression/src/lib.rs @@ -10,6 +10,7 @@ pub mod utils; pub use processor::*; pub mod sdk; use anchor_lang::prelude::*; +use batched_merkle_tree::InstructionDataBatchNullifyInputs; 
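The INIT-gated cast validated by test_bytes_to_struct above boils down to a discriminator check before handing out a raw pointer past the 8-byte prefix. A self-contained sketch; the discriminator constant and error strings are stand-ins, and accesses go through read_unaligned/write_unaligned because the payload pointer is not guaranteed to be aligned:

const DISCRIMINATOR: [u8; 8] = *b"mystruct"; // stand-in for T::DISCRIMINATOR

#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq)]
struct MyStruct {
    data: u64,
}

fn bytes_to_struct_checked<const INIT: bool>(
    bytes: &mut [u8],
) -> Result<*mut MyStruct, &'static str> {
    if bytes.len() < 8 + std::mem::size_of::<MyStruct>() {
        return Err("InvalidAccountSize");
    }
    if INIT {
        // Initialization requires a zeroed discriminator, then writes it.
        if bytes[0..8] != [0; 8] {
            return Err("InvalidDiscriminator");
        }
        bytes[0..8].copy_from_slice(&DISCRIMINATOR);
    } else if bytes[0..8] != DISCRIMINATOR {
        return Err("InvalidDiscriminator");
    }
    Ok(bytes[8..].as_mut_ptr() as *mut MyStruct)
}

fn main() {
    let mut bytes = vec![0u8; 8 + std::mem::size_of::<MyStruct>()];
    let ptr = bytes_to_struct_checked::<true>(&mut bytes).unwrap();
    unsafe { ptr.write_unaligned(MyStruct { data: 1 }) };
    assert!(bytes_to_struct_checked::<true>(&mut bytes).is_err()); // double init fails
    let read = unsafe {
        bytes_to_struct_checked::<false>(&mut bytes)
            .unwrap()
            .read_unaligned()
    };
    assert_eq!(read, MyStruct { data: 1 });
}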
declare_id!("compr6CUsB5m2jS4Y3831ztGSTnDpnKJTKS95d64XVq"); @@ -21,19 +22,23 @@ solana_security_txt::security_txt! { policy: "https://github.com/Lightprotocol/light-protocol/blob/main/SECURITY.md", source_code: "https://github.com/Lightprotocol/light-protocol" } - #[program] pub mod account_compression { + use batched_merkle_tree::InstructionDataBatchAppendInputs; use errors::AccountCompressionErrorCode; - use self::{ - initialize_state_merkle_tree_and_nullifier_queue::process_initialize_state_merkle_tree_and_nullifier_queue, - insert_into_queues::{process_insert_into_queues, InsertIntoQueues}, - }; + use self::insert_into_queues::{process_insert_into_queues, InsertIntoQueues}; use super::*; + pub fn initialize_batched_state_merkle_tree<'info>( + ctx: Context<'_, '_, '_, 'info, InitializeBatchedStateMerkleTreeAndQueue<'info>>, + params: InitStateTreeAccountsInstructionData, + ) -> Result<()> { + process_initialize_batched_state_merkle_tree(ctx, params) + } + pub fn initialize_address_merkle_tree_and_queue<'info>( ctx: Context<'_, '_, '_, 'info, InitializeAddressMerkleTreeAndQueue<'info>>, index: u64, @@ -59,7 +64,9 @@ process_insert_into_queues::<AddressMerkleTreeAccount>( ctx, addresses.as_slice(), + Vec::new(), QueueType::AddressQueue, + None, ) } @@ -185,11 +192,15 @@ pub fn insert_into_nullifier_queues<'a, 'b, 'c: 'info, 'info>( ctx: Context<'a, 'b, 'c, 'info, InsertIntoQueues<'info>>, nullifiers: Vec<[u8; 32]>, + leaf_indices: Vec<u32>, + tx_hash: Option<[u8; 32]>, ) -> Result<()> { process_insert_into_queues::<StateMerkleTreeAccount>( ctx, &nullifiers, + leaf_indices, QueueType::NullifierQueue, + tx_hash, ) } @@ -198,4 +209,24 @@ ) -> Result<()> { process_rollover_state_merkle_tree_nullifier_queue_pair(ctx) } + + pub fn batch_nullify<'a, 'b, 'c: 'info, 'info>( + ctx: Context<'a, 'b, 'c, 'info, BatchNullify<'info>>, + data: Vec<u8>, + ) -> Result<()> { + let instruction_data = InstructionDataBatchNullifyInputs::try_from_slice(&data) + .map_err(|_| AccountCompressionErrorCode::InputDeserializationFailed)?; + process_batch_nullify(&ctx, instruction_data)?; + Ok(()) + } + + pub fn batch_append<'a, 'b, 'c: 'info, 'info>( + ctx: Context<'a, 'b, 'c, 'info, BatchAppend<'info>>, + data: Vec<u8>, + ) -> Result<()> { + let instruction_data = InstructionDataBatchAppendInputs::try_from_slice(&data) + .map_err(|_| AccountCompressionErrorCode::InputDeserializationFailed)?; + process_batch_append_leaves(&ctx, instruction_data)?; + Ok(()) + } } diff --git a/programs/account-compression/src/state/batch.rs b/programs/account-compression/src/state/batch.rs new file mode 100644 index 0000000000..faef58ddc2 --- /dev/null +++ b/programs/account-compression/src/state/batch.rs @@ -0,0 +1,485 @@ +use crate::errors::AccountCompressionErrorCode; +use anchor_lang::prelude::*; +use light_bloom_filter::BloomFilter; +use light_bounded_vec::BoundedVec; +use light_hasher::{Hasher, Poseidon}; + +#[repr(u64)] +#[derive(Clone, Debug, PartialEq, Eq, Copy)] +pub enum BatchState { + /// Batch can be filled with values. + CanBeFilled, + /// Batch has been inserted into the tree. + Inserted, + /// Batch is ready to be inserted into the tree. Possibly it is already + /// partially inserted into the tree. + ReadyToUpdateTree, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Batch { + /// Number of inserted elements in the zkp batch. + num_inserted: u64, + state: BatchState, + current_zkp_batch_index: u64, + num_inserted_zkps: u64, + /// Number of iterations for the bloom_filter.
+ pub num_iters: u64, + /// Theoretical capacity of the bloom_filter. We want to make it much larger + /// than batch_size to avoid false positives. + pub bloom_filter_capacity: u64, + pub batch_size: u64, + pub zkp_batch_size: u64, + /// Sequence number when it is safe to clear the batch without advancing to + /// the saved root index. + pub sequence_number: u64, + pub root_index: u32, + pub start_index: u64, + /// Placeholder for forester to signal that the bloom filter is wiped + /// already. + pub bloom_filter_is_wiped: bool, +} + +impl Batch { + pub fn new( + num_iters: u64, + bloom_filter_capacity: u64, + batch_size: u64, + zkp_batch_size: u64, + start_index: u64, + ) -> Self { + Batch { + num_iters, + bloom_filter_capacity, + batch_size, + num_inserted: 0, + state: BatchState::CanBeFilled, + zkp_batch_size, + current_zkp_batch_index: 0, + num_inserted_zkps: 0, + sequence_number: 0, + root_index: 0, + start_index, + bloom_filter_is_wiped: false, + } + } + + pub fn get_state(&self) -> BatchState { + self.state + } + + /// fill -> ready -> inserted -> fill + pub fn advance_state_to_can_be_filled(&mut self) -> Result<()> { + if self.state == BatchState::Inserted { + self.state = BatchState::CanBeFilled; + } else { + msg!( + "Batch is in incorrect state {} expected Inserted 1", + self.state as u64 + ); + return err!(AccountCompressionErrorCode::BatchNotReady); + } + Ok(()) + } + + /// fill -> ready -> inserted -> fill + pub fn advance_state_to_inserted(&mut self) -> Result<()> { + if self.state == BatchState::ReadyToUpdateTree { + self.state = BatchState::Inserted; + } else { + msg!( + "Batch is in incorrect state {} expected ReadyToUpdateTree 2", + self.state as u64 + ); + return err!(AccountCompressionErrorCode::BatchNotReady); + } + Ok(()) + } + + /// fill -> ready -> inserted -> fill + pub fn advance_state_to_ready_to_update_tree(&mut self) -> Result<()> { + if self.state == BatchState::CanBeFilled { + self.state = BatchState::ReadyToUpdateTree; + } else { + msg!( + "Batch is in incorrect state {} expected CanBeFilled 0", + self.state as u64 + ); + return err!(AccountCompressionErrorCode::BatchNotReady); + } + Ok(()) + } + + pub fn get_num_inserted(&self) -> u64 { + self.num_inserted + } + + pub fn get_current_zkp_batch_index(&self) -> u64 { + self.current_zkp_batch_index + } + + pub fn get_num_inserted_zkps(&self) -> u64 { + self.num_inserted_zkps + } + + pub fn store_value( + &mut self, + value: &[u8; 32], + value_store: &mut BoundedVec<[u8; 32]>, + ) -> Result<()> { + if self.state != BatchState::CanBeFilled { + return err!(AccountCompressionErrorCode::BatchNotReady); + } + value_store.push(*value).map_err(ProgramError::from)?; + Ok(()) + } + + pub fn store_and_hash_value( + &mut self, + value: &[u8; 32], + value_store: &mut BoundedVec<[u8; 32]>, + hashchain_store: &mut BoundedVec<[u8; 32]>, + ) -> Result<()> { + self.store_value(value, value_store)?; + self.add_to_hash_chain(value, hashchain_store) + } + + /// Inserts into the bloom filter and hashes the value.
+    /// (used by the input/nullifier queue)
+    pub fn insert(
+        &mut self,
+        bloom_filter_value: &[u8; 32],
+        hashchain_value: &[u8; 32],
+        store: &mut [u8],
+        hashchain_store: &mut BoundedVec<[u8; 32]>,
+    ) -> Result<()> {
+        let mut bloom_filter =
+            BloomFilter::new(self.num_iters as usize, self.bloom_filter_capacity, store)
+                .map_err(ProgramError::from)?;
+        bloom_filter
+            .insert(bloom_filter_value)
+            .map_err(ProgramError::from)?;
+        self.add_to_hash_chain(hashchain_value, hashchain_store)
+    }
+
+    pub fn add_to_hash_chain(
+        &mut self,
+        value: &[u8; 32],
+        hashchain_store: &mut BoundedVec<[u8; 32]>,
+    ) -> Result<()> {
+        if self.num_inserted == self.zkp_batch_size || self.num_inserted == 0 {
+            hashchain_store.push(*value).map_err(ProgramError::from)?;
+            self.num_inserted = 0;
+        } else if let Some(last_hashchain) = hashchain_store.last() {
+            let hashchain =
+                Poseidon::hashv(&[last_hashchain, value.as_slice()]).map_err(ProgramError::from)?;
+            *hashchain_store.last_mut().unwrap() = hashchain;
+        }
+
+        self.num_inserted += 1;
+        if self.num_inserted == self.zkp_batch_size {
+            self.current_zkp_batch_index += 1;
+        }
+
+        if self.get_num_zkp_batches() == self.current_zkp_batch_index {
+            self.advance_state_to_ready_to_update_tree()?;
+            self.num_inserted = 0;
+        }
+
+        Ok(())
+    }
+
+    /// Checks that the value is not yet in the bloom filter.
+    /// (used by the nullifier queue)
+    pub fn check_non_inclusion(&self, value: &[u8; 32], store: &mut [u8]) -> Result<()> {
+        let mut bloom_filter =
+            BloomFilter::new(self.num_iters as usize, self.bloom_filter_capacity, store)
+                .map_err(ProgramError::from)?;
+        if bloom_filter.contains(value) {
+            #[cfg(target_os = "solana")]
+            msg!("Value already exists in the bloom filter.");
+            return err!(AccountCompressionErrorCode::BatchInsertFailed);
+        }
+        Ok(())
+    }
+
+    pub fn get_num_zkp_batches(&self) -> u64 {
+        self.batch_size / self.zkp_batch_size
+    }
+
+    pub fn mark_as_inserted_in_merkle_tree(
+        &mut self,
+        sequence_number: u64,
+        root_index: u32,
+        root_history_length: u32,
+    ) -> Result<()> {
+        if self.state != BatchState::ReadyToUpdateTree {
+            return err!(AccountCompressionErrorCode::BatchNotReady);
+        }
+        let num_zkp_batches = self.get_num_zkp_batches();
+
+        self.num_inserted_zkps += 1;
+
+        // Batch has been successfully inserted into the tree.
+        if self.num_inserted_zkps == num_zkp_batches {
+            self.current_zkp_batch_index = 0;
+            self.state = BatchState::Inserted;
+            self.num_inserted_zkps = 0;
+            // Save the sequence number and root index for the batch.
+            // When the batch is cleared, check that the sequence number is
+            // greater than or equal to self.sequence_number; if not, advance
+            // the current root index to the saved root index.
+            self.sequence_number = sequence_number + root_history_length as u64;
+            self.root_index = root_index;
+        }
+
+        Ok(())
+    }
+
+    pub fn get_hashchain_store_len(&self) -> usize {
+        self.batch_size as usize / self.zkp_batch_size as usize
+    }
+
+    pub fn value_is_inserted_in_batch(&self, leaf_index: u64) -> Result<bool> {
+        let max_batch_leaf_index =
+            self.get_num_zkp_batches() * self.zkp_batch_size + self.start_index;
+        let min_batch_leaf_index = self.start_index;
+        Ok(leaf_index < max_batch_leaf_index && leaf_index >= min_batch_leaf_index)
+    }
+
+    pub fn get_value_index_in_batch(&self, leaf_index: u64) -> Result<u64> {
+        Ok(leaf_index
+            .checked_sub(self.start_index)
+            .ok_or(AccountCompressionErrorCode::LeafIndexNotInBatch)?)
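+        // E.g. with start_index = 100, leaf_index = 142 maps to value index 42,
+        // while leaf_index = 99 underflows checked_sub and surfaces
+        // LeafIndexNotInBatch.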
+ } +} + +#[cfg(test)] +mod tests { + + use super::*; + + fn get_test_batch() -> Batch { + Batch::new(3, 160_000, 500, 100, 0) + } + + /// simulate zkp batch insertion + fn test_mark_as_inserted(mut batch: Batch) { + let mut sequence_number = 10; + let mut root_index = 20; + let root_history_length = 23; + for i in 0..batch.get_num_zkp_batches() { + sequence_number += i as u64; + root_index += i as u32; + batch + .mark_as_inserted_in_merkle_tree(sequence_number, root_index, root_history_length) + .unwrap(); + if i != batch.get_num_zkp_batches() - 1 { + assert_eq!(batch.get_state(), BatchState::ReadyToUpdateTree); + assert_eq!(batch.get_num_inserted(), 0); + assert_eq!(batch.get_current_zkp_batch_index(), 5); + assert_eq!(batch.get_num_inserted_zkps(), i + 1); + } else { + assert_eq!(batch.get_state(), BatchState::Inserted); + assert_eq!(batch.get_num_inserted(), 0); + assert_eq!(batch.get_current_zkp_batch_index(), 0); + assert_eq!(batch.get_num_inserted_zkps(), 0); + } + } + assert_eq!(batch.get_state(), BatchState::Inserted); + assert_eq!(batch.get_num_inserted(), 0); + let mut ref_batch = get_test_batch(); + ref_batch.state = BatchState::Inserted; + ref_batch.root_index = root_index; + ref_batch.sequence_number = sequence_number + root_history_length as u64; + assert_eq!(batch, ref_batch); + } + + #[test] + fn test_store_value() { + let mut batch = get_test_batch(); + + let mut value_store = BoundedVec::with_capacity(batch.batch_size as usize); + let mut hashchain_store = BoundedVec::with_capacity(batch.get_hashchain_store_len()); + + let mut ref_batch = get_test_batch(); + for i in 0..batch.batch_size { + ref_batch.num_inserted %= ref_batch.zkp_batch_size; + + let mut value = [0u8; 32]; + value[24..].copy_from_slice(&i.to_be_bytes()); + assert!(batch + .store_and_hash_value(&value, &mut value_store, &mut hashchain_store) + .is_ok()); + ref_batch.num_inserted += 1; + if ref_batch.num_inserted == ref_batch.zkp_batch_size { + ref_batch.current_zkp_batch_index += 1; + } + if ref_batch.current_zkp_batch_index == ref_batch.get_num_zkp_batches() { + ref_batch.state = BatchState::ReadyToUpdateTree; + ref_batch.num_inserted = 0; + } + assert_eq!(batch, ref_batch); + assert_eq!(*value_store.get(i as usize).unwrap(), value); + } + let result = batch.store_and_hash_value(&[1u8; 32], &mut value_store, &mut hashchain_store); + assert_eq!( + result.unwrap_err(), + AccountCompressionErrorCode::BatchNotReady.into() + ); + assert_eq!(batch.get_state(), BatchState::ReadyToUpdateTree); + assert_eq!(batch.get_num_inserted(), 0); + assert_eq!(batch.get_current_zkp_batch_index(), 5); + assert_eq!(batch.get_num_zkp_batches(), 5); + assert_eq!(batch.get_num_inserted_zkps(), 0); + + test_mark_as_inserted(batch); + } + + #[test] + fn test_insert() { + // Behavior Input queue + let mut batch = get_test_batch(); + let mut store = vec![0u8; 20_000]; + let hashchain_store_len = batch.get_hashchain_store_len(); + let mut hashchain_store: BoundedVec<[u8; 32]> = + BoundedVec::with_capacity(hashchain_store_len); + + let mut ref_batch = get_test_batch(); + for i in 0..batch.batch_size { + ref_batch.num_inserted %= ref_batch.zkp_batch_size; + let mut value = [0u8; 32]; + value[24..].copy_from_slice(&i.to_be_bytes()); + let ref_hash_chain = if i % batch.zkp_batch_size == 0 { + value + } else { + Poseidon::hashv(&[hashchain_store.last().unwrap(), &value]).unwrap() + }; + assert!(batch + .insert(&value, &value, &mut store, &mut hashchain_store) + .is_ok()); + let mut bloom_filter = BloomFilter { + num_iters: batch.num_iters 
as usize,
+                capacity: batch.bloom_filter_capacity,
+                store: &mut store,
+            };
+            assert!(bloom_filter.contains(&value));
+            batch.check_non_inclusion(&value, &mut store).unwrap_err();
+
+            ref_batch.num_inserted += 1;
+            assert_eq!(*hashchain_store.last().unwrap(), ref_hash_chain);
+            if ref_batch.num_inserted == ref_batch.zkp_batch_size {
+                ref_batch.current_zkp_batch_index += 1;
+            }
+            if i == batch.batch_size - 1 {
+                ref_batch.state = BatchState::ReadyToUpdateTree;
+                ref_batch.num_inserted = 0;
+            }
+            assert_eq!(batch, ref_batch);
+        }
+        test_mark_as_inserted(batch);
+    }
+
+    #[test]
+    fn test_add_to_hash_chain() {
+        let mut batch = get_test_batch();
+        let hashchain_store_len = batch.get_hashchain_store_len();
+        let mut hashchain_store: BoundedVec<[u8; 32]> =
+            BoundedVec::with_capacity(hashchain_store_len);
+        let value = [1u8; 32];
+
+        assert!(batch
+            .add_to_hash_chain(&value, &mut hashchain_store)
+            .is_ok());
+        let mut ref_batch = get_test_batch();
+        let user_hash_chain = value;
+        ref_batch.num_inserted = 1;
+        assert_eq!(batch, ref_batch);
+        assert_eq!(hashchain_store[0], user_hash_chain);
+        let value = [2u8; 32];
+        let ref_hash_chain = Poseidon::hashv(&[&user_hash_chain, &value]).unwrap();
+        assert!(batch
+            .add_to_hash_chain(&value, &mut hashchain_store)
+            .is_ok());
+
+        ref_batch.num_inserted = 2;
+        assert_eq!(batch, ref_batch);
+        assert_eq!(hashchain_store[0], ref_hash_chain);
+    }
+
+    #[test]
+    fn test_check_non_inclusion() {
+        let mut batch = get_test_batch();
+
+        let value = [1u8; 32];
+        let mut store = vec![0u8; 20_000];
+        let hashchain_store_len = batch.get_hashchain_store_len();
+        let mut hashchain_store: BoundedVec<[u8; 32]> =
+            BoundedVec::with_capacity(hashchain_store_len);
+
+        assert!(batch.check_non_inclusion(&value, &mut store).is_ok());
+        let ref_batch = get_test_batch();
+        assert_eq!(batch, ref_batch);
+        batch
+            .insert(&value, &value, &mut store, &mut hashchain_store)
+            .unwrap();
+        assert!(batch.check_non_inclusion(&value, &mut store).is_err());
+    }
+
+    #[test]
+    fn test_getters() {
+        let mut batch = get_test_batch();
+        assert_eq!(batch.get_num_zkp_batches(), 5);
+        assert_eq!(batch.get_hashchain_store_len(), 5);
+        assert_eq!(batch.get_state(), BatchState::CanBeFilled);
+        assert_eq!(batch.get_num_inserted(), 0);
+        assert_eq!(batch.get_current_zkp_batch_index(), 0);
+        assert_eq!(batch.get_num_inserted_zkps(), 0);
+        batch.advance_state_to_ready_to_update_tree().unwrap();
+        assert_eq!(batch.get_state(), BatchState::ReadyToUpdateTree);
+        batch.advance_state_to_inserted().unwrap();
+        assert_eq!(batch.get_state(), BatchState::Inserted);
+    }
+
+    /// 1. Failing test: lowest value in eligible range - 1
+    /// 2. Functional test: lowest value in eligible range
+    /// 3. Functional test: highest value in eligible range
+    /// 4. Failing test: eligible range + 1
+    #[test]
+    fn test_value_is_inserted_in_batch() {
+        let mut batch = get_test_batch();
+        batch.advance_state_to_ready_to_update_tree().unwrap();
+        batch.advance_state_to_inserted().unwrap();
+        batch.start_index = 1;
+        let lowest_eligible_value = batch.start_index;
+        let highest_eligible_value =
+            batch.start_index + batch.get_num_zkp_batches() * batch.zkp_batch_size - 1;
+        // 1. Failing test: lowest value in eligible range - 1
+        assert_eq!(
+            batch
+                .value_is_inserted_in_batch(lowest_eligible_value - 1)
+                .unwrap(),
+            false
+        );
+        // 2. Functional test: lowest value in eligible range
+        assert_eq!(
+            batch
+                .value_is_inserted_in_batch(lowest_eligible_value)
+                .unwrap(),
+            true
+        );
+        // 3. Functional test: highest value in eligible range
+        assert_eq!(
+            batch
+                .value_is_inserted_in_batch(highest_eligible_value)
+                .unwrap(),
+            true
+        );
+        // 4. Failing test: eligible range + 1
+        assert_eq!(
+            batch
+                .value_is_inserted_in_batch(highest_eligible_value + 1)
+                .unwrap(),
+            false
+        );
+    }
+}
diff --git a/programs/account-compression/src/state/batched_merkle_tree.rs b/programs/account-compression/src/state/batched_merkle_tree.rs
new file mode 100644
index 0000000000..1f788d9d6b
--- /dev/null
+++ b/programs/account-compression/src/state/batched_merkle_tree.rs
@@ -0,0 +1,2357 @@
+use crate::{
+    batch::BatchState,
+    batched_queue::ZeroCopyBatchedQueueAccount,
+    bytes_to_struct_checked,
+    errors::AccountCompressionErrorCode,
+    utils::{
+        check_signer_is_registered_or_authority::GroupAccess, constants::TEST_DEFAULT_BATCH_SIZE,
+    },
+    InitStateTreeAccountsInstructionData,
+};
+use aligned_sized::aligned_sized;
+use anchor_lang::prelude::*;
+use borsh::{BorshDeserialize, BorshSerialize};
+use light_bounded_vec::{BoundedVec, CyclicBoundedVec, CyclicBoundedVecMetadata};
+use light_hasher::{Hasher, Poseidon};
+use light_verifier::{verify_batch_append_with_proofs, verify_batch_update, CompressedProof};
+use std::mem::{size_of, ManuallyDrop};
+
+use super::{
+    batch::Batch,
+    batched_queue::{
+        init_queue, input_queue_bytes, insert_into_current_batch, queue_account_size, BatchedQueue,
+    },
+    AccessMetadata, MerkleTreeMetadata, QueueType, RolloverMetadata,
+};
+
+#[derive(Debug, PartialEq, Default)]
+#[aligned_sized(anchor)]
+#[account(zero_copy)]
+pub struct BatchedMerkleTreeMetadata {
+    pub access_metadata: AccessMetadata,
+    pub rollover_metadata: RolloverMetadata,
+    // Queue associated with this Merkle tree.
+    pub associated_output_queue: Pubkey,
+    // Next Merkle tree to be used after rollover.
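+    // (Stays `Pubkey::default()` until a rollover actually happens; cf.
+    // `get_state_tree_default` below.)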
+    pub next_merkle_tree: Pubkey,
+    pub tree_type: u64,
+}
+
+impl GroupAccess for ZeroCopyBatchedMerkleTreeAccount {
+    fn get_owner(&self) -> &Pubkey {
+        &self.get_account().metadata.access_metadata.owner
+    }
+
+    fn get_program_owner(&self) -> &Pubkey {
+        &self.get_account().metadata.access_metadata.program_owner
+    }
+}
+
+#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq)]
+pub struct BatchAppendEvent {
+    pub id: [u8; 32],
+    pub batch_index: u64,
+    pub zkp_batch_index: u64,
+    pub batch_size: u64,
+    pub old_next_index: u64,
+    pub new_next_index: u64,
+    pub new_root: [u8; 32],
+    pub root_index: u32,
+    pub sequence_number: u64,
+}
+
+#[derive(BorshDeserialize, BorshSerialize, Debug, PartialEq)]
+pub struct BatchNullifyEvent {
+    pub id: [u8; 32],
+    pub batch_index: u64,
+    pub zkp_batch_index: u64,
+    pub new_root: [u8; 32],
+    pub root_index: u32,
+    pub sequence_number: u64,
+    pub batch_size: u64,
+}
+
+#[repr(u64)]
+#[derive(Debug, PartialEq, Clone, Copy)]
+pub enum TreeType {
+    State = 1,
+    Address = 2,
+    BatchedState = 3,
+    BatchedAddress = 4,
+}
+
+#[derive(Debug, PartialEq, Default)]
+#[account(zero_copy)]
+#[aligned_sized(anchor)]
+pub struct BatchedMerkleTreeAccount {
+    pub metadata: MerkleTreeMetadata,
+    pub sequence_number: u64,
+    pub tree_type: u64,
+    pub next_index: u64,
+    pub height: u32,
+    pub root_history_capacity: u32,
+    pub queue: BatchedQueue,
+}
+
+impl BatchedMerkleTreeAccount {
+    pub fn size(&self) -> Result<usize> {
+        let account_size = Self::LEN;
+        let root_history_size = size_of::<CyclicBoundedVecMetadata>()
+            + (size_of::<[u8; 32]>() * self.root_history_capacity as usize);
+        let size = account_size
+            + root_history_size
+            + queue_account_size(&self.queue, QueueType::Input as u64)?;
+        Ok(size)
+    }
+
+    pub fn get_state_tree_default(
+        owner: Pubkey,
+        program_owner: Option<Pubkey>,
+        forester: Option<Pubkey>,
+        rollover_threshold: Option<u64>,
+        index: u64,
+        network_fee: u64,
+        batch_size: u64,
+        zkp_batch_size: u64,
+        bloom_filter_capacity: u64,
+        root_history_capacity: u32,
+        associated_queue: Pubkey,
+        height: u32,
+        num_batches: u64,
+    ) -> Self {
+        Self {
+            metadata: MerkleTreeMetadata {
+                next_merkle_tree: Pubkey::default(),
+                access_metadata: AccessMetadata::new(owner, program_owner, forester),
+                rollover_metadata: RolloverMetadata::new(
+                    index,
+                    0,
+                    rollover_threshold,
+                    network_fee,
+                    None,
+                    None,
+                ),
+                associated_queue,
+            },
+            sequence_number: 0,
+            tree_type: TreeType::BatchedState as u64,
+            next_index: 0,
+            height,
+            root_history_capacity,
+            queue: BatchedQueue::get_input_queue_default(
+                batch_size,
+                bloom_filter_capacity,
+                zkp_batch_size,
+                num_batches,
+            ),
+        }
+    }
+}
+
+#[derive(Debug, PartialEq)]
+pub struct ZeroCopyBatchedMerkleTreeAccount {
+    account: *mut BatchedMerkleTreeAccount,
+    pub root_history: ManuallyDrop<CyclicBoundedVec<[u8; 32]>>,
+    pub batches: ManuallyDrop<BoundedVec<Batch>>,
+    pub value_vecs: Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+    pub bloom_filter_stores: Vec<ManuallyDrop<BoundedVec<u8>>>,
+    pub hashchain_store: Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+}
+
+/// Get batch from account.
+/// Hash all public inputs into one poseidon hash.
+/// Public inputs:
+/// 1. old root (get from account by index)
+/// 2. new root (sent to chain)
+/// 3. start index (get from batch)
+/// 4. end index (get from batch start index plus batch size)
+#[derive(Debug, PartialEq, Clone, Copy, BorshSerialize, BorshDeserialize)]
+pub struct InstructionDataBatchNullifyInputs {
+    pub public_inputs: BatchProofInputsIx,
+    pub compressed_proof: CompressedProof,
+}
+
+#[derive(Debug, PartialEq, Clone, Copy, BorshSerialize, BorshDeserialize)]
+pub struct BatchProofInputsIx {
+    pub new_root: [u8; 32],
+    pub old_root_index: u16,
+}
+
+#[derive(Debug, PartialEq, Clone, Copy, BorshSerialize, BorshDeserialize)]
+pub struct InstructionDataBatchAppendInputs {
+    pub public_inputs: AppendBatchProofInputsIx,
+    pub compressed_proof: CompressedProof,
+}
+
+#[derive(Debug, PartialEq, Clone, Copy, BorshDeserialize, BorshSerialize)]
+pub struct AppendBatchProofInputsIx {
+    pub new_root: [u8; 32],
+}
+
+impl ZeroCopyBatchedMerkleTreeAccount {
+    pub fn get_account(&self) -> &BatchedMerkleTreeAccount {
+        unsafe { self.account.as_ref() }.unwrap()
+    }
+
+    pub fn get_account_mut(&mut self) -> &mut BatchedMerkleTreeAccount {
+        unsafe { self.account.as_mut() }.unwrap()
+    }
+
+    pub fn from_bytes_mut(account_data: &mut [u8]) -> Result<ZeroCopyBatchedMerkleTreeAccount> {
+        unsafe {
+            let account = bytes_to_struct_checked::<BatchedMerkleTreeAccount>(account_data)?;
+            if account_data.len() != (*account).size()? {
+                return err!(AccountCompressionErrorCode::SizeMismatch);
+            }
+            let mut start_offset = BatchedMerkleTreeAccount::LEN;
+            let root_history = CyclicBoundedVec::deserialize(account_data, &mut start_offset)
+                .map_err(ProgramError::from)?;
+            let (batches, value_vecs, bloom_filter_stores, hashchain_store) = input_queue_bytes(
+                &(*account).queue,
+                account_data,
+                QueueType::Input as u64,
+                &mut start_offset,
+            )?;
+
+            Ok(ZeroCopyBatchedMerkleTreeAccount {
+                account,
+                root_history,
+                batches,
+                value_vecs,
+                bloom_filter_stores,
+                hashchain_store,
+            })
+        }
+    }
+
+    pub fn init(
+        metadata: MerkleTreeMetadata,
+        root_history_capacity: u32,
+        num_batches_input_queue: u64,
+        input_queue_batch_size: u64,
+        input_queue_zkp_batch_size: u64,
+        height: u32,
+        account_data: &mut [u8],
+        num_iters: u64,
+        bloom_filter_capacity: u64,
+    ) -> Result<ZeroCopyBatchedMerkleTreeAccount> {
+        unsafe {
+            let account = bytes_to_struct_checked::<BatchedMerkleTreeAccount>(account_data)?;
+            (*account).metadata = metadata;
+            (*account).root_history_capacity = root_history_capacity;
+            (*account).height = height;
+            (*account).tree_type = TreeType::BatchedState as u64;
+            (*account).queue.init(
+                num_batches_input_queue,
+                input_queue_batch_size,
+                input_queue_zkp_batch_size,
+            )?;
+            (*account).queue.bloom_filter_capacity = bloom_filter_capacity;
+            if account_data.len() != (*account).size()? {
{ + msg!("merkle_tree_account: {:?}", (*account)); + msg!("account_data.len(): {}", account_data.len()); + msg!("account.size(): {}", (*account).size()?); + return err!(AccountCompressionErrorCode::SizeMismatch); + } + let mut start_offset = BatchedMerkleTreeAccount::LEN; + + let mut root_history = CyclicBoundedVec::init( + (*account).root_history_capacity as usize, + account_data, + &mut start_offset, + false, + ) + .map_err(ProgramError::from)?; + root_history.push(light_hasher::Poseidon::zero_bytes()[height as usize]); + + let (batches, value_vecs, bloom_filter_stores, hashchain_store) = init_queue( + &(*account).queue, + QueueType::Input as u64, + account_data, + num_iters, + bloom_filter_capacity, + &mut start_offset, + )?; + Ok(ZeroCopyBatchedMerkleTreeAccount { + account, + root_history, + batches, + value_vecs, + bloom_filter_stores, + hashchain_store, + }) + } + } + + // Note: when proving inclusion by index in + // value array we need to insert the value into a bloom_filter once it is + // inserted into the tree. Check this with get_num_inserted_zkps + pub fn update_output_queue( + &mut self, + queue_account_data: &mut [u8], + instruction_data: InstructionDataBatchAppendInputs, + id: [u8; 32], + ) -> Result { + let mut queue_account = + ZeroCopyBatchedQueueAccount::from_bytes_mut(queue_account_data).unwrap(); + + let batch_index = queue_account.get_account().queue.next_full_batch_index; + let circuit_batch_size = queue_account.get_account().queue.zkp_batch_size; + let batches = &mut queue_account.batches; + let full_batch = batches.get_mut(batch_index as usize).unwrap(); + + if full_batch.get_state() != BatchState::ReadyToUpdateTree { + msg!("Queue is in invalid state: {:?}", full_batch.get_state()); + return err!(AccountCompressionErrorCode::BatchAlreadyInserted); + } + + let new_root = instruction_data.public_inputs.new_root; + let num_zkps = full_batch.get_num_inserted_zkps(); + + let leaves_hashchain = queue_account + .hashchain_store + .get(batch_index as usize) + .unwrap() + .get(num_zkps as usize) + .unwrap(); + let old_root = self.root_history.last().unwrap(); + let start_index = self.get_account().next_index; + let mut start_index_bytes = [0u8; 32]; + start_index_bytes[24..].copy_from_slice(&start_index.to_be_bytes()); + let public_input_hash = + create_hash_chain([*old_root, new_root, *leaves_hashchain, start_index_bytes])?; + + self.update::<5>( + circuit_batch_size as usize, + instruction_data.compressed_proof, + public_input_hash, + )?; + let account = self.get_account_mut(); + account.next_index += circuit_batch_size; + let root_history_capacity = account.root_history_capacity; + let sequence_number = account.sequence_number; + self.root_history.push(new_root); + let root_index = self.root_history.last_index() as u32; + full_batch.mark_as_inserted_in_merkle_tree( + sequence_number, + root_index, + root_history_capacity, + )?; + if full_batch.get_state() == BatchState::Inserted { + queue_account.get_account_mut().queue.next_full_batch_index += 1; + queue_account.get_account_mut().queue.next_full_batch_index %= + queue_account.get_account_mut().queue.num_batches; + } + Ok(BatchAppendEvent { + id, + batch_index, + batch_size: circuit_batch_size, + zkp_batch_index: num_zkps, + old_next_index: start_index, + new_next_index: start_index + circuit_batch_size, + new_root, + root_index, + sequence_number: self.get_account().sequence_number, + }) + } + + pub fn update_input_queue( + &mut self, + instruction_data: InstructionDataBatchNullifyInputs, + id: [u8; 32], + ) -> 
+        let batch_index = self.get_account().queue.next_full_batch_index;
+
+        let full_batch = self.batches.get(batch_index as usize).unwrap();
+
+        if full_batch.get_state() != BatchState::ReadyToUpdateTree {
+            msg!("Queue is in invalid state: {:?}", full_batch.get_state());
+            return err!(AccountCompressionErrorCode::BatchAlreadyInserted);
+        }
+        let num_zkps = full_batch.get_num_inserted_zkps();
+
+        let leaves_hashchain = self
+            .hashchain_store
+            .get(batch_index as usize)
+            .unwrap()
+            .get(num_zkps as usize)
+            .unwrap();
+        let old_root = self
+            .root_history
+            .get(instruction_data.public_inputs.old_root_index as usize)
+            .unwrap();
+        let new_root = instruction_data.public_inputs.new_root;
+
+        let public_input_hash = create_hash_chain([*old_root, new_root, *leaves_hashchain])?;
+        let circuit_batch_size = self.get_account().queue.zkp_batch_size;
+        let sequence_number = self.get_account().sequence_number;
+        self.update::<3>(
+            circuit_batch_size as usize,
+            instruction_data.compressed_proof,
+            public_input_hash,
+        )?;
+        self.root_history.push(new_root);
+
+        let root_history_capacity = self.get_account().root_history_capacity;
+        let full_batch = self.batches.get_mut(batch_index as usize).unwrap();
+        full_batch.mark_as_inserted_in_merkle_tree(
+            sequence_number,
+            self.root_history.last_index() as u32,
+            root_history_capacity,
+        )?;
+        // TODO(optimization): search for a bloom_filter that can be cleared.
+
+        if full_batch.get_state() == BatchState::Inserted {
+            let account = self.get_account_mut();
+            account.queue.next_full_batch_index += 1;
+            account.queue.next_full_batch_index %= account.queue.num_batches;
+        }
+        Ok(BatchNullifyEvent {
+            id,
+            batch_index,
+            batch_size: circuit_batch_size,
+            zkp_batch_index: num_zkps,
+            new_root,
+            root_index: self.root_history.last_index() as u32,
+            sequence_number: self.get_account().sequence_number,
+        })
+    }
+
+    fn update<const QUEUE_TYPE: u64>(
+        &mut self,
+        batch_size: usize,
+        proof: CompressedProof,
+        public_input_hash: [u8; 32],
+    ) -> Result<()> {
+        if QUEUE_TYPE == QueueType::Output as u64 {
+            verify_batch_append_with_proofs(batch_size, public_input_hash, &proof)
+                .map_err(ProgramError::from)?;
+        } else if QUEUE_TYPE == QueueType::Input as u64 {
+            verify_batch_update(batch_size, public_input_hash, &proof)
+                .map_err(ProgramError::from)?;
+        } else {
+            return err!(AccountCompressionErrorCode::InvalidQueueType);
+        }
+        self.get_account_mut().sequence_number += 1;
+        Ok(())
+    }
+
+    /// State nullification:
+    /// - value is committed to the bloom_filter for the non-inclusion proof
+    /// - nullifier is Hash(value, leaf_index, tx_hash), committed to the leaves hashchain
+    /// - tx_hash is the hash of all inputs and outputs
+    /// -> the history of how commitments were spent stays accessible in zkps,
+    ///    e.g. for fraud proofs
+    pub fn insert_nullifier_into_current_batch(
+        &mut self,
+        compressed_account_hash: &[u8; 32],
+        leaf_index: u64,
+        tx_hash: &[u8; 32],
+    ) -> Result<()> {
+        let leaf_index_bytes = leaf_index.to_be_bytes();
+        let nullifier = Poseidon::hashv(&[compressed_account_hash, &leaf_index_bytes, tx_hash])
+            .map_err(ProgramError::from)?;
+        self.insert_into_current_batch(compressed_account_hash, &nullifier)
+    }
+
+    fn insert_into_current_batch(
+        &mut self,
+        bloom_filter_value: &[u8; 32],
+        leaves_hash_value: &[u8; 32],
+    ) -> Result<()> {
+        unsafe {
+            let (root_index, sequence_number) = insert_into_current_batch(
+                QueueType::Input as u64,
+                &mut (*self.account).queue,
+                &mut self.batches,
+                &mut self.value_vecs,
+                &mut self.bloom_filter_stores,
+                &mut self.hashchain_store,
+                bloom_filter_value,
+                Some(leaves_hash_value),
+                None,
+            )?;
+
+            /*
+             * Note on security for the root buffer:
+             * Account {
+             *   bloom_filter: [B0, B1],
+             *   roots: [R0, R1, R2, R3, R4, R5, R6, R7, R8, R9],
+             * }
+             *
+             * Timeslot 0:
+             * - insert into B0 until full
+             *
+             * Timeslot 1:
+             * - insert into B1 until full
+             * - update tree with B0 in 4 partial updates, don't clear B0 yet
+             *   -> R0 -> B0.1
+             *   -> R1 -> B0.2
+             *   -> R2 -> B0.3
+             *   -> R3 -> B0.4 - final B0 root
+             *   B0.sequence_number = 13 (3 + account.root.length)
+             *   B0.root_index = 3
+             * - execute some B1 root updates
+             *   -> R4 -> B1.1
+             *   -> R5 -> B1.2
+             *   -> R6 -> B1.3
+             *   -> R7 -> B1.4 - final B1 root
+             *   B1.sequence_number = 17 (7 + account.root.length)
+             *   B1.root_index = 7
+             *   current_sequence_number = 8
+             * Timeslot 2:
+             * - clear B0
+             * - current_sequence_number < 14 -> zero out all roots until the root index is 3
+             *   - R8 -> 0
+             *   - R9 -> 0
+             *   - R0 -> 0
+             *   - R1 -> 0
+             *   - R2 -> 0
+             * - now all roots containing values nullified in the final B0 root update are zeroed
+             *   -> B0 is safe to clear
+             */
+            if let Some(sequence_number) = sequence_number {
+                // If the sequence number is greater than the current sequence number,
+                // there is still at least one root which can be used to prove
+                // inclusion of a value which was in the batch that was just wiped.
+                if sequence_number > self.get_account().sequence_number {
+                    // advance the root history array's current index from the
+                    // latest root to root_index and overwrite all roots with zeros
+                    if let Some(root_index) = root_index {
+                        let root_index = root_index as usize;
+                        let start = self.root_history.last_index();
+                        let end = self.root_history.len() + root_index;
+                        for index in start + 1..end {
+                            let index = index % self.root_history.len();
+                            if index == root_index {
+                                break;
+                            }
+                            let root = self.root_history.get_mut(index).unwrap();
+                            *root = [0u8; 32];
+                        }
+                    }
+                }
+            }
+        }
+        Ok(())
+    }
+
+    pub fn get_root_index(&self) -> u32 {
+        self.root_history.last_index() as u32
+    }
+
+    pub fn get_root(&self) -> Option<[u8; 32]> {
+        self.root_history.last().copied()
+    }
+}
+
+pub fn create_hash_chain<const T: usize>(inputs: [[u8; 32]; T]) -> Result<[u8; 32]> {
+    let mut hash_chain = inputs[0];
+    for input in inputs.iter().skip(1) {
+        hash_chain = Poseidon::hashv(&[&hash_chain, input]).map_err(ProgramError::from)?;
+    }
+    Ok(hash_chain)
+}
+
+pub fn create_hash_chain_from_vec(inputs: Vec<[u8; 32]>) -> Result<[u8; 32]> {
+    let mut hash_chain = inputs[0];
+    for input in inputs.iter().skip(1) {
+        hash_chain = Poseidon::hashv(&[&hash_chain, input]).map_err(ProgramError::from)?;
+    }
+    Ok(hash_chain)
+}
+
+pub fn get_merkle_tree_account_size_default() -> usize {
+    let mt_account = BatchedMerkleTreeAccount {
+        metadata: MerkleTreeMetadata::default(),
+        next_index: 0,
+        sequence_number: 0,
+        tree_type: TreeType::BatchedState as u64,
+        height: 26,
+        root_history_capacity: 20,
+        queue: BatchedQueue {
+            currently_processing_batch_index: 0,
+            num_batches: 2,
+            batch_size: TEST_DEFAULT_BATCH_SIZE,
+            bloom_filter_capacity: 20_000 * 8,
+            // next_index: 0,
+            zkp_batch_size: 10,
+            ..Default::default()
+        },
+    };
+    mt_account.size().unwrap()
+}
+
+pub fn get_merkle_tree_account_size_from_params(
+    params: InitStateTreeAccountsInstructionData,
+) -> usize {
+    get_merkle_tree_account_size(
+        params.input_queue_batch_size,
+        params.bloom_filter_capacity,
+        params.input_queue_zkp_batch_size,
+        params.root_history_capacity,
+        params.height,
+        params.input_queue_num_batches,
+    )
+}
+
+pub fn get_merkle_tree_account_size(
+    batch_size: u64,
bloom_filter_capacity: u64, + zkp_batch_size: u64, + root_history_capacity: u32, + height: u32, + num_batches: u64, +) -> usize { + let mt_account = BatchedMerkleTreeAccount { + metadata: MerkleTreeMetadata::default(), + next_index: 0, + sequence_number: 0, + tree_type: TreeType::BatchedState as u64, + height, + root_history_capacity, + queue: BatchedQueue { + num_batches, + batch_size, + bloom_filter_capacity, + zkp_batch_size, + ..Default::default() + }, + }; + mt_account.size().unwrap() +} +pub fn assert_nullify_event( + event: BatchNullifyEvent, + new_root: [u8; 32], + old_zero_copy_account: &ZeroCopyBatchedMerkleTreeAccount, + mt_pubkey: Pubkey, +) { + let batch_index = old_zero_copy_account + .get_account() + .queue + .next_full_batch_index; + let batch = old_zero_copy_account + .batches + .get(batch_index as usize) + .unwrap(); + let ref_event = BatchNullifyEvent { + id: mt_pubkey.to_bytes(), + batch_index, + zkp_batch_index: batch.get_num_inserted_zkps(), + new_root, + root_index: (old_zero_copy_account.get_root_index() + 1) + % old_zero_copy_account.get_account().root_history_capacity, + sequence_number: old_zero_copy_account.get_account().sequence_number + 1, + batch_size: old_zero_copy_account.get_account().queue.zkp_batch_size, + }; + assert_eq!(event, ref_event); +} + +pub fn assert_batch_append_event_event( + event: BatchAppendEvent, + new_root: [u8; 32], + old_output_queue_account: &ZeroCopyBatchedQueueAccount, + old_zero_copy_account: &ZeroCopyBatchedMerkleTreeAccount, + mt_pubkey: Pubkey, +) { + let batch_index = old_output_queue_account + .get_account() + .queue + .next_full_batch_index; + let batch = old_output_queue_account + .batches + .get(batch_index as usize) + .unwrap(); + let ref_event = BatchAppendEvent { + id: mt_pubkey.to_bytes(), + batch_index, + zkp_batch_index: batch.get_num_inserted_zkps(), + new_root, + root_index: (old_zero_copy_account.get_root_index() + 1) + % old_zero_copy_account.get_account().root_history_capacity, + sequence_number: old_zero_copy_account.get_account().sequence_number + 1, + batch_size: old_zero_copy_account.get_account().queue.zkp_batch_size, + old_next_index: old_zero_copy_account.get_account().next_index, + new_next_index: old_zero_copy_account.get_account().next_index + + old_output_queue_account.get_account().queue.zkp_batch_size, + }; + assert_eq!(event, ref_event); +} +#[cfg(test)] +mod tests { + #![allow(warnings)] + + use light_bloom_filter::{BloomFilter, BloomFilterError}; + use light_concurrent_merkle_tree::event::NullifierEvent; + use light_merkle_tree_reference::MerkleTree; + use light_prover_client::{ + gnark::helpers::{spawn_prover, ProofType, ProverConfig}, + mock_batched_forester::{self, MockBatchedForester, MockTxEvent}, + }; + use serial_test::serial; + use std::{cmp::min, ops::Deref}; + + use rand::{rngs::StdRng, Rng}; + + use crate::{ + batch::BatchState, + batched_queue::{ + get_output_queue_account_size_default, get_output_queue_account_size_from_params, + BatchedQueueAccount, + }, + init_batched_state_merkle_tree_accounts, + }; + + use super::*; + /// Insert into input queue: + /// 1. New value exists in the current batch bloom_filter + /// 2. New value does not exist in the other batch bloom_filters + /// 3. 
+    pub fn assert_input_queue_insert(
+        mut pre_account: BatchedMerkleTreeAccount,
+        mut pre_batches: ManuallyDrop<BoundedVec<Batch>>,
+        pre_value_vecs: &mut Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+        pre_roots: Vec<[u8; 32]>,
+        mut pre_hashchains: Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+        mut merkle_tree_zero_copy_account: ZeroCopyBatchedMerkleTreeAccount,
+        insert_values: Vec<[u8; 32]>,
+        leaf_indices: Vec<u64>,
+        tx_hash: [u8; 32],
+        input_is_in_tree: Vec<bool>,
+        array_indices: Vec<usize>,
+    ) -> Result<()> {
+        for (i, insert_value) in insert_values.iter().enumerate() {
+            if !input_is_in_tree[i] {
+                let value_vec_index = array_indices[i];
+                assert!(
+                    pre_value_vecs.iter_mut().any(|value_vec| {
+                        if value_vec.len() > value_vec_index
+                            && value_vec[value_vec_index] == *insert_value
+                        {
+                            value_vec[value_vec_index] = [0u8; 32];
+                            true
+                        } else {
+                            false
+                        }
+                    }),
+                    "Value not in value vec."
+                );
+            }
+
+            let leaf_index = leaf_indices[i];
+
+            let post_roots: Vec<[u8; 32]> = merkle_tree_zero_copy_account
+                .root_history
+                .iter()
+                .cloned()
+                .collect();
+            // If the root buffer changed, entries may only have been overwritten by [0u8; 32].
+            if post_roots != pre_roots {
+                let only_zero_overwrites = post_roots
+                    .iter()
+                    .zip(pre_roots.iter())
+                    .all(|(post, pre)| *post == *pre || *post == [0u8; 32]);
+                if !only_zero_overwrites {
+                    panic!("Root buffer changed.")
+                }
+            }
+
+            let current_batch_index = merkle_tree_zero_copy_account
+                .get_account()
+                .queue
+                .currently_processing_batch_index as usize;
+            let inserted_batch_index = pre_account.queue.currently_processing_batch_index as usize;
+            let expected_batch = pre_batches.get_mut(inserted_batch_index).unwrap();
+
+            if expected_batch.get_state() == BatchState::Inserted {
+                pre_hashchains[inserted_batch_index].clear();
+                expected_batch.sequence_number = 0;
+                expected_batch.advance_state_to_can_be_filled().unwrap();
+            }
+
+            // New value exists in the current batch bloom filter.
+            let mut bloom_filter = light_bloom_filter::BloomFilter::new(
+                merkle_tree_zero_copy_account.batches[inserted_batch_index].num_iters as usize,
+                merkle_tree_zero_copy_account.batches[inserted_batch_index].bloom_filter_capacity,
+                merkle_tree_zero_copy_account.bloom_filter_stores[inserted_batch_index]
+                    .as_mut_slice(),
+            )
+            .unwrap();
+            assert!(bloom_filter.contains(&insert_value));
+            let mut pre_hashchain = pre_hashchains.get_mut(inserted_batch_index).unwrap();
+            let nullifier =
+                Poseidon::hashv(&[insert_value.as_slice(), &leaf_index.to_be_bytes(), &tx_hash])
+                    .unwrap();
+            expected_batch.add_to_hash_chain(&nullifier, &mut pre_hashchain)?;
+
+            // New value does not exist in the other batch bloom_filters.
+            for (i, batch) in merkle_tree_zero_copy_account.batches.iter_mut().enumerate() {
+                // Skip the current batch; it is already checked above.
+                if i != inserted_batch_index {
+                    let mut bloom_filter = light_bloom_filter::BloomFilter::new(
+                        batch.num_iters as usize,
+                        batch.bloom_filter_capacity,
+                        merkle_tree_zero_copy_account.bloom_filter_stores[i].as_mut_slice(),
+                    )
+                    .unwrap();
+                    assert!(!bloom_filter.contains(&insert_value));
+                }
+            }
+            // If the currently processing batch changed, it should have
+            // incremented by one and the old batch should be ready to update.
+            if expected_batch.get_current_zkp_batch_index() == expected_batch.get_num_zkp_batches()
+            {
+                assert_eq!(
+                    merkle_tree_zero_copy_account.batches
+                        [pre_account.queue.currently_processing_batch_index as usize]
+                        .get_state(),
+                    BatchState::ReadyToUpdateTree
+                );
+                pre_account.queue.currently_processing_batch_index += 1;
+                pre_account.queue.currently_processing_batch_index %=
+                    pre_account.queue.num_batches;
+                assert_eq!(
+                    merkle_tree_zero_copy_account.batches[inserted_batch_index],
+                    *expected_batch
+                );
+                assert_eq!(
+                    merkle_tree_zero_copy_account.hashchain_store[inserted_batch_index]
+                        .last()
+                        .unwrap(),
+                    pre_hashchain.last().unwrap(),
+                    "Hashchain store inconsistent."
+                );
+            }
+        }
+
+        assert_eq!(
+            *merkle_tree_zero_copy_account.get_account(),
+            pre_account,
+            "BatchedMerkleTreeAccount changed."
+        );
+        let inserted_batch_index = pre_account.queue.currently_processing_batch_index as usize;
+        let expected_batch = pre_batches[inserted_batch_index].clone();
+        assert_eq!(
+            merkle_tree_zero_copy_account.batches[inserted_batch_index],
+            expected_batch
+        );
+        assert_eq!(
+            merkle_tree_zero_copy_account.hashchain_store, *pre_hashchains,
+            "Hashchain store inconsistent."
+        );
+        Ok(())
+    }
+
+    /// Expected behavior for insert into the output queue:
+    /// - add the value to the value array
+    /// - batch.num_inserted += 1
+    /// - if the batch is full after insertion, advance its state to ReadyToUpdateTree
+    pub fn assert_output_queue_insert(
+        mut pre_account: BatchedQueueAccount,
+        mut pre_batches: ManuallyDrop<BoundedVec<Batch>>,
+        mut pre_value_store: Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+        mut pre_hashchains: Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+        mut output_zero_copy_account: ZeroCopyBatchedQueueAccount,
+        insert_values: Vec<[u8; 32]>,
+    ) -> Result<()> {
+        for batch in output_zero_copy_account.batches.iter_mut() {
+            println!("output_zero_copy_account.batch: {:?}", batch);
+        }
+        for batch in pre_batches.iter() {
+            println!("pre_batch: {:?}", batch);
+        }
+        for insert_value in insert_values.iter() {
+            // There are no bloom_filters.
+            for store in output_zero_copy_account.bloom_filter_stores.iter() {
+                assert_eq!(store.capacity(), 0);
+            }
+            // If the currently processing batch changed, it should have
+            // incremented by one and the old batch should be ready to update.
+
+            let inserted_batch_index = pre_account.queue.currently_processing_batch_index as usize;
+            let expected_batch = &mut pre_batches[inserted_batch_index];
+            let pre_value_store = pre_value_store.get_mut(inserted_batch_index).unwrap();
+            let pre_hashchain = pre_hashchains.get_mut(inserted_batch_index).unwrap();
+            if expected_batch.get_state() == BatchState::Inserted {
+                expected_batch.advance_state_to_can_be_filled().unwrap();
+                pre_value_store.clear();
+                pre_hashchain.clear();
+                expected_batch.start_index = pre_account.next_index;
+            }
+            pre_account.next_index += 1;
+            expected_batch.store_and_hash_value(&insert_value, pre_value_store, pre_hashchain)?;
+
+            let other_batch = if inserted_batch_index == 0 { 1 } else { 0 };
+            assert!(output_zero_copy_account.value_vecs[inserted_batch_index]
+                .as_mut_slice()
+                .to_vec()
+                .contains(&insert_value));
+            assert!(!output_zero_copy_account.value_vecs[other_batch]
+                .as_mut_slice()
+                .to_vec()
+                .contains(&insert_value));
+            if expected_batch.get_num_zkp_batches() == expected_batch.get_current_zkp_batch_index()
+            {
+                assert!(
+                    output_zero_copy_account.batches
+                        [pre_account.queue.currently_processing_batch_index as usize]
+                        .get_state()
+                        == BatchState::ReadyToUpdateTree
+                );
+                pre_account.queue.currently_processing_batch_index += 1;
+                pre_account.queue.currently_processing_batch_index %= pre_account.queue.num_batches;
+                assert_eq!(
+                    output_zero_copy_account.batches[inserted_batch_index],
+                    *expected_batch
+                );
+            }
+        }
+        let inserted_batch_index = pre_account.queue.currently_processing_batch_index as usize;
+        let expected_batch = &pre_batches[inserted_batch_index];
+        assert_eq!(
+            output_zero_copy_account.batches[inserted_batch_index],
+            *expected_batch
+        );
+        assert_eq!(
+            *output_zero_copy_account.get_account(),
+            pre_account,
+            "ZeroCopyBatchedQueueAccount changed."
+        );
+        assert_eq!(pre_hashchains, output_zero_copy_account.hashchain_store);
+        assert_eq!(pre_value_store, output_zero_copy_account.value_vecs);
+        assert_eq!(pre_batches, output_zero_copy_account.batches);
+        Ok(())
+    }
+
+    #[derive(Debug, PartialEq, Clone)]
+    pub struct MockTransactionInputs {
+        inputs: Vec<[u8; 32]>,
+        outputs: Vec<[u8; 32]>,
+    }
+
+    pub fn simulate_transaction(
+        instruction_data: MockTransactionInputs,
+        merkle_tree_account_data: &mut [u8],
+        output_queue_account_data: &mut [u8],
+        reference_merkle_tree: &MerkleTree<Poseidon>,
+    ) -> Result<MockTxEvent> {
+        let mut output_zero_copy_account =
+            ZeroCopyBatchedQueueAccount::from_bytes_mut(output_queue_account_data).unwrap();
+        let mut merkle_tree_zero_copy_account =
+            ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree_account_data).unwrap();
+        let flattened_inputs = instruction_data
+            .inputs
+            .iter()
+            .cloned()
+            .chain(instruction_data.outputs.iter().cloned())
+            .collect::<Vec<[u8; 32]>>();
+        let tx_hash = create_hash_chain_from_vec(flattened_inputs)?;
+
+        for input in instruction_data.inputs.iter() {
+            // zkp inclusion in the Merkle tree
+            let inclusion = reference_merkle_tree.get_leaf_index(input);
+            let leaf_index = if inclusion.is_none() {
+                println!("simulate_transaction: inclusion is none");
+                let mut included = false;
+                let mut leaf_index = 0;
+                let next_index = merkle_tree_zero_copy_account.get_account().next_index;
+                let batch_size = output_zero_copy_account.get_account().queue.batch_size;
+
+                for (batch_index, value_vec) in
+                    output_zero_copy_account.value_vecs.iter_mut().enumerate()
+                {
+                    for (value_index, value) in value_vec.iter_mut().enumerate() {
+                        if *value == *input {
+                            let batch_start_index = output_zero_copy_account
+                                .batches
+                                .get(batch_index)
+                                .unwrap()
+                                .start_index;
+                            included = true;
+                            *value = [0u8; 32];
+                            leaf_index = value_index as u64 + batch_start_index;
+                        }
+                    }
+                }
+                if !included {
+                    panic!("Value not included in any output queue or trees.");
+                }
+                leaf_index
+            } else {
+                inclusion.unwrap() as u64
+            };
+
+            println!(
+                "sim tx input: \n {:?} \nleaf index : {:?}, \ntx hash {:?}",
+                input, leaf_index, tx_hash,
+            );
+            merkle_tree_zero_copy_account
+                .insert_nullifier_into_current_batch(input, leaf_index, &tx_hash)?;
+        }
+
+        for output in instruction_data.outputs.iter() {
+            let leaf_index = output_zero_copy_account.get_account().next_index;
+            println!(
+                "sim tx output: \n {:?} \nleaf index : {:?}",
+                output, leaf_index
+            );
+            output_zero_copy_account.insert_into_current_batch(output)?;
+        }
+        Ok(MockTxEvent {
+            inputs: instruction_data.inputs.clone(),
+            outputs: instruction_data.outputs.clone(),
+            tx_hash,
+        })
+    }
+
+    #[serial]
+    #[tokio::test]
+    async fn test_simulate_transactions() {
+        spawn_prover(
+            true,
+            ProverConfig {
+                run_mode: None,
+                circuits: vec![
+                    ProofType::BatchAppendWithProofsTest,
+                    ProofType::BatchUpdateTest,
+                ],
+            },
+        )
+        .await;
+        let mut mock_indexer = mock_batched_forester::MockBatchedForester::<26>::default();
+
+        let num_tx = 2200;
+        let owner = Pubkey::new_unique();
+
+        let queue_account_size = get_output_queue_account_size_default();
+
+        let mut output_queue_account_data = vec![0; queue_account_size];
+        let output_queue_pubkey = Pubkey::new_unique();
+
+        let mt_account_size = get_merkle_tree_account_size_default();
+        let mut mt_account_data = vec![0; mt_account_size];
+        let mt_pubkey = crate::ID;
+
+        let params = crate::InitStateTreeAccountsInstructionData::test_default();
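+        // Sizing sanity sketch (assuming test_default mirrors the default
+        // sizing): the buffers allocated above must match the computed account
+        // sizes exactly, otherwise from_bytes_mut/init bail with SizeMismatch:
+        // assert_eq!(mt_account_size, get_merkle_tree_account_size_from_params(params));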
+ let merkle_tree_rent = 1_000_000_000; + let queue_rent = 1_000_000_000; + let additional_bytes_rent = 1000; + + init_batched_state_merkle_tree_accounts( + owner, + params, + &mut output_queue_account_data, + output_queue_pubkey, + queue_rent, + &mut mt_account_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + let mut in_ready_for_update = false; + let mut out_ready_for_update = false; + let mut num_output_updates = 0; + let mut num_input_updates = 0; + let mut num_input_values = 0; + let mut num_output_values = 0; + + for tx in 0..num_tx { + println!("tx: {}", tx); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_updates: {}", num_output_updates); + { + println!("Simulate tx {} -----------------------------", tx); + println!("Num inserted values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let number_of_outputs = rng.gen_range(0..7); + let mut outputs = vec![]; + for _ in 0..number_of_outputs { + outputs.push(get_rnd_bytes(&mut rng)); + } + let number_of_inputs = if rng.gen_bool(0.5) { + let number_of_inputs = if !mock_indexer.active_leaves.is_empty() { + let x = min(mock_indexer.active_leaves.len(), 5); + rng.gen_range(0..x) + } else { + 0 + }; + number_of_inputs + } else { + 0 + }; + + let mut inputs = vec![]; + let mut input_is_in_tree = vec![]; + let mut leaf_indices = vec![]; + let mut array_indices = vec![]; + let mut retries = min(10, mock_indexer.active_leaves.len()); + while inputs.len() < number_of_inputs && retries > 0 { + let (leaf_array_index, leaf) = + get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); + let inserted = mock_indexer.merkle_tree.get_leaf_index(&leaf); + if let Some(leaf_index) = inserted { + inputs.push(leaf); + leaf_indices.push(leaf_index as u64); + input_is_in_tree.push(true); + array_indices.push(0); + } else if rng.gen_bool(0.1) { + inputs.push(leaf); + let output_queue = ZeroCopyBatchedQueueAccount::from_bytes_mut( + &mut output_queue_account_data, + ) + .unwrap(); + let mut leaf_array_index = 0; + let mut batch_index = 0; + for (i, vec) in output_queue.value_vecs.iter().enumerate() { + let pos = vec.iter().position(|value| *value == leaf); + if let Some(pos) = pos { + leaf_array_index = pos; + batch_index = i; + break; + } + if i == output_queue.value_vecs.len() - 1 { + panic!("Leaf not found in output queue."); + } + } + let batch = output_queue.batches.get(batch_index).unwrap(); + array_indices.push(leaf_array_index); + let leaf_index: u64 = batch.start_index + leaf_array_index as u64; + leaf_indices.push(leaf_index); + input_is_in_tree.push(false); + } + retries -= 1; + } + let number_of_inputs = inputs.len(); + println!("number_of_inputs: {}", number_of_inputs); + + let instruction_data = MockTransactionInputs { + inputs: inputs.clone(), + outputs: outputs.clone(), + }; + + let merkle_tree_zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + println!( + "input queue: {:?}", + merkle_tree_zero_copy_account.batches[0].get_num_inserted() + ); + let output_zero_copy_account = + ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data) + .unwrap(); + let mut pre_mt_data = mt_account_data.clone(); + let pre_output_account = output_zero_copy_account.get_account().clone(); + let pre_output_batches = 
output_zero_copy_account.batches.clone(); + let mut pre_output_value_stores = output_zero_copy_account.value_vecs.clone(); + let pre_hashchains = output_zero_copy_account.hashchain_store.clone(); + + let pre_mt_account = merkle_tree_zero_copy_account.get_account().clone(); + let pre_batches = merkle_tree_zero_copy_account.batches.clone(); + let pre_roots = merkle_tree_zero_copy_account + .root_history + .iter() + .cloned() + .collect(); + let pre_mt_hashchains = merkle_tree_zero_copy_account.hashchain_store.clone(); + + if !outputs.is_empty() || !inputs.is_empty() { + println!("Simulating tx with inputs: {:?}", instruction_data); + let event = simulate_transaction( + instruction_data, + &mut pre_mt_data, + &mut output_queue_account_data, + &mock_indexer.merkle_tree, + ) + .unwrap(); + mock_indexer.tx_events.push(event.clone()); + + if !inputs.is_empty() { + let merkle_tree_zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_data) + .unwrap(); + assert_input_queue_insert( + pre_mt_account, + pre_batches, + &mut pre_output_value_stores, + pre_roots, + pre_mt_hashchains, + merkle_tree_zero_copy_account, + inputs.clone(), + leaf_indices.clone(), + event.tx_hash, + input_is_in_tree, + array_indices, + ) + .unwrap(); + } + + if !outputs.is_empty() { + assert_output_queue_insert( + pre_output_account, + pre_output_batches, + pre_output_value_stores, + pre_hashchains, + output_zero_copy_account.clone(), + outputs.clone(), + ) + .unwrap(); + } + + for i in 0..number_of_inputs { + mock_indexer + .input_queue_leaves + .push((inputs[i], leaf_indices[i] as usize)); + } + for i in 0..number_of_outputs { + mock_indexer.active_leaves.push(outputs[i]); + mock_indexer.output_queue_leaves.push(outputs[i]); + } + + num_output_values += number_of_outputs; + num_input_values += number_of_inputs; + let merkle_tree_zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_data).unwrap(); + in_ready_for_update = merkle_tree_zero_copy_account + .batches + .iter() + .any(|batch| batch.get_state() == BatchState::ReadyToUpdateTree); + out_ready_for_update = output_zero_copy_account + .batches + .iter() + .any(|batch| batch.get_state() == BatchState::ReadyToUpdateTree); + + mt_account_data = pre_mt_data.clone(); + } else { + println!("Skipping simulate tx for no inputs or outputs"); + } + } + + if in_ready_for_update && rng.gen_bool(1.0) { + println!("Input update -----------------------------"); + println!("Num inserted values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let mut pre_mt_account_data = mt_account_data.clone(); + let old_zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + let (input_res, new_root) = { + let mut zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_account_data) + .unwrap(); + println!("batches {:?}", zero_copy_account.batches); + + let old_root_index = zero_copy_account.root_history.last_index(); + let next_full_batch = + zero_copy_account.get_account().queue.next_full_batch_index; + let batch = zero_copy_account + .batches + .get(next_full_batch as usize) + .unwrap(); + println!( + "zero_copy_account + .hashchain_store {:?}", + zero_copy_account.hashchain_store + ); + println!( + "hashchain store len {:?}", + zero_copy_account.hashchain_store.len() + ); + println!( + "batch.get_num_inserted_zkps() as 
usize {:?}", + batch.get_num_inserted_zkps() as usize + ); + let leaves_hashchain = zero_copy_account + .hashchain_store + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + + let (proof, new_root) = mock_indexer + .get_batched_update_proof( + zero_copy_account.get_account().queue.zkp_batch_size as u32, + *leaves_hashchain, + ) + .await + .unwrap(); + let instruction_data = InstructionDataBatchNullifyInputs { + public_inputs: BatchProofInputsIx { + new_root, + old_root_index: old_root_index as u16, + }, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + ( + zero_copy_account + .update_input_queue(instruction_data, mt_pubkey.to_bytes()), + new_root, + ) + }; + println!("Input update -----------------------------"); + println!("res {:?}", input_res); + assert!(input_res.is_ok()); + let nullify_event = input_res.unwrap(); + in_ready_for_update = false; + // assert Merkle tree + // sequence number increased X + // next index increased X + // current root index increased X + // One root changed one didn't + + let zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_account_data) + .unwrap(); + assert_nullify_event(nullify_event, new_root, &old_zero_copy_account, mt_pubkey); + assert_merkle_tree_update( + old_zero_copy_account, + zero_copy_account, + None, + None, + new_root, + ); + mt_account_data = pre_mt_account_data.clone(); + + num_input_updates += 1; + } + + if out_ready_for_update && rng.gen_bool(1.0) { + println!("Output update -----------------------------"); + println!("Num inserted values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + + let mut pre_mt_account_data = mt_account_data.clone(); + let mut zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_account_data) + .unwrap(); + let output_zero_copy_account = + ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data) + .unwrap(); + + let next_index = zero_copy_account.get_account().next_index; + let next_full_batch = output_zero_copy_account + .get_account() + .queue + .next_full_batch_index; + let batch = output_zero_copy_account + .batches + .get(next_full_batch as usize) + .unwrap(); + let leaves_hashchain = output_zero_copy_account + .hashchain_store + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_append_proof( + next_index as usize, + batch.get_num_inserted_zkps() as u32, + batch.zkp_batch_size as u32, + *leaves_hashchain, + batch.get_num_zkp_batches() as u32, + ) + .await + .unwrap(); + + let instruction_data = InstructionDataBatchAppendInputs { + public_inputs: AppendBatchProofInputsIx { new_root }, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + let mut pre_output_queue_state = output_queue_account_data.clone(); + println!("Output update -----------------------------"); + + let output_res = zero_copy_account.update_output_queue( + &mut pre_output_queue_state, + instruction_data, + mt_pubkey.to_bytes(), + ); + assert!(output_res.is_ok()); + let batch_append_event = output_res.unwrap(); + + assert_eq!( + *zero_copy_account.root_history.last().unwrap(), + mock_indexer.merkle_tree.root() + ); + let output_zero_copy_account = + 
ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut pre_output_queue_state) + .unwrap(); + let old_output_zero_copy_account = + ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data) + .unwrap(); + + let old_zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap(); + + println!("batch 0: {:?}", output_zero_copy_account.batches[0]); + println!("batch 1: {:?}", output_zero_copy_account.batches[1]); + assert_batch_append_event_event( + batch_append_event, + new_root, + &old_output_zero_copy_account, + &old_zero_copy_account, + mt_pubkey, + ); + assert_merkle_tree_update( + old_zero_copy_account, + zero_copy_account, + Some(old_output_zero_copy_account), + Some(output_zero_copy_account), + new_root, + ); + + output_queue_account_data = pre_output_queue_state; + mt_account_data = pre_mt_account_data; + out_ready_for_update = false; + num_output_updates += 1; + } + } + let output_zero_copy_account = + ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data).unwrap(); + println!("batch 0: {:?}", output_zero_copy_account.batches[0]); + println!("batch 1: {:?}", output_zero_copy_account.batches[1]); + println!("num_output_updates: {}", num_output_updates); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_values: {}", num_output_values); + println!("num_input_values: {}", num_input_values); + } + + // Get random leaf that is not in the input queue. + pub fn get_random_leaf( + rng: &mut StdRng, + active_leaves: &mut Vec<[u8; 32]>, + ) -> (usize, [u8; 32]) { + if active_leaves.len() == 0 { + return (0, [0u8; 32]); + } + let index = rng.gen_range(0..active_leaves.len()); + // get random leaf from vector and remove it + (index, active_leaves.remove(index)) + } + + /// queues with a counter which keeps things below X tps and an if that + /// executes tree updates when possible. 
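+    /// Rough per-iteration flow of the test below (a sketch, not a spec):
+    /// 1. coin flip: insert a random value into the output queue;
+    /// 2. coin flip: nullify a random active leaf via the input queue;
+    /// 3. whenever a batch reaches ReadyToUpdateTree, prove and apply the
+    ///    corresponding batch append / batch nullify update on the tree.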
+    #[serial]
+    #[tokio::test]
+    async fn test_e2e() {
+        spawn_prover(
+            true,
+            ProverConfig {
+                run_mode: None,
+                circuits: vec![
+                    ProofType::BatchAppendWithProofsTest,
+                    ProofType::BatchUpdateTest,
+                ],
+            },
+        )
+        .await;
+        let mut mock_indexer = mock_batched_forester::MockBatchedForester::<26>::default();
+
+        let num_tx = 2200;
+        let owner = Pubkey::new_unique();
+
+        let queue_account_size = get_output_queue_account_size_default();
+
+        let mut output_queue_account_data = vec![0; queue_account_size];
+        let output_queue_pubkey = Pubkey::new_unique();
+
+        let mt_account_size = get_merkle_tree_account_size_default();
+        let mut mt_account_data = vec![0; mt_account_size];
+        let mt_pubkey = Pubkey::new_unique();
+
+        let params = crate::InitStateTreeAccountsInstructionData::test_default();
+
+        let merkle_tree_rent = 1_000_000_000;
+        let queue_rent = 1_000_000_000;
+        let additional_bytes_rent = 1000;
+
+        init_batched_state_merkle_tree_accounts(
+            owner,
+            params,
+            &mut output_queue_account_data,
+            output_queue_pubkey,
+            queue_rent,
+            &mut mt_account_data,
+            mt_pubkey,
+            merkle_tree_rent,
+            additional_bytes_rent,
+        )
+        .unwrap();
+        use rand::SeedableRng;
+        let mut rng = StdRng::seed_from_u64(0);
+        let mut in_ready_for_update = false;
+        let mut out_ready_for_update = false;
+        let mut num_output_updates = 0;
+        let mut num_input_updates = 0;
+        let mut num_input_values = 0;
+        let mut num_output_values = 0;
+
+        for tx in 0..num_tx {
+            println!("tx: {}", tx);
+            println!("num_input_updates: {}", num_input_updates);
+            println!("num_output_updates: {}", num_output_updates);
+            // Output queue
+            {
+                let mut output_zero_copy_account =
+                    ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data)
+                        .unwrap();
+                if rng.gen_bool(0.5) {
+                    println!("Output insert -----------------------------");
+                    println!("num_output_values: {}", num_output_values);
+                    let rnd_bytes = get_rnd_bytes(&mut rng);
+
+                    let pre_account = output_zero_copy_account.get_account().clone();
+                    let pre_batches = output_zero_copy_account.batches.clone();
+                    let pre_value_store = output_zero_copy_account.value_vecs.clone();
+                    let pre_hashchains = output_zero_copy_account.hashchain_store.clone();
+
+                    output_zero_copy_account
+                        .insert_into_current_batch(&rnd_bytes)
+                        .unwrap();
+                    assert_output_queue_insert(
+                        pre_account,
+                        pre_batches,
+                        pre_value_store,
+                        pre_hashchains,
+                        output_zero_copy_account.clone(),
+                        vec![rnd_bytes],
+                    )
+                    .unwrap();
+                    num_output_values += 1;
+                    mock_indexer.output_queue_leaves.push(rnd_bytes);
+                }
+                out_ready_for_update = output_zero_copy_account
+                    .batches
+                    .iter()
+                    .any(|batch| batch.get_state() == BatchState::ReadyToUpdateTree);
+            }
+
+            // Input queue
+            {
+                let mut merkle_tree_zero_copy_account =
+                    ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap();
+
+                if rng.gen_bool(0.5) && !mock_indexer.active_leaves.is_empty() {
+                    println!("Input insert -----------------------------");
+                    let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves);
+
+                    let pre_batches: ManuallyDrop<BoundedVec<Batch>> =
+                        merkle_tree_zero_copy_account.batches.clone();
+                    let pre_account = merkle_tree_zero_copy_account.get_account().clone();
+                    let pre_roots = merkle_tree_zero_copy_account
+                        .root_history
+                        .iter()
+                        .cloned()
+                        .collect();
+                    let pre_hashchains = merkle_tree_zero_copy_account.hashchain_store.clone();
+                    let tx_hash = create_hash_chain_from_vec(vec![leaf].to_vec()).unwrap();
+                    let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap();
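+                    // For reference, insert_nullifier_into_current_batch (called
+                    // below) derives the nullifier committed to the leaves
+                    // hashchain as:
+                    // Poseidon::hashv(&[&leaf, &leaf_index.to_be_bytes(), &tx_hash]).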
+                    mock_indexer.input_queue_leaves.push((leaf, leaf_index));
+                    mock_indexer.tx_events.push(MockTxEvent {
+                        inputs: vec![leaf],
+                        outputs: vec![],
+                        tx_hash,
+                    });
+
+                    merkle_tree_zero_copy_account
+                        .insert_nullifier_into_current_batch(
+                            &leaf.to_vec().try_into().unwrap(),
+                            leaf_index as u64,
+                            &tx_hash,
+                        )
+                        .unwrap();
+
+                    {
+                        let merkle_tree_zero_copy_account =
+                            ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data)
+                                .unwrap();
+                        assert_input_queue_insert(
+                            pre_account,
+                            pre_batches,
+                            &mut vec![],
+                            pre_roots,
+                            pre_hashchains,
+                            merkle_tree_zero_copy_account,
+                            vec![leaf],
+                            vec![leaf_index as u64],
+                            tx_hash,
+                            vec![true],
+                            vec![],
+                        )
+                        .unwrap();
+                    }
+                    num_input_values += 1;
+                }
+
+                in_ready_for_update = merkle_tree_zero_copy_account
+                    .batches
+                    .iter()
+                    .any(|batch| batch.get_state() == BatchState::ReadyToUpdateTree);
+            }
+
+            if in_ready_for_update {
+                println!("Input update -----------------------------");
+                println!("Num inserted values: {}", num_input_values);
+                println!("Num input updates: {}", num_input_updates);
+                println!("Num output updates: {}", num_output_updates);
+                println!("Num output values: {}", num_output_values);
+                let mut pre_mt_account_data = mt_account_data.clone();
+                in_ready_for_update = false;
+                perform_input_update(&mut pre_mt_account_data, &mut mock_indexer, true, mt_pubkey)
+                    .await;
+                mt_account_data = pre_mt_account_data.clone();
+
+                num_input_updates += 1;
+            }
+
+            if out_ready_for_update {
+                println!("Output update -----------------------------");
+                println!("Num inserted values: {}", num_input_values);
+                println!("Num input updates: {}", num_input_updates);
+                println!("Num output updates: {}", num_output_updates);
+                println!("Num output values: {}", num_output_values);
+                let mut pre_mt_account_data = mt_account_data.clone();
+                let mut zero_copy_account =
+                    ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_account_data)
+                        .unwrap();
+                let output_zero_copy_account =
+                    ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data)
+                        .unwrap();
+
+                let next_index = zero_copy_account.get_account().next_index;
+                let next_full_batch = output_zero_copy_account
+                    .get_account()
+                    .queue
+                    .next_full_batch_index;
+                let batch = output_zero_copy_account
+                    .batches
+                    .get(next_full_batch as usize)
+                    .unwrap();
+                let leaves = output_zero_copy_account
+                    .value_vecs
+                    .get(next_full_batch as usize)
+                    .unwrap()
+                    .deref()
+                    .clone()
+                    .to_vec();
+                println!("leaves {:?}", leaves.len());
+                let leaves_hashchain = output_zero_copy_account
+                    .hashchain_store
+                    .get(next_full_batch as usize)
+                    .unwrap()
+                    .get(batch.get_num_inserted_zkps() as usize)
+                    .unwrap();
+                let (proof, new_root) = mock_indexer
+                    .get_batched_append_proof(
+                        next_index as usize,
+                        batch.get_num_inserted_zkps() as u32,
+                        batch.zkp_batch_size as u32,
+                        *leaves_hashchain,
+                        batch.get_num_zkp_batches() as u32,
+                    )
+                    .await
+                    .unwrap();
+                let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize;
+                let end = start + batch.zkp_batch_size as usize;
+                for i in start..end {
+                    // Storing the leaf in the output queue indexer so that it
+                    // can be inserted into the input queue later.
+                    mock_indexer.active_leaves.push(leaves[i]);
+                }
+
+                let instruction_data = InstructionDataBatchAppendInputs {
+                    public_inputs: AppendBatchProofInputsIx { new_root },
+                    compressed_proof: CompressedProof {
+                        a: proof.a,
+                        b: proof.b,
+                        c: proof.c,
+                    },
+                };
+
+                let mut pre_output_queue_state = output_queue_account_data.clone();
+                println!("Output update -----------------------------");
+
+                let output_res = zero_copy_account.update_output_queue(
+                    &mut pre_output_queue_state,
+                    instruction_data,
+                    mt_pubkey.to_bytes(),
+                );
+
+                assert_eq!(
+                    *zero_copy_account.root_history.last().unwrap(),
+                    mock_indexer.merkle_tree.root()
+                );
+                println!(
+                    "post update: sequence number: {}",
+                    zero_copy_account.get_account().sequence_number
+                );
+                println!("output_res {:?}", output_res);
+                assert!(output_res.is_ok());
+
+                println!("output update success {}", num_output_updates);
+                println!("num_output_values: {}", num_output_values);
+                println!("num_input_values: {}", num_input_values);
+                let output_zero_copy_account =
+                    ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut pre_output_queue_state)
+                        .unwrap();
+                let old_output_zero_copy_account =
+                    ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data)
+                        .unwrap();
+
+                let old_zero_copy_account =
+                    ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap();
+
+                println!("batch 0: {:?}", output_zero_copy_account.batches[0]);
+                println!("batch 1: {:?}", output_zero_copy_account.batches[1]);
+                let _batch_append_event = output_res.unwrap();
+                assert_merkle_tree_update(
+                    old_zero_copy_account,
+                    zero_copy_account,
+                    Some(old_output_zero_copy_account),
+                    Some(output_zero_copy_account),
+                    new_root,
+                );
+
+                output_queue_account_data = pre_output_queue_state;
+                mt_account_data = pre_mt_account_data;
+                out_ready_for_update = false;
+                num_output_updates += 1;
+            }
+        }
+        let output_zero_copy_account =
+            ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data).unwrap();
+        println!("batch 0: {:?}", output_zero_copy_account.batches[0]);
+        println!("batch 1: {:?}", output_zero_copy_account.batches[1]);
+        println!("num_output_updates: {}", num_output_updates);
+        println!("num_input_updates: {}", num_input_updates);
+        println!("num_output_values: {}", num_output_values);
+        println!("num_input_values: {}", num_input_values);
+    }
+    pub async fn perform_input_update(
+        mt_account_data: &mut [u8],
+        mock_indexer: &mut MockBatchedForester<26>,
+        enable_assert: bool,
+        mt_pubkey: Pubkey,
+    ) {
+        let mut cloned_mt_account_data = mt_account_data.to_vec();
+        let old_zero_copy_account =
+            ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(cloned_mt_account_data.as_mut_slice())
+                .unwrap();
+        let (input_res, root) = {
+            let mut zero_copy_account =
+                ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(mt_account_data).unwrap();
+
+            let old_root_index = zero_copy_account.root_history.last_index();
+            let next_full_batch = zero_copy_account.get_account().queue.next_full_batch_index;
+            let batch = zero_copy_account
+                .batches
+                .get(next_full_batch as usize)
+                .unwrap();
+            let leaves_hashchain = zero_copy_account
+                .hashchain_store
+                .get(next_full_batch as usize)
+                .unwrap()
+                .get(batch.get_num_inserted_zkps() as usize)
+                .unwrap();
+            let (proof, new_root) = mock_indexer
+                .get_batched_update_proof(
+                    zero_copy_account.get_account().queue.zkp_batch_size as u32,
+                    *leaves_hashchain,
+                )
+                .await
+                .unwrap();
+            let instruction_data = InstructionDataBatchNullifyInputs {
+                public_inputs: BatchProofInputsIx {
+                    new_root,
+                    old_root_index: old_root_index as u16,
+                },
+                compressed_proof: CompressedProof {
+                    a: proof.a,
+                    b: proof.b,
+                    c: proof.c,
+                },
+            };
+
+            (
+                zero_copy_account.update_input_queue(instruction_data, mt_pubkey.to_bytes()),
+                new_root,
+            )
+        };
+        println!("Input update -----------------------------");
+        println!("res {:?}", input_res);
+        assert!(input_res.is_ok());
+        let _event = input_res.unwrap();
+
+        // Assert the Merkle tree account:
+        // - the sequence number increased,
+        // - the next index increased,
+        // - the current root index increased,
+        // - one root changed and one did not.
+        let zero_copy_account =
+            ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(mt_account_data).unwrap();
+        if enable_assert {
+            assert_merkle_tree_update(old_zero_copy_account, zero_copy_account, None, None, root);
+        }
+    }
+
+    fn assert_merkle_tree_update(
+        old_zero_copy_account: ZeroCopyBatchedMerkleTreeAccount,
+        zero_copy_account: ZeroCopyBatchedMerkleTreeAccount,
+        old_queue_account: Option<ZeroCopyBatchedQueueAccount>,
+        queue_account: Option<ZeroCopyBatchedQueueAccount>,
+        root: [u8; 32],
+    ) {
+        let mut expected_account = old_zero_copy_account.get_account().clone();
+        expected_account.sequence_number += 1;
+        let actual_account = zero_copy_account.get_account().clone();
+
+        let (
+            batches,
+            previous_batches,
+            previous_processing,
+            expected_queue_account,
+            mut next_full_batch_index,
+        ) = if let Some(queue_account) = queue_account.as_ref() {
+            let expected_queue_account = old_queue_account.as_ref().unwrap().get_account().clone();
+
+            let previous_processing = if queue_account
+                .get_account()
+                .queue
+                .currently_processing_batch_index
+                == 0
+            {
+                queue_account.get_account().queue.num_batches - 1
+            } else {
+                queue_account
+                    .get_account()
+                    .queue
+                    .currently_processing_batch_index
+                    - 1
+            };
+            expected_account.next_index += queue_account.batches.get(0).unwrap().zkp_batch_size;
+            let next_full_batch_index = expected_queue_account.queue.next_full_batch_index;
+            (
+                queue_account.batches.clone(),
+                old_queue_account.as_ref().unwrap().batches.clone(),
+                previous_processing,
+                Some(expected_queue_account),
+                next_full_batch_index,
+            )
+        } else {
+            let previous_processing =
+                if expected_account.queue.currently_processing_batch_index == 0 {
+                    expected_account.queue.num_batches - 1
+                } else {
+                    expected_account.queue.currently_processing_batch_index - 1
+                };
+            (
+                zero_copy_account.batches.clone(),
+                old_zero_copy_account.batches.clone(),
+                previous_processing,
+                None,
+                0,
+            )
+        };
+
+        let mut checked_one = false;
+        for (i, batch) in batches.iter().enumerate() {
+            let previous_batch = previous_batches.get(i).unwrap();
+            if batch.sequence_number != 0
+                && batch.get_state() == BatchState::Inserted
+                && previous_processing == i as u64
+            {
+                if queue_account.is_some() {
+                    next_full_batch_index += 1;
+                    next_full_batch_index %= expected_queue_account.unwrap().queue.num_batches;
+                } else {
+                    expected_account.queue.next_full_batch_index += 1;
+                    expected_account.queue.next_full_batch_index %=
+                        expected_account.queue.num_batches;
+                }
+
+                assert_eq!(
+                    batch.root_index as usize,
+                    zero_copy_account.root_history.last_index()
+                );
+                assert_eq!(batch.get_num_inserted_zkps(), 0);
+                assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted());
+                assert_eq!(batch.get_num_inserted(), 0);
+                assert_ne!(batch.sequence_number, previous_batch.sequence_number);
+                assert_eq!(batch.get_current_zkp_batch_index(), 0);
+                assert_ne!(batch.get_state(), previous_batch.get_state());
+            } else if batch.get_state() == BatchState::ReadyToUpdateTree && !checked_one {
+                checked_one = true;
+                assert_eq!(
+                    batch.get_num_inserted_zkps(),
+                    previous_batch.get_num_inserted_zkps() + 1
+                );
+                assert_eq!(batch.get_num_inserted(), previous_batch.get_num_inserted());
+
+                assert_eq!(batch.sequence_number, previous_batch.sequence_number);
+                assert_eq!(batch.root_index, previous_batch.root_index);
+                assert_eq!(
+                    batch.get_current_zkp_batch_index(),
+                    batch.get_num_zkp_batches()
+                );
+                assert_eq!(batch.get_state(), previous_batch.get_state());
+                assert_eq!(batch.get_num_inserted(), 0);
+            } else {
+                assert_eq!(*batch, *previous_batch);
+            }
+        }
+        if let Some(queue_account) = queue_account.as_ref() {
+            let mut expected_queue_account = expected_queue_account.unwrap();
+            expected_queue_account.queue.next_full_batch_index = next_full_batch_index;
+            assert_eq!(*queue_account.get_account(), expected_queue_account);
+        }
+
+        assert_eq!(actual_account, expected_account);
+        for (i, root) in zero_copy_account.root_history.iter().enumerate() {
+            println!("current: i {:?}", i);
+            println!("current: root {:?}", root);
+        }
+        for (i, root) in old_zero_copy_account.root_history.iter().enumerate() {
+            println!("old_zero_copy_account: i {:?}", i);
+            println!("old_zero_copy_account: root {:?}", root);
+        }
+        assert_eq!(*zero_copy_account.root_history.last().unwrap(), root);
+    }
+
+    pub fn get_rnd_bytes(rng: &mut StdRng) -> [u8; 32] {
+        let mut rnd_bytes = rng.gen::<[u8; 32]>();
+        rnd_bytes[0] = 0;
+        rnd_bytes
+    }
+
+    #[serial]
+    #[tokio::test]
+    async fn test_fill_queues_completely() {
+        spawn_prover(
+            true,
+            ProverConfig {
+                run_mode: None,
+                circuits: vec![
+                    ProofType::BatchAppendWithProofsTest,
+                    ProofType::BatchUpdateTest,
+                ],
+            },
+        )
+        .await;
+        let root_history_capacities = vec![17, 80];
+        for root_history_capacity in root_history_capacities {
+            let mut mock_indexer = mock_batched_forester::MockBatchedForester::<26>::default();
+
+            let mut params = crate::InitStateTreeAccountsInstructionData::test_default();
+            params.output_queue_batch_size = params.input_queue_batch_size * 10;
+            // Use a root history capacity that is greater than the number of
+            // input updates.
+            params.root_history_capacity = root_history_capacity;
+
+            let owner = Pubkey::new_unique();
+
+            let queue_account_size = get_output_queue_account_size_from_params(params);
+
+            let mut output_queue_account_data = vec![0; queue_account_size];
+            let output_queue_pubkey = Pubkey::new_unique();
+
+            let mt_account_size = get_merkle_tree_account_size_from_params(params);
+            let mut mt_account_data = vec![0; mt_account_size];
+            let mt_pubkey = Pubkey::new_unique();
+
+            let merkle_tree_rent = 1_000_000_000;
+            let queue_rent = 1_000_000_000;
+            let additional_bytes_rent = 1000;
+
+            init_batched_state_merkle_tree_accounts(
+                owner,
+                params,
+                &mut output_queue_account_data,
+                output_queue_pubkey,
+                queue_rent,
+                &mut mt_account_data,
+                mt_pubkey,
+                merkle_tree_rent,
+                additional_bytes_rent,
+            )
+            .unwrap();
+            use rand::SeedableRng;
+            let mut rng = StdRng::seed_from_u64(0);
+            let mut num_output_updates = 0;
+            let mut num_input_updates = 0;
+            let mut num_input_values = 0;
+            let mut num_output_values = 0;
+            let mut output_zero_copy_account =
+                ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data)
+                    .unwrap();
+            let num_tx = params.output_queue_num_batches * params.output_queue_batch_size;
+
+            for _ in 0..num_tx {
+                // Output queue
+                let mut output_zero_copy_account =
+                    ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data)
+                        .unwrap();
+
+                let rnd_bytes = get_rnd_bytes(&mut rng);
+
+                let pre_account = output_zero_copy_account.get_account().clone();
+                let pre_batches = output_zero_copy_account.batches.clone();
+                let pre_value_store = output_zero_copy_account.value_vecs.clone();
+                let pre_hashchains = output_zero_copy_account.hashchain_store.clone();
+
+                output_zero_copy_account
+                    .insert_into_current_batch(&rnd_bytes)
+                    .unwrap();
+                assert_output_queue_insert(
+                    pre_account,
+                    pre_batches,
+                    pre_value_store,
+                    pre_hashchains,
+                    output_zero_copy_account.clone(),
+                    vec![rnd_bytes],
+                )
+                .unwrap();
+                mock_indexer.output_queue_leaves.push(rnd_bytes);
+                num_output_values += 1;
+            }
+            let rnd_bytes = get_rnd_bytes(&mut rng);
+            let result = output_zero_copy_account.insert_into_current_batch(&rnd_bytes);
+            assert_eq!(
+                result.unwrap_err(),
+                AccountCompressionErrorCode::BatchNotReady.into()
+            );
+
+            output_zero_copy_account
+                .batches
+                .iter()
+                .for_each(|b| assert_eq!(b.get_state(), BatchState::ReadyToUpdateTree));
+
+            for _ in 0..output_zero_copy_account
+                .get_account()
+                .queue
+                .get_num_zkp_batches()
+            {
+                println!("Output update -----------------------------");
+                println!("Num inserted values: {}", num_input_values);
+                println!("Num input updates: {}", num_input_updates);
+                println!("Num output updates: {}", num_output_updates);
+                println!("Num output values: {}", num_output_values);
+                let mut pre_mt_account_data = mt_account_data.clone();
+                let mut zero_copy_account =
+                    ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut pre_mt_account_data)
+                        .unwrap();
+                let output_zero_copy_account =
+                    ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data)
+                        .unwrap();
+                let mut pre_output_queue_state = output_queue_account_data.clone();
+                let next_index = zero_copy_account.get_account().next_index;
+                let next_full_batch = output_zero_copy_account
+                    .get_account()
+                    .queue
+                    .next_full_batch_index;
+                let batch = output_zero_copy_account
+                    .batches
+                    .get(next_full_batch as usize)
+                    .unwrap();
+                let leaves = mock_indexer.output_queue_leaves.clone();
+                let leaves_hashchain = output_zero_copy_account
+                    .hashchain_store
+                    .get(next_full_batch as usize)
+                    .unwrap()
+                    .get(batch.get_num_inserted_zkps() as usize)
+                    .unwrap();
+                let (proof, new_root) = mock_indexer
+                    .get_batched_append_proof(
+                        next_index as usize,
+                        batch.get_num_inserted_zkps() as u32,
+                        batch.zkp_batch_size as u32,
+                        *leaves_hashchain,
+                        batch.get_num_zkp_batches() as u32,
+                    )
+                    .await
+                    .unwrap();
+                let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize;
+                let end = start + batch.zkp_batch_size as usize;
+                for i in start..end {
+                    // Storing the leaf in the output queue indexer so that it
+                    // can be inserted into the input queue later.
+                    mock_indexer.active_leaves.push(leaves[i]);
+                }
+
+                let instruction_data = InstructionDataBatchAppendInputs {
+                    public_inputs: AppendBatchProofInputsIx { new_root },
+                    compressed_proof: CompressedProof {
+                        a: proof.a,
+                        b: proof.b,
+                        c: proof.c,
+                    },
+                };
+
+                println!("Output update -----------------------------");
+
+                let output_res = zero_copy_account.update_output_queue(
+                    &mut pre_output_queue_state,
+                    instruction_data,
+                    mt_pubkey.to_bytes(),
+                );
+                assert!(output_res.is_ok());
+
+                assert_eq!(
+                    *zero_copy_account.root_history.last().unwrap(),
+                    mock_indexer.merkle_tree.root()
+                );
+
+                let _output_zero_copy_account =
+                    ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut pre_output_queue_state)
+                        .unwrap();
+                let _old_output_zero_copy_account =
+                    ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account_data)
+                        .unwrap();
+
+                let _old_zero_copy_account =
+                    ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap();
+
+                output_queue_account_data = pre_output_queue_state;
+                mt_account_data = pre_mt_account_data;
+                num_output_updates += 1;
+            }
+
+            let num_tx = params.input_queue_num_batches * params.input_queue_batch_size;
+            let mut first_value = [0u8; 32];
+            for tx in 0..num_tx {
+                let mut merkle_tree_zero_copy_account =
+                    ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap();
+
+                println!("Input insert -----------------------------");
+                let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves);
+                let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap();
+
+                let pre_batches: ManuallyDrop<BoundedVec<Batch>> =
+                    merkle_tree_zero_copy_account.batches.clone();
+                let pre_account = merkle_tree_zero_copy_account.get_account().clone();
+                let pre_roots = merkle_tree_zero_copy_account
+                    .root_history
+                    .iter()
+                    .cloned()
+                    .collect();
+                let pre_hashchains = merkle_tree_zero_copy_account.hashchain_store.clone();
+                let tx_hash = create_hash_chain_from_vec(vec![leaf]).unwrap();
+                // Index input queue insert event
+                mock_indexer.input_queue_leaves.push((leaf, leaf_index));
+                mock_indexer.tx_events.push(MockTxEvent {
+                    inputs: vec![leaf],
+                    outputs: vec![],
+                    tx_hash,
+                });
+                merkle_tree_zero_copy_account
+                    .insert_nullifier_into_current_batch(
+                        &leaf.to_vec().try_into().unwrap(),
+                        leaf_index as u64,
+                        &tx_hash,
+                    )
+                    .unwrap();
+                assert_input_queue_insert(
+                    pre_account,
+                    pre_batches,
+                    &mut vec![],
+                    pre_roots,
+                    pre_hashchains,
+                    merkle_tree_zero_copy_account,
+                    vec![leaf],
+                    vec![leaf_index as u64],
+                    tx_hash,
+                    vec![true],
+                    vec![],
+                )
+                .unwrap();
+
+                // Insert the same value twice
+                {
+                    // Copy the data so that a failing insert does not affect
+                    // the state used by subsequent assertions.
+                    let mut mt_account_data = mt_account_data.clone();
+                    let mut merkle_tree_zero_copy_account =
+                        ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data)
+                            .unwrap();
+                    let result = merkle_tree_zero_copy_account.insert_nullifier_into_current_batch(
+                        &leaf.to_vec().try_into().unwrap(),
+                        leaf_index as u64,
+                        &tx_hash,
+                    );
+                    result.unwrap_err();
+                    // assert_eq!(
+                    //     result.unwrap_err(),
+                    //     AccountCompressionErrorCode::BatchInsertFailed.into()
+                    // );
+                }
+                // Try to insert the first value again; the bloom filters of
+                // the other batches must reject it.
+                if tx == 0 {
+                    first_value = leaf;
+                } else {
+                    let mut mt_account_data = mt_account_data.clone();
+                    let mut merkle_tree_zero_copy_account =
+                        ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data)
+                            .unwrap();
+                    let result = merkle_tree_zero_copy_account.insert_nullifier_into_current_batch(
+                        &first_value.to_vec().try_into().unwrap(),
+                        leaf_index as u64,
+                        &tx_hash,
+                    );
+                    // assert_eq!(
+                    //     result.unwrap_err(),
+                    //     AccountCompressionErrorCode::BatchInsertFailed.into()
+                    // );
+                    result.unwrap_err();
+                    // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into());
+                }
+            }
+            // Assert input queue is full and doesn't accept more inserts
+            {
+                let merkle_tree_zero_copy_account =
+                    &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data)
+                        .unwrap();
+                let rnd_bytes = get_rnd_bytes(&mut rng);
+                let tx_hash = get_rnd_bytes(&mut rng);
+                let result = merkle_tree_zero_copy_account
+                    .insert_nullifier_into_current_batch(&rnd_bytes, 0, &tx_hash);
+                assert_eq!(
+                    result.unwrap_err(),
+                    AccountCompressionErrorCode::BatchNotReady.into()
+                );
+            }
+            // Root created by the last zkp batch update (i == 4) of the first
+            // input queue batch (batch_size / zkp_batch_size == 5 with the
+            // test params).
+            let mut first_input_batch_update_root_value = [0u8; 32];
+            let num_updates = params.input_queue_batch_size / params.input_queue_zkp_batch_size
+                * params.input_queue_num_batches;
+            for i in 0..num_updates {
+                println!("input update ----------------------------- {}", i);
+                perform_input_update(&mut mt_account_data, &mut mock_indexer, false, mt_pubkey)
+                    .await;
+                println!(
+                    "performed input queue batched update {} created root {:?}",
+                    i,
+                    mock_indexer.merkle_tree.root()
+                );
+                if i == 4 {
+                    first_input_batch_update_root_value = mock_indexer.merkle_tree.root();
+                }
+                let merkle_tree_zero_copy_account =
+                    ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data).unwrap();
+                println!(
+                    "root {:?}",
+                    merkle_tree_zero_copy_account.root_history.last().unwrap()
+                );
+                println!(
+                    "root last index {:?}",
+                    merkle_tree_zero_copy_account.root_history.last_index()
+                );
+            }
+            // Assert all bloom filters are inserted.
+            {
+                let merkle_tree_zero_copy_account =
+                    &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data)
+                        .unwrap();
+                for batch in merkle_tree_zero_copy_account.batches.iter() {
+                    println!("batch {:?}", batch);
+                    assert_eq!(batch.get_state(), BatchState::Inserted);
+                }
+            }
+            // Do one insert and expect that all roots up to
+            // merkle_tree_zero_copy_account.batches[0].root_index are zero.
+            {
+                let merkle_tree_zero_copy_account =
+                    &mut ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut mt_account_data)
+                        .unwrap();
+                let pre_batch_zero = merkle_tree_zero_copy_account
+                    .batches
+                    .get(0)
+                    .unwrap()
+                    .clone();
+
+                let value = &get_rnd_bytes(&mut rng);
+                let tx_hash = &get_rnd_bytes(&mut rng);
+                merkle_tree_zero_copy_account
+                    .insert_nullifier_into_current_batch(value, 0, tx_hash)
+                    .unwrap();
+                {
+                    let post_batch = merkle_tree_zero_copy_account
+                        .batches
+                        .get(0)
+                        .unwrap()
+                        .clone();
+                    assert_eq!(post_batch.get_state(), BatchState::CanBeFilled);
+                    assert_eq!(post_batch.get_num_inserted(), 1);
+                    let bloom_filter_store = merkle_tree_zero_copy_account
+                        .bloom_filter_stores
+                        .get_mut(0)
+                        .unwrap();
+                    let mut bloom_filter = BloomFilter::new(
+                        params.bloom_filter_num_iters as usize,
+                        params.bloom_filter_capacity,
+                        bloom_filter_store.as_mut_slice(),
+                    )
+                    .unwrap();
+                    assert!(bloom_filter.contains(value));
+                }
+
+                for root in merkle_tree_zero_copy_account.root_history.iter() {
+                    println!("root {:?}", root);
+                }
+                println!(
+                    "root in root index {:?}",
+                    merkle_tree_zero_copy_account.root_history[pre_batch_zero.root_index as usize]
+                );
+                // Check that all roots have been overwritten except the root
+                // index of the update.
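+                // E.g. with root_history_capacity = 17 the root history is a
+                // ring buffer of 17 slots: walking from last_index() + 1,
+                // every slot (mod 17) must be zeroed until we reach
+                // pre_batch_zero.root_index, which must still hold the root
+                // of the first input batch's final update.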
+                let root_history_len: u32 =
+                    merkle_tree_zero_copy_account.root_history.len() as u32;
+                let start = merkle_tree_zero_copy_account.root_history.last_index() as u32;
+                println!("start {:?}", start);
+                for absolute_index in start + 1..pre_batch_zero.root_index + root_history_len {
+                    println!("absolute index {:?}", absolute_index);
+                    let index = absolute_index % root_history_len;
+
+                    if index == pre_batch_zero.root_index {
+                        let root_index = pre_batch_zero.root_index as usize;
+
+                        assert_eq!(
+                            merkle_tree_zero_copy_account.root_history[root_index],
+                            first_input_batch_update_root_value
+                        );
+                        assert_eq!(
+                            merkle_tree_zero_copy_account.root_history[root_index - 1],
+                            [0u8; 32]
+                        );
+                        break;
+                    }
+                    println!("index {:?}", index);
+                    assert_eq!(
+                        merkle_tree_zero_copy_account.root_history[index as usize],
+                        [0u8; 32]
+                    );
+                }
+            }
+        }
+    }
+    // TODO: add test that we cannot insert a batch that is not ready
+}
diff --git a/programs/account-compression/src/state/batched_queue.rs b/programs/account-compression/src/state/batched_queue.rs
new file mode 100644
index 0000000000..253722881e
--- /dev/null
+++ b/programs/account-compression/src/state/batched_queue.rs
@@ -0,0 +1,827 @@
+use crate::utils::constants::TEST_DEFAULT_BATCH_SIZE;
+use crate::{batch::Batch, errors::AccountCompressionErrorCode, QueueMetadata, QueueType};
+use crate::{bytes_to_struct_checked, InitStateTreeAccountsInstructionData};
+use aligned_sized::aligned_sized;
+use anchor_lang::prelude::*;
+use light_bounded_vec::{BoundedVec, BoundedVecMetadata};
+use std::mem::ManuallyDrop;
+
+use super::batch::BatchState;
+
+/// Memory layout:
+/// 1. QueueMetadata
+/// 2. num_batches: u64
+/// 3. hash chain hash bounded vec
+/// 4. for num_batches every 33 bytes is a bloom filter
+/// 5. (output queue) rest of account is bounded vec
+///
+/// One BatchedQueueAccount contains multiple batches.
+#[account(zero_copy)]
+#[aligned_sized(anchor)]
+#[derive(AnchorDeserialize, Debug, Default, PartialEq)]
+pub struct BatchedQueueAccount {
+    pub metadata: QueueMetadata,
+    pub queue: BatchedQueue,
+    /// Output queue requires next index to derive compressed account hashes.
+    /// next_index in queue is ahead of or equal to the next index in the
+    /// associated batched Merkle tree account.
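+    /// E.g. after five `insert_into_current_batch` calls the queue's
+    /// next_index is 5, while the tree's next_index only advances once a
+    /// batch append proof is applied.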
+    pub next_index: u64,
+}
+
+#[account(zero_copy)]
+#[derive(AnchorDeserialize, Debug, Default, PartialEq)]
+pub struct BatchedQueue {
+    pub num_batches: u64,
+    pub batch_size: u64,
+    pub zkp_batch_size: u64,
+    pub currently_processing_batch_index: u64,
+    pub next_full_batch_index: u64,
+    pub bloom_filter_capacity: u64,
+}
+
+impl BatchedQueue {
+    pub fn get_num_zkp_batches(&self) -> u64 {
+        self.batch_size / self.zkp_batch_size
+    }
+
+    pub fn get_output_queue_default(
+        batch_size: u64,
+        zkp_batch_size: u64,
+        num_batches: u64,
+    ) -> Self {
+        BatchedQueue {
+            num_batches,
+            zkp_batch_size,
+            batch_size,
+            currently_processing_batch_index: 0,
+            next_full_batch_index: 0,
+            bloom_filter_capacity: 0,
+        }
+    }
+
+    pub fn get_input_queue_default(
+        batch_size: u64,
+        bloom_filter_capacity: u64,
+        zkp_batch_size: u64,
+        num_batches: u64,
+    ) -> Self {
+        BatchedQueue {
+            num_batches,
+            zkp_batch_size,
+            batch_size,
+            currently_processing_batch_index: 0,
+            next_full_batch_index: 0,
+            bloom_filter_capacity,
+        }
+    }
+}
+
+pub fn queue_account_size(account: &BatchedQueue, queue_type: u64) -> Result<usize> {
+    let (num_value_vec, num_bloom_filter_stores, num_hashchain_store) =
+        account.get_size_parameters(queue_type)?;
+    let account_size = if queue_type != QueueType::Output as u64 {
+        0
+    } else {
+        BatchedQueueAccount::LEN
+    };
+    let batches_size = std::mem::size_of::<BoundedVecMetadata>()
+        + (std::mem::size_of::<Batch>() * account.num_batches as usize);
+    let value_vecs_size =
+        (std::mem::size_of::<BoundedVecMetadata>() + 32 * account.batch_size as usize)
+            * num_value_vec;
+    // Bloom filter capacity is in bits.
+    let bloom_filter_stores_size = (std::mem::size_of::<BoundedVecMetadata>()
+        + account.bloom_filter_capacity as usize / 8)
+        * num_bloom_filter_stores;
+    let hashchain_store_size = (std::mem::size_of::<BoundedVecMetadata>()
+        + 32 * account.get_num_zkp_batches() as usize)
+        * num_hashchain_store;
+    let size = account_size
+        + batches_size
+        + value_vecs_size
+        + bloom_filter_stores_size
+        + hashchain_store_size;
+    Ok(size)
+}
+
+impl BatchedQueueAccount {
+    pub fn get_size_parameters(&self) -> Result<(usize, usize, usize)> {
+        self.queue.get_size_parameters(self.metadata.queue_type)
+    }
+    pub fn init(
+        &mut self,
+        meta_data: QueueMetadata,
+        num_batches: u64,
+        batch_size: u64,
+        zkp_batch_size: u64,
+        bloom_filter_capacity: u64,
+    ) -> Result<()> {
+        self.metadata = meta_data;
+        self.queue.init(num_batches, batch_size, zkp_batch_size)?;
+        self.queue.bloom_filter_capacity = bloom_filter_capacity;
+        Ok(())
+    }
+}
+
+impl BatchedQueue {
+    pub fn init(&mut self, num_batches: u64, batch_size: u64, zkp_batch_size: u64) -> Result<()> {
+        self.num_batches = num_batches;
+        self.batch_size = batch_size;
+        // Check that batch size is divisible by zkp_batch_size.
+        if batch_size % zkp_batch_size != 0 {
+            return err!(AccountCompressionErrorCode::BatchSizeNotDivisibleByZkpBatchSize);
+        }
+        self.zkp_batch_size = zkp_batch_size;
+        Ok(())
+    }
+
+    pub fn get_size_parameters(&self, queue_type: u64) -> Result<(usize, usize, usize)> {
+        let num_batches = self.num_batches as usize;
+        // Input queues don't store values.
+        let num_value_stores = if queue_type == QueueType::Output as u64 {
+            num_batches
+        } else if queue_type == QueueType::Input as u64 {
+            0
+        } else {
+            return err!(AccountCompressionErrorCode::InvalidQueueType);
+        };
+        // Output queues don't use bloom filters.
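+        // I.e. an input queue gets one bloom filter store per batch and no
+        // value stores; an output queue gets one value store per batch and,
+        // as long as bloom_filter_capacity == 0, no bloom filter stores.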
+        let num_stores = if queue_type == QueueType::Input as u64 {
+            num_batches
+        } else if queue_type == QueueType::Output as u64 && self.bloom_filter_capacity == 0 {
+            0
+        } else {
+            return err!(AccountCompressionErrorCode::InvalidQueueType);
+        };
+        Ok((num_value_stores, num_stores, num_batches))
+    }
+}
+
+/// Batched output queue
+#[derive(Debug, Clone)]
+pub struct ZeroCopyBatchedQueueAccount {
+    account: *mut BatchedQueueAccount,
+    pub batches: ManuallyDrop<BoundedVec<Batch>>,
+    pub value_vecs: Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+    pub bloom_filter_stores: Vec<ManuallyDrop<BoundedVec<u8>>>,
+    /// hashchain_store_capacity = batch_capacity / zkp_batch_size
+    pub hashchain_store: Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+}
+
+impl ZeroCopyBatchedQueueAccount {
+    pub fn get_account(&self) -> &BatchedQueueAccount {
+        unsafe { &*self.account }
+    }
+
+    pub fn get_account_mut(&mut self) -> &mut BatchedQueueAccount {
+        unsafe { &mut *self.account }
+    }
+
+    pub fn from_bytes_mut(account_data: &mut [u8]) -> Result<ZeroCopyBatchedQueueAccount> {
+        let account = bytes_to_struct_checked::<BatchedQueueAccount, false>(account_data)?;
+        unsafe {
+            let (num_value_stores, num_stores, num_hashchain_stores) =
+                (*account).get_size_parameters()?;
+
+            let (batches, value_vecs, bloom_filter_stores, hashchain_store) =
+                output_queue_from_bytes(
+                    num_value_stores,
+                    num_stores,
+                    num_hashchain_stores,
+                    account_data,
+                )?;
+            Ok(ZeroCopyBatchedQueueAccount {
+                account,
+                batches,
+                value_vecs,
+                bloom_filter_stores,
+                hashchain_store,
+            })
+        }
+    }
+
+    pub fn init(
+        metadata: QueueMetadata,
+        num_batches_output_queue: u64,
+        output_queue_batch_size: u64,
+        output_queue_zkp_batch_size: u64,
+        account_data: &mut [u8],
+        num_iters: u64,
+        bloom_filter_capacity: u64,
+    ) -> Result<ZeroCopyBatchedQueueAccount> {
+        let account = bytes_to_struct_checked::<BatchedQueueAccount, true>(account_data)?;
+        unsafe {
+            (*account).init(
+                metadata,
+                num_batches_output_queue,
+                output_queue_batch_size,
+                output_queue_zkp_batch_size,
+                bloom_filter_capacity,
+            )?;
+
+            let (batches, value_vecs, bloom_filter_stores, hashchain_store) = init_queue(
+                &(*account).queue,
+                (*account).metadata.queue_type,
+                account_data,
+                num_iters,
+                bloom_filter_capacity,
+                &mut 0,
+            )?;
+            Ok(ZeroCopyBatchedQueueAccount {
+                account,
+                batches,
+                value_vecs,
+                bloom_filter_stores,
+                hashchain_store,
+            })
+        }
+    }
+
+    pub fn insert_into_current_batch(&mut self, value: &[u8; 32]) -> Result<()> {
+        let current_index = self.get_account().next_index;
+        unsafe {
+            insert_into_current_batch(
+                (*self.account).metadata.queue_type,
+                &mut (*self.account).queue,
+                &mut self.batches,
+                &mut self.value_vecs,
+                &mut self.bloom_filter_stores,
+                &mut self.hashchain_store,
+                value,
+                None,
+                Some(current_index),
+            )?;
+            (*self.account).next_index += 1;
+        }
+        Ok(())
+    }
+
+    /// Zeroes out a leaf by index if it exists in one of the queue's value
+    /// vecs; returns an error if the leaf is not found or does not match.
+    pub fn prove_inclusion_by_index_and_zero_out_leaf(
+        &mut self,
+        leaf_index: u64,
+        value: &[u8; 32],
+    ) -> Result<()> {
+        for (batch_index, batch) in self.batches.iter().enumerate() {
+            if batch.value_is_inserted_in_batch(leaf_index)? {
+                let index = batch.get_value_index_in_batch(leaf_index)?;
+                let element = self.value_vecs[batch_index]
+                    .get_mut(index as usize)
+                    .ok_or(AccountCompressionErrorCode::InclusionProofByIndexFailed)?;
+
+                if element == value {
+                    *element = [0; 32];
+                    return Ok(());
+                } else {
+                    return err!(AccountCompressionErrorCode::InclusionProofByIndexFailed);
+                }
+            }
+        }
+        err!(AccountCompressionErrorCode::InclusionProofByIndexFailed)
+    }
+
+    pub fn get_batch_num_inserted_in_current_batch(&self) -> u64 {
+        let current_batch_index = self.get_account().queue.currently_processing_batch_index;
+        let batch = self.batches.get(current_batch_index as usize).unwrap();
+        batch.get_num_inserted() + batch.get_current_zkp_batch_index() * batch.zkp_batch_size
+    }
+}
+
+#[allow(clippy::ptr_arg)]
+#[allow(clippy::type_complexity)]
+pub fn insert_into_current_batch(
+    queue_type: u64,
+    account: &mut BatchedQueue,
+    batches: &mut ManuallyDrop<BoundedVec<Batch>>,
+    value_vecs: &mut Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+    bloom_filter_stores: &mut Vec<ManuallyDrop<BoundedVec<u8>>>,
+    hashchain_store: &mut Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+    value: &[u8; 32],
+    leaves_hash_value: Option<&[u8; 32]>,
+    current_index: Option<u64>,
+) -> Result<(Option<u32>, Option<u64>)> {
+    let len = batches.len();
+    let mut root_index = None;
+    let mut sequence_number = None;
+    let currently_processing_batch_index = account.currently_processing_batch_index as usize;
+    // Insert value into current batch.
+    {
+        let mut bloom_filter_stores = bloom_filter_stores.get_mut(currently_processing_batch_index);
+        let mut value_store = value_vecs.get_mut(currently_processing_batch_index);
+        let mut hashchain_store = hashchain_store.get_mut(currently_processing_batch_index);
+
+        let current_batch = batches.get_mut(currently_processing_batch_index).unwrap();
+        let mut wipe = false;
+        if current_batch.get_state() == BatchState::Inserted {
+            current_batch.advance_state_to_can_be_filled()?;
+            if let Some(current_index) = current_index {
+                current_batch.start_index = current_index;
+            }
+            wipe = true;
+        }
+        // The current batch must accept inserts; error if it is still
+        // waiting for a tree update.
+        if current_batch.get_state() == BatchState::ReadyToUpdateTree {
+            for batch in batches.iter_mut() {
+                msg!("batch {:?}", batch);
+            }
+            return err!(AccountCompressionErrorCode::BatchNotReady);
+        }
+
+        if wipe {
+            if let Some(bloom_filter_stores) = bloom_filter_stores.as_mut() {
+                if !current_batch.bloom_filter_is_wiped {
+                    (*bloom_filter_stores)
+                        .as_mut_slice()
+                        .iter_mut()
+                        .for_each(|x| *x = 0);
+                    // Saving sequence number and root index for the batch.
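+                    // (Callers, e.g. the batched Merkle tree update, can use
+                    // the returned (root_index, sequence_number) to avoid
+                    // overwriting a root the batch still references.)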
+                    // When the batch is cleared, check that the tree's
+                    // sequence number is greater than or equal to the batch's
+                    // sequence number; if it is not, advance the current root
+                    // index to the batch's root index.
+                    if current_batch.sequence_number != 0 {
+                        if root_index.is_none() && sequence_number.is_none() {
+                            root_index = Some(current_batch.root_index);
+                            sequence_number = Some(current_batch.sequence_number);
+                            current_batch.sequence_number = 0;
+                        } else {
+                            unreachable!("root_index is already set; this is a bug.");
+                        }
+                    }
+                } else {
+                    current_batch.bloom_filter_is_wiped = false;
+                }
+            }
+            if let Some(value_store) = value_store.as_mut() {
+                (*value_store).clear();
+            }
+            if let Some(hashchain_store) = hashchain_store.as_mut() {
+                (*hashchain_store).clear();
+            }
+        }
+
+        let queue_type = QueueType::from(queue_type);
+        match queue_type {
+            // QueueType::Address => current_batch.insert_and_store(
+            //     value,
+            //     bloom_filter_stores.unwrap().as_mut_slice(),
+            //     value_store.unwrap(),
+            //     hashchain_store.unwrap(),
+            // ),
+            QueueType::Input => current_batch.insert(
+                value,
+                leaves_hash_value.unwrap(),
+                bloom_filter_stores.unwrap().as_mut_slice(),
+                hashchain_store.as_mut().unwrap(),
+            ),
+            QueueType::Output => current_batch.store_and_hash_value(
+                value,
+                value_store.unwrap(),
+                hashchain_store.unwrap(),
+            ),
+            _ => err!(AccountCompressionErrorCode::InvalidQueueType),
+        }?;
+    }
+
+    // If the queue has bloom filters, check non-inclusion of the value in the
+    // bloom filters of the other batches. (The current batch is already
+    // checked by the insertion.)
+    if !bloom_filter_stores.is_empty() {
+        for index in currently_processing_batch_index + 1..(len + currently_processing_batch_index)
+        {
+            let index = index % len;
+            let bloom_filter_stores = bloom_filter_stores.get_mut(index).unwrap().as_mut_slice();
+            let current_batch = batches.get_mut(index).unwrap();
+            current_batch.check_non_inclusion(value, bloom_filter_stores)?;
+        }
+    }
+
+    if batches[account.currently_processing_batch_index as usize].get_state()
+        == BatchState::ReadyToUpdateTree
+    {
+        account.currently_processing_batch_index += 1;
+        account.currently_processing_batch_index %= len as u64;
+    }
+    Ok((root_index, sequence_number))
+}
+
+#[allow(clippy::type_complexity)]
+pub fn output_queue_from_bytes(
+    num_value_stores: usize,
+    num_stores: usize,
+    num_hashchain_stores: usize,
+    account_data: &mut [u8],
+) -> Result<(
+    ManuallyDrop<BoundedVec<Batch>>,
+    Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+    Vec<ManuallyDrop<BoundedVec<u8>>>,
+    Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+)> {
+    let mut start_offset = BatchedQueueAccount::LEN;
+    let batches =
+        BoundedVec::deserialize(account_data, &mut start_offset).map_err(ProgramError::from)?;
+    let value_vecs =
+        BoundedVec::deserialize_multiple(num_value_stores, account_data, &mut start_offset)
+            .map_err(ProgramError::from)?;
+    let bloom_filter_stores =
+        BoundedVec::deserialize_multiple(num_stores, account_data, &mut start_offset)
+            .map_err(ProgramError::from)?;
+    let hashchain_store =
+        BoundedVec::deserialize_multiple(num_hashchain_stores, account_data, &mut start_offset)
+            .map_err(ProgramError::from)?;
+    Ok((batches, value_vecs, bloom_filter_stores, hashchain_store))
+}
+
+#[allow(clippy::type_complexity)]
+pub fn input_queue_bytes(
+    account: &BatchedQueue,
+    account_data: &mut [u8],
+    queue_type: u64,
+    start_offset: &mut usize,
+) -> Result<(
+    ManuallyDrop<BoundedVec<Batch>>,
+    Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+    Vec<ManuallyDrop<BoundedVec<u8>>>,
+    Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+)> {
+    let (num_value_stores, num_stores, hashchain_store_capacity) =
+        account.get_size_parameters(queue_type)?;
+    if queue_type == QueueType::Output as u64 {
+        *start_offset += BatchedQueueAccount::LEN;
+    }
+    let batches =
+        BoundedVec::deserialize(account_data, start_offset).map_err(ProgramError::from)?;
+    let value_vecs = BoundedVec::deserialize_multiple(num_value_stores, account_data, start_offset)
+        .map_err(ProgramError::from)?;
+    let bloom_filter_stores =
+        BoundedVec::deserialize_multiple(num_stores, account_data, start_offset)
+            .map_err(ProgramError::from)?;
+    let hashchain_store =
+        BoundedVec::deserialize_multiple(hashchain_store_capacity, account_data, start_offset)
+            .map_err(ProgramError::from)?;
+
+    Ok((batches, value_vecs, bloom_filter_stores, hashchain_store))
+}
+
+#[allow(clippy::type_complexity)]
+pub fn init_queue(
+    account: &BatchedQueue,
+    queue_type: u64,
+    account_data: &mut [u8],
+    num_iters: u64,
+    bloom_filter_capacity: u64,
+    start_offset: &mut usize,
+) -> Result<(
+    ManuallyDrop<BoundedVec<Batch>>,
+    Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+    Vec<ManuallyDrop<BoundedVec<u8>>>,
+    Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+)> {
+    if account_data.len() - *start_offset != queue_account_size(account, queue_type)? {
+        msg!("*start_offset {:?}", *start_offset);
+        msg!("account_data.len() {:?}", account_data.len());
+        msg!("net size {:?}", account_data.len() - *start_offset);
+        msg!(
+            "queue_account_size {:?}",
+            queue_account_size(account, queue_type)?
+        );
+        return err!(AccountCompressionErrorCode::SizeMismatch);
+    }
+    let (num_value_stores, num_stores, num_hashchain_stores) =
+        account.get_size_parameters(queue_type)?;
+
+    if queue_type == QueueType::Output as u64 {
+        *start_offset += BatchedQueueAccount::LEN;
+    }
+
+    let mut batches = BoundedVec::init(
+        account.num_batches as usize,
+        account_data,
+        start_offset,
+        false,
+    )
+    .map_err(ProgramError::from)?;
+
+    for i in 0..account.num_batches {
+        batches
+            .push(Batch::new(
+                num_iters,
+                bloom_filter_capacity,
+                account.batch_size,
+                account.zkp_batch_size,
+                account.batch_size * i,
+            ))
+            .map_err(ProgramError::from)?;
+    }
+
+    let value_vecs = BoundedVec::init_multiple(
+        num_value_stores,
+        account.batch_size as usize,
+        account_data,
+        start_offset,
+        false,
+    )
+    .map_err(ProgramError::from)?;
+
+    let bloom_filter_stores = BoundedVec::init_multiple(
+        num_stores,
+        account.bloom_filter_capacity as usize / 8,
+        account_data,
+        start_offset,
+        true,
+    )
+    .map_err(ProgramError::from)?;
+
+    let hashchain_store = BoundedVec::init_multiple(
+        num_hashchain_stores,
+        account.get_num_zkp_batches() as usize,
+        account_data,
+        start_offset,
+        false,
+    )
+    .map_err(ProgramError::from)?;
+
+    Ok((batches, value_vecs, bloom_filter_stores, hashchain_store))
+}
+
+pub fn get_output_queue_account_size_default() -> usize {
+    let account = BatchedQueueAccount {
+        metadata: QueueMetadata::default(),
+        next_index: 0,
+        queue: BatchedQueue {
+            num_batches: 2,
+            batch_size: TEST_DEFAULT_BATCH_SIZE,
+            zkp_batch_size: 10,
+            ..Default::default()
+        },
+    };
+    queue_account_size(&account.queue, QueueType::Output as u64).unwrap()
+}
+
+pub fn get_output_queue_account_size_from_params(
+    ix_data: InitStateTreeAccountsInstructionData,
+) -> usize {
+    let account = BatchedQueueAccount {
+        metadata: QueueMetadata::default(),
+        next_index: 0,
+        queue: BatchedQueue {
+            num_batches: ix_data.output_queue_num_batches,
+            batch_size: ix_data.output_queue_batch_size,
+            zkp_batch_size: ix_data.output_queue_zkp_batch_size,
+            ..Default::default()
+        },
+    };
+    queue_account_size(&account.queue, QueueType::Output as u64).unwrap()
+}
+
+pub fn get_output_queue_account_size(
+    batch_size: u64,
+    zkp_batch_size: u64,
+    num_batches: u64,
+) -> usize {
+    let account = BatchedQueueAccount {
+        metadata: QueueMetadata::default(),
+        next_index: 0,
+        queue: BatchedQueue {
+            num_batches,
+            batch_size,
+            zkp_batch_size,
+            ..Default::default()
+        },
+    };
+    queue_account_size(&account.queue, QueueType::Output as u64).unwrap()
+}
+
+pub fn assert_queue_inited(
+    queue: BatchedQueue,
+    ref_queue: BatchedQueue,
+    queue_type: u64,
+    value_vecs: &mut Vec<ManuallyDrop<BoundedVec<[u8; 32]>>>,
+    bloom_filter_stores: &mut Vec<ManuallyDrop<BoundedVec<u8>>>,
+    batches: &mut ManuallyDrop<BoundedVec<Batch>>,
+    num_batches: usize,
+    num_iters: u64,
+) {
+    assert_eq!(queue, ref_queue, "queue mismatch");
+    assert_eq!(batches.len(), num_batches, "batches mismatch");
+    for (i, batch) in batches.iter().enumerate() {
+        let ref_batch = Batch::new(
+            num_iters,
+            ref_queue.bloom_filter_capacity,
+            ref_queue.batch_size,
+            ref_queue.zkp_batch_size,
+            ref_queue.batch_size * i as u64,
+        );
+
+        assert_eq!(batch, &ref_batch, "batch mismatch");
+    }
+
+    if queue_type == QueueType::Input as u64 {
+        assert_eq!(value_vecs.len(), 0, "value_vecs mismatch");
+        assert_eq!(value_vecs.capacity(), 0, "value_vecs mismatch");
+    } else {
+        assert_eq!(value_vecs.capacity(), num_batches, "value_vecs mismatch");
+        assert_eq!(value_vecs.len(), num_batches, "value_vecs mismatch");
+    }
+
+    if queue_type == QueueType::Output as u64 {
+        assert_eq!(
+            bloom_filter_stores.capacity(),
+            0,
+            "bloom_filter_stores mismatch"
+        );
+    } else {
+        assert_eq!(
+            bloom_filter_stores.capacity(),
+            num_batches,
+            "bloom_filter_stores mismatch"
+        );
+        assert_eq!(
+            bloom_filter_stores.len(),
+            num_batches,
+            "bloom_filter_stores mismatch"
+        );
+    }
+
+    for vec in bloom_filter_stores {
+        assert_eq!(
+            vec.metadata().capacity() * 8,
+            queue.bloom_filter_capacity as usize,
+            "bloom_filter_capacity mismatch"
+        );
+    }
+
+    for vec in value_vecs.iter() {
+        assert_eq!(
+            vec.metadata().capacity(),
+            queue.batch_size as usize,
+            "batch_size mismatch"
+        );
+        assert_eq!(vec.len(), 0, "batch_size mismatch");
+    }
+}
+
+pub fn assert_queue_zero_copy_inited(
+    account_data: &mut [u8],
+    ref_account: BatchedQueueAccount,
+    num_iters: u64,
+) {
+    let mut zero_copy_account =
+        ZeroCopyBatchedQueueAccount::from_bytes_mut(account_data).expect("from_bytes_mut failed");
+    let num_batches = ref_account.queue.num_batches as usize;
+    let queue = zero_copy_account.get_account().queue;
+    let queue_type = zero_copy_account.get_account().metadata.queue_type;
+    assert_eq!(
+        zero_copy_account.get_account().metadata,
+        ref_account.metadata,
+        "metadata mismatch"
+    );
+    assert_queue_inited(
+        queue,
+        ref_account.queue,
+        queue_type,
+        &mut zero_copy_account.value_vecs,
+        &mut zero_copy_account.bloom_filter_stores,
+        &mut zero_copy_account.batches,
+        num_batches,
+        num_iters,
+    );
+}
+
+#[cfg(test)]
+pub mod tests {
+
+    use crate::{AccessMetadata, RolloverMetadata};
+
+    use super::*;
+
+    pub fn get_test_account_and_account_data(
+        batch_size: u64,
+        num_batches: u64,
+        queue_type: QueueType,
+        bloom_filter_capacity: u64,
+    ) -> (BatchedQueueAccount, Vec<u8>) {
+        let metadata = QueueMetadata {
+            next_queue: Pubkey::new_unique(),
+            access_metadata: AccessMetadata::default(),
+            rollover_metadata: RolloverMetadata::default(),
+            queue_type: queue_type as u64,
+            associated_merkle_tree: Pubkey::new_unique(),
+        };
+
+        let account = BatchedQueueAccount {
+            metadata: metadata.clone(),
+            next_index: 0,
+            queue: BatchedQueue {
+                batch_size,
+                num_batches,
+                currently_processing_batch_index: 0,
+                next_full_batch_index: 0,
+                bloom_filter_capacity,
+                zkp_batch_size: 10,
+            },
+        };
+        let account_data: Vec<u8> =
+            vec![0; queue_account_size(&account.queue, account.metadata.queue_type).unwrap()];
+        (account, account_data)
+    }
+
+    #[test]
+    fn test_output_queue_account() {
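+        // Layout smoke test: init a two-batch output queue account,
+        // deserialize it back, and check that a value can be inserted into
+        // the current batch.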
+        let batch_size = 100;
+        // 1 batch in progress, 1 batch ready to be processed
+        let num_batches = 2;
+        let bloom_filter_capacity = 0;
+        let bloom_filter_num_iters = 0;
+        for queue_type in [QueueType::Output] {
+            let (ref_account, mut account_data) = get_test_account_and_account_data(
+                batch_size,
+                num_batches,
+                queue_type,
+                bloom_filter_capacity,
+            );
+            ZeroCopyBatchedQueueAccount::init(
+                ref_account.metadata,
+                num_batches,
+                batch_size,
+                10,
+                &mut account_data,
+                bloom_filter_num_iters,
+                bloom_filter_capacity,
+            )
+            .unwrap();
+
+            assert_queue_zero_copy_inited(&mut account_data, ref_account, bloom_filter_num_iters);
+            let mut zero_copy_account =
+                ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut account_data).unwrap();
+            let value = [1u8; 32];
+            zero_copy_account.insert_into_current_batch(&value).unwrap();
+            // assert!(zero_copy_account.insert_into_current_batch(&value).is_ok());
+            if queue_type != QueueType::Output {
+                assert!(zero_copy_account.insert_into_current_batch(&value).is_err());
+            }
+        }
+    }
+
+    #[test]
+    fn test_value_exists_in_value_vec_present() {
+        let (account, mut account_data) =
+            get_test_account_and_account_data(100, 2, QueueType::Output, 0);
+        let mut zero_copy_account = ZeroCopyBatchedQueueAccount::init(
+            account.metadata.clone(),
+            2,
+            100,
+            10,
+            &mut account_data,
+            0,
+            0,
+        )
+        .unwrap();
+
+        let value = [1u8; 32];
+        let value2 = [2u8; 32];
+
+        // 1. Functional for 1 value
+        {
+            zero_copy_account.insert_into_current_batch(&value).unwrap();
+            assert_eq!(
+                zero_copy_account.prove_inclusion_by_index_and_zero_out_leaf(1, &value),
+                anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed)
+            );
+            assert_eq!(
+                zero_copy_account.prove_inclusion_by_index_and_zero_out_leaf(0, &value2),
+                anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed)
+            );
+            assert!(zero_copy_account
+                .prove_inclusion_by_index_and_zero_out_leaf(0, &value)
+                .is_ok());
+        }
+        // 2. Does not succeed on second invocation
+        {
+            assert_eq!(
+                zero_copy_account.prove_inclusion_by_index_and_zero_out_leaf(0, &value),
+                anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed)
+            );
+        }
+
+        // 3. Functional for 2 values
+        {
+            zero_copy_account
+                .insert_into_current_batch(&value2)
+                .unwrap();
+
+            assert_eq!(
+                zero_copy_account.prove_inclusion_by_index_and_zero_out_leaf(0, &value2),
+                anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed)
+            );
+            assert!(zero_copy_account
+                .prove_inclusion_by_index_and_zero_out_leaf(1, &value2)
+                .is_ok());
+        }
+        // 4. Does not succeed on second invocation
+        {
+            assert_eq!(
+                zero_copy_account.prove_inclusion_by_index_and_zero_out_leaf(1, &value2),
+                anchor_lang::err!(AccountCompressionErrorCode::InclusionProofByIndexFailed)
+            );
+        }
+    }
+}
diff --git a/programs/account-compression/src/state/mod.rs b/programs/account-compression/src/state/mod.rs
index 5fb8a86758..f88958c732 100644
--- a/programs/account-compression/src/state/mod.rs
+++ b/programs/account-compression/src/state/mod.rs
@@ -21,3 +21,7 @@ pub use rollover::*;
 
 pub mod group_authority;
 pub use group_authority::*;
+
+pub mod batch;
+pub mod batched_merkle_tree;
+pub mod batched_queue;
diff --git a/programs/account-compression/src/state/queue.rs b/programs/account-compression/src/state/queue.rs
index 5f8a2835a6..1ffab400ec 100644
--- a/programs/account-compression/src/state/queue.rs
+++ b/programs/account-compression/src/state/queue.rs
@@ -10,7 +10,7 @@ use light_hash_set::{zero_copy::HashSetZeroCopy, HashSet};
 use std::mem;
 
 #[account(zero_copy)]
-#[derive(AnchorDeserialize, Debug, PartialEq)]
+#[derive(AnchorDeserialize, Debug, PartialEq, Default)]
 pub struct QueueMetadata {
     pub access_metadata: AccessMetadata,
     pub rollover_metadata: RolloverMetadata,
@@ -27,6 +27,22 @@ pub struct QueueMetadata {
 pub enum QueueType {
     NullifierQueue = 1,
     AddressQueue = 2,
+    Input = 3,
+    Address = 4,
+    Output = 5,
+}
+
+impl From<u64> for QueueType {
+    fn from(value: u64) -> Self {
+        match value {
+            1 => QueueType::NullifierQueue,
+            2 => QueueType::AddressQueue,
+            3 => QueueType::Input,
+            4 => QueueType::Address,
+            5 => QueueType::Output,
+            _ => panic!("Invalid queue type"),
+        }
+    }
 }
 
 pub fn check_queue_type(queue_type: &u64, expected_queue_type: &QueueType) -> Result<()> {
@@ -36,6 +52,7 @@
         Ok(())
     }
 }
+
 impl QueueMetadata {
     pub fn init(
diff --git a/programs/account-compression/src/utils/constants.rs b/programs/account-compression/src/utils/constants.rs
index 7eb9e04f54..d37e757d22 100644
--- a/programs/account-compression/src/utils/constants.rs
+++ b/programs/account-compression/src/utils/constants.rs
@@ -42,3 +42,16 @@ pub const NOOP_PUBKEY: [u8; 32] = [
     11, 188, 15, 192, 187, 71, 202, 47, 116, 196, 17, 46, 148, 171, 19, 207, 163, 198, 52, 229,
     220, 23, 234, 203, 3, 205, 26, 35, 205, 126, 120, 124,
 ];
+
+#[constant]
+pub const TEST_DEFAULT_BATCH_SIZE: u64 = 50;
+
+#[constant]
+pub const TEST_DEFAULT_ZKP_BATCH_SIZE: u64 = 10;
+
+#[constant]
+pub const DEFAULT_BATCH_SIZE: u64 = 50000;
+#[constant]
+pub const DEFAULT_ZKP_BATCH_SIZE: u64 = 500;
+
+pub const DEFAULT_CPI_CONTEXT_ACCOUNT_SIZE: u64 = 20 * 1024 + 8;
diff --git a/programs/account-compression/src/utils/queue.rs b/programs/account-compression/src/utils/queue.rs
index c66d8a1469..53fe454ee3 100644
--- a/programs/account-compression/src/utils/queue.rs
+++ b/programs/account-compression/src/utils/queue.rs
@@ -2,30 +2,34 @@ use std::collections::HashMap;
 
 use anchor_lang::prelude::{AccountInfo, Pubkey};
 
+use crate::QueueType;
+
 /// Mapping of address queue public keys to a bundle containing:
 ///
 /// * The queue.
 /// * Associated Merkle tree.
 /// * Addresses to insert.
-pub type QueueMap<'info> = HashMap<Pubkey, QueueBundle<'info>>;
+pub type QueueMap<'a, 'info> = HashMap<Pubkey, QueueBundle<'a, 'info>>;
 
 /// A bundle containing:
 ///
 /// * Address queue.
 /// * Merkle tree associated with that queue.
 /// * Addresses to insert to that queue.
-pub struct QueueBundle<'info> {
-    pub queue: &'info AccountInfo<'info>,
-    pub merkle_tree: &'info AccountInfo<'info>,
-    pub elements: Vec<[u8; 32]>,
+pub struct QueueBundle<'a, 'info> {
+    pub queue_type: QueueType,
+    pub accounts: Vec<&'info AccountInfo<'info>>,
+    pub elements: Vec<&'a [u8; 32]>,
+    pub indices: Vec<u64>,
 }
 
-impl<'info> QueueBundle<'info> {
-    pub fn new(queue: &'info AccountInfo<'info>, merkle_tree: &'info AccountInfo<'info>) -> Self {
+impl<'a, 'info> QueueBundle<'a, 'info> {
+    pub fn new(queue_type: QueueType, accounts: Vec<&'info AccountInfo<'info>>) -> Self {
         Self {
-            queue,
-            merkle_tree,
+            queue_type,
+            accounts,
             elements: Vec::new(),
+            indices: Vec::new(),
         }
     }
 }
diff --git a/programs/compressed-token/src/burn.rs b/programs/compressed-token/src/burn.rs
index 3264eef58b..2252c82ade 100644
--- a/programs/compressed-token/src/burn.rs
+++ b/programs/compressed-token/src/burn.rs
@@ -44,11 +44,16 @@ pub fn process_burn<'a, 'b, 'c, 'info: 'b + 'c>(
         ctx.remaining_accounts,
         &mint,
     )?;
+    let proof = if inputs.proof == CompressedProof::default() {
+        None
+    } else {
+        Some(inputs.proof)
+    };
     cpi_execute_compressed_transaction_transfer(
         ctx.accounts,
         compressed_input_accounts,
         &output_compressed_accounts,
-        Some(inputs.proof),
+        proof,
         inputs.cpi_context,
         ctx.accounts.cpi_authority_pda.to_account_info(),
         ctx.accounts.light_system_program.to_account_info(),
@@ -193,7 +198,7 @@ pub mod sdk {
     pub struct CreateBurnInstructionInputs {
         pub fee_payer: Pubkey,
         pub authority: Pubkey,
-        pub root_indices: Vec<u16>,
+        pub root_indices: Vec<Option<u16>>,
         pub proof: CompressedProof,
         pub input_token_data: Vec<TokenData>,
         pub input_compressed_accounts: Vec<CompressedAccount>,
diff --git a/programs/compressed-token/src/delegation.rs b/programs/compressed-token/src/delegation.rs
index b5fcabe3a1..4b581ca228 100644
--- a/programs/compressed-token/src/delegation.rs
+++ b/programs/compressed-token/src/delegation.rs
@@ -17,6 +17,7 @@ use crate::{
     ErrorCode, GenericInstruction,
 };
 
+// TODO: add instruction which accepts an optional proof
 #[derive(Debug, Clone, AnchorSerialize, AnchorDeserialize)]
 pub struct CompressedTokenInstructionDataApprove {
     pub proof: CompressedProof,
@@ -53,11 +54,16 @@ pub fn process_approve<'a, 'b, 'c, 'info: 'b + 'c>(
         &ctx.accounts.authority.key(),
         ctx.remaining_accounts,
     )?;
+    let proof = if inputs.proof == CompressedProof::default() {
+        None
+    } else {
+        Some(inputs.proof)
+    };
     cpi_execute_compressed_transaction_transfer(
         ctx.accounts,
         compressed_input_accounts,
         &output_compressed_accounts,
-        Some(inputs.proof),
+        proof,
         inputs.cpi_context,
         ctx.accounts.cpi_authority_pda.to_account_info(),
         ctx.accounts.light_system_program.to_account_info(),
@@ -182,11 +188,16 @@ pub fn process_revoke<'a, 'b, 'c, 'info: 'b + 'c>(
         &ctx.accounts.authority.key(),
         ctx.remaining_accounts,
     )?;
+    let proof = if inputs.proof == CompressedProof::default() {
+        None
+    } else {
+        Some(inputs.proof)
+    };
     cpi_execute_compressed_transaction_transfer(
         ctx.accounts,
         compressed_input_accounts,
         &output_compressed_accounts,
-        Some(inputs.proof),
+        proof,
         inputs.cpi_context,
         ctx.accounts.cpi_authority_pda.to_account_info(),
         ctx.accounts.light_system_program.to_account_info(),
@@ -272,7 +283,7 @@ pub mod sdk {
     pub struct CreateApproveInstructionInputs {
         pub fee_payer: Pubkey,
         pub authority: Pubkey,
-        pub root_indices: Vec<u16>,
+        pub root_indices: Vec<Option<u16>>,
         pub proof: CompressedProof,
         pub input_token_data: Vec<TokenData>,
         pub input_compressed_accounts: Vec<CompressedAccount>,
@@ -361,7 +372,7 @@ pub mod sdk {
     pub struct CreateRevokeInstructionInputs {
         pub fee_payer: Pubkey,
         pub authority: Pubkey,
-        pub root_indices: Vec<u16>,
+        pub root_indices: Vec<Option<u16>>,
         pub proof: CompressedProof,
         pub input_token_data: Vec<TokenData>,
         pub input_compressed_accounts: Vec<CompressedAccount>,
diff --git a/programs/compressed-token/src/freeze.rs b/programs/compressed-token/src/freeze.rs
index 22bbb3b315..8d64136d3d 100644
--- a/programs/compressed-token/src/freeze.rs
+++ b/programs/compressed-token/src/freeze.rs
@@ -52,11 +52,16 @@ pub fn process_freeze_or_thaw<
         &ctx.accounts.mint.key(),
         ctx.remaining_accounts,
     )?;
+    let proof = if inputs.proof == CompressedProof::default() {
+        None
+    } else {
+        Some(inputs.proof)
+    };
     cpi_execute_compressed_transaction_transfer(
         ctx.accounts,
         compressed_input_accounts,
         &output_compressed_accounts,
-        Some(inputs.proof),
+        proof,
         inputs.cpi_context,
         ctx.accounts.cpi_authority_pda.to_account_info(),
         ctx.accounts.light_system_program.to_account_info(),
@@ -212,7 +217,7 @@ pub mod sdk {
     pub struct CreateInstructionInputs {
         pub fee_payer: Pubkey,
         pub authority: Pubkey,
-        pub root_indices: Vec<u16>,
+        pub root_indices: Vec<Option<u16>>,
         pub proof: CompressedProof,
         pub input_token_data: Vec<TokenData>,
         pub input_compressed_accounts: Vec<CompressedAccount>,
diff --git a/programs/compressed-token/src/process_transfer.rs b/programs/compressed-token/src/process_transfer.rs
index 6bc06d9eff..214a6852a8 100644
--- a/programs/compressed-token/src/process_transfer.rs
+++ b/programs/compressed-token/src/process_transfer.rs
@@ -598,7 +598,9 @@ pub mod transfer_sdk {
     use anchor_spl::token::Token;
     use light_system_program::{
         invoke::processor::CompressedProof,
-        sdk::compressed_account::{CompressedAccount, MerkleContext, PackedMerkleContext},
+        sdk::compressed_account::{
+            CompressedAccount, MerkleContext, PackedMerkleContext, QueueIndex,
+        },
     };
     use solana_sdk::{
         instruction::{AccountMeta, Instruction},
@@ -631,7 +633,7 @@ pub mod transfer_sdk {
         owner: &Pubkey,
         input_merkle_context: &[MerkleContext],
         output_compressed_accounts: &[TokenTransferOutputData],
-        root_indices: &[u16],
+        root_indices: &[Option<u16>],
         proof: &Option<CompressedProof>,
         input_token_data: &[TokenData],
         input_compressed_accounts: &[CompressedAccount],
@@ -714,7 +716,7 @@ pub mod transfer_sdk {
         input_merkle_context: &[MerkleContext],
         owner_if_delegate_is_signer: Option<Pubkey>,
         output_compressed_accounts: &[TokenTransferOutputData],
-        root_indices: &[u16],
+        root_indices: &[Option<u16>],
         proof: &Option<CompressedProof>,
         mint: Pubkey,
         owner: &Pubkey,
@@ -764,7 +766,7 @@ pub mod transfer_sdk {
         input_merkle_context: &[MerkleContext],
         delegate: Option<Pubkey>,
         output_compressed_accounts: &[TokenTransferOutputData],
-        root_indices: &[u16],
+        root_indices: &[Option<u16>],
         proof: &Option<CompressedProof>,
         mint: Pubkey,
         is_compress: bool,
@@ -833,7 +835,7 @@ pub mod transfer_sdk {
         input_token_data: &[TokenData],
         input_compressed_accounts: &[CompressedAccount],
         input_merkle_context: &[MerkleContext],
-        root_indices: &[u16],
+        root_indices: &[Option<u16>],
         output_compressed_accounts: &[TokenTransferOutputData],
     ) -> (
         HashMap<Pubkey, usize>,
@@ -878,6 +880,12 @@ pub mod transfer_sdk {
             } else {
                 None
            };
+            // Potential footgun: the queue index is set in the Merkle
+            // context but it is not used here.
+            let queue_index = if root_indices[i].is_none() {
+                Some(QueueIndex::default())
+            } else {
+                None
+            };
             let token_data_with_context = InputTokenDataWithContext {
                 amount: token_data.amount,
                 delegate_index,
                 merkle_context: PackedMerkleContext {
                     merkle_tree_pubkey_index: *remaining_accounts
                         .get(&input_merkle_context[i].merkle_tree_pubkey)
                         .unwrap() as u8,
                     nullifier_queue_pubkey_index: 0,
                     leaf_index: input_merkle_context[i].leaf_index,
-                    queue_index: None,
+                    queue_index,
                 },
-                root_index: root_indices[i],
+                root_index: root_indices[i].unwrap_or_default(),
                 lamports,
                 tlv: None,
             };
a/programs/registry/src/account_compression_cpi/batch_append.rs b/programs/registry/src/account_compression_cpi/batch_append.rs new file mode 100644 index 0000000000..f4dc940682 --- /dev/null +++ b/programs/registry/src/account_compression_cpi/batch_append.rs @@ -0,0 +1,49 @@ +use crate::ForesterEpochPda; +use account_compression::{ + batched_merkle_tree::BatchedMerkleTreeAccount, batched_queue::BatchedQueueAccount, + program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED, +}; +use anchor_lang::prelude::*; + +#[derive(Accounts)] +pub struct BatchAppend<'info> { + /// CHECK: only eligible foresters can append leaves. Is checked in ix. + #[account(mut)] + pub registered_forester_pda: Option<Account<'info, ForesterEpochPda>>, + pub authority: Signer<'info>, + /// CHECK: (seed constraints) used to invoke account compression program via cpi. + #[account(seeds = [CPI_AUTHORITY_PDA_SEED], bump)] + pub cpi_authority: AccountInfo<'info>, + /// CHECK: (account compression program) group access control. + pub registered_program_pda: AccountInfo<'info>, + pub account_compression_program: Program<'info, AccountCompression>, + /// CHECK: (account compression program) when emitting event. + pub log_wrapper: UncheckedAccount<'info>, + /// CHECK: (account compression program). + #[account(mut)] + pub merkle_tree: AccountLoader<'info, BatchedMerkleTreeAccount>, + /// CHECK: (account compression program). + #[account(mut)] + pub output_queue: AccountLoader<'info, BatchedQueueAccount>, +} + +pub fn process_batch_append(ctx: &Context<BatchAppend>, bump: u8, data: Vec<u8>) -> Result<()> { + let bump = &[bump]; + let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; + let signer_seeds = &[&seeds[..]]; + let accounts = account_compression::cpi::accounts::BatchAppend { + authority: ctx.accounts.cpi_authority.to_account_info(), + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.clone()), + log_wrapper: ctx.accounts.log_wrapper.to_account_info(), + output_queue: ctx.accounts.output_queue.to_account_info(), + }; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + + account_compression::cpi::batch_append(cpi_ctx, data) +} diff --git a/programs/registry/src/account_compression_cpi/batch_nullify.rs b/programs/registry/src/account_compression_cpi/batch_nullify.rs new file mode 100644 index 0000000000..4cbcb8ece5 --- /dev/null +++ b/programs/registry/src/account_compression_cpi/batch_nullify.rs @@ -0,0 +1,45 @@ +use crate::ForesterEpochPda; +use account_compression::{ + batched_merkle_tree::BatchedMerkleTreeAccount, program::AccountCompression, + utils::constants::CPI_AUTHORITY_PDA_SEED, +}; +use anchor_lang::prelude::*; + +#[derive(Accounts)] +pub struct BatchNullify<'info> { + /// CHECK: only eligible foresters can nullify leaves. Is checked in ix. + #[account(mut)] + pub registered_forester_pda: Option<Account<'info, ForesterEpochPda>>, + pub authority: Signer<'info>, + /// CHECK: (seed constraints) used to invoke account compression program via cpi. + #[account(seeds = [CPI_AUTHORITY_PDA_SEED], bump)] + pub cpi_authority: AccountInfo<'info>, + /// CHECK: (account compression program) group access control. + pub registered_program_pda: AccountInfo<'info>, + pub account_compression_program: Program<'info, AccountCompression>, + /// CHECK: (account compression program) when emitting event. + pub log_wrapper: UncheckedAccount<'info>, + /// CHECK: (account compression program).
+ #[account(mut)] + pub merkle_tree: AccountLoader<'info, BatchedMerkleTreeAccount>, +} + +pub fn process_batch_nullify(ctx: &Context<BatchNullify>, bump: u8, data: Vec<u8>) -> Result<()> { + let bump = &[bump]; + let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; + let signer_seeds = &[&seeds[..]]; + let accounts = account_compression::cpi::accounts::BatchNullify { + authority: ctx.accounts.cpi_authority.to_account_info(), + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.clone()), + log_wrapper: ctx.accounts.log_wrapper.to_account_info(), + }; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + + account_compression::cpi::batch_nullify(cpi_ctx, data) +} diff --git a/programs/registry/src/account_compression_cpi/initialize_batched_state_tree.rs b/programs/registry/src/account_compression_cpi/initialize_batched_state_tree.rs new file mode 100644 index 0000000000..1dcccd40fa --- /dev/null +++ b/programs/registry/src/account_compression_cpi/initialize_batched_state_tree.rs @@ -0,0 +1,53 @@ +use crate::protocol_config::state::ProtocolConfigPda; +use account_compression::{ + program::AccountCompression, utils::constants::CPI_AUTHORITY_PDA_SEED, + InitStateTreeAccountsInstructionData, +}; +use anchor_lang::prelude::*; +use light_system_program::program::LightSystemProgram; + +#[derive(Accounts)] +pub struct InitializeBatchedStateMerkleTreeAndQueue<'info> { + #[account(mut)] + pub authority: Signer<'info>, + /// CHECK: initialized in account compression program. + #[account(zero)] + pub merkle_tree: AccountInfo<'info>, + /// CHECK: initialized in account compression program. + #[account(zero)] + pub queue: AccountInfo<'info>, + /// CHECK: (account compression program) access control. + pub registered_program_pda: AccountInfo<'info>, + /// CHECK: (seed constraints) used to invoke account compression program via cpi. + #[account(mut, seeds = [CPI_AUTHORITY_PDA_SEED], bump)] + pub cpi_authority: AccountInfo<'info>, + pub account_compression_program: Program<'info, AccountCompression>, + pub protocol_config_pda: Account<'info, ProtocolConfigPda>, + /// CHECK: (system program) new cpi context account.
+ pub cpi_context_account: Option<AccountInfo<'info>>, + pub light_system_program: Option<Program<'info, LightSystemProgram>>, +} + +pub fn process_initialize_batched_state_merkle_tree( + ctx: &Context<InitializeBatchedStateMerkleTreeAndQueue>, + bump: u8, + params: InitStateTreeAccountsInstructionData, +) -> Result<()> { + let bump = &[bump]; + let seeds = [CPI_AUTHORITY_PDA_SEED, bump]; + let signer_seeds = &[&seeds[..]]; + let accounts = account_compression::cpi::accounts::InitializeBatchedStateMerkleTreeAndQueue { + authority: ctx.accounts.cpi_authority.to_account_info(), + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), + queue: ctx.accounts.queue.to_account_info(), + registered_program_pda: Some(ctx.accounts.registered_program_pda.clone()), + }; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.account_compression_program.to_account_info(), + accounts, + signer_seeds, + ); + + account_compression::cpi::initialize_batched_state_merkle_tree(cpi_ctx, params) +} diff --git a/programs/registry/src/account_compression_cpi/mod.rs b/programs/registry/src/account_compression_cpi/mod.rs index 355189dbae..066aa4bba1 100644 --- a/programs/registry/src/account_compression_cpi/mod.rs +++ b/programs/registry/src/account_compression_cpi/mod.rs @@ -1,3 +1,6 @@ +pub mod batch_append; +pub mod batch_nullify; +pub mod initialize_batched_state_tree; pub mod initialize_tree_and_queue; pub mod nullify; pub mod register_program; diff --git a/programs/registry/src/account_compression_cpi/sdk.rs b/programs/registry/src/account_compression_cpi/sdk.rs index 5a76abc88f..415c3506a3 100644 --- a/programs/registry/src/account_compression_cpi/sdk.rs +++ b/programs/registry/src/account_compression_cpi/sdk.rs @@ -2,9 +2,11 @@ use crate::utils::{ get_cpi_authority_pda, get_forester_epoch_pda_from_authority, get_protocol_config_pda_address, }; + use account_compression::utils::constants::NOOP_PUBKEY; use account_compression::{ - AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig, + AddressMerkleTreeConfig, AddressQueueConfig, InitStateTreeAccountsInstructionData, + NullifierQueueConfig, StateMerkleTreeConfig, }; use anchor_lang::prelude::*; use anchor_lang::InstructionData; @@ -281,3 +283,90 @@ pub fn create_initialize_merkle_tree_instruction( data: instruction_data.data(), } } + +pub fn create_initialize_batched_merkle_tree_instruction( + payer: Pubkey, + merkle_tree_pubkey: Pubkey, + queue_pubkey: Pubkey, + cpi_context_pubkey: Pubkey, + params: InitStateTreeAccountsInstructionData, +) -> Instruction { + let register_program_pda = get_registered_program_pda(&crate::ID); + let (cpi_authority, bump) = get_cpi_authority_pda(); + let protocol_config_pda = get_protocol_config_pda_address().0; + let instruction_data = crate::instruction::InitializeBatchedStateMerkleTree { bump, params }; + let accounts = crate::accounts::InitializeBatchedStateMerkleTreeAndQueue { + authority: payer, + registered_program_pda: register_program_pda, + merkle_tree: merkle_tree_pubkey, + queue: queue_pubkey, + cpi_authority, + account_compression_program: account_compression::ID, + protocol_config_pda, + light_system_program: Some(LightSystemProgram::id()), + cpi_context_account: Some(cpi_context_pubkey), + }; + Instruction { + program_id: crate::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + } +} + +pub fn create_batch_append_instruction( + forester: Pubkey, + derivation_pubkey: Pubkey, + merkle_tree_pubkey: Pubkey, + output_queue_pubkey: Pubkey, + epoch: u64, + data: Vec<u8>, +) -> Instruction { + let forester_epoch_pda =
get_forester_epoch_pda_from_authority(&derivation_pubkey, epoch).0; + let registered_program_pda = get_registered_program_pda(&crate::ID); + + let (cpi_authority_pda, bump) = get_cpi_authority_pda(); + let accounts = crate::accounts::BatchAppend { + authority: forester, + merkle_tree: merkle_tree_pubkey, + output_queue: output_queue_pubkey, + cpi_authority: cpi_authority_pda, + registered_forester_pda: Some(forester_epoch_pda), + registered_program_pda, + account_compression_program: account_compression::ID, + log_wrapper: NOOP_PUBKEY.into(), + }; + let instruction_data = crate::instruction::BatchAppend { bump, data }; + Instruction { + program_id: crate::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + } +} + +pub fn create_batch_nullify_instruction( + forester: Pubkey, + derivation_pubkey: Pubkey, + merkle_tree_pubkey: Pubkey, + epoch: u64, + data: Vec<u8>, +) -> Instruction { + let forester_epoch_pda = get_forester_epoch_pda_from_authority(&derivation_pubkey, epoch).0; + let registered_program_pda = get_registered_program_pda(&crate::ID); + + let (cpi_authority_pda, bump) = get_cpi_authority_pda(); + let accounts = crate::accounts::BatchNullify { + authority: forester, + merkle_tree: merkle_tree_pubkey, + cpi_authority: cpi_authority_pda, + registered_forester_pda: Some(forester_epoch_pda), + registered_program_pda, + account_compression_program: account_compression::ID, + log_wrapper: NOOP_PUBKEY.into(), + }; + let instruction_data = crate::instruction::BatchNullify { bump, data }; + Instruction { + program_id: crate::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction_data.data(), + } +} diff --git a/programs/registry/src/constants.rs b/programs/registry/src/constants.rs index 6d29c55e79..f0ffb9f370 100644 --- a/programs/registry/src/constants.rs +++ b/programs/registry/src/constants.rs @@ -7,3 +7,6 @@ pub const FORESTER_EPOCH_SEED: &[u8] = b"forester_epoch"; #[constant] pub const PROTOCOL_CONFIG_PDA_SEED: &[u8] = b"authority"; + +#[constant] +pub const DEFAULT_WORK_V1: u64 = 1; diff --git a/programs/registry/src/epoch/register_epoch.rs b/programs/registry/src/epoch/register_epoch.rs index c03a279530..248ba926a4 100644 --- a/programs/registry/src/epoch/register_epoch.rs +++ b/programs/registry/src/epoch/register_epoch.rs @@ -113,6 +113,7 @@ impl ForesterEpochPda { authority: &Pubkey, queue_pubkey: &Pubkey, current_solana_slot: u64, + num_work_items: u64, ) -> Result<()> { if forester_epoch_pda.authority != *authority { msg!( @@ -123,7 +124,7 @@ impl ForesterEpochPda { return err!(RegistryError::InvalidForester); } forester_epoch_pda.check_eligibility(current_solana_slot, queue_pubkey)?; - forester_epoch_pda.work_counter += 1; + forester_epoch_pda.work_counter += num_work_items; Ok(()) } @@ -131,6 +132,7 @@ impl ForesterEpochPda { forester_epoch_pda: &mut ForesterEpochPda, authority: &Pubkey, queue_pubkey: &Pubkey, + num_work_items: u64, ) -> Result<()> { let current_solana_slot = anchor_lang::solana_program::sysvar::clock::Clock::get()?.slot; Self::check_forester( @@ -138,6 +140,7 @@ impl ForesterEpochPda { authority, queue_pubkey, current_solana_slot, + num_work_items, ) } } diff --git a/programs/registry/src/lib.rs b/programs/registry/src/lib.rs index b6e4d7f062..dd3a81d0c0 100644 --- a/programs/registry/src/lib.rs +++ b/programs/registry/src/lib.rs @@ -8,6 +8,7 @@ pub mod account_compression_cpi; pub mod errors; pub use crate::epoch::{finalize_registration::*, register_epoch::*, report_work::*}; pub use
account_compression_cpi::{ + batch_append::*, batch_nullify::*, initialize_batched_state_tree::*, initialize_tree_and_queue::*, nullify::*, register_program::*, rollover_state_tree::*, update_address_tree::*, }; @@ -21,6 +22,7 @@ pub mod utils; use account_compression::MerkleTreeMetadata; pub use selection::forester::*; +use account_compression::InitStateTreeAccountsInstructionData; use anchor_lang::solana_program::pubkey::Pubkey; use errors::RegistryError; use protocol_config::state::ProtocolConfig; @@ -42,6 +44,8 @@ declare_id!("Lighton6oQpVkeewmo2mcPTQQp7kYHr4fWpAgJyEmDX"); #[program] pub mod light_registry { + use constants::DEFAULT_WORK_V1; + use super::*; /// Initializes the protocol config pda. Can only be called once by the @@ -370,6 +374,7 @@ pub mod light_registry { ctx.accounts.authority.key(), ctx.accounts.nullifier_queue.key(), &mut ctx.accounts.registered_forester_pda, + DEFAULT_WORK_V1, )?; process_nullify( @@ -402,6 +407,7 @@ pub mod light_registry { ctx.accounts.authority.key(), ctx.accounts.queue.key(), &mut ctx.accounts.registered_forester_pda, + DEFAULT_WORK_V1, )?; process_update_address_merkle_tree( &ctx, @@ -427,6 +433,7 @@ pub mod light_registry { ctx.accounts.authority.key(), ctx.accounts.old_queue.key(), &mut ctx.accounts.registered_forester_pda, + DEFAULT_WORK_V1, )?; process_rollover_address_merkle_tree_and_queue(&ctx, bump) @@ -442,6 +449,7 @@ pub mod light_registry { ctx.accounts.authority.key(), ctx.accounts.old_queue.key(), &mut ctx.accounts.registered_forester_pda, + DEFAULT_WORK_V1, )?; check_cpi_context( @@ -457,6 +465,90 @@ pub mod light_registry { ctx.accounts.light_system_program.as_ref().to_account_info(), ) } + + pub fn initialize_batched_state_merkle_tree<'info>( + ctx: Context<'_, '_, '_, 'info, InitializeBatchedStateMerkleTreeAndQueue<'info>>, + bump: u8, + params: InitStateTreeAccountsInstructionData, + ) -> Result<()> { + if let Some(network_fee) = params.network_fee { + if network_fee != ctx.accounts.protocol_config_pda.config.network_fee { + return err!(RegistryError::InvalidNetworkFee); + } + if params.forester.is_some() { + msg!("Forester pubkey must not be defined for trees serviced by light foresters."); + return err!(RegistryError::ForesterDefined); + } + } else if params.forester.is_none() { + msg!("Forester pubkey required for trees without a network fee."); + msg!("Trees without a network fee will not be serviced by light foresters."); + return err!(RegistryError::ForesterUndefined); + } + check_cpi_context( + ctx.accounts + .cpi_context_account + .as_ref() + .unwrap() + .to_account_info(), + &ctx.accounts.protocol_config_pda.config, + )?; + + process_initialize_batched_state_merkle_tree(&ctx, bump, params)?; + + process_initialize_cpi_context( + bump, + ctx.accounts.authority.to_account_info(), + ctx.accounts + .cpi_context_account + .as_ref() + .unwrap() + .to_account_info(), + ctx.accounts.merkle_tree.to_account_info(), + ctx.accounts + .light_system_program + .as_ref() + .unwrap() + .to_account_info(), + ) + } + + pub fn batch_nullify<'info>( + ctx: Context<'_, '_, '_, 'info, BatchNullify<'info>>, + bump: u8, + data: Vec<u8>, + ) -> Result<()> { + { + let account = ctx.accounts.merkle_tree.load()?; + let metadata = account.metadata; + check_forester( + &metadata, + ctx.accounts.authority.key(), + ctx.accounts.merkle_tree.key(), + &mut ctx.accounts.registered_forester_pda, + account.queue.batch_size, + )?; + } + process_batch_nullify(&ctx, bump, data) + } + + pub fn batch_append<'info>( + ctx: Context<'_, '_, '_, 'info,
BatchAppend<'info>>, + bump: u8, + data: Vec<u8>, + ) -> Result<()> { + { + let queue_account = ctx.accounts.output_queue.load()?; + let metadata = ctx.accounts.merkle_tree.load()?.metadata; + check_forester( + &metadata, + ctx.accounts.authority.key(), + ctx.accounts.output_queue.key(), + &mut ctx.accounts.registered_forester_pda, + queue_account.queue.batch_size, + )?; + } + process_batch_append(&ctx, bump, data) + } } /// if registered_forester_pda is not None, check forester eligibility and that network_fee is not 0 @@ -467,13 +559,19 @@ pub fn check_forester( authority: Pubkey, queue: Pubkey, registered_forester_pda: &mut Option<Account<'info, ForesterEpochPda>>, + num_work_items: u64, ) -> Result<()> { if let Some(forester_pda) = registered_forester_pda.as_mut() { // Checks forester: // - signer // - eligibility // - increments work counter - ForesterEpochPda::check_forester_in_program(forester_pda, &authority, &queue)?; + ForesterEpochPda::check_forester_in_program( + forester_pda, + &authority, + &queue, + num_work_items, + )?; if metadata.rollover_metadata.network_fee == 0 { return err!(RegistryError::InvalidNetworkFee); } diff --git a/programs/system/src/invoke/append_state.rs b/programs/system/src/invoke/append_state.rs index 29ede1e741..00ff3effa5 100644 --- a/programs/system/src/invoke/append_state.rs +++ b/programs/system/src/invoke/append_state.rs @@ -37,7 +37,7 @@ pub fn insert_output_compressed_accounts_into_state_merkle_tree< invoking_program: &Option<Pubkey>, hashed_pubkeys: &'a mut Vec<(Pubkey, [u8; 32])>, sequence_numbers: &'a mut Vec<MerkleTreeSequenceNumber>, -) -> Result<()> { +) -> Result<Option<(u8, u64)>> { bench_sbf_start!("cpda_append_data_init"); let mut account_infos = vec![ ctx.accounts.get_fee_payer().to_account_info(), // fee payer @@ -61,7 +61,7 @@ pub fn insert_output_compressed_accounts_into_state_merkle_tree< AccountMeta::new_readonly(account_infos[2].key(), false), AccountMeta::new_readonly(account_infos[3].key(), false), ]; - let instruction_data = create_cpi_accounts_and_instruction_data( + let (instruction_data, network_fee_bundle) = create_cpi_accounts_and_instruction_data( output_compressed_accounts, output_compressed_account_indices, output_compressed_account_hashes, @@ -84,7 +84,7 @@ invoke_signed(&instruction, account_infos.as_slice(), seeds)?; bench_sbf_end!("cpda_append_rest"); - Ok(()) + Ok(network_fee_bundle) } /// Creates CPI accounts, instruction data, and performs checks. @@ -100,6 +100,7 @@ /// exist in input compressed accounts. An address may not be used in an /// output compressed account. This will close the account.
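/// Returns the serialized CPI instruction data together with an optional /// (merkle_tree_index, network_fee) bundle taken from the first tree that charges a network fee.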
#[allow(clippy::too_many_arguments)] +#[allow(clippy::type_complexity)] pub fn create_cpi_accounts_and_instruction_data<'a>( output_compressed_accounts: &[OutputCompressedAccountWithPackedContext], output_compressed_account_indices: &mut [u32], @@ -111,10 +112,11 @@ pub fn create_cpi_accounts_and_instruction_data<'a>( remaining_accounts: &'a [AccountInfo<'a>], account_infos: &mut Vec<AccountInfo<'a>>, accounts: &mut Vec<AccountMeta>, -) -> Result<Vec<u8>> { +) -> Result<(Vec<u8>, Option<(u8, u64)>)> { let mut current_index: i16 = -1; let mut num_leaves_in_tree: u32 = 0; let mut mt_next_index = 0; + let mut network_fee_bundle = None; let num_leaves = output_compressed_account_hashes.len(); let mut instruction_data = Vec::<u8>::with_capacity(12 + 33 * num_leaves); let mut hashed_merkle_tree = [0u8; 32]; @@ -137,22 +139,28 @@ } else if account.merkle_tree_index as i16 > current_index { current_index = account.merkle_tree_index.into(); let seq; + let merkle_tree_pubkey; + let network_fee; // Check 1. - (mt_next_index, _, seq) = check_program_owner_state_merkle_tree( - &remaining_accounts[account.merkle_tree_index as usize], - invoking_program, - )?; + (mt_next_index, network_fee, seq, merkle_tree_pubkey) = + check_program_owner_state_merkle_tree( + &remaining_accounts[account.merkle_tree_index as usize], + invoking_program, + )?; + if network_fee_bundle.is_none() && network_fee.is_some() { + network_fee_bundle = Some((account.merkle_tree_index, network_fee.unwrap())); + } let account_info = remaining_accounts[account.merkle_tree_index as usize].to_account_info(); - sequence_numbers.push(MerkleTreeSequenceNumber { pubkey: account_info.key(), seq, }); - hashed_merkle_tree = match hashed_pubkeys.iter().find(|x| x.0 == account_info.key()) { + + hashed_merkle_tree = match hashed_pubkeys.iter().find(|x| x.0 == merkle_tree_pubkey) { Some(hashed_merkle_tree) => hashed_merkle_tree.1, None => { - hash_to_bn254_field_size_be(&account_info.key().to_bytes()) + hash_to_bn254_field_size_be(&merkle_tree_pubkey.to_bytes()) .unwrap() .0 } @@ -232,7 +240,8 @@ instruction_data.extend_from_slice(&[index_merkle_tree_account - 1]); instruction_data.extend_from_slice(&output_compressed_account_hashes[j]); } - Ok(instruction_data) + + Ok((instruction_data, network_fee_bundle)) } #[test] fn test_instruction_data_borsh_compat() { vec.extend_from_slice(&[2u8; 32]); vec.push(3); vec.extend_from_slice(&[4u8; 32]); + let refe = vec![(1, [2u8; 32]), (3, [4u8; 32])]; - let mut serialized = Vec::new(); - Vec::<(u8, [u8; 32])>::serialize(&refe, &mut serialized).unwrap(); + use anchor_lang::InstructionData; + let instruction_data = + account_compression::instruction::AppendLeavesToMerkleTrees { leaves: refe }; + let serialized = instruction_data.data()[8..].to_vec(); assert_eq!(serialized, vec); let res = Vec::<(u8, [u8; 32])>::deserialize(&mut vec.as_slice()).unwrap(); assert_eq!(res, vec![(1, [2u8; 32]), (3, [4u8; 32])]); diff --git a/programs/system/src/invoke/emit_event.rs b/programs/system/src/invoke/emit_event.rs index 6449b27ad3..d2b34e3821 100644 --- a/programs/system/src/invoke/emit_event.rs +++ b/programs/system/src/invoke/emit_event.rs @@ -13,12 +13,23 @@ use crate::{ pub fn emit_state_transition_event<'a, 'b, 'c: 'info, 'info, A: InvokeAccounts<'info> + Bumps>( inputs: InstructionDataInvoke, ctx: &'a Context<'a, 'b, 'c, 'info, A>, - input_compressed_account_hashes: Vec<[u8; 32]>, + mut input_compressed_account_hashes: Vec<[u8; 32]>,
output_compressed_account_hashes: Vec<[u8; 32]>, output_leaf_indices: Vec<u32>, sequence_numbers: Vec<MerkleTreeSequenceNumber>, ) -> Result<()> { + // Do not include read-only accounts in the event. + // Iterate in reverse so removals do not shift the indices of entries + // that still need to be checked. + for (i, account) in inputs + .input_compressed_accounts_with_merkle_context + .iter() + .enumerate() + .rev() + { + if account.read_only { + input_compressed_account_hashes.remove(i); + } + } // Note: message is unimplemented + // (if we compute the tx hash in the indexer we don't need to modify the event.) let event = PublicTransactionEvent { input_compressed_account_hashes, output_compressed_account_hashes, diff --git a/programs/system/src/invoke/nullify_state.rs b/programs/system/src/invoke/nullify_state.rs index aa3dbd14d5..c8e6b4bc9c 100644 --- a/programs/system/src/invoke/nullify_state.rs +++ b/programs/system/src/invoke/nullify_state.rs @@ -4,9 +4,11 @@ use light_macros::heap_neutral; use crate::{ constants::CPI_AUTHORITY_PDA_BUMP, - invoke::InstructionDataInvoke, invoke_cpi::verify_signer::check_program_owner_state_merkle_tree, - sdk::accounts::{InvokeAccounts, SignerAccounts}, + sdk::{ + accounts::{InvokeAccounts, SignerAccounts}, + compressed_account::PackedCompressedAccountWithMerkleContext, + }, }; /// 1. Checks that if nullifier queue has program_owner it invoking_program is @@ -20,10 +22,11 @@ pub fn insert_nullifiers< 'a, 'b, 'c: 'info, 'info, A: InvokeAccounts<'info> + SignerAccounts<'info> + Bumps, >( - inputs: &'a InstructionDataInvoke, + input_compressed_accounts_with_merkle_context: &'a [PackedCompressedAccountWithMerkleContext], ctx: &'a Context<'a, 'b, 'c, 'info, A>, nullifiers: &'a [[u8; 32]], invoking_program: &Option<Pubkey>, + tx_hash: [u8; 32], ) -> Result<Option<(u8, u64)>> { light_heap::bench_sbf_start!("cpda_insert_nullifiers_prep_accs"); let mut account_infos = vec![ @@ -44,6 +47,8 @@ AccountMeta::new_readonly(account_infos[2].key(), false), AccountMeta::new_readonly(account_infos[3].key(), false), ]; + + let mut leaf_indices = Vec::with_capacity(input_compressed_accounts_with_merkle_context.len()); // If the transaction contains at least one input compressed account a // network fee is paid. This network fee is paid in addition to the address // network fee. The network fee is paid once per transaction, defined in the // nullifier queue is mutable. The network fee field in the queue is not // used. let mut network_fee_bundle = None; - for account in inputs.input_compressed_accounts_with_merkle_context.iter() { + for account in input_compressed_accounts_with_merkle_context.iter() { + // Don't nullify read-only accounts.
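+ // Read-only accounts are proven for inclusion only; they are not + // invalidated, so no queue account or leaf index is collected for them.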
+ if account.read_only { + continue; + } + leaf_indices.push(account.merkle_context.leaf_index); + let account_info = &ctx.remaining_accounts[account.merkle_context.nullifier_queue_pubkey_index as usize]; accounts.push(AccountMeta { @@ -60,7 +71,7 @@ pubkey: account_info.key(), is_signer: false, is_writable: true, }); account_infos.push(account_info.clone()); - let (_, network_fee, _) = check_program_owner_state_merkle_tree( + let (_, network_fee, _, _) = check_program_owner_state_merkle_tree( &ctx.remaining_accounts[account.merkle_context.merkle_tree_pubkey_index as usize], invoking_program, )?; @@ -75,7 +86,7 @@ accounts.push(AccountMeta { pubkey: account_info.key(), is_signer: false, - is_writable: false, + is_writable: true, }); account_infos.push(account_info.clone()); } @@ -85,6 +96,8 @@ let instruction_data = account_compression::instruction::InsertIntoNullifierQueues { nullifiers: nullifiers.to_vec(), + leaf_indices, + tx_hash: Some(tx_hash), }; let data = instruction_data.data(); diff --git a/programs/system/src/invoke/processor.rs b/programs/system/src/invoke/processor.rs index 87b6e66df5..a8cb8e6730 100644 --- a/programs/system/src/invoke/processor.rs +++ b/programs/system/src/invoke/processor.rs @@ -13,7 +13,7 @@ use crate::{ sol_compression::compress_or_decompress_lamports, sum_check::sum_check, verify_state_proof::{ - fetch_input_compressed_account_roots, fetch_roots_address_merkle_tree, + create_tx_hash, fetch_input_compressed_account_roots, fetch_roots_address_merkle_tree, hash_input_compressed_accounts, verify_state_proof, }, }, @@ -63,7 +63,7 @@ pub fn process< } // Sum check --------------------------------------------------- bench_sbf_start!("cpda_sum_check"); - sum_check( + let (num_read_only_input_accounts, num_prove_by_index_input_accounts) = sum_check( &inputs.input_compressed_accounts_with_merkle_context, &inputs.output_compressed_accounts, &inputs.relay_fee, @@ -93,7 +93,7 @@ pub fn process< let mut compressed_account_addresses: Vec<Option<[u8; 32]>> = vec![None; num_input_compressed_accounts + num_new_addresses]; - let mut output_leaf_indices = vec![0u32; num_output_compressed_accounts]; + let mut output_compressed_account_indices = vec![0u32; num_output_compressed_accounts]; let mut output_compressed_account_hashes = vec![[0u8; 32]; num_output_compressed_accounts]; // hashed_pubkeys_capacity is the maximum of hashed pubkey the tx could have. // 1 owner pubkey inputs + every remaining account pubkey can be a tree + every output can be owned by a different pubkey @@ -103,136 +103,58 @@ pub fn process< let mut hashed_pubkeys = Vec::<(Pubkey, [u8; 32])>::with_capacity(hashed_pubkeys_capacity); // Verify state and or address proof --------------------------------------------------- + + // Allocate heap memory here because roots are only used for proof verification. + let mut new_address_roots = vec![[0u8; 32]; num_new_addresses]; + + // hash input compressed accounts --------------------------------------------------- + bench_sbf_start!("cpda_hash_input_compressed_accounts"); if !inputs .input_compressed_accounts_with_merkle_context .is_empty() - || !inputs.new_address_params.is_empty() { - // Allocate heap memory here because roots are only used for proof verification.
- let mut new_address_roots = vec![[0u8; 32]; num_new_addresses]; - let mut input_compressed_account_roots = vec![[0u8; 32]; num_input_compressed_accounts]; - // hash input compressed accounts --------------------------------------------------- - bench_sbf_start!("cpda_hash_input_compressed_accounts"); - if !inputs - .input_compressed_accounts_with_merkle_context - .is_empty() - { - hash_input_compressed_accounts( - ctx.remaining_accounts, - &inputs.input_compressed_accounts_with_merkle_context, - &mut input_compressed_account_hashes, - &mut compressed_account_addresses, - &mut hashed_pubkeys, - )?; - // # Safety this is a safeguard for memory safety. - // This error should never be triggered. - if hashed_pubkeys.capacity() != hashed_pubkeys_capacity { - msg!( - "hashed_pubkeys exceeded capacity. Used {}, allocated {}.", - hashed_pubkeys.capacity(), - hashed_pubkeys_capacity - ); - return err!(SystemProgramError::InvalidCapacity); - } - fetch_input_compressed_account_roots( - &inputs.input_compressed_accounts_with_merkle_context, - &ctx, - &mut input_compressed_account_roots, - )?; + hash_input_compressed_accounts( + ctx.remaining_accounts, + &inputs.input_compressed_accounts_with_merkle_context, + &mut input_compressed_account_hashes, + &mut compressed_account_addresses, + &mut hashed_pubkeys, + )?; + // # Safety this is a safeguard for memory safety. + // This error should never be triggered. + if hashed_pubkeys.capacity() != hashed_pubkeys_capacity { + msg!( + "hashed_pubkeys exceeded capacity. Used {}, allocated {}.", + hashed_pubkeys.capacity(), + hashed_pubkeys_capacity + ); + return err!(SystemProgramError::InvalidCapacity); } + } - bench_sbf_end!("cpda_hash_input_compressed_accounts"); - let mut new_addresses = vec![[0u8; 32]; num_new_addresses]; - // Insert addresses into address merkle tree queue --------------------------------------------------- - if !new_addresses.is_empty() { - derive_new_addresses( - &inputs.new_address_params, - num_input_compressed_accounts, - ctx.remaining_accounts, - &mut compressed_account_addresses, - &mut new_addresses, - )?; - let network_fee_bundle = insert_addresses_into_address_merkle_tree_queue( - &ctx, - &new_addresses, - &inputs.new_address_params, - &invoking_program, - )?; - if let Some(network_fee_bundle) = network_fee_bundle { - let (remaining_account_index, network_fee) = network_fee_bundle; - transfer_lamports_cpi( - ctx.accounts.get_fee_payer(), - &ctx.remaining_accounts[remaining_account_index as usize], - network_fee, - )?; - } - fetch_roots_address_merkle_tree( - &inputs.new_address_params, - &ctx, - &mut new_address_roots, - )?; - } - bench_sbf_start!("cpda_verify_state_proof"); + bench_sbf_end!("cpda_hash_input_compressed_accounts"); + let mut new_addresses = vec![[0u8; 32]; num_new_addresses]; + // Insert addresses into address merkle tree queue --------------------------------------------------- + let address_network_fee_bundle = if !new_addresses.is_empty() { + derive_new_addresses( + &inputs.new_address_params, + num_input_compressed_accounts, + ctx.remaining_accounts, + &mut compressed_account_addresses, + &mut new_addresses, + // TODO: add readonly addresses here + )?; + fetch_roots_address_merkle_tree(&inputs.new_address_params, &ctx, &mut new_address_roots)?; - let proof = match &inputs.proof { - Some(proof) => proof, - None => return err!(SystemProgramError::ProofIsNone), - }; - let compressed_verifier_proof = CompressedVerifierProof { - a: proof.a, - b: proof.b, - c: proof.c, - }; - match verify_state_proof( - 
&input_compressed_account_roots, - &input_compressed_account_hashes, - &new_address_roots, + insert_addresses_into_address_merkle_tree_queue( + &ctx, &new_addresses, - &compressed_verifier_proof, - ) { - Ok(_) => Ok(()), - Err(e) => { - msg!( - "input_compressed_accounts_with_merkle_context: {:?}", - inputs.input_compressed_accounts_with_merkle_context - ); - Err(e) - } - }?; - bench_sbf_end!("cpda_verify_state_proof"); - // insert nullifiers (input compressed account hashes)--------------------------------------------------- - bench_sbf_start!("cpda_nullifiers"); - if !inputs - .input_compressed_accounts_with_merkle_context - .is_empty() - { - let network_fee_bundle = insert_nullifiers( - &inputs, - &ctx, - &input_compressed_account_hashes, - &invoking_program, - )?; - if let Some(network_fee_bundle) = network_fee_bundle { - let (remaining_account_index, network_fee) = network_fee_bundle; - transfer_lamports_cpi( - ctx.accounts.get_fee_payer(), - &ctx.remaining_accounts[remaining_account_index as usize], - network_fee, - )?; - } - } - bench_sbf_end!("cpda_nullifiers"); - } else if inputs.proof.is_some() { - return err!(SystemProgramError::ProofIsSome); - } else if inputs - .input_compressed_accounts_with_merkle_context - .is_empty() - && inputs.new_address_params.is_empty() - && inputs.output_compressed_accounts.is_empty() - { - return err!(SystemProgramError::EmptyInputs); - } - bench_sbf_end!("cpda_nullifiers"); + &inputs.new_address_params, + + &invoking_program, + )? + } else { + None + }; // Allocate space for sequence numbers with remaining account length as a // proxy. We cannot allocate heap memory in // heap neutral. let mut sequence_numbers = Vec::with_capacity(ctx.remaining_accounts.len()); // Insert leaves (output compressed account hashes) --------------------------------------------------- - if !inputs.output_compressed_accounts.is_empty() { + let output_network_fee_bundle = if !inputs.output_compressed_accounts.is_empty() { bench_sbf_start!("cpda_append"); - insert_output_compressed_accounts_into_state_merkle_tree( + let network_fee_bundle = insert_output_compressed_accounts_into_state_merkle_tree( &mut inputs.output_compressed_accounts, &ctx, - &mut output_leaf_indices, + &mut output_compressed_account_indices, &mut output_compressed_account_hashes, &mut compressed_account_addresses, &invoking_program, @@ -263,10 +185,107 @@ return err!(SystemProgramError::InvalidCapacity); } bench_sbf_end!("cpda_append"); - } + network_fee_bundle + } else { + None + }; bench_sbf_start!("emit_state_transition_event"); // Reduce the capacity of the sequence numbers vector. sequence_numbers.shrink_to_fit(); + + // insert nullifiers (input compressed account hashes)--------------------------------------------------- + // Nullifiers need to be inserted before proof verification because + // in certain cases we zero out roots in batched input queues. + // These roots need to be zero prior to proof verification. + bench_sbf_start!("cpda_nullifiers"); + let input_network_fee_bundle = if num_input_compressed_accounts > num_read_only_input_accounts { + // Access the current slot + let current_slot = Clock::get()?.slot; + let tx_hash = create_tx_hash( + &inputs.input_compressed_accounts_with_merkle_context, + &input_compressed_account_hashes, + &output_compressed_account_hashes, + current_slot, + ); + // Insert nullifiers for compressed input account hashes into the nullifier + // queue, except for read-only accounts.
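+ // The tx hash commits to every non-read-only input hash, every output + // hash, and the current slot (see create_tx_hash).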
+ insert_nullifiers( + &inputs.input_compressed_accounts_with_merkle_context, + &ctx, + &input_compressed_account_hashes, + &invoking_program, + tx_hash, + )? + } else { + None + }; + bench_sbf_end!("cpda_nullifiers"); + + // Transfer network fee + transfer_network_fee( + &ctx, + input_network_fee_bundle, + address_network_fee_bundle, + output_network_fee_bundle, + )?; + + if num_prove_by_index_input_accounts < num_input_compressed_accounts + || !inputs.new_address_params.is_empty() + { + bench_sbf_start!("cpda_verify_state_proof"); + if let Some(proof) = inputs.proof.as_ref() { + bench_sbf_start!("cpda_verify_state_proof"); + let compressed_verifier_proof = CompressedVerifierProof { + a: proof.a, + b: proof.b, + c: proof.c, + }; + let mut input_compressed_account_roots = + Vec::with_capacity(num_input_compressed_accounts); + fetch_input_compressed_account_roots( + &inputs.input_compressed_accounts_with_merkle_context, + &ctx, + &mut input_compressed_account_roots, + )?; + match verify_state_proof( + &inputs.input_compressed_accounts_with_merkle_context, + &input_compressed_account_roots, + &input_compressed_account_hashes, + &new_address_roots, + &new_addresses, + &compressed_verifier_proof, + ) { + Ok(_) => Ok(()), + Err(e) => { + msg!("proof {:?}", proof); + msg!( + "input_compressed_account_hashes {:?}", + input_compressed_account_hashes + ); + msg!("input roots {:?}", input_compressed_account_roots); + msg!( + "input_compressed_accounts_with_merkle_context: {:?}", + inputs.input_compressed_accounts_with_merkle_context + ); + Err(e) + } + }?; + bench_sbf_end!("cpda_verify_state_proof"); + } else { + return err!(SystemProgramError::ProofIsNone); + } + } else if inputs.proof.is_some() { + return err!(SystemProgramError::ProofIsSome); + } else if inputs + .input_compressed_accounts_with_merkle_context + .is_empty() + && inputs.new_address_params.is_empty() + && inputs.output_compressed_accounts.is_empty() + { + return err!(SystemProgramError::EmptyInputs); + } + bench_sbf_end!("cpda_nullifiers"); + // Emit state transition event --------------------------------------------------- bench_sbf_start!("emit_state_transition_event"); emit_state_transition_event( @@ -274,10 +293,71 @@ &ctx, input_compressed_account_hashes, output_compressed_account_hashes, - output_leaf_indices, + output_compressed_account_indices, sequence_numbers, )?; bench_sbf_end!("emit_state_transition_event"); Ok(()) } + +/// Network fee distribution: +/// - if any account is created or modified -> transfer network fee (5000 lamports) +/// (Previously we didn't charge for appends; now we have to, since values go into a queue.) +/// - if an address is created -> transfer an additional network fee (5000 lamports) +/// +/// Examples: +/// 1. create account with address network fee 10,000 lamports +/// 2. token transfer network fee 5,000 lamports +/// 3.
mint token network fee 5,000 lamports +#[inline(always)] +fn transfer_network_fee< + 'a, + 'b, + 'c: 'info, + 'info, + A: InvokeAccounts<'info> + SignerAccounts<'info> + Bumps, +>( + ctx: &Context<'a, 'b, 'c, 'info, A>, + input_network_fee_bundle: Option<(u8, u64)>, + address_network_fee_bundle: Option<(u8, u64)>, + output_network_fee_bundle: Option<(u8, u64)>, +) -> Result<()> { + if let Some(network_fee_bundle) = input_network_fee_bundle { + let address_fee = if let Some(network_fee_bundle) = address_network_fee_bundle { + let (_, network_fee) = network_fee_bundle; + network_fee + } else { + 0 + }; + let (remaining_account_index, mut network_fee) = network_fee_bundle; + network_fee += address_fee; + transfer_lamports_cpi( + ctx.accounts.get_fee_payer(), + &ctx.remaining_accounts[remaining_account_index as usize], + network_fee, + )?; + } else if let Some(network_fee_bundle) = output_network_fee_bundle { + let address_fee = if let Some(network_fee_bundle) = address_network_fee_bundle { + let (_, network_fee) = network_fee_bundle; + network_fee + } else { + 0 + }; + let (remaining_account_index, mut network_fee) = network_fee_bundle; + network_fee += address_fee; + transfer_lamports_cpi( + ctx.accounts.get_fee_payer(), + &ctx.remaining_accounts[remaining_account_index as usize], + network_fee, + )?; + } else if let Some(network_fee_bundle) = address_network_fee_bundle { + let (remaining_account_index, network_fee) = network_fee_bundle; + transfer_lamports_cpi( + ctx.accounts.get_fee_payer(), + &ctx.remaining_accounts[remaining_account_index as usize], + network_fee, + )?; + } + Ok(()) +} diff --git a/programs/system/src/invoke/sum_check.rs b/programs/system/src/invoke/sum_check.rs index a32794e6b5..ca22346eb1 100644 --- a/programs/system/src/invoke/sum_check.rs +++ b/programs/system/src/invoke/sum_check.rs @@ -14,11 +14,24 @@ pub fn sum_check( relay_fee: &Option, compress_or_decompress_lamports: &Option, is_compress: &bool, -) -> Result<()> { +) -> Result<(usize, usize)> { let mut sum: u64 = 0; + let num_read_only = 0; + let mut num_prove_by_index_accounts = 0; for compressed_account_with_context in input_compressed_accounts_with_merkle_context.iter() { + if compressed_account_with_context + .merkle_context + .queue_index + .is_some() + { + num_prove_by_index_accounts += 1; + } + // Readonly accounts are not included in the sum check, since these are + // not invalidated in this transaction. if compressed_account_with_context.read_only { unimplemented!("read_only accounts are not supported. 
Set read_only to false."); + // num_read_only += 1; + // continue; } sum = sum .checked_add(compressed_account_with_context.compressed_account.lamports) @@ -58,7 +71,7 @@ pub fn sum_check( } if sum == 0 { - Ok(()) + Ok((num_read_only, num_prove_by_index_accounts)) } else { Err(SystemProgramError::SumCheckFailed.into()) } @@ -125,7 +138,7 @@ mod test { relay_fee: Option, compress_or_decompress_lamports: Option, is_compress: bool, - ) -> Result<()> { + ) -> Result<(usize, usize)> { let mut inputs = Vec::new(); for i in input_amounts.iter() { inputs.push(PackedCompressedAccountWithMerkleContext { diff --git a/programs/system/src/invoke/verify_state_proof.rs b/programs/system/src/invoke/verify_state_proof.rs index 9983a09b68..41f93b53fc 100644 --- a/programs/system/src/invoke/verify_state_proof.rs +++ b/programs/system/src/invoke/verify_state_proof.rs @@ -3,10 +3,11 @@ use crate::{ NewAddressParamsPacked, }; use account_compression::{ - utils::check_discrimininator::check_discriminator, AddressMerkleTreeAccount, - StateMerkleTreeAccount, + batched_merkle_tree::{BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount}, + utils::check_discrimininator::check_discriminator, + AddressMerkleTreeAccount, StateMerkleTreeAccount, }; -use anchor_lang::{prelude::*, Bumps}; +use anchor_lang::{prelude::*, Bumps, Discriminator}; use light_concurrent_merkle_tree::zero_copy::ConcurrentMerkleTreeZeroCopy; use light_hasher::Poseidon; use light_indexed_merkle_tree::zero_copy::IndexedMerkleTreeZeroCopy; @@ -18,6 +19,7 @@ use light_verifier::{ }; use std::mem; +// TODO: add support for batched Merkle trees #[inline(never)] #[heap_neutral] pub fn fetch_input_compressed_account_roots< @@ -29,24 +31,49 @@ pub fn fetch_input_compressed_account_roots< >( input_compressed_accounts_with_merkle_context: &'a [PackedCompressedAccountWithMerkleContext], ctx: &'a Context<'a, 'b, 'c, 'info, A>, - roots: &'a mut [[u8; 32]], + roots: &'a mut Vec<[u8; 32]>, ) -> Result<()> { - for (i, input_compressed_account_with_context) in input_compressed_accounts_with_merkle_context - .iter() - .enumerate() + for input_compressed_account_with_context in + input_compressed_accounts_with_merkle_context.iter() { + // Skip accounts which prove inclusion by index in output queue. 
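+ // No root is fetched for these accounts; their leaves are verified + // by index against the output queue rather than by Merkle proof.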
+ if input_compressed_account_with_context + .merkle_context + .queue_index + .is_some() + { + continue; + } let merkle_tree = &ctx.remaining_accounts[input_compressed_account_with_context .merkle_context .merkle_tree_pubkey_index as usize]; - let merkle_tree = merkle_tree.try_borrow_data()?; - check_discriminator::<StateMerkleTreeAccount>(&merkle_tree)?; - let merkle_tree = ConcurrentMerkleTreeZeroCopy::<Poseidon, 26>::from_bytes_zero_copy( - &merkle_tree[8 + mem::size_of::<StateMerkleTreeAccount>()..], - ) - .map_err(ProgramError::from)?; - let fetched_roots = &merkle_tree.roots; + let merkle_tree = &mut merkle_tree.try_borrow_mut_data()?; + let mut discriminator_bytes = [0u8; 8]; + discriminator_bytes.copy_from_slice(&merkle_tree[0..8]); + match discriminator_bytes { + StateMerkleTreeAccount::DISCRIMINATOR => { + let merkle_tree = + ConcurrentMerkleTreeZeroCopy::<Poseidon, 26>::from_bytes_zero_copy( + &merkle_tree[8 + mem::size_of::<StateMerkleTreeAccount>()..], + ) + .map_err(ProgramError::from)?; + let fetched_roots = &merkle_tree.roots; - roots[i] = fetched_roots[input_compressed_account_with_context.root_index as usize]; + (*roots) + .push(fetched_roots[input_compressed_account_with_context.root_index as usize]); + } + BatchedMerkleTreeAccount::DISCRIMINATOR => { + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree) + .map_err(ProgramError::from)?; + (*roots).push( + merkle_tree.root_history + [input_compressed_account_with_context.root_index as usize], + ); + } + _ => { + return err!(crate::ErrorCode::AccountDiscriminatorMismatch); + } + } } Ok(()) } @@ -110,22 +137,19 @@ pub fn hash_input_compressed_accounts<'a, 'b, 'c: 'info, 'info>( .iter() .enumerate() { - // For heap neutrality we cannot allocate new heap memory in this function. - match &input_compressed_account_with_context - .compressed_account - .address - { - Some(address) => addresses[j] = Some(*address), - None => {} - }; - if input_compressed_account_with_context - .merkle_context - .queue_index - .is_some() - { - unimplemented!("Queue index is not supported."); + // Skip read-only accounts. Read-only accounts are just included in + // proof verification, but since these accounts are not invalidated the + // address and lamports must not be used in sum and address checks. + if !input_compressed_account_with_context.read_only { + // For heap neutrality we cannot allocate new heap memory in this function. + match &input_compressed_account_with_context + .compressed_account + .address + { + Some(address) => addresses[j] = Some(*address), + None => {} + }; } - #[allow(clippy::comparison_chain)] if current_mt_index != input_compressed_account_with_context @@ -202,16 +226,29 @@ pub fn hash_input_compressed_accounts<'a, 'b, 'c: 'info, 'info>( #[heap_neutral] pub fn verify_state_proof( + input_compressed_accounts_with_merkle_context: &[PackedCompressedAccountWithMerkleContext], roots: &[[u8; 32]], leaves: &[[u8; 32]], address_roots: &[[u8; 32]], addresses: &[[u8; 32]], compressed_proof: &CompressedProof, ) -> anchor_lang::Result<()> { - if !addresses.is_empty() && !leaves.is_empty() { + // Filter out leaves that are not in the proof (proven by index).
+ let proof_input_leaves = leaves + .iter() + .enumerate() + .filter(|(x, _)| { + input_compressed_accounts_with_merkle_context[*x] + .merkle_context + .queue_index + .is_none() + }) + .map(|x| *x.1) + .collect::<Vec<[u8; 32]>>(); + if !addresses.is_empty() && !proof_input_leaves.is_empty() { verify_create_addresses_and_merkle_proof_zkp( roots, - leaves, + &proof_input_leaves, address_roots, addresses, compressed_proof, @@ -221,7 +258,69 @@ verify_create_addresses_zkp(address_roots, addresses, compressed_proof) .map_err(ProgramError::from)?; } else { - verify_merkle_proof_zkp(roots, leaves, compressed_proof).map_err(ProgramError::from)?; + verify_merkle_proof_zkp(roots, &proof_input_leaves, compressed_proof) + .map_err(ProgramError::from)?; } Ok(()) } + +pub fn create_tx_hash( + input_compressed_accounts_with_merkle_context: &[PackedCompressedAccountWithMerkleContext], + input_compressed_account_hashes: &[[u8; 32]], + output_compressed_account_hashes: &[[u8; 32]], + current_slot: u64, +) -> [u8; 32] { + use light_hasher::Hasher; + // Do not include read-only accounts in the event. + let index = find_first_non_read_only_account(input_compressed_accounts_with_merkle_context); + // TODO: extend with message hash (first 32 bytes of the message) + let mut tx_hash = input_compressed_account_hashes[index]; + // Enumerate before skipping so that i indexes the full account slice. + for (i, hash) in input_compressed_account_hashes + .iter() + .enumerate() + .skip(index + 1) + { + if input_compressed_accounts_with_merkle_context[i].read_only { + continue; + } + tx_hash = Poseidon::hashv(&[&tx_hash, hash]).unwrap(); + } + tx_hash = Poseidon::hashv(&[&tx_hash, &current_slot.to_be_bytes()]).unwrap(); + for hash in output_compressed_account_hashes.iter() { + tx_hash = Poseidon::hashv(&[&tx_hash, hash]).unwrap(); + } + tx_hash +} + +fn find_first_non_read_only_account( + input_compressed_accounts_with_merkle_context: &[PackedCompressedAccountWithMerkleContext], +) -> usize { + for (i, account) in input_compressed_accounts_with_merkle_context + .iter() + .enumerate() + { + if !account.read_only { + return i; + } + } + 0 +} + +pub fn create_tx_hash_offchain( + input_compressed_account_hashes: &[[u8; 32]], + output_compressed_account_hashes: &[[u8; 32]], + current_slot: u64, +) -> [u8; 32] { + use light_hasher::Hasher; + // Do not include read-only accounts in the event.
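+ // Hash chain shape (mirrors the on-chain create_tx_hash): + // h = H(in_0, in_1); h = H(h, in_2); ...; h = H(h, slot_be); h = H(h, out_0); ...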
+ // TODO: extend with message hash (first 32 bytes of the message) + let mut tx_hash = input_compressed_account_hashes[0]; + for hash in input_compressed_account_hashes.iter().skip(1) { + tx_hash = Poseidon::hashv(&[&tx_hash, hash]).unwrap(); + } + tx_hash = Poseidon::hashv(&[&tx_hash, &current_slot.to_be_bytes()]).unwrap(); + for hash in output_compressed_account_hashes.iter() { + tx_hash = Poseidon::hashv(&[&tx_hash, hash]).unwrap(); + } + tx_hash +} diff --git a/programs/system/src/invoke_cpi/initialize.rs b/programs/system/src/invoke_cpi/initialize.rs index c5787ac0f0..a48480f340 100644 --- a/programs/system/src/invoke_cpi/initialize.rs +++ b/programs/system/src/invoke_cpi/initialize.rs @@ -1,4 +1,3 @@ -use account_compression::StateMerkleTreeAccount; use anchor_lang::prelude::*; use super::account::CpiContextAccount; @@ -10,5 +9,6 @@ pub struct InitializeCpiContextAccount<'info> { pub fee_payer: Signer<'info>, #[account(zero)] pub cpi_context_account: Account<'info, CpiContextAccount>, - pub associated_merkle_tree: AccountLoader<'info, StateMerkleTreeAccount>, + /// CHECK: manually in instruction + pub associated_merkle_tree: AccountInfo<'info>, } diff --git a/programs/system/src/invoke_cpi/instruction.rs b/programs/system/src/invoke_cpi/instruction.rs index 797004f2ce..ba794441a3 100644 --- a/programs/system/src/invoke_cpi/instruction.rs +++ b/programs/system/src/invoke_cpi/instruction.rs @@ -112,6 +112,7 @@ impl InstructionDataInvokeCpi { } } } + #[cfg(test)] mod tests { use std::vec; diff --git a/programs/system/src/invoke_cpi/verify_signer.rs b/programs/system/src/invoke_cpi/verify_signer.rs index 547ce191ab..10792be881 100644 --- a/programs/system/src/invoke_cpi/verify_signer.rs +++ b/programs/system/src/invoke_cpi/verify_signer.rs @@ -1,8 +1,10 @@ use account_compression::{ - utils::{check_discrimininator::check_discriminator, constants::CPI_AUTHORITY_PDA_SEED}, + batched_merkle_tree::{BatchedMerkleTreeAccount, ZeroCopyBatchedMerkleTreeAccount}, + batched_queue::{BatchedQueueAccount, ZeroCopyBatchedQueueAccount}, + utils::constants::CPI_AUTHORITY_PDA_SEED, AddressMerkleTreeAccount, StateMerkleTreeAccount, }; -use anchor_lang::prelude::*; +use anchor_lang::{prelude::*, Discriminator}; use light_concurrent_merkle_tree::zero_copy::ConcurrentMerkleTreeZeroCopy; use light_hasher::Poseidon; use light_heap::{bench_sbf_end, bench_sbf_start}; @@ -120,46 +122,96 @@ pub fn output_compressed_accounts_write_access_check( pub fn check_program_owner_state_merkle_tree<'a, 'b: 'a>( merkle_tree_acc_info: &'b AccountInfo<'a>, invoking_program: &Option<Pubkey>, -) -> Result<(u32, Option<u64>, u64)> { - let (seq, next_index) = { - let merkle_tree = merkle_tree_acc_info.try_borrow_data()?; - check_discriminator::<StateMerkleTreeAccount>(&merkle_tree).map_err(ProgramError::from)?; - let merkle_tree = ConcurrentMerkleTreeZeroCopy::<Poseidon, 26>::from_bytes_zero_copy( - &merkle_tree[8 + mem::size_of::<StateMerkleTreeAccount>()..], - ) - .map_err(ProgramError::from)?; +) -> Result<(u32, Option<u64>, u64, Pubkey)> { + let (seq, next_index, network_fee, program_owner, merkle_tree_pubkey) = { + let mut discriminator_bytes = [0u8; 8]; + discriminator_bytes.copy_from_slice(&merkle_tree_acc_info.try_borrow_data()?[0..8]); + match discriminator_bytes { + StateMerkleTreeAccount::DISCRIMINATOR => { + let (seq, next_index) = { + let merkle_tree = merkle_tree_acc_info.try_borrow_mut_data()?; + let merkle_tree = + ConcurrentMerkleTreeZeroCopy::<Poseidon, 26>::from_bytes_zero_copy( + &merkle_tree[8 + mem::size_of::<StateMerkleTreeAccount>()..], + ) + .map_err(ProgramError::from)?; - let seq = merkle_tree.sequence_number() as u64 + 1; -
let next_index: u32 = merkle_tree.next_index().try_into().unwrap(); + let seq = merkle_tree.sequence_number() as u64 + 1; + let next_index: u32 = merkle_tree.next_index().try_into().unwrap(); + (seq, next_index) + }; + let merkle_tree = + AccountLoader::<StateMerkleTreeAccount>::try_from(merkle_tree_acc_info) + .unwrap(); + let merkle_tree_unpacked = merkle_tree.load()?; + ( + seq, + next_index, + merkle_tree_unpacked.metadata.rollover_metadata.network_fee, + merkle_tree_unpacked.metadata.access_metadata.program_owner, + merkle_tree_acc_info.key(), + ) + } + BatchedMerkleTreeAccount::DISCRIMINATOR => { + let merkle_tree = &mut merkle_tree_acc_info.try_borrow_mut_data()?; + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree) + .map_err(ProgramError::from)?; + let account = merkle_tree.get_account(); + let seq = account.sequence_number + 1; + let next_index: u32 = account.next_index.try_into().unwrap(); - (seq, next_index) - }; + ( + seq, + next_index, + account.metadata.rollover_metadata.network_fee, + account.metadata.access_metadata.program_owner, + merkle_tree_acc_info.key(), + ) + } + BatchedQueueAccount::DISCRIMINATOR => { + let merkle_tree = &mut merkle_tree_acc_info.try_borrow_mut_data()?; + let merkle_tree = ZeroCopyBatchedQueueAccount::from_bytes_mut(merkle_tree) + .map_err(ProgramError::from)?; + let account = merkle_tree.get_account(); + let seq = u64::MAX; + let next_index: u32 = account.next_index.try_into().unwrap(); - let merkle_tree = - AccountLoader::<StateMerkleTreeAccount>::try_from(merkle_tree_acc_info).unwrap(); - let merkle_tree_unpacked = merkle_tree.load()?; + ( + seq, + next_index, + account.metadata.rollover_metadata.network_fee, + account.metadata.access_metadata.program_owner, + account.metadata.associated_merkle_tree, + ) + } + _ => { + return err!(crate::ErrorCode::AccountDiscriminatorMismatch); + } + } }; - let network_fee = if merkle_tree_unpacked.metadata.rollover_metadata.network_fee != 0 { - Some(merkle_tree_unpacked.metadata.rollover_metadata.network_fee) + let network_fee = if network_fee != 0 { + Some(network_fee) } else { None }; - if merkle_tree_unpacked.metadata.access_metadata.program_owner != Pubkey::default() { + if program_owner != Pubkey::default() { if let Some(invoking_program) = invoking_program { - if *invoking_program == merkle_tree_unpacked.metadata.access_metadata.program_owner { - return Ok((next_index, network_fee, seq)); + if *invoking_program == program_owner { + return Ok((next_index, network_fee, seq, merkle_tree_pubkey)); } } msg!( "invoking_program.key() {:?} == merkle_tree_unpacked.program_owner {:?}", invoking_program, - merkle_tree_unpacked.metadata.access_metadata.program_owner + program_owner ); return Err(SystemProgramError::InvalidMerkleTreeOwner.into()); } - Ok((next_index, network_fee, seq)) + Ok((next_index, network_fee, seq, merkle_tree_pubkey)) } +// TODO: extend to match batched trees pub fn check_program_owner_address_merkle_tree<'a, 'b: 'a>( merkle_tree_acc_info: &'b AccountInfo<'a>, invoking_program: &Option<Pubkey>, diff --git a/programs/system/src/lib.rs b/programs/system/src/lib.rs index a4ad3d6148..69d68a3293 100644 --- a/programs/system/src/lib.rs +++ b/programs/system/src/lib.rs @@ -21,6 +21,8 @@ solana_security_txt::security_txt!
{ policy: "https://github.com/Lightprotocol/light-protocol/blob/main/SECURITY.md", source_code: "https://github.com/Lightprotocol/light-protocol" } +use account_compression::{batched_merkle_tree::BatchedMerkleTreeAccount, StateMerkleTreeAccount}; +use anchor_lang::Discriminator; #[program] pub mod light_system_program { @@ -35,7 +37,17 @@ pub mod light_system_program { pub fn init_cpi_context_account(ctx: Context<InitializeCpiContextAccount>) -> Result<()> { // Check that Merkle tree is initialized. - ctx.accounts.associated_merkle_tree.load()?; + let data = ctx.accounts.associated_merkle_tree.data.borrow(); + + let mut discriminator_bytes = [0u8; 8]; + discriminator_bytes.copy_from_slice(&data[0..8]); + match discriminator_bytes { + StateMerkleTreeAccount::DISCRIMINATOR => Ok(()), + BatchedMerkleTreeAccount::DISCRIMINATOR => Ok(()), + _ => { + err!(anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch) + } + }?; ctx.accounts .cpi_context_account .init(ctx.accounts.associated_merkle_tree.key()); diff --git a/programs/system/src/sdk/invoke.rs b/programs/system/src/sdk/invoke.rs index 4ffb1b7622..adf1d7cf72 100644 --- a/programs/system/src/sdk/invoke.rs +++ b/programs/system/src/sdk/invoke.rs @@ -8,7 +8,8 @@ use solana_sdk::{ }; use super::compressed_account::{ - CompressedAccount, MerkleContext, PackedCompressedAccountWithMerkleContext, PackedMerkleContext, + CompressedAccount, MerkleContext, PackedCompressedAccountWithMerkleContext, + PackedMerkleContext, QueueIndex, }; use crate::{ invoke::{processor::CompressedProof, sol_compression::SOL_POOL_PDA_SEED}, @@ -29,7 +30,7 @@ pub fn create_invoke_instruction( output_compressed_accounts: &[CompressedAccount], merkle_context: &[MerkleContext], output_compressed_account_merkle_tree_pubkeys: &[Pubkey], - input_root_indices: &[u16], + input_root_indices: &[Option<u16>], new_address_params: &[NewAddressParams], proof: Option<CompressedProof>, compress_or_decompress_lamports: Option<u64>, @@ -85,7 +86,7 @@ pub fn create_invoke_instruction_data_and_remaining_accounts( new_address_params: &[NewAddressParams], merkle_context: &[MerkleContext], input_compressed_accounts: &[CompressedAccount], - input_root_indices: &[u16], + input_root_indices: &[Option<u16>], output_compressed_account_merkle_tree_pubkeys: &[Pubkey], output_compressed_accounts: &[CompressedAccount], proof: Option<CompressedProof>, @@ -113,6 +114,16 @@ pub fn create_invoke_instruction_data_and_remaining_accounts( index += 1; } }; + let root_index = if input_root_indices.len() > i { + input_root_indices[i] + } else { + None + }; + let queue_index = if root_index.is_none() { + Some(QueueIndex::default()) + } else { + None + }; _input_compressed_accounts.push(PackedCompressedAccountWithMerkleContext { compressed_account: input_compressed_accounts[i].clone(), merkle_context: PackedMerkleContext { @@ -121,10 +132,10 @@ .unwrap() as u8, nullifier_queue_pubkey_index: 0, leaf_index: context.leaf_index, - queue_index: None, + queue_index, }, read_only: false, - root_index: input_root_indices[i], + root_index: root_index.unwrap_or_default(), }); } @@ -274,7 +285,7 @@ mod test { let output_compressed_account_merkle_tree_pubkeys = vec![merkle_tree_pubkey, merkle_tree_pubkey_1]; - let input_root_indices = vec![0, 1]; + let input_root_indices = vec![Some(0), Some(1)]; let proof = CompressedProof { a: [0u8; 32], b: [1u8; 64], diff --git a/sdk/src/proof.rs b/sdk/src/proof.rs index 89999d80c9..3bd3a22b1a 100644 --- a/sdk/src/proof.rs +++ b/sdk/src/proof.rs @@ -38,6 +38,6 @@ pub struct CompressedProof
#[derive(Debug)] pub struct ProofRpcResult { pub proof: CompressedProof, - pub root_indices: Vec<u16>, + pub root_indices: Vec<Option<u16>>, pub address_root_indices: Vec<u16>, } diff --git a/test-programs/account-compression-test/Cargo.toml b/test-programs/account-compression-test/Cargo.toml index bc77dc5563..be902aaa66 100644 --- a/test-programs/account-compression-test/Cargo.toml +++ b/test-programs/account-compression-test/Cargo.toml @@ -49,3 +49,4 @@ serde_json = "1.0.114" solana-sdk = { workspace = true } thiserror = "1.0" memoffset = "0.9.1" +serial_test = "3.1.1" \ No newline at end of file diff --git a/test-programs/account-compression-test/tests/batched_merkle_tree_test.rs b/test-programs/account-compression-test/tests/batched_merkle_tree_test.rs new file mode 100644 index 0000000000..d33e95b652 --- /dev/null +++ b/test-programs/account-compression-test/tests/batched_merkle_tree_test.rs @@ -0,0 +1,919 @@ +#![cfg(feature = "test-sbf")] + +use account_compression::batched_merkle_tree::{ + get_merkle_tree_account_size, AppendBatchProofInputsIx, BatchProofInputsIx, + InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, + ZeroCopyBatchedMerkleTreeAccount, +}; +use account_compression::batched_queue::{ + assert_queue_zero_copy_inited, get_output_queue_account_size, BatchedQueueAccount, + ZeroCopyBatchedQueueAccount, +}; +use account_compression::errors::AccountCompressionErrorCode; +use account_compression::{assert_mt_zero_copy_inited, get_output_queue_account_default}; +use account_compression::{ + batched_merkle_tree::BatchedMerkleTreeAccount, InitStateTreeAccountsInstructionData, ID, +}; +use anchor_lang::prelude::AccountMeta; +use anchor_lang::{AnchorSerialize, InstructionData, ToAccountMetas}; +use light_prover_client::gnark::helpers::{spawn_prover, ProofType, ProverConfig}; +use light_prover_client::mock_batched_forester::{MockBatchedForester, MockTxEvent}; +use light_system_program::invoke::verify_state_proof::create_tx_hash_offchain; +use light_test_utils::test_env::NOOP_PROGRAM_ID; +use light_test_utils::{assert_rpc_error, create_account_instruction, RpcConnection, RpcError}; +use light_test_utils::{rpc::ProgramTestRpcConnection, AccountZeroCopy}; +use light_verifier::CompressedProof; +use serial_test::serial; +use solana_program_test::ProgramTest; +use solana_sdk::account::WritableAccount; +use solana_sdk::pubkey::Pubkey; +use solana_sdk::signature::Signature; +use solana_sdk::{ + instruction::Instruction, + signature::{Keypair, Signer}, +}; + +pub enum TestMode { + InvalidMerkleTree, + InvalidOutputQueue, + Functional, + InvalidRegisteredProgram, +} + +/// 1. init accounts - Functional: initialize a batched Merkle tree and output queue +/// 2. append leaves - Failing: Invalid signer +/// 3. append leaves - Functional insert 10 leaves into output queue +/// 4. batch append - Failing: Invalid Signer +/// 5. batch append - Failing: Invalid Output queue - association +/// 6. batch append - Failing: append Invalid Merkle tree +/// 7. batch append - Failing: Invalid Registered Program +/// 8. batch append - Functional: batch append 10 leaves +/// 9. insert_into_queue - Failing Invalid authority (input queue) +/// 10. insert_into_queue - Failing Invalid Merkle tree - association +/// 11. insert_into_queue - Functional insert 10 leaves into input queue +/// 12. batch nullify - Failing Invalid authority +/// 13. batch nullify - Failing Invalid merkle tree +/// 14. batch nullify - Failing Invalid registered program +/// 15.
batch nullify - Functional batch nullify 10 leaves +#[serial] +#[tokio::test] +async fn test_batch_state_merkle_tree() { + let mut program_test = ProgramTest::default(); + program_test.add_program("account_compression", ID, None); + program_test.add_program( + "spl_noop", + Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY), + None, + ); + let merkle_tree_keypair = Keypair::new(); + let merkle_tree_pubkey = merkle_tree_keypair.pubkey(); + let nullifier_queue_keypair = Keypair::new(); + let output_queue_pubkey = nullifier_queue_keypair.pubkey(); + program_test.set_compute_max_units(1_400_000u64); + let context = program_test.start_with_context().await; + let mut context = ProgramTestRpcConnection { context }; + let payer_pubkey = context.get_payer().pubkey(); + let payer = context.get_payer().insecure_clone(); + // 1. Functional initialize a batched Merkle tree and output queue + { + let params = InitStateTreeAccountsInstructionData::test_default(); + let queue_account_size = get_output_queue_account_size( + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.output_queue_num_batches, + ); + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + let queue_rent = context + .get_minimum_balance_for_rent_exemption(queue_account_size) + .await + .unwrap(); + let create_queue_account_ix = create_account_instruction( + &payer_pubkey, + queue_account_size, + queue_rent, + &ID, + Some(&nullifier_queue_keypair), + ); + let mt_rent = context + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let additional_bytes_rent = context + .get_minimum_balance_for_rent_exemption(params.additional_bytes as usize) + .await + .unwrap(); + let total_rent = queue_rent + mt_rent + additional_bytes_rent; + let create_mt_account_ix = create_account_instruction( + &payer_pubkey, + mt_account_size, + mt_rent, + &ID, + Some(&merkle_tree_keypair), + ); + + let instruction = + account_compression::instruction::InitializeBatchedStateMerkleTree { params }; + let accounts = account_compression::accounts::InitializeBatchedStateMerkleTreeAndQueue { + authority: context.get_payer().pubkey(), + merkle_tree: merkle_tree_pubkey, + queue: output_queue_pubkey, + registered_program_pda: None, + }; + + let instruction = Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction.data(), + }; + context + .create_and_send_transaction( + &[create_queue_account_ix, create_mt_account_ix, instruction], + &payer_pubkey, + &[&payer, &nullifier_queue_keypair, &merkle_tree_keypair], + ) + .await + .unwrap(); + let mut merkle_tree = + AccountZeroCopy::::new(&mut context, merkle_tree_pubkey) + .await; + + let mut queue = + AccountZeroCopy::::new(&mut context, output_queue_pubkey).await; + let owner = context.get_payer().pubkey(); + + let ref_mt_account = BatchedMerkleTreeAccount::get_state_tree_default( + owner, + None, + None, + params.rollover_threshold, + 0, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + output_queue_pubkey, + params.height, + params.input_queue_num_batches, + ); + + assert_mt_zero_copy_inited( + &mut merkle_tree.account.data.as_mut_slice(), + ref_mt_account, + params.bloom_filter_num_iters, 
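+            // Asserts that the initialized on-chain account bytes match the locally built reference account.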
+ ); + + let ref_output_queue_account = get_output_queue_account_default( + owner, + None, + None, + params.rollover_threshold, + 0, + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.additional_bytes, + total_rent, + merkle_tree_pubkey, + params.height, + params.output_queue_num_batches, + ); + assert_queue_zero_copy_inited( + &mut queue.account.data.as_mut_slice(), + ref_output_queue_account, + 0, + ); + } + let mut mock_indexer = MockBatchedForester::<26>::default(); + let invalid_payer = Keypair::new(); + context + .airdrop_lamports(&invalid_payer.pubkey(), 1_000_000_000) + .await + .unwrap(); + // 2. Failing: Invalid signer (insert into output queue) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_insert_into_output_queue( + &mut context, + &mut mock_indexer, + output_queue_pubkey, + &invalid_payer, + &mut 0, + 5, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidAuthority.into(), + ) + .unwrap(); + } + // 3. Functional: insert 10 leaves into output queue + let num_of_leaves = 10; + let num_tx = 5; + let mut counter = 0; + for _ in 0..num_tx { + perform_insert_into_output_queue( + &mut context, + &mut mock_indexer, + output_queue_pubkey, + &payer, + &mut counter, + num_of_leaves, + ) + .await + .unwrap(); + } + spawn_prover( + true, + ProverConfig { + run_mode: None, + circuits: vec![ + ProofType::BatchAppendWithProofsTest, + ProofType::BatchUpdateTest, + ], + }, + ) + .await; + + // 4. Failing Invalid Signer (batch append) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_batch_append( + &mut context, + &mut mock_indexer, + merkle_tree_pubkey, + output_queue_pubkey, + &invalid_payer, + TestMode::Functional, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidAuthority.into(), + ) + .unwrap(); + } + // 5. Failing Invalid Output queue - association (batch append) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_batch_append( + &mut context, + &mut mock_indexer, + merkle_tree_pubkey, + output_queue_pubkey, + &payer, + TestMode::InvalidOutputQueue, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(), + ) + .unwrap(); + } + // 6. Failing append Invalid Merkle tree (batch append) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_batch_append( + &mut context, + &mut mock_indexer, + merkle_tree_pubkey, + output_queue_pubkey, + &payer, + TestMode::InvalidMerkleTree, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidDiscriminator.into(), + ) + .unwrap(); + } + // 7. Failing Invalid Registered Program (batch append) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_batch_append( + &mut context, + &mut mock_indexer, + merkle_tree_pubkey, + output_queue_pubkey, + &payer, + TestMode::InvalidRegisteredProgram, + ) + .await; + assert_rpc_error( + result, + 0, + anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch.into(), + ) + .unwrap(); + } + + // 8. Functional batch append 10 leaves + for _ in 0..num_tx { + perform_batch_append( + &mut context, + &mut mock_indexer, + merkle_tree_pubkey, + output_queue_pubkey, + &payer, + TestMode::Functional, + ) + .await + .unwrap(); + } + + // 9. 
Failing Invalid authority (insert into nullifier queue) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_insert_into_input_queue( + &mut context, + &mut mock_indexer, + &mut 0, + 10, + output_queue_pubkey, + merkle_tree_pubkey, + &invalid_payer, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidAuthority.into(), + ) + .unwrap(); + } + + // 10. Failing Invalid Merkle tree - association (insert into nullifier queue) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_insert_into_input_queue( + &mut context, + &mut mock_indexer, + &mut 0, + 10, + output_queue_pubkey, + output_queue_pubkey, + &invalid_payer, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::MerkleTreeAndQueueNotAssociated.into(), + ) + .unwrap(); + } + // 11. Functional insert 10 leaves into input queue + let num_of_leaves = 10; + let num_tx = 5; + let mut counter = 0; + for _ in 0..num_tx { + perform_insert_into_input_queue( + &mut context, + &mut mock_indexer, + &mut counter, + num_of_leaves, + output_queue_pubkey, + merkle_tree_pubkey, + &payer, + ) + .await + .unwrap(); + } + // 12. Failing Invalid authority (batch nullify) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_batch_nullify( + &mut context, + &mut mock_indexer, + merkle_tree_pubkey, + output_queue_pubkey, + &invalid_payer, + TestMode::Functional, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidAuthority.into(), + ) + .unwrap(); + } + // 13. Failing Invalid merkle tree (batch nullify) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_batch_nullify( + &mut context, + &mut mock_indexer, + merkle_tree_pubkey, + output_queue_pubkey, + &payer, + TestMode::InvalidMerkleTree, + ) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InvalidDiscriminator.into(), + ) + .unwrap(); + } + // 14. Failing Invalid registered program (batch nullify) + { + let mut mock_indexer = mock_indexer.clone(); + let result = perform_batch_nullify( + &mut context, + &mut mock_indexer, + merkle_tree_pubkey, + output_queue_pubkey, + &invalid_payer, + TestMode::InvalidRegisteredProgram, + ) + .await; + assert_rpc_error( + result, + 0, + anchor_lang::error::ErrorCode::AccountDiscriminatorMismatch.into(), + ) + .unwrap(); + } + // 15. 
Functional batch nullify 10 leaves + for i in 0..num_tx { + println!("nullify leaves tx: {:?}", i); + perform_batch_nullify( + &mut context, + &mut mock_indexer, + merkle_tree_pubkey, + output_queue_pubkey, + &payer, + TestMode::Functional, + ) + .await + .unwrap(); + } +} + +pub async fn perform_insert_into_output_queue( + context: &mut ProgramTestRpcConnection, + mock_indexer: &mut MockBatchedForester<26>, + output_queue_pubkey: Pubkey, + payer: &Keypair, + counter: &mut u32, + num_of_leaves: u32, +) -> Result { + let mut leaves = vec![]; + for _ in 0..num_of_leaves { + let mut leaf = [0u8; 32]; + leaf[31] = *counter as u8; + leaves.push((0, leaf)); + mock_indexer.output_queue_leaves.push(leaf); + mock_indexer.tx_events.push(MockTxEvent { + tx_hash: [0u8; 32], + inputs: vec![], + outputs: vec![leaf], + }); + *counter += 1; + } + + let instruction = account_compression::instruction::AppendLeavesToMerkleTrees { leaves }; + let accounts = account_compression::accounts::InsertIntoQueues { + authority: payer.pubkey(), + fee_payer: payer.pubkey(), + registered_program_pda: None, + system_program: Pubkey::default(), + }; + let accounts = vec![ + accounts.to_account_metas(Some(true)), + vec![AccountMeta { + pubkey: output_queue_pubkey, + is_signer: false, + is_writable: true, + }], + ] + .concat(); + + let instruction = Instruction { + program_id: ID, + accounts, + data: instruction.data(), + }; + context + .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await +} +pub async fn perform_batch_append( + context: &mut ProgramTestRpcConnection, + mock_indexer: &mut MockBatchedForester<26>, + merkle_tree_pubkey: Pubkey, + output_queue_pubkey: Pubkey, + payer: &Keypair, + mode: TestMode, +) -> Result { + let merkle_tree_account = &mut context + .get_account(merkle_tree_pubkey) + .await + .unwrap() + .unwrap(); + let output_queue_account = &mut context + .get_account(output_queue_pubkey) + .await + .unwrap() + .unwrap(); + let mut mt_account_data = merkle_tree_account.data_as_mut_slice(); + let mut output_queue_account_data = output_queue_account.data_as_mut_slice(); + let instruction_data = create_append_batch_ix_data( + mock_indexer, + &mut mt_account_data, + &mut output_queue_account_data, + ) + .await; + let mut data = Vec::new(); + instruction_data.serialize(&mut data).unwrap(); + let (merkle_tree_pubkey, output_queue_pubkey, registered_program_pda) = match mode { + TestMode::Functional => (merkle_tree_pubkey, output_queue_pubkey, None), + TestMode::InvalidOutputQueue => (merkle_tree_pubkey, Pubkey::new_unique(), None), + TestMode::InvalidMerkleTree => (output_queue_pubkey, output_queue_pubkey, None), + TestMode::InvalidRegisteredProgram => ( + merkle_tree_pubkey, + output_queue_pubkey, + Some(output_queue_pubkey), + ), + }; + + let instruction = account_compression::instruction::BatchAppend { data }; + let accounts = account_compression::accounts::BatchAppend { + authority: payer.pubkey(), + registered_program_pda, + log_wrapper: NOOP_PROGRAM_ID, + merkle_tree: merkle_tree_pubkey, + output_queue: output_queue_pubkey, + }; + + let instruction = Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction.data(), + }; + context + .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await +} +pub async fn perform_batch_nullify( + context: &mut ProgramTestRpcConnection, + mock_indexer: &mut MockBatchedForester<26>, + merkle_tree_pubkey: Pubkey, + output_queue_pubkey: Pubkey, + payer: &Keypair, + mode: 
TestMode, +) -> Result { + let merkle_tree_account = &mut context + .get_account(merkle_tree_pubkey) + .await + .unwrap() + .unwrap(); + let mut mt_account_data = merkle_tree_account.data_as_mut_slice(); + let instruction_data = create_nullify_batch_ix_data(mock_indexer, &mut mt_account_data).await; + let mut data = Vec::new(); + instruction_data.serialize(&mut data).unwrap(); + let (merkle_tree_pubkey, registered_program_pda) = match mode { + TestMode::Functional => (merkle_tree_pubkey, None), + TestMode::InvalidMerkleTree => (output_queue_pubkey, None), + TestMode::InvalidRegisteredProgram => (merkle_tree_pubkey, Some(output_queue_pubkey)), + _ => panic!("Invalid mode"), + }; + let instruction = account_compression::instruction::BatchNullify { data }; + let accounts = account_compression::accounts::BatchNullify { + authority: payer.pubkey(), + registered_program_pda, + log_wrapper: NOOP_PROGRAM_ID, + merkle_tree: merkle_tree_pubkey, + }; + + let instruction = Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction.data(), + }; + context + .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await +} + +pub async fn perform_insert_into_input_queue( + context: &mut ProgramTestRpcConnection, + mock_indexer: &mut MockBatchedForester<26>, + counter: &mut u32, + num_of_leaves: u32, + output_queue_pubkey: Pubkey, + merkle_tree_pubkey: Pubkey, + payer: &Keypair, +) -> Result { + let mut leaves = vec![]; + let leaf_indices = (counter.clone()..counter.clone() + num_of_leaves).collect::>(); + for _ in 0..num_of_leaves { + let mut leaf = [0u8; 32]; + leaf[31] = *counter as u8; + leaves.push(leaf); + mock_indexer + .input_queue_leaves + .push((leaf, *counter as usize)); + + *counter += 1; + } + let slot = context.get_slot().await.unwrap(); + let tx_hash = create_tx_hash_offchain(&leaves, &vec![], slot); + mock_indexer.tx_events.push(MockTxEvent { + tx_hash, + inputs: leaves.clone(), + outputs: vec![], + }); + + let instruction = account_compression::instruction::InsertIntoNullifierQueues { + nullifiers: leaves, + leaf_indices, + tx_hash: Some(tx_hash), + }; + let accounts = account_compression::accounts::InsertIntoQueues { + authority: payer.pubkey(), + fee_payer: payer.pubkey(), + registered_program_pda: None, + system_program: Pubkey::default(), + }; + let mut account_metas = Vec::new(); + for _ in 0..num_of_leaves { + account_metas.push(AccountMeta { + pubkey: output_queue_pubkey, + is_signer: false, + is_writable: true, + }); + account_metas.push(AccountMeta { + pubkey: merkle_tree_pubkey, + is_signer: false, + is_writable: true, + }); + } + let accounts = vec![accounts.to_account_metas(Some(true)), account_metas].concat(); + + let instruction = Instruction { + program_id: ID, + accounts, + data: instruction.data(), + }; + context + .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await +} + +pub async fn create_append_batch_ix_data( + mock_indexer: &mut MockBatchedForester<26>, + mt_account_data: &mut [u8], + output_queue_account_data: &mut [u8], +) -> InstructionDataBatchAppendInputs { + let zero_copy_account = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(mt_account_data).unwrap(); + let output_zero_copy_account = + ZeroCopyBatchedQueueAccount::from_bytes_mut(output_queue_account_data).unwrap(); + + let next_index = zero_copy_account.get_account().next_index; + let next_full_batch = output_zero_copy_account + .get_account() + .queue + .next_full_batch_index; + let batch = 
output_zero_copy_account + .batches + .get(next_full_batch as usize) + .unwrap(); + let leaves_hashchain = output_zero_copy_account + .hashchain_store + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_append_proof( + next_index as usize, + batch.get_num_inserted_zkps() as u32, + batch.zkp_batch_size as u32, + *leaves_hashchain, + batch.get_num_zkp_batches() as u32, + ) + .await + .unwrap(); + + InstructionDataBatchAppendInputs { + public_inputs: AppendBatchProofInputsIx { new_root }, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + } +} + +pub async fn create_nullify_batch_ix_data( + mock_indexer: &mut MockBatchedForester<26>, + account_data: &mut [u8], +) -> InstructionDataBatchNullifyInputs { + let zero_copy_account: ZeroCopyBatchedMerkleTreeAccount = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(account_data).unwrap(); + println!("batches {:?}", zero_copy_account.batches); + + let old_root_index = zero_copy_account.root_history.last_index(); + let next_full_batch = zero_copy_account.get_account().queue.next_full_batch_index; + let batch = zero_copy_account + .batches + .get(next_full_batch as usize) + .unwrap(); + println!( + "zero_copy_account + .hashchain_store {:?}", + zero_copy_account.hashchain_store + ); + println!( + "hashchain store len {:?}", + zero_copy_account.hashchain_store.len() + ); + println!( + "batch.get_num_inserted_zkps() as usize {:?}", + batch.get_num_inserted_zkps() as usize + ); + let leaves_hashchain = zero_copy_account + .hashchain_store + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_update_proof( + zero_copy_account.get_account().queue.zkp_batch_size as u32, + *leaves_hashchain, + ) + .await + .unwrap(); + let instruction_data = InstructionDataBatchNullifyInputs { + public_inputs: BatchProofInputsIx { + new_root, + old_root_index: old_root_index as u16, + }, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + instruction_data +} + +#[serial] +#[tokio::test] +async fn test_init_batch_state_merkle_trees() { + let mut program_test = ProgramTest::default(); + program_test.add_program("account_compression", ID, None); + program_test.add_program( + "spl_noop", + Pubkey::new_from_array(account_compression::utils::constants::NOOP_PUBKEY), + None, + ); + program_test.set_compute_max_units(1_400_000u64); + let context = program_test.start_with_context().await; + let mut context = ProgramTestRpcConnection { context }; + + let payer_pubkey = context.get_payer().pubkey(); + let payer = context.get_payer().insecure_clone(); + let params = InitStateTreeAccountsInstructionData::test_default(); + let e2e_test_params = InitStateTreeAccountsInstructionData::e2e_test_default(); + let default_params = InitStateTreeAccountsInstructionData::default(); + let param_vec = vec![params, e2e_test_params, default_params]; + for params in param_vec.iter() { + println!("Init new mt with params {:?}", params); + let merkle_tree_keypair = Keypair::new(); + let merkle_tree_pubkey = merkle_tree_keypair.pubkey(); + let nullifier_queue_keypair = Keypair::new(); + let output_queue_pubkey = nullifier_queue_keypair.pubkey(); + let queue_account_size = get_output_queue_account_size( + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.output_queue_num_batches, + ); + let 
mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + let queue_rent = context + .get_minimum_balance_for_rent_exemption(queue_account_size) + .await + .unwrap(); + let create_queue_account_ix = create_account_instruction( + &payer_pubkey, + queue_account_size, + queue_rent, + &ID, + Some(&nullifier_queue_keypair), + ); + let mt_rent = context + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let additional_bytes_rent = context + .get_minimum_balance_for_rent_exemption(params.additional_bytes as usize) + .await + .unwrap(); + let total_rent = queue_rent + mt_rent + additional_bytes_rent; + let create_mt_account_ix = create_account_instruction( + &payer_pubkey, + mt_account_size, + mt_rent, + &ID, + Some(&merkle_tree_keypair), + ); + + let instruction = + account_compression::instruction::InitializeBatchedStateMerkleTree { params: *params }; + let accounts = account_compression::accounts::InitializeBatchedStateMerkleTreeAndQueue { + authority: context.get_payer().pubkey(), + merkle_tree: merkle_tree_pubkey, + queue: output_queue_pubkey, + registered_program_pda: None, + }; + + let instruction = Instruction { + program_id: ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction.data(), + }; + context + .create_and_send_transaction( + &[create_queue_account_ix, create_mt_account_ix, instruction], + &payer_pubkey, + &[&payer, &nullifier_queue_keypair, &merkle_tree_keypair], + ) + .await + .unwrap(); + let merkle_tree = + AccountZeroCopy::::new(&mut context, merkle_tree_pubkey) + .await; + + let mut queue = + AccountZeroCopy::::new(&mut context, output_queue_pubkey).await; + let owner = context.get_payer().pubkey(); + + let ref_mt_account = BatchedMerkleTreeAccount::get_state_tree_default( + owner, + None, + None, + params.rollover_threshold, + 0, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + output_queue_pubkey, + params.height, + params.input_queue_num_batches, + ); + + let mut tree_data = merkle_tree.account.data.clone(); + assert_mt_zero_copy_inited( + &mut tree_data.as_mut_slice(), + ref_mt_account, + params.bloom_filter_num_iters, + ); + + let ref_output_queue_account = get_output_queue_account_default( + owner, + None, + None, + params.rollover_threshold, + 0, + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.additional_bytes, + total_rent, + merkle_tree_pubkey, + params.height, + params.output_queue_num_batches, + ); + assert_queue_zero_copy_inited( + &mut queue.account.data.as_mut_slice(), + ref_output_queue_account, + 0, + ); + } +} diff --git a/test-programs/account-compression-test/tests/merkle_tree_tests.rs b/test-programs/account-compression-test/tests/merkle_tree_tests.rs index 9486f3d2f8..1d3102687e 100644 --- a/test-programs/account-compression-test/tests/merkle_tree_tests.rs +++ b/test-programs/account-compression-test/tests/merkle_tree_tests.rs @@ -1358,6 +1358,8 @@ async fn insert_into_single_nullifier_queue( ) -> Result { let instruction_data = account_compression::instruction::InsertIntoNullifierQueues { nullifiers: elements.to_vec(), + leaf_indices: Vec::new(), + tx_hash: None, }; let accounts = account_compression::accounts::InsertIntoQueues { fee_payer: fee_payer.pubkey(), @@ 
-1402,6 +1404,8 @@ async fn insert_into_nullifier_queues( ) -> Result { let instruction_data = account_compression::instruction::InsertIntoNullifierQueues { nullifiers: elements.to_vec(), + leaf_indices: Vec::new(), + tx_hash: None, }; let accounts = account_compression::accounts::InsertIntoQueues { fee_payer: fee_payer.pubkey(), diff --git a/test-programs/compressed-token-test/Cargo.toml b/test-programs/compressed-token-test/Cargo.toml index 6bc44c6441..86c3181794 100644 --- a/test-programs/compressed-token-test/Cargo.toml +++ b/test-programs/compressed-token-test/Cargo.toml @@ -41,3 +41,4 @@ num-traits = "0.2.19" spl-token = { workspace = true } anchor-spl = { workspace = true } rand = "0.8" +serial_test = { workspace = true } \ No newline at end of file diff --git a/test-programs/compressed-token-test/tests/test.rs b/test-programs/compressed-token-test/tests/test.rs index a158a6fdd4..18e9348e9f 100644 --- a/test-programs/compressed-token-test/tests/test.rs +++ b/test-programs/compressed-token-test/tests/test.rs @@ -45,6 +45,7 @@ use light_test_utils::{ }; use light_verifier::VerifierError; use rand::Rng; +use serial_test::serial; use solana_sdk::{ instruction::{Instruction, InstructionError}, pubkey::Pubkey, @@ -1296,8 +1297,8 @@ async fn test_approve_failing() { .collect::>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + Some(input_compressed_account_hashes), + Some(input_merkle_tree_pubkeys), None, None, &mut rpc, @@ -1404,7 +1405,7 @@ async fn test_approve_failing() { let invalid_proof = CompressedProof { a: [0; 32], b: [0; 64], - c: [0; 32], + c: [1; 32], }; let inputs = CreateApproveInstructionInputs { @@ -1721,8 +1722,8 @@ async fn test_revoke_failing() { .collect::>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + Some(input_compressed_account_hashes), + Some(input_merkle_tree_pubkeys), None, None, &mut rpc, @@ -1731,7 +1732,7 @@ async fn test_revoke_failing() { // 1. Invalid root indices. 
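+        // Root indices are Option<u16> now: Some(i) selects entry i of the tree's root history,
+        // while None proves inclusion by index via the output queue (see sdk/invoke.rs above).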
{ - let invalid_root_indices = vec![0]; + let invalid_root_indices = vec![Some(0)]; let inputs = CreateRevokeInstructionInputs { fee_payer: rpc.get_payer().pubkey(), @@ -2427,8 +2428,8 @@ async fn test_failing_freeze() { .collect::>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + Some(input_compressed_account_hashes), + Some(input_merkle_tree_pubkeys), None, None, &mut rpc, @@ -2512,7 +2513,7 @@ async fn test_failing_freeze() { let invalid_proof = CompressedProof { a: [0; 32], b: [0; 64], - c: [0; 32], + c: [1; 32], }; let inputs = CreateInstructionInputs { @@ -2574,8 +2575,8 @@ async fn test_failing_freeze() { .collect::>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + Some(input_compressed_account_hashes), + Some(input_merkle_tree_pubkeys), None, None, &mut rpc, @@ -2691,8 +2692,8 @@ async fn test_failing_thaw() { .collect::>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + Some(input_compressed_account_hashes), + Some(input_merkle_tree_pubkeys), None, None, &mut rpc, @@ -2776,7 +2777,7 @@ async fn test_failing_thaw() { let invalid_proof = CompressedProof { a: [0; 32], b: [0; 64], - c: [0; 32], + c: [1; 32], }; let inputs = CreateInstructionInputs { @@ -2829,8 +2830,8 @@ async fn test_failing_thaw() { .collect::>(); let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + Some(input_compressed_account_hashes), + Some(input_merkle_tree_pubkeys), None, None, &mut rpc, @@ -3183,8 +3184,8 @@ pub async fn failing_compress_decompress( let (root_indices, proof) = if !input_compressed_account_hashes.is_empty() { let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + Some(input_compressed_account_hashes), + Some(input_merkle_tree_pubkeys), None, None, rpc, @@ -3313,10 +3314,12 @@ async fn test_invalid_inputs() { .clone()]; let proof_rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&[input_compressed_accounts[0].hash().unwrap()]), - Some(&[input_compressed_accounts[0] - .merkle_context - .merkle_tree_pubkey]), + Some(vec![input_compressed_accounts[0].hash().unwrap()]), + Some(vec![ + input_compressed_accounts[0] + .merkle_context + .merkle_tree_pubkey, + ]), None, None, &mut rpc, @@ -3555,7 +3558,8 @@ async fn test_invalid_inputs() { // Test 10: invalid root indices { let mut root_indices = proof_rpc_result.root_indices.clone(); - root_indices[0] += 1; + let root_index = root_indices[0].as_mut().unwrap(); + *root_index += 1; let res = perform_transfer_failing_test( &mut rpc, change_out_compressed_account_0, @@ -3627,7 +3631,7 @@ async fn perform_transfer_failing_test( nullifier_queue_pubkey: &Pubkey, payer: &Keypair, proof: &Option, - root_indices: &[u16], + root_indices: &[Option], input_compressed_accounts: &[CompressedAccountWithMerkleContext], invalid_mint: bool, ) -> Result { @@ -3688,3 +3692,34 @@ async fn perform_transfer_failing_test( ); rpc.process_transaction(transaction).await } + +#[serial] +#[tokio::test] +async fn mint_with_batched_tree() { + let (mut rpc, env) = setup_test_programs_with_accounts(None).await; + let payer = 
rpc.get_payer().insecure_clone(); + let merkle_tree_pubkey = env.batched_output_queue; + let mut test_indexer = + TestIndexer::::init_from_env(&payer, &env, None).await; + let sender = Keypair::new(); + airdrop_lamports(&mut rpc, &sender.pubkey(), 1_000_000_000) + .await + .unwrap(); + let delegate = Keypair::new(); + airdrop_lamports(&mut rpc, &delegate.pubkey(), 1_000_000_000) + .await + .unwrap(); + let mint = create_mint_helper(&mut rpc, &payer).await; + let amount = 10000u64; + let num_recipients = 25; + mint_tokens_helper( + &mut rpc, + &mut test_indexer, + &merkle_tree_pubkey, + &payer, + &mint, + vec![amount; num_recipients], + vec![sender.pubkey(); num_recipients], + ) + .await; +} diff --git a/test-programs/e2e-test/tests/test.rs b/test-programs/e2e-test/tests/test.rs index 281f2f22d9..a581fac681 100644 --- a/test-programs/e2e-test/tests/test.rs +++ b/test-programs/e2e-test/tests/test.rs @@ -1,10 +1,15 @@ #![cfg(feature = "test-sbf")] +use account_compression::InitStateTreeAccountsInstructionData; +use light_prover_client::gnark::helpers::{ProofType, ProverConfig}; use light_registry::protocol_config::state::ProtocolConfig; use light_test_utils::e2e_test_env::{E2ETestEnv, GeneralActionConfig, KeypairActionConfig}; use light_test_utils::indexer::TestIndexer; use light_test_utils::rpc::ProgramTestRpcConnection; -use light_test_utils::test_env::setup_test_programs_with_accounts_with_protocol_config; +use light_test_utils::test_env::{ + setup_test_programs_with_accounts_with_protocol_config, + setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params, +}; #[tokio::test] async fn test_10_all() { @@ -16,23 +21,44 @@ async fn test_10_all() { report_work_phase_length: 100, ..ProtocolConfig::default() }; + let params = InitStateTreeAccountsInstructionData::e2e_test_default(); + let (rpc, env_accounts) = - setup_test_programs_with_accounts_with_protocol_config(None, protocol_config, true).await; + setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params( + None, + protocol_config, + true, + params, + ) + .await; let indexer: TestIndexer = TestIndexer::init_from_env( &env_accounts.forester.insecure_clone(), &env_accounts, - Some(KeypairActionConfig::all_default().prover_config()), + Some(ProverConfig { + run_mode: None, + circuits: vec![ + ProofType::Inclusion, + ProofType::NonInclusion, + ProofType::BatchUpdateTest, + ProofType::BatchAppendWithProofsTest, + ], + }), ) .await; - + let mut config = KeypairActionConfig::test_default(); + config.fee_assert = false; + let mut general_config = GeneralActionConfig::default(); + general_config.rollover = None; + general_config.create_address_mt = None; + general_config.create_state_mt = None; let mut env = E2ETestEnv::>::new( rpc, indexer, &env_accounts, - KeypairActionConfig::all_default(), - GeneralActionConfig::default(), + config, + general_config, 10, None, ) @@ -42,7 +68,7 @@ async fn test_10_all() { } // cargo test-sbf -p e2e-test -- --nocapture --ignored --test test_10000_all > output.txt 2>&1 -#[ignore] +#[ignore = "Not maintained for batched trees."] #[tokio::test] async fn test_10000_all() { let protocol_config = ProtocolConfig { @@ -59,7 +85,15 @@ async fn test_10000_all() { let indexer: TestIndexer = TestIndexer::init_from_env( &env_accounts.forester.insecure_clone(), &env_accounts, - Some(KeypairActionConfig::all_default().prover_config()), + Some(ProverConfig { + run_mode: None, + circuits: vec![ + ProofType::Inclusion, + ProofType::NonInclusion, + ProofType::BatchUpdateTest, + 
ProofType::BatchUpdateTest, + ], + }), ) .await; diff --git a/test-programs/registry-test/Cargo.toml b/test-programs/registry-test/Cargo.toml index 52538a72f5..fec490b2ad 100644 --- a/test-programs/registry-test/Cargo.toml +++ b/test-programs/registry-test/Cargo.toml @@ -44,3 +44,4 @@ light-verifier = {path = "../../circuit-lib/verifier"} solana-cli-output = { workspace = true } serde_json = "1.0.114" solana-sdk = { workspace = true } +serial_test = { workspace = true } \ No newline at end of file diff --git a/test-programs/registry-test/tests/tests.rs b/test-programs/registry-test/tests/tests.rs index 1833f00565..1f526ee734 100644 --- a/test-programs/registry-test/tests/tests.rs +++ b/test-programs/registry-test/tests/tests.rs @@ -1,13 +1,16 @@ #![cfg(feature = "test-sbf")] +use account_compression::batched_merkle_tree::ZeroCopyBatchedMerkleTreeAccount; use account_compression::{ - AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig, + AddressMerkleTreeConfig, AddressQueueConfig, InitStateTreeAccountsInstructionData, + NullifierQueueConfig, StateMerkleTreeConfig, }; use anchor_lang::{InstructionData, ToAccountMetas}; use forester_utils::forester_epoch::get_epoch_phases; use light_registry::account_compression_cpi::sdk::{ - create_nullify_instruction, create_update_address_merkle_tree_instruction, - CreateNullifyInstructionInputs, UpdateAddressMerkleTreeInstructionInputs, + create_batch_append_instruction, create_batch_nullify_instruction, create_nullify_instruction, + create_update_address_merkle_tree_instruction, CreateNullifyInstructionInputs, + UpdateAddressMerkleTreeInstructionInputs, }; use light_registry::errors::RegistryError; use light_registry::protocol_config::state::{ProtocolConfig, ProtocolConfigPda}; @@ -24,14 +27,18 @@ use light_test_utils::assert_epoch::{ assert_epoch_pda, assert_finalized_epoch_registration, assert_registered_forester_pda, assert_report_work, fetch_epoch_and_forester_pdas, }; -use light_test_utils::e2e_test_env::init_program_test_env; +use light_test_utils::e2e_test_env::{init_program_test_env, init_program_test_env_forester}; use light_test_utils::rpc::ProgramTestRpcConnection; +use light_test_utils::test_batch_forester::{ + create_append_batch_ix_data, perform_batch_append, perform_batch_nullify, +}; use light_test_utils::test_env::{ create_address_merkle_tree_and_queue_account, create_state_merkle_tree_and_queue_account, deregister_program_with_registry_program, initialize_new_group, register_program_with_registry_program, setup_accounts, setup_test_programs, - setup_test_programs_with_accounts_with_protocol_config, EnvAccountKeypairs, - GROUP_PDA_SEED_TEST_KEYPAIR, OLD_REGISTRY_ID_TEST_KEYPAIR, + setup_test_programs_with_accounts_with_protocol_config, + setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params, + EnvAccountKeypairs, GROUP_PDA_SEED_TEST_KEYPAIR, OLD_REGISTRY_ID_TEST_KEYPAIR, }; use light_test_utils::test_env::{get_test_env_accounts, setup_test_programs_with_accounts}; use light_test_utils::test_forester::{empty_address_queue_test, nullify_compressed_accounts}; @@ -40,6 +47,7 @@ use light_test_utils::{ create_rollover_state_merkle_tree_instructions, register_test_forester, update_test_forester, Epoch, RpcConnection, SolanaRpcConnection, SolanaRpcUrl, TreeAccounts, TreeType, }; +use serial_test::serial; use solana_sdk::{ instruction::Instruction, native_token::LAMPORTS_PER_SOL, @@ -489,6 +497,7 @@ async fn test_initialize_protocol_config() { } } +#[serial] #[tokio::test] async fn 
test_custom_forester() { let (mut rpc, env) = setup_test_programs_with_accounts_with_protocol_config( @@ -520,6 +529,7 @@ async fn test_custom_forester() { &cpi_context_keypair, None, Some(unregistered_forester_keypair.pubkey()), + 1, ) .await; @@ -565,10 +575,146 @@ async fn test_custom_forester() { .unwrap(); } } + +#[serial] +#[tokio::test] +async fn test_custom_forester_batched() { + let devnet = false; + let tree_params = if devnet { + InitStateTreeAccountsInstructionData::default() + } else { + InitStateTreeAccountsInstructionData::test_default() + }; + + let (mut rpc, env) = + setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params( + None, + ProtocolConfig::default(), + true, + tree_params, + ) + .await; + + { + let mut instruction_data = None; + let unregistered_forester_keypair = Keypair::new(); + rpc.airdrop_lamports(&unregistered_forester_keypair.pubkey(), 1_000_000_000) + .await + .unwrap(); + let merkle_tree_keypair = Keypair::new(); + let nullifier_queue_keypair = Keypair::new(); + let cpi_context_keypair = Keypair::new(); + // create work 1 item in address and nullifier queue each + let (mut state_merkle_tree_bundle, _, mut rpc) = { + let mut e2e_env = if devnet { + let mut e2e_env = init_program_test_env_forester(rpc, &env).await; + e2e_env.keypair_action_config.fee_assert = false; + e2e_env + } else { + init_program_test_env(rpc, &env).await + }; + e2e_env.indexer.state_merkle_trees.clear(); + // add state merkle tree to the indexer + e2e_env + .indexer + .add_state_merkle_tree( + &mut e2e_env.rpc, + &merkle_tree_keypair, + &nullifier_queue_keypair, + &cpi_context_keypair, + None, + None, + 2, + ) + .await; + let state_merkle_tree_pubkey = + e2e_env.indexer.state_merkle_trees[0].accounts.merkle_tree; + let output_queue_pubkey = e2e_env.indexer.state_merkle_trees[0] + .accounts + .nullifier_queue; + let mut merkle_tree_account = e2e_env + .rpc + .get_account(state_merkle_tree_pubkey) + .await + .unwrap() + .unwrap(); + let merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut merkle_tree_account.data) + .unwrap(); + // fill two output and one input batch + for i in 0..merkle_tree.get_account().queue.batch_size { + println!("\ntx {}", i); + + e2e_env + .compress_sol_deterministic(&unregistered_forester_keypair, 1_000_000, None) + .await; + e2e_env + .transfer_sol_deterministic( + &unregistered_forester_keypair, + &Pubkey::new_unique(), + None, + ) + .await + .unwrap(); + if i == merkle_tree.get_account().queue.batch_size / 2 { + instruction_data = Some( + create_append_batch_ix_data( + &mut e2e_env.rpc, + &mut e2e_env.indexer.state_merkle_trees[0], + state_merkle_tree_pubkey, + output_queue_pubkey, + ) + .await, + ); + } + } + ( + e2e_env.indexer.state_merkle_trees[0].clone(), + e2e_env.indexer.address_merkle_trees[0].clone(), + e2e_env.rpc, + ) + }; + let num_output_zkp_batches = + tree_params.input_queue_batch_size / tree_params.output_queue_zkp_batch_size; + for i in 0..num_output_zkp_batches { + // Simulate concurrency since instruction data has been created before + let instruction_data = if i == 0 { + instruction_data.clone() + } else { + None + }; + perform_batch_append( + &mut rpc, + &mut state_merkle_tree_bundle, + &env.forester, + 0, + false, + instruction_data, + ) + .await + .unwrap(); + // We only spent half of the output queue + if i < num_output_zkp_batches / 2 { + perform_batch_nullify( + &mut rpc, + &mut state_merkle_tree_bundle, + &env.forester, + 0, + false, + None, + ) + .await + .unwrap(); + } + } + } +} + /// 
Test: /// 1. SUCCESS: Register a forester /// 2. SUCCESS: Update forester authority /// 3. SUCCESS: Register forester for epoch +#[serial] #[tokio::test] async fn test_register_and_update_forester_pda() { let (mut rpc, env) = setup_test_programs_with_accounts_with_protocol_config( @@ -733,6 +879,8 @@ async fn test_register_and_update_forester_pda() { // create work 1 item in address and nullifier queue each let (mut state_merkle_tree_bundle, mut address_merkle_tree, mut rpc) = { let mut e2e_env = init_program_test_env(rpc, &env).await; + // remove batched Merkle tree, fee assert makes this test flaky otherwise + e2e_env.indexer.state_merkle_trees.remove(1); e2e_env.create_address(None, None).await; e2e_env .compress_sol_deterministic(&forester_keypair, 1_000_000, None) @@ -869,7 +1017,6 @@ async fn failing_test_forester() { .await; let expected_error_code = anchor_lang::error::ErrorCode::ConstraintHasOne as u32; assert_rpc_error(result, 0, expected_error_code).unwrap(); - println!("here1"); } // 3. FAIL: Update forester pda weight with invalid authority { @@ -889,7 +1036,6 @@ async fn failing_test_forester() { .await; let expected_error_code = anchor_lang::error::ErrorCode::ConstraintHasOne as u32; assert_rpc_error(result, 0, expected_error_code).unwrap(); - println!("here1"); } // 4. FAIL: Nullify with invalid authority { @@ -912,7 +1058,6 @@ async fn failing_test_forester() { .create_and_send_transaction(&[ix], &payer.pubkey(), &[&payer]) .await; assert_rpc_error(result, 0, expected_error_code).unwrap(); - println!("here1"); } // 4 FAIL: update address Merkle tree failed { @@ -946,6 +1091,47 @@ async fn failing_test_forester() { .await; assert_rpc_error(result, 0, expected_error_code).unwrap(); } + // 4 FAIL: batch append failed + { + let expected_error_code = RegistryError::InvalidForester.into(); + let authority = rpc.get_payer().insecure_clone(); + let mut instruction = create_batch_append_instruction( + authority.pubkey(), + authority.pubkey(), + env.batched_state_merkle_tree, + env.batched_output_queue, + 0, + Vec::new(), + ); + // Swap the derived forester pda with an initialized but invalid one. + instruction.accounts[0].pubkey = + get_forester_epoch_pda_from_authority(&env.forester.pubkey(), 0).0; + + let result = rpc + .create_and_send_transaction(&[instruction], &authority.pubkey(), &[&authority]) + .await; + assert_rpc_error(result, 0, expected_error_code).unwrap(); + } + // 4 FAIL: batch nullify failed + { + let expected_error_code = RegistryError::InvalidForester.into(); + let authority = rpc.get_payer().insecure_clone(); + let mut instruction = create_batch_nullify_instruction( + authority.pubkey(), + authority.pubkey(), + env.batched_state_merkle_tree, + 0, + Vec::new(), + ); + // Swap the derived forester pda with an initialized but invalid one. + instruction.accounts[0].pubkey = + get_forester_epoch_pda_from_authority(&env.forester.pubkey(), 0).0; + + let result = rpc + .create_and_send_transaction(&[instruction], &authority.pubkey(), &[&authority]) + .await; + assert_rpc_error(result, 0, expected_error_code).unwrap(); + } // 5. 
FAIL: rollover address tree with invalid authority { let new_queue_keypair = Keypair::new(); diff --git a/test-programs/sdk-test-program/programs/sdk-test/tests/test.rs b/test-programs/sdk-test-program/programs/sdk-test/tests/test.rs index fccfdde94d..1f683b1ac0 100644 --- a/test-programs/sdk-test-program/programs/sdk-test/tests/test.rs +++ b/test-programs/sdk-test-program/programs/sdk-test/tests/test.rs @@ -252,7 +252,7 @@ where inputs, proof: rpc_result.proof, merkle_context, - merkle_tree_root_index: rpc_result.root_indices[0], + merkle_tree_root_index: rpc_result.root_indices[0].unwrap(), address_merkle_context: *address_merkle_context, address_merkle_tree_root_index: 0, nested_data, diff --git a/test-programs/system-cpi-test/src/lib.rs b/test-programs/system-cpi-test/src/lib.rs index c20ea12e28..683978d8a7 100644 --- a/test-programs/system-cpi-test/src/lib.rs +++ b/test-programs/system-cpi-test/src/lib.rs @@ -138,7 +138,12 @@ pub mod system_cpi_test { ctx.accounts.merkle_tree.to_account_info(), ]; - account_compression::cpi::insert_into_nullifier_queues(cpi_context, vec![[1u8; 32]])?; + account_compression::cpi::insert_into_nullifier_queues( + cpi_context, + vec![[1u8; 32]], + vec![], + None, + )?; Ok(()) } diff --git a/test-programs/system-cpi-test/tests/test.rs b/test-programs/system-cpi-test/tests/test.rs index 7b253e49bb..5a539e011e 100644 --- a/test-programs/system-cpi-test/tests/test.rs +++ b/test-programs/system-cpi-test/tests/test.rs @@ -127,6 +127,7 @@ async fn only_test_create_pda() { &program_owned_cpi_context_keypair, Some(light_compressed_token::ID), None, + 1, ) .await; let mint = create_mint_helper(&mut rpc, &payer).await; @@ -505,6 +506,9 @@ async fn test_create_pda_in_program_owned_merkle_trees() { forester: env.forester.insecure_clone(), registered_forester_pda: env.registered_forester_pda, forester_epoch: env.forester_epoch.clone(), + batched_cpi_context: env.batched_cpi_context, + batched_output_queue: env.batched_output_queue, + batched_state_merkle_tree: env.batched_state_merkle_tree, }; perform_create_pda_failing( @@ -534,6 +538,7 @@ async fn test_create_pda_in_program_owned_merkle_trees() { &program_owned_cpi_context_keypair, Some(light_compressed_token::ID), None, + 1, ) .await; let env_with_program_owned_state_merkle_tree = EnvAccounts { @@ -550,6 +555,9 @@ async fn test_create_pda_in_program_owned_merkle_trees() { forester: env.forester.insecure_clone(), registered_forester_pda: env.registered_forester_pda, forester_epoch: env.forester_epoch.clone(), + batched_cpi_context: env.batched_cpi_context, + batched_output_queue: env.batched_output_queue, + batched_state_merkle_tree: env.batched_state_merkle_tree, }; perform_create_pda_failing( &mut test_indexer, @@ -578,6 +586,7 @@ async fn test_create_pda_in_program_owned_merkle_trees() { &program_owned_cpi_context_keypair, Some(ID), None, + 1, ) .await; let program_owned_address_merkle_tree_keypair = Keypair::new(); @@ -605,6 +614,9 @@ async fn test_create_pda_in_program_owned_merkle_trees() { forester: env.forester.insecure_clone(), registered_forester_pda: env.registered_forester_pda, forester_epoch: env.forester_epoch.clone(), + batched_cpi_context: env.batched_cpi_context, + batched_output_queue: env.batched_output_queue, + batched_state_merkle_tree: env.batched_state_merkle_tree, }; let seed = [4u8; 32]; let data = [5u8; 31]; @@ -693,7 +705,8 @@ pub async fn perform_create_pda_with_event( .create_and_send_transaction_with_event(&[instruction], &payer_pubkey, &[payer], None) .await? 
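+        // The RPC helper returns the parsed transaction event as an Option; a successful
+        // transaction is expected to produce one, hence the unwrap below.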
.unwrap(); - test_indexer.add_compressed_accounts_with_token_data(&event.0); + let slot: u64 = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.0); Ok(()) } @@ -841,8 +854,8 @@ pub async fn perform_with_input_accounts( .cpi_context; let rpc_result = test_indexer .create_proof_for_compressed_accounts( - Some(&hashes), - Some(&merkle_tree_pubkeys), + Some(hashes), + Some(merkle_tree_pubkeys), None, None, rpc, @@ -859,7 +872,7 @@ pub async fn perform_with_input_accounts( } else { None }, - root_index: rpc_result.root_indices[0], + root_index: rpc_result.root_indices[0].unwrap(), merkle_context: PackedMerkleContext { leaf_index: token_account.compressed_account.merkle_context.leaf_index, merkle_tree_pubkey_index: 0, @@ -896,7 +909,7 @@ pub async fn perform_with_input_accounts( nullifier_queue_pubkey_index: 1, queue_index: None, }, - root_index: rpc_result.root_indices[0], + root_index: rpc_result.root_indices[0].unwrap(), read_only: false, }, token_transfer_data, @@ -914,8 +927,8 @@ pub async fn perform_with_input_accounts( .await; if expected_error_code == u32::MAX { let result = result?.unwrap(); - - test_indexer.add_compressed_accounts_with_token_data(&result.0); + let slot: u64 = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &result.0); Ok(()) } else { assert_rpc_error(result, 0, expected_error_code) diff --git a/test-programs/system-cpi-test/tests/test_program_owned_trees.rs b/test-programs/system-cpi-test/tests/test_program_owned_trees.rs index af518b1159..38569f6f69 100644 --- a/test-programs/system-cpi-test/tests/test_program_owned_trees.rs +++ b/test-programs/system-cpi-test/tests/test_program_owned_trees.rs @@ -62,6 +62,7 @@ async fn test_program_owned_merkle_tree() { }), ) .await; + test_indexer .add_state_merkle_tree( &mut rpc, @@ -70,6 +71,7 @@ async fn test_program_owned_merkle_tree() { &cpi_context_keypair, Some(light_compressed_token::ID), None, + 1, ) .await; @@ -115,11 +117,12 @@ async fn test_program_owned_merkle_tree() { 26, >(&mut rpc, program_owned_merkle_tree_pubkey) .await; - test_indexer.add_compressed_accounts_with_token_data(&event.0); + let slot: u64 = rpc.get_slot().await.unwrap(); + test_indexer.add_compressed_accounts_with_token_data(slot, &event.0); assert_ne!(post_merkle_tree.root(), pre_merkle_tree.root()); assert_eq!( post_merkle_tree.root(), - test_indexer.state_merkle_trees[1].merkle_tree.root() + test_indexer.state_merkle_trees[2].merkle_tree.root() ); let invalid_program_owned_merkle_tree_keypair = Keypair::new(); @@ -135,6 +138,7 @@ async fn test_program_owned_merkle_tree() { &cpi_context_keypair, Some(Keypair::new().pubkey()), None, + 1, ) .await; let recipient_keypair = Keypair::new(); diff --git a/test-programs/system-test/Cargo.toml b/test-programs/system-test/Cargo.toml index 7526276d5b..207aa248d2 100644 --- a/test-programs/system-test/Cargo.toml +++ b/test-programs/system-test/Cargo.toml @@ -44,3 +44,4 @@ solana-cli-output = { workspace = true } serde_json = "1.0.114" solana-sdk = { workspace = true } quote.workspace = true +serial_test = "3.1.1" \ No newline at end of file diff --git a/test-programs/system-test/tests/test.rs b/test-programs/system-test/tests/test.rs index ba74d4780c..dedcf2db82 100644 --- a/test-programs/system-test/tests/test.rs +++ b/test-programs/system-test/tests/test.rs @@ -1,10 +1,16 @@ #![cfg(feature = "test-sbf")] +use account_compression::batched_queue::ZeroCopyBatchedQueueAccount; use 
account_compression::errors::AccountCompressionErrorCode; +use account_compression::InitStateTreeAccountsInstructionData; use anchor_lang::error::ErrorCode; use anchor_lang::{AnchorSerialize, InstructionData, ToAccountMetas}; use light_hasher::Poseidon; -use light_prover_client::gnark::helpers::{ProofType, ProverConfig, ProverMode}; +use light_prover_client::gnark::helpers::{spawn_prover, ProofType, ProverConfig, ProverMode}; use light_registry::protocol_config::state::ProtocolConfig; +use light_system_program::invoke::processor::CompressedProof; +use light_system_program::sdk::compressed_account::{ + CompressedAccountWithMerkleContext, QueueIndex, +}; use light_system_program::{ errors::SystemProgramError, sdk::{ @@ -17,6 +23,7 @@ use light_system_program::{ utils::{get_cpi_authority_pda, get_registered_program_pda}, InstructionDataInvoke, NewAddressParams, }; +use light_test_utils::test_batch_forester::perform_batch_append; use light_test_utils::test_env::{EnvAccounts, FORESTER_TEST_KEYPAIR, PAYER_KEYPAIR}; use light_test_utils::{ airdrop_lamports, assert_rpc_error, FeeConfig, Indexer, RpcConnection, RpcError, @@ -38,7 +45,9 @@ use light_test_utils::{ use light_utils::hash_to_bn254_field_size_be; use light_verifier::VerifierError; use quote::format_ident; +use serial_test::serial; use solana_cli_output::CliAccount; +use solana_sdk::signature::Signature; use solana_sdk::{ instruction::{AccountMeta, Instruction, InstructionError}, pubkey::Pubkey, @@ -75,10 +84,18 @@ use tokio::fs::write as async_write; /// 5. invalid Merkle tree account (AccountDiscriminatorMismatch) /// 6.1 invalid queue account (InvalidQueueType) /// 6.2 invalid queue account (AccountDiscriminatorMismatch) +#[serial] #[tokio::test] async fn invoke_failing_test() { let (mut context, env) = setup_test_programs_with_accounts(None).await; - + spawn_prover( + true, + ProverConfig { + run_mode: Some(ProverMode::Rpc), + circuits: vec![], + }, + ) + .await; let payer = context.get_payer().insecure_clone(); // no inputs let (remaining_accounts, inputs_struct) = create_invoke_instruction_data_and_remaining_accounts( @@ -102,15 +119,8 @@ async fn invoke_failing_test() { .await .unwrap(); - let mut test_indexer = TestIndexer::::init_from_env( - &payer, - &env, - Some(ProverConfig { - run_mode: Some(ProverMode::Rpc), - circuits: vec![], - }), - ) - .await; + let mut test_indexer = + TestIndexer::::init_from_env(&payer, &env, None).await; // circuit instantiations allow for 1, 2, 3, 4, 8 inclusion proofs let options = [0usize, 1usize, 2usize, 3usize, 4usize, 8usize]; @@ -197,20 +207,12 @@ pub async fn failing_transaction_inputs( .iter() .map(|x| x.hash().unwrap()) .collect::>(); - let input_compressed_account_hashes = if num_inputs != 0 { - Some(hashes.as_slice()) - } else { - None - }; + let input_compressed_account_hashes = if num_inputs != 0 { Some(hashes) } else { None }; let mts = input_compressed_accounts .iter() .map(|x| x.merkle_context.merkle_tree_pubkey) .collect::>(); - let input_state_merkle_trees = if num_inputs != 0 { - Some(mts.as_slice()) - } else { - None - }; + let input_state_merkle_trees = if num_inputs != 0 { Some(mts) } else { None }; let proof_input_derived_addresses = if num_addresses != 0 { Some(derived_addresses.as_slice()) } else { @@ -359,28 +361,28 @@ pub async fn failing_transaction_inputs_inner( .await .unwrap(); } - // invalid leaf index - { - println!( - "leaf index: {}", - inputs_struct.input_compressed_accounts_with_merkle_context[num_inputs - 1] - .merkle_context - .leaf_index - ); - let mut 
inputs_struct = inputs_struct.clone(); - inputs_struct.input_compressed_accounts_with_merkle_context[num_inputs - 1] - .merkle_context - .leaf_index += 1; - create_instruction_and_failing_transaction( - context, - payer, - inputs_struct, - remaining_accounts.clone(), - VerifierError::ProofVerificationFailed.into(), - ) - .await - .unwrap(); - } + // // invalid leaf index + // { + // println!( + // "leaf index: {}", + // inputs_struct.input_compressed_accounts_with_merkle_context[num_inputs - 1] + // .merkle_context + // .leaf_index + // ); + // let mut inputs_struct = inputs_struct.clone(); + // inputs_struct.input_compressed_accounts_with_merkle_context[num_inputs - 1] + // .merkle_context + // .leaf_index += 1; + // create_instruction_and_failing_transaction( + // context, + // payer, + // inputs_struct, + // remaining_accounts.clone(), + // VerifierError::ProofVerificationFailed.into(), + // ) + // .await + // .unwrap(); + // } // invalid account data (lamports) if !inputs_struct.output_compressed_accounts.is_empty() { let mut inputs_struct = inputs_struct.clone(); @@ -886,6 +888,7 @@ pub async fn create_instruction_and_failing_transaction( /// 2. should fail: in compressed account and invalid zkp /// 3. should fail: in compressed account and invalid signer /// 4. should succeed: in compressed account inserted in (1.) and valid zkp +#[serial] #[tokio::test] async fn invoke_test() { let (mut context, env) = setup_test_programs_with_accounts(None).await; @@ -944,7 +947,9 @@ async fn invoke_test() { .await .unwrap() .unwrap(); - let (created_compressed_accounts, _) = test_indexer.add_event_and_compressed_accounts(&event.0); + let slot: u64 = context.get_slot().await.unwrap(); + let (created_compressed_accounts, _) = + test_indexer.add_event_and_compressed_accounts(slot, &event.0); assert_created_compressed_accounts( output_compressed_accounts.as_slice(), output_merkle_tree_pubkeys.as_slice(), @@ -967,11 +972,11 @@ async fn invoke_test() { &[MerkleContext { merkle_tree_pubkey, leaf_index: 0, - nullifier_queue_pubkey, + nullifier_queue_pubkey: nullifier_queue_pubkey, queue_index: None, }], &[merkle_tree_pubkey], - &[0u16], + &[Some(0u16)], &Vec::new(), None, None, @@ -1001,11 +1006,11 @@ async fn invoke_test() { &[MerkleContext { merkle_tree_pubkey, leaf_index: 0, - nullifier_queue_pubkey, + nullifier_queue_pubkey: nullifier_queue_pubkey, queue_index: None, }], &[merkle_tree_pubkey], - &[0u16], + &[Some(0u16)], &Vec::new(), None, None, @@ -1025,16 +1030,18 @@ async fn invoke_test() { let compressed_account_with_context = test_indexer.compressed_accounts[0].clone(); let proof_rpc_res = test_indexer .create_proof_for_compressed_accounts( - Some(&[compressed_account_with_context + Some(vec![compressed_account_with_context .compressed_account .hash::( &merkle_tree_pubkey, &compressed_account_with_context.merkle_context.leaf_index, ) .unwrap()]), - Some(&[compressed_account_with_context - .merkle_context - .merkle_tree_pubkey]), + Some(vec![ + compressed_account_with_context + .merkle_context + .merkle_tree_pubkey, + ]), None, None, &mut context, @@ -1049,7 +1056,7 @@ async fn invoke_test() { &[MerkleContext { merkle_tree_pubkey, leaf_index: 0, - nullifier_queue_pubkey, + nullifier_queue_pubkey: nullifier_queue_pubkey, queue_index: None, }], &[merkle_tree_pubkey], @@ -1079,7 +1086,8 @@ async fn invoke_test() { .await .unwrap() .unwrap(); - test_indexer.add_event_and_compressed_accounts(&event.0); + let slot: u64 = context.get_slot().await.unwrap(); + 
test_indexer.add_event_and_compressed_accounts(slot, &event.0); println!("Double spend -------------------------"); let output_compressed_accounts = vec![CompressedAccount { @@ -1097,7 +1105,7 @@ async fn invoke_test() { &[MerkleContext { merkle_tree_pubkey, leaf_index: 0, - nullifier_queue_pubkey, + nullifier_queue_pubkey: nullifier_queue_pubkey, queue_index: None, }], &[merkle_tree_pubkey], @@ -1128,7 +1136,7 @@ async fn invoke_test() { &[MerkleContext { merkle_tree_pubkey, leaf_index: 1, - nullifier_queue_pubkey, + nullifier_queue_pubkey: nullifier_queue_pubkey, queue_index: None, }], &[merkle_tree_pubkey], @@ -1153,6 +1161,7 @@ async fn invoke_test() { /// 4. should succeed: create two addresses with different seeds /// 5. should succeed: create multiple addresses with different seeds and spend input compressed accounts /// testing: (input accounts, new addresses) (1, 1), (1, 2), (2, 1), (2, 2) +#[serial] #[tokio::test] async fn test_with_address() { let (mut context, env) = setup_test_programs_with_accounts(None).await; @@ -1341,6 +1350,7 @@ async fn test_with_address() { } } +#[serial] #[tokio::test] async fn test_with_compression() { let (mut context, env) = setup_test_programs_with_accounts(None).await; @@ -1443,16 +1453,18 @@ async fn test_with_compression() { let compressed_account_with_context = test_indexer.compressed_accounts.last().unwrap().clone(); let proof_rpc_res = test_indexer .create_proof_for_compressed_accounts( - Some(&[compressed_account_with_context + Some(vec![compressed_account_with_context .compressed_account .hash::<Poseidon>( &merkle_tree_pubkey, &compressed_account_with_context.merkle_context.leaf_index, ) .unwrap()]), - Some(&[compressed_account_with_context - .merkle_context - .merkle_tree_pubkey]), + Some(vec![ + compressed_account_with_context + .merkle_context + .merkle_tree_pubkey, + ]), None, None, &mut context, @@ -1476,7 +1488,7 @@ async fn test_with_compression() { &[MerkleContext { merkle_tree_pubkey, leaf_index: 0, - nullifier_queue_pubkey, + nullifier_queue_pubkey: nullifier_queue_pubkey, queue_index: None, }], &[merkle_tree_pubkey], @@ -1518,6 +1530,7 @@ async fn test_with_compression() { } #[ignore = "this is a helper function to regenerate accounts"] +#[serial] #[tokio::test] async fn regenerate_accounts() { let output_dir = "../../cli/accounts/"; @@ -1555,6 +1568,7 @@ async fn regenerate_accounts() { protocol_config, true, skip_register_programs, + InitStateTreeAccountsInstructionData::test_default(), ) .await; @@ -1645,3 +1659,691 @@ async fn regenerate_accounts() { .unwrap(); file.write_all(&rustfmt(rust_file).unwrap()).unwrap(); } + +/// Tests batched compressed transaction execution: +/// 1. Should succeed: without compressed account (0 lamports), no input compressed account. +/// 2. Should fail: input compressed account with invalid ZKP. +/// 3. Should fail: input compressed account with invalid signer. +/// 4. Should succeed: prove inclusion by index. +/// 5. Should fail: double spend by index. +/// 6. Should fail: invalid leaf index. +/// 7. Should succeed: spend compressed accounts by ZKP and by index, with v1 and v2 trees. +/// 8. Should fail: double-spending by index after spending by ZKP. +/// 9. Should fail: double-spending by ZKP after spending by index. +/// 10. Should fail: double-spending by index after spending by index. +/// 11. Should fail: double-spending by ZKP after spending by ZKP. 
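+//
+// A minimal sketch (not part of the test) of how the by-index spends listed
+// above are signaled, using the types this patch introduces: an account that
+// still sits in the batched output queue is spent by setting `queue_index` to
+// `Some` on its `MerkleContext` and passing the output queue pubkey, with no
+// validity proof and no root index. `account` stands for any
+// `CompressedAccountWithMerkleContext`; the concrete `QueueIndex` values are
+// placeholders, since on-chain only `Some` vs `None` matters here.
+//
+// let mut merkle_context = account.merkle_context;
+// merkle_context.queue_index = Some(QueueIndex { queue_id: 1, index: 0 });
+// // ...then invoke with proof `None` and an empty root index slice.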
+#[serial] +#[tokio::test] +async fn batch_invoke_test() { + let (mut context, env) = setup_test_programs_with_accounts(None).await; + + let payer = context.get_payer().insecure_clone(); + let mut test_indexer = TestIndexer::<ProgramTestRpcConnection>::init_from_env( + &payer, + &env, + Some(ProverConfig { + run_mode: None, + circuits: vec![ProofType::Inclusion, ProofType::BatchAppendWithProofsTest], + }), + ) + .await; + let payer_pubkey = payer.pubkey(); + + let merkle_tree_pubkey = env.batched_state_merkle_tree; + let output_queue_pubkey = env.batched_output_queue; + let output_compressed_accounts = vec![CompressedAccount { + lamports: 0, + owner: payer.pubkey(), + data: None, + address: None, + }]; + // 1. Should succeed: without compressed account (0 lamports), no input compressed account. + create_output_accounts( + &mut context, + &payer, + &mut test_indexer, + output_queue_pubkey, + 1, + true, + ) + .await + .unwrap(); + + let input_compressed_accounts = vec![CompressedAccount { + lamports: 0, + owner: payer_pubkey, + data: None, + address: None, + }]; + // 2. Should fail: input compressed account with invalid ZKP. + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &input_compressed_accounts, + &output_compressed_accounts, + &[MerkleContext { + merkle_tree_pubkey, + leaf_index: 0, + nullifier_queue_pubkey: output_queue_pubkey, + queue_index: None, + }], + &[output_queue_pubkey], + &[Some(0u16)], + &Vec::new(), + Some(CompressedProof::default()), + None, + false, + None, + true, + ); + + let result = context + .create_and_send_transaction(&[instruction], &payer_pubkey, &[&payer]) + .await; + assert_rpc_error(result, 0, VerifierError::ProofVerificationFailed.into()).unwrap(); + + // 3. Should fail: input compressed account with invalid signer. + let invalid_signer_compressed_accounts = vec![CompressedAccount { + lamports: 0, + owner: Keypair::new().pubkey(), + data: None, + address: None, + }]; + + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &invalid_signer_compressed_accounts, + &output_compressed_accounts, + &[MerkleContext { + merkle_tree_pubkey, + leaf_index: 0, + nullifier_queue_pubkey: output_queue_pubkey, + queue_index: None, + }], + &[merkle_tree_pubkey], + &[Some(0u16)], + &Vec::new(), + None, + None, + false, + None, + true, + ); + + let result = context + .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await; + assert_rpc_error(result, 0, SystemProgramError::SignerCheckFailed.into()).unwrap(); + + // 4. Should succeed: prove inclusion by index. 
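+ // When the spent account still sits in the batched output queue, the
+ // indexer returns neither a validity proof nor a root index (both are
+ // None below); the transaction instead proves inclusion by index.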
+ { + let compressed_account_with_context = test_indexer.compressed_accounts[0].clone(); + let proof_rpc_result = test_indexer + .create_proof_for_compressed_accounts2( + Some(vec![compressed_account_with_context.hash().unwrap()]), + Some(vec![ + compressed_account_with_context + .merkle_context + .merkle_tree_pubkey, + ]), + None, + None, + &mut context, + ) + .await; + // No proof since value is in output queue + assert!(proof_rpc_result.proof.is_none()); + // No root index since value is in output queue + assert!(proof_rpc_result.root_indices[0].is_none()); + let input_compressed_accounts = vec![compressed_account_with_context.compressed_account]; + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &input_compressed_accounts, + &output_compressed_accounts, + &[MerkleContext { + merkle_tree_pubkey, + leaf_index: compressed_account_with_context.merkle_context.leaf_index, + nullifier_queue_pubkey: output_queue_pubkey, + // Values are not used, it only has to be Some + queue_index: Some(QueueIndex { + index: 123, + queue_id: 200, + }), + }], + &[output_queue_pubkey], + &[], + &Vec::new(), + None, + None, + false, + None, + true, + ); + println!("Transaction with input proof by index -------------------------"); + + let event = context + .create_and_send_transaction_with_event( + &[instruction], + &payer_pubkey, + &[&payer], + Some(TransactionParams { + num_input_compressed_accounts: 1, + num_output_compressed_accounts: 1, + num_new_addresses: 0, + compress: 0, + fee_config: FeeConfig::test_batched(), + }), + ) + .await + .unwrap() + .unwrap(); + let slot: u64 = context.get_slot().await.unwrap(); + test_indexer.add_event_and_compressed_accounts(slot, &event.0); + } + + // 5. Should fail: double spend by index + { + let output_compressed_accounts = vec![CompressedAccount { + lamports: 0, + owner: Keypair::new().pubkey(), + data: None, + address: None, + }]; + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &input_compressed_accounts, + &output_compressed_accounts, + &[MerkleContext { + merkle_tree_pubkey, + leaf_index: 0, + nullifier_queue_pubkey: output_queue_pubkey, + queue_index: Some(QueueIndex { + index: 123, + queue_id: 200, + }), + }], + &[output_queue_pubkey], + &[], + &Vec::new(), + None, + None, + false, + None, + true, + ); + let result = context + .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InclusionProofByIndexFailed.into(), + ) + .unwrap(); + } + // 6. 
Should fail: invalid leaf index + { + let input_compressed_account = test_indexer + .get_compressed_accounts_by_owner(&payer_pubkey) + .iter() + .filter(|x| x.merkle_context.nullifier_queue_pubkey == output_queue_pubkey) + .last() + .unwrap() + .clone(); + let output_compressed_accounts = vec![CompressedAccount { + lamports: 0, + owner: Keypair::new().pubkey(), + data: None, + address: None, + }]; + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &[input_compressed_account.compressed_account], + &output_compressed_accounts, + &[MerkleContext { + merkle_tree_pubkey, + leaf_index: input_compressed_account.merkle_context.leaf_index - 1, + nullifier_queue_pubkey: output_queue_pubkey, + queue_index: Some(QueueIndex { + index: 123, + queue_id: 200, + }), + }], + &[output_queue_pubkey], + &[], + &Vec::new(), + None, + None, + false, + None, + true, + ); + let result = context + .create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await; + assert_rpc_error( + result, + 0, + AccountCompressionErrorCode::InclusionProofByIndexFailed.into(), + ) + .unwrap(); + } + // create compressed account in v1 Merkle tree + { + let merkle_tree_pubkey = env.merkle_tree_pubkey; + create_output_accounts( + &mut context, + &payer, + &mut test_indexer, + merkle_tree_pubkey, + 1, + false, + ) + .await + .unwrap(); + } + // 7. Should succeed: spend compressed accounts by ZKP and by index, with v1 and v2 trees + { + let compressed_account_with_context_1 = test_indexer.compressed_accounts[0].clone(); + let compressed_account_with_context_2 = test_indexer.compressed_accounts[1].clone(); + let proof_rpc_result = test_indexer + .create_proof_for_compressed_accounts2( + Some(vec![ + compressed_account_with_context_1.hash().unwrap(), + compressed_account_with_context_2.hash().unwrap(), + ]), + Some(vec![ + compressed_account_with_context_1 + .merkle_context + .merkle_tree_pubkey, + compressed_account_with_context_2 + .merkle_context + .merkle_tree_pubkey, + ]), + None, + None, + &mut context, + ) + .await; + let input_compressed_accounts = vec![ + compressed_account_with_context_1.compressed_account, + compressed_account_with_context_2.compressed_account, + ]; + let output_compressed_accounts = vec![ + CompressedAccount { + lamports: 0, + owner: payer_pubkey, + data: None, + address: None, + }, + CompressedAccount { + lamports: 0, + owner: payer_pubkey, + data: None, + address: None, + }, + ]; + let merkle_context_1 = compressed_account_with_context_1.merkle_context; + let mut merkle_context_2 = compressed_account_with_context_2.merkle_context; + // merkle_context_2.queue_index = Some(proofs_by_index[0].1); + // The queue index value is not used; it just has to be Some to signal that the value is not part of the proof + merkle_context_2.queue_index = Some(QueueIndex { + index: 123, + queue_id: 200, + }); + + let instruction = create_invoke_instruction( + &payer_pubkey, + &payer_pubkey, + &input_compressed_accounts, + &output_compressed_accounts, + &[merkle_context_1, merkle_context_2], + &[ + merkle_context_1.merkle_tree_pubkey, + merkle_context_2.nullifier_queue_pubkey, + ], + &proof_rpc_result.root_indices, + &Vec::new(), + proof_rpc_result.proof, + None, + false, + None, + true, + ); + println!("Combined Transaction with index and zkp -------------------------"); + + let event = context + .create_and_send_transaction_with_event(&[instruction], &payer_pubkey, &[&payer], None) + .await + .unwrap() + .unwrap(); + let slot: u64 = context.get_slot().await.unwrap(); + 
test_indexer.add_event_and_compressed_accounts(slot, &event.0); + } + create_compressed_accounts_in_batch_merkle_tree( + &mut context, + &mut test_indexer, + &payer, + output_queue_pubkey, + &env, + ) + .await + .unwrap(); + + // 8. spend account by zkp -> double spend by index + { + // Selecting compressed account: + // - from the end of the array (accounts at the end are in the Merkle tree (only 10 are inserted)) + // - Compressed account in the batched Merkle tree + let compressed_account_with_context_1 = test_indexer + .compressed_accounts + .iter() + .filter(|x| { + x.compressed_account.owner == payer_pubkey + && x.merkle_context.nullifier_queue_pubkey == output_queue_pubkey + }) + .last() + .unwrap() + .clone(); + let result = double_spend_compressed_account( + &mut context, + &mut test_indexer, + &payer, + TestMode::ByZkpThenIndex, + compressed_account_with_context_1.clone(), + ) + .await; + assert_rpc_error( + result, + 1, + AccountCompressionErrorCode::InclusionProofByIndexFailed.into(), + ) + .unwrap(); + } + + // 9. spend account by index -> double spend by zkp + { + // Selecting compressed account: + // - from the end of the array (accounts at the end are in the Merkle tree (only 10 are inserted)) + // - Compressed account in the batched Merkle tree + let compressed_account_with_context_1 = test_indexer + .compressed_accounts + .iter() + .filter(|x| { + x.compressed_account.owner == payer_pubkey + && x.merkle_context.nullifier_queue_pubkey == output_queue_pubkey + }) + .last() + .unwrap() + .clone(); + let result = double_spend_compressed_account( + &mut context, + &mut test_indexer, + &payer, + TestMode::ByIndexThenZkp, + compressed_account_with_context_1.clone(), + ) + .await; + assert_rpc_error( + result, + 1, + AccountCompressionErrorCode::InclusionProofByIndexFailed.into(), + ) + .unwrap(); + } + // 10. spend account by index -> double spend by index + { + // Selecting compressed account: + // - from the end of the array (accounts at the end are in the Merkle tree (only 10 are inserted)) + // - Compressed account in the batched Merkle tree + let compressed_account_with_context_1 = test_indexer + .compressed_accounts + .iter() + .filter(|x| { + x.compressed_account.owner == payer_pubkey + && x.merkle_context.nullifier_queue_pubkey == output_queue_pubkey + }) + .last() + .unwrap() + .clone(); + let result = double_spend_compressed_account( + &mut context, + &mut test_indexer, + &payer, + TestMode::ByIndexThenIndex, + compressed_account_with_context_1.clone(), + ) + .await; + assert_rpc_error( + result, + 1, + AccountCompressionErrorCode::InclusionProofByIndexFailed.into(), + ) + .unwrap(); + } + // 11. 
spend account by zkp -> double spend by zkp + { + // Selecting compressed account: + // - from the end of the array (accounts at the end are in the Merkle tree (only 10 are inserted)) + // - Compressed account in the batched Merkle tree + let compressed_account_with_context_1 = test_indexer + .compressed_accounts + .iter() + .filter(|x| { + x.compressed_account.owner == payer_pubkey + && x.merkle_context.nullifier_queue_pubkey == output_queue_pubkey + }) + .last() + .unwrap() + .clone(); + let result = double_spend_compressed_account( + &mut context, + &mut test_indexer, + &payer, + TestMode::ByZkpThenZkp, + compressed_account_with_context_1.clone(), + ) + .await; + assert_rpc_error( + result, + 1, + AccountCompressionErrorCode::InclusionProofByIndexFailed.into(), + ) + .unwrap(); + } +} + +#[derive(Debug, PartialEq)] +pub enum TestMode { + ByZkpThenIndex, + ByIndexThenZkp, + ByIndexThenIndex, + ByZkpThenZkp, +} + +pub async fn double_spend_compressed_account( + context: &mut ProgramTestRpcConnection, + test_indexer: &mut TestIndexer<ProgramTestRpcConnection>, + payer: &Keypair, + mode: TestMode, + compressed_account_with_context_1: CompressedAccountWithMerkleContext, +) -> Result<(), RpcError> { + let proof_rpc_result = test_indexer + .create_proof_for_compressed_accounts( + Some(vec![compressed_account_with_context_1.hash().unwrap()]), + Some(vec![ + compressed_account_with_context_1 + .merkle_context + .merkle_tree_pubkey, + ]), + None, + None, + context, + ) + .await; + let input_compressed_accounts = vec![compressed_account_with_context_1.compressed_account]; + let output_compressed_accounts = vec![CompressedAccount { + lamports: 0, + owner: payer.pubkey(), + data: None, + address: None, + }]; + let merkle_context_1 = compressed_account_with_context_1.merkle_context; + let mut instructions = vec![create_invoke_instruction( + &payer.pubkey(), + &payer.pubkey(), + &input_compressed_accounts, + &output_compressed_accounts, + &[merkle_context_1], + &[merkle_context_1.nullifier_queue_pubkey], + &proof_rpc_result.root_indices, + &Vec::new(), + Some(proof_rpc_result.proof), + None, + false, + None, + true, + )]; + + { + let mut merkle_context = merkle_context_1; + merkle_context.queue_index = Some(QueueIndex { + queue_id: 1, + index: 0, + }); + let instruction = create_invoke_instruction( + &payer.pubkey(), + &payer.pubkey(), + &input_compressed_accounts, + &output_compressed_accounts, + &[merkle_context], + &[merkle_context.nullifier_queue_pubkey], + &vec![None], + &Vec::new(), + None, + None, + false, + None, + true, + ); + if mode == TestMode::ByZkpThenIndex { + instructions.insert(1, instruction); + } else if mode == TestMode::ByIndexThenZkp { + instructions.insert(0, instruction); + } else if mode == TestMode::ByIndexThenIndex { + instructions.remove(0); + instructions.push(instruction.clone()); + instructions.push(instruction); + } + } + if mode == TestMode::ByZkpThenZkp { + let instruction = instructions[0].clone(); + instructions.push(instruction); + } + let event = context + .create_and_send_transaction_with_event(&instructions, &payer.pubkey(), &[&payer], None) + .await? 
+ .unwrap(); + let slot: u64 = context.get_slot().await.unwrap(); + test_indexer.add_event_and_compressed_accounts(slot, &event.0); + Ok(()) +} + +/// Fills the current batch and performs a batch append. +pub async fn create_compressed_accounts_in_batch_merkle_tree( + context: &mut ProgramTestRpcConnection, + test_indexer: &mut TestIndexer<ProgramTestRpcConnection>, + payer: &Keypair, + output_queue_pubkey: Pubkey, + env: &EnvAccounts, +) -> Result<(), RpcError> { + let mut output_queue_account = context + .get_account(output_queue_pubkey) + .await + .unwrap() + .unwrap(); + let output_queue = + ZeroCopyBatchedQueueAccount::from_bytes_mut(&mut output_queue_account.data).unwrap(); + let fullness = output_queue.get_batch_num_inserted_in_current_batch(); + let remaining_leaves = output_queue.get_account().queue.batch_size - fullness; + for _ in 0..remaining_leaves { + create_output_accounts(context, &payer, test_indexer, output_queue_pubkey, 1, true).await?; + } + let bundle = test_indexer + .state_merkle_trees + .iter_mut() + .find(|x| x.accounts.nullifier_queue == output_queue_pubkey) + .unwrap(); + perform_batch_append(context, bundle, &env.forester, 0, false, None).await?; + Ok(()) +} +pub async fn create_output_accounts( + context: &mut ProgramTestRpcConnection, + payer: &Keypair, + test_indexer: &mut TestIndexer<ProgramTestRpcConnection>, + output_queue_pubkey: Pubkey, + num_accounts: usize, + is_batched: bool, +) -> Result<Signature, RpcError> { + let output_compressed_accounts = vec![ + CompressedAccount { + lamports: 0, + owner: payer.pubkey(), + data: None, + address: None, + }; + num_accounts + ]; + let output_merkle_tree_pubkeys = vec![output_queue_pubkey; num_accounts]; + let instruction = create_invoke_instruction( + &payer.pubkey(), + &payer.pubkey(), + &Vec::new(), + &output_compressed_accounts, + &Vec::new(), + output_merkle_tree_pubkeys.as_slice(), + &Vec::new(), + &Vec::new(), + None, + None, + false, + None, + true, + ); + let fee_config = if is_batched { + FeeConfig::test_batched() + } else { + FeeConfig::default() + }; + + let (event, signature, _) = context + .create_and_send_transaction_with_event( + &[instruction], + &payer.pubkey(), + &[&payer], + Some(TransactionParams { + num_input_compressed_accounts: 0, + num_output_compressed_accounts: 1, + num_new_addresses: 0, + compress: 0, + fee_config, + }), + ) + .await + .unwrap() + .unwrap(); + let slot: u64 = context.get_slot().await.unwrap(); + let (created_compressed_accounts, _) = + test_indexer.add_event_and_compressed_accounts(slot, &event); + assert_created_compressed_accounts( + output_compressed_accounts.as_slice(), + output_merkle_tree_pubkeys.as_slice(), + created_compressed_accounts.as_slice(), + false, + ); + Ok(signature) +} diff --git a/test-utils/src/assert_address_merkle_tree.rs b/test-utils/src/assert_address_merkle_tree.rs index 3900ccd0d5..3871730f13 100644 --- a/test-utils/src/assert_address_merkle_tree.rs +++ b/test-utils/src/assert_address_merkle_tree.rs @@ -31,7 +31,7 @@ pub async fn assert_address_merkle_tree_initialized( .metadata .rollover_metadata .rollover_threshold, - merkle_tree_config.rollover_threshold.unwrap_or_default() + merkle_tree_config.rollover_threshold.unwrap_or(u64::MAX) ); assert_eq!( merkle_tree_account.metadata.rollover_metadata.network_fee, diff --git a/test-utils/src/assert_compressed_tx.rs b/test-utils/src/assert_compressed_tx.rs index c84fb16cd2..4ef53c5f5e 100644 --- a/test-utils/src/assert_compressed_tx.rs +++ b/test-utils/src/assert_compressed_tx.rs @@ -1,4 +1,9 @@ +use account_compression::batched_merkle_tree::{ + BatchedMerkleTreeAccount, 
ZeroCopyBatchedMerkleTreeAccount, +}; +use account_compression::batched_queue::BatchedQueueAccount; use account_compression::{state::QueueAccount, StateMerkleTreeAccount}; +use anchor_lang::Discriminator; use forester_utils::indexer::{Indexer, StateMerkleTreeAccounts}; use forester_utils::{get_concurrent_merkle_tree, get_hash_set, AccountZeroCopy}; use light_client::rpc::RpcConnection; @@ -9,7 +14,6 @@ use light_system_program::sdk::{ event::PublicTransactionEvent, invoke::get_sol_pool_pda, }; -use log::debug; use num_bigint::BigUint; use num_traits::FromBytes; use solana_sdk::account::ReadableAccount; @@ -123,12 +127,39 @@ pub async fn assert_nullifiers_exist_in_hash_sets( input_compressed_account_hashes: &[[u8; 32]], ) { for (i, hash) in input_compressed_account_hashes.iter().enumerate() { - let nullifier_queue = unsafe { - get_hash_set::<QueueAccount, R>(rpc, snapshots[i].accounts.nullifier_queue).await - }; - assert!(nullifier_queue - .contains(&BigUint::from_be_bytes(hash.as_slice()), None) - .unwrap()); + match snapshots[i].version { + 1 => { + let nullifier_queue = unsafe { + get_hash_set::<QueueAccount, R>(rpc, snapshots[i].accounts.nullifier_queue) + .await + }; + assert!(nullifier_queue + .contains(&BigUint::from_be_bytes(hash.as_slice()), None) + .unwrap()); + } + 2 => { + let mut merkle_tree_account = rpc + .get_account(snapshots[i].accounts.merkle_tree) + .await + .unwrap() + .unwrap(); + let mut merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut merkle_tree_account.data) + .unwrap(); + let mut batches = merkle_tree.batches.clone(); + // The nullifier must be flagged by at least one batch's bloom filter. + assert!(batches.iter_mut().enumerate().any(|(i, batch)| { + batch + .check_non_inclusion( + hash, + merkle_tree.bloom_filter_stores[i].as_mut_slice(), + ) + .is_err() + })); + } + _ => { + panic!("assert_nullifiers_exist_in_hash_sets: invalid version"); + } + } } } @@ -159,7 +190,8 @@ pub fn assert_created_compressed_accounts( && x.address == output_account.compressed_account.address),); assert!(output_merkle_tree_pubkeys .iter() - .any(|x| *x == output_account.merkle_context.merkle_tree_pubkey),); + .any(|x| *x == output_account.merkle_context.merkle_tree_pubkey + || *x == output_account.merkle_context.nullifier_queue_pubkey),); } } @@ -181,10 +213,10 @@ pub fn assert_public_transaction_event( ); for account in event.output_compressed_accounts.iter() { assert!( - output_merkle_tree_accounts - .iter() - .any(|x| x.merkle_tree == event.pubkey_array[account.merkle_tree_index as usize]), - // output_merkle_tree_accounts[account.merkle_tree_index as usize].merkle_tree, + output_merkle_tree_accounts.iter().any(|x| x.merkle_tree + == event.pubkey_array[account.merkle_tree_index as usize] + // handle output queue + || x.nullifier_queue == event.pubkey_array[account.merkle_tree_index as usize]), "assert_public_transaction_event: output state merkle tree account index mismatch" ); } @@ -212,14 +244,17 @@ pub fn assert_public_transaction_event( .iter_mut() .find(|x| x.pubkey == merkle_tree_pubkey); if index.is_none() { - debug!("reference sequence numbers: {:?}", sequence_numbers); - debug!("event: {:?}", event); + println!("reference sequence numbers: {:?}", sequence_numbers); + println!("event: {:?}", event); panic!( "merkle tree pubkey not found in sequence numbers : {:?}", merkle_tree_pubkey ); } else { - index.as_mut().unwrap().seq += 1; + let seq = &mut index.as_mut().unwrap().seq; + // The output queue doesn't have a sequence number, hence we set it to + // u64::MAX to mark it as a batched queue. 
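+ // saturating_add keeps that u64::MAX marker in place for batched queues
+ // while still incrementing the sequence numbers of v1 trees.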
+ *seq = seq.saturating_add(1); } } for sequence_number in updated_sequence_numbers.iter() { @@ -236,6 +271,7 @@ pub struct MerkleTreeTestSnapShot { pub merkle_tree_account_lamports: u64, pub queue_account_lamports: u64, pub cpi_context_account_lamports: u64, + pub version: u64, } // TODO: add assert that changelog, seq number is updated correctly @@ -254,47 +290,60 @@ pub async fn assert_merkle_tree_after_tx>( deduped_snapshots.dedup(); let mut sequence_numbers = Vec::new(); for (i, snapshot) in deduped_snapshots.iter().enumerate() { - let merkle_tree = get_concurrent_merkle_tree::( - rpc, - snapshot.accounts.merkle_tree, - ) - .await; - debug!("sequence number: {:?}", merkle_tree.next_index() as u64); - debug!("next index: {:?}", snapshot.next_index); - debug!("prev sequence number: {:?}", snapshot.num_added_accounts); - sequence_numbers.push(MerkleTreeSequenceNumber { - pubkey: snapshot.accounts.merkle_tree, - seq: merkle_tree.sequence_number() as u64, - }); - if merkle_tree.root() == snapshot.root { - debug!("deduped_snapshots: {:?}", deduped_snapshots); - debug!("i: {:?}", i); - panic!("merkle tree root update failed, it should have updated but didn't"); - } - assert_eq!( - merkle_tree.next_index(), - snapshot.next_index + snapshot.num_added_accounts - ); - let test_indexer_merkle_tree = test_indexer - .get_state_merkle_trees_mut() - .iter_mut() - .find(|x| x.accounts.merkle_tree == snapshot.accounts.merkle_tree) - .expect("merkle tree not found in test indexer"); + match snapshot.version { + 1 => { + let merkle_tree = + get_concurrent_merkle_tree::( + rpc, + snapshot.accounts.merkle_tree, + ) + .await; + println!("sequence number: {:?}", merkle_tree.next_index() as u64); + println!("next index: {:?}", snapshot.next_index); + println!("prev sequence number: {:?}", snapshot.num_added_accounts); + sequence_numbers.push(MerkleTreeSequenceNumber { + pubkey: snapshot.accounts.merkle_tree, + seq: merkle_tree.sequence_number() as u64, + }); + if merkle_tree.root() == snapshot.root { + println!("deduped_snapshots: {:?}", deduped_snapshots); + println!("i: {:?}", i); + panic!("merkle tree root update failed, it should have updated but didn't"); + } + assert_eq!( + merkle_tree.next_index(), + snapshot.next_index + snapshot.num_added_accounts + ); + let test_indexer_merkle_tree = test_indexer + .get_state_merkle_trees_mut() + .iter_mut() + .find(|x| x.accounts.merkle_tree == snapshot.accounts.merkle_tree) + .expect("merkle tree not found in test indexer"); + + if merkle_tree.root() != test_indexer_merkle_tree.merkle_tree.root() { + // The following lines are just println prints + println!("Merkle tree pubkey {:?}", snapshot.accounts.merkle_tree); + for (i, leaf) in test_indexer_merkle_tree.merkle_tree.layers[0] + .iter() + .enumerate() + { + println!("test_indexer_merkle_tree index {} leaf: {:?}", i, leaf); + } + for i in 0..16 { + println!("root {} {:?}", i, merkle_tree.roots.get(i)); + } - if merkle_tree.root() != test_indexer_merkle_tree.merkle_tree.root() { - // The following lines are just debug prints - debug!("Merkle tree pubkey {:?}", snapshot.accounts.merkle_tree); - for (i, leaf) in test_indexer_merkle_tree.merkle_tree.layers[0] - .iter() - .enumerate() - { - debug!("test_indexer_merkle_tree index {} leaf: {:?}", i, leaf); + panic!("merkle tree root update failed"); + } } - for i in 0..16 { - debug!("root {} {:?}", i, merkle_tree.roots.get(i)); + 2 => { + // TODO: assert batched merkle tree + } + _ => { + panic!( + "assert_merkle_tree_after_tx: get_merkle_tree_snapshots: invalid 
discriminator" + ); } - - panic!("merkle tree root update failed"); } } sequence_numbers @@ -312,39 +361,94 @@ pub async fn get_merkle_tree_snapshots( ) -> Vec { let mut snapshots = Vec::new(); for account_bundle in accounts.iter() { - let merkle_tree = get_concurrent_merkle_tree::( - rpc, - account_bundle.merkle_tree, - ) - .await; - let merkle_tree_account = - AccountZeroCopy::::new(rpc, account_bundle.merkle_tree).await; - - let queue_account_lamports = match rpc - .get_account(account_bundle.nullifier_queue) + let mut account_data = rpc + .get_account(account_bundle.merkle_tree) .await .unwrap() - { - Some(x) => x.lamports, - None => 0, - }; - let cpi_context_account_lamports = - match rpc.get_account(account_bundle.cpi_context).await.unwrap() { - Some(x) => x.lamports, - None => 0, - }; - snapshots.push(MerkleTreeTestSnapShot { - accounts: *account_bundle, - root: merkle_tree.root(), - next_index: merkle_tree.next_index(), - num_added_accounts: accounts - .iter() - .filter(|x| x.merkle_tree == account_bundle.merkle_tree) - .count(), - merkle_tree_account_lamports: merkle_tree_account.account.lamports(), - queue_account_lamports, - cpi_context_account_lamports, - }); + .unwrap(); + match account_data.data[0..8].try_into().unwrap() { + StateMerkleTreeAccount::DISCRIMINATOR => { + let merkle_tree = + get_concurrent_merkle_tree::( + rpc, + account_bundle.merkle_tree, + ) + .await; + let merkle_tree_account = + AccountZeroCopy::::new(rpc, account_bundle.merkle_tree) + .await; + + let queue_account_lamports = match rpc + .get_account(account_bundle.nullifier_queue) + .await + .unwrap() + { + Some(x) => x.lamports, + None => 0, + }; + let cpi_context_account_lamports = + match rpc.get_account(account_bundle.cpi_context).await.unwrap() { + Some(x) => x.lamports, + None => 0, + }; + snapshots.push(MerkleTreeTestSnapShot { + accounts: *account_bundle, + root: merkle_tree.root(), + next_index: merkle_tree.next_index(), + num_added_accounts: accounts + .iter() + .filter(|x| x.merkle_tree == account_bundle.merkle_tree) + .count(), + merkle_tree_account_lamports: merkle_tree_account.account.lamports(), + queue_account_lamports, + cpi_context_account_lamports, + version: 1, + }); + } + BatchedMerkleTreeAccount::DISCRIMINATOR => { + let merkle_tree_account_lamports = account_data.lamports; + let merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(&mut account_data.data) + .unwrap(); + let queue_account_lamports = match rpc + .get_account(account_bundle.nullifier_queue) + .await + .unwrap() + { + Some(x) => x.lamports, + None => 0, + }; + let cpi_context_account_lamports = + match rpc.get_account(account_bundle.cpi_context).await.unwrap() { + Some(x) => x.lamports, + None => 0, + }; + let root = *merkle_tree.root_history.last().unwrap(); + + let output_queue = AccountZeroCopy::::new( + rpc, + account_bundle.nullifier_queue, + ) + .await; + + snapshots.push(MerkleTreeTestSnapShot { + accounts: *account_bundle, + root, + next_index: output_queue.deserialized().next_index as usize, + num_added_accounts: accounts + .iter() + .filter(|x| x.merkle_tree == account_bundle.merkle_tree) + .count(), + merkle_tree_account_lamports, + queue_account_lamports, + cpi_context_account_lamports, + version: 2, + }); + } + _ => { + panic!("get_merkle_tree_snapshots: invalid discriminator"); + } + } } snapshots } diff --git a/test-utils/src/assert_queue.rs b/test-utils/src/assert_queue.rs index fa2b6ff4f2..623b1c6a4b 100644 --- a/test-utils/src/assert_queue.rs +++ b/test-utils/src/assert_queue.rs @@ -159,7 
+159,7 @@ pub async fn assert_queue( rolledover_slot: expected_rolledover_slot.unwrap_or(u64::MAX), rollover_threshold: associated_tree_config .rollover_threshold - .unwrap_or_default(), + .unwrap_or(u64::MAX), network_fee: queue_config.network_fee.unwrap_or_default(), rollover_fee: expected_rollover_fee, close_threshold: associated_tree_config.close_threshold.unwrap_or(u64::MAX), diff --git a/test-utils/src/e2e_test_env.rs b/test-utils/src/e2e_test_env.rs index 83c7cd9a46..97ca757655 100644 --- a/test-utils/src/e2e_test_env.rs +++ b/test-utils/src/e2e_test_env.rs @@ -68,6 +68,9 @@ // indexer trait: get_compressed_accounts_by_owner -> return compressed accounts, // refactor all tests to work with that so that we can run all tests with a test validator and concurrency +use account_compression::batch::BatchState; +use account_compression::batched_merkle_tree::ZeroCopyBatchedMerkleTreeAccount; +use account_compression::batched_queue::ZeroCopyBatchedQueueAccount; use light_compressed_token::token_data::AccountState; use light_prover_client::gnark::helpers::{ProofType, ProverConfig}; use light_registry::protocol_config::state::{ProtocolConfig, ProtocolConfigPda}; @@ -103,13 +106,14 @@ use crate::state_tree_rollover::assert_rolled_over_pair; use crate::system_program::{ compress_sol_test, create_addresses_test, decompress_sol_test, transfer_compressed_sol_test, }; +use crate::test_batch_forester::{perform_batch_append, perform_batch_nullify}; use crate::test_env::{ create_address_merkle_tree_and_queue_account, create_state_merkle_tree_and_queue_account, EnvAccounts, }; use crate::test_forester::{empty_address_queue_test, nullify_compressed_accounts}; use account_compression::utils::constants::{ - STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT, + STATE_MERKLE_TREE_CANOPY_DEPTH, STATE_MERKLE_TREE_HEIGHT, TEST_DEFAULT_BATCH_SIZE, }; use account_compression::{ AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig, @@ -136,7 +140,6 @@ use crate::indexer::TestIndexer; use light_client::rpc::errors::RpcError; use light_client::rpc::RpcConnection; use light_client::transaction_params::{FeeConfig, TransactionParams}; -use light_prover_client::gnark::helpers::ProverMode; pub struct User { pub keypair: Keypair, @@ -209,8 +212,44 @@ pub async fn init_program_test_env( &env_accounts.forester.insecure_clone(), env_accounts, Some(ProverConfig { - run_mode: Some(ProverMode::Rpc), - circuits: vec![], + run_mode: None, + circuits: vec![ + ProofType::BatchAppendWithProofsTest, + ProofType::BatchUpdateTest, + ProofType::Inclusion, + ProofType::NonInclusion, + ], + }), + ) + .await; + + E2ETestEnv::>::new( + rpc, + indexer, + env_accounts, + KeypairActionConfig::all_default(), + GeneralActionConfig::default(), + 10, + None, + ) + .await +} + +pub async fn init_program_test_env_forester( + rpc: ProgramTestRpcConnection, + env_accounts: &EnvAccounts, +) -> E2ETestEnv> { + let indexer: TestIndexer = TestIndexer::init_from_env( + &env_accounts.forester.insecure_clone(), + env_accounts, + Some(ProverConfig { + run_mode: None, + circuits: vec![ + ProofType::BatchAppendWithProofs, + ProofType::BatchUpdate, + ProofType::Inclusion, + ProofType::NonInclusion, + ], }), ) .await; @@ -408,6 +447,8 @@ where .create_state_mt .unwrap_or_default(), ) { + println!("\n------------------------------------------------------\n"); + println!("Creating new state Merkle tree"); self.create_state_tree(rollover_threshold).await; self.stats.create_state_mt += 1; } @@ -417,6 +458,9 @@ where 
.create_address_mt .unwrap_or_default(), ) { + println!("\n------------------------------------------------------\n"); + println!("Creating new address Merkle tree"); + self.create_address_tree(rollover_threshold).await; self.stats.create_address_mt += 1; } @@ -427,26 +471,127 @@ where .unwrap_or_default(), ) { for state_tree_bundle in self.indexer.get_state_merkle_trees_mut().iter_mut() { - println!("\n --------------------------------------------------\n\t\t NULLIFYING LEAVES\n --------------------------------------------------"); - // find forester which is eligible this slot for this tree - if let Some(payer) = Self::get_eligible_forester_for_queue( - &state_tree_bundle.accounts.nullifier_queue, - &self.foresters, - self.slot, - ) { - // TODO: add newly addeded trees to foresters - nullify_compressed_accounts( - &mut self.rpc, - &payer, - state_tree_bundle, - self.epoch, - false, - ) - .await - .unwrap(); - } else { - println!("No forester found for nullifier queue"); - }; + println!("state tree bundle version {}", state_tree_bundle.version); + match state_tree_bundle.version { + 1 => { + println!("\n --------------------------------------------------\n\t\t NULLIFYING LEAVES v1\n --------------------------------------------------"); + // find forester which is eligible this slot for this tree + if let Some(payer) = Self::get_eligible_forester_for_queue( + &state_tree_bundle.accounts.nullifier_queue, + &self.foresters, + self.slot, + ) { + // TODO: add newly addeded trees to foresters + nullify_compressed_accounts( + &mut self.rpc, + &payer, + state_tree_bundle, + self.epoch, + false, + ) + .await + .unwrap(); + } else { + println!("No forester found for nullifier queue"); + }; + } + 2 => { + let merkle_tree_pubkey = state_tree_bundle.accounts.merkle_tree; + let queue_pubkey = state_tree_bundle.accounts.nullifier_queue; + // Check input queue + if let Some(payer) = Self::get_eligible_forester_for_queue( + &state_tree_bundle.accounts.merkle_tree, + &self.foresters, + self.slot, + ) { + let mut merkle_tree_account = self + .rpc + .get_account(merkle_tree_pubkey) + .await + .unwrap() + .unwrap(); + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut( + merkle_tree_account.data.as_mut_slice(), + ) + .unwrap(); + let next_full_batch_index = + merkle_tree.get_account().queue.next_full_batch_index; + let batch = merkle_tree + .batches + .get(next_full_batch_index as usize) + .unwrap(); + let batch_state = batch.get_state(); + println!( + "output batch_state {:?}, {}, batch index {}", + batch_state, + batch.get_num_inserted() + + batch.get_current_zkp_batch_index() * batch.zkp_batch_size, + next_full_batch_index + ); + println!("input batch_state {:?}", batch_state); + if batch_state == BatchState::ReadyToUpdateTree { + println!("\n --------------------------------------------------\n\t\t NULLIFYING LEAVES batched (v2)\n --------------------------------------------------"); + for _ in 0..TEST_DEFAULT_BATCH_SIZE { + perform_batch_nullify( + &mut self.rpc, + state_tree_bundle, + &payer, + self.epoch, + false, + None, + ) + .await + .unwrap(); + } + } + } + // Check output queue + if let Some(payer) = Self::get_eligible_forester_for_queue( + &state_tree_bundle.accounts.nullifier_queue, + &self.foresters, + self.slot, + ) { + println!("\n --------------------------------------------------\n\t\t Appending LEAVES batched (v2)\n --------------------------------------------------"); + let mut queue_account = + self.rpc.get_account(queue_pubkey).await.unwrap().unwrap(); + let output_queue 
= ZeroCopyBatchedQueueAccount::from_bytes_mut( + queue_account.data.as_mut_slice(), + ) + .unwrap(); + let next_full_batch_index = + output_queue.get_account().queue.next_full_batch_index; + let batch = output_queue + .batches + .get(next_full_batch_index as usize) + .unwrap(); + let batch_state = batch.get_state(); + println!( + "output batch_state {:?}, {}, batch index {}", + batch_state, + batch.get_num_inserted() + + batch.get_current_zkp_batch_index() * batch.zkp_batch_size, + next_full_batch_index + ); + if batch_state == BatchState::ReadyToUpdateTree { + for _ in 0..TEST_DEFAULT_BATCH_SIZE { + perform_batch_append( + &mut self.rpc, + state_tree_bundle, + &payer, + self.epoch, + false, + None, + ) + .await + .unwrap(); + } + } + } + } + _ => { + println!("Version skipped {}", state_tree_bundle.version); + } + } } } @@ -692,11 +837,19 @@ where .indexer .get_state_merkle_trees() .iter() - .map(|state_merkle_tree_bundle| TreeAccounts { - tree_type: TreeType::State, - merkle_tree: state_merkle_tree_bundle.accounts.merkle_tree, - queue: state_merkle_tree_bundle.accounts.nullifier_queue, - is_rolledover: false, + .map(|state_merkle_tree_bundle| { + let tree_type = match state_merkle_tree_bundle.version { + 1 => TreeType::State, + 2 => TreeType::BatchedState, + _ => panic!("unsupported version {}", state_merkle_tree_bundle.version), + }; + + TreeAccounts { + tree_type, + merkle_tree: state_merkle_tree_bundle.accounts.merkle_tree, + queue: state_merkle_tree_bundle.accounts.nullifier_queue, + is_rolledover: false, + } }) .collect::>(); self.indexer.get_address_merkle_trees().iter().for_each( @@ -826,12 +979,11 @@ where STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, )); - let state_tree_account = - AccountZeroCopy::::new( - &mut self.rpc, - nullifier_queue_keypair.pubkey(), - ) - .await; + let state_tree_account = AccountZeroCopy::::new( + &mut self.rpc, + nullifier_queue_keypair.pubkey(), + ) + .await; self.indexer .get_state_merkle_trees_mut() .push(StateMerkleTreeBundle { @@ -845,7 +997,10 @@ where nullifier_queue: nullifier_queue_keypair.pubkey(), cpi_context: cpi_context_keypair.pubkey(), }, + version: 1, merkle_tree, + output_queue_elements: vec![], + input_leaf_indices: vec![], }); // TODO: Add assert } @@ -1092,9 +1247,14 @@ where tree_index: Option, ) -> Result { let input_compressed_accounts = self.get_compressed_sol_accounts(&from.pubkey()); - let output_merkle_tree = self.indexer.get_state_merkle_trees()[tree_index.unwrap_or(0)] - .accounts - .merkle_tree; + let bundle = self.indexer.get_state_merkle_trees()[tree_index.unwrap_or(0)].clone(); + let rollover_fee = bundle.rollover_fee; + let output_merkle_tree = match bundle.version { + 1 => bundle.accounts.merkle_tree, + // Output queue for batched trees + 2 => bundle.accounts.nullifier_queue, + _ => panic!("Unsupported version"), + }; let recipients = vec![*to]; let transaction_params = if self.keypair_action_config.fee_assert { Some(TransactionParams { @@ -1102,7 +1262,10 @@ where num_input_compressed_accounts: input_compressed_accounts.len() as u8, num_output_compressed_accounts: 1u8, compress: 0, - fee_config: FeeConfig::default(), + fee_config: FeeConfig { + state_merkle_tree_rollover: rollover_fee as u64, + ..Default::default() + }, }) } else { None @@ -1219,16 +1382,25 @@ where tree_index: Option, ) { let input_compressed_accounts = self.get_compressed_sol_accounts(&from.pubkey()); - let output_merkle_tree = self.indexer.get_state_merkle_trees()[tree_index.unwrap_or(0)] - .accounts - .merkle_tree; 
+ let bundle = self.indexer.get_state_merkle_trees()[tree_index.unwrap_or(0)].clone(); + let rollover_fee = bundle.rollover_fee; + let output_merkle_tree = match bundle.version { + 1 => bundle.accounts.merkle_tree, + // Output queue for batched trees + 2 => bundle.accounts.nullifier_queue, + _ => panic!("Unsupported version"), + }; + let transaction_parameters = if self.keypair_action_config.fee_assert { Some(TransactionParams { num_new_addresses: 0, num_input_compressed_accounts: input_compressed_accounts.len() as u8, num_output_compressed_accounts: 1u8, compress: amount as i64, - fee_config: FeeConfig::default(), + fee_config: FeeConfig { + state_merkle_tree_rollover: rollover_fee as u64, + ..Default::default() + }, }) } else { None @@ -1888,10 +2060,13 @@ where nullifier_queue: new_nullifier_queue_keypair.pubkey(), cpi_context: new_cpi_signature_keypair.pubkey(), }, + version: 1, merkle_tree: Box::new(light_merkle_tree_reference::MerkleTree::::new( STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, )), + output_queue_elements: vec![], + input_leaf_indices: vec![], }); Ok(()) } @@ -1970,11 +2145,17 @@ where ) as usize; let index = Self::safe_gen_range(&mut self.rng, 0..range_max, 0); - pubkeys.push( - self.indexer.get_state_merkle_trees()[index] - .accounts - .merkle_tree, - ); + let bundle = &self.indexer.get_state_merkle_trees()[index]; + // For batched trees we need to use the output queue + if bundle.version == 2 { + pubkeys.push(bundle.accounts.nullifier_queue); + } else { + pubkeys.push( + self.indexer.get_state_merkle_trees()[index] + .accounts + .merkle_tree, + ); + } } pubkeys.sort(); pubkeys @@ -2260,8 +2441,8 @@ impl KeypairActionConfig { compress_spl: Some(0.0), decompress_spl: Some(0.0), mint_spl: None, - transfer_spl: Some(0.0), - max_output_accounts: Some(10), + transfer_spl: Some(1.0), + max_output_accounts: Some(3), fee_assert: true, approve_spl: None, revoke_spl: None, diff --git a/test-utils/src/indexer/test_indexer.rs b/test-utils/src/indexer/test_indexer.rs index f6d304fc02..06e87f0c43 100644 --- a/test-utils/src/indexer/test_indexer.rs +++ b/test-utils/src/indexer/test_indexer.rs @@ -1,24 +1,36 @@ +use account_compression::batched_merkle_tree::ZeroCopyBatchedMerkleTreeAccount; +use account_compression::batched_queue::{BatchedQueueAccount, ZeroCopyBatchedQueueAccount}; +use light_macros::pubkey; +use light_prover_client::batch_append_with_proofs::get_batch_append_with_proofs_inputs; +use light_prover_client::batch_append_with_subtrees::calculate_hash_chain; +use light_prover_client::gnark::batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson; +use light_system_program::invoke::verify_state_proof::{create_tx_hash, create_tx_hash_offchain}; +use light_system_program::sdk::compressed_account::QueueIndex; use log::{debug, info, warn}; use num_bigint::BigUint; use solana_sdk::bs58; +use std::future::Future; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use crate::e2e_test_env::KeypairActionConfig; +use crate::test_batch_forester::create_batched_state_merkle_tree; +use crate::test_env::BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR; use crate::{ spl::create_initialize_mint_instructions, test_env::create_address_merkle_tree_and_queue_account, }; use account_compression::{ - AddressMerkleTreeConfig, AddressQueueConfig, NullifierQueueConfig, StateMerkleTreeConfig, + rollover, AddressMerkleTreeConfig, AddressQueueConfig, InitStateTreeAccountsInstructionData, + NullifierQueueConfig, StateMerkleTreeConfig, }; use 
forester_utils::indexer::{ - AddressMerkleTreeAccounts, AddressMerkleTreeBundle, Indexer, IndexerError, MerkleProof, - NewAddressProofWithContext, ProofRpcResult, StateMerkleTreeAccounts, StateMerkleTreeBundle, - TokenDataWithContext, + AddressMerkleTreeAccounts, AddressMerkleTreeBundle, BatchedTreeProofRpcResult, Indexer, + IndexerError, MerkleProof, NewAddressProofWithContext, ProofRpcResult, StateMerkleTreeAccounts, + StateMerkleTreeBundle, TokenDataWithContext, }; -use forester_utils::{get_concurrent_merkle_tree, get_indexed_merkle_tree}; -use light_client::rpc::RpcConnection; +use forester_utils::{get_concurrent_merkle_tree, get_indexed_merkle_tree, AccountZeroCopy}; +use light_client::rpc::{RpcConnection, RpcError}; use light_client::transaction_params::FeeConfig; use light_compressed_token::constants::TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR; use light_compressed_token::mint_sdk::create_create_token_pool_instruction; @@ -69,7 +81,6 @@ use { std::time::Duration, }; -// TODO: find a different way to init Indexed array on the heap so that it doesn't break the stack #[derive(Debug)] pub struct TestIndexer { pub state_merkle_trees: Vec, @@ -249,7 +260,7 @@ impl Indexer for TestIndexer { .map(|x| { self.state_merkle_trees .iter() - .find(|y| y.accounts.merkle_tree == *x) + .find(|y| y.accounts.merkle_tree == *x || y.accounts.nullifier_queue == *x) .unwrap() .accounts }) @@ -258,145 +269,24 @@ impl Indexer for TestIndexer { fn add_event_and_compressed_accounts( &mut self, + slot: u64, event: &PublicTransactionEvent, ) -> ( Vec, Vec, ) { - for hash in event.input_compressed_account_hashes.iter() { - let index = self.compressed_accounts.iter().position(|x| { - x.compressed_account - .hash::( - &x.merkle_context.merkle_tree_pubkey, - &x.merkle_context.leaf_index, - ) - .unwrap() - == *hash - }); - if let Some(index) = index { - self.nullified_compressed_accounts - .push(self.compressed_accounts[index].clone()); - self.compressed_accounts.remove(index); - continue; - }; - if index.is_none() { - let index = self - .token_compressed_accounts - .iter() - .position(|x| { - x.compressed_account - .compressed_account - .hash::( - &x.compressed_account.merkle_context.merkle_tree_pubkey, - &x.compressed_account.merkle_context.leaf_index, - ) - .unwrap() - == *hash - }) - .expect("input compressed account not found"); - self.token_nullified_compressed_accounts - .push(self.token_compressed_accounts[index].clone()); - self.token_compressed_accounts.remove(index); - } - } - let mut compressed_accounts = Vec::new(); let mut token_compressed_accounts = Vec::new(); - for (i, compressed_account) in event.output_compressed_accounts.iter().enumerate() { - let nullifier_queue_pubkey = self - .state_merkle_trees - .iter() - .find(|x| { - x.accounts.merkle_tree - == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] - }) - .unwrap() - .accounts - .nullifier_queue; - // if data is some, try to deserialize token data, if it fails, add to compressed_accounts - // if data is none add to compressed_accounts - // new accounts are inserted in front so that the newest accounts are found first - match compressed_account.compressed_account.data.as_ref() { - Some(data) => { - if compressed_account.compressed_account.owner == light_compressed_token::ID - && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR - { - if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { - let token_account = TokenDataWithContext { - token_data, - compressed_account: 
CompressedAccountWithMerkleContext { - compressed_account: compressed_account - .compressed_account - .clone(), - merkle_context: MerkleContext { - leaf_index: event.output_leaf_indices[i], - merkle_tree_pubkey: event.pubkey_array[event - .output_compressed_accounts[i] - .merkle_tree_index - as usize], - nullifier_queue_pubkey, - queue_index: None, - }, - }, - }; - token_compressed_accounts.push(token_account.clone()); - self.token_compressed_accounts.insert(0, token_account); - } - } else { - let compressed_account = CompressedAccountWithMerkleContext { - compressed_account: compressed_account.compressed_account.clone(), - merkle_context: MerkleContext { - leaf_index: event.output_leaf_indices[i], - merkle_tree_pubkey: event.pubkey_array[event - .output_compressed_accounts[i] - .merkle_tree_index - as usize], - nullifier_queue_pubkey, - queue_index: None, - }, - }; - compressed_accounts.push(compressed_account.clone()); - self.compressed_accounts.insert(0, compressed_account); - } - } - None => { - let compressed_account = CompressedAccountWithMerkleContext { - compressed_account: compressed_account.compressed_account.clone(), - merkle_context: MerkleContext { - leaf_index: event.output_leaf_indices[i], - merkle_tree_pubkey: event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize], - nullifier_queue_pubkey, - queue_index: None, - }, - }; - compressed_accounts.push(compressed_account.clone()); - self.compressed_accounts.insert(0, compressed_account); - } - }; - let merkle_tree = &mut self - .state_merkle_trees - .iter_mut() - .find(|x| { - x.accounts.merkle_tree - == event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize] - }) - .unwrap() - .merkle_tree; - merkle_tree - .append( - &compressed_account - .compressed_account - .hash::( - &event.pubkey_array - [event.output_compressed_accounts[i].merkle_tree_index as usize], - &event.output_leaf_indices[i], - ) - .unwrap(), - ) - .expect("insert failed"); + let event_inputs_len = event.input_compressed_account_hashes.len(); + let event_outputs_len = event.output_compressed_account_hashes.len(); + for i in 0..std::cmp::max(event_inputs_len, event_outputs_len) { + self.process_v1_compressed_account( + slot, + event, + i, + &mut token_compressed_accounts, + &mut compressed_accounts, + ); } self.events.push(event.clone()); @@ -431,17 +321,117 @@ impl Indexer for TestIndexer { &self.group_pda } + async fn create_proof_for_compressed_accounts2( + &mut self, + compressed_accounts: Option>, + state_merkle_tree_pubkeys: Option>, + new_addresses: Option<&[[u8; 32]]>, + address_merkle_tree_pubkeys: Option>, + rpc: &mut R, + ) -> BatchedTreeProofRpcResult { + let mut indices_to_remove = Vec::new(); + + // for all accounts in batched trees, check whether values are in tree or queue + let (compressed_accounts, state_merkle_tree_pubkeys) = + if let Some((compressed_accounts, state_merkle_tree_pubkeys)) = + compressed_accounts.zip(state_merkle_tree_pubkeys) + { + for (i, (compressed_account, state_merkle_tree_pubkey)) in compressed_accounts + .iter() + .zip(state_merkle_tree_pubkeys.iter()) + .enumerate() + { + let accounts = self.state_merkle_trees.iter().find(|x| { + x.accounts.merkle_tree == *state_merkle_tree_pubkey && x.version == 2 + }); + if let Some(accounts) = accounts { + let output_queue_pubkey = accounts.accounts.nullifier_queue; + let mut queue = + AccountZeroCopy::::new(rpc, output_queue_pubkey) + .await; + let queue_zero_copy = ZeroCopyBatchedQueueAccount::from_bytes_mut( + 
queue.account.data.as_mut_slice(), + ) + .unwrap(); + for (j, value_array) in queue_zero_copy.value_vecs.iter().enumerate() { + let index = value_array.iter().position(|x| *x == *compressed_account); + if let Some(index) = index { + indices_to_remove.push(i); + } + } + } + } + let compress_accounts = compressed_accounts + .iter() + .enumerate() + .filter(|(i, _)| !indices_to_remove.contains(i)) + .map(|(_, x)| *x) + .collect::>(); + let state_merkle_tree_pubkeys = state_merkle_tree_pubkeys + .iter() + .enumerate() + .filter(|(i, _)| !indices_to_remove.contains(i)) + .map(|(_, x)| *x) + .collect::>(); + if compress_accounts.is_empty() { + (None, None) + } else { + (Some(compress_accounts), Some(state_merkle_tree_pubkeys)) + } + } else { + (None, None) + }; + let rpc_result = if (compressed_accounts.is_some() + && !compressed_accounts.as_ref().unwrap().is_empty()) + || address_merkle_tree_pubkeys.is_some() + { + Some( + self.create_proof_for_compressed_accounts( + compressed_accounts, + state_merkle_tree_pubkeys, + new_addresses, + address_merkle_tree_pubkeys, + rpc, + ) + .await, + ) + } else { + None + }; + let address_root_indices = if let Some(rpc_result) = rpc_result.as_ref() { + rpc_result.address_root_indices.clone() + } else { + Vec::new() + }; + let root_indices = { + let mut root_indices = if let Some(rpc_result) = rpc_result.as_ref() { + rpc_result.root_indices.clone() + } else { + Vec::new() + }; + for index in indices_to_remove { + root_indices.insert(index, None); + } + root_indices + }; + BatchedTreeProofRpcResult { + proof: rpc_result.map(|x| x.proof), + root_indices, + address_root_indices, + } + } + async fn create_proof_for_compressed_accounts( &mut self, - compressed_accounts: Option<&[[u8; 32]]>, - state_merkle_tree_pubkeys: Option<&[Pubkey]>, + mut compressed_accounts: Option>, + mut state_merkle_tree_pubkeys: Option>, new_addresses: Option<&[[u8; 32]]>, address_merkle_tree_pubkeys: Option>, rpc: &mut R, ) -> ProofRpcResult { if compressed_accounts.is_some() && ![1usize, 2usize, 3usize, 4usize, 8usize] - .contains(&compressed_accounts.unwrap().len()) + .contains(&compressed_accounts.as_ref().unwrap().len()) { panic!( "compressed_accounts must be of length 1, 2, 3, 4 or 8 != {}", @@ -456,7 +446,11 @@ impl Indexer for TestIndexer { match (compressed_accounts, new_addresses) { (Some(accounts), None) => { let (payload, indices) = self - .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) + .process_inclusion_proofs( + &state_merkle_tree_pubkeys.unwrap(), + &accounts, + rpc, + ) .await; (indices, Vec::new(), payload.to_string()) } @@ -472,7 +466,11 @@ impl Indexer for TestIndexer { } (Some(accounts), Some(addresses)) => { let (inclusion_payload, inclusion_indices) = self - .process_inclusion_proofs(state_merkle_tree_pubkeys.unwrap(), accounts, rpc) + .process_inclusion_proofs( + &state_merkle_tree_pubkeys.unwrap(), + &accounts, + rpc, + ) .await; let (non_inclusion_payload, non_inclusion_indices) = self .process_non_inclusion_proofs( @@ -503,14 +501,16 @@ impl Indexer for TestIndexer { .send() .await .expect("Failed to execute request."); + println!("response_result {:?}", response_result); if response_result.status().is_success() { let body = response_result.text().await.unwrap(); let proof_json = deserialize_gnark_proof_json(&body).unwrap(); let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); + let root_indices = root_indices.iter().map(|x| 
Some(*x)).collect::>(); return ProofRpcResult { root_indices, - address_root_indices, + address_root_indices: address_root_indices.clone(), proof: CompressedProof { a: proof_a, b: proof_b, @@ -521,7 +521,7 @@ impl Indexer for TestIndexer { warn!("Error: {}", response_result.text().await.unwrap()); tokio::time::sleep(Duration::from_secs(1)).await; if let Some(ref prover_config) = self.prover_config { - spawn_prover(true, prover_config.clone()).await; + spawn_prover(false, prover_config.clone()).await; } retries -= 1; } @@ -604,11 +604,18 @@ impl TestIndexer { prover_config: Option, ) -> Self { Self::new( - vec![StateMerkleTreeAccounts { - merkle_tree: env.merkle_tree_pubkey, - nullifier_queue: env.nullifier_queue_pubkey, - cpi_context: env.cpi_context_account_pubkey, - }], + vec![ + StateMerkleTreeAccounts { + merkle_tree: env.merkle_tree_pubkey, + nullifier_queue: env.nullifier_queue_pubkey, + cpi_context: env.cpi_context_account_pubkey, + }, + StateMerkleTreeAccounts { + merkle_tree: env.batched_state_merkle_tree, + nullifier_queue: env.batched_output_queue, + cpi_context: env.batched_cpi_context, + }, + ], vec![AddressMerkleTreeAccounts { merkle_tree: env.address_merkle_tree_pubkey, queue: env.address_merkle_tree_queue_pubkey, @@ -628,6 +635,8 @@ impl TestIndexer { prover_config: Option, ) -> Self { if let Some(ref prover_config) = prover_config { + // TODO: remove restart input and check whether prover is already + // running with correct config spawn_prover(true, prover_config.clone()).await; } let mut state_merkle_trees = Vec::new(); @@ -636,10 +645,22 @@ impl TestIndexer { STATE_MERKLE_TREE_HEIGHT as usize, STATE_MERKLE_TREE_CANOPY_DEPTH as usize, )); + let test_batched_output_queue = + Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap(); + let version = if state_merkle_tree_account.nullifier_queue + == test_batched_output_queue.pubkey() + { + 2 + } else { + 1 + }; state_merkle_trees.push(StateMerkleTreeBundle { accounts: *state_merkle_tree_account, merkle_tree, rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64, + version, + output_queue_elements: vec![], + input_leaf_indices: vec![], }); } @@ -711,34 +732,58 @@ impl TestIndexer { self.add_address_merkle_tree_accounts(merkle_tree_keypair, queue_keypair, owning_program_id) } + #[allow(clippy::too_many_arguments)] pub async fn add_state_merkle_tree( &mut self, rpc: &mut R, merkle_tree_keypair: &Keypair, - nullifier_queue_keypair: &Keypair, + queue_keypair: &Keypair, cpi_context_keypair: &Keypair, owning_program_id: Option, forester: Option, + version: u64, ) { - create_state_merkle_tree_and_queue_account( - &self.payer, - true, - rpc, - merkle_tree_keypair, - nullifier_queue_keypair, - Some(cpi_context_keypair), - owning_program_id, - forester, - self.state_merkle_trees.len() as u64, - &StateMerkleTreeConfig::default(), - &NullifierQueueConfig::default(), - ) - .await - .unwrap(); + let rollover_fee = match version { + 1 => { + create_state_merkle_tree_and_queue_account( + &self.payer, + true, + rpc, + merkle_tree_keypair, + queue_keypair, + Some(cpi_context_keypair), + owning_program_id, + forester, + self.state_merkle_trees.len() as u64, + &StateMerkleTreeConfig::default(), + &NullifierQueueConfig::default(), + ) + .await + .unwrap(); + FeeConfig::default().state_merkle_tree_rollover as i64 + } + 2 => { + let params = InitStateTreeAccountsInstructionData::test_default(); + create_batched_state_merkle_tree( + &self.payer, + true, + rpc, + merkle_tree_keypair, + queue_keypair, + cpi_context_keypair, + 
params, + ).await; + FeeConfig::test_batched().state_merkle_tree_rollover as i64 + } + _ => panic!( + "add_state_merkle_tree: Version not supported, {}. Versions: 1 concurrent, 2 batched", + version + ), + }; let state_merkle_tree_account = StateMerkleTreeAccounts { merkle_tree: merkle_tree_keypair.pubkey(), - nullifier_queue: nullifier_queue_keypair.pubkey(), + nullifier_queue: queue_keypair.pubkey(), cpi_context: cpi_context_keypair.pubkey(), }; let merkle_tree = Box::new(MerkleTree::::new( @@ -749,7 +794,10 @@ impl TestIndexer { self.state_merkle_trees.push(StateMerkleTreeBundle { merkle_tree, accounts: state_merkle_tree_account, - rollover_fee: FeeConfig::default().state_merkle_tree_rollover as i64, + rollover_fee, + version, + output_queue_elements: vec![], + input_leaf_indices: vec![], }); } @@ -763,12 +811,12 @@ impl TestIndexer { let mut root_indices = Vec::new(); for (i, account) in accounts.iter().enumerate() { - let merkle_tree = &self + let bundle = &self .state_merkle_trees .iter() .find(|x| x.accounts.merkle_tree == merkle_tree_pubkeys[i]) - .unwrap() - .merkle_tree; + .unwrap(); + let merkle_tree = &bundle.merkle_tree; let leaf_index = merkle_tree.get_leaf_index(account).unwrap(); let proof = merkle_tree.get_proof_of_leaf(leaf_index, true).unwrap(); inclusion_proofs.push(InclusionMerkleProofInputs { @@ -777,30 +825,45 @@ impl TestIndexer { path_index: BigInt::from_be_bytes(leaf_index.to_be_bytes().as_slice()), path_elements: proof.iter().map(|x| BigInt::from_be_bytes(x)).collect(), }); - let fetched_merkle_tree = unsafe { - get_concurrent_merkle_tree::( - rpc, - merkle_tree_pubkeys[i], + let (root_index, root) = if bundle.version == 1 { + let fetched_merkle_tree = unsafe { + get_concurrent_merkle_tree::( + rpc, + merkle_tree_pubkeys[i], + ) + .await + }; + for i in 0..fetched_merkle_tree.roots.len() { + info!("roots {:?} {:?}", i, fetched_merkle_tree.roots[i]); + } + info!( + "sequence number {:?}", + fetched_merkle_tree.sequence_number() + ); + info!("root index {:?}", fetched_merkle_tree.root_index()); + info!("local sequence number {:?}", merkle_tree.sequence_number); + ( + fetched_merkle_tree.root_index() as u32, + fetched_merkle_tree.root(), + ) + } else { + let mut merkle_tree_account = rpc + .get_account(merkle_tree_pubkeys[i]) + .await + .unwrap() + .unwrap(); + let merkle_tree = ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut( + merkle_tree_account.data.as_mut_slice(), + ) + .unwrap(); + ( + merkle_tree.get_root_index(), + merkle_tree.get_root().unwrap(), ) - .await }; - for i in 0..fetched_merkle_tree.roots.len() { - info!("roots {:?} {:?}", i, fetched_merkle_tree.roots[i]); - } - info!( - "sequence number {:?}", - fetched_merkle_tree.sequence_number() - ); - info!("root index {:?}", fetched_merkle_tree.root_index()); - info!("local sequence number {:?}", merkle_tree.sequence_number); - - assert_eq!( - merkle_tree.root(), - fetched_merkle_tree.root(), - "Merkle tree root mismatch" - ); + assert_eq!(merkle_tree.root(), root, "Merkle tree root mismatch"); - root_indices.push(fetched_merkle_tree.root_index() as u16); + root_indices.push(root_index as u16); } let inclusion_proof_inputs = InclusionProofInputs(inclusion_proofs.as_slice()); @@ -852,10 +915,10 @@ impl TestIndexer { /// adds the output_compressed_accounts to the compressed_accounts /// removes the input_compressed_accounts from the compressed_accounts /// adds the input_compressed_accounts to the nullified_compressed_accounts - pub fn add_lamport_compressed_accounts(&mut self, event_bytes: Vec) { + pub fn 
add_lamport_compressed_accounts(&mut self, slot: u64, event_bytes: Vec<u8>) {
        let event_bytes = event_bytes.clone();
        let event = PublicTransactionEvent::deserialize(&mut event_bytes.as_slice()).unwrap();
-        self.add_event_and_compressed_accounts(&event);
+        self.add_event_and_compressed_accounts(slot, &event);
    }

    /// deserializes an event
@@ -864,8 +927,12 @@ impl TestIndexer {
    /// adds the input_compressed_accounts to the nullified_compressed_accounts
    /// deserializes token data from the output_compressed_accounts
    /// adds the token_compressed_accounts to the token_compressed_accounts
-    pub fn add_compressed_accounts_with_token_data(&mut self, event: &PublicTransactionEvent) {
-        self.add_event_and_compressed_accounts(event);
+    pub fn add_compressed_accounts_with_token_data(
+        &mut self,
+        slot: u64,
+        event: &PublicTransactionEvent,
+    ) {
+        self.add_event_and_compressed_accounts(slot, event);
    }

    /// returns the compressed sol balance of the owner pubkey
@@ -888,4 +955,216 @@ impl TestIndexer {
            .map(|x| x.token_data.amount)
            .sum()
    }
+
+    fn process_v1_compressed_account(
+        &mut self,
+        slot: u64,
+        event: &PublicTransactionEvent,
+        i: usize,
+        token_compressed_accounts: &mut Vec<TokenDataWithContext>,
+        compressed_accounts: &mut Vec<CompressedAccountWithMerkleContext>,
+    ) {
+        if event.input_compressed_account_hashes.len() > i {
+            let tx_hash: [u8; 32] = create_tx_hash_offchain(
+                &event.input_compressed_account_hashes,
+                &event.output_compressed_account_hashes,
+                slot,
+            );
+            println!("tx_hash {:?}", tx_hash);
+            println!("slot {:?}", slot);
+            let hash = event.input_compressed_account_hashes[i];
+            let index = self.compressed_accounts.iter().position(|x| {
+                x.compressed_account
+                    .hash::<Poseidon>(
+                        &x.merkle_context.merkle_tree_pubkey,
+                        &x.merkle_context.leaf_index,
+                    )
+                    .unwrap()
+                    == hash
+            });
+            let (leaf_index, merkle_tree_pubkey) = if let Some(index) = index {
+                self.nullified_compressed_accounts
+                    .push(self.compressed_accounts[index].clone());
+                let leaf_index = self.compressed_accounts[index].merkle_context.leaf_index;
+                let merkle_tree_pubkey = self.compressed_accounts[index]
+                    .merkle_context
+                    .merkle_tree_pubkey;
+                self.compressed_accounts.remove(index);
+                (leaf_index, merkle_tree_pubkey)
+            } else {
+                let index = self
+                    .token_compressed_accounts
+                    .iter()
+                    .position(|x| {
+                        x.compressed_account
+                            .compressed_account
+                            .hash::<Poseidon>(
+                                &x.compressed_account.merkle_context.merkle_tree_pubkey,
+                                &x.compressed_account.merkle_context.leaf_index,
+                            )
+                            .unwrap()
+                            == hash
+                    })
+                    .expect("input compressed account not found");
+                self.token_nullified_compressed_accounts
+                    .push(self.token_compressed_accounts[index].clone());
+                let leaf_index = self.token_compressed_accounts[index]
+                    .compressed_account
+                    .merkle_context
+                    .leaf_index;
+                let merkle_tree_pubkey = self.token_compressed_accounts[index]
+                    .compressed_account
+                    .merkle_context
+                    .merkle_tree_pubkey;
+                self.token_compressed_accounts.remove(index);
+                (leaf_index, merkle_tree_pubkey)
+            };
+            let bundle = &mut self
+                .get_state_merkle_trees_mut()
+                .iter_mut()
+                .find(|x| x.accounts.merkle_tree == merkle_tree_pubkey)
+                .unwrap();
+            // Store leaf indices of input accounts for batched trees
+            if bundle.version == 2 {
+                let leaf_hash = event.input_compressed_account_hashes[i];
+                bundle
+                    .input_leaf_indices
+                    .push((leaf_index, leaf_hash, tx_hash));
+            }
+        }
+        if event.output_compressed_accounts.len() > i {
+            let compressed_account = &event.output_compressed_accounts[i];
+
+            let merkle_tree = self.state_merkle_trees.iter().find(|x| {
+                x.accounts.merkle_tree
+                    == event.pubkey_array
[event.output_compressed_accounts[i].merkle_tree_index as usize] + }); + // Check for output queue + let merkle_tree = if let Some(merkle_tree) = merkle_tree { + merkle_tree + } else { + self.state_merkle_trees + .iter() + .find(|x| { + x.accounts.nullifier_queue + == event.pubkey_array + [event.output_compressed_accounts[i].merkle_tree_index as usize] + }) + .unwrap() + }; + println!("found merkle tree {:?}", merkle_tree.accounts.merkle_tree); + let nullifier_queue_pubkey = merkle_tree.accounts.nullifier_queue; + let merkle_tree_pubkey = merkle_tree.accounts.merkle_tree; + // if data is some, try to deserialize token data, if it fails, add to compressed_accounts + // if data is none add to compressed_accounts + // new accounts are inserted in front so that the newest accounts are found first + match compressed_account.compressed_account.data.as_ref() { + Some(data) => { + if compressed_account.compressed_account.owner == light_compressed_token::ID + && data.discriminator == TOKEN_COMPRESSED_ACCOUNT_DISCRIMINATOR + { + if let Ok(token_data) = TokenData::deserialize(&mut data.data.as_slice()) { + let token_account = TokenDataWithContext { + token_data, + compressed_account: CompressedAccountWithMerkleContext { + compressed_account: compressed_account + .compressed_account + .clone(), + merkle_context: MerkleContext { + leaf_index: event.output_leaf_indices[i], + merkle_tree_pubkey, + nullifier_queue_pubkey, + queue_index: None, + }, + }, + }; + token_compressed_accounts.push(token_account.clone()); + self.token_compressed_accounts.insert(0, token_account); + } + } else { + let compressed_account = CompressedAccountWithMerkleContext { + compressed_account: compressed_account.compressed_account.clone(), + merkle_context: MerkleContext { + leaf_index: event.output_leaf_indices[i], + merkle_tree_pubkey, + nullifier_queue_pubkey, + queue_index: None, + }, + }; + compressed_accounts.push(compressed_account.clone()); + self.compressed_accounts.insert(0, compressed_account); + } + } + None => { + let compressed_account = CompressedAccountWithMerkleContext { + compressed_account: compressed_account.compressed_account.clone(), + merkle_context: MerkleContext { + leaf_index: event.output_leaf_indices[i], + merkle_tree_pubkey, + nullifier_queue_pubkey, + queue_index: None, + }, + }; + compressed_accounts.push(compressed_account.clone()); + self.compressed_accounts.insert(0, compressed_account); + } + }; + let seq = event + .sequence_numbers + .iter() + .find(|x| x.pubkey == merkle_tree_pubkey); + let seq = if let Some(seq) = seq { + seq + } else { + event + .sequence_numbers + .iter() + .find(|x| x.pubkey == nullifier_queue_pubkey) + .unwrap() + }; + let is_batched = seq.seq == u64::MAX; + + println!("Output is batched {:?}", is_batched); + if !is_batched { + let merkle_tree = &mut self + .state_merkle_trees + .iter_mut() + .find(|x| { + x.accounts.merkle_tree + == event.pubkey_array + [event.output_compressed_accounts[i].merkle_tree_index as usize] + }) + .unwrap(); + merkle_tree + .merkle_tree + .append( + &compressed_account + .compressed_account + .hash::( + &event.pubkey_array[event.output_compressed_accounts[i] + .merkle_tree_index + as usize], + &event.output_leaf_indices[i], + ) + .unwrap(), + ) + .expect("insert failed"); + } else { + let merkle_tree = &mut self + .state_merkle_trees + .iter_mut() + .find(|x| { + x.accounts.nullifier_queue + == event.pubkey_array + [event.output_compressed_accounts[i].merkle_tree_index as usize] + }) + .unwrap(); + + merkle_tree + .output_queue_elements + 
.push(event.output_compressed_account_hashes[i]); + } + } + } } diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs index c4d4de26bb..5260bdf7b8 100644 --- a/test-utils/src/lib.rs +++ b/test-utils/src/lib.rs @@ -16,6 +16,7 @@ pub mod rpc; pub mod spl; pub mod state_tree_rollover; pub mod system_program; +pub mod test_batch_forester; pub mod test_env; #[allow(unused)] pub mod test_forester; diff --git a/test-utils/src/rpc/test_rpc.rs b/test-utils/src/rpc/test_rpc.rs index 2046807d8f..9f0573b10e 100644 --- a/test-utils/src/rpc/test_rpc.rs +++ b/test-utils/src/rpc/test_rpc.rs @@ -169,7 +169,9 @@ impl RpcConnection for ProgramTestRpcConnection { // a network_fee is charged if there are input compressed accounts or new addresses let mut network_fee: i64 = 0; - if transaction_params.num_input_compressed_accounts != 0 { + if transaction_params.num_input_compressed_accounts != 0 + || transaction_params.num_output_compressed_accounts != 0 + { network_fee += transaction_params.fee_config.network_fee as i64; } if transaction_params.num_new_addresses != 0 { diff --git a/test-utils/src/spl.rs b/test-utils/src/spl.rs index 1f2a51c240..eff02fb379 100644 --- a/test-utils/src/spl.rs +++ b/test-utils/src/spl.rs @@ -103,8 +103,9 @@ pub async fn mint_tokens_helper_with_lamports>( .await .unwrap() .unwrap(); + let slot = rpc.get_slot().await.unwrap(); + let (_, created_token_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); - let (_, created_token_accounts) = test_indexer.add_event_and_compressed_accounts(&event); assert_mint_to( rpc, test_indexer, @@ -359,10 +360,10 @@ pub async fn compressed_transfer_test>( "input_compressed_account_hashes: {:?}", input_compressed_account_hashes ); - let proof_rpc_result = test_indexer - .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + let rpc_result = test_indexer + .create_proof_for_compressed_accounts2( + Some(input_compressed_account_hashes.clone()), + Some(input_merkle_tree_pubkeys.clone()), None, None, rpc, @@ -375,14 +376,15 @@ pub async fn compressed_transfer_test>( } else { None }; + let authority_signer = if delegate_is_signer { payer } else { from }; let instruction = create_transfer_instruction( &payer.pubkey(), &authority_signer.pubkey(), // authority &input_merkle_tree_context, &output_compressed_accounts, - &proof_rpc_result.root_indices, - &Some(proof_rpc_result.proof), + &rpc_result.root_indices, + &rpc_result.proof, &input_compressed_account_token_data, // input_token_data &input_compressed_accounts .iter() @@ -448,9 +450,9 @@ pub async fn compressed_transfer_test>( .await .unwrap() .unwrap(); - + let slot = rpc.get_slot().await.unwrap(); let (created_change_output_account, created_token_output_accounts) = - test_indexer.add_event_and_compressed_accounts(&event); + test_indexer.add_event_and_compressed_accounts(slot, &event); let delegates = if let Some(index) = delegate_change_account_index { let mut delegates = vec![None; created_token_output_accounts.len()]; delegates[index as usize] = Some(payer.pubkey()); @@ -510,9 +512,9 @@ pub async fn decompress_test>( .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey) .collect::>(); let proof_rpc_result = test_indexer - .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + .create_proof_for_compressed_accounts2( + Some(input_compressed_account_hashes.clone()), + Some(input_merkle_tree_pubkeys.clone()), None, None, rpc, @@ -528,7 +530,7 @@ 
pub async fn decompress_test>( .collect::>(), // input_compressed_account_merkle_tree_pubkeys &[change_out_compressed_account], // output_compressed_accounts &proof_rpc_result.root_indices, // root_indices - &Some(proof_rpc_result.proof), + &proof_rpc_result.proof, input_compressed_accounts .iter() .map(|x| x.token_data.clone()) @@ -578,8 +580,8 @@ pub async fn decompress_test>( .await .unwrap() .unwrap(); - - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); + let slot = rpc.get_slot().await.unwrap(); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); assert_transfer( rpc, test_indexer, @@ -675,8 +677,8 @@ pub async fn compress_test>( .await .unwrap() .unwrap(); - - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); + let slot = rpc.get_slot().await.unwrap(); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); assert_transfer( rpc, @@ -737,9 +739,9 @@ pub async fn approve_test>( ); println!("input compressed accounts: {:?}", input_compressed_accounts); let proof_rpc_result = test_indexer - .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + .create_proof_for_compressed_accounts2( + Some(input_compressed_account_hashes.clone()), + Some(input_merkle_tree_pubkeys.clone()), None, None, rpc, @@ -769,7 +771,7 @@ pub async fn approve_test>( change_compressed_account_merkle_tree: *change_compressed_account_merkle_tree, delegate: *delegate, root_indices: proof_rpc_result.root_indices, - proof: proof_rpc_result.proof, + proof: proof_rpc_result.proof.unwrap_or_default(), }; let instruction = create_approve_instruction(inputs).unwrap(); @@ -824,7 +826,8 @@ pub async fn approve_test>( .await .unwrap() .unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); + let slot = rpc.get_slot().await.unwrap(); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); let expected_delegated_token_data = TokenData { mint, @@ -898,9 +901,9 @@ pub async fn revoke_test>( .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey) .collect::>(); let proof_rpc_result = test_indexer - .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + .create_proof_for_compressed_accounts2( + Some(input_compressed_account_hashes.clone()), + Some(input_merkle_tree_pubkeys.clone()), None, None, rpc, @@ -926,7 +929,7 @@ pub async fn revoke_test>( mint, output_account_merkle_tree: *output_account_merkle_tree, root_indices: proof_rpc_result.root_indices, - proof: proof_rpc_result.proof, + proof: proof_rpc_result.proof.unwrap_or_default(), }; let instruction = create_revoke_instruction(inputs).unwrap(); @@ -950,7 +953,8 @@ pub async fn revoke_test>( .await .unwrap() .unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); + let slot = rpc.get_slot().await.unwrap(); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); let input_amount = input_compressed_accounts .iter() .map(|x| x.token_data.amount) @@ -1049,9 +1053,9 @@ pub async fn freeze_or_thaw_test>(); let proof_rpc_result = test_indexer - .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + .create_proof_for_compressed_accounts2( + 
Some(input_compressed_account_hashes.clone()), + Some(input_merkle_tree_pubkeys.clone()), None, None, rpc, @@ -1076,7 +1080,7 @@ pub async fn freeze_or_thaw_test>(), outputs_merkle_tree: *outputs_merkle_tree, root_indices: proof_rpc_result.root_indices, - proof: proof_rpc_result.proof, + proof: proof_rpc_result.proof.unwrap_or_default(), }; let instruction = create_instruction::(inputs).unwrap(); @@ -1101,7 +1105,8 @@ pub async fn freeze_or_thaw_test>( .await .unwrap() .unwrap(); - let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(&event); + let slot = rpc.get_slot().await.unwrap(); + let (_, created_output_accounts) = test_indexer.add_event_and_compressed_accounts(slot, &event); let mut delegates = Vec::new(); let mut expected_output_accounts = Vec::new(); @@ -1321,9 +1327,9 @@ pub async fn create_burn_test_instruction>( .map(|x| x.compressed_account.merkle_context.merkle_tree_pubkey) .collect::>(); let proof_rpc_result = test_indexer - .create_proof_for_compressed_accounts( - Some(&input_compressed_account_hashes), - Some(&input_merkle_tree_pubkeys), + .create_proof_for_compressed_accounts2( + Some(input_compressed_account_hashes.clone()), + Some(input_merkle_tree_pubkeys.clone()), None, None, rpc, @@ -1336,12 +1342,12 @@ pub async fn create_burn_test_instruction>( }; let proof = if mode == BurnInstructionMode::InvalidProof { CompressedProof { - a: proof_rpc_result.proof.a, - b: proof_rpc_result.proof.b, - c: proof_rpc_result.proof.a, // flip c to make proof invalid but not run into decompress errors + a: proof_rpc_result.proof.as_ref().unwrap().a, + b: proof_rpc_result.proof.as_ref().unwrap().b, + c: proof_rpc_result.proof.as_ref().unwrap().a, // flip c to make proof invalid but not run into decompress errors } } else { - proof_rpc_result.proof + proof_rpc_result.proof.unwrap_or_default() }; let inputs = CreateBurnInstructionInputs { fee_payer: rpc.get_payer().pubkey(), diff --git a/test-utils/src/state_tree_rollover.rs b/test-utils/src/state_tree_rollover.rs index f6ed886778..d5d6edf560 100644 --- a/test-utils/src/state_tree_rollover.rs +++ b/test-utils/src/state_tree_rollover.rs @@ -4,12 +4,13 @@ use crate::assert_rollover::{ assert_rolledover_merkle_trees, assert_rolledover_merkle_trees_metadata, assert_rolledover_queues_metadata, }; +use account_compression::batched_merkle_tree::BatchedMerkleTreeAccount; use account_compression::NullifierQueueConfig; use account_compression::{ self, initialize_address_merkle_tree::AccountLoader, state::QueueAccount, StateMerkleTreeAccount, StateMerkleTreeConfig, ID, }; -use anchor_lang::{InstructionData, Lamports, ToAccountMetas}; +use anchor_lang::{Discriminator, InstructionData, Lamports, ToAccountMetas}; use forester_utils::{create_account_instruction, get_hash_set}; use light_client::rpc::errors::RpcError; use light_client::rpc::RpcConnection; @@ -121,26 +122,34 @@ pub async fn set_state_merkle_tree_next_index( lamports: u64, ) { let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap(); - { - let merkle_tree_deserialized = - &mut ConcurrentMerkleTreeZeroCopyMut::::from_bytes_zero_copy_mut( - &mut merkle_tree.data[8 + std::mem::size_of::()..], - ) - .unwrap(); - unsafe { - *merkle_tree_deserialized.next_index = next_index as usize; + let discriminator = merkle_tree.data[0..8].try_into().unwrap(); + match discriminator { + StateMerkleTreeAccount::DISCRIMINATOR => { + { + let merkle_tree_deserialized = + &mut ConcurrentMerkleTreeZeroCopyMut::::from_bytes_zero_copy_mut( + &mut 
merkle_tree.data[8 + std::mem::size_of::()..], + ) + .unwrap(); + unsafe { + *merkle_tree_deserialized.next_index = next_index as usize; + } + } + + let mut account_share_data = AccountSharedData::from(merkle_tree); + account_share_data.set_lamports(lamports); + rpc.set_account(merkle_tree_pubkey, &account_share_data); + let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap(); + let merkle_tree_deserialized = + ConcurrentMerkleTreeZeroCopyMut::::from_bytes_zero_copy_mut( + &mut merkle_tree.data[8 + std::mem::size_of::()..], + ) + .unwrap(); + assert_eq!(merkle_tree_deserialized.next_index() as u64, next_index); } + BatchedMerkleTreeAccount::DISCRIMINATOR => {} + _ => panic!("Invalid discriminator"), } - let mut account_share_data = AccountSharedData::from(merkle_tree); - account_share_data.set_lamports(lamports); - rpc.set_account(merkle_tree_pubkey, &account_share_data); - let mut merkle_tree = rpc.get_account(*merkle_tree_pubkey).await.unwrap().unwrap(); - let merkle_tree_deserialized = - ConcurrentMerkleTreeZeroCopyMut::::from_bytes_zero_copy_mut( - &mut merkle_tree.data[8 + std::mem::size_of::()..], - ) - .unwrap(); - assert_eq!(merkle_tree_deserialized.next_index() as u64, next_index); } #[allow(clippy::too_many_arguments)] diff --git a/test-utils/src/system_program.rs b/test-utils/src/system_program.rs index b47c06b402..6187952219 100644 --- a/test-utils/src/system_program.rs +++ b/test-utils/src/system_program.rs @@ -309,7 +309,7 @@ pub async fn compressed_transaction_test>( .unwrap(), ); } - Some(compressed_account_hashes.as_slice()) + Some(compressed_account_hashes.to_vec()) } else { None }; @@ -321,7 +321,7 @@ pub async fn compressed_transaction_test>( let state_input_merkle_trees = if state_input_merkle_trees.is_empty() { None } else { - Some(state_input_merkle_trees.as_slice()) + Some(state_input_merkle_trees) }; let mut root_indices = Vec::new(); let mut proof = None; @@ -341,19 +341,20 @@ pub async fn compressed_transaction_test>( }; let proof_rpc_res = inputs .test_indexer - .create_proof_for_compressed_accounts( + .create_proof_for_compressed_accounts2( compressed_account_input_hashes, - state_input_merkle_trees, + state_input_merkle_trees.clone(), inputs.created_addresses, address_merkle_tree_pubkeys, inputs.rpc, ) .await; + root_indices = proof_rpc_res.root_indices; - proof = Some(proof_rpc_res.proof); + proof = proof_rpc_res.proof; let input_merkle_tree_accounts = inputs .test_indexer - .get_state_merkle_tree_accounts(state_input_merkle_trees.unwrap_or(&[])); + .get_state_merkle_tree_accounts(state_input_merkle_trees.unwrap_or(vec![]).as_slice()); input_merkle_tree_snapshots = get_merkle_tree_snapshots::(inputs.rpc, input_merkle_tree_accounts.as_slice()).await; @@ -428,9 +429,10 @@ pub async fn compressed_transaction_test>( .await? 
.unwrap(); + let slot = inputs.rpc.get_slot().await.unwrap(); let (created_output_compressed_accounts, _) = inputs .test_indexer - .add_event_and_compressed_accounts(&event.0); + .add_event_and_compressed_accounts(slot, &event.0); let input = AssertCompressedTransactionInputs { rpc: inputs.rpc, test_indexer: inputs.test_indexer, @@ -447,7 +449,7 @@ pub async fn compressed_transaction_test>( created_addresses: inputs.created_addresses.unwrap_or(&[]), sorted_output_accounts: inputs.sorted_output_accounts, relay_fee: inputs.relay_fee, - input_compressed_account_hashes: &compressed_account_hashes, + input_compressed_account_hashes: compressed_account_hashes.as_slice(), address_queue_pubkeys: &inputs .new_address_params .iter() diff --git a/test-utils/src/test_batch_forester.rs b/test-utils/src/test_batch_forester.rs new file mode 100644 index 0000000000..71b916e59d --- /dev/null +++ b/test-utils/src/test_batch_forester.rs @@ -0,0 +1,553 @@ +use account_compression::{ + assert_mt_zero_copy_inited, + batched_merkle_tree::{ + get_merkle_tree_account_size, AppendBatchProofInputsIx, BatchAppendEvent, + BatchNullifyEvent, BatchProofInputsIx, BatchedMerkleTreeAccount, + InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, + ZeroCopyBatchedMerkleTreeAccount, + }, + batched_queue::{ + assert_queue_zero_copy_inited, get_output_queue_account_size, BatchedQueueAccount, + ZeroCopyBatchedQueueAccount, + }, + get_output_queue_account_default, InitStateTreeAccountsInstructionData, +}; +use anchor_lang::AnchorSerialize; +use forester_utils::{create_account_instruction, indexer::StateMerkleTreeBundle, AccountZeroCopy}; +use light_client::rpc::{RpcConnection, RpcError}; +use light_hasher::Poseidon; +use light_prover_client::{ + batch_append_with_proofs::get_batch_append_with_proofs_inputs, + batch_append_with_subtrees::calculate_hash_chain, + batch_update::get_batch_update_inputs, + gnark::{ + batch_append_with_proofs_json_formatter::BatchAppendWithProofsInputsJson, + batch_update_json_formatter::update_inputs_string, + constants::{PROVE_PATH, SERVER_ADDRESS}, + proof_helpers::{compress_proof, deserialize_gnark_proof_json, proof_from_json_struct}, + }, +}; +use light_registry::{ + account_compression_cpi::sdk::{ + create_batch_append_instruction, create_batch_nullify_instruction, + create_initialize_batched_merkle_tree_instruction, + }, + protocol_config::state::ProtocolConfig, +}; +use light_utils::bigint::bigint_to_be_bytes_array; +use light_verifier::CompressedProof; +use reqwest::Client; +use solana_sdk::{ + instruction::Instruction, + pubkey::Pubkey, + signature::{Keypair, Signature, Signer}, + transaction::Transaction, +}; +pub async fn perform_batch_append( + rpc: &mut Rpc, + bundle: &mut StateMerkleTreeBundle, + forester: &Keypair, + epoch: u64, + _is_metadata_forester: bool, + instruction_data: Option, +) -> Result { + // let forester_epoch_pda = get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0; + // let pre_forester_counter = if is_metadata_forester { + // 0 + // } else { + // rpc.get_anchor_account::(&forester_epoch_pda) + // .await + // .unwrap() + // .unwrap() + // .work_counter + // }; + let merkle_tree_pubkey = bundle.accounts.merkle_tree; + let output_queue_pubkey = bundle.accounts.nullifier_queue; + + let data = if let Some(instruction_data) = instruction_data { + instruction_data + } else { + create_append_batch_ix_data(rpc, bundle, merkle_tree_pubkey, output_queue_pubkey).await + }; + let instruction = create_batch_append_instruction( + forester.pubkey(), + 
forester.pubkey(), + merkle_tree_pubkey, + output_queue_pubkey, + epoch, + data.try_to_vec().unwrap(), + ); + let res = rpc + .create_and_send_transaction_with_event::( + &[instruction], + &forester.pubkey(), + &[forester], + None, + ) + .await? + .unwrap(); + println!("event {:?}", res.0); + Ok(res.1) +} + +pub async fn create_append_batch_ix_data( + rpc: &mut Rpc, + bundle: &mut StateMerkleTreeBundle, + merkle_tree_pubkey: Pubkey, + output_queue_pubkey: Pubkey, +) -> InstructionDataBatchAppendInputs { + let mut merkle_tree_account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); + let merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree_account.data.as_mut_slice()) + .unwrap(); + let merkle_tree_next_index = merkle_tree.get_account().next_index as usize; + + let mut output_queue_account = rpc.get_account(output_queue_pubkey).await.unwrap().unwrap(); + let output_queue = + ZeroCopyBatchedQueueAccount::from_bytes_mut(output_queue_account.data.as_mut_slice()) + .unwrap(); + let output_queue_account = output_queue.get_account(); + let full_batch_index = output_queue_account.queue.next_full_batch_index; + let zkp_batch_size = output_queue_account.queue.zkp_batch_size; + let max_num_zkp_updates = output_queue_account.queue.get_num_zkp_batches(); + + let leaves = bundle.output_queue_elements.to_vec(); + + let num_inserted_zkps = output_queue.batches[full_batch_index as usize].get_num_inserted_zkps(); + let leaves_hashchain = + output_queue.hashchain_store[full_batch_index as usize][num_inserted_zkps as usize]; + let (proof, new_root) = { + let start = num_inserted_zkps as usize * zkp_batch_size as usize; + let end = start + zkp_batch_size as usize; + let batch_update_leaves = leaves[start..end].to_vec(); + // if batch is complete, remove leaves from mock output queue + if num_inserted_zkps == max_num_zkp_updates - 1 { + for _ in 0..max_num_zkp_updates * zkp_batch_size { + bundle.output_queue_elements.remove(0); + } + } + + let local_leaves_hashchain = calculate_hash_chain(&batch_update_leaves); + assert_eq!(leaves_hashchain, local_leaves_hashchain); + + let old_root = bundle.merkle_tree.root(); + let mut old_leaves = vec![]; + let mut merkle_proofs = vec![]; + for i in merkle_tree_next_index..merkle_tree_next_index + zkp_batch_size as usize { + match bundle.merkle_tree.get_leaf(i) { + Ok(leaf) => { + old_leaves.push(leaf); + } + Err(_) => { + old_leaves.push([0u8; 32]); + if i <= bundle.merkle_tree.get_next_index() { + bundle.merkle_tree.append(&[0u8; 32]).unwrap(); + } + } + } + let proof = bundle.merkle_tree.get_proof_of_leaf(i, true).unwrap(); + merkle_proofs.push(proof.to_vec()); + } + // Insert new leaves into the merkle tree. Every leaf which is not [0u8; + // 32] has already been nullified hence shouldn't be updated. 
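+        // A zeroed entry in `old_leaves` marks a position the on-chain tree
+        // has already allocated but the local reference tree has not seen
+        // yet; only those positions receive the appended value below, so the
+        // local root stays in sync with the on-chain batch append.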
+ for (i, leaf) in batch_update_leaves.iter().enumerate() { + if old_leaves[i] == [0u8; 32] { + let index = merkle_tree_next_index + i; + bundle.merkle_tree.update(leaf, index).unwrap(); + } + } + let circuit_inputs = get_batch_append_with_proofs_inputs::<26>( + old_root, + merkle_tree_next_index as u32, + batch_update_leaves, + local_leaves_hashchain, + old_leaves, + merkle_proofs, + zkp_batch_size as u32, + ); + assert_eq!( + bigint_to_be_bytes_array::<32>(&circuit_inputs.new_root.to_biguint().unwrap()).unwrap(), + bundle.merkle_tree.root() + ); + let client = Client::new(); + let inputs_json = BatchAppendWithProofsInputsJson::from_inputs(&circuit_inputs).to_string(); + + let response_result = client + .post(&format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) + .header("Content-Type", "text/plain; charset=utf-8") + .body(inputs_json) + .send() + .await + .expect("Failed to execute request."); + if response_result.status().is_success() { + let body = response_result.text().await.unwrap(); + let proof_json = deserialize_gnark_proof_json(&body).unwrap(); + let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); + let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); + ( + CompressedProof { + a: proof_a, + b: proof_b, + c: proof_c, + }, + bigint_to_be_bytes_array::<32>(&circuit_inputs.new_root.to_biguint().unwrap()) + .unwrap(), + ) + } else { + panic!("Failed to get proof from server."); + } + }; + + InstructionDataBatchAppendInputs { + public_inputs: AppendBatchProofInputsIx { new_root }, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + } +} + +pub async fn perform_batch_nullify( + rpc: &mut Rpc, + bundle: &mut StateMerkleTreeBundle, + forester: &Keypair, + epoch: u64, + _is_metadata_forester: bool, + instruction_data: Option, +) -> Result { + // let forester_epoch_pda = get_forester_epoch_pda_from_authority(&forester.pubkey(), epoch).0; + // let pre_forester_counter = if is_metadata_forester { + // 0 + // } else { + // rpc.get_anchor_account::(&forester_epoch_pda) + // .await + // .unwrap() + // .unwrap() + // .work_counter + // }; + let merkle_tree_pubkey = bundle.accounts.merkle_tree; + + let data = if let Some(instruction_data) = instruction_data { + instruction_data + } else { + get_batched_nullify_ix_data(rpc, bundle, merkle_tree_pubkey).await? + }; + let instruction = create_batch_nullify_instruction( + forester.pubkey(), + forester.pubkey(), + merkle_tree_pubkey, + epoch, + data.try_to_vec().unwrap(), + ); + let res = rpc + .create_and_send_transaction_with_event::( + &[instruction], + &forester.pubkey(), + &[forester], + None, + ) + .await? 
+ .unwrap(); + Ok(res.1) +} + +pub async fn get_batched_nullify_ix_data( + rpc: &mut Rpc, + bundle: &mut StateMerkleTreeBundle, + merkle_tree_pubkey: Pubkey, +) -> Result { + let mut merkle_tree_account = rpc.get_account(merkle_tree_pubkey).await.unwrap().unwrap(); + let merkle_tree = + ZeroCopyBatchedMerkleTreeAccount::from_bytes_mut(merkle_tree_account.data.as_mut_slice()) + .unwrap(); + let zkp_batch_size = merkle_tree.get_account().queue.zkp_batch_size; + let full_batch_index = merkle_tree.get_account().queue.next_full_batch_index; + let full_batch = &merkle_tree.batches[full_batch_index as usize]; + let zkp_batch_index = full_batch.get_num_inserted_zkps(); + let leaves_hashchain = + merkle_tree.hashchain_store[full_batch_index as usize][zkp_batch_index as usize]; + let mut merkle_proofs = vec![]; + let leaf_indices_tx_hashes = bundle.input_leaf_indices[..zkp_batch_size as usize].to_vec(); + let mut leaves = Vec::new(); + let old_root_index = merkle_tree.root_history.last_index(); + let old_root: [u8; 32] = bundle.merkle_tree.root(); + assert_eq!( + old_root, + *merkle_tree.root_history.get(old_root_index).unwrap() + ); + + let mut nullifiers = Vec::new(); + let mut tx_hashes = Vec::new(); + let mut old_leaves = Vec::new(); + let mut path_indices = Vec::new(); + for (index, leaf, tx_hash) in leaf_indices_tx_hashes.iter() { + path_indices.push(*index); + let index = *index as usize; + let leaf = *leaf; + + leaves.push(leaf); + // + 2 because next index is + 1 and we need to init the leaf in + // pos[index] + if bundle.merkle_tree.get_next_index() < index + 2 { + old_leaves.push([0u8; 32]); + } else { + old_leaves.push(leaf); + } + // Handle case that we nullify a leaf which has not been inserted yet. + while bundle.merkle_tree.get_next_index() < index + 2 { + bundle.merkle_tree.append(&[0u8; 32]).unwrap(); + } + let proof = bundle.merkle_tree.get_proof_of_leaf(index, true).unwrap(); + merkle_proofs.push(proof.to_vec()); + // path_indices.push(index as u32); + bundle.input_leaf_indices.remove(0); + let index_bytes = index.to_be_bytes(); + use light_hasher::Hasher; + let nullifier = Poseidon::hashv(&[&leaf, &index_bytes, tx_hash]).unwrap(); + tx_hashes.push(*tx_hash); + nullifiers.push(nullifier); + bundle.merkle_tree.update(&nullifier, index).unwrap(); + } + // local_leaves_hashchain is only used for a test assertion. 
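+    // Each nullifier above is Poseidon(leaf, leaf_index, tx_hash). Chaining
+    // them with calculate_hash_chain must reproduce the hashchain_store
+    // entry read from the on-chain account (`leaves_hashchain`); a mismatch
+    // means the local reference tree has diverged from the input queue.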
+ let local_nullifier_hashchain = calculate_hash_chain(&nullifiers); + assert_eq!(leaves_hashchain, local_nullifier_hashchain); + let inputs = get_batch_update_inputs::<26>( + old_root, + tx_hashes, + leaves.to_vec(), + leaves_hashchain, + old_leaves, + merkle_proofs, + path_indices, + zkp_batch_size as u32, + ); + let client = Client::new(); + let circuit_inputs_new_root = + bigint_to_be_bytes_array::<32>(&inputs.new_root.to_biguint().unwrap()).unwrap(); + let inputs = update_inputs_string(&inputs); + let new_root = bundle.merkle_tree.root(); + + let response_result = client + .post(&format!("{}{}", SERVER_ADDRESS, PROVE_PATH)) + .header("Content-Type", "text/plain; charset=utf-8") + .body(inputs) + .send() + .await + .expect("Failed to execute request."); + assert_eq!(circuit_inputs_new_root, new_root); + let (proof, new_root) = if response_result.status().is_success() { + let body = response_result.text().await.unwrap(); + let proof_json = deserialize_gnark_proof_json(&body).unwrap(); + let (proof_a, proof_b, proof_c) = proof_from_json_struct(proof_json); + let (proof_a, proof_b, proof_c) = compress_proof(&proof_a, &proof_b, &proof_c); + ( + CompressedProof { + a: proof_a, + b: proof_b, + c: proof_c, + }, + new_root, + ) + } else { + println!("response_result: {:?}", response_result); + panic!("Failed to get proof from server."); + }; + + Ok(InstructionDataBatchNullifyInputs { + public_inputs: BatchProofInputsIx { + new_root, + old_root_index: old_root_index as u16, + }, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }) +} + +use anchor_lang::{InstructionData, ToAccountMetas}; + +pub async fn create_batched_state_merkle_tree( + payer: &Keypair, + registry: bool, + rpc: &mut R, + merkle_tree_keypair: &Keypair, + queue_keypair: &Keypair, + cpi_context_keypair: &Keypair, + params: InitStateTreeAccountsInstructionData, +) -> Result { + let queue_account_size = get_output_queue_account_size( + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.output_queue_num_batches, + ); + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + let queue_rent = rpc + .get_minimum_balance_for_rent_exemption(queue_account_size) + .await + .unwrap(); + let create_queue_account_ix = create_account_instruction( + &payer.pubkey(), + queue_account_size, + queue_rent, + &account_compression::ID, + Some(queue_keypair), + ); + let mt_rent = rpc + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let create_mt_account_ix = create_account_instruction( + &payer.pubkey(), + mt_account_size, + mt_rent, + &account_compression::ID, + Some(merkle_tree_keypair), + ); + let rent_cpi_config = rpc + .get_minimum_balance_for_rent_exemption(ProtocolConfig::default().cpi_context_size as usize) + .await + .unwrap(); + let create_cpi_context_instruction = create_account_instruction( + &payer.pubkey(), + ProtocolConfig::default().cpi_context_size as usize, + rent_cpi_config, + &light_system_program::ID, + Some(cpi_context_keypair), + ); + let instruction = if registry { + create_initialize_batched_merkle_tree_instruction( + payer.pubkey(), + merkle_tree_keypair.pubkey(), + queue_keypair.pubkey(), + cpi_context_keypair.pubkey(), + params, + ) + } else { + let instruction = + account_compression::instruction::InitializeBatchedStateMerkleTree { params }; 
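+        // Non-registry path: assemble the raw account-compression
+        // instruction by hand from the anchor-generated accounts struct and
+        // the serialized instruction data, instead of going through the
+        // registry CPI wrapper used above.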
+ let accounts = account_compression::accounts::InitializeBatchedStateMerkleTreeAndQueue { + authority: payer.pubkey(), + merkle_tree: merkle_tree_keypair.pubkey(), + queue: queue_keypair.pubkey(), + registered_program_pda: None, + }; + + Instruction { + program_id: account_compression::ID, + accounts: accounts.to_account_metas(Some(true)), + data: instruction.data(), + } + }; + + let transaction = Transaction::new_signed_with_payer( + &[ + create_mt_account_ix, + create_queue_account_ix, + create_cpi_context_instruction, + instruction, + ], + Some(&payer.pubkey()), + &vec![ + payer, + merkle_tree_keypair, + queue_keypair, + cpi_context_keypair, + ], + rpc.get_latest_blockhash().await.unwrap(), + ); + rpc.process_transaction(transaction).await +} + +pub async fn assert_registry_created_batched_state_merkle_tree( + rpc: &mut R, + payer_pubkey: Pubkey, + merkle_tree_pubkey: Pubkey, + output_queue_pubkey: Pubkey, + // TODO: assert cpi_context_account creation + _cpi_context_pubkey: Pubkey, + params: InitStateTreeAccountsInstructionData, +) -> Result<(), RpcError> { + let mut merkle_tree = + AccountZeroCopy::::new(rpc, merkle_tree_pubkey).await; + + let mut queue = AccountZeroCopy::::new(rpc, output_queue_pubkey).await; + let ref_mt_account = BatchedMerkleTreeAccount::get_state_tree_default( + payer_pubkey, + params.program_owner, + params.forester, + params.rollover_threshold, + params.index, + params.network_fee.unwrap_or_default(), + params.input_queue_batch_size, + params.input_queue_zkp_batch_size, + params.bloom_filter_capacity, + params.root_history_capacity, + output_queue_pubkey, + params.height, + params.input_queue_num_batches, + ); + + assert_mt_zero_copy_inited( + merkle_tree.account.data.as_mut_slice(), + ref_mt_account, + params.bloom_filter_num_iters, + ); + + let queue_account_size = get_output_queue_account_size( + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.output_queue_num_batches, + ); + let mt_account_size = get_merkle_tree_account_size( + params.input_queue_batch_size, + params.bloom_filter_capacity, + params.input_queue_zkp_batch_size, + params.root_history_capacity, + params.height, + params.input_queue_num_batches, + ); + let queue_rent = rpc + .get_minimum_balance_for_rent_exemption(queue_account_size) + .await + .unwrap(); + let mt_rent = rpc + .get_minimum_balance_for_rent_exemption(mt_account_size) + .await + .unwrap(); + let additional_bytes_rent = rpc + .get_minimum_balance_for_rent_exemption(params.additional_bytes as usize) + .await + .unwrap(); + let total_rent = queue_rent + mt_rent + additional_bytes_rent; + let ref_output_queue_account = get_output_queue_account_default( + payer_pubkey, + params.program_owner, + params.forester, + params.rollover_threshold, + params.index, + params.output_queue_batch_size, + params.output_queue_zkp_batch_size, + params.additional_bytes, + total_rent, + merkle_tree_pubkey, + params.height, + params.output_queue_num_batches, + ); + + assert_queue_zero_copy_inited( + queue.account.data.as_mut_slice(), + ref_output_queue_account, + 0, // output queue doesn't have a bloom filter hence no iterations + ); + Ok(()) +} diff --git a/test-utils/src/test_env.rs b/test-utils/src/test_env.rs index cdcedc06a4..393f4ec9a6 100644 --- a/test-utils/src/test_env.rs +++ b/test-utils/src/test_env.rs @@ -2,12 +2,17 @@ use crate::assert_address_merkle_tree::assert_address_merkle_tree_initialized; use crate::assert_queue::assert_address_queue_initialized; use crate::env_accounts; use 
crate::rpc::test_rpc::ProgramTestRpcConnection; +use crate::test_batch_forester::{ + assert_registry_created_batched_state_merkle_tree, create_batched_state_merkle_tree, +}; use account_compression::sdk::create_initialize_address_merkle_tree_and_queue_instruction; use account_compression::utils::constants::GROUP_AUTHORITY_SEED; use account_compression::{ sdk::create_initialize_merkle_tree_instruction, GroupAuthority, RegisteredProgram, }; -use account_compression::{AddressMerkleTreeConfig, AddressQueueConfig, QueueType}; +use account_compression::{ + AddressMerkleTreeConfig, AddressQueueConfig, InitStateTreeAccountsInstructionData, QueueType, +}; use account_compression::{NullifierQueueConfig, StateMerkleTreeConfig}; use forester_utils::forester_epoch::{Epoch, TreeAccounts, TreeType}; use forester_utils::registry::register_test_forester; @@ -138,6 +143,9 @@ pub struct EnvAccounts { pub cpi_context_account_pubkey: Pubkey, pub registered_forester_pda: Pubkey, pub forester_epoch: Option, + pub batched_state_merkle_tree: Pubkey, + pub batched_output_queue: Pubkey, + pub batched_cpi_context: Pubkey, } impl EnvAccounts { @@ -158,6 +166,9 @@ impl EnvAccounts { cpi_context_account_pubkey: pubkey!("cpi1uHzrEhBG733DoEJNgHCyRS3XmmyVNZx5fonubE4"), registered_forester_pda: Pubkey::default(), forester_epoch: None, // Set to None or to an appropriate Epoch value if needed + batched_state_merkle_tree: pubkey!("HLKs5NJ8FXkJg8BrzJt56adFYYuwg5etzDtBbQYTsixu"), + batched_output_queue: pubkey!("6L7SzhYB3anwEQ9cphpJ1U7Scwj57bx2xueReg7R9cKU"), + batched_cpi_context: pubkey!("7Hp52chxaew8bW1ApR4fck2bh6Y8qA1pu3qwH6N9zaLj"), } } } @@ -173,6 +184,9 @@ pub struct EnvAccountKeypairs { pub cpi_context_account: Keypair, pub system_program: Keypair, pub registry_program: Keypair, + pub batched_state_merkle_tree: Keypair, + pub batched_output_queue: Keypair, + pub batched_cpi_context: Keypair, } impl EnvAccountKeypairs { @@ -188,6 +202,10 @@ impl EnvAccountKeypairs { cpi_context_account: Keypair::from_bytes(&SIGNATURE_CPI_TEST_KEYPAIR).unwrap(), system_program: Keypair::from_bytes(&OLD_SYSTEM_PROGRAM_ID_TEST_KEYPAIR).unwrap(), registry_program: Keypair::from_bytes(&OLD_REGISTRY_ID_TEST_KEYPAIR).unwrap(), + batched_state_merkle_tree: Keypair::from_bytes(&BATCHED_STATE_MERKLE_TREE_TEST_KEYPAIR) + .unwrap(), + batched_output_queue: Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap(), + batched_cpi_context: Keypair::from_bytes(&BATCHED_CPI_CONTEXT_TEST_KEYPAIR).unwrap(), } } @@ -245,6 +263,10 @@ impl EnvAccountKeypairs { cpi_context_account, system_program, registry_program, + batched_state_merkle_tree: Keypair::from_bytes(&BATCHED_STATE_MERKLE_TREE_TEST_KEYPAIR) + .unwrap(), + batched_output_queue: Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR).unwrap(), + batched_cpi_context: Keypair::from_bytes(&BATCHED_CPI_CONTEXT_TEST_KEYPAIR).unwrap(), } } } @@ -320,6 +342,28 @@ pub const FORESTER_TEST_KEYPAIR: [u8; 64] = [ 204, ]; +// HLKs5NJ8FXkJg8BrzJt56adFYYuwg5etzDtBbQYTsixu +pub const BATCHED_STATE_MERKLE_TREE_TEST_KEYPAIR: [u8; 64] = [ + 85, 82, 64, 221, 4, 69, 191, 4, 64, 56, 29, 32, 145, 68, 117, 157, 130, 83, 228, 58, 142, 48, + 130, 43, 101, 149, 140, 82, 123, 102, 108, 148, 242, 174, 90, 229, 244, 60, 225, 10, 207, 196, + 201, 136, 192, 35, 58, 9, 149, 215, 40, 149, 244, 9, 184, 209, 113, 234, 101, 91, 227, 243, 41, + 254, +]; +// 6L7SzhYB3anwEQ9cphpJ1U7Scwj57bx2xueReg7R9cKU +pub const BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR: [u8; 64] = [ + 56, 183, 128, 249, 154, 184, 81, 219, 6, 98, 1, 79, 56, 253, 134, 
198, 170, 16, 43, 112, 170, + 206, 203, 48, 49, 119, 115, 11, 192, 208, 67, 107, 79, 47, 194, 208, 90, 252, 43, 18, 216, 76, + 41, 113, 8, 161, 113, 18, 188, 202, 207, 115, 125, 235, 151, 110, 167, 166, 249, 78, 75, 221, + 38, 219, +]; +// 7Hp52chxaew8bW1ApR4fck2bh6Y8qA1pu3qwH6N9zaLj +pub const BATCHED_CPI_CONTEXT_TEST_KEYPAIR: [u8; 64] = [ + 152, 98, 187, 34, 35, 31, 202, 218, 11, 86, 181, 144, 29, 208, 167, 201, 77, 12, 104, 170, 95, + 53, 115, 33, 244, 179, 187, 255, 246, 100, 43, 203, 93, 116, 162, 215, 36, 226, 217, 56, 215, + 240, 198, 198, 253, 195, 107, 230, 122, 63, 116, 163, 105, 167, 18, 188, 161, 63, 146, 7, 238, + 3, 12, 228, +]; + /// Setup test programs with accounts /// deploys: /// 1. light program @@ -371,7 +415,6 @@ pub async fn setup_test_programs_with_accounts( /// - registers a forester /// - advances to the active phase slot 2 /// - active phase doesn't end -// TODO(vadorovsky): ...in favor of this one. pub async fn setup_test_programs_with_accounts_v2( additional_programs: Option>, ) -> ( @@ -392,12 +435,25 @@ pub async fn setup_test_programs_with_accounts_v2( ) .await } - -// TODO(vadorovsky): Remote this function... pub async fn setup_test_programs_with_accounts_with_protocol_config( additional_programs: Option>, protocol_config: ProtocolConfig, register_forester_and_advance_to_active_phase: bool, +) -> (ProgramTestRpcConnection, EnvAccounts) { + setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params( + additional_programs, + protocol_config, + register_forester_and_advance_to_active_phase, + InitStateTreeAccountsInstructionData::test_default(), + ) + .await +} + +pub async fn setup_test_programs_with_accounts_with_protocol_config_and_batched_tree_params( + additional_programs: Option>, + protocol_config: ProtocolConfig, + register_forester_and_advance_to_active_phase: bool, + batched_tree_init_params: InitStateTreeAccountsInstructionData, ) -> (ProgramTestRpcConnection, EnvAccounts) { let context = setup_test_programs(additional_programs).await; let mut context = ProgramTestRpcConnection { context }; @@ -418,6 +474,7 @@ pub async fn setup_test_programs_with_accounts_with_protocol_config( protocol_config, register_forester_and_advance_to_active_phase, true, + batched_tree_init_params, ) .await; (context, env_accounts) @@ -445,12 +502,14 @@ pub async fn setup_test_programs_with_accounts_with_protocol_config_v2( airdrop_lamports(&mut context, &keypairs.forester.pubkey(), 10_000_000_000) .await .unwrap(); + let params = InitStateTreeAccountsInstructionData::test_default(); let env_accounts = initialize_accounts( &mut context, keypairs, protocol_config, register_forester_and_advance_to_active_phase, true, + params, ) .await; (context, env_accounts) @@ -458,8 +517,17 @@ pub async fn setup_test_programs_with_accounts_with_protocol_config_v2( pub async fn setup_accounts(keypairs: EnvAccountKeypairs, url: SolanaRpcUrl) -> EnvAccounts { let mut rpc = SolanaRpcConnection::new(url, None); + let params = InitStateTreeAccountsInstructionData::test_default(); - initialize_accounts(&mut rpc, keypairs, ProtocolConfig::default(), false, false).await + initialize_accounts( + &mut rpc, + keypairs, + ProtocolConfig::default(), + false, + false, + params, + ) + .await } pub async fn initialize_accounts( @@ -468,6 +536,7 @@ pub async fn initialize_accounts( protocol_config: ProtocolConfig, register_forester_and_advance_to_active_phase: bool, skip_register_programs: bool, + batched_tree_init_params: InitStateTreeAccountsInstructionData, ) -> EnvAccounts { let 
cpi_authority_pda = get_cpi_authority_pda(); let protocol_config_pda = get_protocol_config_pda_address(); @@ -557,6 +626,32 @@ pub async fn initialize_accounts( .await .unwrap(); + assert_eq!( + batched_tree_init_params.additional_bytes, + ProtocolConfig::default().cpi_context_size + ); + create_batched_state_merkle_tree( + &keypairs.governance_authority, + true, + context, + &keypairs.batched_state_merkle_tree, + &keypairs.batched_output_queue, + &keypairs.batched_cpi_context, + batched_tree_init_params, + ) + .await + .unwrap(); + assert_registry_created_batched_state_merkle_tree( + context, + get_group_pda(group_seed_keypair.pubkey()), + keypairs.batched_state_merkle_tree.pubkey(), + keypairs.batched_output_queue.pubkey(), + keypairs.batched_cpi_context.pubkey(), + batched_tree_init_params, + ) + .await + .unwrap(); + create_address_merkle_tree_and_queue_account( &keypairs.governance_authority, true, @@ -634,6 +729,9 @@ pub async fn initialize_accounts( registered_registry_program_pda, registered_forester_pda: get_forester_pda(&keypairs.forester.pubkey()).0, forester_epoch, + batched_cpi_context: keypairs.batched_cpi_context.pubkey(), + batched_output_queue: keypairs.batched_output_queue.pubkey(), + batched_state_merkle_tree: keypairs.batched_state_merkle_tree.pubkey(), } } pub fn get_group_pda(seed: Pubkey) -> Pubkey { @@ -684,6 +782,7 @@ pub async fn initialize_new_group( group_pda } +// TODO: unify with keypairs pub fn get_test_env_accounts() -> EnvAccounts { let merkle_tree_keypair = Keypair::from_bytes(&MERKLE_TREE_TEST_KEYPAIR).unwrap(); let merkle_tree_pubkey = merkle_tree_keypair.pubkey(); @@ -724,6 +823,15 @@ pub fn get_test_env_accounts() -> EnvAccounts { cpi_context_account_pubkey: cpi_context_keypair.pubkey(), registered_registry_program_pda, forester_epoch: None, + batched_cpi_context: Keypair::from_bytes(&BATCHED_CPI_CONTEXT_TEST_KEYPAIR) + .unwrap() + .pubkey(), + batched_output_queue: Keypair::from_bytes(&BATCHED_OUTPUT_QUEUE_TEST_KEYPAIR) + .unwrap() + .pubkey(), + batched_state_merkle_tree: Keypair::from_bytes(&BATCHED_STATE_MERKLE_TREE_TEST_KEYPAIR) + .unwrap() + .pubkey(), } } @@ -942,7 +1050,7 @@ pub async fn create_address_merkle_tree_and_queue_account( let expected_right_most_leaf = reference_tree .merkle_tree - .get_leaf(reference_tree.merkle_tree.rightmost_index - 1); + .leaf(reference_tree.merkle_tree.rightmost_index - 1); let _expected_right_most_leaf = [ 30, 164, 22, 238, 180, 2, 24, 181, 64, 193, 207, 184, 219, 233, 31, 109, 84, 232, 162, 158, diff --git a/test-utils/src/test_forester.rs b/test-utils/src/test_forester.rs index 3f57ab49c0..2121a4a47c 100644 --- a/test-utils/src/test_forester.rs +++ b/test-utils/src/test_forester.rs @@ -1,4 +1,7 @@ use crate::test_env::NOOP_PROGRAM_ID; +use account_compression::batched_merkle_tree::{ + InstructionDataBatchAppendInputs, ZeroCopyBatchedMerkleTreeAccount, +}; use account_compression::instruction::UpdateAddressMerkleTree; use account_compression::state::QueueAccount; use account_compression::utils::constants::{ diff --git a/utils/src/lib.rs b/utils/src/lib.rs index c3b051f3e4..fd9ab8a573 100644 --- a/utils/src/lib.rs +++ b/utils/src/lib.rs @@ -13,7 +13,6 @@ use thiserror::Error; pub mod bigint; pub mod fee; -pub mod offset; pub mod prime; pub mod rand;
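For reviewers, a minimal call-site sketch of the batched-aware proof API introduced above (`create_proof_for_compressed_accounts2`), mirroring the migrated call sites in `test-utils/src/spl.rs`. This is illustrative only, not part of the patch; `test_indexer`, `rpc`, `hashes`, and `pubkeys` are placeholder names assumed to be in scope.

    // Owned Vecs replace the previous &[[u8; 32]] / &[Pubkey] borrows.
    let rpc_result = test_indexer
        .create_proof_for_compressed_accounts2(
            Some(hashes.clone()),
            Some(pubkeys.clone()),
            None, // no new addresses
            None, // no address merkle trees
            rpc,
        )
        .await;
    // Accounts still sitting in a batched (v2) output queue need no
    // inclusion proof: their root index comes back as None, and `proof`
    // is an Option<CompressedProof> that may be None altogether.
    let instruction = create_transfer_instruction(
        /* ... unchanged arguments ... */
        &rpc_result.root_indices, // Vec<Option<u16>>
        &rpc_result.proof,        // Option<CompressedProof>
        /* ... */
    );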