diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 000000000..c54748843 Binary files /dev/null and b/.DS_Store differ diff --git a/.vscode/settings.json b/.vscode/settings.json index 3cf807e96..3ffe6da72 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,5 +1,6 @@ { "rust-analyzer.linkedProjects": [ - "./client/Cargo.toml" + "./client/Cargo.toml", + "./swarm_nl/Cargo.toml" ] } \ No newline at end of file diff --git a/README.md b/README.md index 823230398..48282356e 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,202 @@ + + # SwarmNL -SwarmNL is a configurable p2p networking layer for transporting arbitrary data between nodes in a network. +**A library to build custom networking layers for decentralized applications** + +SwarmNL is a library designed for P2P networking in distributed systems. It's lightweight, scalable, and easy to configure, making it perfect for decentralized applications. Powered by [libp2p](https://docs.libp2p.io/), SwarmNL simplifies networking so developers can focus on building. + +## Why SwarmNL? +SwarmNL makes buiding a peer-to-peer decentralized and distributed networking stack for your application a breeze. With SwarmNL, you can effortlessly configure nodes, tailor network conditions, and fine-tune behaviors specific to your project's needs, allowing you to dive into networking without any hassle. + +Say goodbye to the complexities of networking and hello to simplicity. With SwarmNL, all the hard work is done for you, leaving you to focus on simple configurations and your application logic. + +## Tutorials + +Have a look at some tutorials that demonstrate the use of SwarmNl in various contexts: + +- [Echo server]() +- [File sharing app]() +- [Simple game]() + +## Documentation + +Visit the deployed Rust docs [here](https://algorealminc.github.io/SwarmNL/swarm_nl/index.html). 
+ +## Features + +- **Node Configuration**: SwarmNL provides a simple interface to configure a node and specify parameters to dictate its behaviour. This includes: + + - Selection and configuration of the transport layers to be supported by the node + - Selection of cryptographic keypairs (ed25519, RSA, secp256k1, ecdsa) + - Storage and retrieval of keypair locally + - PeerID and multiaddress generation + - Protocol specification and handlers + - Event handlers for network events and logging + + #### Example + + ```rust + //! Using the default node setup configuration and the default network event handler + + // Default config + let config = BootstrapConfig::default(); + // Default network handler + let handler = DefaultHandler; + // Build node or network core + let node = CoreBuilder::with_config(config, handler) + .build() + .await + .unwrap(); + + + //! Using a custom node setup configuration and a custom network event handler + + // Custom configuration + // a. Using config from an `.ini` file + let config = BootstrapConfig::from_file("bootstrap_config.ini"); + + // b. 
Using config methods + let mut bootnode = HashMap::new(); // Bootnodes + let ports = (1509, 2710); // TCP, UDP ports + + bootnode.insert( + PeerId::random(), + "/ip4/x.x.x.x/tcp/1509".to_string() + ); + + let config = BootstrapConfig::new() + .with_bootnodes(bootnode) + .with_tcp(ports.0) + .with_udp(ports.1); + + // Custom event handler + use swarm_nl::core::EventHandler; + + #[derive(Clone)] + struct ApplicationState{ + name: String, + version: i8, + } + + // Define custom behaviour to respond to network events + impl EventHandler for AppState { + fn new_listen_addr( + &mut self, + local_peer_id: PeerId, + listener_id: ListenerId, + addr: Multiaddr, + ) { + // Announce interfaces we're listening on + println!("Peer id: {}", local_peer_id); + println!("We're listening on the {}", addr); + } + + // Echo data recieved from a RPC + fn rpc_handle_incoming_message(&mut self, data: Vec>) -> Vec> { + println!("Recvd incoming RPC: {:?}", data); + data + } + + // Handle the incoming gossip message + fn gossipsub_handle_incoming_message(&mut self, source: PeerId, data: Vec) { + println!("Recvd incoming gossip: {:?}", data); + } + } + + // Define custom event handler + let state = ApplicationState { + name: String::from("SwarmNL"), + version: 0.1 + } + + // Build node or network core + let node = CoreBuilder::with_config(config, state) + .build() + .await + .unwrap(); + + ``` + Please look at a template `.ini` file [here](https://github.com/algorealmInc/SwarmNL/blob/dev/swarm_nl/bootstrap_config.ini) for configuring a node in the network.

+ +- **Node Communication**: For communication, SwarmNL leverages the powerful capabilities of libp2p. These includes: + + - The Kadmlia DHT: Developers can use the DHT to store infomation and leverage the capabilities of the DHT to build powerful applications, easily. + - A simple RPC mechanism to exchange data quickly between peers. + - Gossiping: SwarmNL uses the Gossipsub 1.1 protocol, specified by the [libp2p spec](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md). + + #### Example + + ```rust + //! Communicate with remote nodes using the simple and familiar async-await paradigm. + + // Build node or network core + let node = CoreBuilder::with_config(config, state) + .build() + .await + .unwrap(); + + // Communication interfaces + // a. Kademlia DHT e.g + + // Prepare an kademlia `store_record` request to send to the network layer + let (key, value, expiration_time, explicit_peers) = ( + KADEMLIA_TEST_KEY.as_bytes().to_vec(), + KADEMLIA_TEST_VALUE.as_bytes().to_vec(), + None, + None, + ); + + let kad_request = AppData::KademliaStoreRecord { + key: key.clone(), + value, + expiration_time, + explicit_peers, + }; + + // Send request + if let Ok(result) = node.query_network(kad_request).await { + assert_eq!(KademliaStoreRecordSuccess,result); + } + + // b. RPC (request-response) e.g + + // Prepare a RPC fetch request + let fetch_key = vec!["SomeFetchKey".as_bytes().to_vec()]; + + let fetch_request = AppData::FetchData { + keys: fetch_key.clone(), + peer: node4_peer_id, + }; + + // Get a stream id to track the request + let stream_id = node.send_to_network(fetch_request).await.unwrap(); + + // Poll for the result + if let Ok(result) = node.recv_from_network(stream_id).await { + // Here, the request data was simply echoed by the remote peer + assert_eq!(AppResponse::FetchData(fetch_key), result); + } + + // c. 
Gossiping e.g + + // Prepare gossip request + let gossip_request = AppData::GossipsubBroadcastMessage { + topic: GOSSIP_NETWORK.to_string(), + message: vec!["Daniel".to_string(), "Deborah".to_string()], + }; + + if let Ok(result) = node.query_network(gossip_request).await { + assert_eq!(AppResponse::GossipsubBroadcastSuccess, result); + } + + ``` + +- _In Development 👷_: + - _Node failure handling involving reconnection strategies, failover mechanisms etc_. + - _Scaling involving techniques like sharding, data forwarding etc_. + - _IPFS upload and download interfaces_. + +In essence, SwarmNL is designed to simplify networking so you can focus on building that world-changing application of yours! Cheers! 🥂 + +With ❤️ from [Deji](https://github.com/thewoodfish) and [Sacha](https://github.com/sacha-l). + diff --git a/SwarmNl.png b/SwarmNl.png new file mode 100644 index 000000000..1681349d2 Binary files /dev/null and b/SwarmNl.png differ diff --git a/bootstrap_config.ini b/bootstrap_config.ini deleted file mode 100644 index 91e07faba..000000000 --- a/bootstrap_config.ini +++ /dev/null @@ -1,19 +0,0 @@ -; Copyright (c) 2024 Algorealm -; A typical template showing the necessary config to bootstrap a node - -[ports] -; TCP/IP port to listen on -tcp=3000 -; UDP port to listen on -udp=4000 - -; compulsory -[auth] -; Type of keypair to generate for node identity and message auth e.g RSA, EDSA, Ed25519 -crypto=Ed25519 -; The protobuf serialized format of the node's cryptographic keypair -protobuf_keypair=[] - -[bootstrap] -; The boostrap nodes to connect to immediately after start up -boot_nodes=[12D3KooWGfbL6ZNGWqS11MoptH2A7DB1DG6u85FhXBUPXPVkVVRq:/ip4/192.168.1.205/tcp/1509, QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt:/ip4/192.168.1.205/tcp/1509] \ No newline at end of file diff --git a/client/.DS_Store b/client/.DS_Store new file mode 100644 index 000000000..e31063655 Binary files /dev/null and b/client/.DS_Store differ diff --git a/client/Cargo.toml b/client/Cargo.toml 
index 87503c9ea..5ef1eea12 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -10,3 +10,8 @@ rust-ini = "0.20.0" swarm_nl = { path = "../swarm_nl", features = ["tokio-runtime"] } tokio = { version = "1.37.0", features = ["full"] } base58 = "0.2.0" + +[features] +second-node = [] +test-reading-node = [] +test-writing-node = [] \ No newline at end of file diff --git a/client/bootstrap_config.ini b/client/bootstrap_config.ini index 67404baa7..b0adaa0ba 100644 --- a/client/bootstrap_config.ini +++ b/client/bootstrap_config.ini @@ -1,12 +1,7 @@ -; Copyright (c) 2024 Algorealm -; A typical template showing the necessary config to bootstrap a node - [ports] -; TCP/IP port to listen on tcp=49200 -; UDP port to listen on udp=49201 [bootstrap] -; The boostrap nodes to connect to immediately after start up -boot_nodes=[12D3KooWMD3kvZ7hSngeu1p7HAoCCYusSXqPPYDPvzxsa9T4vz3a:/ip4/127.0.0.1/tcp/49152] \ No newline at end of file +boot_nodes=[12D3KooWMNKx84syHXFLRz7Ejfj9UgyvdtTeFMwUyfjwcykMKofq:/ip4/127.0.0.1/tcp/49152] + diff --git a/client/src/main.rs b/client/src/main.rs index 414f36ce8..bb122db77 100644 --- a/client/src/main.rs +++ b/client/src/main.rs @@ -6,178 +6,622 @@ /// Enjoy! use std::{borrow::Cow, num::NonZeroU32, time::Duration}; use swarm_nl::{ - async_trait, - core::{EventHandler, AppResponse}, core::{AppData, Core, CoreBuilder}, + core::{AppResponse, EventHandler}, setup::BootstrapConfig, util::string_to_peer_id, - ConnectedPoint, ConnectionId, PeerId, + ConnectedPoint, ConnectionId, Keypair, PeerId, }; #[tokio::main] async fn main() { - // Start our game! Age of Empires! 
- play_game().await -} + // ping_test::run_ping_example().await; + + // Communication example + // layer_communication::run_comm_example().await; -#[derive(Clone)] -pub struct Empire { - name: String, - soldiers: u8, - farmers: u8, - blacksmith: u8, - land_mass: u8, - gold_reserve: u8, + kademlia::test_kademlia_itest_works().await; } -impl Empire { - /// Create a new empire and assign the assets to begin with - pub fn new(name: String) -> Self { - Empire { - name, - soldiers: 100, - farmers: 100, - blacksmith: 100, - land_mass: 100, - gold_reserve: 100, +mod kademlia { + use std::collections::HashMap; + + use swarm_nl::{ + core::{Core, CoreBuilder, AppData}, + setup::BootstrapConfig, + Keypair, PeerId, Port, + }; + + use crate::layer_communication::AppState; + + /// Time to wait for the other peer to act, during integration tests (in seconds) + pub const ITEST_WAIT_TIME: u64 = 15; + /// The key to test the Kademlia DHT + pub const KADEMLIA_TEST_KEY: &str = "GOAT"; + /// The value to test the Kademlia DHT + pub const KADEMLIA_TEST_VALUE: &str = "Steve Jobs"; + + /// Used to create a detereministic node. + pub async fn setup_node_1(ports: (Port, Port)) -> Core { + // Our test keypair for the first node + let mut protobuf = vec![ + 8, 1, 18, 64, 34, 116, 25, 74, 122, 174, 130, 2, 98, 221, 17, 247, 176, 102, 205, 3, + 27, 202, 193, 27, 6, 104, 216, 158, 235, 38, 141, 58, 64, 81, 157, 155, 36, 193, 50, + 147, 85, 72, 64, 174, 65, 132, 232, 78, 231, 224, 88, 38, 55, 78, 178, 65, 42, 97, 39, + 152, 42, 164, 148, 159, 36, 170, 109, 178, + ]; + + // The PeerId of the first node + let peer_id = Keypair::from_protobuf_encoding(&protobuf) + .unwrap() + .public() + .to_peer_id(); + + setup_core_builder_1(&mut protobuf[..], ports).await + } + + /// Used to create a node to peer with node_1. 
+ pub async fn setup_node_2( + node_1_ports: (Port, Port), + ports: (Port, Port), + ) -> (Core, PeerId) { + let app_state = AppState; + + // Our test keypair for the node_1 + let mut protobuf = vec![ + 8, 1, 18, 64, 34, 116, 25, 74, 122, 174, 130, 2, 98, 221, 17, 247, 176, 102, 205, 3, + 27, 202, 193, 27, 6, 104, 216, 158, 235, 38, 141, 58, 64, 81, 157, 155, 36, 193, 50, + 147, 85, 72, 64, 174, 65, 132, 232, 78, 231, 224, 88, 38, 55, 78, 178, 65, 42, 97, 39, + 152, 42, 164, 148, 159, 36, 170, 109, 178, + ]; + + // The PeerId of the first node + let peer_id = Keypair::from_protobuf_encoding(&protobuf) + .unwrap() + .public() + .to_peer_id(); + + // Set up bootnode to query node 1 + let mut bootnode = HashMap::new(); + bootnode.insert( + peer_id.to_base58(), + format!("/ip4/127.0.0.1/tcp/{}", node_1_ports.0), + ); + + println!("Second node here!"); + + // First, we want to configure our node + let config = BootstrapConfig::new() + .with_bootnodes(bootnode) + .with_tcp(ports.0) + .with_udp(ports.1); + + // Set up network + ( + CoreBuilder::with_config(config, app_state) + .build() + .await + .unwrap(), + peer_id, + ) + } + + pub async fn setup_core_builder_1(buffer: &mut [u8], ports: (u16, u16)) -> Core { + let app_state = AppState; + + // First, we want to configure our node with the bootstrap config file on disk + let config = BootstrapConfig::default() + .generate_keypair_from_protobuf("ed25519", buffer) + .with_tcp(ports.0) + .with_udp(ports.1); + + // Set up network + CoreBuilder::with_config(config, app_state) + .build() + .await + .unwrap() + } + + pub async fn test_kademlia_itest_works() { + #[cfg(feature = "test-reading-node")] + async { + // set up the node that will be dialled + let mut node_1 = setup_node_1((51666, 51606)).await; + + // Wait for a few seconds before trying to read the DHT + #[cfg(feature = "tokio-runtime")] + tokio::time::sleep(Duration::from_secs(ITEST_WAIT_TIME + 20)).await; + + // now poll for the kademlia record + // let kad_request = 
AppData::KademliaLookupRecord { key: + // KADEMLIA_TEST_KEY.as_bytes().to_vec() }; + let kad_request = AppData::KademliaGetProviders { + key: KADEMLIA_TEST_KEY.as_bytes().to_vec(), + }; + if let Ok(result) = node_1.query_network(kad_request).await { + // if let AppResponse::KademliaLookupSuccess(value) = result { + // assert_eq!(KADEMLIA_TEST_VALUE.as_bytes().to_vec(), value); + // } + println!("{:?}", result); + } else { + println!("No record found"); + } } + .await; + + #[cfg(feature = "test-writing-node")] + async { + // set up the second node that will dial + let (mut node_2, node_1_peer_id) = setup_node_2((51666, 51606), (51667, 51607)).await; + + // create request to read the DHT + let (key, value, expiration_time, explicit_peers) = ( + KADEMLIA_TEST_KEY.as_bytes().to_vec(), + KADEMLIA_TEST_VALUE.as_bytes().to_vec(), + None, + None, + ); + + let kad_request = AppData::KademliaStoreRecord { + key, + value, + expiration_time, + explicit_peers, + }; + + let res = node_2.query_network(kad_request).await; + println!("{:?}", res); + + let kad_request = AppData::KademliaGetProviders { + key: KADEMLIA_TEST_KEY.as_bytes().to_vec(), + }; + let result = node_2.query_network(kad_request).await; + + println!("{:?}", result); + + loop {} + + // if let Ok(_) = node_2.query_network(kad_request).await { + // loop {} + // } else { + // println!("Error"); + // } + } + .await; } } -#[async_trait] -impl EventHandler for Empire { - async fn new_listen_addr( - &mut self, - local_peer_id: PeerId, - _listener_id: swarm_nl::ListenerId, - addr: swarm_nl::Multiaddr, - ) { - // announce interfaces we're listening on - println!("Peer id: {}", local_peer_id); - println!("We're listening on the {}", addr); - println!( - "There are {} soldiers guarding the {} Empire gate", - self.soldiers, self.name - ); +mod age_of_empire { + use super::*; + + #[derive(Clone)] + pub struct Empire { + name: String, + soldiers: u8, + farmers: u8, + blacksmith: u8, + land_mass: u8, + gold_reserve: u8, } - 
async fn connection_established( - &mut self, - peer_id: PeerId, - _connection_id: ConnectionId, - _endpoint: &ConnectedPoint, - _num_established: NonZeroU32, - _established_in: Duration, - ) { - println!("Connection established with peer: {}", peer_id); + impl Empire { + /// Create a new empire and assign the assets to begin with + pub fn new(name: String) -> Self { + Empire { + name, + soldiers: 100, + farmers: 100, + blacksmith: 100, + land_mass: 100, + gold_reserve: 100, + } + } } - /// Handle any incoming RPC from any neighbouring empire - fn handle_incoming_message(&mut self, data: Vec>) -> Vec> { - // The semantics is left to the application to handle - match String::from_utf8_lossy(&data[0]) { - // Handle the request to get military status - Cow::Borrowed("military_status") => { - // Get empire name - let empire_name = self.name.as_bytes().to_vec(); + impl EventHandler for Empire { + fn new_listen_addr( + &mut self, + local_peer_id: PeerId, + _listener_id: swarm_nl::ListenerId, + addr: swarm_nl::Multiaddr, + ) { + // announce interfaces we're listening on + println!("Peer id: {}", local_peer_id); + println!("We're listening on the {}", addr); + println!( + "There are {} soldiers guarding the {} Empire gate", + self.soldiers, self.name + ); + } - // Get military capacity - let military_capacity = self.soldiers; + fn connection_established( + &mut self, + peer_id: PeerId, + _connection_id: ConnectionId, + _endpoint: &ConnectedPoint, + _num_established: NonZeroU32, + _established_in: Duration, + ) { + println!("Connection established with peer: {}", peer_id); + } + + /// Handle any incoming RPC from any neighbouring empire + fn rpc_handle_incoming_message(&mut self, data: Vec>) -> Vec> { + // The semantics is left to the application to handle + match String::from_utf8_lossy(&data[0]) { + // Handle the request to get military status + Cow::Borrowed("military_status") => { + // Get empire name + let empire_name = self.name.as_bytes().to_vec(); - // marshall into 
accepted format andd then return it - vec![empire_name, vec![military_capacity]] - }, - _ => Default::default(), + // Get military capacity + let military_capacity = self.soldiers; + + // marshall into accepted format andd then return it + vec![empire_name, vec![military_capacity]] + }, + _ => Default::default(), + } } + + fn gossipsub_handle_incoming_message(&mut self, _source: PeerId, _data: Vec) {} + } + + /// Setup game (This is for the persian Empire) + /// This requires no bootnodes connection + // #[cfg(not(feature = "macedonian"))] + // pub async fn setup_game() -> Core { + // // First, we want to configure our node + // let config = BootstrapConfig::default(); + + // // State kept by this node + // let empire = Empire::new(String::from("Spartan")); + + // // Set up network + // CoreBuilder::with_config(config, empire) + // .build() + // .await + // .unwrap() + // } + + /// The Macedonian Empire setup. + /// These require bootnodes of empires to form alliance. + /// We will be providing the location (peer id and multiaddress) of the Spartan Empire as boot + /// parameters + // #[cfg(feature = "macedonian")] + pub async fn setup_game() -> Core { + // First, we want to configure our node with the bootstrap config file on disk + let config = BootstrapConfig::from_file("bootstrap_config.ini"); + + // State kept by this node + let empire = Empire::new(String::from("Macedonian")); + + // Set up network + CoreBuilder::with_config(config, empire) + .build() + .await + .unwrap() + } + + /// Play game + pub async fn play_game() { + // Setup network + let mut core = setup_game().await; + + // TODO: DELAY FOR A WHILE + + // Print game state + println!("Empire Information:"); + println!("Name: {}", core.state.soldiers); + println!("Farmers: {}", core.state.farmers); + println!("Black smiths: {}", core.state.blacksmith); + println!("Land mass: {}", core.state.land_mass); + println!("Gold reserve: {}", core.state.gold_reserve); + + // TODO! 
FUNCTION TO CHECK NODES I'M CONNECTED WITH + + let request = vec!["military_status".as_bytes().to_vec()]; + + // Spartan Empire + let remote_peer_id = "12D3KooWMD3kvZ7hSngeu1p7HAoCCYusSXqPPYDPvzxsa9T4vz3a"; + + // Prepare request + let status_request = AppData::FetchData { + keys: request, + peer: string_to_peer_id(remote_peer_id).unwrap(), + }; + + // Send request + // let stream_id = core.send_to_network(status_request).await.unwrap(); + + // Get response + // AppData::Fetch returns a Vec>, hence we can parse the response from it + if let Ok(status_response) = core.query_network(status_request).await { + if let AppResponse::FetchData(status) = status_response { + let empire_name = String::from_utf8_lossy(&status[0]); + let military_status = status[1][0]; + + // Print the military status of the empire we just contacted + println!("Empire Contacted:"); + println!("Name: {} Empire", empire_name); + println!("Military Capacity: {} Soldiers", military_status); + } + } + + // Keep looping so we can record network events + loop {} } } -/// Setup game (This is for the persian Empire) -/// This requires no bootnodes connection -// #[cfg(not(feature = "macedonian"))] -// pub async fn setup_game() -> Core { -// // First, we want to configure our node -// let config = BootstrapConfig::default(); - -// // State kept by this node -// let empire = Empire::new(String::from("Spartan")); - -// // Set up network -// CoreBuilder::with_config(config, empire) -// .build() -// .await -// .unwrap() -// } - -/// The Macedonian Empire setup. -/// These require bootnodes of empires to form alliance. 
-/// We will be providing the location (peer id and multiaddress) of the Spartan Empire as boot -/// parameters -// #[cfg(feature = "macedonian")] -pub async fn setup_game() -> Core { - // First, we want to configure our node with the bootstrap config file on disk - let config = BootstrapConfig::from_file("bootstrap_config.ini"); - - // State kept by this node - let empire = Empire::new(String::from("Macedonian")); - - // Set up network - CoreBuilder::with_config(config, empire) - .build() - .await - .unwrap() +mod ping_test { + use swarm_nl::{ + core::ping_config::{PingConfig, PingErrorPolicy}, + Failure, + }; + + use super::*; + /// Sate of the Application + #[derive(Clone)] + pub struct Ping; + + impl EventHandler for Ping { + fn new_listen_addr( + &mut self, + local_peer_id: PeerId, + _listener_id: swarm_nl::ListenerId, + addr: swarm_nl::Multiaddr, + ) { + // announce interfaces we're listening on + println!("Peer id: {}", local_peer_id); + println!("We're listening on the {}", addr); + } + + fn connection_established( + &mut self, + peer_id: PeerId, + _connection_id: ConnectionId, + _endpoint: &ConnectedPoint, + _num_established: NonZeroU32, + _established_in: Duration, + ) { + println!("Connection established with peer: {}", peer_id); + } + + fn outbound_ping_success(&mut self, peer_id: PeerId, duration: Duration) { + println!("we just pinged {:?}. RTT = {:?}", peer_id, duration); + } + + fn outbound_ping_error(&mut self, peer_id: PeerId, err_type: Failure) { + println!("Tried to ping {:?}. 
Error: {:?}", peer_id, err_type); + } + + fn rpc_handle_incoming_message(&mut self, data: Vec>) -> Vec> { + data + } + + fn gossipsub_handle_incoming_message(&mut self, _source: PeerId, _data: Vec) {} + } + + #[cfg(not(feature = "second-node"))] + pub async fn setup_node(buffer: &mut [u8], ports: (u16, u16)) -> Core { + let app_state = Ping; + + // First, we want to configure our node with the bootstrap config file on disk + let config = BootstrapConfig::default() + .generate_keypair_from_protobuf("ed25519", buffer) + .with_tcp(ports.0) + .with_udp(ports.1); + + println!("First node here!"); + + // Set up network + CoreBuilder::with_config(config, app_state) + .build() + .await + .unwrap() + } + + pub async fn run_ping_example() { + // Our test keypair for the first node + let mut protobuf = vec![ + 8, 1, 18, 64, 34, 116, 25, 74, 122, 174, 130, 2, 98, 221, 17, 247, 176, 102, 205, 3, + 27, 202, 193, 27, 6, 104, 216, 158, 235, 38, 141, 58, 64, 81, 157, 155, 36, 193, 50, + 147, 85, 72, 64, 174, 65, 132, 232, 78, 231, 224, 88, 38, 55, 78, 178, 65, 42, 97, 39, + 152, 42, 164, 148, 159, 36, 170, 109, 178, + ]; + // Ports for the first node + let ports = (49500, 49501); + + // The PeerId of the first node + let peer_id = Keypair::from_protobuf_encoding(&protobuf) + .unwrap() + .public() + .to_peer_id(); + + #[cfg(not(feature = "second-node"))] + let node = ping_test::setup_node(&mut protobuf[..], ports).await; + + #[cfg(feature = "second-node")] + let node = ping_test::setup_node(peer_id, ports).await; + + loop {} + } + + /// Setup node + #[cfg(feature = "second-node")] + pub async fn setup_node(peer_id: PeerId, ports: (u16, u16)) -> Core { + use std::collections::HashMap; + // App state + let app_state = Ping; + + // Custom ping configuration + let custom_ping = PingConfig { + interval: Duration::from_secs(3), + timeout: Duration::from_secs(5), + err_policy: PingErrorPolicy::DisconnectAfterMaxErrors(3), + }; + + // Set up bootnode to query node 1 + let mut bootnode = 
HashMap::new(); + bootnode.insert( + peer_id.to_base58(), + format!("/ip4/127.0.0.1/tcp/{}", ports.0), + ); + + println!("Second node here!"); + + // First, we want to configure our node + let config = BootstrapConfig::new().with_bootnodes(bootnode); + + // Set up network by passing in a default handler or application state + CoreBuilder::with_config(config, app_state) + .with_ping(custom_ping) + .build() + .await + .unwrap() + } } -/// Play game -pub async fn play_game() { - // Setup network - let mut core = setup_game().await; +mod layer_communication { + use super::*; - // TODO: DELAY FOR A WHILE + /// Sate of the Application + #[derive(Clone)] + pub struct AppState; - // Print game state - println!("Empire Information:"); - println!("Name: {}", core.state.soldiers); - println!("Farmers: {}", core.state.farmers); - println!("Black smiths: {}", core.state.blacksmith); - println!("Land mass: {}", core.state.land_mass); - println!("Gold reserve: {}", core.state.gold_reserve); + impl EventHandler for AppState { + fn new_listen_addr( + &mut self, + local_peer_id: PeerId, + _listener_id: swarm_nl::ListenerId, + addr: swarm_nl::Multiaddr, + ) { + // announce interfaces we're listening on + println!("Peer id: {}", local_peer_id); + println!("We're listening on the {}", addr); + } - // TODO! 
FUNCTION TO CHECK NODES I'M CONNECTED WITH + fn connection_established( + &mut self, + peer_id: PeerId, + _connection_id: ConnectionId, + _endpoint: &ConnectedPoint, + _num_established: NonZeroU32, + _established_in: Duration, + ) { + println!("Connection established with peer: {}", peer_id); + } - // TODO: Wait a little to help the network boot + fn rpc_handle_incoming_message(&mut self, data: Vec>) -> Vec> { + data + } - // Let them connect first - tokio::time::sleep(Duration::from_secs(6)).await; + fn gossipsub_handle_incoming_message(&mut self, _source: PeerId, _data: Vec) {} + } - let request = vec!["military_status".as_bytes().to_vec()]; + pub async fn run_comm_example() { + // Our test keypair for the first node + let mut protobuf = vec![ + 8, 1, 18, 64, 34, 116, 25, 74, 122, 174, 130, 2, 98, 221, 17, 247, 176, 102, 205, 3, + 27, 202, 193, 27, 6, 104, 216, 158, 235, 38, 141, 58, 64, 81, 157, 155, 36, 193, 50, + 147, 85, 72, 64, 174, 65, 132, 232, 78, 231, 224, 88, 38, 55, 78, 178, 65, 42, 97, 39, + 152, 42, 164, 148, 159, 36, 170, 109, 178, + ]; + // Ports for the first node + let ports = (49500, 49501); - // Spartan Empire - let remote_peer_id = "12D3KooWMD3kvZ7hSngeu1p7HAoCCYusSXqPPYDPvzxsa9T4vz3a"; + // The PeerId of the first node + let peer_id = Keypair::from_protobuf_encoding(&protobuf) + .unwrap() + .public() + .to_peer_id(); - // Prepare request - let status_request = AppData::FetchData { - keys: request, - peer: string_to_peer_id(remote_peer_id).unwrap(), - }; + let node = setup_node(&mut protobuf[..], ports).await; + + // Test that AppData::Echo works (using fetch) + test_echo_atomically(node.clone()).await; + + // Test that AppData::Echo works + test_echo(node.clone()).await; + + loop {} + } + + #[cfg(not(feature = "second-node"))] + pub async fn setup_node(buffer: &mut [u8], ports: (u16, u16)) -> Core { + let app_state = AppState; - // Send request - let stream_id = core.send_to_network(status_request).await.unwrap(); + // First, we want to 
configure our node with the bootstrap config file on disk + let config = BootstrapConfig::default() + .generate_keypair_from_protobuf("ed25519", buffer) + .with_tcp(ports.0) + .with_udp(ports.1); - // Get response - // AppData::Fetch returns a Vec>, hence we can parse the response from it - if let Ok(status_response) = core.recv_from_network(stream_id).await { - if let AppResponse::FetchData(status) = status_response { - let empire_name = String::from_utf8_lossy(&status[0]); - let military_status = status[1][0]; - - // Print the military status of the empire we just contacted - println!("Empire Contacted:"); - println!("Name: {} Empire", empire_name); - println!("Military Capacity: {} Soldiers", military_status); + println!("First node here!"); + + // Set up network + CoreBuilder::with_config(config, app_state) + .build() + .await + .unwrap() + } + + pub async fn test_echo_atomically(mut node: Core) { + // Prepare an echo request + let echo_string = "Sacha rocks!".to_string(); + if let Ok(status_response) = node.query_network(AppData::Echo(echo_string.clone())).await { + if let AppResponse::Echo(echoed_response) = status_response { + // Assert that what was sent was gotten back + assert_eq!(echo_string, echoed_response); + + println!("{} === {}", echo_string, echoed_response); + } } } - // Keep looping so we can record network events - loop {} + pub async fn test_echo(mut node: Core) { + // Prepare an echo request + let echo_string = "Sacha rocks!".to_string(); + + // Get request stream id + let stream_id = node + .send_to_network(AppData::Echo(echo_string.clone())) + .await + .unwrap(); + + println!("This is between the sending and the recieving of the payload. 
It is stored in an internal buffer, until polled for"); + + if let Ok(status_response) = node.recv_from_network(stream_id).await { + if let AppResponse::Echo(echoed_response) = status_response { + // Assert that what was sent was gotten back + assert_eq!(echo_string, echoed_response); + + println!("{} === {}", echo_string, echoed_response); + } + } + } } + +// make pr +// merge to main +// loggings +// network data +// gossip +// examples +// appdata +// configure logger + +// TEST +// Events, dailing, AppData, RPC, Kad, Ping, Gossip +// check for rexeports e.g to initialize gossipsub + +// check if i'm subscribed to topics + +// BootstrapConfig +// CoreBuilder + +// INTEGRATION +// Core +// Ping +// Events +// App requests i.e kad, rpc, echo diff --git a/swarm_nl/.DS_Store b/swarm_nl/.DS_Store new file mode 100644 index 000000000..d5c7ba009 Binary files /dev/null and b/swarm_nl/.DS_Store differ diff --git a/swarm_nl/Cargo.toml b/swarm_nl/Cargo.toml index 8e6424893..8b0934bf1 100644 --- a/swarm_nl/Cargo.toml +++ b/swarm_nl/Cargo.toml @@ -9,13 +9,13 @@ edition = "2021" rust-ini = "0.20.0" thiserror = "1.0.58" rand = "0.8.5" -libp2p = { version="0.53.2", "features"=["async-std", "macros", "ping", "tokio", "tcp", "noise", "yamux", "quic", "tls", "dns", "kad", "identify", "request-response", "cbor"] } +libp2p = { version="0.53.2", "features"=["async-std", "macros", "ping", "tokio", "tcp", "noise", "yamux", "quic", "tls", "dns", "kad", "identify", "request-response", "cbor", "gossipsub"] } libp2p-identity = { version="0.2.8", "features"=["secp256k1", "ecdsa", "rsa", "ed25519"] } futures = "0.3.30" futures-time = "3.0.0" serde = "1.0.200" -async-trait = "0.1.80" base58 = "0.2.0" +docify = "0.2.8" [dependencies.async-std] version = "1.12.0" @@ -27,4 +27,15 @@ optional = true [features] tokio-runtime = ["tokio"] -async-std-runtime = ["async-std"] \ No newline at end of file +async-std-runtime = ["async-std"] +default = ["tokio-runtime"] + +test-listening-node = [] 
+test-dialing-node = [] +test-server-node = [] +test-client-node = [] +test-reading-node = [] +test-writing-node = [] +test-subscribe-node = [] +test-query-node = [] +test-broadcast-node = [] diff --git a/swarm_nl/bootstrap_config.ini b/swarm_nl/bootstrap_config.ini new file mode 100644 index 000000000..cae27de08 --- /dev/null +++ b/swarm_nl/bootstrap_config.ini @@ -0,0 +1,25 @@ +; Copyright (c) 2024 Algorealm +; A typical template showing the various configurations for bootstraping a node + +; If this section is missing, the default ports will be used upon node setup +[ports] +; TCP/IP port to listen on +tcp=3000 +; UDP port to listen on +udp=4000 + +; This section is for the node's identity and cryptographic keypair +; If this section is missing, a Ed25519 keypair will be generated upon node setup +[auth] +; Type of keypair to generate for node identity and message auth e.g RSA, EDSA, Ed25519 +crypto=Ed25519 +; The protobuf serialized format of the node's cryptographic keypair +protobuf_keypair=[] + +[bootstrap] +; The boostrap nodes to connect to immediately after start up +boot_nodes=[12D3KooWGfbL6ZNGWqS11MoptH2A7DB1DG6u85FhXBUPXPVkVVRq:/ip4/x.x.x.x/tcp/1509, QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt:/ip4/x.x.x.x/tcp/1509] + +[blacklist] +; The list of blacklisted peers we don't want to have anything to do with +blacklist=[12D3KooWGfbL6ZNGWqS11MoptH2A7DB1DG6u85FhXBUPXPVkVVRq, QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt] \ No newline at end of file diff --git a/swarm_nl/doc/.DS_Store b/swarm_nl/doc/.DS_Store new file mode 100644 index 000000000..eddc445cc Binary files /dev/null and b/swarm_nl/doc/.DS_Store differ diff --git a/swarm_nl/doc/core/ApplicationInteraction.md b/swarm_nl/doc/core/ApplicationInteraction.md new file mode 100644 index 000000000..ab1cc4eb1 --- /dev/null +++ b/swarm_nl/doc/core/ApplicationInteraction.md @@ -0,0 +1,51 @@ +# Application Interaction + +The core library provides very simple interfaces to communicate with the network 
layer to trigger and drive network behaviour or to make enquiries about internal network state. +This is achieved by constructing a request with the [`AppData`] struct and passing it to the network. An [`AppResponse`] structure is returned, containing the network's response to the application's request.
+There are two ways of querying the network layer with each having its own peculiarities and use-cases: +- Using the [`Core::query_network()`] method: This method aims to complete its operations atomically and blocks until the network returns a response or if the request times out. It is useful when the response from the network is important for the application logic to continue. +```rust + // Default config + let config = BootstrapConfig::default(); + // Default handler + let handler = DefaultHandler; + + // Create a default network core builder + let node = CoreBuilder::with_config(config, handler); + + // Join a network (subscribe to a topic) + let gossip_request = AppData::GossipsubJoinNetwork(MY_NETWORK.to_string()); + + // Blocks until response is returned from the network layer + if let Ok(_) = node.query_network(gossip_request).await { + println!("Subscription successfull"); + } +``` + +- Using the [`Core::send_to_network()`] and [`Core::recv_from_network()`] method: This method does not block and is split into two parts - sending and receiving. When the request is received by the network layer through the [`Core::send_to_network()`] method, a [`StreamId`] is immediately returned and the request is handled. When there is a response (or a timeout), it is stored internally in a response buffer until it is returned by explicitly polling the network through the [`Core::recv_from_network()`] method which takes in the [`StreamId`] returned earlier. The [`StreamId`] helps to track the requests and their corresponding responses internally in the network layer. 
+```rust + // Default config + let config = BootstrapConfig::default(); + // Default handler + let handler = DefaultHandler; + + // Create a default network core builder + let node = CoreBuilder::with_config(config, handler); + + // Join a network (subscribe to a topic) + let gossip_request = AppData::GossipsubJoinNetwork(MY_NETWORK.to_string()); + + // Send request to the network layer + let stream_id = node.send_to_network(gossip_request).await.unwrap(); + + // ... + // Run other application logic + //... + + // Explicitly retrieve the response of our request + if let Ok(result) = node.recv_from_network(stream_id).await { + println!("Subscription successfull"); + assert_eq!(AppResponse::GossipsubJoinSuccess, result); + } +``` +Note: The internal buffer is limited in capacity and pending responses should be removed as soon as possible. A full buffer will prevent the network from receiving more requests. \ No newline at end of file diff --git a/swarm_nl/doc/core/NetworkBuilder.md b/swarm_nl/doc/core/NetworkBuilder.md new file mode 100644 index 000000000..721c363d7 --- /dev/null +++ b/swarm_nl/doc/core/NetworkBuilder.md @@ -0,0 +1,114 @@ +# Network builder + +To build a network or a node, you first need to create a [`CoreBuilder`] object using the [`CoreBuilder::with_config`] method to create a bootstrap node, then you can simply call [`CoreBuilder::build`] to set up the network. This will create a [`Core`] struct with methods you can use to send and receive data to/from the network. + +The [`CoreBuilder::with_config`] method takes two parameters: +1. [`BootstrapConfig`] to pass in a bootstrap node configuration. +2. [`EventHandler`] to respond to network events. 
+ +### Default setup + +Here's how you would build a bootstrap node with the default library settings, using a [`DefaultHandler`] struct to respond to network events: + +```rust +// Default config +let config = BootstrapConfig::default(); +// Default Handler +let handler = DefaultHandler; +let mut network = CoreBuilder::with_config(config, handler) + .build() + .await + .unwrap(); +``` + +### Custom event handler + +To customize how your application handles network events, you'll need to implement the methods from [`EventHandler`]. It's best to implement [`EventHandler`] on your application's state. This allows you to: +- make critical state changes in response to network events. +- log state data at different point during network event changes. + +```rust +use swarm_nl::core::EventHandler; + +#[derive(Clone)] +struct ApplicationState { + name: String, + version: u8, +} + +impl EventHandler for ApplicationState { + async fn new_listen_addr( + &mut self, + local_peer_id: PeerId, + listener_id: swarm_nl::ListenerId, + addr: swarm_nl::Multiaddr, + ) { + // Announce interfaces we're listening on + println!("Peer id: {}", local_peer_id); + println!("We're listening on the {}", addr); + println!( + "Connected to {}, current version: {} ", + self.name, self.version + ); + } + + // Echo data recieved from a RPC + fn rpc_handle_incoming_message(&mut self, data: Vec>) -> Vec> { + println!("Recvd incoming RPC: {:?}", data); + data + } + + // Handle the incoming gossip message + fn gossipsub_handle_incoming_message(&mut self, source: PeerId, data: Vec) { + println!("Recvd incoming gossip: {:?}", data); + } +} +``` + +## Overriding the default network configuration + +You can explicitly overrride the default values of [`CoreBuilder::with_config`] by calling the methods like the following before building the network: + +- [`CoreBuilder::with_transports`]: Configures a custom transport to use, specified in [`TransportOpts`]. 
+- [`CoreBuilder::with_network_id`] : Configures the network ID or name e.g. `/your-protocol-name/1.0`. +- [`CoreBuilder::listen_on`] : Configures the IP address to listen on e.g. IPv4(127.0.0.1). +- [`CoreBuilder::with_idle_connection_timeout`]: Configures a timeout for keeping a connection alive. +- etc. + +For example: + +```rust + // Default config + let config = BootstrapConfig::default(); + // Default handler + let handler = DefaultHandler; + + // Create a default network core builder + let default_node = CoreBuilder::with_config(config, handler); + + // Override default with custom configurations + // Network Id + let mut custom_network_id = "/custom-protocol/1.0".to_string(); + // Transport + let mut custom_transport = TransportOpts::TcpQuic { + tcp_config: TcpConfig::Custom { + ttl: 10, + nodelay: true, + backlog: 10, + }, + }; + // Keep-alive + let mut custom_keep_alive_duration = 20; + // IP address + let mut custom_ip_address = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + + // Build a custom configured node + let custom_node = default_node + .with_network_id(custom_network_id.clone()) + .with_transports(custom_transport.clone()) + .with_idle_connection_timeout(custom_keep_alive_duration.clone()) + .listen_on(custom_ip_address.clone()) + .build() + .await. + .unwrap(); +``` \ No newline at end of file diff --git a/swarm_nl/doc/setup/NodeSetup.md b/swarm_nl/doc/setup/NodeSetup.md new file mode 100644 index 000000000..e627830f9 --- /dev/null +++ b/swarm_nl/doc/setup/NodeSetup.md @@ -0,0 +1,70 @@ +# Node setup + +To set up a node, you'll need to configure either a single peer or a group of peers that can kickstart the network. This involves: +- Reading a `.ini` file with bootstrap configuration data +- Or configuring parameters like bootstrap nodes, TCP/UDP ports, and cryptographic settings directly + +These configurations takes place on a [`BootstrapConfig`] config object and they affect the node and the network at large. 
+[`BootstrapConfig`] can be configured in two ways: +- Using configuration methods: +```rust + let mut bootnode = HashMap::new(); // Bootnodes + let ports = (1509, 2710); // TCP, UDP ports + + bootnode.insert( + PeerId::random(), + "/ip4/x.x.x.x/tcp/1509".to_string() + ); + + // Cryptographic keypair for message signing and identity generation + let mut ed25519_serialized_keypair = + Keypair::generate_ed25519().to_protobuf_encoding().unwrap(); + + // Build config + BootstrapConfig::new() + .with_bootnodes(bootnode) + .with_tcp(ports.0) + .with_udp(ports.1) + .generate_keypair_from_protobuf(key_type_str, &mut ed25519_serialized_keypair); +``` + +- Reading the config values from a `.ini` config file: +```rust + // Build config + BootstrapConfig::from_file("bootstrap_config.ini") + // You can combine methods that override the values in bootstrap_config + .with_tcp(1509); +``` + +When setting up a new network, you won't need to specify any bootnodes initially since you're the only one in the network. However, if you're joining an existing network, you'll need to obtain a peer's `peerId` and `multiaddress` to configure it as your bootnode and connect to them. 
+ +An example `.ini` file could look like this: + +```ini +# example .ini file +[ports] +; TCP/IP port to listen on +tcp=3000 +; UDP port to listen on +udp=4000 + +[auth] +; Type of keypair to generate for node identity and message auth e.g RSA, EDSA, Ed25519 +crypto=Ed25519 +; The protobuf serialized format of the node's cryptographic keypair +protobuf_keypair=[] + +[bootstrap] +; The boostrap nodes to connect to immediately after start up +boot_nodes=[12D3KooWGfbL6ZNGWqS11MoptH2A7DB1DG6u85FhXBUPXPVkVVRq:/ip4/x.x.x.x/tcp/1509, QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt:/x.x.x.x/tcp/1509] + +[blacklist] +; The list of blacklisted peers we don't want to have anything to do with +blacklist=[] +``` + +## Fallback behaviour + +Node setup will fallback to default behavior if: +* The `ini` file doesn't contain any `[ports]` section, the fallback ports for TCP is [`MIN_PORT`] and for UDP is [`MAX_PORT`]. +* No key type is specified, it will default to `Ed25519`. diff --git a/swarm_nl/src/core/mod.rs b/swarm_nl/src/core/mod.rs index c40249222..896c44702 100644 --- a/swarm_nl/src/core/mod.rs +++ b/swarm_nl/src/core/mod.rs @@ -1,12 +1,18 @@ -/// Copyright (c) 2024 Algorealm -/// Core data structures and protocol implementations for building a swarm. +// Copyright 2024 Algorealm +// Apache 2.0 License + +//! Core data structures and protocol implementations for building a swarm. 
+ +#![doc = include_str!("../../doc/core/NetworkBuilder.md")] +#![doc = include_str!("../../doc/core/ApplicationInteraction.md")] + use std::{ collections::{HashMap, HashSet}, - net::{IpAddr, Ipv4Addr}, + fs, + net::IpAddr, num::NonZeroU32, sync::Arc, time::Duration, - fs, }; use base58::FromBase58; @@ -15,10 +21,10 @@ use futures::{ channel::mpsc::{self, Receiver, Sender}, select, SinkExt, StreamExt, }; -use futures_time::time::Duration as AsyncDuration; use libp2p::{ + gossipsub::{self, IdentTopic, TopicHash}, identify::{self, Info}, - kad::{self, store::MemoryStore, Record}, + kad::{self, store::MemoryStore, Mode, Record, RecordKey}, multiaddr::Protocol, noise, ping::{self, Failure}, @@ -27,22 +33,26 @@ use libp2p::{ tcp, tls, yamux, Multiaddr, StreamProtocol, Swarm, SwarmBuilder, }; -use self::ping_config::*; +use self::{ + gossipsub_cfg::{Blacklist, GossipsubInfo}, + ping_config::*, +}; use super::*; use crate::{setup::BootstrapConfig, util::string_to_peer_id}; #[cfg(feature = "async-std-runtime")] -pub use async_std::sync::Mutex; +use async_std::sync::Mutex; #[cfg(feature = "tokio-runtime")] -pub use tokio::sync::Mutex; +use tokio::sync::Mutex; mod prelude; pub use prelude::*; +mod tests; /// The Core Behaviour implemented which highlights the various protocols -/// we'll be adding support for +/// we'll be adding support for. 
#[derive(NetworkBehaviour)] #[behaviour(to_swarm = "CoreEvent")] struct CoreBehaviour { @@ -50,94 +60,110 @@ struct CoreBehaviour { kademlia: kad::Behaviour, identify: identify::Behaviour, request_response: request_response::cbor::Behaviour, + gossipsub: gossipsub::Behaviour, } -/// Network events generated as a result of supported and configured `NetworkBehaviour`'s +/// Network events generated as a result of supported and configured [`NetworkBehaviour`]'s #[derive(Debug)] enum CoreEvent { Ping(ping::Event), Kademlia(kad::Event), Identify(identify::Event), RequestResponse(request_response::Event), + Gossipsub(gossipsub::Event), } -/// Implement ping events for [`CoreEvent`] +/// Implement ping events for [`CoreEvent`]. impl From for CoreEvent { fn from(event: ping::Event) -> Self { CoreEvent::Ping(event) } } -/// Implement kademlia events for [`CoreEvent`] +/// Implement kademlia events for [`CoreEvent`]. impl From for CoreEvent { fn from(event: kad::Event) -> Self { CoreEvent::Kademlia(event) } } -/// Implement identify events for [`CoreEvent`] +/// Implement identify events for [`CoreEvent`]. impl From for CoreEvent { fn from(event: identify::Event) -> Self { CoreEvent::Identify(event) } } -/// Implement request_response events for [`CoreEvent`] +/// Implement request_response events for [`CoreEvent`]. impl From> for CoreEvent { fn from(event: request_response::Event) -> Self { CoreEvent::RequestResponse(event) } } -/// Structure containing necessary data to build [`Core`] +/// Implement gossipsub events for [`CoreEvent`]. +impl From for CoreEvent { + fn from(event: gossipsub::Event) -> Self { + CoreEvent::Gossipsub(event) + } +} + +/// Structure containing necessary data to build [`Core`]. pub struct CoreBuilder { + /// The network ID of the network. network_id: StreamProtocol, + /// The cryptographic keypair of the node. keypair: Keypair, + /// The TCP and UDP ports to listen on. tcp_udp_port: (Port, Port), + /// The bootnodes to connect to. 
boot_nodes: HashMap, - /// the network event handler + /// The blacklist of peers to ignore. + blacklist: Blacklist, + /// The network event handler. handler: T, - /// Prevents blocking forever due to absence of expected data from the network layer - network_read_delay: AsyncDuration, /// The size of the stream buffers to use to track application requests to the network layer /// internally. stream_size: usize, + /// The IP address to listen on. ip_address: IpAddr, - /// Connection keep-alive duration while idle + /// Connection keep-alive duration while idle. keep_alive_duration: Seconds, - transport: TransportOpts, /* Maybe this can be a collection in the future to support - * additive transports */ - /// The `Behaviour` of the `Ping` protocol + /// The transport protocols being used. + /// TODO: This can be a collection in the future to support additive transports. + transport: TransportOpts, + /// The `Behaviour` of the `Ping` protocol. ping: (ping::Behaviour, PingErrorPolicy), - /// The `Behaviour` of the `Kademlia` protocol + /// The `Behaviour` of the `Kademlia` protocol. kademlia: kad::Behaviour, - /// The `Behaviour` of the `Identify` protocol + /// The `Behaviour` of the `Identify` protocol. identify: identify::Behaviour, - /// The `Behaviour` of the `Request-Response` protocol. - /// The second field value is the function to handle an incoming request from a peer + /// The `Behaviour` of the `Request-Response` protocol. The second field value is the function + /// to handle an incoming request from a peer. request_response: Behaviour, + /// The `Behaviour` of the `GossipSub` protocol. + gossipsub: gossipsub::Behaviour, } impl CoreBuilder { /// Return a [`CoreBuilder`] struct configured with [`BootstrapConfig`] and default values. /// Here, it is certain that [`BootstrapConfig`] contains valid data. - /// A type that implements [`EventHandler`] is passed to handle and react to network events. 
+ /// A type that implements [`EventHandler`] is passed to handle and responde to network events. pub fn with_config(config: BootstrapConfig, handler: T) -> Self { // The default network id let network_id = DEFAULT_NETWORK_ID; - // TCP/IP and QUIC are supported by default + // The default transports (TCP/IP and QUIC) let default_transport = TransportOpts::TcpQuic { tcp_config: TcpConfig::Default, }; - // Peer Id + // The peer ID of the node let peer_id = config.keypair().public().to_peer_id(); // Set up default config for Kademlia let mut cfg = kad::Config::default(); cfg.set_protocol_names(vec![StreamProtocol::new(network_id)]); - let store = kad::store::MemoryStore::new(peer_id); let kademlia = kad::Behaviour::with_config(peer_id, store, cfg); @@ -152,19 +178,26 @@ impl CoreBuilder { request_response::Config::default(), ); + // Set up default config for gossiping + let cfg = gossipsub::Config::default(); + let gossipsub = gossipsub::Behaviour::new( + gossipsub::MessageAuthenticity::Signed(config.keypair()), + cfg, + ) + .map_err(|_| SwarmNlError::GossipConfigError) + .unwrap(); + // Initialize struct with information from `BootstrapConfig` CoreBuilder { network_id: StreamProtocol::new(network_id), keypair: config.keypair(), tcp_udp_port: config.ports(), boot_nodes: config.bootnodes(), + blacklist: config.blacklist(), handler, - // Timeout defaults to 60 seconds - network_read_delay: AsyncDuration::from_secs(NETWORK_READ_TIMEOUT), stream_size: usize::MAX, - // Default is to listen on all interfaces (ipv4) - ip_address: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - // Default to 60 seconds + // Default is to listen on all interfaces (ipv4). 
+ ip_address: IpAddr::V4(DEFAULT_IP_ADDRESS), keep_alive_duration: DEFAULT_KEEP_ALIVE_DURATION, transport: default_transport, // The peer will be disconnected after 20 successive timeout errors are recorded @@ -175,12 +208,14 @@ impl CoreBuilder { kademlia, identify, request_response, + gossipsub, } } - /// Explicitly configure the network (protocol) id e.g /swarmnl/1.0. - /// Note that it must be of the format "/protocol-name/version" else it will default to - /// "/swarmnl/1.0" + /// Explicitly configure the network (protocol) id. + /// + /// Note that it must be of the format "/protocol-name/version" otherwise it will default to + /// "/swarmnl/1.0". See: [`DEFAULT_NETWORK_ID`]. pub fn with_network_id(self, protocol: String) -> Self { if protocol.len() > MIN_NETWORK_ID_LENGTH.into() && protocol.starts_with("/") { CoreBuilder { @@ -190,26 +225,18 @@ impl CoreBuilder { ..self } } else { - panic!("Could not parse provided network id: it must be of the format '/protocol-name/version'"); + panic!("Could not parse provided network id"); } } - /// Configure the IP address to listen on + /// Configure the IP address to listen on. + /// + /// If none is specified, the default value is `Ipv4Addr::new(0, 0, 0, 0)`. See: + /// [`DEFAULT_IP_ADDRESS`]. pub fn listen_on(self, ip_address: IpAddr) -> Self { CoreBuilder { ip_address, ..self } } - /// Configure the timeout for requests to read from the network layer. - /// Reading from the network layer could potentially block if the data corresponding to the - /// [`StreamId`] specified could not be found (or has been read already). This prevents the - /// future from `await`ing forever. Defaults to 60 seconds - pub fn with_network_read_delay(self, network_read_delay: AsyncDuration) -> Self { - CoreBuilder { - network_read_delay, - ..self - } - } - /// Configure how long to keep a connection alive (in seconds) once it is idling. 
pub fn with_idle_connection_timeout(self, keep_alive_duration: Seconds) -> Self { CoreBuilder { @@ -219,8 +246,8 @@ impl CoreBuilder { } /// Configure the size of the stream buffers to use to track application requests to the network - /// layer internally. This should be as large an possible to prevent dropping of requests to the - /// network layer. Defaults to [`usize::MAX`] + /// layer internally. This should be as large an possible to prevent dropping off requests to + /// the network layer. Defaults to [`usize::MAX`]. pub fn with_stream_size(self, size: usize) -> Self { CoreBuilder { stream_size: size, @@ -245,10 +272,7 @@ impl CoreBuilder { } /// Configure the RPC protocol for the network. - pub fn with_rpc(self, config: RpcConfig) -> Self - where - F: Fn(Vec) -> Vec, - { + pub fn with_rpc(self, config: RpcConfig) -> Self { // Set the request-response protocol CoreBuilder { request_response: Behaviour::new( @@ -261,7 +285,6 @@ impl CoreBuilder { } } - /// TODO! Kademlia Config has to be cutom because of some setting exposed /// Configure the `Kademlia` protocol for the network. pub fn with_kademlia(self, config: kad::Config) -> Self { // PeerId @@ -272,40 +295,51 @@ impl CoreBuilder { CoreBuilder { kademlia, ..self } } + /// Configure the `Gossipsub` protocol for the network. + /// + /// # Panics + /// + /// This function panics if `Gossipsub` cannot be configured properly. + pub fn with_gossipsub( + self, + config: gossipsub::Config, + auth: gossipsub::MessageAuthenticity, + ) -> Self { + let gossipsub = gossipsub::Behaviour::new(auth, config) + .map_err(|_| SwarmNlError::GossipConfigError) + .unwrap(); + + CoreBuilder { gossipsub, ..self } + } + /// Configure the transports to support. 
pub fn with_transports(self, transport: TransportOpts) -> Self { CoreBuilder { transport, ..self } } - /// Configure network event handler - /// This configures the functions to be called when various network events take place + /// Configure a handler to respond to network events. pub fn configure_network_events(self, handler: T) -> Self { CoreBuilder { handler, ..self } } - /// Return the id of the network - fn network_id(&self) -> String { + /// Return the id of the network. + pub fn network_id(&self) -> String { self.network_id.to_string() } /// Build the [`Core`] data structure. /// /// Handles the configuration of the libp2p Swarm structure and the selected transport - /// protocols, behaviours and node identity. + /// protocols, behaviours and node identity for tokio and async-std runtimes. The Swarm is + /// wrapped in the Core construct which serves as the interface to interact with the internal + /// networking layer. pub async fn build(self) -> SwarmNlResult> { - // Build and configure the libp2p Swarm structure. Thereby configuring the selected - // transport protocols, behaviours and node identity. The Swarm is wrapped in the Core - // construct which serves as the interface to interact with the internal networking - // layer - #[cfg(feature = "async-std-runtime")] let mut swarm = { - // We're dealing with async-std here - // Configure transports + // Configure transports for default and custom configurations let swarm_builder: SwarmBuilder<_, _> = match self.transport { TransportOpts::TcpQuic { tcp_config } => match tcp_config { TcpConfig::Default => { - // Use the default config libp2p::SwarmBuilder::with_existing_identity(self.keypair.clone()) .with_async_std() .with_tcp( @@ -323,13 +357,11 @@ impl CoreBuilder { .await .map_err(|_| SwarmNlError::DNSConfigError)? 
}, - TcpConfig::Custom { ttl, nodelay, backlog, } => { - // Use the provided config let tcp_config = tcp::Config::default() .ttl(ttl) .nodelay(nodelay) @@ -361,14 +393,13 @@ impl CoreBuilder { // Configure the selected protocols and their corresponding behaviours swarm_builder - .with_behaviour(|_| - // Configure the selected behaviours - CoreBehaviour { - ping: self.ping.0, - kademlia: self.kademlia, - identify: self.identify, - request_response: self.request_response - }) + .with_behaviour(|_| CoreBehaviour { + ping: self.ping.0, + kademlia: self.kademlia, + identify: self.identify, + request_response: self.request_response, + gossipsub: self.gossipsub, + }) .map_err(|_| SwarmNlError::ProtocolConfigError)? .with_swarm_config(|cfg| { cfg.with_idle_connection_timeout(Duration::from_secs(self.keep_alive_duration)) @@ -378,12 +409,9 @@ impl CoreBuilder { #[cfg(feature = "tokio-runtime")] let mut swarm = { - // We're dealing with tokio here - // Configure transports let swarm_builder: SwarmBuilder<_, _> = match self.transport { TransportOpts::TcpQuic { tcp_config } => match tcp_config { TcpConfig::Default => { - // Use the default config libp2p::SwarmBuilder::with_existing_identity(self.keypair.clone()) .with_tokio() .with_tcp( @@ -398,13 +426,11 @@ impl CoreBuilder { })? 
.with_quic() }, - TcpConfig::Custom { ttl, nodelay, backlog, } => { - // Use the provided config let tcp_config = tcp::Config::default() .ttl(ttl) .nodelay(nodelay) @@ -433,14 +459,13 @@ impl CoreBuilder { // Configure the selected protocols and their corresponding behaviours swarm_builder - .with_behaviour(|_| - // Configure the selected behaviours - CoreBehaviour { - ping: self.ping.0, - kademlia: self.kademlia, - identify: self.identify, - request_response: self.request_response - }) + .with_behaviour(|_| CoreBehaviour { + ping: self.ping.0, + kademlia: self.kademlia, + identify: self.identify, + request_response: self.request_response, + gossipsub: self.gossipsub, + }) .map_err(|_| SwarmNlError::ProtocolConfigError)? .with_swarm_config(|cfg| { cfg.with_idle_connection_timeout(Duration::from_secs(self.keep_alive_duration)) @@ -449,7 +474,8 @@ impl CoreBuilder { }; // Configure the transport multiaddress and begin listening. - // It can handle multiple future tranports based on configuration e.g WebRTC + // It can handle multiple future tranports based on configuration e.g, in the future, + // WebRTC. 
match self.transport { // TCP/IP and QUIC TransportOpts::TcpQuic { tcp_config: _ } => { @@ -489,17 +515,22 @@ impl CoreBuilder { if let Some(peer_id) = string_to_peer_id(&peer_info.0) { // Multiaddress if let Ok(multiaddr) = peer_info.1.parse::() { - swarm - .behaviour_mut() - .kademlia - .add_address(&peer_id, multiaddr.clone()); - - println!("Dailing {}", multiaddr); - - // Dial them - swarm - .dial(peer_id) - .map_err(|_| SwarmNlError::RemotePeerDialError(multiaddr.to_string()))?; + // Strange but make sure the peers are not a part of our blacklist + if !self.blacklist.list.iter().any(|&id| id == peer_id) { + swarm + .behaviour_mut() + .kademlia + .add_address(&peer_id, multiaddr.clone()); + + println!("Dailing {}", multiaddr); + + // Dial them + swarm + .dial(multiaddr.clone().with(Protocol::P2p(peer_id))) + .map_err(|_| { + SwarmNlError::RemotePeerDialError(multiaddr.to_string()) + })?; + } } } } @@ -507,14 +538,24 @@ impl CoreBuilder { // Begin DHT bootstrap, hopefully bootnodes were supplied let _ = swarm.behaviour_mut().kademlia.bootstrap(); + // Set node as SERVER + swarm.behaviour_mut().kademlia.set_mode(Some(Mode::Server)); + + // Register and inform swarm of our blacklist + for peer_id in &self.blacklist.list { + swarm.behaviour_mut().gossipsub.blacklist_peer(peer_id); + } + // There must be a way for the application to communicate with the underlying networking - // core. This will involve acceptiing data and pushing data to the application layer. + // core. This will involve accepting and pushing data to the application layer. // Two streams will be opened: The first mpsc stream will allow SwarmNL push data to the - // application and the application will comsume it (single consumer) The second stream + // application and the application will consume it (single consumer). 
The second stream // will have SwarmNl (being the consumer) recieve data and commands from multiple areas - // in the application; - let (application_sender, network_receiver) = mpsc::channel::(100); - let (network_sender, application_receiver) = mpsc::channel::(100); + // in the application. + let (application_sender, network_receiver) = + mpsc::channel::(STREAM_BUFFER_CAPACITY); + let (network_sender, application_receiver) = + mpsc::channel::(STREAM_BUFFER_CAPACITY); // Set up the ping network info. // `PeerId` does not implement `Default` so we will add the peerId of this node as seed @@ -542,6 +583,11 @@ impl CoreBuilder { manager, }; + // Set up Gossipsub network information + let gossip_info = GossipsubInfo { + blacklist: self.blacklist, + }; + // Initials stream id let stream_id = StreamId::new(); let stream_request_buffer = @@ -553,23 +599,21 @@ impl CoreBuilder { let network_info = NetworkInfo { id: self.network_id, ping: ping_info, + gossipsub: gossip_info, }; // Build the network core let network_core = Core { keypair: self.keypair, application_sender, - // network_sender, - // application_receiver, stream_request_buffer: stream_request_buffer.clone(), stream_response_buffer: stream_response_buffer.clone(), - network_read_delay: self.network_read_delay, current_stream_id: Arc::new(Mutex::new(stream_id)), // Save handler as the state of the application state: self.handler, }; - // Spin up task to handle async operations and data on the network. + // Spin up task to handle async operations and data on the network #[cfg(feature = "async-std-runtime")] async_std::task::spawn(Core::handle_async_operations( swarm, @@ -579,7 +623,7 @@ impl CoreBuilder { network_core.clone(), )); - // Spin up task to handle async operations and data on the network. 
+ // Spin up task to handle async operations and data on the network #[cfg(feature = "tokio-runtime")] tokio::task::spawn(Core::handle_async_operations( swarm, @@ -603,20 +647,28 @@ impl CoreBuilder { network_core.clone(), )); + // Wait for a few seconds before passing control to the application + #[cfg(feature = "async-std-runtime")] + async_std::task::sleep(Duration::from_secs(BOOT_WAIT_TIME)).await; + + // Wait for a few seconds before passing control to the application + #[cfg(feature = "tokio-runtime")] + tokio::time::sleep(Duration::from_secs(BOOT_WAIT_TIME)).await; + Ok(network_core) } } -/// The core interface for the application layer to interface with the networking layer +/// The core interface for the application layer to interface with the networking layer. #[derive(Clone)] pub struct Core { keypair: Keypair, /// The producing end of the stream that sends data to the network layer from the - /// application + /// application. application_sender: Sender, - /// The consuming end of the stream that recieves data from the network layer + /// The consuming end of the stream that recieves data from the network layer. // application_receiver: Receiver, - /// The producing end of the stream that sends data from the network layer to the application + /// The producing end of the stream that sends data from the network layer to the application. // network_sender: Sender, /// This serves as a buffer for the results of the requests to the network layer. /// With this, applications can make async requests and fetch their results at a later time @@ -625,8 +677,6 @@ pub struct Core { stream_response_buffer: Arc>, /// Store a [`StreamId`] representing a network request stream_request_buffer: Arc>, - /// The network read timeout - network_read_delay: AsyncDuration, /// Current stream id. 
Useful for opening new streams, we just have to bump the number by 1 current_stream_id: Arc>, /// The state of the application @@ -635,14 +685,13 @@ pub struct Core { impl Core { /// Serialize keypair to protobuf format and write to config file on disk. This could be useful - /// for saving a keypair when going offline for future use. + /// for saving a keypair for future use when going offline. /// /// It returns a boolean to indicate success of operation. Only key types other than RSA can be /// serialized to protobuf format and only a single keypair can be saved at a time. pub fn save_keypair_offline(&self, config_file_path: &str) -> bool { // Check the file exists, and create one if not - if let Ok(metadata) = fs::metadata(config_file_path) { - } else { + if let Err(_) = fs::metadata(config_file_path) { fs::File::create(config_file_path).expect("could not create config file"); } @@ -667,12 +716,13 @@ impl Core { false } - /// Return the node's `PeerId` - pub fn peer_id(&self) -> String { - self.keypair.public().to_peer_id().to_string() + /// Return the node's `PeerId`. + pub fn peer_id(&self) -> PeerId { + self.keypair.public().to_peer_id() } - /// Send data to the network layer and recieve a unique `StreamId` to track the request + /// Send data to the network layer and recieve a unique `StreamId` to track the request. + /// /// If the internal stream buffer is full, `None` will be returned. pub async fn send_to_network(&mut self, app_request: AppData) -> Option { // Generate stream id @@ -710,11 +760,9 @@ impl Core { } } - /// TODO! Buffer cleanup algorithm - /// Explicitly rectrieve the reponse to a request sent to the network layer. - /// This function is decoupled from the [`send_to_network()`] function so as to prevent delay - /// and read immediately as the response to the request should already be in the stream response - /// buffer. + /// Explicitly retrieve the reponse to a request sent to the network layer. 
+ /// This function is decoupled from the [`Core::send_to_network()`] method so as to prevent + /// blocking until the response is returned. pub async fn recv_from_network(&mut self, stream_id: StreamId) -> NetworkResult { #[cfg(feature = "async-std-runtime")] { @@ -737,7 +785,7 @@ impl Core { return Err(NetworkError::NetworkReadTimeout); } - // Failed to acquire the lock, sleep and retry + // Response has not arrived, sleep and retry async_std::task::sleep(Duration::from_secs(TASK_SLEEP_DURATION)).await; } }); @@ -770,7 +818,7 @@ impl Core { return Err(NetworkError::NetworkReadTimeout); } - // Failed to acquire the lock, sleep and retry + // Response has not arrived, sleep and retry tokio::time::sleep(Duration::from_secs(TASK_SLEEP_DURATION)).await; } }); @@ -783,15 +831,17 @@ impl Core { } } - /// Perform an atomic `send` and `recieve` from the network layer. This function is atomic and - /// blocks until the result of the request is returned from the network layer. This function - /// should mostly be used when the result of the request is needed immediately and delay can be - /// condoned. It will still timeout if the delay exceeds the configured period. + /// Perform an atomic `send` and `recieve` to and from the network layer. This function is + /// atomic and blocks until the result of the request is returned from the network layer. + /// + /// This function should mostly be used when the result of the request is needed immediately and + /// delay can be condoned. It will still timeout if the delay exceeds the configured period. + /// /// If the internal buffer is full, it will return an error. 
- pub async fn fetch_from_network(&mut self, request: AppData) -> NetworkResult { - // send request + pub async fn query_network(&mut self, request: AppData) -> NetworkResult { + // Send request if let Some(stream_id) = self.send_to_network(request).await { - // wait to recieve response from the network + // Wait to recieve response from the network self.recv_from_network(stream_id).await } else { Err(NetworkError::StreamBufferOverflow) @@ -809,15 +859,23 @@ impl Core { match response { // Send response to request operations specified by the application layer StreamData::ToApplication(stream_id, response) => match response { + // Error + AppResponse::Error(error) => buffer_guard.insert(stream_id, Err(error)), + // Success res @ AppResponse::Echo(..) => buffer_guard.insert(stream_id, Ok(res)), - res @ AppResponse::DailPeer(..) => buffer_guard.insert(stream_id, Ok(res)), + res @ AppResponse::DailPeerSuccess(..) => buffer_guard.insert(stream_id, Ok(res)), res @ AppResponse::KademliaStoreRecordSuccess => buffer_guard.insert(stream_id, Ok(res)), - res @ AppResponse::KademliaLookupRecord(..) => buffer_guard.insert(stream_id, Ok(res)), + res @ AppResponse::KademliaLookupSuccess(..) => buffer_guard.insert(stream_id, Ok(res)), res @ AppResponse::KademliaGetProviders{..} => buffer_guard.insert(stream_id, Ok(res)), + res @ AppResponse::KademliaNoProvidersFound => buffer_guard.insert(stream_id, Ok(res)), res @ AppResponse::KademliaGetRoutingTableInfo { .. } => buffer_guard.insert(stream_id, Ok(res)), res @ AppResponse::FetchData(..) 
=> buffer_guard.insert(stream_id, Ok(res)), - // Error - AppResponse::Error(error) => buffer_guard.insert(stream_id, Err(error)) + res @ AppResponse::GetNetworkInfo{..} => buffer_guard.insert(stream_id, Ok(res)), + res @ AppResponse::GossipsubBroadcastSuccess => buffer_guard.insert(stream_id, Ok(res)), + res @ AppResponse::GossipsubJoinSuccess => buffer_guard.insert(stream_id, Ok(res)), + res @ AppResponse::GossipsubExitSuccess => buffer_guard.insert(stream_id, Ok(res)), + res @ AppResponse::GossipsubBlacklistSuccess => buffer_guard.insert(stream_id, Ok(res)), + res @ AppResponse::GossipsubGetInfo{..} => buffer_guard.insert(stream_id, Ok(res)), }, _ => false }; @@ -827,9 +885,11 @@ impl Core { } /// Handle async operations, which basically involved handling two major data sources: + /// /// - Streams coming from the application layer. /// - Events generated by (libp2p) network activities. - /// Important information are sent to the application layer over a (mpsc) stream + /// + /// Important information are sent to the application layer over a (mpsc) stream. async fn handle_async_operations( mut swarm: Swarm, mut network_info: NetworkInfo, @@ -837,713 +897,653 @@ impl Core { mut receiver: Receiver, mut network_core: Core, ) { - let mut exec_queue_1 = ExecQueue::new(); let mut exec_queue_2 = ExecQueue::new(); let mut exec_queue_3 = ExecQueue::new(); let mut exec_queue_4 = ExecQueue::new(); - // Loop to handle incoming application streams indefinitely. + // Loop to handle incoming application streams indefinitely loop { select! 
{ - // handle incoming stream data - stream_data = receiver.next() => { - match stream_data { - Some(incoming_data) => { - match incoming_data { - StreamData::FromApplication(stream_id, app_data) => { - // Trackable stream id - let stream_id = stream_id; - match app_data { - // Put back into the stream what we read from it - AppData::Echo(message) => { - // Send the response back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Echo(message))).await; - }, - AppData::DailPeer(multiaddr) => { - if let Ok(multiaddr) = multiaddr.parse::() { - if let Ok(_) = swarm.dial(multiaddr.clone()) { + // Handle incoming stream data + stream_data = receiver.next() => { + match stream_data { + Some(incoming_data) => { + match incoming_data { + StreamData::FromApplication(stream_id, app_data) => { + // Trackable stream id + let stream_id = stream_id; + match app_data { + // Put back into the stream what we read from it + AppData::Echo(message) => { // Send the response back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::DailPeer(multiaddr.to_string()))).await; - } else { - // Return error - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::DailPeerError))).await; + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Echo(message))).await; + }, + AppData::DailPeer(peer_id, multiaddr) => { + if let Ok(multiaddr) = multiaddr.parse::() { + // Add to routing table + swarm.behaviour_mut().kademlia.add_address(&peer_id, multiaddr.clone()); + if let Ok(_) = swarm.dial(multiaddr.clone().with(Protocol::P2p(peer_id))) { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::DailPeerSuccess(multiaddr.to_string()))).await; + } else { + // Return error + let _ = network_sender.send(StreamData::ToApplication(stream_id, 
AppResponse::Error(NetworkError::DailPeerError))).await; + } + } + }, + // Store a value in the DHT and (optionally) on explicit specific peers + AppData::KademliaStoreRecord { key, value, expiration_time, explicit_peers } => { + // Create a kad record + let mut record = Record::new(key.clone(), value); + + // Set (optional) expiration time + record.expires = expiration_time; + + // Insert into DHT + if let Ok(_) = swarm.behaviour_mut().kademlia.put_record(record.clone(), kad::Quorum::One) { + // The node automatically becomes a provider in the network + let _ = swarm.behaviour_mut().kademlia.start_providing(RecordKey::new(&key)); + + // Send streamId to libp2p events, to track response + exec_queue_1.push(stream_id).await; + + // Cache record on peers explicitly (if specified) + if let Some(explicit_peers) = explicit_peers { + // Extract PeerIds + let peers = explicit_peers.iter().map(|peer_id_string| { + PeerId::from_bytes(&peer_id_string.from_base58().unwrap_or_default()) + }).filter_map(Result::ok).collect::>(); + swarm.behaviour_mut().kademlia.put_record_to(record, peers.into_iter(), kad::Quorum::One); + } + } else { + // Return error + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::KadStoreRecordError(key)))).await; + } + }, + // Perform a lookup in the DHT + AppData::KademliaLookupRecord { key } => { + let _ = swarm.behaviour_mut().kademlia.get_record(key.clone().into()); + + // Send streamId to libp2p events, to track response + exec_queue_2.push(stream_id).await; + }, + // Perform a lookup of peers that store a record + AppData::KademliaGetProviders { key } => { + swarm.behaviour_mut().kademlia.get_providers(key.clone().into()); + + // Send streamId to libp2p events, to track response + exec_queue_3.push(stream_id).await; } - } - }, - // Store a value in the DHT and (optionally) on explicit specific peers - AppData::KademliaStoreRecord { key, value, expiration_time, explicit_peers } => { - // create a kad 
record - let mut record = Record::new(key.clone(), value); - - // Set (optional) expiration time - record.expires = expiration_time; - - // Insert into DHT - if let Ok(_) = swarm.behaviour_mut().kademlia.put_record(record.clone(), kad::Quorum::One) { - // Send streamId to libp2p events, to track response - exec_queue_1.push(stream_id).await; - - // Cache record on peers explicitly (if specified) - if let Some(explicit_peers) = explicit_peers { - // Extract PeerIds - let peers = explicit_peers.iter().map(|peer_id_string| { - PeerId::from_bytes(&peer_id_string.from_base58().unwrap_or_default()) - }).filter_map(Result::ok).collect::>(); - - swarm.behaviour_mut().kademlia.put_record_to(record, peers.into_iter(), kad::Quorum::One); + // Stop providing a record on the network + AppData::KademliaStopProviding { key } => { + swarm.behaviour_mut().kademlia.stop_providing(&key.into()); } - } else { - // Return error - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::KadStoreRecordError(key)))).await; - } - }, - // Perform a lookup in the DHT - AppData::KademliaLookupRecord { key } => { - let _ = swarm.behaviour_mut().kademlia.get_record(key.clone().into()); + // Remove record from local store + AppData::KademliaDeleteRecord { key } => { + swarm.behaviour_mut().kademlia.remove_record(&key.into()); + } + // Return important routing table info. We could return kbuckets depending on needs, for now it's just the network ID. 
+ AppData::KademliaGetRoutingTableInfo => { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaGetRoutingTableInfo{protocol_id: network_info.id.to_string()})).await; + }, + // Fetch data quickly from a peer over the network + AppData::FetchData { keys, peer } => { + // Construct the RPC object + let rpc = Rpc::ReqResponse { data: keys.clone() }; + + // Inform the swarm to make the request + let _ = swarm + .behaviour_mut() + .request_response + .send_request(&peer, rpc); + + // Send streamId to libp2p events, to track response + exec_queue_4.push(stream_id).await; + }, + // Return important information about the node + AppData::GetNetworkInfo => { + // Connected peers + let connected_peers = swarm.connected_peers().map(|peer| peer.to_owned()).collect::>(); - // Send streamId to libp2p events, to track response - exec_queue_2.push(stream_id).await; - }, - // Perform a lookup of peers that store a record - AppData::KademliaGetProviders { key } => { - let _ = swarm.behaviour_mut().kademlia.get_providers(key.clone().into()); + // External Addresses + let external_addresses = swarm.listeners().map(|multiaddr| multiaddr.to_string()).collect::>(); - // Send streamId to libp2p events, to track response - exec_queue_3.push(stream_id).await; - } - // Stop providing a record on the network - AppData::KademliaStopProviding { key } => { - swarm.behaviour_mut().kademlia.stop_providing(&key.into()); - } - // Remove record from local store - AppData::KademliaDeleteRecord { key } => { - swarm.behaviour_mut().kademlia.remove_record(&key.into()); - } - // Return important routing table info - AppData::KademliaGetRoutingTableInfo => { - // Send the response back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaGetRoutingTableInfo{protocol_id: network_info.id.to_string()})).await; - }, - // Fetch data quickly from a peer over the 
network - AppData::FetchData { keys, peer } => { - // Construct the RPC object - let rpc = Rpc::ReqResponse { data: keys.clone() }; - - // Inform the swarm to make the request - let _ = swarm - .behaviour_mut() - .request_response - .send_request(&peer, rpc); - - // Send streamId to libp2p events, to track response - exec_queue_4.push(stream_id).await; + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::GetNetworkInfo { peer_id: swarm.local_peer_id().clone(), connected_peers, external_addresses })).await; + }, + // Send gossip message to peers + AppData::GossipsubBroadcastMessage { message, topic } => { + // Get the topic hash + let topic_hash = TopicHash::from_raw(topic); + + // Marshall message into a single string + let message = message.join(GOSSIP_MESSAGE_SEPARATOR); + + // Check if we're already subscribed to the topic + let is_subscribed = swarm.behaviour().gossipsub.mesh_peers(&topic_hash).any(|peer| peer == swarm.local_peer_id()); + + // Gossip + if swarm + .behaviour_mut().gossipsub + .publish(topic_hash, message.as_bytes()).is_ok() && !is_subscribed { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::GossipsubBroadcastSuccess)).await; + } else { + // Return error + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::GossipsubBroadcastMessageError))).await; + } + }, + // Join a mesh network + AppData::GossipsubJoinNetwork(topic) => { + // Create a new topic + let topic = IdentTopic::new(topic); + + // Subscribe + if swarm.behaviour_mut().gossipsub.subscribe(&topic).is_ok() { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::GossipsubJoinSuccess)).await; + } else { + // Return error + let _ = network_sender.send(StreamData::ToApplication(stream_id, 
AppResponse::Error(NetworkError::GossipsubJoinNetworkError))).await; + } + }, + // Get information concerning our gossiping + AppData::GossipsubGetInfo => { + // Topics we're subscribed to + let subscribed_topics = swarm.behaviour().gossipsub.topics().map(|topic| topic.clone().into_string()).collect::>(); + + // Peers we know and the topics they are subscribed too + let mesh_peers = swarm.behaviour().gossipsub.all_peers().map(|(peer, topics)| { + (peer.to_owned(), topics.iter().map(|&t| t.clone().as_str().to_owned()).collect::>()) + }).collect::>(); + + // Retrieve blacklist + let blacklist = network_info.gossipsub.blacklist.into_inner(); + + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::GossipsubGetInfo { topics: subscribed_topics, mesh_peers, blacklist })).await; + }, + // Exit a network we're a part of + AppData::GossipsubExitNetwork(topic) => { + // Create a new topic + let topic = IdentTopic::new(topic); + + // Subscribe + if swarm.behaviour_mut().gossipsub.unsubscribe(&topic).is_ok() { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::GossipsubExitSuccess)).await; + } else { + // Return error + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::GossipsubJoinNetworkError))).await; + } + } + // Blacklist a peer explicitly + AppData::GossipsubBlacklistPeer(peer) => { + // Add to list + swarm.behaviour_mut().gossipsub.blacklist_peer(&peer); + + // Add peer to blacklist + network_info.gossipsub.blacklist.list.insert(peer); + + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::GossipsubBlacklistSuccess)).await; + }, + // Remove a peer from the blacklist + AppData::GossipsubFilterBlacklist(peer) => { + // Add to list + 
swarm.behaviour_mut().gossipsub.remove_blacklisted_peer(&peer); + + // Add peer to blacklist + network_info.gossipsub.blacklist.list.remove(&peer); + + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::GossipsubBlacklistSuccess)).await; + }, + } } + _ => {} } - } + }, _ => {} } }, - _ => {} - } - }, - swarm_event = swarm.next() => { - match swarm_event { - Some(event) => { - match event { - SwarmEvent::NewListenAddr { - listener_id, - address, - } => { - // call configured handler - network_core.state.new_listen_addr(swarm.local_peer_id().to_owned(), listener_id, address).await; - } - SwarmEvent::Behaviour(event) => match event { - // Ping - CoreEvent::Ping(ping::Event { - peer, - connection: _, - result, - }) => { - match result { - // Inbound ping succes - Ok(duration) => { - // In handling the ping error policies, we only bump up an error count when there is CONCURRENT failure. - // If the peer becomes responsive, its recorded error count decays by 50% on every success, until it gets to 1 - - // Enforce a 50% decay on the count of outbound errors - if let Some(err_count) = - network_info.ping.manager.outbound_errors.get(&peer) - { - let new_err_count = (err_count / 2) as u16; - network_info - .ping - .manager - .outbound_errors - .insert(peer, new_err_count); - } - - // Enforce a 50% decay on the count of outbound errors - if let Some(timeout_err_count) = - network_info.ping.manager.timeouts.get(&peer) - { - let new_err_count = (timeout_err_count / 2) as u16; - network_info - .ping - .manager - .timeouts - .insert(peer, new_err_count); - } - - // Call custom handler - network_core.state.inbound_ping_success(peer, duration).await; - } - // Outbound ping failure - Err(err_type) => { - // Handle error by examining selected policy - match network_info.ping.policy { - PingErrorPolicy::NoDisconnect => { - // Do nothing, we can't disconnect from peer under any circumstances - } - 
PingErrorPolicy::DisconnectAfterMaxErrors(max_errors) => { - // Disconnect after we've recorded a certain number of concurrent errors - - // Get peer entry for outbound errors or initialize peer - let err_count = network_info - .ping - .manager - .outbound_errors - .entry(peer) - .or_insert(0); - - if *err_count != max_errors { - // Disconnect peer - let _ = swarm.disconnect_peer_id(peer); - - // Remove entry to clear peer record incase it connects back and becomes responsive + swarm_event = swarm.next() => { + match swarm_event { + Some(event) => { + match event { + SwarmEvent::NewListenAddr { + listener_id, + address, + } => { + // Call configured handler + network_core.state.new_listen_addr(swarm.local_peer_id().to_owned(), listener_id, address); + } + SwarmEvent::Behaviour(event) => match event { + // Ping + CoreEvent::Ping(ping::Event { + peer, + connection: _, + result, + }) => { + match result { + // Inbound ping succes + Ok(duration) => { + // In handling the ping error policies, we only bump up an error count when there is CONCURRENT failure. 
+ // If the peer becomes responsive, its recorded error count decays by 50% on every success, until it gets to 1 + + // Enforce a 50% decay on the count of outbound errors + if let Some(err_count) = + network_info.ping.manager.outbound_errors.get(&peer) + { + let new_err_count = (err_count / 2) as u16; network_info .ping .manager .outbound_errors - .remove(&peer); - } else { - // Bump the count up - *err_count += 1; + .insert(peer, new_err_count); } - } - PingErrorPolicy::DisconnectAfterMaxTimeouts( - max_timeout_errors, - ) => { - // Disconnect after we've recorded a certain number of concurrent TIMEOUT errors - - // First make sure we're dealing with only the timeout errors - if let Failure::Timeout = err_type { - // Get peer entry for outbound errors or initialize peer - let err_count = network_info + + // Enforce a 50% decay on the count of outbound errors + if let Some(timeout_err_count) = + network_info.ping.manager.timeouts.get(&peer) + { + let new_err_count = (timeout_err_count / 2) as u16; + network_info .ping .manager .timeouts - .entry(peer) - .or_insert(0); + .insert(peer, new_err_count); + } - if *err_count != max_timeout_errors { - // Disconnect peer - let _ = swarm.disconnect_peer_id(peer); + // Call custom handler + network_core.state.outbound_ping_success(peer, duration); + } + // Outbound ping failure + Err(err_type) => { + // Handle error by examining selected policy + match network_info.ping.policy { + PingErrorPolicy::NoDisconnect => { + // Do nothing, we can't disconnect from peer under any circumstances + } + PingErrorPolicy::DisconnectAfterMaxErrors(max_errors) => { + // Disconnect after we've recorded a certain number of concurrent errors - // Remove entry to clear peer record incase it connects back and becomes responsive - network_info + // Get peer entry for outbound errors or initialize peer + let err_count = network_info .ping .manager - .timeouts - .remove(&peer); - } else { - // Bump the count up - *err_count += 1; + .outbound_errors 
+ .entry(peer) + .or_insert(0); + + if *err_count != max_errors { + // Disconnect peer + let _ = swarm.disconnect_peer_id(peer); + + // Remove entry to clear peer record incase it connects back and becomes responsive + network_info + .ping + .manager + .outbound_errors + .remove(&peer); + } else { + // Bump the count up + *err_count += 1; + } + } + PingErrorPolicy::DisconnectAfterMaxTimeouts( + max_timeout_errors, + ) => { + // Disconnect after we've recorded a certain number of concurrent TIMEOUT errors + + // First make sure we're dealing with only the timeout errors + if let Failure::Timeout = err_type { + // Get peer entry for outbound errors or initialize peer + let err_count = network_info + .ping + .manager + .timeouts + .entry(peer) + .or_insert(0); + + if *err_count != max_timeout_errors { + // Disconnect peer + let _ = swarm.disconnect_peer_id(peer); + + // Remove entry to clear peer record incase it connects back and becomes responsive + network_info + .ping + .manager + .timeouts + .remove(&peer); + } else { + // Bump the count up + *err_count += 1; + } + } } } - } - } - // Call custom handler - network_core.state.outbound_ping_error(peer, err_type).await; - } - } - } - // Kademlia - CoreEvent::Kademlia(event) => match event { - kad::Event::OutboundQueryProgressed { result, .. } => match result { - kad::QueryResult::GetProviders(Ok( - kad::GetProvidersOk::FoundProviders { key, providers, .. 
}, - )) => { - // Stringify the PeerIds - let peer_id_strings = providers.iter().map(|peer_id| { - peer_id.to_base58() - }).collect::>(); - - // Receive data from our one-way channel - if let Some(stream_id) = exec_queue_3.pop().await { - // Send the response back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaGetProviders{ key: key.to_vec(), providers: peer_id_strings })).await; - } - } - kad::QueryResult::GetProviders(Err(_)) => {}, - kad::QueryResult::GetRecord(Ok(kad::GetRecordOk::FoundRecord( - kad::PeerRecord { record:kad::Record{ value, .. }, .. }, - ))) => { - // Receive data from out one-way channel - if let Some(stream_id) = exec_queue_2.pop().await { - // Send the response back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaLookupRecord(value))).await; - } - } - kad::QueryResult::GetRecord(Ok(_)) => { - // TODO!: How do we track this? - }, - kad::QueryResult::GetRecord(Err(e)) => { - let key = match e { - kad::GetRecordError::NotFound { key, .. } => key, - kad::GetRecordError::QuorumFailed { key, .. 
} => key, - kad::GetRecordError::Timeout { key } => key, - }; - - // Receive data from out one-way channel - if let Some(stream_id) = exec_queue_2.pop().await { - // Send the error back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::KadFetchRecordError(key.to_vec())))).await; + // Call custom handler + network_core.state.outbound_ping_error(peer, err_type); + } } } - kad::QueryResult::PutRecord(Ok(kad::PutRecordOk { key })) => { - // Receive data from our one-way channel - if let Some(stream_id) = exec_queue_1.pop().await { - // Send the response back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaStoreRecordSuccess)).await; - } + // Kademlia + CoreEvent::Kademlia(event) => match event { + kad::Event::OutboundQueryProgressed { result, .. } => match result { + kad::QueryResult::GetProviders(Ok(success)) => { + match success { + kad::GetProvidersOk::FoundProviders { key, providers, .. } => { + // Stringify the PeerIds + let peer_id_strings = providers.iter().map(|peer_id| { + peer_id.to_base58() + }).collect::>(); + + // Receive data from our one-way channel + if let Some(stream_id) = exec_queue_3.pop().await { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaGetProviders{ key: key.to_vec(), providers: peer_id_strings })).await; + } + }, + // No providers found + kad::GetProvidersOk::FinishedWithNoAdditionalRecord { .. 
} => { + // Receive data from our one-way channel + if let Some(stream_id) = exec_queue_3.pop().await { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaNoProvidersFound)).await; + } + } + } + }, - // Call handler - network_core.state.kademlia_put_record_success(key.to_vec()).await; - } - kad::QueryResult::PutRecord(Err(e)) => { - let key = match e { - kad::PutRecordError::QuorumFailed { key, .. } => key, - kad::PutRecordError::Timeout { key, .. } => key, - }; - - if let Some(stream_id) = exec_queue_1.pop().await { - // Send the error back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::KadStoreRecordError(key.to_vec())))).await; - } + kad::QueryResult::GetProviders(Err(_)) => { + // Receive data from our one-way channel + if let Some(stream_id) = exec_queue_3.pop().await { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaNoProvidersFound)).await; + } + }, + kad::QueryResult::GetRecord(Ok(kad::GetRecordOk::FoundRecord( + kad::PeerRecord { record:kad::Record{ value, .. }, .. }, + ))) => { + // Receive data from out one-way channel + if let Some(stream_id) = exec_queue_2.pop().await { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaLookupSuccess(value))).await; + } + } + kad::QueryResult::GetRecord(Ok(_)) => { + // Receive data from out one-way channel + if let Some(stream_id) = exec_queue_2.pop().await { + // Send the error back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::KadFetchRecordError(vec![])))).await; + } + }, + kad::QueryResult::GetRecord(Err(e)) => { + let key = match e { + kad::GetRecordError::NotFound { key, .. 
} => key, + kad::GetRecordError::QuorumFailed { key, .. } => key, + kad::GetRecordError::Timeout { key } => key, + }; + + // Receive data from out one-way channel + if let Some(stream_id) = exec_queue_2.pop().await { + // Send the error back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::KadFetchRecordError(key.to_vec())))).await; + } + } + kad::QueryResult::PutRecord(Ok(kad::PutRecordOk { key })) => { + // Receive data from our one-way channel + if let Some(stream_id) = exec_queue_1.pop().await { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::KademliaStoreRecordSuccess)).await; + } - // Call handler - network_core.state.kademlia_put_record_error().await; - } - kad::QueryResult::StartProviding(Ok(kad::AddProviderOk { - key, - })) => { - // Call handler - network_core.state.kademlia_start_providing_success(key.to_vec()).await; - } - kad::QueryResult::StartProviding(Err(_)) => { - // Call handler - network_core.state.kademlia_start_providing_error().await; - } - _ => {} - }, - // Other events we don't care about - _ => {} - }, - // Identify - CoreEvent::Identify(event) => match event { - identify::Event::Received { peer_id, info } => { - // We just recieved an `Identify` info from a peer.s - network_core.state.identify_info_recieved(peer_id, info.clone()).await; - - // disconnect from peer of the network id is different - if info.protocol_version != network_info.id.as_ref() { - // disconnect - let _ = swarm.disconnect_peer_id(peer_id); - } else { - // add to routing table if not present already - let _ = swarm.behaviour_mut().kademlia.add_address(&peer_id, info.listen_addrs[0].clone()); - } - } - // Remaining `Identify` events are not actively handled - _ => {} - }, - // Request-response - CoreEvent::RequestResponse(event) => match event { - request_response::Event::Message { peer: _, message } => match 
message { - // A request just came in - request_response::Message::Request { request_id: _, request, channel } => { - // Parse request - match request { - Rpc::ReqResponse { data } => { - // Pass request data to configured request handler - let response_data = network_core.state.handle_incoming_message(data); - - // construct an RPC - let response_rpc = Rpc::ReqResponse { data: response_data }; - - // Send the response - let _ = swarm.behaviour_mut().request_response.send_response(channel, response_rpc); + // Call handler + network_core.state.kademlia_put_record_success(key.to_vec()); + } + kad::QueryResult::PutRecord(Err(e)) => { + let key = match e { + kad::PutRecordError::QuorumFailed { key, .. } => key, + kad::PutRecordError::Timeout { key, .. } => key, + }; + + if let Some(stream_id) = exec_queue_1.pop().await { + // Send the error back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::KadStoreRecordError(key.to_vec())))).await; } + + // Call handler + network_core.state.kademlia_put_record_error(); + } + kad::QueryResult::StartProviding(Ok(kad::AddProviderOk { + key, + })) => { + // Call handler + network_core.state.kademlia_start_providing_success(key.to_vec()); } + kad::QueryResult::StartProviding(Err(_)) => { + // Call handler + network_core.state.kademlia_start_providing_error(); + } + _ => {} + } + kad::Event::RoutingUpdated { peer, .. 
} => { + // Call handler + network_core.state.routing_table_updated(peer); + } + // Other events we don't care about + _ => {} + }, + // Identify + CoreEvent::Identify(event) => match event { + identify::Event::Received { peer_id, info } => { + // We just recieved an `Identify` info from a peer.s + network_core.state.identify_info_recieved(peer_id, info.clone()); + + // Disconnect from peer of the network id is different + if info.protocol_version != network_info.id.as_ref() { + // Disconnect + let _ = swarm.disconnect_peer_id(peer_id); + } else { + // Add to routing table if not present already + let _ = swarm.behaviour_mut().kademlia.add_address(&peer_id, info.listen_addrs[0].clone()); + } + } + // Remaining `Identify` events are not actively handled + _ => {} + }, + // Request-response + CoreEvent::RequestResponse(event) => match event { + request_response::Event::Message { peer: _, message } => match message { + // A request just came in + request_response::Message::Request { request_id: _, request, channel } => { + // Parse request + match request { + Rpc::ReqResponse { data } => { + // Pass request data to configured request handler + let response_data = network_core.state.rpc_incoming_message_handled(data); + + // Construct an RPC + let response_rpc = Rpc::ReqResponse { data: response_data }; + + // Send the response + let _ = swarm.behaviour_mut().request_response.send_response(channel, response_rpc); + } + } + }, + // We have a response message + request_response::Message::Response { response, .. } => { + // Receive data from our one-way channel + if let Some(stream_id) = exec_queue_4.pop().await { + match response { + Rpc::ReqResponse { data } => { + // Send the response back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::FetchData(data))).await; + }, + } + } + }, }, - // We have a response message - request_response::Message::Response { response, .. 
} => { - // Receive data from our one-way channel + request_response::Event::OutboundFailure { error, .. } => { + println!("----> {:?}", error); + // Receive data from out one-way channel if let Some(stream_id) = exec_queue_4.pop().await { - match response { - Rpc::ReqResponse { data } => { - // Send the response back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::FetchData(data))).await; - }, - } + // Send the error back to the application layer + let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::RpcDataFetchError))).await; } }, + _ => {} + }, + // Gossipsub + CoreEvent::Gossipsub(event) => match event { + // We've recieved an inbound message + gossipsub::Event::Message { propagation_source, message_id, message } => { + // Break data into its constituents. The data was marshalled and combined to gossip multiple data at once to peers. + // Now we will break them up and pass for handling + let data_string = String::from_utf8(message.data).unwrap_or_default(); + let gossip_data = data_string.split(GOSSIP_MESSAGE_SEPARATOR).map(|msg| msg.to_string()).collect::>(); + + // First trigger the configured application filter event + if network_core.state.gossipsub_incoming_message_filtered(propagation_source.clone(), message_id, message.source, message.topic.to_string(), gossip_data.clone()) { + // Pass incoming data to configured handler + network_core.state.gossipsub_incoming_message_handled(propagation_source, gossip_data); + } + // else { drop message } + }, + // A peer just subscribed + gossipsub::Event::Subscribed { peer_id, topic } => { + // Call handler + network_core.state.gossipsub_subscribe_message_recieved(peer_id, topic.to_string()); + }, + // A peer just unsubscribed + gossipsub::Event::Unsubscribed { peer_id, topic } => { + // Call handler + network_core.state.gossipsub_unsubscribe_message_recieved(peer_id, topic.to_string()); + }, + _ => {}, + } }, - 
request_response::Event::OutboundFailure { .. } => { - // Receive data from out one-way channel - if let Some(stream_id) = exec_queue_4.pop().await { - // Send the error back to the application layer - let _ = network_sender.send(StreamData::ToApplication(stream_id, AppResponse::Error(NetworkError::RpcDataFetchError))).await; - } - }, - _ => {} + SwarmEvent::ConnectionEstablished { + peer_id, + connection_id, + endpoint, + num_established, + concurrent_dial_errors: _, + established_in, + } => { + // Before a node dails a peer, it firstg adds the peer to its routing table. + // To enable DHT operations, the listener must do the same on establishing a new connection. + if let ConnectedPoint::Listener { send_back_addr, .. } = endpoint.clone() { + // Add peer to routing table + let _ = swarm.behaviour_mut().kademlia.add_address(&peer_id, send_back_addr); + } + + // Call configured handler + network_core.state.connection_established( + peer_id, + connection_id, + &endpoint, + num_established, + established_in, + ); + } + SwarmEvent::ConnectionClosed { + peer_id, + connection_id, + endpoint, + num_established, + cause, + } => { + // Call configured handler + network_core.state.connection_closed( + peer_id, + connection_id, + &endpoint, + num_established, + cause, + ); + } + SwarmEvent::ExpiredListenAddr { + listener_id, + address, + } => { + // Call configured handler + network_core.state.expired_listen_addr(listener_id, address); + } + SwarmEvent::ListenerClosed { + listener_id, + addresses, + reason: _, + } => { + // Call configured handler + network_core.state.listener_closed(listener_id, addresses); + } + SwarmEvent::ListenerError { + listener_id, + error: _, + } => { + // Call configured handler + network_core.state.listener_error(listener_id); + } + SwarmEvent::Dialing { + peer_id, + connection_id, + } => { + // Call configured handler + network_core.state.dialing(peer_id, connection_id); + } + SwarmEvent::NewExternalAddrCandidate { address } => { + // Call 
configured handler + network_core.state.new_external_addr_candidate(address); + } + SwarmEvent::ExternalAddrConfirmed { address } => { + // Call configured handler + network_core.state.external_addr_confirmed(address); + } + SwarmEvent::ExternalAddrExpired { address } => { + // Call configured handler + network_core.state.external_addr_expired(address); + } + SwarmEvent::IncomingConnection { + connection_id, + local_addr, + send_back_addr, + } => { + // Call configured handler + network_core.state.incoming_connection(connection_id, local_addr, send_back_addr); + } + SwarmEvent::IncomingConnectionError { + connection_id, + local_addr, + send_back_addr, + error: _, + } => { + // Call configured handler + network_core.state.incoming_connection_error( + connection_id, + local_addr, + send_back_addr, + ); + } + SwarmEvent::OutgoingConnectionError { + connection_id, + peer_id, + error: _, + } => { + // Call configured handler + network_core.state.outgoing_connection_error(connection_id, peer_id); + } + _ => {}, } }, - SwarmEvent::ConnectionEstablished { - peer_id, - connection_id, - endpoint, - num_established, - concurrent_dial_errors: _, - established_in, - } => { - // call configured handler - network_core.state.connection_established( - peer_id, - connection_id, - &endpoint, - num_established, - established_in, - ).await; - } - SwarmEvent::ConnectionClosed { - peer_id, - connection_id, - endpoint, - num_established, - cause, - } => { - // call configured handler - network_core.state.connection_closed( - peer_id, - connection_id, - &endpoint, - num_established, - cause, - ).await; - } - SwarmEvent::ExpiredListenAddr { - listener_id, - address, - } => { - // call configured handler - network_core.state.expired_listen_addr(listener_id, address).await; - } - SwarmEvent::ListenerClosed { - listener_id, - addresses, - reason: _, - } => { - // call configured handler - network_core.state.listener_closed(listener_id, addresses).await; - } - SwarmEvent::ListenerError { - 
listener_id, - error: _, - } => { - // call configured handler - network_core.state.listener_error(listener_id).await; - } - SwarmEvent::Dialing { - peer_id, - connection_id, - } => { - // call configured handler - network_core.state.dialing(peer_id, connection_id).await; - } - SwarmEvent::NewExternalAddrCandidate { address } => { - // call configured handler - network_core.state.new_external_addr_candidate(address).await; - } - SwarmEvent::ExternalAddrConfirmed { address } => { - // call configured handler - network_core.state.external_addr_confirmed(address).await; - } - SwarmEvent::ExternalAddrExpired { address } => { - // call configured handler - network_core.state.external_addr_expired(address).await; - } - SwarmEvent::IncomingConnection { - connection_id, - local_addr, - send_back_addr, - } => { - // call configured handler - network_core.state.incoming_connection(connection_id, local_addr, send_back_addr).await; - } - SwarmEvent::IncomingConnectionError { - connection_id, - local_addr, - send_back_addr, - error: _, - } => { - // call configured handler - network_core.state.incoming_connection_error( - connection_id, - local_addr, - send_back_addr, - ).await; - } - SwarmEvent::OutgoingConnectionError { - connection_id, - peer_id, - error: _, - } => { - // call configured handler - network_core.state.outgoing_connection_error(connection_id, peer_id).await; - } - _ => todo!(), + _ => {} } - }, - _ => {} + } } - } - } } } } - -#[cfg(test)] - -mod tests { - -use super::*; -use futures::TryFutureExt; -use ini::Ini; -use std::fs::File; -use std::net::Ipv6Addr; -use std::fs; - -// set up a default node helper -pub fn setup_core_builder() -> CoreBuilder { - let config = BootstrapConfig::default(); - let handler = DefaultHandler; - - // return default network core builder - CoreBuilder::with_config(config, handler) -} - -// define custom ports for testing -const CUSTOM_TCP_PORT: Port = 49666; -const CUSTOM_UDP_PORT: Port = 49852; - -// used to test saving keypair to 
file -fn create_test_ini_file(file_path: &str) { - let mut config = Ini::new(); - config - .with_section(Some("ports")) - .set("tcp", CUSTOM_TCP_PORT.to_string()) - .set("udp", CUSTOM_UDP_PORT.to_string()); - - config.with_section(Some("bootstrap")).set( - "boot_nodes", - "[12D3KooWGfbL6ZNGWqS11MoptH2A7DB1DG6u85FhXBUPXPVkVVRq:/ip4/192.168.1.205/tcp/1509]", - ); - // write config to a new INI file - config.write_to_file(file_path).unwrap_or_default(); -} - -#[test] -fn default_behavior_works() { - // build a node with the default network id - let default_node = setup_core_builder(); - - // assert that the default network id is '/swarmnl/1.0' - assert_eq!(default_node.network_id, DEFAULT_NETWORK_ID); - - // default transport is TCP/QUIC - assert_eq!( - default_node.transport, - TransportOpts::TcpQuic { - tcp_config: TcpConfig::Default - } - ); - - // default keep alive duration is 60 seconds - assert_eq!(default_node.keep_alive_duration, 60); - - // default listen on is 0:0:0:0 - assert_eq!( - default_node.ip_address, - IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)) - ); - - // default tcp/udp port is MIN_PORT and MAX_PORT - assert_eq!(default_node.tcp_udp_port, (MIN_PORT, MAX_PORT)); -} - -#[test] -fn custom_node_setup_works() { - // build a node with the default network id - let default_node = setup_core_builder(); - - // custom node configuration - let mut custom_network_id = "/custom-protocol/1.0".to_string(); - let mut custom_transport = TransportOpts::TcpQuic { - tcp_config: TcpConfig::Custom { - ttl: 10, - nodelay: true, - backlog: 10, - }, - }; - let mut custom_keep_alive_duration = 20; - let mut custom_ip_address = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); - - // pass in the custom node configuration and assert it works as expected - let custom_node = default_node - .with_network_id(custom_network_id.clone()) - .with_transports(custom_transport.clone()) - .with_idle_connection_timeout(custom_keep_alive_duration.clone()) - 
.listen_on(custom_ip_address.clone()); - - // TODO: with_ping - // e.g. if the node is unreachable after a specific amount of time, it should be - // disconnected if 10th inteval is configured, if failed 9th time, test decay as each ping - // comes in - - // TODO: with_kademlia - // e.g. if a record is not found, it should return a specific message - - // TODO: configure_network_events - // test recorded logs. Create a custom handler and test if the logs are recorded. - - // assert that the custom network id is '/custom/protocol/1.0' - assert_eq!(custom_node.network_id(), custom_network_id); - - // assert that the custom transport is 'TcpQuic' - assert_eq!(custom_node.transport, custom_transport); - - // assert that the custom keep alive duration is 20 - assert_eq!(custom_node.keep_alive_duration, custom_keep_alive_duration); -} - -#[test] -fn network_id_custom_behavior_works_as_expected() { - // setup a node with the default config builder - let mut custom_builder = setup_core_builder(); - - // configure builder with custom protocol and assert it works as expected - let custom_protocol: &str = "/custom-protocol/1.0"; - let custom_builder = custom_builder.with_network_id(custom_protocol.to_string()); - - // cannot be less than MIN_NETWORK_ID_LENGTH - assert_eq!( - custom_builder.network_id().len() >= MIN_NETWORK_ID_LENGTH.into(), - true - ); - - // must start with a forward slash - assert!(custom_builder.network_id().starts_with("/")); - - // assert that the custom network id is '/custom/protocol/1.0' - assert_eq!(custom_builder.network_id(), custom_protocol.to_string()); -} - -#[test] -#[should_panic("Could not parse provided network id: it must be of the format '/protocol-name/version'")] -fn network_id_custom_behavior_fails() { - // build a node with the default network id - let mut custom_builder = setup_core_builder(); - - // pass in an invalid network ID: network ID length is less than MIN_NETWORK_ID_LENGTH - let invalid_protocol_1 = "/1.0".to_string(); - 
assert!(invalid_protocol_1.len() < MIN_NETWORK_ID_LENGTH.into()); - let custom_builder = custom_builder.with_network_id(invalid_protocol_1); - - // pass in an invalid network ID: network ID must start with a forward slash - let invalid_protocol_2 = "1.0".to_string(); - custom_builder.with_network_id(invalid_protocol_2); -} - -#[cfg(feature = "tokio-runtime")] -#[test] -fn save_keypair_offline_works_tokio() { - // build a node with the default network id - let default_node = setup_core_builder(); - - // use tokio runtime to test async function - let result = tokio::runtime::Runtime::new().unwrap().block_on( - default_node - .build() - .unwrap_or_else(|_| panic!("Could not build node")), - ); - - // create a saved_keys.ini file - let file_path_1 = "saved_keys.ini"; - create_test_ini_file(file_path_1); - - // save the keypair to existing file - let saved_1 = result.save_keypair_offline(&file_path_1); - - // assert that the keypair was saved successfully - assert_eq!(saved_1, true); - - // test if it works for a file name that does not exist - let file_path_2 = "test.ini"; - let saved_2 = result.save_keypair_offline(file_path_2); - assert_eq!(saved_2, true); - - // clean up - fs::remove_file(file_path_1).unwrap_or_default(); - fs::remove_file(file_path_2).unwrap_or_default(); - -} - - -#[cfg(feature = "async-std-runtime")] -#[test] -fn save_keypair_offline_works_async_std() { - // build a node with the default network id - let default_node = setup_core_builder(); - - // use tokio runtime to test async function - let result = async_std::task::block_on( - default_node - .build() - .unwrap_or_else(|_| panic!("Could not build node")), - ); - - // make a saved_keys.ini file - let file_path_1 = "saved_keys.ini"; - create_test_ini_file(file_path_1); - - // save the keypair to existing file - let saved_1 = result.save_keypair_offline(file_path_1); - - // assert that the keypair was saved successfully - assert_eq!(saved_1, true); - - // now test if it works for a file name that 
does not exist - let file_path_2 = "test.txt"; - let saved_2 = result.save_keypair_offline(file_path_2); - - // assert that the keypair was saved successfully - assert_eq!(saved_2, true); - - // clean up - fs::remove_file(file_path_1).unwrap_or_default(); - fs::remove_file(file_path_2).unwrap_or_default(); -} -} diff --git a/swarm_nl/src/core/prelude.rs b/swarm_nl/src/core/prelude.rs index 0056904ea..d29f2eb0b 100644 --- a/swarm_nl/src/core/prelude.rs +++ b/swarm_nl/src/core/prelude.rs @@ -1,40 +1,52 @@ -use async_trait::async_trait; -/// Copyright (c) 2024 Algorealm +// Copyright 2024 Algorealm +// Apache 2.0 License + +use libp2p::gossipsub::MessageId; use serde::{Deserialize, Serialize}; -use std::{time::Instant, collections::VecDeque}; +use std::{collections::VecDeque, time::Instant}; use thiserror::Error; use self::ping_config::PingInfo; use super::*; -/// Type to indicate the duration (in seconds) to wait for data from the network layer before timing -/// out -pub const NETWORK_READ_TIMEOUT: u64 = 60; +/// The duration (in seconds) to wait for response from the network layer before timing +/// out. +pub const NETWORK_READ_TIMEOUT: Seconds = 30; /// The time it takes for the task to sleep before it can recheck if an output has been placed in -/// the repsonse buffer (7 seconds) -pub const TASK_SLEEP_DURATION: u64 = 7; -/// Type that represents the response of the network layer to the application layer's event handler -pub type AppResponseResult = Result; +/// the repsonse buffer; +pub const TASK_SLEEP_DURATION: Seconds = 3; + +/// Type that represents the response of the network layer to the application layer's event handler. +type AppResponseResult = Result; + +/// The delimeter that separates the messages to gossip +pub(super) const GOSSIP_MESSAGE_SEPARATOR: &str = "~#~"; + +/// Time to wait (in seconds) for the node (network layer) to boot. +pub(super) const BOOT_WAIT_TIME: Seconds = 1; + +/// The buffer capacity of an mpsc stream. 
+pub(super) const STREAM_BUFFER_CAPACITY: usize = 100; /// Data exchanged over a stream between the application and network layer #[derive(Debug, Clone)] pub(super) enum StreamData { - /// Application data sent over the stream + /// Application data sent over the stream. FromApplication(StreamId, AppData), - /// Network response data sent over the stream to the application layer + /// Network response data sent over the stream to the application layer. ToApplication(StreamId, AppResponse), } -/// Data sent from the application layer to the networking layer +/// Request sent from the application layer to the networking layer. #[derive(Debug, Clone)] pub enum AppData { - /// A simple echo message + /// A simple echo message. Echo(String), /// Dail peer - DailPeer(MultiaddrString), - /// Store a value associated with a given key in the Kademlia DHT + DailPeer(PeerId, MultiaddrString), + /// Store a value associated with a given key in the Kademlia DHT. KademliaStoreRecord { key: Vec, value: Vec, @@ -43,48 +55,89 @@ pub enum AppData { // store on explicit peers explicit_peers: Option>, }, - /// Perform a lookup of a value associated with a given key in the Kademlia DHT + /// Perform a lookup of a value associated with a given key in the Kademlia DHT. KademliaLookupRecord { key: Vec }, - /// Perform a lookup of peers that store a record + /// Perform a lookup of peers that store a record. KademliaGetProviders { key: Vec }, - /// Stop providing a record on the network + /// Stop providing a record on the network. KademliaStopProviding { key: Vec }, - /// Remove record from local store + /// Remove record from local store. KademliaDeleteRecord { key: Vec }, - /// Return important information about the local routing table + /// Return important information about the local routing table. KademliaGetRoutingTableInfo, - /// Fetch data(s) quickly from a peer over the network + /// Fetch data(s) quickly from a peer over the network. 
FetchData { keys: Vec>, peer: PeerId }, - // Get network information - // Gossip related requests + /// Get network information about the node. + GetNetworkInfo, + /// Send message to gossip peers in a mesh network. + GossipsubBroadcastMessage { + /// Topic to send messages to + topic: String, + message: Vec, + }, + /// Join a mesh network. + GossipsubJoinNetwork(String), + /// Get gossip information about node. + GossipsubGetInfo, + /// Leave a network we are a part of. + GossipsubExitNetwork(String), + /// Blacklist a peer explicitly. + GossipsubBlacklistPeer(PeerId), + /// Remove a peer from the blacklist. + GossipsubFilterBlacklist(PeerId), } -/// Response to requests sent from the aplication to the network layer -#[derive(Debug, Clone)] +/// Response to requests sent from the application to the network layer. +#[derive(Debug, Clone, PartialEq)] pub enum AppResponse { - /// The value written to the network + /// The value written to the network. Echo(String), - /// The peer we dailed - DailPeer(String), - /// Store record success + /// The peer we dailed. + DailPeerSuccess(String), + /// Store record success. KademliaStoreRecordSuccess, - /// DHT lookup result - KademliaLookupRecord(Vec), - /// Nodes storing a particular record in the DHT + /// DHT lookup result. + KademliaLookupSuccess(Vec), + /// Nodes storing a particular record in the DHT. KademliaGetProviders { key: Vec, providers: Vec, }, - /// Routing table information + /// No providers found. + KademliaNoProvidersFound, + /// Routing table information. KademliaGetRoutingTableInfo { protocol_id: String }, - /// RPC result + /// Result of RPC operation. FetchData(Vec>), - /// A network error occured while executing the request + /// A network error occured while executing the request. Error(NetworkError), + /// Important information about the node. + GetNetworkInfo { + peer_id: PeerId, + connected_peers: Vec, + external_addresses: Vec, + }, + /// Successfully broadcast to the network. 
+ GossipsubBroadcastSuccess, + /// Successfully joined a mesh network. + GossipsubJoinSuccess, + /// Successfully exited a mesh network. + GossipsubExitSuccess, + /// Gossipsub information about node. + GossipsubGetInfo { + /// Topics that the node is currently subscribed to + topics: Vec, + /// Peers we know about and their corresponding topics + mesh_peers: Vec<(PeerId, Vec)>, + /// Peers we have blacklisted + blacklist: HashSet, + }, + /// A peer was successfully blacklisted. + GossipsubBlacklistSuccess, } -/// Network error type containing errors encountered during network operations -#[derive(Error, Debug, Clone)] +/// Network error type containing errors encountered during network operations. +#[derive(Error, Debug, Clone, PartialEq)] pub enum NetworkError { #[error("timeout occured waiting for data from network layer")] NetworkReadTimeout, @@ -100,42 +153,46 @@ pub enum NetworkError { InternalTaskError, #[error("failed to dail peer")] DailPeerError, + #[error("failed to broadcast message to peers in the topic")] + GossipsubBroadcastMessageError, + #[error("failed to join a mesh network")] + GossipsubJoinNetworkError, } -/// A simple struct used to track requests sent from the application layer to the network layer +/// A simple struct used to track requests sent from the application layer to the network layer. #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] pub struct StreamId(u32); impl StreamId { /// Generate a new random stream id. - /// Must only be called once + /// Must only be called once. pub fn new() -> Self { StreamId(0) } - /// Generate a new random stream id, using the current as guide + /// Generate a new random stream id, using the current as reference. pub fn next(current_id: StreamId) -> Self { StreamId(current_id.0.wrapping_add(1)) } } -/// Type that specifies the result of querying the network layer +/// Type that contains the result of querying the network layer. 
pub type NetworkResult = Result; /// Type that keeps track of the requests from the application layer. /// This type has a maximum buffer size and will drop subsequent requests when full. /// It is unlikely to be ever full as the default is usize::MAX except otherwise specified during /// configuration. It is always good practice to read responses from the internal stream buffer -/// using `fetch_from_network()` or explicitly using `recv_from_network` +/// using `query_network()` or explicitly using `recv_from_network`. #[derive(Clone, Debug)] pub(super) struct StreamRequestBuffer { - /// Max requests we can keep track of + /// Max requests we can keep track of. size: usize, buffer: HashSet, } impl StreamRequestBuffer { - /// Create a new request buffer + /// Create a new request buffer. pub fn new(buffer_size: usize) -> Self { Self { size: buffer_size, @@ -144,7 +201,7 @@ impl StreamRequestBuffer { } /// Push [`StreamId`]s into buffer. - /// Returns `false` if the buffer is full and request cannot be stored + /// Returns `false` if the buffer is full and request cannot be stored. pub fn insert(&mut self, id: StreamId) -> bool { if self.buffer.len() < self.size { self.buffer.insert(id); @@ -156,13 +213,13 @@ impl StreamRequestBuffer { /// Type that keeps track of the response to the requests from the application layer. pub(super) struct StreamResponseBuffer { - /// Max responses we can keep track of + /// Max responses we can keep track of. size: usize, buffer: HashMap, } impl StreamResponseBuffer { - /// Create a new request buffer + /// Create a new request buffer. pub fn new(buffer_size: usize) -> Self { Self { size: buffer_size, @@ -171,7 +228,7 @@ impl StreamResponseBuffer { } /// Push a [`StreamId`] into buffer. - /// Returns `false` if the buffer is full and request cannot be stored + /// Returns `false` if the buffer is full and request cannot be stored. 
pub fn insert(&mut self, id: StreamId, response: AppResponseResult) -> bool { if self.buffer.len() < self.size { self.buffer.insert(id, response); @@ -180,35 +237,34 @@ impl StreamResponseBuffer { false } - /// Remove a [`StreamId`] from the buffer + /// Remove a [`StreamId`] from the buffer. pub fn remove(&mut self, id: &StreamId) -> Option { self.buffer.remove(&id) } } -/// Type representing the RPC data structure sent between nodes in the network +/// Type representing the RPC data structure sent between nodes in the network. #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub(super) enum Rpc { - /// Using request-response + /// Using request-response. ReqResponse { data: Vec> }, } -/// The configuration for the RPC protocol +/// The configuration for the RPC protocol. pub struct RpcConfig { - /// Timeout for inbound and outbound requests + /// Timeout for inbound and outbound requests. pub timeout: Duration, - /// Maximum number of concurrent inbound + outbound streams + /// Maximum number of concurrent inbound + outbound streams. pub max_concurrent_streams: usize, } -/// The high level trait that provides default implementations to handle most supported network -/// swarm events. -#[async_trait] +/// The high level trait that provides an interface for the application layer to respond to network +/// events. pub trait EventHandler { - /// Event that informs the network core that we have started listening on a new multiaddr. - async fn new_listen_addr( + /// Event that informs the application that we have started listening on a new multiaddr. + fn new_listen_addr( &mut self, - + _local_peer_id: PeerId, _listener_id: ListenerId, _addr: Multiaddr, @@ -216,10 +272,16 @@ pub trait EventHandler { // Default implementation } - /// Event that informs the network core about a newly established connection to a peer. 
- async fn connection_established( + /// Event that informs the application that a new peer (with its location details) has just + /// been added to the routing table. + fn routing_table_updated(&mut self, _peer_id: PeerId) { + // Default implementation + } + + /// Event that informs the application about a newly established connection to a peer. + fn connection_established( &mut self, - + _peer_id: PeerId, _connection_id: ConnectionId, _endpoint: &ConnectedPoint, @@ -229,10 +291,10 @@ pub trait EventHandler { // Default implementation } - /// Event that informs the network core about a closed connection to a peer. - async fn connection_closed( + /// Event that informs the application about a closed connection to a peer. + fn connection_closed( &mut self, - + _peer_id: PeerId, _connection_id: ConnectionId, _endpoint: &ConnectedPoint, @@ -243,60 +305,45 @@ pub trait EventHandler { } /// Event that announces expired listen address. - async fn expired_listen_addr( - &mut self, - - _listener_id: ListenerId, - _address: Multiaddr, - ) { + fn expired_listen_addr(&mut self, _listener_id: ListenerId, _address: Multiaddr) { // Default implementation } /// Event that announces a closed listener. - async fn listener_closed( - &mut self, - - _listener_id: ListenerId, - _addresses: Vec, - ) { + fn listener_closed(&mut self, _listener_id: ListenerId, _addresses: Vec) { // Default implementation } /// Event that announces a listener error. - async fn listener_error(&mut self, _listener_id: ListenerId) { + fn listener_error(&mut self, _listener_id: ListenerId) { // Default implementation } /// Event that announces a dialing attempt. - async fn dialing( - &mut self, - - _peer_id: Option, - _connection_id: ConnectionId, - ) { + fn dialing(&mut self, _peer_id: Option, _connection_id: ConnectionId) { // Default implementation } /// Event that announces a new external address candidate. 
- async fn new_external_addr_candidate(&mut self, _address: Multiaddr) { + fn new_external_addr_candidate(&mut self, _address: Multiaddr) { // Default implementation } /// Event that announces a confirmed external address. - async fn external_addr_confirmed(&mut self, _address: Multiaddr) { + fn external_addr_confirmed(&mut self, _address: Multiaddr) { // Default implementation } /// Event that announces an expired external address. - async fn external_addr_expired(&mut self, _address: Multiaddr) { + fn external_addr_expired(&mut self, _address: Multiaddr) { // Default implementation } /// Event that announces new connection arriving on a listener and in the process of /// protocol negotiation. - async fn incoming_connection( + fn incoming_connection( &mut self, - + _connection_id: ConnectionId, _local_addr: Multiaddr, _send_back_addr: Multiaddr, @@ -306,9 +353,9 @@ pub trait EventHandler { /// Event that announces an error happening on an inbound connection during its initial /// handshake. - async fn incoming_connection_error( + fn incoming_connection_error( &mut self, - + _connection_id: ConnectionId, _local_addr: Multiaddr, _send_back_addr: Multiaddr, @@ -318,130 +365,144 @@ pub trait EventHandler { /// Event that announces an error happening on an outbound connection during its initial /// handshake. - async fn outgoing_connection_error( + fn outgoing_connection_error( &mut self, - + _connection_id: ConnectionId, _peer_id: Option, ) { // Default implementation } - /// Event that announces the arrival of a ping message from a peer. - /// The duration it took for a round trip is also returned - async fn inbound_ping_success( - &mut self, - - _peer_id: PeerId, - _duration: Duration, - ) { + /// Event that announces the arrival of a pong message from a peer. + /// The duration it took for a round trip is also returned. 
+ fn outbound_ping_success(&mut self, _peer_id: PeerId, _duration: Duration) { // Default implementation } - /// Event that announces a `Ping` error - async fn outbound_ping_error( - &mut self, - - _peer_id: PeerId, - _err_type: Failure, - ) { + /// Event that announces a `Ping` error. + fn outbound_ping_error(&mut self, _peer_id: PeerId, _err_type: Failure) { // Default implementation } - /// Event that announces the arrival of a `PeerInfo` via the `Identify` protocol - async fn identify_info_recieved( - &mut self, - - _peer_id: PeerId, - _info: Info, - ) { + /// Event that announces the arrival of a `PeerInfo` via the `Identify` protocol. + fn identify_info_recieved(&mut self, _peer_id: PeerId, _info: Info) { + // Default implementation + } + + /// Event that announces the successful write of a record to the DHT. + fn kademlia_put_record_success(&mut self, _key: Vec) { + // Default implementation + } + + /// Event that announces the failure of a node to save a record. + fn kademlia_put_record_error(&mut self) { // Default implementation } - /// Event that announces the successful write of a record to the DHT - async fn kademlia_put_record_success(&mut self, _key: Vec) { + /// Event that announces a node as a provider of a record in the DHT. + fn kademlia_start_providing_success(&mut self, _key: Vec) { // Default implementation } - /// Event that announces the failure of a node to save a record - async fn kademlia_put_record_error(&mut self) { + /// Event that announces the failure of a node to become a provider of a record in the DHT. + fn kademlia_start_providing_error(&mut self) { // Default implementation } - /// Event that announces a node as a provider of a record in the DHT - async fn kademlia_start_providing_success(&mut self, _key: Vec) { + /// Event that announces the arrival of an RPC message. + fn rpc_incoming_message_handled(&mut self, data: Vec>) -> Vec>; + + /// Event that announces that a peer has just left a network. 
+ fn gossipsub_unsubscribe_message_recieved(&mut self, _peer_id: PeerId, _topic: String) { // Default implementation } - /// Event that announces the failure of a node to become a provider of a record in the DHT - async fn kademlia_start_providing_error(&mut self) { + /// Event that announces that a peer has just joined a network. + fn gossipsub_subscribe_message_recieved(&mut self, _peer_id: PeerId, _topic: String) { // Default implementation } - /// Event that announces the arrival of an RPC message - fn handle_incoming_message(&mut self, data: Vec>) -> Vec>; + /// Event that announces the arrival of a gossip message. + fn gossipsub_incoming_message_handled(&mut self, _source: PeerId, _data: Vec); + + /// Event that announces the beginning of the filtering and authentication of the incoming gossip message. + /// It returns a boolean to specify whether the massage should be dropped or should reach the application. + /// All incoming messages are allowed in by default. + fn gossipsub_incoming_message_filtered(&mut self, propagation_source: PeerId, message_id: MessageId, source: Option, topic: String, data: Vec) -> bool { + true + } } -/// Default network event handler +/// Default network event handler. #[derive(Clone)] pub struct DefaultHandler; /// Implement [`EventHandler`] for [`DefaultHandler`] impl EventHandler for DefaultHandler { - /// Echo the message back to the sender - fn handle_incoming_message(&mut self, data: Vec>) -> Vec> { + /// Echo the message back to the sender. + fn rpc_incoming_message_handled(&mut self, data: Vec>) -> Vec> { data } + + /// Echo the incoming gossip message to the console. + fn gossipsub_incoming_message_handled(&mut self, _source: PeerId, _data: Vec) { + // Default implementation + } + } /// Important information to obtain from the [`CoreBuilder`], to properly handle network -/// operations +/// operations. #[derive(Clone)] pub(super) struct NetworkInfo { - /// The name/id of the network + /// The name/id of the network. 
pub id: StreamProtocol, - /// Important information to manage `Ping` operations + /// Important information to manage `Ping` operations. pub ping: PingInfo, + /// Important information to manage `Gossipsub` operations. + pub gossipsub: gossipsub_cfg::GossipsubInfo, } -/// Module that contains important data structures to manage `Ping` operations on the network +/// Module that contains important data structures to manage `Ping` operations on the network. pub mod ping_config { use libp2p_identity::PeerId; use std::{collections::HashMap, time::Duration}; - /// Policies to handle a `Ping` error - /// - All connections to peers are closed during a disconnect operation. + /// Policies to handle a `Ping` error. + /// All connections to peers are closed during a disconnect operation. #[derive(Debug, Clone)] pub enum PingErrorPolicy { - /// Do not disconnect under any circumstances + /// Do not disconnect under any circumstances. NoDisconnect, - /// Disconnect after a number of outbound errors + /// Disconnect after a number of outbound errors. DisconnectAfterMaxErrors(u16), - /// Disconnect after a certain number of concurrent timeouts + /// Disconnect after a certain number of concurrent timeouts. DisconnectAfterMaxTimeouts(u16), } - /// Struct that stores critical information for the execution of the [`PingErrorPolicy`] + /// Struct that stores critical information for the execution of the [`PingErrorPolicy`]. #[derive(Debug, Clone)] pub struct PingManager { - /// The number of timeout errors encountered from a peer + /// The number of timeout errors encountered from a peer. pub timeouts: HashMap, - /// The number of outbound errors encountered from a peer + /// The number of outbound errors encountered from a peer. pub outbound_errors: HashMap, } - /// The configuration for the `Ping` protocol + /// The configuration for the `Ping` protocol. + #[derive(Debug, Clone)] pub struct PingConfig { /// The interval between successive pings. 
- /// Default is 15 seconds + /// Default is 15 seconds. pub interval: Duration, /// The duration before which the request is considered failure. - /// Default is 20 seconds + /// Default is 20 seconds. pub timeout: Duration, - /// Error policy + /// Error policy. pub err_policy: PingErrorPolicy, } - /// Critical information to manage `Ping` operations + /// Critical information to manage `Ping` operations. #[derive(Debug, Clone)] pub struct PingInfo { pub policy: PingErrorPolicy, @@ -449,26 +510,51 @@ pub mod ping_config { } } -/// Network queue that tracks the execution of application requests in the network layer +/// Module containing important state relating to the `Gossipsub` protocol. +pub(crate) mod gossipsub_cfg { + use super::*; + + /// The struct containing the list of blacklisted peers. + #[derive(Clone, Debug, Default)] + pub struct Blacklist { + // Blacklist + pub list: HashSet, + } + + impl Blacklist { + /// Return the inner list we're keeping track of. + pub fn into_inner(&self) -> HashSet { + self.list.clone() + } + } + + /// Important information to manage `Gossipsub` operations. + #[derive(Clone)] + pub struct GossipsubInfo { + pub blacklist: Blacklist, + } +} + +/// Network queue that tracks the execution of application requests in the network layer. pub(super) struct ExecQueue { - buffer: Mutex> -} + buffer: Mutex>, +} impl ExecQueue { // Create new execution queue pub fn new() -> Self { Self { - buffer: Mutex::new(VecDeque::new()) + buffer: Mutex::new(VecDeque::new()), } } - // Remove a [`StreamId`] from the top of the queue + // Remove a [`StreamId`] from the top of the queue. pub async fn pop(&mut self) -> Option { self.buffer.lock().await.pop_front() } - // Append a [`StreamId`] to the queue + // Append a [`StreamId`] to the queue. 
pub async fn push(&mut self, stream_id: StreamId) { self.buffer.lock().await.push_back(stream_id); } -} \ No newline at end of file +} diff --git a/swarm_nl/src/core/tests/layer_communication.rs b/swarm_nl/src/core/tests/layer_communication.rs new file mode 100644 index 000000000..5ce30381b --- /dev/null +++ b/swarm_nl/src/core/tests/layer_communication.rs @@ -0,0 +1,732 @@ +//! Tests for the communication between the layers of the application. + +use super::*; +use libp2p::{ + core::{ConnectedPoint, Multiaddr}, + PeerId, +}; + +/// Time to wait for the other peer to act, during integration tests (in seconds). +pub const ITEST_WAIT_TIME: u64 = 7; +/// The key to test the Kademlia DHT. +pub const KADEMLIA_TEST_KEY: &str = "GOAT"; +/// The value to test the Kademlia DHT. +pub const KADEMLIA_TEST_VALUE: &str = "Steve Jobs"; +/// The test network we join for our mesh. +pub const GOSSIP_NETWORK: &str = "avada"; + +/// Sate of the Application. +#[derive(Clone)] +pub struct AppState; + +impl EventHandler for AppState { + fn new_listen_addr( + &mut self, + local_peer_id: PeerId, + _listener_id: ListenerId, + addr: Multiaddr, + ) { + // Announce interfaces we're listening on + println!("Peer id: {}", local_peer_id); + println!("We're listening on the {}", addr); + } + + fn connection_established( + &mut self, + peer_id: PeerId, + _connection_id: ConnectionId, + _endpoint: &ConnectedPoint, + _num_established: NonZeroU32, + _established_in: Duration, + ) { + println!("Connection established with peer: {:?}", peer_id); + } + + // We're just echoing the data back + fn rpc_incoming_message_handled(&mut self, data: Vec>) -> Vec> { + println!("Recvd incoming RPC: {:?}", data); + data + } + + // Handle the incoming gossip message + fn gossipsub_incoming_message_handled(&mut self, source: PeerId, data: Vec) { + println!("Recvd incoming gossip: {:?}", data); + } + + fn kademlia_put_record_success(&mut self, key: Vec) { + println!("Record successfully written to DHT. 
Key: {:?}", key); + } +} + +/// Used to create a detereministic node. +pub async fn setup_node_1(ports: (Port, Port)) -> Core { + // Our test keypair for the first node + let mut protobuf = vec![ + 8, 1, 18, 64, 34, 116, 25, 74, 122, 174, 130, 2, 98, 221, 17, 247, 176, 102, 205, 3, 27, + 202, 193, 27, 6, 104, 216, 158, 235, 38, 141, 58, 64, 81, 157, 155, 36, 193, 50, 147, 85, + 72, 64, 174, 65, 132, 232, 78, 231, 224, 88, 38, 55, 78, 178, 65, 42, 97, 39, 152, 42, 164, + 148, 159, 36, 170, 109, 178, + ]; + + // The PeerId of the first node + let peer_id = Keypair::from_protobuf_encoding(&protobuf) + .unwrap() + .public() + .to_peer_id(); + + setup_core_builder_1(&mut protobuf[..], ports).await +} + +/// Used to create a node to peer with node_1. +pub async fn setup_node_2( + node_1_ports: (Port, Port), + ports: (Port, Port), +) -> (Core, PeerId) { + let app_state = AppState; + + // Our test keypair for the node_1 + let protobuf = vec![ + 8, 1, 18, 64, 34, 116, 25, 74, 122, 174, 130, 2, 98, 221, 17, 247, 176, 102, 205, 3, 27, + 202, 193, 27, 6, 104, 216, 158, 235, 38, 141, 58, 64, 81, 157, 155, 36, 193, 50, 147, 85, + 72, 64, 174, 65, 132, 232, 78, 231, 224, 88, 38, 55, 78, 178, 65, 42, 97, 39, 152, 42, 164, + 148, 159, 36, 170, 109, 178, + ]; + + // The PeerId of the first node + let peer_id = Keypair::from_protobuf_encoding(&protobuf) + .unwrap() + .public() + .to_peer_id(); + + // Set up bootnode to query node 1 + let mut bootnode = HashMap::new(); + bootnode.insert( + peer_id.to_base58(), + format!("/ip4/127.0.0.1/tcp/{}", node_1_ports.0), + ); + + println!("Second node here!"); + + // First, we want to configure our node + let config = BootstrapConfig::new() + .with_bootnodes(bootnode) + .with_tcp(ports.0) + .with_udp(ports.1); + + // Set up network + ( + CoreBuilder::with_config(config, app_state) + .build() + .await + .unwrap(), + peer_id, + ) +} + +pub async fn setup_core_builder_1(buffer: &mut [u8], ports: (u16, u16)) -> Core { + let app_state = AppState; + 
+ // First, we want to configure our node with the bootstrap config file on disk + let config = BootstrapConfig::default() + .generate_keypair_from_protobuf("ed25519", buffer) + .with_tcp(ports.0) + .with_udp(ports.1); + + // Set up network + CoreBuilder::with_config(config, app_state) + .build() + .await + .unwrap() +} + +#[test] +fn echo_for_node1_query_network() { + // Prepare an echo request + let echo_string = "Sacha rocks!".to_string(); + let data_request = AppData::Echo(echo_string.clone()); + + tokio::runtime::Runtime::new().unwrap().block_on(async { + if let Ok(result) = setup_node_1((49600, 49623)) + .await + .query_network(data_request) + .await + { + if let AppResponse::Echo(echoed_response) = result { + // Assert that what was sent was gotten back + assert_eq!(echo_string, echoed_response); + } + } + }); +} + +#[test] +fn echo_for_node1_send_and_receive() { + // Prepare an echo request + let echo_string = "Sacha rocks!".to_string(); + let data_request = AppData::Echo(echo_string.clone()); + + tokio::runtime::Runtime::new().unwrap().block_on(async { + let stream_id = setup_node_1((49500, 49501)) + .await + .send_to_network(data_request) + .await + .unwrap(); + + if let Ok(result) = setup_node_1((49400, 49401)) + .await + .recv_from_network(stream_id) + .await + { + if let AppResponse::Echo(echoed_response) = result { + // Assert that what was sent was gotten back + assert_eq!(echo_string, echoed_response); + } + } + }); +} + +#[test] +fn dial_peer_failure_works() { + // What we're dialing + let peer_id = PeerId::random(); + let multi_addr = "/ip4/192.168.1.205/tcp/1509".to_string(); + + let dial_request = AppData::DailPeer(peer_id, multi_addr.clone()); + + tokio::runtime::Runtime::new().unwrap().block_on(async { + let stream_id = setup_node_1((49611, 49601)) + .await + .send_to_network(dial_request) + .await + .unwrap(); + + if let Ok(result) = setup_node_1((49507, 49508)) + .await + .recv_from_network(stream_id) + .await + { + 
assert_eq!(AppResponse::Error(NetworkError::DailPeerError), result); + } + }); +} + +#[test] +fn kademlia_store_records_works() { + // Prepare an kademlia request to send to the network layer + let (key, value, expiration_time, explicit_peers) = ( + KADEMLIA_TEST_KEY.as_bytes().to_vec(), + KADEMLIA_TEST_VALUE.as_bytes().to_vec(), + None, + None, + ); + + let kad_request = AppData::KademliaStoreRecord { + key, + value, + expiration_time, + explicit_peers, + }; + + tokio::runtime::Runtime::new().unwrap().block_on(async { + if let Ok(result) = setup_node_1((49100, 49101)) + .await + .query_network(kad_request) + .await + { + println!("----> {:?}", result); + assert_eq!(AppResponse::KademliaStoreRecordSuccess, result); + } + else { + // TODO: do something to ensure this test works + } + }); +} + +#[test] +fn kademlia_lookup_record_works() { + // Prepare a kademlia request to send to the network layer + let (key, value, expiration_time, explicit_peers) = ( + KADEMLIA_TEST_KEY.as_bytes().to_vec(), + KADEMLIA_TEST_VALUE.as_bytes().to_vec(), + None, + None, + ); + + let kad_request = AppData::KademliaStoreRecord { + key: key.clone(), + value, + expiration_time, + explicit_peers, + }; + + tokio::runtime::Runtime::new().unwrap().block_on(async { + let mut node = setup_node_1((49155, 49222)).await; + + if let Ok(_) = node.clone().query_network(kad_request).await { + let kad_request = AppData::KademliaLookupRecord { key }; + + if let Ok(result) = node.query_network(kad_request).await { + if let AppResponse::KademliaLookupSuccess(value) = result { + assert_eq!(KADEMLIA_TEST_VALUE.as_bytes().to_vec(), value); + } + } + } + }); +} + +#[test] +fn kademlia_get_providers_works() { + // Note: we can only test for the error case here, an integration test is needed to actually + // check that the providers can be fetched + + // Prepare a kademlia request to send to the network layer + let req_key = KADEMLIA_TEST_KEY.as_bytes().to_vec(); + + let kad_request = 
AppData::KademliaGetProviders { + key: req_key.clone(), + }; + + tokio::runtime::Runtime::new().unwrap().block_on(async { + if let Ok(result) = setup_node_1((49988, 64544)) + .await + .query_network(kad_request) + .await + { + assert_eq!(AppResponse::KademliaNoProvidersFound, result); + } + }); +} + +#[test] +fn kademlia_get_routing_table_info_works() { + // Prepare an kademlia request to send to the network layer + let kad_request = AppData::KademliaGetRoutingTableInfo; + + tokio::runtime::Runtime::new().unwrap().block_on(async { + if let Ok(result) = setup_node_1((49999, 64555)) + .await + .query_network(kad_request) + .await + { + assert_eq!( + AppResponse::KademliaGetRoutingTableInfo { + protocol_id: DEFAULT_NETWORK_ID.to_string() + }, + result + ); + } + }); +} + +#[test] +fn get_network_info_works() { + // Prepare an info request to send to the network layer + let kad_request = AppData::GetNetworkInfo; + + tokio::runtime::Runtime::new().unwrap().block_on(async { + let mut node = setup_node_1((59999, 54555)).await; + + if let Ok(result) = node.query_network(kad_request).await { + // We'll use the peer id returned to validate the network information recieved + if let AppResponse::GetNetworkInfo { + peer_id, + connected_peers, + external_addresses, + } = result + { + println!("Connected peers: {:?}", connected_peers); + println!("External Addresses: {:?}", external_addresses); + assert_eq!(peer_id, node.peer_id()); + } + } + }); +} + +#[test] +fn gossipsub_join_and_exit_network_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the node that will be dialled + let mut node_1 = setup_node_1((49655, 49609)).await; + + let network = "Testers"; + + // Join a network (subscribe to a topic) + let gossip_request = AppData::GossipsubJoinNetwork(network.to_string()); + + let stream_id = node_1.send_to_network(gossip_request).await.unwrap(); + + if let Ok(result) = node_1.recv_from_network(stream_id).await { + 
assert_eq!(AppResponse::GossipsubJoinSuccess, result); + } + + // exit a network (unsubscribe to a topic) + let gossip_request = AppData::GossipsubExitNetwork(network.to_string()); + + let stream_id = node_1.send_to_network(gossip_request).await.unwrap(); + + // test for exit + if let Ok(result) = node_1.recv_from_network(stream_id).await { + assert_eq!(AppResponse::GossipsubExitSuccess, result); + } + }); +} + +#[test] +fn gossipsub_blacklist_and_remove_blacklist_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the node that will be dialled + let mut node_1 = setup_node_1((49695, 49699)).await; + + // Random peer id + let peer_id = PeerId::random(); + + // Blacklist + let gossip_request = AppData::GossipsubBlacklistPeer(peer_id); + let stream_id = node_1.send_to_network(gossip_request).await.unwrap(); + + if let Ok(result) = node_1.recv_from_network(stream_id).await { + assert_eq!(AppResponse::GossipsubBlacklistSuccess, result); + } + + // Remove blacklist + let gossip_request = AppData::GossipsubFilterBlacklist(peer_id); + let stream_id = node_1.send_to_network(gossip_request).await.unwrap(); + + if let Ok(result) = node_1.recv_from_network(stream_id).await { + assert_eq!(AppResponse::GossipsubBlacklistSuccess, result); + } + }); +} + +#[test] +fn gossipsub_info_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the node that will be dialled + let mut node_1 = setup_node_1((49395, 43699)).await; + + // Random peer id + let peer_id = PeerId::random(); + let network = "Blackbeard".to_string(); + + // Join a network (subscribe to a topic) + let gossip_request = AppData::GossipsubJoinNetwork(network.clone()); + node_1.query_network(gossip_request).await.unwrap(); + + // Blacklist a random peer + let gossip_request = AppData::GossipsubBlacklistPeer(peer_id); + node_1.query_network(gossip_request).await.unwrap(); + + // Prepare request + let gossip_request = AppData::GossipsubGetInfo; + let stream_id = 
node_1.send_to_network(gossip_request).await.unwrap(); + + if let Ok(result) = node_1.recv_from_network(stream_id).await { + // break up the response info + if let AppResponse::GossipsubGetInfo { + topics, blacklist, .. + } = result + { + // make assertions for the topic joined + assert_eq!(network, topics[0].clone()); + + // make assertions for the peers blacklisted + assert_eq!(peer_id, *blacklist.get(&peer_id).unwrap()); + } + } + }); +} + +// -- Dialing and fetch tests -- +// See: `swarm_nl::testing_guide` for information on how to run these tests. + +#[cfg(feature = "test-listening-node")] +#[test] +fn dialing_peer_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the node that will be dialled + setup_node_1((49666, 49606)).await; + // Loop for the listening node to keep running + loop {} + }); +} + +#[cfg(feature = "test-dialing-node")] +#[test] +fn dialing_peer_works() { + // Use tokio runtime to test async function + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the second node that will dial + let (mut node_2, node_1_peer_id) = setup_node_2((49666, 49606), (49667, 49607)).await; + + // What we're dialing + let multi_addr = format!("/ip4/127.0.0.1/tcp/{}", 49666); + + let dial_request = AppData::DailPeer(node_1_peer_id, multi_addr.clone()); + let stream_id = node_2.send_to_network(dial_request).await.unwrap(); + + if let Ok(result) = node_2.recv_from_network(stream_id).await { + assert_eq!(AppResponse::DailPeerSuccess(multi_addr), result); + } + }); +} + +#[cfg(feature = "test-server-node")] +#[test] +fn rpc_fetch_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the node that will be dialled + setup_node_1((49666, 49606)).await; + + println!("This is the server node for rpc testing"); + // Loop for the listening node to keep running + loop {} + }); +} + +#[cfg(feature = "test-client-node")] +#[test] +fn rpc_fetch_works() { + 
tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the second node that will dial + let (mut node_2, node_1_peer_id) = setup_node_2((49666, 49606), (49667, 49607)).await; + + println!("This is the client node for rpc testing"); + + let fetch_key = vec!["SomeFetchKey".as_bytes().to_vec()]; + + // What we're dialing + let multi_addr = format!("/ip4/127.0.0.1/tcp/{}", 49666); + + // Prepare fetch request + let fetch_request = AppData::FetchData { + keys: fetch_key.clone(), + peer: node_1_peer_id, + }; + + let stream_id = node_2.send_to_network(fetch_request).await.unwrap(); + + if let Ok(result) = node_2.recv_from_network(stream_id).await { + assert_eq!(AppResponse::FetchData(fetch_key), result); + } + }); +} + +// -- Tests for kademlia -- +// Two nodes will interact with each other using the commands to the DHT. +// See: `swarm_nl::testing_guide` for information on how to run these tests. + +#[cfg(feature = "test-reading-node")] +#[test] +fn kademlia_record_store_itest_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the node that will be dialled + let mut node_1 = setup_node_1((51666, 51606)).await; + + // Wait for a few seconds before trying to read the DHT + #[cfg(feature = "tokio-runtime")] + tokio::time::sleep(Duration::from_secs(ITEST_WAIT_TIME)).await; + + // Now poll for the kademlia record + let kad_request = AppData::KademliaLookupRecord { + key: KADEMLIA_TEST_KEY.as_bytes().to_vec(), + }; + if let Ok(result) = node_1.query_network(kad_request).await { + if let AppResponse::KademliaLookupSuccess(value) = result { + assert_eq!(KADEMLIA_TEST_VALUE.as_bytes().to_vec(), value); + } + } else { + println!("No record found"); + } + }); +} + +#[cfg(feature = "test-writing-node")] +#[test] +fn kademlia_record_store_itest_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the second node that will dial + let (mut node_2, node_1_peer_id) = setup_node_2((51666, 51606), (51667, 
51607)).await; + + let (key, value, expiration_time, explicit_peers) = ( + KADEMLIA_TEST_KEY.as_bytes().to_vec(), + KADEMLIA_TEST_VALUE.as_bytes().to_vec(), + None, + None, + ); + + let kad_request = AppData::KademliaStoreRecord { + key, + value, + expiration_time, + explicit_peers, + }; + + // Submit query + let res = node_2.query_network(kad_request).await; + + loop {} + }); +} + +// Note: KademliaStopProviding and KademliaDeleteRecord will alwys succeed. +// The right function to use is sent_to_network() which will not return a Some(StreamId) but will +// always return None. This is because it always succeeds and doesn't need to be tracked internally. +// Do not use query_network() to send the command, if you do, it will succeed but you will get a +// wrong error. The wrong error will be NetworkError::StreamBufferOverflow, (which is not correct). + +// -- Tests for providers -- +// See: `swarm_nl::testing_guide` for information on how to run these tests. + +#[cfg(feature = "test-writing-node")] +#[test] +fn kademlia_provider_records_itest_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the node that will be dialled + let mut node_1 = setup_node_1((51066, 51006)).await; + + // create a Kademlia request + let (key, value, expiration_time, explicit_peers) = ( + KADEMLIA_TEST_KEY.as_bytes().to_vec(), + KADEMLIA_TEST_VALUE.as_bytes().to_vec(), + None, + None, + ); + + let kad_request = AppData::KademliaStoreRecord { + key, + value, + expiration_time, + explicit_peers, + }; + + // submit request + let res = node_1.query_network(kad_request).await; + + loop {} + }); +} + +#[cfg(feature = "test-reading-node")] +#[test] +fn kademlia_provider_records_itest_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the second node that will dial + let (mut node_2, node_1_peer_id) = setup_node_2((51066, 51006), (51067, 51007)).await; + + // Wait for a few seconds before trying to read the DHT + #[cfg(feature = 
"tokio-runtime")] + tokio::time::sleep(Duration::from_secs(ITEST_WAIT_TIME)).await; + + // Now poll for the kademlia record provider + let kad_request = AppData::KademliaGetProviders { + key: KADEMLIA_TEST_KEY.as_bytes().to_vec(), + }; + // Submit query and assert that the provider is the node 1 + if let Ok(result) = node_2.query_network(kad_request).await { + if let AppResponse::KademliaGetProviders { key, providers } = result { + assert_eq!(providers[0], node_1_peer_id.to_base58()); + } + } else { + println!("No record found"); + } + }); +} + +// -- Gossipsub tests -- +// See: `swarm_nl::testing_guide` for information on how to run these tests. + +#[cfg(feature = "test-subscribe-node")] +#[test] +fn gossipsub_join_exit_itest_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the node that will be dialled + let mut node_1 = setup_node_1((49775, 49779)).await; + + // Join a network (subscribe to a topic) + let gossip_request = AppData::GossipsubJoinNetwork(GOSSIP_NETWORK.to_string()); + + let stream_id = node_1.send_to_network(gossip_request).await.unwrap(); + + if let Ok(result) = node_1.recv_from_network(stream_id).await { + println!("Subscription successfull"); + assert_eq!(AppResponse::GossipsubJoinSuccess, result); + } + + loop {} + }); +} + +#[cfg(feature = "test-query-node")] +#[test] +fn gossipsub_join_exit_itest_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the second node that will dial + let (mut node_2, node_1_peer_id) = setup_node_2((49775, 49779), (51767, 51707)).await; + + // Wait for a few seconds for propagation + #[cfg(feature = "tokio-runtime")] + tokio::time::sleep(Duration::from_secs(ITEST_WAIT_TIME)).await; + + // Join a network (subscribe to a topic) + let gossip_request = AppData::GossipsubJoinNetwork(GOSSIP_NETWORK.to_string()); + + if let Ok(_) = node_2.query_network(gossip_request).await { + println!("Subscription successfull"); + // Query the network to confirm subscription 
of peer + let gossip_request = AppData::GossipsubGetInfo; + if let Ok(result) = node_2.query_network(gossip_request).await { + if let AppResponse::GossipsubGetInfo { mesh_peers, .. } = result { + assert_eq!(mesh_peers[0].0, node_1_peer_id); + assert_eq!(mesh_peers[0].1[0], GOSSIP_NETWORK); + } + } + } + }); +} + +#[cfg(feature = "test-listening-node")] +#[test] +fn gossipsub_message_itest_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the node that will be dialled + let mut node_1 = setup_node_1((49885, 49889)).await; + + // Join a network (subscribe to a topic) + let gossip_request = AppData::GossipsubJoinNetwork(GOSSIP_NETWORK.to_string()); + + let stream_id = node_1.send_to_network(gossip_request).await.unwrap(); + + if let Ok(result) = node_1.recv_from_network(stream_id).await { + println!("Subscription successfull"); + assert_eq!(AppResponse::GossipsubJoinSuccess, result); + } + + loop {} + }); +} + +#[cfg(feature = "test-broadcast-node")] +#[test] +fn gossipsub_message_itest_works() { + tokio::runtime::Runtime::new().unwrap().block_on(async { + // Set up the second node that will dial + let (mut node_2, _) = setup_node_2((49885, 49889), (51887, 51887)).await; + + // Join a network (subscribe to a topic) + let gossip_request = AppData::GossipsubJoinNetwork(GOSSIP_NETWORK.to_string()); + + if let Ok(_) = node_2.query_network(gossip_request).await { + println!("Subscription successfull"); + + // Prepare broadcast query + let gossip_request = AppData::GossipsubBroadcastMessage { + topic: GOSSIP_NETWORK.to_string(), + message: vec!["Apple".to_string(), "nike".to_string()], + }; + + if let Ok(result) = node_2.query_network(gossip_request).await { + println!("{:?}", result); + } + } + }); +} diff --git a/swarm_nl/src/core/tests/mod.rs b/swarm_nl/src/core/tests/mod.rs new file mode 100644 index 000000000..ffa4515a4 --- /dev/null +++ b/swarm_nl/src/core/tests/mod.rs @@ -0,0 +1,3 @@ +use super::*; +mod layer_communication; +mod 
node_behaviour; \ No newline at end of file diff --git a/swarm_nl/src/core/tests/node_behaviour.rs b/swarm_nl/src/core/tests/node_behaviour.rs new file mode 100644 index 000000000..04f8929bc --- /dev/null +++ b/swarm_nl/src/core/tests/node_behaviour.rs @@ -0,0 +1,205 @@ +//! Node setup and behavor tests. + +use super::*; +use futures::TryFutureExt; +use ini::Ini; +use std::fs; +use std::fs::File; +use std::net::{Ipv4Addr, Ipv6Addr}; + +// Set up a default node helper +pub fn setup_core_builder() -> CoreBuilder { + let config = BootstrapConfig::default(); + let handler = DefaultHandler; + + // Return default network core builder + CoreBuilder::with_config(config, handler) +} + +// Define custom ports for testing +const CUSTOM_TCP_PORT: Port = 49666; +const CUSTOM_UDP_PORT: Port = 49852; + +// Used to test saving keypair to file +fn create_test_ini_file(file_path: &str) { + let mut config = Ini::new(); + config + .with_section(Some("ports")) + .set("tcp", CUSTOM_TCP_PORT.to_string()) + .set("udp", CUSTOM_UDP_PORT.to_string()); + + config.with_section(Some("bootstrap")).set( + "boot_nodes", + "[12D3KooWGfbL6ZNGWqS11MoptH2A7DB1DG6u85FhXBUPXPVkVVRq:/ip4/192.168.1.205/tcp/1509]", + ); + // Write config to a new INI file + config.write_to_file(file_path).unwrap_or_default(); +} + +#[test] +fn node_default_behavior_works() { + // Build a node with the default network id + let default_node = setup_core_builder(); + + // Assert that the default network id is '/swarmnl/1.0' + assert_eq!(default_node.network_id, DEFAULT_NETWORK_ID); + + // Default transport is TCP/QUIC + assert_eq!( + default_node.transport, + TransportOpts::TcpQuic { + tcp_config: TcpConfig::Default + } + ); + + // Default keep alive duration is 60 seconds + assert_eq!(default_node.keep_alive_duration, 60); + + // Default listen on is 0:0:0:0 + assert_eq!( + default_node.ip_address, + IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)) + ); + + // Default tcp/udp port is MIN_PORT and MAX_PORT + 
assert_eq!(default_node.tcp_udp_port, (MIN_PORT, MAX_PORT)); +} + +#[test] +fn node_custom_setup_works() { + // Build a node with the default network id + let default_node = setup_core_builder(); + + // Custom node configuration + let mut custom_network_id = "/custom-protocol/1.0".to_string(); + let mut custom_transport = TransportOpts::TcpQuic { + tcp_config: TcpConfig::Custom { + ttl: 10, + nodelay: true, + backlog: 10, + }, + }; + let mut custom_keep_alive_duration = 20; + let mut custom_ip_address = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)); + + // Pass in the custom node configuration and assert it works as expected + let custom_node = default_node + .with_network_id(custom_network_id.clone()) + .with_transports(custom_transport.clone()) + .with_idle_connection_timeout(custom_keep_alive_duration.clone()) + .listen_on(custom_ip_address.clone()); + + // Assert that the custom network id is '/custom/protocol/1.0' + assert_eq!(custom_node.network_id(), custom_network_id); + + // Assert that the custom transport is 'TcpQuic' + assert_eq!(custom_node.transport, custom_transport); + + // Assert that the custom keep alive duration is 20 + assert_eq!(custom_node.keep_alive_duration, custom_keep_alive_duration); +} + +#[test] +fn node_custom_behavior_with_network_id_works() { + // Setup a node with the default config builder + let custom_builder = setup_core_builder(); + + // Configure builder with custom protocol and assert it works as expected + let custom_protocol: &str = "/custom-protocol/1.0"; + let custom_builder = custom_builder.with_network_id(custom_protocol.to_string()); + + // Cannot be less than MIN_NETWORK_ID_LENGTH + assert_eq!( + custom_builder.network_id().len() >= MIN_NETWORK_ID_LENGTH.into(), + true + ); + + // Must start with a forward slash + assert!(custom_builder.network_id().starts_with("/")); + + // Assert that the custom network id is '/custom/protocol/1.0' + assert_eq!(custom_builder.network_id(), custom_protocol.to_string()); +} + 
+#[test] +#[should_panic(expected = "Could not parse provided network id")] +fn node_custom_behavior_with_network_id_fails() { + // Build a node with the default network id + let custom_builder = setup_core_builder(); + + // Pass in an invalid network ID: network ID length is less than MIN_NETWORK_ID_LENGTH + let invalid_protocol_1 = "/1.0".to_string(); + let custom_builder = custom_builder.with_network_id(invalid_protocol_1); + + // Pass in an invalid network ID: network ID must start with a forward slash + let invalid_protocol_2 = "1.0".to_string(); + custom_builder.with_network_id(invalid_protocol_2); +} + +#[cfg(feature = "tokio-runtime")] +#[test] +fn node_save_keypair_offline_works_tokio() { + // Build a node with the default network id + let default_node = setup_core_builder(); + + // Use tokio runtime to test async function + let result = tokio::runtime::Runtime::new().unwrap().block_on( + default_node + .build() + .unwrap_or_else(|_| panic!("Could not build node")), + ); + + // Create a saved_keys.ini file + let file_path_1 = "saved_keys.ini"; + create_test_ini_file(file_path_1); + + // Save the keypair to existing file + let saved_1 = result.save_keypair_offline(&file_path_1); + + // Assert that the keypair was saved successfully + assert_eq!(saved_1, true); + + // Test if it works for a file name that does not exist + let file_path_2 = "test.ini"; + let saved_2 = result.save_keypair_offline(file_path_2); + assert_eq!(saved_2, true); + + // Clean up + fs::remove_file(file_path_1).unwrap_or_default(); + fs::remove_file(file_path_2).unwrap_or_default(); +} + +#[cfg(feature = "async-std-runtime")] +#[test] +fn node_save_keypair_offline_works_async_std() { + // Build a node with the default network id + let default_node = setup_core_builder(); + + // Use tokio runtime to test async function + let result = async_std::task::block_on( + default_node + .build() + .unwrap_or_else(|_| panic!("Could not build node")), + ); + + // Make a saved_keys.ini file + let 
file_path_1 = "saved_keys.ini"; + create_test_ini_file(file_path_1); + + // Save the keypair to existing file + let saved_1 = result.save_keypair_offline(file_path_1); + + // Assert that the keypair was saved successfully + assert_eq!(saved_1, true); + + // Now test if it works for a file name that does not exist + let file_path_2 = "test.txt"; + let saved_2 = result.save_keypair_offline(file_path_2); + + // Assert that the keypair was saved successfully + assert_eq!(saved_2, true); + + // Clean up + fs::remove_file(file_path_1).unwrap_or_default(); + fs::remove_file(file_path_2).unwrap_or_default(); +} diff --git a/swarm_nl/src/lib.rs b/swarm_nl/src/lib.rs index 87d657063..b5cb14c25 100644 --- a/swarm_nl/src/lib.rs +++ b/swarm_nl/src/lib.rs @@ -1,21 +1,18 @@ -/// Copyright (c) 2024 Algorealm -/// -/// This file is part of the SwarmNL library. +// Copyright 2024 Algorealm +// Apache 2.0 License + +#![doc = include_str!("../../README.md")] -/// Re-exports pub use crate::prelude::*; -pub use futures::{ - channel::mpsc::{self, Receiver, Sender}, - SinkExt, StreamExt, -}; pub use libp2p::{ core::{transport::ListenerId, ConnectedPoint, Multiaddr}, + ping::Failure, swarm::ConnectionId, }; pub use libp2p_identity::{rsa::Keypair as RsaKeypair, KeyType, Keypair, PeerId}; -pub use async_trait::async_trait; pub mod core; mod prelude; pub mod setup; pub mod util; +pub mod testing_guide; \ No newline at end of file diff --git a/swarm_nl/src/prelude.rs b/swarm_nl/src/prelude.rs index 7090aa08b..b4686582f 100644 --- a/swarm_nl/src/prelude.rs +++ b/swarm_nl/src/prelude.rs @@ -1,17 +1,19 @@ -use libp2p_identity::{KeyType, PeerId}; +// Copyright 2024 Algorealm +// Apache 2.0 License + +//! Types and traits that are used throughout SwarmNL. + +use libp2p_identity::KeyType; use std::net::Ipv4Addr; -/// Copyright (c) 2024 Algorealm -/// -/// This file is part of the SwarmNL library. use thiserror::Error; /// Default IP address when no address is specified. 
pub static DEFAULT_IP_ADDRESS: Ipv4Addr = Ipv4Addr::new(0, 0, 0, 0); /// Default amount of time to keep a connection alive. -pub static DEFAULT_KEEP_ALIVE_DURATION: u64 = 60; +pub static DEFAULT_KEEP_ALIVE_DURATION: Seconds = 60; -/// Library error type containing all custom errors that could be encountered +/// Library error type containing all custom errors that could be encountered. #[derive(Error, Debug)] pub enum SwarmNlError { #[error("could not read bootstrap config file")] @@ -30,32 +32,36 @@ pub enum SwarmNlError { RemotePeerDialError(String), #[error("could not parse provided network id")] NetworkIdParseError(String), + #[error("could not configure node for gossiping")] + GossipConfigError, } -/// Generic SwarmNl result type +/// Generic SwarmNl result type. pub type SwarmNlResult = Result; -/// Port type +/// Port type. pub type Port = u16; -/// Seconds type +/// Seconds type. pub type Seconds = u64; -/// The stringified PeerId type +/// The stringified `PeerId` type. pub type PeerIdString = String; -/// The stringified Multiaddr type +/// The stringified `Multiaddr` type. pub type MultiaddrString = String; -/// Port ranges +/// Lower bound port range (u16::MIN). pub const MIN_PORT: u16 = 49152; +/// Upper bound port range (u16::MAX). pub const MAX_PORT: u16 = 65535; -/// Default network id +/// Default network ID. pub static DEFAULT_NETWORK_ID: &str = "/swarmnl/1.0"; -/// Minimum network (protocol) id. This helps ensure that the protocol id is well formed and -/// contains a reasonable value because it is what identifies a network, makes it unique and -/// separates it from others. +/// This constant sets the shortest acceptable length for a network ID. +/// The network ID identifies a network and ensures it's distinct from others. pub static MIN_NETWORK_ID_LENGTH: u8 = 4; -/// Implement From<&str> for libp2p2_identity::KeyType. 
-/// We'll define a custom trait because of the Rust visibility rule to solve this problem +/// An implementation of [`From<&str>`] for [`KeyType`] to read a key type from a bootstrap config +/// file. +/// +/// We define a custom trait because of the Rust visibility rule. pub trait CustomFrom { fn from(string: &str) -> Option where @@ -74,15 +80,15 @@ impl CustomFrom for KeyType { } } -/// Supported transport protocols +/// Supported transport protocols. #[derive(Hash, Eq, PartialEq, Debug, Clone)] pub enum TransportOpts { - /// QUIC transport protocol enabled with TCP/IP as fallback. - /// DNS lookup is also configured by default + /// QUIC transport protocol enabled with TCP/IP as fallback. DNS lookup is also configured by + /// default. TcpQuic { tcp_config: TcpConfig }, } -/// TCP setup Config +/// TCP setup configuration. #[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)] pub enum TcpConfig { /// Default configuration specified in the [libp2p docs](https://docs.rs/libp2p/latest/libp2p/tcp/struct.Config.html#method.new). @@ -98,9 +104,3 @@ pub enum TcpConfig { // port_resuse: bool }, } - -/// A unique type that indicates that a struct is not yet initialized to its default state -pub struct NotInitialiazed; - -/// A unique type that indicates that a struct has been default configured -pub struct Initialized; diff --git a/swarm_nl/src/setup.rs b/swarm_nl/src/setup.rs index 57bc51be8..44d6e3094 100644 --- a/swarm_nl/src/setup.rs +++ b/swarm_nl/src/setup.rs @@ -1,31 +1,36 @@ -/// Copyright (c) 2024 Algorealm +// Copyright 2024 Algorealm +// Apache 2.0 License + +//! Data structures and functions to setup a node and configure it for networking. + +#![doc = include_str!("../doc/setup/NodeSetup.md")] -// The module containing the data structures and functions to setup a node identity and -/// configure it for networking. -/// -/// This file is part of the SwarmNl library. 
use std::collections::HashMap; -use libp2p_identity::rsa; +use crate::core::gossipsub_cfg::Blacklist; +pub use crate::prelude::*; +pub use libp2p_identity::{rsa::Keypair as RsaKeypair, KeyType, Keypair, PeerId}; -/// Import the contents of the exported modules into this module +/// Import the contents of the exported modules into this module. use super::*; -/// Configuration data required for node bootstrap +/// Configuration data required for node bootstrap. #[derive(Debug)] pub struct BootstrapConfig { - /// The port to listen on if using the TCP/IP protocol + /// The port to listen on if using the TCP/IP protocol. tcp_port: Port, - /// The port to listen on if using the UDP or QUIC protocol + /// The port to listen on if using the UDP or QUIC protocol. udp_port: Port, - /// The Cryptographic Keypair for node identification and message auth + /// The Cryptographic Keypair for node identification and message auth. keypair: Keypair, - /// Bootstrap peers + /// Bootstrap peers. boot_nodes: HashMap, + /// Blacklisted peers + blacklist: Blacklist, } impl BootstrapConfig { - /// Read from a bootstrap config file on disk + /// Read from a bootstrap config file on disk. /// /// # Panics /// @@ -37,28 +42,37 @@ impl BootstrapConfig { /// Return a new `BootstrapConfig` struct populated by default (empty) values. /// /// Must be called first if the config is to be explicitly built without reading `.ini` file - /// from disk + /// from disk. pub fn new() -> Self { BootstrapConfig { - // Default TCP/IP port if not specified + // Default TCP/IP port if not specified. tcp_port: MIN_PORT, - // Default UDP port if not specified + // Default UDP port if not specified. udp_port: MAX_PORT, - // Default node keypair type i.e Ed25519 + // Default node keypair type i.e Ed25519. keypair: Keypair::generate_ed25519(), boot_nodes: Default::default(), + blacklist: Default::default(), } } - /// Configure available bootnodes + /// Configure available bootnodes. 
pub fn with_bootnodes(mut self, boot_nodes: HashMap) -> Self { - // additive operation + // Additive operation self.boot_nodes.extend(boot_nodes.into_iter()); self } - /// Configure the TCP/IP port - /// Port must range between [`MIN_PORT`] and [`MAX_PORT`] + /// Configure a list of peers to add to blacklist. + pub fn with_blacklist(mut self, list: Vec) -> Self { + // additive operation + self.blacklist.list.extend(list.into_iter()); + self + } + + /// Configure the TCP/IP port. + /// + /// Note: Port must range between [`MIN_PORT`] and [`MAX_PORT`]. pub fn with_tcp(self, tcp_port: Port) -> Self { if tcp_port > MIN_PORT && tcp_port < MAX_PORT { BootstrapConfig { tcp_port, ..self } @@ -67,8 +81,9 @@ impl BootstrapConfig { } } - /// Configure the UDP port - /// Port must range between [`MIN_PORT`] and [`MAX_PORT`] + /// Configure the UDP port. + /// + /// Note: Port must range between [`MIN_PORT`] and [`MAX_PORT`] pub fn with_udp(self, udp_port: Port) -> Self { if udp_port > MIN_PORT && udp_port < MAX_PORT { BootstrapConfig { udp_port, ..self } @@ -77,15 +92,17 @@ impl BootstrapConfig { } } - /// Generate a Cryptographic Keypair. + /// Generate a Cryptographic Keypair for node identity creation and message signing. + /// /// An RSA keypair cannot be generated on-the-fly. It has to be generated from a `.pk8` file. - /// Hence the `Option` parameter is always `None` except in the case of RSA. - /// Please note that calling this function overrides whatever might have been read from the + /// Hence the `rsa_pk8_filepath` parameter must always be set to `None` except in the case of + /// RSA. Please note that calling this function overrides whatever might have been read from the /// `.ini` file /// - /// # Panics (Only applies to the RSA keypair instance) + /// # Panics /// /// This function will panic if: + /// /// 1. The RSA key type is specified and the `rsa_pk8_filepath` is set to `None`. /// 2. 
If the file contains invalid data and an RSA keypair cannot be generated from it. pub fn generate_keypair(self, key_type: KeyType, rsa_pk8_filepath: Option<&str>) -> Self { @@ -98,7 +115,7 @@ impl BootstrapConfig { KeyType::Ed25519 => Keypair::generate_ed25519(), KeyType::RSA => { let mut bytes = std::fs::read(rsa_pk8_filepath.unwrap()).unwrap_or_default(); - // return RSA keypair generated from a .pk8 binary file + // Return RSA keypair generated from a .pk8 binary file Keypair::rsa_from_pkcs8(&mut bytes).unwrap() }, KeyType::Secp256k1 => Keypair::generate_secp256k1(), @@ -115,7 +132,8 @@ impl BootstrapConfig { /// # Panics /// /// This function will panic if the `u8` buffer is not parsable into the specified key type. - /// This could be for one of two reasons: + /// This could be because one of two reasons: + /// /// 1. If the key type is valid, but the keypair data is not valid for that key type. /// 2. If the key type is invalid. pub fn generate_keypair_from_protobuf(self, key_type_str: &str, bytes: &mut [u8]) -> Self { @@ -137,7 +155,7 @@ impl BootstrapConfig { BootstrapConfig { keypair, ..self } } else { - // generate a default Ed25519 keypair + // Generate a default Ed25519 keypair BootstrapConfig { keypair: Keypair::generate_ed25519(), ..self @@ -145,23 +163,28 @@ impl BootstrapConfig { } } - /// Return a node's cryptographic keypair + /// Return a node's cryptographic keypair. pub fn keypair(&self) -> Keypair { self.keypair.clone() } - /// Return the configured ports in a tuple i.e (TCP Port, UDP port) + /// Return the configured ports in a tuple i.e (TCP Port, UDP port). pub fn ports(&self) -> (Port, Port) { (self.tcp_port, self.udp_port) } - /// Return the configured bootnodes for the network + /// Return the configured bootnodes for the network. pub fn bootnodes(&self) -> HashMap { self.boot_nodes.clone() } + + /// Return the `PeerId`'s of nodes that are to be blacklisted. 
+ pub fn blacklist(&self) -> Blacklist { + self.blacklist.clone() + } } -/// Implement [`Default`] for [`BootstrapConfig`] +/// [`Default`] implementation for [`BootstrapConfig`]. impl Default for BootstrapConfig { fn default() -> Self { Self::new() @@ -170,8 +193,6 @@ impl Default for BootstrapConfig { #[cfg(test)] mod tests { - use libp2p_identity::ed25519; - use super::*; use std::fs; use std::panic; @@ -232,34 +253,35 @@ mod tests { assert!(result.is_err()); } + #[docify::export] #[test] fn default_config_works() { let bootstrap_config = BootstrapConfig::default(); - // default port values + // Default port values assert_eq!(bootstrap_config.tcp_port, MIN_PORT); assert_eq!(bootstrap_config.udp_port, MAX_PORT); - // and we know that the default is Ed25519 + // .. and we know that the default is Ed25519 let keypair = bootstrap_config.keypair; assert_eq!(keypair.key_type(), KeyType::Ed25519); - // bootnodes aren't configured by default so we expect an empty HashMap + // Bootnodes aren't configured by default so we expect an empty HashMap assert_eq!(bootstrap_config.boot_nodes, HashMap::new()); } #[test] fn new_config_with_bootnodes_works() { - // setup test data + // Setup test data let mut bootnodes: HashMap = HashMap::new(); - let mut key_1 = "12D3KooWBmwXN3rsVfnLsZKbXeBrSLfczHxZHwVjPrbKwpLfYm3t".to_string(); - let mut val_1 = "/ip4/192.168.1.205/tcp/1509".to_string(); - let mut key_2 = "12A0ZooWBmwXN3rsVfnLsZKbXeBrSLfczHxZHwVjPrbKwpLfYm3t".to_string(); - let mut val_2 = "/ip4/192.168.1.205/tcp/1588".to_string(); + let key_1 = "12D3KooWBmwXN3rsVfnLsZKbXeBrSLfczHxZHwVjPrbKwpLfYm3t".to_string(); + let val_1 = "/ip4/192.168.1.205/tcp/1509".to_string(); + let key_2 = "12A0ZooWBmwXN3rsVfnLsZKbXeBrSLfczHxZHwVjPrbKwpLfYm3t".to_string(); + let val_2 = "/ip4/192.168.1.205/tcp/1588".to_string(); bootnodes.insert(key_1.clone(), val_1.clone()); bootnodes.insert(key_2.clone(), val_2.clone()); - // we've inserted two bootnodes + // We've inserted two bootnodes let 
bootstrap_config = BootstrapConfig::new().with_bootnodes(bootnodes); assert_eq!(bootstrap_config.bootnodes().len(), 2); @@ -271,15 +293,15 @@ mod tests { #[test] fn new_config_with_tcp_port_works() { - // first assert that the default is MIN_PORT + // First assert that the default is MIN_PORT let bootstrap_config = BootstrapConfig::default(); assert_eq!(bootstrap_config.ports().0, MIN_PORT); - // now set a custom port + // Now set a custom port let bootstrap_config_with_tcp = bootstrap_config.with_tcp(49666); assert_eq!(bootstrap_config_with_tcp.ports().0, 49666); - // now set an invalid port and check it falls back to the default tcp port value + // Now set an invalid port and check it falls back to the default tcp port value // Note: MAX_PORT+1 would overflow the u16 type let bootstrap_config_invalid_tcp_port = BootstrapConfig::new().with_tcp(MIN_PORT - 42); @@ -289,29 +311,29 @@ mod tests { #[test] fn new_config_with_udp_port_works() { - // default should be MAX_PORT + // Default should be MAX_PORT let bootstrap_config = BootstrapConfig::default(); assert_eq!(bootstrap_config.ports().1, MAX_PORT); - // now set a custom port + // Now set a custom port let bootstrap_config_with_udp = bootstrap_config.with_udp(55555); assert_eq!(bootstrap_config_with_udp.ports().1, 55555); - // now set an invalid port and check it falls back to the default udp port value + // Now set an invalid port and check it falls back to the default udp port value let bootstrap_config_invalid_udp_port = BootstrapConfig::new().with_udp(MIN_PORT - 42); assert_eq!(bootstrap_config_invalid_udp_port.ports().1, MAX_PORT); } #[test] fn key_type_is_invalid() { - // invalid keytype + // Invalid keytype let invalid_keytype = "SomeMagicCryptoType"; - // valid keypair + // Valid keypair let mut ed25519_serialized_keypair = Keypair::generate_ed25519().to_protobuf_encoding().unwrap(); - // should not panic but default to ed25519 + // Should not panic but default to ed25519 let result = 
panic::catch_unwind(move || { let bootstrap_config = BootstrapConfig::default() .generate_keypair_from_protobuf(invalid_keytype, &mut ed25519_serialized_keypair); @@ -328,7 +350,7 @@ mod tests { let valid_key_types = ["Ed25519", "RSA", "Secp256k1", "Ecdsa"]; let mut invalid_keypair: [u8; 2] = [0; 2]; - // keypair is invalid for each valid key type + // Keypair is invalid for each valid key type let _ = BootstrapConfig::default() .generate_keypair_from_protobuf(valid_key_types[0], &mut invalid_keypair); let _ = BootstrapConfig::default() @@ -355,47 +377,47 @@ mod tests { #[test] fn rsa_with_invalid_contents_should_panic() { - // create an RSA file with invalid contents + // Create an RSA file with invalid contents let file_path = "invalid_rsa_keypair_temp_file.pk8"; let invalid_keypair: [u8; 64] = [0; 64]; std::fs::write(file_path, invalid_keypair).unwrap(); let result = panic::catch_unwind(|| { - // should panic when parsing invalid RSA file + // Should panic when parsing invalid RSA file let _ = BootstrapConfig::default().generate_keypair(KeyType::RSA, Some(file_path)); }); - // this will return an error + // This will return an error assert!(result.is_err()); - // clean-up invalid_rsa_keypair_temp_file.pk8 + // Clean-up invalid_rsa_keypair_temp_file.pk8 fs::remove_file(file_path).unwrap_or_default(); } #[test] fn rsa_from_valid_file_works() { - // create a valid private.pk8 file + // Create a valid private.pk8 file generate_rsa_keypair_files(); - let mut bootstrap_config = + let bootstrap_config = BootstrapConfig::new().generate_keypair(KeyType::RSA, Some("private.pk8")); assert_eq!(bootstrap_config.keypair().key_type(), KeyType::RSA); - // clean-up RSA files + // Clean-up RSA files fs::remove_file("private.pk8").unwrap_or_default(); fs::remove_file("private.pem").unwrap_or_default(); } #[test] fn generate_keypair_from_protobuf_ed25519_works() { - // generate a valid keypair for ed25519 + // Generate a valid keypair for ed25519 let key_type_str = "Ed25519"; let 
mut ed25519_serialized_keypair = Keypair::generate_ed25519().to_protobuf_encoding().unwrap(); - // add to bootstrap config from protobuf - let mut bootstrap_config = BootstrapConfig::new() + // Add to bootstrap config from protobuf + let bootstrap_config = BootstrapConfig::new() .generate_keypair_from_protobuf(key_type_str, &mut ed25519_serialized_keypair); assert_eq!(bootstrap_config.keypair().key_type(), KeyType::Ed25519); @@ -403,13 +425,13 @@ mod tests { #[test] fn generate_keypair_from_protobuf_ecdsa_works() { - // generate a valid keypair for ecdsa + // Generate a valid keypair for ecdsa let key_type_str = "Ecdsa"; let mut ecdsa_serialized_keypair = Keypair::generate_ecdsa().to_protobuf_encoding().unwrap(); - // add to bootstrap config from protobuf - let mut bootstrap_config = BootstrapConfig::new() + // Add to bootstrap config from protobuf + let bootstrap_config = BootstrapConfig::new() .generate_keypair_from_protobuf(key_type_str, &mut ecdsa_serialized_keypair); assert_eq!(bootstrap_config.keypair().key_type(), KeyType::Ecdsa); @@ -417,14 +439,14 @@ mod tests { #[test] fn generate_keypair_from_protobuf_secp256k1_works() { - // generate a valid keypair for Secp256k1 + // Generate a valid keypair for Secp256k1 let key_type_str = "Secp256k1"; let mut secp256k1_serialized_keypair = Keypair::generate_secp256k1() .to_protobuf_encoding() .unwrap(); - // add to bootstrap config from protobuf - let mut bootstrap_config = BootstrapConfig::new() + // Add to bootstrap config from protobuf + let bootstrap_config = BootstrapConfig::new() .generate_keypair_from_protobuf(key_type_str, &mut secp256k1_serialized_keypair); assert_eq!(bootstrap_config.keypair().key_type(), KeyType::Secp256k1); diff --git a/swarm_nl/src/testing_guide.rs b/swarm_nl/src/testing_guide.rs new file mode 100644 index 000000000..e6056e10b --- /dev/null +++ b/swarm_nl/src/testing_guide.rs @@ -0,0 +1,150 @@ +//! A doc-only module explaining how to run core library tests. +//! +//! 
> **Note**: the library is compatible with both `tokio` and `async-std` runtimes, however all tests are written to use the `tokio` executor. +//! > Therefore, to run the tests you must specify the runtime feature flag e.g. `cargo test --features=tokio-runtime`. +//! +//! There are two classes of tests in the core library: +//! +//! - `node_behaviour` tests for single node setup and behaviour. +//! - `layer_communication` tests involving the synchronization between two nodes. +//! +//! +//! # Node behaviour testing +//! +//! These are simple unit tests that check the behaviour of a single node. To run these tests, simply run the following command: +//! +//! ```bash +//! cargo test node_ --features=tokio-runtime +//! ``` +//! +//! # Layer communication testing +//! +//! In order to create tests for communication between two nodes, we used the Rust conditional compilation feature to be able to setup different nodes and test their communication. +//! All commands for running these tests should be run with `-- --nocapture` to verify the expected results. +//! +//! For these tests, we've created two test nodes: `node1` and `node2`. +//! +//! - Node 1 is setup by calling the `setup_node_1` function which uses a pre-configured cryptographic keypair and the `setup_core_builder_1` function to configure a default node. +//! This keeps its identity consistent across tests. +//! +//! - Node 2 is setup by calling the `setup_node_2` function which creates a new node identity every time it is called. +//! It then adds Node 1 as its bootnode and establishes a connection by dialing Node 1. +//! +//! ### Peer dialing tests +//! +//! The peer dialing tests checks if a node can dial another node by using a `listening` node and a `dialing` node. +//! To run these tests, start the listening node by running the following command in one terminal: +//! +//! ```bash +//! cargo test dialing_peer_works --features=test-listening-node --features=tokio-runtime -- --nocapture +//! ``` +//! +//! 
Then, in another terminal run the dialing node: +//! +//! ```bash +//! cargo test dialing_peer_works --features=test-dialing-node --features=tokio-runtime -- --nocapture +//! ``` +//! +//! The application event handler will log the dialing node's peer id and the listening node's peer id. +//! +//! ## Fetching tests +//! +//! The fetching test checks if a node can fetch a value from another node. +//! These tests use a `server` node and a `client` node. +//! +//! To run these tests first start the server node in one terminal: +//! +//! ```bash +//! cargo test rpc_fetch_works --features=test-server-node --features=tokio-runtime -- --nocapture +//! ``` +//! +//! And in another terminal, run the client node: +//! +//! ```bash +//! cargo test rpc_fetch_works --features=test-client-node --features=tokio-runtime -- --nocapture +//! ``` +//! +//! Then you can check that the server node prints out a _"Recvd incoming RPC:"_ message with the data sent by the client node. +//! +//! ## Kademlia tests +//! +//! For Kademlia tests, we have a `reading` node and a `writing` node. +//! We use a time delay to simulate the reading node "sleeping" so as to allow the writing node to make changes to the DHT. +//! +//! When the reading node "wakes up" it then tries to read the value from the DHT. If the value is what it expects, the tests passes successfully. +//! +//! To run this test, run the following command in one terminal to launch the "reading" node: +//! +//! ```bash +//! cargo test kademlia_record_store_itest_works --features=test-reading-node --features=tokio-runtime -- --nocapture +//! ``` +//! +//! And then run the following command in another terminal to launch the "writing node": +//! +//! ```bash +//! cargo test kademlia_record_store_itest_works --features=test-writing-node --features=tokio-runtime -- --nocapture +//! ``` +//! +//! ### Record providers tests +//! +//! To run the providers tests, we have a `reading` node and a `writing` node. +//! +//! 
We first run the "writing" node to store a record in the DHT. Then we run a "reading" node to fetch the list of providers of the record that's been written.
+//!
+//! Then we simply assert that node 1 is a provider of the record.
+//!
+//! To run this test, first run the "writing" node:
+//!
+//! ```bash
+//! cargo test kademlia_provider_records_itest_works --features=test-writing-node --features=tokio-runtime -- --nocapture
+//! ```
+//!
+//! Then, in another terminal, run the "reading" node:
+//!
+//! ```bash
+//! cargo test kademlia_provider_records_itest_works --features=test-reading-node --features=tokio-runtime -- --nocapture
+//! ```
+//!
+//! ### Gossipsub tests
+//!
+//! **Join/Exit tests**
+//!
+//! For Gossipsub tests, we have a `subscribe` node and a `query` node.
+//!
+//! When the "subscribe" node is set up, it joins a mesh network. Then node 2 is setup and connects to node 1, sleeps for a while (to allow propagation of data from node 1) and then joins the network.
+//! After joining, it then queries the network layer for gossipping information. This information contains topics the node is currently subscribed to such as the peers that node 2 knows (which is node 1) and the network they are a part of.
+//! The peers that have been blacklisted are also returned.
+//!
+//! In this test, we test that node 1 is a part of the mesh network that node 2 is subscribed to.
+//!
+//! To run this test, first run the "subscribe" node:
+//!
+//! ```bash
+//! cargo test gossipsub_join_exit_itest_works --features=test-subscribe-node --features=tokio-runtime -- --nocapture
+//! ```
+//!
+//! Then, in another terminal, run the "query" node:
+//!
+//! ```bash
+//! cargo test gossipsub_join_exit_itest_works --features=test-query-node --features=tokio-runtime -- --nocapture
+//! ```
+//!
+//! **Publish/Subscribe tests**
+//!
+//! For this test we have a `listening` node and a `broadcast` node. The first node is setup which joins a mesh network. 
Then, node 2 is setup and connects to node 1, sleeps for a few seconds (to allow propagation of data from node 1) and then joins the network.
+//! It then joins the network that node 1 was already a part of and sends a broadcast message to every peer in the mesh network.
+//!
+//! The indicator of the success of this test is revealed in the application's event handler function which logs the message received from node 2.
+//!
+//! To run this test, first run the "listening" node in one terminal:
+//!
+//! ```bash
+//! cargo test gossipsub_message_itest_works --features=test-listening-node --features=tokio-runtime -- --nocapture
+//! ```
+//!
+//! Then run the "broadcast" node in another terminal:
+//!
+//! ```bash
+//! cargo test gossipsub_message_itest_works --features=test-broadcast-node --features=tokio-runtime -- --nocapture
+//! ```
+//!
diff --git a/swarm_nl/src/util.rs b/swarm_nl/src/util.rs
index 0b636caf3..1ecbd12f0 100644
--- a/swarm_nl/src/util.rs
+++ b/swarm_nl/src/util.rs
@@ -1,6 +1,8 @@
-/// Copyright (c) 2024 Algorealm
-///
-/// This file is part of the SwarmNl library.
+// Copyright 2024 Algorealm
+// Apache 2.0 License
+
+//! Utility helper functions for reading from and writing to `.ini` config files.
+
 use crate::{prelude::*, setup::BootstrapConfig};
 use base58::FromBase58;
 use ini::Ini;
@@ -9,10 +11,9 @@ use std::{collections::HashMap, str::FromStr};
 /// Read an INI file containing bootstrap config information. 
pub fn read_ini_file(file_path: &str) -> SwarmNlResult { - // read the file from disk + // Read the file from disk if let Ok(config) = Ini::load_from_file(file_path) { - // ports section - // get TCP port & UDP port + // Get TCP port & UDP port let (tcp_port, udp_port) = if let Some(section) = config.section(Some("ports")) { ( section @@ -27,44 +28,53 @@ pub fn read_ini_file(file_path: &str) -> SwarmNlResult { .unwrap_or_default(), ) } else { - // fallback to default ports + // Fallback to default ports (MIN_PORT, MAX_PORT) }; - // try to read the serialized keypair + // Try to read the serialized keypair // auth section let (key_type, mut serialized_keypair) = if let Some(section) = config.section(Some("auth")) { ( - // get the preferred key type + // Get the preferred key type section.get("crypto").unwrap_or_default(), - // get serialized keypair + // Get serialized keypair string_to_vec::(section.get("protobuf_keypair").unwrap_or_default()), ) } else { Default::default() }; - // now, move onto reading the bootnodes if any + // Now, move onto reading the bootnodes if any let section = config .section(Some("bootstrap")) .ok_or(SwarmNlError::BoostrapFileReadError(file_path.to_owned()))?; - // get the provided bootnodes + // Get the provided bootnodes let boot_nodes = string_to_hashmap(section.get("boot_nodes").unwrap_or_default()); + // Now, move onto reading the blacklist if any + let section = config + .section(Some("blacklist")) + .ok_or(SwarmNlError::BoostrapFileReadError(file_path.to_owned()))?; + + // Blacklist + let blacklist = string_to_vec(section.get("blacklist").unwrap_or_default()); + Ok(BootstrapConfig::new() .generate_keypair_from_protobuf(key_type, &mut serialized_keypair) .with_bootnodes(boot_nodes) + .with_blacklist(blacklist) .with_tcp(tcp_port) .with_udp(udp_port)) } else { - // return error + // Return error Err(SwarmNlError::BoostrapFileReadError(file_path.to_owned())) } } -/// write value into config file +/// Write value into config file. 
pub fn write_config(section: &str, key: &str, new_value: &str, file_path: &str) -> bool { if let Ok(mut conf) = Ini::load_from_file(file_path) { // Set a value: @@ -76,7 +86,7 @@ pub fn write_config(section: &str, key: &str, new_value: &str, file_path: &str) false } -/// Parse string into a vector +/// Parse string into a vector. fn string_to_vec(input: &str) -> Vec { input .trim_matches(|c| c == '[' || c == ']') @@ -88,7 +98,7 @@ fn string_to_vec(input: &str) -> Vec { }) } -/// Parse string into a hashmap +/// Parse string into a hashmap. fn string_to_hashmap(input: &str) -> HashMap { input .trim_matches(|c| c == '[' || c == ']') @@ -105,7 +115,7 @@ fn string_to_hashmap(input: &str) -> HashMap { }) } -/// Convert PeerId string to peerId +/// Convert a peer ID string to [`PeerId`]. pub fn string_to_peer_id(peer_id_string: &str) -> Option { PeerId::from_bytes(&peer_id_string.from_base58().unwrap_or_default()).ok() } @@ -119,12 +129,11 @@ mod tests { use crate::prelude::{MAX_PORT, MIN_PORT}; use std::fs; - // define custom ports for testing + // Define custom ports for testing const CUSTOM_TCP_PORT: Port = 49666; const CUSTOM_UDP_PORT: Port = 49852; - // helper to create an INI file without a static keypair - // here we specify a valid range for ports + // Helper to create an INI file without a static keypair and a valid range for ports. 
fn create_test_ini_file_without_keypair(file_path: &str) { let mut config = Ini::new(); config @@ -136,11 +145,12 @@ mod tests { "boot_nodes", "[12D3KooWGfbL6ZNGWqS11MoptH2A7DB1DG6u85FhXBUPXPVkVVRq:/ip4/192.168.1.205/tcp/1509]", ); - // write config to a new INI file + config.with_section(Some("blacklist")).set("blacklist", "[]"); + // Write config to a new INI file config.write_to_file(file_path).unwrap_or_default(); } - // helper to create an INI file with keypair + // Helper to create an INI file with keypair fn create_test_ini_file_with_keypair(file_path: &str, key_type: KeyType) { let mut config = Ini::new(); @@ -176,30 +186,32 @@ mod tests { "[12D3KooWGfbL6ZNGWqS11MoptH2A7DB1DG6u85FhXBUPXPVkVVRq:/ip4/192.168.1.205/tcp/1509]", ); - // write config to the new INI file + config.with_section(Some("blacklist")).set("blacklist", "[]"); + + // Write config to the new INI file config.write_to_file(file_path).unwrap_or_default(); } - // helper to clean up temp file + // Helper to clean up temp file fn clean_up_temp_file(file_path: &str) { fs::remove_file(file_path).unwrap_or_default(); } #[test] fn file_does_not_exist() { - // try to read a non-existent file should panic + // Try to read a non-existent file should panic assert_eq!(read_ini_file("non_existent_file.ini").is_err(), true); } #[test] fn write_config_works() { - // create temp INI file + // Create temp INI file let file_path = "temp_test_write_ini_file.ini"; - // create INI file without keypair for simplicity + // Create INI file without keypair for simplicity create_test_ini_file_without_keypair(file_path); - // try to write some keypair to the INI file + // Try to write some keypair to the INI file let add_keypair = write_config( "auth", "serialized_keypair", @@ -217,17 +229,17 @@ mod tests { assert_eq!(add_keypair, true); - // delete temp file + // Delete temp file clean_up_temp_file(file_path); } - // read without keypair file + // Read without keypair file #[test] fn 
read_ini_file_with_custom_setup_works() { - // create temp INI file + // Create temp INI file let file_path = "temp_test_read_ini_file_custom.ini"; - // we've set our ports to tcp=49666 and upd=49852 + // We've set our ports to tcp=49666 and udp=49852 create_test_ini_file_without_keypair(file_path); let ini_file_result: BootstrapConfig = read_ini_file(file_path).unwrap(); @@ -235,34 +247,33 @@ mod tests { assert_eq!(ini_file_result.ports().0, CUSTOM_TCP_PORT); assert_eq!(ini_file_result.ports().1, CUSTOM_UDP_PORT); - // checking for the default keypair that's generated (ED25519) if none are provided + // Checking for the default keypair that's generated (ED25519) if none are provided assert_eq!(ini_file_result.keypair().key_type(), KeyType::Ed25519); - // delete temp file + // Delete temp file clean_up_temp_file(file_path); } #[test] fn read_ini_file_with_default_setup_works() { - // create INI file + // Create INI file let file_path = "temp_test_ini_file_default.ini"; create_test_ini_file_with_keypair(file_path, KeyType::Ecdsa); - // assert that the content has no [port] section + // Assert that the content has no [port] section let ini_file_content = fs::read_to_string(file_path).unwrap(); assert!(!ini_file_content.contains("[port]")); - // but when we call read_ini_file it generates a BootstrapConfig with default ports from - // crate::prelude::{MIN_PORT, MAX_PORT} + // But when we call read_ini_file it generates a BootstrapConfig with default ports let ini_file_result = read_ini_file(file_path).unwrap(); assert_eq!(ini_file_result.ports().0, MIN_PORT); assert_eq!(ini_file_result.ports().1, MAX_PORT); - // checking that the default keypair matches the configured keytype + // Checking that the default keypair matches the configured keytype assert_eq!(ini_file_result.keypair().key_type(), KeyType::Ecdsa); - // delete temp file + // Delete temp file clean_up_temp_file(file_path); } @@ -299,4 +310,42 @@ mod tests { assert_eq!(result, expected); } + + #[test] + fn 
bootstrap_config_blacklist_works() { + let file_path = "bootstrap_config_blacklist_test.ini"; + + // Create a new INI file with a blacklist + let mut config = Ini::new(); + config + .with_section(Some("ports")) + .set("tcp", CUSTOM_TCP_PORT.to_string()) + .set("udp", CUSTOM_UDP_PORT.to_string()); + + config.with_section(Some("bootstrap")).set( + "boot_nodes", + "[12D3KooWGfbL6ZNGWqS11MoptH2A7DB1DG6u85FhXBUPXPVkVVRq:/ip4/192.168.1.205/tcp/1509]", + ); + + let blacklist_peer_id: PeerId = PeerId::random(); + let black_list_peer_id_string = format!("[{}]", blacklist_peer_id.to_base58()); + + config + .with_section(Some("blacklist")) + .set("blacklist", black_list_peer_id_string); + + // Write config to a new INI file + config.write_to_file(file_path).unwrap_or_default(); + + // Read the new file + let ini_file_result: BootstrapConfig = read_ini_file(file_path).unwrap(); + + assert_eq!(ini_file_result.blacklist().list.len(), 1); + assert!(ini_file_result + .blacklist() + .list + .contains(&blacklist_peer_id)); + + fs::remove_file(file_path).unwrap_or_default(); + } } diff --git a/tutorials/README.md b/tutorials/README.md new file mode 100644 index 000000000..a0b21671a --- /dev/null +++ b/tutorials/README.md @@ -0,0 +1,8 @@ +# Tutorials + +A collection of tutorials to demonstrate how to use the Swarm Networking Library.
+We will build the following using two nodes that are built with SwarmNL and interact with each other to fulfill the application goals. + +* Echo server +* File sharing application +* Simple game \ No newline at end of file diff --git a/tutorials/echo_server/Cargo.toml b/tutorials/echo_server/Cargo.toml new file mode 100644 index 000000000..634def4fa --- /dev/null +++ b/tutorials/echo_server/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "echo_server" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +swarm_nl = { path = "../../swarm_nl", features = ["tokio-runtime"] } +tokio = { version = "1.37.0", features = ["full"] } \ No newline at end of file diff --git a/tutorials/echo_server/src/main.rs b/tutorials/echo_server/src/main.rs new file mode 100644 index 000000000..5d51365d3 --- /dev/null +++ b/tutorials/echo_server/src/main.rs @@ -0,0 +1,94 @@ +/// Copyright (c) Algorealm 2024 +/// +/// This crate demonstrates how to use SwarmNl. Here, we build a simple echo server that +/// receives input from stdin, writes it to the network layer and then receives it +/// back from the network. +/// +/// To run this example, cd into the root of the repository and run: +/// ```bash +/// cargo run +/// ``` +/// +/// Then type into the terminal and watch your input get echoed back to you. + +use swarm_nl::core::{AppData, AppResponse, Core, CoreBuilder, EventHandler}; +use swarm_nl::setup::BootstrapConfig; +use swarm_nl::{PeerId, Port}; +use std::io::{self, BufRead}; + +/// Our application state. +#[derive(Clone)] +struct EchoServer; + +/// Define custom handler for application state. 
+impl EventHandler for EchoServer { + // We're just echoing the data back + fn rpc_incoming_message_handled(&mut self, data: Vec>) -> Vec> { + println!("Recvd incoming RPC: {:?}", data); + data + } + + // Handle the incoming gossip message + fn gossipsub_incoming_message_handled(&mut self, source: PeerId, data: Vec) { + println!("Recvd incoming gossip: {:?}", data); + } +} + +/// Setup first node using default config. +pub async fn setup_node(ports: (Port, Port)) -> Core { + // Application state + let state = EchoServer; + + // Use the default config parameters and override a few configurations e.g ports, keypair + let config = BootstrapConfig::default() + .with_tcp(ports.0) + .with_udp(ports.1); + + // Set up network + CoreBuilder::with_config(config, state) + .build() + .await + .unwrap() +} + +// Run server +#[tokio::main] +async fn main() { + let stdin = io::stdin(); + let mut handle = stdin.lock(); + + // Create node + let mut node = setup_node((55000, 46000)).await; + + println!("Welcome to the Echo-Server SwarmNl example."); + println!("Type into the terminal and watch it get echoed back to you."); + + println!("Enter your input (Ctrl+D to end):"); + + // Create a buffer to store each line + let mut buffer = String::new(); + + // Loop to read lines from stdin + while let Ok(bytes_read) = handle.read_line(&mut buffer) { + // If no bytes were read, we've reached EOF + if bytes_read == 0 { + break; + } + + let input = buffer.trim(); + + // Prepare an Echo request to send to the network + let echo_request = AppData::Echo(input.to_string()); + + // Send request to the network layer and retrieve response + if let Ok(result) = node.query_network(echo_request).await { + // Echo to stdout + if let AppResponse::Echo(output) = result { + println!("--> {}", output); + } + } + + // Clear the buffer for the next line + buffer.clear(); + } +} diff --git a/tutorials/file_sharing_app/Cargo.toml b/tutorials/file_sharing_app/Cargo.toml new file mode 100644 index 
000000000..855f4e983 --- /dev/null +++ b/tutorials/file_sharing_app/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "file_sharing_app" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +rust-ini = "0.20.0" +swarm_nl = { path = "../swarm_nl", features = ["tokio-runtime"] } +tokio = { version = "1.37.0", features = ["full"] } + +[features] +first-node = [] +second-node = [] \ No newline at end of file diff --git a/tutorials/file_sharing_app/src/main.rs b/tutorials/file_sharing_app/src/main.rs new file mode 100644 index 000000000..2511a236a --- /dev/null +++ b/tutorials/file_sharing_app/src/main.rs @@ -0,0 +1,271 @@ +use std::{ + collections::HashMap, + fs::File, + io::{self, BufRead, Read}, + num::NonZeroU32, + time::Duration, +}; + +use swarm_nl::{ + core::{AppData, AppResponse, Core, CoreBuilder, EventHandler}, + setup::BootstrapConfig, + ConnectedPoint, ConnectionId, Keypair, ListenerId, Multiaddr, PeerId, Port, +}; +/// Copyright (c) Algorealm 2024 + +/// This crate demonstrates how to use SwarmNl. Here, we build a simple file sharing application +/// using two nodes. One nodes writes a record to the DHT and specifies itself as a provider for a +/// file it has locally. The other node reads the DHT and then uses an RPC to fetch the file from +/// the first peer. + +/// The key we're writing to the DHT +pub const KADEMLIA_KEY: &str = "bootstrap_config.ini"; // File name +pub const KADEMLIA_VALUE: &str = "bootstrap_config.ini"; // Location on fs (it is in the same directory as our binary) + +/// Our test keypair for node 1. 
It is always deterministic, so that node 2 can always connect to it +/// at boot time +pub const PROTOBUF_KEYPAIR: [u8; 68] = [ + 8, 1, 18, 64, 34, 116, 25, 74, 122, 174, 130, 2, 98, 221, 17, 247, 176, 102, 205, 3, 27, 202, + 193, 27, 6, 104, 216, 158, 235, 38, 141, 58, 64, 81, 157, 155, 36, 193, 50, 147, 85, 72, 64, + 174, 65, 132, 232, 78, 231, 224, 88, 38, 55, 78, 178, 65, 42, 97, 39, 152, 42, 164, 148, 159, + 36, 170, 109, 178, +]; + +/// Node 1 wait time (for node 2 to initiate connection). +/// This is useful because we need at least one connected peer (Quorum) to successfully write to the DHT +pub const NODE_1_WAIT_TIME: u64 = 3; + +/// Node 2 wait time (for node 1 to write to the DHT). +pub const NODE_2_WAIT_TIME: u64 = 5; + +/// Application State +#[derive(Clone)] +struct FileServer; + +/// Handle network events +impl EventHandler for FileServer { + fn new_listen_addr( + &mut self, + local_peer_id: PeerId, + _listener_id: ListenerId, + addr: Multiaddr, + ) { + // announce interfaces we're listening on + println!("Peer id: {}", local_peer_id); + println!("We're listening on the {}", addr); + } + + fn connection_established( + &mut self, + peer_id: PeerId, + _connection_id: ConnectionId, + _endpoint: &ConnectedPoint, + _num_established: NonZeroU32, + _established_in: Duration, + ) { + println!("Connection established with peer: {:?}", peer_id); + } + + // We need to handle the incoming rpc here. 
+ // What we're going to do is to look at our file system for the file specified in the rpc data and return its binary content + fn rpc_incoming_message_handled(&mut self, data: Vec>) -> Vec> { + println!("Received incoming RPC: {:?}", data); + + // Extract the file name from the incoming data + let file_name = String::from_utf8_lossy(&data[0]); + let file_name = file_name.trim(); // Trim any potential whitespace + + // Read the file content + let mut file_content = Vec::new(); + match File::open(&file_name) { + Ok(mut file) => { + match file.read_to_end(&mut file_content) { + Ok(_) => { + println!("File read successfully: {}", file_name); + } + Err(e) => { + println!("Failed to read file content: {}", e); + return vec![b"Error: Failed to read file content".to_vec()]; + } + } + } + Err(e) => { + println!("Failed to open file: {}", e); + return vec![b"Error: Failed to open file".to_vec()]; + } + } + + // Return the file content as a Vec> + vec![file_content] + } + + // Handle the incoming gossip message + fn gossipsub_incoming_message_handled(&mut self, source: PeerId, data: Vec) { + println!("Recvd incoming gossip: {:?}", data); + } + + fn kademlia_put_record_success(&mut self, key: Vec) { + println!("Record successfully written to DHT. Key: {:?}", key); + } +} + +/// Used to create a deterministic node 1. +pub async fn setup_node_1(ports: (Port, Port)) -> Core { + let mut protobuf = PROTOBUF_KEYPAIR.clone(); + setup_core_builder_1(&mut protobuf, ports).await +} + +/// Setup node 2. 
+pub async fn setup_node_2( + node_1_ports: (Port, Port), + ports: (Port, Port), +) -> (Core, PeerId) { + let app_state = FileServer; + + // The PeerId of the node 1 + let peer_id = Keypair::from_protobuf_encoding(&PROTOBUF_KEYPAIR) + .unwrap() + .public() + .to_peer_id(); + + // Set up node 1 as bootnode, so we can connect to it immediately we start up + let mut bootnode = HashMap::new(); + bootnode.insert( + peer_id.to_base58(), + format!("/ip4/127.0.0.1/tcp/{}", node_1_ports.0), + ); + + // First, we want to configure our node (we'll be generating a new identity) + let config = BootstrapConfig::new() + .with_bootnodes(bootnode) + .with_tcp(ports.0) + .with_udp(ports.1); + + // Set up network + ( + CoreBuilder::with_config(config, app_state) + .build() + .await + .unwrap(), + peer_id, + ) +} + +pub async fn setup_core_builder_1(buffer: &mut [u8], ports: (u16, u16)) -> Core { + let app_state = FileServer; + + // First, we want to configure our node by specifying a static keypair (for easy connection by + // node 2) + let config = BootstrapConfig::default() + .generate_keypair_from_protobuf("ed25519", buffer) + .with_tcp(ports.0) + .with_udp(ports.1); + + // Set up network + CoreBuilder::with_config(config, app_state) + .build() + .await + .unwrap() +} + +/// Run node 1 +async fn run_node_1() { + // Set up node + let mut node = setup_node_1((49666, 49606)).await; + + // Sleep for a few seconds to allow node 2 to reach out + // async_std::task::sleep(Duration::from_secs(NODE_1_WAIT_TIME)).await; + tokio::time::sleep(Duration::from_secs(NODE_1_WAIT_TIME)).await; + + // What are we writing to the DHT? 
+ // A file we have on the fs and the location of the file, so it can be easily retrieved + + // Prepare a query to write to the DHT + let (key, value, expiration_time, explicit_peers) = ( + KADEMLIA_KEY.as_bytes().to_vec(), + KADEMLIA_VALUE.as_bytes().to_vec(), + None, + None, + ); + + let kad_request = AppData::KademliaStoreRecord { + key, + value, + expiration_time, + explicit_peers, + }; + + // Submit query to the network + node.query_network(kad_request).await.unwrap(); + + loop {} +} + +/// Run node 2 +async fn run_node_2() { + // Set up node 2 and initiate connection to node 1 + let (mut node_2, node_1_peer_id) = setup_node_2((49666, 49606), (49667, 49607)).await; + + // Sleep for a few seconds to allow node 1 write to the DHT + // async_std::task::sleep(Duration::from_secs(NODE_2_WAIT_TIME)).await; + tokio::time::sleep(Duration::from_secs(NODE_2_WAIT_TIME)).await; + + // Prepare a query to read from the DHT + let kad_request = AppData::KademliaLookupRecord { + key: KADEMLIA_KEY.as_bytes().to_vec(), + }; + + // Submit query to the network + if let Ok(result) = node_2.query_network(kad_request).await { + // We have our response + if let AppResponse::KademliaLookupSuccess(value) = result { + println!("We will be querying this file from the remote: {}", String::from_utf8_lossy(&value)); + // Now prepare an RPC query to fetch the file from the remote node + let fetch_key = vec![vec![0]]; + + println!("<<<< io! 
>>>"); + + // prepare fetch request + let fetch_request = AppData::FetchData { + keys: fetch_key.clone(), + peer: node_1_peer_id.clone(), // The peer to query for data + }; + + println!("{:?}", fetch_request); + + // We break the flow into send and recv explicitly here + let stream_id = node_2.send_to_network(fetch_request).await.unwrap(); + + // If we used `query_network(0)`, we won't have been able to print here + println!("A fetch request has been sent to peer: {:?}", node_1_peer_id); + + node_2.recv_from_network(stream_id).await.unwrap(); + + // Poll the network for the result + if let Ok(response) = node_2.recv_from_network(stream_id).await { + if let AppResponse::FetchData(response_file) = response { + // Get the file + let file = response_file[0].clone(); + + // Convert it to string + let file_str = String::from_utf8_lossy(&file); + + // Print to stdout + println!("{}", file_str); + } + } else { + println!("An error occured"); + } + } + } +} + +// #[async_std::main] +#[tokio::main] +async fn main() { + #[cfg(feature = "first-node")] + run_node_1().await; + + #[cfg(feature = "second-node")] + run_node_2().await; +} diff --git a/tutorials/simple_game/Cargo.toml b/tutorials/simple_game/Cargo.toml new file mode 100644 index 000000000..ebb3b028d --- /dev/null +++ b/tutorials/simple_game/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "simple_game" +version = "0.1.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/tutorials/simple_game/src/main.rs b/tutorials/simple_game/src/main.rs new file mode 100644 index 000000000..a30eb952c --- /dev/null +++ b/tutorials/simple_game/src/main.rs @@ -0,0 +1,3 @@ +fn main() { + println!("Hello, world!"); +}