From c3b91c565fa8f9a879cfb2b5a84fdf1cd109e07e Mon Sep 17 00:00:00 2001
From: qima
Date: Mon, 22 Apr 2024 22:41:52 +0800
Subject: [PATCH] chore(node): lower some log levels to reduce log size

---
 sn_networking/src/record_store.rs        | 2 +-
 sn_networking/src/replication_fetcher.rs | 2 +-
 sn_node/src/node.rs                      | 4 ++--
 sn_node/src/put_validation.rs            | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs
index 1884e05786..153b790948 100644
--- a/sn_networking/src/record_store.rs
+++ b/sn_networking/src/record_store.rs
@@ -129,7 +129,7 @@ impl NodeRecordStore {
         let process_entry = |entry: &DirEntry| -> _ {
             let path = entry.path();
             if path.is_file() {
-                info!("Existing record found: {path:?}");
+                trace!("Existing record found: {path:?}");
                 // if we've got a file, lets try and read it
                 let filename = match path.file_name().and_then(|n| n.to_str()) {
                     Some(file_name) => file_name,
diff --git a/sn_networking/src/replication_fetcher.rs b/sn_networking/src/replication_fetcher.rs
index 1635bd8ea4..bc1b1d270f 100644
--- a/sn_networking/src/replication_fetcher.rs
+++ b/sn_networking/src/replication_fetcher.rs
@@ -222,7 +222,7 @@ impl ReplicationFetcher {
     pub(crate) fn next_keys_to_fetch(&mut self) -> Vec<(PeerId, RecordKey)> {
         self.prune_expired_keys_and_slow_nodes();
 
-        info!("Next to fetch....");
+        trace!("Next to fetch....");
 
         if self.on_going_fetches.len() >= MAX_PARALLEL_FETCH {
             warn!("Replication Fetcher doesn't have free fetch capacity. Currently has {} entries in queue.",
diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs
index 9abcc2aba8..c3b979caf6 100644
--- a/sn_node/src/node.rs
+++ b/sn_node/src/node.rs
@@ -369,7 +369,7 @@ impl Node {
             }
             NetworkEvent::KeysToFetchForReplication(keys) => {
                 event_header = "KeysToFetchForReplication";
-                info!("Going to fetch {:?} keys for replication", keys.len());
+                trace!("Going to fetch {:?} keys for replication", keys.len());
                 self.record_metrics(Marker::fetching_keys_for_replication(&keys));
 
                 if let Err(err) = self.fetch_replication_keys_without_wait(keys) {
@@ -756,7 +756,7 @@ fn received_valid_chunk_proof(
 ) -> Option<()> {
     if let Ok(Response::Query(QueryResponse::GetChunkExistenceProof(Ok(proof)))) = resp {
         if expected_proof.verify(&proof) {
-            debug!(
+            trace!(
                 "Got a valid ChunkProof of {key:?} from {peer:?}, during peer chunk proof check."
             );
             Some(())
diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs
index 9dcc5e459b..4199f71df8 100644
--- a/sn_node/src/put_validation.rs
+++ b/sn_node/src/put_validation.rs
@@ -263,7 +263,7 @@ impl Node {
         };
 
         // finally store the Record directly into the local storage
-        debug!("Storing chunk {chunk_name:?} as Record locally");
+        trace!("Storing chunk {chunk_name:?} as Record locally");
         self.network.put_local_record(record);
 
         self.record_metrics(Marker::ValidChunkRecordPutFromNetwork(&pretty_key));
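
Not part of the patch above: a minimal, hypothetical sketch of why these demotions reduce log size, assuming the crates log through the `tracing` macros (as the `trace!`/`debug!`/`info!` calls suggest) and that the node installs a subscriber filtered at INFO; the `tracing_subscriber` setup shown is illustrative, not the project's actual logging configuration.

// Illustrative only; depends on the `tracing` and `tracing_subscriber` crates.
use tracing::{debug, info, trace, Level};

fn main() {
    // With the maximum level set to INFO, TRACE and DEBUG events are filtered
    // out before they are formatted or written, so lines demoted from `info!`
    // or `debug!` to `trace!` stop contributing to the default log output.
    tracing_subscriber::fmt()
        .with_max_level(Level::INFO)
        .init();

    info!("recorded: at or above the INFO filter");
    debug!("dropped by the INFO filter");
    trace!("dropped by the INFO filter");
}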