Commit: added populate and explicit memory function

hunhoffe committed Dec 6, 2023
1 parent 82fd218 commit 474cf7a
Showing 2 changed files with 63 additions and 27 deletions.
kernel/tests/s11_rackscale_benchmarks.rs: 13 changes (2 additions & 11 deletions)
@@ -1137,16 +1137,11 @@ fn s11_rackscale_dynrep_userspace() {
let mut test = RackscaleRun::new("userspace-smp".to_string(), built);
test.controller_match_fn = controller_match_fn;
test.transport = transport;

-// TODO: will need to increase timeouts in the future
-//test.controller_timeout *= 2;
-//test.client_timeout *= 2;
+test.controller_timeout *= 4;
+test.client_timeout *= 4;
test.use_affinity_shmem = cfg!(feature = "affinity-shmem");
test.use_qemu_huge_pages = cfg!(feature = "affinity-shmem");
test.file_name = file_name.to_string();

-// TODO: will need to increase # of clients in the future
if is_smoke {
test.num_clients = 1;
test.cores_per_client = 1;
@@ -1156,11 +1151,7 @@ fn s11_rackscale_dynrep_userspace() {
// TODO: after change above, can just be cores_per_client
test.cores_per_client = cores_per_client - (test.num_clients + 1);
}

-// TODO: may need to increase memory in the future
test.memory = 2 * 4096;
-// TODO: may need to increase shmem size in the future?
test.shmem_size = 1024 * 2;

test.run_rackscale();
}
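A quick sketch of how the knobs above combine on the non-smoke path; every concrete number below is assumed for illustration and is not taken from the harness:

```rust
// Illustrative only: assumed inputs, not RackscaleRun's real defaults.
let cores_per_client = 8; // assumed per-machine core count
let num_clients = 2;      // assumed, set in the branch elided above

// Mirrors the assignment in the diff; per the TODO it should eventually
// collapse to plain `cores_per_client`.
let bench_cores = cores_per_client - (num_clients + 1); // 8 - 3 = 5

let memory = 2 * 4096;     // 8192, in whatever unit the harness uses
let shmem_size = 1024 * 2; // 2048, likewise
```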
usr/init/src/dynrep/mod.rs: 77 changes (61 additions & 16 deletions)
@@ -12,7 +12,7 @@ use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;

use lineup::tls2::{Environment, SchedulerControlBlock};
-use nr2::nr::{AffinityChange, Dispatch, NodeReplicated};
+use nr2::nr::{AffinityChange, Dispatch, NodeReplicated, ThreadToken};
use rawtime::Instant;
use x86::bits64::paging::VAddr;
use x86::random::rdrand64;
@@ -52,29 +52,33 @@ enum OpWr {
impl Dispatch for HashTable {
type ReadOperation<'a> = OpRd;
type WriteOperation = OpWr;
-type Response = Result<Option<u64>, ()>;
+type Response = Result<u64, ()>;

fn dispatch<'a>(&self, op: Self::ReadOperation<'a>) -> Self::Response {
match op {
-OpRd::Get(key) => {
-let val = self.map.get(&key);
-Ok(val.copied())
-}
+OpRd::Get(key) => match self.map.get(&key) {
+Some(val) => Ok(*val),
+None => Err(()),
+},
}
}

fn dispatch_mut(&mut self, op: Self::WriteOperation) -> Self::Response {
match op {
-OpWr::Put(key, val) => {
-let resp = self.map.insert(key, val);
-Ok(resp)
-}
+OpWr::Put(key, val) => match self.map.insert(key, val) {
+None => Ok(0),
+Some(_) => Err(()),
+},
}
}
}

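The Response change above is the behavioral core of this hunk: a lookup miss or a duplicate insert now surfaces as Err(()) instead of Ok(None) / Ok(Some(_)). A minimal sketch of what a call site observes, reusing the register/execute_mut calls that appear later in this file (key/value numbers are illustrative):

```rust
// Sketch, not part of the commit: the new Result<u64, ()> responses.
let ttkn = replica.register(0).unwrap(); // token for replica 0
assert_eq!(replica.execute_mut(OpWr::Put(7, 42), ttkn), Ok(0)); // fresh insert
assert_eq!(replica.execute(OpRd::Get(7), ttkn), Ok(42));        // hit
assert_eq!(replica.execute(OpRd::Get(8), ttkn), Err(()));       // miss
// Note: a second Put still overwrites the entry before reporting Err(()).
assert_eq!(replica.execute_mut(OpWr::Put(7, 43), ttkn), Err(()));
```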
-fn run_bench(mid: usize, core_id: usize, replica: Arc<NodeReplicated<HashTable>>) {
-let ttkn = replica.register(mid - 1).unwrap();
+fn run_bench(
+mid: usize,
+core_id: usize,
+ttkn: ThreadToken,
+replica: Arc<NodeReplicated<HashTable>>,
+) {
let mut random_key: u64 = 0;
let batch_size = 64;
let duration = 5;
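run_bench's body is truncated in this view and is mostly untouched by the commit. Purely as a hypothetical reconstruction from the locals above (random_key, batch_size, duration) and the rdrand64/Instant imports already in the file, the measurement loop plausibly has this shape:

```rust
// Hypothetical sketch of the interior of run_bench; the real loop may
// differ. Uses run_bench's own parameters (mid, core_id, ttkn, replica).
let start = Instant::now();
let mut iterations = 0u64;
while start.elapsed().as_secs() < duration {
    for _ in 0..batch_size {
        unsafe { rdrand64(&mut random_key) };  // hardware RNG picks a key
        random_key %= NUM_ENTRIES;             // stay in the populated range
        let _ = replica.execute(OpRd::Get(random_key), ttkn);
        iterations += 1;
    }
}
log::info!("core {} on machine {} finished {} gets", core_id, mid, iterations);
```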
@@ -98,17 +102,56 @@ fn run_bench(mid: usize, core_id: usize, replica: Arc<NodeReplicated<HashTable>>

unsafe extern "C" fn bencher_trampoline(args: *mut u8) -> *mut u8 {
let current_gtid = vibrio::syscalls::System::core_id().expect("Can't get core id");
+let hwthreads = vibrio::syscalls::System::threads().expect("Can't get system topology");
let mid = kpi::system::mid_from_gtid(current_gtid);

+// TODO: use this to change # of replicas used
+let replica_num = mid - 1; // 1

let replica: Arc<NodeReplicated<HashTable>> =
Arc::from_raw(args as *const NodeReplicated<HashTable>);
+let ttkn = replica.register(replica_num).unwrap();

+let mut max_gtid = current_gtid;
+// Figure out how many clients there are - this will determine how we
+// divide the key range across machines and cores.
+let mut nnodes = 0;
+for hwthread in hwthreads.iter() {
+// mid == machine id, otherwise referred to as client id
+let mid = kpi::system::mid_from_gtid(hwthread.id);
+if mid > nnodes {
+nnodes = mid;
+}
+if hwthread.id > max_gtid {
+max_gtid = hwthread.id;
+}
+}
+let cores_per_machine = (hwthreads.len() / nnodes) as u64;
+
+let populate_key_start = (NUM_ENTRIES / hwthreads.len() as u64)
+* (cores_per_machine * (mid as u64 - 1)
++ (kpi::system::mtid_from_gtid(current_gtid)) as u64);
+let mut populate_key_end = populate_key_start + (NUM_ENTRIES / hwthreads.len() as u64);
+if current_gtid == max_gtid {
+populate_key_end = NUM_ENTRIES;
+}
+for key in populate_key_start..populate_key_end {
+replica
+.execute_mut(OpWr::Put(key, NUM_ENTRIES - key), ttkn)
+.unwrap();
+}
+log::info!(
+"populated key region [{}-{})",
+populate_key_start,
+populate_key_end
+);
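The index arithmetic above hands every hardware thread in the cluster an equal slice of [0, NUM_ENTRIES). A self-contained worked example with assumed sizes (not the benchmark's real constants):

```rust
// Assumed: NUM_ENTRIES = 1200, 2 machines x 3 cores, so 6 threads total
// and a slice of 1200 / 6 = 200 keys per thread.
fn partition(num_entries: u64, threads: u64, cores_per_machine: u64,
             mid: u64, mtid: u64) -> (u64, u64) {
    let slice = num_entries / threads;
    let start = slice * (cores_per_machine * (mid - 1) + mtid);
    // The thread with the highest gtid extends its end to num_entries,
    // so integer division can't leave a tail of keys unpopulated.
    (start, start + slice)
}

fn main() {
    // Second core (mtid = 1) on the second machine (mid = 2):
    assert_eq!(partition(1200, 6, 3, 2, 1), (800, 1000)); // keys [800, 1000)
}
```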

// Synchronize with all cores
POOR_MANS_BARRIER.fetch_sub(1, Ordering::Release);
while POOR_MANS_BARRIER.load(Ordering::Acquire) != 0 {
core::hint::spin_loop();
}

-run_bench(mid, current_gtid, replica.clone());
+run_bench(mid, current_gtid, ttkn, replica.clone());
ptr::null_mut()
}
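The synchronization is a countdown barrier: the counter is presumably seeded with the participant count by whichever thread spawns the benchers, each arrival decrements it once, and everyone spins until it hits zero. A standalone sketch of the same pattern (not the kernel's actual code):

```rust
use core::sync::atomic::{AtomicUsize, Ordering};

// Seeded before spawning, e.g. BARRIER.store(n_threads, Ordering::SeqCst).
static BARRIER: AtomicUsize = AtomicUsize::new(0);

fn barrier_wait() {
    // Release publishes this thread's populate writes to whoever sees
    // the counter reach zero; Acquire pairs with it on the way out.
    BARRIER.fetch_sub(1, Ordering::Release);
    while BARRIER.load(Ordering::Acquire) != 0 {
        core::hint::spin_loop();
    }
}
```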

@@ -130,7 +173,9 @@ pub fn userspace_dynrep_test() {
log::info!("Found {:?} client machines", nnodes);

// Create data structure, with as many replicas as there are clients (assuming 1 numa node per client)
-let num_replicas = NonZeroUsize::new(nnodes).unwrap();
+// TODO: change this to change number of replicas
+let num_replicas = NonZeroUsize::new(nnodes).unwrap(); // NonZeroUsize::new(1).unwrap();

let replicas = Arc::new(
NodeReplicated::<HashTable>::new(num_replicas, |afc: AffinityChange| {
log::trace!("Got AffinityChange: {:?}", afc);
@@ -139,14 +184,14 @@ pub fn userspace_dynrep_test() {
let mut affinity = (*ALLOC_AFFINITY).lock();
let old_affinity = *affinity;
*affinity = r;
log::info!("Set alloc affinity to {:?}", r);
log::trace!("Set alloc affinity to {:?}", r + 1);
return old_affinity;
}
AffinityChange::Revert(orig) => {
//pcm.set_mem_affinity(orig).expect("Can't set affinity");
let mut affinity = (*ALLOC_AFFINITY).lock();
*affinity = orig;
log::info!("Restored alloc affinity to {:?}", orig);
log::trace!("Restored alloc affinity to {:?}", orig + 1);
return 0;
}
}
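One subtlety the closure above depends on: the old affinity returned from the Replica(r) arm evidently comes back as the orig argument of the later Revert call, so the callback needs no bookkeeping of its own. A schematic of that protocol as this file uses it (illustrative, mirroring the code above):

```rust
// Schematic only; ALLOC_AFFINITY is the file's spin-locked allocator knob.
fn affinity_cb(afc: AffinityChange) -> usize {
    match afc {
        AffinityChange::Replica(r) => {
            let mut affinity = (*ALLOC_AFFINITY).lock();
            let old = *affinity;
            *affinity = r; // allocations now land on replica r's node
            old            // nr2 hands this back as Revert(old)
        }
        AffinityChange::Revert(orig) => {
            *(*ALLOC_AFFINITY).lock() = orig; // restore saved affinity
            0 // return value unused on the revert path
        }
    }
}
```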
