diff --git a/hwtracer/src/perf/collect.c b/hwtracer/src/perf/collect.c
index e633dcca0..af5a252c4 100644
--- a/hwtracer/src/perf/collect.c
+++ b/hwtracer/src/perf/collect.c
@@ -24,7 +24,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
@@ -44,17 +43,6 @@
 // The bit in the IA32_RTIT_CTL MSR that disables compressed returns.
 #define IA32_RTIT_CTL_DISRETC 1 << 11
 
-/*
- * The thread's perf file descriptor and its associated underlying `mmap(2)`
- * regions. The file descriptor is re-used for subsequent trace collections for
- * the same thread.
- *
- * FIXME: These leak when a thread dies.
- */
-static thread_local void *cache_base_buf = NULL;
-static thread_local void *cache_aux_buf = NULL;
-static thread_local int cache_perf_fd = -1;
-
 enum hwt_cerror_kind {
   hwt_cerror_unused = 0,
   hwt_cerror_unknown = 1,
@@ -530,9 +518,7 @@ hwt_perf_init_collector(struct hwt_perf_collector_config *tr_conf,
   tr_ctx->perf_fd = -1;
 
   // Obtain a file descriptor through which to speak to perf.
-  if (cache_perf_fd == -1)
-    cache_perf_fd = open_perf(tr_conf->aux_bufsize, err);
-  tr_ctx->perf_fd = cache_perf_fd;
+  tr_ctx->perf_fd = open_perf(tr_conf->aux_bufsize, err);
   if (tr_ctx->perf_fd == -1) {
     hwt_set_cerr(err, hwt_cerror_errno, errno);
     failing = true;
@@ -562,10 +548,8 @@ hwt_perf_init_collector(struct hwt_perf_collector_config *tr_conf,
   // data_bufsize'.
   int page_size = getpagesize();
   tr_ctx->base_bufsize = (1 + tr_conf->data_bufsize) * page_size;
-  if (!cache_base_buf)
-    cache_base_buf = mmap(NULL, tr_ctx->base_bufsize, PROT_WRITE, MAP_SHARED,
+  tr_ctx->base_buf = mmap(NULL, tr_ctx->base_bufsize, PROT_WRITE, MAP_SHARED,
                           tr_ctx->perf_fd, 0);
-  tr_ctx->base_buf = cache_base_buf;
   if (tr_ctx->base_buf == MAP_FAILED) {
     hwt_set_cerr(err, hwt_cerror_errno, errno);
     failing = true;
@@ -581,10 +565,8 @@ hwt_perf_init_collector(struct hwt_perf_collector_config *tr_conf,
   // Allocate the AUX buffer.
   //
   // Mapped R/W so as to have a saturating ring buffer.
-  if (!cache_aux_buf)
-    cache_aux_buf = mmap(NULL, base_header->aux_size, PROT_READ | PROT_WRITE,
+  tr_ctx->aux_buf = mmap(NULL, base_header->aux_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED, tr_ctx->perf_fd, base_header->aux_offset);
-  tr_ctx->aux_buf = cache_aux_buf;
   if (tr_ctx->aux_buf == MAP_FAILED) {
     hwt_set_cerr(err, hwt_cerror_errno, errno);
     failing = true;
@@ -752,6 +734,16 @@ bool hwt_perf_free_collector(struct hwt_perf_ctx *tr_ctx,
                              struct hwt_cerror *err) {
   int ret = true;
+  if ((tr_ctx->aux_buf) &&
+      (munmap(tr_ctx->aux_buf, tr_ctx->aux_bufsize) == -1)) {
+    hwt_set_cerr(err, hwt_cerror_errno, errno);
+    ret = false;
+  }
+  if ((tr_ctx->base_buf) &&
+      (munmap(tr_ctx->base_buf, tr_ctx->base_bufsize) == -1)) {
+    hwt_set_cerr(err, hwt_cerror_errno, errno);
+    ret = false;
+  }
   if (tr_ctx->stop_fds[1] != -1) {
     // If the write end of the pipe is still open, the thread is still running.
     close(tr_ctx->stop_fds[1]); // signals thread to stop.
@@ -763,6 +755,10 @@ bool hwt_perf_free_collector(struct hwt_perf_ctx *tr_ctx,
   if (tr_ctx->stop_fds[0] != -1) {
     close(tr_ctx->stop_fds[0]);
   }
+  if (tr_ctx->perf_fd >= 0) {
+    close(tr_ctx->perf_fd);
+    tr_ctx->perf_fd = -1;
+  }
   if (tr_ctx != NULL) {
     free(tr_ctx);
   }
diff --git a/hwtracer/src/pt/ykpt/parser.rs b/hwtracer/src/pt/ykpt/parser.rs
index 0841ee022..162ca4b4b 100644
--- a/hwtracer/src/pt/ykpt/parser.rs
+++ b/hwtracer/src/pt/ykpt/parser.rs
@@ -11,6 +11,8 @@ use super::packets::*;
 
 #[derive(Clone, Copy, Debug)]
 enum PacketParserState {
+    /// Initial state, waiting for a PSB packet.
+    Init,
     /// The "normal" decoding state.
     Normal,
     /// We are decoding a PSB+ sequence.
@@ -27,6 +29,7 @@ impl PacketParserState {
         // OPT: The order below is a rough guess based on what limited traces I've seen. Benchmark
         // and optimise.
         match self {
+            Self::Init => &[PacketKind::PSB],
             Self::Normal => &[
                 PacketKind::ShortTNT,
                 PacketKind::PAD,
@@ -60,6 +63,7 @@ impl PacketParserState {
     /// kind of packet.
     fn transition(&mut self, pkt_kind: PacketKind) {
         let new = match (*self, pkt_kind) {
+            (Self::Init, PacketKind::PSB) => Self::PSBPlus,
             (Self::Normal, PacketKind::PSB) => Self::PSBPlus,
             (Self::PSBPlus, PacketKind::PSBEND) => Self::Normal,
             _ => return, // No state transition.
@@ -105,7 +109,7 @@ impl<'t> PacketParser<'t> {
     pub(super) fn new(bytes: &'t [u8]) -> Self {
         Self {
             bits: BitSlice::from_slice(bytes),
-            state: PacketParserState::Normal,
+            state: PacketParserState::Init,
             prev_tip: 0,
         }
     }
@@ -228,7 +232,6 @@ impl Iterator for PacketParser<'_> {
 mod tests {
     use super::{super::packets::*, PacketParser};
     use crate::{trace_closure, work_loop, TracerBuilder, TracerKind};
-    use std::hint::black_box;
 
     /// Parse the packets of a small trace, checking the basic structure of the decoded trace.
     #[test]
@@ -270,22 +273,6 @@ mod tests {
         assert!(matches!(ts, TestState::SawPacketGenDisable));
     }
 
-    /// Checks PT packet streams make sense when a perf fd is re-used.
-    #[test]
-    fn decode_many() {
-        let tc = TracerBuilder::new()
-            .tracer_kind(TracerKind::PT(crate::perf::PerfCollectorConfig::default()))
-            .build()
-            .unwrap();
-        for _ in 0..50 {
-            let trace = trace_closure(&tc, || work_loop(3));
-            // Force full-decoding of the trace.
-            for p in PacketParser::new(trace.bytes()) {
-                let _ = black_box(p);
-            }
-        }
-    }
-
     /// Test target IP decompression when the `IPBytes = 0b000`.
     #[test]
     fn ipbytes_decompress_000() {
diff --git a/tests/c/early_return_fall_out.c b/tests/c/early_return_fall_out.c
new file mode 100644
index 000000000..eff68a8b4
--- /dev/null
+++ b/tests/c/early_return_fall_out.c
@@ -0,0 +1,67 @@
+// Run-time:
+//   env-var: YKD_LOG_IR=-:jit-post-opt
+//   env-var: YKD_SERIALISE_COMPILATION=1
+//   env-var: YK_LOG=4
+//   stderr:
+//     3
+//     2
+//     yk-jit-event: start-tracing
+//     1
+//     yk-jit-event: stop-tracing-early-return
+//     return
+//     yk-jit-event: start-tracing
+//     3
+//     yk-jit-event: stop-tracing
+//     ...
+//     2
+//     yk-jit-event: enter-jit-code
+//     1
+//     yk-jit-event: deoptimise
+//     return
+//     exit
+
+// Check that an early return caused by falling out of the interpreter loop is
+// handled correctly.
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+void loop(YkMT *, YkLocation *);
+
+void loop(YkMT *mt, YkLocation *loc) {
+  int res = 9998;
+  int i = 3;
+  NOOPT_VAL(res);
+  NOOPT_VAL(i);
+  while (i > 0) {
+    yk_mt_control_point(mt, loc);
+    fprintf(stderr, "%d\n", i);
+    i--;
+  }
+  yk_mt_early_return(mt);
+  fprintf(stderr, "return\n");
+  NOOPT_VAL(res);
+}
+
+int main(int argc, char **argv) {
+  YkMT *mt = yk_mt_new(NULL);
+  yk_mt_hot_threshold_set(mt, 2);
+  YkLocation loc = yk_location_new();
+
+  int res = 9998;
+  int i = 4;
+  NOOPT_VAL(loc);
+  NOOPT_VAL(res);
+  NOOPT_VAL(i);
+  loop(mt, &loc);
+  loop(mt, &loc);
+  fprintf(stderr, "exit\n");
+  NOOPT_VAL(res);
+  yk_location_drop(loc);
+  yk_mt_shutdown(mt);
+  return (EXIT_SUCCESS);
+}
diff --git a/tests/c/early_return_recursive1.c b/tests/c/early_return_recursive1.c
new file mode 100644
index 000000000..c32fdb569
--- /dev/null
+++ b/tests/c/early_return_recursive1.c
@@ -0,0 +1,66 @@
+// Run-time:
+//   env-var: YKD_LOG_IR=-:jit-pre-opt,jit-post-opt
+//   env-var: YKD_SERIALISE_COMPILATION=1
+//   env-var: YK_LOG=4
+//   stderr:
+//     2
+//     yk-jit-event: start-tracing
+//     1
+//     yk-jit-event: stop-tracing-early-return
+//     return
+//     3
+//     yk-jit-event: start-tracing
+//     2
+//     yk-jit-event: stop-tracing
+//     --- Begin jit-pre-opt ---
+//     ...
+//     --- End jit-pre-opt ---
+//     ...
+//     1
+//     return
+//     exit
+
+// Check that early return from recursive interpreter loops works.
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+int loop(YkMT *, YkLocation *, int);
+
+int loop(YkMT *mt, YkLocation *loc, int i) {
+  int res = 9998;
+  NOOPT_VAL(res);
+  NOOPT_VAL(i);
+  while (i > 0) {
+    yk_mt_control_point(mt, loc);
+    if (i > 2) {
+      loop(mt, loc, i - 1);
+    }
+    fprintf(stderr, "%d\n", i);
+    i--;
+  }
+  yk_mt_early_return(mt);
+  fprintf(stderr, "return\n");
+  NOOPT_VAL(res);
+  return i;
+}
+
+int main(int argc, char **argv) {
+  YkMT *mt = yk_mt_new(NULL);
+  yk_mt_hot_threshold_set(mt, 2);
+  YkLocation loc = yk_location_new();
+
+  int res = 9998;
+  NOOPT_VAL(loc);
+  NOOPT_VAL(res);
+  loop(mt, &loc, 3);
+  fprintf(stderr, "exit\n");
+  NOOPT_VAL(res);
+  yk_location_drop(loc);
+  yk_mt_shutdown(mt);
+  return (EXIT_SUCCESS);
+}
diff --git a/tests/c/early_return_recursive2.c b/tests/c/early_return_recursive2.c
new file mode 100644
index 000000000..c854f9353
--- /dev/null
+++ b/tests/c/early_return_recursive2.c
@@ -0,0 +1,71 @@
+// Run-time:
+//   env-var: YKD_LOG_IR=-:jit-pre-opt,jit-post-opt
+//   env-var: YKD_SERIALISE_COMPILATION=1
+//   env-var: YK_LOG=4
+//   stderr:
+//     yk-jit-event: start-tracing
+//     0x{{loc2}}: 2
+//     0x{{loc2}}: 1
+//     yk-jit-event: stop-tracing-early-return
+//     return
+//     0x{{loc1}}: 3
+//     yk-jit-event: start-tracing
+//     0x{{loc1}}: 2
+//     yk-jit-event: stop-tracing
+//     --- Begin jit-pre-opt ---
+//     ...
+//     --- End jit-pre-opt ---
+//     ...
+//     0x{{loc1}}: 1
+//     return
+//     exit
+
+// Check that early return from recursive interpreter loops works.
+//
+// In this scenario, the parent function starts tracing at location 1; a
+// recursive interpreter loop then runs and exits without ever encountering
+// location 1 (the location that initiated tracing).
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+int loop(YkMT *, YkLocation *, YkLocation *, int);
+
+int loop(YkMT *mt, YkLocation *use_loc, YkLocation *next_loc, int i) {
+  assert(use_loc != NULL);
+  NOOPT_VAL(i);
+  while (i > 0) {
+    yk_mt_control_point(mt, use_loc);
+    if (i > 2) {
+      loop(mt, next_loc, NULL, i - 1);
+    }
+    fprintf(stderr, "%p: %d\n", use_loc, i);
+    i--;
+  }
+  yk_mt_early_return(mt);
+  fprintf(stderr, "return\n");
+  return i;
+}
+
+int main(int argc, char **argv) {
+  YkMT *mt = yk_mt_new(NULL);
+  yk_mt_hot_threshold_set(mt, 0);
+
+  // First location: used by first level deep recursion.
+  YkLocation loc1 = yk_location_new();
+  // Second location: used by second level deep recursion.
+  YkLocation loc2 = yk_location_new();
+
+  NOOPT_VAL(loc1);
+  NOOPT_VAL(loc2);
+  loop(mt, &loc1, &loc2, 3);
+  fprintf(stderr, "exit\n");
+  yk_location_drop(loc1);
+  yk_location_drop(loc2);
+  yk_mt_shutdown(mt);
+  return (EXIT_SUCCESS);
+}
diff --git a/tests/c/fcmp_double.c b/tests/c/fcmp_double.c
index 1fd425a85..30a6063f1 100644
--- a/tests/c/fcmp_double.c
+++ b/tests/c/fcmp_double.c
@@ -1,5 +1,4 @@
 // Run-time:
-//   env-var: YKD_LOG_IR=aot,jit-pre-opt,jit-asm
 //   env-var: YKD_SERIALISE_COMPILATION=1
 //   env-var: YK_LOG=4
 //   stderr:
diff --git a/tests/c/fcmp_float.c b/tests/c/fcmp_float.c
index 7089b0516..ef8b9e9e2 100644
--- a/tests/c/fcmp_float.c
+++ b/tests/c/fcmp_float.c
@@ -1,5 +1,4 @@
 // Run-time:
-//   env-var: YKD_LOG_IR=aot,jit-pre-opt,jit-asm
 //   env-var: YKD_SERIALISE_COMPILATION=1
 //   env-var: YK_LOG=4
 //   stderr:
diff --git a/ykcapi/src/lib.rs b/ykcapi/src/lib.rs
index ba06dcc3f..d4228f166 100644
--- a/ykcapi/src/lib.rs
+++ b/ykcapi/src/lib.rs
@@ -50,6 +50,14 @@ pub extern "C" fn yk_mt_control_point(_mt: *mut MT, _loc: *mut Location) {
     // Intentionally empty.
 }
 
+#[no_mangle]
+pub unsafe extern "C" fn __yk_mt_early_return(mt: *mut MT, frameaddr: *mut c_void) {
+    let mt = unsafe { &*mt };
+    let arc = unsafe { Arc::from_raw(mt) };
+    arc.early_return(frameaddr);
+    forget(arc);
+}
+
 // The new control point called after the interpreter has been patched by ykllvm.
 #[cfg(target_arch = "x86_64")]
 #[naked]
diff --git a/ykcapi/yk.h b/ykcapi/yk.h
index fad7223ca..244ccc4ed 100644
--- a/ykcapi/yk.h
+++ b/ykcapi/yk.h
@@ -49,6 +49,22 @@ void yk_mt_shutdown(YkMT *);
 // execute JITted code.
 void yk_mt_control_point(YkMT *, YkLocation *);
 
+// At each point where a function containing a control point can exit "early",
+// this function must be called. "Early" includes, but is not limited to, the
+// following:
+//
+// 1. Immediately after a non-infinite loop containing a call to
+//    `yk_mt_control_point`.
+// 2. Immediately before `return` statements in code reachable from a
+//    `yk_mt_control_point`.
+//
+// Failure to call this function will lead to undefined behaviour.
+#define yk_mt_early_return(mt) __yk_mt_early_return(mt, __builtin_frame_address(0))
+
+// This is an internal function to yk: calling it directly leads to undefined
+// behaviour.
+void __yk_mt_early_return(YkMT *, void *);
+
 // Set the threshold at which `YkLocation`'s are considered hot.
 void yk_mt_hot_threshold_set(YkMT *, YkHotThreshold);
diff --git a/ykrt/src/compile/jitc_yk/codegen/x64/deopt.rs b/ykrt/src/compile/jitc_yk/codegen/x64/deopt.rs
index 065e47936..119ab0d20 100644
--- a/ykrt/src/compile/jitc_yk/codegen/x64/deopt.rs
+++ b/ykrt/src/compile/jitc_yk/codegen/x64/deopt.rs
@@ -92,7 +92,7 @@ fn running_trace(gidxs: &[usize]) -> Arc {
 /// * glen - Length for list in `gptr`.
 #[no_mangle]
 pub(crate) extern "C" fn __yk_deopt(
-    frameaddr: *const c_void,
+    frameaddr: *mut c_void,
     gidx: u64,
     gp_regs: &[u64; 16],
     fp_regs: &[u64; 16],
@@ -320,13 +320,13 @@ pub(crate) extern "C" fn __yk_deopt(
 
     // The `clone` should really be `Arc::clone(&ctr)` but that doesn't play well with type
     // inference in this (unusual) case.
-    ctr.mt.guard_failure(ctr.clone(), gidx);
+    ctr.mt.guard_failure(ctr.clone(), gidx, frameaddr);
 
     // Since we won't return from this function, drop `ctr` manually.
     drop(ctr);
 
     // Now overwrite the existing stack with our newly recreated one.
-    unsafe { replace_stack(newframedst as *mut c_void, newstack, memsize) };
+    unsafe { replace_stack(newframedst, newstack, memsize) };
 }
 
 /// Writes the stack frames that we recreated in [__yk_deopt] onto the current stack, overwriting
diff --git a/ykrt/src/mt.rs b/ykrt/src/mt.rs
index 60107d9c3..97c35acdd 100644
--- a/ykrt/src/mt.rs
+++ b/ykrt/src/mt.rs
@@ -401,7 +401,7 @@ impl MT {
 
     #[allow(clippy::not_unsafe_ptr_arg_deref)]
     pub fn control_point(self: &Arc, loc: &Location, frameaddr: *mut c_void, smid: u64) {
-        match self.transition_control_point(loc) {
+        match self.transition_control_point(loc, frameaddr) {
             TransitionControlPoint::NoAction => (),
             TransitionControlPoint::Execute(ctr) => {
                 self.log.log(Verbosity::JITEvent, "enter-jit-code");
@@ -438,6 +438,7 @@ impl MT {
                                     hl,
                                     thread_tracer: tt,
                                     promotions: Vec::new(),
+                                    frameaddr,
                                 };
                             }),
                             Err(e) => todo!("{e:?}"),
@@ -453,7 +454,11 @@ impl MT {
                             hl,
                             thread_tracer,
                             promotions,
-                        } => (hl, thread_tracer, promotions),
+                            frameaddr: tracing_frameaddr,
+                        } => {
+                            assert_eq!(frameaddr, tracing_frameaddr);
+                            (hl, thread_tracer, promotions)
+                        }
                         _ => unreachable!(),
                     },
                 );
@@ -485,7 +490,11 @@ impl MT {
                             hl,
                             thread_tracer,
                             promotions,
-                        } => (hl, thread_tracer, promotions),
+                            frameaddr: tracing_frameaddr,
+                        } => {
+                            assert_eq!(frameaddr, tracing_frameaddr);
+                            (hl, thread_tracer, promotions)
+                        }
                         _ => unreachable!(),
                     },
                 );
@@ -515,7 +524,11 @@ impl MT {
 
     /// Perform the next step to `loc` in the `Location` state-machine for a control point. If
    /// `loc` moves to the Compiled state, return a pointer to a [CompiledTrace] object.
-    fn transition_control_point(self: &Arc, loc: &Location) -> TransitionControlPoint {
+    fn transition_control_point(
+        self: &Arc,
+        loc: &Location,
+        frameaddr: *mut c_void,
+    ) -> TransitionControlPoint {
         MTThread::with(|mtt| {
             let is_tracing = mtt.is_tracing();
             match loc.hot_location() {
@@ -578,16 +591,31 @@ impl MT {
                 HotLocationKind::Tracing => {
                     let hl = loc.hot_location_arc_clone().unwrap();
                     match &*mtt.tstate.borrow() {
-                        MTThreadState::Tracing { hl: thread_hl, .. } => {
+                        MTThreadState::Tracing {
+                            hl: thread_hl,
+                            frameaddr: tracing_frameaddr,
+                            ..
+                        } => {
                             // This thread is tracing something...
                             if !Arc::ptr_eq(thread_hl, &hl) {
                                 // ...but not this Location.
                                 TransitionControlPoint::NoAction
                             } else {
-                                // ...and it's this location: we have therefore finished
-                                // tracing the loop.
-                                lk.kind = HotLocationKind::Compiling;
-                                TransitionControlPoint::StopTracing
+                                // ...and it's this location...
+                                if frameaddr == *tracing_frameaddr {
+                                    lk.kind = HotLocationKind::Compiling;
+                                    TransitionControlPoint::StopTracing
+                                } else {
+                                    // ...but in a different frame.
+                                    #[cfg(target_arch = "x86_64")]
+                                    {
+                                        // If this assert fails, we've fallen through to a
+                                        // caller, which is UB.
+                                        assert!(*tracing_frameaddr > frameaddr);
+                                    }
+                                    // We're inlining.
+                                    TransitionControlPoint::NoAction
+                                }
                             }
                         }
                         _ => {
@@ -626,27 +654,44 @@ impl MT {
                 } => {
                     let hl = loc.hot_location_arc_clone().unwrap();
                     match &*mtt.tstate.borrow() {
-                        MTThreadState::Tracing { hl: thread_hl, .. } => {
+                        MTThreadState::Tracing {
+                            hl: thread_hl,
+                            frameaddr: tracing_frameaddr,
+                            ..
+                        } => {
                             // This thread is tracing something...
                             if !Arc::ptr_eq(thread_hl, &hl) {
                                 // ...but not this Location.
-                                TransitionControlPoint::Execute(Arc::clone(root_ctr))
+                                TransitionControlPoint::NoAction
                             } else {
-                                // ...and it's this location: we have therefore finished
-                                // tracing the loop.
-                                let parent_ctr = Arc::clone(parent_ctr);
-                                let root_ctr_cl = Arc::clone(root_ctr);
-                                lk.kind = HotLocationKind::Compiled(Arc::clone(root_ctr));
-                                drop(lk);
-                                TransitionControlPoint::StopSideTracing {
-                                    gidx,
-                                    parent_ctr,
-                                    root_ctr: root_ctr_cl,
+                                // ...and it's this location.
+                                if frameaddr == *tracing_frameaddr {
+                                    let parent_ctr = Arc::clone(parent_ctr);
+                                    let root_ctr_cl = Arc::clone(root_ctr);
+                                    lk.kind =
+                                        HotLocationKind::Compiled(Arc::clone(root_ctr));
+                                    drop(lk);
+                                    TransitionControlPoint::StopSideTracing {
+                                        gidx,
+                                        parent_ctr,
+                                        root_ctr: root_ctr_cl,
+                                    }
+                                } else {
+                                    // ...but in a different frame.
+                                    #[cfg(target_arch = "x86_64")]
+                                    {
+                                        // If this assert fails, we've fallen through to a
+                                        // caller, which is UB.
+                                        assert!(*tracing_frameaddr > frameaddr);
+                                    }
+                                    // We're inlining.
+                                    TransitionControlPoint::NoAction
                                 }
                             }
                         }
                         _ => {
                             // This thread isn't tracing anything.
+                            assert!(!is_tracing);
                             TransitionControlPoint::Execute(Arc::clone(root_ctr))
                         }
                     }
@@ -733,7 +778,12 @@ impl MT {
     // FIXME: Don't side trace the last guard of a side-trace as this guard always fails.
     // FIXME: Don't side-trace after switch instructions: not every guard failure is equal
     // and a trace compiled for case A won't work for case B.
-    pub(crate) fn guard_failure(self: &Arc, parent: Arc, gidx: GuardIdx) {
+    pub(crate) fn guard_failure(
+        self: &Arc,
+        parent: Arc,
+        gidx: GuardIdx,
+        frameaddr: *mut c_void,
+    ) {
         match self.transition_guard_failure(parent, gidx) {
             TransitionGuardFailure::NoAction => (),
             TransitionGuardFailure::StartSideTracing(hl) => {
@@ -748,6 +798,7 @@ impl MT {
                             hl,
                             thread_tracer: tt,
                             promotions: Vec::new(),
+                            frameaddr,
                         };
                     }),
                     Err(e) => todo!("{e:?}"),
@@ -755,6 +806,40 @@ impl MT {
             }
         }
     }
+
+    pub fn early_return(self: &Arc, frameaddr: *mut c_void) {
+        MTThread::with(|mtt| {
+            let mut abort = false;
+            if let MTThreadState::Tracing {
+                frameaddr: tracing_frameaddr,
+                ..
+            } = &*mtt.tstate.borrow()
+            {
+                if frameaddr <= *tracing_frameaddr {
+                    abort = true;
+                }
+            }
+
+            if abort {
+                match mtt.tstate.replace(MTThreadState::Interpreting) {
+                    MTThreadState::Tracing {
+                        thread_tracer, hl, ..
+                    } => {
+                        // We don't care if the thread tracer went wrong: we're not going to use
+                        // its result anyway.
+                        thread_tracer.stop().ok();
+                        let mut lk = hl.lock();
+                        lk.tracecompilation_error(self);
+                        drop(lk);
+                        self.stats.timing_state(TimingState::None);
+                        self.log
+                            .log(Verbosity::JITEvent, "stop-tracing-early-return");
+                    }
+                    _ => unreachable!(),
+                }
+            }
+        });
+    }
 }
 
 #[cfg(target_arch = "x86_64")]
@@ -810,6 +895,9 @@ enum MTThreadState {
         thread_tracer: Box,
         /// Records the content of data recorded via `yk_promote`.
         promotions: Vec,
+        /// The `frameaddr` when tracing started. This allows us to tell if we're finishing tracing
+        /// at the same point that we started.
+        frameaddr: *mut c_void,
     },
     /// This thread is executing a trace.
     /// Note that the `dyn CompiledTrace` serves two different purposes:
     ///
@@ -1017,7 +1105,9 @@ mod tests {
     }
 
     fn expect_start_tracing(mt: &Arc, loc: &Location) {
-        let TransitionControlPoint::StartTracing(hl) = mt.transition_control_point(loc) else {
+        let TransitionControlPoint::StartTracing(hl) =
+            mt.transition_control_point(loc, 0 as *mut c_void)
+        else {
             panic!()
         };
         MTThread::with(|mtt| {
@@ -1025,12 +1115,15 @@ mod tests {
                 hl,
                 thread_tracer: Box::new(DummyTraceRecorder),
                 promotions: Vec::new(),
+                frameaddr: 0 as *mut c_void,
             };
         });
     }
 
     fn expect_stop_tracing(mt: &Arc, loc: &Location) {
-        let TransitionControlPoint::StopTracing = mt.transition_control_point(loc) else {
+        let TransitionControlPoint::StopTracing =
+            mt.transition_control_point(loc, 0 as *mut c_void)
+        else {
             panic!()
         };
         MTThread::with(|mtt| {
@@ -1049,6 +1142,7 @@ mod tests {
                 hl,
                 thread_tracer: Box::new(DummyTraceRecorder),
                 promotions: Vec::new(),
+                frameaddr: 0 as *mut c_void,
             };
         });
     }
@@ -1062,7 +1156,7 @@ mod tests {
         let loc = Location::new();
         for i in 0..mt.hot_threshold() {
             assert_eq!(
-                mt.transition_control_point(&loc),
+                mt.transition_control_point(&loc, 0 as *mut c_void),
                 TransitionControlPoint::NoAction
             );
             assert_eq!(loc.count(), Some(i + 1));
@@ -1082,12 +1176,12 @@ mod tests {
         )));
         loc.hot_location().unwrap().lock().kind = HotLocationKind::Compiled(ctr.clone());
         assert!(matches!(
-            mt.transition_control_point(&loc),
+            mt.transition_control_point(&loc, 0 as *mut c_void),
             TransitionControlPoint::Execute(_)
         ));
         expect_start_side_tracing(&mt, ctr);
 
-        match mt.transition_control_point(&loc) {
+        match mt.transition_control_point(&loc, 0 as *mut c_void) {
             TransitionControlPoint::StopSideTracing { .. } => {
                 MTThread::with(|mtt| {
                     *mtt.tstate.borrow_mut() = MTThreadState::Interpreting;
@@ -1100,7 +1194,7 @@ mod tests {
             _ => unreachable!(),
         }
         assert!(matches!(
-            mt.transition_control_point(&loc),
+            mt.transition_control_point(&loc, 0 as *mut c_void),
             TransitionControlPoint::Execute(_)
         ));
     }
@@ -1124,25 +1218,25 @@ mod tests {
                 // otherwise tracing will start, and the assertions will fail.
                 for _ in 0..hot_thrsh / (num_threads * 4) {
                     assert_eq!(
-                        mt.transition_control_point(&loc),
+                        mt.transition_control_point(&loc, 0 as *mut c_void),
                         TransitionControlPoint::NoAction
                     );
                     let c1 = loc.count();
                    assert!(c1.is_some());
                    assert_eq!(
-                        mt.transition_control_point(&loc),
+                        mt.transition_control_point(&loc, 0 as *mut c_void),
                        TransitionControlPoint::NoAction
                    );
                    let c2 = loc.count();
                    assert!(c2.is_some());
                    assert_eq!(
-                        mt.transition_control_point(&loc),
+                        mt.transition_control_point(&loc, 0 as *mut c_void),
                        TransitionControlPoint::NoAction
                    );
                    let c3 = loc.count();
                    assert!(c3.is_some());
                    assert_eq!(
-                        mt.transition_control_point(&loc),
+                        mt.transition_control_point(&loc, 0 as *mut c_void),
                        TransitionControlPoint::NoAction
                    );
                    let c4 = loc.count();
@@ -1162,7 +1256,7 @@ mod tests {
         // at or below the threshold: it could even be (although it's rather unlikely) 0!
         assert!(loc.count().is_some());
         loop {
-            match mt.transition_control_point(&loc) {
+            match mt.transition_control_point(&loc, 0 as *mut c_void) {
                 TransitionControlPoint::NoAction => (),
                 TransitionControlPoint::StartTracing(hl) => {
                     MTThread::with(|mtt| {
@@ -1170,6 +1264,7 @@ mod tests {
                             hl,
                             thread_tracer: Box::new(DummyTraceRecorder),
                            promotions: Vec::new(),
+                            frameaddr: 0 as *mut c_void,
                        };
                    });
                    break;
@@ -1194,7 +1289,7 @@ mod tests {
         // Get the location to the point of being hot.
         for _ in 0..THRESHOLD {
             assert_eq!(
-                mt.transition_control_point(&loc),
+                mt.transition_control_point(&loc, 0 as *mut c_void),
                 TransitionControlPoint::NoAction
             );
         }
@@ -1226,7 +1321,7 @@ mod tests {
             HotLocationKind::Tracing
         ));
         assert_eq!(
-            mt.transition_control_point(&loc),
+            mt.transition_control_point(&loc, 0 as *mut c_void),
             TransitionControlPoint::NoAction
         );
         assert!(matches!(
@@ -1247,7 +1342,7 @@ mod tests {
         // Get the location to the point of being hot.
         for _ in 0..THRESHOLD {
             assert_eq!(
-                mt.transition_control_point(&loc),
+                mt.transition_control_point(&loc, 0 as *mut c_void),
                 TransitionControlPoint::NoAction
             );
         }
@@ -1305,17 +1400,17 @@ mod tests {
 
         for _ in 0..THRESHOLD {
             assert_eq!(
-                mt.transition_control_point(&loc1),
+                mt.transition_control_point(&loc1, 0 as *mut c_void),
                 TransitionControlPoint::NoAction
             );
             assert_eq!(
-                mt.transition_control_point(&loc2),
+                mt.transition_control_point(&loc2, 0 as *mut c_void),
                 TransitionControlPoint::NoAction
             );
         }
         expect_start_tracing(&mt, &loc1);
         assert_eq!(
-            mt.transition_control_point(&loc2),
+            mt.transition_control_point(&loc2, 0 as *mut c_void),
             TransitionControlPoint::NoAction
         );
         assert!(matches!(
@@ -1352,7 +1447,7 @@ mod tests {
             let num_starts = Arc::clone(&num_starts);
             thrs.push(thread::spawn(move || {
                 for _ in 0..THRESHOLD {
-                    match mt.transition_control_point(&loc) {
+                    match mt.transition_control_point(&loc, 0 as *mut c_void) {
                         TransitionControlPoint::NoAction => (),
                         TransitionControlPoint::Execute(_) => (),
                         TransitionControlPoint::StartTracing(hl) => {
@@ -1362,6 +1457,7 @@ mod tests {
                                 hl,
                                 thread_tracer: Box::new(DummyTraceRecorder),
                                 promotions: Vec::new(),
+                                frameaddr: 0 as *mut c_void,
                             };
                         });
                         assert!(matches!(
@@ -1374,7 +1470,7 @@ mod tests {
                             HotLocationKind::Compiling
                         ));
                         assert_eq!(
-                            mt.transition_control_point(&loc),
+                            mt.transition_control_point(&loc, 0 as *mut c_void),
                             TransitionControlPoint::NoAction
                         );
                         assert!(matches!(
@@ -1386,7 +1482,7 @@ mod tests {
                         );
                         loop {
                             if let TransitionControlPoint::Execute(_) =
-                                mt.transition_control_point(&loc)
+                                mt.transition_control_point(&loc, 0 as *mut c_void)
                             {
                                 break;
                             }
@@ -1421,11 +1517,11 @@ mod tests {
 
         for _ in 0..THRESHOLD {
             assert_eq!(
-                mt.transition_control_point(&loc1),
+                mt.transition_control_point(&loc1, 0 as *mut c_void),
                 TransitionControlPoint::NoAction
            );
            assert_eq!(
-                mt.transition_control_point(&loc2),
+                mt.transition_control_point(&loc2, 0 as *mut c_void),
                TransitionControlPoint::NoAction
            );
        }
@@ -1440,7 +1536,7 @@ mod tests {
 
         expect_start_tracing(&mt, &loc2);
         assert_eq!(
-            mt.transition_control_point(&loc1),
+            mt.transition_control_point(&loc1, 0 as *mut c_void),
             TransitionControlPoint::NoAction
         );
         expect_stop_tracing(&mt, &loc2);
@@ -1462,7 +1558,7 @@ mod tests {
     fn to_compiled(mt: &Arc, loc: &Location) -> Arc {
         for _ in 0..THRESHOLD {
             assert_eq!(
-                mt.transition_control_point(loc),
+                mt.transition_control_point(loc, 0 as *mut c_void),
                 TransitionControlPoint::NoAction
             );
         }
@@ -1496,11 +1592,11 @@ mod tests {
         expect_start_side_tracing(&mt, ctr2);
 
         assert!(matches!(
-            dbg!(mt.transition_control_point(&loc1)),
+            dbg!(mt.transition_control_point(&loc1, 0 as *mut c_void)),
             TransitionControlPoint::Execute(_)
         ));
         assert!(matches!(
-            mt.transition_control_point(&loc2),
+            mt.transition_control_point(&loc2, 0 as *mut c_void),
             TransitionControlPoint::StopSideTracing { .. }
         ));
     }
 
@@ -1511,7 +1607,7 @@ mod tests {
         let loc = Location::new();
         b.iter(|| {
             for _ in 0..100000 {
-                black_box(mt.transition_control_point(&loc));
+                black_box(mt.transition_control_point(&loc, 0 as *mut c_void));
             }
         });
     }
@@ -1527,7 +1623,7 @@ mod tests {
             let mt = Arc::clone(&mt);
             thrs.push(thread::spawn(move || {
                 for _ in 0..100 {
-                    black_box(mt.transition_control_point(&loc));
+                    black_box(mt.transition_control_point(&loc, 0 as *mut c_void));
                 }
             }));
         }
@@ -1558,14 +1654,14 @@ mod tests {
         // https://github.com/ykjit/yk/issues/519
         expect_start_tracing(&mt, &loc2);
         assert!(matches!(
-            mt.transition_control_point(&loc1),
+            mt.transition_control_point(&loc1, 0 as *mut c_void),
             TransitionControlPoint::NoAction
         ));
 
         // But once we stop tracing for `loc2`, we should be able to execute the trace for `loc1`.
         expect_stop_tracing(&mt, &loc2);
         assert!(matches!(
-            mt.transition_control_point(&loc1),
+            mt.transition_control_point(&loc1, 0 as *mut c_void),
             TransitionControlPoint::Execute(_)
         ));
     }
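
For context, the embedder-facing usage pattern that the new `yk_mt_early_return` API and the tests above rely on can be sketched as follows. This is only an illustration of the documented usage, not part of the patch itself; the `<yk.h>` header name and the `interp_frame` function are assumptions made for the example.

/* Minimal sketch: pair yk_mt_control_point() with yk_mt_early_return(), as
 * required by the yk.h comment in this patch. */
#include <yk.h> /* assumed header name; the macro and declarations come from ykcapi/yk.h above */

void interp_frame(YkMT *mt, YkLocation *loc, int iters) {
  while (iters > 0) {
    /* The control point runs on every iteration of the interpreter loop. */
    yk_mt_control_point(mt, loc);
    iters--;
  }
  /* The loop is not infinite, so this frame "falls out" of it: yk must be told
   * before the frame exits (case 1 in the yk.h comment). The macro expands to
   * __yk_mt_early_return(mt, __builtin_frame_address(0)). */
  yk_mt_early_return(mt);
}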