// rumpk/npl/bench_ion.zig
// SPDX-License-Identifier: LSL-1.0
// Copyright (c) 2026 Markus Maiwald
// Stewardship: Self Sovereign Society Foundation
//
// This file is part of the Nexus Sovereign Core.
// See legal/LICENSE_SOVEREIGN.md for license terms.
//! Rumpk NPL: Benchmark (The Speed Freak)
//!
//! Measures raw ION ring transfer latency using the RISC-V cycle counter.
//! Tests the physics of shared-memory communication overhead.
//!
//! SAFETY: Uses inline assembly to read the hardware cycle counter (`rdcycle`).
const std = @import("std");
// 1. The SysTable Contract (Must match Kernel!)
// ABI NOTE: every struct below is `extern` so field order/layout is fixed.
// Any change here must be mirrored on the kernel side byte-for-byte.
// Fixed physical address where the kernel publishes the SysTable.
const ION_BASE = 0x83000000;
// One ION transfer descriptor: a payload word, its physical address,
// payload length and a packet id. 24 bytes with natural alignment.
const IonPacket = extern struct {
data: u64,
phys: u64,
len: u16,
id: u16,
};
// One command slot: opcode + single argument.
// NOTE(review): `kind` values are defined by the kernel; only 0 is used here.
const CmdPacket = extern struct {
kind: u32,
arg: u32,
};
// Shared single-producer ring of IonPackets. `head` is the producer index,
// `tail` the consumer index, `mask` the (power-of-two - 1) capacity mask.
// NOTE(review): `mask` is presumably 255 to match data[256] — set by kernel; confirm.
const RingBufferPacket = extern struct {
head: u32,
tail: u32,
mask: u32,
data: [256]IonPacket,
};
// Same ring layout specialized for CmdPackets (used by the benchmark loop).
const RingBufferCmd = extern struct {
head: u32,
tail: u32,
mask: u32,
data: [256]CmdPacket,
};
// Root descriptor the kernel places at ION_BASE. `magic` (checked in main
// against 0x4E585553, ASCII "NXUS") guards against reading garbage; the four
// pointers lead to the shared rings (rx/tx/event/cmd).
const SysTable = extern struct {
magic: u32,
s_rx: *RingBufferPacket,
s_tx: *RingBufferPacket,
s_event: *RingBufferPacket,
s_cmd: *RingBufferCmd,
};
/// Obtain a typed view of the kernel-published SysTable.
/// No validation happens here; callers must check `magic` before trusting it.
fn get_systable() *SysTable {
    const table: *SysTable = @ptrFromInt(ION_BASE);
    return table;
}
/// Read the RISC-V 64-bit hardware cycle counter via `rdcycle`.
/// Written as an asm *expression* so the value flows straight out of the
/// assembly — no uninitialized local is needed. `volatile` keeps the
/// optimizer from caching or eliding consecutive reads.
inline fn read_cycles() u64 {
    return asm volatile ("rdcycle %[ret]"
        : [ret] "=r" (-> u64),
    );
}
// Entry point: verifies the ION handshake, calibrates the timer, then times
// ITERATIONS raw pushes into the command ring and reports cycles/op.
// Returns 0 on success, 1 if the SysTable magic does not match.
export fn main() c_int {
const sys = get_systable();
print("[BENCH] Warming up ION...\n");
// Guard: refuse to touch the rings unless the kernel actually published
// the table (0x4E585553 == ASCII "NXUS").
if (sys.magic != 0x4E585553) {
print("[BENCH] Magic mismatch! 0x");
print_hex(sys.magic);
print("\n");
return 1;
}
var i: usize = 0;
const ITERATIONS = 100_000;
// 1. Calibrate (Measure overhead of reading cycles)
// Back-to-back reads: the delta is the cost of one rdcycle round-trip.
// It is subtracted once from the total (not per-op) since start/end each
// pay it once.
const t0 = read_cycles();
const t1 = read_cycles();
const overhead = t1 - t0;
print("[BENCH] Timer Overhead: ");
print_u64(overhead);
print(" cycles\n");
// 2. The Loop
print("[BENCH] Starting 100k IO ops (Raw Ring Push - NO YIELD)...\n");
const start = read_cycles();
const cmd_ring = sys.s_cmd;
while (i < ITERATIONS) : (i += 1) {
// Measure Pure Ring Push (Physics)
// Classic SPSC push: we own `head`, the kernel owns `tail`.
// Monotonic loads suffice for the indices; the `.release` store below
// publishes the slot write before the consumer can observe the new head.
while (true) {
const head = @atomicLoad(u32, &cmd_ring.head, .monotonic);
const tail = @atomicLoad(u32, &cmd_ring.tail, .monotonic);
const next = (head + 1) & cmd_ring.mask;
// Ring is full when advancing head would collide with tail
// (one slot is intentionally left empty to disambiguate full/empty).
if (next != tail) {
// `head & mask` is redundant when head is always stored masked,
// but harmless — kept as defensive masking.
cmd_ring.data[head & cmd_ring.mask] = .{ .kind = 0, .arg = @intCast(i) };
@atomicStore(u32, &cmd_ring.head, next, .release);
// fiber_yield(); // BYPASS FOR RAW SPEED
break;
} else {
// Ring full - yield to let Kernel process
fiber_yield();
}
}
}
const end = read_cycles();
// Total elapsed minus the single calibrated timer round-trip.
const total_cycles = (end - start) - overhead;
// Integer division: reported average truncates sub-cycle remainders.
const avg = total_cycles / ITERATIONS;
print("[BENCH] Total Cycles: ");
print_u64(total_cycles);
print("\n");
print("[BENCH] Result: ");
print_u64(avg);
print(" cycles/op\n");
return 0;
}
// OS Shims
// write(2)-style syscall shim resolved by the runtime/link environment:
// writes `count` bytes from `buf` to file descriptor `fd`.
extern fn write(fd: c_int, buf: [*]const u8, count: usize) isize;
// Fixed address holding a pointer to the kernel's yield routine.
// NOTE(review): lives just past the ION region (ION_BASE + 0xFF0) — must
// match the kernel's published layout; confirm on ABI changes.
const YIELD_LOC = 0x83000FF0;
/// Cooperatively yield to the kernel scheduler.
/// YIELD_LOC holds a pointer to a C-calling-convention function pointer;
/// load it and jump through it.
fn fiber_yield() void {
    const slot: *const *const fn () callconv(.c) void = @ptrFromInt(YIELD_LOC);
    slot.*();
}
/// Write raw text to stdout (fd 1) via the OS write shim.
/// Best-effort: the byte count returned by `write` is deliberately ignored.
fn print(text: []const u8) void {
    const written = write(1, text.ptr, text.len);
    _ = written;
}
/// Print an unsigned 64-bit value in decimal (falls back to "ERR" if the
/// stack buffer somehow cannot hold it — impossible for u64, kept defensive).
fn print_u64(val: u64) void {
    // SAFETY(Bench): buffer is written by bufPrint before any byte is read.
    var digits: [32]u8 = undefined;
    const rendered = std.fmt.bufPrint(&digits, "{}", .{val}) catch "ERR";
    print(rendered);
}
/// Print a u32 as lowercase hex digits (no "0x" prefix — callers add it).
fn print_hex(val: u32) void {
    // SAFETY(Bench): buffer is written by bufPrint before any byte is read.
    var digits: [16]u8 = undefined;
    const rendered = std.fmt.bufPrint(&digits, "{x}", .{val}) catch "ERR";
    print(rendered);
}
/// Root panic override: report the message on stdout, then spin forever.
/// Trace and return address are unavailable/unused in this bare-metal bench.
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, ret_addr: ?usize) noreturn {
    _ = ret_addr;
    _ = error_return_trace;
    print("\n[BENCH] PANIC: ");
    print(msg);
    print("\n");
    // Nothing to return to — halt here so the kernel can inspect the output.
    while (true) {}
}