// SPDX-License-Identifier: LSL-1.0
// Copyright (c) 2026 Markus Maiwald
// Stewardship: Self Sovereign Society Foundation
//
// This file is part of the Nexus Sovereign Core.
// See legal/LICENSE_SOVEREIGN.md for license terms.

//! Rumpk NPL: Flood (The Stress Test)
//!
//! A high-throughput NPL fiber designed to saturate the ION command ring.
//! Used for testing backpressure, starvation, and the Adaptive Governor.
//!
//! SAFETY: Performs direct atomic operations on the shared command ring.

const std = @import("std");

// 1. The SysTable Contract (must match the kernel!)
const ION_BASE = 0x83000000;

const IonPacket = extern struct {
    data: u64,
    phys: u64,
    len: u16,
    id: u16,
};

const CmdPacket = extern struct {
    kind: u32,
    arg: u32,
};

const RingBufferPacket = extern struct {
    head: u32,
    tail: u32,
    mask: u32,
    data: [256]IonPacket,
};

const RingBufferCmd = extern struct {
    head: u32,
    tail: u32,
    mask: u32,
    data: [256]CmdPacket,
};

const SysTable = extern struct {
    magic: u32,
    s_rx: *RingBufferPacket,
    s_tx: *RingBufferPacket,
    s_event: *RingBufferPacket,
    s_cmd: *RingBufferCmd,
};

fn get_systable() *SysTable {
    return @ptrFromInt(ION_BASE);
}

// Minimal shims
extern fn write(fd: c_int, buf: [*]const u8, count: usize) isize;

const YIELD_LOC = 0x83000FF0;

fn fiber_yield() void {
    // YIELD_LOC holds a function pointer installed by the kernel.
    const ptr: *const *const fn () callconv(.c) void = @ptrFromInt(YIELD_LOC);
    const func = ptr.*;
    func();
}

fn print(text: []const u8) void {
    _ = write(1, text.ptr, text.len);
}

fn print_u64(val: u64) void {
    // SAFETY(Flood): Buffer populated by `std.fmt.bufPrint` before use.
    var buf: [32]u8 = undefined;
    const s = std.fmt.bufPrint(&buf, "{}", .{val}) catch "ERR";
    print(s);
}

inline fn cpu_relax() void {
    // Hint to the CPU that we are spinning (PAUSE on x86; usually a NOP on RISC-V/ARM).
    asm volatile ("nop");
}

pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, ret_addr: ?usize) noreturn {
    _ = error_return_trace;
    _ = ret_addr;
    print("\n[FLOOD] PANIC: ");
    print(msg);
    print("\n");
    while (true) {}
}

export fn main() c_int {
    const sys = get_systable();

    print("[FLOOD] Opening Sluice Gates...\n");

    if (sys.magic != 0x4E585553) {
        print("[FLOOD] Magic mismatch! Aborting.\n");
        return 1;
    }

    var tx_count: u64 = 0;
    var drop_count: u64 = 0;

    // The "Firehose" pattern.
    const cmd_ring = sys.s_cmd;

    // Design rationale (cooperative scheduling, no preemption):
    //
    // Rumpk v1.1 is cooperative. There is no preemption yet (the timer IRQ is
    // mocked/disabled), so the Adaptive Governor in sched.nim cannot force a
    // context switch away from this fiber; the scheduler only runs when we call
    // fiber_yield(). If this payload spins without ever yielding, the kernel
    // starves and the system hangs (the Saboteur scenario).
    //
    // The Governor rule "RX ring > 80% full -> boost the ION fiber's priority"
    // therefore only takes effect at our yield points. War Mode (Task 1) was
    // about bypassing the other fibers (NexShell/Watchdog), not about
    // monopolising the CPU, so we still yield under backpressure.
    //
    // Contention model: producer (Flood) vs consumer (ION).
    //   - If Flood yields, ION runs and drains the ring; then Flood runs again.
    //   - If Flood produces packets faster than ION consumes them, the ring fills.
    //   - Once the ring is full, Flood MUST yield.
    //
    // The test therefore asks:
    //   1. Can we saturate the ring?
    //   2. Does the system recover (drain) once the ring is full?
    //   3. Do we avoid "Drops = 100%" (which would mean ION never runs)?
    while (true) {
        // 1. Attempt an atomic push (physics).
        const head = @atomicLoad(u32, &cmd_ring.head, .monotonic);
        const tail = @atomicLoad(u32, &cmd_ring.tail, .monotonic);
        const mask = cmd_ring.mask;
        const next = (head + 1) & mask;

        var success = false;
        if (next != tail) {
            // Space available.
            cmd_ring.data[head & mask] = .{ .kind = 100, .arg = 0 }; // NOOP kind
            @atomicStore(u32, &cmd_ring.head, next, .release);
            success = true;
        }

        if (success) {
            tx_count += 1;
            // Burst logic: don't yield on success; keep filling the ring.
        } else {
            drop_count += 1;
            // Backpressure: the ring is full. Spinning here would only waste
            // cycles in a cooperative system, so yield and let the consumer drain.
            fiber_yield();
        }

        // 2. Periodic telemetry (throttled so IO overhead doesn't distort the flood).
        if ((tx_count + drop_count) % 100_000 == 0) {
            print("[FLOOD] TX: ");
            print_u64(tx_count);
            print(" | DROPS: ");
            print_u64(drop_count);
            print("\n");
            // No mandatory yield here: filling the ring quickly forces the
            // drop path above, which yields. That keeps the system alive.
        }
    }

    return 0;
}
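// ---------------------------------------------------------------------------
// Host-side sanity sketch of the ring contract (hypothetical addition, not
// part of the NPL payload path). It assumes the kernel consumer pops by
// advancing `tail` as the mirror image of the push in main(); `try_push` is a
// local helper for this sketch, not a kernel API. It exercises the full/empty
// convention main() relies on: one slot is sacrificed to distinguish full from
// empty, so a ring with mask 255 holds at most 255 packets.
// ---------------------------------------------------------------------------
fn try_push(ring: *RingBufferCmd, pkt: CmdPacket) bool {
    const head = @atomicLoad(u32, &ring.head, .monotonic);
    const tail = @atomicLoad(u32, &ring.tail, .monotonic);
    const next = (head + 1) & ring.mask;
    if (next == tail) return false; // full
    ring.data[head & ring.mask] = pkt;
    @atomicStore(u32, &ring.head, next, .release);
    return true;
}

test "cmd ring saturates at mask slots and recovers after a drain" {
    var ring = std.mem.zeroes(RingBufferCmd);
    ring.mask = 255;

    // Fill: exactly `mask` pushes succeed before the ring reports full.
    var pushed: u32 = 0;
    while (try_push(&ring, .{ .kind = 100, .arg = 0 })) : (pushed += 1) {}
    try std.testing.expectEqual(@as(u32, 255), pushed);

    // Drain one slot the way the consumer would; a push then succeeds again.
    const tail = @atomicLoad(u32, &ring.tail, .acquire);
    @atomicStore(u32, &ring.tail, (tail + 1) & ring.mask, .release);
    try std.testing.expect(try_push(&ring, .{ .kind = 100, .arg = 0 }));
}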