feat(rumpk): Sovereign Core Stabilization & Membrane IPC Hardening

- NexShell: Hardened command transmission via atomic ION packets; fixed fragmentation issues.
- NipBox: Expanded 'Sovereign Coreutils' with 'ls' and enhanced 'matrix' control.
- GPU/Retina: Optimized VirtIO-GPU driver; improved polling and framebuffer synchronization.
- Membrane: Stabilized libc shims (clib.c, libc.nim) and ION client logic.
- Kernel: Refined fiber scheduler and watchdog metrics.
- Forge: Cleanup and optimization of build scripts and manifests.

parent c279744dc6
commit 30fa024367
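The command-plane hardening replaces the old 8-byte command word with a fixed-size CmdPacket pushed through the command ring. As a minimal, hypothetical sketch (not part of the diff itself), a userland NPL would toggle the Matrix effect via the nexus_syscall export and the CMD_GPU_MATRIX ID that this commit adds:

// Hypothetical userland caller; nexus_syscall is the export added in
// libs/membrane/libc_shim.zig and 0x100 is CMD_GPU_MATRIX from ion_abi.h.
const CMD_GPU_MATRIX: u32 = 0x100;

extern fn nexus_syscall(cmd_id: u32, arg: u32) c_int;

pub fn toggle_matrix(enable: bool) bool {
    // arg > 0 engages the Matrix effect, 0 disengages it
    // (see the CMD_GPU_MATRIX dispatch in core/kernel.nim below).
    const arg: u32 = if (enable) 1 else 0;
    // A non-zero return means the command ring rejected the push (backpressure).
    return nexus_syscall(CMD_GPU_MATRIX, arg) == 0;
}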
@@ -5,6 +5,7 @@ SECTIONS
  . = 0x84000000;

  .text : {
    *(.text._start)
    *(.text)
    *(.text.*)
  }
build.sh (142 lines changed)
@@ -121,6 +121,18 @@ zig build-obj \
mv gpu.o "$BUILD_DIR/gpu.o"
echo " → $BUILD_DIR/gpu.o"

# Compile Matrix Protocol (Rainmaker)
echo "[1.4/8] Compiling Matrix Protocol..."
zig build-obj \
  -target $ZIG_TARGET \
  $ZIG_OBJ_FLAGS \
  -O ReleaseFast \
  "$RUMPK_DIR/hal/matrix.zig" \
  --name matrix

mv matrix.o "$BUILD_DIR/matrix.o"
echo " → $BUILD_DIR/matrix.o"

# =========================================================
# Step 2: Compile context switch assembly
# =========================================================
@@ -371,7 +383,8 @@ nim c \
  -d:noSignalHandler \
  --gc:arc \
  --panics:on \
  --noMain \
  --app:staticlib \
  --nimMainPrefix:Membrane \
  --hint:Conf:off \
  --hint:OSLib:off \
  --nimcache:"$BUILD_DIR/membrane_nimcache" \
@@ -403,20 +416,10 @@ for cfile in "$BUILD_DIR/membrane_nimcache"/*.c; do
done

# =========================================================
# Step 5.1: Compile Subject Zig Object (Diamond Glass)
# Step 5.1: [MOVED] NipBox compilation happens AFTER libnexus.a
# =========================================================
echo "[5.1/8] Compiling Subject Zig Object..."

SUBJECT_SRC=${SUBJECT_SRC:-"$RUMPK_DIR/apps/subject_zig/main.zig"}

zig build-obj \
  -target $ZIG_TARGET \
  $ZIG_OBJ_FLAGS \
  -O ReleaseSmall \
  "$SUBJECT_SRC" \
  --name subject_zig

mv subject_zig.o "$BUILD_DIR/subject_zig.o"
# NipBox requires libnexus.a to link, so it's compiled at Step 5.6
echo "[5.1/8] Skipping Subject (NipBox will be compiled after Membrane)..."

# =========================================================
# Step 5.5: Patch Atomic References & Nuke Cache (GLOBAL)
@@ -468,6 +471,112 @@ zig ar rc "$BUILD_DIR/libnexus.a" \

echo " → $BUILD_DIR/libnexus.a"

# =========================================================
# Step 5.6: Compile NipBox (The Sovereign Userland)
# =========================================================
echo "[5.6/8] Compiling NipBox (Sovereign Userland)..."

# Stage 1: Let Nim generate C code only (no linking)
nim c \
  --cpu:$NIM_CPU \
  --os:any \
  -d:danger \
  --opt:size \
  -d:useMalloc \
  -d:nimNoLibc \
  -d:noSignalHandler \
  --mm:arc \
  --checks:off \
  --panics:on \
  --noMain:off \
  --hint:Conf:off \
  --hint:OSLib:off \
  --nimcache:"$BUILD_DIR/nipbox_nimcache" \
  --path:"$RUMPK_DIR/libs/membrane" \
  --path:"$RUMPK_DIR/core" \
  --compileOnly \
  "$RUMPK_DIR/npl/nipbox/nipbox.nim"

# Stage 2: Compile all C files with zig cc
echo "[5.6.1/8] Compiling NipBox C files with zig cc..."
NIPBOX_OBJS=""
find "$BUILD_DIR/nipbox_nimcache" -name "*.c" | while read cfile; do
  ofile="${cfile%.c}.o"
  zig cc \
    -target $ZIG_TARGET \
    $ARCH_FLAGS \
    -ffreestanding \
    -fno-stack-protector \
    -fno-builtin \
    -Os \
    -ffunction-sections \
    -fdata-sections \
    -I/usr/lib/nim \
    -I"$RUMPK_DIR/core/include" \
    -c "$cfile" \
    -o "$ofile"
done

# Collect all object files
NIPBOX_OBJS=$(find "$BUILD_DIR/nipbox_nimcache" -name "*.o" | tr '\n' ' ')

# Stage 2.5: Compile libc_shim.zig (Universal Adapter)
echo "[5.6.1b/8] Compiling libc_shim.zig..."
zig build-obj \
  -target $ZIG_TARGET \
  $ZIG_OBJ_FLAGS \
  -O ReleaseSmall \
  "$RUMPK_DIR/libs/membrane/libc_shim.zig" \
  --name libc_shim

mv libc_shim.o "$BUILD_DIR/libc_shim.o"

# Stage 2.6: Compile subject_entry.S
echo "[5.6.1c/8] Compiling subject_entry.S..."
zig cc -target $ZIG_TARGET -ffreestanding -c "$RUMPK_DIR/apps/subject_entry.S" -o "$BUILD_DIR/subject_entry.o"

# Stage 3: Link with zig cc
echo "[5.6.2/8] Linking NipBox binary..."
zig cc \
  -target $ZIG_TARGET \
  $ARCH_FLAGS \
  -ffreestanding \
  -fno-stack-protector \
  -nostdlib \
  -static \
  -Wl,--gc-sections \
  -T "$RUMPK_DIR/apps/linker_user.ld" \
  "$BUILD_DIR/subject_entry.o" \
  "$BUILD_DIR/stubs.o" \
  $NIPBOX_OBJS \
  "$BUILD_DIR/libc_shim.o" \
  -L"$BUILD_DIR" \
  -lnexus \
  -o "$BUILD_DIR/nipbox"

# Convert NipBox binary to object file for embedding
echo "[5.6.3/8] Embedding NipBox as Subject..."
llvm-objcopy -O binary "$BUILD_DIR/nipbox" "$BUILD_DIR/nipbox.bin"

# Generate assembly wrapper for safe embedding
cat <<EOF > "$BUILD_DIR/subject_wrapper.S"
.section .rodata
.global _subject_start
.global _subject_end
_subject_start:
.incbin "$BUILD_DIR/nipbox.bin"
_subject_end:
EOF

# Compile wrapper to object file
zig cc \
  -target $ZIG_TARGET \
  -c "$BUILD_DIR/subject_wrapper.S" \
  -o "$BUILD_DIR/subject_zig.o"

echo " → $BUILD_DIR/nipbox ($(stat -c%s "$BUILD_DIR/nipbox" 2>/dev/null || echo "unknown") bytes)"
echo " → $BUILD_DIR/subject_zig.o (embedded)"

# =========================================================
# Step 6: Link Subject Zero
# =========================================================
@@ -498,8 +607,8 @@ echo " → $BUILD_DIR/subject.bin"
# =========================================================
echo "[7/8] Compiling Loader (with Embedded Subject)..."

# Copy subject.bin to core/ so loader.zig can embed it (package path restriction)
cp "$BUILD_DIR/subject.bin" "$RUMPK_DIR/core/subject.bin"
# Copy nipbox.bin to core/ so loader.zig can embed it (replacing stale subject.bin)
cp "$BUILD_DIR/nipbox.bin" "$RUMPK_DIR/core/subject.bin"

zig build-obj \
  -target $ZIG_TARGET \
@@ -558,6 +667,7 @@ $LINKER \
  "$BUILD_DIR/nexshell.o" \
  "$BUILD_DIR/ui.o" \
  "$BUILD_DIR/gpu.o" \
  "$BUILD_DIR/matrix.o" \
  "$BUILD_DIR/microui.o" \
  $NIM_OBJS \
  -o "$BUILD_DIR/rumpk-$ARCH.elf"
@@ -47,8 +47,9 @@ type
    state*: FiberState
    stack*: ptr UncheckedArray[uint8]
    stack_size*: int
    sleep_until*: uint64 # NS timestamp

# =========================================================
proc fiber_yield*() {.importc, cdecl.}
# Imports
# =========================================================
@@ -0,0 +1,12 @@
#ifndef ION_ABI_H
#define ION_ABI_H

// --- System Commands ---
#define CMD_SYS_NOOP 0
#define CMD_SYS_REBOOT 1

// --- GPU / Visual Commands ---
#define CMD_GPU_MATRIX 0x100
#define CMD_GPU_CLEAR 0x101

#endif
@@ -19,5 +19,11 @@ int putchar(int c);
int puts(const char *s);
int fflush(FILE *stream);
size_t fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream);
int ferror(FILE *stream);
void clearerr(FILE *stream);
int fputc(int c, FILE *stream);
int fputs(const char *s, FILE *stream);
char *fgets(char *s, int size, FILE *stream);
int fgetc(FILE *stream);

#endif /* _STDIO_H */
@@ -14,5 +14,7 @@ char *strcpy(char *dest, const char *src);
char *strncpy(char *dest, const char *src, size_t n);
int strcmp(const char *s1, const char *s2);
int strncmp(const char *s1, const char *s2, size_t n);
void *memchr(const void *s, int c, size_t n);
char *strerror(int errnum);

#endif /* _STRING_H */
@@ -8,7 +8,7 @@ type
proc kernel_panic*(msg: cstring) {.importc: "panic", cdecl.}
proc kprintln*(s: cstring) {.importc: "kprintln", cdecl.}

template secure_send*(ring_ptr: pointer, data: uint64) =
template secure_send*(ring_ptr: pointer, pkt: CmdPacket) =
  ## Verifies invariants before pushing to a command ring.

  # 1. PRE-CONDITION: Alignment
@@ -16,8 +16,8 @@ template secure_send*(ring_ptr: pointer, data: uint64) =
    kernel_panic("Invariant Violation: Unaligned Ring Pointer")

  # 2. OPERATION: Try to push via HAL
  # We cast uint64 back to CmdPacket for the FFI call
  let success = hal_cmd_push(cast[uint64](ring_ptr), cast[CmdPacket](data))
  # We pass CmdPacket directly (it's >8 bytes now)
  let success = hal_cmd_push(cast[uint64](ring_ptr), pkt)

  # 3. POST-CONDITION: Flow Control Warning
  if not success:
core/ion.nim (17 lines changed)
@@ -1,19 +1,23 @@
# Nexus Rumpk: ION Control Plane
# Markus Maiwald (Architect) | Voxis Forge (AI)
# Nexus Rumpk: ION Control Plane

import ion/memory
export memory

type
  CmdType* = enum
    CMD_NONE = 0
    CMD_ION_STOP = 1
    CMD_ION_START = 2
    CMD_DROP_ALL = 3
    CMD_SYS_NOOP = 0
    CMD_SYS_REBOOT = 1
    CMD_ION_STOP = 2
    CMD_ION_START = 3
    CMD_GPU_MATRIX = 0x100
    CMD_GPU_CLEAR = 0x101
    CMD_GET_GPU_STATUS = 0x102

  CmdPacket* = object
    kind*: uint32
    arg*: uint32
    id*: array[16, byte] # u128 for SipHash Provenance

  # Binary compatible with hal/channel.zig
  HAL_Ring*[T] = object
@@ -31,6 +35,7 @@ type
    s_tx*: ptr HAL_Ring[IonPacket] # App -> Kernel
    s_event*: ptr HAL_Ring[IonPacket] # Telemetry
    s_cmd*: ptr HAL_Ring[CmdPacket] # Command Ring (Control Plane)
    s_input*: ptr HAL_Ring[IonPacket] # Input to Subject

include invariant

@@ -57,7 +62,7 @@ proc recv*(chan: var SovereignChannel[IonPacket],
  return hal_channel_pop(cast[uint64](chan.ring), addr out_pkt)

proc send*(chan: var SovereignChannel[CmdPacket], pkt: CmdPacket) =
  secure_send(chan.ring, cast[uint64](pkt))
  secure_send(chan.ring, pkt)

proc recv*(chan: var SovereignChannel[CmdPacket],
           out_pkt: var CmdPacket): bool =
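The Nim CmdPacket above must stay byte-for-byte compatible with the CmdPacket extern struct in hal/channel.zig (u32 kind, u32 arg, 16-byte id). As a hypothetical sanity check, not part of this commit, the agreement could be asserted at compile time on the Zig side:

// Hypothetical compile-time layout check, mirroring the CmdPacket
// extern struct from hal/channel.zig shown further down in this diff.
const std = @import("std");

pub const CmdPacket = extern struct {
    kind: u32,
    arg: u32,
    id: [16]u8, // SipHash Provenance
};

comptime {
    // 4 + 4 + 16 bytes, no padding expected for this extern struct.
    std.debug.assert(@sizeOf(CmdPacket) == 24);
    std.debug.assert(@offsetOf(CmdPacket, "arg") == 4);
    std.debug.assert(@offsetOf(CmdPacket, "id") == 8);
}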
core/kernel.nim (258 lines changed)
@@ -9,6 +9,7 @@ import ion

var ion_paused*: bool = false
var pause_start*: uint64 = 0
var matrix_enabled*: bool = false


# =========================================================
@@ -21,6 +22,13 @@ var fiber_ui: FiberObject
var fiber_subject: FiberObject
var fiber_watchdog: FiberObject

# --- STACK ALLOCATIONS ---
var stack_ion {.align: 16.}: array[4096, uint8]
var stack_nexshell {.align: 16.}: array[4096, uint8]
var stack_ui {.align: 16.}: array[32768, uint8]
var stack_subject {.align: 16.}: array[32768, uint8]
var stack_watchdog {.align: 16.}: array[4096, uint8]

# Exports for Zig NPLs
proc console_write(p: pointer, len: csize_t) {.importc, cdecl.}
proc write*(fd: cint, p: pointer, len: csize_t): csize_t {.exportc, cdecl.} =
@@ -48,12 +56,25 @@ var guest_rx_hal: HAL_Ring[IonPacket]
var guest_tx_hal: HAL_Ring[IonPacket]
var guest_event_hal: HAL_Ring[IonPacket]
var guest_cmd_hal: HAL_Ring[CmdPacket]
var guest_input_hal: HAL_Ring[IonPacket]

# Shared Channels (The Valves - L1 Logic)
var chan_rx*: SovereignChannel[IonPacket]
var chan_tx*: SovereignChannel[IonPacket]
var chan_event*: SovereignChannel[IonPacket]
var chan_cmd*: SovereignChannel[CmdPacket]
var chan_input*: SovereignChannel[IonPacket]

proc ion_push_stdin*(p: pointer, len: csize_t) {.exportc, cdecl.} =
  ## Push raw console data into the Userland Input Ring
  var pkt = ion_alloc()
  if pkt.data == nil: return

  let to_copy = min(int(len), 2048)
  copyMem(pkt.data, p, to_copy)
  pkt.len = uint16(to_copy)
  # We use chan_input which is the kernel-side SovereignChannel
  chan_input.send(pkt)

proc get_ion_load(): int =
  ## Calculate load of the Command Ring (The Heartbeat of the NPLs)
@ -62,139 +83,153 @@ proc get_ion_load(): int =
|
|||
let mask = guest_cmd_hal.mask
|
||||
return int((head - tail) and mask)
|
||||
|
||||
proc rumpk_yield_internal() {.cdecl, exportc.} =
|
||||
let load = get_ion_load()
|
||||
proc rumpk_yield_internal() {.cdecl, exportc.}
|
||||
|
||||
# 🏛️ ADAPTIVE GOVERNOR (Phase 3: FLOOD CONTROL)
|
||||
# Ring Size = 256. 80% = ~200.
|
||||
if load > 200:
|
||||
# BACKPRESSURE MODE: Ring is filling up! Panic Flush!
|
||||
# Force switch to ION Fiber to drain.
|
||||
if current_fiber != addr fiber_ion:
|
||||
switch(addr fiber_ion)
|
||||
return
|
||||
elif load > 0:
|
||||
# WAR MODE: Priority to IO Loop. Bypass NexShell/Watchdog.
|
||||
if current_fiber == addr fiber_subject:
|
||||
switch(addr fiber_ion)
|
||||
return
|
||||
elif current_fiber == addr fiber_ion:
|
||||
# If Subject is the main producer, we must let it run
|
||||
switch(addr fiber_subject)
|
||||
return
|
||||
elif load == 0:
|
||||
# IDLE MODE (Phase 3): No pending commands.
|
||||
# We must enable interrupts to receive packets!
|
||||
asm "csrsi sstatus, 2"
|
||||
# We can just yield here, interrupts will fire and preempt us (if we had preemption)
|
||||
# or fire and return to here.
|
||||
# But if we just loop, we burn CPU.
|
||||
# Ideally: WFI.
|
||||
# For now: Just enable interrupts so ISR can fire.
|
||||
# asm "wfi" # Optional: Save power.
|
||||
# HAL Driver API
|
||||
proc hal_io_init() {.importc, cdecl.}
|
||||
proc virtio_net_poll() {.importc, cdecl.}
|
||||
proc virtio_net_send(data: pointer, len: csize_t) {.importc, cdecl.}
|
||||
proc ion_free_raw(id: uint16) {.importc, cdecl.}
|
||||
proc nexshell_main() {.importc, cdecl.}
|
||||
proc ui_fiber_entry() {.importc, cdecl.}
|
||||
|
||||
# Normal Round Robin logic
|
||||
if current_fiber == addr fiber_ion:
|
||||
switch(addr fiber_nexshell)
|
||||
elif current_fiber == addr fiber_nexshell:
|
||||
switch(addr fiber_ui)
|
||||
elif current_fiber == addr fiber_ui:
|
||||
switch(addr fiber_subject)
|
||||
elif current_fiber == addr fiber_subject:
|
||||
switch(addr fiber_watchdog)
|
||||
else:
|
||||
switch(addr fiber_ion)
|
||||
proc get_now_ns(): uint64 =
|
||||
proc rumpk_timer_now_ns(): uint64 {.importc, cdecl.}
|
||||
return rumpk_timer_now_ns()
|
||||
|
||||
proc fiber_yield*() {.exportc, cdecl.} =
|
||||
rumpk_yield_internal()
|
||||
|
||||
proc fiber_sleep*(ms: int) {.exportc, cdecl.} =
|
||||
let now = get_now_ns()
|
||||
current_fiber.sleep_until = now + uint64(ms) * 1000000'u64
|
||||
fiber_yield()
|
||||
|
||||
# Utility moved up
|
||||
proc rumpk_yield_internal() {.cdecl, exportc.} =
|
||||
let load = get_ion_load()
|
||||
let now = get_now_ns()
|
||||
|
||||
# 🏛️ ADAPTIVE GOVERNOR (Phase 3: FLOOD CONTROL)
|
||||
if load > 200:
|
||||
if current_fiber != addr fiber_ion:
|
||||
switch(addr fiber_ion)
|
||||
return
|
||||
elif load > 0:
|
||||
if current_fiber == addr fiber_subject:
|
||||
switch(addr fiber_ion)
|
||||
return
|
||||
|
||||
# Channel API (The Valve) - Wrappers for ION
|
||||
# Channel API is imported from ion.nim
|
||||
# Normal Round Robin logic with Sleep Check
|
||||
var next_fiber: Fiber = nil
|
||||
|
||||
if current_fiber == addr fiber_ion:
|
||||
next_fiber = addr fiber_nexshell
|
||||
elif current_fiber == addr fiber_nexshell:
|
||||
next_fiber = addr fiber_ui
|
||||
elif current_fiber == addr fiber_ui:
|
||||
next_fiber = addr fiber_subject
|
||||
elif current_fiber == addr fiber_subject:
|
||||
next_fiber = addr fiber_watchdog
|
||||
else:
|
||||
next_fiber = addr fiber_ion
|
||||
|
||||
# Skip sleeping fibers
|
||||
var found = false
|
||||
for _ in 0..5: # Max 5 check to avoid skip all
|
||||
if next_fiber != nil and now >= next_fiber.sleep_until:
|
||||
found = true
|
||||
break
|
||||
|
||||
# Move to next in sequence
|
||||
if next_fiber == addr fiber_ion: next_fiber = addr fiber_nexshell
|
||||
elif next_fiber == addr fiber_nexshell: next_fiber = addr fiber_ui
|
||||
elif next_fiber == addr fiber_ui: next_fiber = addr fiber_subject
|
||||
elif next_fiber == addr fiber_subject: next_fiber = addr fiber_watchdog
|
||||
else: next_fiber = addr fiber_ion
|
||||
|
||||
if found and next_fiber != current_fiber:
|
||||
switch(next_fiber)
|
||||
elif not found:
|
||||
asm "csrsi sstatus, 2"
|
||||
|
||||
# =========================================================
|
||||
# ION Loop
|
||||
# =========================================================
|
||||
|
||||
proc ion_fiber_entry() {.cdecl.} =
|
||||
kprintln("[ION] Fiber 1 Reporting for Duty.")
|
||||
while true:
|
||||
# 1. Drain Command Channel -> Push to HW
|
||||
var cmd: CmdPacket
|
||||
while chan_cmd.recv(cmd):
|
||||
# Cortex Logic: Dispatch Commands
|
||||
case cmd.kind:
|
||||
of uint32(CmdType.CMD_GPU_MATRIX):
|
||||
let msg = if cmd.arg > 0: "ENGAGE" else: "DISENGAGE"
|
||||
kprintln("[Kernel] Matrix Protocol: ")
|
||||
kprintln(msg)
|
||||
matrix_enabled = (cmd.arg > 0)
|
||||
of uint32(CmdType.CMD_SYS_REBOOT):
|
||||
kprintln("[Kernel] Reboot requested.")
|
||||
# TODO: sys_reset()
|
||||
of uint32(CmdType.CMD_ION_STOP):
|
||||
ion_paused = true
|
||||
pause_start = get_now_ns()
|
||||
kprintln("[Kernel] ION PAUSED by Watchdog.")
|
||||
of uint32(CmdType.CMD_ION_START):
|
||||
ion_paused = false
|
||||
kprintln("[Kernel] ION RESUMED.")
|
||||
of uint32(CmdType.CMD_GET_GPU_STATUS):
|
||||
let msg = if matrix_enabled: "STATUS: Matrix is ONLINE" else: "STATUS: Matrix is OFFLINE"
|
||||
kprintln("[Kernel] GPU Request")
|
||||
kprintln(msg)
|
||||
else:
|
||||
discard
|
||||
|
||||
# 2. Check HW -> Push to Logic Channel
|
||||
# (Virtio Net Poll is called from HAL via Interrupts normally,
|
||||
# but here we might poll in ION fiber if no interrupts)
|
||||
proc virtio_net_poll() {.importc, cdecl.}
|
||||
virtio_net_poll()
|
||||
|
||||
fiber_yield()
|
||||
|
||||
# =========================================================
|
||||
# NexShell NPL Loop
|
||||
# =========================================================
|
||||
|
||||
# nexshell_fiber_entry is removed, nexshell_main is used directly as fiber entry.
|
||||
|
||||
proc subject_fiber_entry() {.cdecl.} =
|
||||
proc launch_subject() {.importc, cdecl.}
|
||||
launch_subject()
|
||||
while true:
|
||||
fiber_yield()
|
||||
|
||||
# =========================================================
|
||||
# Kernel Infrastructure Entry
|
||||
# =========================================================
|
||||
|
||||
# HAL/NPL Entry points
|
||||
proc rumpk_halt() {.importc, cdecl, noreturn.}
|
||||
proc hal_io_init() {.importc, cdecl.}
|
||||
proc nexshell_main() {.importc, cdecl.}
|
||||
proc launch_subject() {.importc, cdecl.}
|
||||
|
||||
# Hardware Ingress (Zig -> Nim)
|
||||
proc ion_get_virt(id: uint16): pointer {.importc, cdecl.}
|
||||
proc ion_ingress*(id: uint16, len: uint16) {.exportc, cdecl.} =
|
||||
## Intercept raw hardware packet and push to Sovereign RX Channel
|
||||
# kprint("[Kernel] Ingress ID: "); kprint_int(int(id)); kprint(" Len: "); kprint_int(int(len)); kprintln("")
|
||||
let data = ion_get_virt(id)
|
||||
var pkt = IonPacket(data: cast[ptr UncheckedArray[byte]](data), len: len, id: id)
|
||||
chan_rx.send(pkt)
|
||||
|
||||
# Panic Handler
|
||||
proc nimPanic(msg: cstring) {.exportc: "panic", cdecl, noreturn.} =
|
||||
kprint("\n[PANIC] "); kprintln(msg)
|
||||
kprintln("\n[PANIC] ")
|
||||
kprintln(msg)
|
||||
rumpk_halt()
|
||||
|
||||
# =========================================================
|
||||
# Fiber Entries
|
||||
# =========================================================
|
||||
|
||||
var stack_ion: array[32768, uint8]
|
||||
var stack_nexshell: array[32768, uint8]
|
||||
var stack_ui: array[32768, uint8]
|
||||
var stack_subject: array[65536, uint8]
|
||||
var stack_watchdog: array[4096, uint8]
|
||||
|
||||
proc ui_fiber_entry() {.importc, cdecl.}
|
||||
|
||||
proc subject_fiber_entry() {.cdecl.} =
|
||||
launch_subject()
|
||||
|
||||
# Include Watchdog Logic (Access to Kernel Globals)
|
||||
# Include Watchdog Logic
|
||||
include watchdog
|
||||
|
||||
# HAL Driver API
|
||||
proc virtio_net_poll() {.importc, cdecl.}
|
||||
proc virtio_net_send(data: pointer, len: csize_t) {.importc, cdecl.}
|
||||
proc ion_free_raw(id: uint16) {.importc, cdecl.}
|
||||
|
||||
proc ion_fiber_entry() {.cdecl.} =
|
||||
kprint("[ION] Fiber 1 Reporting for Duty.")
|
||||
if ion_paused: kprintln(" (PAUSED)") else: kprintln("")
|
||||
|
||||
var pkt: IonPacket
|
||||
var cmd: CmdPacket
|
||||
|
||||
while true:
|
||||
# 0. Poll Hardware (The Heartbeat)
|
||||
virtio_net_poll()
|
||||
|
||||
# 1. Process Commands (Drain the ring!)
|
||||
while chan_cmd.recv(cmd):
|
||||
if cmd.kind == uint32(CMD_ION_STOP):
|
||||
kprintln("[ION] STOP received. Suspending IO.")
|
||||
ion_paused = true
|
||||
pause_start = cpu_ticks()
|
||||
elif cmd.kind == uint32(CMD_ION_START):
|
||||
kprintln("[ION] START received. Resuming IO.")
|
||||
ion_paused = false
|
||||
|
||||
# 2. Process Data (if not paused, Drain the ring!)
|
||||
if not ion_paused:
|
||||
while chan_tx.recv(pkt):
|
||||
# Transmit to Hardware
|
||||
kprint("[ION] TX from chan_tx, len=")
|
||||
# kprint_int(int(pkt.len))
|
||||
kprintln("")
|
||||
virtio_net_send(pkt.data, csize_t(pkt.len))
|
||||
# Zero-Copy Ingest means we own the buffer now.
|
||||
# Since virtio_net_send copies (for now), we must free the original slab.
|
||||
ion_free_raw(pkt.id)
|
||||
|
||||
fiber_yield()
|
||||
|
||||
|
||||
# =========================================================
|
||||
# kmain: The Orchestrator
|
||||
# =========================================================
|
||||
|
|
@@ -212,6 +247,7 @@ proc kmain() {.exportc, cdecl.} =

  # 1.5 The Retina (VirtIO-GPU)
  proc virtio_gpu_init(base: uint64) {.importc, cdecl.}
  proc matrix_init() {.importc, cdecl.}

  # On QEMU virt machine, virtio-mmio devices are at 0x10001000-0x10008000
  # GPU could be at any slot.
@@ -220,6 +256,9 @@ proc kmain() {.exportc, cdecl.} =
    let base_addr = 0x10000000'u64 + (uint64(i) * 0x1000'u64)
    virtio_gpu_init(base_addr)

  # Initial Matrix greeting
  matrix_init()

  # 2. Channel Infrastructure
  kprintln("[Kernel] Mapping Sovereign Channels...")

@@ -232,11 +271,15 @@ proc kmain() {.exportc, cdecl.} =
  guest_cmd_hal.head = 0
  guest_cmd_hal.tail = 0
  guest_cmd_hal.mask = 255
  guest_input_hal.head = 0
  guest_input_hal.tail = 0
  guest_input_hal.mask = 255

  chan_rx.ring = addr guest_rx_hal
  chan_tx.ring = addr guest_tx_hal
  chan_event.ring = addr guest_event_hal
  chan_cmd.ring = addr guest_cmd_hal
  chan_input.ring = addr guest_input_hal

  let sys_table = cast[ptr SysTable](SYSTABLE_BASE)
  sys_table.magic = 0x4E585553
@@ -244,7 +287,7 @@ proc kmain() {.exportc, cdecl.} =
  sys_table.s_tx = addr guest_tx_hal
  sys_table.s_event = addr guest_event_hal
  sys_table.s_cmd = addr guest_cmd_hal

  sys_table.s_input = addr guest_input_hal

  # 3. The Nerve (Yield Anchor)
  proc rumpk_yield_guard() {.importc, cdecl.}
@@ -258,7 +301,8 @@ proc kmain() {.exportc, cdecl.} =
  init_fiber(addr fiber_ion, ion_fiber_entry, addr stack_ion[0], sizeof(stack_ion))

  # 2. NEXSHELL FIBER (The Brain)
  init_fiber(addr fiber_nexshell, nexshell_main, addr stack_nexshell[0], sizeof(stack_nexshell))
  init_fiber(addr fiber_nexshell, nexshell_main, addr stack_nexshell[0],
             sizeof(stack_nexshell))

  # 3. UI FIBER (The Face)
  init_fiber(addr fiber_ui, ui_fiber_entry, addr stack_ui[0], sizeof(stack_ui))
@@ -9,11 +9,18 @@ export fn launch_subject() void {
    const target_addr: usize = 0x84000000;
    const dest = @as([*]u8, @ptrFromInt(target_addr));

    console_write("[Loader] Copying Subject Zero to 0x84000000...\n", 48);
    console_write("[Loader] Loading Subject Zero...\n", 33);
    @memcpy(dest[0..subject_bin.len], subject_bin);

    console_write("[Loader] Jumping to Subject Zero...\n", 36);
    console_write("[Loader] Jumping...\n", 20);

    const entry = @as(*const fn () void, @ptrFromInt(target_addr));
    entry();
}

pub fn panic(msg: []const u8, _: ?*std.builtin.StackTrace, _: ?usize) noreturn {
    console_write("[Loader] PANIC: ", 16);
    console_write(msg.ptr, msg.len);
    console_write("\n", 1);
    while (true) {}
}
@@ -1,3 +1,4 @@
// MARKUS MAIWALD (ARCHITECT) | VOXIS FORGE (AI)
// HAL ABI - The Contract between L0 (Zig) and L1 (Nim)
// This struct is the "contract" for future language integration
@@ -14,6 +14,7 @@ pub const IonPacket = extern struct {
pub const CmdPacket = extern struct {
    kind: u32,
    arg: u32,
    id: [16]u8, // SipHash Provenance
};

pub fn Ring(comptime T: type) type {
@@ -57,6 +58,9 @@ fn popGeneric(comptime T: type, ring: *Ring(T), out_pkt: *T) bool {
        return false;
    }

    // Ensure we see data written by producer before reading it
    asm volatile ("fence r, rw" ::: .{ .memory = true });

    out_pkt.* = ring.data[tail & ring.mask];
    const next = (tail + 1) & ring.mask;
    @atomicStore(u32, &ring.tail, next, .release);
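The consumer-side fence above only helps if the producer orders its writes the same way. As a minimal, self-contained sketch (mirroring the head/tail/mask/data layout of the RingBuffer in libs/membrane/ion.zig later in this diff, not a new API), the producer must write the slot before publishing the new head:

// Hypothetical producer counterpart to popGeneric, shown for symmetry.
pub fn Ring(comptime T: type) type {
    return extern struct {
        head: u32,
        tail: u32,
        mask: u32, // capacity - 1, capacity must be a power of two
        data: [256]T,
    };
}

pub fn pushGeneric(comptime T: type, ring: *Ring(T), pkt: T) bool {
    const head = @atomicLoad(u32, &ring.head, .monotonic);
    const tail = @atomicLoad(u32, &ring.tail, .acquire);
    if (((head + 1) & ring.mask) == (tail & ring.mask)) return false; // full

    // Write the slot first...
    ring.data[head & ring.mask] = pkt;
    // ...then publish it: the release store pairs with the consumer's
    // acquire load / fence so the slot contents are visible before the index.
    @atomicStore(u32, &ring.head, (head + 1) & ring.mask, .release);
    return true;
}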
@@ -23,11 +23,15 @@ export fn _start() callconv(.naked) noreturn {
        \\ la gp, __global_pointer$
        \\ .option pop

        // 2. Set up Stack (Load address of stack_bytes, add size)
        // 2. Set up Stack
        \\ la sp, stack_bytes
        \\ li t0, 65536
        \\ add sp, sp, t0

        // 2.1 Install Trap Handler (Direct Mode)
        \\ la t0, trap_entry
        \\ csrw stvec, t0

        // 3. Jump to Zig Entry
        \\ call zig_entry
        \\ 1: wfi
@@ -36,6 +40,36 @@ export fn _start() callconv(.naked) noreturn {
    unreachable;
}

export fn trap_entry() callconv(.naked) void {
    asm volatile (
        \\ // Minimal context save (clobbering scratch regs for debug)
        \\ csrr t0, scause
        \\ csrr t1, sepc
        \\ csrr t2, stval
        \\ mv a0, t0
        \\ mv a1, t1
        \\ mv a2, t2
        \\ call rss_trap_handler
        \\ 1: wfi
        \\ j 1b
    );
}

export fn rss_trap_handler(cause: usize, epc: usize, val: usize) void {
    uart.print("\n\n!!! SOVEREIGN TRAP !!!\n");
    uart.print("SCAUSE: 0x");
    uart.print_hex(cause);
    uart.print("\n");
    uart.print("SEPC: 0x");
    uart.print_hex(epc);
    uart.print("\n");
    uart.print("STVAL: 0x");
    uart.print_hex(val);
    uart.print("\n");
    uart.print("SYSTEM HALTED.\n");
    while (true) {}
}

// =========================================================
// Stack (64KB)
// =========================================================
@@ -120,3 +120,25 @@ pub fn fill_rect(x: i32, y: i32, w: i32, h: i32, color: u32) void {
        @memset(fb_memory[offset .. offset + cols], color);
    }
}

// THE FADE (Optimization: Dim pixels instead of clearing)
// This creates the "Ghost Trail" effect cheaply.
pub fn fade_screen(factor: u8) void {
    for (&fb_memory) |*pixel| {
        if (pixel.* == 0) continue;

        // Extract Green Channel (Assuming 0xAARRGGBB)
        // We only care about green for the matrix effect
        var g = (pixel.* >> 8) & 0xFF;

        if (g > factor) {
            g -= factor;
        } else {
            g = 0;
        }

        // Reconstruct: Keep it mostly green but allow it to fade to black
        // We zero out Red and Blue for the matrix look during fade.
        pixel.* = (@as(u32, g) << 8) | 0xFF000000;
    }
}
hal/gpu.zig (124 lines changed)
@ -100,8 +100,9 @@ var initialized: bool = false;
|
|||
// Actually it depends on dtb, for now we'll use a probe or hardcode.
|
||||
var mmio_base: usize = 0;
|
||||
|
||||
// Control queue (Queue 0)
|
||||
// VirtIO Queue Layout (Contiguous for v1 legacy support)
|
||||
const QUEUE_SIZE = 16;
|
||||
const PAGE_SIZE = 4096;
|
||||
|
||||
const VirtioDesc = extern struct {
|
||||
addr: u64,
|
||||
|
|
@ -114,19 +115,39 @@ const VirtioAvail = extern struct {
|
|||
flags: u16,
|
||||
idx: u16,
|
||||
ring: [QUEUE_SIZE]u16,
|
||||
// Note: older versions might have used_event here, but we don't need it for basic polling
|
||||
};
|
||||
|
||||
const VirtioUsedItem = extern struct {
|
||||
id: u32,
|
||||
len: u32,
|
||||
};
|
||||
|
||||
const VirtioUsed = extern struct {
|
||||
flags: u16,
|
||||
idx: u16,
|
||||
ring: [QUEUE_SIZE]extern struct { id: u32, len: u32 },
|
||||
ring: [QUEUE_SIZE]VirtioUsedItem,
|
||||
};
|
||||
|
||||
var desc_table: [QUEUE_SIZE]VirtioDesc = undefined;
|
||||
var avail_ring: VirtioAvail = undefined;
|
||||
var used_ring: VirtioUsed = undefined;
|
||||
// We create a structure that matches the legacy layout:
|
||||
// Descriptors [16 * 16 = 256 bytes]
|
||||
// Available [2 + 2 + 32 = 36 bytes]
|
||||
// Padding to 4096
|
||||
// Used [2 + 2 + 16 * 8 = 132 bytes]
|
||||
const VirtioQueueLayout = extern struct {
|
||||
desc: [QUEUE_SIZE]VirtioDesc,
|
||||
avail: VirtioAvail,
|
||||
_pad1: [PAGE_SIZE - @sizeOf([QUEUE_SIZE]VirtioDesc) - @sizeOf(VirtioAvail)]u8,
|
||||
used: VirtioUsed,
|
||||
};
|
||||
|
||||
var queue align(PAGE_SIZE) = VirtioQueueLayout{
|
||||
.desc = undefined,
|
||||
.avail = .{ .flags = 0, .idx = 0, .ring = [_]u16{0} ** QUEUE_SIZE },
|
||||
._pad1 = [_]u8{0} ** (PAGE_SIZE - @sizeOf([QUEUE_SIZE]VirtioDesc) - @sizeOf(VirtioAvail)),
|
||||
.used = .{ .flags = 0, .idx = 0, .ring = [_]VirtioUsedItem{.{ .id = 0, .len = 0 }} ** QUEUE_SIZE },
|
||||
};
|
||||
|
||||
var desc_idx: u16 = 0;
|
||||
var last_used_idx: u16 = 0;
|
||||
|
||||
// Command/Response buffers (static)
|
||||
|
|
@ -154,9 +175,13 @@ const VIRTIO_MMIO_DEVICE_ID = 0x008;
|
|||
const VIRTIO_MMIO_VENDOR_ID = 0x00c;
|
||||
const VIRTIO_MMIO_DEVICE_FEATURES = 0x010;
|
||||
const VIRTIO_MMIO_DRIVER_FEATURES = 0x020;
|
||||
const VIRTIO_MMIO_GUEST_FEATURES = 0x020;
|
||||
const VIRTIO_MMIO_GUEST_FEATURES_SEL = 0x024;
|
||||
const VIRTIO_MMIO_QUEUE_SEL = 0x030;
|
||||
const VIRTIO_MMIO_QUEUE_NUM_MAX = 0x034;
|
||||
const VIRTIO_MMIO_QUEUE_NUM = 0x038;
|
||||
const VIRTIO_MMIO_QUEUE_ALIGN = 0x03c;
|
||||
const VIRTIO_MMIO_QUEUE_PFN = 0x040;
|
||||
const VIRTIO_MMIO_QUEUE_READY = 0x044;
|
||||
const VIRTIO_MMIO_QUEUE_NOTIFY = 0x050;
|
||||
const VIRTIO_MMIO_INTERRUPT_STATUS = 0x060;
|
||||
|
|
@ -173,9 +198,12 @@ const VIRTIO_STATUS_ACKNOWLEDGE = 1;
|
|||
const VIRTIO_STATUS_DRIVER = 2;
|
||||
const VIRTIO_STATUS_DRIVER_OK = 4;
|
||||
const VIRTIO_STATUS_FEATURES_OK = 8;
|
||||
const VIRTIO_STATUS_NEEDS_RESET = 64;
|
||||
const VIRTIO_STATUS_FAILED = 128;
|
||||
|
||||
const VRING_DESC_F_NEXT: u16 = 1;
|
||||
const VRING_DESC_F_WRITE: u16 = 2;
|
||||
const VRING_DESC_F_INDIRECT: u16 = 4;
|
||||
|
||||
// =========================================================
|
||||
// Queue Operations
|
||||
|
|
@ -186,6 +214,10 @@ fn queue_init() void {
|
|||
mmio_write(VIRTIO_MMIO_QUEUE_SEL, 0);
|
||||
|
||||
const max = mmio_read(VIRTIO_MMIO_QUEUE_NUM_MAX);
|
||||
uart.print("[GPU] Queue 0 Max Size: ");
|
||||
uart.print_hex(max);
|
||||
uart.print("\n");
|
||||
|
||||
if (max == 0) {
|
||||
uart.print("[GPU] Queue 0 not available!\n");
|
||||
return;
|
||||
|
|
@ -193,35 +225,24 @@ fn queue_init() void {
|
|||
|
||||
mmio_write(VIRTIO_MMIO_QUEUE_NUM, QUEUE_SIZE);
|
||||
|
||||
// Legacy (v1) uses a single contiguous page for desc+avail+used
|
||||
// and QUEUE_PFN register instead of separate addresses.
|
||||
// For simplicity, we use a static aligned buffer.
|
||||
const version = mmio_read(VIRTIO_MMIO_VERSION);
|
||||
|
||||
if (version == 1) {
|
||||
// Legacy VirtIO MMIO v1
|
||||
// Queue address = page frame number (page-aligned address / page_size)
|
||||
// We need to provide a contiguous buffer for desc, avail, used.
|
||||
// For now, use our static arrays but provide the desc address as PFN.
|
||||
const page_addr = @intFromPtr(&desc_table) & 0xFFFFF000; // Page aligned
|
||||
const pfn = page_addr / 4096; // Page frame number
|
||||
|
||||
// QUEUE_ALIGN register at 0x3c (page size, usually 4096)
|
||||
// QUEUE_PFN register at 0x40
|
||||
const VIRTIO_MMIO_QUEUE_ALIGN: usize = 0x03c;
|
||||
const VIRTIO_MMIO_QUEUE_PFN: usize = 0x040;
|
||||
const queue_addr = @intFromPtr(&queue);
|
||||
const pfn = queue_addr / 4096; // Page frame number
|
||||
|
||||
mmio_write(VIRTIO_MMIO_QUEUE_ALIGN, 4096);
|
||||
mmio_write(VIRTIO_MMIO_QUEUE_PFN, @truncate(pfn));
|
||||
|
||||
uart.print("[GPU] Legacy queue at PFN 0x");
|
||||
uart.print("[GPU] Legacy queue (v1) initialized at PFN 0x");
|
||||
uart.print_hex(pfn);
|
||||
uart.print("\n");
|
||||
} else {
|
||||
// Modern VirtIO MMIO v2
|
||||
const desc_addr = @intFromPtr(&desc_table);
|
||||
const avail_addr = @intFromPtr(&avail_ring);
|
||||
const used_addr = @intFromPtr(&used_ring);
|
||||
const desc_addr = @intFromPtr(&queue.desc);
|
||||
const avail_addr = @intFromPtr(&queue.avail);
|
||||
const used_addr = @intFromPtr(&queue.used);
|
||||
|
||||
mmio_write(VIRTIO_MMIO_QUEUE_DESC_LOW, @truncate(desc_addr));
|
||||
mmio_write(VIRTIO_MMIO_QUEUE_DESC_HIGH, @truncate(desc_addr >> 32));
|
||||
|
|
@ -231,44 +252,56 @@ fn queue_init() void {
|
|||
mmio_write(VIRTIO_MMIO_QUEUE_USED_HIGH, @truncate(used_addr >> 32));
|
||||
|
||||
mmio_write(VIRTIO_MMIO_QUEUE_READY, 1);
|
||||
uart.print("[GPU] Modern queue (v2) initialized.\n");
|
||||
}
|
||||
|
||||
avail_ring.idx = 0;
|
||||
queue.avail.idx = 0;
|
||||
last_used_idx = 0;
|
||||
desc_idx = 0;
|
||||
}
|
||||
|
||||
fn send_command(cmd_ptr: [*]const u8, cmd_len: usize) void {
|
||||
fn send_command(ptr: [*]const u8, len: usize) void {
|
||||
const phys_cmd = @intFromPtr(ptr);
|
||||
const phys_resp = @intFromPtr(&resp_buf);
|
||||
|
||||
// Descriptor 0: Command (device read)
|
||||
desc_table[0] = .{
|
||||
.addr = @intFromPtr(cmd_ptr),
|
||||
.len = @intCast(cmd_len),
|
||||
queue.desc[0] = .{
|
||||
.addr = phys_cmd,
|
||||
.len = @intCast(len),
|
||||
.flags = VRING_DESC_F_NEXT,
|
||||
.next = 1,
|
||||
};
|
||||
|
||||
// Descriptor 1: Response (device write)
|
||||
desc_table[1] = .{
|
||||
.addr = @intFromPtr(&resp_buf),
|
||||
queue.desc[1] = .{
|
||||
.addr = phys_resp,
|
||||
.len = @sizeOf(@TypeOf(resp_buf)),
|
||||
.flags = VRING_DESC_F_WRITE,
|
||||
.next = 0,
|
||||
};
|
||||
|
||||
// Add to available ring
|
||||
avail_ring.ring[avail_ring.idx % QUEUE_SIZE] = 0;
|
||||
queue.avail.ring[queue.avail.idx % QUEUE_SIZE] = 0;
|
||||
asm volatile ("fence" ::: .{ .memory = true });
|
||||
avail_ring.idx +%= 1;
|
||||
queue.avail.idx +%= 1;
|
||||
asm volatile ("fence" ::: .{ .memory = true });
|
||||
|
||||
// Notify device
|
||||
mmio_write(VIRTIO_MMIO_QUEUE_NOTIFY, 0);
|
||||
|
||||
// Wait for response (polling)
|
||||
while (last_used_idx == used_ring.idx) {
|
||||
var timeout: usize = 0;
|
||||
while (last_used_idx == queue.used.idx) {
|
||||
asm volatile ("" ::: .{ .memory = true });
|
||||
timeout += 1;
|
||||
if (timeout % 10000000 == 0) {
|
||||
uart.print("[GPU] Polling... last=");
|
||||
uart.print_hex(last_used_idx);
|
||||
uart.print(" current=");
|
||||
uart.print_hex(queue.used.idx);
|
||||
uart.print("\n");
|
||||
}
|
||||
}
|
||||
last_used_idx = used_ring.idx;
|
||||
last_used_idx = queue.used.idx;
|
||||
}
|
||||
|
||||
// =========================================================
|
||||
|
|
@ -418,31 +451,20 @@ pub fn probe(base: usize) bool {
|
|||
}
|
||||
|
||||
pub fn init(base: usize) void {
|
||||
if (!probe(base)) {
|
||||
uart.print("[GPU] No VirtIO-GPU device found.\n");
|
||||
return;
|
||||
}
|
||||
if (!probe(base)) return;
|
||||
|
||||
// Reset
|
||||
mmio_write(VIRTIO_MMIO_STATUS, 0);
|
||||
|
||||
// Acknowledge
|
||||
mmio_write(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE);
|
||||
|
||||
// Driver
|
||||
mmio_write(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER);
|
||||
|
||||
// Features (we don't need any special features for basic 2D)
|
||||
mmio_write(VIRTIO_MMIO_DRIVER_FEATURES, 0);
|
||||
|
||||
// Features OK
|
||||
mmio_write(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER | VIRTIO_STATUS_FEATURES_OK);
|
||||
// Acknowledge + Driver
|
||||
mmio_write(VIRTIO_MMIO_STATUS, 1);
|
||||
mmio_write(VIRTIO_MMIO_STATUS, 1 | 2);
|
||||
|
||||
// Setup queue
|
||||
queue_init();
|
||||
|
||||
// Driver OK
|
||||
mmio_write(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER | VIRTIO_STATUS_FEATURES_OK | VIRTIO_STATUS_DRIVER_OK);
|
||||
mmio_write(VIRTIO_MMIO_STATUS, 1 | 2 | 4);
|
||||
|
||||
// Initialize framebuffer
|
||||
fb.init();
|
||||
|
|
|
|||
|
|
@ -0,0 +1,108 @@
|
|||
// MARKUS MAIWALD (ARCHITECT) | VOXIS FORGE (AI)
|
||||
// Phase 3.5d: The Matrix Protocol - Rainmaker Logic
|
||||
// Greets the Subject, then recedes into the background.
|
||||
|
||||
const std = @import("std");
|
||||
const fb = @import("framebuffer.zig");
|
||||
|
||||
// Config
|
||||
const FONT_W = 8;
|
||||
const FONT_H = 8;
|
||||
const COLS = fb.WIDTH / FONT_W;
|
||||
const LIFETIME_FRAMES = 150; // ~5 seconds at 30FPS (reduced for impact)
|
||||
|
||||
// State
|
||||
var drops: [COLS]i32 = undefined;
|
||||
var frame_count: usize = 0;
|
||||
var is_active: bool = false;
|
||||
|
||||
// Basic PRNG (Xorshift)
|
||||
var rng_state: u32 = 0xDEADBEEF;
|
||||
fn random() u32 {
|
||||
var x = rng_state;
|
||||
x ^= x << 13;
|
||||
x ^= x >> 17;
|
||||
x ^= x << 5;
|
||||
rng_state = x;
|
||||
return x;
|
||||
}
|
||||
|
||||
pub fn init() void {
|
||||
is_active = true;
|
||||
frame_count = 0;
|
||||
// Initialize drops at random heights off-screen
|
||||
var i: usize = 0;
|
||||
while (i < COLS) : (i += 1) {
|
||||
drops[i] = -@as(i32, @intCast(random() % 100));
|
||||
}
|
||||
}
|
||||
|
||||
// Returns: TRUE if still animating, FALSE if finished (The Void)
|
||||
pub fn update() bool {
|
||||
if (!is_active) return false;
|
||||
|
||||
// 1. Fade existing trails
|
||||
fb.fade_screen(15); // Fade speed
|
||||
|
||||
// 2. Are we dying?
|
||||
frame_count += 1;
|
||||
const dying = (frame_count > LIFETIME_FRAMES);
|
||||
|
||||
// 3. Update Drops
|
||||
var active_drops: usize = 0;
|
||||
var i: usize = 0;
|
||||
while (i < COLS) : (i += 1) {
|
||||
// Draw Head (Bright White/Green)
|
||||
const x = i * FONT_W;
|
||||
const y = drops[i];
|
||||
|
||||
if (y >= 0 and y < @as(i32, @intCast(fb.HEIGHT))) {
|
||||
// Draw a simple "pixel block" glyph
|
||||
// White tip
|
||||
fb.put_pixel(@intCast(x + 3), @intCast(y + 3), 0xFFFFFFFF);
|
||||
fb.put_pixel(@intCast(x + 4), @intCast(y + 3), 0xFFFFFFFF);
|
||||
fb.put_pixel(@intCast(x + 3), @intCast(y + 4), 0xFFFFFFFF);
|
||||
fb.put_pixel(@intCast(x + 4), @intCast(y + 4), 0xFFFFFFFF);
|
||||
|
||||
// Green body follow
|
||||
if (y > 8) {
|
||||
fb.fill_rect(@intCast(x + 3), @intCast(y - 4), 2, 4, 0xFF00FF00);
|
||||
}
|
||||
}
|
||||
|
||||
// Move down
|
||||
drops[i] += FONT_H;
|
||||
|
||||
// Reset if off screen
|
||||
if (drops[i] > @as(i32, @intCast(fb.HEIGHT))) {
|
||||
if (!dying) {
|
||||
// Respawn at top
|
||||
drops[i] = -@as(i32, @intCast(random() % 50));
|
||||
active_drops += 1;
|
||||
} else {
|
||||
// Do NOT respawn. Let it fall into the void.
|
||||
}
|
||||
} else {
|
||||
active_drops += 1; // It's still on screen
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Check for Total Extinction
|
||||
if (dying and active_drops == 0) {
|
||||
// Ensure pure black at the end
|
||||
fb.clear(0xFF000000);
|
||||
is_active = false;
|
||||
return false; // STOP THE GPU FLUSHING IF IDLE
|
||||
}
|
||||
|
||||
return true; // Keep animating
|
||||
}
|
||||
|
||||
// --- EXPORTS FOR NIM ---
|
||||
export fn matrix_init() void {
|
||||
init();
|
||||
}
|
||||
|
||||
export fn matrix_update() bool {
|
||||
return update();
|
||||
}
|
||||
|
|
@@ -21,8 +21,12 @@ pub fn init() void {
    // QEMU devices are usually pre-initialized by firmware (EDK2/OpenSBI)
}

const NS16550A_IER: usize = 0x01; // Interrupt Enable Register

pub fn init_riscv() void {
    // Explicit init if needed, currently empty as we rely on pre-init
    // Disable Interrupts to rely on Polling (prevents Interrupt Storms if Handler is missing)
    const ier: *volatile u8 = @ptrFromInt(NS16550A_BASE + NS16550A_IER);
    ier.* = 0x00;
}

fn write_char_arm64(c: u8) void {
hal/ui.zig (58 lines changed)
@ -97,36 +97,54 @@ pub fn init() void {
|
|||
// --- KERNEL IMPORTS ---
|
||||
extern fn fiber_yield() void;
|
||||
|
||||
extern fn fiber_sleep(ms: i32) void;
|
||||
extern fn matrix_init() void;
|
||||
extern fn matrix_update() bool;
|
||||
|
||||
// --- THE UI LOOP (Fiber Entry) ---
|
||||
export fn ui_fiber_entry() void {
|
||||
init();
|
||||
matrix_init();
|
||||
|
||||
var matrix_alive: bool = true;
|
||||
|
||||
while (true) {
|
||||
// 1. Begin Frame
|
||||
c.mu_begin(&ctx);
|
||||
if (matrix_alive) {
|
||||
// --- THE GREETING (High Compute) ---
|
||||
matrix_alive = matrix_update();
|
||||
|
||||
// 2. Define Layout (The Logic)
|
||||
if (c.mu_begin_window(&ctx, "Nexus HUD", c.mu_rect(10, 10, 300, 200)) != 0) {
|
||||
c.mu_layout_row(&ctx, 1, &[_]i32{-1}, 0);
|
||||
c.mu_label(&ctx, "System Status: ONLINE");
|
||||
// Push the Rain to the GPU
|
||||
virtio_gpu_flush();
|
||||
|
||||
c.mu_layout_row(&ctx, 2, &[_]i32{ 80, -1 }, 0);
|
||||
c.mu_label(&ctx, "CPU:");
|
||||
c.mu_draw_rect(&ctx, c.mu_rect(100, 50, 150, 20), c.mu_color(0, 255, 0, 255)); // Mock bar
|
||||
// 33ms Frame Time (~30 FPS)
|
||||
fiber_sleep(33);
|
||||
} else {
|
||||
// --- THE HUD / VOID (Low Compute) ---
|
||||
// 1. Begin Frame
|
||||
c.mu_begin(&ctx);
|
||||
|
||||
if (c.mu_button(&ctx, "REBOOT") != 0) {
|
||||
// Logic hook!
|
||||
// send_command(CMD_REBOOT);
|
||||
// 2. Define Layout (The Logic)
|
||||
if (c.mu_begin_window(&ctx, "Nexus HUD", c.mu_rect(10, 10, 300, 200)) != 0) {
|
||||
c.mu_layout_row(&ctx, 1, &[_]i32{-1}, 0);
|
||||
c.mu_label(&ctx, "System Status: ONLINE");
|
||||
|
||||
c.mu_layout_row(&ctx, 2, &[_]i32{ 80, -1 }, 0);
|
||||
c.mu_label(&ctx, "CPU:");
|
||||
c.mu_draw_rect(&ctx, c.mu_rect(100, 50, 150, 20), c.mu_color(0, 255, 0, 255)); // Mock bar
|
||||
|
||||
if (c.mu_button(&ctx, "REBOOT") != 0) {
|
||||
// hal_reboot();
|
||||
}
|
||||
|
||||
c.mu_end_window(&ctx);
|
||||
}
|
||||
|
||||
c.mu_end_window(&ctx);
|
||||
// 3. End Frame & Paint
|
||||
c.mu_end(&ctx);
|
||||
render();
|
||||
|
||||
// 1s Sleep - No need to render UI every 30ms if static
|
||||
fiber_sleep(1000);
|
||||
}
|
||||
|
||||
// 3. End Frame & Paint
|
||||
c.mu_end(&ctx);
|
||||
render();
|
||||
|
||||
// 4. Yield
|
||||
fiber_yield();
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,8 @@
#include <stddef.h>
#include <stdint.h>

int errno = 0;

// Basic memory stubs
extern void* malloc(size_t size);
extern void free(void* ptr);
@@ -8,21 +10,7 @@ extern void free(void* ptr);
// Forward declare memset (defined below)
void* memset(void* s, int c, size_t n);

void* realloc(void* ptr, size_t size) {
    // naive realloc: alloc new, no copy (dangerous if used), return new
    // For Phase 8 we assume simple allocs.
    // If we want real realloc, we need copy.
    void* new_ptr = malloc(size);
    // copy? we don't know old size using standard malloc API without tracking.
    // ION slab is 2048.
    return new_ptr;
}

void* calloc(size_t nmemb, size_t size) {
    void* p = malloc(nmemb * size);
    if (p) memset(p, 0, nmemb * size);
    return p;
}
// Memory stubs moved to stubs.zig

// LwIP Panic Handler (for Membrane stack)
extern void console_write(const void* p, size_t len);
@@ -76,7 +64,8 @@ int fwrite(const void *ptr, size_t size, size_t nmemb, void *stream) {
int fflush(void *stream) { return 0; }

// System stubs
void exit(int status) { while(1); }
extern void nexus_yield(void);
void exit(int status) { while(1) { nexus_yield(); } }
void (*signal(int sig, void (*func)(int)))(int) { return NULL; }

// LwIP Time
@ -0,0 +1,84 @@
|
|||
const std = @import("std");
|
||||
|
||||
// --- Protocol Definitions (Match core/ion.nim) ---
|
||||
|
||||
pub const CmdPacket = extern struct {
|
||||
kind: u32,
|
||||
arg: u32,
|
||||
id: [16]u8, // SipHash Provenance (Matches Nim array[16, byte])
|
||||
};
|
||||
|
||||
pub const IonPacket = extern struct {
|
||||
data: u64, // ptr
|
||||
phys: u64,
|
||||
len: u16,
|
||||
id: u16,
|
||||
};
|
||||
|
||||
// Generic Lock-Free Ring Buffer (Single Producer / Single Consumer safe)
|
||||
pub fn RingBuffer(comptime T: type) type {
|
||||
return extern struct {
|
||||
head: u32,
|
||||
tail: u32,
|
||||
mask: u32,
|
||||
data: [256]T,
|
||||
|
||||
pub fn push(self: *volatile @This(), item: T) bool {
|
||||
const head = self.head;
|
||||
const tail = self.tail;
|
||||
const mask = self.mask;
|
||||
|
||||
// Check full
|
||||
if (((head + 1) & mask) == (tail & mask)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Write data
|
||||
self.data[head & mask] = item;
|
||||
|
||||
// Commit
|
||||
asm volatile ("fence" ::: .{ .memory = true });
|
||||
self.head = (head + 1) & mask;
|
||||
return true;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// System Table Layout (Located at 0x83000000)
|
||||
pub const SysTable = extern struct {
|
||||
magic: u32,
|
||||
s_rx: *RingBuffer(IonPacket),
|
||||
s_tx: *RingBuffer(IonPacket),
|
||||
s_event: *RingBuffer(IonPacket),
|
||||
s_cmd: *RingBuffer(CmdPacket),
|
||||
s_input: *RingBuffer(IonPacket),
|
||||
};
|
||||
|
||||
const SYSTABLE_ADDR: usize = 0x83000000;
|
||||
|
||||
// --- API ---
|
||||
|
||||
pub fn sys_cmd_push(pkt: CmdPacket) bool {
|
||||
const sys = @as(*const volatile SysTable, @ptrFromInt(SYSTABLE_ADDR));
|
||||
|
||||
// Safety check magic (0x4E585553 = "NXUS")
|
||||
if (sys.magic != 0x4E585553) return false;
|
||||
|
||||
// Push to Command Ring
|
||||
return sys.s_cmd.push(pkt);
|
||||
}
|
||||
|
||||
pub fn sys_input_pop(out_pkt: *IonPacket) bool {
|
||||
const sys = @as(*const volatile SysTable, @ptrFromInt(SYSTABLE_ADDR));
|
||||
if (sys.magic != 0x4E585553) return false;
|
||||
|
||||
const ring = sys.s_input;
|
||||
const head = @atomicLoad(u32, &ring.head, .acquire);
|
||||
const tail = @atomicLoad(u32, &ring.tail, .monotonic);
|
||||
|
||||
if (head == tail) return false;
|
||||
|
||||
out_pkt.* = ring.data[tail & ring.mask];
|
||||
@atomicStore(u32, &ring.tail, tail + 1, .release);
|
||||
return true;
|
||||
}
|
||||
|
|
@@ -4,7 +4,9 @@ import ion_client

proc console_write(p: pointer, len: csize_t) {.importc, cdecl.}

#[
# --- Heap Allocator (Slab Backed) ---
# DISABLED: Using stubs.zig heap for NipBox stability
const HEADER_SIZE = 32

proc malloc*(size: csize_t): pointer {.exportc, cdecl.} =
@@ -31,6 +33,7 @@ proc free*(p: pointer) {.exportc, cdecl.} =

  # Free using the stored packet (contains correct ID)
  ion_user_free(header_ptr[])
]#

proc sleep*(seconds: uint32) {.exportc, cdecl.} =
  # Busy loop sleep
@@ -89,7 +92,8 @@ proc write*(fd: cint, buf: pointer, count: csize_t): int {.exportc, cdecl.} =

  # Handle Sockets (fd > 2)
  return send_flow(int(fd), buf, int(count))

#[
proc read*(fd: int, buf: pointer, count: int): int {.exportc, cdecl.} =
  # TODO: Lookup socket, check RX ring
  return -1 # EBADF
]#
@ -0,0 +1,136 @@
|
|||
const std = @import("std");
|
||||
|
||||
// --- 1. IO PRIMITIVES ---
|
||||
|
||||
// We import 'write' and 'exit' from libc.nim / clib.c (found in libnexus.a)
|
||||
extern fn write(fd: i32, buf: [*]const u8, count: usize) isize;
|
||||
extern fn exit(status: i32) noreturn;
|
||||
|
||||
export fn fputc(c: i32, stream: ?*anyopaque) i32 {
|
||||
_ = stream;
|
||||
const char = @as(u8, @intCast(c));
|
||||
const buf = [1]u8{char};
|
||||
_ = write(1, &buf, 1);
|
||||
return c;
|
||||
}
|
||||
|
||||
export fn fputs(s: [*]const u8, stream: ?*anyopaque) i32 {
|
||||
_ = stream;
|
||||
var len: usize = 0;
|
||||
while (s[len] != 0) : (len += 1) {}
|
||||
_ = write(1, s, len);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// --- 2. THE MISSING SYMBOLS (STUBS) ---
|
||||
|
||||
// Nim checks for errors on streams. We say "No error".
|
||||
export fn ferror(stream: ?*anyopaque) i32 {
|
||||
_ = stream;
|
||||
return 0;
|
||||
}
|
||||
|
||||
export fn clearerr(stream: ?*anyopaque) void {
|
||||
_ = stream;
|
||||
}
|
||||
|
||||
// Nim looks for chars in memory (optimized scans).
|
||||
export fn memchr(s: ?*const anyopaque, c: i32, n: usize) ?*anyopaque {
|
||||
if (s) |src_ptr| {
|
||||
const src: [*]const u8 = @ptrCast(src_ptr);
|
||||
const target = @as(u8, @intCast(c));
|
||||
var i: usize = 0;
|
||||
while (i < n) : (i += 1) {
|
||||
if (src[i] == target) return @ptrCast(@constCast(src + i));
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
extern fn ion_user_free(pkt: ion.IonPacket) void;
|
||||
|
||||
export fn read(fd: i32, buf: [*]u8, count: usize) isize {
|
||||
if (fd != 0) return -1;
|
||||
|
||||
var pkt: ion.IonPacket = undefined;
|
||||
while (!ion.sys_input_pop(&pkt)) {
|
||||
nexus_yield();
|
||||
}
|
||||
|
||||
const to_copy = @min(count, pkt.len);
|
||||
const src = @as([*]const u8, @ptrFromInt(pkt.data));
|
||||
@memcpy(buf[0..to_copy], src[0..to_copy]);
|
||||
|
||||
ion_user_free(pkt);
|
||||
return @intCast(to_copy);
|
||||
}
|
||||
|
||||
// Nim tries to read lines.
|
||||
export fn fgets(s: [*]u8, size: i32, stream: ?*anyopaque) ?[*]u8 {
|
||||
_ = stream;
|
||||
if (size <= 0) return null;
|
||||
|
||||
var pkt: ion.IonPacket = undefined;
|
||||
while (!ion.sys_input_pop(&pkt)) {
|
||||
nexus_yield();
|
||||
}
|
||||
|
||||
const to_copy = @min(@as(usize, @intCast(size - 1)), pkt.len);
|
||||
const src = @as([*]const u8, @ptrFromInt(pkt.data));
|
||||
@memcpy(s[0..to_copy], src[0..to_copy]);
|
||||
s[to_copy] = 0;
|
||||
|
||||
ion_user_free(pkt);
|
||||
return s;
|
||||
}
|
||||
|
||||
export fn fgetc(stream: ?*anyopaque) i32 {
|
||||
_ = stream;
|
||||
var c: u8 = undefined;
|
||||
const n = read(0, @ptrCast(&c), 1);
|
||||
if (n <= 0) return -1;
|
||||
return @intCast(c);
|
||||
}
|
||||
|
||||
// Math stubs (sometimes needed)
|
||||
export fn dlopen() void {}
|
||||
export fn dlsym() void {}
|
||||
|
||||
export fn strerror(errnum: i32) [*]const u8 {
|
||||
_ = errnum;
|
||||
return "Unknown Error";
|
||||
}
|
||||
|
||||
extern fn main(argc: i32, argv: [*]const [*]const u8) i32;
|
||||
|
||||
// _start relocated to subject_entry.S
|
||||
|
||||
const ion = @import("ion.zig");
|
||||
|
||||
// Sovereign Syscall: Push to CMD Ring
|
||||
export fn nexus_syscall(cmd_id: u32, arg: u32) c_int {
|
||||
// Construct Packet
|
||||
var pkt = ion.CmdPacket{ .kind = cmd_id, .arg = arg, .id = [_]u8{0} ** 16 };
|
||||
|
||||
// Compute Provenance (SipHash)
|
||||
const key = "\xde\xad\xbe\xef\xca\xfe\xba\xbe\x00\x01\x02\x03\x04\x05\x06\x07";
|
||||
|
||||
var hasher = std.crypto.auth.siphash.SipHash128(1, 3).init(key);
|
||||
hasher.update(std.mem.asBytes(&pkt.kind));
|
||||
hasher.update(std.mem.asBytes(&pkt.arg));
|
||||
const hash_int = hasher.finalInt();
|
||||
|
||||
pkt.id = std.mem.toBytes(hash_int);
|
||||
|
||||
// Push to High-Priority CMD Ring
|
||||
if (!ion.sys_cmd_push(pkt)) {
|
||||
return -1; // Error: Ring full/backpressure
|
||||
}
|
||||
return 0; // Success
|
||||
}
|
||||
|
||||
// Sovereign Yield: Return control to Kernel Scheduler
|
||||
export fn nexus_yield() void {
|
||||
const yield_ptr = @as(*const *const fn () void, @ptrFromInt(0x83000FF0));
|
||||
yield_ptr.*();
|
||||
}
|
||||
|
|
@@ -1,3 +1,5 @@
// MARKUS MAIWALD (ARCHITECT) | VOXIS FORGE (AI)

const std = @import("std");

// 1. The SysTable Contract (Must match Kernel!)