# rumpk/core/kernel.nim
# (listing metadata: 610 lines, 21 KiB, Nim)

# Copyright (c) 2026 Nexus Foundation
# Licensed under the Libertaria Sovereign License (LSL-1.0)
# See legal/LICENSE_SOVEREIGN.md for details.
# MARKUS MAIWALD (ARCHITECT) | VOXIS FORGE (AI)
# Rumpk Layer 1: The Logic Core (Autonomous Immune System)
{.push stackTrace: off, lineTrace: off.}
import fiber except fiber_yield
import ion
import loader
import fs/tar
import fs/sfs
import netswitch
import ../libs/membrane/net_glue
import ../libs/membrane/compositor
# Global supervisor state, shared across fibers. Single-hart cooperative
# scheduling — no locking is required for these flags.
var ion_paused*: bool = false     # set by CMD_ION_STOP, cleared by CMD_ION_START
var pause_start*: uint64 = 0      # timestamp (ns) taken when ION was last paused
var matrix_enabled*: bool = false # toggled by CMD_GPU_MATRIX; consumed outside this file
# --- CORE LOGGING ---
# Raw console sink, implemented on the HAL side (imported symbol).
proc console_write(p: pointer, len: csize_t) {.importc, cdecl.}

proc kwrite*(p: pointer, len: csize_t) {.exportc, cdecl.} =
  ## Nil- and length-guarded console write; safe to call with any pointer.
  if p != nil and len > 0:
    console_write(p, len)
proc kprint*(s: cstring) {.exportc, cdecl.} =
  ## Write a NUL-terminated string to the console (no trailing newline).
  ## Nil and empty strings are ignored.
  if s == nil:
    return
  let n = s.len
  if n > 0:
    kwrite(cast[pointer](s), csize_t(n))
proc kprint_hex*(n: uint64) {.exportc, cdecl.} =
  ## Print `n` as a fixed-width, "0x"-prefixed, 16-digit uppercase hex value.
  const hex_chars = "0123456789ABCDEF"
  var buf: array[18, char]
  buf[0] = '0'
  buf[1] = 'x'
  for i in 0..15:
    # Walk nibbles from most- to least-significant.
    let nibble = (n shr (60 - (i * 4))) and 0xF
    # Fix: index the lookup string with `int`, not `uint64` — string
    # indexing takes an `int` and the unsigned index is not implicitly
    # converted on all compiler configurations.
    buf[i + 2] = hex_chars[int(nibble)]
  console_write(addr buf[0], 18)
proc kprintln*(s: cstring) {.exportc, cdecl.} =
  ## Write `s` to the console followed by a newline.
  kprint(s)
  kprint("\n")
proc write*(fd: cint, p: pointer, len: csize_t): csize_t {.exportc, cdecl.} =
  ## libc-style write shim: every descriptor is routed to the console.
  ## NOTE(review): `fd` is deliberately ignored here — TODO confirm this
  ## shim is only reached for console descriptors.
  ## Fix: go through `kwrite` so nil pointers / zero lengths are guarded,
  ## consistent with the rest of the logging layer.
  kwrite(p, len)
  return len
# Wrapper for VFS write: stdout/stderr are short-circuited to the console,
# everything else is forwarded to the ION VFS layer.
proc wrapper_vfs_write(fd: int32, buf: pointer, count: uint64): int64 {.cdecl.} =
  case fd
  of 1, 2:
    # stdout / stderr -> raw console, report full count as written
    console_write(buf, csize_t(count))
    int64(count)
  else:
    ion_vfs_write(fd, buf, count)
# =========================================================
# Fiber Management (Forward Declared)
# =========================================================
# One statically allocated fiber object per long-lived system task.
var fiber_ion: FiberObject        # ION supervisor (command-ring consumer)
var fiber_nexshell: FiberObject   # kernel shell
var fiber_ui: FiberObject         # UI task (stack reserved below; not started in this file)
var fiber_subject: FiberObject    # userland "subject" container
var fiber_watchdog: FiberObject   # watchdog (body lives in the included watchdog module)
var fiber_compositor: FiberObject # display compositor
var fiber_netswitch: FiberObject # Phase 36.2: Network Traffic Cop
# Phase 29: Dynamic Worker Pool (The Hive)
const MAX_WORKERS = 8
var worker_pool: array[MAX_WORKERS, FiberObject]
var worker_stacks: array[MAX_WORKERS, array[8192, uint8]] # 8 KiB stack per worker
var worker_active: array[MAX_WORKERS, bool]               # slot occupancy flags
var next_worker_id: uint64 = 100 # Start worker IDs at 100 (system fibers stay below)
var subject_loading_path: string = "bin/nipbox" # next program the subject fiber loads
proc subject_fiber_entry() {.cdecl.} =
  ## The Sovereign Container for Userland Consciousness.
  ## This loop persists across program reloads: each iteration loads the
  ## binary at `subject_loading_path` and, on success, enters userland.
  ## NOTE(review): control presumably comes back here after the userland
  ## program traps out / is re-initialized by CMD_SYS_EXIT — confirm
  ## against the trap path and init_fiber respawn in ion_fiber_entry.
  kprintln("[Subject] Fiber Entry reached.")
  while true:
    kprint("[Subject] Attempting to load: ")
    kprintln(cstring(subject_loading_path))
    let entry = kload(subject_loading_path)
    if entry != 0:
      kprintln("[Subject] Consciousness Transferred.")
      rumpk_enter_userland(entry)
    else:
      kprint("[Subject] Failed to load: ")
      kprintln(cstring(subject_loading_path))
    kprintln("[Subject] Pausing for Rebirth.")
    fiber.switch(addr fiber_ion) # Emergency yield to master
# --- STACK ALLOCATIONS ---
# Page-aligned, statically allocated fiber stacks. Sizes are tuned per task:
# UI/subject/compositor get far more headroom than ion/watchdog.
var stack_ion {.align: 4096.}: array[4096, uint8]
var stack_nexshell {.align: 4096.}: array[4096, uint8]
var stack_ui {.align: 4096.}: array[32768, uint8]
var stack_subject {.align: 4096.}: array[32768, uint8]
var stack_watchdog {.align: 4096.}: array[4096, uint8]
var stack_netswitch {.align: 4096.}: array[8192, uint8] # Phase 36.2
var stack_compositor {.align: 4096.}: array[128 * 1024, uint8]
# Phase 31: Memory Manager (The Glass Cage) — implemented HAL-side.
proc mm_init() {.importc, cdecl.}
proc mm_enable_kernel_paging() {.importc, cdecl.}
# HAL Framebuffer imports (Phase 26: Visual Cortex)
proc fb_kern_get_addr(): uint64 {.importc, cdecl.}
# --- INITRD SYMBOLS ---
# Linker-provided markers delimiting the embedded initrd tar image.
var binary_initrd_tar_start {.importc: "_initrd_start".}: char
var binary_initrd_tar_end {.importc: "_initrd_end".}: char
# =========================================================
# Shared Infrastructure
# =========================================================
# Base of the kernel/userland shared SysTable page. The shared rings are
# placed at fixed offsets above this address (see kmain).
const SYSTABLE_BASE = 0x83000000'u64
# Global Rings (The Pipes - L0 Physics)
var guest_rx_hal: HAL_Ring[IonPacket]
var guest_tx_hal: HAL_Ring[IonPacket]
var guest_event_hal: HAL_Ring[IonPacket]
var guest_cmd_hal: HAL_Ring[CmdPacket]
# Shared Channels (The Valves - L1 Logic)
var chan_rx*: SovereignChannel[IonPacket]    # NIC -> kernel ingress
var chan_tx*: SovereignChannel[IonPacket]    # kernel -> NIC egress
var chan_event*: SovereignChannel[IonPacket] # system events
var chan_cmd*: SovereignChannel[CmdPacket]   # userland -> ION commands
var chan_compositor_input*: SovereignChannel[IonPacket] # input routed to the compositor
# chan_input is now imported from ion.nim!
proc ion_push_stdin*(p: pointer, len: csize_t) {.exportc, cdecl.} =
  ## HAL input ingress: copy `len` bytes into a fresh ION packet and route
  ## it to the compositor channel (preferred) or directly to stdin.
  if chan_input.ring == nil:
    return # input ring not wired up yet (early boot)
  var pkt = ion_alloc()
  if pkt.data == nil:
    return # packet pool exhausted — this input event is dropped
  # NOTE(review): input is silently truncated at 2048 bytes — presumably
  # the ION packet payload capacity; confirm against ion.nim.
  let to_copy = min(int(len), 2048)
  copyMem(pkt.data, p, to_copy)
  pkt.len = uint16(to_copy)
  kprintln("[Kernel] Input packet pushed to ring") # debug trace, fires per input event
  # Phase 35d: Route to Compositor FIRST
  if chan_compositor_input.ring != nil:
    chan_compositor_input.send(pkt)
  else:
    # Fallback to direct routing if compositor not active
    chan_input.send(pkt)
proc get_ion_load(): int =
  ## Snapshot of the number of pending entries in the shared command ring
  ## (the heartbeat of the NPLs): (head - tail) wrapped by the ring mask.
  result = int((guest_cmd_hal.head - guest_cmd_hal.tail) and guest_cmd_hal.mask)
# Forward declaration: the scheduler body is defined further below.
proc rumpk_yield_internal() {.cdecl, exportc.}
# HAL Driver API (implemented in the hardware abstraction layer)
proc hal_io_init() {.importc, cdecl.}
proc virtio_net_poll() {.importc, cdecl.}
proc virtio_net_send(data: pointer, len: uint32) {.importc, cdecl.}
proc rumpk_yield_guard() {.importc, cdecl.}
proc virtio_blk_read(sector: uint64, buf: pointer) {.importc, cdecl.}
proc virtio_blk_write(sector: uint64, buf: pointer) {.importc, cdecl.}
proc ion_free_raw(id: uint16) {.importc, cdecl.}
proc nexshell_main() {.importc, cdecl.}
proc ui_fiber_entry() {.importc, cdecl.}
proc rumpk_halt() {.importc, cdecl, noreturn.}
proc compositor_fiber_entry() {.cdecl.} =
  ## Cooperative render loop: run one compositor step per scheduling slot,
  ## yielding immediately after each step (120 Hz goal).
  kprintln("[Compositor] Fiber Entry reached.")
  while true:
    compositor_step()
    rumpk_yield_internal()
proc get_now_ns(): uint64 =
  ## Monotonic timestamp in nanoseconds from the HAL timer.
  proc rumpk_timer_now_ns(): uint64 {.importc, cdecl.} # local FFI binding
  return rumpk_timer_now_ns()
proc fiber_yield*() {.exportc, cdecl.} =
  ## Public cooperative-yield entry point; delegates to the scheduler.
  rumpk_yield_internal()
proc fiber_sleep*(ms: int) {.exportc, cdecl.} =
  ## Put the current fiber to sleep for at least `ms` milliseconds by
  ## stamping `sleep_until` and yielding to the scheduler.
  ## Fix: a negative `ms` previously wrapped through `uint64(ms)` into a
  ## practically infinite deadline; it is now clamped to a plain yield.
  let delta = if ms > 0: uint64(ms) * 1_000_000'u64 else: 0'u64
  current_fiber.sleep_until = get_now_ns() + delta
  fiber_yield()
proc rumpk_yield_internal() {.cdecl, exportc.} =
  ## Cooperative round-robin scheduler over the four core fibers:
  ## ion -> nexshell -> subject -> watchdog -> ion. Any other current
  ## fiber (workers, compositor, netswitch) falls back to ion.
  let now = get_now_ns()
  # Normal Round Robin logic with Sleep Check
  var next_fiber: Fiber = nil
  if current_fiber == addr fiber_ion:
    next_fiber = addr fiber_nexshell
  elif current_fiber == addr fiber_nexshell:
    next_fiber = addr fiber_subject
  elif current_fiber == addr fiber_subject:
    next_fiber = addr fiber_watchdog
  elif current_fiber == addr fiber_watchdog:
    next_fiber = addr fiber_ion
  else:
    next_fiber = addr fiber_ion
  # Skip sleeping fibers: advance through the ring until one is runnable.
  var found = false
  for _ in 0..6: # Max 6 check
    if next_fiber != nil and now >= next_fiber.sleep_until:
      found = true
      break
    # Move to next in sequence
    if next_fiber == addr fiber_ion: next_fiber = addr fiber_nexshell
    elif next_fiber == addr fiber_nexshell: next_fiber = addr fiber_subject
    elif next_fiber == addr fiber_subject: next_fiber = addr fiber_watchdog
    elif next_fiber == addr fiber_watchdog: next_fiber = addr fiber_ion
    else: next_fiber = addr fiber_ion
  # Force found = true for now.
  # NOTE(review): this deliberately overrides the sleep check above, so
  # sleeping fibers ARE scheduled and the wfi branch below is unreachable.
  # Flagged "for now" by the author — revisit when sleep support lands.
  found = true
  if found and next_fiber != current_fiber:
    kprint("[Sched] "); kprint(current_fiber.name); kprint(" -> "); kprintln(next_fiber.name)
    switch(next_fiber)
  elif not found:
    # All fibers asleep: enable supervisor interrupts and wait for one.
    asm "csrsi sstatus, 2"
    asm "wfi"
# =========================================================
# ION Intelligence Fiber (Core System Supervisor)
# =========================================================
proc ion_fiber_entry() {.cdecl.} =
  ## ION supervisor loop: drain the shared command ring, service device
  ## and filesystem requests on behalf of userland, then yield.
  hal_io_init() # NOTE(review): also called from kmain — presumably idempotent; confirm
  kprintln("[ION] Fiber 1 Reporting for Duty.")
  while true:
    var cmd: CmdPacket
    # Drain every pending command before yielding.
    while chan_cmd.recv(cmd):
      case cmd.kind:
      of uint32(CmdType.CMD_GPU_MATRIX):
        # arg > 0 enables the matrix effect (consumed outside this file).
        matrix_enabled = (cmd.arg > 0)
      of uint32(CmdType.CMD_SYS_EXIT):
        # Subject terminated: re-arm its fiber so it reloads the default shell.
        kprintln("[Kernel] Subject Exited. Respawning...")
        subject_loading_path = "bin/nipbox"
        init_fiber(addr fiber_subject, subject_fiber_entry, addr stack_subject[0], stack_subject.len)
      of uint32(CmdType.CMD_ION_STOP):
        ion_paused = true
        pause_start = get_now_ns()
      of uint32(CmdType.CMD_ION_START):
        ion_paused = false
      of uint32(CmdType.CMD_NET_TX):
        # cmd.arg is a pointer to NetArgs in shared memory — trusted as-is.
        let args = cast[ptr NetArgs](cmd.arg)
        virtio_net_send(cast[ptr UncheckedArray[byte]](args.buf), uint32(args.len))
      of uint32(CmdType.CMD_NET_RX):
        let args = cast[ptr NetArgs](cmd.arg)
        virtio_net_poll()
        var pkt: IonPacket
        if chan_rx.recv(pkt):
          # Copy at most the caller's buffer size; report actual length back.
          let copy_len = if uint64(pkt.len) > args.len: args.len else: uint64(pkt.len)
          copyMem(cast[pointer](args.buf), cast[pointer](pkt.data), copy_len)
          args.len = copy_len
          ion_free_raw(pkt.id)
        else:
          args.len = 0 # nothing received
      of uint32(CmdType.CMD_BLK_READ):
        let args = cast[ptr BlkArgs](cmd.arg)
        virtio_blk_read(args.sector, cast[pointer](args.buf))
      of uint32(CmdType.CMD_BLK_WRITE):
        let args = cast[ptr BlkArgs](cmd.arg)
        virtio_blk_write(args.sector, cast[pointer](args.buf))
      of uint32(CmdType.CMD_FS_WRITE):
        let args = cast[ptr FileArgs](cmd.arg)
        sfs_write_file(cast[cstring](args.name), cast[cstring](args.data), int(args.len))
        sfs_sync_vfs() # keep the VFS view coherent after the write
      of uint32(CmdType.CMD_FS_READ):
        let args = cast[ptr FileArgs](cmd.arg)
        let bytes_read = sfs_read_file(cast[cstring](args.name), cast[pointer](args.data), int(args.len))
        args.len = uint64(bytes_read) # report actual bytes read
      else:
        discard # unknown commands are ignored
    fiber_yield()
# Hardware Ingress (Zig -> Nim)
proc ion_get_virt(id: uint16): pointer {.importc, cdecl.}

proc ion_ingress*(id: uint16, len: uint16) {.exportc, cdecl.} =
  ## Called by the NIC driver for each received frame: wrap pool buffer
  ## `id` in an IonPacket and hand it to the RX channel.
  let data = ion_get_virt(id)
  var pkt = IonPacket(data: cast[ptr UncheckedArray[byte]](data), len: len, id: id)
  chan_rx.send(pkt)
# Panic Handler
proc nimPanic(msg: cstring) {.exportc: "panic", cdecl, noreturn.} =
  ## Nim runtime panic hook: report the message on the console, then halt.
  kprint("\n[PANIC] \n")
  kprintln(msg)
  rumpk_halt()
# Include Watchdog Logic
include watchdog
# =========================================================
# Generic Worker Trampoline
# =========================================================
proc worker_trampoline() {.cdecl.} =
  ## Shared entry point for pooled worker fibers: run the user callback,
  ## then free the pool slot and park forever (k_spawn re-inits the fiber
  ## when the slot is reused).
  let user_fn = cast[proc(arg: uint64) {.cdecl.}](current_fiber.user_entry)
  if user_fn != nil:
    user_fn(current_fiber.user_arg)
  # Release this worker's slot so k_join unblocks and k_spawn can reuse it.
  for i in 0..<MAX_WORKERS:
    if worker_pool[i].id == current_fiber.id:
      worker_active[i] = false
      break
  # A fiber must never return from its entry point — idle-yield forever.
  while true:
    fiber_yield()
proc k_spawn(entry: pointer, arg: uint64): int32 {.exportc, cdecl.} =
  ## Spawn a worker fiber from the pool running `entry(arg)` via the
  ## generic trampoline. Returns the new fiber id, or -1 when the pool
  ## is exhausted.
  var slot = -1
  for i in 0..<MAX_WORKERS:
    if not worker_active[i]:
      slot = i
      break
  if slot == -1:
    return -1
  let worker = addr worker_pool[slot]
  worker.id = next_worker_id
  next_worker_id += 1
  # Consistency fix: every system fiber has a name and the scheduler logs
  # `current_fiber.name`; workers previously ran nameless.
  worker.name = "worker"
  worker.promises = PLEDGE_ALL # workers start fully privileged; may pledge down
  worker.user_entry = entry
  worker.user_arg = arg
  init_fiber(worker, worker_trampoline, addr worker_stacks[slot][0], sizeof(worker_stacks[slot]))
  worker_active[slot] = true
  return int32(worker.id)
proc k_join(fid: uint64): int32 {.exportc, cdecl.} =
  ## Cooperatively block until worker `fid` finishes (its slot is freed by
  ## the trampoline). Returns 0 on completion, -1 if no such active worker.
  for slot in 0..<MAX_WORKERS:
    if worker_active[slot] and worker_pool[slot].id == fid:
      while worker_active[slot]:
        fiber_yield()
      return 0
  return -1
# Pledge Implementation
proc k_pledge(promises: uint64): int32 {.exportc, cdecl.} =
  ## Shrink the calling fiber's privilege mask (OpenBSD-style pledge).
  ## The new mask is the intersection of the current mask and `promises`,
  ## so privileges can never be re-acquired. Returns 0 on success, or -1
  ## when there is no fiber context.
  if current_fiber == nil:
    return -1
  let narrowed = current_fiber.promises and promises
  current_fiber.promises = narrowed
  return 0
proc mm_debug_check_va(va: uint64) {.importc, cdecl.}

proc k_handle_exception*(nr, epc, tval: uint) {.exportc, cdecl.} =
  ## Fatal trap handler: dump cause/EPC/TVAL (plus the faulting PTE for
  ## instruction page faults), report the originating privilege mode,
  ## then halt the machine. Never returns.
  kprintln("\n[EXCEPTION] FATAL")
  kprint(" Code: "); kprint_hex(nr)
  kprint("\n EPC: "); kprint_hex(epc)
  kprint("\n TVAL: "); kprint_hex(tval)
  if nr == 12: # Instruction Page Fault (RISC-V scause 12)
    kprintln("\n[MM] Dumping PTE for EPC:")
    mm_debug_check_va(epc)
  var sstatus_val: uint64
  {.emit: "asm volatile(\"csrr %0, sstatus\" : \"=r\"(`sstatus_val`));".}
  kprint("[CPU] sstatus: "); kprint_hex(sstatus_val)
  # sstatus.SPP (bit 8) records the privilege level the trap came from.
  if (sstatus_val and (1 shl 8)) != 0:
    kprintln(" (Mode: Supervisor)")
  else:
    kprintln(" (Mode: User)")
  kprintln("\n[SYSTEM HALTED]")
  rumpk_halt()
proc k_check_deferred_yield*() {.exportc, cdecl.} =
  ## Trap-return hook: if the current fiber requested a deferred yield
  ## (set during a syscall/interrupt), clear the flag and switch now.
  if current_fiber == nil:
    return
  if not current_fiber.wants_yield:
    return
  current_fiber.wants_yield = false
  fiber_yield()
proc k_handle_syscall*(nr, a0, a1, a2: uint): uint {.exportc, cdecl.} =
  ## Userland syscall dispatcher. `nr` selects the service; a0..a2 are raw
  ## arguments. Pointer arguments are untrusted user addresses cast
  ## directly — usable only because sstatus.SUM is enabled in kmain.
  ## Pledge violations return cast[uint](-1).
  if nr != 0x100: # Ignore YIELD noise
    kprint("[Syscall] NR: "); kprint_hex(nr); kprintln("")
  case nr:
  of 0x01: # EXIT
    # Hand off to ION (CMD_SYS_EXIT respawns the subject fiber); the
    # actual fiber switch happens after trap return via the deferred flag.
    kprintln("[Kernel] Subject EXIT Triggered")
    var pkt = CmdPacket(kind: uint32(CmdType.CMD_SYS_EXIT), arg: a0)
    chan_cmd.send(pkt)
    current_fiber.wants_yield = true
    return 0
  of 0x101: # PLEDGE
    # Only allow reducing privileges? For now, allow setting.
    current_fiber.promises = a0
    return 0
  of 0x200: # OPEN
    let flags = int32(a1)
    # Bit 0 of flags presumably means "open for write" — confirm ABI.
    if (flags and 0x01) != 0:
      if (current_fiber.promises and PLEDGE_WPATH) == 0: return cast[uint](-1)
    else:
      if (current_fiber.promises and PLEDGE_RPATH) == 0: return cast[uint](-1)
    return uint(ion_vfs_open(cast[cstring](a0), flags))
  of 0x201: # CLOSE
    return uint(ion_vfs_close(int32(a0)))
  of 0x202: # LIST
    if (current_fiber.promises and PLEDGE_RPATH) == 0: return cast[uint](-1)
    return uint(ion_vfs_list(cast[pointer](a0), uint64(a1)))
  of 0x203: # READ
    if a0 == 0: # fd 0: non-blocking read from the stdin packet channel
      if (current_fiber.promises and PLEDGE_STDIO) == 0: return cast[uint](-1)
      var pkt: IonPacket
      if chan_input.recv(pkt):
        # Copy at most the caller's buffer size (a2), free the packet.
        let n = if uint64(pkt.len) < a2: uint64(pkt.len) else: a2
        if n > 0: copyMem(cast[pointer](a1), cast[pointer](pkt.data), int(n))
        ion_free_raw(pkt.id)
        return n
      else:
        # No input pending: return 0 and let the caller retry after a yield.
        current_fiber.wants_yield = true
        return 0
    if (current_fiber.promises and PLEDGE_RPATH) == 0: return cast[uint](-1)
    return uint(ion_vfs_read(int32(a0), cast[pointer](a1), uint64(a2)))
  of 0x204: # WRITE
    # Bypass optimization for now to test stability
    return uint(ion_vfs_write(int32(a0), cast[pointer](a1), uint64(a2)))
  of 0x300: # SURFACE_CREATE
    return uint(compositor.create_surface(int(a0), int(a1)))
  of 0x301: # SURFACE_FLIP — no-op (compositor fiber recomposes continuously)
    return 0
  of 0x302: # SURFACE_GET_PTR
    return cast[uint](compositor.hal_surface_get_ptr(int32(a0)))
  of 0x500: # SPAWN
    return uint(k_spawn(cast[pointer](a0), uint64(a1)))
  of 0x501: # JOIN
    return uint(k_join(uint64(a0)))
  of 0x100: # YIELD
    # Deferred yield: Set flag, yield happens after trap return
    current_fiber.wants_yield = true
    return 0
  of 0x220: # BLK_READ - Raw Sector Read (Block Valve)
    # a0 = sector, a1 = buffer pointer (userland), a2 = count (sectors)
    # NOTE(review): exactly ONE sector is read and 512 bytes copied; the
    # sector count in a2 is ignored — flag for follow-up.
    if (current_fiber.promises and PLEDGE_RPATH) == 0: return cast[uint](-1)
    var buf: array[512, byte]
    virtio_blk_read(uint64(a0), addr buf[0])
    copyMem(cast[pointer](a1), addr buf[0], 512)
    return 512
  of 0x221: # BLK_WRITE - Raw Sector Write (Block Valve)
    # a0 = sector, a1 = buffer pointer (userland), a2 = count (sectors)
    # NOTE(review): writes one sector directly from user memory; a2 ignored.
    if (current_fiber.promises and PLEDGE_WPATH) == 0: return cast[uint](-1)
    virtio_blk_write(uint64(a0), cast[ptr byte](a1))
    return 512
  of 0x222: # BLK_SYNC - Flush (Block Valve)
    # VirtIO block is synchronous, so this is a no-op for now
    return 0
  of 0: # EXIT — NOTE(review): labeled EXIT but only yields; confirm intent
    fiber_yield()
    return 0
  else:
    return 0 # unknown syscalls succeed silently
proc kmain() {.exportc, cdecl.} =
  ## Kernel entry point (called from the boot HAL): bring up memory and
  ## filesystems, publish the shared SysTable and rings, spawn the system
  ## fibers, then transfer control to the ION supervisor. Never returns.
  kprintln("\n\n")
  kprintln("╔═══════════════════════════════════════╗")
  kprintln("║ NEXUS RUMK v1.1 - SOVEREIGN ║")
  kprintln("╚═══════════════════════════════════════╝")
  # Boot diagnostics: key addresses and the global pointer register.
  kprint("[Kernel] current_fiber Addr: "); kprint_hex(cast[uint64](addr current_fiber)); kprintln("")
  kprint("[Kernel] stack_subject Addr: "); kprint_hex(cast[uint64](addr stack_subject[0])); kprintln("")
  kprint("[Kernel] GP: "); var gp: uint64; {.emit: "asm volatile(\"mv %0, gp\" : \"=r\"(`gp`));".}; kprint_hex(gp); kprintln("")
  ion_pool_init()
  # Phase 31: Memory Manager (The Glass Cage)
  mm_init()
  mm_enable_kernel_paging()
  # Diagnostic: Check stvec (trap vector base) after paging is enabled.
  var stvec_val: uint64
  {.emit: "asm volatile(\"csrr %0, stvec\" : \"=r\"(`stvec_val`));".}
  kprint("[Kernel] stvec: ")
  kprint_hex(stvec_val)
  kprintln("")
  # Phase 37 Fix: Enable sstatus.SUM (Supervisor User Memory access).
  # This allows the kernel (S-mode) to read/write pages with PTE_U (User bit).
  {.emit: "asm volatile(\"csrs sstatus, %0\" : : \"r\"(1L << 18));".}
  ion_init_input()
  hal_io_init() # NOTE(review): also called in ion_fiber_entry — presumably idempotent
  vfs_init(addr binary_initrd_tar_start, addr binary_initrd_tar_end)
  sfs_mount()
  sfs_sync_vfs()
  # Publish the kernel service table at the fixed shared address.
  let sys = cast[ptr SysTable](SYSTABLE_BASE)
  sys.fn_vfs_open = ion_vfs_open
  sys.fn_vfs_read = ion_vfs_read
  sys.fn_vfs_list = ion_vfs_list
  sys.fn_vfs_write = wrapper_vfs_write # routes fd 1/2 to the console
  sys.fn_vfs_close = ion_vfs_close
  sys.fn_log = cast[pointer](kwrite)
  sys.fn_pledge = k_pledge
  # fn_yield removed - yield is now syscall 0x100
  # NOTE(review): sys.fn_yield is nevertheless assigned below — reconcile.
  # Phase 35e: Crypto HAL integration
  proc hal_crypto_siphash(key: ptr array[16, byte], data: pointer, len: uint64, out_hash: ptr array[16, byte]) {.importc, cdecl.}
  proc hal_crypto_ed25519_verify(sig: ptr array[64, byte], msg: pointer, len: uint64, pk: ptr array[32, byte]): bool {.importc, cdecl.}
  sys.fn_siphash = hal_crypto_siphash
  sys.fn_ed25519_verify = hal_crypto_ed25519_verify
  # GPU disabled temporarily until display works
  # proc virtio_gpu_init(base: uint64) {.importc, cdecl.}
  # proc matrix_init() {.importc, cdecl.}
  # kprintln("[Kernel] Scanning for VirtIO-GPU...")
  # for i in 1..8:
  #   let base_addr = 0x10000000'u64 + (uint64(i) * 0x1000'u64)
  #   virtio_gpu_init(base_addr)
  # matrix_init()
  # Move Rings to Shared Memory (User Accessible): 0x83001000 onwards.
  let ring_rx_ptr = cast[ptr HAL_Ring[IonPacket]](SYSTABLE_BASE + 0x1000)
  let ring_tx_ptr = cast[ptr HAL_Ring[IonPacket]](SYSTABLE_BASE + 0x2000)
  let ring_event_ptr = cast[ptr HAL_Ring[IonPacket]](SYSTABLE_BASE + 0x3000)
  let ring_cmd_ptr = cast[ptr HAL_Ring[CmdPacket]](SYSTABLE_BASE + 0x4000)
  # Init Shared Rings (mask 255 -> 256-slot power-of-two rings)
  ring_rx_ptr.head = 0; ring_rx_ptr.tail = 0; ring_rx_ptr.mask = 255
  ring_tx_ptr.head = 0; ring_tx_ptr.tail = 0; ring_tx_ptr.mask = 255
  ring_event_ptr.head = 0; ring_event_ptr.tail = 0; ring_event_ptr.mask = 255
  ring_cmd_ptr.head = 0; ring_cmd_ptr.tail = 0; ring_cmd_ptr.mask = 255
  # Connect Channels (kernel-side valves onto the shared rings)
  chan_rx.ring = ring_rx_ptr
  chan_tx.ring = ring_tx_ptr
  chan_event.ring = ring_event_ptr
  chan_cmd.ring = ring_cmd_ptr
  # Connect SysTable (userland-side view of the same rings)
  sys.s_rx = ring_rx_ptr
  sys.s_tx = ring_tx_ptr
  sys.s_event = ring_event_ptr
  sys.s_cmd = ring_cmd_ptr
  # Dedicated stdin ring
  let ring_input_ptr = cast[ptr HAL_Ring[IonPacket]](SYSTABLE_BASE + 0x5000)
  ring_input_ptr.head = 0; ring_input_ptr.tail = 0; ring_input_ptr.mask = 255
  chan_input.ring = ring_input_ptr
  sys.s_input = ring_input_ptr
  sys.magic = 0x4E585553 # ASCII "NXUS"
  # Removed stale BSS assignments (sys.s_rx = ...)
  # Phase 36.2: Initialize Network Membrane BEFORE userland starts
  netswitch_init()
  netswitch_attach_systable(sys)
  # Framebuffer info — assumes a fixed 1920x1080 32bpp mode; TODO query HAL.
  sys.fb_addr = fb_kern_get_addr()
  sys.fb_width = 1920
  sys.fb_height = 1080
  sys.fb_stride = 1920 * 4
  sys.fb_bpp = 32
  sys.fn_yield = rumpk_yield_guard
  kprintln("[Kernel] Spawning System Fibers...")
  fiber_ion.name = "ion"
  init_fiber(addr fiber_ion, ion_fiber_entry, addr stack_ion[0], sizeof(stack_ion))
  fiber_compositor.name = "compositor"
  init_fiber(addr fiber_compositor, compositor_fiber_entry, addr stack_compositor[0], sizeof(stack_compositor))
  fiber_nexshell.name = "nexshell"
  init_fiber(addr fiber_nexshell, nexshell_main, addr stack_nexshell[0], sizeof(stack_nexshell))
  # Phase 31: Page Table root for worker isolation
  proc mm_create_worker_map(stack_base: uint64, stack_size: uint64, packet_addr: uint64): uint64 {.importc, cdecl.}
  fiber_subject.name = "subject"
  init_fiber(addr fiber_subject, subject_fiber_entry, addr stack_subject[0], sizeof(stack_subject))
  # Subject gets its own address space covering its stack plus the SysTable page.
  fiber_subject.satp_value = mm_create_worker_map(cast[uint64](addr stack_subject[0]), uint64(sizeof(stack_subject)), 0x83000000'u64)
  fiber_watchdog.name = "watchdog"
  init_fiber(addr fiber_watchdog, watchdog_loop, addr stack_watchdog[0], sizeof(stack_watchdog))
  # Phase 36.2: NetSwitch Fiber (Traffic Cop)
  fiber_netswitch.name = "netswitch"
  init_fiber(addr fiber_netswitch, fiber_netswitch_entry, addr stack_netswitch[0], sizeof(stack_netswitch))
  kprintln("[Kernel] Enabling Supervisor Interrupts (SIE)...")
  asm "csrsi sstatus, 2"
  kprintln("[Kernel] All Systems Go. Entering Autonomous Loop.")
  switch(addr fiber_ion)
{.pop.}