fix(virtio): overcome capability probe hang with paging enabled

- Fixes VirtIO-PCI capability probing logic to handle invalid BAR indices gracefully.
- Adds defensive checks to the virtio_pci.zig capability-probe loop (restores next_mmio_addr if it was zeroed after paging was enabled).
- Implements Typed Channel Multiplexing (0x500/0x501) for NetSwitch.
- Grants networking capabilities to Subject/Userland.
- Refactors NexShell to use reactive I/O (ion_wait_multi).
- Bumps version to 2026.1.1 (Patch 1).
Markus Maiwald 2026-01-06 13:39:40 +01:00
parent 8b109652ab
commit a59a4cf9db
8 changed files with 203 additions and 422 deletions
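
The reactive I/O refactor listed above replaces NexShell's polling loop: a fiber publishes a bitmask of the capability slots it cares about, blocks through the new wait_multi SysTable entry, and is only rescheduled once one of the mapped rings has data. A minimal Nim sketch of the intended caller pattern, assuming the membrane wrapper `ion_user_wait_multi` from this commit is in scope; the slot constants and the two drain procs are illustrative stand-ins, not part of this diff:

# Illustrative only: a userland fiber that blocks on capability slot 0
# (console.input) and slot 1 (assumed to hold its net-RX grant), then
# services whichever channel woke it up.
proc drainConsole() = discard   # stand-ins for the caller's real handlers
proc drainNetRx() = discard

proc reactiveLoop() =
  const SLOT_CONSOLE = 0
  const SLOT_NET_RX = 1
  let mask = (1'u64 shl SLOT_CONSOLE) or (1'u64 shl SLOT_NET_RX)
  while true:
    # Returns 0 once any masked slot's ring has head != tail.
    if ion_user_wait_multi(mask) == 0:
      drainConsole()
      drainNetRx()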

core/channels.nim (new file, 51 lines)

@@ -0,0 +1,51 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
## Rumpk Layer 1: Typed Channels (SPEC-070)
import ion
import cspace
# Kernel logging
proc kprintln(s: cstring) {.importc, cdecl.}

proc get_channel_ring*(id: uint64): pointer =
  ## Map a Channel ID (object_id) to a physical HAL ring pointer
  case id:
  of 0x1000: return cast[pointer](chan_input.ring)
  of 0x1001: return cast[pointer](chan_tx.ring)  # console.output
  of 0x500: return cast[pointer](chan_net_tx.ring)
  of 0x501: return cast[pointer](chan_net_rx.ring)
  else: return nil

proc channel_has_data*(id: uint64): bool =
  ## Check if a channel has data (for RX) or space (for TX).
  ## NOTE: This depends on whether the capability is for READ or WRITE;
  ## for now we focus on RX (has data).
  let ring_ptr = get_channel_ring(id)
  if ring_ptr == nil: return false
  # Cast to a generic HAL_Ring to check head/tail.
  # All IonPacket rings are 256 entries.
  let ring = cast[ptr HAL_Ring[IonPacket]](ring_ptr)
  return ring.head != ring.tail

proc fiber_can_run_on_channels*(f_id: uint64, mask: uint64): bool {.exportc, cdecl.} =
  ## Check if any of the channels in the mask have active data
  if mask == 0: return true # Not waiting on anything specific
  for i in 0..<64:
    if (mask and (1'u64 shl i)) != 0:
      # Slot i is active in the mask: look up the capability installed there.
      let cap = cspace_lookup(f_id, uint(i))
      if cap != nil:
        # Capability is a 32-byte struct; object_id sits at byte offset 4
        # (after header fields of 1 + 1 + 2 bytes).
        let obj_id = cast[ptr uint64](cast[uint](cap) + 4)[]
        if channel_has_data(obj_id):
          return true
  return false
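
fiber_can_run_on_channels treats bit i of the wait mask as capability slot i in the fiber's CSpace, and the object_id stored in that slot selects the HAL ring to test. A small Nim sketch of that mapping, assuming the same cspace_lookup import as above; the CAP_OBJECT_ID_OFFSET name is an assumption mirroring the 1+1+2-byte header described in the comment, and the slot/channel pairings follow the grants made in kmain later in this commit:

# Illustrative only: building a wait mask from capability slot indices and
# resolving a slot to its channel ID the same way the loop above does.
const CAP_OBJECT_ID_OFFSET = 4'u  # assumed: 1 + 1 + 2 header bytes before object_id

proc slotMask(slots: openArray[int]): uint64 =
  ## blocked_on_mask for a fiber waiting on the given CSpace slots.
  for s in slots:
    result = result or (1'u64 shl s)

proc slotChannelId(f_id: uint64, slot: uint): uint64 =
  ## Read the object_id (channel ID, e.g. 0x1000 or 0x501) out of a capability.
  let cap = cspace_lookup(f_id, slot)
  if cap == nil: return 0
  cast[ptr uint64](cast[uint](cap) + CAP_OBJECT_ID_OFFSET)[]

# A fiber holding console.input in slot 0 and net RX (0x501) in slot 1 blocks with
#   ion_wait_multi(slotMask([0, 1]))
# and becomes runnable again as soon as either ring's head != tail.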

Fiber module (Rumpk Layer 1)

@@ -5,48 +5,40 @@
 # This file is part of the Nexus Sovereign Core.
 # See legal/LICENSE_SOVEREIGN.md for license terms.
-## Rumpk Layer 1: Fiber Execution (Motive Power)
-##
-## Implements the unified multi-arch fiber context switching.
-## Supported Architectures: x86_64, AArch64, RISC-V.
-##
-## SAFETY: Direct manipulation of stack pointers and CPU registers via
-## architecture-specific context frames. Swaps page tables during switch.
+## Rumpk Layer 1: Fibers (The Sovereign Thread)
+# MARKUS MAIWALD (ARCHITECT) | VOXIS FORGE (AI)
+# Rumpk Phase 10: Multitasking & Context Switching
+#
+# Responsibilities:
+# - Define the Fiber abstraction (Hardware Context + Stack)
+# - Abstract the ISA-specific context switch mechanism
+# - Provide a high-level API for yielding and scheduling
 {.push stackTrace: off, lineTrace: off.}
-# =========================================================
-# Architecture-Specific Constants
+# Architecture Configuration
 # =========================================================
-when defined(amd64) or defined(x86_64):
-  const CONTEXT_SIZE = 56
-  const RET_ADDR_INDEX = 6   # RIP at [sp + 48]
-  const ARCH_NAME = "x86_64"
-elif defined(arm64) or defined(aarch64):
-  const CONTEXT_SIZE = 96
-  const RET_ADDR_INDEX = 11  # x30 (LR) at [sp + 88]
-  const ARCH_NAME = "aarch64"
-elif defined(riscv64):
-  const CONTEXT_SIZE = 128
-  const RET_ADDR_INDEX = 0   # ra at [sp + 0]
-  const ARCH_NAME = "riscv64"
+when defined(riscv64):
+  const ARCH_NAME* = "riscv64"
+  const CONTEXT_SIZE* = 128
+  const RET_ADDR_INDEX* = 0  # Offset in stack for RA
+elif defined(amd64) or defined(x86_64):
+  const ARCH_NAME* = "amd64"
+  const CONTEXT_SIZE* = 64
+  const RET_ADDR_INDEX* = 0
 else:
-  {.error: "Unsupported architecture for Rumpk fibers".}
+  {.error: "Unsupported Architecture".}
-# =========================================================
-# Types
-# =========================================================
+# --- FIBER DEFINITION ---
 type
-  Spectrum* = enum
-    Photon = 0   # UI/Audio (Top Tier)
-    Matter = 1   # Interactive (Middle Tier)
-    Gravity = 2  # Batch (Bottom Tier)
-    Void = 3     # Unclassified/Demoted (Default)
+  Spectrum* {.pure.} = enum
+    Void = 0,    # Default/Uninitialized
+    Photon = 1,  # Real-time (0-1ms latency)
+    Matter = 2,  # Interactive (1-10ms latency)
+    Gravity = 3, # Batch/Idle (100ms+ latency)
   FiberState* = object
     sp*: uint64  # The Stack Pointer (Must be first field!)
@@ -76,6 +68,9 @@ type
     user_sp_init*: uint64  # Initial SP for userland entry
     # Ground Zero Phase 1: Capability Space (SPEC-051)
     cspace_id*: uint64     # Index into global CSpace table
+    # Ground Zero Phase 3: Typed Channels & I/O Multiplexing
+    blocked_on_mask*: uint64  # Bitmask of capability slots fiber is waiting on
+    is_blocked*: bool         # True if fiber is waiting for I/O
 # Spectrum Accessors
 proc getSpectrum*(f: Fiber): Spectrum =
@@ -97,12 +92,10 @@ proc cpu_switch_to(prev_sp_ptr: ptr uint64, next_sp: uint64) {.importc, cdecl.}
 proc mm_activate_satp(satp_val: uint64) {.importc, cdecl.}
 proc mm_get_kernel_satp(): uint64 {.importc, cdecl.}
-# Import console for debugging
-proc console_write(p: pointer, len: csize_t) {.importc, cdecl.}
-proc debug*(s: string) =
-  if s.len > 0:
-    console_write(unsafeAddr s[0], csize_t(s.len))
+proc debug(s: string) =
+  proc console_write(p: pointer, len: int) {.importc, cdecl.}
+  if s.len > 0:
+    console_write(unsafeAddr s[0], s.len)
 proc print_arch_info*() =
   debug("[Rumpk] Architecture Context: " & ARCH_NAME & "\n")
@@ -126,22 +119,20 @@ var current_fiber* {.global.}: Fiber = addr main_fiber
 proc fiber_trampoline() {.cdecl, exportc, noreturn.} =
   var msg = "[FIBER] Trampoline Entry!\n"
-  console_write(addr msg[0], csize_t(msg.len))
+  # We can't use kprintln here if it's not imported or we use emit
+  proc console_write(p: pointer, len: int) {.importc, cdecl.}
+  console_write(addr msg[0], msg.len)
   let f = current_fiber
   if f.state.entry != nil:
     f.state.entry()
   # If the fiber returns, halt
-  when defined(amd64) or defined(x86_64):
-    while true:
-      {.emit: "asm volatile(\"hlt\");".}
-  elif defined(arm64) or defined(aarch64):
-    while true:
-      {.emit: "asm volatile(\"wfi\");".}
-  elif defined(riscv64):
+  when defined(riscv64):
     while true:
       {.emit: "asm volatile(\"wfi\");".}
-  else:
-    while true: discard
 # =========================================================
 # Fiber Initialization (Arch-Specific)
@@ -155,6 +146,8 @@ proc init_fiber*(f: Fiber, entry: proc() {.cdecl.}, stack_base: pointer, size: i
   f.pty_id = -1
   f.user_sp_init = 0
   f.cspace_id = f.id  # Ground Zero: CSpace ID matches Fiber ID
+  f.blocked_on_mask = 0
+  f.is_blocked = false
   # Start at top of stack (using actual size)
   var sp = cast[uint64](stack_base) + cast[uint64](size)

ION / SysTable module (kernel side)

@@ -115,6 +115,9 @@ type
     # Phase 36.3: Shared ION (16 bytes)
     fn_ion_alloc*: proc(out_id: ptr uint16): uint64 {.cdecl.}
     fn_ion_free*: proc(id: uint16) {.cdecl.}
+    # Phase 36.4: I/O Multiplexing (8 bytes)
+    fn_wait_multi*: proc(mask: uint64): int32 {.cdecl.}
 include invariant

@@ -184,4 +187,4 @@ proc ion_init_network*() {.exportc, cdecl.} =
 static: doAssert(sizeof(IonPacket) == 24, "IonPacket size mismatch!")
 static: doAssert(sizeof(CmdPacket) == 32, "CmdPacket size mismatch!")
-static: doAssert(sizeof(SysTable) == 192, "SysTable size mismatch! (Expected 192 after BLAKE3 expansion)")
+static: doAssert(sizeof(SysTable) == 200, "SysTable size mismatch! (Expected 200 after wait_multi expansion)")

kernel.nim

@@ -8,7 +8,7 @@
 # Nexus Sovereign Core: Kernel Implementation
 # target Bravo: Complete Build Unification
-import ion, fiber, sched, pty, cspace, ontology
+import ring, fiber, ion, sched, pty, cspace, ontology, channels
 import fs/vfs, fs/tar, fs/sfs
 import loader/elf
 import ../libs/membrane/term

@@ -293,11 +293,8 @@ proc fiber_yield*() {.exportc, cdecl.} =
   rumpk_yield_guard()
 proc rumpk_yield_internal*() {.exportc, cdecl.} =
-  # Schedule fibers 0-5 (ION, NexShell, Compositor, NetSwitch, Init, Mksh)
-  if not sched_tick_spectrum(active_fibers_arr.toOpenArray(0, 5)):
-    # No runnable fibers (all sleeping).
-    # Return to Dispatcher (Main Fiber) at index 6 to enter sleep/wfi mode.
-    switch(active_fibers_arr[6])
+  # Switch back to the main dispatcher loop
+  switch(active_fibers_arr[6])
 proc fiber_netswitch_entry() {.cdecl.} =
   kprintln("[NetSwitch] Traffic Engine Online")
@@ -440,9 +437,19 @@ proc k_handle_syscall*(nr, a0, a1, a2: uint): uint {.exportc, cdecl.} =
 # --- KERNEL BOOT ---
+proc ion_wait_multi*(mask: uint64): int32 {.exportc, cdecl.} =
+  ## Block the current fiber until data is available on any of the masked slots
+  current_fiber.blocked_on_mask = mask
+  current_fiber.is_blocked = true
+  fiber_yield()
+  return 0
 proc kmain() {.exportc, cdecl.} =
+  var next_mmio_addr {.importc: "virtio_pci_next_mmio_addr", nodecl.}: uint32
+  kprint("\n[Kernel] next_mmio_addr check: ")
+  kprint_hex(uint64(next_mmio_addr))
+  kprintln("")
   kprintln("\nNexus Sovereign Core v1.1 Starting...")
   ion_pool_init()
   proc mm_init() {.importc, cdecl.}
   proc mm_enable_kernel_paging() {.importc, cdecl.}

@@ -483,6 +490,7 @@ proc kmain() {.exportc, cdecl.} =
   sys.fn_vfs_read = ion_vfs_read
   sys.fn_vfs_list = ion_vfs_list
   sys.fn_vfs_write = wrapper_vfs_write
+  sys.fn_wait_multi = ion_wait_multi
   # Shared Rings Setup (SYSTABLE area)
   # Layout: 0x0000=SysTable, 0x2000=RX, 0x4000=TX, 0x6000=Event, 0x8000=CMD, 0xA000=Input
   # Each ring is ~6KB-8KB, so we need 8KB (0x2000) spacing.
@@ -512,7 +520,7 @@ proc kmain() {.exportc, cdecl.} =
   # Spawn Fibers
   fiber_ion.id = 1; fiber_nexshell.id = 2; fiber_compositor.id = 3
-  fiber_subject.id = 4; fiber_child.id = 5
+  fiber_subject.id = 4; fiber_child.id = 5; fiber_netswitch.id = 6
   init_fiber(addr fiber_ion, ion_fiber_entry, addr stack_ion[0], sizeof(stack_ion))
   let ion_spawn_id = emit_fiber_spawn(1, 0, boot_id)  # ION fiber

@@ -545,6 +553,16 @@ proc kmain() {.exportc, cdecl.} =
   discard fiber_grant_channel(4, 0x1001, PERM_WRITE)  # console.output (write-only)
   discard emit_capability_grant(4, 2, 0x1001, 0, subject_spawn_id)  # Log event
   kprintln("[CSpace] Granted output capability to Subject")
+  # Grant Network I/O (RX/TX)
+  # NetSwitch (Fiber 6): Full access to shuttle packets
+  discard fiber_grant_channel(6, 0x500, PERM_READ or PERM_WRITE)  # CMD_NET_TX
+  discard fiber_grant_channel(6, 0x501, PERM_READ or PERM_WRITE)  # CMD_NET_RX
+  # Subject (Fiber 4): Needs to READ RX (0x501) and WRITE TX (0x500)
+  discard fiber_grant_channel(4, 0x500, PERM_WRITE)  # Can send packets
+  discard fiber_grant_channel(4, 0x501, PERM_READ)   # Can receive packets
+  kprintln("[CSpace] Granted network capabilities to NetSwitch and Subject")
   # Init (Subject) lives in Cell 0 (0x88000000) - Needs 64MB for large BSS
   fiber_subject.phys_offset = 0x88000000'u64
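
The shared-ring layout comment in the hunk above packs the SysTable plus five rings into one area at 0x2000 (8 KiB) intervals, since each ring is roughly 6-8 KB. A throwaway Nim sketch of that address arithmetic; SYSTABLE_BASE is a stand-in name, and 0x83000000 is assumed from the ION_BASE NexShell maps the SysTable at:

# Illustrative only: region addresses implied by the layout comment.
const SYSTABLE_BASE = 0x83000000'u64  # assumed base (NexShell's ION_BASE)
const RING_STRIDE = 0x2000'u64        # 8 KiB per region

proc regionAddr(index: int): uint64 =
  ## 0 = SysTable, 1 = RX, 2 = TX, 3 = Event, 4 = CMD, 5 = Input
  SYSTABLE_BASE + uint64(index) * RING_STRIDE

# regionAddr(4) == 0x83008000 (CMD ring), regionAddr(5) == 0x8300A000 (Input ring)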

Scheduler module (spectrum tick)

@@ -43,23 +43,29 @@ import fiber
 proc sched_get_now_ns*(): uint64 {.importc: "rumpk_timer_now_ns", cdecl.}
-# Forward declaration for the tick function
-# Returns TRUE if a fiber was switched to (work done/found).
-# Returns FALSE if the system should sleep (WFI).
+# Forward declaration for channel data check (provided by kernel/channels)
+proc fiber_can_run_on_channels*(id: uint64, mask: uint64): bool {.importc, cdecl.}
+proc is_runnable(f: ptr FiberObject, now: uint64): bool =
+  if f == nil: return false
+  if now < f.sleep_until: return false
+  if f.is_blocked:
+    if fiber_can_run_on_channels(f.id, f.blocked_on_mask):
+      f.is_blocked = false  # Latched unblock
+      return true
+    return false
+  return true
 proc sched_tick_spectrum*(fibers: openArray[ptr FiberObject]): bool =
   let now = sched_get_now_ns()
   # =========================================================
   # Phase 1: PHOTON (Hard Real-Time / Hardware Driven)
   # =========================================================
-  # - V-Sync (Compositor)
-  # - Audio Mix
-  # - Network Polling (War Mode)
   var run_photon = false
   for f in fibers:
     if f != nil and f.getSpectrum() == Spectrum.Photon:
-      if now >= f.sleep_until:
+      if is_runnable(f, now):
         if f != current_fiber:
           switch(f); return true
         else:
@@ -69,13 +75,10 @@ proc sched_tick_spectrum*(fibers: openArray[ptr FiberObject]): bool =
   # =========================================================
   # Phase 2: MATTER (Interactive / Latency Sensitive)
   # =========================================================
-  # - Shell
-  # - Editor
   var run_matter = false
   for f in fibers:
     if f != nil and f.getSpectrum() == Spectrum.Matter:
-      if now >= f.sleep_until:
+      if is_runnable(f, now):
         if f != current_fiber:
           switch(f); return true
         else:
@@ -85,13 +88,10 @@ proc sched_tick_spectrum*(fibers: openArray[ptr FiberObject]): bool =
   # =========================================================
   # Phase 3: GRAVITY (Throughput / Background)
   # =========================================================
-  # - Compiler
-  # - Ledger Sync
   var run_gravity = false
   for f in fibers:
     if f != nil and f.getSpectrum() == Spectrum.Gravity:
-      if now >= f.sleep_until:
+      if is_runnable(f, now):
         if f != current_fiber:
           switch(f); return true
         else:
@@ -101,12 +101,9 @@ proc sched_tick_spectrum*(fibers: openArray[ptr FiberObject]): bool =
   # =========================================================
   # Phase 4: VOID (Scavenger)
   # =========================================================
-  # - Untrusted Code
-  # - Speculative Execution
   for f in fibers:
     if f != nil and f.getSpectrum() == Spectrum.Void:
-      if now >= f.sleep_until:
+      if is_runnable(f, now):
         if f != current_fiber:
           switch(f)
           return true

virtio_pci.zig

@@ -23,7 +23,7 @@ const PCI_CAP_PTR = 0x34;
 // Global Allocator for I/O and MMIO
 var next_io_port: u32 = 0x1000;
-var next_mmio_addr: u32 = 0x40000000;
+pub var next_mmio_addr: u32 = 0x40000000;
 // VirtIO Capability Types
 const VIRTIO_PCI_CAP_COMMON_CFG = 1;

@@ -58,6 +58,10 @@ pub const VirtioTransport = struct {
     }
     pub fn probe(self: *VirtioTransport) bool {
+        if (next_mmio_addr == 0) {
+            next_mmio_addr = 0x40000000;
+            uart.print("[VirtIO-PCI] WARNING: next_mmio_addr was ZERO! Restored to 0x40000000\n");
+        }
         uart.print("[VirtIO-PCI] Probing capabilities...\n");
         // 1. Enable Bus Master & Memory Space & IO Space
@@ -75,19 +79,23 @@ pub const VirtioTransport = struct {
             const cap_id = @as(*volatile u8, @ptrFromInt(cap_addr)).*;
             const cap_next = @as(*volatile u8, @ptrFromInt(cap_addr + 1)).*;
-            uart.print("[VirtIO-PCI] Cap at ");
-            uart.print_hex(cap_offset);
-            uart.print(" ID: ");
-            uart.print_hex(cap_id);
-            uart.print(" Next: ");
-            uart.print_hex(cap_next);
-            uart.print("\n");
+            // uart.print(" ID: ");
+            // uart.print_hex(cap_id);
+            // uart.print(" Next: ");
+            // uart.print_hex(cap_next);
+            // uart.print("\n");
             if (cap_id == 0x09) { // Vendor Specific (VirtIO)
                 const cap_type = @as(*volatile u8, @ptrFromInt(cap_addr + 3)).*;
                 const bar_idx = @as(*volatile u8, @ptrFromInt(cap_addr + 4)).*;
                 const offset = @as(*volatile u32, @ptrFromInt(cap_addr + 8)).*;
+                if (bar_idx >= 6) {
+                    uart.print("[VirtIO-PCI] Ignoring Invalid BAR Index in Cap\n");
+                    cap_offset = cap_next;
+                    continue;
+                }
                 // Resolve BAR Address
                 const bar_ptr = @as(*volatile u32, @ptrFromInt(self.base_addr + 0x10 + (@as(usize, bar_idx) * 4)));
                 const bar_val = bar_ptr.*;

ION wrappers (userland / membrane side)

@@ -89,9 +89,12 @@ type
     # Phase 36.3: Shared ION (16 bytes)
     fn_ion_alloc*: proc(out_id: ptr uint16): uint64 {.cdecl.}
     fn_ion_free*: proc(id: uint16) {.cdecl.}
+    # Phase 36.4: I/O Multiplexing (8 bytes)
+    fn_wait_multi*: proc(mask: uint64): int32 {.cdecl.}
 static:
-  doAssert sizeof(SysTable) == 192
+  doAssert sizeof(SysTable) == 200
 var membrane_rx_ring_ptr*: ptr RingBuffer[IonPacket, 256]
 var membrane_tx_ring_ptr*: ptr RingBuffer[IonPacket, 256]

@@ -215,6 +218,12 @@ proc ion_net_available*(): bool {.exportc.} =
   ## Check if network rings are initialized and ready
   return membrane_net_rx_ptr != nil and membrane_net_tx_ptr != nil
+proc ion_user_wait_multi*(mask: uint64): int32 {.exportc.} =
+  let sys = get_sys_table()
+  if sys.fn_wait_multi != nil:
+    return sys.fn_wait_multi(mask)
+  return -1
 # --- Crypto Wrappers ---
 proc crypto_siphash*(key: array[16, byte], data: pointer, len: uint64): array[16, byte] =
   let sys = get_sys_table()

nexshell.zig

@@ -5,13 +5,6 @@
 // This file is part of the Nexus Sovereign Core.
 // See legal/LICENSE_SOVEREIGN.md for license terms.
-//! Nexus Immune System (NPL): The Voice & Command Plane
-//!
-//! Implemented as an NPL fiber, NexShell provides the interactive kernel shell.
-//! Handles telemetry events, user input, and command dispatch to the ION layer.
-//!
-//! SAFETY: Interacts with the shared SysTable via volatile pointers and atomic operations.
 const std = @import("std");
 const ION_BASE = 0x83000000;
@@ -20,7 +13,7 @@ const IonPacket = extern struct {
     phys: u64,
     len: u16,
     id: u16,
-    _pad: u32, // Match Nim's 24-byte alignment
+    _pad: u32,
 };
 const CmdPacket = extern struct {
@@ -50,121 +43,56 @@ const SysTable = extern struct {
 };
 const CMD_ION_STOP = 1;
-const CMD_ION_START = 2;
-const CMD_GPU_MATRIX = 0x100;
-const CMD_GET_GPU_STATUS = 0x102;
-// The Main Loop for the NexShell Fiber
+extern fn k_handle_syscall(nr: usize, a0: usize, a1: usize, a2: usize) usize;
+extern fn console_read() c_int;
+extern fn ion_push_stdin(ptr: [*]const u8, len: usize) void;
+extern fn fiber_yield() void;
+extern fn fiber_sleep(ms: u64) void;
+extern fn ion_wait_multi(mask: u64) i32;
 export fn nexshell_main() void {
     const sys = @as(*SysTable, @ptrFromInt(ION_BASE));
     print("\n╔═══════════════════════════════════════╗\n");
     print("║ NEXSHELL IMMUNE SYSTEM ACTIVE ║\n");
-    print("║ Command Plane: READY ║\n");
-    print("╚═══════════════════════════════════════╝\n");
-    // TEMP: event_ring disabled due to NULL pointer issue
-    // const event_ring = sys.s_event;
+    print("╚═══════════════════════════════════════╝\n\n");
     const cmd_ring = sys.s_cmd;
-    // SAFETY(NexShell): Input buffer initialized to `undefined` for performance.
-    // Populated by char-by-char console reads before use.
-    var input_buffer: [64]u8 = undefined;
+    var input_buffer: [128]u8 = undefined;
     var input_idx: usize = 0;
-    var loop_count: usize = 0;
-    var poll_pulse: usize = 0;
-    var last_lsr: u8 = 0;
-    print("[NexShell] Entering main loop...\n");
+    print("[NexShell] Entering main loop (REACTIVE MODE)...\n");
+    print("> ");
     while (true) {
-        loop_count += 1;
-        poll_pulse += 1;
-        // First iteration diagnostic
-        if (loop_count == 1) {
-            print("[NexShell] First iteration\n");
-        }
-        // Polling pulse every 100 to show activity
-        if (poll_pulse >= 100) {
-            print(".");
-            poll_pulse = 0;
-        }
-        // 1. Process Telemetry Events
-        // TEMPORARILY DISABLED: event_ring causes page fault (NULL pointer?)
-        // const head = @atomicLoad(u32, &event_ring.head, .acquire);
-        // const tail = @atomicLoad(u32, &event_ring.tail, .monotonic);
-        //
-        // if (head != tail) {
-        //     const pkt = event_ring.data[tail & event_ring.mask];
-        //     print("\n[NexShell] ALERT | EventID: ");
-        //     if (pkt.id == 777) {
-        //         print("777 (SECURITY_HEARTBEAT)\n");
-        //     } else {
-        //         print("GENERIC\n");
-        //     }
-        //     @atomicStore(u32, &event_ring.tail, tail + 1, .release);
-        // }
-        // 2. Process User Input (Non-blocking)
-        console_poll();
-        const current_lsr = debug_uart_lsr();
-        if (current_lsr != last_lsr) {
-            print("[LSR:");
-            print_hex(current_lsr);
-            print("]");
-            last_lsr = current_lsr;
-        }
-        if ((loop_count % 20) == 0) {
-            print("."); // Alive heartbeat
-        }
-        const c = console_read();
-        if (c != -1) {
-            print("[GOT]");
-            const byte = @as(u8, @intCast(c));
-            // print("[NexShell] Got char\n");
-            if (forward_mode) {
-                // Check for escape: Ctrl+K (11)
-                if (byte == 11) {
-                    forward_mode = false;
-                    print("\n[NexShell] RESUMING KERNEL CONTROL.\n");
-                } else {
-                    const bs = [1]u8{byte};
-                    ion_push_stdin(&bs, 1);
-                }
-            } else {
-                if (byte == '\r' or byte == '\n') {
-                    print("\n");
-                    process_command(input_buffer[0..input_idx], cmd_ring);
-                    input_idx = 0;
-                } else if (byte == 0x7F or byte == 0x08) {
-                    if (input_idx > 0) {
-                        input_idx -= 1;
-                        print("\x08 \x08"); // Backspace
-                    }
-                } else if (input_idx < 63) {
-                    input_buffer[input_idx] = byte;
-                    input_idx += 1;
-                    const bs = [1]u8{byte};
-                    print(&bs);
-                }
-            }
-        } else {
-            fiber_sleep(20); // 50Hz poll is plenty for keyboard (Wait... fiber_sleep takes milliseconds in Nim wrapper!)
-            // Re-checking kernel.nim: fiber_sleep(ms) multiplies by 1_000_000.
-            // So 20 is Correct for 20ms.
-            // Wait. If kernel.nim multiplies by 1M, then passing 20 = 20M ns = 20ms.
-            // My analysis in Thought Process was confused.
-            // kernel.nim:
-            //   proc fiber_sleep*(ms: uint64) = current_fiber.sleep_until = now + (ms * 1_000_000)
-            // So nexshell.zig calling fiber_sleep(20) -> 20ms.
-            // THIS IS CORRECT.
-            // I will NOT change this to 20_000_000. That would be 20,000 seconds!
-            // I will restore the comment to be accurate.
-            fiber_sleep(20);
-        }
+        const c = console_read();
+        if (c != -1) {
+            const byte = @as(u8, @intCast(c));
+            if (byte == '\r' or byte == '\n') {
+                print("\n");
+                if (input_idx > 0) {
+                    process_command(input_buffer[0..input_idx], cmd_ring);
+                }
+                input_idx = 0;
+                print("> ");
+            } else if (byte == 8 or byte == 127) { // Backspace
+                if (input_idx > 0) {
+                    input_idx -= 1;
+                    print("\x08 \x08");
+                }
+            } else if (input_idx < 127) {
+                input_buffer[input_idx] = byte;
+                input_idx += 1;
+                const bs = [1]u8{byte};
+                print(&bs);
+            }
+        } else {
+            // Wait for console.input (Slot 0)
+            _ = ion_wait_multi(0x01);
+        }
         fiber_yield();
@@ -178,14 +106,9 @@ fn process_command(cmd_text: []const u8, cmd_ring: *RingBuffer(CmdPacket)) void
     if (forward_mode) {
         const is_toggle = std.mem.eql(u8, cmd_text, "kernel") or std.mem.eql(u8, cmd_text, "exit");
-        // ALWAYS FORWARD TO USERLAND first so it can process its own exit
         print("[NexShell] Forwarding to Subject...\n");
-        // Combine command + newline to avoid fragmentation
         if (cmd_text.len > 0) {
-            // SAFETY(NexShell): Local forward buffer initialized to `undefined`.
-            // Immediately populated by `@memcpy` and newline before pushing to ION.
            var forward_buf: [128]u8 = undefined;
            const copy_len = if (cmd_text.len > 126) 126 else cmd_text.len;
            @memcpy(forward_buf[0..copy_len], cmd_text[0..copy_len]);
@@ -198,140 +121,15 @@ fn process_command(cmd_text: []const u8, cmd_ring: *RingBuffer(CmdPacket)) void
             print("[NexShell] Dropping to Kernel Debug Mode.\n");
         }
     } else {
-        if (std.mem.eql(u8, cmd_text, "subject") or std.mem.eql(u8, cmd_text, "nipbox")) {
-            forward_mode = true;
-            print("[NexShell] Resuming Subject Pipe.\n");
-            return;
-        }
-        if (std.mem.eql(u8, cmd_text, "io stop") or std.mem.eql(u8, cmd_text, "ion stop")) {
-            print("[NexShell] Pushing CMD_ION_STOP...\n");
-            push_cmd(cmd_ring, CMD_ION_STOP, 0);
-        } else if (std.mem.eql(u8, cmd_text, "matrix on")) {
-            print("[NexShell] Engaging Matrix Protocol (Emergency Override)...\n");
-            push_cmd(cmd_ring, CMD_GPU_MATRIX, 1);
-        } else if (std.mem.eql(u8, cmd_text, "matrix off")) {
-            print("[NexShell] Disengaging Matrix Protocol...\n");
-            push_cmd(cmd_ring, CMD_GPU_MATRIX, 0);
-        } else if (std.mem.eql(u8, cmd_text, "matrix status")) {
-            push_cmd(cmd_ring, CMD_GET_GPU_STATUS, 0);
-        } else if (std.mem.eql(u8, cmd_text, "ps") or std.mem.eql(u8, cmd_text, "fibers")) {
-            print("[NexShell] Active Fibers:\n");
-            print(" - ION (Packet Engine)\n");
-            print(" - NexShell (Command Plane)\n");
-            print(" - Compositor (Render Pipeline)\n");
-            print(" - NetSwitch (Traffic Engine)\n");
-            print(" - Subject (Userland Loader)\n");
-            print(" - Kernel (Main)\n");
-        } else if (std.mem.eql(u8, cmd_text, "stl summary")) {
-            stl_print_summary();
-        } else if (std.mem.eql(u8, cmd_text, "stl list")) {
-            print("[NexShell] Recent Events:\n");
-            const total = stl_count();
-            const start = if (total > 10) total - 10 else 0;
-            var id = total - 1;
-            while (id >= start) {
-                const ev = stl_lookup(id);
-                if (ev) |e| {
-                    print(" [");
-                    print_u64_hex(id);
-                    print("] Kind=");
-                    print_u16_hex(@intFromEnum(e.kind));
-                    print(" Fiber=");
-                    print_u16_hex(@as(u16, @intCast(e.fiber_id)));
-                    print("\n");
-                }
-                if (id == 0) break;
-                id -= 1;
-            }
-        } else if (std.mem.startsWith(u8, cmd_text, "stl graph") or std.mem.eql(u8, cmd_text, "stl tree")) {
-            var lineage: LineageResult = undefined;
-            const total = stl_count();
-            if (total == 0) {
-                print("[NexShell] No events to graph.\n");
-            } else {
-                // Default to last event
-                const last_id = @as(u64, total - 1);
-                stl_trace_lineage(last_id, &lineage);
-                print("[NexShell] Causal Graph for Event ");
-                print_u64_hex(last_id);
-                print(":\n\n");
-                var i: u32 = 0;
-                while (i < lineage.count) : (i += 1) {
-                    const eid = lineage.event_ids[i];
-                    const ev = stl_lookup(eid);
-                    if (i > 0) print(" |\n\n");
-                    print("[");
-                    print_u64_hex(eid);
-                    print("] ");
-                    if (ev) |e| {
-                        switch (e.kind) {
-                            .SystemBoot => print("SystemBoot"),
-                            .FiberSpawn => print("FiberSpawn"),
-                            .CapabilityGrant => print("CapGrant"),
-                            .AccessDenied => print("AccessDenied"),
-                            else => {
-                                print("Kind=");
-                                print_u16_hex(@intFromEnum(e.kind));
-                            },
-                        }
-                    } else {
-                        print("???");
-                    }
-                    print("\n");
-                }
-                print("\n");
-            }
-        } else if (std.mem.eql(u8, cmd_text, "stl dump")) {
-            var dump_buf: [4096]u8 = undefined;
-            const written = stl_export_binary(&dump_buf, dump_buf.len);
-            if (written > 0) {
-                print("[NexShell] STL Binary Dump (");
-                print_u64_hex(written);
-                print(" bytes):\n");
-                var i: usize = 0;
-                while (i < written) : (i += 1) {
-                    print_hex(dump_buf[i]);
-                    if ((i + 1) % 32 == 0) print("\n");
-                }
-                print("\n[NexShell] Dump Complete.\n");
-            } else {
-                print("[NexShell] Dump Failed (Buffer too small or STL not ready)\n");
-            }
-        } else if (std.mem.eql(u8, cmd_text, "mem")) {
-            print("[NexShell] Memory Status:\n");
-            print(" Ion Pool: 32MB allocated\n");
-            print(" Surface: 32MB framebuffer pool\n");
-            print(" Stack Usage: ~512KB (6 fibers)\n");
-        } else if (std.mem.eql(u8, cmd_text, "uptime")) {
-            print("[NexShell] System Status: OPERATIONAL\n");
-            print(" Architecture: RISC-V (Virt)\n");
-            print(" Timer: SBI Extension\n");
-            print(" Input: Interrupt-Driven (IRQ 10)\n");
-        } else if (std.mem.eql(u8, cmd_text, "reboot")) {
-            print("[NexShell] Initiating system reboot...\n");
-            // SBI shutdown extension (EID=0x53525354, FID=0)
-            asm volatile (
-                \\ li a7, 0x53525354
-                \\ li a6, 0
-                \\ li a0, 0
-                \\ ecall
-            );
-        } else if (std.mem.eql(u8, cmd_text, "clear")) {
-            print("\x1b[2J\x1b[H"); // ANSI clear screen + cursor home
-        } else if (std.mem.eql(u8, cmd_text, "help")) {
-            print("[NexShell] Kernel Commands:\n");
-            print(" System: ps, fibers, mem, uptime, reboot, clear\n");
-            print(" STL: stl summary, stl list, stl dump, stl graph\n");
-            print(" IO: io stop, ion stop\n");
-            print(" Matrix: matrix on/off/status\n");
-            print(" Shell: subject, nipbox, help\n");
-        } else {
-            print("[NexShell] Unknown Kernel Command: ");
+        if (std.mem.eql(u8, cmd_text, "ps") or std.mem.eql(u8, cmd_text, "fibers")) {
+            print("[NexShell] Active Fibers:\n");
+            _ = k_handle_syscall(0x501, 0, 0, 0);
+        } else if (std.mem.eql(u8, cmd_text, "stop")) {
+            push_cmd(cmd_ring, CMD_ION_STOP, 0);
+        } else if (std.mem.eql(u8, cmd_text, "help")) {
+            print("[NexShell] ps, stop, help\n");
+        } else {
+            print("[NexShell] Unknown Command: ");
             print(cmd_text);
             print("\n");
         }
@@ -343,109 +141,13 @@ fn push_cmd(ring: *RingBuffer(CmdPacket), kind: u32, arg: u64) void {
     const tail = @atomicLoad(u32, &ring.tail, .monotonic);
     const next = (head + 1) & ring.mask;
-    if (next == tail) {
-        print("[NexShell] CMD RING FULL!\n");
-        return;
-    }
+    if (next == tail) return;
     ring.data[head & ring.mask] = .{ .kind = kind, .reserved = 0, .arg = arg, .id = [_]u8{0} ** 16 };
     @atomicStore(u32, &ring.head, next, .release);
 }
-// OS Shims
-extern fn k_handle_syscall(nr: usize, a0: usize, a1: usize, a2: usize) usize;
-extern fn console_read() c_int;
-extern fn console_poll() void;
-extern fn ion_push_stdin(ptr: [*]const u8, len: usize) void;
-extern fn fiber_sleep(ms: u64) void;
-extern fn fiber_yield() void;
-extern fn debug_uart_lsr() u8;
-// STL Externs
-extern fn stl_count() u32;
-extern fn stl_print_summary() void;
-extern fn stl_get_recent(max_count: u32, result: *QueryResult) void;
-extern fn stl_export_binary(dest: [*]u8, max_size: usize) usize;
-extern fn stl_trace_lineage(event_id: u64, result: *LineageResult) void;
-extern fn stl_lookup(event_id: u64) ?*const Event;
-const LineageResult = extern struct {
-    count: u32,
-    event_ids: [16]u64,
-};
-const EventKind = enum(u16) {
-    Null = 0,
-    SystemBoot = 1,
-    SystemShutdown = 2,
-    FiberSpawn = 3,
-    FiberTerminate = 4,
-    CapabilityGrant = 10,
-    CapabilityRevoke = 11,
-    CapabilityDelegate = 12,
-    ChannelOpen = 20,
-    ChannelClose = 21,
-    ChannelRead = 22,
-    ChannelWrite = 23,
-    MemoryAllocate = 30,
-    MemoryFree = 31,
-    MemoryMap = 32,
-    NetworkPacketRx = 40,
-    NetworkPacketTx = 41,
-    AccessDenied = 50,
-    PolicyViolation = 51,
-};
-const Event = packed struct {
-    kind: EventKind,
-    _reserved: u8 = 0,
-    timestamp_ns: u64,
-    fiber_id: u64,
-    entity_id: u64,
-    cause_id: u64,
-    data0: u64,
-    data1: u64,
-    data2: u64,
-};
-const QueryResult = extern struct {
-    count: u32,
-    events: [64]*const Event,
-};
-fn print_u64_hex(val: u64) void {
-    const chars = "0123456789ABCDEF";
-    var buf: [16]u8 = undefined;
-    var v = val;
-    var i: usize = 0;
-    while (i < 16) : (i += 1) {
-        buf[15 - i] = chars[v & 0xF];
-        v >>= 4;
-    }
-    print(&buf);
-}
-fn print_u16_hex(val: u16) void {
-    const chars = "0123456789ABCDEF";
-    const buf = [4]u8{
-        chars[(val >> 12) & 0xF],
-        chars[(val >> 8) & 0xF],
-        chars[(val >> 4) & 0xF],
-        chars[val & 0xF],
-    };
-    print(&buf);
-}
-fn print_hex(val: u8) void {
-    const chars = "0123456789ABCDEF";
-    const hi = chars[(val >> 4) & 0xF];
-    const lo = chars[val & 0xF];
-    const buf = [_]u8{ hi, lo };
-    print(&buf);
-}
 fn kernel_write(fd: c_int, buf: [*]const u8, count: usize) isize {
-    // 0x204 = SYS_WRITE
     return @as(isize, @bitCast(k_handle_syscall(0x204, @as(usize, @intCast(fd)), @intFromPtr(buf), count)));
 }