Network: Phase 36 Component (DHCP, VirtIO 12B, Hardened Logs)

Markus Maiwald 2026-01-07 14:48:40 +01:00
parent 77b4cb55c7
commit 4c91aa7f14
16 changed files with 890 additions and 193 deletions

View File

@ -10,7 +10,7 @@
## Freestanding implementation (No OS module dependencies).
## Uses fixed-size buffers and raw blocks for persistence.
import fiber # For yield
import ring, fiber # For yield
proc kprintln(s: cstring) {.importc, cdecl.}
proc kprint(s: cstring) {.importc, cdecl.}

View File

@ -118,6 +118,10 @@ type
# Phase 36.4: I/O Multiplexing (8 bytes)
fn_wait_multi*: proc(mask: uint64): int32 {.cdecl.}
# Phase 36.5: Network Hardware Info (8 bytes)
net_mac*: array[6, byte]
reserved_mac*: array[2, byte]
include invariant
@ -170,6 +174,9 @@ proc ion_init_input*() {.exportc, cdecl.} =
chan_input.ring = addr guest_input_hal
proc ion_init_network*() {.exportc, cdecl.} =
# NOTE: This function is called early in kernel boot.
# The actual ring memory will be allocated in SYSTABLE region by kmain.
# We just initialize the local HAL rings here for internal kernel use.
net_rx_hal.head = 0
net_rx_hal.tail = 0
net_rx_hal.mask = 255
@ -184,7 +191,31 @@ proc ion_init_network*() {.exportc, cdecl.} =
netswitch_rx_hal.tail = 0
netswitch_rx_hal.mask = 255
chan_netswitch_rx.ring = addr netswitch_rx_hal
# Initialize user slab
ion_user_slab_init()
# Internal allocators removed - use shared/systable versions
# =========================================================
# SysTable-Compatible Wrappers for User Slab
# =========================================================
# These wrappers have the same signature as fn_ion_alloc/fn_ion_free
# but use the user slab instead of the kernel ION pool.
# Track allocated buffers by pseudo-ID (index in slab)
proc ion_user_alloc_systable*(out_id: ptr uint16): uint64 {.exportc, cdecl.} =
## SysTable-compatible allocator using user slab (via shared bitmap)
return ion_alloc_shared(out_id)
proc ion_user_free_systable*(id: uint16) {.exportc, cdecl.} =
## SysTable-compatible free using user slab
var pkt: IonPacket
pkt.id = id
pkt.data = cast[ptr UncheckedArray[byte]](1) # Dummy non-nil
ion_free(pkt)
static: doAssert(sizeof(IonPacket) == 24, "IonPacket size mismatch!")
static: doAssert(sizeof(CmdPacket) == 32, "CmdPacket size mismatch!")
static: doAssert(sizeof(SysTable) == 200, "SysTable size mismatch! (Expected 200 after wait_multi expansion)")
static: doAssert(sizeof(SysTable) == 208, "SysTable size mismatch! (Expected 208 after MAC+pad)")

View File

@ -23,6 +23,13 @@ const
POOL_COUNT* = 1024 # Number of packets in the pool (2MB total RAM)
POOL_ALIGN* = 4096 # VirtIO/Page Alignment
SYSTABLE_BASE = 0x83000000'u64
USER_SLAB_OFFSET = 0x10000'u64 # Offset within SYSTABLE
USER_SLAB_BASE* = SYSTABLE_BASE + USER_SLAB_OFFSET # 0x83010000
USER_SLAB_COUNT = 512 # 512 packets to cover RX Ring (256) + TX
USER_PKT_SIZE = 2048 # 2KB per packet
USER_BITMAP_ADDR = SYSTABLE_BASE + 0x100
type
# The Physical Token representing a packet
IonPacket* = object
@ -38,6 +45,7 @@ type
free_ring: RingBuffer[uint16, POOL_COUNT] # Stores IDs of free slabs
base_phys: uint64
var global_tx_ring*: RingBuffer[IonPacket, 256]
var global_pool: PacketPool
proc ion_pool_init*() {.exportc.} =
@ -58,6 +66,7 @@ proc ion_pool_init*() {.exportc.} =
dbg("[ION] Ring Init...")
global_pool.free_ring.init()
global_tx_ring.init()
# Fill the free ring with all indices [0..1023]
dbg("[ION] Filling Slabs...")
@ -95,6 +104,17 @@ proc ion_free*(pkt: IonPacket) {.exportc.} =
## O(1) Free. Returns the token to the ring.
if pkt.data == nil: return
if (pkt.id and 0x8000) != 0:
# User Slab - Clear shared bitmap
let slotIdx = pkt.id and 0x7FFF
if slotIdx >= USER_SLAB_COUNT: return
let bitmap = cast[ptr array[64, byte]](USER_BITMAP_ADDR)
let byteIdx = int(slotIdx) div 8
let bitIdx = int(slotIdx) mod 8
let mask = byte(1 shl bitIdx)
bitmap[byteIdx] = bitmap[byteIdx] and (not mask)
return
discard global_pool.free_ring.push(pkt.id)
# Helper for C/Zig Interop (Pure Pointers)
@ -114,10 +134,18 @@ proc ion_free_raw*(id: uint16) {.exportc, cdecl.} =
ion_free(pkt)
proc ion_get_virt*(id: uint16): ptr byte {.exportc.} =
if (id and 0x8000) != 0:
let idx = id and 0x7FFF
let offset = int(idx) * SLAB_SIZE
return cast[ptr byte](USER_SLAB_BASE + uint64(offset))
let offset = int(id) * SLAB_SIZE
return addr global_pool.buffer[offset]
proc ion_get_phys*(id: uint16): uint64 {.exportc.} =
if (id and 0x8000) != 0:
let idx = id and 0x7FFF
let offset = int(idx) * SLAB_SIZE
return USER_SLAB_BASE + uint64(offset)
let offset = int(id) * SLAB_SIZE
return global_pool.base_phys + uint64(offset)
@ -125,13 +153,16 @@ proc ion_get_phys*(id: uint16): uint64 {.exportc.} =
# The Global TX Ring (Multiplexing)
# =========================================================
var global_tx_ring*: RingBuffer[IonPacket, 256]
proc ion_tx_init*() {.exportc.} =
global_tx_ring.init()
proc ion_tx_push*(pkt: IonPacket): bool {.exportc.} =
global_tx_ring.push(pkt)
if global_tx_ring.push(pkt):
# dbg("[ION TX] Pushed")
return true
dbg("[ION TX] PUSH FAILED (Global Ring Full)")
return false
proc ion_tx_pop*(out_id: ptr uint16, out_len: ptr uint16): bool {.exportc.} =
if global_tx_ring.isEmpty:
@ -142,4 +173,41 @@ proc ion_tx_pop*(out_id: ptr uint16, out_len: ptr uint16): bool {.exportc.} =
out_id[] = pkt.id
out_len[] = pkt.len
dbg("[ION TX] Popped Packet for VirtIO")
return true
# =========================================================
# User-Visible Slab Allocator (Shared Memory)
# =========================================================
# NOTE: This allocator provides buffers in the SYSTABLE shared region
# (0x83010000+) which is mapped into both kernel and userland page tables.
# Used for network packet egress from userland.
# NOTE: Constants moved to top
# var user_slab_bitmap: array[USER_SLAB_COUNT, bool] # REMOVED: Use Shared Bitmap
proc ion_user_slab_init*() {.exportc.} =
## Initialize shared user slab bitmap (all free)
let bitmap = cast[ptr array[64, byte]](USER_BITMAP_ADDR)
for i in 0 ..< 64:
bitmap[i] = 0
proc ion_alloc_shared*(out_id: ptr uint16): uint64 {.exportc, cdecl.} =
## Allocate a buffer from the user-visible slab (Kernel Side, Shared Bitmap)
let bitmap = cast[ptr array[64, byte]](USER_BITMAP_ADDR)
for byteIdx in 0 ..< 64:
if bitmap[byteIdx] != 0xFF:
for bitIdx in 0 ..< 8:
let mask = byte(1 shl bitIdx)
if (bitmap[byteIdx] and mask) == 0:
# Found free
bitmap[byteIdx] = bitmap[byteIdx] or mask
let idx = byteIdx * 8 + bitIdx
if idx >= USER_SLAB_COUNT: return 0
out_id[] = uint16(idx) or 0x8000
return USER_SLAB_BASE + uint64(idx) * USER_PKT_SIZE
return 0
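
The shared-bitmap scheme above is small enough to model on a host. A minimal C sketch of the same idea, assuming the 64-byte bitmap, 512 slots of 2 KB and the 0x8000 user-slab tag bit defined in this file (helper names are illustrative, not the kernel's exports):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define USER_SLAB_COUNT 512        /* slots, as above              */
#define USER_TAG        0x8000u    /* marks IDs from the user slab */

static uint8_t bitmap[USER_SLAB_COUNT / 8];   /* 64 bytes, one bit per slot */

/* Returns a tagged ID, or 0xFFFF when the slab is exhausted. */
static uint16_t slab_alloc(void) {
    for (int b = 0; b < (int)sizeof(bitmap); b++) {
        if (bitmap[b] == 0xFF) continue;          /* all 8 slots in this byte used */
        for (int bit = 0; bit < 8; bit++) {
            uint8_t mask = (uint8_t)(1u << bit);
            if ((bitmap[b] & mask) == 0) {
                bitmap[b] |= mask;                /* mark used */
                return (uint16_t)((b * 8 + bit) | USER_TAG);
            }
        }
    }
    return 0xFFFF;
}

static void slab_free(uint16_t id) {
    uint16_t slot = id & 0x7FFF;
    if (slot >= USER_SLAB_COUNT) return;
    bitmap[slot / 8] &= (uint8_t)~(1u << (slot % 8));   /* clear the bit */
}

int main(void) {
    memset(bitmap, 0, sizeof(bitmap));
    uint16_t a = slab_alloc(), b = slab_alloc();
    printf("a=0x%04x b=0x%04x\n", a, b);        /* 0x8000, 0x8001 */
    slab_free(a);
    printf("reuse=0x%04x\n", slab_alloc());     /* 0x8000 again   */
    return 0;
}

Both ion_alloc_shared (kernel side) and ion_user_alloc in the membrane walk the bitmap this way, which is what lets either side free a buffer the other allocated.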

View File

@ -25,8 +25,8 @@ proc rumpk_timer_handler() {.exportc, cdecl, used.} =
# --- EXTERNAL SYMBOLS ---
proc ion_get_phys(id: uint16): uint64 {.importc, cdecl.}
proc ion_alloc_raw*(out_id: ptr uint16): uint64 {.importc, cdecl.}
proc ion_free_raw*(id: uint16) {.importc, cdecl.}
proc rumpk_net_init() {.importc, cdecl.}
proc virtio_net_poll() {.importc, cdecl.}
proc virtio_blk_read(sector: uint64, buf: ptr byte) {.importc, cdecl.}
proc virtio_blk_write(sector: uint64, buf: ptr byte) {.importc, cdecl.}
proc fb_kern_get_addr(): uint64 {.importc, cdecl.}
@ -62,6 +62,10 @@ proc kprint_hex*(val: uint64) {.exportc, cdecl.} =
proc uart_print_hex(val: uint64) {.importc, cdecl.}
uart_print_hex(val)
proc kprint_hex8*(val: uint8) {.exportc, cdecl.} =
proc uart_print_hex8(val: uint8) {.importc, cdecl.}
uart_print_hex8(val)
# ION Unified Memory Manager shim
proc ion_alloc*(): IonPacket =
var id: uint16
@ -292,27 +296,66 @@ proc ion_fiber_entry() {.cdecl.} =
else: discard
fiber_sleep(10_000_000) # 10ms
proc fiber_yield*() {.exportc, cdecl.} =
proc rumpk_yield_guard() {.importc, cdecl.}
rumpk_yield_guard()
proc rumpk_yield_internal*() {.exportc, cdecl.} =
# Switch back to the main dispatcher loop
switch(active_fibers_arr[6])
proc fiber_yield*() {.exportc, cdecl.} =
current_fiber.wants_yield = true
rumpk_yield_internal()
proc fiber_netswitch_entry() {.cdecl.} =
kprintln("[NetSwitch] Traffic Engine Online")
# Iron Firewall: Verify channel sovereignty before operation
if chan_netswitch_rx.ring == nil:
kprintln("[CRITICAL] NetSwitch RX channel uninitialized - HALTING")
while true: fiber_yield()
if chan_net_rx.ring == nil or chan_net_tx.ring == nil:
kprintln("[CRITICAL] Global net rings uninitialized - HALTING")
while true: fiber_yield()
kprintln("[NetSwitch] Channels verified. Sovereignty confirmed.")
while true:
var pkt: IonPacket
# INGRESS: Driver -> NetSwitch -> Subject (chan_net_rx)
if chan_netswitch_rx.recv(pkt):
ion_free_raw(pkt.id)
else:
fiber_sleep(10_000_000) # 10ms
fiber_yield()
if not chan_net_rx.send(pkt):
# kprintln("[NetSwitch] Dropped Ingress (Drop)")
ion_free_raw(pkt.id)
# else:
# kprintln("[NetSwitch] Forwarded Ingress")
# EGRESS: Subject (chan_net_tx) -> NetSwitch -> Driver (ion_tx_push)
if chan_net_tx.recv(pkt):
kprintln("[NetSwitch] Forwarding Egress")
# uart_print("[NetSwitch] Forwarding Egress\n")
var res = ion_tx_push(pkt)
if not res: kprintln("[NetSwitch] Drop (TX Full)")
# Manual Polling (Interrupts Disabled)
virtio_net_poll()
# Prevent Starvation
fiber_sleep(1)
proc ion_ingress*(id: uint16, len: uint16) {.exportc, cdecl.} =
proc ion_ingress*(id: uint16, len: uint16, offset: uint16) {.exportc, cdecl.} =
## Handle packet from Network Driver
let pkt = IonPacket(id: id, len: len)
# Get actual physical address and apply driver offset
let base_phys = ion_get_phys(id)
let base_virt = ion_get_virt(id)
# Create packet with pointers pointing DIRECTLY to the Ethernet frame
let pkt = IonPacket(
id: id,
len: len,
phys: base_phys + uint64(offset),
data: cast[ptr UncheckedArray[byte]](cast[uint64](base_virt) + uint64(offset))
)
if not chan_netswitch_rx.send(pkt):
ion_free_raw(id)
@ -336,12 +379,9 @@ proc k_handle_exception*(scause, sepc, stval: uint) {.exportc, cdecl.} =
kprint(" sepc="); kprint_hex(sepc)
kprint(" stval="); kprint_hex(stval)
kprintln("")
kprintln("[IMMUNE] Fiber HALTING.")
while true: fiber_yield()
proc rumpk_yield_guard() {.cdecl.} =
current_fiber.wants_yield = true
fiber_yield()
kprintln("[IMMUNE] System HALTING (Trap Loop Prevention).")
while true:
{.emit: "asm volatile(\"wfi\");".}
proc wrapper_vfs_write(fd: int32, buf: pointer, count: uint64): int64 {.cdecl.} =
return ion_vfs_write(fd, buf, count)
@ -460,12 +500,19 @@ proc kmain() {.exportc, cdecl.} =
kprint("\n[Kernel] next_mmio_addr check: ")
kprint_hex(uint64(next_mmio_addr))
kprintln("\nNexus Sovereign Core v1.1.2 Starting...")
# HAL Hardware Inits
# rumpk_net_init() -- Moved below
ion_pool_init()
proc mm_init() {.importc, cdecl.}
proc mm_enable_kernel_paging() {.importc, cdecl.}
mm_init()
mm_enable_kernel_paging()
# HAL Hardware Inits (Moved after ION/MM init)
rumpk_net_init()
# Ground Zero Phase 1: Initialize Capability System (SPEC-051)
init_cspace_subsystem()
kprintln("[CSpace] Capability system initialized")
@ -475,6 +522,7 @@ proc kmain() {.exportc, cdecl.} =
let boot_id = emit_system_boot()
ion_init_input()
ion_init_network() # Initialize net rings early
hal_io_init()
pty_init()
discard pty_alloc()
@ -501,8 +549,27 @@ proc kmain() {.exportc, cdecl.} =
sys.fn_vfs_list = ion_vfs_list
sys.fn_vfs_write = wrapper_vfs_write
sys.fn_wait_multi = ion_wait_multi
# Point to user slab allocator (shared memory) instead of kernel pool
sys.fn_ion_alloc = ion_user_alloc_systable
sys.fn_ion_free = ion_user_free_systable
# Populate Network MAC
proc virtio_net_get_mac(out_mac: ptr byte) {.importc, cdecl.}
virtio_net_get_mac(addr sys.net_mac[0])
kprint("[Kernel] MAC Address: ")
for i in 0 ..< 6:
kprint_hex8(sys.net_mac[i])
if i < 5: kprint(":")
kprintln("")
# Initialize user slab bitmap (at offset 0x100, 64 bytes for 512 slots)
let bitmap = cast[ptr array[64, byte]](SYSTABLE_BASE + 0x100)
for i in 0 ..< 64:
bitmap[i] = 0 # All slots free
# Shared Rings Setup (SYSTABLE area)
# Layout: 0x0000=SysTable, 0x2000=RX, 0x4000=TX, 0x6000=Event, 0x8000=CMD, 0xA000=Input
# Layout: 0x0000=SysTable, 0x0100=Bitmap, 0x2000=RX, 0x4000=TX, 0x6000=Event, 0x8000=CMD, 0xA000=Input
# 0xC000=Net_RX, 0xE000=Net_TX, 0x10000=User_Slab
# Each ring is ~6KB-8KB, so we need 8KB (0x2000) spacing.
chan_rx.ring = cast[ptr HAL_Ring[IonPacket]](SYSTABLE_BASE + 0x2000)
chan_tx.ring = cast[ptr HAL_Ring[IonPacket]](SYSTABLE_BASE + 0x4000)
@ -510,20 +577,31 @@ proc kmain() {.exportc, cdecl.} =
chan_cmd.ring = cast[ptr HAL_Ring[CmdPacket]](SYSTABLE_BASE + 0x8000)
chan_input.ring = cast[ptr HAL_Ring[IonPacket]](SYSTABLE_BASE + 0xA000)
# Network Rings (Shared with Userland)
chan_net_rx.ring = cast[ptr HAL_Ring[IonPacket]](SYSTABLE_BASE + 0xC000)
chan_net_tx.ring = cast[ptr HAL_Ring[IonPacket]](SYSTABLE_BASE + 0xE000)
# Initialize Shared Memory Rings
chan_rx.ring.mask = 255; chan_tx.ring.mask = 255
ring_event.mask = 255; chan_cmd.ring.mask = 255
chan_input.ring.mask = 255
chan_net_rx.ring.mask = 255; chan_net_tx.ring.mask = 255
# Force reset pointers to zero
chan_rx.ring.head = 0; chan_rx.ring.tail = 0
chan_tx.ring.head = 0; chan_tx.ring.tail = 0
ring_event.head = 0; ring_event.tail = 0
chan_cmd.ring.head = 0; chan_cmd.ring.tail = 0
chan_input.ring.head = 0; chan_input.ring.tail = 0
chan_net_rx.ring.head = 0; chan_net_rx.ring.tail = 0
chan_net_tx.ring.head = 0; chan_net_tx.ring.tail = 0
sys.s_rx = chan_rx.ring; sys.s_tx = chan_tx.ring; sys.s_event = ring_event
sys.s_cmd = chan_cmd.ring; sys.s_input = chan_input.ring
# Map Network Rings (Now in shared memory)
sys.s_net_rx = chan_net_rx.ring
sys.s_net_tx = chan_net_tx.ring
sys.magic = 0x4E585553
sys.fb_addr = fb_kern_get_addr()
sys.fb_width = 1920; sys.fb_height = 1080; sys.fb_stride = 1920 * 4; sys.fb_bpp = 32
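
For orientation, the offsets named in the layout comments above, collected in one place. This is a sketch assembled from this diff; the enum is illustrative, not a header the project ships:

#include <stdint.h>
#include <stdio.h>

/* Shared SYSTABLE region layout (offsets from 0x83000000). */
enum {
    SYSTABLE_OFF_TABLE      = 0x00000,  /* SysTable struct (208 bytes)            */
    SYSTABLE_OFF_BITMAP     = 0x00100,  /* user-slab bitmap (64 bytes, 512 slots) */
    SYSTABLE_OFF_MMIO_ALLOC = 0x00400,  /* PCI MMIO bump-allocator cursor         */
    SYSTABLE_OFF_RX         = 0x02000,  /* chan_rx ring                           */
    SYSTABLE_OFF_TX         = 0x04000,  /* chan_tx ring                           */
    SYSTABLE_OFF_EVENT      = 0x06000,  /* event ring                             */
    SYSTABLE_OFF_CMD        = 0x08000,  /* command ring                           */
    SYSTABLE_OFF_INPUT      = 0x0A000,  /* input ring                             */
    SYSTABLE_OFF_NET_RX     = 0x0C000,  /* shared network RX ring                 */
    SYSTABLE_OFF_NET_TX     = 0x0E000,  /* shared network TX ring                 */
    SYSTABLE_OFF_USER_SLAB  = 0x10000,  /* 512 x 2 KB user packet slab            */
};

int main(void) {
    const uint64_t base = 0x83000000ULL;
    printf("net_rx ring at 0x%llx, user slab at 0x%llx\n",
           (unsigned long long)(base + SYSTABLE_OFF_NET_RX),
           (unsigned long long)(base + SYSTABLE_OFF_USER_SLAB));
    return 0;
}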
@ -543,6 +621,7 @@ proc kmain() {.exportc, cdecl.} =
init_fiber(addr fiber_nexshell, nexshell_main, addr stack_nexshell[0], sizeof(stack_nexshell))
let shell_spawn_id = emit_fiber_spawn(2, 0, boot_id) # NexShell fiber
# NetSwitch Spawn
init_fiber(addr fiber_netswitch, fiber_netswitch_entry, addr stack_netswitch[0], sizeof(stack_netswitch))
let netswitch_spawn_id = emit_fiber_spawn(6, 0, boot_id) # NetSwitch fiber
discard netswitch_spawn_id
@ -583,9 +662,21 @@ proc kmain() {.exportc, cdecl.} =
asm "csrsi sstatus, 2"
{.emit: "asm volatile(\"csrs sie, %0\" : : \"r\"(1L << 9));".}
let plic_base = 0x0c000000'u64
cast[ptr uint32](plic_base + 0x2000 + 0x80)[] = (1'u32 shl 10)
# Priority (each IRQ has a 4-byte priority register)
cast[ptr uint32](plic_base + 40)[] = 1 # UART (IRQ 10: 10*4 = 40)
# cast[ptr uint32](plic_base + 128)[] = 1 # VirtIO-Net (IRQ 32: 32*4 = 128)
# cast[ptr uint32](plic_base + 132)[] = 1 # VirtIO-Net (IRQ 33: 33*4 = 132)
# cast[ptr uint32](plic_base + 136)[] = 1 # VirtIO-Net (IRQ 34: 34*4 = 136)
# cast[ptr uint32](plic_base + 140)[] = 1 # VirtIO-Net (IRQ 35: 35*4 = 140)
# Enable (Supervisor Context 1)
# IRQs 0-31
# cast[ptr uint32](plic_base + 0x2000 + 0x80)[] = (1'u32 shl 10)
# IRQs 32-63
# cast[ptr uint32](plic_base + 0x2000 + 0x80 + 4)[] = 0x0000000F # Enable 32,33,34,35
# Threshold
cast[ptr uint32](plic_base + 0x201000)[] = 0
cast[ptr uint32](plic_base + 40)[] = 1
active_fibers_arr[0] = addr fiber_ion; active_fibers_arr[1] = addr fiber_nexshell
active_fibers_arr[2] = addr fiber_compositor; active_fibers_arr[3] = addr fiber_netswitch
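
A note on the PLIC arithmetic above: priority registers are 4 bytes per IRQ, enable bits live in per-context banks at base + 0x2000, and each context has its own threshold register. A host-runnable sketch of the address math the constants above assume (QEMU 'virt' PLIC, S-mode hart 0 = context 1; helpers illustrative):

#include <stdint.h>
#include <stdio.h>

#define PLIC_BASE 0x0c000000ULL

static uint64_t plic_priority(uint32_t irq)             { return PLIC_BASE + 4ULL * irq; }
static uint64_t plic_enable(uint32_t ctx, uint32_t irq) { return PLIC_BASE + 0x2000 + 0x80ULL * ctx + 4ULL * (irq / 32); }
static uint64_t plic_threshold(uint32_t ctx)            { return PLIC_BASE + 0x200000 + 0x1000ULL * ctx; }

int main(void) {
    printf("prio(10)     = 0x%llx\n", (unsigned long long)plic_priority(10));   /* base + 40       */
    printf("enable(1,10) = 0x%llx\n", (unsigned long long)plic_enable(1, 10));  /* base + 0x2080   */
    printf("enable(1,32) = 0x%llx\n", (unsigned long long)plic_enable(1, 32));  /* base + 0x2084   */
    printf("thresh(1)    = 0x%llx\n", (unsigned long long)plic_threshold(1));   /* base + 0x201000 */
    return 0;
}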

View File

@ -252,6 +252,8 @@ export fn rss_trap_handler(frame: *TrapFrame) void {
if (irq == 10) { // UART0 is IRQ 10 on Virt machine
uart.poll_input();
} else if (irq >= 32 and irq <= 35) {
virtio_net.virtio_net_poll();
} else if (irq == 0) {
// Spurious or no pending interrupt
}
@ -321,7 +323,9 @@ export fn zig_entry() void {
uart.print("[Rumpk L0] zig_entry reached\n");
uart.print("[Rumpk RISC-V] Handing off to Nim L1...\n");
_ = virtio_net;
// Networking is initialized by kmain -> rumpk_net_init
NimMain();
kmain();
rumpk_halt();
@ -353,7 +357,7 @@ extern fn hal_surface_init() void;
export fn hal_io_init() void {
uart.init();
hal_surface_init();
virtio_net.init();
// Network init is now called explicitly by kernel (rumpk_net_init)
virtio_block.init();
}

View File

@ -200,14 +200,17 @@ pub fn create_worker_map(stack_base: u64, stack_size: u64, packet_addr: u64, phy
try map_range(root, UART_BASE, UART_BASE, PAGE_SIZE, PTE_R | PTE_W);
try map_range(root, PLIC_BASE, PLIC_BASE, 0x400000, PTE_R | PTE_W);
try map_range(root, VIRTIO_BASE, VIRTIO_BASE, 0x8000, PTE_R | PTE_W);
try map_range(root, 0x30000000, 0x30000000, 0x10000000, PTE_R | PTE_W); // PCIe ECAM
try map_range(root, 0x40000000, 0x40000000, 0x10000000, PTE_R | PTE_W); // PCIe MMIO
try map_range(root, 0x20000000, 0x20000000, 0x10000, PTE_R | PTE_W); // PTY Slave
// 4. Overlap stack with user access
try map_range(root, stack_base, stack_base, stack_size, PTE_R | PTE_W | PTE_U);
// 5. Shared SysTable & Rings (0x83000000) - Map 32KB (8 pages)
// 5. Shared SysTable & Rings & User Slab (0x83000000) - Map 256KB (64 pages; covers up to 0x40000)
var j: u64 = 0;
while (j < 8) : (j += 1) {
while (j < 64) : (j += 1) {
const addr = packet_addr + (j * PAGE_SIZE);
try map_page(root, addr, addr, PTE_R | PTE_W | PTE_U);
}

View File

@ -63,11 +63,11 @@ export fn malloc(size: usize) ?*anyopaque {
}
// Trace allocations
// uart.print("[Alloc] ");
// uart.print_hex(size);
// uart.print(" -> Used: ");
// uart.print_hex(aligned_idx + total_needed);
// uart.print("\n");
uart.print("[Alloc] ");
uart.print_hex(size);
uart.print(" -> Used: ");
uart.print_hex(aligned_idx + total_needed);
uart.print("\n");
const base_ptr = &heap[aligned_idx];
const header = @as(*BlockHeader, @ptrCast(@alignCast(base_ptr)));
@ -131,5 +131,11 @@ export fn calloc(nmemb: usize, size: usize) ?*anyopaque {
// =========================================================
export fn get_ticks() u32 {
return 0; // TODO: Implement real timer
var time_val: u64 = 0;
asm volatile ("rdtime %[ret]"
: [ret] "=r" (time_val),
);
// QEMU 'virt' RISC-V timebase is 10MHz (10,000,000 Hz).
// Convert to milliseconds: val / 10,000.
return @truncate(time_val / 10000);
}

View File

@ -279,3 +279,15 @@ pub fn print_hex(value: usize) void {
export fn uart_print_hex(value: u64) void {
print_hex(value);
}
pub fn print_hex8(value: u8) void {
const hex_chars = "0123456789ABCDEF";
const nibble1 = (value >> 4) & 0xF;
const nibble2 = value & 0xF;
write_char(hex_chars[nibble1]);
write_char(hex_chars[nibble2]);
}
export fn uart_print_hex8(value: u8) void {
print_hex8(value);
}

View File

@ -17,15 +17,26 @@ const std = @import("std");
const uart = @import("uart.zig");
const pci = @import("virtio_pci.zig");
// VirtIO Feature Bits
const VIRTIO_F_VERSION_1 = 32;
const VIRTIO_NET_F_MAC = 5;
const VIRTIO_NET_F_MRG_RXBUF = 15;
// Status Bits
const VIRTIO_CONFIG_S_ACKNOWLEDGE = 1;
const VIRTIO_CONFIG_S_DRIVER = 2;
const VIRTIO_CONFIG_S_DRIVER_OK = 4;
const VIRTIO_CONFIG_S_FEATURES_OK = 8;
// External Nim functions
extern fn net_ingest_packet(data: [*]const u8, len: usize) bool;
// External C/Zig stubs
extern fn malloc(size: usize) ?*anyopaque;
extern fn ion_alloc_raw(out_id: *u16) u64;
extern fn ion_alloc_shared(out_id: *u16) u64;
extern fn ion_free_raw(id: u16) void;
extern fn ion_ingress(id: u16, len: u16) void;
extern fn ion_ingress(id: u16, len: u16, offset: u16) void;
extern fn ion_get_virt(id: u16) [*]u8;
extern fn ion_get_phys(id: u16) u64;
extern fn ion_tx_pop(out_id: *u16, out_len: *u16) bool;
@ -34,20 +45,27 @@ var global_driver: ?VirtioNetDriver = null;
var poll_count: u32 = 0;
export fn virtio_net_poll() void {
pub export fn virtio_net_poll() void {
poll_count += 1;
// Periodic debug: show queue state (SILENCED FOR PRODUCTION)
// if (poll_count == 1 or (poll_count % 1000000 == 0)) {
// if (global_driver) |*d| {
// if (d.rx_queue) |_| {
// asm volatile ("fence" ::: .{ .memory = true });
// uart.print("[VirtIO] Poll #");
// uart.print_hex(poll_count);
// uart.print("\n");
// }
// }
// }
// Periodic debug: show queue state
if (poll_count == 1 or (poll_count % 100000 == 0)) {
if (global_driver) |*d| {
if (d.rx_queue) |q| {
const hw_idx = q.used.idx;
const drv_idx = q.index;
uart.print("[VirtIO] Poll #");
uart.print_hex(poll_count);
uart.print(" RX HW:");
uart.print_hex(hw_idx);
uart.print(" DRV:");
uart.print_hex(drv_idx);
uart.print(" Avail:");
uart.print_hex(q.avail.idx);
uart.print("\n");
}
}
}
if (global_driver) |*d| {
if (d.rx_queue) |q| {
@ -80,7 +98,21 @@ export fn virtio_net_send(data: [*]const u8, len: usize) void {
}
}
pub fn init() void {
pub export fn virtio_net_get_mac(out_mac: [*]u8) void {
if (global_driver) |*d| {
d.get_mac(out_mac);
} else {
// Default fallback if no driver
out_mac[0] = 0x00;
out_mac[1] = 0x00;
out_mac[2] = 0x00;
out_mac[3] = 0x00;
out_mac[4] = 0x00;
out_mac[5] = 0x00;
}
}
pub export fn rumpk_net_init() void {
if (VirtioNetDriver.probe()) |_| {
uart.print("[Rumpk L0] Networking initialized (Sovereign).\n");
}
@ -92,6 +124,39 @@ pub const VirtioNetDriver = struct {
rx_queue: ?*Virtqueue = null,
tx_queue: ?*Virtqueue = null,
pub fn get_mac(self: *VirtioNetDriver, out: [*]u8) void {
uart.print("[VirtIO-Net] Reading MAC from device_cfg...\n");
if (self.transport.is_modern) {
// Use device_cfg directly - this is the VirtIO-Net specific config
if (self.transport.device_cfg) |cfg| {
const ptr: [*]volatile u8 = @ptrCast(cfg);
uart.print(" DeviceCfg at: ");
uart.print_hex(@intFromPtr(cfg));
uart.print("\n MAC bytes: ");
for (0..6) |i| {
out[i] = ptr[i];
uart.print_hex8(ptr[i]);
if (i < 5) uart.print(":");
}
uart.print("\n");
} else {
uart.print(" ERROR: device_cfg is null!\n");
// Fallback to zeros
for (0..6) |i| {
out[i] = 0;
}
}
} else {
// Legacy
// Device Config starts at offset 20.
const base = self.transport.legacy_bar + 20;
for (0..6) |i| {
out[i] = @as(*volatile u8, @ptrFromInt(base + i)).*;
}
}
}
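
For context, the six bytes get_mac copies sit at the very start of the virtio-net device config area. A sketch of the leading fields, following the VirtIO spec field order (the struct itself is illustrative, not a type the driver defines):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct virtio_net_config_head {
    uint8_t  mac[6];               /* offset 0: valid when VIRTIO_NET_F_MAC is offered */
    uint16_t status;               /* offset 6 */
    uint16_t max_virtqueue_pairs;  /* offset 8 */
    uint16_t mtu;                  /* offset 10 */
};

int main(void) {
    /* get_mac() above simply copies bytes 0..5 of device_cfg. */
    printf("mac@%zu status@%zu mtu@%zu\n",
           offsetof(struct virtio_net_config_head, mac),
           offsetof(struct virtio_net_config_head, status),
           offsetof(struct virtio_net_config_head, mtu));
    return 0;
}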
pub fn init(base: usize, irq_num: u32) VirtioNetDriver {
return .{
.transport = pci.VirtioTransport.init(base),
@ -147,10 +212,61 @@ pub const VirtioNetDriver = struct {
self.transport.reset();
// 3. Acknowledge & Sense Driver
self.transport.add_status(1); // ACKNOWLEDGE
self.transport.add_status(2); // DRIVER
self.transport.add_status(VIRTIO_CONFIG_S_ACKNOWLEDGE);
self.transport.add_status(VIRTIO_CONFIG_S_DRIVER);
// 4. Feature Negotiation
if (self.transport.is_modern) {
uart.print("[VirtIO] Starting feature negotiation...\n");
if (self.transport.common_cfg == null) {
uart.print("[VirtIO] ERROR: common_cfg is null!\n");
return false;
}
const cfg = self.transport.common_cfg.?;
uart.print("[VirtIO] common_cfg addr: ");
uart.print_hex(@intFromPtr(cfg));
uart.print("\n");
uart.print("[VirtIO] Reading device features...\n");
// Read Device Features (Page 0)
cfg.device_feature_select = 0;
asm volatile ("fence" ::: .{ .memory = true });
const f_low = cfg.device_feature;
// Read Device Features (Page 1)
cfg.device_feature_select = 1;
asm volatile ("fence" ::: .{ .memory = true });
const f_high = cfg.device_feature;
uart.print("[VirtIO] Device Features: ");
uart.print_hex(f_low);
uart.print(" ");
uart.print_hex(f_high);
uart.print("\n");
// Accept VERSION_1 (Modern) and MAC
const accept_low: u32 = (1 << VIRTIO_NET_F_MAC);
const accept_high: u32 = (1 << (VIRTIO_F_VERSION_1 - 32));
uart.print("[VirtIO] Writing driver features...\n");
cfg.driver_feature_select = 0;
cfg.driver_feature = accept_low;
asm volatile ("fence" ::: .{ .memory = true });
cfg.driver_feature_select = 1;
cfg.driver_feature = accept_high;
asm volatile ("fence" ::: .{ .memory = true });
uart.print("[VirtIO] Checking feature negotiation...\n");
self.transport.add_status(VIRTIO_CONFIG_S_FEATURES_OK);
asm volatile ("fence" ::: .{ .memory = true });
if ((self.transport.get_status() & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
uart.print("[VirtIO] Feature negotiation failed!\n");
return false;
}
uart.print("[VirtIO] Features accepted.\n");
}
// 5. Setup RX Queue (0)
self.transport.select_queue(0);
const rx_count = self.transport.get_queue_size();
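
Because VIRTIO_F_VERSION_1 is bit 32, the negotiation above has to select feature dword 1 and shift the bit down by 32. A quick host-side check of the two masks the driver writes (illustrative):

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

#define VIRTIO_NET_F_MAC   5    /* device exposes a MAC in device config */
#define VIRTIO_F_VERSION_1 32   /* modern (VirtIO 1.x) device            */

int main(void) {
    /* The 64-bit feature space is accessed as two 32-bit dwords via
     * device_feature_select / driver_feature_select. */
    uint32_t accept_low  = 1u << VIRTIO_NET_F_MAC;           /* dword 0 */
    uint32_t accept_high = 1u << (VIRTIO_F_VERSION_1 - 32);  /* dword 1 */

    assert(accept_low == 0x20 && accept_high == 0x1);
    printf("driver_feature[0]=0x%08x driver_feature[1]=0x%08x\n", accept_low, accept_high);
    return 0;
}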
@ -212,6 +328,12 @@ pub const VirtioNetDriver = struct {
const raw_ptr = malloc(total_size + 4096) orelse return error.OutOfMemory;
const aligned_addr = (@intFromPtr(raw_ptr) + 4095) & ~@as(usize, 4095);
// Zero out the queue memory to ensure clean state
const byte_ptr: [*]u8 = @ptrFromInt(aligned_addr);
for (0..total_size) |i| {
byte_ptr[i] = 0;
}
const q_ptr_raw = malloc(@sizeOf(Virtqueue)) orelse return error.OutOfMemory;
const q_ptr: *Virtqueue = @ptrCast(@alignCast(q_ptr_raw));
@ -221,6 +343,16 @@ pub const VirtioNetDriver = struct {
q_ptr.avail = @ptrFromInt(aligned_addr + desc_size);
q_ptr.used = @ptrFromInt(aligned_addr + used_offset);
uart.print(" [Queue Setup] Base: ");
uart.print_hex(aligned_addr);
uart.print(" Desc: ");
uart.print_hex(@intFromPtr(q_ptr.desc));
uart.print(" Avail: ");
uart.print_hex(@intFromPtr(q_ptr.avail));
uart.print(" Used: ");
uart.print_hex(@intFromPtr(q_ptr.used));
uart.print("\n");
// Allocate ID tracking array
const ids_size = @as(usize, count) * @sizeOf(u16);
const ids_ptr = malloc(ids_size) orelse return error.OutOfMemory;
@ -236,7 +368,7 @@ pub const VirtioNetDriver = struct {
if (is_rx) {
// RX: Allocate Initial Slabs
phys_addr = ion_alloc_raw(&slab_id);
phys_addr = ion_alloc_shared(&slab_id);
if (phys_addr == 0) {
uart.print("[VirtIO] RX ION Alloc Failed. OOM.\n");
return error.OutOfMemory;
@ -298,7 +430,7 @@ pub const VirtioNetDriver = struct {
var replenished: bool = false;
while (q.index != hw_idx) {
// uart.print("[VirtIO RX] Processing Packet...\n");
uart.print("[VirtIO RX] Processing Packet...\n");
const elem = used_ring[q.index % q.num];
const desc_idx = elem.id;
@ -313,7 +445,8 @@ pub const VirtioNetDriver = struct {
// uart.print_hex(slab_id);
// uart.print("\n");
const header_len: u32 = 10;
// Modern VirtIO-net header: 10 bytes (legacy) + 2 bytes (num_buffers) = 12 bytes
const header_len: u32 = 12;
if (len > header_len) {
// Call ION - Pass only the Ethernet Frame (Skip VirtIO Header)
// ion_ingress receives slab_id which contains full buffer.
@ -322,7 +455,7 @@ pub const VirtioNetDriver = struct {
// The consumer would otherwise have to skip the virtio-net header itself.
// We adjust here instead: the header length is passed as an explicit offset,
// so ion_ingress can point pkt.data/phys directly at the Ethernet frame.
ion_ingress(slab_id, @intCast(len - header_len));
ion_ingress(slab_id, @intCast(len - header_len), @intCast(header_len));
} else {
uart.print(" [Warn] Packet too short/empty\n");
ion_free_raw(slab_id);
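
The 10-to-12 byte change reflects the virtio-net header gaining its num_buffers field once VIRTIO_F_VERSION_1 is negotiated. A sketch of the layout the RX/TX paths now assume (field names follow the VirtIO spec; the struct is illustrative):

#include <stdint.h>

/* Legacy devices use only the first 10 bytes; with VIRTIO_F_VERSION_1 (or
 * MRG_RXBUF) the trailing num_buffers field is present, giving 12 bytes. */
struct virtio_net_hdr_v1 {
    uint8_t  flags;
    uint8_t  gso_type;
    uint16_t hdr_len;
    uint16_t gso_size;
    uint16_t csum_start;
    uint16_t csum_offset;
    uint16_t num_buffers;   /* the extra 2 bytes */
};

int main(void) {
    _Static_assert(sizeof(struct virtio_net_hdr_v1) == 12, "modern header is 12 bytes");
    /* The Ethernet frame handed to ion_ingress starts at this offset. */
    return 0;
}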
@ -330,7 +463,7 @@ pub const VirtioNetDriver = struct {
// Replenish
var new_id: u16 = 0;
const new_phys = ion_alloc_raw(&new_id);
const new_phys = ion_alloc_shared(&new_id);
if (new_phys != 0) {
q.desc[desc_idx].addr = new_phys;
q.ids[desc_idx] = new_id;
@ -380,6 +513,8 @@ pub const VirtioNetDriver = struct {
const idx = avail_phase % q.num;
const phys_addr = ion_get_phys(slab_id);
const virt_addr = ion_get_virt(slab_id);
@memset(virt_addr[0..10], 0); // Pre-clear the start of the header area (the device rewrites the virtio-net header on RX)
const desc = &q.desc[idx];
desc.addr = phys_addr;
@ -404,8 +539,17 @@ pub const VirtioNetDriver = struct {
const q = self.tx_queue orelse return;
const avail_ring = get_avail_ring(q.avail);
uart.print("[VirtIO TX] Packet Data: ");
for (0..16) |i| {
if (i < len) {
uart.print_hex8(data[i]);
uart.print(" ");
}
}
uart.print("\n");
var slab_id: u16 = 0;
const phys = ion_alloc_raw(&slab_id);
const phys = ion_alloc_shared(&slab_id);
if (phys == 0) {
uart.print("[VirtIO] TX OOM\n");
return;
@ -419,7 +563,8 @@ pub const VirtioNetDriver = struct {
const desc = &q.desc[desc_idx];
q.ids[desc_idx] = slab_id;
const header_len: usize = 10;
// Modern VirtIO-net header: 10 bytes (legacy) + 2 bytes (num_buffers) = 12 bytes
const header_len: usize = 12;
@memset(buf_ptr[0..header_len], 0);
const copy_len = if (len > 2000) 2000 else len;

View File

@ -23,7 +23,11 @@ const PCI_CAP_PTR = 0x34;
// Global Allocator for I/O and MMIO
var next_io_port: u32 = 0x1000;
pub var next_mmio_addr: u32 = 0x40000000;
const MMIO_ALLOC_ADDR: usize = 0x83000400;
fn get_mmio_alloc() *u64 {
return @ptrFromInt(MMIO_ALLOC_ADDR);
}
// VirtIO Capability Types
const VIRTIO_PCI_CAP_COMMON_CFG = 1;
@ -44,6 +48,7 @@ pub const VirtioTransport = struct {
notify_cfg: ?usize, // Base of notification region
notify_off_multiplier: u32,
isr_cfg: ?*volatile u8,
device_cfg: ?*volatile u8,
pub fn init(ecam_base: usize) VirtioTransport {
return .{
@ -54,13 +59,14 @@ pub const VirtioTransport = struct {
.notify_cfg = null,
.notify_off_multiplier = 0,
.isr_cfg = null,
.device_cfg = null,
};
}
pub fn probe(self: *VirtioTransport) bool {
if (next_mmio_addr == 0) {
next_mmio_addr = 0x40000000;
uart.print("[VirtIO-PCI] WARNING: next_mmio_addr was ZERO! Restored to 0x40000000\n");
const mmio_alloc = get_mmio_alloc();
if (mmio_alloc.* < 0x40000000) {
mmio_alloc.* = 0x40000000;
}
uart.print("[VirtIO-PCI] Probing capabilities...\n");
@ -70,6 +76,18 @@ pub const VirtioTransport = struct {
// 2. Check for Capabilities
const status_ptr: *volatile u16 = @ptrFromInt(self.base_addr + PCI_STATUS);
uart.print(" [PCI BARs] ");
for (0..6) |i| {
const bar_val = @as(*volatile u32, @ptrFromInt(self.base_addr + 0x10 + (i * 4))).*;
uart.print("BAR");
uart.print_hex8(@intCast(i));
uart.print(":");
uart.print_hex(bar_val);
uart.print(" ");
}
uart.print("\n");
if ((status_ptr.* & 0x10) != 0) {
// Has Capabilities
var cap_offset = @as(*volatile u8, @ptrFromInt(self.base_addr + PCI_CAP_PTR)).*;
@ -89,6 +107,17 @@ pub const VirtioTransport = struct {
const cap_type = @as(*volatile u8, @ptrFromInt(cap_addr + 3)).*;
const bar_idx = @as(*volatile u8, @ptrFromInt(cap_addr + 4)).*;
const offset = @as(*volatile u32, @ptrFromInt(cap_addr + 8)).*;
const length = @as(*volatile u32, @ptrFromInt(cap_addr + 12)).*;
uart.print(" [VirtIO Cap] Type:");
uart.print_hex(cap_type);
uart.print(" BAR:");
uart.print_hex(bar_idx);
uart.print(" Off:");
uart.print_hex(offset);
uart.print(" Len:");
uart.print_hex(length);
uart.print("\n");
if (bar_idx >= 6) {
uart.print("[VirtIO-PCI] Ignoring Invalid BAR Index in Cap\n");
@ -102,17 +131,32 @@ pub const VirtioTransport = struct {
// Check if BAR is assigned and is a Memory BAR (bit 0 == 0)
if ((bar_val & 0x1) == 0 and (bar_val & 0xFFFFFFF0) == 0) {
uart.print("[VirtIO-PCI] Initializing Unassigned Memory BAR ");
uart.print_hex(@as(u64, bar_idx));
uart.print("[VirtIO-PCI] dev:");
uart.print_hex(self.base_addr);
uart.print(" ALLOC_VAL: ");
uart.print_hex(mmio_alloc.*);
uart.print(" Initializing BAR");
uart.print_hex8(@intCast(bar_idx));
uart.print(" at ");
uart.print_hex(next_mmio_addr);
uart.print_hex(mmio_alloc.*);
uart.print("\n");
bar_ptr.* = next_mmio_addr;
bar_ptr.* = @intCast(mmio_alloc.* & 0xFFFFFFFF);
// Handle 64-bit BAR (Bit 2 of BAR value before write, or check type)
// If bit 2 is 1 (0b100), it's 64-bit.
if ((bar_val & 0x4) != 0) {
const high_ptr = @as(*volatile u32, @ptrFromInt(self.base_addr + 0x10 + (@as(usize, bar_idx) * 4) + 4));
high_ptr.* = @intCast(mmio_alloc.* >> 32);
}
const rb = bar_ptr.*;
uart.print("[VirtIO-PCI] BAR Assigned. Readback: ");
uart.print("[VirtIO-PCI] dev:");
uart.print_hex(self.base_addr);
uart.print(" BAR Assigned. Readback: ");
uart.print_hex(rb);
uart.print("\n");
next_mmio_addr += 0x10000; // Increment 64KB
mmio_alloc.* += 0x10000; // Increment 64KB
}
// Refresh BAR resolution (Memory only for Modern)
@ -120,8 +164,18 @@ pub const VirtioTransport = struct {
if (cap_type == VIRTIO_PCI_CAP_COMMON_CFG) {
uart.print("[VirtIO-PCI] Found Modern Common Config\n");
uart.print(" BAR Base: ");
uart.print_hex(@as(u64, bar_base));
uart.print(" Offset: ");
uart.print_hex(@as(u64, offset));
uart.print("\n");
self.common_cfg = @ptrFromInt(bar_base + offset);
self.is_modern = true;
uart.print(" CommonCfg Ptr: ");
uart.print_hex(@intFromPtr(self.common_cfg.?));
uart.print("\n");
}
if (cap_type == VIRTIO_PCI_CAP_NOTIFY_CFG) {
uart.print("[VirtIO-PCI] Found Modern Notify Config\n");
@ -132,6 +186,15 @@ pub const VirtioTransport = struct {
uart.print("[VirtIO-PCI] Found Modern ISR Config\n");
self.isr_cfg = @ptrFromInt(bar_base + offset);
}
if (cap_type == VIRTIO_PCI_CAP_DEVICE_CFG) {
uart.print("[VirtIO-PCI] Found Modern Device Config\n");
uart.print(" BAR Base: ");
uart.print_hex(@as(u64, bar_base));
uart.print(" Offset: ");
uart.print_hex(@as(u64, offset));
uart.print("\n");
self.device_cfg = @ptrFromInt(bar_base + offset);
}
}
uart.print("[VirtIO-PCI] Next Cap...\n");
cap_offset = cap_next;

View File

@ -105,53 +105,119 @@ int execv(const char *path, char *const argv[]) {
return (int)syscall(0x600, (long)path, 0, 0);
}
int printf(const char *format, ...) {
va_list args;
va_start(args, format);
const char *p = format;
// Robust Formatter
typedef struct {
char *buf;
size_t size;
size_t pos;
} OutCtx;
static void out_char(OutCtx *ctx, char c) {
if (ctx->buf && ctx->size > 0 && ctx->pos < ctx->size - 1) {
ctx->buf[ctx->pos] = c;
}
ctx->pos++;
}
static void out_num(OutCtx *ctx, unsigned long n, int base, int width, int zeropad, int upper) {
char buf[64];
const char *digits = upper ? "0123456789ABCDEF" : "0123456789abcdef";
int i = 0;
if (n == 0) buf[i++] = '0';
else while (n > 0) { buf[i++] = digits[n % base]; n /= base; }
while (i < width) buf[i++] = (zeropad ? '0' : ' ');
while (i > 0) out_char(ctx, buf[--i]);
}
static int vformat(OutCtx *ctx, const char *fmt, va_list ap) {
if (!fmt) return 0;
const char *p = fmt;
ctx->pos = 0;
while (*p) {
if (*p == '%' && *(p+1)) {
p++;
if (*p == 's') {
const char *s = va_arg(args, const char*);
console_write(s, strlen(s));
} else if (*p == 'd') {
int i = va_arg(args, int);
char buf[16];
int len = 0;
if (i == 0) { console_write("0", 1); }
else {
if (i < 0) { console_write("-", 1); i = -i; }
while (i > 0) { buf[len++] = (i % 10) + '0'; i /= 10; }
for (int j = 0; j < len/2; j++) { char t = buf[j]; buf[j] = buf[len-1-j]; buf[len-1-j] = t; }
console_write(buf, len);
}
} else {
console_write("%", 1);
console_write(p, 1);
if (*p != '%') { out_char(ctx, *p++); continue; }
p++; // skip %
if (!*p) break;
int zeropad = 0, width = 0, l_mod = 0, h_mod = 0;
if (*p == '0') { zeropad = 1; p++; }
while (*p >= '0' && *p <= '9') { width = width * 10 + (*p - '0'); p++; }
while (*p == 'l') { l_mod++; p++; }
if (*p == 'h') { h_mod = 1; p++; }
if (!*p) break;
switch (*p) {
case 's': {
const char *s = va_arg(ap, const char *);
if (!s) s = "(null)";
while (*s) out_char(ctx, *s++);
break;
}
} else {
console_write(p, 1);
case 'c': out_char(ctx, (char)va_arg(ap, int)); break;
case 'd':
case 'i': {
long n = (l_mod >= 1) ? va_arg(ap, long) : va_arg(ap, int);
unsigned long un;
if (n < 0) { out_char(ctx, '-'); un = 0UL - (unsigned long)n; }
else un = (unsigned long)n;
out_num(ctx, un, 10, width, zeropad, 0);
break;
}
case 'u': {
unsigned long n = (l_mod >= 1) ? va_arg(ap, unsigned long) : va_arg(ap, unsigned int);
out_num(ctx, n, 10, width, zeropad, 0);
break;
}
case 'p':
case 'x':
case 'X': {
unsigned long n;
if (*p == 'p') n = (unsigned long)va_arg(ap, void *);
else n = (l_mod >= 1) ? va_arg(ap, unsigned long) : va_arg(ap, unsigned int);
out_num(ctx, n, 16, width, zeropad, (*p == 'X'));
break;
}
case '%': out_char(ctx, '%'); break;
default: out_char(ctx, '%'); out_char(ctx, *p); break;
}
p++;
}
va_end(args);
return 0;
}
int sprintf(char *str, const char *format, ...) {
if (str) str[0] = 0;
return 0;
}
int snprintf(char *str, size_t size, const char *format, ...) {
if (str && size > 0) str[0] = 0;
return 0;
if (ctx->buf && ctx->size > 0) {
size_t end = (ctx->pos < ctx->size) ? ctx->pos : ctx->size - 1;
ctx->buf[end] = '\0';
}
return (int)ctx->pos;
}
int vsnprintf(char *str, size_t size, const char *format, va_list ap) {
if (str && size > 0) str[0] = 0;
return 0;
OutCtx ctx = { .buf = str, .size = size, .pos = 0 };
return vformat(&ctx, format, ap);
}
int snprintf(char *str, size_t size, const char *format, ...) {
va_list ap; va_start(ap, format);
int res = vsnprintf(str, size, format, ap);
va_end(ap);
return res;
}
int sprintf(char *str, const char *format, ...) {
va_list ap; va_start(ap, format);
int res = vsnprintf(str, (size_t)-1, format, ap);
va_end(ap);
return res;
}
int vprintf(const char *format, va_list ap) {
char tmp[1024];
int n = vsnprintf(tmp, sizeof(tmp), format, ap);
if (n > 0) console_write(tmp, (n < (int)sizeof(tmp)) ? (size_t)n : sizeof(tmp)-1);
return n;
}
int printf(const char *format, ...) {
va_list ap; va_start(ap, format);
int res = vprintf(format, ap);
va_end(ap);
return res;
}
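
A quick host-side check of the conversions the rewritten formatter supports (%s, %c, %d/%i, %u, %x/%X/%p, field width, zero padding, the 'l' modifier). Linked against a standard libc this shows the reference behaviour the clib version aims to match:

#include <stdio.h>

int main(void) {
    char buf[64];

    int n = snprintf(buf, sizeof(buf), "%s=%d", "mtu", 1500);
    printf("'%s' (len %d)\n", buf, n);          /* 'mtu=1500' (len 8) */

    snprintf(buf, sizeof(buf), "%08x", 0xC0FFEEu);
    printf("'%s'\n", buf);                      /* '00c0ffee'         */

    snprintf(buf, sizeof(buf), "%lu %c %X", 81920ul, '!', 0xBEEFu);
    printf("'%s'\n", buf);                      /* '81920 ! BEEF'     */

    snprintf(buf, 4, "%d", 123456);             /* truncates to '123', still returns 6 */
    printf("'%s'\n", buf);
    return 0;
}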
int fwrite(const void *ptr, size_t size, size_t nmemb, void *stream) {

View File

@ -36,6 +36,16 @@ typedef uintptr_t mem_ptr_t;
// Protection type (required for SYS_LIGHTWEIGHT_PROT even in NO_SYS mode)
typedef uint32_t sys_prot_t;
// =========================================================
// Endianness (RISC-V 64 is Little Endian)
// =========================================================
#undef LITTLE_ENDIAN
#define LITTLE_ENDIAN 1234
#undef BIG_ENDIAN
#define BIG_ENDIAN 4321
#undef BYTE_ORDER
#define BYTE_ORDER LITTLE_ENDIAN
// =========================================================
// Compiler Hints
// =========================================================
@ -53,11 +63,17 @@ typedef uint32_t sys_prot_t;
// Diagnostics and Assertions
// =========================================================
// Platform diagnostics (unconditionally disabled for now)
#define LWIP_PLATFORM_DIAG(x) do {} while(0)
// Platform diagnostics
extern void lwip_platform_diag(const char *fmt, ...);
#ifndef LWIP_PLATFORM_DIAG
#define LWIP_PLATFORM_DIAG(x) lwip_platform_diag x
#endif
// Platform assertions (disabled for now)
#define LWIP_PLATFORM_ASSERT(x) do {} while(0)
// Platform assertions
extern void nexus_lwip_panic(const char* msg);
#ifndef LWIP_PLATFORM_ASSERT
#define LWIP_PLATFORM_ASSERT(x) nexus_lwip_panic(x)
#endif
// =========================================================
// Random Number Generation
@ -72,14 +88,15 @@ extern uint32_t syscall_get_random(void);
// Printf Format Specifiers
// =========================================================
// For 64-bit architectures
#define X8_F "02x"
#define U16_F "u"
#define S16_F "d"
#define X16_F "x"
#define U16_F "hu"
#define S16_F "hd"
#define X16_F "hx"
#define U32_F "u"
#define S32_F "d"
#define X32_F "x"
#define SZT_F "zu"
#define SZT_F "lu"
#endif /* LWIP_ARCH_CC_H */
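
These *_F suffixes get spliced into lwIP's debug format strings; for example (host-runnable sketch using the values defined above):

#include <stdio.h>

#define U16_F "hu"
#define X32_F "x"
#define SZT_F "lu"

int main(void) {
    unsigned short len  = 1500;
    unsigned int   addr = 0x0A000202;   /* 10.0.2.2 */
    unsigned long  sz   = 2048;

    /* LWIP_DEBUGF-style messages expand to printf calls of this shape: */
    printf("pbuf len %" U16_F ", ip 0x%" X32_F ", slab %" SZT_F " bytes\n", len, addr, sz);
    return 0;
}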

View File

@ -1,37 +1,82 @@
#ifndef LWIP_HDR_LWIPOPTS_MEMBRANE_H
#define LWIP_HDR_LWIPOPTS_MEMBRANE_H
/**
* @file lwipopts.h
* @brief lwIP Configuration for NexusOS Membrane
*/
#ifndef LWIP_LWIPOPTS_H
#define LWIP_LWIPOPTS_H
// --- LwIP Debug Constants (Needed before opt.h defines them) ---
#define LWIP_DBG_ON 0x80U
#define LWIP_DBG_OFF 0x00U
#define LWIP_DBG_TRACE 0x40U
#define LWIP_DBG_STATE 0x20U
#define LWIP_DBG_FRESH 0x10U
#define LWIP_DBG_HALT 0x08U
// 1. Run in the App's Thread
#define NO_SYS 1
#define LWIP_TIMERS 1
#define LWIP_SOCKET 0
#define LWIP_NETCONN 0
// 2. Protection (Required for sys_prot_t type definition)
#define SYS_LIGHTWEIGHT_PROT 1
// DHCP Support
#define LWIP_DHCP 1
#define LWIP_ACD 0
#define LWIP_DHCP_DOES_ACD_CHECK 0
#define LWIP_AUTOIP 0
#define LWIP_UDP 1
#define LWIP_NETIF_HOSTNAME 1
#define LWIP_RAW 1
// 3. Memory (Internal Pools)
#define MEM_LIBC_MALLOC 0
#define MEMP_MEM_MALLOC 0
#define MEM_SIZE (256 * 1024) // 256KB Heap for LwIP
#define MEMP_NUM_PBUF 64 // High RX capacity
#define PBUF_POOL_SIZE 128 // Large packet pool
#define MEM_ALIGNMENT 64
// Performance & Memory
#define MEM_ALIGNMENT 8
#define MEM_SIZE (64 * 1024)
#define MEMP_NUM_PBUF 16
#define MEMP_NUM_UDP_PCB 4
#define MEMP_NUM_TCP_PCB 4
#define PBUF_POOL_SIZE 32
// 4. Performance (Fast Path)
#define TCP_MSS 1460
#define TCP_WND (16 * TCP_MSS) // Larger window for high throughput
#define LWIP_TCP_KEEPALIVE 1
// Network Interface
#define LWIP_ETHERNET 1
#define LWIP_ARP 1
#define ETHARP_SUPPORT_VLAN 0
// 5. Disable System Features
#define LWIP_NETCONN 0 // We use Raw API
#define LWIP_SOCKET 0 // We implement our own Shim
#define LWIP_STATS 0 // Save cycles
#define LWIP_DHCP 1 // Enable Dynamic Host Configuration
#define LWIP_ICMP 1 // Enable ICMP (Ping)
#define LWIP_DHCP_DOES_ACD_CHECK 0 // Disable Address Conflict Detection
#define LWIP_ACD 0 // Disable ACD module
// Checksum Configuration
// CHECK disabled (don't validate incoming - helps debug)
// GEN enabled (QEMU user-mode networking requires valid checksums)
#define CHECKSUM_CHECK_UDP 0
#define CHECKSUM_CHECK_TCP 0
#define CHECKSUM_CHECK_IP 0
#define CHECKSUM_CHECK_ICMP 0
#define CHECKSUM_GEN_UDP 1
#define CHECKSUM_GEN_TCP 1
#define CHECKSUM_GEN_IP 1
#define CHECKSUM_GEN_ICMP 1
// Disable all debugs and diagnostics for a clean link
#define LWIP_DEBUG 0
#define LWIP_PLATFORM_DIAG(x) do {} while(0)
// Loopback Support
#define LWIP_HAVE_LOOPIF 1
#define LWIP_NETIF_LOOPBACK 1
#define LWIP_LOOPBACK_MAX_PBUFS 8
// Debugging (Loud Mode)
#define LWIP_DEBUG 1
#define LWIP_PLATFORM_DIAG(x) lwip_platform_diag x
#define DHCP_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE | LWIP_DBG_STATE)
#define UDP_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)
#define NETIF_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE | LWIP_DBG_STATE)
#define IP_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)
#define ICMP_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)
//#define MEM_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)
//#define MEMP_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)
//#define PBUF_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)
#define ETHERNET_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)
#define ETHARP_DEBUG (LWIP_DBG_ON | LWIP_DBG_TRACE)
#define LWIP_DBG_MIN_LEVEL 0
#define LWIP_DBG_TYPES_ON 0xFFU
// Endianness
#undef BYTE_ORDER
#define BYTE_ORDER 1234
#endif

View File

@ -92,9 +92,13 @@ type
# Phase 36.4: I/O Multiplexing (8 bytes)
fn_wait_multi*: proc(mask: uint64): int32 {.cdecl.}
# Phase 36.5: Network Hardware Info (8 bytes)
net_mac*: array[6, byte]
reserved_mac*: array[2, byte]
static:
doAssert sizeof(SysTable) == 200
doAssert sizeof(SysTable) == 208
var membrane_rx_ring_ptr*: ptr RingBuffer[IonPacket, 256]
var membrane_tx_ring_ptr*: ptr RingBuffer[IonPacket, 256]
@ -137,27 +141,54 @@ proc ion_user_init*() {.exportc.} =
console_write(addr err[0], uint(err.len))
# --- ION CLIENT LOGIC ---
# Pure shared-memory slab allocator - NO kernel function calls!
const
USER_SLAB_BASE = 0x83010000'u64 # Start of user packet slab in SysTable region
USER_SLAB_COUNT = 512 # Number of packet slots
USER_PKT_SIZE = 2048 # Size of each packet buffer
USER_BITMAP_ADDR = 0x83000100'u64 # Bitmap stored in SysTable region (after SysTable struct)
# Get pointer to shared bitmap (512 bits = 64 bytes for 512 slots)
proc get_user_bitmap(): ptr array[64, byte] =
return cast[ptr array[64, byte]](USER_BITMAP_ADDR)
proc ion_user_alloc*(out_pkt: ptr IonPacket): bool {.exportc.} =
let sys = cast[ptr SysTable](SYS_TABLE_ADDR)
if sys.magic != 0x4E585553 or sys.fn_ion_alloc == nil:
return false
var id: uint16
let phys = sys.fn_ion_alloc(addr id)
if phys == 0: return false
## Allocate packet from shared slab - pure userland, no kernel call
let bitmap = get_user_bitmap()
out_pkt.id = id
out_pkt.phys = phys
out_pkt.len = 0
# In our identity-mapped unikernel, phys == virt
out_pkt.data = cast[ptr UncheckedArray[byte]](phys)
return true
# Find first free slot
for byteIdx in 0 ..< 64:
if bitmap[byteIdx] != 0xFF: # At least one bit free
for bitIdx in 0 ..< 8:
let slotIdx = byteIdx * 8 + bitIdx
if slotIdx >= USER_SLAB_COUNT:
return false
let mask = byte(1 shl bitIdx)
if (bitmap[byteIdx] and mask) == 0:
# Found free slot - mark as used
bitmap[byteIdx] = bitmap[byteIdx] or mask
let addr_val = USER_SLAB_BASE + uint64(slotIdx) * USER_PKT_SIZE
out_pkt.id = uint16(slotIdx) or 0x8000
out_pkt.phys = addr_val
out_pkt.len = 0
out_pkt.data = cast[ptr UncheckedArray[byte]](addr_val)
return true
return false
proc ion_user_free*(pkt: IonPacket) {.exportc.} =
let sys = cast[ptr SysTable](SYS_TABLE_ADDR)
if sys.magic == 0x4E585553 and sys.fn_ion_free != nil:
sys.fn_ion_free(pkt.id)
## Free packet back to shared slab - pure userland, no kernel call
if pkt.data == nil:
return
let slotIdx = pkt.id and 0x7FFF
if slotIdx >= USER_SLAB_COUNT:
return
let bitmap = get_user_bitmap()
let byteIdx = int(slotIdx) div 8
let bitIdx = int(slotIdx) mod 8
let mask = byte(1 shl bitIdx)
bitmap[byteIdx] = bitmap[byteIdx] and (not mask)
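
Because both sides share the bitmap, the only contract between kernel and membrane is the ID encoding: bit 15 tags a user-slab buffer and the low 15 bits are the slot index. A small sketch of the id/address mapping used above (constants copied from this file; helper names illustrative):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define USER_SLAB_BASE  0x83010000ULL
#define USER_PKT_SIZE   2048
#define USER_SLAB_COUNT 512
#define USER_TAG        0x8000u

static uint16_t slot_to_id(uint16_t slot) { return (uint16_t)(slot | USER_TAG); }
static uint16_t id_to_slot(uint16_t id)   { return (uint16_t)(id & 0x7FFF); }
static uint64_t id_to_addr(uint16_t id)   { return USER_SLAB_BASE + (uint64_t)id_to_slot(id) * USER_PKT_SIZE; }

int main(void) {
    uint16_t id = slot_to_id(3);
    assert(id == 0x8003);
    assert(id_to_slot(id) < USER_SLAB_COUNT);
    assert(id_to_addr(id) == 0x83011800ULL);   /* 0x83010000 + 3 * 2048 */
    printf("id=0x%04x addr=0x%llx\n", id, (unsigned long long)id_to_addr(id));
    return 0;
}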
proc ion_user_return*(id: uint16) {.exportc.} =
if membrane_cmd_ring_ptr == nil: return
@ -243,3 +274,7 @@ proc crypto_blake3*(data: pointer, len: uint64): array[32, byte] =
let sys = get_sys_table()
if sys.fn_blake3 != nil:
sys.fn_blake3(data, len, addr result)
proc ion_get_mac*(): array[6, byte] =
let sys = get_sys_table()
return sys.net_mac

View File

@ -34,14 +34,66 @@ proc glue_print(s: string) =
#include "lwip/tcp.h"
#include "lwip/timeouts.h"
#include "netif/ethernet.h"
#include "lwip/raw.h"
#include "lwip/icmp.h"
#include "lwip/inet_chksum.h"
#include <string.h>
#include "lwip/dhcp.h"
// Externs
extern int printf(const char *format, ...);
// If string.h is missing, we need the prototype for our clib.c implementation
void* memcpy(void* dest, const void* src, size_t n);
extern err_t etharp_output(struct netif *netif, struct pbuf *p, const ip4_addr_t *ipaddr);
// extern err_t netif_loopif_init(struct netif *netif);
const char* lwip_strerr(err_t err) { return "LwIP Error"; }
// --- PING IMPLEMENTATION (Phase 36c) ---
static struct raw_pcb *ping_pcb;
static u16_t ping_seq_num;
static u8_t ping_recv(void *arg, struct raw_pcb *pcb, struct pbuf *p, const ip_addr_t *addr) {
LWIP_UNUSED_ARG(arg);
LWIP_UNUSED_ARG(pcb);
if (p->tot_len >= sizeof(struct ip_hdr) + sizeof(struct icmp_echo_hdr)) {
printf("[Membrane] PING REPLY from %s: %d bytes\n", ipaddr_ntoa(addr), p->tot_len);
}
pbuf_free(p);
return 1; // Eat the packet
}
static void ping_send(const ip_addr_t *addr) {
if (!ping_pcb) {
ping_pcb = raw_new(IP_PROTO_ICMP);
if (ping_pcb) {
raw_recv(ping_pcb, ping_recv, NULL);
raw_bind(ping_pcb, IP_ADDR_ANY);
}
}
if (!ping_pcb) return;
struct pbuf *p = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr) + 32, PBUF_RAM);
if (!p) return;
struct icmp_echo_hdr *iecho = (struct icmp_echo_hdr *)p->payload;
ICMPH_TYPE_SET(iecho, ICMP_ECHO);
ICMPH_CODE_SET(iecho, 0);
iecho->chksum = 0;
iecho->id = 0xAFAF;
iecho->seqno = lwip_htons(++ping_seq_num);
// Fill payload
memset((char *)p->payload + sizeof(struct icmp_echo_hdr), 'A', 32);
iecho->chksum = inet_chksum(iecho, p->len);
raw_sendto(ping_pcb, p, addr);
pbuf_free(p);
}
""".}
proc lwip_init*() {.importc: "lwip_init", cdecl.}
@ -77,13 +129,19 @@ proc ion_linkoutput(netif: pointer, p: pointer): int32 {.exportc, cdecl.} =
struct pbuf *curr = (struct pbuf *)`p`;
while (curr != NULL) {
if (`offset` + curr->len > 2000) break;
memcpy((void*)((uintptr_t)`pkt`.data + `offset`), curr->payload, curr->len);
// DEBUG: Verify payload
// unsigned char* pl = (unsigned char*)curr->payload;
// glue_print(" Payload Byte: ");
// glue_print_hex((uint64_t)pl[0]);
memcpy((void*)((uintptr_t)`pkt`.data + `offset` + 12), curr->payload, curr->len);
`offset` += curr->len;
curr = curr->next;
}
""".}
pkt.len = uint16(offset)
pkt.len = uint16(offset) + 12
if not ion_net_tx(pkt):
ion_user_free(pkt)
@ -92,6 +150,8 @@ proc ion_linkoutput(netif: pointer, p: pointer): int32 {.exportc, cdecl.} =
return 0 # ERR_OK
proc ion_netif_init(netif: pointer): int32 {.exportc, cdecl.} =
let mac = ion_get_mac()
glue_print("[Membrane] Configuring Interface with Hardware MAC\n")
{.emit: """
struct netif *ni = (struct netif *)`netif`;
ni->name[0] = 'i';
@ -101,9 +161,14 @@ proc ion_netif_init(netif: pointer): int32 {.exportc, cdecl.} =
ni->mtu = 1500;
ni->hwaddr_len = 6;
ni->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET | NETIF_FLAG_LINK_UP;
// Set MAC: 00:DE:AD:BE:EF:01 (matching QEMU -netdev tap)
ni->hwaddr[0] = 0x00; ni->hwaddr[1] = 0xDE; ni->hwaddr[2] = 0xAD;
ni->hwaddr[3] = 0xBE; ni->hwaddr[4] = 0xEF; ni->hwaddr[5] = 0x01;
// Set MAC from SysTable
ni->hwaddr[0] = `mac`[0];
ni->hwaddr[1] = `mac`[1];
ni->hwaddr[2] = `mac`[2];
ni->hwaddr[3] = `mac`[3];
ni->hwaddr[4] = `mac`[4];
ni->hwaddr[5] = `mac`[5];
""".}
return 0
@ -124,6 +189,8 @@ proc membrane_init*() {.exportc, cdecl.} =
glue_print("[Membrane] Calling lwip_init()...\n")
lwip_init()
glue_print("[Membrane] lwip_init() returned.\n")
{.emit: "printf(\"[Membrane] LwIP Byte Order: %d (LE=%d, BE=%d)\\n\", BYTE_ORDER, LITTLE_ENDIAN, BIG_ENDIAN);".}
{.emit: "lwip_platform_diag(\"[Membrane] DIAG TEST: %s\\n\", \"OK\");".}
# 2. Setup Netif
{.emit: """
@ -147,36 +214,50 @@ proc membrane_init*() {.exportc, cdecl.} =
glue_print("[Membrane] Network Stack Operational (Waiting for DHCP IP...)\n")
var last_notified_ip: uint32 = 0
var dhcp_retried = false
var last_ping_time: uint32 = 0
var looped_ping_done = false
var gateway_ping_count = 0
# proc glue_print_hex(v: uint64) =
# const hex_chars = "0123456789ABCDEF"
# var buf: array[20, char]
# buf[0] = '0'; buf[1] = 'x'
# var val = v
# for i in countdown(15, 0):
# buf[2+i] = hex_chars[int(val and 0xF)]
# val = val shr 4
# buf[18] = '\n'; buf[19] = '\0'
# console_write(addr buf[0], 20)
proc glue_print_hex(v: uint64) =
const hex_chars = "0123456789ABCDEF"
var buf: array[20, char]
buf[0] = '0'; buf[1] = 'x'
var val = v
for i in countdown(15, 0):
buf[2+i] = hex_chars[int(val and 0xF)]
val = val shr 4
buf[18] = '\n'; buf[19] = '\0'
console_write(addr buf[0], 20)
proc pump_membrane_stack*() {.exportc, cdecl.} =
## The Pulse of the Membrane. Call frequently to handle timers and RX.
let now = sys_now()
# if (now mod 1000) < 50:
# glue_print(".")
# glue_print("[Membrane] Time: ")
# glue_print_hex(uint64(now))
# glue_print("\n")
# 3. Check for IP (Avoid continuous Nim string allocation/leak)
var ip_addr: uint32
{.emit: "`ip_addr` = ip4_addr_get_u32(netif_ip4_addr((struct netif *)`g_netif`));".}
if ip_addr != 0 and ip_addr != last_notified_ip:
glue_print("[Membrane] IP STATUS CHANGE: ")
# Call Zig kprint_hex directly
proc kprint_hex_ext(v: uint64) {.importc: "kprint_hex", cdecl.}
kprint_hex_ext(uint64(ip_addr))
glue_print_hex(uint64(ip_addr))
glue_print("\n")
last_notified_ip = ip_addr
# Force DHCP Retry if no IP after 3 seconds
if now > 3000 and not dhcp_retried and ip_addr == 0:
dhcp_retried = true
glue_print("[Membrane] Forcing DHCP Restart...\n")
{.emit: "dhcp_start((struct netif *)`g_netif`);".}
# 1. LwIP Timers (Raw API needs manual polling)
if now - last_tcp_tmr >= 250:
@ -186,6 +267,15 @@ proc pump_membrane_stack*() {.exportc, cdecl.} =
etharp_tmr()
last_arp_tmr = now
# Phase 36c: Ping Automation (Disabled/Silent)
if now - last_ping_time > 5000:
last_ping_time = now
if ip_addr != 0:
# Trigger periodic actions here
discard
# {.emit: "ip_addr_t t; IP4_ADDR(&t, 10,0,2,2); ping_send(&t);".}
# DHCP Timers
if now - last_dhcp_fine >= 500:
# glue_print("[Membrane] DHCP Fine Timer\n")
@ -200,13 +290,29 @@ proc pump_membrane_stack*() {.exportc, cdecl.} =
var pkt: IonPacket
while ion_net_rx(addr pkt):
glue_print("[Membrane] Ingress Packet\n")
# DEBUG: Hex dump first 32 bytes (Disabled for Ping Test)
# {.emit: """
# printf("[Membrane] RX Hex Dump (first 32 bytes):\n");
# for (int i = 0; i < 32 && i < `pkt`.len; i++) {
# printf("%02x ", `pkt`.data[i]);
# if ((i + 1) % 16 == 0) printf("\n");
# }
# printf("\n");
# """.}
# Pass to LwIP
{.emit: """
struct pbuf *p = pbuf_alloc(PBUF_RAW, `pkt`.len, PBUF_POOL);
if (p != NULL) {
pbuf_take(p, `pkt`.data, `pkt`.len);
if (netif_default->input(p, netif_default) != ERR_OK) {
pbuf_free(p);
if (`pkt`.data == NULL) {
printf("[Membrane] ERROR: Ingress pkt.data is NULL!\n");
pbuf_free(p);
} else {
// OFFSET FIX: Kernel already applied VirtIO offset (12 bytes) to pkt.data
pbuf_take(p, (void*)((uintptr_t)`pkt`.data), `pkt`.len);
if (netif_default->input(p, netif_default) != ERR_OK) {
pbuf_free(p);
}
}
}
""".}

View File

@ -18,11 +18,15 @@
* - No critical sections needed (single fiber context)
*/
#include <stdarg.h>
#include <stddef.h>
#include "lwip/opt.h"
#include "lwip/arch.h"
#include "lwip/sys.h"
#include "lwip/stats.h"
extern int vprintf(const char *format, va_list args);
// =========================================================
// External Kernel Interface
// =========================================================
@ -95,19 +99,22 @@ void sys_arch_unprotect(sys_prot_t pval) {
// Diagnostics (Optional)
// =========================================================
#if LWIP_PLATFORM_DIAG
// =========================================================
// Diagnostics
// =========================================================
/**
* lwip_platform_diag - Output diagnostic message
* Used by LWIP_PLATFORM_DIAG() macro if enabled
* Used by LWIP_PLATFORM_DIAG() macro
*/
void lwip_platform_diag(const char *fmt, ...) {
// For now, silent. Could use console_write for debug builds.
(void)fmt;
console_write("<<<LwIP>>> ", 11);
va_list args;
va_start(args, fmt);
vprintf(fmt, args);
va_end(args);
}
#endif /* LWIP_PLATFORM_DIAG */
// =========================================================
// Assertions (Contract Enforcement)
// =========================================================
@ -115,12 +122,10 @@ void lwip_platform_diag(const char *fmt, ...) {
/**
* lwip_platform_assert - Handle failed assertions
* @param msg Assertion message
* @param line Line number
* @param file File name
*
* In a production kernel, this should trigger a controlled panic.
* Note: Mapped via LWIP_PLATFORM_ASSERT macro in cc.h
*/
void lwip_platform_assert(const char *msg, int line, const char *file) {
void lwip_platform_assert_impl(const char *msg) {
const char panic_msg[] = "[lwIP ASSERT FAILED]\n";
console_write(panic_msg, sizeof(panic_msg) - 1);
console_write(msg, __builtin_strlen(msg));