feat(hal): ARM64 port, VirtIO MMIO, dual-arch HAL (M3.1-M3.3)

Markus Maiwald 2026-02-15 19:58:51 +01:00
parent 011e0b699e
commit 8d4b581519
21 changed files with 2141 additions and 174 deletions

View File

@ -33,6 +33,10 @@ SECTIONS
*(.data.*)
} > RAM
.nexus.manifest : {
KEEP(*(.nexus.manifest))
} > RAM
.bss : {
. = ALIGN(8);
__bss_start = .;

View File

@ -0,0 +1,44 @@
/* Memory Layout — ARM64 Cellular Memory (M3.3):
* User RAM: 0x48000000 - 0x4FFFFFFF (128MB)
* Stack starts at 0x4BFFFFF0 and grows down
* QEMU virt: -m 512M ensures valid physical backing
*/
MEMORY
{
RAM (rwx) : ORIGIN = 0x48000000, LENGTH = 128M
}
SECTIONS
{
. = 0x48000000;
.text : {
*(.text._start)
*(.text)
*(.text.*)
} > RAM
.rodata : {
*(.rodata)
*(.rodata.*)
} > RAM
.data : {
*(.data)
*(.data.*)
} > RAM
.nexus.manifest : {
KEEP(*(.nexus.manifest))
} > RAM
.bss : {
. = ALIGN(8);
__bss_start = .;
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(8);
__bss_end = .;
} > RAM
}

View File

@ -11,8 +11,14 @@ _start:
2:
fence rw, rw
# Arguments (argc, argv) are already in a0, a1 from Kernel
# sp is already pointing to argc from Kernel
# Load valid args from the stack (Linux ABI)
ld a0, 0(sp) # argc
addi a1, sp, 8 # argv
# Calculate envp in a2: envp = argv + (argc + 1) * 8
addi t0, a0, 1 # t0 = argc + 1
slli t0, t0, 3 # t0 = (argc + 1) * 8
add a2, a1, t0 # a2 = argv + offset
call main
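For illustration only, a hedged Zig sketch (not part of this file) of the same envp derivation the assembly above performs, assuming the Linux ABI initial stack layout (argc at sp, then argc+1 argv slots including the NULL terminator, then envp):

fn derive_envp(sp: [*]const usize) [*]const usize {
    const argc = sp[0];      // argc sits at the initial stack pointer
    const argv = sp + 1;     // argv[0] immediately follows argc
    return argv + argc + 1;  // skip argc argv entries plus the NULL terminator
}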

View File

@ -7,15 +7,16 @@
//! Rumpk Boot Header
//!
//! Defines the Multiboot2 header for GRUB/QEMU and the bare-metal entry point.
//! Handles BSS clearing and stack initialization before jumping to the Nim kernel.
//! Architecture-dispatched entry point for bare-metal boot.
//! Handles BSS clearing and stack initialization before jumping to HAL init.
//!
//! SAFETY: Executed in the earliest boot stage with no environment initialized.
const std = @import("std");
const builtin = @import("builtin");
// =========================================================
// Multiboot2 Header (for GRUB/QEMU)
// Multiboot2 Header (for GRUB/QEMU x86 only)
// =========================================================
const MULTIBOOT2_MAGIC: u32 = 0xe85250d6;
@ -43,39 +44,92 @@ export const multiboot2_header linksection(".multiboot2") = Multiboot2Header{
};
// =========================================================
// Entry Point
// Arch-Specific HAL Entry Points
// =========================================================
extern fn riscv_init() noreturn;
extern fn aarch64_init() void; // Returns void (calls rumpk_halt internally)
// =========================================================
// Entry Point (Architecture Dispatched)
// =========================================================
// 1MB Kernel Stack
const STACK_SIZE = 0x100000;
export var kernel_stack: [STACK_SIZE]u8 align(16) linksection(".bss.stack") = undefined;
export fn _start() callconv(.naked) noreturn {
// Clear BSS, set up stack, then jump to RISC-V Init
asm volatile (
\\ // Set up stack
\\ la sp, kernel_stack
\\ li t0, %[stack_size]
\\ add sp, sp, t0
\\
\\ // Clear BSS
\\ la t0, __bss_start
\\ la t1, __bss_end
\\1:
\\ bge t0, t1, 2f
\\ sd zero, (t0)
\\ addi t0, t0, 8
\\ j 1b
\\2:
\\ // Jump to HAL Init
\\ call riscv_init
\\
\\ // Should never return
\\ wfi
\\ j 2b
:
: [stack_size] "i" (STACK_SIZE),
);
switch (builtin.cpu.arch) {
.riscv64 => {
asm volatile (
\\ // Set up stack
\\ la sp, kernel_stack
\\ li t0, %[stack_size]
\\ add sp, sp, t0
\\
\\ // Clear BSS
\\ la t0, __bss_start
\\ la t1, __bss_end
\\1:
\\ bge t0, t1, 2f
\\ sd zero, (t0)
\\ addi t0, t0, 8
\\ j 1b
\\2:
\\ // Jump to RISC-V HAL Init
\\ call riscv_init
\\
\\ // Should never return
\\ wfi
\\ j 2b
:
: [stack_size] "i" (STACK_SIZE),
);
},
.aarch64 => {
asm volatile (
// Mask all exceptions
\\ msr daifset, #0xf
//
// Enable FP/SIMD (CPACR_EL1.FPEN = 0b11)
\\ mov x0, #(3 << 20)
\\ msr cpacr_el1, x0
\\ isb
//
// Disable alignment check (SCTLR_EL1.A = 0)
\\ mrs x0, sctlr_el1
\\ bic x0, x0, #(1 << 1)
\\ msr sctlr_el1, x0
\\ isb
//
// Set up stack
\\ adrp x0, kernel_stack
\\ add x0, x0, :lo12:kernel_stack
\\ mov x1, #0x100000
\\ add sp, x0, x1
//
// Clear BSS
\\ adrp x0, __bss_start
\\ add x0, x0, :lo12:__bss_start
\\ adrp x1, __bss_end
\\ add x1, x1, :lo12:__bss_end
\\ 1: cmp x0, x1
\\ b.ge 2f
\\ str xzr, [x0], #8
\\ b 1b
\\ 2:
//
// Jump to ARM64 HAL Init
\\ bl aarch64_init
//
// Should never return
\\ 3: wfe
\\ b 3b
);
},
else => {
// Unsupported architecture
unreachable;
},
}
}

boot/linker_aarch64.ld Normal file (54 lines)
View File

@ -0,0 +1,54 @@
/* Rumpk Linker Script (AArch64)
* For QEMU virt machine (ARM64)
* Load address: 0x40080000 (QEMU -kernel default for virt)
*/
ENTRY(_start)
SECTIONS
{
. = 0x40080000;
PROVIDE(__kernel_vbase = .);
PROVIDE(__kernel_pbase = .);
.text : {
*(.text._start)
*(.text*)
}
.rodata : {
*(.rodata*)
}
.data : {
. = ALIGN(16);
*(.sdata*)
*(.sdata.*)
*(.data*)
}
.initrd : {
_initrd_start = .;
KEEP(*(.initrd))
_initrd_end = .;
}
.bss : {
__bss_start = .;
*(.bss*)
*(COMMON)
__bss_end = .;
}
.stack (NOLOAD) : {
. = ALIGN(16);
. += 0x100000; /* 1MB Stack */
PROVIDE(__stack_top = .);
}
/DISCARD/ : {
*(.comment)
*(.note*)
*(.eh_frame*)
}
}

View File

@ -55,9 +55,14 @@ fn halt_impl() callconv(.c) noreturn {
}
}
// =========================================================
// Exports for Nim FFI
// =========================================================
const builtin = @import("builtin");
// Sovereign timer: canonical time source for the entire kernel
extern fn rumpk_timer_now_ns() u64;
export fn hal_get_time_ns() u64 {
return rumpk_timer_now_ns();
}
export fn rumpk_console_write(ptr: [*]const u8, len: usize) void {
hal.console_write(ptr, len);
@ -113,17 +118,27 @@ pub const cspace_check_perm = cspace.cspace_check_perm;
pub const surface = @import("surface.zig");
comptime {
// Force analysis
// Force analysis: architecture-independent modules
_ = @import("stubs.zig");
_ = @import("mm.zig");
_ = @import("channel.zig");
_ = @import("uart.zig");
_ = @import("virtio_block.zig");
_ = @import("virtio_net.zig");
_ = @import("virtio_pci.zig");
_ = @import("ontology.zig");
_ = @import("entry_riscv.zig");
_ = @import("cspace.zig");
_ = @import("surface.zig");
_ = @import("initrd.zig");
// Architecture-specific modules
if (builtin.cpu.arch == .riscv64) {
_ = @import("mm.zig");
_ = @import("virtio_block.zig");
_ = @import("virtio_net.zig");
_ = @import("virtio_pci.zig");
_ = @import("entry_riscv.zig");
} else if (builtin.cpu.arch == .aarch64) {
_ = @import("entry_aarch64.zig");
_ = @import("gic.zig");
_ = @import("virtio_mmio.zig");
_ = @import("virtio_block.zig");
_ = @import("virtio_net.zig");
}
}

View File

@ -13,6 +13,7 @@
//! SAFETY: All operations use atomic loads/stores with proper memory fences.
const std = @import("std");
const builtin = @import("builtin");
pub const IonPacket = extern struct {
data: u64,
@ -41,8 +42,8 @@ pub fn Ring(comptime T: type) type {
// INVARIANT 1: The Handle Barrier
fn validate_ring_ptr(ptr: u64) void {
// 0x8000_0000 is kernel base, 0x8300_0000 is ION base.
if (ptr < 0x8000_0000) {
const min_valid: u64 = if (builtin.cpu.arch == .aarch64) 0x4000_0000 else 0x8000_0000;
if (ptr < min_valid) {
@panic("HAL: Invariant Violation - Invalid Ring Pointer");
}
}
@ -72,7 +73,11 @@ fn popGeneric(comptime T: type, ring: *Ring(T), out_pkt: *T) bool {
}
// Ensure we see data written by producer before reading it
asm volatile ("fence r, rw" ::: .{ .memory = true });
switch (builtin.cpu.arch) {
.riscv64 => asm volatile ("fence r, rw" ::: .{ .memory = true }),
.aarch64 => asm volatile ("dmb ld" ::: .{ .memory = true }),
else => @compileError("unsupported arch"),
}
out_pkt.* = ring.data[tail & ring.mask];
const next = (tail + 1) & ring.mask;

View File

@ -230,6 +230,19 @@ pub export fn cspace_check_perm(fiber_id: u64, slot: usize, perm_bits: u8) bool
return cap.has_perm(perm);
}
/// Check if fiber has Channel capability for given channel_id with required permission (C ABI)
/// Scans all CSpace slots for a matching Channel capability by object_id.
pub export fn cspace_check_channel(fiber_id: u64, channel_id: u64, perm_bits: u8) bool {
const cs = cspace_get(fiber_id) orelse return false;
const perm: CapPerms = @bitCast(perm_bits);
for (&cs.slots) |*cap| {
if (cap.cap_type == .Channel and cap.object_id == channel_id and cap.has_perm(perm)) {
return true;
}
}
return false;
}
// Unit tests
test "Capability creation and validation" {
const cap = Capability{
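A hedged usage sketch, not part of this commit, of gating a channel operation on the new cspace_check_channel export. The import path and the write-permission bit value are assumptions, not taken from cspace.zig:

const cspace = @import("cspace.zig");

// Assumed permission encoding; the real CapPerms bit layout lives in cspace.zig.
const ASSUMED_PERM_WRITE: u8 = 0b010;

fn may_send_on_channel(fiber_id: u64, channel_id: u64) bool {
    // Allow the send only if the fiber holds a matching Channel capability
    // carrying the write permission.
    return cspace.cspace_check_channel(fiber_id, channel_id, ASSUMED_PERM_WRITE);
}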

hal/entry_aarch64.zig Normal file (1060 lines)

File diff suppressed because it is too large.

View File

@ -308,7 +308,7 @@ export fn rss_trap_handler(frame: *TrapFrame) void {
const irq = PLIC_CLAIM.*;
if (irq == 10) { // UART0 is IRQ 10 on Virt machine
// uart.print("[IRQ] 10\n");
uart.print("[IRQ] 10\n");
uart_input.poll_input();
} else if (irq >= 32 and irq <= 35) {
virtio_net.virtio_net_poll();
@ -447,6 +447,13 @@ export fn hal_io_init() void {
virtio_block.init();
}
export fn hal_panic(msg: [*:0]const u8) callconv(.c) noreturn {
uart.print("[HAL PANIC] ");
uart.print(std.mem.span(msg));
uart.print("\n");
rumpk_halt();
}
export fn rumpk_halt() noreturn {
uart.print("[Rumpk RISC-V] Halting.\n");
while (true) {

hal/gic.zig Normal file (169 lines)
View File

@ -0,0 +1,169 @@
// SPDX-License-Identifier: LCL-1.0
// Copyright (c) 2026 Markus Maiwald
// Stewardship: Self Sovereign Society Foundation
//
// This file is part of the Nexus Commonwealth.
// See legal/LICENSE_COMMONWEALTH.md for license terms.
//! Rumpk Layer 0: GICv2 Driver (ARM64)
//!
//! Minimal Generic Interrupt Controller v2 for QEMU virt machine.
//! Handles interrupt enable, claim, and complete for timer and device IRQs.
//!
//! SAFETY: All register accesses use volatile pointers to MMIO regions.
// =========================================================
// GICv2 MMIO Base Addresses (QEMU virt machine)
// =========================================================
const GICD_BASE: usize = 0x08000000; // Distributor
const GICC_BASE: usize = 0x08010000; // CPU Interface
// =========================================================
// Distributor Registers (GICD)
// =========================================================
const GICD_CTLR: usize = 0x000; // Control
const GICD_TYPER: usize = 0x004; // Type (read-only)
const GICD_ISENABLER: usize = 0x100; // Set-Enable (banked per 32 IRQs)
const GICD_ICENABLER: usize = 0x180; // Clear-Enable
const GICD_ISPENDR: usize = 0x200; // Set-Pending
const GICD_ICPENDR: usize = 0x280; // Clear-Pending
const GICD_IPRIORITYR: usize = 0x400; // Priority (byte-accessible)
const GICD_ITARGETSR: usize = 0x800; // Target (byte-accessible)
const GICD_ICFGR: usize = 0xC00; // Configuration
// =========================================================
// CPU Interface Registers (GICC)
// =========================================================
const GICC_CTLR: usize = 0x000; // Control
const GICC_PMR: usize = 0x004; // Priority Mask
const GICC_IAR: usize = 0x00C; // Interrupt Acknowledge
const GICC_EOIR: usize = 0x010; // End of Interrupt
// =========================================================
// IRQ Numbers (QEMU virt)
// =========================================================
/// Non-Secure Physical Timer PPI
pub const TIMER_IRQ: u32 = 30;
/// UART PL011 (SPI #1 = IRQ 33)
pub const UART_IRQ: u32 = 33;
/// VirtIO MMIO IRQ base (SPI #16 = IRQ 48)
/// QEMU virt assigns SPIs 48..79 to MMIO slots 0..31
pub const VIRTIO_MMIO_IRQ_BASE: u32 = 48;
// Spurious interrupt ID
const SPURIOUS_IRQ: u32 = 1023;
// =========================================================
// MMIO Helpers
// =========================================================
fn gicd_read(offset: usize) u32 {
const ptr: *volatile u32 = @ptrFromInt(GICD_BASE + offset);
return ptr.*;
}
fn gicd_write(offset: usize, val: u32) void {
const ptr: *volatile u32 = @ptrFromInt(GICD_BASE + offset);
ptr.* = val;
}
fn gicc_read(offset: usize) u32 {
const ptr: *volatile u32 = @ptrFromInt(GICC_BASE + offset);
return ptr.*;
}
fn gicc_write(offset: usize, val: u32) void {
const ptr: *volatile u32 = @ptrFromInt(GICC_BASE + offset);
ptr.* = val;
}
// =========================================================
// Public API
// =========================================================
/// Initialize GICv2 distributor and CPU interface.
pub fn gic_init() void {
// 1. Disable distributor during setup
gicd_write(GICD_CTLR, 0);
// 2. Disable all SPIs, set a low priority (0xA0), and target CPU 0
// PPIs (0-31) are banked per-CPU, handled separately
const typer = gicd_read(GICD_TYPER);
const it_lines = (typer & 0x1F) + 1; // Number of 32-IRQ groups
var i: usize = 1; // Skip group 0 (SGIs/PPIs - banked)
while (i < it_lines) : (i += 1) {
// Disable all SPIs
gicd_write(GICD_ICENABLER + i * 4, 0xFFFFFFFF);
// Set priority to 0xA0 (low but not lowest)
var j: usize = 0;
while (j < 8) : (j += 1) {
gicd_write(GICD_IPRIORITYR + (i * 32 + j * 4), 0xA0A0A0A0);
}
// Target CPU 0 for all SPIs
j = 0;
while (j < 8) : (j += 1) {
gicd_write(GICD_ITARGETSR + (i * 32 + j * 4), 0x01010101);
}
}
// 3. Configure PPI priorities (group 0, banked)
// Timer IRQ 30: priority 0x20 (high)
const timer_prio_reg = GICD_IPRIORITYR + (TIMER_IRQ / 4) * 4;
const timer_prio_shift: u5 = @intCast((TIMER_IRQ % 4) * 8);
var prio_val = gicd_read(timer_prio_reg);
prio_val &= ~(@as(u32, 0xFF) << timer_prio_shift);
prio_val |= @as(u32, 0x20) << timer_prio_shift;
gicd_write(timer_prio_reg, prio_val);
// 4. Enable distributor (Group 0 + Group 1)
gicd_write(GICD_CTLR, 0x3);
// 5. Configure CPU interface
gicc_write(GICC_PMR, 0xFF); // Accept all priorities
gicc_write(GICC_CTLR, 0x1); // Enable CPU interface
}
/// Enable a specific interrupt in the distributor.
pub fn gic_enable_irq(irq: u32) void {
const reg = GICD_ISENABLER + (irq / 32) * 4;
const bit: u5 = @intCast(irq % 32);
gicd_write(reg, @as(u32, 1) << bit);
}
/// Disable a specific interrupt in the distributor.
pub fn gic_disable_irq(irq: u32) void {
const reg = GICD_ICENABLER + (irq / 32) * 4;
const bit: u5 = @intCast(irq % 32);
gicd_write(reg, @as(u32, 1) << bit);
}
/// Acknowledge an interrupt (read IAR). Returns IRQ number or SPURIOUS_IRQ.
pub fn gic_claim() u32 {
return gicc_read(GICC_IAR) & 0x3FF;
}
/// Signal end of interrupt processing.
pub fn gic_complete(irq: u32) void {
gicc_write(GICC_EOIR, irq);
}
/// Check if a claimed IRQ is spurious.
pub fn is_spurious(irq: u32) bool {
return irq >= SPURIOUS_IRQ;
}
/// Enable the NS Physical Timer interrupt (IRQ 30).
pub fn gic_enable_timer_irq() void {
gic_enable_irq(TIMER_IRQ);
}
/// Enable a VirtIO MMIO slot interrupt in the GIC.
pub fn gic_enable_virtio_mmio_irq(slot: u32) void {
gic_enable_irq(VIRTIO_MMIO_IRQ_BASE + slot);
}
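A hedged sketch, not part of gic.zig, of how an ARM64 trap handler might drain pending interrupts with the API above; the per-IRQ handlers are placeholders:

const gic = @import("gic.zig");

fn handle_timer_tick() void {} // placeholder
fn handle_uart_rx() void {} // placeholder

fn irq_dispatch() void {
    while (true) {
        const irq = gic.gic_claim(); // acknowledge highest-priority pending IRQ
        if (gic.is_spurious(irq)) break; // 1023: nothing left to service
        switch (irq) {
            gic.TIMER_IRQ => handle_timer_tick(),
            gic.UART_IRQ => handle_uart_rx(),
            else => {}, // e.g. VirtIO MMIO SPIs at VIRTIO_MMIO_IRQ_BASE + slot
        }
        gic.gic_complete(irq); // signal end of interrupt (EOIR)
    }
}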

View File

@ -205,8 +205,10 @@ pub fn create_worker_map(stack_base: u64, stack_size: u64, packet_addr: u64, phy
try map_range(root, 0x40000000, 0x40000000, 0x10000000, PTE_R | PTE_W); // PCIe MMIO
try map_range(root, 0x20000000, 0x20000000, 0x10000, PTE_R | PTE_W); // PTY Slave
// 4. Overlap stack with user access
try map_range(root, stack_base, stack_base, stack_size, PTE_R | PTE_W | PTE_U);
// 4. Overlap stack with user access (Optional)
if (stack_base != 0) {
try map_range(root, stack_base, stack_base, stack_size, PTE_R | PTE_W | PTE_U);
}
// 5. Shared SysTable & Rings & User Slab (0x83000000) - Map 256KB (64 pages; covers up to 0x40000)
var j: u64 = 0;

View File

@ -194,10 +194,12 @@ pub export fn stl_init() void {
stl_initialized = true;
}
/// Get current timestamp (placeholder - will be replaced by HAL timer)
/// Sovereign timer: canonical time source for all kernel timestamps
extern fn rumpk_timer_now_ns() u64;
/// Get current timestamp in nanoseconds since boot
fn get_timestamp_ns() u64 {
// TODO: Integrate with HAL timer
return 0;
return rumpk_timer_now_ns();
}
/// Emit event to STL (C ABI)

View File

@ -15,6 +15,9 @@
const uart = @import("uart.zig");
// Sovereign timer: canonical time source for the entire kernel
extern fn rumpk_timer_now_ns() u64;
// =========================================================
// Heap Stubs (Bump Allocator with Block Headers)
// =========================================================
@ -137,13 +140,9 @@ export fn calloc(nmemb: usize, size: usize) ?*anyopaque {
// =========================================================
export fn get_ticks() u32 {
var time_val: u64 = 0;
asm volatile ("rdtime %[ret]"
: [ret] "=r" (time_val),
);
// QEMU 'virt' RISC-V timebase is 10MHz (10,000,000 Hz).
// Convert to milliseconds: val / 10,000.
return @truncate(time_val / 10000);
// Delegate to the sovereign timer: single source of truth for all time
const ns = rumpk_timer_now_ns();
return @truncate(ns / 1_000_000); // ns -> ms
}
// export fn rumpk_timer_set_ns(ns: u64) void {
@ -160,10 +159,10 @@ export fn nexshell_main() void {
}
extern fn k_handle_syscall(nr: usize, a0: usize, a1: usize, a2: usize) usize;
export fn exit(code: c_int) noreturn {
_ = code;
while (true) asm volatile ("wfi");
}
// export fn exit(code: c_int) noreturn {
// _ = code;
// while (true) asm volatile ("wfi");
// }
// =========================================================
// Atomic Stubs (To resolve linker errors with libcompiler_rt)

View File

@ -13,7 +13,7 @@
//! DECISION(Alloc): Bump allocator chosen for simplicity and determinism.
//! Memory is never reclaimed; system reboots to reset.
const uart = @import("uart.zig");
// const uart = @import("uart.zig");
// =========================================================
// Heap Stubs (Bump Allocator with Block Headers)
@ -27,11 +27,13 @@ var heap_idx: usize = 0;
var heap_init_done: bool = false;
export fn debug_print(s: [*]const u8, len: usize) void {
uart.print(s[0..len]);
_ = s;
_ = len;
// TODO: Use syscall for userland debug printing
}
export fn kprint_hex(value: u64) void {
uart.print_hex(value);
_ = value;
}
// Header structure (64 bytes aligned to match LwIP MEM_ALIGNMENT)
@ -45,9 +47,9 @@ export fn malloc(size: usize) ?*anyopaque {
if (!heap_init_done) {
if (heap_idx != 0) {
uart.print("[Alloc] WARNING: BSS NOT ZEROED! heap_idx: ");
uart.print_hex(heap_idx);
uart.print("\n");
// uart.print("[Alloc] WARNING: BSS NOT ZEROED! heap_idx: ");
// uart.print_hex(heap_idx);
// uart.print("\n");
heap_idx = 0;
}
heap_init_done = true;
@ -58,11 +60,11 @@ export fn malloc(size: usize) ?*anyopaque {
const aligned_idx = (heap_idx + align_mask) & ~align_mask;
if (aligned_idx + total_needed > heap.len) {
uart.print("[Alloc] OOM! Size: ");
uart.print_hex(size);
uart.print(" Used: ");
uart.print_hex(heap_idx);
uart.print("\n");
// uart.print("[Alloc] OOM! Size: ");
// uart.print_hex(size);
// uart.print(" Used: ");
// uart.print_hex(heap_idx);
// uart.print("\n");
return null;
}
@ -137,3 +139,29 @@ export fn calloc(nmemb: usize, size: usize) ?*anyopaque {
export fn get_ticks() u32 {
return 0; // TODO: Implement real timer
}
// export fn strlen(s: [*]const u8) usize {
// var i: usize = 0;
// while (s[i] != 0) : (i += 1) {}
// return i;
// }
// export fn fwrite(ptr: ?*anyopaque, size: usize, nmemb: usize, stream: ?*anyopaque) usize {
// _ = ptr;
// _ = size;
// _ = nmemb;
// _ = stream;
// return 0;
// }
// export fn fflush(stream: ?*anyopaque) c_int {
// _ = stream;
// return 0;
// }
// export fn write(fd: c_int, buf: ?*anyopaque, count: usize) isize {
// _ = fd;
// _ = buf;
// _ = count;
// return 0;
// }

View File

@ -34,9 +34,19 @@ pub const NS16550A_LCR: usize = 0x03; // Line Control Register
// Input logic moved to uart_input.zig
// PL011 Additional Registers
pub const PL011_IBRD: usize = 0x24; // Integer Baud Rate Divisor
pub const PL011_FBRD: usize = 0x28; // Fractional Baud Rate Divisor
pub const PL011_LCR_H: usize = 0x2C; // Line Control
pub const PL011_CR: usize = 0x30; // Control
pub const PL011_IMSC: usize = 0x38; // Interrupt Mask Set/Clear
pub const PL011_ICR: usize = 0x44; // Interrupt Clear
pub const PL011_RXFE: u32 = 1 << 4; // Receive FIFO Empty
pub fn init() void {
switch (builtin.cpu.arch) {
.riscv64 => init_riscv(),
.aarch64 => init_aarch64(),
else => {},
}
}
@ -107,6 +117,78 @@ pub fn init_riscv() void {
// uart_input.poll_input(); // We cannot call this here safely without dep
}
pub fn init_aarch64() void {
const base = PL011_BASE;
// 1. Disable UART during setup
const cr: *volatile u32 = @ptrFromInt(base + PL011_CR);
cr.* = 0;
// 2. Clear all pending interrupts
const icr: *volatile u32 = @ptrFromInt(base + PL011_ICR);
icr.* = 0x7FF;
// 3. Set baud rate (115200 @ 24MHz QEMU clock)
// IBRD = 24000000 / (16 * 115200) = 13
// FBRD = ((0.0208... * 64) + 0.5) = 1
const ibrd: *volatile u32 = @ptrFromInt(base + PL011_IBRD);
const fbrd: *volatile u32 = @ptrFromInt(base + PL011_FBRD);
ibrd.* = 13;
fbrd.* = 1;
// 4. Line Control: 8N1, FIFO enable
const lcr_h: *volatile u32 = @ptrFromInt(base + PL011_LCR_H);
lcr_h.* = (0x3 << 5) | (1 << 4); // WLEN=8bit, FEN=1
// 5. Enable receive interrupt
const imsc: *volatile u32 = @ptrFromInt(base + PL011_IMSC);
imsc.* = (1 << 4); // RXIM: Receive interrupt mask
// 6. Enable UART: TXE + RXE + UARTEN
cr.* = (1 << 8) | (1 << 9) | (1 << 0); // TXE | RXE | UARTEN
// --- LOOPBACK TEST ---
// PL011 has loopback via CR bit 7 (LBE)
cr.* = cr.* | (1 << 7); // Enable loopback
// Write test byte
const dr: *volatile u32 = @ptrFromInt(base + PL011_DR);
const fr: *volatile u32 = @ptrFromInt(base + PL011_FR);
// Wait for TX not full
while ((fr.* & PL011_TXFF) != 0) {}
dr.* = 0xA5;
// Wait for RX not empty
var timeout: usize = 1000000;
while ((fr.* & PL011_RXFE) != 0 and timeout > 0) {
timeout -= 1;
}
var passed = false;
var reason: []const u8 = "Timeout";
if ((fr.* & PL011_RXFE) == 0) {
const val: u8 = @truncate(dr.*);
if (val == 0xA5) {
passed = true;
} else {
reason = "Data Mismatch";
}
}
// Disable loopback
cr.* = cr.* & ~@as(u32, 1 << 7);
if (passed) {
write_bytes("[UART] Loopback Test: PASS\n");
} else {
write_bytes("[UART] Loopback Test: FAIL (");
write_bytes(reason);
write_bytes(")\n");
}
}
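The IBRD/FBRD values hard-coded above can also be derived at comptime; a hedged sketch assuming the 24 MHz PL011 reference clock QEMU provides and the usual divisor formula (IBRD = clk/(16*baud), FBRD = round(frac*64)):

const UART_CLK: u64 = 24_000_000;
const BAUD: u64 = 115_200;
const DIV: u64 = 16 * BAUD;

const IBRD_VAL: u32 = @intCast(UART_CLK / DIV); // 13
const FBRD_VAL: u32 = @intCast(((UART_CLK % DIV) * 64 + DIV / 2) / DIV); // 1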
fn write_char_arm64(c: u8) void {
const dr: *volatile u32 = @ptrFromInt(PL011_BASE + PL011_DR);
const fr: *volatile u32 = @ptrFromInt(PL011_BASE + PL011_FR);
@ -152,6 +234,13 @@ pub fn read_direct() ?u8 {
return thr.*;
}
},
.aarch64 => {
const dr: *volatile u32 = @ptrFromInt(PL011_BASE + PL011_DR);
const fr: *volatile u32 = @ptrFromInt(PL011_BASE + PL011_FR);
if ((fr.* & PL011_RXFE) == 0) {
return @truncate(dr.*);
}
},
else => {},
}
return null;
@ -163,6 +252,11 @@ pub fn get_lsr() u8 {
const lsr: *volatile u8 = @ptrFromInt(NS16550A_BASE + NS16550A_LSR);
return lsr.*;
},
.aarch64 => {
// Return PL011 flags register (low byte)
const fr: *volatile u32 = @ptrFromInt(PL011_BASE + PL011_FR);
return @truncate(fr.*);
},
else => return 0,
}
}

View File

@ -20,6 +20,7 @@ pub fn poll_input() void {
// Only Kernel uses this
const Kernel = struct {
extern fn ion_push_stdin(ptr: [*]const u8, len: usize) void;
extern fn kprint(s: [*]const u8) void;
};
switch (builtin.cpu.arch) {
@ -34,6 +35,9 @@ pub fn poll_input() void {
const byte = thr.*;
const byte_arr = [1]u8{byte};
// DEBUG: Trace hardware read
Kernel.kprint("[HW Read]\n");
// Forward to Kernel Input Channel
Kernel.ion_push_stdin(&byte_arr, 1);

View File

@ -13,8 +13,14 @@
//! the request. Uses bounce-buffers to guarantee alignment.
const std = @import("std");
const builtin = @import("builtin");
const uart = @import("uart.zig");
const pci = @import("virtio_pci.zig");
// Comptime transport switch: PCI on RISC-V, MMIO on ARM64
const transport_mod = if (builtin.cpu.arch == .aarch64)
@import("virtio_mmio.zig")
else
@import("virtio_pci.zig");
// External C/Zig stubs
extern fn malloc(size: usize) ?*anyopaque;
@ -46,13 +52,43 @@ pub fn init() void {
}
pub const VirtioBlkDriver = struct {
transport: pci.VirtioTransport,
transport: transport_mod.VirtioTransport,
v_desc: [*]volatile VirtioDesc,
v_avail: *volatile VirtioAvail,
v_used: *volatile VirtioUsed,
queue_size: u16,
pub fn probe() ?VirtioBlkDriver {
if (builtin.cpu.arch == .aarch64) {
return probe_mmio();
} else {
return probe_pci();
}
}
fn probe_mmio() ?VirtioBlkDriver {
const mmio = @import("virtio_mmio.zig");
const base = mmio.find_device(2) orelse { // device_id=2 is block
uart.print("[VirtIO] No VirtIO-Block MMIO device found\n");
return null;
};
uart.print("[VirtIO] Found VirtIO-Block at MMIO 0x");
uart.print_hex(base);
uart.print("\n");
var self = VirtioBlkDriver{
.transport = transport_mod.VirtioTransport.init(base),
.v_desc = undefined,
.v_avail = undefined,
.v_used = undefined,
.queue_size = 0,
};
if (self.init_device()) {
return self;
}
return null;
}
fn probe_pci() ?VirtioBlkDriver {
const PCI_ECAM_BASE: usize = 0x30000000;
const bus: u8 = 0;
const func: u8 = 0;
@ -69,7 +105,7 @@ pub const VirtioBlkDriver = struct {
uart.print_hex(i);
uart.print(".0\n");
var self = VirtioBlkDriver{
.transport = pci.VirtioTransport.init(addr),
.transport = transport_mod.VirtioTransport.init(addr),
.v_desc = undefined,
.v_avail = undefined,
.v_used = undefined,
@ -87,29 +123,56 @@ pub const VirtioBlkDriver = struct {
if (!self.transport.probe()) return false;
self.transport.reset();
self.transport.add_status(1);
self.transport.add_status(2);
self.transport.add_status(1); // ACKNOWLEDGE
self.transport.add_status(2); // DRIVER
// Feature negotiation
const dev_features = self.transport.get_device_features();
_ = dev_features;
// Accept no special features for the block device; just basic operation
self.transport.set_driver_features(0);
transport_mod.io_barrier();
// FEATURES_OK only on modern (v2) transport
if (self.transport.is_modern) {
self.transport.add_status(8); // FEATURES_OK
transport_mod.io_barrier();
}
self.transport.select_queue(0);
const count = self.transport.get_queue_size();
const max_count = self.transport.get_queue_size();
// Cap queue size for memory efficiency
const MAX_BLK_QUEUE: u16 = 128;
const count = if (max_count > MAX_BLK_QUEUE) MAX_BLK_QUEUE else max_count;
// [Desc] [Avail] [Used] (Simplified layout)
const total = (count * 16) + (6 + count * 2) + 4096 + (6 + count * 8);
const raw_ptr = malloc(total + 4096) orelse return false;
const aligned = (@intFromPtr(raw_ptr) + 4095) & ~@as(usize, 4095);
// Zero out queue memory to ensure clean state
const byte_ptr: [*]u8 = @ptrFromInt(aligned);
for (0..total) |i| {
byte_ptr[i] = 0;
}
self.v_desc = @ptrFromInt(aligned);
self.v_avail = @ptrFromInt(aligned + (count * 16));
self.v_used = @ptrFromInt(aligned + (count * 16) + (6 + count * 2) + 4096);
self.queue_size = count;
// Ensure avail/used rings start clean
self.v_avail.flags = 0;
self.v_avail.idx = 0;
self.v_used.flags = 0;
if (self.transport.is_modern) {
self.transport.setup_modern_queue(aligned, aligned + (count * 16), @intFromPtr(self.v_used));
} else {
self.transport.set_queue_size(count);
self.transport.setup_legacy_queue(@intCast(aligned >> 12));
}
self.transport.add_status(4);
self.transport.add_status(4); // DRIVER_OK
global_blk = self.*;
uart.print("[VirtIO-Blk] Device Ready. Queue Size: ");
@ -151,15 +214,26 @@ pub const VirtioBlkDriver = struct {
// Submit to Avail Ring
const ring = @as([*]volatile u16, @ptrFromInt(@intFromPtr(self.v_avail) + 4));
ring[self.v_avail.idx % self.queue_size] = 0; // Head of chain
asm volatile ("fence w, w" ::: .{ .memory = true });
const expected_used = self.v_used.idx +% 1;
transport_mod.io_barrier();
self.v_avail.idx +%= 1;
asm volatile ("fence w, w" ::: .{ .memory = true });
transport_mod.io_barrier();
self.transport.notify(0);
// Wait for device (Polling)
while (self.v_used.idx == 0) {
asm volatile ("nop");
// Wait for device (poll until the used ring advances)
var timeout: usize = 0;
while (self.v_used.idx != expected_used) {
transport_mod.io_barrier();
timeout += 1;
if (timeout > 100_000_000) {
uart.print("[VirtIO-Blk] READ TIMEOUT! used.idx=");
uart.print_hex(self.v_used.idx);
uart.print(" expected=");
uart.print_hex(expected_used);
uart.print("\n");
return error.DiskError;
}
}
if (status != 0) return error.DiskError;
@ -190,14 +264,22 @@ pub const VirtioBlkDriver = struct {
const ring = @as([*]volatile u16, @ptrFromInt(@intFromPtr(self.v_avail) + 4));
ring[self.v_avail.idx % self.queue_size] = 3;
asm volatile ("fence w, w" ::: .{ .memory = true });
const expected_used = self.v_used.idx +% 1;
transport_mod.io_barrier();
self.v_avail.idx +%= 1;
asm volatile ("fence w, w" ::: .{ .memory = true });
transport_mod.io_barrier();
self.transport.notify(0);
while (status == 0xFF) {
asm volatile ("nop");
// Wait for device (poll until the used ring advances)
var timeout: usize = 0;
while (self.v_used.idx != expected_used) {
transport_mod.io_barrier();
timeout += 1;
if (timeout > 100_000_000) {
uart.print("[VirtIO-Blk] WRITE TIMEOUT!\n");
return error.DiskError;
}
}
if (status != 0) return error.DiskError;

hal/virtio_mmio.zig Normal file (268 lines)
View File

@ -0,0 +1,268 @@
// SPDX-License-Identifier: LCL-1.0
// Copyright (c) 2026 Markus Maiwald
// Stewardship: Self Sovereign Society Foundation
//
// This file is part of the Nexus Commonwealth.
// See legal/LICENSE_COMMONWEALTH.md for license terms.
//! Rumpk HAL: VirtIO MMIO Transport Layer (ARM64)
//!
//! Provides the same VirtioTransport API as virtio_pci.zig but for MMIO-based
//! VirtIO devices as found on QEMU aarch64 virt machine.
//!
//! QEMU virt MMIO layout: 32 slots starting at 0x0a000000, stride 0x200.
//! Each slot triggers GIC SPI (IRQ 48 + slot_index).
//!
//! Supports both legacy (v1) and modern (v2) MMIO transport.
//!
//! SAFETY: All hardware registers accessed via volatile pointers.
const std = @import("std");
const builtin = @import("builtin");
const uart = @import("uart.zig");
// =========================================================
// VirtIO MMIO Register Offsets (spec §4.2.2)
// =========================================================
const VIRTIO_MMIO_MAGIC_VALUE = 0x000;
const VIRTIO_MMIO_VERSION = 0x004;
const VIRTIO_MMIO_DEVICE_ID = 0x008;
const VIRTIO_MMIO_VENDOR_ID = 0x00C;
const VIRTIO_MMIO_DEVICE_FEATURES = 0x010;
const VIRTIO_MMIO_DEVICE_FEATURES_SEL = 0x014;
const VIRTIO_MMIO_DRIVER_FEATURES = 0x020;
const VIRTIO_MMIO_DRIVER_FEATURES_SEL = 0x024;
const VIRTIO_MMIO_QUEUE_SEL = 0x030;
const VIRTIO_MMIO_QUEUE_NUM_MAX = 0x034;
const VIRTIO_MMIO_QUEUE_NUM = 0x038;
const VIRTIO_MMIO_QUEUE_ALIGN = 0x03C;
const VIRTIO_MMIO_QUEUE_PFN = 0x040;
const VIRTIO_MMIO_QUEUE_READY = 0x044;
const VIRTIO_MMIO_QUEUE_NOTIFY = 0x050;
const VIRTIO_MMIO_INTERRUPT_STATUS = 0x060;
const VIRTIO_MMIO_INTERRUPT_ACK = 0x064;
const VIRTIO_MMIO_STATUS = 0x070;
const VIRTIO_MMIO_QUEUE_DESC_LOW = 0x080;
const VIRTIO_MMIO_QUEUE_DESC_HIGH = 0x084;
const VIRTIO_MMIO_QUEUE_AVAIL_LOW = 0x090;
const VIRTIO_MMIO_QUEUE_AVAIL_HIGH = 0x094;
const VIRTIO_MMIO_QUEUE_USED_LOW = 0x0A0;
const VIRTIO_MMIO_QUEUE_USED_HIGH = 0x0A4;
const VIRTIO_MMIO_CONFIG = 0x100; // Device-specific config starts here
// VirtIO magic value: "virt" in little-endian
const VIRTIO_MAGIC: u32 = 0x74726976;
// =========================================================
// QEMU virt MMIO Topology
// =========================================================
const MMIO_BASE: usize = 0x0a000000;
const MMIO_STRIDE: usize = 0x200;
const MMIO_SLOT_COUNT: usize = 32;
const MMIO_IRQ_BASE: u32 = 48; // GIC SPI base for VirtIO MMIO
// =========================================================
// MMIO Read/Write Helpers
// =========================================================
fn mmio_read(base: usize, offset: usize) u32 {
const ptr: *volatile u32 = @ptrFromInt(base + offset);
return ptr.*;
}
fn mmio_write(base: usize, offset: usize, val: u32) void {
const ptr: *volatile u32 = @ptrFromInt(base + offset);
ptr.* = val;
}
fn mmio_read_u8(base: usize, offset: usize) u8 {
const ptr: *volatile u8 = @ptrFromInt(base + offset);
return ptr.*;
}
// =========================================================
// Arch-safe memory barrier
// =========================================================
pub inline fn io_barrier() void {
switch (builtin.cpu.arch) {
.aarch64 => asm volatile ("dmb sy" ::: .{ .memory = true }),
.riscv64 => asm volatile ("fence" ::: .{ .memory = true }),
else => @compileError("unsupported arch"),
}
}
// =========================================================
// VirtIO MMIO Transport (same API surface as PCI transport)
// =========================================================
pub const VirtioTransport = struct {
base_addr: usize,
is_modern: bool,
version: u32,
// Legacy compatibility fields (match PCI transport shape)
legacy_bar: usize,
// Modern interface placeholders (unused for MMIO but present for API compat)
common_cfg: ?*volatile anyopaque,
notify_cfg: ?usize,
notify_off_multiplier: u32,
isr_cfg: ?*volatile u8,
device_cfg: ?*volatile u8,
pub fn init(mmio_base: usize) VirtioTransport {
return .{
.base_addr = mmio_base,
.is_modern = false,
.version = 0,
.legacy_bar = 0,
.common_cfg = null,
.notify_cfg = null,
.notify_off_multiplier = 0,
.isr_cfg = null,
.device_cfg = null,
};
}
pub fn probe(self: *VirtioTransport) bool {
const magic = mmio_read(self.base_addr, VIRTIO_MMIO_MAGIC_VALUE);
if (magic != VIRTIO_MAGIC) return false;
self.version = mmio_read(self.base_addr, VIRTIO_MMIO_VERSION);
if (self.version != 1 and self.version != 2) return false;
const device_id = mmio_read(self.base_addr, VIRTIO_MMIO_DEVICE_ID);
if (device_id == 0) return false; // No device at this slot
self.is_modern = (self.version == 2);
uart.print("[VirtIO-MMIO] Probed 0x");
uart.print_hex(self.base_addr);
uart.print(" Ver=");
uart.print_hex(self.version);
uart.print(" DevID=");
uart.print_hex(device_id);
uart.print("\n");
return true;
}
pub fn reset(self: *VirtioTransport) void {
self.set_status(0);
// After reset, wait for device to reinitialize (spec §2.1.1)
io_barrier();
}
pub fn get_status(self: *VirtioTransport) u8 {
return @truncate(mmio_read(self.base_addr, VIRTIO_MMIO_STATUS));
}
pub fn set_status(self: *VirtioTransport, status: u8) void {
mmio_write(self.base_addr, VIRTIO_MMIO_STATUS, @as(u32, status));
}
pub fn add_status(self: *VirtioTransport, status: u8) void {
self.set_status(self.get_status() | status);
}
pub fn select_queue(self: *VirtioTransport, idx: u16) void {
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_SEL, @as(u32, idx));
}
pub fn get_queue_size(self: *VirtioTransport) u16 {
return @truncate(mmio_read(self.base_addr, VIRTIO_MMIO_QUEUE_NUM_MAX));
}
pub fn set_queue_size(self: *VirtioTransport, size: u16) void {
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_NUM, @as(u32, size));
}
pub fn setup_legacy_queue(self: *VirtioTransport, pfn: u32) void {
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_ALIGN, 4096);
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_PFN, pfn);
}
pub fn setup_modern_queue(self: *VirtioTransport, desc: u64, avail: u64, used: u64) void {
// Set queue size first
const max_size = mmio_read(self.base_addr, VIRTIO_MMIO_QUEUE_NUM_MAX);
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_NUM, max_size);
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_DESC_LOW, @truncate(desc));
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_DESC_HIGH, @truncate(desc >> 32));
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_AVAIL_LOW, @truncate(avail));
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_AVAIL_HIGH, @truncate(avail >> 32));
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_USED_LOW, @truncate(used));
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_USED_HIGH, @truncate(used >> 32));
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_READY, 1);
}
pub fn notify(self: *VirtioTransport, queue_idx: u16) void {
mmio_write(self.base_addr, VIRTIO_MMIO_QUEUE_NOTIFY, @as(u32, queue_idx));
}
// =========================================================
// Unified Accessor API (matches PCI transport extensions)
// =========================================================
pub fn get_device_features(self: *VirtioTransport) u64 {
mmio_write(self.base_addr, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 0);
io_barrier();
const low: u64 = mmio_read(self.base_addr, VIRTIO_MMIO_DEVICE_FEATURES);
mmio_write(self.base_addr, VIRTIO_MMIO_DEVICE_FEATURES_SEL, 1);
io_barrier();
const high: u64 = mmio_read(self.base_addr, VIRTIO_MMIO_DEVICE_FEATURES);
return (high << 32) | low;
}
pub fn set_driver_features(self: *VirtioTransport, features: u64) void {
mmio_write(self.base_addr, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 0);
mmio_write(self.base_addr, VIRTIO_MMIO_DRIVER_FEATURES, @truncate(features));
io_barrier();
mmio_write(self.base_addr, VIRTIO_MMIO_DRIVER_FEATURES_SEL, 1);
mmio_write(self.base_addr, VIRTIO_MMIO_DRIVER_FEATURES, @truncate(features >> 32));
io_barrier();
}
pub fn get_device_config_byte(self: *VirtioTransport, offset: usize) u8 {
return mmio_read_u8(self.base_addr, VIRTIO_MMIO_CONFIG + offset);
}
pub fn ack_interrupt(self: *VirtioTransport) u32 {
const status = mmio_read(self.base_addr, VIRTIO_MMIO_INTERRUPT_STATUS);
mmio_write(self.base_addr, VIRTIO_MMIO_INTERRUPT_ACK, status);
return status;
}
};
// =========================================================
// Device Discovery
// =========================================================
/// Scan MMIO slots for a VirtIO device with the given device ID.
/// Returns MMIO base address or null if not found.
pub fn find_device(device_id: u32) ?usize {
var slot: usize = 0;
while (slot < MMIO_SLOT_COUNT) : (slot += 1) {
const base = MMIO_BASE + (slot * MMIO_STRIDE);
const magic = mmio_read(base, VIRTIO_MMIO_MAGIC_VALUE);
if (magic != VIRTIO_MAGIC) continue;
const dev_id = mmio_read(base, VIRTIO_MMIO_DEVICE_ID);
if (dev_id == device_id) {
return base;
}
}
return null;
}
/// Get the GIC SPI number for a given MMIO slot base address.
pub fn slot_irq(base: usize) u32 {
const slot = (base - MMIO_BASE) / MMIO_STRIDE;
return MMIO_IRQ_BASE + @as(u32, @intCast(slot));
}
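A hedged end-to-end sketch, not part of this file, showing the discovery and status handshake this transport enables (it mirrors the probe_mmio() paths in virtio_net.zig and virtio_block.zig); status values follow the VirtIO spec and the function name is illustrative:

const mmio = @import("virtio_mmio.zig");

fn bring_up_net_transport() ?mmio.VirtioTransport {
    const base = mmio.find_device(1) orelse return null; // device_id 1 = network device
    var t = mmio.VirtioTransport.init(base);
    if (!t.probe()) return null; // checks magic, version (1 or 2), device id
    t.reset();
    t.add_status(1); // ACKNOWLEDGE
    t.add_status(2); // DRIVER
    t.set_driver_features(0); // accept no optional features in this sketch
    mmio.io_barrier();
    if (t.is_modern) t.add_status(8); // FEATURES_OK (modern/v2 only)
    // ... virtqueue setup via select_queue()/setup_legacy_queue()/setup_modern_queue() ...
    t.add_status(4); // DRIVER_OK
    return t;
}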

View File

@ -14,8 +14,14 @@
//! to ensure correct synchronization with the virtual device.
const std = @import("std");
const builtin = @import("builtin");
const uart = @import("uart.zig");
const pci = @import("virtio_pci.zig");
// Comptime transport switch: PCI on RISC-V, MMIO on ARM64
const transport_mod = if (builtin.cpu.arch == .aarch64)
@import("virtio_mmio.zig")
else
@import("virtio_pci.zig");
// VirtIO Feature Bits
const VIRTIO_F_VERSION_1 = 32;
@ -117,47 +123,24 @@ pub export fn rumpk_net_init() void {
}
pub const VirtioNetDriver = struct {
transport: pci.VirtioTransport,
transport: transport_mod.VirtioTransport,
irq: u32,
rx_queue: ?*Virtqueue = null,
tx_queue: ?*Virtqueue = null,
pub fn get_mac(self: *VirtioNetDriver, out: [*]u8) void {
uart.print("[VirtIO-Net] Reading MAC from device_cfg...\n");
if (self.transport.is_modern) {
// Use device_cfg directly - this is the VirtIO-Net specific config
if (self.transport.device_cfg) |cfg| {
const ptr: [*]volatile u8 = @ptrCast(cfg);
uart.print(" DeviceCfg at: ");
uart.print_hex(@intFromPtr(cfg));
uart.print("\n MAC bytes: ");
for (0..6) |i| {
out[i] = ptr[i];
uart.print_hex8(ptr[i]);
if (i < 5) uart.print(":");
}
uart.print("\n");
} else {
uart.print(" ERROR: device_cfg is null!\n");
// Fallback to zeros
for (0..6) |i| {
out[i] = 0;
}
}
} else {
// Legacy
// Device Config starts at offset 20.
const base = self.transport.legacy_bar + 20;
for (0..6) |i| {
out[i] = @as(*volatile u8, @ptrFromInt(base + i)).*;
}
uart.print("[VirtIO-Net] Reading MAC from device config...\n");
for (0..6) |i| {
out[i] = self.transport.get_device_config_byte(i);
uart.print_hex8(out[i]);
if (i < 5) uart.print(":");
}
uart.print("\n");
}
pub fn init(base: usize, irq_num: u32) VirtioNetDriver {
return .{
.transport = pci.VirtioTransport.init(base),
.transport = transport_mod.VirtioTransport.init(base),
.irq = irq_num,
.rx_queue = null,
.tx_queue = null,
@ -165,6 +148,32 @@ pub const VirtioNetDriver = struct {
}
pub fn probe() ?VirtioNetDriver {
if (builtin.cpu.arch == .aarch64) {
return probe_mmio();
} else {
return probe_pci();
}
}
fn probe_mmio() ?VirtioNetDriver {
uart.print("[VirtIO] Probing MMIO for networking device...\n");
const mmio = @import("virtio_mmio.zig");
const base = mmio.find_device(1) orelse { // device_id=1 is net
uart.print("[VirtIO] No VirtIO-Net MMIO device found\n");
return null;
};
uart.print("[VirtIO] Found VirtIO-Net at MMIO 0x");
uart.print_hex(base);
uart.print("\n");
const irq = mmio.slot_irq(base);
var self = VirtioNetDriver.init(base, irq);
if (self.init_device()) {
return self;
}
return null;
}
fn probe_pci() ?VirtioNetDriver {
uart.print("[VirtIO] Probing PCI for networking device...\n");
const PCI_ECAM_BASE: usize = 0x30000000;
const bus: u8 = 0;
@ -213,52 +222,22 @@ pub const VirtioNetDriver = struct {
self.transport.add_status(VIRTIO_CONFIG_S_ACKNOWLEDGE);
self.transport.add_status(VIRTIO_CONFIG_S_DRIVER);
// 4. Feature Negotiation
if (self.transport.is_modern) {
// 4. Feature Negotiation (unified across PCI and MMIO)
{
uart.print("[VirtIO] Starting feature negotiation...\n");
if (self.transport.common_cfg == null) {
uart.print("[VirtIO] ERROR: common_cfg is null!\n");
return false;
}
const cfg = self.transport.common_cfg.?;
uart.print("[VirtIO] common_cfg addr: ");
uart.print_hex(@intFromPtr(cfg));
uart.print("\n");
uart.print("[VirtIO] Reading device features...\n");
// Read Device Features (Page 0)
cfg.device_feature_select = 0;
asm volatile ("fence" ::: .{ .memory = true });
const f_low = cfg.device_feature;
// Read Device Features (Page 1)
cfg.device_feature_select = 1;
asm volatile ("fence" ::: .{ .memory = true });
const f_high = cfg.device_feature;
const dev_features = self.transport.get_device_features();
uart.print("[VirtIO] Device Features: ");
uart.print_hex(f_low);
uart.print(" ");
uart.print_hex(f_high);
uart.print_hex(dev_features);
uart.print("\n");
// Accept VERSION_1 (Modern) and MAC
const accept_low: u32 = (1 << VIRTIO_NET_F_MAC);
const accept_high: u32 = (1 << (VIRTIO_F_VERSION_1 - 32));
const accept: u64 = (1 << VIRTIO_NET_F_MAC) |
(@as(u64, 1) << VIRTIO_F_VERSION_1);
self.transport.set_driver_features(accept);
transport_mod.io_barrier();
uart.print("[VirtIO] Writing driver features...\n");
cfg.driver_feature_select = 0;
cfg.driver_feature = accept_low;
asm volatile ("fence" ::: .{ .memory = true });
cfg.driver_feature_select = 1;
cfg.driver_feature = accept_high;
asm volatile ("fence" ::: .{ .memory = true });
uart.print("[VirtIO] Checking feature negotiation...\n");
self.transport.add_status(VIRTIO_CONFIG_S_FEATURES_OK);
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
if ((self.transport.get_status() & VIRTIO_CONFIG_S_FEATURES_OK) == 0) {
uart.print("[VirtIO] Feature negotiation failed!\n");
return false;
@ -267,10 +246,15 @@ pub const VirtioNetDriver = struct {
}
// 5. Setup RX Queue (0)
self.transport.select_queue(0);
const rx_count = self.transport.get_queue_size();
const rx_max = self.transport.get_queue_size();
// Cap queue size to avoid ION pool exhaustion (MMIO v1 reports 1024)
const MAX_QUEUE: u16 = 256;
const rx_count = if (rx_max > MAX_QUEUE) MAX_QUEUE else rx_max;
uart.print("[VirtIO] RX Queue Size: ");
uart.print_hex(rx_count);
uart.print("\n");
uart.print(" (max: ");
uart.print_hex(rx_max);
uart.print(")\n");
if (rx_count == 0 or rx_count == 0xFFFF) {
uart.print("[VirtIO] Invalid RX Queue Size. Aborting.\n");
@ -288,10 +272,13 @@ pub const VirtioNetDriver = struct {
// 6. Setup TX Queue (1)
self.transport.select_queue(1);
const tx_count = self.transport.get_queue_size();
const tx_max = self.transport.get_queue_size();
const tx_count = if (tx_max > MAX_QUEUE) MAX_QUEUE else tx_max;
uart.print("[VirtIO] TX Queue Size: ");
uart.print_hex(tx_count);
uart.print("\n");
uart.print(" (max: ");
uart.print_hex(tx_max);
uart.print(")\n");
if (tx_count == 0 or tx_count == 0xFFFF) {
uart.print("[VirtIO] Invalid TX Queue Size. Aborting.\n");
@ -392,11 +379,11 @@ pub const VirtioNetDriver = struct {
q_ptr.avail.flags = 0;
q_ptr.used.flags = 0;
asm volatile ("fence w, w" ::: .{ .memory = true });
transport_mod.io_barrier();
if (is_rx) {
q_ptr.avail.idx = count;
asm volatile ("fence w, w" ::: .{ .memory = true });
transport_mod.io_barrier();
}
const phys_addr = aligned_addr;
@ -404,6 +391,7 @@ pub const VirtioNetDriver = struct {
if (self.transport.is_modern) {
self.transport.setup_modern_queue(phys_addr, phys_addr + desc_size, phys_addr + used_offset);
} else {
self.transport.set_queue_size(count);
const pfn = @as(u32, @intCast(phys_addr >> 12));
self.transport.setup_legacy_queue(pfn);
}
@ -413,7 +401,7 @@ pub const VirtioNetDriver = struct {
}
pub fn rx_poll(self: *VirtioNetDriver, q: *Virtqueue) void {
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
const used = q.used;
const hw_idx = used.idx;
@ -473,7 +461,7 @@ pub const VirtioNetDriver = struct {
q.desc[desc_idx].addr = new_phys;
q.ids[desc_idx] = new_id;
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
avail_ring[q.avail.idx % q.num] = @intCast(desc_idx);
q.avail.idx +%= 1;
@ -486,14 +474,14 @@ pub const VirtioNetDriver = struct {
}
if (replenished) {
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
self.transport.notify(0);
}
}
pub fn tx_poll(self: *VirtioNetDriver, q: *Virtqueue) void {
_ = self;
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
const used = q.used;
const used_idx = used.idx;
const used_ring = get_used_ring(used);
@ -528,11 +516,11 @@ pub const VirtioNetDriver = struct {
q.ids[idx] = slab_id;
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
avail_ring[idx] = @intCast(idx);
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
q.avail.idx +%= 1;
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
self.transport.notify(1);
uart.print("[VirtIO TX-Slab] Sent ");
@ -579,11 +567,11 @@ pub const VirtioNetDriver = struct {
desc.len = @intCast(header_len + copy_len);
desc.flags = 0;
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
avail_ring[idx] = @intCast(desc_idx);
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
q.avail.idx +%= 1;
asm volatile ("fence" ::: .{ .memory = true });
transport_mod.io_barrier();
self.transport.notify(1);
uart.print("[VirtIO TX] Queued & Notified Len=");

View File

@ -14,6 +14,7 @@
//! Dynamically assigns BARs (Base Address Registers) if unassigned by firmware.
const std = @import("std");
const builtin = @import("builtin");
const uart = @import("uart.zig");
// PCI Config Offsets
@ -316,6 +317,17 @@ pub const VirtioTransport = struct {
}
}
pub fn set_queue_size(self: *VirtioTransport, size: u16) void {
// PCI legacy: queue size is read-only (device sets it)
// Modern: could set via common_cfg.queue_size
if (self.is_modern) {
if (self.common_cfg) |cfg| {
cfg.queue_size = size;
}
}
// Legacy PCI: queue size is fixed by device, no register to write
}
pub fn setup_legacy_queue(self: *VirtioTransport, pfn: u32) void {
// Only for legacy
@as(*volatile u32, @ptrFromInt(self.legacy_bar + 0x08)).* = pfn;
@ -345,8 +357,65 @@ pub const VirtioTransport = struct {
notify_ptr.* = queue_idx;
}
}
// =========================================================
// Unified Accessor API (matches MMIO transport)
// =========================================================
pub fn get_device_features(self: *VirtioTransport) u64 {
if (self.is_modern) {
const cfg = self.common_cfg.?;
cfg.device_feature_select = 0;
io_barrier();
const low: u64 = cfg.device_feature;
cfg.device_feature_select = 1;
io_barrier();
const high: u64 = cfg.device_feature;
return (high << 32) | low;
} else {
// Legacy: features at offset 0x00 (32-bit only)
return @as(*volatile u32, @ptrFromInt(self.legacy_bar + 0x00)).*;
}
}
pub fn set_driver_features(self: *VirtioTransport, features: u64) void {
if (self.is_modern) {
const cfg = self.common_cfg.?;
cfg.driver_feature_select = 0;
cfg.driver_feature = @truncate(features);
io_barrier();
cfg.driver_feature_select = 1;
cfg.driver_feature = @truncate(features >> 32);
io_barrier();
} else {
// Legacy: guest features at offset 0x04 (32-bit only)
@as(*volatile u32, @ptrFromInt(self.legacy_bar + 0x04)).* = @truncate(features);
}
}
pub fn get_device_config_byte(self: *VirtioTransport, offset: usize) u8 {
if (self.is_modern) {
if (self.device_cfg) |cfg| {
const ptr: [*]volatile u8 = @ptrCast(cfg);
return ptr[offset];
}
return 0;
} else {
// Legacy: device config starts at offset 20
return @as(*volatile u8, @ptrFromInt(self.legacy_bar + 20 + offset)).*;
}
}
};
/// Arch-safe memory barrier for VirtIO I/O ordering.
pub inline fn io_barrier() void {
switch (builtin.cpu.arch) {
.riscv64 => asm volatile ("fence" ::: .{ .memory = true }),
.aarch64 => asm volatile ("dmb sy" ::: .{ .memory = true }),
else => @compileError("unsupported arch"),
}
}
// Modern Config Structure Layout
pub const VirtioPciCommonCfg = extern struct {
device_feature_select: u32,