314 lines
9.1 KiB
Zig
314 lines
9.1 KiB
Zig
// SPDX-License-Identifier: LCL-1.0
|
|
// Copyright (c) 2026 Markus Maiwald
|
|
// Stewardship: Self Sovereign Society Foundation
|
|
//
|
|
// This file is part of the Nexus Commonwealth.
|
|
// See legal/LICENSE_COMMONWEALTH.md for license terms.
|
|
|
|
//! Rumpk HAL: Memory Management (The Glass Cage)
|
|
//!
|
|
//! Implements Sv39 virtual memory isolation for RISC-V.
|
|
//! Handles page table construction, identity mapping, and worker isolation.
|
|
//!
|
|
//! SAFETY: Manages raw physical memory addresses for page tables.
|
|
//! Performs identity mapping for the kernel and restricted mapping for workers.
|
|
|
|
const std = @import("std");
|
|
|
|
// Sv39 Constants
|
|
pub const PAGE_SIZE: usize = 4096;
|
|
pub const PAGE_SHIFT: u6 = 12;
|
|
pub const PTE_PER_PAGE: usize = 512;
|
|
pub const LEVELS: u8 = 3;
|
|
|
|
// Physical memory layout (RISC-V QEMU virt)
|
|
pub const DRAM_BASE: u64 = 0x80000000;
|
|
pub const DRAM_SIZE: u64 = 256 * 1024 * 1024; // 256MB for expanded userspace
|
|
|
|
// MMIO regions
|
|
pub const UART_BASE: u64 = 0x10000000;
|
|
pub const VIRTIO_BASE: u64 = 0x10001000;
|
|
pub const PLIC_BASE: u64 = 0x0c000000;
|
|
|
|
// PTE Flags
|
|
pub const PTE_V: u64 = 1 << 0; // Valid
|
|
pub const PTE_R: u64 = 1 << 1; // Read
|
|
pub const PTE_W: u64 = 1 << 2; // Write
|
|
pub const PTE_X: u64 = 1 << 3; // Execute
|
|
pub const PTE_U: u64 = 1 << 4; // User
|
|
pub const PTE_G: u64 = 1 << 5; // Global
|
|
pub const PTE_A: u64 = 1 << 6; // Accessed
|
|
pub const PTE_D: u64 = 1 << 7; // Dirty
|
|
|
|
// Sv39 Page Table Entry: [63:54] reserved, [53:10] PPN (26+9+9 bits), [9:0] flags+RSW.
pub const PageTableEntry = packed struct {
    flags: u10,
    ppn0: u9,
    ppn1: u9,
    ppn2: u26,
    reserved: u10,

    /// Build an entry whose PPN fields address physical page `pa >> 12`
    /// and whose low bits carry `flags` (PTE_V, PTE_R, ...).
    pub fn init(pa: u64, flags: u64) PageTableEntry {
        const ppn = pa >> PAGE_SHIFT;
        // @truncate keeps exactly the destination field's low bits,
        // so no explicit masks are needed.
        return .{
            .flags = @truncate(flags),
            .ppn0 = @truncate(ppn),
            .ppn1 = @truncate(ppn >> 9),
            .ppn2 = @truncate(ppn >> 18),
            .reserved = 0,
        };
    }

    /// Raw 64-bit hardware representation of this entry.
    pub fn to_u64(self: PageTableEntry) u64 {
        const raw: u64 = @bitCast(self);
        return raw;
    }

    /// Physical address this entry points at (PPN reassembled, page-aligned).
    pub fn get_pa(self: PageTableEntry) u64 {
        var ppn: u64 = self.ppn2;
        ppn = (ppn << 9) | self.ppn1;
        ppn = (ppn << 9) | self.ppn0;
        return ppn << PAGE_SHIFT;
    }

    /// True when the hardware V bit is set.
    pub fn is_valid(self: PageTableEntry) bool {
        return (self.to_u64() & PTE_V) != 0;
    }

    /// True for a leaf mapping: any of R/W/X set. A valid entry with
    /// all three clear is a pointer to the next-level table.
    pub fn is_leaf(self: PageTableEntry) bool {
        return (self.to_u64() & (PTE_R | PTE_W | PTE_X)) != 0;
    }
};
|
|
|
|
// One Sv39 page table: 512 entries, page-sized and page-aligned as the
// hardware walker requires.
pub const PageTable = struct {
    entries: [PTE_PER_PAGE]PageTableEntry align(PAGE_SIZE),

    /// A table whose entries are all zero — every slot invalid (V clear).
    pub fn init() PageTable {
        const empty = PageTableEntry.init(0, 0);
        return .{ .entries = [_]PageTableEntry{empty} ** PTE_PER_PAGE };
    }

    /// Pointer to slot `index` (0..511); bounds are the caller's duty.
    pub fn get_entry(self: *PageTable, index: usize) *PageTableEntry {
        return &self.entries[index];
    }
};
|
|
|
|
// Simple bump allocator for page tables. Pages are handed out in order and
// never freed; init_page_allocator() resets the pool.
var page_alloc_base: u64 = 0;
var page_alloc_offset: u64 = 0;
// Pool capacity in bytes. Default matches the historical hard-coded 8MB
// limit so behavior is unchanged if allocation happens before init.
var page_alloc_size: u64 = 8 * 1024 * 1024;
var kernel_satp_value: u64 = 0;

/// Initialize the page-table bump allocator over [base, base + size).
/// `base` must be page-aligned physical memory owned by the kernel.
pub fn init_page_allocator(base: u64, size: u64) void {
    page_alloc_base = base;
    page_alloc_offset = 0;
    // BUGFIX: `size` was previously discarded and the capacity hard-coded
    // to 8MB inside alloc_page_table(); honor the caller's pool size.
    page_alloc_size = size;
}

/// Allocate one zeroed page table from the pool.
/// Returns null when the pool is exhausted.
pub fn alloc_page_table() ?*PageTable {
    if (page_alloc_offset + PAGE_SIZE > page_alloc_size) {
        return null;
    }

    const addr = page_alloc_base + page_alloc_offset;
    page_alloc_offset += PAGE_SIZE;

    // SAFETY: addr lies inside the pool handed to init_page_allocator()
    // and is PAGE_SIZE-aligned because base is and offset is a multiple.
    const pt: *PageTable = @ptrFromInt(addr);
    pt.* = PageTable.init();
    return pt;
}
|
|
|
|
// Extract the 9-bit virtual page number for `level` (0..2) from `va`.
// Sv39: VPN[level] occupies bits [12 + 9*level .. 12 + 9*level + 8].
pub fn vpn(va: u64, level: u8) usize {
    const shift: u6 = @intCast(PAGE_SHIFT + 9 * @as(u32, level));
    const index = (va >> shift) & 0x1FF;
    return @intCast(index);
}
|
|
|
|
/// Map one 4KB page `va` -> `pa` in the tree rooted at `root`,
/// allocating intermediate tables on demand.
/// The leaf is stamped with the caller's flags plus V|A|D (A/D pre-set so
/// hardware never needs to write them back).
/// Errors: OutOfMemory when the page-table pool is exhausted;
/// MappingConflict when the walk hits an existing superpage leaf.
pub fn map_page(root: *PageTable, va: u64, pa: u64, flags: u64) !void {
    var pt = root;
    var level: u8 = 2;

    while (level > 0) : (level -= 1) {
        const pte = pt.get_entry(vpn(va, level));

        if (!pte.is_valid()) {
            const new_pt = alloc_page_table() orelse return error.OutOfMemory;
            // Pointer entry: V set, R/W/X clear marks "next-level table".
            pte.* = PageTableEntry.init(@intFromPtr(new_pt), PTE_V);
        } else if (pte.is_leaf()) {
            // BUGFIX: a valid R/W/X entry at level 2/1 is a superpage leaf,
            // not a table. Descending into it would reinterpret mapped
            // memory as a page table and corrupt it.
            return error.MappingConflict;
        }

        pt = @ptrFromInt(pte.get_pa());
    }

    const pte = pt.get_entry(vpn(va, 0));
    pte.* = PageTableEntry.init(pa, flags | PTE_V | PTE_A | PTE_D);
}
|
|
|
|
// Identity-step over [va_start, va_start + size) in 4KB pages, mapping each
// to the matching offset from pa_start. A non-page-multiple `size` is
// rounded up (the final partial page is still mapped whole).
pub fn map_range(root: *PageTable, va_start: u64, pa_start: u64, size: u64, flags: u64) !void {
    var mapped: u64 = 0;
    while (mapped < size) {
        try map_page(root, va_start + mapped, pa_start + mapped, flags);
        mapped += PAGE_SIZE;
    }
}
|
|
|
|
/// Build the kernel's identity-mapped (VA == PA) Sv39 root page table:
/// all of DRAM as RWX supervisor memory plus the MMIO windows as RW.
/// Errors: OutOfMemory when the page-table pool is exhausted.
pub fn create_kernel_identity_map() !*PageTable {
    const root = alloc_page_table() orelse return error.OutOfMemory;

    // Kernel Identity Map (VA = PA, S-mode ONLY) - Now 256MB
    try map_range(root, DRAM_BASE, DRAM_BASE, DRAM_SIZE, PTE_R | PTE_W | PTE_X);

    // MMIO regions
    try map_range(root, UART_BASE, UART_BASE, PAGE_SIZE, PTE_R | PTE_W);
    // CONSISTENCY: was the magic number 0x10001000 — same value as the
    // declared VIRTIO_BASE constant; 0x8000 covers 8 virtio-mmio slots.
    try map_range(root, VIRTIO_BASE, VIRTIO_BASE, 0x8000, PTE_R | PTE_W);
    // NOTE(review): 0x3000_0000 / 0x4000_0000 appear to be the QEMU virt
    // PCIe ECAM and PCIe MMIO windows — confirm against the device tree.
    try map_range(root, 0x30000000, 0x30000000, 0x10000000, PTE_R | PTE_W);
    try map_range(root, 0x40000000, 0x40000000, 0x10000000, PTE_R | PTE_W);
    try map_range(root, PLIC_BASE, PLIC_BASE, 0x400000, PTE_R | PTE_W);

    return root;
}
|
|
|
|
/// Build a restricted page table for one worker ("the glass cage"):
/// kernel half S-mode only, user half U-accessible, plus UART, the worker's
/// stack, and an 8-page shared packet window at `packet_addr`.
/// Errors: OutOfMemory when the page-table pool is exhausted.
pub fn create_worker_map(stack_base: u64, stack_size: u64, packet_addr: u64) !*PageTable {
    const root = alloc_page_table() orelse return error.OutOfMemory;

    // 🏛️ THE EXPANDED CAGE (Phase 37 - 256MB RAM)

    kprint("[MM] Creating worker map:\n");
    kprint("[MM]   Kernel (S-mode): 0x80000000-0x88000000\n");
    kprint("[MM]   User (U-mode):   0x88000000-0x90000000\n");

    const half: u64 = 128 * 1024 * 1024;
    const user_base = DRAM_BASE + half;

    // 1. Kernel Memory (0-128MB) -> Supervisor ONLY (PTE_U = 0)
    // This allows the fiber trampoline to execute in S-mode.
    try map_range(root, DRAM_BASE, DRAM_BASE, half, PTE_R | PTE_W | PTE_X);

    // 2. User Memory (128-256MB) -> User Accessible (PTE_U = 1)
    // This allows NipBox (at 128MB offset) to execute in U-mode.
    try map_range(root, user_base, user_base, half, PTE_R | PTE_W | PTE_X | PTE_U);

    // 3. User MMIO (UART)
    try map_range(root, UART_BASE, UART_BASE, PAGE_SIZE, PTE_R | PTE_W | PTE_U);

    // 4. Overlap stack with user access
    try map_range(root, stack_base, stack_base, stack_size, PTE_R | PTE_W | PTE_U);

    // 5. Shared SysTable & Rings (0x83000000) - Map 32KB (8 pages)
    try map_range(root, packet_addr, packet_addr, 8 * PAGE_SIZE, PTE_R | PTE_W | PTE_U);

    kprint("[MM] Worker map created successfully\n");

    return root;
}
|
|
|
|
// Encode a root page table as a satp CSR value.
// Layout: MODE[63:60] | ASID[59:44] | PPN[43:0]; mode 8 = Sv39, ASID = 0.
pub fn make_satp(root: *PageTable) u64 {
    const mode_sv39: u64 = 8;
    const root_ppn: u64 = @intFromPtr(root) >> PAGE_SHIFT;
    return (mode_sv39 << 60) | root_ppn;
}
|
|
|
|
// Activate page table
// SAFETY: Writes the satp CSR, then flushes the whole TLB with a global
// sfence.vma. The caller must guarantee that the currently-executing code
// is mapped at the same VA in the new address space (identity-mapped here),
// or the very next instruction fetch will fault.
pub export fn mm_activate_satp(satp_val: u64) callconv(.c) void {
    asm volatile (
        \\csrw satp, %[satp]
        \\sfence.vma zero, zero
        :
        : [satp] "r" (satp_val),
    );
}
|
|
|
|
// Export for kernel: set up the page-table pool before any mapping is built.
pub export fn mm_init() callconv(.c) void {
    // Relocate page tables to 240MB offset (Top of 256MB)
    const pool_offset_mb: u64 = 240;
    const pool_base = DRAM_BASE + pool_offset_mb * 1024 * 1024;
    init_page_allocator(pool_base, 8 * 1024 * 1024);
}
|
|
|
|
/// Build the kernel identity map, cache its satp value for later retrieval
/// via mm_get_kernel_satp(), and switch the MMU onto it.
/// Halts (spins forever) if the page-table pool cannot supply the tables —
/// the kernel cannot continue without paging.
pub export fn mm_enable_kernel_paging() callconv(.c) void {
    const root = create_kernel_identity_map() catch {
        // BUGFIX: previously hung with no output, leaving no clue why boot
        // stalled. Report the fatal condition before parking the hart.
        kprint("[MM] FATAL: failed to build kernel identity map\n");
        while (true) {}
    };
    const satp_val = make_satp(root);
    kernel_satp_value = satp_val;
    mm_activate_satp(satp_val);
}
|
|
|
|
// Returns the satp value cached by mm_enable_kernel_paging(), or 0 if
// kernel paging has not been enabled yet.
pub export fn mm_get_kernel_satp() callconv(.c) u64 {
    return kernel_satp_value;
}
|
|
|
|
// C ABI wrapper around create_worker_map(): returns the worker's satp value,
// or 0 on failure (0 is never a valid Sv39 satp, since its mode field is 8).
pub export fn mm_create_worker_map(stack_base: u64, stack_size: u64, packet_addr: u64) callconv(.c) u64 {
    const root = create_worker_map(stack_base, stack_size, packet_addr) catch return 0;
    return make_satp(root);
}
|
|
|
|
extern fn kprint(s: [*:0]const u8) void;
|
|
extern fn kprint_hex(n: u64) void;
|
|
extern fn kprintln(s: [*:0]const u8) void;
|
|
|
|
// Print one PTE of the debug walk: "<label><idx>]: <raw>" plus its kind.
// Returns the next-level table when the walk should continue, or null when
// it ended at this level (invalid or leaf entry, already reported).
fn debug_walk_level(pt: *PageTable, va: u64, level: u8, label: [*:0]const u8) ?*PageTable {
    const idx = vpn(va, level);
    const pte = pt.get_entry(idx);
    kprint(label);
    kprint_hex(idx);
    kprint("]: ");
    kprint_hex(pte.to_u64());
    if (!pte.is_valid()) {
        kprintln(" (Invalid)");
        return null;
    }
    if (pte.is_leaf()) {
        kprintln(" (Leaf)");
        return null;
    }
    kprintln(" (Table)");
    return @ptrFromInt(pte.get_pa());
}

/// Debug aid: walk the KERNEL page table (from the cached satp) for `va`
/// and print the PTE found at each level. Output is unchanged from the
/// previous copy-pasted implementation; the per-level logic now lives in
/// debug_walk_level().
pub export fn mm_debug_check_va(va: u64) callconv(.c) void {
    kprint("[MM] Inspecting VA: ");
    kprint_hex(va);
    kprintln("");

    // Recover the root table from the cached satp (PPN is bits [43:0]).
    const ppn = kernel_satp_value & 0xFFFFFFFFFFF;
    const root_pa = ppn << PAGE_SHIFT;
    const root: *PageTable = @ptrFromInt(root_pa);

    // Levels 2 and 1: stop early on an invalid or leaf entry.
    const pt1 = debug_walk_level(root, va, 2, "  L2[") orelse return;
    const pt0 = debug_walk_level(pt1, va, 1, "  L1[") orelse return;

    // Level 0: final slot — print the raw entry only, as before.
    const idx0 = vpn(va, 0);
    const pte0 = pt0.get_entry(idx0);
    kprint("  L0[");
    kprint_hex(idx0);
    kprint("]: ");
    kprint_hex(pte0.to_u64());
    kprintln("");
}
|