// rumpk/hal/virtio_block.zig

// MARKUS MAIWALD (ARCHITECT) | VOXIS FORGE (AI)
// Rumpk Layer 0: VirtIO-Block Driver (The Ledger)
// - Provides persistent storage access (Sector I/O)
const std = @import("std");
const uart = @import("uart.zig");
const pci = @import("virtio_pci.zig");
// C-ABI allocator provided by the kernel heap; queue memory obtained from it is never freed.
extern fn malloc(size: usize) ?*anyopaque;
// Singleton driver instance, published by init_device(); the exported C-ABI shims below use it.
var global_blk: ?VirtioBlkDriver = null;
/// C-ABI entry point: blocking read of one 512-byte sector into `buf`.
/// Prints an error on the UART when the driver singleton was never set up.
export fn virtio_blk_read(sector: u64, buf: [*]u8) void {
    // Guard clause: bail out loudly if init() never published a driver.
    if (global_blk == null) {
        uart.print("[VirtIO-Blk] Error: Driver not initialized.\n");
        return;
    }
    global_blk.?.read_sync(sector, buf);
}
/// C-ABI entry point: blocking write of one 512-byte sector from `buf`.
/// Prints an error on the UART when the driver singleton was never set up.
export fn virtio_blk_write(sector: u64, buf: [*]const u8) void {
    // Guard clause: bail out loudly if init() never published a driver.
    if (global_blk == null) {
        uart.print("[VirtIO-Blk] Error: Driver not initialized.\n");
        return;
    }
    global_blk.?.write_sync(sector, buf);
}
/// Module entry point: locate and bring up the VirtIO block device.
///
/// probe() already performs the complete bring-up: it calls
/// VirtioBlkDriver.init(), which runs init_device() and publishes the
/// driver in `global_blk`. The previous code then called init_device()
/// a *second* time on a local copy, resetting the freshly-initialized
/// device — that double initialization is removed here.
pub fn init() void {
    if (VirtioBlkDriver.probe() != null) {
        uart.print("[Rumpk L0] Storage initialized (The Ledger).\n");
    } else {
        uart.print("[Rumpk L0] No Storage Device Found.\n");
    }
}
// virtio-blk request types (virtio spec, sec. 5.2.6): IN = device-to-driver (read).
const VIRTIO_BLK_T_IN: u32 = 0;
// OUT = driver-to-device (write).
const VIRTIO_BLK_T_OUT: u32 = 1;
// Fixed 512-byte sector size matching the bounce buffer below.
const SECTOR_SIZE: usize = 512;
pub const VirtioBlkDriver = struct {
    /// PCI transport (legacy or modern) this driver talks through.
    transport: pci.VirtioTransport,
    /// Request virtqueue (queue 0); null until init_device() succeeds.
    req_queue: ?*Virtqueue,
    /// Last used-ring index we consumed; compared against the device-written idx.
    last_used_idx: u16,

    /// Build a driver over `transport` and run the full device bring-up.
    /// Returns error.DeviceInitFailed if the device refuses initialization.
    pub fn init(transport: pci.VirtioTransport) !VirtioBlkDriver {
        var driver = VirtioBlkDriver{
            .req_queue = null,
            .transport = transport,
            .last_used_idx = 0,
        };
        if (!driver.init_device()) {
            return error.DeviceInitFailed;
        }
        return driver;
    }

    /// Scan the fixed ECAM slots 00:02.0 and 00:03.0 for a VirtIO block
    /// function (vendor 0x1af4; device 0x1001 legacy / 0x1042 modern) and
    /// bring it up. Returns null when nothing usable is found.
    pub fn probe() ?VirtioBlkDriver {
        uart.print("[VirtIO] Probing PCI for Block device...\n");
        const PCI_ECAM_BASE: usize = 0x30000000;
        // Scan a few slots. Usually 00:02.0 if 00:01.0 is Net.
        // TODO: replace with a real PCI bus walk instead of hard-coded slots.
        const bus: u8 = 0;
        const dev: u8 = 2; // Assuming second device
        const func: u8 = 0;
        // ECAM config-space address: bus << 20 | device << 15 | function << 12.
        const addr = PCI_ECAM_BASE | (@as(usize, bus) << 20) | (@as(usize, dev) << 15) | (@as(usize, func) << 12);
        const ptr: *volatile u32 = @ptrFromInt(addr);
        const id = ptr.*;
        // Dword 0 = device_id:vendor_id. 0x1af4 = Red Hat / VirtIO vendor;
        // device 0x1001 (legacy block) or 0x1042 (modern block, 0x1040 + 2).
        if (id == 0x10011af4 or id == 0x10421af4) {
            uart.print("[VirtIO] Found VirtIO-Block device at PCI 00:02.0\n");
            return VirtioBlkDriver.init(pci.VirtioTransport.init(addr)) catch null;
        }
        // Try Slot 3 just in case
        const dev3: u8 = 3;
        const addr3 = PCI_ECAM_BASE | (@as(usize, bus) << 20) | (@as(usize, dev3) << 15) | (@as(usize, func) << 12);
        const ptr3: *volatile u32 = @ptrFromInt(addr3);
        const id3 = ptr3.*;
        if (id3 == 0x10011af4 or id3 == 0x10421af4) {
            uart.print("[VirtIO] Found VirtIO-Block device at PCI 00:03.0\n");
            return VirtioBlkDriver.init(pci.VirtioTransport.init(addr3)) catch null;
        }
        return null;
    }

    /// Full virtio initialization handshake: reset, status negotiation,
    /// queue 0 setup, DRIVER_OK. Publishes a copy of the driver in
    /// `global_blk` on success. Returns false on any failure.
    pub fn init_device(self: *VirtioBlkDriver) bool {
        // 0. Probe Transport (Legacy/Modern)
        if (!self.transport.probe()) {
            uart.print("[VirtIO-Blk] Transport Probe Failed.\n");
            return false;
        }
        // 1. Reset
        self.transport.reset();
        // 2. ACKNOWLEDGE (1) | DRIVER (2)
        self.transport.add_status(3);
        // 2b. FIX: modern (virtio 1.0+) devices require FEATURES_OK (8) to be
        // set before DRIVER_OK, otherwise the device rejects DRIVER_OK.
        // Legacy devices have no such status bit, so skip it there.
        // NOTE(review): we accept zero features; confirm the transport leaves
        // driver_features at 0 so the device agrees to FEATURES_OK.
        if (self.transport.is_modern) {
            self.transport.add_status(8);
        }
        // 3. Queue Setup (Queue 0 is Request Queue)
        self.transport.select_queue(0);
        const q_size = self.transport.get_queue_size();
        if (q_size == 0) return false;
        self.req_queue = self.setup_queue(0, q_size) catch return false;
        // 4. DRIVER_OK (4): the device is live from here on.
        self.transport.add_status(4);
        // Publish a copy for the C-ABI shims. The copy shares req_queue (a
        // pointer) with self but has its own last_used_idx, so all I/O after
        // init must go through global_blk only.
        global_blk = self.*;
        uart.print("[VirtIO-Blk] Device Ready. Queue Size: ");
        uart.print_hex(q_size);
        uart.print(" HeaderSize: ");
        uart.print_hex(@sizeOf(VirtioBlkReq));
        uart.print("\n");
        return true;
    }

    /// Blocking read of one 512-byte sector into `buf`.
    pub fn read_sync(self: *VirtioBlkDriver, sector: u64, buf: [*]u8) void {
        self.submit_request(VIRTIO_BLK_T_IN, sector, buf);
    }

    /// Blocking write of one 512-byte sector from `buf`.
    pub fn write_sync(self: *VirtioBlkDriver, sector: u64, buf: [*]const u8) void {
        // Cast const away because submit_request's buffer parameter is shared
        // between both paths; the T_OUT path only ever reads from buf.
        self.submit_request(VIRTIO_BLK_T_OUT, sector, @constCast(buf));
    }

    // SOVEREIGN BOUNCE BUFFERS (statically allocated and aligned so the
    // device always sees stable addresses). Only one request can be in
    // flight at a time — submit_request is strictly synchronous.
    var bounce_header: VirtioBlkReq align(16) = undefined;
    var bounce_sector: [512]u8 align(4096) = undefined;
    var bounce_status: u8 align(16) = 0;

    /// Build the canonical 3-descriptor block request (header, data, status),
    /// kick the device, and poll the used ring until completion or timeout.
    fn submit_request(self: *VirtioBlkDriver, type_: u32, sector: u64, buf: [*]u8) void {
        const q = self.req_queue orelse return;
        const idx = q.avail.idx % q.num;
        // Use fixed descriptor indices 0, 1, 2 (single outstanding request).
        const d1 = 0;
        const d2 = 1;
        const d3 = 2;
        bounce_header.type = type_;
        bounce_header.reserved = 0;
        bounce_header.sector = sector;
        bounce_status = 0xFF; // poison value: lets us see whether the device wrote it
        const VRING_DESC_F_NEXT: u16 = 1;
        const VRING_DESC_F_WRITE: u16 = 2;
        if (type_ == VIRTIO_BLK_T_OUT) {
            @memcpy(&bounce_sector, buf[0..512]);
        }
        // Descriptor 0: request header (device reads).
        q.desc[d1].addr = @intFromPtr(&bounce_header);
        q.desc[d1].len = @sizeOf(VirtioBlkReq);
        q.desc[d1].flags = VRING_DESC_F_NEXT;
        q.desc[d1].next = d2;
        // Descriptor 1: sector payload (device writes on IN, reads on OUT).
        q.desc[d2].addr = @intFromPtr(&bounce_sector);
        q.desc[d2].len = 512;
        if (type_ == VIRTIO_BLK_T_IN) {
            q.desc[d2].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
        } else {
            q.desc[d2].flags = VRING_DESC_F_NEXT;
        }
        q.desc[d2].next = d3;
        // Descriptor 2: single status byte (device writes).
        q.desc[d3].addr = @intFromPtr(&bounce_status);
        q.desc[d3].len = 1;
        q.desc[d3].flags = VRING_DESC_F_WRITE;
        q.desc[d3].next = 0;
        // Descriptors must be visible before the avail-ring entry...
        asm volatile ("fence" ::: .{ .memory = true });
        const avail_ring = get_avail_ring(q.avail);
        avail_ring[idx] = d1;
        // ...and the ring entry before the idx bump that the device watches.
        asm volatile ("fence" ::: .{ .memory = true });
        q.avail.idx +%= 1;
        asm volatile ("fence" ::: .{ .memory = true });
        self.transport.notify(0);
        // Poll the used ring for completion (no interrupt support yet).
        var timeout: usize = 10000000;
        const used_ptr = q.used; // *VirtqUsed
        while (used_ptr.idx == self.last_used_idx and timeout > 0) : (timeout -= 1) {
            asm volatile ("fence" ::: .{ .memory = true });
        }
        if (timeout == 0) {
            uart.print("[VirtIO-Blk] Timeout Waiting for Used Ring!\n");
        } else {
            // Request Done.
            self.last_used_idx +%= 1; // Consume
            asm volatile ("fence" ::: .{ .memory = true });
            if (bounce_status != 0) {
                uart.print("[VirtIO-Blk] I/O Error Status: ");
                uart.print_hex(bounce_status);
                uart.print("\n");
            } else {
                if (type_ == VIRTIO_BLK_T_IN) {
                    // Copy the freshly-read sector out of the bounce buffer.
                    const dest_slice = buf[0..512];
                    @memcpy(dest_slice, &bounce_sector);
                }
            }
        }
    }

    /// Allocate and register virtqueue `index` with `count` entries.
    /// Layout: [descriptors][avail ring] ... page gap ... [used ring], the
    /// used ring page-aligned as the legacy transport requires.
    fn setup_queue(self: *VirtioBlkDriver, index: u16, count: u16) !*Virtqueue {
        // Split-ring sizes per the virtio spec:
        //   desc:  16 bytes per entry
        //   avail: flags(2) + idx(2) + ring(2*n) + used_event(2)
        //   used:  flags(2) + idx(2) + ring(8*n) (trailing avail_event not counted here)
        const desc_size = 16 * @as(usize, count);
        const avail_size = 6 + 2 * @as(usize, count);
        const used_offset = (desc_size + avail_size + 4095) & ~@as(usize, 4095);
        const used_size = 6 + 8 * @as(usize, count);
        const total_size = used_offset + used_size;
        // Over-allocate one page so we can page-align the base ourselves
        // (malloc gives no alignment guarantee). This memory is never freed:
        // the device holds its address for the kernel's lifetime.
        const raw_ptr = malloc(total_size + 4096) orelse return error.OutOfMemory;
        const aligned_addr = (@intFromPtr(raw_ptr) + 4095) & ~@as(usize, 4095);
        const q_ptr_raw = malloc(@sizeOf(Virtqueue)) orelse return error.OutOfMemory;
        const q_ptr: *Virtqueue = @ptrCast(@alignCast(q_ptr_raw));
        q_ptr.num = count;
        q_ptr.desc = @ptrFromInt(aligned_addr);
        q_ptr.avail = @ptrFromInt(aligned_addr + desc_size);
        q_ptr.used = @ptrFromInt(aligned_addr + used_offset);
        // Hand the ring addresses to the device.
        // NOTE(review): assumes identity mapping (virtual == physical) — confirm.
        const phys_addr = aligned_addr;
        self.transport.select_queue(index);
        if (self.transport.is_modern) {
            self.transport.setup_modern_queue(phys_addr, phys_addr + desc_size, phys_addr + used_offset);
        } else {
            // The legacy transport takes a page frame number of the ring base.
            const pfn = @as(u32, @intCast(phys_addr >> 12));
            self.transport.setup_legacy_queue(pfn);
        }
        return q_ptr;
    }

    // --- Device-visible data structures ---------------------------------
    // FIX: these are read/written by the device, so their memory layout must
    // be the exact C layout the virtio spec mandates. A plain Zig `struct`
    // has UNSPECIFIED field layout — use `extern struct` for everything the
    // device touches.

    /// virtio-blk request header (spec sec. 5.2.6): exactly 16 bytes.
    const VirtioBlkReq = extern struct {
        type: u32,
        reserved: u32,
        sector: u64,
    };
    /// Driver-side handle for one virtqueue. The rings themselves live in
    /// the page-aligned block registered with the device; this bookkeeping
    /// struct is driver-private, so plain layout is fine here.
    const Virtqueue = struct {
        desc: [*]volatile VirtioDesc,
        avail: *volatile VirtioAvail,
        used: *volatile VirtioUsed,
        num: u16,
    };
    /// vring descriptor: 16 bytes, device-visible.
    const VirtioDesc = extern struct {
        addr: u64,
        len: u32,
        flags: u16,
        next: u16,
    };
    /// Header of the avail ring; the u16 ring entries follow it in memory.
    const VirtioAvail = extern struct {
        flags: u16,
        idx: u16,
    };
    /// Header of the used ring; the 8-byte used elements follow it in memory.
    const VirtioUsed = extern struct {
        flags: u16,
        idx: u16,
    };

    /// The avail-ring entries start 4 bytes past the avail header
    /// (flags + idx); VirtioAvail only models the header itself.
    inline fn get_avail_ring(avail: *volatile VirtioAvail) [*]volatile u16 {
        return @ptrFromInt(@intFromPtr(avail) + 4);
    }
};