// rumpk/hal/gpu.zig — listing metadata (523 lines, 15 KiB, Zig); kept as a comment so the file parses.

// SPDX-License-Identifier: LCL-1.0
// Copyright (c) 2026 Markus Maiwald
// Stewardship: Self Sovereign Society Foundation
//
// This file is part of the Nexus Commonwealth.
// See legal/LICENSE_COMMONWEALTH.md for license terms.
//! Rumpk Layer 0: VirtIO-GPU Driver (The Retina)
//!
//! Blasts pixels from RAM canvas to host display.
//! Handles command queue management, resource creation, and buffer flushing.
//!
//! SAFETY: All hardware registers and queues are accessed via volatile pointers.
//! Uses static command/response buffers to avoid dynamic allocation in the tick loop.
const std = @import("std");
const fb = @import("framebuffer.zig");
const uart = @import("uart.zig");
// =========================================================
// VirtIO-GPU Constants
// =========================================================
// Control-queue command types the driver issues (virtio-gpu 2D command set).
const VIRTIO_GPU_CMD_GET_DISPLAY_INFO: u32 = 0x0100;
const VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: u32 = 0x0101;
const VIRTIO_GPU_CMD_RESOURCE_UNREF: u32 = 0x0102;
const VIRTIO_GPU_CMD_SET_SCANOUT: u32 = 0x0103;
const VIRTIO_GPU_CMD_RESOURCE_FLUSH: u32 = 0x0104;
const VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: u32 = 0x0105;
const VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: u32 = 0x0106;
// Success response types the device writes back into the response buffer.
// NOTE(review): `send_command` currently does not check these — verify
// responses if command failures need to be surfaced.
const VIRTIO_GPU_RESP_OK_NODATA: u32 = 0x1100;
const VIRTIO_GPU_RESP_OK_DISPLAY_INFO: u32 = 0x1101;
// Pixel format: 32-bit BGRA, 8 bits per channel, unsigned normalized.
const VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: u32 = 1;
// The single host-side 2D resource that backs the framebuffer.
const RESOURCE_ID: u32 = 1;
// =========================================================
// VirtIO-GPU Structures (packed for wire format)
// =========================================================
/// Common header prefixed to every virtio-gpu control command and response.
const VirtioGpuCtrlHdr = extern struct {
    type: u32, // one of the VIRTIO_GPU_CMD_* / VIRTIO_GPU_RESP_* values
    flags: u32,
    fence_id: u64,
    ctx_id: u32,
    padding: u32,
};
/// RESOURCE_CREATE_2D payload: allocate a host-side 2D resource.
const VirtioGpuResourceCreate2D = extern struct {
    hdr: VirtioGpuCtrlHdr,
    resource_id: u32,
    format: u32, // VIRTIO_GPU_FORMAT_* pixel format
    width: u32,
    height: u32,
};
/// One guest-physical scatter-gather entry for RESOURCE_ATTACH_BACKING.
const VirtioGpuMemEntry = extern struct {
    addr: u64, // guest-physical address of the backing page range
    length: u32,
    padding: u32,
};
/// RESOURCE_ATTACH_BACKING fixed prefix; a VirtioGpuMemEntry array follows
/// it on the wire (see the local AttachCmd struct in `cmd_attach_backing`).
const VirtioGpuResourceAttachBacking = extern struct {
    hdr: VirtioGpuCtrlHdr,
    resource_id: u32,
    nr_entries: u32,
    // Followed by VirtioGpuMemEntry array
};
/// SET_SCANOUT payload: bind a resource rectangle to a display scanout.
const VirtioGpuSetScanout = extern struct {
    hdr: VirtioGpuCtrlHdr,
    r_x: u32,
    r_y: u32,
    r_width: u32,
    r_height: u32,
    scanout_id: u32,
    resource_id: u32,
};
/// TRANSFER_TO_HOST_2D payload: copy guest backing memory into the resource.
const VirtioGpuTransferToHost2D = extern struct {
    hdr: VirtioGpuCtrlHdr,
    r_x: u32,
    r_y: u32,
    r_width: u32,
    r_height: u32,
    offset: u64, // byte offset into the backing memory
    resource_id: u32,
    padding: u32,
};
/// RESOURCE_FLUSH payload: present a resource rectangle on the display.
const VirtioGpuResourceFlush = extern struct {
    hdr: VirtioGpuCtrlHdr,
    r_x: u32,
    r_y: u32,
    r_width: u32,
    r_height: u32,
    resource_id: u32,
    padding: u32,
};
// =========================================================
// Driver State
// =========================================================
// Set once `init` completes the full setup sequence; guards `flush`.
var initialized: bool = false;
// MMIO base for VirtIO-GPU, set by `probe`. On QEMU virt the slot depends
// on the DTB (e.g. 0x10008000 for device 1 after net); callers pass the
// base in rather than this module discovering it.
var mmio_base: usize = 0;
// VirtIO Queue Layout (contiguous, matching the legacy v1 page layout so
// one allocation serves both the v1 PFN and v2 split-address register paths).
const QUEUE_SIZE = 16;
const PAGE_SIZE = 4096;
/// One vring descriptor (virtio split-queue format).
const VirtioDesc = extern struct {
    addr: u64, // guest-physical buffer address
    len: u32,
    flags: u16, // VRING_DESC_F_* bits
    next: u16, // index of the next descriptor when F_NEXT is set
};
/// Driver-written available ring.
const VirtioAvail = extern struct {
    flags: u16,
    idx: u16, // free-running index; device consumes entries up to it
    ring: [QUEUE_SIZE]u16,
    // Note: older versions might have used_event here, but we don't need it for basic polling
};
/// One completed element in the used ring (device-written).
const VirtioUsedItem = extern struct {
    id: u32, // head descriptor index of the completed chain
    len: u32, // bytes the device wrote
};
/// Device-written used ring.
const VirtioUsed = extern struct {
    flags: u16,
    idx: u16, // free-running index bumped by the device on completion
    ring: [QUEUE_SIZE]VirtioUsedItem,
};
// We create a structure that matches the legacy layout:
// Descriptors [16 * 16 = 256 bytes]
// Available [2 + 2 + 32 = 36 bytes]
// Padding to 4096 (legacy v1 requires the used ring to start on a page boundary)
// Used [2 + 2 + 16 * 8 = 132 bytes]
const VirtioQueueLayout = extern struct {
    desc: [QUEUE_SIZE]VirtioDesc,
    avail: VirtioAvail,
    _pad1: [PAGE_SIZE - @sizeOf([QUEUE_SIZE]VirtioDesc) - @sizeOf(VirtioAvail)]u8,
    used: VirtioUsed,
};
// The single statically-allocated control queue, page-aligned as the
// transport requires.
var queue align(PAGE_SIZE) = VirtioQueueLayout{
    // SAFETY(GPU): Descriptor ring is initialized to `undefined` for performance.
    // Individual descriptors are populated by `send_command` before use.
    .desc = undefined,
    .avail = .{ .flags = 0, .idx = 0, .ring = [_]u16{0} ** QUEUE_SIZE },
    ._pad1 = [_]u8{0} ** (PAGE_SIZE - @sizeOf([QUEUE_SIZE]VirtioDesc) - @sizeOf(VirtioAvail)),
    .used = .{ .flags = 0, .idx = 0, .ring = [_]VirtioUsedItem{.{ .id = 0, .len = 0 }} ** QUEUE_SIZE },
};
// Last used-ring index the driver has consumed; compared against the
// device-written `queue.used.idx` when polling for completion.
var last_used_idx: u16 = 0;
// Command/Response buffers (static)
// SAFETY(GPU): Command buffer initialized to `undefined` for performance.
// Populated by command-specific functions (e.g. `cmd_transfer_2d`) before transmission.
var cmd_buf: [512]u8 align(4096) = undefined;
// SAFETY(GPU): Response buffer initialized to `undefined` for performance.
// Populated by hardware device during synchronous command execution.
var resp_buf: [256]u8 align(4096) = undefined;
// =========================================================
// MMIO Helpers
// =========================================================
/// Volatile 32-bit load from the device register at `mmio_base + offset`.
fn mmio_read(offset: usize) u32 {
    const reg: *volatile u32 = @ptrFromInt(mmio_base + offset);
    return reg.*;
}
/// Volatile 32-bit store to the device register at `mmio_base + offset`.
fn mmio_write(offset: usize, val: u32) void {
    const reg: *volatile u32 = @ptrFromInt(mmio_base + offset);
    reg.* = val;
}
// VirtIO MMIO register offsets (virtio spec, MMIO transport).
const VIRTIO_MMIO_MAGIC_VALUE = 0x000;
const VIRTIO_MMIO_VERSION = 0x004; // 1 = legacy, 2 = modern
const VIRTIO_MMIO_DEVICE_ID = 0x008; // 16 = GPU
const VIRTIO_MMIO_VENDOR_ID = 0x00c;
const VIRTIO_MMIO_DEVICE_FEATURES = 0x010;
// DRIVER_FEATURES (modern name) and GUEST_FEATURES (legacy name) are the
// same register — both offsets are deliberately 0x020.
const VIRTIO_MMIO_DRIVER_FEATURES = 0x020;
const VIRTIO_MMIO_GUEST_FEATURES = 0x020;
const VIRTIO_MMIO_GUEST_FEATURES_SEL = 0x024; // selects the 32-bit feature word
const VIRTIO_MMIO_QUEUE_SEL = 0x030;
const VIRTIO_MMIO_QUEUE_NUM_MAX = 0x034;
const VIRTIO_MMIO_QUEUE_NUM = 0x038;
const VIRTIO_MMIO_QUEUE_ALIGN = 0x03c; // legacy (v1) only
const VIRTIO_MMIO_QUEUE_PFN = 0x040; // legacy (v1) only
const VIRTIO_MMIO_QUEUE_READY = 0x044; // modern (v2) only
const VIRTIO_MMIO_QUEUE_NOTIFY = 0x050;
const VIRTIO_MMIO_INTERRUPT_STATUS = 0x060;
const VIRTIO_MMIO_INTERRUPT_ACK = 0x064;
const VIRTIO_MMIO_STATUS = 0x070;
// Modern (v2) split queue addresses, 64-bit written as low/high halves.
const VIRTIO_MMIO_QUEUE_DESC_LOW = 0x080;
const VIRTIO_MMIO_QUEUE_DESC_HIGH = 0x084;
const VIRTIO_MMIO_QUEUE_AVAIL_LOW = 0x090;
const VIRTIO_MMIO_QUEUE_AVAIL_HIGH = 0x094;
const VIRTIO_MMIO_QUEUE_USED_LOW = 0x0a0;
const VIRTIO_MMIO_QUEUE_USED_HIGH = 0x0a4;
// Device status bits, set cumulatively during initialization.
const VIRTIO_STATUS_ACKNOWLEDGE = 1;
const VIRTIO_STATUS_DRIVER = 2;
const VIRTIO_STATUS_DRIVER_OK = 4;
const VIRTIO_STATUS_FEATURES_OK = 8;
const VIRTIO_STATUS_NEEDS_RESET = 64;
const VIRTIO_STATUS_FAILED = 128;
// Descriptor flag bits.
const VRING_DESC_F_NEXT: u16 = 1;
const VRING_DESC_F_WRITE: u16 = 2;
const VRING_DESC_F_INDIRECT: u16 = 4;
// =========================================================
// Queue Operations
// =========================================================
/// Configure control queue 0 with the static `queue` memory, handling both
/// the legacy (v1, PFN-based) and modern (v2, split-address) MMIO layouts.
/// Logs and returns without configuring anything if the queue is unusable.
fn queue_init() void {
    // Select queue 0 (controlq).
    mmio_write(VIRTIO_MMIO_QUEUE_SEL, 0);
    const max = mmio_read(VIRTIO_MMIO_QUEUE_NUM_MAX);
    uart.print("[GPU] Queue 0 Max Size: ");
    uart.print_hex(max);
    uart.print("\n");
    if (max == 0) {
        uart.print("[GPU] Queue 0 not available!\n");
        return;
    }
    // The virtio spec forbids setting QUEUE_NUM above QUEUE_NUM_MAX. Our
    // rings are statically sized, so we cannot shrink below QUEUE_SIZE —
    // bail out instead of programming an illegal size.
    if (max < QUEUE_SIZE) {
        uart.print("[GPU] Queue 0 max size below driver QUEUE_SIZE!\n");
        return;
    }
    mmio_write(VIRTIO_MMIO_QUEUE_NUM, QUEUE_SIZE);
    const version = mmio_read(VIRTIO_MMIO_VERSION);
    if (version == 1) {
        // Legacy VirtIO MMIO v1: one page-aligned region, published by PFN.
        const queue_addr = @intFromPtr(&queue);
        const pfn = queue_addr / 4096; // Page frame number
        mmio_write(VIRTIO_MMIO_QUEUE_ALIGN, 4096);
        mmio_write(VIRTIO_MMIO_QUEUE_PFN, @truncate(pfn));
        uart.print("[GPU] Legacy queue (v1) initialized at PFN 0x");
        uart.print_hex(pfn);
        uart.print("\n");
    } else {
        // Modern VirtIO MMIO v2: descriptor/avail/used addresses are
        // programmed separately as 64-bit low/high pairs.
        const desc_addr = @intFromPtr(&queue.desc);
        const avail_addr = @intFromPtr(&queue.avail);
        const used_addr = @intFromPtr(&queue.used);
        mmio_write(VIRTIO_MMIO_QUEUE_DESC_LOW, @truncate(desc_addr));
        mmio_write(VIRTIO_MMIO_QUEUE_DESC_HIGH, @truncate(desc_addr >> 32));
        mmio_write(VIRTIO_MMIO_QUEUE_AVAIL_LOW, @truncate(avail_addr));
        mmio_write(VIRTIO_MMIO_QUEUE_AVAIL_HIGH, @truncate(avail_addr >> 32));
        mmio_write(VIRTIO_MMIO_QUEUE_USED_LOW, @truncate(used_addr));
        mmio_write(VIRTIO_MMIO_QUEUE_USED_HIGH, @truncate(used_addr >> 32));
        mmio_write(VIRTIO_MMIO_QUEUE_READY, 1);
        uart.print("[GPU] Modern queue (v2) initialized.\n");
    }
    // Reset ring bookkeeping so driver and device agree on index 0.
    queue.avail.idx = 0;
    last_used_idx = 0;
}
/// Submit one GPU command synchronously on control queue 0.
///
/// `ptr`/`len` describe the command bytes (normally in `cmd_buf`); the
/// device writes its reply into `resp_buf`. Blocks (busy-polls) until the
/// device advances the used ring.
/// NOTE(review): there is no timeout — an unresponsive device hangs the
/// caller. Acceptable for the boot-time setup path, but confirm for use
/// inside the tick loop.
fn send_command(ptr: [*]const u8, len: usize) void {
    const phys_cmd = @intFromPtr(ptr);
    const phys_resp = @intFromPtr(&resp_buf);
    // Descriptor 0: command payload (device-readable), chained to desc 1.
    queue.desc[0] = .{
        .addr = phys_cmd,
        .len = @intCast(len),
        .flags = VRING_DESC_F_NEXT,
        .next = 1,
    };
    // Descriptor 1: response area (device-writable), end of chain.
    queue.desc[1] = .{
        .addr = phys_resp,
        .len = @sizeOf(@TypeOf(resp_buf)),
        .flags = VRING_DESC_F_WRITE,
        .next = 0,
    };
    // Publish the chain head in the available ring, then bump the index.
    // The fences order descriptor/ring writes against the index update,
    // and the index update against the doorbell.
    queue.avail.ring[queue.avail.idx % QUEUE_SIZE] = 0;
    asm volatile ("fence" ::: .{ .memory = true });
    queue.avail.idx +%= 1;
    asm volatile ("fence" ::: .{ .memory = true });
    // Notify device (doorbell for queue 0).
    mmio_write(VIRTIO_MMIO_QUEUE_NOTIFY, 0);
    // Poll for completion. `used.idx` is written by the device behind the
    // compiler's back, so it must be loaded through a volatile pointer to
    // guarantee a fresh read each iteration.
    const used_idx: *volatile u16 = &queue.used.idx;
    while (last_used_idx == used_idx.*) {
        asm volatile ("" ::: .{ .memory = true });
    }
    last_used_idx = used_idx.*;
}
// =========================================================
// GPU Commands
// =========================================================
/// Ask the host to allocate a 2D resource matching the framebuffer's
/// dimensions and pixel format.
fn cmd_resource_create_2d() void {
    const req: *VirtioGpuResourceCreate2D = @ptrCast(@alignCast(&cmd_buf));
    req.hdr = .{
        .type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D,
        .flags = 0,
        .fence_id = 0,
        .ctx_id = 0,
        .padding = 0,
    };
    req.resource_id = RESOURCE_ID;
    req.format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
    req.width = fb.WIDTH;
    req.height = fb.HEIGHT;
    send_command(&cmd_buf, @sizeOf(VirtioGpuResourceCreate2D));
}
/// Attach the framebuffer's guest memory as backing for the resource.
/// The wire format is the attach-backing header followed immediately by
/// the scatter-gather entries; with a single contiguous framebuffer we
/// only ever need one entry, so we declare the combined struct inline.
fn cmd_attach_backing() void {
    const AttachCmd = extern struct {
        hdr: VirtioGpuCtrlHdr,
        resource_id: u32,
        nr_entries: u32,
        entry: VirtioGpuMemEntry,
    };
    const req: *AttachCmd = @ptrCast(@alignCast(&cmd_buf));
    req.hdr = .{
        .type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING,
        .flags = 0,
        .fence_id = 0,
        .ctx_id = 0,
        .padding = 0,
    };
    req.resource_id = RESOURCE_ID;
    req.nr_entries = 1;
    req.entry = .{
        .addr = fb.fb_get_buffer_phys(),
        .length = @intCast(fb.get_size()),
        .padding = 0,
    };
    send_command(&cmd_buf, @sizeOf(AttachCmd));
}
/// Bind the resource to scanout 0, covering the full framebuffer area.
fn cmd_set_scanout() void {
    const req: *VirtioGpuSetScanout = @ptrCast(@alignCast(&cmd_buf));
    req.hdr = .{
        .type = VIRTIO_GPU_CMD_SET_SCANOUT,
        .flags = 0,
        .fence_id = 0,
        .ctx_id = 0,
        .padding = 0,
    };
    req.r_x = 0;
    req.r_y = 0;
    req.r_width = fb.WIDTH;
    req.r_height = fb.HEIGHT;
    req.scanout_id = 0;
    req.resource_id = RESOURCE_ID;
    send_command(&cmd_buf, @sizeOf(VirtioGpuSetScanout));
}
/// Copy the entire guest framebuffer into the host-side resource.
fn cmd_transfer_2d() void {
    const req: *VirtioGpuTransferToHost2D = @ptrCast(@alignCast(&cmd_buf));
    req.hdr = .{
        .type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D,
        .flags = 0,
        .fence_id = 0,
        .ctx_id = 0,
        .padding = 0,
    };
    req.r_x = 0;
    req.r_y = 0;
    req.r_width = fb.WIDTH;
    req.r_height = fb.HEIGHT;
    req.offset = 0;
    req.resource_id = RESOURCE_ID;
    req.padding = 0;
    send_command(&cmd_buf, @sizeOf(VirtioGpuTransferToHost2D));
}
/// Present the full resource rectangle on the display.
fn cmd_resource_flush() void {
    const req: *VirtioGpuResourceFlush = @ptrCast(@alignCast(&cmd_buf));
    req.hdr = .{
        .type = VIRTIO_GPU_CMD_RESOURCE_FLUSH,
        .flags = 0,
        .fence_id = 0,
        .ctx_id = 0,
        .padding = 0,
    };
    req.r_x = 0;
    req.r_y = 0;
    req.r_width = fb.WIDTH;
    req.r_height = fb.HEIGHT;
    req.resource_id = RESOURCE_ID;
    req.padding = 0;
    send_command(&cmd_buf, @sizeOf(VirtioGpuResourceFlush));
}
// =========================================================
// Public API
// =========================================================
/// Check whether a VirtIO-GPU device sits at `base`.
///
/// Sets the module-level `mmio_base` before reading any registers (the
/// MMIO helpers depend on it), so a failed probe leaves `mmio_base`
/// pointing at `base` — same as the previous behavior.
pub fn probe(base: usize) bool {
    mmio_base = base;
    const magic = mmio_read(VIRTIO_MMIO_MAGIC_VALUE);
    const version = mmio_read(VIRTIO_MMIO_VERSION);
    const device_id = mmio_read(VIRTIO_MMIO_DEVICE_ID);
    // Debug dump of the identification registers.
    uart.print("[GPU Probe] 0x");
    uart.print_hex(base);
    uart.print(" Magic=0x");
    uart.print_hex(magic);
    uart.print(" Ver=");
    uart.print_hex(version);
    uart.print(" DevID=");
    uart.print_hex(device_id);
    uart.print("\n");
    // Accept only: magic "virt" (little-endian 0x74726976), MMIO version
    // 1 (legacy) or 2 (modern), and device ID 16 (GPU).
    const is_gpu = magic == 0x74726976 and
        (version == 1 or version == 2) and
        device_id == 16;
    if (!is_gpu) return false;
    uart.print("[GPU] VirtIO-GPU found at 0x");
    uart.print_hex(base);
    uart.print("\n");
    return true;
}
/// Bring up the VirtIO-GPU at `base` and display an initial test pattern.
///
/// Follows the virtio initialization sequence: reset, ACKNOWLEDGE, DRIVER,
/// feature negotiation + FEATURES_OK (modern devices), queue setup,
/// DRIVER_OK, then the GPU-specific resource/scanout handshake. Returns
/// silently (with `initialized` still false) if the probe fails.
pub fn init(base: usize) void {
    if (!probe(base)) return;
    // Reset device, then acknowledge it and declare a driver — using the
    // named status constants instead of magic 1/2/4 literals.
    mmio_write(VIRTIO_MMIO_STATUS, 0);
    mmio_write(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE);
    mmio_write(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER);
    // Modern (v2) devices require feature negotiation and FEATURES_OK
    // before queue setup (virtio 1.x spec, device initialization). We take
    // no optional features but must ack VIRTIO_F_VERSION_1 (feature bit 32,
    // i.e. bit 0 of feature word 1).
    if (mmio_read(VIRTIO_MMIO_VERSION) >= 2) {
        mmio_write(VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
        mmio_write(VIRTIO_MMIO_GUEST_FEATURES, 1); // VIRTIO_F_VERSION_1
        mmio_write(VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
        mmio_write(VIRTIO_MMIO_GUEST_FEATURES, 0);
        mmio_write(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_ACKNOWLEDGE | VIRTIO_STATUS_DRIVER | VIRTIO_STATUS_FEATURES_OK);
        if (mmio_read(VIRTIO_MMIO_STATUS) & VIRTIO_STATUS_FEATURES_OK == 0) {
            uart.print("[GPU] Device rejected features!\n");
            mmio_write(VIRTIO_MMIO_STATUS, VIRTIO_STATUS_FAILED);
            return;
        }
    }
    // Setup queue
    queue_init();
    // Driver OK — preserve the status bits already set and add DRIVER_OK.
    mmio_write(VIRTIO_MMIO_STATUS, mmio_read(VIRTIO_MMIO_STATUS) | VIRTIO_STATUS_DRIVER_OK);
    // Initialize the RAM canvas that will back the GPU resource.
    fb.init();
    // GPU setup sequence: create resource, attach framebuffer, scan out.
    uart.print("[GPU] Creating 2D Resource...\n");
    cmd_resource_create_2d();
    uart.print("[GPU] Attaching Backing...\n");
    cmd_attach_backing();
    uart.print("[GPU] Setting Scanout...\n");
    cmd_set_scanout();
    initialized = true;
    // Report the actual framebuffer dimensions instead of a hardcoded
    // "800x600" string (uart only exposes hex printing).
    uart.print("[GPU] VirtIO-GPU initialized. Resolution: 0x");
    uart.print_hex(fb.WIDTH);
    uart.print(" x 0x");
    uart.print_hex(fb.HEIGHT);
    uart.print("\n");
    // Draw initial test pattern
    fb.fill_rect(100, 100, 200, 50, 0xFF00FF00); // Neon green bar
    flush();
}
/// Push the RAM canvas to the host resource and present it on screen.
/// No-op until `init` has completed successfully.
pub fn flush() void {
    if (initialized) {
        cmd_transfer_2d();
        cmd_resource_flush();
    }
}
// Export for Nim — C-ABI entry points consumed by the layer above.
/// C-ABI wrapper around `init`; `base` is the device's MMIO base address.
export fn virtio_gpu_init(base: usize) void {
    init(base);
}
/// C-ABI wrapper around `flush`.
export fn virtio_gpu_flush() void {
    flush();
}