feat(scribe): Implement Scribe Editor Save & Stabilize VirtIO-Block

- hal/virtio_block: Implemented global bounce buffers and Used Ring Polling for stable, synchronous I/O.
- core/fs/sfs: Implemented sfs_write_file to handle SFS file creation and data writing.
- core/ion: Added CMD_FS_WRITE syscall definition.
- core/kernel: Added CMD_FS_WRITE syscall handler and fs/sfs integration.
- npl/nipbox: Added nexus_file_write wrapper and updated Scribe (ed) to use it for saving files.
Markus Maiwald 2025-12-31 23:20:30 +01:00
parent 64380de4a7
commit 4f1ad1f3be
7 changed files with 331 additions and 116 deletions
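
At a glance, the new save path (a minimal sketch; the names are the ones added in this commit, the file content is an invented example):

# Userland sketch: Scribe's save now reduces to one wrapper call.
import std  # npl/nipbox/std.nim: nexus_file_write, print

proc demo_save() =
  print("[demo] saving notes.txt")
  # Packs FileArgs and issues CMD_FS_WRITE (0x203); the kernel's ION fiber
  # dispatches it to sfs_write_file, which calls virtio_blk_write.
  nexus_file_write("notes.txt", "line one\nline two\n")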

View File

@ -101,3 +101,88 @@ proc sfs_list*() =
  if not found:
    kprintln(" (Empty)")
proc sfs_write_file*(name: cstring, data: cstring, data_len: int) =
  if not sfs_mounted:
    kprintln("[SFS] Write Error: Not mounted.")
    return

  # 1. Read Directory Table (Sector 1)
  virtio_blk_read(1, addr io_buffer[0])

  var free_slot_offset = -1
  var found_file_offset = -1
  var max_sector: uint32 = 1
  var offset = 0
  while offset < 512:
    if io_buffer[offset] != 0:
      var entry_name: string = ""
      for i in 0..31:
        if io_buffer[offset+i] == 0: break
        entry_name.add(char(io_buffer[offset+i]))
      if entry_name == $name:
        found_file_offset = offset
      # Track the highest start sector so a new file can go right after it.
      var s_sect: uint32 = uint32(io_buffer[offset+32]) or
        (uint32(io_buffer[offset+33]) shl 8) or
        (uint32(io_buffer[offset+34]) shl 16) or
        (uint32(io_buffer[offset+35]) shl 24)
      if s_sect > max_sector: max_sector = s_sect
    elif free_slot_offset == -1:
      free_slot_offset = offset
    offset += 64

  # 2. Determine Target Sector
  var target_sector: uint32 = 0
  var target_offset = 0
  if found_file_offset != -1:
    kprintln("[SFS] Overwriting existing file...")
    target_offset = found_file_offset
    target_sector = uint32(io_buffer[target_offset+32]) or
      (uint32(io_buffer[target_offset+33]) shl 8) or
      (uint32(io_buffer[target_offset+34]) shl 16) or
      (uint32(io_buffer[target_offset+35]) shl 24)
  elif free_slot_offset != -1:
    kprintln("[SFS] Creating new file...")
    target_offset = free_slot_offset
    target_sector = max_sector + 1
  else:
    kprintln("[SFS] Error: Directory Full.")
    return

  # 3. Write Data (single sector: anything past 512 bytes is truncated)
  kprint("[SFS] Writing to Sector: ")
  kprint_hex(uint64(target_sector))
  kprintln("")
  var data_buf: array[512, byte]
  for i in 0..511: data_buf[i] = 0
  for i in 0 ..< data_len:
    if i < 512: data_buf[i] = byte(data[i])
  virtio_blk_write(uint64(target_sector), addr data_buf[0])

  # 4. Update Directory Entry (name, start sector, size -- little-endian)
  var n_str = $name
  for i in 0..31:
    if i < n_str.len: io_buffer[target_offset+i] = byte(n_str[i])
    else: io_buffer[target_offset+i] = 0
  io_buffer[target_offset+32] = byte(target_sector and 0xFF)
  io_buffer[target_offset+33] = byte((target_sector shr 8) and 0xFF)
  io_buffer[target_offset+34] = byte((target_sector shr 16) and 0xFF)
  io_buffer[target_offset+35] = byte((target_sector shr 24) and 0xFF)
  var sz = uint32(data_len)
  io_buffer[target_offset+36] = byte(sz and 0xFF)
  io_buffer[target_offset+37] = byte((sz shr 8) and 0xFF)
  io_buffer[target_offset+38] = byte((sz shr 16) and 0xFF)
  io_buffer[target_offset+39] = byte((sz shr 24) and 0xFF)

  # 5. Write Directory Table Back
  virtio_blk_write(1, addr io_buffer[0])
  kprintln("[SFS] Write Complete.")

View File

@ -16,6 +16,7 @@ type
    CMD_FS_OPEN = 0x200
    CMD_FS_READ = 0x201
    CMD_FS_READDIR = 0x202 # Returns raw listing
    CMD_FS_WRITE = 0x203 # Write File (arg1=ptr to FileArgs)
    CMD_ION_FREE = 0x300 # Return slab to pool
    CMD_SYS_EXEC = 0x400 # Swap Consciousness (ELF Loading)
    CMD_NET_TX = 0x500 # Send Network Packet (arg1=ptr, arg2=len)
@ -33,6 +34,11 @@ type
    fd*: uint64
    buffer*: uint64
  FileArgs* = object
    name*: uint64
    data*: uint64
    len*: uint64
  NetArgs* = object
    buf*: uint64
    len*: uint64
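
The CMD_FS_WRITE contract from the caller's side (a sketch; the literal strings are hypothetical): arg1 is a pointer to a FileArgs whose fields are themselves user-space pointers plus a length.

  var args = FileArgs(
    name: cast[uint64](cstring("notes.txt")),
    data: cast[uint64](cstring("hello\n")),
    len: 6'u64)
  discard nexus_syscall(cint(CMD_FS_WRITE), cast[uint64](addr args))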

View File

@ -7,7 +7,6 @@ import fiber
import ion
import loader
var ion_paused*: bool = false
var pause_start*: uint64 = 0
var matrix_enabled*: bool = false
@ -271,6 +270,9 @@ proc ion_fiber_entry() {.cdecl.} =
    of uint32(CmdType.CMD_BLK_WRITE):
      let args = cast[ptr BlkArgs](cmd.arg)
      virtio_blk_write(args.sector, cast[pointer](args.buf))
    of uint32(CmdType.CMD_FS_WRITE):
      let args = cast[ptr FileArgs](cmd.arg)
      sfs_write_file(cast[cstring](args.name), cast[cstring](args.data), int(args.len))
    else:
      discard

View File

@ -43,12 +43,20 @@ const SECTOR_SIZE: usize = 512;
pub const VirtioBlkDriver = struct {
    transport: pci.VirtioTransport,
    req_queue: ?*Virtqueue = null,
    req_queue: ?*Virtqueue,
    last_used_idx: u16,

    pub fn init(base: usize) VirtioBlkDriver {
        return .{
            .transport = pci.VirtioTransport.init(base),
    pub fn init(transport: pci.VirtioTransport) !VirtioBlkDriver {
        var driver = VirtioBlkDriver{
            .req_queue = null,
            .transport = transport,
            .last_used_idx = 0,
        };
        if (!driver.init_device()) {
            return error.DeviceInitFailed;
        }
        return driver;
    }

    pub fn probe() ?VirtioBlkDriver {
@ -69,7 +77,7 @@ pub const VirtioBlkDriver = struct {
        // 0x1042 = 0x1040 + 2
        if (id == 0x10011af4 or id == 0x10421af4) {
            uart.print("[VirtIO] Found VirtIO-Block device at PCI 00:02.0\n");
            return VirtioBlkDriver.init(addr);
            return VirtioBlkDriver.init(pci.VirtioTransport.init(addr)) catch null;
        }

        // Try Slot 3 just in case
@ -79,7 +87,7 @@ pub const VirtioBlkDriver = struct {
        const id3 = ptr3.*;
        if (id3 == 0x10011af4 or id3 == 0x10421af4) {
            uart.print("[VirtIO] Found VirtIO-Block device at PCI 00:03.0\n");
            return VirtioBlkDriver.init(addr3);
            return VirtioBlkDriver.init(pci.VirtioTransport.init(addr3)) catch null;
        }

        return null;
@ -110,94 +118,68 @@ pub const VirtioBlkDriver = struct {
uart.print("[VirtIO-Blk] Device Ready. Queue Size: ");
uart.print_hex(q_size);
uart.print(" HeaderSize: ");
uart.print_hex(@sizeOf(VirtioBlkReq));
uart.print("\n");
return true;
}
pub fn read_sync(self: *VirtioBlkDriver, sector: u64, buf: [*]u8) void {
self.submit_request(VIRTIO_BLK_T_IN, sector, buf, 512);
self.submit_request(VIRTIO_BLK_T_IN, sector, buf);
}
pub fn write_sync(self: *VirtioBlkDriver, sector: u64, buf: [*]const u8) void {
// Cast const away because submit_request buffer logic is generic, but T_OUT implies read from buf
self.submit_request(VIRTIO_BLK_T_OUT, sector, @constCast(buf), 512);
self.submit_request(VIRTIO_BLK_T_OUT, sector, @constCast(buf));
}
    fn submit_request(self: *VirtioBlkDriver, type_: u32, sector: u64, buf: [*]u8, len: u32) void {
    // SOVEREIGN BOUNCE BUFFERS (Aligned to avoid offset bugs)
    var bounce_header: VirtioBlkReq align(16) = undefined;
    var bounce_sector: [512]u8 align(4096) = undefined;
    var bounce_status: u8 align(16) = 0;

    fn submit_request(self: *VirtioBlkDriver, type_: u32, sector: u64, buf: [*]u8) void {
        const q = self.req_queue orelse return;
        const idx = q.avail.idx % q.num;

        // Each request needs 3 descriptors: Header, Buffer, Status. 'idx'
        // indexes the avail ring, not the descriptor table, so free
        // descriptors must be chosen separately. A robust driver would keep
        // a free list; since I/O here is synchronous and runs (mostly)
        // single-threaded in the ION fiber, fixed indices were safe.
        // Use fixed descriptor indices 0, 1, 2
        const d1 = 0;
        const d2 = 1;
        const d3 = 2;
        const head = (idx * 3) % q.num;
        const d1 = head;
        const d2 = (head + 1) % q.num;
        const d3 = (head + 2) % q.num;
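        // Worked example (assuming q.num = 128): at avail idx 43 the head is
        // (43 * 3) % 128 = 1, so the chain is descriptors 1 -> 2 -> 3.
        // Because every request completes synchronously before the next is
        // submitted, two chains are never live at once, so they cannot
        // overlap even though this is not a true free list.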
        // 1. Header
        const req_header = malloc(@sizeOf(VirtioBlkReq)) orelse return;
        const header: *VirtioBlkReq = @ptrCast(@alignCast(req_header));
        header.type = type_;
        header.reserved = 0;
        header.sector = sector;

        // 2. Status
        const status_ptr = malloc(1) orelse return;
        const status: *u8 = @ptrCast(@alignCast(status_ptr));
        status.* = 0xFF; // Init with error
        bounce_header.type = type_;
        bounce_header.reserved = 0;
        bounce_header.sector = sector;
        bounce_status = 0xFF;

        const VRING_DESC_F_NEXT: u16 = 1;
        const VRING_DESC_F_WRITE: u16 = 2;

        // Setup Desc 1 (Header)
        q.desc[d1].addr = @intFromPtr(header);
        if (type_ == VIRTIO_BLK_T_OUT) {
            @memcpy(&bounce_sector, buf[0..512]);
        }
        q.desc[d1].addr = @intFromPtr(&bounce_header);
        q.desc[d1].len = @sizeOf(VirtioBlkReq);
        q.desc[d1].flags = VRING_DESC_F_NEXT;
        q.desc[d1].next = d2;

        // Setup Desc 2 (Buffer)
        q.desc[d2].addr = @intFromPtr(buf);
        q.desc[d2].len = len;
        // If T_IN (0), Device Writes to Buffer (Needs WRITE flag)
        q.desc[d2].addr = @intFromPtr(&bounce_sector);
        q.desc[d2].len = 512;
        if (type_ == VIRTIO_BLK_T_IN) {
            q.desc[d2].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
            // uart.print("[VirtIO-Blk] Read Req (Flags=3)\n");
        } else {
            q.desc[d2].flags = VRING_DESC_F_NEXT;
            // uart.print("[VirtIO-Blk] Write Req (Flags=1)\n");
        }
        q.desc[d2].next = d3;

        // Setup Desc 3 (Status)
        q.desc[d3].addr = @intFromPtr(status);
        q.desc[d3].addr = @intFromPtr(&bounce_status);
        q.desc[d3].len = 1;
        q.desc[d3].flags = VRING_DESC_F_WRITE; // Device writes status!
        q.desc[d3].flags = VRING_DESC_F_WRITE;
        q.desc[d3].next = 0;

        asm volatile ("fence" ::: .{ .memory = true });

        // Put head in Avail Ring
        const avail_ring = get_avail_ring(q.avail);
        avail_ring[idx] = d1;
@ -207,40 +189,32 @@ pub const VirtioBlkDriver = struct {
        self.transport.notify(0);

        // Busy-wait for completion (sync): poll the Used Ring index via
        // 'last_used_idx' rather than the status byte, since the device may
        // write status last. A safety timeout guards against a hung device.
        var timeout: usize = 10000000;
        while (status.* == 0xFF and timeout > 0) : (timeout -= 1) {
        const used_ptr = q.used; // *VirtqUsed
        while (used_ptr.idx == self.last_used_idx and timeout > 0) : (timeout -= 1) {
            asm volatile ("fence" ::: .{ .memory = true });
        }

        if (timeout == 0) {
            uart.print("[VirtIO-Blk] Timeout on Sector ");
            uart.print_hex(sector);
            uart.print("\n");
        } else if (status.* != 0) {
            uart.print("[VirtIO-Blk] I/O Error: ");
            uart.print_hex(status.*);
            uart.print("\n");
        }
            uart.print("[VirtIO-Blk] Timeout Waiting for Used Ring!\n");
        } else {
            // Request done: consume one Used Ring entry.
            self.last_used_idx +%= 1;
            asm volatile ("fence" ::: .{ .memory = true });
            // (The old path malloc'd the header and status and never freed
            // them; the static bounce buffers above make that moot for this
            // synchronous driver.)
            if (bounce_status != 0) {
                uart.print("[VirtIO-Blk] I/O Error Status: ");
                uart.print_hex(bounce_status);
                uart.print("\n");
            } else {
                if (type_ == VIRTIO_BLK_T_IN) {
                    const dest_slice = buf[0..512];
                    @memcpy(dest_slice, &bounce_sector);
                }
            }
        }
    }
    fn setup_queue(self: *VirtioBlkDriver, index: u16, count: u16) !*Virtqueue {
@ -277,7 +251,7 @@ pub const VirtioBlkDriver = struct {
    }

// structs ...
const VirtioBlkReq = extern struct {
const VirtioBlkReq = packed struct {
    type: u32,
    reserved: u32,
    sector: u64,
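    // Note: with fields u32 + u32 + u64 there is no implicit padding, so
    // extern and packed agree on the same 16-byte layout here; packed just
    // states the guarantee explicitly for this device-visible header.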

npl/nipbox/editor.nim (new file, 63 lines)
View File

@ -0,0 +1,63 @@
# Markus Maiwald (Architect) | Voxis Forge (AI)
# Scribe: The Sovereign Editor
# A modal line editor for the Sovereign Userland.
import std

var scribe_buffer: seq[string] = @[]
var scribe_filename: string = ""

proc scribe_save() =
  # 1. Create content string
  var content = ""
  for line in scribe_buffer:
    content.add(line)
    content.add('\n')
  # 2. Write to Disk (Using SFS)
  print("[Scribe] Saving '" & scribe_filename & "'...")
  nexus_file_write(scribe_filename, content)

proc start_editor*(filename: string) =
  scribe_filename = filename
  scribe_buffer = @[]
  print("Scribe v1.0. Editing: " & filename)
  if filename == "matrix.conf":
    # TODO: try autoloading the existing file.
    print("(New File)")
  while true:
    var line = ""
    print_raw(": ")
    if not my_readline(line): break
    if line == ".":
      # Command Mode: unlike ed's modal toggle, '.' opens a single-shot
      # command menu, and most commands drop straight back to insert mode.
      while true:
        var cmd = ""
        print_raw("(cmd) ")
        if not my_readline(cmd): break
        if cmd == "w":
          scribe_save()
          print("Saved.")
          break
        elif cmd == "p":
          var i = 1
          for l in scribe_buffer:
            print_int(i); print_raw(" "); print(l)
            i += 1
          break
        elif cmd == "q":
          return
        elif cmd == "i":
          print("(Insert Mode)")
          break
        else:
          print("Unknown command. w=save, p=print, q=quit, i=insert")
    else:
      # Append the line to the buffer.
      scribe_buffer.add(line)
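
A hypothetical session, to make the modal flow concrete (file name invented; the prompts are exactly what the code above prints):

Scribe v1.0. Editing: notes.txt
: hello world
: second line
: .
(cmd) p
1 hello world
2 second line
: .
(cmd) w
[Scribe] Saving 'notes.txt'...
Saved.
: .
(cmd) q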

View File

@ -1,33 +1,16 @@
# src/npl/nipbox/nipbox.nim
# --- 1. RAW IMPORTS (The Physics) ---
# We talk directly to libc_shim.zig
import strutils
import std
import editor
type
  cint = int32
  csize_t = uint
  cptr = pointer
# Standard POSIX-ish
proc write(fd: cint, buf: cptr, count: csize_t): csize_t {.importc, cdecl.}
proc read(fd: cint, buf: cptr, count: csize_t): csize_t {.importc, cdecl.}
proc open(pathname: cstring, flags: cint): cint {.importc, cdecl.}
proc close(fd: cint): cint {.importc, cdecl.}
proc exit(status: cint) {.importc, cdecl.}
proc list_files(buf: pointer, len: uint64): int64 {.importc, cdecl.}
# Our Custom Syscalls (Defined in libc_shim)
proc nexus_syscall(cmd: cint, arg: uint64): cint {.importc, cdecl.}
proc nexus_yield() {.importc, cdecl.}
proc nexus_net_tx(buf: cptr, len: uint64) {.importc, cdecl.}
proc nexus_net_rx(buf: cptr, max_len: uint64): uint64 {.importc, cdecl.}
proc nexus_blk_read(sector: uint64, buf: cptr, len: uint64) {.importc, cdecl.}
proc nexus_blk_write(sector: uint64, buf: cptr, len: uint64) {.importc, cdecl.}
const CMD_GPU_MATRIX = 0x100
const CMD_GPU_STATUS = 0x102
const CMD_GET_GPU_STATUS = 0x102
const CMD_SYS_EXEC = 0x400
# Constants
const
  CMD_SYS_EXIT = 1
  CMD_GPU_MATRIX = 0x100
  CMD_GPU_STATUS = 0x102
  CMD_GET_GPU_STATUS = 0x102
  CMD_SYS_EXEC = 0x400
# --- SOVEREIGN NETWORKING TYPES ---
type
@ -348,6 +331,7 @@ proc main() =
    elif cmd == "exec": do_exec(arg)
    elif cmd == "dd": do_dd(arg)
    elif cmd == "mkfs": do_mkfs()
    elif cmd == "ed": start_editor(arg)
    elif cmd == "help": do_help()
    else: print("Unknown command: " & cmd)
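
From the nipbox shell the new command is invoked as, e.g., `ed notes.txt` (file name hypothetical); Scribe's `w` then persists the buffer through CMD_FS_WRITE, so SFS must already be mounted (sfs_write_file rejects the write otherwise) for the save to succeed.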

npl/nipbox/std.nim (new file, 101 lines)
View File

@ -0,0 +1,101 @@
# Standard C types: cint and csize_t come from Nim's system module, so only
# the raw-pointer alias needs defining here.
type
  cptr* = pointer
# Standard POSIX-ish Wrappers (from libc_shim)
proc write*(fd: cint, buf: cptr, count: csize_t): csize_t {.importc, cdecl.}
proc read*(fd: cint, buf: cptr, count: csize_t): csize_t {.importc, cdecl.}
proc open*(pathname: cstring, flags: cint): cint {.importc, cdecl.}
proc close*(fd: cint): cint {.importc, cdecl.}
proc exit*(status: cint) {.importc, cdecl.}
proc list_files*(buf: pointer, len: uint64): int64 {.importc, cdecl.}
# Our Custom Syscalls (Defined in libc_shim)
proc nexus_syscall*(cmd: cint, arg: uint64): cint {.importc, cdecl.}
proc nexus_yield*() {.importc, cdecl.}
proc nexus_net_tx*(buf: cptr, len: uint64) {.importc, cdecl.}
proc nexus_net_rx*(buf: cptr, max_len: uint64): uint64 {.importc, cdecl.}
proc nexus_blk_read*(sector: uint64, buf: cptr, len: uint64) {.importc, cdecl.}
proc nexus_blk_write*(sector: uint64, buf: cptr, len: uint64) {.importc, cdecl.}
type
  FileArgs* = object
    name*: uint64
    data*: uint64
    len*: uint64

const CMD_FS_WRITE = 0x203

proc nexus_file_write*(name: string, data: string) =
  var args: FileArgs
  args.name = cast[uint64](cstring(name))
  args.data = cast[uint64](cstring(data))
  args.len = uint64(data.len)
  discard nexus_syscall(cint(CMD_FS_WRITE), cast[uint64](addr args))
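# Example (hypothetical file name): persist a small buffer from userland.
#   nexus_file_write("hello.txt", "Hello, Sovereign World!\n")
# The cstring views of 'name' and 'data' remain valid for the duration of
# the synchronous syscall, so passing raw pointers here is safe.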
# Helper: Print to Stdout (FD 1)
proc print*(s: string) =
  if s.len > 0:
    discard write(1, unsafeAddr s[0], csize_t(s.len))
  var nl = "\n"
  discard write(1, unsafeAddr nl[0], 1)

proc print_raw*(s: string) =
  if s.len > 0:
    discard write(1, unsafeAddr s[0], csize_t(s.len))

proc print_int*(n: int) =
  var s = ""
  var v = n
  if v < 0:
    # Handle negatives: emit the sign, then the digits of |n|.
    print_raw("-")
    v = -v
  if v == 0: s = "0"
  else:
    while v > 0:
      s.add(char((v mod 10) + 48))
      v = v div 10
  # Digits were accumulated least-significant first; reverse them.
  var r = ""
  var i = s.len - 1
  while i >= 0:
    r.add(s[i])
    i -= 1
  print_raw(r)
var read_buffer: array[512, char]
var read_pos = 0
var read_len = 0

# libc_shim's read(0) is synchronous (it busy-waits on the ring), which is
# why the old readline polled the network inside its loop. poll_network
# depends on nipbox logic, so moving it here would create a circular import;
# this refactor therefore drops net polling from readline for the Phase 12 MVP.
proc my_readline*(out_str: var string): bool =
  out_str = ""
  while true:
    var c: char
    let n = read(0, addr c, 1)
    if n <= 0: return false # EOF or Error
    if c == '\n' or c == '\r':
      print_raw("\n")
      return true
    elif c == '\b' or c == char(127): # Backspace
      if out_str.len > 0:
        # Visual backspace
        var bs = "\b \b"
        discard write(1, addr bs[0], 3)
        out_str.setLen(out_str.len - 1)
    else:
      out_str.add(c)
      discard write(1, addr c, 1) # Echo