feat(scribe): Implement Scribe Editor Save & Stabilize VirtIO-Block

- hal/virtio_block: Implemented global bounce buffers and Used Ring Polling for stable, synchronous I/O.
- core/fs/sfs: Implemented sfs_write_file to handle SFS file creation and data writing.
- core/ion: Added CMD_FS_WRITE syscall definition.
- core/kernel: Added CMD_FS_WRITE syscall handler and fs/sfs integration.
- npl/nipbox: Added nexus_file_write wrapper and updated Scribe (ed) to use it for saving files; the end-to-end save path is sketched below.
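
The save path crosses all three layers. A condensed trace in Nim, using identifiers from the diffs below; the file name and contents here are invented for illustration:

    # Userland (npl/nipbox/std.nim): pack raw pointers into FileArgs and trap.
    var args = FileArgs(name: cast[uint64](cstring("notes.txt")),
                        data: cast[uint64](cstring("hello\n")),
                        len: 6'u64)
    discard nexus_syscall(cint(CMD_FS_WRITE), cast[uint64](addr args))
    # Kernel (core/kernel): the ION fiber unpacks the same struct and calls
    # sfs_write_file(name, data, len).
    # Driver (hal/virtio_block): every virtio_blk_read/virtio_blk_write that
    # sfs_write_file issues now blocks until the device posts to the used ring.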
Markus Maiwald 2025-12-31 23:20:30 +01:00
parent 3506988f21
commit 9a996976c5
7 changed files with 331 additions and 116 deletions


@ -101,3 +101,88 @@ proc sfs_list*() =
  if not found:
    kprintln(" (Empty)")

proc sfs_write_file*(name: cstring, data: cstring, data_len: int) =
  if not sfs_mounted:
    kprintln("[SFS] Write Error: Not mounted.")
    return

  # 1. Read Directory Table (Sector 1)
  virtio_blk_read(1, addr io_buffer[0])

  var free_slot_offset = -1
  var found_file_offset = -1
  var max_sector: uint32 = 1
  var offset = 0
  while offset < 512:
    if io_buffer[offset] != 0:
      var entry_name: string = ""
      for i in 0..31:
        if io_buffer[offset+i] == 0: break
        entry_name.add(char(io_buffer[offset+i]))
      if entry_name == $name:
        found_file_offset = offset
      var s_sect: uint32 = uint32(io_buffer[offset+32]) or
        (uint32(io_buffer[offset+33]) shl 8) or
        (uint32(io_buffer[offset+34]) shl 16) or
        (uint32(io_buffer[offset+35]) shl 24)
      if s_sect > max_sector: max_sector = s_sect
    elif free_slot_offset == -1:
      free_slot_offset = offset
    offset += 64

  # 2. Determine Target Sector
  var target_sector: uint32 = 0
  var target_offset = 0
  if found_file_offset != -1:
    kprintln("[SFS] Overwriting existing file...")
    target_offset = found_file_offset
    target_sector = uint32(io_buffer[target_offset+32]) or
      (uint32(io_buffer[target_offset+33]) shl 8) or
      (uint32(io_buffer[target_offset+34]) shl 16) or
      (uint32(io_buffer[target_offset+35]) shl 24)
  elif free_slot_offset != -1:
    kprintln("[SFS] Creating new file...")
    target_offset = free_slot_offset
    target_sector = max_sector + 1
  else:
    kprintln("[SFS] Error: Directory Full.")
    return

  # 3. Write Data
  kprint("[SFS] Writing to Sector: ")
  kprint_hex(uint64(target_sector))
  kprintln("")
  var data_buf: array[512, byte]
  for i in 0..511: data_buf[i] = 0
  for i in 0 ..< data_len:
    if i < 512: data_buf[i] = byte(data[i])
  virtio_blk_write(uint64(target_sector), addr data_buf[0])

  # 4. Update Directory Entry
  var n_str = $name
  for i in 0..31:
    if i < n_str.len: io_buffer[target_offset+i] = byte(n_str[i])
    else: io_buffer[target_offset+i] = 0
  io_buffer[target_offset+32] = byte(target_sector and 0xFF)
  io_buffer[target_offset+33] = byte((target_sector shr 8) and 0xFF)
  io_buffer[target_offset+34] = byte((target_sector shr 16) and 0xFF)
  io_buffer[target_offset+35] = byte((target_sector shr 24) and 0xFF)
  var sz = uint32(data_len)
  io_buffer[target_offset+36] = byte(sz and 0xFF)
  io_buffer[target_offset+37] = byte((sz shr 8) and 0xFF)
  io_buffer[target_offset+38] = byte((sz shr 16) and 0xFF)
  io_buffer[target_offset+39] = byte((sz shr 24) and 0xFF)

  # 5. Write Directory Table Back
  virtio_blk_write(1, addr io_buffer[0])
  kprintln("[SFS] Write Complete.")


@ -16,6 +16,7 @@ type
    CMD_FS_OPEN = 0x200
    CMD_FS_READ = 0x201
    CMD_FS_READDIR = 0x202  # Returns raw listing
    CMD_FS_WRITE = 0x203    # Write File (arg1 = ptr to FileArgs)
    CMD_ION_FREE = 0x300    # Return slab to pool
    CMD_SYS_EXEC = 0x400    # Swap Consciousness (ELF Loading)
    CMD_NET_TX = 0x500      # Send Network Packet (arg1=ptr, arg2=len)

@ -33,6 +34,11 @@ type
    fd*: uint64
    buffer*: uint64

  FileArgs* = object
    name*: uint64
    data*: uint64
    len*: uint64

  NetArgs* = object
    buf*: uint64
    len*: uint64
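
FileArgs crosses the user/kernel boundary as a raw pointer, so both sides must agree on its 24-byte layout. A compile-time guard like this (my addition, not part of the commit) would catch drift:

    static: doAssert sizeof(FileArgs) == 24  # 3 x uint64, no padding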


@ -7,7 +7,6 @@ import fiber
import ion
import loader

var ion_paused*: bool = false
var pause_start*: uint64 = 0
var matrix_enabled*: bool = false

@ -271,6 +270,9 @@ proc ion_fiber_entry() {.cdecl.} =
    of uint32(CmdType.CMD_BLK_WRITE):
      let args = cast[ptr BlkArgs](cmd.arg)
      virtio_blk_write(args.sector, cast[pointer](args.buf))
    of uint32(CmdType.CMD_FS_WRITE):
      let args = cast[ptr FileArgs](cmd.arg)
      sfs_write_file(cast[cstring](args.name), cast[cstring](args.data), int(args.len))
    else:
      discard


@ -43,12 +43,20 @@ const SECTOR_SIZE: usize = 512;
pub const VirtioBlkDriver = struct {
    transport: pci.VirtioTransport,
    req_queue: ?*Virtqueue,
    last_used_idx: u16,

    pub fn init(transport: pci.VirtioTransport) !VirtioBlkDriver {
        var driver = VirtioBlkDriver{
            .req_queue = null,
            .transport = transport,
            .last_used_idx = 0,
        };
        if (!driver.init_device()) {
            return error.DeviceInitFailed;
        }
        return driver;
    }

    pub fn probe() ?VirtioBlkDriver {

@ -69,7 +77,7 @@
        // 0x1042 = 0x1040 + 2
        if (id == 0x10011af4 or id == 0x10421af4) {
            uart.print("[VirtIO] Found VirtIO-Block device at PCI 00:02.0\n");
            return VirtioBlkDriver.init(pci.VirtioTransport.init(addr)) catch null;
        }

        // Try Slot 3 just in case

@ -79,7 +87,7 @@
        const id3 = ptr3.*;
        if (id3 == 0x10011af4 or id3 == 0x10421af4) {
            uart.print("[VirtIO] Found VirtIO-Block device at PCI 00:03.0\n");
            return VirtioBlkDriver.init(pci.VirtioTransport.init(addr3)) catch null;
        }
        return null;

@ -110,94 +118,68 @@
        uart.print("[VirtIO-Blk] Device Ready. Queue Size: ");
        uart.print_hex(q_size);
        uart.print(" HeaderSize: ");
        uart.print_hex(@sizeOf(VirtioBlkReq));
        uart.print("\n");
        return true;
    }

    pub fn read_sync(self: *VirtioBlkDriver, sector: u64, buf: [*]u8) void {
        self.submit_request(VIRTIO_BLK_T_IN, sector, buf);
    }

    pub fn write_sync(self: *VirtioBlkDriver, sector: u64, buf: [*]const u8) void {
        // Cast const away: submit_request's buffer handling is generic,
        // but T_OUT means the device only reads from buf.
        self.submit_request(VIRTIO_BLK_T_OUT, sector, @constCast(buf));
    }

    // SOVEREIGN BOUNCE BUFFERS (Aligned to avoid offset bugs)
    var bounce_header: VirtioBlkReq align(16) = undefined;
    var bounce_sector: [512]u8 align(4096) = undefined;
    var bounce_status: u8 align(16) = 0;

    fn submit_request(self: *VirtioBlkDriver, type_: u32, sector: u64, buf: [*]u8) void {
        const q = self.req_queue orelse return;
        const idx = q.avail.idx % q.num;

        // Use fixed descriptor indices 0, 1, 2 (header, data, status).
        const d1 = 0;
        const d2 = 1;
        const d3 = 2;

        bounce_header.type = type_;
        bounce_header.reserved = 0;
        bounce_header.sector = sector;
        bounce_status = 0xFF;

        const VRING_DESC_F_NEXT: u16 = 1;
        const VRING_DESC_F_WRITE: u16 = 2;

        // For writes, copy the caller's data into the aligned bounce buffer.
        if (type_ == VIRTIO_BLK_T_OUT) {
            @memcpy(&bounce_sector, buf[0..512]);
        }

        q.desc[d1].addr = @intFromPtr(&bounce_header);
        q.desc[d1].len = @sizeOf(VirtioBlkReq);
        q.desc[d1].flags = VRING_DESC_F_NEXT;
        q.desc[d1].next = d2;

        q.desc[d2].addr = @intFromPtr(&bounce_sector);
        q.desc[d2].len = 512;
        // For T_IN (reads), the device writes to the buffer (needs WRITE flag).
        if (type_ == VIRTIO_BLK_T_IN) {
            q.desc[d2].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
        } else {
            q.desc[d2].flags = VRING_DESC_F_NEXT;
        }
        q.desc[d2].next = d3;

        q.desc[d3].addr = @intFromPtr(&bounce_status);
        q.desc[d3].len = 1;
        q.desc[d3].flags = VRING_DESC_F_WRITE; // Device writes the status byte.
        q.desc[d3].next = 0;

        asm volatile ("fence" ::: .{ .memory = true });

        // Put the chain head in the avail ring.
        const avail_ring = get_avail_ring(q.avail);
        avail_ring[idx] = d1;

@ -207,40 +189,32 @@
        self.transport.notify(0);

        // Polling the Used Ring (synchronous completion)
        var timeout: usize = 10000000;
        const used_ptr = q.used; // *VirtqUsed

        while (used_ptr.idx == self.last_used_idx and timeout > 0) : (timeout -= 1) {
            asm volatile ("fence" ::: .{ .memory = true });
        }

        if (timeout == 0) {
            uart.print("[VirtIO-Blk] Timeout Waiting for Used Ring!\n");
        } else {
            // Request done: consume the used-ring entry.
            self.last_used_idx +%= 1;
            asm volatile ("fence" ::: .{ .memory = true });

            if (bounce_status != 0) {
                uart.print("[VirtIO-Blk] I/O Error Status: ");
                uart.print_hex(bounce_status);
                uart.print("\n");
            } else {
                // For reads, copy the sector from the bounce buffer to the caller.
                if (type_ == VIRTIO_BLK_T_IN) {
                    const dest_slice = buf[0..512];
                    @memcpy(dest_slice, &bounce_sector);
                }
            }
        }
    }

    fn setup_queue(self: *VirtioBlkDriver, index: u16, count: u16) !*Virtqueue {

@ -277,7 +251,7 @@
    }

    // structs ...
    const VirtioBlkReq = packed struct {
        type: u32,
        reserved: u32,
        sector: u64,
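
With the aligned bounce buffers and used-ring polling above, read_sync and write_sync only return once the device has posted the request. That is exactly what sfs_write_file relies on when it reuses one global io_buffer for a read-modify-write of the directory sector. A minimal sketch of that contract from the Nim side (sector number and byte value illustrative):

    # Each call blocks until the used ring advances, so back-to-back
    # operations on the same buffer are safe:
    virtio_blk_read(1, addr io_buffer[0])    # fetch directory sector
    io_buffer[0] = byte('A')                 # mutate in place
    virtio_blk_write(1, addr io_buffer[0])   # persist; completes before return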

npl/nipbox/editor.nim (new file, 63 lines)

@ -0,0 +1,63 @@
# Markus Maiwald (Architect) | Voxis Forge (AI)
# Scribe: The Sovereign Editor
# A modal line editor for the Sovereign Userland.

import std

var scribe_buffer: seq[string] = @[]
var scribe_filename: string = ""

proc scribe_save() =
  # 1. Flatten the buffer into one newline-terminated string
  var content = ""
  for line in scribe_buffer:
    content.add(line)
    content.add('\n')
  # 2. Write to Disk (Using SFS)
  print("[Scribe] Saving '" & scribe_filename & "'...")
  nexus_file_write(scribe_filename, content)

proc start_editor*(filename: string) =
  scribe_filename = filename
  scribe_buffer = @[]
  print("Scribe v1.0. Editing: " & filename)
  if filename == "matrix.conf":
    # Try autoload?
    print("(New File)")
  while true:
    var line = ""
    print_raw(": ")
    if not my_readline(line): break
    if line == ".":
      # Command Mode. Unlike ed, '.' does not toggle modes:
      # it ends insert and opens a single-shot command menu.
      while true:
        var cmd = ""
        print_raw("(cmd) ")
        if not my_readline(cmd): break
        if cmd == "w":
          scribe_save()
          print("Saved.")
          break
        elif cmd == "p":
          var i = 1
          for l in scribe_buffer:
            print_int(i); print_raw(" "); print(l)
            i += 1
          break
        elif cmd == "q":
          return
        elif cmd == "i":
          print("(Insert Mode)")
          break
        else:
          print("Unknown command. w=save, p=print, q=quit, i=insert")
    else:
      # Insert Mode: append the line to the buffer
      scribe_buffer.add(line)
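
An illustrative session (file name and text invented), started from the nipbox shell with 'ed notes.txt'. Plain lines append to the buffer; a lone '.' opens the single-shot command menu:

    Scribe v1.0. Editing: notes.txt
    : The Ledger remembers.
    : .
    (cmd) p
    1 The Ledger remembers.
    : .
    (cmd) w
    [Scribe] Saving 'notes.txt'...
    Saved.
    : .
    (cmd) q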


@ -1,33 +1,16 @@
# src/npl/nipbox/nipbox.nim
import strutils
import std
import editor

# Constants
const
  CMD_SYS_EXIT = 1
  CMD_GPU_MATRIX = 0x100
  CMD_GPU_STATUS = 0x102
  CMD_GET_GPU_STATUS = 0x102
  CMD_SYS_EXEC = 0x400

# --- SOVEREIGN NETWORKING TYPES ---
type

@ -348,6 +331,7 @@ proc main() =
  elif cmd == "exec": do_exec(arg)
  elif cmd == "dd": do_dd(arg)
  elif cmd == "mkfs": do_mkfs()
  elif cmd == "ed": start_editor(arg)
  elif cmd == "help": do_help()
  else: print("Unknown command: " & cmd)

npl/nipbox/std.nim (new file, 101 lines)

@ -0,0 +1,101 @@
# Standard C types: cint and csize_t are provided by Nim's system module,
# so we rely on those and only alias a raw pointer type here.
type
  cptr* = pointer

# Standard POSIX-ish wrappers (from libc_shim)
proc write*(fd: cint, buf: cptr, count: csize_t): csize_t {.importc, cdecl.}
proc read*(fd: cint, buf: cptr, count: csize_t): csize_t {.importc, cdecl.}
proc open*(pathname: cstring, flags: cint): cint {.importc, cdecl.}
proc close*(fd: cint): cint {.importc, cdecl.}
proc exit*(status: cint) {.importc, cdecl.}
proc list_files*(buf: pointer, len: uint64): int64 {.importc, cdecl.}

# Our custom syscalls (defined in libc_shim)
proc nexus_syscall*(cmd: cint, arg: uint64): cint {.importc, cdecl.}
proc nexus_yield*() {.importc, cdecl.}
proc nexus_net_tx*(buf: cptr, len: uint64) {.importc, cdecl.}
proc nexus_net_rx*(buf: cptr, max_len: uint64): uint64 {.importc, cdecl.}
proc nexus_blk_read*(sector: uint64, buf: cptr, len: uint64) {.importc, cdecl.}
proc nexus_blk_write*(sector: uint64, buf: cptr, len: uint64) {.importc, cdecl.}

type
  FileArgs* = object
    name*: uint64
    data*: uint64
    len*: uint64

const CMD_FS_WRITE = 0x203

proc nexus_file_write*(name: string, data: string) =
  var args: FileArgs
  args.name = cast[uint64](cstring(name))
  args.data = cast[uint64](cstring(data))
  args.len = uint64(data.len)
  discard nexus_syscall(cint(CMD_FS_WRITE), cast[uint64](addr args))

# Helper: print to stdout (FD 1), followed by a newline
proc print*(s: string) =
  if s.len > 0:
    discard write(1, unsafeAddr s[0], csize_t(s.len))
  var nl = "\n"
  discard write(1, unsafeAddr nl[0], 1)

proc print_raw*(s: string) =
  if s.len > 0:
    discard write(1, unsafeAddr s[0], csize_t(s.len))

# Prints a non-negative integer (sufficient for line numbers).
proc print_int*(n: int) =
  var s = ""
  var v = n
  if v == 0: s = "0"
  else:
    while v > 0:
      s.add(char((v mod 10) + 48))
      v = v div 10
  # Digits were collected least-significant first; reverse before printing.
  var r = ""
  var i = s.len - 1
  while i >= 0:
    r.add(s[i])
    i -= 1
  print_raw(r)

var read_buffer: array[512, char]
var read_pos = 0
var read_len = 0

# Note: the shell's readline used to poll the network between keystrokes,
# but poll_network depends on nipbox logic and moving it here would create
# a circular import. Since libc_shim's read(0) is synchronous anyway (it
# busy-waits on the ring), this refactor skips network polling in
# my_readline.
proc my_readline*(out_str: var string): bool =
  out_str = ""
  while true:
    var c: char
    let n = read(0, addr c, 1)
    if n <= 0: return false  # EOF or error
    if c == '\n' or c == '\r':
      print_raw("\n")
      return true
    elif c == '\b' or c == char(127):  # Backspace
      if out_str.len > 0:
        # Visual backspace: erase the last glyph on screen
        var bs = "\b \b"
        discard write(1, addr bs[0], 3)
        out_str.setLen(out_str.len - 1)
    else:
      out_str.add(c)
      discard write(1, addr c, 1)  # Echo