const std = @import("std");
// MARKUS MAIWALD (ARCHITECT) | VOXIS FORGE (AI)
// RUMPK NPL
// LIVE WIRE (ARP/ICMP RESPONDER)

// 1. The SysTable Contract (Must match Kernel!)
const ION_BASE = 0x83000000;

/// One packet descriptor shared with the kernel ring buffers.
const IonPacket = extern struct {
    data: u64, // Virtual Addr (ptr)
    phys: u64, // Physical Addr
    len: u16,
    id: u16,
};

const CmdPacket = extern struct {
    kind: u32,
    arg: u32,
};

const RingBufferPacket = extern struct {
    head: u32,
    tail: u32,
    mask: u32,
    data: [256]IonPacket,
};

const RingBufferCmd = extern struct {
    head: u32,
    tail: u32,
    mask: u32,
    data: [256]CmdPacket,
};

const SysTable = extern struct {
    magic: u32,
    s_rx: *RingBufferPacket,
    s_tx: *RingBufferPacket,
    s_event: *RingBufferPacket,
    s_cmd: *RingBufferCmd,
};

/// The kernel maps the SysTable at a fixed virtual address.
fn get_systable() *SysTable {
    return @ptrFromInt(ION_BASE);
}

// Minimal Shims
extern fn write(fd: c_int, buf: [*]const u8, count: usize) isize;

const YIELD_LOC = 0x83000FF0;

/// Cooperatively yield back to the scheduler. The kernel publishes a
/// function pointer at YIELD_LOC; we load and call it.
fn fiber_yield() void {
    const ptr: *const *const fn () callconv(.c) void = @ptrFromInt(YIELD_LOC);
    const func = ptr.*;
    func();
}

fn print(text: []const u8) void {
    _ = write(1, text.ptr, text.len);
}

fn print_u64(val: u64) void {
    var buf: [32]u8 = undefined;
    const s = std.fmt.bufPrint(&buf, "{}", .{val}) catch "ERR";
    print(s);
}

fn print_hex(val: u16) void {
    var buf: [16]u8 = undefined;
    const s = std.fmt.bufPrint(&buf, "{x}", .{val}) catch "ERR";
    print(s);
}

// Network Structures (Unrolled for Packed Compat)
// Fields are listed LSB-first, so on a little-endian machine each u8 field
// maps to the corresponding wire byte in order.
const EthHeader = packed struct {
    dst0: u8,
    dst1: u8,
    dst2: u8,
    dst3: u8,
    dst4: u8,
    dst5: u8,
    src0: u8,
    src1: u8,
    src2: u8,
    src3: u8,
    src4: u8,
    src5: u8,
    type: u16,
};

const ArpHeader = packed struct {
    hw_type: u16,
    proto_type: u16,
    hw_len: u8,
    proto_len: u8,
    opcode: u16,
    smac0: u8,
    smac1: u8,
    smac2: u8,
    smac3: u8,
    smac4: u8,
    smac5: u8,
    sip0: u8,
    sip1: u8,
    sip2: u8,
    sip3: u8,
    tmac0: u8,
    tmac1: u8,
    tmac2: u8,
    tmac3: u8,
    tmac4: u8,
    tmac5: u8,
    tip0: u8,
    tip1: u8,
    tip2: u8,
    tip3: u8,
};

const IpHeader = packed struct {
    ver_ihl: u8,
    tos: u8,
    len: u16,
    id: u16,
    frag: u16,
    ttl: u8,
    proto: u8,
    csum: u16,
    src0: u8,
    src1: u8,
    src2: u8,
    src3: u8,
    dst0: u8,
    dst1: u8,
    dst2: u8,
    dst3: u8,
};

const IcmpHeader = packed struct {
    type: u8,
    code: u8,
    csum: u16,
    id: u16,
    seq: u16,
};

// Wire sizes of the headers above.
// BUGFIX: @sizeOf on a packed struct reports the ABI size of its backing
// integer, which is rounded UP (e.g. EthHeader is u112 -> @sizeOf == 16,
// not 14). Using @sizeOf for offsets misaligned every header parse by
// 2+ bytes. Derive the true on-wire byte lengths from @bitSizeOf instead.
const ETH_HLEN: usize = @divExact(@bitSizeOf(EthHeader), 8); // 14
const ARP_HLEN: usize = @divExact(@bitSizeOf(ArpHeader), 8); // 28
const IP_HLEN_MIN: usize = @divExact(@bitSizeOf(IpHeader), 8); // 20
const ICMP_HLEN: usize = @divExact(@bitSizeOf(IcmpHeader), 8); // 8

// Utils: Network Byte Order (Big Endian)
inline fn ntohs(n: u16) u16 {
    return @byteSwap(n);
}
inline fn htons(n: u16) u16 {
    return @byteSwap(n);
}

// My Config
// 52:54:00:12:34:56 (QEMU Default usually) -> No, we set that for host.
// Let's assume we are 52:54:00:12:34:57 (Guest).
const MY_MAC = [6]u8{ 0x52, 0x54, 0x00, 0x12, 0x34, 0x57 };
const MY_IP = [4]u8{ 10, 0, 2, 15 };

/// RFC 1071 Internet checksum over a byte stream (big-endian word sum,
/// carries folded, one's complement). Returns the checksum in host
/// numeric form; store with htons().
/// BUGFIX: guard the empty slice — `buf.len - 1` on usize wrapped around
/// and the loop read far out of bounds.
fn checksum(buf: []const u8) u16 {
    if (buf.len == 0) return 0xFFFF; // ~0: sum of nothing
    var sum: u32 = 0;
    var i: usize = 0;
    while (i + 1 < buf.len) : (i += 2) {
        const word = @as(u16, buf[i]) << 8 | @as(u16, buf[i + 1]); // Big Endian word
        sum += word;
    }
    if (buf.len % 2 == 1) {
        // Odd trailing byte is the high half of a zero-padded word.
        sum += @as(u16, buf[i]) << 8;
    }
    // Fold the carries back into the low 16 bits.
    while ((sum >> 16) != 0) {
        sum = (sum & 0xFFFF) + (sum >> 16);
    }
    return ~@as(u16, @truncate(sum));
}

/// Main loop: drain the RX ring, answer ARP/ICMP in place, then yield.
export fn main() c_int {
    const sys = get_systable();
    print("[LIVE] Waiting for Packets (ARP/ICMP)...\n");

    const rx_ring = sys.s_rx;
    const tx_ring = sys.s_tx;

    while (true) {
        // 1. Poll RX: drain everything available before yielding.
        while (true) {
            const head = @atomicLoad(u32, &rx_ring.head, .monotonic);
            const tail = @atomicLoad(u32, &rx_ring.tail, .monotonic);
            if (head != tail) {
                // We have a packet!
                const pkt = rx_ring.data[tail & rx_ring.mask];
                handle_packet(pkt, tx_ring);
                // Consume (release so the producer sees our reads done).
                @atomicStore(u32, &rx_ring.tail, tail + 1, .release);
            } else {
                break; // RX Empty
            }
        }
        // 2. Yield
        fiber_yield();
    }
    return 0;
}

/// Dispatch a received frame by EtherType.
fn handle_packet(pkt: IonPacket, tx_ring: *RingBufferPacket) void {
    const data_ptr: [*]u8 = @ptrFromInt(pkt.data);
    const data: []u8 = data_ptr[0..pkt.len];

    if (data.len < ETH_HLEN) return;
    const eth = @as(*align(1) EthHeader, @ptrCast(data.ptr));
    const eth_type = ntohs(eth.type);

    if (eth_type == 0x0806) { // ARP
        handle_arp(data, eth, tx_ring, pkt);
    } else if (eth_type == 0x0800) { // IPv4
        handle_ipv4(data, eth, tx_ring, pkt);
    }
}

/// Answer ARP who-has requests for MY_IP by rewriting the frame in place.
fn handle_arp(data: []u8, eth: *align(1) EthHeader, tx_ring: *RingBufferPacket, pkt: IonPacket) void {
    if (data.len < ETH_HLEN + ARP_HLEN) return;
    const arp = @as(*align(1) ArpHeader, @ptrCast(data.ptr + ETH_HLEN));

    // Request (opcode 1) and target IP is ours?
    if (ntohs(arp.opcode) == 1 and arp.tip0 == MY_IP[0] and arp.tip1 == MY_IP[1] and arp.tip2 == MY_IP[2] and arp.tip3 == MY_IP[3]) {
        print("[LIVE] ARP Request for ME! Replying...\n");

        // Craft Reply IN PLACE
        // 1. Eth Header: dst <- requester, src <- us.
        eth.dst0 = eth.src0;
        eth.dst1 = eth.src1;
        eth.dst2 = eth.src2;
        eth.dst3 = eth.src3;
        eth.dst4 = eth.src4;
        eth.dst5 = eth.src5;
        eth.src0 = MY_MAC[0];
        eth.src1 = MY_MAC[1];
        eth.src2 = MY_MAC[2];
        eth.src3 = MY_MAC[3];
        eth.src4 = MY_MAC[4];
        eth.src5 = MY_MAC[5];

        // 2. ARP Header
        arp.opcode = htons(2); // Reply
        // Target = original Sender
        arp.tmac0 = arp.smac0;
        arp.tmac1 = arp.smac1;
        arp.tmac2 = arp.smac2;
        arp.tmac3 = arp.smac3;
        arp.tmac4 = arp.smac4;
        arp.tmac5 = arp.smac5;
        arp.tip0 = arp.sip0;
        arp.tip1 = arp.sip1;
        arp.tip2 = arp.sip2;
        arp.tip3 = arp.sip3;
        // Sender = Me
        arp.smac0 = MY_MAC[0];
        arp.smac1 = MY_MAC[1];
        arp.smac2 = MY_MAC[2];
        arp.smac3 = MY_MAC[3];
        arp.smac4 = MY_MAC[4];
        arp.smac5 = MY_MAC[5];
        arp.sip0 = MY_IP[0];
        arp.sip1 = MY_IP[1];
        arp.sip2 = MY_IP[2];
        arp.sip3 = MY_IP[3];

        // 3. Send
        send_packet(tx_ring, pkt);
    }
}

/// Answer ICMP Echo Requests addressed to MY_IP by rewriting in place.
fn handle_ipv4(data: []u8, eth: *align(1) EthHeader, tx_ring: *RingBufferPacket, pkt: IonPacket) void {
    if (data.len < ETH_HLEN + IP_HLEN_MIN) return;
    const ip = @as(*align(1) IpHeader, @ptrCast(data.ptr + ETH_HLEN));

    // Only handle packets destined for us.
    if (ip.dst0 != MY_IP[0] or ip.dst1 != MY_IP[1] or ip.dst2 != MY_IP[2] or ip.dst3 != MY_IP[3]) return;
    if (ip.proto != 1) return; // ICMP only

    const ip_header_len: usize = @as(usize, ip.ver_ihl & 0x0F) * 4;
    if (ip_header_len < IP_HLEN_MIN) return; // malformed IHL

    const icmp_offset = ETH_HLEN + ip_header_len;
    if (data.len < icmp_offset + ICMP_HLEN) return;

    // BUGFIX: validate the IP total length before the subtraction below —
    // a malformed ip.len underflowed icmp_len, and a truncated frame let
    // the checksum slice run past the buffer.
    const total_len: usize = ntohs(ip.len);
    if (total_len < ip_header_len + ICMP_HLEN) return;
    const icmp_len = total_len - ip_header_len;
    if (icmp_offset + icmp_len > data.len) return;

    const icmp = @as(*align(1) IcmpHeader, @ptrCast(data.ptr + icmp_offset));
    if (icmp.type != 8) return; // Echo Request only

    // Craft Reply IN PLACE
    // 1. Eth Header
    eth.dst0 = eth.src0;
    eth.dst1 = eth.src1;
    eth.dst2 = eth.src2;
    eth.dst3 = eth.src3;
    eth.dst4 = eth.src4;
    eth.dst5 = eth.src5;
    eth.src0 = MY_MAC[0];
    eth.src1 = MY_MAC[1];
    eth.src2 = MY_MAC[2];
    eth.src3 = MY_MAC[3];
    eth.src4 = MY_MAC[4];
    eth.src5 = MY_MAC[5];

    // 2. IP Header: swap src/dst.
    ip.dst0 = ip.src0;
    ip.dst1 = ip.src1;
    ip.dst2 = ip.src2;
    ip.dst3 = ip.src3;
    ip.src0 = MY_IP[0];
    ip.src1 = MY_IP[1];
    ip.src2 = MY_IP[2];
    ip.src3 = MY_IP[3];
    // BUGFIX: the header checksum was zeroed but never recomputed, so
    // replies went out with csum=0 and the peer stack dropped them.
    // Checksum is computed over the IP header with the csum field as 0.
    ip.csum = 0;
    ip.csum = htons(checksum(data[ETH_HLEN .. ETH_HLEN + ip_header_len]));

    // 3. ICMP Header: Echo Reply, full recompute (safer than a one's
    // complement delta update of the type change).
    icmp.type = 0;
    icmp.csum = 0;
    icmp.csum = htons(checksum(data[icmp_offset .. icmp_offset + icmp_len]));

    // 4. Send
    send_packet(tx_ring, pkt);
}

/// Push a descriptor onto the TX ring; drop with a log line when full.
/// NOTE(review): TX stores a pre-masked head while the RX loop in main()
/// consumes free-running indices — this assumes the kernel uses the same
/// masked convention on TX; confirm against the kernel ring implementation.
fn send_packet(tx_ring: *RingBufferPacket, pkt: IonPacket) void {
    // Atomic Push to TX
    const head = @atomicLoad(u32, &tx_ring.head, .monotonic);
    const tail = @atomicLoad(u32, &tx_ring.tail, .monotonic);
    const mask = tx_ring.mask;

    const next = (head + 1) & mask;
    if (next != tail) {
        tx_ring.data[head & mask] = pkt;
        @atomicStore(u32, &tx_ring.head, next, .release);
    } else {
        print("[LIVE] TX Full! Dropping.\n");
    }
}