feat(capsule): stabilize TUI monitor, implement control IPC, and fix leaks (Zig 0.15.2)

This commit is contained in:
Markus Maiwald 2026-02-01 10:35:35 +01:00
parent 842ebf631c
commit b6edd5c403
Signed by: markus
GPG Key ID: 07DDBEA3CBDC090A
19 changed files with 991 additions and 583 deletions

View File

@ -4,6 +4,10 @@ pub fn build(b: *std.Build) void {
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});
// Dependencies
const vaxis_dep = b.dependency("vaxis", .{});
const vaxis_mod = vaxis_dep.module("vaxis");
// ========================================================================
// Time Module (L0)
// ========================================================================
@ -76,6 +80,13 @@ pub fn build(b: *std.Build) void {
.optimize = optimize,
});
const l2_policy_mod = b.createModule(.{
.root_source_file = b.path("l2-membrane/policy.zig"),
.target = target,
.optimize = optimize,
});
l2_policy_mod.addImport("lwf", l0_mod);
// ========================================================================
// Crypto: SHA3/SHAKE & FIPS 202
// ========================================================================
@ -248,6 +259,12 @@ pub fn build(b: *std.Build) void {
});
const run_utcp_tests = b.addRunArtifact(utcp_tests);
// L2 Policy tests
const l2_policy_tests = b.addTest(.{
.root_module = l2_policy_mod,
});
const run_l2_policy_tests = b.addRunArtifact(l2_policy_tests);
// OPQ tests
const opq_tests = b.addTest(.{
.root_module = opq_mod,
@ -433,6 +450,7 @@ pub fn build(b: *std.Build) void {
test_step.dependOn(&run_bridge_tests.step);
test_step.dependOn(&run_l1_qvl_tests.step);
test_step.dependOn(&run_l1_qvl_ffi_tests.step);
test_step.dependOn(&run_l2_policy_tests.step);
// ========================================================================
// Examples
@ -483,6 +501,13 @@ pub fn build(b: *std.Build) void {
// ========================================================================
// Capsule Core (Phase 10) Reference Implementation
// ========================================================================
const capsule_control_mod = b.createModule(.{
.root_source_file = b.path("capsule-core/src/control.zig"),
.target = target,
.optimize = optimize,
});
capsule_control_mod.addImport("qvl", l1_qvl_mod);
const capsule_mod = b.createModule(.{
.root_source_file = b.path("capsule-core/src/main.zig"),
.target = target,
@ -500,6 +525,10 @@ pub fn build(b: *std.Build) void {
capsule_mod.addImport("gateway", gateway_mod);
capsule_mod.addImport("relay", relay_mod);
capsule_mod.addImport("quarantine", l0_quarantine_mod);
capsule_mod.addImport("policy", l2_policy_mod);
capsule_mod.addImport("soulkey", l1_soulkey_mod);
capsule_mod.addImport("vaxis", vaxis_mod);
capsule_mod.addImport("control", capsule_control_mod);
const capsule_exe = b.addExecutable(.{
.name = "capsule",

12
build.zig.zon Normal file
View File

@ -0,0 +1,12 @@
.{
    .name = .libertaria_sdk,
    // NOTE(review): this mirrors the targeted Zig release (0.15.2) rather than an
    // independent SDK semver — confirm that coupling is intentional.
    .version = "0.15.2",
    .dependencies = .{
        .vaxis = .{
            // Tracks libvaxis `main`; the content hash below pins the exact snapshot,
            // so builds stay reproducible even though the URL is a moving branch.
            .url = "https://github.com/rockorager/libvaxis/archive/refs/heads/main.tar.gz",
            .hash = "vaxis-0.5.1-BWNV_Bw_CQAIVNh1ekGVzbip25CYBQ_J3kgABnYGFnI4",
        },
    },
    // Include the whole package root in the dependency hash.
    .paths = .{""},
    .fingerprint = 0xb6db0622de53913f,
}

View File

@ -10,14 +10,23 @@ const QvlStore = @import("qvl_store.zig").QvlStore;
const PeerTable = @import("peer_table.zig").PeerTable;
const DhtService = dht.DhtService;
pub const ActiveCircuit = struct {
session_id: [16]u8,
relay_address: std.net.Address,
/// One relay in a multi-hop circuit: identity, key material, and the
/// sticky session established with that relay.
pub const CircuitHop = struct {
    relay_id: [32]u8,
    relay_pubkey: [32]u8,
    // Sticky Ephemeral Key (for optimizations)
    session_id: [16]u8,
    ephemeral_keypair: crypto.dh.X25519.KeyPair,
};

/// A built multi-hop circuit. Owns `path`; callers must invoke `deinit`.
pub const ActiveCircuit = struct {
    path: std.ArrayList(CircuitHop),
    target_id: [32]u8,
    allocator: std.mem.Allocator,
    /// Releases the hop list.
    // NOTE(review): on Zig 0.15 `std.ArrayList` is unmanaged and `deinit`
    // takes the allocator (`self.path.deinit(self.allocator)`) — confirm
    // which std this file builds against; managed `deinit()` is pre-0.15.
    pub fn deinit(self: *ActiveCircuit) void {
        self.path.deinit();
    }
};
pub const CircuitError = error{
NoRelaysAvailable,
TargetNotFound,
@ -103,41 +112,82 @@ pub const CircuitBuilder = struct {
return .{ .packet = packet, .first_hop = relay_node.address };
}
/// Create a sticky session circuit
pub fn createCircuit(self: *CircuitBuilder, relay_did: ?[]const u8) !ActiveCircuit {
// Select Relay (Random if null)
const selected_did = if (relay_did) |did| try self.allocator.dupe(u8, did) else blk: {
const trusted = try self.qvl_store.getTrustedRelays(0.5, 1);
if (trusted.len == 0) return error.NoRelaysAvailable;
break :blk trusted[0];
/// Build a multi-hop circuit to a specific target
/// Hops must be resolved NodeIDs [Relay1, Relay2, Relay3]
/// Packet flows: Me -> Relay1 -> Relay2 -> Relay3 -> Target
pub fn buildCircuit(
self: *CircuitBuilder,
hops: []const [32]u8,
) !ActiveCircuit {
var circuit = ActiveCircuit{
.path = std.ArrayList(CircuitHop).init(self.allocator),
.target_id = [_]u8{0} ** 32, // Set later or unused for pure circuit
.allocator = self.allocator,
};
defer self.allocator.free(selected_did);
errdefer circuit.deinit();
// Resolve Relay Keys
var relay_id = [_]u8{0} ** 32;
if (selected_did.len >= 32) @memcpy(&relay_id, selected_did[0..32]);
for (hops) |node_id| {
// Resolve Relay Keys
const node = self.dht.routing_table.findNode(node_id) orelse return error.RelayNotFound;
const relay_node = self.dht.routing_table.findNode(relay_id) orelse return error.RelayNotFound;
// Generate Session & Keys
const kp = crypto.dh.X25519.KeyPair.generate();
var session_id: [16]u8 = undefined;
std.crypto.random.bytes(&session_id);
const kp = crypto.dh.X25519.KeyPair.generate();
var session_id: [16]u8 = undefined;
std.crypto.random.bytes(&session_id);
return ActiveCircuit{
.session_id = session_id,
.relay_address = relay_node.address,
.relay_pubkey = relay_node.key,
.ephemeral_keypair = kp,
};
try circuit.path.append(CircuitHop{
.relay_id = node_id,
.relay_pubkey = node.key,
.session_id = session_id,
.ephemeral_keypair = kp,
});
}
return circuit;
}
/// Send a payload on an existing circuit (reusing session/keys)
pub fn sendOnCircuit(self: *CircuitBuilder, circuit: *ActiveCircuit, target_did: []const u8, payload: []const u8) !relay.RelayPacket {
var target_id = [_]u8{0} ** 32;
if (target_did.len >= 32) @memcpy(&target_id, target_did[0..32]);
/// Send payload through the circuit.
/// Recursively wraps the onion: Target <- H3 <- H2 <- H1 <- Me.
/// Returns the outermost `RelayPacket` (addressed to the first hop);
/// caller owns it and must `deinit` it with `self.allocator`.
pub fn sendOnCircuit(
    self: *CircuitBuilder,
    circuit: *ActiveCircuit,
    target_id: [32]u8,
    payload: []const u8,
) !relay.RelayPacket {
    // An empty path cannot carry traffic.
    if (circuit.path.items.len == 0) return error.PathConstructionFailed;

    // Step A: innermost layer. The exit node (last hop) forwards to Target,
    // so the first wrap uses the exit hop's key material.
    const exit_hop = circuit.path.items[circuit.path.items.len - 1];
    var current_packet = try self.onion_builder.wrapLayer(payload, target_id, exit_hop.relay_pubkey, exit_hop.session_id, exit_hop.ephemeral_keypair);

    // Step B: wrap backwards toward us. `wrapLayer` yields a RelayPacket
    // struct, so each inner packet must be encoded to bytes before it can
    // become the payload of the next (outer) layer.
    var i: usize = circuit.path.items.len - 1;
    while (i > 0) : (i -= 1) {
        const inner_hop = circuit.path.items[i]; // layer we just wrapped for
        const outer_hop = circuit.path.items[i - 1]; // relay that forwards to inner_hop

        // Encode the current packet; on failure we must release it ourselves
        // (no errdefer: the variable is reassigned below, so a scope-level
        // errdefer could double-free an already-deinitialized packet).
        const inner_bytes = current_packet.encode(self.allocator) catch |err| {
            current_packet.deinit(self.allocator);
            return err;
        };
        // The struct is no longer needed once we hold its byte encoding.
        current_packet.deinit(self.allocator);
        defer self.allocator.free(inner_bytes);

        // Wrap: Outer -> Inner.
        current_packet = try self.onion_builder.wrapLayer(inner_bytes, inner_hop.relay_id, outer_hop.relay_pubkey, outer_hop.session_id, outer_hop.ephemeral_keypair);
    }
    return current_packet;
}
};

View File

@ -10,10 +10,10 @@ pub const NodeConfig = struct {
port: u16 = 8710,
/// Control Socket Path (Unix Domain Socket)
control_socket_path: []const u8,
control_socket_path: []const u8 = "",
/// Identity Key Path (Ed25519 private key)
identity_key_path: []const u8,
identity_key_path: []const u8 = "",
/// Bootstrap peers (multiaddrs)
bootstrap_peers: [][]const u8 = &.{},

View File

@ -4,7 +4,7 @@ const std = @import("std");
const node_mod = @import("node.zig");
const config_mod = @import("config.zig");
const control_mod = @import("control.zig");
const control_mod = @import("control");
const tui_app = @import("tui/app.zig");
pub fn main() !void {
@ -129,7 +129,22 @@ pub fn main() !void {
const state = if (args.len > cmd_idx + 1) args[cmd_idx + 1] else "open";
try runCliCommand(allocator, .{ .Airlock = .{ .state = state } }, data_dir_override);
} else if (std.mem.eql(u8, command, "monitor")) {
try tui_app.run(allocator, "dummy_socket_path");
// Load config to find socket path
const config_path = "config.json";
var config = config_mod.NodeConfig.loadFromJsonFile(allocator, config_path) catch {
std.log.err("Failed to load config for monitor. Is config.json present?", .{});
return error.ConfigLoadFailed;
};
defer config.deinit(allocator);
const data_dir = data_dir_override orelse config.data_dir;
const socket_path = if (std.fs.path.isAbsolute(config.control_socket_path))
try allocator.dupe(u8, config.control_socket_path)
else
try std.fs.path.join(allocator, &[_][]const u8{ data_dir, std.fs.path.basename(config.control_socket_path) });
defer allocator.free(socket_path);
try tui_app.run(allocator, socket_path);
} else {
printUsage();
}

View File

@ -7,8 +7,7 @@ const l0 = @import("l0_transport");
// access UTCP from l0 or utcp module directly
// build.zig imports "utcp" into capsule
const utcp_mod = @import("utcp");
// l1_identity module
const l1 = @import("l1_identity");
const soulkey_mod = @import("soulkey");
// qvl module
const qvl = @import("qvl");
@ -19,15 +18,16 @@ const dht_mod = @import("dht");
const gateway_mod = @import("gateway");
const storage_mod = @import("storage.zig");
const qvl_store_mod = @import("qvl_store.zig");
const control_mod = @import("control.zig");
const control_mod = @import("control");
const quarantine_mod = @import("quarantine");
const circuit_mod = @import("circuit.zig");
const relay_service_mod = @import("relay_service.zig");
const policy_mod = @import("policy");
const NodeConfig = config_mod.NodeConfig;
const UTCP = utcp_mod.UTCP;
// SoulKey definition (temporarily embedded until module is available)
const SoulKey = l1.SoulKey;
// SoulKey definition
const SoulKey = soulkey_mod.SoulKey;
const RiskGraph = qvl.types.RiskGraph;
const DiscoveryService = discovery_mod.DiscoveryService;
const PeerTable = peer_table_mod.PeerTable;
@ -64,6 +64,9 @@ pub const CapsuleNode = struct {
gateway: ?gateway_mod.Gateway,
relay_service: ?relay_service_mod.RelayService,
circuit_builder: ?circuit_mod.CircuitBuilder,
policy_engine: policy_mod.PolicyEngine,
thread_pool: std.Thread.Pool,
state_mutex: std.Thread.Mutex,
storage: *StorageService,
qvl_store: *QvlStore,
control_socket: std.net.Server,
@ -77,6 +80,10 @@ pub const CapsuleNode = struct {
pub fn init(allocator: std.mem.Allocator, config: NodeConfig) !*CapsuleNode {
const self = try allocator.create(CapsuleNode);
// Initialize Thread Pool
var thread_pool: std.Thread.Pool = undefined;
try thread_pool.init(.{ .allocator = allocator });
// Ensure data directory exists
std.fs.cwd().makePath(config.data_dir) catch |err| {
if (err != error.PathAlreadyExists) return err;
@ -97,6 +104,9 @@ pub const CapsuleNode = struct {
// TODO: Generate real NodeID from Public Key
std.mem.copyForwards(u8, node_id[0..4], "NODE");
// Initialize Policy Engine
const policy_engine = policy_mod.PolicyEngine.init(allocator);
// Initialize Storage
const db_path = try std.fs.path.join(allocator, &[_][]const u8{ config.data_dir, "capsule.db" });
defer allocator.free(db_path);
@ -171,6 +181,9 @@ pub const CapsuleNode = struct {
.gateway = null, // Initialized below
.relay_service = null, // Initialized below
.circuit_builder = null, // Initialized below
.policy_engine = policy_engine,
.thread_pool = thread_pool,
.state_mutex = .{},
.storage = storage,
.qvl_store = qvl_store,
.control_socket = control_socket,
@ -232,9 +245,80 @@ pub const CapsuleNode = struct {
self.control_socket.deinit();
// Clean up socket file
std.fs.cwd().deleteFile(self.config.control_socket_path) catch {};
self.thread_pool.deinit();
self.allocator.destroy(self);
}
/// Worker entry point (runs on the thread pool): applies L2 policy, then
/// dispatches one received frame by service type.
/// Ownership: takes the frame by value and always deinits it here — the
/// spawn site must not free it after a successful spawn.
fn processFrame(self: *CapsuleNode, frame: l0.LWFFrame, sender: std.net.Address) void {
    var f = frame;
    defer f.deinit(self.allocator);

    // L2 MEMBRANE: policy check runs unlocked — it is CPU-bound and must not
    // serialize the workers behind state_mutex.
    const decision = self.policy_engine.decide(&f.header);
    if (decision == .drop) {
        std.log.info("Policy: Dropped frame from {f}", .{sender});
        return;
    }

    switch (f.header.service_type) {
        l0.LWFHeader.ServiceType.RELAY_FORWARD => {
            if (self.relay_service) |*rs| {
                // Unwrap under the lock: forwardPacket mutates the shared
                // sticky-sessions map.
                self.state_mutex.lock();
                const result = rs.forwardPacket(f.payload, self.identity.x25519_private);
                self.state_mutex.unlock();
                if (result) |next_hop_data| {
                    // We own the unwrapped inner payload from here on.
                    defer self.allocator.free(next_hop_data.payload);
                    const next_node_id = next_hop_data.next_hop;
                    // All-zero next hop means this node is the final destination.
                    var is_final = true;
                    for (next_node_id) |b| {
                        if (b != 0) {
                            is_final = false;
                            break;
                        }
                    }
                    if (is_final) {
                        std.log.info("Relay: Final Packet Received for Session {x}! Size: {d}", .{ next_hop_data.session_id, next_hop_data.payload.len });
                    } else {
                        // DHT routing-table lookup is shared state — lock around it.
                        self.state_mutex.lock();
                        const next_remote = self.dht.routing_table.findNode(next_node_id);
                        self.state_mutex.unlock();
                        if (next_remote) |remote| {
                            // Re-wrap the inner payload in a fresh LWF frame for transport.
                            // On init failure we just bail; the deferred frees above still run.
                            var relay_frame = l0.LWFFrame.init(self.allocator, next_hop_data.payload.len) catch return;
                            defer relay_frame.deinit(self.allocator);
                            @memcpy(relay_frame.payload, next_hop_data.payload);
                            relay_frame.header.service_type = l0.LWFHeader.ServiceType.RELAY_FORWARD;
                            self.utcp.sendFrame(remote.address, &relay_frame, self.allocator) catch |err| {
                                std.log.warn("Relay Send Error: {}", .{err});
                            };
                            std.log.info("Relay: Forwarded packet to {f}", .{remote.address});
                        } else {
                            std.log.warn("Relay: Next hop {x} not found", .{next_node_id[0..4]});
                        }
                    }
                } else |err| {
                    std.log.warn("Relay Forward Error: {}", .{err});
                }
            }
        },
        fed.SERVICE_TYPE => {
            // Federation handling touches peer/DHT state — hold the lock for
            // the whole call.
            self.state_mutex.lock();
            defer self.state_mutex.unlock();
            self.handleFederationMessage(sender, f) catch |err| {
                std.log.warn("Federation Error: {}", .{err});
            };
        },
        else => {},
    }
}
pub fn start(self: *CapsuleNode) !void {
self.running = true;
std.log.info("CapsuleNode starting on port {d}...", .{self.config.port});
@ -273,59 +357,12 @@ pub const CapsuleNode = struct {
if (poll_fds[0].revents & std.posix.POLL.IN != 0) {
var buf: [1500]u8 = undefined;
if (self.utcp.receiveFrame(self.allocator, &buf)) |result| {
var frame = result.frame;
defer frame.deinit(self.allocator);
if (frame.header.service_type == fed.SERVICE_TYPE) {
try self.handleFederationMessage(result.sender, frame);
// Phase 14: Relay Forwarding
if (self.relay_service) |*rs| {
std.log.debug("Relay: Received relay packet from {f}", .{result.sender});
// Unwrap and forward using our private key (as receiver)
if (rs.forwardPacket(frame.payload, self.identity.x25519_private)) |next_hop_data| {
// next_hop_data.payload is now the INNER payload
const next_node_id = next_hop_data.next_hop;
// Resolve next hop address
// TODO: Check if we are final destination (all zeros) handled by forwardPacket
// But forwardPacket returns the result to US to send.
// Check if we are destination handled by forwardPacket via null next_hop logic?
// forwardPacket returns next_hop. If all zeros, it means LOCAL delivery.
var is_final = true;
for (next_node_id) |b| {
if (b != 0) {
is_final = false;
break;
}
}
if (is_final) {
// Final delivery to US
std.log.info("Relay: Final Packet Received for Session {x}! Payload Size: {d}", .{ next_hop_data.session_id, next_hop_data.payload.len });
// TODO: Hand over payload to upper layers (e.g. Chat/Protocol handler)
// For MVP, just log.
} else {
// Forward to next hop
// Lookup IP
const next_remote = self.dht.routing_table.findNode(next_node_id);
if (next_remote) |remote| {
// Re-wrap in LWF for transport
try self.utcp.send(remote.address, next_hop_data.payload, l0.LWFHeader.ServiceType.RELAY_FORWARD);
std.log.info("Relay: Forwarded packet to {f} (Session {x})", .{ remote.address, next_hop_data.session_id });
} else {
std.log.warn("Relay: Next hop {x} not found in routing table", .{next_node_id[0..4]});
}
}
self.allocator.free(next_hop_data.payload);
} else |err| {
std.log.warn("Relay: Failed to forward packet: {}", .{err});
}
} else {
std.log.debug("Relay: Received relay packet but relay_service is disabled.", .{});
}
}
self.thread_pool.spawn(processFrame, .{ self, result.frame, result.sender }) catch |err| {
std.log.warn("Failed to spawn worker: {}", .{err});
// Fallback: Free resource
var f = result.frame;
f.deinit(self.allocator);
};
} else |err| {
if (err != error.WouldBlock) std.log.warn("UTCP receive error: {}", .{err});
}
@ -348,6 +385,8 @@ pub const CapsuleNode = struct {
// For local multi-port test, we allow it if port is different.
// But mDNS on host network might show our own announcement.
}
self.state_mutex.lock();
defer self.state_mutex.unlock();
try self.discovery.handlePacket(&self.peer_table, m_buf[0..bytes], addr);
}
}
@ -360,21 +399,27 @@ pub const CapsuleNode = struct {
};
defer conn.stream.close();
self.state_mutex.lock();
self.handleControlConnection(conn) catch |err| {
std.log.warn("Control handle error: {}", .{err});
};
self.state_mutex.unlock();
}
}
// 3. Periodic Ticks
const now = std.time.milliTimestamp();
if (now - last_tick >= TICK_MS) {
self.state_mutex.lock();
try self.tick();
self.state_mutex.unlock();
last_tick = now;
// Discovery cycle (every ~5s)
discovery_timer += 1;
if (discovery_timer >= 50) {
self.state_mutex.lock();
defer self.state_mutex.unlock();
self.discovery.announce() catch {};
self.discovery.query() catch {};
discovery_timer = 0;
@ -505,7 +550,7 @@ pub const CapsuleNode = struct {
// Convert to federation nodes
var nodes = try self.allocator.alloc(fed.DhtNode, closest.len);
for (closest, 0..) |node, i| {
nodes[i] = .{ .id = node.id, .address = node.address };
nodes[i] = .{ .id = node.id, .address = node.address, .key = [_]u8{0} ** 32 };
}
try self.sendFederationMessage(sender, .{
@ -560,18 +605,20 @@ pub const CapsuleNode = struct {
switch (cmd) {
.Status => {
const my_did_hex = std.fmt.bytesToHex(&self.identity.did, .lower);
response = .{
.NodeStatus = .{
.node_id = "NODE_ID_STUB",
.node_id = try self.allocator.dupe(u8, my_did_hex[0..12]),
.state = if (self.running) "Running" else "Stopping",
.peers_count = self.peer_table.peers.count(),
.uptime_seconds = 0, // TODO: Track start time
.version = "0.1.0",
.version = try self.allocator.dupe(u8, "0.15.2-voxis"),
},
};
},
.Peers => {
response = .{ .Ok = "Peer listing not yet fully implemented in CLI JSON" };
const peers = try self.getPeerList();
response = .{ .PeerList = peers };
},
.Sessions => {
const sessions = try self.getSessions();
@ -605,6 +652,10 @@ pub const CapsuleNode = struct {
const logs = try self.getSlashLog(args.limit);
response = .{ .SlashLogResult = logs };
},
.Topology => {
const topo = try self.getTopology();
response = .{ .TopologyInfo = topo };
},
.Ban => |args| {
if (try self.processBan(args)) {
response = .{ .Ok = "Peer banned successfully." };
@ -642,10 +693,6 @@ pub const CapsuleNode = struct {
std.log.info("AIRLOCK: State set to {s}", .{args.state});
response = .{ .LockdownStatus = try self.getLockdownStatus() };
},
.Topology => {
const topo = try self.getTopology();
response = .{ .TopologyInfo = topo };
},
.RelayControl => |args| {
if (args.enable) {
if (self.relay_service == null) {
@ -699,7 +746,12 @@ pub const CapsuleNode = struct {
const encoded = try packet.encode(self.allocator);
defer self.allocator.free(encoded);
try self.utcp.send(first_hop, encoded, l0.LWFHeader.ServiceType.RELAY_FORWARD);
var frame = try l0.LWFFrame.init(self.allocator, encoded.len);
defer frame.deinit(self.allocator);
@memcpy(frame.payload, encoded);
frame.header.service_type = l0.LWFHeader.ServiceType.RELAY_FORWARD;
try self.utcp.sendFrame(first_hop, &frame, self.allocator);
response = .{ .Ok = "Packet sent via Relay" };
} else |err| {
std.log.warn("RelaySend failed: {}", .{err});
@ -818,14 +870,12 @@ pub const CapsuleNode = struct {
}
fn getIdentityInfo(self: *CapsuleNode) !control_mod.IdentityInfo {
const did_hex = std.fmt.bytesToHex(&self.identity.did, .lower);
const pubkey_hex = std.fmt.bytesToHex(&self.identity.public_key, .lower);
const dht_id_hex = std.fmt.bytesToHex(&self.dht.routing_table.self_id, .lower);
const did_str = std.fmt.bytesToHex(&self.identity.did, .lower);
const pub_key_hex = std.fmt.bytesToHex(&self.identity.ed25519_public, .lower);
return control_mod.IdentityInfo{
.did = try self.allocator.dupe(u8, &did_hex),
.public_key = try self.allocator.dupe(u8, &pubkey_hex),
.dht_node_id = try self.allocator.dupe(u8, &dht_id_hex),
.did = try self.allocator.dupe(u8, &did_str),
.public_key = try self.allocator.dupe(u8, &pub_key_hex),
.dht_node_id = try self.allocator.dupe(u8, "00000000"), // TODO
};
}
@ -885,6 +935,24 @@ pub const CapsuleNode = struct {
};
}
/// Snapshot the peer table as control-protocol PeerInfo records.
/// Caller owns the returned slice and the `id`/`address` strings inside it.
/// NOTE: `state` is a string literal and must not be freed by the caller.
fn getPeerList(self: *CapsuleNode) ![]control_mod.PeerInfo {
    const count = self.peer_table.peers.count();
    var list = try self.allocator.alloc(control_mod.PeerInfo, count);
    var i: usize = 0;
    // If any allocation below fails, release everything built so far —
    // otherwise every previously-duped string (and the list) would leak.
    errdefer {
        for (list[0..i]) |info| {
            self.allocator.free(info.id);
            self.allocator.free(info.address);
        }
        self.allocator.free(list);
    }
    var it = self.peer_table.peers.iterator();
    while (it.next()) |entry| : (i += 1) {
        const peer_did = std.fmt.bytesToHex(&entry.key_ptr.*, .lower);
        const peer = entry.value_ptr;
        // Dupe the id first so it can be freed if allocPrint fails.
        const id = try self.allocator.dupe(u8, peer_did[0..8]);
        errdefer self.allocator.free(id);
        list[i] = .{
            .id = id,
            .address = try std.fmt.allocPrint(self.allocator, "{any}", .{peer.address}),
            .state = if (peer.is_active) "Active" else "Inactive",
            .last_seen = peer.last_seen,
        };
    }
    return list;
}
fn getQvlMetrics(self: *CapsuleNode, args: control_mod.QvlQueryArgs) !control_mod.QvlMetrics {
_ = args; // TODO: Use target_did for specific queries

View File

@ -43,7 +43,7 @@ pub const RelayService = struct {
self: *RelayService,
raw_packet: []const u8,
receiver_private_key: [32]u8,
) !struct { next_hop: [32]u8, payload: []u8, session_id: [16]u8 } {
) !relay_mod.RelayResult {
// Parse the wire packet
var packet = try relay_mod.RelayPacket.decode(self.allocator, raw_packet);
defer packet.deinit(self.allocator);
@ -67,7 +67,7 @@ pub const RelayService = struct {
}
// Forward to next hop
std.log.debug("Relay: Forwarding session {x} to next hop: {x}", .{ result.session_id, std.fmt.fmtSliceHexLower(&result.next_hop) });
std.log.debug("Relay: Forwarding session {x} to next hop: {x}", .{ result.session_id, result.next_hop });
// Update Sticky Session Stats
const now = std.time.timestamp();
@ -89,6 +89,29 @@ pub const RelayService = struct {
return result;
}
/// Prune inactive sessions (Garbage Collection)
/// Removes sessions inactive for more than max_age_seconds
/// Returns number of sessions removed
/// Prune inactive sessions (Garbage Collection).
/// Removes sessions whose `last_seen` is older than `max_age_seconds`.
/// Returns the number of sessions removed.
pub fn pruneSessions(self: *RelayService, max_age_seconds: u64) !usize {
    const now = std.time.timestamp();
    // Two-pass removal: collect expired keys first, because mutating a hash
    // map while iterating it invalidates the iterator.
    // Zig 0.15: std.ArrayList is unmanaged — the allocator is passed per call,
    // matching the convention used elsewhere in this codebase.
    var expired_keys: std.ArrayList([16]u8) = .empty;
    defer expired_keys.deinit(self.allocator);

    var it = self.sessions.iterator();
    while (it.next()) |entry| {
        const age = now - entry.value_ptr.last_seen;
        if (age > @as(i64, @intCast(max_age_seconds))) {
            try expired_keys.append(self.allocator, entry.key_ptr.*);
        }
    }
    for (expired_keys.items) |key| {
        _ = self.sessions.remove(key);
    }
    return expired_keys.items.len;
}
/// Get relay statistics
pub fn getStats(self: *const RelayService) RelayStats {
return .{
@ -141,3 +164,30 @@ test "RelayService: Forward packet" {
const stats = relay_service.getStats();
try std.testing.expectEqual(@as(u64, 1), stats.packets_forwarded);
}
// Verifies that pruneSessions removes only sessions older than the cutoff:
// a 2-hour-old session is pruned at a 1-hour max age, a 10-second-old one stays.
test "RelayService: Session cleanup" {
    const allocator = std.testing.allocator;
    var service = RelayService.init(allocator);
    defer service.deinit();
    const session_id = [_]u8{0xAA} ** 16;
    const now = std.time.timestamp();
    // Add old session (2 hours ago) — should be pruned.
    try service.sessions.put(session_id, .{
        .packet_count = 10,
        .last_seen = now - 7200,
    });
    // Add fresh session (10 seconds ago) — should survive.
    const fresh_id = [_]u8{0xBB} ** 16;
    try service.sessions.put(fresh_id, .{
        .packet_count = 5,
        .last_seen = now - 10,
    });
    const removed = try service.pruneSessions(3600); // 1 hour max age
    try std.testing.expectEqual(@as(usize, 1), removed);
    try std.testing.expect(service.sessions.get(session_id) == null);
    try std.testing.expect(service.sessions.get(fresh_id) != null);
}

View File

@ -89,7 +89,10 @@ pub const StorageService = struct {
"ON CONFLICT(id) DO UPDATE SET address=excluded.address, last_seen=excluded.last_seen, seen_count=seen_count+1, x25519_key=excluded.x25519_key;";
var stmt: ?*c.sqlite3_stmt = null;
if (c.sqlite3_prepare_v2(self.db, sql, -1, &stmt, null) != c.SQLITE_OK) return error.PrepareFailed;
if (c.sqlite3_prepare_v2(self.db, sql, -1, &stmt, null) != c.SQLITE_OK) {
std.log.err("SQLite: Prepare failed for savePeer: {s}", .{c.sqlite3_errmsg(self.db)});
return error.PrepareFailed;
}
defer _ = c.sqlite3_finalize(stmt);
// Bind ID
@ -112,7 +115,10 @@ pub const StorageService = struct {
pub fn loadPeers(self: *StorageService, allocator: std.mem.Allocator) ![]RemoteNode {
const sql = "SELECT id, address, last_seen, x25519_key FROM peers;";
var stmt: ?*c.sqlite3_stmt = null;
if (c.sqlite3_prepare_v2(self.db, sql, -1, &stmt, null) != c.SQLITE_OK) return error.PrepareFailed;
if (c.sqlite3_prepare_v2(self.db, sql, -1, &stmt, null) != c.SQLITE_OK) {
std.log.err("SQLite: Prepare failed for loadPeers: {s}", .{c.sqlite3_errmsg(self.db)});
return error.PrepareFailed;
}
defer _ = c.sqlite3_finalize(stmt);
var list = std.ArrayList(RemoteNode){};

View File

@ -0,0 +1,37 @@
# Capsule TUI & Control Protocol Documentation
## Overview
The Capsule TUI Monitor (The "Luxury Deck") provides a real-time visualization of the node's internal state, network topology, and security events. It communicates with the Capsule daemon via a Unix Domain Socket using a custom JSON-based control protocol.
## Architecture
### 1. Control Protocol (`control.zig`)
A unified command/response schema shared between the daemon and any management client.
- **Commands**: `Status`, `Peers`, `Sessions`, `Topology`, `SlashLog`, `Shutdown`, `Lockdown`, `Unlock`.
- **Responses**: Tagged unions containing specific telemetry data.
### 2. TUI Engine (`tui/`)
- **`app.zig`**: Orchestrates the Vaxis event loop. Spawns a dedicated background thread for non-blocking I/O with the daemon.
- **`client.zig`**: Implements the IPC client with mandatory deep-copying and explicit memory management to ensure a zero-leak footprint.
- **`view.zig`**: Renders the stateful UI components:
- **Dashboard**: Core node stats (ID, Version, State, Uptime).
- **Slash Log**: Real-time list of network security interventions.
- **Trust Graph**: Circular topology visualization using f64 polar coordinates mapped to terminal cells.
## Memory Governance
In accordance with high-stakes SysOps standards:
- **Zero-Leak Polling**: Every data refresh explicitly frees the previously "duped" strings and slices.
- **Thread Safety**: `AppState` uses an internal Mutex to synchronize the rendering path with the background polling path.
- **Unmanaged Design**: Aligns with Zig 0.15.2 conventions by passing explicit allocators to all dynamic structures.
## Usage
1. **Daemon**: Start the node using `./zig-out/bin/capsule start`.
2. **Monitor**: Connect the monitor using `./zig-out/bin/capsule monitor`.
3. **Navigation**:
- `Tab`: Cycle between Dashboard, Slash Log, and Trust Graph.
- `Ctrl+C` or `Q`: Exit monitor.
## Current Technical Debt
- [ ] Implement `uptime_seconds` tracking in `node.zig`.
- [ ] Implement `dht_node_id` extraction for IdentityInfo.
- [ ] Add interactive node inspection in the Trust Graph view.

View File

@ -1,16 +1,160 @@
//! Capsule TUI Application (Stub)
//! Vaxis dependency temporarily removed to fix build.
//! Capsule TUI Application
//! Built with Vaxis (The "Luxury Deck").
const std = @import("std");
const vaxis = @import("vaxis");
pub const App = struct {
pub fn run(_: *anyopaque) !void {
std.log.info("TUI functionality temporarily disabled.", .{});
const control = @import("control");
const client_mod = @import("client.zig");
const view_mod = @import("view.zig");
const Event = union(enum) {
key_press: vaxis.Key,
winsize: vaxis.Winsize,
update_data: void,
};
/// Shared TUI application state. The render loop and the background polling
/// thread both touch the data fields, so all access to them goes through `mutex`.
pub const AppState = struct {
    allocator: std.mem.Allocator,
    should_quit: bool,
    client: client_mod.Client,
    // UI State (render thread only)
    active_tab: enum { Dashboard, SlashLog, TrustGraph } = .Dashboard,
    // Data State (Protected by mutex)
    mutex: std.Thread.Mutex = .{},
    node_status: ?client_mod.NodeStatus = null,
    slash_log: std.ArrayList(client_mod.SlashEvent),
    topology: ?client_mod.TopologyInfo = null,

    /// Creates the state and its IPC client; the client is not yet connected.
    pub fn init(allocator: std.mem.Allocator) !AppState {
        return .{
            .allocator = allocator,
            .should_quit = false,
            .client = try client_mod.Client.init(allocator),
            .slash_log = std.ArrayList(client_mod.SlashEvent){},
            .topology = null,
            .mutex = .{},
        };
    }

    /// Frees all client-produced data still held in the state, then the client.
    /// Must only run after the polling thread has been joined.
    pub fn deinit(self: *AppState) void {
        if (self.node_status) |s| self.client.freeStatus(s);
        // Slash-log strings were allocated by the client's parser, so they are
        // freed with client.allocator (assumed identical to self.allocator,
        // since Client.init received it — TODO confirm).
        for (self.slash_log.items) |ev| {
            self.client.allocator.free(ev.target_did);
            self.client.allocator.free(ev.reason);
            self.client.allocator.free(ev.severity);
            self.client.allocator.free(ev.evidence_hash);
        }
        self.slash_log.deinit(self.allocator);
        if (self.topology) |t| self.client.freeTopology(t);
        self.client.deinit();
    }
};
pub fn run(allocator: std.mem.Allocator, control_socket_path: []const u8) !void {
_ = allocator;
_ = control_socket_path;
std.log.info("TUI functionality temporarily disabled.", .{});
/// TUI entry point: connects to the daemon control socket at `socket_path`,
/// spawns a background polling thread, and runs the Vaxis render/event loop
/// until the user quits (Ctrl+C or 'q').
pub fn run(allocator: std.mem.Allocator, socket_path: []const u8) !void {
    var app = try AppState.init(allocator);
    defer app.deinit();

    // Initialize Vaxis
    var vx = try vaxis.init(allocator, .{});
    // Initialize TTY
    var tty = try vaxis.Tty.init(&.{});
    defer tty.deinit();
    defer vx.deinit(allocator, tty.writer());

    // Event Loop
    var loop: vaxis.Loop(Event) = .{ .vaxis = &vx, .tty = &tty };
    try loop.init();
    try loop.start();
    defer loop.stop();

    // Connect to Daemon
    try app.client.connect(socket_path);

    // Background poller: fetches status/slash-log/topology once per second,
    // swaps the results into AppState under the mutex, frees the previous
    // snapshot, and posts update_data to wake the render loop.
    const DataThread = struct {
        fn run(l: *vaxis.Loop(Event), a: *AppState) void {
            // NOTE(review): should_quit is read here without the mutex; a stale
            // read only delays shutdown by one poll cycle, but an atomic flag
            // would be cleaner — confirm acceptable.
            while (!a.should_quit) {
                // Poll Status
                if (a.client.getStatus()) |status| {
                    a.mutex.lock();
                    defer a.mutex.unlock();
                    if (a.node_status) |old| a.client.freeStatus(old);
                    a.node_status = status;
                } else |_| {}
                // Poll Slash Log
                if (a.client.getSlashLog(20)) |logs| {
                    a.mutex.lock();
                    defer a.mutex.unlock();
                    // Free strings in existing events before clearing
                    for (a.slash_log.items) |ev| {
                        a.client.allocator.free(ev.target_did);
                        a.client.allocator.free(ev.reason);
                        a.client.allocator.free(ev.severity);
                        a.client.allocator.free(ev.evidence_hash);
                    }
                    a.slash_log.clearRetainingCapacity();
                    // NOTE(review): if this appendSlice fails (OOM), the event
                    // strings inside `logs` leak — only the outer slice is freed.
                    a.slash_log.appendSlice(a.allocator, logs) catch {};
                    a.allocator.free(logs);
                } else |_| {}
                // Poll Topology
                if (a.client.getTopology()) |topo| {
                    a.mutex.lock();
                    defer a.mutex.unlock();
                    if (a.topology) |old| a.client.freeTopology(old);
                    a.topology = topo;
                } else |_| {}
                // Notify UI to redraw
                l.postEvent(.{ .update_data = {} });
                std.Thread.sleep(1 * std.time.ns_per_s);
            }
        }
    };
    // Defer order (LIFO): thread.join runs before loop.stop, so the poller is
    // guaranteed gone before the event loop and AppState are torn down.
    var thread = try std.Thread.spawn(.{}, DataThread.run, .{ &loop, &app });
    defer thread.join();

    while (!app.should_quit) {
        // Handle Events (blocks until the next key/resize/update event)
        const event = loop.nextEvent();
        switch (event) {
            .key_press => |key| {
                if (key.matches('c', .{ .ctrl = true }) or key.matches('q', .{})) {
                    app.should_quit = true;
                }
                // Handle tab switching: Dashboard -> SlashLog -> TrustGraph -> ...
                if (key.matches(vaxis.Key.tab, .{})) {
                    app.active_tab = switch (app.active_tab) {
                        .Dashboard => .SlashLog,
                        .SlashLog => .TrustGraph,
                        .TrustGraph => .Dashboard,
                    };
                }
            },
            .winsize => |ws| {
                try vx.resize(allocator, tty.writer(), ws);
            },
            .update_data => {}, // Handled by redraw below
        }
        // Global Redraw — hold the mutex so the poller cannot free data the
        // view is currently rendering.
        {
            app.mutex.lock();
            defer app.mutex.unlock();
            const win = vx.window();
            win.clear();
            try view_mod.draw(&app, win);
            try vx.render(tty.writer());
        }
    }
}

View File

@ -1,167 +0,0 @@
//! Capsule TUI Application
//! Built with Vaxis (The "Luxury Deck").
const std = @import("std");
const vaxis = @import("vaxis");
const control = @import("../control.zig");
const client_mod = @import("client.zig");
const view_mod = @import("view.zig");
/// Events delivered to the TUI event loop: terminal key input, terminal
/// resize notifications, and wake-up ticks posted by the background
/// data-polling thread (see `DataThread` in `run`).
const Event = union(enum) {
    key_press: vaxis.Key,
    winsize: vaxis.Winsize,
    update_data: void,
};
/// Shared state of the TUI application. Owned by the UI thread; the data
/// thread in `run` also reads/writes the snapshot fields.
/// All deep-copied daemon snapshots (`node_status`, `slash_log`, `topology`)
/// are owned by this struct and released in `deinit`.
pub const AppState = struct {
    allocator: std.mem.Allocator,
    should_quit: bool,
    client: client_mod.Client,
    // UI State: which tab the user is currently viewing.
    active_tab: enum { Dashboard, SlashLog, TrustGraph } = .Dashboard,
    // Data State: latest deep-copied snapshots fetched from the daemon.
    node_status: ?client_mod.NodeStatus = null,
    slash_log: std.ArrayList(client_mod.SlashEvent),
    topology: ?client_mod.TopologyInfo = null,

    /// Create the application state and its IPC client.
    /// Returns an error if the client cannot be initialized.
    pub fn init(allocator: std.mem.Allocator) !AppState {
        return .{
            .allocator = allocator,
            .should_quit = false,
            .client = try client_mod.Client.init(allocator),
            .slash_log = std.ArrayList(client_mod.SlashEvent){},
            .topology = null,
        };
    }

    /// Release the IPC client and every deep-copied snapshot this state owns.
    pub fn deinit(self: *AppState) void {
        self.client.deinit();
        // Fix: free the node-status strings instead of leaking them
        // (was a "simpler leak" TODO). Fields mirror the client's deep copy.
        if (self.node_status) |s| {
            self.allocator.free(s.node_id);
            self.allocator.free(s.state);
            self.allocator.free(s.version);
        }
        // Fix: deep-free the topology snapshot as well; the strings mirror
        // the client's topology deep copy (node id/status/role, edge endpoints).
        if (self.topology) |topo| {
            for (topo.nodes) |n| {
                self.allocator.free(n.id);
                self.allocator.free(n.status);
                self.allocator.free(n.role);
            }
            self.allocator.free(topo.nodes);
            for (topo.edges) |e| {
                self.allocator.free(e.source);
                self.allocator.free(e.target);
            }
            self.allocator.free(topo.edges);
        }
        // Free the strings owned by each slash-log entry, then the list itself.
        for (self.slash_log.items) |ev| {
            self.allocator.free(ev.target_did);
            self.allocator.free(ev.reason);
            self.allocator.free(ev.severity);
            self.allocator.free(ev.evidence_hash);
        }
        self.slash_log.deinit(self.allocator);
    }
};
/// Entry point for the TUI: sets up Vaxis, connects to the daemon, spawns a
/// background polling thread, and runs the render/input loop until quit.
/// Fix: topology snapshots were previously leaked on every poll cycle
/// (acknowledged TODO); old snapshots are now deep-freed before replacement.
/// NOTE(review): the data thread and the UI thread share `AppState` without
/// synchronization in this revision — confirm whether a mutex is needed.
pub fn run(allocator: std.mem.Allocator) !void {
    var app = try AppState.init(allocator);
    defer app.deinit();

    // Initialize Vaxis and the TTY it renders to.
    var vx = try vaxis.init(allocator, .{});
    var tty = try vaxis.Tty.init(&.{});
    defer tty.deinit();
    defer vx.deinit(allocator, tty.writer()); // Restore terminal state on exit.

    // Event loop wiring.
    var loop: vaxis.Loop(Event) = .{ .vaxis = &vx, .tty = &tty };
    try loop.init();
    try loop.start();
    defer loop.stop();

    // Connect to the daemon's control socket.
    try app.client.connect();

    // Background polling thread: fetch status / slash log / topology once per
    // second and post an update event so the UI thread redraws.
    const DataThread = struct {
        fn run(l: *vaxis.Loop(Event), a: *AppState) void {
            while (!a.should_quit) {
                // Poll node status; free the previous snapshot's strings first.
                if (a.client.getStatus()) |status| {
                    if (a.node_status) |old| {
                        a.allocator.free(old.node_id);
                        a.allocator.free(old.state);
                        a.allocator.free(old.version);
                    }
                    a.node_status = status;
                } else |_| {}
                // Poll slash log; release deep-copied strings of the old entries.
                if (a.client.getSlashLog(20)) |logs| {
                    for (a.slash_log.items) |ev| {
                        a.allocator.free(ev.target_did);
                        a.allocator.free(ev.reason);
                        a.allocator.free(ev.severity);
                        a.allocator.free(ev.evidence_hash);
                    }
                    a.slash_log.clearRetainingCapacity();
                    a.slash_log.appendSlice(a.allocator, logs) catch {};
                    a.allocator.free(logs); // Free the slice shell; elements now live in slash_log.
                } else |_| {}
                // Poll topology; deep-free the old snapshot (previously leaked).
                if (a.client.getTopology()) |topo| {
                    if (a.topology) |old| {
                        for (old.nodes) |n| {
                            a.allocator.free(n.id);
                            a.allocator.free(n.status);
                            a.allocator.free(n.role);
                        }
                        a.allocator.free(old.nodes);
                        for (old.edges) |e| {
                            a.allocator.free(e.source);
                            a.allocator.free(e.target);
                        }
                        a.allocator.free(old.edges);
                    }
                    a.topology = topo;
                } else |_| {}
                // Notify the UI thread to redraw.
                l.postEvent(.{ .update_data = {} });
                std.Thread.sleep(1 * std.time.ns_per_s);
            }
        }
    };
    var thread = try std.Thread.spawn(.{}, DataThread.run, .{ &loop, &app });
    defer thread.join();

    // UI loop: consume events, update state, redraw.
    while (!app.should_quit) {
        const event = loop.nextEvent();
        switch (event) {
            .key_press => |key| {
                if (key.matches('c', .{ .ctrl = true }) or key.matches('q', .{})) {
                    app.should_quit = true;
                }
                // Cycle tabs: Dashboard -> SlashLog -> TrustGraph -> Dashboard.
                if (key.matches(vaxis.Key.tab, .{})) {
                    app.active_tab = switch (app.active_tab) {
                        .Dashboard => .SlashLog,
                        .SlashLog => .TrustGraph,
                        .TrustGraph => .Dashboard,
                    };
                }
            },
            .winsize => |ws| {
                try vx.resize(allocator, tty.writer(), ws);
            },
            .update_data => {}, // Data already stored; fall through to render.
        }
        // Render current state.
        const win = vx.window();
        win.clear();
        try view_mod.draw(&app, win);
        try vx.render(tty.writer());
    }
}

View File

@ -1,8 +1,8 @@
//! IPC Client for TUI -> Daemon communication.
//! Wraps control.zig types.
//! Wraps control.zig types with deep-copying logic for memory safety.
const std = @import("std");
const control = @import("../control.zig");
const control = @import("control");
pub const NodeStatus = control.NodeStatus;
pub const SlashEvent = control.SlashEvent;
@ -24,75 +24,68 @@ pub const Client = struct {
if (self.stream) |s| s.close();
}
pub fn connect(self: *Client) !void {
// Connect to /tmp/capsule.sock
// TODO: Load from config
const path = "/tmp/capsule.sock";
const address = try std.net.Address.initUnix(path);
self.stream = try std.net.tcpConnectToAddress(address);
pub fn connect(self: *Client, socket_path: []const u8) !void {
self.stream = std.net.connectUnixSocket(socket_path) catch |err| {
std.log.err("Failed to connect to daemon at {s}: {}. Is it running?", .{ socket_path, err });
return err;
};
}
pub fn getStatus(self: *Client) !NodeStatus {
const resp = try self.request(.Status);
switch (resp) {
.NodeStatus => |s| return s,
var parsed = try self.request(.Status);
defer parsed.deinit();
switch (parsed.value) {
.NodeStatus => |s| return try self.deepCopyStatus(s),
else => return error.UnexpectedResponse,
}
}
pub fn getSlashLog(self: *Client, limit: usize) ![]SlashEvent {
const resp = try self.request(.{ .SlashLog = .{ .limit = limit } });
switch (resp) {
.SlashLogResult => |l| {
// We need to duplicate the list because response memory is transient (if using an arena in request)
// But for now, let's assume the caller handles it or we deep copy.
// Simpler: Return generic Response and let caller handle.
// Actually, let's just return the slice and hope the buffer lifetime management in request isn't too tricky.
// Wait, request() will likely use a local buffer. Returning a slice into it is unsafe.
// I need to use an arena or return a deep copy.
// For this MVP, I'll return the response object completely if possible, or copy.
// Let's implement deep copy later. For now, assume single-threaded blocking.
return try self.deepCopySlashLog(l);
},
var parsed = try self.request(.{ .SlashLog = .{ .limit = limit } });
defer parsed.deinit();
switch (parsed.value) {
.SlashLogResult => |l| return try self.deepCopySlashLog(l),
else => return error.UnexpectedResponse,
}
}
pub fn request(self: *Client, cmd: control.Command) !control.Response {
if (self.stream == null) return error.NotConnected;
const stream = self.stream.?;
// Send
var req_buf = std.ArrayList(u8){};
defer req_buf.deinit(self.allocator);
var w_struct = req_buf.writer(self.allocator);
var buffer: [128]u8 = undefined;
var adapter = w_struct.adaptToNewApi(&buffer);
try std.json.Stringify.value(cmd, .{}, &adapter.new_interface);
try adapter.new_interface.flush();
try stream.writeAll(req_buf.items);
// Read (buffered)
var resp_buf: [32768]u8 = undefined; // Large buffer for slash log
const bytes = try stream.read(&resp_buf);
if (bytes == 0) return error.ConnectionClosed;
// Parse (using allocator for string allocations inside union)
const parsed = try std.json.parseFromSlice(control.Response, self.allocator, resp_buf[0..bytes], .{ .ignore_unknown_fields = true });
// Note: parsed.value contains pointers to resp_buf if we used Leaky, but here we used allocator.
// Wait, std.json.parseFromSlice with allocator allocates strings!
// So we can return parsed.value.
return parsed.value;
}
pub fn getTopology(self: *Client) !TopologyInfo {
const resp = try self.request(.Topology);
switch (resp) {
var parsed = try self.request(.Topology);
defer parsed.deinit();
switch (parsed.value) {
.TopologyInfo => |t| return try self.deepCopyTopology(t),
else => return error.UnexpectedResponse,
}
}
pub fn request(self: *Client, cmd: control.Command) !std.json.Parsed(control.Response) {
if (self.stream == null) return error.NotConnected;
const stream = self.stream.?;
const json_bytes = try std.json.Stringify.valueAlloc(self.allocator, cmd, .{});
defer self.allocator.free(json_bytes);
try stream.writeAll(json_bytes);
var resp_buf: [32768]u8 = undefined;
const bytes = try stream.read(&resp_buf);
if (bytes == 0) return error.ConnectionClosed;
return try std.json.parseFromSlice(control.Response, self.allocator, resp_buf[0..bytes], .{ .ignore_unknown_fields = true });
}
fn deepCopyStatus(self: *Client, s: NodeStatus) !NodeStatus {
return .{
.node_id = try self.allocator.dupe(u8, s.node_id),
.state = try self.allocator.dupe(u8, s.state),
.peers_count = s.peers_count,
.uptime_seconds = s.uptime_seconds,
.version = try self.allocator.dupe(u8, s.version),
};
}
fn deepCopySlashLog(self: *Client, events: []const SlashEvent) ![]SlashEvent {
const list = try self.allocator.alloc(SlashEvent, events.len);
for (events, 0..) |ev, i| {
@ -108,7 +101,6 @@ pub const Client = struct {
}
fn deepCopyTopology(self: *Client, topo: TopologyInfo) !TopologyInfo {
// Deep copy nodes
const nodes = try self.allocator.alloc(control.GraphNode, topo.nodes.len);
for (topo.nodes, 0..) |n, i| {
nodes[i] = .{
@ -119,7 +111,6 @@ pub const Client = struct {
};
}
// Deep copy edges
const edges = try self.allocator.alloc(control.GraphEdge, topo.edges.len);
for (topo.edges, 0..) |e, i| {
edges[i] = .{
@ -134,4 +125,35 @@ pub const Client = struct {
.edges = edges,
};
}
pub fn freeStatus(self: *Client, s: NodeStatus) void {
self.allocator.free(s.node_id);
self.allocator.free(s.state);
self.allocator.free(s.version);
}
pub fn freeSlashLog(self: *Client, events: []SlashEvent) void {
for (events) |ev| {
self.allocator.free(ev.target_did);
self.allocator.free(ev.reason);
self.allocator.free(ev.severity);
self.allocator.free(ev.evidence_hash);
}
self.allocator.free(events);
}
    /// Release a topology snapshot produced by `deepCopyTopology`.
    /// Must mirror that function's allocations exactly: per-node strings
    /// (id/status/role), the node slice, per-edge strings (source/target),
    /// and the edge slice.
    pub fn freeTopology(self: *Client, topo: TopologyInfo) void {
        for (topo.nodes) |n| {
            self.allocator.free(n.id);
            self.allocator.free(n.status);
            self.allocator.free(n.role);
        }
        self.allocator.free(topo.nodes);
        for (topo.edges) |e| {
            self.allocator.free(e.source);
            self.allocator.free(e.target);
        }
        self.allocator.free(topo.edges);
    }
};

View File

@ -121,8 +121,8 @@ fn drawTrustGraph(app: *app_mod.AppState, win: vaxis.Window) !void {
const nodes_count = topo.nodes.len;
// Skip self (index 0) loop for now to draw it specially at center
// Self
_ = win.printSegment(.{ .text = "", .style = .{ .bold = true, .fg = .{ .rgb = .{ 255, 215, 0 } } } }, .{ .row_offset = @intCast(cy), .col_offset = @intCast(cx) });
// Self (Preserved: - using for compatibility)
_ = win.printSegment(.{ .text = "", .style = .{ .bold = true, .fg = .{ .rgb = .{ 255, 215, 0 } } } }, .{ .row_offset = @intCast(cy), .col_offset = @intCast(cx) });
_ = win.printSegment(.{ .text = "SELF" }, .{ .row_offset = @intCast(cy + 1), .col_offset = @intCast(cx - 2) });
// Peers
@ -154,10 +154,10 @@ fn drawTrustGraph(app: *app_mod.AppState, win: vaxis.Window) !void {
if (std.mem.eql(u8, node.status, "slashed")) {
style = .{ .fg = .{ .rgb = .{ 255, 50, 50 } }, .bold = true, .blink = true };
char = "X";
char = "×"; // Preserved: X
} else if (node.trust_score > 0.8) {
style = .{ .fg = .{ .rgb = .{ 100, 255, 100 } }, .bold = true };
char = "";
char = ""; // Preserved: (Hexagon)
}
_ = win.printSegment(.{ .text = char, .style = style }, .{ .row_offset = @intCast(py), .col_offset = @intCast(px) });

View File

@ -1,61 +1,70 @@
//! RFC-0000: Libertaria Wire Frame Protocol
//! RFC-0000: Libertaria Wire Frame Protocol (v2)
//!
//! This module implements the core LWF frame structure for L0 transport.
//! Optimized for "Fast Drop" routing efficiency.
//!
//! Key features:
//! - Fixed-size header (72 bytes)
//! - Variable payload (up to 8828 bytes based on frame class)
//! - Fixed-size header (88 bytes) - Router Optimized Order
//! - Variable payload (up to 9000+ bytes)
//! - Fixed-size trailer (36 bytes)
//! - Checksum verification (CRC32-C)
//! - Signature support (Ed25519)
//! - Nonce/SessionID Binding:
//! Cryptography nonce construction MUST strictly bind to the Session ID.
//! Usage: `nonce[0..16] == session_id`, `nonce[16..24] == random/counter`.
//! - Explicit SessionID (16 bytes) for flow filtering
//!
//! Frame structure:
//!
//! Header (72B)
//!
//! Payload (var)
//!
//! Trailer (36B)
//!
//! Header Layout (88 bytes):
//!
//! 00-03: Magic (4) Fast
//! 04-27: Dest Hint (24) Route
//! 28-51: Src Hint (24) Filt
//!
//! 52-67: SessionID (16) Flow
//! 68-71: Sequence (4) Order
//!
//! 72-73: Service (2) Polcy
//! 74-75: Length (2) Alloc
//! 76-79: Meta (4) Misc
//! 80-87: Timestamp (8) TTL
//!
const std = @import("std");
/// RFC-0000 Section 4.1: Frame size classes
/// RFC-0000: Frame Types / Classes
pub const FrameClass = enum(u8) {
micro = 0x00, // 128 bytes
tiny = 0x01, // 512 bytes
standard = 0x02, // 1350 bytes (default)
large = 0x03, // 4096 bytes
jumbo = 0x04, // 9000 bytes
micro = 0x00, // 128 bytes (Microframe)
mini = 0x01, // 512 bytes (Miniframe) - formerly Tiny
standard = 0x02, // 1350 bytes (Frame)
big = 0x03, // 4096 bytes (Bigframe) - formerly Large
jumbo = 0x04, // 9000 bytes (Jumboframe)
variable = 0xFF, // Custom/Unlimited (Variableframe)
pub fn maxPayloadSize(self: FrameClass) usize {
const overhead = LWFHeader.SIZE + LWFTrailer.SIZE; // 88 + 36 = 124 bytes
return switch (self) {
.micro => 128 - LWFHeader.SIZE - LWFTrailer.SIZE,
.tiny => 512 - LWFHeader.SIZE - LWFTrailer.SIZE,
.standard => 1350 - LWFHeader.SIZE - LWFTrailer.SIZE,
.large => 4096 - LWFHeader.SIZE - LWFTrailer.SIZE,
.jumbo => 9000 - LWFHeader.SIZE - LWFTrailer.SIZE,
.micro => if (128 > overhead) 128 - overhead else 0,
.mini => 512 - overhead,
.standard => 1350 - overhead,
.big => 4096 - overhead,
.jumbo => 9000 - overhead,
.variable => std.math.maxInt(usize), // Limited by allocator/MTU
};
}
};
/// RFC-0000 Section 4.3: Frame flags
/// RFC-0000: Frame flags
pub const LWFFlags = struct {
pub const ENCRYPTED: u8 = 0x01; // Payload is encrypted
pub const SIGNED: u8 = 0x02; // Trailer has signature
pub const RELAYABLE: u8 = 0x04; // Can be relayed by nodes
pub const HAS_ENTROPY: u8 = 0x08; // Includes Entropy Stamp
pub const HAS_ENTROPY: u8 = 0x08; // Includes Entropy Stamp (Payload Prefix)
pub const FRAGMENTED: u8 = 0x10; // Part of fragmented message
pub const PRIORITY: u8 = 0x20; // High-priority frame
};
/// RFC-0000 Section 4.2: LWF Header (72 bytes fixed)
/// RFC-0000: LWF Header (88 bytes fixed)
/// Order optimized for Router Efficiency: Routing -> Flow -> Context -> Time
pub const LWFHeader = struct {
pub const VERSION: u8 = 0x01;
pub const SIZE: usize = 72;
pub const VERSION: u8 = 0x02;
pub const SIZE: usize = 88;
// RFC-0121: Service Types
pub const ServiceType = struct {
@ -63,30 +72,51 @@ pub const LWFHeader = struct {
pub const SLASH_PROTOCOL: u16 = 0x0002;
pub const IDENTITY_SIGNAL: u16 = 0x0003;
pub const ECONOMIC_SETTLEMENT: u16 = 0x0004;
pub const RELAY_FORWARD: u16 = 0x0005; // Phase 14: Onion routing
pub const RELAY_FORWARD: u16 = 0x0005;
// Streaming Media (0x0800-0x08FF)
pub const STREAM_AUDIO: u16 = 0x0800;
pub const STREAM_VIDEO: u16 = 0x0801;
pub const STREAM_DATA: u16 = 0x0802;
// P2P / Swarm (0x0B00-0x0BFF) - Low Priority / Bulk
pub const SWARM_MANIFEST: u16 = 0x0B00; // Handshake/InfoDict
pub const SWARM_HAVE: u16 = 0x0B01; // Bitfield
pub const SWARM_REQUEST: u16 = 0x0B02; // Interest
pub const SWARM_BLOCK: u16 = 0x0B03; // Data Payload
};
// 1. Identification & Routing (Top Priority)
magic: [4]u8, // "LWF\0"
version: u8, // 0x01
flags: u8, // Bitfield (see LWFFlags)
service_type: u16, // See ServiceType constants
source_hint: [24]u8, // Blake3 truncated DID hint (192-bit)
dest_hint: [24]u8, // Blake3 truncated DID hint (192-bit)
sequence: u32, // Big-endian, anti-replay counter
timestamp: u64, // Big-endian, nanoseconds since epoch
payload_len: u16, // Big-endian, actual payload size
entropy_difficulty: u8, // Entropy Stamp difficulty (0-255)
frame_class: u8, // FrameClass enum value
dest_hint: [24]u8, // Blake3 truncated DID hint
source_hint: [24]u8, // Blake3 truncated DID hint
// 2. Flow & Ordering (Filtering)
session_id: [16]u8, // Explicit Flow Context
sequence: u32, // Anti-replay counter
// 3. Technical Meta
service_type: u16, // Protocol ID
payload_len: u16, // Data size
frame_class: u8, // FrameClass enum
version: u8, // 0x02
flags: u8, // Bitfield
entropy_difficulty: u8, // PoW Target
// 4. Temporal (Least Critical for Routing)
timestamp: u64, // Nanoseconds
/// Initialize header with default values
pub fn init() LWFHeader {
return .{
.magic = [_]u8{ 'L', 'W', 'F', 0 },
.version = 0x01,
.version = VERSION,
.flags = 0,
.service_type = 0,
.source_hint = [_]u8{0} ** 24,
.dest_hint = [_]u8{0} ** 24,
.source_hint = [_]u8{0} ** 24,
.session_id = [_]u8{0} ** 16,
.sequence = 0,
.timestamp = 0,
.payload_len = 0,
@ -98,108 +128,91 @@ pub const LWFHeader = struct {
/// Validate header magic bytes
pub fn isValid(self: *const LWFHeader) bool {
const expected_magic = [4]u8{ 'L', 'W', 'F', 0 };
return std.mem.eql(u8, &self.magic, &expected_magic) and self.version == 0x01;
// Accept v1 or v2? Strict v2 for now.
return std.mem.eql(u8, &self.magic, &expected_magic) and self.version == VERSION;
}
/// Serialize header to exactly 72 bytes
pub fn toBytes(self: *const LWFHeader, buffer: *[72]u8) void {
/// Serialize header to exactly 88 bytes
pub fn toBytes(self: *const LWFHeader, buffer: *[88]u8) void {
var offset: usize = 0;
// magic: [4]u8
// 1. Magic (4)
@memcpy(buffer[offset..][0..4], &self.magic);
offset += 4;
// version: u8
buffer[offset] = self.version;
offset += 1;
// flags: u8
buffer[offset] = self.flags;
offset += 1;
// service_type: u16 (big-endian)
std.mem.writeInt(u16, buffer[offset..][0..2], self.service_type, .big);
offset += 2;
// source_hint: [24]u8
@memcpy(buffer[offset..][0..24], &self.source_hint);
offset += 24;
// dest_hint: [24]u8
// 2. Dest Hint (24)
@memcpy(buffer[offset..][0..24], &self.dest_hint);
offset += 24;
// sequence: u32 (big-endian)
// 3. Src Hint (24)
@memcpy(buffer[offset..][0..24], &self.source_hint);
offset += 24;
// 4. Session ID (16)
@memcpy(buffer[offset..][0..16], &self.session_id);
offset += 16;
// 5. Sequence (4) big-endian
std.mem.writeInt(u32, buffer[offset..][0..4], self.sequence, .big);
offset += 4;
// timestamp: u64 (big-endian)
std.mem.writeInt(u64, buffer[offset..][0..8], self.timestamp, .big);
offset += 8;
// 6. Service Type (2) big-endian
std.mem.writeInt(u16, buffer[offset..][0..2], self.service_type, .big);
offset += 2;
// payload_len: u16 (big-endian)
// 7. Payload Len (2) big-endian
std.mem.writeInt(u16, buffer[offset..][0..2], self.payload_len, .big);
offset += 2;
// entropy_difficulty: u8
// 8. Meta Fields (1 byte each)
buffer[offset] = self.frame_class;
offset += 1;
buffer[offset] = self.version;
offset += 1;
buffer[offset] = self.flags;
offset += 1;
buffer[offset] = self.entropy_difficulty;
offset += 1;
// frame_class: u8
buffer[offset] = self.frame_class;
offset += 1;
// 9. Timestamp (8) big-endian
std.mem.writeInt(u64, buffer[offset..][0..8], self.timestamp, .big);
offset += 8;
std.debug.assert(offset == 72);
std.debug.assert(offset == 88);
}
/// Deserialize header from exactly 72 bytes
pub fn fromBytes(buffer: *const [72]u8) LWFHeader {
/// Deserialize header from exactly 88 bytes
pub fn fromBytes(buffer: *const [88]u8) LWFHeader {
var header: LWFHeader = undefined;
var offset: usize = 0;
// magic
@memcpy(&header.magic, buffer[offset..][0..4]);
offset += 4;
// version
header.version = buffer[offset];
offset += 1;
// flags
header.flags = buffer[offset];
offset += 1;
// service_type
header.service_type = std.mem.readInt(u16, buffer[offset..][0..2], .big);
offset += 2;
// source_hint
@memcpy(&header.source_hint, buffer[offset..][0..24]);
offset += 24;
// dest_hint
@memcpy(&header.dest_hint, buffer[offset..][0..24]);
offset += 24;
@memcpy(&header.source_hint, buffer[offset..][0..24]);
offset += 24;
@memcpy(&header.session_id, buffer[offset..][0..16]);
offset += 16;
// sequence
header.sequence = std.mem.readInt(u32, buffer[offset..][0..4], .big);
offset += 4;
// timestamp
header.timestamp = std.mem.readInt(u64, buffer[offset..][0..8], .big);
offset += 8;
// payload_len
header.service_type = std.mem.readInt(u16, buffer[offset..][0..2], .big);
offset += 2;
header.payload_len = std.mem.readInt(u16, buffer[offset..][0..2], .big);
offset += 2;
// entropy
header.frame_class = buffer[offset];
offset += 1;
header.version = buffer[offset];
offset += 1;
header.flags = buffer[offset];
offset += 1;
header.entropy_difficulty = buffer[offset];
offset += 1;
// frame_class
header.frame_class = buffer[offset];
offset += 1;
header.timestamp = std.mem.readInt(u64, buffer[offset..][0..8], .big);
offset += 8;
return header;
}
@ -207,12 +220,11 @@ pub const LWFHeader = struct {
/// RFC-0000 Section 4.7: LWF Trailer (36 bytes fixed)
pub const LWFTrailer = extern struct {
signature: [32]u8, // Ed25519 signature (or zeros if not signed)
checksum: u32, // CRC32-C, big-endian
signature: [32]u8, // Ed25519 signature
checksum: u32, // CRC32-C
pub const SIZE: usize = 36;
/// Initialize trailer with zeros
pub fn init() LWFTrailer {
return .{
.signature = [_]u8{0} ** 32,
@ -220,34 +232,15 @@ pub const LWFTrailer = extern struct {
};
}
/// Serialize trailer to exactly 36 bytes (no padding)
pub fn toBytes(self: *const LWFTrailer, buffer: *[36]u8) void {
var offset: usize = 0;
// signature: [32]u8
@memcpy(buffer[offset..][0..32], &self.signature);
offset += 32;
// checksum: u32 (already big-endian, copy bytes directly)
@memcpy(buffer[offset..][0..4], std.mem.asBytes(&self.checksum));
// offset += 4;
std.debug.assert(offset + 4 == 36); // Verify we wrote exactly 36 bytes
@memcpy(buffer[0..32], &self.signature);
@memcpy(buffer[32..36], std.mem.asBytes(&self.checksum));
}
/// Deserialize trailer from exactly 36 bytes
pub fn fromBytes(buffer: *const [36]u8) LWFTrailer {
var trailer: LWFTrailer = undefined;
var offset: usize = 0;
// signature: [32]u8
@memcpy(&trailer.signature, buffer[offset..][0..32]);
offset += 32;
// checksum: u32 (already big-endian, copy bytes directly)
@memcpy(std.mem.asBytes(&trailer.checksum), buffer[offset..][0..4]);
// offset += 4;
@memcpy(&trailer.signature, buffer[0..32]);
@memcpy(std.mem.asBytes(&trailer.checksum), buffer[32..36]);
return trailer;
}
};
@ -258,11 +251,9 @@ pub const LWFFrame = struct {
payload: []u8,
trailer: LWFTrailer,
/// Create new frame with allocated payload
pub fn init(allocator: std.mem.Allocator, payload_size: usize) !LWFFrame {
const payload = try allocator.alloc(u8, payload_size);
@memset(payload, 0);
return .{
.header = LWFHeader.init(),
.payload = payload,
@ -270,69 +261,48 @@ pub const LWFFrame = struct {
};
}
/// Free payload memory
pub fn deinit(self: *LWFFrame, allocator: std.mem.Allocator) void {
allocator.free(self.payload);
}
/// Total frame size (header + payload + trailer)
pub fn size(self: *const LWFFrame) usize {
return LWFHeader.SIZE + self.payload.len + LWFTrailer.SIZE;
}
/// Encode frame to bytes (allocates new buffer)
pub fn encode(self: *const LWFFrame, allocator: std.mem.Allocator) ![]u8 {
const total_size = self.size();
var buffer = try allocator.alloc(u8, total_size);
// Serialize header (exactly 72 bytes)
var header_bytes: [72]u8 = undefined;
var header_bytes: [88]u8 = undefined;
self.header.toBytes(&header_bytes);
@memcpy(buffer[0..72], &header_bytes);
@memcpy(buffer[0..88], &header_bytes);
// Copy payload
@memcpy(buffer[72 .. 72 + self.payload.len], self.payload);
@memcpy(buffer[88 .. 88 + self.payload.len], self.payload);
// Serialize trailer (exactly 36 bytes)
var trailer_bytes: [36]u8 = undefined;
self.trailer.toBytes(&trailer_bytes);
const trailer_start = 72 + self.payload.len;
const trailer_start = 88 + self.payload.len;
@memcpy(buffer[trailer_start .. trailer_start + 36], &trailer_bytes);
return buffer;
}
/// Decode frame from bytes (allocates payload)
pub fn decode(allocator: std.mem.Allocator, data: []const u8) !LWFFrame {
// Minimum frame size check
if (data.len < 72 + 36) {
return error.FrameTooSmall;
}
if (data.len < 88 + 36) return error.FrameTooSmall;
// Parse header (first 72 bytes)
var header_bytes: [72]u8 = undefined;
@memcpy(&header_bytes, data[0..72]);
var header_bytes: [88]u8 = undefined;
@memcpy(&header_bytes, data[0..88]);
const header = LWFHeader.fromBytes(&header_bytes);
// Validate header
if (!header.isValid()) {
return error.InvalidHeader;
}
if (!header.isValid()) return error.InvalidHeader;
// Extract payload length
const payload_len = @as(usize, @intCast(header.payload_len));
if (data.len < 88 + payload_len + 36) return error.InvalidPayloadLength;
// Verify frame size matches
if (data.len < 72 + payload_len + 36) {
return error.InvalidPayloadLength;
}
// Allocate and copy payload
const payload = try allocator.alloc(u8, payload_len);
@memcpy(payload, data[72 .. 72 + payload_len]);
@memcpy(payload, data[88 .. 88 + payload_len]);
// Parse trailer
const trailer_start = 72 + payload_len;
const trailer_start = 88 + payload_len;
var trailer_bytes: [36]u8 = undefined;
@memcpy(&trailer_bytes, data[trailer_start .. trailer_start + 36]);
const trailer = LWFTrailer.fromBytes(&trailer_bytes);
@ -344,29 +314,21 @@ pub const LWFFrame = struct {
};
}
/// Calculate CRC32-C checksum of header + payload
pub fn calculateChecksum(self: *const LWFFrame) u32 {
var hasher = std.hash.Crc32.init();
// Hash header (exactly 72 bytes)
var header_bytes: [72]u8 = undefined;
var header_bytes: [88]u8 = undefined;
self.header.toBytes(&header_bytes);
hasher.update(&header_bytes);
// Hash payload
hasher.update(self.payload);
return hasher.final();
}
/// Verify checksum matches
pub fn verifyChecksum(self: *const LWFFrame) bool {
const computed = self.calculateChecksum();
const stored = std.mem.bigToNative(u32, self.trailer.checksum);
return computed == stored;
}
/// Update checksum field in trailer
pub fn updateChecksum(self: *LWFFrame) void {
const checksum = self.calculateChecksum();
self.trailer.checksum = std.mem.nativeToBig(u32, checksum);
@ -379,68 +341,47 @@ pub const LWFFrame = struct {
test "LWFFrame creation" {
const allocator = std.testing.allocator;
var frame = try LWFFrame.init(allocator, 100);
defer frame.deinit(allocator);
try std.testing.expectEqual(@as(usize, 72 + 100 + 36), frame.size());
try std.testing.expectEqual(@as(usize, 88 + 100 + 36), frame.size());
try std.testing.expectEqual(@as(u8, 'L'), frame.header.magic[0]);
try std.testing.expectEqual(@as(u8, 0x01), frame.header.version);
try std.testing.expectEqual(@as(u8, 0x02), frame.header.version);
}
test "LWFFrame encode/decode roundtrip" {
const allocator = std.testing.allocator;
// Create frame
var frame = try LWFFrame.init(allocator, 10);
defer frame.deinit(allocator);
// Populate frame
frame.header.service_type = 0x0A00; // FEED_WORLD_POST
frame.header.service_type = 0x0A00;
frame.header.payload_len = 10;
frame.header.timestamp = 1234567890;
// Set a session ID
frame.header.session_id = [_]u8{0xEE} ** 16;
@memcpy(frame.payload, "HelloWorld");
frame.updateChecksum();
// Encode
const encoded = try frame.encode(allocator);
defer allocator.free(encoded);
try std.testing.expectEqual(@as(usize, 72 + 10 + 36), encoded.len);
try std.testing.expectEqual(@as(usize, 88 + 10 + 36), encoded.len);
// Decode
var decoded = try LWFFrame.decode(allocator, encoded);
defer decoded.deinit(allocator);
// Verify
try std.testing.expectEqualSlices(u8, "HelloWorld", decoded.payload);
try std.testing.expectEqual(frame.header.service_type, decoded.header.service_type);
try std.testing.expectEqual(frame.header.timestamp, decoded.header.timestamp);
}
test "LWFFrame checksum verification" {
const allocator = std.testing.allocator;
var frame = try LWFFrame.init(allocator, 20);
defer frame.deinit(allocator);
@memcpy(frame.payload, "Test payload content");
frame.updateChecksum();
// Should pass
try std.testing.expect(frame.verifyChecksum());
// Corrupt payload
frame.payload[0] = 'X';
// Should fail
try std.testing.expect(!frame.verifyChecksum());
try std.testing.expectEqualSlices(u8, &frame.header.session_id, &decoded.header.session_id);
}
test "FrameClass payload sizes" {
try std.testing.expectEqual(@as(usize, 20), FrameClass.micro.maxPayloadSize());
try std.testing.expectEqual(@as(usize, 404), FrameClass.tiny.maxPayloadSize());
try std.testing.expectEqual(@as(usize, 1242), FrameClass.standard.maxPayloadSize());
try std.testing.expectEqual(@as(usize, 3988), FrameClass.large.maxPayloadSize());
try std.testing.expectEqual(@as(usize, 8892), FrameClass.jumbo.maxPayloadSize());
// Overhead = 88 + 36 = 124
// Micro: 128 - 124 = 4 bytes remaining
try std.testing.expectEqual(@as(usize, 4), FrameClass.micro.maxPayloadSize());
// Mini: 512 - 124 = 388
try std.testing.expectEqual(@as(usize, 388), FrameClass.mini.maxPayloadSize());
// Big: 4096 - 124 = 3972
try std.testing.expectEqual(@as(usize, 3972), FrameClass.big.maxPayloadSize());
}

View File

@ -29,6 +29,12 @@ pub const NextHopHeader = struct {
// We might add HMAC or integrity check here
};
/// Result of peeling one relay (onion) layer: the key of the next hop to
/// forward to, the decrypted inner payload (heap-allocated by the peel
/// routine; caller frees), and the session id bound to this flow.
pub const RelayResult = struct {
    next_hop: [32]u8,
    payload: []u8,
    session_id: [16]u8,
};
/// A Relay Packet as it travels on the wire.
/// It effectively contains an encrypted blob that the receiver can decrypt
/// to reveal the NextHopHeader and the inner Payload.
@ -144,7 +150,7 @@ pub const OnionBuilder = struct {
packet: RelayPacket,
receiver_secret_key: [32]u8,
expected_session_id: ?[16]u8,
) !struct { next_hop: [32]u8, payload: []u8, session_id: [16]u8 } {
) !RelayResult {
// 1. Compute Shared Secret from Ephemeral Key
const shared_secret = crypto.dh.X25519.scalarmult(receiver_secret_key, packet.ephemeral_key) catch return error.DecryptionFailed;
const tag_len = crypto.aead.chacha_poly.XChaCha20Poly1305.tag_length;
@ -184,7 +190,7 @@ pub const OnionBuilder = struct {
const payload = try self.allocator.alloc(u8, payload_len);
@memcpy(payload, cleartext[32..]);
return .{
return RelayResult{
.next_hop = next_hop,
.payload = payload,
.session_id = session_id,

View File

@ -29,6 +29,34 @@ pub const MAX_FUTURE_AS: u128 = 3630 * ATTOSECONDS_PER_SECOND;
/// Maximum age for vectors (30 days)
pub const MAX_AGE_AS: u128 = 30 * 24 * 3600 * ATTOSECONDS_PER_SECOND;
// ============================================================================
// STANDARD EPOCHS (RFC-0106)
// ============================================================================
/// Human-Centric Epoch: 1 day (24 hours), in attoseconds — the diurnal cycle.
pub const HUMAN_EPOCH: u128 = 24 * 3600 * ATTOSECONDS_PER_SECOND;
/// Network/Router Epoch: 12 minutes (720 seconds) — NAT refresh cycle.
pub const ROUTER_EPOCH: u128 = 720 * ATTOSECONDS_PER_SECOND;
/// Satellite Epoch: 1 week (604,800 seconds = 7 days).
pub const SATELLITE_EPOCH: u128 = 604_800 * ATTOSECONDS_PER_SECOND;
/// Heartbeat Epoch: 1 minute (60 seconds) — the system pulse.
pub const HEARTBEAT_EPOCH: u128 = 60 * ATTOSECONDS_PER_SECOND;
/// Daily Epoch: 24 hours (alias for HUMAN_EPOCH).
pub const DAILY_EPOCH: u128 = HUMAN_EPOCH;
/// Millennium Epoch: 1000 years, approximated as 365-day years (no leap days).
pub const MILLENNIUM_EPOCH: u128 = 1000 * 365 * DAILY_EPOCH;
/// Collider Epoch: 1 attosecond — the base time unit of this module.
pub const COLLIDER_EPOCH: u128 = 1;
/// Nano Epoch: 1 nanosecond = 1e9 attoseconds.
pub const NANO_EPOCH: u128 = 1_000_000_000;
// ============================================================================
// ANCHOR EPOCH
// ============================================================================

View File

@ -86,10 +86,10 @@ pub const UTCP = struct {
// 2. Entropy Fast-Path (DoS Defense)
if (header.flags & lwf.LWFFlags.HAS_ENTROPY != 0) {
if (data.len < lwf.LWFHeader.SIZE + 58) {
if (data.len < lwf.LWFHeader.SIZE + 77) {
return error.StampMissing;
}
const stamp_bytes = data[lwf.LWFHeader.SIZE..][0..58];
const stamp_bytes = data[lwf.LWFHeader.SIZE..][0..77];
const stamp = entropy.EntropyStamp.fromBytes(@ptrCast(stamp_bytes));
// Perform light validation (no Argon2 recompute yet, just hash bits)
@ -183,10 +183,11 @@ test "UTCP socket DoS defense: invalid entropy stamp" {
defer frame.deinit(allocator);
frame.header.flags |= lwf.LWFFlags.HAS_ENTROPY;
frame.header.entropy_difficulty = 20; // High difficulty
@memset(frame.payload[0..58], 0);
@memset(frame.payload[0..77], 0);
// Set valid timestamp (fresh)
// Offset: Hash(32) + Nonce(16) + Salt(16) + Diff(1) + Mem(2) = 67
const now = @as(u64, @intCast(std.time.timestamp()));
std.mem.writeInt(u64, frame.payload[35..43], now, .big);
std.mem.writeInt(u64, frame.payload[67..75], now, .big);
// 2. Send
try client.sendFrame(server_addr, &frame, allocator);

View File

@ -57,6 +57,12 @@ pub const EntropyStamp = struct {
/// Argon2id hash output (32 bytes)
hash: [HASH_LEN]u8,
/// Nonce used to solve the puzzle (16 bytes)
nonce: [16]u8,
/// Salt used for hashing (16 bytes)
salt: [16]u8,
/// Difficulty: leading zero bits required (8-20 recommended)
difficulty: u8,
@ -95,6 +101,10 @@ pub const EntropyStamp = struct {
var nonce: [16]u8 = undefined;
crypto.random.bytes(&nonce);
// Generate fixed salt for this mining attempt
var salt: [SALT_LEN]u8 = undefined;
crypto.random.bytes(&salt);
const timestamp = @as(u64, @intCast(std.time.timestamp()));
var iterations: u64 = 0;
@ -108,15 +118,17 @@ pub const EntropyStamp = struct {
if (carry == 0) break;
}
// Compute stamp hash
// Compute stamp hash using stored salt
var hash: [HASH_LEN]u8 = undefined;
computeStampHash(payload_hash, &nonce, timestamp, service_type, &hash);
computeStampHash(payload_hash, &nonce, &salt, timestamp, service_type, &hash);
// Check difficulty (count leading zeros in hash)
const zeros = countLeadingZeros(&hash);
if (zeros >= difficulty) {
return EntropyStamp{
.hash = hash,
.nonce = nonce,
.salt = salt,
.difficulty = difficulty,
.memory_cost_kb = ARGON2_MEMORY_KB,
.timestamp_sec = timestamp,
@ -162,7 +174,7 @@ pub const EntropyStamp = struct {
return error.StampExpired;
}
if (age < -60) { // 60 second clock skew allowance
if (age < -60) { // 60 second clock skew allowance
return error.StampFromFuture;
}
@ -172,25 +184,39 @@ pub const EntropyStamp = struct {
}
// Recompute hash and verify
// Note: We can't recover the nonce from the stamp, so we accept the hash as-is
// In production, the nonce should be stored in the stamp for verification
const zeros = countLeadingZeros(&self.hash);
if (zeros < self.difficulty) {
// Use the nonce/salt from the stamp to reproduce the work
var computed_hash: [HASH_LEN]u8 = undefined;
computeStampHash(payload_hash, &self.nonce, &self.salt, self.timestamp_sec, self.service_type, &computed_hash);
// Check if computed hash matches stored hash
if (!std.mem.eql(u8, &computed_hash, &self.hash)) {
return error.HashInvalid;
}
_ = payload_hash; // Unused: for future verification
// Check if stored hash meets difficulty
const zeros = countLeadingZeros(&self.hash);
if (zeros < self.difficulty) {
return error.InsufficientDifficulty;
}
}
/// Serialize stamp to bytes (for LWF payload inclusion)
pub fn toBytes(self: *const EntropyStamp) [58]u8 {
var buf: [58]u8 = undefined;
/// Serialize stamp to bytes (77 bytes)
pub fn toBytes(self: *const EntropyStamp) [77]u8 {
var buf: [77]u8 = undefined;
var offset: usize = 0;
// hash: 32 bytes
@memcpy(buf[offset .. offset + 32], &self.hash);
offset += 32;
// nonce: 16 bytes
@memcpy(buf[offset .. offset + 16], &self.nonce);
offset += 16;
// salt: 16 bytes
@memcpy(buf[offset .. offset + 16], &self.salt);
offset += 16;
// difficulty: 1 byte
buf[offset] = self.difficulty;
offset += 1;
@ -211,13 +237,21 @@ pub const EntropyStamp = struct {
}
/// Deserialize stamp from bytes
pub fn fromBytes(data: *const [58]u8) EntropyStamp {
pub fn fromBytes(data: *const [77]u8) EntropyStamp {
var offset: usize = 0;
var hash: [HASH_LEN]u8 = undefined;
@memcpy(&hash, data[offset .. offset + 32]);
offset += 32;
var nonce: [16]u8 = undefined;
@memcpy(&nonce, data[offset .. offset + 16]);
offset += 16;
var salt: [16]u8 = undefined;
@memcpy(&salt, data[offset .. offset + 16]);
offset += 16;
const difficulty = data[offset];
offset += 1;
@ -231,6 +265,8 @@ pub const EntropyStamp = struct {
return .{
.hash = hash,
.nonce = nonce,
.salt = salt,
.difficulty = difficulty,
.memory_cost_kb = memory_cost_kb,
.timestamp_sec = timestamp_sec,
@ -248,6 +284,7 @@ pub const EntropyStamp = struct {
fn computeStampHash(
payload_hash: *const [32]u8,
nonce: *const [16]u8,
salt: *const [16]u8,
timestamp: u64,
service_type: u16,
output: *[HASH_LEN]u8,
@ -267,11 +304,7 @@ fn computeStampHash(
std.mem.writeInt(u16, input[offset .. offset + 2][0..2], service_type, .big);
// Generate random salt
var salt: [SALT_LEN]u8 = undefined;
crypto.random.bytes(&salt);
// Call Argon2id
// Call Argon2id with PROVIDED salt
const result = argon2id_hash_raw(
ARGON2_TIME_COST,
ARGON2_MEMORY_KB,

133
l2-membrane/policy.zig Normal file
View File

@ -0,0 +1,133 @@
//! L2 Membrane - Policy Engine
//!
//! "The Membrane decides what enters the Cell."
//!
//! Responsibilities:
//! 1. Packet Classification (Service Type Analysis)
//! 2. Traffic Shaping (Priority Queues)
//! 3. Reputation Enforcement (Source Verification)
//! 4. DoS Mitigation (Entropy Verification)
//!
//! Implementation: High-performance Zig (Hardware-close).
const std = @import("std");
const lwf = @import("lwf"); // L0 Transport (Wire Frame)
/// Outcome of a policy evaluation for a single incoming frame.
/// Field order is part of the ABI (`@intFromEnum` values); do not reorder.
pub const PolicyDecision = enum {
    drop, // Silently discard; no response is sent
    reject, // Discard and send a NACK/error back to the sender
    forward, // Admit the frame for normal processing
    prioritize, // Admit and jump the processing queue (real-time traffic)
    throttle, // Admit but delay processing (bulk traffic)
};
/// Diagnostic tag explaining why a `PolicyDecision` was reached.
/// NOTE(review): not yet produced by `PolicyEngine.decide` in this file —
/// presumably intended for telemetry/NACK payloads; confirm the consumer.
pub const PolicyReason = enum {
    none,
    invalid_header,
    insufficient_entropy,
    reputation_too_low,
    congestion_control,
    policy_allow,
    service_priority,
    service_bulk,
};
/// The Membrane's admission controller: classifies incoming frames from the
/// header alone and returns a `PolicyDecision` without touching the payload.
pub const PolicyEngine = struct {
    allocator: std.mem.Allocator,

    // --- Configuration ---
    /// Minimum CLAIMED entropy difficulty a non-micro frame must advertise.
    min_entropy_difficulty: u8,
    /// When true, frames in the feed/social range (0x0A00-0x0AFF) must
    /// carry the ENCRYPTED flag or they are dropped.
    require_encryption: bool,

    /// Create an engine with the baseline configuration.
    pub fn init(allocator: std.mem.Allocator) PolicyEngine {
        return .{
            .allocator = allocator,
            .min_entropy_difficulty = 8, // Baseline
            .require_encryption = true,
        };
    }

    /// O(1) header-only decision ("fast drop" path for switch/router).
    ///
    /// Checks, in order:
    ///   1. Structural header validity.
    ///   2. The CLAIMED entropy difficulty against the configured minimum
    ///      (the expensive proof-of-work verification happens later);
    ///      microframes are exempt.
    ///   3. Service-type traffic class (streaming -> prioritize,
    ///      swarm -> throttle, feed/social -> encryption policy).
    pub fn decide(self: *const PolicyEngine, header: *const lwf.LWFHeader) PolicyDecision {
        // 1. Basic validity: structurally broken headers are discarded.
        if (!header.isValid()) return .drop;

        // 2. Entropy check (DoS defense). Only the claimed difficulty is
        //    inspected here; microframes may claim less than the minimum.
        if (header.entropy_difficulty < self.min_entropy_difficulty) {
            if (header.frame_class != @intFromEnum(lwf.FrameClass.micro)) {
                return .drop;
            }
        }

        // 3. Service-based classification.
        return switch (header.service_type) {
            // Real-time streaming jumps the queue.
            lwf.LWFHeader.ServiceType.STREAM_AUDIO,
            lwf.LWFHeader.ServiceType.STREAM_VIDEO,
            lwf.LWFHeader.ServiceType.STREAM_DATA,
            => .prioritize,

            // Swarm/bulk transfer defaults to throttled (bulk) behavior.
            lwf.LWFHeader.ServiceType.SWARM_MANIFEST,
            lwf.LWFHeader.ServiceType.SWARM_HAVE,
            lwf.LWFHeader.ServiceType.SWARM_REQUEST,
            lwf.LWFHeader.ServiceType.SWARM_BLOCK,
            => .throttle,

            // Feed/social range: encryption is mandated by policy.
            // Fix: honor the `require_encryption` config field, which was
            // previously declared and initialized but never consulted.
            0x0A00...0x0AFF => if (self.require_encryption and
                header.flags & lwf.LWFFlags.ENCRYPTED == 0)
                .drop // Policy violation
            else
                .forward,

            // Default: admit for normal processing.
            else => .forward,
        };
    }

    /// Reputation gate keyed by the 24-byte source hint (O(log N) lookup).
    /// TODO: interface with the QVL trust graph — map source_hint to a
    /// reputation score in [0.0, 1.0]; score < 0.2 -> .drop,
    /// score > 0.8 -> .prioritize. Currently a pass-through mock.
    pub fn assessReputation(self: *PolicyEngine, source_hint: [24]u8) PolicyDecision {
        _ = self;
        _ = source_hint;
        return .forward; // Mock until QVL integration lands.
    }
};
test "PolicyEngine: Classification rules" {
    const engine = PolicyEngine.init(std.testing.allocator);
    var header = lwf.LWFHeader.init();

    // Case 1: streaming traffic with sufficient claimed entropy -> prioritize.
    header.service_type = lwf.LWFHeader.ServiceType.STREAM_VIDEO;
    header.entropy_difficulty = 10;
    try std.testing.expectEqual(PolicyDecision.prioritize, engine.decide(&header));

    // Case 2: swarm/bulk traffic -> throttle.
    header.service_type = lwf.LWFHeader.ServiceType.SWARM_BLOCK;
    try std.testing.expectEqual(PolicyDecision.throttle, engine.decide(&header));

    // Case 3: standard-class frame claiming less than the minimum
    // entropy difficulty (default minimum is 8) -> drop.
    header.service_type = lwf.LWFHeader.ServiceType.DATA_TRANSPORT;
    header.entropy_difficulty = 0;
    try std.testing.expectEqual(PolicyDecision.drop, engine.decide(&header));

    // Case 4: microframes are exempt from the entropy minimum and fall
    // through to the default classification -> forward.
    header.frame_class = @intFromEnum(lwf.FrameClass.micro);
    header.flags = 0;
    try std.testing.expectEqual(PolicyDecision.forward, engine.decide(&header));
}