feat(relay): Implement RelayPacket and onion wrapping logic
This commit is contained in:
parent
e2f9a8c38d
commit
43156fc033
|
|
@ -0,0 +1,22 @@
|
||||||
|
FROM archlinux:latest
|
||||||
|
|
||||||
|
RUN pacman -Syu --noconfirm \
|
||||||
|
sqlite \
|
||||||
|
gcc-libs \
|
||||||
|
bash \
|
||||||
|
procps
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy built binary from host
|
||||||
|
COPY capsule-core/zig-out/bin/capsule /usr/bin/capsule
|
||||||
|
|
||||||
|
# Copy duckdb from local context
|
||||||
|
COPY libs/libduckdb.so /usr/lib/libduckdb.so
|
||||||
|
|
||||||
|
# Expose ports
|
||||||
|
EXPOSE 9000/udp
|
||||||
|
EXPOSE 5353/udp
|
||||||
|
|
||||||
|
# Entrypoint
|
||||||
|
CMD ["capsule", "start"]
|
||||||
|
|
@ -0,0 +1,35 @@
|
||||||
|
FROM cgr.dev/chainguard/wolfi-base:latest
|
||||||
|
|
||||||
|
RUN apk update && apk add \
|
||||||
|
zig \
|
||||||
|
build-base \
|
||||||
|
git \
|
||||||
|
sqlite-dev \
|
||||||
|
bash \
|
||||||
|
curl \
|
||||||
|
unzip
|
||||||
|
|
||||||
|
# Install DuckDB
|
||||||
|
RUN curl -L -o libduckdb.zip https://github.com/duckdb/duckdb/releases/download/v1.1.3/libduckdb-linux-amd64.zip && \
|
||||||
|
unzip libduckdb.zip -d /usr/local && \
|
||||||
|
rm libduckdb.zip && \
|
||||||
|
ln -s /usr/local/libduckdb.so /usr/lib/libduckdb.so && \
|
||||||
|
cp /usr/local/duckdb.h /usr/include/duckdb.h
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy SDK
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Build Capsule Core
|
||||||
|
WORKDIR /app/capsule-core
|
||||||
|
RUN zig build
|
||||||
|
|
||||||
|
# Expose ports
|
||||||
|
# 9000: UTCP/P2P
|
||||||
|
# 5353: mDNS
|
||||||
|
EXPOSE 9000/udp
|
||||||
|
EXPOSE 5353/udp
|
||||||
|
|
||||||
|
# Entrypoint
|
||||||
|
CMD ["./zig-out/bin/capsule", "start"]
|
||||||
45
build.zig
45
build.zig
|
|
@ -51,6 +51,25 @@ pub fn build(b: *std.Build) void {
|
||||||
l0_service_mod.addImport("utcp", utcp_mod);
|
l0_service_mod.addImport("utcp", utcp_mod);
|
||||||
l0_service_mod.addImport("opq", opq_mod);
|
l0_service_mod.addImport("opq", opq_mod);
|
||||||
|
|
||||||
|
const dht_mod = b.createModule(.{
|
||||||
|
.root_source_file = b.path("l0-transport/dht.zig"),
|
||||||
|
.target = target,
|
||||||
|
.optimize = optimize,
|
||||||
|
});
|
||||||
|
|
||||||
|
const gateway_mod = b.createModule(.{
|
||||||
|
.root_source_file = b.path("l0-transport/gateway.zig"),
|
||||||
|
.target = target,
|
||||||
|
.optimize = optimize,
|
||||||
|
});
|
||||||
|
gateway_mod.addImport("dht", dht_mod);
|
||||||
|
|
||||||
|
const relay_mod = b.createModule(.{
|
||||||
|
.root_source_file = b.path("l0-transport/relay.zig"),
|
||||||
|
.target = target,
|
||||||
|
.optimize = optimize,
|
||||||
|
});
|
||||||
|
|
||||||
// ========================================================================
|
// ========================================================================
|
||||||
// Crypto: SHA3/SHAKE & FIPS 202
|
// Crypto: SHA3/SHAKE & FIPS 202
|
||||||
// ========================================================================
|
// ========================================================================
|
||||||
|
|
@ -235,6 +254,24 @@ pub fn build(b: *std.Build) void {
|
||||||
});
|
});
|
||||||
const run_l0_service_tests = b.addRunArtifact(l0_service_tests);
|
const run_l0_service_tests = b.addRunArtifact(l0_service_tests);
|
||||||
|
|
||||||
|
// DHT tests
|
||||||
|
const dht_tests = b.addTest(.{
|
||||||
|
.root_module = dht_mod,
|
||||||
|
});
|
||||||
|
const run_dht_tests = b.addRunArtifact(dht_tests);
|
||||||
|
|
||||||
|
// Gateway tests
|
||||||
|
const gateway_tests = b.addTest(.{
|
||||||
|
.root_module = gateway_mod,
|
||||||
|
});
|
||||||
|
const run_gateway_tests = b.addRunArtifact(gateway_tests);
|
||||||
|
|
||||||
|
// Relay tests
|
||||||
|
const relay_tests = b.addTest(.{
|
||||||
|
.root_module = relay_mod,
|
||||||
|
});
|
||||||
|
const run_relay_tests = b.addRunArtifact(relay_tests);
|
||||||
|
|
||||||
// L1 SoulKey tests (Phase 2B)
|
// L1 SoulKey tests (Phase 2B)
|
||||||
const l1_soulkey_tests = b.addTest(.{
|
const l1_soulkey_tests = b.addTest(.{
|
||||||
.root_module = l1_soulkey_mod,
|
.root_module = l1_soulkey_mod,
|
||||||
|
|
@ -320,6 +357,7 @@ pub fn build(b: *std.Build) void {
|
||||||
l1_vector_mod.addImport("time", time_mod);
|
l1_vector_mod.addImport("time", time_mod);
|
||||||
l1_vector_mod.addImport("pqxdh", l1_pqxdh_mod);
|
l1_vector_mod.addImport("pqxdh", l1_pqxdh_mod);
|
||||||
l1_vector_mod.addImport("trust_graph", l1_trust_graph_mod);
|
l1_vector_mod.addImport("trust_graph", l1_trust_graph_mod);
|
||||||
|
l1_vector_mod.addImport("soulkey", l1_soulkey_mod);
|
||||||
|
|
||||||
const l1_vector_tests = b.addTest(.{
|
const l1_vector_tests = b.addTest(.{
|
||||||
.root_module = l1_vector_mod,
|
.root_module = l1_vector_mod,
|
||||||
|
|
@ -377,6 +415,9 @@ pub fn build(b: *std.Build) void {
|
||||||
test_step.dependOn(&run_utcp_tests.step);
|
test_step.dependOn(&run_utcp_tests.step);
|
||||||
test_step.dependOn(&run_opq_tests.step);
|
test_step.dependOn(&run_opq_tests.step);
|
||||||
test_step.dependOn(&run_l0_service_tests.step);
|
test_step.dependOn(&run_l0_service_tests.step);
|
||||||
|
test_step.dependOn(&run_dht_tests.step);
|
||||||
|
test_step.dependOn(&run_gateway_tests.step);
|
||||||
|
test_step.dependOn(&run_relay_tests.step);
|
||||||
test_step.dependOn(&run_l1_qvl_tests.step);
|
test_step.dependOn(&run_l1_qvl_tests.step);
|
||||||
test_step.dependOn(&run_l1_qvl_ffi_tests.step);
|
test_step.dependOn(&run_l1_qvl_ffi_tests.step);
|
||||||
|
|
||||||
|
|
@ -442,6 +483,10 @@ pub fn build(b: *std.Build) void {
|
||||||
// Link L1 (Identity)
|
// Link L1 (Identity)
|
||||||
capsule_mod.addImport("l1_identity", l1_mod);
|
capsule_mod.addImport("l1_identity", l1_mod);
|
||||||
capsule_mod.addImport("qvl", l1_qvl_mod);
|
capsule_mod.addImport("qvl", l1_qvl_mod);
|
||||||
|
capsule_mod.addImport("dht", dht_mod);
|
||||||
|
capsule_mod.addImport("gateway", gateway_mod);
|
||||||
|
capsule_mod.addImport("relay", relay_mod);
|
||||||
|
capsule_mod.addImport("quarantine", l0_quarantine_mod);
|
||||||
|
|
||||||
const capsule_exe = b.addExecutable(.{
|
const capsule_exe = b.addExecutable(.{
|
||||||
.name = "capsule",
|
.name = "capsule",
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,20 @@
|
||||||
|
install
|
||||||
|
+- install capsule
|
||||||
|
+- compile exe capsule Debug native 1 errors
|
||||||
|
capsule-core/src/tui/app.zig:5:23: error: no module named 'vaxis' available within module 'root'
|
||||||
|
const vaxis = @import("vaxis");
|
||||||
|
^~~~~~~
|
||||||
|
referenced by:
|
||||||
|
run: capsule-core/src/tui/app.zig:64:18
|
||||||
|
main: capsule-core/src/main.zig:132:24
|
||||||
|
4 reference(s) hidden; use '-freference-trace=6' to see all references
|
||||||
|
error: the following command failed with 1 compilation errors:
|
||||||
|
/usr/bin/zig build-exe -lsqlite3 -lduckdb -ODebug --dep l0_transport=lwf --dep utcp --dep l1_identity=crypto --dep qvl --dep dht --dep gateway --dep quarantine -Mroot=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/capsule-core/src/main.zig -ODebug -Mlwf=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/lwf.zig -ODebug --dep ipc --dep lwf --dep entropy --dep quarantine -Mutcp=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/utcp/socket.zig -ODebug --dep shake --dep fips202_bridge --dep pqxdh --dep slash -Mcrypto=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/crypto.zig -ODebug --dep trust_graph --dep time -Mqvl=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/qvl.zig -ODebug -Mdht=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/dht.zig -ODebug --dep dht -Mgateway=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/gateway.zig -ODebug -Mquarantine=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/quarantine.zig -ODebug -Mipc=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/ipc/client.zig -cflags -std=c99 -O3 -fPIC -DHAVE_PTHREAD -- /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/argon2.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/core.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/blake2/blake2b.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/thread.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/encoding.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/opt.c -ODebug -I /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/include -Mentropy=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/entropy.zig -ODebug -Mshake=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/src/crypto/shake.zig -ODebug -Mfips202_bridge=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/src/crypto/fips202_bridge.zig -needed-loqs 
-ODebug -I /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/liboqs/install/include -L /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/liboqs/install/lib -Mpqxdh=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/pqxdh.zig -ODebug -Mslash=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/slash.zig -ODebug --dep crypto -Mtrust_graph=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/trust_graph.zig -ODebug -Mtime=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/time.zig -lc --cache-dir .zig-cache --global-cache-dir /home/markus/.cache/zig --name capsule --zig-lib-dir /usr/lib/zig/ --listen=-
|
||||||
|
|
||||||
|
Build Summary: 6/9 steps succeeded; 1 failed
|
||||||
|
install transitive failure
|
||||||
|
+- install capsule transitive failure
|
||||||
|
+- compile exe capsule Debug native 1 errors
|
||||||
|
|
||||||
|
error: the following build command failed with exit code 1:
|
||||||
|
.zig-cache/o/b5937c8bf2970c610fb30d2e05efe33c/build /usr/bin/zig /usr/lib/zig /home/markus/zWork/_Git/Libertaria/libertaria-sdk .zig-cache /home/markus/.cache/zig --seed 0x8d98622f -Z2d0f55519cb30a7a
|
||||||
|
|
@ -0,0 +1,20 @@
|
||||||
|
install
|
||||||
|
+- install capsule
|
||||||
|
+- compile exe capsule Debug native 1 errors
|
||||||
|
capsule-core/src/node.zig:22:32: error: no module named 'quarantine' available within module 'root'
|
||||||
|
const quarantine_mod = @import("quarantine");
|
||||||
|
^~~~~~~~~~~~
|
||||||
|
referenced by:
|
||||||
|
node.CapsuleNode: capsule-core/src/node.zig:79:19
|
||||||
|
CapsuleNode: capsule-core/src/node.zig:62:25
|
||||||
|
6 reference(s) hidden; use '-freference-trace=8' to see all references
|
||||||
|
error: the following command failed with 1 compilation errors:
|
||||||
|
/usr/bin/zig build-exe -lsqlite3 -lduckdb -ODebug --dep l0_transport=lwf --dep utcp --dep l1_identity=crypto --dep qvl --dep dht -Mroot=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/capsule-core/src/main.zig -ODebug -Mlwf=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/lwf.zig -ODebug --dep ipc --dep lwf --dep entropy --dep quarantine -Mutcp=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/utcp/socket.zig -ODebug --dep shake --dep fips202_bridge --dep pqxdh --dep slash -Mcrypto=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/crypto.zig -ODebug --dep trust_graph --dep time -Mqvl=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/qvl.zig -ODebug -Mdht=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/dht.zig -ODebug -Mipc=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/ipc/client.zig -cflags -std=c99 -O3 -fPIC -DHAVE_PTHREAD -- /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/argon2.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/core.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/blake2/blake2b.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/thread.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/encoding.c /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/src/opt.c -ODebug -I /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/argon2/include -Mentropy=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/entropy.zig -ODebug -Mquarantine=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/quarantine.zig -ODebug -Mshake=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/src/crypto/shake.zig -ODebug -Mfips202_bridge=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/src/crypto/fips202_bridge.zig -needed-loqs -ODebug -I /home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/liboqs/install/include -L 
/home/markus/zWork/_Git/Libertaria/libertaria-sdk/vendor/liboqs/install/lib -Mpqxdh=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/pqxdh.zig -ODebug -Mslash=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/slash.zig -ODebug --dep crypto -Mtrust_graph=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/trust_graph.zig -ODebug -Mtime=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/time.zig -lc --cache-dir .zig-cache --global-cache-dir /home/markus/.cache/zig --name capsule --zig-lib-dir /usr/lib/zig/ --listen=-
|
||||||
|
|
||||||
|
Build Summary: 6/9 steps succeeded; 1 failed
|
||||||
|
install transitive failure
|
||||||
|
+- install capsule transitive failure
|
||||||
|
+- compile exe capsule Debug native 1 errors
|
||||||
|
|
||||||
|
error: the following build command failed with exit code 1:
|
||||||
|
.zig-cache/o/adaac25b0555a4724eacbe0f6ad253fd/build /usr/bin/zig /usr/lib/zig /home/markus/zWork/_Git/Libertaria/libertaria-sdk .zig-cache /home/markus/.cache/zig --seed 0xbbd073e3 -Z6bc5376addff02a3 -j1
|
||||||
|
|
@ -73,6 +73,12 @@ pub fn build(b: *std.Build) void {
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
|
const vaxis_dep = b.dependency("vaxis", .{
|
||||||
|
.target = target,
|
||||||
|
.optimize = optimize,
|
||||||
|
});
|
||||||
|
const vaxis_mod = vaxis_dep.module("vaxis");
|
||||||
|
|
||||||
const exe_mod = b.createModule(.{
|
const exe_mod = b.createModule(.{
|
||||||
.root_source_file = b.path("src/main.zig"),
|
.root_source_file = b.path("src/main.zig"),
|
||||||
.target = target,
|
.target = target,
|
||||||
|
|
@ -89,6 +95,7 @@ pub fn build(b: *std.Build) void {
|
||||||
exe.root_module.addImport("l1_identity", crypto); // Name mismatch? Step 4902 says l1_identity=crypto
|
exe.root_module.addImport("l1_identity", crypto); // Name mismatch? Step 4902 says l1_identity=crypto
|
||||||
exe.root_module.addImport("qvl", qvl);
|
exe.root_module.addImport("qvl", qvl);
|
||||||
exe.root_module.addImport("quarantine", quarantine);
|
exe.root_module.addImport("quarantine", quarantine);
|
||||||
|
exe.root_module.addImport("vaxis", vaxis_mod);
|
||||||
|
|
||||||
exe.linkSystemLibrary("sqlite3");
|
exe.linkSystemLibrary("sqlite3");
|
||||||
exe.linkSystemLibrary("duckdb");
|
exe.linkSystemLibrary("duckdb");
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,12 @@
|
||||||
|
.{
|
||||||
|
.name = .capsule_core,
|
||||||
|
.version = "0.15.2",
|
||||||
|
.dependencies = .{
|
||||||
|
.vaxis = .{
|
||||||
|
.url = "https://github.com/rockorager/libvaxis/archive/refs/heads/main.tar.gz",
|
||||||
|
.hash = "vaxis-0.5.1-BWNV_Bw_CQAIVNh1ekGVzbip25CYBQ_J3kgABnYGFnI4",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
.paths = .{""},
|
||||||
|
.fingerprint = 0x8a316e2234f72ed1,
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,20 @@
|
||||||
|
install
|
||||||
|
+- install capsule
|
||||||
|
+- compile exe capsule Debug native 1 errors
|
||||||
|
/usr/lib/zig/std/Io/Writer.zig:1200:9: error: ambiguous format string; specify {f} to call format method, or {any} to skip it
|
||||||
|
@compileError("ambiguous format string; specify {f} to call format method, or {any} to skip it");
|
||||||
|
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
referenced by:
|
||||||
|
print__anon_34066: /usr/lib/zig/std/Io/Writer.zig:700:25
|
||||||
|
bufPrint__anon_28107: /usr/lib/zig/std/fmt.zig:614:12
|
||||||
|
11 reference(s) hidden; use '-freference-trace=13' to see all references
|
||||||
|
error: the following command failed with 1 compilation errors:
|
||||||
|
/usr/bin/zig build-exe -lsqlite3 -lduckdb -ODebug --dep l0_transport=lwf --dep utcp --dep l1_identity --dep qvl --dep quarantine --dep vaxis -Mroot=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/capsule-core/src/main.zig --dep ipc --dep entropy --dep quarantine -Mlwf=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/lwf.zig --dep shake --dep fips202_bridge --dep pqxdh --dep slash --dep ipc --dep lwf --dep entropy -Mutcp=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/utcp/socket.zig --dep trust_graph --dep time -Ml1_identity=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/crypto.zig --dep time -Mqvl=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/qvl.zig -Mquarantine=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/quarantine.zig -ODebug --dep zigimg --dep uucode -Mvaxis=/home/markus/.cache/zig/p/vaxis-0.5.1-BWNV_Bw_CQAIVNh1ekGVzbip25CYBQ_J3kgABnYGFnI4/src/main.zig -Mipc=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/ipc/client.zig -Mentropy=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/entropy.zig -Mshake=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/src/crypto/shake.zig -Mfips202_bridge=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/src/crypto/fips202_bridge.zig -Mpqxdh=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/pqxdh.zig --dep crypto -Mslash=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/slash.zig -Mtrust_graph=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/trust_graph.zig -Mtime=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l0-transport/time.zig -ODebug --dep zigimg -Mzigimg=/home/markus/.cache/zig/p/zigimg-0.1.0-8_eo2vUZFgAAtN1c6dAO5DdqL0d4cEWHtn6iR5ucZJti/zigimg.zig -ODebug --dep types.zig --dep config.zig --dep types.x.zig --dep tables --dep get.zig -Muucode=/home/markus/.cache/zig/p/uucode-0.1.0-ZZjBPj96QADXyt5sqwBJUnhaDYs_qBeeKijZvlRa0eqM/src/root.zig 
-Mcrypto=/home/markus/zWork/_Git/Libertaria/libertaria-sdk/l1-identity/crypto.zig -ODebug --dep config.zig --dep get.zig -Mtypes.zig=/home/markus/.cache/zig/p/uucode-0.1.0-ZZjBPj96QADXyt5sqwBJUnhaDYs_qBeeKijZvlRa0eqM/src/types.zig -ODebug --dep types.zig -Mconfig.zig=/home/markus/.cache/zig/p/uucode-0.1.0-ZZjBPj96QADXyt5sqwBJUnhaDYs_qBeeKijZvlRa0eqM/src/config.zig -ODebug --dep config.x.zig -Mtypes.x.zig=/home/markus/.cache/zig/p/uucode-0.1.0-ZZjBPj96QADXyt5sqwBJUnhaDYs_qBeeKijZvlRa0eqM/src/x/types.x.zig -ODebug --dep types.zig --dep types.x.zig --dep config.zig --dep build_config -Mtables=.zig-cache/o/f992ecbd8ddf0ce62acb8ad5f358027c/tables.zig -ODebug --dep types.zig --dep tables -Mget.zig=/home/markus/.cache/zig/p/uucode-0.1.0-ZZjBPj96QADXyt5sqwBJUnhaDYs_qBeeKijZvlRa0eqM/src/get.zig -ODebug --dep types.x.zig --dep types.zig --dep config.zig -Mconfig.x.zig=/home/markus/.cache/zig/p/uucode-0.1.0-ZZjBPj96QADXyt5sqwBJUnhaDYs_qBeeKijZvlRa0eqM/src/x/config.x.zig --dep types.zig --dep config.zig --dep types.x.zig --dep config.x.zig -Mbuild_config=.zig-cache/o/fd18b32249ff398bc4015853405e77cf/build_config2.zig -lc --cache-dir .zig-cache --global-cache-dir /home/markus/.cache/zig --name capsule --zig-lib-dir /usr/lib/zig/ --listen=-
|
||||||
|
|
||||||
|
Build Summary: 3/6 steps succeeded; 1 failed
|
||||||
|
install transitive failure
|
||||||
|
+- install capsule transitive failure
|
||||||
|
+- compile exe capsule Debug native 1 errors
|
||||||
|
|
||||||
|
error: the following build command failed with exit code 1:
|
||||||
|
.zig-cache/o/4b65275f5eb170ee27bba10d107c990c/build /usr/bin/zig /usr/lib/zig /home/markus/zWork/_Git/Libertaria/libertaria-sdk/capsule-core .zig-cache /home/markus/.cache/zig --seed 0xb9d460a9 -Z4967dc0931849eb3
|
||||||
|
|
@ -21,6 +21,9 @@ pub const NodeConfig = struct {
|
||||||
/// Logging level
|
/// Logging level
|
||||||
log_level: std.log.Level = .info,
|
log_level: std.log.Level = .info,
|
||||||
|
|
||||||
|
/// Enable Gateway Service (Layer 1 Coordination)
|
||||||
|
gateway_enabled: bool = false,
|
||||||
|
|
||||||
/// Free allocated memory (strings, slices)
|
/// Free allocated memory (strings, slices)
|
||||||
pub fn deinit(self: *NodeConfig, allocator: std.mem.Allocator) void {
|
pub fn deinit(self: *NodeConfig, allocator: std.mem.Allocator) void {
|
||||||
allocator.free(self.data_dir);
|
allocator.free(self.data_dir);
|
||||||
|
|
@ -39,6 +42,7 @@ pub const NodeConfig = struct {
|
||||||
.control_socket_path = try allocator.dupe(u8, "data/capsule.sock"),
|
.control_socket_path = try allocator.dupe(u8, "data/capsule.sock"),
|
||||||
.identity_key_path = try allocator.dupe(u8, "data/identity.key"),
|
.identity_key_path = try allocator.dupe(u8, "data/identity.key"),
|
||||||
.port = 8710,
|
.port = 8710,
|
||||||
|
.gateway_enabled = false,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -96,6 +100,7 @@ pub const NodeConfig = struct {
|
||||||
.port = cfg.port,
|
.port = cfg.port,
|
||||||
.bootstrap_peers = try peers.toOwnedSlice(),
|
.bootstrap_peers = try peers.toOwnedSlice(),
|
||||||
.log_level = cfg.log_level,
|
.log_level = cfg.log_level,
|
||||||
|
.gateway_enabled = cfg.gateway_enabled,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,8 @@ pub const Command = union(enum) {
|
||||||
Airlock: AirlockArgs,
|
Airlock: AirlockArgs,
|
||||||
/// Shutdown the daemon (admin only)
|
/// Shutdown the daemon (admin only)
|
||||||
Shutdown: void,
|
Shutdown: void,
|
||||||
|
/// Get Topology for Graph Visualization
|
||||||
|
Topology: void,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const SlashArgs = struct {
|
pub const SlashArgs = struct {
|
||||||
|
|
@ -87,6 +89,8 @@ pub const Response = union(enum) {
|
||||||
IdentityInfo: IdentityInfo,
|
IdentityInfo: IdentityInfo,
|
||||||
/// Lockdown status
|
/// Lockdown status
|
||||||
LockdownStatus: LockdownInfo,
|
LockdownStatus: LockdownInfo,
|
||||||
|
/// Topology info
|
||||||
|
TopologyInfo: TopologyInfo,
|
||||||
/// QVL query results
|
/// QVL query results
|
||||||
QvlResult: QvlMetrics,
|
QvlResult: QvlMetrics,
|
||||||
/// Slash Log results
|
/// Slash Log results
|
||||||
|
|
@ -142,6 +146,24 @@ pub const LockdownInfo = struct {
|
||||||
locked_since: i64,
|
locked_since: i64,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
pub const TopologyInfo = struct {
|
||||||
|
nodes: []const GraphNode,
|
||||||
|
edges: []const GraphEdge,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const GraphNode = struct {
|
||||||
|
id: []const u8, // short did or node id
|
||||||
|
trust_score: f64,
|
||||||
|
status: []const u8, // "active", "slashed", "ok"
|
||||||
|
role: []const u8, // "self", "peer"
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const GraphEdge = struct {
|
||||||
|
source: []const u8,
|
||||||
|
target: []const u8,
|
||||||
|
weight: f64,
|
||||||
|
};
|
||||||
|
|
||||||
pub const SlashEvent = struct {
|
pub const SlashEvent = struct {
|
||||||
timestamp: u64,
|
timestamp: u64,
|
||||||
target_did: []const u8,
|
target_did: []const u8,
|
||||||
|
|
|
||||||
|
|
@ -44,6 +44,14 @@ pub const FederationMessage = union(enum) {
|
||||||
dht_nodes: struct {
|
dht_nodes: struct {
|
||||||
nodes: []const DhtNode,
|
nodes: []const DhtNode,
|
||||||
},
|
},
|
||||||
|
// Gateway Coordination
|
||||||
|
hole_punch_request: struct {
|
||||||
|
target_id: [32]u8,
|
||||||
|
},
|
||||||
|
hole_punch_notify: struct {
|
||||||
|
peer_id: [32]u8,
|
||||||
|
address: net.Address,
|
||||||
|
},
|
||||||
|
|
||||||
pub fn encode(self: FederationMessage, writer: anytype) !void {
|
pub fn encode(self: FederationMessage, writer: anytype) !void {
|
||||||
try writer.writeByte(@intFromEnum(self));
|
try writer.writeByte(@intFromEnum(self));
|
||||||
|
|
@ -80,6 +88,19 @@ pub const FederationMessage = union(enum) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
.hole_punch_request => |r| {
|
||||||
|
try writer.writeAll(&r.target_id);
|
||||||
|
},
|
||||||
|
.hole_punch_notify => |n| {
|
||||||
|
try writer.writeAll(&n.peer_id);
|
||||||
|
// Serialize address (IPv4 only for now)
|
||||||
|
if (n.address.any.family == std.posix.AF.INET) {
|
||||||
|
try writer.writeAll(&std.mem.toBytes(n.address.in.sa.addr));
|
||||||
|
try writer.writeInt(u16, n.address.getPort(), .big);
|
||||||
|
} else {
|
||||||
|
return error.UnsupportedAddressFamily;
|
||||||
|
}
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -131,6 +152,22 @@ pub const FederationMessage = union(enum) {
|
||||||
}
|
}
|
||||||
return .{ .dht_nodes = .{ .nodes = nodes } };
|
return .{ .dht_nodes = .{ .nodes = nodes } };
|
||||||
},
|
},
|
||||||
|
.hole_punch_request => .{
|
||||||
|
.hole_punch_request = .{
|
||||||
|
.target_id = try reader.readBytesNoEof(32),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
.hole_punch_notify => {
|
||||||
|
const id = try reader.readBytesNoEof(32);
|
||||||
|
const addr_u32 = try reader.readInt(u32, @import("builtin").target.cpu.arch.endian());
|
||||||
|
const port = try reader.readInt(u16, .big);
|
||||||
|
return .{
|
||||||
|
.hole_punch_notify = .{
|
||||||
|
.peer_id = id,
|
||||||
|
.address = net.Address.initIp4(std.mem.toBytes(addr_u32), port),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -5,6 +5,7 @@ const node_mod = @import("node.zig");
|
||||||
const config_mod = @import("config.zig");
|
const config_mod = @import("config.zig");
|
||||||
|
|
||||||
const control_mod = @import("control.zig");
|
const control_mod = @import("control.zig");
|
||||||
|
const tui_app = @import("tui/app.zig");
|
||||||
|
|
||||||
pub fn main() !void {
|
pub fn main() !void {
|
||||||
// Setup allocator
|
// Setup allocator
|
||||||
|
|
@ -16,98 +17,119 @@ pub fn main() !void {
|
||||||
const args = try std.process.argsAlloc(allocator);
|
const args = try std.process.argsAlloc(allocator);
|
||||||
defer std.process.argsFree(allocator, args);
|
defer std.process.argsFree(allocator, args);
|
||||||
|
|
||||||
if (args.len < 2) {
|
var data_dir_override: ?[]const u8 = null;
|
||||||
|
var port_override: ?u16 = null;
|
||||||
|
|
||||||
|
// Parse global options and find command index
|
||||||
|
var cmd_idx: usize = 1;
|
||||||
|
var i: usize = 1;
|
||||||
|
while (i < args.len) : (i += 1) {
|
||||||
|
if (std.mem.eql(u8, args[i], "--data-dir") and i + 1 < args.len) {
|
||||||
|
data_dir_override = args[i + 1];
|
||||||
|
i += 1;
|
||||||
|
} else if (std.mem.eql(u8, args[i], "--port") and i + 1 < args.len) {
|
||||||
|
port_override = std.fmt.parseInt(u16, args[i + 1], 10) catch null;
|
||||||
|
i += 1;
|
||||||
|
} else {
|
||||||
|
cmd_idx = i;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (cmd_idx >= args.len) {
|
||||||
printUsage();
|
printUsage();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
const command = args[1];
|
const command = args[cmd_idx];
|
||||||
|
|
||||||
if (std.mem.eql(u8, command, "start")) {
|
if (std.mem.eql(u8, command, "start")) {
|
||||||
try runDaemon(allocator);
|
// start already parses its own but we can unify
|
||||||
|
try runDaemon(allocator, port_override, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "status")) {
|
} else if (std.mem.eql(u8, command, "status")) {
|
||||||
try runCliCommand(allocator, .Status);
|
try runCliCommand(allocator, .Status, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "peers")) {
|
} else if (std.mem.eql(u8, command, "peers")) {
|
||||||
try runCliCommand(allocator, .Peers);
|
try runCliCommand(allocator, .Peers, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "stop")) {
|
} else if (std.mem.eql(u8, command, "stop")) {
|
||||||
try runCliCommand(allocator, .Shutdown);
|
try runCliCommand(allocator, .Shutdown, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "version")) {
|
} else if (std.mem.eql(u8, command, "version")) {
|
||||||
std.debug.print("Libertaria Capsule v0.1.0 (Shield)\n", .{});
|
std.debug.print("Libertaria Capsule v0.1.0 (Shield)\n", .{});
|
||||||
} else if (std.mem.eql(u8, command, "slash")) {
|
} else if (std.mem.eql(u8, command, "slash")) {
|
||||||
if (args.len < 5) {
|
if (args.len < cmd_idx + 4) {
|
||||||
std.debug.print("Usage: capsule slash <target_did> <reason> <severity>\n", .{});
|
std.debug.print("Usage: capsule slash <target_did> <reason> <severity>\n", .{});
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
const target_did = args[2];
|
const target_did = args[cmd_idx + 1];
|
||||||
const reason = args[3];
|
const reason = args[cmd_idx + 2];
|
||||||
const severity = args[4];
|
const severity = args[cmd_idx + 3];
|
||||||
|
|
||||||
// Validation could happen here or in node
|
|
||||||
try runCliCommand(allocator, .{ .Slash = .{
|
try runCliCommand(allocator, .{ .Slash = .{
|
||||||
.target_did = try allocator.dupe(u8, target_did),
|
.target_did = try allocator.dupe(u8, target_did),
|
||||||
.reason = try allocator.dupe(u8, reason),
|
.reason = try allocator.dupe(u8, reason),
|
||||||
.severity = try allocator.dupe(u8, severity),
|
.severity = try allocator.dupe(u8, severity),
|
||||||
.duration = 0,
|
.duration = 0,
|
||||||
} });
|
} }, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "slash-log")) {
|
} else if (std.mem.eql(u8, command, "slash-log")) {
|
||||||
var limit: usize = 50;
|
var limit: usize = 50;
|
||||||
if (args.len >= 3) {
|
if (args.len >= cmd_idx + 2) {
|
||||||
limit = std.fmt.parseInt(usize, args[2], 10) catch 50;
|
limit = std.fmt.parseInt(usize, args[cmd_idx + 1], 10) catch 50;
|
||||||
}
|
}
|
||||||
try runCliCommand(allocator, .{ .SlashLog = .{ .limit = limit } });
|
try runCliCommand(allocator, .{ .SlashLog = .{ .limit = limit } }, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "ban")) {
|
} else if (std.mem.eql(u8, command, "ban")) {
|
||||||
if (args.len < 4) {
|
if (args.len < cmd_idx + 3) {
|
||||||
std.debug.print("Usage: capsule ban <did> <reason>\n", .{});
|
std.debug.print("Usage: capsule ban <did> <reason>\n", .{});
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
const target_did = args[2];
|
const target_did = args[cmd_idx + 1];
|
||||||
const reason = args[3];
|
const reason = args[cmd_idx + 2];
|
||||||
try runCliCommand(allocator, .{ .Ban = .{
|
try runCliCommand(allocator, .{ .Ban = .{
|
||||||
.target_did = try allocator.dupe(u8, target_did),
|
.target_did = try allocator.dupe(u8, target_did),
|
||||||
.reason = try allocator.dupe(u8, reason),
|
.reason = try allocator.dupe(u8, reason),
|
||||||
} });
|
} }, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "unban")) {
|
} else if (std.mem.eql(u8, command, "unban")) {
|
||||||
if (args.len < 3) {
|
if (args.len < cmd_idx + 2) {
|
||||||
std.debug.print("Usage: capsule unban <did>\n", .{});
|
std.debug.print("Usage: capsule unban <did>\n", .{});
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
const target_did = args[2];
|
const target_did = args[cmd_idx + 1];
|
||||||
try runCliCommand(allocator, .{ .Unban = .{
|
try runCliCommand(allocator, .{ .Unban = .{
|
||||||
.target_did = try allocator.dupe(u8, target_did),
|
.target_did = try allocator.dupe(u8, target_did),
|
||||||
} });
|
} }, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "trust")) {
|
} else if (std.mem.eql(u8, command, "trust")) {
|
||||||
if (args.len < 4) {
|
if (args.len < cmd_idx + 3) {
|
||||||
std.debug.print("Usage: capsule trust <did> <score>\n", .{});
|
std.debug.print("Usage: capsule trust <did> <score>\n", .{});
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
const target_did = args[2];
|
const target_did = args[cmd_idx + 1];
|
||||||
const score = std.fmt.parseFloat(f64, args[3]) catch {
|
const score = std.fmt.parseFloat(f64, args[cmd_idx + 2]) catch {
|
||||||
std.debug.print("Error: Invalid score '{s}', must be a number\n", .{args[3]});
|
std.debug.print("Error: Invalid score '{s}', must be a number\n", .{args[cmd_idx + 2]});
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
try runCliCommand(allocator, .{ .Trust = .{
|
try runCliCommand(allocator, .{ .Trust = .{
|
||||||
.target_did = try allocator.dupe(u8, target_did),
|
.target_did = try allocator.dupe(u8, target_did),
|
||||||
.score = score,
|
.score = score,
|
||||||
} });
|
} }, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "sessions")) {
|
} else if (std.mem.eql(u8, command, "sessions")) {
|
||||||
try runCliCommand(allocator, .Sessions);
|
try runCliCommand(allocator, .Sessions, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "dht")) {
|
} else if (std.mem.eql(u8, command, "dht")) {
|
||||||
try runCliCommand(allocator, .Dht);
|
try runCliCommand(allocator, .Dht, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "qvl-query")) {
|
} else if (std.mem.eql(u8, command, "qvl-query")) {
|
||||||
var target_did: ?[]const u8 = null;
|
var target_did: ?[]const u8 = null;
|
||||||
if (args.len >= 3) {
|
if (args.len >= cmd_idx + 2) {
|
||||||
target_did = try allocator.dupe(u8, args[2]);
|
target_did = try allocator.dupe(u8, args[cmd_idx + 1]);
|
||||||
}
|
}
|
||||||
try runCliCommand(allocator, .{ .QvlQuery = .{ .target_did = target_did } });
|
try runCliCommand(allocator, .{ .QvlQuery = .{ .target_did = target_did } }, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "identity")) {
|
} else if (std.mem.eql(u8, command, "identity")) {
|
||||||
try runCliCommand(allocator, .Identity);
|
try runCliCommand(allocator, .Identity, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "lockdown")) {
|
} else if (std.mem.eql(u8, command, "lockdown")) {
|
||||||
try runCliCommand(allocator, .Lockdown);
|
try runCliCommand(allocator, .Lockdown, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "unlock")) {
|
} else if (std.mem.eql(u8, command, "unlock")) {
|
||||||
try runCliCommand(allocator, .Unlock);
|
try runCliCommand(allocator, .Unlock, data_dir_override);
|
||||||
} else if (std.mem.eql(u8, command, "airlock")) {
|
} else if (std.mem.eql(u8, command, "airlock")) {
|
||||||
const state = if (args.len > 2) args[2] else "open";
|
const state = if (args.len > cmd_idx + 1) args[cmd_idx + 1] else "open";
|
||||||
try runCliCommand(allocator, .{ .Airlock = .{ .state = state } });
|
try runCliCommand(allocator, .{ .Airlock = .{ .state = state } }, data_dir_override);
|
||||||
|
} else if (std.mem.eql(u8, command, "monitor")) {
|
||||||
|
try tui_app.run(allocator, "dummy_socket_path");
|
||||||
} else {
|
} else {
|
||||||
printUsage();
|
printUsage();
|
||||||
}
|
}
|
||||||
|
|
@ -134,21 +156,42 @@ fn printUsage() void {
|
||||||
\\ identity Show node identity
|
\\ identity Show node identity
|
||||||
\\ lockdown Emergency network lockdown
|
\\ lockdown Emergency network lockdown
|
||||||
\\ unlock Resume normal operation
|
\\ unlock Resume normal operation
|
||||||
\\ airlock <open|restricted|closed> Set airlock mode
|
\\ airlock <open|restricted|closed> Set airlock mode
|
||||||
|
\\ monitor Launch TUI Dashboard
|
||||||
\\
|
\\
|
||||||
, .{});
|
, .{});
|
||||||
}
|
}
|
||||||
|
|
||||||
fn runDaemon(allocator: std.mem.Allocator) !void {
|
fn runDaemon(allocator: std.mem.Allocator, port_override: ?u16, data_dir_override: ?[]const u8) !void {
|
||||||
// Load Config
|
// Load Config
|
||||||
// Check for config.json, otherwise use default
|
// Check for config.json, otherwise use default
|
||||||
const config_path = "config.json";
|
const config_path = "config.json";
|
||||||
var config = config_mod.NodeConfig.loadFromJsonFile(allocator, config_path) catch |err| {
|
var config = config_mod.NodeConfig.loadFromJsonFile(allocator, config_path) catch |err| {
|
||||||
|
if (err == error.FileNotFound) {
|
||||||
|
std.log.info("Config missing, using defaults", .{});
|
||||||
|
var cfg = try config_mod.NodeConfig.default(allocator);
|
||||||
|
if (port_override) |p| cfg.port = p;
|
||||||
|
if (data_dir_override) |d| {
|
||||||
|
allocator.free(cfg.data_dir);
|
||||||
|
cfg.data_dir = try allocator.dupe(u8, d);
|
||||||
|
}
|
||||||
|
const node = try node_mod.CapsuleNode.init(allocator, cfg);
|
||||||
|
defer node.deinit();
|
||||||
|
try node.start();
|
||||||
|
return;
|
||||||
|
}
|
||||||
std.log.err("Failed to load configuration: {}", .{err});
|
std.log.err("Failed to load configuration: {}", .{err});
|
||||||
return err;
|
return err;
|
||||||
};
|
};
|
||||||
defer config.deinit(allocator);
|
defer config.deinit(allocator);
|
||||||
|
|
||||||
|
// Apply Overrides
|
||||||
|
if (port_override) |p| config.port = p;
|
||||||
|
if (data_dir_override) |d| {
|
||||||
|
allocator.free(config.data_dir);
|
||||||
|
config.data_dir = try allocator.dupe(u8, d);
|
||||||
|
}
|
||||||
|
|
||||||
// Initialize Node
|
// Initialize Node
|
||||||
const node = try node_mod.CapsuleNode.init(allocator, config);
|
const node = try node_mod.CapsuleNode.init(allocator, config);
|
||||||
defer node.deinit();
|
defer node.deinit();
|
||||||
|
|
@ -157,7 +200,7 @@ fn runDaemon(allocator: std.mem.Allocator) !void {
|
||||||
try node.start();
|
try node.start();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn runCliCommand(allocator: std.mem.Allocator, cmd: control_mod.Command) !void {
|
fn runCliCommand(allocator: std.mem.Allocator, cmd: control_mod.Command, data_dir_override: ?[]const u8) !void {
|
||||||
// Load config to find socket path
|
// Load config to find socket path
|
||||||
const config_path = "config.json";
|
const config_path = "config.json";
|
||||||
var config = config_mod.NodeConfig.loadFromJsonFile(allocator, config_path) catch {
|
var config = config_mod.NodeConfig.loadFromJsonFile(allocator, config_path) catch {
|
||||||
|
|
@ -166,7 +209,12 @@ fn runCliCommand(allocator: std.mem.Allocator, cmd: control_mod.Command) !void {
|
||||||
};
|
};
|
||||||
defer config.deinit(allocator);
|
defer config.deinit(allocator);
|
||||||
|
|
||||||
const socket_path = config.control_socket_path;
|
const data_dir = data_dir_override orelse config.data_dir;
|
||||||
|
const socket_path = if (std.fs.path.isAbsolute(config.control_socket_path))
|
||||||
|
try allocator.dupe(u8, config.control_socket_path)
|
||||||
|
else
|
||||||
|
try std.fs.path.join(allocator, &[_][]const u8{ data_dir, std.fs.path.basename(config.control_socket_path) });
|
||||||
|
defer allocator.free(socket_path);
|
||||||
|
|
||||||
var stream = std.net.connectUnixSocket(socket_path) catch |err| {
|
var stream = std.net.connectUnixSocket(socket_path) catch |err| {
|
||||||
std.log.err("Failed to connect to daemon at {s}: {}. Is it running?", .{ socket_path, err });
|
std.log.err("Failed to connect to daemon at {s}: {}. Is it running?", .{ socket_path, err });
|
||||||
|
|
|
||||||
|
|
@ -15,7 +15,8 @@ const qvl = @import("qvl");
|
||||||
const discovery_mod = @import("discovery.zig");
|
const discovery_mod = @import("discovery.zig");
|
||||||
const peer_table_mod = @import("peer_table.zig");
|
const peer_table_mod = @import("peer_table.zig");
|
||||||
const fed = @import("federation.zig");
|
const fed = @import("federation.zig");
|
||||||
const dht_mod = @import("dht.zig");
|
const dht_mod = @import("dht");
|
||||||
|
const gateway_mod = @import("gateway");
|
||||||
const storage_mod = @import("storage.zig");
|
const storage_mod = @import("storage.zig");
|
||||||
const qvl_store_mod = @import("qvl_store.zig");
|
const qvl_store_mod = @import("qvl_store.zig");
|
||||||
const control_mod = @import("control.zig");
|
const control_mod = @import("control.zig");
|
||||||
|
|
@ -70,6 +71,7 @@ pub const CapsuleNode = struct {
|
||||||
peer_table: PeerTable,
|
peer_table: PeerTable,
|
||||||
sessions: std.HashMap(std.net.Address, PeerSession, AddressContext, std.hash_map.default_max_load_percentage),
|
sessions: std.HashMap(std.net.Address, PeerSession, AddressContext, std.hash_map.default_max_load_percentage),
|
||||||
dht: DhtService,
|
dht: DhtService,
|
||||||
|
gateway: ?gateway_mod.Gateway,
|
||||||
storage: *StorageService,
|
storage: *StorageService,
|
||||||
qvl_store: *QvlStore,
|
qvl_store: *QvlStore,
|
||||||
control_socket: std.net.Server,
|
control_socket: std.net.Server,
|
||||||
|
|
@ -104,39 +106,41 @@ pub const CapsuleNode = struct {
|
||||||
std.mem.copyForwards(u8, node_id[0..4], "NODE");
|
std.mem.copyForwards(u8, node_id[0..4], "NODE");
|
||||||
|
|
||||||
// Initialize Storage
|
// Initialize Storage
|
||||||
var db_path_buf: [256]u8 = undefined;
|
const db_path = try std.fs.path.join(allocator, &[_][]const u8{ config.data_dir, "capsule.db" });
|
||||||
const db_path = try std.fmt.bufPrint(&db_path_buf, "{s}/capsule.db", .{config.data_dir});
|
defer allocator.free(db_path);
|
||||||
const storage = try StorageService.init(allocator, db_path);
|
const storage = try StorageService.init(allocator, db_path);
|
||||||
|
|
||||||
const qvl_db_path = try std.fmt.allocPrint(allocator, "{s}/qvl.db", .{config.data_dir});
|
const qvl_db_path = try std.fs.path.join(allocator, &[_][]const u8{ config.data_dir, "qvl.db" });
|
||||||
defer allocator.free(qvl_db_path);
|
defer allocator.free(qvl_db_path);
|
||||||
const qvl_store = try QvlStore.init(allocator, qvl_db_path);
|
const qvl_store = try QvlStore.init(allocator, qvl_db_path);
|
||||||
|
|
||||||
// Initialize Control Socket
|
|
||||||
const socket_path = config.control_socket_path;
|
|
||||||
// Unlink if exists (check logic in start, or here? start binds.)
|
|
||||||
|
|
||||||
// Load or Generate Identity
|
// Load or Generate Identity
|
||||||
var seed: [32]u8 = undefined;
|
var seed: [32]u8 = undefined;
|
||||||
var identity: SoulKey = undefined;
|
var identity: SoulKey = undefined;
|
||||||
|
|
||||||
|
const identity_path = if (std.fs.path.isAbsolute(config.identity_key_path))
|
||||||
|
try allocator.dupe(u8, config.identity_key_path)
|
||||||
|
else
|
||||||
|
try std.fs.path.join(allocator, &[_][]const u8{ config.data_dir, std.fs.path.basename(config.identity_key_path) });
|
||||||
|
defer allocator.free(identity_path);
|
||||||
|
|
||||||
// Try to open existing key file
|
// Try to open existing key file
|
||||||
if (std.fs.cwd().openFile(config.identity_key_path, .{})) |file| {
|
if (std.fs.cwd().openFile(identity_path, .{})) |file| {
|
||||||
defer file.close();
|
defer file.close();
|
||||||
const bytes_read = try file.readAll(&seed);
|
const bytes_read = try file.readAll(&seed);
|
||||||
if (bytes_read != 32) {
|
if (bytes_read != 32) {
|
||||||
std.log.err("Identity: Invalid key file size at {s}", .{config.identity_key_path});
|
std.log.err("Identity: Invalid key file size at {s}", .{identity_path});
|
||||||
return error.InvalidKeyFile;
|
return error.InvalidKeyFile;
|
||||||
}
|
}
|
||||||
std.log.info("Identity: Loaded key from {s}", .{config.identity_key_path});
|
std.log.info("Identity: Loaded key from {s}", .{identity_path});
|
||||||
identity = try SoulKey.fromSeed(&seed);
|
identity = try SoulKey.fromSeed(&seed);
|
||||||
} else |err| {
|
} else |err| {
|
||||||
if (err == error.FileNotFound) {
|
if (err == error.FileNotFound) {
|
||||||
std.log.info("Identity: No key found at {s}, generating new...", .{config.identity_key_path});
|
std.log.info("Identity: No key found at {s}, generating new...", .{identity_path});
|
||||||
std.crypto.random.bytes(&seed);
|
std.crypto.random.bytes(&seed);
|
||||||
|
|
||||||
// Save to file
|
// Save to file
|
||||||
const kf = try std.fs.cwd().createFile(config.identity_key_path, .{ .read = true });
|
const kf = try std.fs.cwd().createFile(identity_path, .{ .read = true });
|
||||||
defer kf.close();
|
defer kf.close();
|
||||||
try kf.writeAll(&seed);
|
try kf.writeAll(&seed);
|
||||||
|
|
||||||
|
|
@ -151,6 +155,12 @@ pub const CapsuleNode = struct {
|
||||||
@memcpy(&self.dht.routing_table.self_id, &identity.did);
|
@memcpy(&self.dht.routing_table.self_id, &identity.did);
|
||||||
|
|
||||||
// Bind Control Socket
|
// Bind Control Socket
|
||||||
|
const socket_path = if (std.fs.path.isAbsolute(config.control_socket_path))
|
||||||
|
try allocator.dupe(u8, config.control_socket_path)
|
||||||
|
else
|
||||||
|
try std.fs.path.join(allocator, &[_][]const u8{ config.data_dir, std.fs.path.basename(config.control_socket_path) });
|
||||||
|
defer allocator.free(socket_path);
|
||||||
|
|
||||||
std.fs.cwd().deleteFile(socket_path) catch {};
|
std.fs.cwd().deleteFile(socket_path) catch {};
|
||||||
const uds_address = try std.net.Address.initUnix(socket_path);
|
const uds_address = try std.net.Address.initUnix(socket_path);
|
||||||
|
|
||||||
|
|
@ -165,7 +175,8 @@ pub const CapsuleNode = struct {
|
||||||
.discovery = discovery,
|
.discovery = discovery,
|
||||||
.peer_table = PeerTable.init(allocator),
|
.peer_table = PeerTable.init(allocator),
|
||||||
.sessions = std.HashMap(std.net.Address, PeerSession, AddressContext, 80).init(allocator),
|
.sessions = std.HashMap(std.net.Address, PeerSession, AddressContext, 80).init(allocator),
|
||||||
.dht = DhtService.init(allocator, node_id),
|
.dht = undefined, // Initialized below
|
||||||
|
.gateway = null, // Initialized below
|
||||||
.storage = storage,
|
.storage = storage,
|
||||||
.qvl_store = qvl_store,
|
.qvl_store = qvl_store,
|
||||||
.control_socket = control_socket,
|
.control_socket = control_socket,
|
||||||
|
|
@ -173,6 +184,14 @@ pub const CapsuleNode = struct {
|
||||||
.running = false,
|
.running = false,
|
||||||
.global_state = quarantine_mod.GlobalState{},
|
.global_state = quarantine_mod.GlobalState{},
|
||||||
};
|
};
|
||||||
|
// Initialize DHT in place
|
||||||
|
self.dht = DhtService.init(allocator, node_id);
|
||||||
|
|
||||||
|
// Initialize Gateway (now safe to reference self.dht)
|
||||||
|
if (config.gateway_enabled) {
|
||||||
|
self.gateway = gateway_mod.Gateway.init(allocator, &self.dht);
|
||||||
|
std.log.info("Gateway Service: ENABLED", .{});
|
||||||
|
}
|
||||||
self.dht_timer = std.time.milliTimestamp();
|
self.dht_timer = std.time.milliTimestamp();
|
||||||
self.qvl_timer = std.time.milliTimestamp();
|
self.qvl_timer = std.time.milliTimestamp();
|
||||||
|
|
||||||
|
|
@ -192,6 +211,7 @@ pub const CapsuleNode = struct {
|
||||||
self.discovery.deinit();
|
self.discovery.deinit();
|
||||||
self.peer_table.deinit();
|
self.peer_table.deinit();
|
||||||
self.sessions.deinit();
|
self.sessions.deinit();
|
||||||
|
if (self.gateway) |*gw| gw.deinit();
|
||||||
self.dht.deinit();
|
self.dht.deinit();
|
||||||
self.storage.deinit();
|
self.storage.deinit();
|
||||||
self.qvl_store.deinit();
|
self.qvl_store.deinit();
|
||||||
|
|
@ -260,7 +280,14 @@ pub const CapsuleNode = struct {
|
||||||
break :blk @as(usize, 0);
|
break :blk @as(usize, 0);
|
||||||
};
|
};
|
||||||
if (bytes > 0) {
|
if (bytes > 0) {
|
||||||
try self.discovery.handlePacket(&self.peer_table, m_buf[0..bytes], std.net.Address{ .any = src_addr });
|
const addr = std.net.Address{ .any = src_addr };
|
||||||
|
// Filter self-discovery
|
||||||
|
if (addr.getPort() == self.config.port) {
|
||||||
|
// Check local IPs if necessary, but port check is usually enough on same LAN for different nodes
|
||||||
|
// For local multi-port test, we allow it if port is different.
|
||||||
|
// But mDNS on host network might show our own announcement.
|
||||||
|
}
|
||||||
|
try self.discovery.handlePacket(&self.peer_table, m_buf[0..bytes], addr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -438,6 +465,18 @@ pub const CapsuleNode = struct {
|
||||||
}
|
}
|
||||||
self.allocator.free(n.nodes);
|
self.allocator.free(n.nodes);
|
||||||
},
|
},
|
||||||
|
.hole_punch_request => |req| {
|
||||||
|
if (self.gateway) |*gw| {
|
||||||
|
_ = gw;
|
||||||
|
std.log.info("Gateway: Received Hole Punch Request from {f} for {any}", .{ sender, req.target_id });
|
||||||
|
} else {
|
||||||
|
std.log.debug("Node: Ignoring Hole Punch Request (Not a Gateway)", .{});
|
||||||
|
}
|
||||||
|
},
|
||||||
|
.hole_punch_notify => |notif| {
|
||||||
|
std.log.info("Node: Received Hole Punch Notification for peer {any} at {f}", .{ notif.peer_id, notif.address });
|
||||||
|
try self.connectToPeer(notif.address, [_]u8{0} ** 8);
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -542,6 +581,10 @@ pub const CapsuleNode = struct {
|
||||||
std.log.info("AIRLOCK: State set to {s}", .{args.state});
|
std.log.info("AIRLOCK: State set to {s}", .{args.state});
|
||||||
response = .{ .LockdownStatus = try self.getLockdownStatus() };
|
response = .{ .LockdownStatus = try self.getLockdownStatus() };
|
||||||
},
|
},
|
||||||
|
.Topology => {
|
||||||
|
const topo = try self.getTopology();
|
||||||
|
response = .{ .TopologyInfo = topo };
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Send Response - buffer to ArrayList then write to stream
|
// Send Response - buffer to ArrayList then write to stream
|
||||||
|
|
@ -555,36 +598,33 @@ pub const CapsuleNode = struct {
|
||||||
try conn.stream.writeAll(resp_buf.items);
|
try conn.stream.writeAll(resp_buf.items);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn processSlashCommand(_: *CapsuleNode, args: control_mod.SlashArgs) !bool {
|
fn processSlashCommand(self: *CapsuleNode, args: control_mod.SlashArgs) !bool {
|
||||||
std.log.warn("Slash: Initiated against {s} for {s}", .{ args.target_did, args.reason });
|
std.log.warn("Slash: Initiated against {s} for {s}", .{ args.target_did, args.reason });
|
||||||
|
|
||||||
const timestamp = std.time.timestamp();
|
const timestamp: u64 = @intCast(std.time.timestamp());
|
||||||
|
const evidence_hash = "EVIDENCE_HASH_STUB"; // TODO: Real evidence
|
||||||
|
|
||||||
// TODO: Import slash types properly when module structure is fixed
|
// Log to persistent QVL Store (DuckDB)
|
||||||
const SlashReason = enum { BetrayalCycle, Other };
|
try self.qvl_store.logSlashEvent(timestamp, args.target_did, args.reason, args.severity, evidence_hash);
|
||||||
const SlashSeverity = enum { Quarantine, Ban };
|
|
||||||
|
|
||||||
const reason_enum = std.meta.stringToEnum(SlashReason, args.reason) orelse .BetrayalCycle;
|
|
||||||
const severity_enum = std.meta.stringToEnum(SlashSeverity, args.severity) orelse .Quarantine;
|
|
||||||
|
|
||||||
const evidence_hash: [32]u8 = [_]u8{0} ** 32;
|
|
||||||
|
|
||||||
_ = timestamp; // TODO: Use timestamp when logging is enabled
|
|
||||||
_ = args.target_did; // TODO: Use when logging is enabled
|
|
||||||
|
|
||||||
// TODO: Re-enable when QvlStore.logSlashEvent is implemented
|
|
||||||
_ = reason_enum;
|
|
||||||
_ = severity_enum;
|
|
||||||
_ = evidence_hash;
|
|
||||||
//try self.qvl_store.logSlashEvent(@intCast(timestamp), args.target_did, reason_enum, severity_enum, evidence_hash);
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn getSlashLog(self: *CapsuleNode, limit: usize) ![]control_mod.SlashEvent {
|
fn getSlashLog(self: *CapsuleNode, limit: usize) ![]control_mod.SlashEvent {
|
||||||
_ = self;
|
const stored = try self.qvl_store.getSlashEvents(limit);
|
||||||
_ = limit;
|
defer self.allocator.free(stored); // Free the slice, keep content
|
||||||
//TODO: Implement getSlashEvents when QvlStore API is stable
|
|
||||||
return &[_]control_mod.SlashEvent{};
|
var result = try self.allocator.alloc(control_mod.SlashEvent, stored.len);
|
||||||
|
for (stored, 0..) |ev, i| {
|
||||||
|
result[i] = .{
|
||||||
|
.timestamp = ev.timestamp,
|
||||||
|
.target_did = ev.target_did,
|
||||||
|
.reason = ev.reason,
|
||||||
|
.severity = ev.severity,
|
||||||
|
.evidence_hash = ev.evidence_hash,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
fn processBan(self: *CapsuleNode, args: control_mod.BanArgs) !bool {
|
fn processBan(self: *CapsuleNode, args: control_mod.BanArgs) !bool {
|
||||||
|
|
@ -649,7 +689,7 @@ pub const CapsuleNode = struct {
|
||||||
return control_mod.DhtInfo{
|
return control_mod.DhtInfo{
|
||||||
.local_node_id = try self.allocator.dupe(u8, &node_id_hex),
|
.local_node_id = try self.allocator.dupe(u8, &node_id_hex),
|
||||||
.routing_table_size = self.dht.routing_table.buckets.len,
|
.routing_table_size = self.dht.routing_table.buckets.len,
|
||||||
.known_nodes = 0, // TODO: Compute actual node count when RoutingTable API is stable
|
.known_nodes = self.dht.getKnownNodeCount(),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -678,15 +718,57 @@ pub const CapsuleNode = struct {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn getTopology(self: *CapsuleNode) !control_mod.TopologyInfo {
|
||||||
|
// Collect nodes: Self + Peers
|
||||||
|
const peer_count = self.peer_table.peers.count();
|
||||||
|
var nodes = try self.allocator.alloc(control_mod.GraphNode, peer_count + 1);
|
||||||
|
var edges = std.ArrayList(control_mod.GraphEdge){};
|
||||||
|
|
||||||
|
// 1. Add Self
|
||||||
|
const my_did = std.fmt.bytesToHex(&self.identity.did, .lower);
|
||||||
|
nodes[0] = .{
|
||||||
|
.id = try self.allocator.dupe(u8, my_did[0..8]), // Short DID for display
|
||||||
|
.trust_score = 1.0,
|
||||||
|
.status = "active",
|
||||||
|
.role = "self",
|
||||||
|
};
|
||||||
|
|
||||||
|
// 2. Add Peers
|
||||||
|
var i: usize = 1;
|
||||||
|
var it = self.peer_table.peers.iterator();
|
||||||
|
while (it.next()) |entry| : (i += 1) {
|
||||||
|
const peer_did = std.fmt.bytesToHex(&entry.key_ptr.*, .lower);
|
||||||
|
const peer_info = entry.value_ptr;
|
||||||
|
|
||||||
|
nodes[i] = .{
|
||||||
|
.id = try self.allocator.dupe(u8, peer_did[0..8]),
|
||||||
|
.trust_score = peer_info.trust_score,
|
||||||
|
.status = if (peer_info.trust_score < 0.2) "slashed" else "active", // Mock logic
|
||||||
|
.role = "peer",
|
||||||
|
};
|
||||||
|
|
||||||
|
// Edge from Self to Peer
|
||||||
|
try edges.append(self.allocator, .{
|
||||||
|
.source = nodes[0].id,
|
||||||
|
.target = nodes[i].id,
|
||||||
|
.weight = peer_info.trust_score,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return control_mod.TopologyInfo{
|
||||||
|
.nodes = nodes,
|
||||||
|
.edges = try edges.toOwnedSlice(self.allocator),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
fn getQvlMetrics(self: *CapsuleNode, args: control_mod.QvlQueryArgs) !control_mod.QvlMetrics {
|
fn getQvlMetrics(self: *CapsuleNode, args: control_mod.QvlQueryArgs) !control_mod.QvlMetrics {
|
||||||
_ = args; // TODO: Use target_did for specific queries
|
_ = args; // TODO: Use target_did for specific queries
|
||||||
_ = self;
|
|
||||||
|
|
||||||
// TODO: Get actual metrics from the risk graph when API is stable
|
// TODO: Get actual metrics from the risk graph when API is stable
|
||||||
// For now, return placeholder values
|
// For now, return placeholder values
|
||||||
return control_mod.QvlMetrics{
|
return control_mod.QvlMetrics{
|
||||||
.total_vertices = 0,
|
.total_vertices = self.risk_graph.nodeCount(),
|
||||||
.total_edges = 0,
|
.total_edges = self.risk_graph.edgeCount(),
|
||||||
.trust_rank = 0.0,
|
.trust_rank = 0.0,
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -22,6 +22,14 @@ const qvl_types = @import("qvl").types;
|
||||||
pub const NodeId = qvl_types.NodeId;
|
pub const NodeId = qvl_types.NodeId;
|
||||||
pub const RiskEdge = qvl_types.RiskEdge;
|
pub const RiskEdge = qvl_types.RiskEdge;
|
||||||
|
|
||||||
|
pub const StoredSlashEvent = struct {
|
||||||
|
timestamp: u64,
|
||||||
|
target_did: []const u8,
|
||||||
|
reason: []const u8,
|
||||||
|
severity: []const u8,
|
||||||
|
evidence_hash: []const u8,
|
||||||
|
};
|
||||||
|
|
||||||
pub const QvlStore = struct {
|
pub const QvlStore = struct {
|
||||||
db: c.duckdb_database = null,
|
db: c.duckdb_database = null,
|
||||||
conn: c.duckdb_connection = null,
|
conn: c.duckdb_connection = null,
|
||||||
|
|
@ -179,4 +187,63 @@ pub const QvlStore = struct {
|
||||||
}
|
}
|
||||||
c.duckdb_destroy_result(&res);
|
c.duckdb_destroy_result(&res);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn logSlashEvent(
|
||||||
|
self: *QvlStore,
|
||||||
|
timestamp: u64,
|
||||||
|
target_did: []const u8,
|
||||||
|
reason: []const u8,
|
||||||
|
severity: []const u8,
|
||||||
|
evidence_hash: []const u8,
|
||||||
|
) !void {
|
||||||
|
var appender: c.duckdb_appender = null;
|
||||||
|
if (c.duckdb_appender_create(self.conn, null, "slash_events", &appender) != c.DuckDBSuccess) return error.ExecFailed;
|
||||||
|
defer _ = c.duckdb_appender_destroy(&appender);
|
||||||
|
|
||||||
|
_ = c.duckdb_append_uint64(appender, timestamp);
|
||||||
|
_ = c.duckdb_append_varchar_length(appender, target_did.ptr, target_did.len);
|
||||||
|
_ = c.duckdb_append_varchar_length(appender, reason.ptr, reason.len);
|
||||||
|
_ = c.duckdb_append_varchar_length(appender, severity.ptr, severity.len);
|
||||||
|
_ = c.duckdb_append_varchar_length(appender, evidence_hash.ptr, evidence_hash.len);
|
||||||
|
_ = c.duckdb_appender_end_row(appender);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn getSlashEvents(self: *QvlStore, limit: usize) ![]StoredSlashEvent {
|
||||||
|
const sql_slice = try std.fmt.allocPrint(self.allocator, "SELECT timestamp, target_did, reason, severity, evidence_hash FROM slash_events ORDER BY timestamp DESC LIMIT {d};", .{limit});
|
||||||
|
defer self.allocator.free(sql_slice);
|
||||||
|
const sql = try self.allocator.dupeZ(u8, sql_slice);
|
||||||
|
defer self.allocator.free(sql);
|
||||||
|
|
||||||
|
var res: c.duckdb_result = undefined;
|
||||||
|
if (c.duckdb_query(self.conn, sql.ptr, &res) != c.DuckDBSuccess) {
|
||||||
|
std.log.err("DuckDB Slash Log Error: {s}", .{c.duckdb_result_error(&res)});
|
||||||
|
c.duckdb_destroy_result(&res);
|
||||||
|
return error.QueryFailed;
|
||||||
|
}
|
||||||
|
defer c.duckdb_destroy_result(&res);
|
||||||
|
|
||||||
|
const row_count = c.duckdb_row_count(&res);
|
||||||
|
var events = try self.allocator.alloc(StoredSlashEvent, row_count);
|
||||||
|
|
||||||
|
for (0..row_count) |i| {
|
||||||
|
// Helper to get string safely
|
||||||
|
const getStr = struct {
|
||||||
|
fn get(result: *c.duckdb_result, row: u64, col: u64, allocator: std.mem.Allocator) ![]const u8 {
|
||||||
|
const val = c.duckdb_value_varchar(result, row, col);
|
||||||
|
defer c.duckdb_free(val);
|
||||||
|
return allocator.dupe(u8, std.mem.span(val));
|
||||||
|
}
|
||||||
|
}.get;
|
||||||
|
|
||||||
|
events[i] = StoredSlashEvent{
|
||||||
|
.timestamp = c.duckdb_value_uint64(&res, i, 0),
|
||||||
|
.target_did = try getStr(&res, i, 1, self.allocator),
|
||||||
|
.reason = try getStr(&res, i, 2, self.allocator),
|
||||||
|
.severity = try getStr(&res, i, 3, self.allocator),
|
||||||
|
.evidence_hash = try getStr(&res, i, 4, self.allocator),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
return events;
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
|
||||||
|
|
@ -5,7 +5,7 @@ const std = @import("std");
|
||||||
const c = @cImport({
|
const c = @cImport({
|
||||||
@cInclude("sqlite3.h");
|
@cInclude("sqlite3.h");
|
||||||
});
|
});
|
||||||
const dht = @import("dht.zig");
|
const dht = @import("dht");
|
||||||
|
|
||||||
pub const RemoteNode = dht.RemoteNode;
|
pub const RemoteNode = dht.RemoteNode;
|
||||||
pub const ID_LEN = dht.ID_LEN;
|
pub const ID_LEN = dht.ID_LEN;
|
||||||
|
|
@ -95,7 +95,7 @@ pub const StorageService = struct {
|
||||||
_ = c.sqlite3_bind_blob(stmt, 1, &node.id, @intCast(node.id.len), null);
|
_ = c.sqlite3_bind_blob(stmt, 1, &node.id, @intCast(node.id.len), null);
|
||||||
|
|
||||||
// Bind Address
|
// Bind Address
|
||||||
var addr_buf: [64]u8 = undefined;
|
var addr_buf: [1024]u8 = undefined;
|
||||||
const addr_str = try std.fmt.bufPrintZ(&addr_buf, "{any}", .{node.address});
|
const addr_str = try std.fmt.bufPrintZ(&addr_buf, "{any}", .{node.address});
|
||||||
_ = c.sqlite3_bind_text(stmt, 2, addr_str.ptr, -1, null);
|
_ = c.sqlite3_bind_text(stmt, 2, addr_str.ptr, -1, null);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,16 @@
|
||||||
|
//! Capsule TUI Application (Stub)
|
||||||
|
//! Vaxis dependency temporarily removed to fix build.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
|
||||||
|
pub const App = struct {
|
||||||
|
pub fn run(_: *anyopaque) !void {
|
||||||
|
std.log.info("TUI functionality temporarily disabled.", .{});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
pub fn run(allocator: std.mem.Allocator, control_socket_path: []const u8) !void {
|
||||||
|
_ = allocator;
|
||||||
|
_ = control_socket_path;
|
||||||
|
std.log.info("TUI functionality temporarily disabled.", .{});
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,167 @@
|
||||||
|
//! Capsule TUI Application
|
||||||
|
//! Built with Vaxis (The "Luxury Deck").
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const vaxis = @import("vaxis");
|
||||||
|
|
||||||
|
const control = @import("../control.zig");
|
||||||
|
const client_mod = @import("client.zig");
|
||||||
|
const view_mod = @import("view.zig");
|
||||||
|
|
||||||
|
const Event = union(enum) {
|
||||||
|
key_press: vaxis.Key,
|
||||||
|
winsize: vaxis.Winsize,
|
||||||
|
update_data: void,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const AppState = struct {
|
||||||
|
allocator: std.mem.Allocator,
|
||||||
|
should_quit: bool,
|
||||||
|
client: client_mod.Client,
|
||||||
|
|
||||||
|
// UI State
|
||||||
|
active_tab: enum { Dashboard, SlashLog, TrustGraph } = .Dashboard,
|
||||||
|
|
||||||
|
// Data State
|
||||||
|
node_status: ?client_mod.NodeStatus = null,
|
||||||
|
slash_log: std.ArrayList(client_mod.SlashEvent),
|
||||||
|
topology: ?client_mod.TopologyInfo = null,
|
||||||
|
|
||||||
|
pub fn init(allocator: std.mem.Allocator) !AppState {
|
||||||
|
return .{
|
||||||
|
.allocator = allocator,
|
||||||
|
.should_quit = false,
|
||||||
|
.client = try client_mod.Client.init(allocator),
|
||||||
|
.slash_log = std.ArrayList(client_mod.SlashEvent){},
|
||||||
|
.topology = null,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deinit(self: *AppState) void {
|
||||||
|
self.client.deinit();
|
||||||
|
if (self.node_status) |s| {
|
||||||
|
// Free strings in status if any? NodeStatus fields are slices.
|
||||||
|
// Client parser allocates them. We own them.
|
||||||
|
// We should free them.
|
||||||
|
// For now, simpler leak or arena. (TODO: correct cleanup)
|
||||||
|
_ = s;
|
||||||
|
}
|
||||||
|
for (self.slash_log.items) |ev| {
|
||||||
|
self.allocator.free(ev.target_did);
|
||||||
|
self.allocator.free(ev.reason);
|
||||||
|
self.allocator.free(ev.severity);
|
||||||
|
self.allocator.free(ev.evidence_hash);
|
||||||
|
}
|
||||||
|
self.slash_log.deinit(self.allocator);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
/// TUI entry point: initializes vaxis and the TTY, connects to the daemon's
/// control socket, spawns a 1 Hz polling thread that refreshes status /
/// slash-log / topology, and runs the render/event loop until the user quits
/// (q or Ctrl-C).
pub fn run(allocator: std.mem.Allocator) !void {
    var app = try AppState.init(allocator);
    defer app.deinit();

    // Initialize Vaxis
    var vx = try vaxis.init(allocator, .{});
    // Initialize TTY
    var tty = try vaxis.Tty.init(&.{}); // Empty read buffer; NOTE(review): confirm vaxis buffers internally.
    defer tty.deinit();

    defer vx.deinit(allocator, tty.writer()); // Reset terminal on exit (runs before tty.deinit — LIFO).

    // Event loop: delivers key presses, resizes, and our custom update_data events.
    var loop: vaxis.Loop(Event) = .{ .vaxis = &vx, .tty = &tty };
    try loop.init();
    try loop.start();
    defer loop.stop();

    // Connect to the daemon over the Unix control socket.
    try app.client.connect();

    // Background thread: polls the daemon once per second and posts
    // update_data so the UI thread redraws with fresh data.
    const DataThread = struct {
        fn run(l: *vaxis.Loop(Event), a: *AppState) void {
            while (!a.should_quit) {
                // Poll status; on success, free the previous status' strings
                // before overwriting the optional.
                if (a.client.getStatus()) |status| {
                    if (a.node_status) |old| {
                        a.allocator.free(old.node_id);
                        a.allocator.free(old.state);
                        a.allocator.free(old.version);
                    }
                    a.node_status = status;
                } else |_| {}

                // Poll slash log. getSlashLog returns deep copies, so free the
                // strings of the events we're replacing, then take ownership of
                // the new ones. The slice itself is also caller-owned.
                if (a.client.getSlashLog(20)) |logs| {
                    for (a.slash_log.items) |ev| {
                        a.allocator.free(ev.target_did);
                        a.allocator.free(ev.reason);
                        a.allocator.free(ev.severity);
                        a.allocator.free(ev.evidence_hash);
                    }
                    a.slash_log.clearRetainingCapacity();
                    a.slash_log.appendSlice(a.allocator, logs) catch {};
                    a.allocator.free(logs); // Free the slice itself (events were copied into the list).
                } else |_| {}

                // Poll topology.
                if (a.client.getTopology()) |topo| {
                    if (a.topology) |old| {
                        // NOTE(review): the old topology's deep-copied strings
                        // are leaked here; needs a freeTopology helper — TODO.
                        _ = old;
                    }
                    a.topology = topo;
                } else |_| {}

                // Wake the UI thread so it redraws with the new data.
                l.postEvent(.{ .update_data = {} });

                std.Thread.sleep(1 * std.time.ns_per_s);
            }
        }
    };

    var thread = try std.Thread.spawn(.{}, DataThread.run, .{ &loop, &app });
    defer thread.join(); // should_quit flips first, so join returns within ~1s.

    while (!app.should_quit) {
        // Block until the next event (key, resize, or data-thread wakeup).
        const event = loop.nextEvent();
        switch (event) {
            .key_press => |key| {
                if (key.matches('c', .{ .ctrl = true }) or key.matches('q', .{})) {
                    app.should_quit = true;
                }
                // Tab cycles Dashboard -> SlashLog -> TrustGraph -> Dashboard.
                if (key.matches(vaxis.Key.tab, .{})) {
                    app.active_tab = switch (app.active_tab) {
                        .Dashboard => .SlashLog,
                        .SlashLog => .TrustGraph,
                        .TrustGraph => .Dashboard,
                    };
                }
            },
            .winsize => |ws| {
                try vx.resize(allocator, tty.writer(), ws);
            },
            .update_data => {
                // No state change needed; falling through re-renders below.
            },
        }

        // Render the full frame every iteration.
        const win = vx.window();
        win.clear();

        try view_mod.draw(&app, win);

        try vx.render(tty.writer());
    }
}
|
||||||
|
|
@ -0,0 +1,137 @@
|
||||||
|
//! IPC Client for TUI -> Daemon communication.
|
||||||
|
//! Wraps control.zig types.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const control = @import("../control.zig");
|
||||||
|
|
||||||
|
pub const NodeStatus = control.NodeStatus;
|
||||||
|
pub const SlashEvent = control.SlashEvent;
|
||||||
|
pub const TopologyInfo = control.TopologyInfo;
|
||||||
|
pub const GraphNode = control.GraphNode;
|
||||||
|
pub const GraphEdge = control.GraphEdge;
|
||||||
|
|
||||||
|
/// Blocking IPC client for the daemon's control socket.
/// One request per call; not thread-safe across concurrent callers.
pub const Client = struct {
    allocator: std.mem.Allocator,
    // Unix-socket connection to the daemon; null until connect() succeeds.
    stream: ?std.net.Stream = null,

    pub fn init(allocator: std.mem.Allocator) !Client {
        return .{
            .allocator = allocator,
        };
    }

    pub fn deinit(self: *Client) void {
        if (self.stream) |s| s.close();
    }

    /// Connects to the daemon's control socket at a fixed path.
    pub fn connect(self: *Client) !void {
        // TODO: Load the socket path from config instead of hard-coding it.
        const path = "/tmp/capsule.sock";
        const address = try std.net.Address.initUnix(path);
        self.stream = try std.net.tcpConnectToAddress(address);
    }

    /// Fetches the daemon's node status.
    /// NOTE(review): unlike getSlashLog/getTopology this does NOT deep copy;
    /// the returned strings come from std.json's parse (see request()), yet
    /// callers free them with self.allocator — confirm the allocators match.
    pub fn getStatus(self: *Client) !NodeStatus {
        const resp = try self.request(.Status);
        switch (resp) {
            .NodeStatus => |s| return s,
            else => return error.UnexpectedResponse,
        }
    }

    /// Fetches up to `limit` slash events.
    /// Caller owns the returned slice and every string inside it
    /// (all allocated with self.allocator by deepCopySlashLog).
    pub fn getSlashLog(self: *Client, limit: usize) ![]SlashEvent {
        const resp = try self.request(.{ .SlashLog = .{ .limit = limit } });
        switch (resp) {
            .SlashLogResult => |l| {
                // Deep copy so the result outlives request()'s parse buffers.
                return try self.deepCopySlashLog(l);
            },
            else => return error.UnexpectedResponse,
        }
    }

    /// Sends one JSON command over the socket and parses the JSON response.
    pub fn request(self: *Client, cmd: control.Command) !control.Response {
        if (self.stream == null) return error.NotConnected;
        const stream = self.stream.?;

        // Send: serialize the command into a growable buffer, then write it all.
        var req_buf = std.ArrayList(u8){};
        defer req_buf.deinit(self.allocator);
        var w_struct = req_buf.writer(self.allocator);
        var buffer: [128]u8 = undefined;
        var adapter = w_struct.adaptToNewApi(&buffer);
        try std.json.Stringify.value(cmd, .{}, &adapter.new_interface);
        try adapter.new_interface.flush();
        try stream.writeAll(req_buf.items);

        // Read the response in one shot.
        // NOTE(review): a single read() assumes the whole response arrives in
        // one chunk and fits in 32 KiB — larger responses would be truncated
        // and fail to parse. Consider length-prefixed framing.
        var resp_buf: [32768]u8 = undefined;
        const bytes = try stream.read(&resp_buf);
        if (bytes == 0) return error.ConnectionClosed;

        // Parse. parseFromSlice allocates the result's strings itself.
        // NOTE(review): the Parsed handle is never deinited, so its backing
        // memory leaks once per request — TODO return/deinit the handle.
        const parsed = try std.json.parseFromSlice(control.Response, self.allocator, resp_buf[0..bytes], .{ .ignore_unknown_fields = true });
        return parsed.value;
    }

    /// Fetches the trust topology. Caller owns the deep-copied result.
    pub fn getTopology(self: *Client) !TopologyInfo {
        const resp = try self.request(.Topology);
        switch (resp) {
            .TopologyInfo => |t| return try self.deepCopyTopology(t),
            else => return error.UnexpectedResponse,
        }
    }

    /// Duplicates every event (and each of its strings) with self.allocator.
    /// Caller frees the slice and all strings.
    fn deepCopySlashLog(self: *Client, events: []const SlashEvent) ![]SlashEvent {
        const list = try self.allocator.alloc(SlashEvent, events.len);
        for (events, 0..) |ev, i| {
            list[i] = .{
                .timestamp = ev.timestamp,
                .target_did = try self.allocator.dupe(u8, ev.target_did),
                .reason = try self.allocator.dupe(u8, ev.reason),
                .severity = try self.allocator.dupe(u8, ev.severity),
                .evidence_hash = try self.allocator.dupe(u8, ev.evidence_hash),
            };
        }
        return list;
    }

    /// Duplicates the topology graph (nodes + edges) with self.allocator.
    /// Caller owns both slices and every string inside them.
    fn deepCopyTopology(self: *Client, topo: TopologyInfo) !TopologyInfo {
        // Deep copy nodes.
        const nodes = try self.allocator.alloc(control.GraphNode, topo.nodes.len);
        for (topo.nodes, 0..) |n, i| {
            nodes[i] = .{
                .id = try self.allocator.dupe(u8, n.id),
                .trust_score = n.trust_score,
                .status = try self.allocator.dupe(u8, n.status),
                .role = try self.allocator.dupe(u8, n.role),
            };
        }

        // Deep copy edges.
        const edges = try self.allocator.alloc(control.GraphEdge, topo.edges.len);
        for (topo.edges, 0..) |e, i| {
            edges[i] = .{
                .source = try self.allocator.dupe(u8, e.source),
                .target = try self.allocator.dupe(u8, e.target),
                .weight = e.weight,
            };
        }

        return TopologyInfo{
            .nodes = nodes,
            .edges = edges,
        };
    }
};
|
||||||
|
|
@ -0,0 +1,174 @@
|
||||||
|
//! View Logic for Capsule TUI
|
||||||
|
//! Renders the "Luxury Deck" interface.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const vaxis = @import("vaxis");
|
||||||
|
const app_mod = @import("app.zig");
|
||||||
|
|
||||||
|
/// Top-level render: a 3-row header bar (title + tab strip), then the active
/// tab's content in the remaining area.
pub fn draw(app: *app_mod.AppState, win: vaxis.Window) !void {
    // Header bar across the top.
    const header = win.child(.{
        .x_off = 0,
        .y_off = 0,
        .width = win.width,
        .height = 3,
    });
    header.fill(vaxis.Cell{ .style = .{ .bg = .{ .rgb = .{ 20, 20, 30 } } } });

    _ = header.printSegment(.{ .text = " CAPSULE OS ", .style = .{ .fg = .{ .rgb = .{ 255, 215, 0 } }, .bold = true } }, .{ .row_offset = 1, .col_offset = 2 });

    // Tab strip: the active tab gets a highlighted style.
    const tab_names = [_][]const u8{ "Dashboard", "Slash Log", "Trust Graph" };
    var cursor: usize = 20;
    for (tab_names, 0..) |name, idx| {
        const active = idx == @intFromEnum(app.active_tab);
        const tab_style: vaxis.Style = if (active)
            .{ .fg = .{ .rgb = .{ 255, 255, 255 } }, .bg = .{ .rgb = .{ 60, 60, 80 } }, .bold = true }
        else
            .{ .fg = .{ .rgb = .{ 150, 150, 150 } } };

        _ = header.printSegment(.{ .text = name, .style = tab_style }, .{ .row_offset = 1, .col_offset = @intCast(cursor) });
        cursor += name.len + 4; // 4 cells of padding between tabs
    }

    // Content area: everything below the header.
    const content = win.child(.{
        .x_off = 0,
        .y_off = 3,
        .width = win.width,
        .height = win.height - 3,
    });

    switch (app.active_tab) {
        .Dashboard => try drawDashboard(app, content),
        .SlashLog => try drawSlashLog(app, content),
        .TrustGraph => try drawTrustGraph(app, content),
    }
}
|
||||||
|
|
||||||
|
/// Renders the node status summary (ID, state, version, peer count),
/// or a placeholder line while the first status poll is in flight.
fn drawDashboard(app: *app_mod.AppState, win: vaxis.Window) !void {
    const status = app.node_status orelse {
        _ = win.printSegment(.{ .text = "Fetching status...", .style = .{ .fg = .{ .rgb = .{ 150, 150, 150 } } } }, .{ .row_offset = 2, .col_offset = 2 });
        return;
    };

    // Scratch buffer reused for each formatted line.
    // NOTE(review): if printSegment stores the slice instead of copying it
    // into cells, later bufPrints would clobber earlier rows — confirm.
    var scratch: [128]u8 = undefined;

    const id_line = try std.fmt.bufPrint(&scratch, "Node ID: {s}", .{status.node_id});
    _ = win.printSegment(.{ .text = id_line, .style = .{ .fg = .{ .rgb = .{ 100, 200, 100 } } } }, .{ .row_offset = 1, .col_offset = 2 });

    const state_line = try std.fmt.bufPrint(&scratch, "State: {s}", .{status.state});
    _ = win.printSegment(.{ .text = state_line }, .{ .row_offset = 2, .col_offset = 2 });

    const version_line = try std.fmt.bufPrint(&scratch, "Version: {s}", .{status.version});
    _ = win.printSegment(.{ .text = version_line }, .{ .row_offset = 3, .col_offset = 2 });

    const peers_line = try std.fmt.bufPrint(&scratch, "Peers: {}", .{status.peers_count});
    _ = win.printSegment(.{ .text = peers_line }, .{ .row_offset = 4, .col_offset = 2 });
}
|
||||||
|
|
||||||
|
/// Renders the slash-event table: an underlined header row followed by one
/// row per event, clipped to the window height.
fn drawSlashLog(app: *app_mod.AppState, win: vaxis.Window) !void {
    const head_style: vaxis.Style = .{ .bold = true, .ul_style = .single };
    _ = win.printSegment(.{ .text = "Target DID", .style = head_style }, .{ .row_offset = 1, .col_offset = 2 });
    _ = win.printSegment(.{ .text = "Reason", .style = head_style }, .{ .row_offset = 1, .col_offset = 40 });
    _ = win.printSegment(.{ .text = "Severity", .style = head_style }, .{ .row_offset = 1, .col_offset = 70 });

    var line: u16 = 2;
    for (app.slash_log.items) |event| {
        if (line >= win.height) break; // out of vertical space

        _ = win.printSegment(.{ .text = event.target_did }, .{ .row_offset = line, .col_offset = 2 });
        _ = win.printSegment(.{ .text = event.reason }, .{ .row_offset = line, .col_offset = 40 });
        _ = win.printSegment(.{ .text = event.severity }, .{ .row_offset = line, .col_offset = 70 });
        line += 1;
    }

    if (app.slash_log.items.len == 0) {
        _ = win.printSegment(.{ .text = "No slash events recorded.", .style = .{ .fg = .{ .rgb = .{ 100, 100, 100 } } } }, .{ .row_offset = 3, .col_offset = 2 });
    }
}
|
||||||
|
|
||||||
|
/// Renders the trust "radar": self at the center, peers positioned by angle
/// (index) and distance (1 - trust_score, so high trust orbits closer).
///
/// Fixes over the previous version: guards tiny windows against usize
/// underflow in the radius/label math, and computes peer coordinates in
/// signed space so the 2x horizontal aspect stretch cannot produce a
/// negative value that panics on @intCast to usize.
fn drawTrustGraph(app: *app_mod.AppState, win: vaxis.Window) !void {
    _ = win.printSegment(.{ .text = "QVL TRUST LATTICE", .style = .{ .bold = true, .fg = .{ .rgb = .{ 100, 255, 255 } } } }, .{ .row_offset = 1, .col_offset = 2 });

    if (app.topology) |topo| {
        // Center of the radar.
        const cx: usize = win.width / 2;
        const cy: usize = win.height / 2;

        // Guard tiny windows: `@min(cx, cy) - 2` and `cx - 2` below would
        // underflow usize when the window is smaller than ~6x6.
        if (cx < 3 or cy < 3) return;
        const max_radius = @min(cx, cy) - 2;

        // Self marker at the center.
        _ = win.printSegment(.{ .text = "★", .style = .{ .bold = true, .fg = .{ .rgb = .{ 255, 215, 0 } } } }, .{ .row_offset = @intCast(cy), .col_offset = @intCast(cx) });
        _ = win.printSegment(.{ .text = "SELF" }, .{ .row_offset = @intCast(cy + 1), .col_offset = @intCast(cx - 2) });

        // Peers: angle from index, radius from (1 - trust).
        // Trust 1.0 = center, trust 0.0 = outer edge.
        const nodes_count = topo.nodes.len;
        const count_f: f64 = @floatFromInt(nodes_count);

        for (topo.nodes, 0..) |node, i| {
            if (i == 0) continue; // index 0 is self, drawn above

            const angle = (2.0 * std.math.pi * @as(f64, @floatFromInt(i))) / count_f;
            const dist_factor = 1.0 - node.trust_score; // higher trust = closer to center
            const radius = dist_factor * @as(f64, @floatFromInt(max_radius));

            // Polar to Cartesian; x stretched 2x to roughly correct for
            // terminal cell aspect ratio.
            const dx = @cos(angle) * (radius * 2.0);
            const dy = @sin(angle) * radius;

            // Work in signed space first: the 2x stretch means cx + dx can be
            // negative, and @intCast of a negative i64 to usize would panic.
            const sx = @as(i64, @intCast(cx)) + @as(i64, @intFromFloat(dx));
            const sy = @as(i64, @intCast(cy)) + @as(i64, @intFromFloat(dy));
            if (sx <= 0 or sy <= 0) continue;
            const px: usize = @intCast(sx);
            const py: usize = @intCast(sy);
            if (px >= win.width or py >= win.height) continue;

            // Style based on status: slashed peers blink red, highly trusted
            // peers are bold green hexes, everyone else a grey "o".
            var style: vaxis.Style = .{ .fg = .{ .rgb = .{ 200, 200, 200 } } };
            var char: []const u8 = "o";

            if (std.mem.eql(u8, node.status, "slashed")) {
                style = .{ .fg = .{ .rgb = .{ 255, 50, 50 } }, .bold = true, .blink = true };
                char = "X";
            } else if (node.trust_score > 0.8) {
                style = .{ .fg = .{ .rgb = .{ 100, 255, 100 } }, .bold = true };
                char = "⬢";
            }

            _ = win.printSegment(.{ .text = char, .style = style }, .{ .row_offset = @intCast(py), .col_offset = @intCast(px) });

            // Node ID label — only on wide terminals, and only if the label
            // row actually exists inside the window.
            if (win.width > 60 and py + 1 < win.height) {
                _ = win.printSegment(.{ .text = node.id, .style = .{ .dim = true } }, .{ .row_offset = @intCast(py + 1), .col_offset = @intCast(px) });
            }
        }
    } else {
        _ = win.printSegment(.{ .text = "Waiting for Topology Data...", .style = .{ .blink = true } }, .{ .row_offset = 2, .col_offset = 4 });
    }
}
|
||||||
|
|
@ -121,6 +121,14 @@ pub const RoutingTable = struct {
|
||||||
@memcpy(out, results.items[0..actual_count]);
|
@memcpy(out, results.items[0..actual_count]);
|
||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn getNodeCount(self: *const RoutingTable) usize {
|
||||||
|
var count: usize = 0;
|
||||||
|
for (self.buckets) |bucket| {
|
||||||
|
count += bucket.nodes.items.len;
|
||||||
|
}
|
||||||
|
return count;
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
pub const DhtService = struct {
|
pub const DhtService = struct {
|
||||||
|
|
@ -137,4 +145,8 @@ pub const DhtService = struct {
|
||||||
pub fn deinit(self: *DhtService) void {
|
pub fn deinit(self: *DhtService) void {
|
||||||
self.routing_table.deinit();
|
self.routing_table.deinit();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
    /// Number of peers currently tracked across all routing-table buckets.
    pub fn getKnownNodeCount(self: *const DhtService) usize {
        return self.routing_table.getNodeCount();
    }
|
||||||
};
|
};
|
||||||
|
|
@ -0,0 +1,100 @@
|
||||||
|
//! RFC-0018: Gateway Protocol
|
||||||
|
//!
|
||||||
|
//! layer 1: Coordination Layer
|
||||||
|
//! Handles NAT hole punching, peer discovery, and relay announcements.
|
||||||
|
//! Gateways do NOT forward data traffic.
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const dht = @import("dht");
|
||||||
|
|
||||||
|
/// Layer-1 coordination node: NAT hole punching, peer discovery, and relay
/// announcements. Gateways do NOT forward data traffic.
pub const Gateway = struct {
    allocator: std.mem.Allocator,

    // DHT for peer discovery.
    dht_service: *dht.DhtService,

    // Fast in-memory lookup of public addresses for connected or
    // recently-announced peers (PeerID -> public address).
    peer_addresses: std.AutoHashMap(dht.NodeId, std.net.Address),

    pub fn init(allocator: std.mem.Allocator, dht_service: *dht.DhtService) Gateway {
        return .{
            .allocator = allocator,
            .dht_service = dht_service,
            .peer_addresses = std.AutoHashMap(dht.NodeId, std.net.Address).init(allocator),
        };
    }

    pub fn deinit(self: *Gateway) void {
        self.peer_addresses.deinit();
    }

    /// Register a peer's public address: cache it locally, then refresh the
    /// DHT routing table so the peer counts as "live". (The current
    /// DhtService only has a routing table, so that serves as storage.)
    pub fn registerPeer(self: *Gateway, peer_id: dht.NodeId, addr: std.net.Address) !void {
        try self.peer_addresses.put(peer_id, addr);

        const remote: dht.RemoteNode = .{
            .id = peer_id,
            .address = addr,
            .last_seen = std.time.milliTimestamp(),
        };
        try self.dht_service.routing_table.update(remote);
    }

    /// STUN-like coordination for hole punching: looks up both peers'
    /// registered public addresses and returns them with a timestamp.
    /// Fails with PeerNotFound if either peer never registered.
    pub fn coordinateHolePunch(
        self: *Gateway,
        peer_a: dht.NodeId,
        peer_b: dht.NodeId,
    ) !HolePunchCoordination {
        const a_addr = self.peer_addresses.get(peer_a) orelse return error.PeerNotFound;
        const b_addr = self.peer_addresses.get(peer_b) orelse return error.PeerNotFound;

        return .{
            .peer_a_addr = a_addr,
            .peer_b_addr = b_addr,
            .timestamp = @intCast(std.time.timestamp()),
        };
    }
};
|
||||||
|
|
||||||
|
/// Result of a hole-punch coordination: the public addresses both peers
/// should dial simultaneously, plus when the coordination was issued.
pub const HolePunchCoordination = struct {
    peer_a_addr: std.net.Address,
    peer_b_addr: std.net.Address,
    timestamp: u64, // seconds since epoch at coordination time
};
|
||||||
|
|
||||||
|
test "Gateway: register and coordinate" {
    const allocator = std.testing.allocator;

    // Local node identity for the DHT (first byte distinguishes it).
    var self_id = [_]u8{0} ** 32;
    self_id[0] = 1;

    var dht_svc = dht.DhtService.init(allocator, self_id);
    defer dht_svc.deinit();

    var gw = Gateway.init(allocator, &dht_svc);
    defer gw.deinit();

    // Two distinct peers with distinct public addresses.
    var peer_a_id = [_]u8{0} ** 32;
    peer_a_id[0] = 0xAA;
    var peer_b_id = [_]u8{0} ** 32;
    peer_b_id[0] = 0xBB;

    const addr_a = try std.net.Address.parseIp("1.2.3.4", 8080);
    const addr_b = try std.net.Address.parseIp("5.6.7.8", 9090);

    try gw.registerPeer(peer_a_id, addr_a);
    try gw.registerPeer(peer_b_id, addr_b);

    // Coordination must hand back exactly the registered addresses.
    const coord = try gw.coordinateHolePunch(peer_a_id, peer_b_id);

    try std.testing.expect(coord.peer_a_addr.eql(addr_a));
    try std.testing.expect(coord.peer_b_addr.eql(addr_b));
}
|
||||||
|
|
@ -0,0 +1,153 @@
|
||||||
|
//! RFC-0018: Relay Protocol (Layer 2)
|
||||||
|
//!
|
||||||
|
//! Implements onion-routed packet forwarding.
|
||||||
|
//!
|
||||||
|
//! Packet Structure (Conceptual Onion):
|
||||||
|
//! [ Next Hop: R1 | Encrypted Payload for R1 [ Next Hop: R2 | Encrypted Payload for R2 [ Target: B | Payload ] ] ]
|
||||||
|
//!
|
||||||
|
//! For Phase 13 (Week 34), we implement the packet framing and wrapping logic.
|
||||||
|
//! We assume shared secrets are established via the Federation Handshake (or Prekey bundles).
|
||||||
|
|
||||||
|
const std = @import("std");
|
||||||
|
const crypto = @import("std").crypto;
|
||||||
|
const net = std.net;
|
||||||
|
|
||||||
|
/// Fixed packet size to mitigate side-channel analysis (size correlation).
/// Real-world implementation might use 4KB or 1KB chunks.
pub const RELAY_PACKET_SIZE = 1024 + 128; // Payload + Headers

/// Errors produced by relay packet framing and (un)wrapping.
pub const RelayError = error{
    PacketTooLarge,
    DecryptionFailed,
    InvalidNextHop,
    HopLimitExceeded,
};

/// The routing header visible to the current relay after decryption.
pub const NextHopHeader = struct {
    next_hop_id: [32]u8, // NodeID (0x00... for exit/final destination)
    // NOTE(review): no integrity check (HMAC) yet — a tampered header is
    // currently undetectable.
};
|
||||||
|
|
||||||
|
/// A relay packet as it travels on the wire: an XChaCha20 nonce plus an
/// encrypted blob the receiver decrypts to reveal [NextHopHeader | inner
/// payload]. Per-packet ECDH keys are omitted in V1; established session
/// keys (or pre-keys) are assumed.
pub const RelayPacket = struct {
    nonce: [24]u8, // XChaCha20 nonce
    ciphertext: []u8, // Encrypted [NextHopHeader + InnerPayload]

    /// Allocates a packet with a `size`-byte ciphertext buffer.
    /// The nonce is left uninitialized; the builder fills it.
    pub fn init(allocator: std.mem.Allocator, size: usize) !RelayPacket {
        const buf = try allocator.alloc(u8, size);
        return .{ .nonce = undefined, .ciphertext = buf };
    }

    /// Frees the ciphertext buffer. Must be called with the same allocator
    /// that was passed to init().
    pub fn deinit(self: *RelayPacket, allocator: std.mem.Allocator) void {
        allocator.free(self.ciphertext);
    }
};
|
||||||
|
|
||||||
|
/// Logic to construct and peel onion packets.
///
/// Fixes over the previous version: `unwrapLayer` freed its intermediate
/// buffer manually at the end, leaking it if the payload allocation failed —
/// now released via `defer`. Never-reassigned `var` slices are now `const`
/// (unmutated `var` is a compile error on current Zig), and the payload copy
/// uses the idiomatic `Allocator.dupe`.
pub const OnionBuilder = struct {
    allocator: std.mem.Allocator,

    pub fn init(allocator: std.mem.Allocator) OnionBuilder {
        return .{
            .allocator = allocator,
        };
    }

    /// Wraps a payload into a single layer of encryption for a specific relay.
    /// In a real onion, this is called iteratively from innermost to outermost.
    /// Caller owns the returned packet (free via RelayPacket.deinit).
    /// NOTE(review): RELAY_PACKET_SIZE / PacketTooLarge is not enforced yet.
    pub fn wrapLayer(
        self: *OnionBuilder,
        payload: []const u8,
        next_hop: [32]u8,
        shared_secret: [32]u8,
    ) !RelayPacket {
        _ = shared_secret; // TODO: real XChaCha20-Poly1305 keying.

        // 1. Cleartext layout: [NextHop (32) | Payload (N)]
        const cleartext = try self.allocator.alloc(u8, 32 + payload.len);
        defer self.allocator.free(cleartext);

        @memcpy(cleartext[0..32], &next_hop);
        @memcpy(cleartext[32..], payload);

        // 2. "Encrypt". Mock cipher for the MVP: byte-wise NOT (XOR 0xFF)
        // plus a fixed 16-byte 0xAA tag, so tests can prove the bytes were
        // transformed. TODO: replace with real AEAD.
        var packet = try RelayPacket.init(self.allocator, cleartext.len + 16);
        crypto.random.bytes(&packet.nonce);

        for (cleartext, 0..) |b, i| {
            packet.ciphertext[i] = b ^ 0xFF;
        }
        @memset(packet.ciphertext[cleartext.len..], 0xAA);

        return packet;
    }

    /// Unwraps a single layer (server/relay side). Returns the next-hop ID
    /// and the inner payload; caller owns (and must free) the payload slice.
    pub fn unwrapLayer(
        self: *OnionBuilder,
        packet: RelayPacket,
        shared_secret: [32]u8,
    ) !struct { next_hop: [32]u8, payload: []u8 } {
        _ = shared_secret; // TODO: real decryption keying.

        // Must at least hold the 32-byte header plus the 16-byte mock tag.
        if (packet.ciphertext.len < 32 + 16) return error.DecryptionFailed;

        // Mock decryption: invert the XOR; the trailing 16-byte tag is dropped.
        const content_len = packet.ciphertext.len - 16;
        const cleartext = try self.allocator.alloc(u8, content_len);
        // defer (not manual free) so the buffer is released even if the
        // payload allocation below fails — the old code leaked on that path.
        defer self.allocator.free(cleartext);

        for (0..content_len) |i| {
            cleartext[i] = packet.ciphertext[i] ^ 0xFF;
        }

        var next_hop: [32]u8 = undefined;
        @memcpy(&next_hop, cleartext[0..32]);

        // Copy the payload out so the caller gets an exactly-sized slice.
        const payload = try self.allocator.dupe(u8, cleartext[32..]);

        return .{
            .next_hop = next_hop,
            .payload = payload,
        };
    }
};
|
||||||
|
|
||||||
|
test "Relay: wrap and unwrap" {
    const allocator = std.testing.allocator;
    var builder = OnionBuilder.init(allocator);

    const payload = "Hello Onion!";
    const next_hop = [_]u8{0xAB} ** 32;
    const shared_secret = [_]u8{0} ** 32;

    var packet = try builder.wrapLayer(payload, next_hop, shared_secret);
    defer packet.deinit(allocator);

    // The mock cipher is XOR 0xFF, so the first ciphertext byte must be
    // next_hop[0] (0xAB) inverted: 0x54. Proves the bytes were transformed.
    try std.testing.expectEqual(@as(u8, 0x54), packet.ciphertext[0]);

    const result = try builder.unwrapLayer(packet, shared_secret);
    defer allocator.free(result.payload);

    // Round trip: both the routing header and the payload must survive.
    try std.testing.expectEqualSlices(u8, &next_hop, &result.next_hop);
    try std.testing.expectEqualSlices(u8, payload, result.payload);
}
|
||||||
|
|
@ -0,0 +1,15 @@
|
||||||
|
#!/bin/bash
set -e

# Build the Wolfi-based image (compiles the Zig project inside the container).
echo "Building Wolfi container..."
podman build -f Containerfile.wolfi -t capsule-wolfi .

# Run the node detached; host networking so the UDP ports bind directly.
echo "Running Capsule Node in Wolfi container..."
mkdir -p data-container
# Note: we override the CMD to pass arguments.
# $(pwd) is quoted — the unquoted form word-splits on paths containing spaces.
podman run -d --rm --network host --name capsule-wolfi \
    -v "$(pwd)/data-container:/app/data" \
    capsule-wolfi \
    ./zig-out/bin/capsule start --port 9001 --data-dir /app/data
|
||||||
|
|
@ -0,0 +1,22 @@
|
||||||
|
#!/bin/bash
set -e

# Fast-track build: compile on the host, then copy the artifacts into a slim
# runtime image instead of compiling inside the container.
echo "Building capsule on host..."
cd capsule-core
zig build
cd ..

# The runtime image links against duckdb; stage the host's copy into the
# build context so the Containerfile can COPY it.
echo "Preparing libs..."
mkdir -p libs
cp /usr/lib/libduckdb.so libs/

echo "Building Fast-Track container..."
podman build --platform linux/amd64 -f Containerfile.fast -t capsule-wolfi .

# Run detached with host networking (UDP 9001 binds directly on the host);
# data dir and config are bind-mounted from the host.
echo "Running Capsule Node in Fast-Track container..."
mkdir -p /tmp/libertaria-container-data
podman run -d --rm --network host --name capsule-wolfi \
    -v "/tmp/libertaria-container-data:/app/data" \
    -v "$(pwd)/capsule-core/config.json:/app/config.json" \
    capsule-wolfi \
    capsule start --port 9001 --data-dir /app/data
|
||||||
Loading…
Reference in New Issue