Compare commits

9 Commits: 88d1f1401d ... 3483b42b74

| Author | SHA1 | Date |
|---|---|---|
| | 3483b42b74 | |
| | d26bbd939c | |
| | 11cef88386 | |
| | 17e552c7d1 | |
| | ac3a2f37f0 | |
| | da6aa7f50a | |
| | b4b3f3b1ce | |
| | 9695382eaf | |
| | 1e44dcfaf0 | |
@@ -0,0 +1,55 @@
# NIP Package Manager CI
name: NIP CI

on:
  push:
    branches: [unstable, main, stable, testing]
  pull_request:
    branches: [unstable, main]

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Verify toolchain
        run: nim --version | head -1

      - name: Install dependencies
        run: |
          nimble refresh 2>/dev/null || true
          nimble install -y xxhash 2>/dev/null || echo "WARN: xxhash install failed"

      - name: Build (release)
        run: nim c -d:release --opt:speed --hints:off -o:nip nip.nim

      - name: Verify binary
        run: |
          ls -lh nip
          file nip

  security-scan:
    name: Security Scan
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Check for sensitive content
        run: |
          FAIL=0
          for dir in .agent .vscode .kiro competitors; do
            if [ -d "$dir" ]; then
              echo "FAIL: Sensitive directory '$dir' found"
              FAIL=1
            fi
          done
          MATCHES=$(git grep -l '/home/markus' -- ':!.forgejo/' 2>/dev/null || true)
          if [ -n "$MATCHES" ]; then
            echo "FAIL: Internal paths found in:"
            echo "$MATCHES"
            FAIL=1
          fi
          if [ $FAIL -eq 1 ]; then exit 1; fi
          echo "Security scan PASSED"
@@ -40,8 +40,6 @@ coverage/
# ========================================================
.vscode/
.idea/
.kiro/
.gemini/

# ========================================================
# Environments
@@ -0,0 +1,77 @@
#!/bin/bash
# Voxis Diagnostic Build Protocol (ARM64 + LibreSSL)
set -e  # Exit immediately if any command fails

# --- 1. PATH RECONNAISSANCE ---
# Resolve absolute paths to stop relative path madness
BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$BASE_DIR"  # Ensure we are in core/nip/

VENDOR="$(realpath ../../core/nexus/vendor)"
ZSTD_PATH="$VENDOR/zstd-1.5.5/lib"
LIBRE_PATH="$VENDOR/libressl-3.8.2"

LIBRE_SSL_LIB="$LIBRE_PATH/ssl/.libs"
LIBRE_CRYPTO_LIB="$LIBRE_PATH/crypto/.libs"
LIBRE_TLS_LIB="$LIBRE_PATH/tls/.libs"

OUTPUT_DIR="$BASE_DIR/build/arm64"
TARGET_BIN="$OUTPUT_DIR/nip"

echo "🔎 [DIAGNOSTIC] Path Verification:"
echo "   Base:   $BASE_DIR"
echo "   Vendor: $VENDOR"
echo "   Output: $OUTPUT_DIR"

# Check Critical Assets
for lib in "$ZSTD_PATH/libzstd.a" "$LIBRE_SSL_LIB/libssl.a" "$LIBRE_CRYPTO_LIB/libcrypto.a"; do
    if [ ! -f "$lib" ]; then
        echo "❌ CRITICAL FAILURE: Missing Asset -> $lib"
        echo "   Did you run 'make' inside the library directories?"
        exit 1
    fi
done
echo "✅ All Static Libraries Found."

mkdir -p "$OUTPUT_DIR"

# --- 2. THE COMPILATION (FORCE MODE) ---
echo "🔨 [FORGE] Starting Compilation..."

# Put wrapper in PATH to filter x86 flags
export PATH="/tmp/gcc-wrapper-bin:$PATH"

# -f        : Force rebuild (ignore cache)
# --listCmd : SHOW ME THE LINKER COMMAND

nim c -f --listCmd \
  --skipProjCfg \
  --nimcache:/tmp/nip-arm64-cache \
  -d:release -d:ssl -d:openssl \
  -d:nimcrypto_disable_neon \
  -d:nimcrypto_no_asm \
  --cpu:arm64 --os:linux \
  --cc:gcc \
  --gcc.exe:aarch64-linux-gnu-gcc \
  --gcc.linkerexe:aarch64-linux-gnu-gcc \
  --dynlibOverride:ssl --dynlibOverride:crypto \
  --passC:"-I$ZSTD_PATH -I$LIBRE_PATH/include" \
  --passL:"-L$ZSTD_PATH -L$LIBRE_SSL_LIB -L$LIBRE_CRYPTO_LIB -L$LIBRE_TLS_LIB" \
  --passL:"-static -lssl -lcrypto -ltls -lzstd -lpthread -ldl -lm -lresolv" \
  --opt:size \
  --mm:orc \
  --threads:on \
  -o:"$TARGET_BIN" \
  src/nip.nim

# --- 3. POST-MORTEM ---
echo "---------------------------------------------------"
if [ -f "$TARGET_BIN" ]; then
    echo "✅ SUCCESS: Binary located at:"
    ls -l "$TARGET_BIN"
    file "$TARGET_BIN"
else
    echo "❌ FAILURE: Output file missing at $TARGET_BIN"
    echo "🔎 Searching for 'nip' binaries in the tree..."
    find . -type f -name nip -exec ls -l {} +
fi
@@ -0,0 +1,107 @@
#!/bin/bash
# Voxis Static Build Protocol (GCC Edition)
# Cross-compile nip for ARM64 using GNU toolchain

set -e

echo "🛡️  [VOXIS] ARM64 Static Build (GCC Cross-Compile)"
echo "=========================================================="
echo ""

# 1. Define Paths
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ZSTD_LIB_PATH="$SCRIPT_DIR/../nexus/vendor/zstd-1.5.5/lib"
ZSTD_INC_PATH="$SCRIPT_DIR/../nexus/vendor/zstd-1.5.5/lib"
SSL_LIB_PATH="$SCRIPT_DIR/../nexus/vendor/libressl-3.8.2"
SSL_INC_PATH="$SCRIPT_DIR/../nexus/vendor/libressl-3.8.2/include"
OUTPUT_DIR="$SCRIPT_DIR/build/arm64"

mkdir -p "$OUTPUT_DIR"

echo "📦 Zstd Library: $ZSTD_LIB_PATH/libzstd.a"
echo "📦 LibreSSL Libraries: $SSL_LIB_PATH/{crypto,ssl,tls}/.libs/*.a"
echo "📂 Output: $OUTPUT_DIR/nip"
echo ""

# 2. Verify libzstd.a exists and is ARM64
if [ ! -f "$ZSTD_LIB_PATH/libzstd.a" ]; then
    echo "❌ Error: libzstd.a not found at $ZSTD_LIB_PATH"
    exit 1
fi

if [ ! -f "$SSL_LIB_PATH/crypto/.libs/libcrypto.a" ]; then
    echo "❌ Error: libcrypto.a not found at $SSL_LIB_PATH/crypto/.libs/"
    exit 1
fi

echo "✅ Static libraries verified"
echo ""

# 3. Clean previous build
rm -f "$OUTPUT_DIR/nip"
rm -rf ~/.cache/nim/nip_*
echo "🧹 Cleaned previous builds"
echo ""

# 4. Compile with GCC cross-compiler
echo "🔨 Compiling nip for ARM64..."
echo "   This may take a few minutes..."
echo ""

# Put wrapper in PATH
export PATH="/tmp/gcc-wrapper-bin:$PATH"

nim c \
  --skipProjCfg \
  --nimcache:/tmp/nip-arm64-cache \
  -d:release \
  -d:danger \
  -d:ssl \
  -d:nimcrypto_disable_neon \
  -d:nimcrypto_no_asm \
  --dynlibOverride:ssl \
  --dynlibOverride:crypto \
  --cpu:arm64 \
  --os:linux \
  --cc:gcc \
  --gcc.exe:aarch64-linux-gnu-gcc \
  --gcc.linkerexe:aarch64-linux-gnu-gcc \
  --passC:"-I$ZSTD_INC_PATH -I$SSL_INC_PATH" \
  --passL:"-L$ZSTD_LIB_PATH -L$SSL_LIB_PATH/ssl/.libs -L$SSL_LIB_PATH/crypto/.libs -L$SSL_LIB_PATH/tls/.libs" \
  --passL:"-static -lssl -lcrypto -ltls -lzstd -lpthread -lm -lresolv" \
  --opt:size \
  --mm:orc \
  --threads:on \
  --out:"$OUTPUT_DIR/nip" \
  src/nip.nim

# 5. Verify output
if [ ! -f "$OUTPUT_DIR/nip" ]; then
    echo ""
    echo "❌ Build failed: binary not produced"
    exit 1
fi

echo ""
echo "✅ Build successful!"
echo ""
echo "📊 Binary info:"
ls -lh "$OUTPUT_DIR/nip"
file "$OUTPUT_DIR/nip"
echo ""

# Check if it's actually ARM64 and static
if file "$OUTPUT_DIR/nip" | grep -q "ARM aarch64"; then
    echo "✅ Architecture: ARM64 (aarch64)"
else
    echo "⚠️  Warning: Binary may not be ARM64"
fi

if file "$OUTPUT_DIR/nip" | grep -q "statically linked"; then
    echo "✅ Linking: Static"
else
    echo "⚠️  Warning: Binary may not be statically linked"
fi

echo ""
echo "🎯 Output: $OUTPUT_DIR/nip"
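The step labelled "Verify libzstd.a exists and is ARM64" above only checks that the file exists. A minimal sketch of an actual architecture check on the archive's first member, reusing the script's $ZSTD_LIB_PATH variable (an illustrative addition, not part of the committed script):

# Sketch: confirm the vendored libzstd.a actually contains aarch64 objects
FIRST_OBJ=$(ar t "$ZSTD_LIB_PATH/libzstd.a" | head -1)
ar p "$ZSTD_LIB_PATH/libzstd.a" "$FIRST_OBJ" | file -
# expected output mentions: "ELF 64-bit LSB relocatable, ARM aarch64"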
@@ -0,0 +1,105 @@
#!/bin/bash
# Voxis Static Build Protocol (GCC + Zstd + LibreSSL Edition)

set -e

echo "🛡️  [VOXIS] Linking Sovereign Artifact (ARM64 + LibreSSL)..."
echo ""

# --- 1. CONFIGURATION ---
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
VENDOR="$SCRIPT_DIR/../nexus/vendor"
ZSTD_PATH="$VENDOR/zstd-1.5.5/lib"
LIBRE_PATH="$VENDOR/libressl-3.8.2"

# LibreSSL hides static libs in subdirectories
LIBRE_SSL_LIB="$LIBRE_PATH/ssl/.libs"
LIBRE_CRYPTO_LIB="$LIBRE_PATH/crypto/.libs"
LIBRE_TLS_LIB="$LIBRE_PATH/tls/.libs"

OUTPUT_DIR="$SCRIPT_DIR/build/arm64"
mkdir -p "$OUTPUT_DIR"

# Verify libraries exist
if [ ! -f "$LIBRE_CRYPTO_LIB/libcrypto.a" ]; then
    echo "❌ Error: libcrypto.a not found at $LIBRE_CRYPTO_LIB"
    exit 1
fi

if [ ! -f "$ZSTD_PATH/libzstd.a" ]; then
    echo "❌ Error: libzstd.a not found at $ZSTD_PATH"
    exit 1
fi

echo "✅ Static libraries verified"
echo "   📦 Zstd:            $ZSTD_PATH/libzstd.a"
echo "   📦 LibreSSL crypto: $LIBRE_CRYPTO_LIB/libcrypto.a"
echo "   📦 LibreSSL ssl:    $LIBRE_SSL_LIB/libssl.a"
echo "   📦 LibreSSL tls:    $LIBRE_TLS_LIB/libtls.a"
echo ""

# Put wrapper in PATH to filter x86 flags
export PATH="/tmp/gcc-wrapper-bin:$PATH"

# --- 2. THE COMPILATION ---
# -d:ssl           : Enable Nim SSL support
# -d:openssl       : Use OpenSSL-compatible API
# --dynlibOverride : VITAL. Stops Nim from trying to load .so files at runtime.
# --passC          : Include headers (Zstd + LibreSSL)
# --passL          : Link static libs (Note the multiple -L paths)

echo "🔨 Compiling nip for ARM64..."
echo ""

nim c \
  --skipProjCfg \
  --nimcache:/tmp/nip-arm64-cache \
  -d:release \
  -d:ssl \
  -d:openssl \
  -d:nimcrypto_disable_neon \
  -d:nimcrypto_no_asm \
  --cpu:arm64 \
  --os:linux \
  --cc:gcc \
  --gcc.exe:aarch64-linux-gnu-gcc \
  --gcc.linkerexe:aarch64-linux-gnu-gcc \
  --dynlibOverride:ssl \
  --dynlibOverride:crypto \
  --passC:"-I$ZSTD_PATH -I$LIBRE_PATH/include" \
  --passL:"-L$ZSTD_PATH -L$LIBRE_SSL_LIB -L$LIBRE_CRYPTO_LIB -L$LIBRE_TLS_LIB" \
  --passL:"-static -lssl -lcrypto -ltls -lzstd -lpthread -ldl -lm -lresolv" \
  --opt:size \
  --mm:orc \
  --threads:on \
  -o:"$OUTPUT_DIR/nip" \
  src/nip.nim

# --- 3. VERIFICATION ---
if [ $? -eq 0 ] && [ -f "$OUTPUT_DIR/nip" ]; then
    echo ""
    echo "✅ Build Successful!"
    echo ""
    echo "📊 Binary info:"
    ls -lh "$OUTPUT_DIR/nip"
    file "$OUTPUT_DIR/nip"
    echo ""

    # Check if truly static
    if file "$OUTPUT_DIR/nip" | grep -q "statically linked"; then
        echo "✅ Linking: Static"
    else
        echo "⚠️  Warning: Binary may not be fully static"
    fi

    # Check for crypto strings (should NOT be present as dlopen targets)
    if strings "$OUTPUT_DIR/nip" | grep -q "libcrypto.so"; then
        echo "⚠️  Warning: Binary still contains libcrypto.so references"
    else
        echo "✅ No dynamic crypto references found"
    fi
else
    echo ""
    echo "❌ Build Failed."
    exit 1
fi
@@ -0,0 +1,187 @@
#!/bin/bash
# NIP ARM64 Static Build Script using Zig
# Builds a fully static ARM64 binary using Zig as C compiler with musl
set -e

echo "🚀 Building NIP for ARM64 (aarch64-linux-musl) using Zig"
echo "========================================================="
echo ""

# Check dependencies
if ! command -v nim &> /dev/null; then
    echo "❌ Error: Nim compiler not found"
    exit 1
fi

if ! command -v zig &> /dev/null; then
    echo "❌ Error: Zig compiler not found"
    exit 1
fi

echo "📋 Nim version: $(nim --version | head -1)"
echo "📋 Zig version: $(zig version)"
echo ""

# Create Zig wrapper that shadows aarch64-linux-gnu-gcc
ZIG_WRAPPER_DIR="/tmp/nip-zig-wrappers-arm64"
rm -rf "$ZIG_WRAPPER_DIR"
mkdir -p "$ZIG_WRAPPER_DIR"

# Create a wrapper named exactly "aarch64-linux-gnu-gcc" that calls zig cc
# This shadows the system's aarch64-linux-gnu-gcc when prepended to PATH
# Filters out x86-specific compile flags AND problematic linker flags
cat > "$ZIG_WRAPPER_DIR/aarch64-linux-gnu-gcc" << 'WRAPPER'
#!/bin/bash
# Zig CC wrapper for ARM64 cross-compilation
# Shadows system's aarch64-linux-gnu-gcc and filters incompatible flags

FILTERED_ARGS=()
echo "Wrapper called with:" >> /tmp/wrapper.log
printf "'%s' " "$@" >> /tmp/wrapper.log
echo "" >> /tmp/wrapper.log

for arg in "$@"; do
    case "$arg" in
        # Skip x86-specific compile flags
        -mpclmul|-maes|-msse*|-mavx*|-mno-80387|-fcf-protection|-fstack-clash-protection)
            ;;
        -march=x86*|-march=native)
            ;;
        -mtune=haswell|-mtune=skylake|-mtune=generic)
            ;;
        -Wp,-D_FORTIFY_SOURCE=*)
            ;;
        -flto)
            # LTO can cause issues with zig cross-compile
            ;;
        # Skip dynamic library flags that don't work with musl static
        -ldl)
            # musl's libc.a includes dl* functions, no separate libdl needed
            ;;
        # Filter all march/mtune flags to avoid zig cc conflicts
        -m64|-m32|-march=*|-mtune=*|-mcpu=*|-Xclang*|-target-feature*)
            # skip host-specific flags
            ;;
        *)
            FILTERED_ARGS+=("$arg")
            ;;
    esac
done

exec zig cc -target aarch64-linux-musl "${FILTERED_ARGS[@]}"
WRAPPER
chmod +x "$ZIG_WRAPPER_DIR/aarch64-linux-gnu-gcc"

echo "✅ Created Zig wrapper at $ZIG_WRAPPER_DIR/aarch64-linux-gnu-gcc"
echo ""

# Clean previous builds and cache
echo "🧹 Cleaning previous ARM64 builds and Nim cache..."
rm -f nip-arm64 nip_arm64 nip-arm64-musl
rm -rf ~/.cache/nim/nip_*
rm -rf /tmp/nip-arm64-cache
echo ""

# Prepend our wrapper to PATH
export PATH="$ZIG_WRAPPER_DIR:$PATH"

# Verify our wrapper is first in PATH
FOUND_GCC=$(which aarch64-linux-gnu-gcc)
echo "🔍 Using gcc wrapper: $FOUND_GCC"
echo ""

# Compile statically
echo "🔨 Building optimized ARM64 static binary..."
echo "   Target: aarch64-linux-musl (static via Zig)"
echo "   This may take a few minutes..."

nim c \
  --cpu:arm64 \
  --os:linux \
  --cc:gcc \
  --gcc.exe:"$ZIG_WRAPPER_DIR/aarch64-linux-gnu-gcc" \
  --gcc.linkerexe:"$ZIG_WRAPPER_DIR/aarch64-linux-gnu-gcc" \
  --passC:"-O2" \
  --passC:"-w" \
  --passL:-static \
  --passL:-s \
  -d:release \
  -d:danger \
  -d:nimcrypto_disable_neon \
  -d:nimcrypto_no_asm \
  -d:nimcrypto_sysrand \
  --opt:size \
  --mm:orc \
  --threads:on \
  --nimcache:/tmp/nip-arm64-cache \
  --skipProjCfg \
  --out:nip-arm64 \
  src/nip.nim 2>&1 | tee /tmp/nip-arm64-build.log

if [ ! -f "nip-arm64" ]; then
    echo ""
    echo "❌ Build failed! Check /tmp/nip-arm64-build.log for details"
    echo "Last 20 lines of error:"
    tail -20 /tmp/nip-arm64-build.log
    exit 1
fi

echo ""
echo "✅ Build successful!"
echo ""

# Show binary info
echo "📊 Binary Information:"
ls -lh nip-arm64
echo ""

echo "🔍 File details:"
file nip-arm64
echo ""

# Verify it's ARM64
if file nip-arm64 | grep -q "ARM aarch64"; then
    echo "✅ Verified: Binary is ARM64 aarch64"
else
    echo "⚠️  Binary may not be ARM64 - check file output above"
fi
echo ""

# Verify static linking with readelf
echo "🔍 Verifying static linking..."
if readelf -d nip-arm64 2>/dev/null | grep -q "NEEDED"; then
    echo "⚠️  Binary has dynamic dependencies:"
    readelf -d nip-arm64 2>/dev/null | grep NEEDED
else
    echo "✅ No dynamic dependencies found (fully static)"
fi
echo ""

# Test with QEMU if available
echo "🧪 Testing binary with QEMU user-mode emulation..."
if command -v qemu-aarch64 &> /dev/null; then
    if timeout 10 qemu-aarch64 ./nip-arm64 --version 2>&1; then
        echo "✅ Binary works under QEMU aarch64 emulation"
    else
        echo "⚠️  Binary may need additional setup"
    fi
else
    echo "ℹ️  QEMU aarch64 user-mode not available"
fi
echo ""

# Create output directory
OUTPUT_DIR="build/arm64"
mkdir -p "$OUTPUT_DIR"
cp nip-arm64 "$OUTPUT_DIR/nip"
chmod +x "$OUTPUT_DIR/nip"

echo "🎉 ARM64 build complete!"
echo ""
echo "📋 Build Summary:"
echo "   Binary: nip-arm64"
echo "   Target: aarch64-linux-musl (static)"
echo "   Size: $(ls -lh nip-arm64 | awk '{print $5}')"
echo "   Output: $OUTPUT_DIR/nip"
echo ""
echo "📦 Ready for NexBox integration!"
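A quick way to sanity-check the wrapper in isolation before running the full Nim build is to compile a trivial C file through it; a sketch assuming zig is on PATH and the wrapper has been generated as above (file paths here are illustrative):

# Sketch: smoke-test the zig cc wrapper on its own (not part of the script above)
export PATH="/tmp/nip-zig-wrappers-arm64:$PATH"
printf 'int main(void){return 0;}\n' > /tmp/hello.c
# -march=native would normally break zig cc for a foreign target; the wrapper should filter it out
aarch64-linux-gnu-gcc -static -march=native /tmp/hello.c -o /tmp/hello-arm64
file /tmp/hello-arm64   # expect: ELF 64-bit LSB executable, ARM aarch64, statically linked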
@@ -0,0 +1,95 @@
#!/bin/bash
# Voxis "Iron Hand" Protocol - Manual Linker Override
set -e

# --- 1. TARGET ACQUISITION ---
BASE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$BASE_DIR"

CACHE_DIR="/tmp/nip-arm64-cache"
OUTPUT_DIR="build/arm64"
TARGET="$OUTPUT_DIR/nip"

VENDOR="$(realpath ../../core/nexus/vendor)"
ZSTD_PATH="$VENDOR/zstd-1.5.5/lib"
LIBRE_PATH="$VENDOR/libressl-3.8.2"

LIBRE_SSL_LIB="$LIBRE_PATH/ssl/.libs"
LIBRE_CRYPTO_LIB="$LIBRE_PATH/crypto/.libs"
LIBRE_TLS_LIB="$LIBRE_PATH/tls/.libs"

mkdir -p "$OUTPUT_DIR"

echo "🔨 [IRON HAND] Locating debris..."

# Gather all object files from the cache
# We filter out any potential garbage, ensuring only .o files
OBJECTS=$(find "$CACHE_DIR" -name "*.o" 2>/dev/null | tr '\n' ' ')

if [ -z "$OBJECTS" ]; then
    echo "❌ ERROR: No object files found in $CACHE_DIR. Did you run the compile step?"
    exit 1
fi

OBJ_COUNT=$(echo "$OBJECTS" | wc -w)
echo "   Found $OBJ_COUNT object files"

echo "🔗 [IRON HAND] Linking Sovereign Artifact (with Shim)..."

# 2.1: Validate Shim exists
SHIM_OBJ="$BASE_DIR/src/openssl_shim.o"
if [ ! -f "$SHIM_OBJ" ]; then
    echo "❌ Missing Shim: $SHIM_OBJ"
    echo "   Run: cd src && aarch64-linux-gnu-gcc -c openssl_shim.c -o openssl_shim.o -I../../nexus/vendor/libressl-3.8.2/include -O2"
    exit 1
fi

# --- 2. THE WELD ---
# We invoke the cross-compiler directly as the linker.
# We feed it every single object file Nim created + our shim.

aarch64-linux-gnu-gcc \
  $OBJECTS \
  "$SHIM_OBJ" \
  -o "$TARGET" \
  -L"$ZSTD_PATH" \
  -L"$LIBRE_SSL_LIB" \
  -L"$LIBRE_CRYPTO_LIB" \
  -L"$LIBRE_TLS_LIB" \
  -static \
  -lpthread \
  -lssl -lcrypto -ltls \
  -lzstd \
  -ldl -lm -lrt -lresolv \
  -Wl,-z,muldefs \
  -Wl,-O1 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now

# --- 3. VERIFICATION ---
echo ""
if [ -f "$TARGET" ]; then
    echo "✅ [SUCCESS] Binary forged at: $TARGET"
    echo ""
    ls -lh "$TARGET"
    file "$TARGET"
    echo ""

    echo "🔎 Checking linkage type..."
    # If static, 'ldd' should say "not a dynamic executable"
    if ldd "$TARGET" 2>&1 | grep -q "not a dynamic executable"; then
        echo "   ✅ Structure: STATIC"
    else
        echo "   ⚠️  Structure: DYNAMIC"
        ldd "$TARGET" | head -n 5
    fi

    echo ""
    echo "🔎 Checking for libcrypto.so references..."
    if strings "$TARGET" | grep -q "libcrypto.so"; then
        echo "   ⚠️  Found dlopen references (may still work if --dynlibOverride worked)"
    else
        echo "   ✅ No libcrypto.so dlopen references"
    fi
else
    echo "❌ [FAILURE] Linker command finished but no binary produced."
    exit 1
fi
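Before the manual link it can help to confirm the shim really defines the symbols the Nim objects still reference. A sketch using the cross binutils nm (assumes binutils for aarch64-linux-gnu is installed alongside the cross gcc; paths match the script above):

# Sketch: compare shim-provided symbols with what the cached objects still need (not part of the script)
aarch64-linux-gnu-nm --defined-only src/openssl_shim.o
aarch64-linux-gnu-nm --undefined-only /tmp/nip-arm64-cache/*.o | sort -u | grep -iE 'ssl|crypto' | head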
@@ -0,0 +1,12 @@
# ARM64 Cross-Compile Configuration
# Override all system flags

# Clear all default flags
@if arm64:
  passC = ""
  passL = ""
@end

# Disable all x86 optimizations
--passC:"-O2"
--passC:"-w"

nip.nim (13)
@@ -1,10 +1,15 @@
#!/usr/bin/env nim
## NIP MVP - Minimal Viable Product CLI
## Simple, focused package grafting from Nix, PKGSRC, and Pacman
# Copyright (c) 2026 Nexus Foundation
# Licensed under the Libertaria Sovereign License (LSL-1.0)
# See legal/LICENSE_SOVEREIGN.md for details.

# NIP MVP - Minimal Viable Product CLI
# Simple, focused package grafting from Nix, PKGSRC, and Pacman

import std/[os, strutils, strformat]
import src/nimpak/cli/graft_commands
import src/nimpak/cli/bootstrap_commands
import src/nimpak/cli/store_commands

const
  Version = "0.1.0-mvp"
@@ -30,6 +35,7 @@ COMMANDS:
  doctor                Check system health
  setup                 Setup system integration (PATH, libraries)
  bootstrap             Build tool management (nix, pkgsrc, gentoo)
  store                 Interact with Content-Addressable Storage (CAS)
  config [show|init]    Show or initialize configuration
  logs [lines]          Show recent log entries (default: 50)
  search <query>        Search for packages (coming soon)
@@ -227,6 +233,9 @@ proc main() =
        bootstrapHelpCommand()
        exitCode = 1

    of "store":
      exitCode = dispatchStoreCommand(commandArgs, verbose)

    else:
      echo fmt"Error: Unknown command '{command}'"
      echo "Run 'nip --help' for usage information"
@@ -4,7 +4,7 @@

import std/[strutils, json, os, times, osproc, tables, strformat, httpclient]
import ../grafting
from ../cas import Result, ok, err, isErr, get
import ../types

type
  AURAdapter* = ref object of PackageAdapter
@@ -240,10 +240,10 @@ proc downloadPKGBUILD(adapter: AURAdapter, packageName: string): Result[string,

    writeFile(pkgbuildPath, content)

    return Result[string, string](isOk: true, value: pkgbuildPath)
    return Result[string, string](isOk: true, okValue: pkgbuildPath)

  except Exception as e:
    return Result[string, string](isOk: false, error: fmt"Failed to download PKGBUILD: {e.msg}")
    return Result[string, string](isOk: false, errValue: fmt"Failed to download PKGBUILD: {e.msg}")

proc showPKGBUILDReview(pkgbuildPath: string): bool =
  ## Show PKGBUILD for user review
@@ -316,26 +316,26 @@ proc calculateAURHash(pkgbuildPath: string): string =

    "aur-hash-error"

method validatePackage*(adapter: AURAdapter, packageName: string): Result[bool, string] {.base.} =
method validatePackage*(adapter: AURAdapter, packageName: string): Result[bool, string] =
  ## Validate that a package exists in AUR
  try:
    let info = searchAURPackage(adapter, packageName)

    if info.name == "":
      return Result[bool, string](isOk: false, error: fmt"Package '{packageName}' not found in AUR")
      return Result[bool, string](isOk: false, errValue: fmt"Package '{packageName}' not found in AUR")

    return Result[bool, string](isOk: true, value: true)
    return Result[bool, string](isOk: true, okValue: true)

  except Exception as e:
    return Result[bool, string](isOk: false, error: fmt"Validation error: {e.msg}")
    return Result[bool, string](isOk: false, errValue: fmt"Validation error: {e.msg}")

method getPackageInfo*(adapter: AURAdapter, packageName: string): Result[JsonNode, string] {.base.} =
method getPackageInfo*(adapter: AURAdapter, packageName: string): Result[JsonNode, string] =
  ## Get detailed package information from AUR
  try:
    let info = searchAURPackage(adapter, packageName)

    if info.name == "":
      return Result[JsonNode, string](isOk: false, error: fmt"Package '{packageName}' not found in AUR")
      return Result[JsonNode, string](isOk: false, errValue: fmt"Package '{packageName}' not found in AUR")

    let jsonResult = %*{
      "name": info.name,

@@ -354,7 +354,7 @@ method getPackageInfo*(adapter: AURAdapter, packageName: string): Result[JsonNod
      "build_method": "nippel"
    }

    return Result[JsonNode, string](isOk: true, value: jsonResult)
    return Result[JsonNode, string](isOk: true, okValue: jsonResult)

  except Exception as e:
    return Result[JsonNode, string](isOk: false, error: fmt"Error getting package info: {e.msg}")
    return Result[JsonNode, string](isOk: false, errValue: fmt"Error getting package info: {e.msg}")
@@ -1,17 +1,17 @@
## Git Source Adapter for NexusForge
## Implements "Obtainium-style" Git-based package resolution
##
## Features:
## - Parse git+https:// URLs with optional tag/branch specifiers
## - Poll GitHub/GitLab APIs for tags and releases
## - Semver matching and wildcard support
## - Shallow clone for efficient fetching
# Git Source Adapter for NexusForge
# Implements "Obtainium-style" Git-based package resolution
#
# Features:
# - Parse git+https:// URLs with optional tag/branch specifiers
# - Poll GitHub/GitLab APIs for tags and releases
# - Semver matching and wildcard support
# - Shallow clone for efficient fetching

import std/[strutils, options, json, httpclient, os, osproc, uri, times,
            sequtils, algorithm]
import ../types/grafting_types
import ../cas
from ../cas import Result, VoidResult, ok, err, isErr, get
import ../types

type
  GitSourceKind* = enum
@@ -468,7 +468,7 @@ proc ingestDirToCas*(cas: var CasManager, sourceDir: string,
    let storeResult = cas.storeObject(dataBytes)

    if storeResult.isOk:
      let obj = storeResult.value
      let obj = storeResult.okValue
      allHashes.add(file & ":" & obj.hash)
      result.files.add(file)
      totalSize += obj.size

@@ -488,7 +488,7 @@ proc ingestDirToCas*(cas: var CasManager, sourceDir: string,

  if manifestResult.isOk:
    result.success = true
    result.casHash = manifestResult.value.hash
    result.casHash = manifestResult.okValue.hash
    result.totalSize = totalSize

# =============================================================================

@@ -577,7 +577,7 @@ proc downloadAndIngestAsset*(cas: var CasManager, asset: GitAsset,
  # Download the asset
  let downloadResult = downloadReleaseAsset(asset, tempPath, token)
  if not downloadResult.isOk:
    return err[string, string](downloadResult.error)
    return err[string, string](downloadResult.errValue)

  # Ingest into CAS
  try:

@@ -589,7 +589,7 @@ proc downloadAndIngestAsset*(cas: var CasManager, asset: GitAsset,
    removeFile(tempPath)

    if storeResult.isOk:
      return ok[string, string](storeResult.value.hash)
      return ok[string, string](storeResult.okValue.hash)
    else:
      return err[string, string]("CAS store failed")
  except IOError as e:

@@ -628,10 +628,10 @@ proc obtainPackage*(cas: var CasManager, source: GitSource, tagPattern: string =
  # Step 1: Get available tags
  let tagsResult = fetchTags(source)
  if not tagsResult.isOk:
    result.errors.add("Failed to fetch tags: " & tagsResult.error)
    result.errors.add("Failed to fetch tags: " & tagsResult.errValue)
    return

  let matchedTags = filterTags(tagsResult.value, tagPattern)
  let matchedTags = filterTags(tagsResult.okValue, tagPattern)
  if matchedTags.len == 0:
    result.errors.add("No tags match pattern: " & tagPattern)
    return

@@ -644,7 +644,7 @@ proc obtainPackage*(cas: var CasManager, source: GitSource, tagPattern: string =
  if preferRelease and source.kind == GitHub:
    let releasesResult = fetchGitHubReleases(source)
    if releasesResult.isOk:
      for release in releasesResult.value:
      for release in releasesResult.okValue:
        if release.tag == bestTag.name:
          let asset = findAssetByPattern(release, assetPattern)
          if asset.isSome:

@@ -652,7 +652,7 @@ proc obtainPackage*(cas: var CasManager, source: GitSource, tagPattern: string =
                                              actualCacheDir, source.token)
            if ingestResult.isOk:
              result.success = true
              result.casHash = ingestResult.value
              result.casHash = ingestResult.okValue
              result.fetchMethod = "release"
              result.files = @[asset.get().name]
              return
@@ -3,7 +3,7 @@

import std/[strutils, json, os, times, osproc, tables, strformat]
import ../grafting
from ../cas import Result, ok, err, isErr, get
import ../types

type
  NixAdapter* = ref object of PackageAdapter
@@ -351,31 +351,31 @@ proc calculateNixStoreHash(storePath: string): string =

    "nix-hash-error"

method validatePackage*(adapter: NixAdapter, packageName: string): Result[bool, string] {.base.} =
method validatePackage*(adapter: NixAdapter, packageName: string): Result[bool, string] =
  ## Validate that a package exists in nixpkgs
  try:
    if not isNixAvailable():
      return Result[bool, string](isOk: false, error: "Nix is not installed. Install Nix from https://nixos.org/download.html")
      return Result[bool, string](isOk: false, errValue: "Nix is not installed. Install Nix from https://nixos.org/download.html")

    let info = getNixPackageInfo(adapter, packageName)

    if info.name == "":
      return Result[bool, string](isOk: false, error: fmt"Package '{packageName}' not found in nixpkgs")
      return Result[bool, string](isOk: false, errValue: fmt"Package '{packageName}' not found in nixpkgs")

    return Result[bool, string](isOk: true, value: true)
    return Result[bool, string](isOk: true, okValue: true)

  except JsonParsingError as e:
    return Result[bool, string](isOk: false, error: fmt"Failed to parse Nix output: {e.msg}")
    return Result[bool, string](isOk: false, errValue: fmt"Failed to parse Nix output: {e.msg}")
  except Exception as e:
    return Result[bool, string](isOk: false, error: fmt"Validation error: {e.msg}")
    return Result[bool, string](isOk: false, errValue: fmt"Validation error: {e.msg}")

method getPackageInfo*(adapter: NixAdapter, packageName: string): Result[JsonNode, string] {.base.} =
method getPackageInfo*(adapter: NixAdapter, packageName: string): Result[JsonNode, string] =
  ## Get detailed package information from nixpkgs
  try:
    let info = getNixPackageInfo(adapter, packageName)

    if info.name == "":
      return Result[JsonNode, string](isOk: false, error: fmt"Package '{packageName}' not found in nixpkgs")
      return Result[JsonNode, string](isOk: false, errValue: fmt"Package '{packageName}' not found in nixpkgs")

    let jsonResult = %*{
      "name": info.name,

@@ -389,10 +389,10 @@ method getPackageInfo*(adapter: NixAdapter, packageName: string): Result[JsonNod
      "adapter": adapter.name
    }

    return Result[JsonNode, string](isOk: true, value: jsonResult)
    return Result[JsonNode, string](isOk: true, okValue: jsonResult)

  except Exception as e:
    return Result[JsonNode, string](isOk: false, error: fmt"Error getting package info: {e.msg}")
    return Result[JsonNode, string](isOk: false, errValue: fmt"Error getting package info: {e.msg}")

# Utility functions for Nix integration
proc getNixSystemInfo*(): JsonNode =
@@ -1,11 +1,11 @@
## Pacman Database Adapter for NIP
##
## This module provides integration with the existing pacman package manager,
## allowing NIP to read, understand, and manage pacman-installed packages.
## This enables gradual migration from pacman to NIP on Arch Linux systems.
# Pacman Database Adapter for NIP
#
# This module provides integration with the existing pacman package manager,
# allowing NIP to read, understand, and manage pacman-installed packages.
# This enables gradual migration from pacman to NIP on Arch Linux systems.

import std/[os, strutils, tables, times, sequtils, options, strformat, hashes, osproc]
from ../cas import VoidResult, Result, ok, get, err
import ../types
import ../grafting

type
@@ -319,10 +319,10 @@ proc syncWithNip*(adapter: var PacmanAdapter): Result[int, string] =
      # This would integrate with the existing NIP database system
      syncedCount.inc

    return Result[int, string](isOk: true, value: syncedCount)
    return Result[int, string](isOk: true, okValue: syncedCount)

  except Exception as e:
    return Result[int, string](isOk: false, error: "Failed to sync with NIP: " & e.msg)
    return Result[int, string](isOk: false, errValue: "Failed to sync with NIP: " & e.msg)

proc getPackageInfo*(adapter: PacmanAdapter, name: string): string =
  ## Get detailed package information in human-readable format

@@ -390,18 +390,18 @@ proc nipPacmanSync*(): Result[string, string] =

  let loadResult = adapter.loadPacmanDatabase()
  if not loadResult.isOk:
    return Result[string, string](isOk: false, error: loadResult.errValue)
    return Result[string, string](isOk: false, errValue: loadResult.errValue)

  let syncResult = adapter.syncWithNip()
  if not syncResult.isOk:
    return Result[string, string](isOk: false, error: syncResult.error)
    return Result[string, string](isOk: false, errValue: syncResult.errValue)

  let stats = adapter.getSystemStats()
  let message = "✅ Synchronized " & $syncResult.get() & " packages\n" &
                "📊 Total: " & $stats.totalPackages & " packages, " &
                $(stats.totalSize div (1024*1024)) & " MB"

  return Result[string, string](isOk: true, value: message)
  return Result[string, string](isOk: true, okValue: message)

proc nipPacmanList*(query: string = ""): Result[string, string] =
  ## NIP command: nip pacman-list [query]

@@ -410,26 +410,26 @@ proc nipPacmanList*(query: string = ""): Result[string, string] =

  let loadResult = adapter.loadPacmanDatabase()
  if not loadResult.isOk:
    return Result[string, string](isOk: false, error: loadResult.errValue)
    return Result[string, string](isOk: false, errValue: loadResult.errValue)

  let packages = if query == "":
    adapter.listPackages()
  else:
    adapter.searchPackages(query)

  var result = "📦 Pacman Packages"
  var listOutput = "📦 Pacman Packages"
  if query != "":
    result.add(" (matching '" & query & "')")
  result.add(":\n\n")
    listOutput.add(" (matching '" & query & "')")
  listOutput.add(":\n\n")

  for pkg in packages:
    result.add("• " & pkg.name & " " & pkg.version)
    listOutput.add("• " & pkg.name & " " & pkg.version)
    if pkg.description != "":
      result.add(" - " & pkg.description)
    result.add("\n")
      listOutput.add(" - " & pkg.description)
    listOutput.add("\n")

  result.add("\nTotal: " & $packages.len & " packages")
  return Result[string, string](isOk: true, value: result)
  listOutput.add("\nTotal: " & $packages.len & " packages")
  return Result[string, string](isOk: true, okValue: listOutput)

proc nipPacmanInfo*(packageName: string): Result[string, string] =
  ## NIP command: nip pacman-info <package>

@@ -438,10 +438,10 @@ proc nipPacmanInfo*(packageName: string): Result[string, string] =

  let loadResult = adapter.loadPacmanDatabase()
  if not loadResult.isOk:
    return Result[string, string](isOk: false, error: loadResult.errValue)
    return Result[string, string](isOk: false, errValue: loadResult.errValue)

  let info = adapter.getPackageInfo(packageName)
  return Result[string, string](isOk: true, value: info)
  return Result[string, string](isOk: true, okValue: info)

proc nipPacmanDeps*(packageName: string): Result[string, string] =
  ## NIP command: nip pacman-deps <package>

@@ -450,38 +450,38 @@ proc nipPacmanDeps*(packageName: string): Result[string, string] =

  let loadResult = adapter.loadPacmanDatabase()
  if not loadResult.isOk:
    return Result[string, string](isOk: false, error: loadResult.errValue)
    return Result[string, string](isOk: false, errValue: loadResult.errValue)

  var visited: seq[string] = @[]
  let deps = adapter.getDependencyTree(packageName, visited)

  var result = "🌳 Dependency tree for " & packageName & ":\n\n"
  var outputStr = "🌳 Dependency tree for " & packageName & ":\n\n"
  for i, dep in deps:
    let prefix = if i == deps.len - 1: "└── " else: "├── "
    result.add(prefix & dep & "\n")
    outputStr.add(prefix & dep & "\n")

  if deps.len == 0:
    result.add("No dependencies found.\n")
    outputStr.add("No dependencies found.\n")
  else:
    result.add("\nTotal dependencies: " & $deps.len)
    outputStr.add("\nTotal dependencies: " & $deps.len)

  return Result[string, string](isOk: true, value: result)
  return Result[string, string](isOk: true, okValue: outputStr)

# Grafting adapter methods for coordinator integration

method validatePackage*(adapter: PacmanAdapter, packageName: string): Result[bool, string] =
proc validatePackage*(adapter: PacmanAdapter, packageName: string): Result[bool, string] =
  ## Validate if a package exists using pacman -Ss (checks repos)
  try:
    # Use pacman to search for package (checks both local and remote)
    let (output, exitCode) = execCmdEx(fmt"pacman -Ss '^{packageName}$'")

    if exitCode == 0 and output.len > 0:
      return Result[bool, string](isOk: true, value: true)
      return Result[bool, string](isOk: true, okValue: true)
    else:
      return Result[bool, string](isOk: true, value: false)
      return Result[bool, string](isOk: true, okValue: false)

  except Exception as e:
    return Result[bool, string](isOk: false, error: "Failed to validate package: " & e.msg)
    return Result[bool, string](isOk: false, errValue: "Failed to validate package: " & e.msg)

proc isPackageInstalled(adapter: PacmanAdapter, packageName: string): bool =
  ## Check if package is installed locally using pacman -Q

@@ -491,7 +491,7 @@ proc isPackageInstalled(adapter: PacmanAdapter, packageName: string): bool =
  except:
    return false

method graftPackage*(adapter: var PacmanAdapter, packageName: string, cache: GraftingCache): GraftResult =
proc graftPackage*(adapter: var PacmanAdapter, packageName: string, cache: GraftingCache): GraftResult =
  ## Graft a package from Pacman (local or remote)
  echo fmt"🌱 Grafting package from Pacman: {packageName}"
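The renames from `var result = ...` to `listOutput`/`outputStr` in the hunks above avoid redeclaring Nim's implicit `result` variable. A minimal illustrative sketch, not project code:

# Sketch: every Nim proc with a return type already has an implicit `result`;
# `var result = ...` inside the body is a redefinition error, so a distinct
# local name (e.g. listOutput) is used and assigned to result at the end.
proc describe(names: seq[string]): string =
  var listOutput = "Packages:"
  for n in names:
    listOutput.add("\n• " & n)
  result = listOutput

echo describe(@["zstd", "libressl"])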
@@ -3,7 +3,7 @@

import std/[strutils, json, os, times, osproc, strformat]
import ../grafting
from ../cas import Result, ok, err, isErr, get
import ../types

type
  PKGSRCAdapter* = ref object of PackageAdapter
@@ -490,9 +490,9 @@ method validatePackage*(adapter: PKGSRCAdapter, packageName: string): Result[boo
  ## Validate that a package exists in PKGSRC
  try:
    let info = findPKGSRCPackage(adapter, packageName)
    return Result[bool, string](isOk: true, value: info.name != "")
    return Result[bool, string](isOk: true, okValue: info.name != "")
  except Exception as e:
    return Result[bool, string](isOk: false, error: fmt"Validation error: {e.msg}")
    return Result[bool, string](isOk: false, errValue: fmt"Validation error: {e.msg}")

method getPackageInfo*(adapter: PKGSRCAdapter, packageName: string): Result[JsonNode, string] =
  ## Get detailed package information from PKGSRC

@@ -500,7 +500,7 @@ method getPackageInfo*(adapter: PKGSRCAdapter, packageName: string): Result[Json
    let info = findPKGSRCPackage(adapter, packageName)

    if info.name == "":
      return Result[JsonNode, string](isOk: false, error: fmt"Package '{packageName}' not found in PKGSRC")
      return Result[JsonNode, string](isOk: false, errValue: fmt"Package '{packageName}' not found in PKGSRC")

    let result = %*{
      "name": info.name,

@@ -517,10 +517,10 @@ method getPackageInfo*(adapter: PKGSRCAdapter, packageName: string): Result[Json
      "adapter": adapter.name
    }

    return Result[JsonNode, string](isOk: true, value: result)
    return Result[JsonNode, string](isOk: true, okValue: result)

  except Exception as e:
    return Result[JsonNode, string](isOk: false, error: fmt"Error getting package info: {e.msg}")
    return Result[JsonNode, string](isOk: false, errValue: fmt"Error getting package info: {e.msg}")

# Utility functions
proc isPKGSRCAvailable*(adapter: PKGSRCAdapter): bool =
@@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## NimPak Performance Benchmarking
##
## Comprehensive benchmarks for the NimPak package manager.
@@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/build_system.nim
## Nimplate Build System Integration
##
@@ -1,11 +1,18 @@
## Content-Addressable Storage (CAS) System
##
## This module implements the foundational content-addressable storage system
## that provides automatic deduplication and cryptographic verification using
## xxHash (xxh3_128) for maximum performance with BLAKE2b legacy fallback.
##
## Hash Algorithm: xxHash xxh3_128 (40-50 GiB/s, 128-bit collision-safe)
## Legacy Support: BLAKE2b-512 (for backward compatibility)
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# Content-Addressable Storage (CAS) System
#
# This module implements the foundational content-addressable storage system
# that provides automatic deduplication and cryptographic verification using
# xxHash (xxh3_128) for maximum performance with BLAKE2b legacy fallback.
#
# Hash Algorithm: xxHash xxh3_128 (40-50 GiB/s, 128-bit collision-safe)
# Legacy Support: BLAKE2b-512 (for backward compatibility)

import std/[os, tables, sets, strutils, json, sequtils, hashes, options, times, algorithm]
{.warning[Deprecated]:off.}
@@ -13,37 +20,12 @@ import std/threadpool # For parallel operations
{.warning[Deprecated]:on.}
import xxhash # Modern high-performance hashing (2-3x faster than BLAKE2b)
import nimcrypto/blake2 # Legacy fallback
import ../nip/types
import ./types
import ./protection # Read-only protection manager

# Result type for error handling - using std/options for now
type
  Result*[T, E] = object
    case isOk*: bool
    of true:
      value*: T
    of false:
      error*: E
# Result types are imported from ./types

  VoidResult*[E] = object
    case isOk*: bool
    of true:
      discard
    of false:
      errValue*: E

proc ok*[T, E](val: T): Result[T, E] =
  Result[T, E](isOk: true, value: val)

proc err*[T, E](error: E): Result[T, E] =
  Result[T, E](isOk: false, error: error)

proc ok*[E](dummy: typedesc[E]): VoidResult[E] =
  VoidResult[E](isOk: true)

proc isErr*[T, E](r: Result[T, E]): bool = not r.isOk
proc get*[T, E](r: Result[T, E]): T = r.value
proc getError*[T, E](r: Result[T, E]): E = r.error

type
  FormatType* = enum
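The in-module Result definition removed above used `value`/`error` field names, while the adapters in this commit now construct `okValue`/`errValue`. The shared definition in `./types` is not shown in this diff, so the following is only a sketch of what it presumably looks like, with field names inferred from usage:

# Sketch of the assumed shared Result/VoidResult variant objects in nimpak types
# (an assumption based on the okValue/errValue accesses throughout this diff)
type
  Result*[T, E] = object
    case isOk*: bool
    of true:
      okValue*: T
    of false:
      errValue*: E

  VoidResult*[E] = object
    case isOk*: bool
    of true:
      discard
    of false:
      errValue*: E

proc ok*[T, E](val: T): Result[T, E] = Result[T, E](isOk: true, okValue: val)
proc err*[T, E](e: E): Result[T, E] = Result[T, E](isOk: false, errValue: e)
proc isErr*[T, E](r: Result[T, E]): bool = not r.isOk
proc get*[T, E](r: Result[T, E]): T = r.okValue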
@@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## NCA Content-Addressable Chunks Format Handler
##
## This module implements the NCA (Nexus Content-Addressable) chunk format for
@@ -12,6 +12,7 @@ import std/[os, strutils, times, json, tables, sequtils, algorithm, strformat]
# TODO: Re-enable when nipcells module is available
# import ../nipcells
import ../grafting, ../database, core
import ../build/[recipe_manager, recipe_parser]
import audit_commands, track_commands, verify_commands
import enhanced_search
@@ -170,6 +171,37 @@ proc infoCommand*(packageName: string): CommandResult =
  try:
    core.showInfo(fmt"Getting information for package: {packageName}")

    # Initialize RecipeManager to check Bazaar
    let rm = newRecipeManager()
    let recipeOpt = rm.loadRecipe(packageName)

    if recipeOpt.isSome:
      let recipe = recipeOpt.get()
      let packageInfo = %*{
        "name": recipe.name,
        "version": recipe.version,
        "description": recipe.description,
        "homepage": recipe.metadata.homepage,
        "license": recipe.metadata.license,
        "stream": "bazaar",        # It comes from the Bazaar
        "architecture": "multi",   # Recipe supports multiple
        "installed": false,        # We don't check installed status here yet
        "source_type": recipe.toolType
      }

      if globalContext.options.outputFormat == OutputHuman:
        echo bold("Package Information (Bazaar): " & highlight(packageName))
        echo "=".repeat(30)
        echo "Name:        " & packageInfo["name"].getStr()
        echo "Version:     " & highlight(packageInfo["version"].getStr())
        echo "Description: " & packageInfo["description"].getStr()
        echo "License:     " & packageInfo["license"].getStr()
        echo "Source Type: " & packageInfo["source_type"].getStr()
        return successResult("Package found in Bazaar", packageInfo)
      else:
        return successResult("Package found in Bazaar", packageInfo)

    # Fallback to installed DB check placeholder
    # TODO: Implement actual package info retrieval
    let packageInfo = %*{
      "name": packageName,
@@ -4,11 +4,9 @@
## This module provides forward-compatibility hooks for Task 15.2
## and implements immediate diagnostic capabilities

import std/[os, strutils, strformat, tables, sequtils, times, json, asyncdispatch]
import std/[strutils, strformat, sequtils, times, json, asyncdispatch]
import ../security/integrity_monitor
import ../diagnostics/health_monitor
import ../types_fixed
import core

type
  DiagnosticSeverity* = enum
@@ -156,7 +154,7 @@ proc formatDiagnosticReport*(report: DiagnosticReport, outputFormat: string = "p

  else: # plain format
    result = "NimPak System Diagnostics\n"
    result.add("=" * 30 & "\n\n")
    result.add(repeat("=", 30) & "\n\n")

    # Overall status
    let statusIcon = case report.overall:

@@ -166,13 +164,17 @@ proc formatDiagnosticReport*(report: DiagnosticReport, outputFormat: string = "p
      of DiagnosticCritical: "🚨"

    result.add(fmt"{statusIcon} Overall Status: {report.overall}\n")
    result.add(fmt"📅 Generated: {report.timestamp.format(\"yyyy-MM-dd HH:mm:ss\")}\n\n")
    let timestampStr = report.timestamp.format("yyyy-MM-dd HH:mm:ss")
    result.add(fmt"📅 Generated: {timestampStr}\n\n")

    # System information
    result.add("System Information:\n")
    result.add(fmt"  Version: {report.systemInfo[\"nimpak_version\"].getStr()}\n")
    result.add(fmt"  Platform: {report.systemInfo[\"platform\"].getStr()}\n")
    result.add(fmt"  Architecture: {report.systemInfo[\"architecture\"].getStr()}\n\n")
    let nimpakVersion = report.systemInfo["nimpak_version"].getStr()
    result.add(fmt"  Version: {nimpakVersion}\n")
    let platform = report.systemInfo["platform"].getStr()
    result.add(fmt"  Platform: {platform}\n")
    let architecture = report.systemInfo["architecture"].getStr()
    result.add(fmt"  Architecture: {architecture}\n\n")

    # Diagnostic results
    result.add("Diagnostic Results:\n")

@@ -232,7 +234,7 @@ proc nipRepoBenchmark*(outputFormat: string = "plain"): string =
    return results.pretty()
  else:
    result = "Repository Benchmark Results\n"
    result.add("=" * 35 & "\n\n")
    result.add(repeat("=", 35) & "\n\n")

    for repo in results["repositories"]:
      let status = case repo["status"].getStr():

@@ -241,10 +243,15 @@ proc nipRepoBenchmark*(outputFormat: string = "plain"): string =
        of "error": "🔴"
        else: "⚪"

      result.add(fmt"{status} {repo[\"name\"].getStr()}\n")
      result.add(fmt"   URL: {repo[\"url\"].getStr()}\n")
      result.add(fmt"   Latency: {repo[\"latency_ms\"].getFloat():.1f}ms\n")
      result.add(fmt"   Throughput: {repo[\"throughput_mbps\"].getFloat():.1f} MB/s\n\n")
      let name = repo["name"].getStr()
      let url = repo["url"].getStr()
      let latency = repo["latency_ms"].getFloat()
      let throughput = repo["throughput_mbps"].getFloat()

      result.add(fmt"{status} {name}\n")
      result.add(fmt"   URL: {url}\n")
      result.add(fmt"   Latency: {latency:.1f}ms\n")
      result.add(fmt"   Throughput: {throughput:.1f} MB/s\n\n")

proc nipCacheWarm*(packageName: string): string =
  ## Pre-pull binary packages into local cache for offline deployment
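The hunks above bind JSON lookups to locals before interpolating because `fmt"..."` is a generalized raw string literal, so backslash-escaped quotes inside the braces are not escapes and the literal ends early. A minimal sketch of the pattern (illustrative data only):

import std/[json, strformat]

let repo = %*{"name": "official", "latency_ms": 12.5}
# fmt"{repo[\"name\"].getStr()}" does not parse as intended; bind first, then interpolate:
let name = repo["name"].getStr()
let latency = repo["latency_ms"].getFloat()
echo fmt"{name}: {latency:.1f} ms"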
@@ -270,7 +277,7 @@ proc nipMirrorGraph*(outputFormat: string = "plain"): string =
    result.add("}\n")
  else:
    result = "Mirror Network Topology\n"
    result.add("=" * 25 & "\n\n")
    result.add(repeat("=", 25) & "\n\n")
    result.add("Priority Order (High → Low):\n")
    result.add("  1. 🟢 official (100) → community\n")
    result.add("  2. 🔵 community (75) → edge\n")

@@ -281,7 +288,7 @@ proc nipMirrorGraph*(outputFormat: string = "plain"): string =
# Forward-Compatibility Hooks for Task 15.2
# =============================================================================

proc nipDoctor*(outputFormat: string = "plain", autoRepair: bool = false): string {.async.} =
proc nipDoctor*(outputFormat: string = "plain", autoRepair: bool = false): Future[string] {.async.} =
  ## Comprehensive system health check with repair suggestions
  try:
    # Initialize health monitor

@@ -309,7 +316,7 @@ proc nipDoctor*(outputFormat: string = "plain", autoRepair: bool = false): strin
    result = fmt"❌ Health check failed: {e.msg}\n"
    result.add("💡 Try: nip doctor --force\n")

proc nipRepair*(category: string = "all", dryRun: bool = false): string {.async.} =
proc nipRepair*(category: string = "all", dryRun: bool = false): Future[string] {.async.} =
  ## System repair command with comprehensive health monitoring integration
  result = fmt"🔧 Repair mode: {category}\n"

@@ -404,7 +411,7 @@ proc nipInstallWithStream*(packageName: string, repo: string = "",
proc nipTrustExplain*(target: string): string =
  ## Explain trust policy decisions for repositories or packages
  result = fmt"🔍 Trust Analysis: {target}\n"
  result.add("=" * (20 + target.len) & "\n\n")
  result.add(repeat("=", 20 + target.len) & "\n\n")

  # Mock trust analysis
  result.add("Trust Score: 0.72 🟡\n\n")
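The `nipDoctor` and `nipRepair` signatures above now declare `Future[string]` because an `{.async.}` proc must return a Future; inside the body, `result` stays the plain value and the macro wraps it. A minimal sketch, not project code:

import std/asyncdispatch

# An async proc's declared return type is Future[T]; `result` inside is still T.
proc healthSummary(verbose: bool = false): Future[string] {.async.} =
  result = "OK"
  if verbose:
    result.add(" (all checks passed)")

echo waitFor healthSummary(verbose = true)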
@@ -42,6 +42,7 @@ VERIFICATION COMMANDS:
CONFIGURATION COMMANDS:
  config show            Show current configuration
  config validate        Validate configuration files
  setup <user|system>    Setup NIP environment

GLOBAL OPTIONS:
  --output <format>      Output format: human, json, yaml, kdl
@@ -253,6 +254,18 @@ proc showCommandHelp*(command: string) =
    echo "nip lock [options] - Generate lockfile for reproducibility"
  of "restore":
    echo "nip restore <lockfile> [options] - Restore from lockfile"
  of "setup":
    echo """
nip setup <user|system> - Setup NIP environment

Arguments:
  user     Configure NIP for the current user (updates shell RC files)
  system   Configure NIP system-wide (requires root)

Examples:
  nip setup user     # Add NIP to PATH in ~/.zshrc, ~/.bashrc, etc.
  nip setup system   # Add NIP to system PATH (not implemented)
"""
  else:
    echo fmt"Unknown command: {command}"
    echo "Use 'nip help' for available commands"
@@ -0,0 +1,119 @@

import std/[os, strformat, strutils]
import ../config
import core

proc checkPathConfigured*(): bool =
  ## Check if NIP binary path is in PATH
  let config = loadConfig()
  let binPath = config.linksDir / "Executables"
  let pathEnv = getEnv("PATH")

  # Normalize paths for comparison (remove trailing slashes, resolve symlinks if possible)
  # Simple string check for now
  return binPath in pathEnv

proc detectShell*(): string =
  ## Detect the user's shell
  let shellPath = getEnv("SHELL")
  if shellPath.len > 0:
    return shellPath.extractFilename()
  return "bash"

proc appendToRcFile(rcFile: string, content: string): bool =
  ## Append content to an RC file if it's not already there
  let home = getHomeDir()
  let path = home / rcFile

  try:
    var currentContent = ""
    if fileExists(path):
      currentContent = readFile(path)

    if content.strip() in currentContent:
      return true # Already there

    let newContent = if currentContent.len > 0 and not currentContent.endsWith("\n"):
      "\n" & content & "\n"
    else:
      content & "\n"

    writeFile(path, currentContent & newContent)
    return true
  except Exception as e:
    echo fmt"❌ Failed to update {rcFile}: {e.msg}"
    return false

proc setupUserCommand*(): CommandResult =
  ## Setup NIP for the current user
  let config = loadConfig()
  let binPath = config.linksDir / "Executables"
  let shell = detectShell()

  echo fmt"🌱 Setting up NIP for user (Shell: {shell})..."
  echo fmt"   Binary Path: {binPath}"

  var success = false

  case shell:
  of "zsh":
    let rcContent = fmt"""
# NIP Package Manager
export PATH="{binPath}:$PATH"
"""
    if appendToRcFile(".zshrc", rcContent):
      echo "✅ Updated .zshrc"
      success = true

  of "bash":
    let rcContent = fmt"""
# NIP Package Manager
export PATH="{binPath}:$PATH"
"""
    if appendToRcFile(".bashrc", rcContent):
      echo "✅ Updated .bashrc"
      success = true

  of "fish":
    let rcContent = fmt"""
# NIP Package Manager
contains "{binPath}" $fish_user_paths; or set -Ua fish_user_paths "{binPath}"
"""
    # Fish is typically in .config/fish/config.fish
    # Ensure dir exists
    let fishDir = getHomeDir() / ".config" / "fish"
    if not dirExists(fishDir):
      createDir(fishDir)

    if appendToRcFile(".config/fish/config.fish", rcContent):
      echo "✅ Updated config.fish"
      success = true

  else:
    return errorResult(fmt"Unsupported shell: {shell}. Please manually add {binPath} to your PATH.")

  if success:
    echo ""
    echo "✨ Setup complete! Please restart your shell or run:"
    echo fmt"   source ~/.{shell}rc"
    return successResult("NIP setup successfully")
  else:
    return errorResult("Failed to setup NIP")

proc setupSystemCommand*(): CommandResult =
  ## Setup NIP system-wide
  # TODO: Implement system-wide setup (e.g. /etc/profile.d/nip.sh)
  return errorResult("System-wide setup not yet implemented")

proc setupCommand*(args: seq[string]): CommandResult =
  ## Dispatch setup commands
  if args.len == 0:
    return errorResult("Usage: nip setup <user|system>")

  case args[0]:
  of "user":
    return setupUserCommand()
  of "system":
    return setupSystemCommand()
  else:
    return errorResult("Unknown setup target. Use 'user' or 'system'.")
@ -0,0 +1,174 @@
|
|||
# core/nip/src/nimpak/cli/store_commands.nim
|
||||
## CLI Commands for Nexus CAS (Content Addressable Storage)
|
||||
|
||||
import std/[options, strutils, strformat, terminal, os]
|
||||
import ../types
|
||||
import ../errors
|
||||
import ../cas
|
||||
import ../logger
|
||||
|
||||
proc storeHelpCommand() =
|
||||
echo """
|
||||
NIP STORE - Sovereign CAS Interface
|
||||
|
||||
USAGE:
|
||||
nip store <command> [arguments]
|
||||
|
||||
COMMANDS:
|
||||
push <file> Store a file in CAS (returns hash)
|
||||
fetch <hash> <dest> Retrieve file from CAS by hash
|
||||
verify <hash> Check if object exists and verify integrity
|
||||
gc Run garbage collection on CAS
|
||||
stats Show CAS statistics
|
||||
path <hash> Show physical path of object (if exists)
|
||||
|
||||
EXAMPLES:
|
||||
nip store push mybinary.elf
|
||||
nip store fetch xxh3-123... /tmp/restored.elf
|
||||
nip store verify xxh3-123...
|
||||
nip store stats
|
||||
"""
|
||||
|
||||
proc storePushCommand*(args: seq[string], verbose: bool): int =
|
||||
## Push a file to CAS
|
||||
if args.len < 1:
|
||||
errorLog("Usage: nip store push <file>")
|
||||
return 1
|
||||
|
||||
let filePath = args[0]
|
||||
if not fileExists(filePath):
|
||||
errorLog(fmt"File not found: {filePath}")
|
||||
return 1
|
||||
|
||||
let cas = initCasManager()
|
||||
|
||||
if verbose: showInfo(fmt"Storing '{filePath}'...")
|
||||
|
||||
let res = cas.storeFile(filePath)
|
||||
if res.isOk:
|
||||
let obj = res.get()
|
||||
if verbose:
|
||||
showInfo(fmt"Stored successfully.")
|
||||
showInfo(fmt" Original Size: {obj.size} bytes")
|
||||
showInfo(fmt" Compressed Size: {obj.compressedSize} bytes")
|
||||
showInfo(fmt" Chunks: {obj.chunks.len}")
|
||||
|
||||
# Output ONLY the hash to stdout for piping support
|
||||
echo obj.hash
|
||||
return 0
|
||||
else:
|
||||
errorLog(formatError(res.getError()))
|
||||
return 1
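# The push command above keeps progress on the logging channel and writes only
# the hash to stdout, so its output can be piped into other commands. A tiny
# self-contained sketch of that split (the hash value is made up):
stderr.writeLine "Storing file..."   # progress and diagnostics go to stderr
echo "xxh3-0123abcdef"               # hypothetical hash: the only thing a pipe consumer sees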

proc storeFetchCommand*(args: seq[string], verbose: bool): int =
  ## Fetch a file from CAS
  if args.len < 2:
    errorLog("Usage: nip store fetch <hash> <destination>")
    return 1

  let hash = args[0]
  let destPath = args[1]

  # Remove prefix if user typed "fetch cas:<hash>" or similar
  let cleanHash = if hash.contains(":"): hash.split(":")[1] else: hash

  let cas = initCasManager()

  if verbose: showInfo(fmt"Fetching object {cleanHash} to {destPath}...")

  let res = cas.retrieveFile(cleanHash, destPath)
  if res.isOk:
    if verbose: showInfo("Success.")
    return 0
  else:
    errorLog(formatError(res.getError()))
    return 1

proc storeVerifyCommand*(args: seq[string], verbose: bool): int =
  ## Verify object existence and integrity
  if args.len < 1:
    errorLog("Usage: nip store verify <hash>")
    return 1

  let hash = args[0]
  let cas = initCasManager()

  if cas.objectExists(hash):
    # Retrieve to verify integrity: retrieveObject only reads the data,
    # so we re-hash it below and compare against the recorded hash.
    # (Simple existence check is reported first for the MVP.)
    showInfo(fmt"Object {hash} exists.")

    # Check if we can read it
    let res = cas.retrieveObject(hash)
    if res.isOk:
      let data = res.get()
      let computed = cas.computeHash(data)
      if computed == hash:
        showInfo("Integrity: VERIFIED (" & $data.len & " bytes)")
        return 0
      else:
        errorLog(fmt"Integrity: FAILED (Computed: {computed})")
        return 1
    else:
      errorLog("Corruption: Object exists in index/path but cannot be read.")
      return 1
  else:
    errorLog(fmt"Object {hash} NOT FOUND.")
return 1
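# The verify path above re-reads the object and compares a freshly computed
# digest with the recorded one. Self-contained sketch of that check, using a
# stdlib digest purely as a stand-in for the xxhash-based cas.computeHash:
import std/sha1

proc verifyBlob(data, recorded: string): bool =
  ## Re-hash the stored bytes and compare with the recorded digest.
  $secureHash(data) == recorded

let payload = "example object payload"
let recordedDigest = $secureHash(payload)   # what the CAS index would have stored
doAssert verifyBlob(payload, recordedDigest)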

proc storeStatsCommand*(verbose: bool): int =
  let cas = initCasManager()
  # MVP stats: there is no persistent stats file yet (only 'cas_index.kdl'),
  # and CasManager's CasStats type has no loadStats() accessor exposed in cas.nim,
  # so for now we only report the storage root; detailed counts are pending.

  showInfo("CAS Storage Statistics")
  showInfo(fmt"Root: {cas.rootPath}")

  # Simple walkdir to count
  var count = 0
  var size = 0'i64

  for kind, path in walkDir(cas.rootPath / "objects", relative=true):
    # TODO: recurse into shard directories; for the MVP this is a no-op
    discard

  showInfo("(Detailed stats pending implementation)")
  return 0

proc storePathCommand*(args: seq[string], verbose: bool): int =
  if args.len < 1:
    return 1
  let hash = args[0]
  let cas = initCasManager()
  let path = getObjectPath(cas.rootPath, hash)
  if fileExists(path):
    echo path
    return 0
  else:
    return 1

proc dispatchStoreCommand*(args: seq[string], verbose: bool): int =
  if args.len == 0:
    storeHelpCommand()
    return 0

  let cmd = args[0].toLowerAscii()
  let subArgs = if args.len > 1: args[1..^1] else: @[]

  case cmd
  of "push": return storePushCommand(subArgs, verbose)
  of "fetch", "pull": return storeFetchCommand(subArgs, verbose)
  of "verify": return storeVerifyCommand(subArgs, verbose)
  of "stats": return storeStatsCommand(verbose)
  of "path": return storePathCommand(subArgs, verbose)
  of "help":
    storeHelpCommand()
    return 0
  else:
    errorLog(fmt"Unknown store command: {cmd}")
    storeHelpCommand()
return 1
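A sketch of how a main module might hand `store` arguments to this dispatcher; the stub stands in for the real dispatchStoreCommand so the snippet compiles on its own, and the wiring itself is an assumption, not the repository's main entry point:

import std/os

proc dispatchStoreCommand(args: seq[string], verbose: bool): int =
  # stand-in for the proc above, just so this wiring example is self-contained
  echo "store ", args
  0

when isMainModule:
  let argv = commandLineParams()
  if argv.len > 0 and argv[0] == "store":
    quit dispatchStoreCommand(argv[1..^1], verbose = false)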

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## config.nim
## Configuration management for NIP MVP
## Simple key-value configuration format

@ -396,3 +403,17 @@ proc saveExampleConfig*(path: string): bool =
  except:
    echo fmt"❌ Failed to create config at: {path}"
    return false

proc getConfigPath*(): string =
  ## Get the default user configuration file path
  let homeDir = getHomeDir()
  let xdgConfigHome = getEnv("XDG_CONFIG_HOME", homeDir / ".config")
  result = xdgConfigHome / "nip" / "config"

proc initDefaultConfig*() =
  ## Initialize default configuration if it doesn't exist
  let path = getConfigPath()
  if not fileExists(path):
    if not saveExampleConfig(path):
      raise newException(IOError, "Failed to create configuration file")
  else:
raise newException(IOError, "Configuration file already exists")
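getConfigPath follows the XDG convention: honor XDG_CONFIG_HOME and fall back to ~/.config. A minimal stdlib sketch of the same lookup:

import std/os

# Falls back to ~/.config when XDG_CONFIG_HOME is unset.
let cfgHome = getEnv("XDG_CONFIG_HOME", getHomeDir() / ".config")
echo cfgHome / "nip" / "config"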

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## Quantum-Resistant Cryptographic Transitions
##
## This module implements the algorithm migration framework for transitioning

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/database.nim
## Simple package database for MVP implementation
##

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/decentralized.nim
## Decentralized Architecture Foundation for Nippels
##

@ -1,8 +1,15 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# nimpak/dependency.nim
# Dependency graph resolution and management system

import std/[tables, sets, sequtils, algorithm, strformat]
import ../nip/types
import ./types

type
  DependencyGraph* = object

@ -8,10 +8,8 @@
## - Automated repair and recovery systems
## - Performance monitoring and optimization

import std/[os, times, json, tables, sequtils, strutils, strformat, asyncdispatch, algorithm]
import std/[os, times, json, tables, sequtils, strutils, strformat, asyncdispatch]
import ../security/[integrity_monitor, event_logger]
import ../cas
import ../types_fixed

type
  HealthCheckCategory* = enum

@ -92,11 +90,15 @@ proc getDefaultHealthMonitorConfig*(): HealthMonitorConfig =
    }
  )

# Forward declarations
proc getDirSize*(path: string): int64
proc formatHealthReport*(report: HealthReport, format: string = "plain"): string

# =============================================================================
# Package Health Checks
# =============================================================================

proc checkPackageIntegrity*(monitor: HealthMonitor): HealthCheck {.async.} =
proc checkPackageIntegrity*(monitor: HealthMonitor): Future[HealthCheck] {.async.} =
  ## Check integrity of all installed packages
  let startTime = cpuTime()
  var check = HealthCheck(

@ -157,7 +159,7 @@ proc checkPackageIntegrity*(monitor: HealthMonitor): HealthCheck {.async.} =
  check.duration = cpuTime() - startTime
  return check

proc checkPackageConsistency*(monitor: HealthMonitor): HealthCheck {.async.} =
proc checkPackageConsistency*(monitor: HealthMonitor): Future[HealthCheck] {.async.} =
  ## Check consistency of package installations and dependencies
  let startTime = cpuTime()
  var check = HealthCheck(

@ -227,7 +229,7 @@ proc checkPackageConsistency*(monitor: HealthMonitor): HealthCheck {.async.} =
# Filesystem Health Checks
# =============================================================================

proc checkFilesystemHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
proc checkFilesystemHealth*(monitor: HealthMonitor): Future[HealthCheck] {.async.} =
  ## Check filesystem health and disk usage
  let startTime = cpuTime()
  var check = HealthCheck(

@ -269,8 +271,9 @@ proc checkFilesystemHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
      missingDirs.add(dir)

  if missingDirs.len > 0:
    let missingDirsStr = missingDirs.join(", ")
    check.status = StatusCritical
    check.message = fmt"Critical directories missing: {missingDirs.join(\", \")}"
    check.message = fmt"Critical directories missing: {missingDirsStr}"
    check.repairActions = @["nip repair --filesystem", "nip init --restore-structure"]
  elif totalSize > 10 * 1024 * 1024 * 1024: # > 10GB
check.status = StatusWarning
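The hunks in this file hoist expressions containing quoted string literals out of the fmt braces (missingDirsStr here, objectCount, activeKeys and timestampStr further down), which sidesteps fmt's trouble with quote characters inside an interpolation of a plain "..." literal. A small sketch of the workaround, with example values:

import std/[strformat, strutils]

let missing = @["/System/Index", "/Programs"]
# join(", ") uses a quoted separator, so it is computed first and the
# plain identifier is interpolated instead.
let missingStr = missing.join(", ")
echo fmt"Critical directories missing: {missingStr}"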

@ -293,7 +296,7 @@ proc checkFilesystemHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
# Cache Health Checks
# =============================================================================

proc checkCacheHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
proc checkCacheHealth*(monitor: HealthMonitor): Future[HealthCheck] {.async.} =
  ## Check cache performance and integrity
  let startTime = cpuTime()
  var check = HealthCheck(

@ -311,7 +314,8 @@ proc checkCacheHealth*(monitor: HealthMonitor): HealthCheck {.async.} =

  try:
    # Initialize CAS manager for cache stats
    let casManager = newCasManager("~/.nip/cas", "/var/lib/nip/cas")
    # Initialize CAS manager for cache stats (stubbed for now if unused)
    # let casManager = newCasManager("~/.nip/cas", "/var/lib/nip/cas")

    # Simulate cache statistics (would be real in production)
    let cacheStats = %*{

@ -338,8 +342,9 @@ proc checkCacheHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
      check.message = fmt"High cache fragmentation: {fragmentation:.2f}"
      check.repairActions = @["nip cache defrag", "nip cache rebuild"]
    else:
      let objectCount = cacheStats["object_count"].getInt()
      check.status = StatusHealthy
      check.message = fmt"Cache healthy: {hitRate:.2f} hit rate, {cacheStats[\"object_count\"].getInt()} objects"
      check.message = fmt"Cache healthy: {hitRate:.2f} hit rate, {objectCount} objects"

  except Exception as e:
    check.status = StatusCritical

@ -354,7 +359,7 @@ proc checkCacheHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
# Repository Health Checks
# =============================================================================

proc checkRepositoryHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
proc checkRepositoryHealth*(monitor: HealthMonitor): Future[HealthCheck] {.async.} =
  ## Check repository connectivity and trust status
  let startTime = cpuTime()
  var check = HealthCheck(

@ -441,7 +446,7 @@ proc checkRepositoryHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
# Security Health Checks
# =============================================================================

proc checkSecurityHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
proc checkSecurityHealth*(monitor: HealthMonitor): Future[HealthCheck] {.async.} =
  ## Check security status including keys, signatures, and trust policies
  let startTime = cpuTime()
  var check = HealthCheck(

@ -484,8 +489,9 @@ proc checkSecurityHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
      check.message = fmt"{expiredKeys} expired keys need rotation"
      check.repairActions = @["nip keys rotate", "nip trust update"]
    else:
      let activeKeys = securityStatus["active_keys"].getInt()
      check.status = StatusHealthy
      check.message = fmt"Security healthy: {securityStatus[\"active_keys\"].getInt()} active keys, no critical issues"
      check.message = fmt"Security healthy: {activeKeys} active keys, no critical issues"

  except Exception as e:
    check.status = StatusCritical

@ -500,7 +506,7 @@ proc checkSecurityHealth*(monitor: HealthMonitor): HealthCheck {.async.} =
# Performance Monitoring
# =============================================================================

proc checkPerformanceMetrics*(monitor: HealthMonitor): HealthCheck {.async.} =
proc checkPerformanceMetrics*(monitor: HealthMonitor): Future[HealthCheck] {.async.} =
  ## Monitor system performance metrics
  let startTime = cpuTime()
  var check = HealthCheck(

@ -559,7 +565,7 @@ proc checkPerformanceMetrics*(monitor: HealthMonitor): HealthCheck {.async.} =
# Health Report Generation
# =============================================================================

proc runAllHealthChecks*(monitor: HealthMonitor): HealthReport {.async.} =
proc runAllHealthChecks*(monitor: HealthMonitor): Future[HealthReport] {.async.} =
  ## Run all enabled health checks and generate comprehensive report
  let startTime = now()
  var checks: seq[HealthCheck] = @[]

@ -621,7 +627,7 @@ proc runAllHealthChecks*(monitor: HealthMonitor): HealthReport {.async.} =
# Automated Repair System
# =============================================================================

proc performAutomatedRepair*(monitor: HealthMonitor, report: HealthReport): seq[string] {.async.} =
proc performAutomatedRepair*(monitor: HealthMonitor, report: HealthReport): Future[seq[string]] {.async.} =
  ## Perform automated repairs based on health report
  var repairResults: seq[string] = @[]

@ -698,7 +704,7 @@ proc formatHealthReport*(report: HealthReport, format: string = "plain"): string

  else: # plain format
    result = "NimPak System Health Report\n"
    result.add("=" * 35 & "\n\n")
    result.add(repeat("=", 35) & "\n\n")

    # Overall status
    let statusIcon = case report.overallStatus:

@ -708,7 +714,8 @@ proc formatHealthReport*(report: HealthReport, format: string = "plain"): string
      of StatusUnknown: "❓"

    result.add(fmt"{statusIcon} Overall Status: {report.overallStatus}\n")
    result.add(fmt"📅 Generated: {report.timestamp.format(\"yyyy-MM-dd HH:mm:ss\")}\n\n")
    let timestampStr = report.timestamp.format("yyyy-MM-dd HH:mm:ss")
    result.add(fmt"📅 Generated: {timestampStr}\n\n")

    # Health checks by category
    let categories = [CategoryPackages, CategoryFilesystem, CategoryCache, CategoryRepositories, CategorySecurity, CategoryPerformance]
@ -1,11 +1,18 @@
## NimPak Error Handling
##
## Comprehensive error handling utilities for the NimPak system.
## Provides formatted error messages, recovery suggestions, and error chaining.
## Task 37: Implement comprehensive error handling.
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# NimPak Error Handling
#
# Comprehensive error handling utilities for the NimPak system.
# Provides formatted error messages, recovery suggestions, and error chaining.
# Task 37: Implement comprehensive error handling.

import std/[strformat, strutils, times, tables, terminal]
import ../nip/types
import ./types

# ############################################################################
# Error Formatting

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/filesystem.nim
## GoboLinux-style filesystem management with generation integration
##

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## Package Format CAS Integration
##
## This module integrates all package formats with the Content-Addressable Storage

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## Package Format Infrastructure
##
## This module implements the core package format system with five distinct formats,
@ -1041,14 +1048,14 @@ proc storePackageInCas*(format: PackageFormat, data: seq[byte], cas: var CasMana
  ## Store package format data in content-addressable storage
  try:
    let storeResult = cas.storeObject(data)
    if storeResult.isErr:
    if not storeResult.isOk:
      return types_fixed.err[CasIntegrationResult, FormatError](FormatError(
        code: CasError,
        msg: "Failed to store package in CAS: " & storeResult.getError().msg,
        code: CasGeneralError,
        msg: "Failed to store package in CAS: " & storeResult.errValue.msg,
        format: format
      ))

    let casObject = storeResult.get()
    let casObject = storeResult.okValue
    let result = CasIntegrationResult(
      hash: casObject.hash,
      size: casObject.size,

@ -1073,14 +1080,14 @@ proc retrievePackageFromCas*(hash: string, cas: var CasManager): types_fixed.Res
  ## Retrieve package format data from content-addressable storage
  try:
    let retrieveResult = cas.retrieveObject(hash)
    if retrieveResult.isErr:
    if not retrieveResult.isOk:
      return types_fixed.err[seq[byte], FormatError](FormatError(
        code: CasError,
        msg: "Failed to retrieve package from CAS: " & retrieveResult.getError().msg,
        code: CasGeneralError,
        msg: "Failed to retrieve package from CAS: " & retrieveResult.errValue.msg,
        format: NpkBinary # Default format for error
      ))

    return types_fixed.ok[seq[byte], FormatError](retrieveResult.get())
    return types_fixed.ok[seq[byte], FormatError](retrieveResult.okValue)

  except Exception as e:
    return types_fixed.err[seq[byte], FormatError](FormatError(

@ -1126,14 +1133,14 @@ proc garbageCollectFormats*(cas: var CasManager, reachableHashes: seq[string] =
    let reachableSet = reachableHashes.toHashSet()
    let gcResult = cas.garbageCollect(reachableSet)

    if gcResult.isErr:
    if not gcResult.isOk:
      return types_fixed.err[int, FormatError](FormatError(
        code: CasError,
        msg: "Failed to garbage collect: " & gcResult.getError().msg,
        code: CasGeneralError,
        msg: "Failed to garbage collect: " & gcResult.errValue.msg,
        format: NpkBinary
      ))

    return types_fixed.ok[int, FormatError](gcResult.get())
    return types_fixed.ok[int, FormatError](gcResult.okValue)

  except Exception as e:
    return types_fixed.err[int, FormatError](FormatError(

@ -1227,17 +1234,17 @@ proc convertPackageFormat*(fromPath: string, toPath: string,

    # Store in CAS for conversion pipeline
    let storeResult = storePackageInCas(fromFormat, sourceBytes, cas)
    if storeResult.isErr:
      return err[FormatError](storeResult.getError())
    if not storeResult.isOk:
      return err[FormatError](storeResult.errValue)

    let casResult = storeResult.get()
    let casResult = storeResult.okValue

    # Retrieve and convert (simplified conversion logic)
    let retrieveResult = retrievePackageFromCas(casResult.hash, cas)
    if retrieveResult.isErr:
      return err[FormatError](retrieveResult.getError())
    if not retrieveResult.isOk:
      return err[FormatError](retrieveResult.errValue)

    let convertedData = retrieveResult.get()
    let convertedData = retrieveResult.okValue

    # Write converted package
    let parentDir = toPath.parentDir()

@ -1264,10 +1271,10 @@ proc reconstructPackageFromCas*(hash: string, format: PackageFormat,
  ## Reconstruct package from CAS storage with format-specific handling
  try:
    let retrieveResult = retrievePackageFromCas(hash, cas)
    if retrieveResult.isErr:
      return err[FormatError](retrieveResult.getError())
    if not retrieveResult.isOk:
      return err[FormatError](retrieveResult.errValue)

    let data = retrieveResult.get()
    let data = retrieveResult.okValue

    # Format-specific reconstruction logic
    case format:

@ -1313,7 +1320,7 @@ proc getPackageFormatStats*(cas: var CasManager): types_fixed.Result[JsonNode, F
    for objHash in objects:
      let retrieveResult = cas.retrieveObject(objHash)
      if retrieveResult.isOk:
        let data = retrieveResult.get()
        let data = retrieveResult.okValue
        let size = data.len.int64

        # Simple format detection based on content

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## Enhanced Garbage Collection System
##
## This module implements an enhanced garbage collection system for the unified

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/generation_filesystem.nim
## Generation-aware filesystem operations for NimPak
##

@ -1,12 +1,19 @@
## graft_coordinator.nim
## Coordinates grafting from adapters and installation
## Ties together adapters + install_manager for unified grafting
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# graft_coordinator.nim
# Coordinates grafting from adapters and installation
# Ties together adapters + install_manager for unified grafting

import std/[strformat, strutils, json, os]
import install_manager, simple_db, config
import adapters/[nix, pacman, pkgsrc, aur]
import grafting # For GraftResult type
from cas import get
import types

type
  GraftCoordinator* = ref object

@ -392,10 +399,11 @@ proc parsePackageSpec*(spec: string): tuple[source: GraftSource, name: string] =
    let name = parts[1]

    let source = case sourceStr
    of "nix": Nix
    of "pkgsrc": PKGSRC
    of "pacman": Pacman
    else: Auto
    of "nix": GraftSource.Nix
    of "pkgsrc": GraftSource.PKGSRC
    of "pacman": GraftSource.Pacman
    of "aur": GraftSource.AUR
    else: GraftSource.Auto

    return (source, name)
  else:

@ -1,9 +1,16 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# nimpak/grafting_simple.nim
# Simplified grafting infrastructure for external package integration

import std/[tables, sets, strutils, json, os, times, sequtils, hashes, options]
import ../nip/types
import utils/resultutils
import ./types

import types/grafting_types
export grafting_types

@ -39,33 +46,33 @@ proc initGraftingEngine*(configPath: string = ""): Result[GraftingEngine, string
  try:
    createDir(engine.cache.cacheDir)
  except OSError as e:
    return Result[GraftingEngine, string](isOk: false, error: "Failed to create cache directory: " & e.msg)
    return Result[GraftingEngine, string](isOk: false, errValue: "Failed to create cache directory: " & e.msg)

  return Result[GraftingEngine, string](isOk: true, value: engine)
  return Result[GraftingEngine, string](isOk: true, okValue: engine)

proc registerAdapter*(engine: var GraftingEngine, adapter: PackageAdapter): Result[bool, string] =
  ## Register a package adapter with the grafting engine
  if adapter.name in engine.adapters:
    return Result[bool, string](isOk: false, error: "Adapter already registered: " & adapter.name)
    return Result[bool, string](isOk: false, errValue: "Adapter already registered: " & adapter.name)

  engine.adapters[adapter.name] = adapter
  echo "Registered grafting adapter: " & adapter.name
  return Result[bool, string](isOk: true, value: true)
  return Result[bool, string](isOk: true, okValue: true)

proc graftPackage*(engine: var GraftingEngine, source: string, packageName: string): Result[GraftResult, string] =
  ## Graft a package from an external source
  if not engine.config.enabled:
    return Result[GraftResult, string](isOk: false, error: "Grafting is disabled in configuration")
    return Result[GraftResult, string](isOk: false, errValue: "Grafting is disabled in configuration")

  if source notin engine.adapters:
    return Result[GraftResult, string](isOk: false, error: "Unknown grafting source: " & source)
    return Result[GraftResult, string](isOk: false, errValue: "Unknown grafting source: " & source)

  let adapter = engine.adapters[source]
  if not adapter.enabled:
    return Result[GraftResult, string](isOk: false, error: "Adapter disabled: " & source)
    return Result[GraftResult, string](isOk: false, errValue: "Adapter disabled: " & source)

  # Create a simple result for now
  let result = GraftResult(
  let graftRes = GraftResult(
    success: true,
    packageId: packageName,
    metadata: GraftedPackageMetadata(

@ -89,7 +96,7 @@ proc graftPackage*(engine: var GraftingEngine, source: string, packageName: stri
  )

  echo "Successfully grafted package: " & packageName
  return ok[GraftResult](result)
  return Result[GraftResult, string](isOk: true, okValue: graftRes)

proc listGraftedPackages*(engine: GraftingEngine): seq[GraftedPackageMetadata] =
  ## List all grafted packages in cache

@ -129,11 +136,11 @@ method graftPackage*(adapter: PackageAdapter, packageName: string, cache: Grafti

method validatePackage*(adapter: PackageAdapter, packageName: string): Result[bool, string] {.base.} =
  ## Base method for validating a package - can be overridden
  return ok[bool](true)
  return Result[bool, string](isOk: true, okValue: true)

method getPackageInfo*(adapter: PackageAdapter, packageName: string): Result[JsonNode, string] {.base.} =
  ## Base method for getting package information - can be overridden
  return ok[JsonNode](%*{"name": packageName, "adapter": adapter.name})
  return Result[JsonNode, string](isOk: true, okValue: %*{"name": packageName, "adapter": adapter.name})

# Utility functions
proc calculateGraftHash*(packageName: string, source: string, timestamp: DateTime): string =
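The grafting hunks above migrate the Result constructors from value/error to okValue/errValue fields. A self-contained sketch of the object-variant shape those constructors imply (the layout is inferred from the calls in the diff, not copied from resultutils):

type
  SketchResult[T, E] = object
    case isOk: bool
    of true:  okValue: T
    of false: errValue: E

proc demo(flag: bool): SketchResult[int, string] =
  if flag:
    SketchResult[int, string](isOk: true, okValue: 42)
  else:
    SketchResult[int, string](isOk: false, errValue: "boom")

let r = demo(true)
if r.isOk:
  echo r.okValue
else:
  echo r.errValue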

@ -1,8 +1,15 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# nimpak/grafting.nim
# Core grafting infrastructure for external package integration

import std/[tables, sets, strutils, json, os, times, sequtils, hashes, options]
import ../nip/types
import ./types
import utils/resultutils
import types/grafting_types

@ -1,8 +1,15 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# nimpak/grafting_working.nim
# Working grafting infrastructure for external package integration

import std/[tables, strutils, json, os, times, sequtils, options, hashes]
import ../nip/types
import ./types
import utils/resultutils
import types/grafting_types

@ -1,8 +1,15 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# nimpak/install.nim
# Package installation orchestrator with atomic operations

import std/[tables, sequtils, strformat]
import ../nip/types, dependency, transactions, filesystem, cas
import ./types, dependency, transactions, filesystem, cas

type
  InstallStep* = object

@ -1,9 +1,17 @@
## install_manager.nim
## Unified installation system for NIP MVP
## Coordinates grafting from adapters and actual system installation
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# install_manager.nim
# Unified installation system for NIP MVP
# Coordinates grafting from adapters and actual system installation

import std/[os, times, json, strformat, strutils, tables, sequtils, algorithm]
import cas
import ./types

type
  InstallConfig* = object

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## KDL Parser Integration for NIP
## Provides KDL parsing functionality for NIP configuration and package files

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/lockfile_system.nim
## Lockfile generation and reproducibility system for NimPak
##

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## logger.nim
## Logging system for NIP MVP

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## NimPak Structured Logging
##
## Comprehensive logging system for the NimPak package manager.

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## Merkle Tree Implementation for Nippels
##
## This module implements a high-performance merkle tree for cryptographic

@ -1,10 +1,17 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## NimPak Migration Tools
##
## Tools for migrating from legacy formats and other package managers.
## Task 42: Implement migration tools.

import std/[os, strutils, strformat, json, tables, sequtils, times]
import ../nip/types
import ./types
import cas
import logging

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/namespace_subsystem.nim
## Namespace Subsystem for Nippels
##

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/nexter_comm.nim
## Nippel-Nexter Communication Foundation

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/nippel_types.nim
## Core type definitions for Nippels
##

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/nippels.nim
## Nippels: Lightweight, namespace-based application isolation
##

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/nippels_cli.nim
## Enhanced CLI commands for Nippels management
##

@ -1,8 +1,15 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# nimpak/npk_conversion.nim
# Enhanced NPK conversion with build hash integration

import std/[strutils, json, os, times, tables, sequtils, strformat, algorithm, osproc]
import ../nip/types
import ./types
import utils/resultutils
import types/grafting_types

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## NOF Overlay Fragment Format Handler (.nof)
##
## This module implements the NOF (Nexus Overlay Fragment) format for declarative

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## NPK Package Format Handler
##
## This module implements the native .npk.zst package format with KDL metadata

@ -16,6 +23,7 @@ import std/[os, json, times, strutils, sequtils, tables, options, osproc, strfor
import ./types_fixed
import ./formats
import ./cas except Result, VoidResult, ok, err, ChunkRef
import ./grafting

# KDL parsing will be added when kdl library is available
# For now, we'll use JSON as intermediate format and generate KDL strings

@ -54,12 +62,12 @@ proc createNpkPackage*(fragment: Fragment, sourceDir: string, cas: var CasManage
    let storeResult = cas.storeFile(filePath)
    if not storeResult.isOk:
      return err[NpkPackage, NpkError](NpkError(
        code: CasError,
        msg: "Failed to store file in CAS: " & storeResult.getError().msg,
        code: CasGeneralError,
        msg: "Failed to store file in CAS: " & storeResult.errValue.msg,
        packageName: fragment.id.name
      ))

    let casObject = storeResult.get()
    let casObject = storeResult.okValue

    let packageFile = PackageFile(
      path: relativePath,

@ -455,7 +463,7 @@ proc extractNpkPackage*(npk: NpkPackage, targetDir: string, cas: var CasManager)
    let retrieveResult = cas.retrieveFile(file.hash, targetPath)
    if not retrieveResult.isOk:
      return err[NpkError](NpkError(
        code: CasError,
        code: CasGeneralError,
        msg: "Failed to retrieve file from CAS: " & retrieveResult.errValue.msg,
        packageName: npk.metadata.id.name
      ))

@ -673,29 +681,75 @@ proc convertGraftToNpk*(graftResult: GraftResult, cas: var CasManager): Result[N
  ## This includes preserving provenance and audit log information
  ## Files are stored in CAS for deduplication and integrity verification

  # Use the fragment and extractedPath from graftResult to create NPK package
  let createResult = createNpkPackage(graftResult.fragment, graftResult.extractedPath, cas)
  # Construct Fragment from GraftResult metadata
  let pkgId = PackageId(
    name: graftResult.metadata.packageName,
    version: graftResult.metadata.version,
    stream: Custom # Default to Custom for grafts
  )

  let source = Source(
    url: graftResult.metadata.provenance.downloadUrl,
    hash: graftResult.metadata.originalHash,
    hashAlgorithm: "blake2b", # Default assumption
    sourceMethod: Grafted,
    timestamp: graftResult.metadata.graftedAt
  )

  let fragment = Fragment(
    id: pkgId,
    source: source,
    dependencies: @[], # Dependencies not captured in simple GraftResult
    buildSystem: Custom,
    metadata: PackageMetadata(
      description: "Grafted from " & graftResult.metadata.source,
      license: "Unknown",
      maintainer: "Auto-Graft",
      tags: @["grafted"],
      runtime: RuntimeProfile(
        libc: Glibc, # Assumption
        allocator: System,
        systemdAware: false,
        reproducible: false,
        tags: @[]
      )
    ),
    acul: AculCompliance(
      required: false,
      membership: "",
      attribution: "Grafted package",
      buildLog: graftResult.metadata.buildLog
    )
  )

  let extractedPath = graftResult.metadata.provenance.extractedPath
  if extractedPath.len == 0 or not dirExists(extractedPath):
    return err[NpkPackage, NpkError](NpkError(
      code: PackageNotFound,
      msg: "Extracted path not found or empty in graft result",
      packageName: pkgId.name
    ))

  # Use the constructed fragment and extractedPath to create NPK package
  let createResult = createNpkPackage(fragment, extractedPath, cas)
  if not createResult.isOk:
    return err[NpkPackage, NpkError](createResult.getError())
    return err[NpkPackage, NpkError](createResult.errValue)

  var npk = createResult.get()

  # Map provenance information from auditLog and originalMetadata
  # Embed audit log info into ACUL compliance buildLog for traceability
  npk.metadata.acul.buildLog = graftResult.auditLog.sourceOutput
  var npk = createResult.okValue

  # Map provenance information
  # Add provenance information to runtime tags for tracking
  let provenanceTag = "grafted:" & $graftResult.auditLog.source & ":" & $graftResult.auditLog.timestamp
  let provenanceTag = "grafted:" & graftResult.metadata.source & ":" & $graftResult.metadata.graftedAt
  npk.metadata.metadata.runtime.tags.add(provenanceTag)

  # Add deduplication status to tags for audit purposes
  let deduplicationTag = "dedup:" & graftResult.auditLog.deduplicationStatus.toLowerAscii()
  # Add deduplication status to tags for audit purposes (simplified)
  let deduplicationTag = "dedup:unknown"
  npk.metadata.metadata.runtime.tags.add(deduplicationTag)

  # Preserve original archive hash in attribution for full traceability
  # Preserve original archive hash in attribution
  if npk.metadata.acul.attribution.len > 0:
    npk.metadata.acul.attribution.add(" | ")
  npk.metadata.acul.attribution.add("Original: " & graftResult.auditLog.blake2bHash)
  npk.metadata.acul.attribution.add("Original: " & graftResult.metadata.originalHash)

  # Return the constructed NPK package with full provenance
  return ok[NpkPackage, NpkError](npk)
@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## NIP Pacman CLI Integration
##
## This module provides CLI commands that make NIP a drop-in replacement

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## platform.nim
## Platform detection and BSD compatibility

@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## nimpak/profile_manager.nim
## Profile Manager for Nippels
##

@ -1,49 +1,42 @@
## Read-Only Protection Manager
##
## This module implements the read-only protection system for CAS storage,
## ensuring immutability by default with controlled write access elevation.
##
## SECURITY NOTE: chmod-based protection is a UX feature, NOT a security feature!
## In user-mode (~/.local/share/nexus/cas/), chmod 555 only prevents ACCIDENTAL
## deletion/modification. A user who owns the files can bypass this trivially.
##
## Real security comes from:
## 1. Merkle tree verification (cryptographic integrity)
## 2. User namespaces (kernel-enforced read-only mounts during execution)
## 3. Root ownership (system-mode only: /var/lib/nexus/cas/)
##
## See docs/cas-security-architecture.md for full security model.
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# Read-Only Protection Manager
#
# This module implements the read-only protection system for CAS storage,
# ensuring immutability by default with controlled write access elevation.
#
# SECURITY NOTE: chmod-based protection is a UX feature, NOT a security feature!
# In user-mode (~/.local/share/nexus/cas/), chmod 555 only prevents ACCIDENTAL
# deletion/modification. A user who owns the files can bypass this trivially.
#
# Real security comes from:
# 1. Merkle tree verification (cryptographic integrity)
# 2. User namespaces (kernel-enforced read-only mounts during execution)
# 3. Root ownership (system-mode only: /var/lib/nexus/cas/)
#
# See docs/cas-security-architecture.md for full security model.

import std/[os, times, sequtils, strutils]
import xxhash
import ./types

type
  # Result types for error handling
  VoidResult*[E] = object
    case isOk*: bool
    of true:
      discard
    of false:
      errValue*: E

  # Error types
  ErrorCode* = enum
    FileWriteError, FileReadError, UnknownError

  CasError* = object of CatchableError
    code*: ErrorCode
    objectHash*: string

  ProtectionManager* = object
    casPath*: string ## Path to CAS root directory
    auditLog*: string ## Path to audit log file
    casPath*: string # Path to CAS root directory
    auditLog*: string # Path to audit log file

  SecurityError* = object of CatchableError
    code*: string
    context*: string
  SecurityEvent* = object
    timestamp*: DateTime
    eventType*: string
    hash*: string
    details*: string
    severity*: string # "info", "warning", "critical"

proc ok*[E](dummy: typedesc[E]): VoidResult[E] =
  VoidResult[E](isOk: true)

proc newProtectionManager*(casPath: string): ProtectionManager =
  ## Create a new protection manager for the given CAS path

@ -69,35 +62,35 @@ proc logOperation*(pm: ProtectionManager, op: string, path: string, hash: string
    # (better to allow operation than to fail)
    discard

proc setReadOnly*(pm: ProtectionManager): VoidResult[CasError] =
proc setReadOnly*(pm: ProtectionManager): VoidResult[NimPakError] =
  ## Set CAS directory to read-only (chmod 555)
  try:
    setFilePermissions(pm.casPath, {fpUserRead, fpUserExec,
                                    fpGroupRead, fpGroupExec,
                                    fpOthersRead, fpOthersExec})
    pm.logOperation("SET_READONLY", pm.casPath)
    return ok(CasError)
    return ok(NimPakError)
  except OSError as e:
    return VoidResult[CasError](isOk: false, errValue: CasError(
    return VoidResult[NimPakError](isOk: false, errValue: NimPakError(
      code: FileWriteError,
      msg: "Failed to set read-only permissions: " & e.msg
    ))

proc setWritable*(pm: ProtectionManager): VoidResult[CasError] =
proc setWritable*(pm: ProtectionManager): VoidResult[NimPakError] =
  ## Set CAS directory to writable (chmod 755)
  try:
    setFilePermissions(pm.casPath, {fpUserRead, fpUserWrite, fpUserExec,
                                    fpGroupRead, fpGroupExec,
                                    fpOthersRead, fpOthersExec})
    pm.logOperation("SET_WRITABLE", pm.casPath)
    return ok(CasError)
    return ok(NimPakError)
  except OSError as e:
    return VoidResult[CasError](isOk: false, errValue: CasError(
    return VoidResult[NimPakError](isOk: false, errValue: NimPakError(
      code: FileWriteError,
      msg: "Failed to set writable permissions: " & e.msg
))
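setReadOnly and setWritable toggle the same permission set; per the header note this is an accident guard, not a security boundary. A stdlib sketch of the chmod-555 step on a throwaway directory (paths are illustrative only):

import std/os

let dir = getTempDir() / "cas_perm_demo"
createDir(dir)
# 555-equivalent: read/execute for everyone, no write bits.
setFilePermissions(dir, {fpUserRead, fpUserExec,
                         fpGroupRead, fpGroupExec,
                         fpOthersRead, fpOthersExec})
echo getFilePermissions(dir)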
|
||||
|
||||
proc withWriteAccess*(pm: ProtectionManager, operation: proc()): VoidResult[CasError] =
|
||||
proc withWriteAccess*(pm: ProtectionManager, operation: proc()): VoidResult[NimPakError] =
|
||||
## Execute operation with temporary write access, then restore read-only
|
||||
## This ensures atomic permission elevation and restoration
|
||||
var oldPerms: set[FilePermission]
|
||||
|
|
@ -119,7 +112,7 @@ proc withWriteAccess*(pm: ProtectionManager, operation: proc()): VoidResult[CasE
|
|||
if not setReadOnlyResult.isOk:
|
||||
return setReadOnlyResult
|
||||
|
||||
return ok(CasError)
|
||||
return ok(NimPakError)
|
||||
|
||||
except Exception as e:
|
||||
# Ensure permissions restored even on error
|
||||
|
|
@ -129,12 +122,12 @@ proc withWriteAccess*(pm: ProtectionManager, operation: proc()): VoidResult[CasE
|
|||
except:
|
||||
discard # Best effort to restore
|
||||
|
||||
return VoidResult[CasError](isOk: false, errValue: CasError(
|
||||
return VoidResult[NimPakError](isOk: false, errValue: NimPakError(
|
||||
code: UnknownError,
|
||||
msg: "Write operation failed: " & e.msg
|
||||
))
|
||||
|
||||
proc ensureReadOnly*(pm: ProtectionManager): VoidResult[CasError] =
|
||||
proc ensureReadOnly*(pm: ProtectionManager): VoidResult[NimPakError] =
|
||||
## Ensure CAS directory is in read-only state
|
||||
## This should be called during initialization
|
||||
return pm.setReadOnly()
|
||||
|
|
@ -152,18 +145,7 @@ proc verifyReadOnly*(pm: ProtectionManager): bool =
|
|||
# Merkle Integrity Verification
|
||||
# This is the PRIMARY security mechanism (not chmod)
|
||||
|
||||
type
|
||||
IntegrityViolation* = object of CatchableError
|
||||
hash*: string
|
||||
expectedHash*: string
|
||||
chunkPath*: string
|
||||
|
||||
SecurityEvent* = object
|
||||
timestamp*: DateTime
|
||||
eventType*: string
|
||||
hash*: string
|
||||
details*: string
|
||||
severity*: string # "info", "warning", "critical"
|
||||
|
||||
proc logSecurityEvent*(pm: ProtectionManager, event: SecurityEvent) =
|
||||
## Log security events (integrity violations, tampering attempts, etc.)
|
||||
|
|
@ -180,7 +162,7 @@ proc logSecurityEvent*(pm: ProtectionManager, event: SecurityEvent) =
|
|||
# If we can't write to audit log, at least try stderr
|
||||
stderr.writeLine("SECURITY EVENT: " & event.eventType & " - " & event.details)
|
||||
|
||||
proc verifyChunkIntegrity*(pm: ProtectionManager, data: seq[byte], expectedHash: string): VoidResult[CasError] =
|
||||
proc verifyChunkIntegrity*(pm: ProtectionManager, data: seq[byte], expectedHash: string): VoidResult[NimPakError] =
|
||||
## Verify chunk integrity by recalculating hash
|
||||
## This is the PRIMARY security mechanism - always verify before use
|
||||
try:
|
||||
|
|
@ -197,9 +179,9 @@ proc verifyChunkIntegrity*(pm: ProtectionManager, data: seq[byte], expectedHash:
|
|||
)
|
||||
pm.logSecurityEvent(event)
|
||||
|
||||
return VoidResult[CasError](isOk: false, errValue: CasError(
|
||||
return VoidResult[NimPakError](isOk: false, errValue: NimPakError(
|
||||
code: UnknownError,
|
||||
objectHash: expectedHash,
|
||||
context: "Object Hash: " & expectedHash,
|
||||
msg: "Chunk integrity violation detected! Expected: " & expectedHash &
|
||||
", Got: " & calculatedHash & ". This chunk may be corrupted or tampered with."
|
||||
))
|
||||
|
|
@ -214,26 +196,26 @@ proc verifyChunkIntegrity*(pm: ProtectionManager, data: seq[byte], expectedHash:
|
|||
)
|
||||
pm.logSecurityEvent(event)
|
||||
|
||||
return ok(CasError)
|
||||
return ok(NimPakError)
|
||||
|
||||
except Exception as e:
|
||||
return VoidResult[CasError](isOk: false, errValue: CasError(
|
||||
return VoidResult[NimPakError](isOk: false, errValue: NimPakError(
|
||||
code: UnknownError,
|
||||
msg: "Failed to verify chunk integrity: " & e.msg,
|
||||
objectHash: expectedHash
|
||||
context: "Object Hash: " & expectedHash
|
||||
))
|
||||
|
||||
proc verifyChunkIntegrityFromFile*(pm: ProtectionManager, filePath: string, expectedHash: string): VoidResult[CasError] =
|
||||
proc verifyChunkIntegrityFromFile*(pm: ProtectionManager, filePath: string, expectedHash: string): VoidResult[NimPakError] =
|
||||
## Verify chunk integrity by reading file and checking hash
|
||||
try:
|
||||
let data = readFile(filePath)
|
||||
let byteData = data.toOpenArrayByte(0, data.len - 1).toSeq()
|
||||
return pm.verifyChunkIntegrity(byteData, expectedHash)
|
||||
except IOError as e:
|
||||
return VoidResult[CasError](isOk: false, errValue: CasError(
|
||||
return VoidResult[NimPakError](isOk: false, errValue: NimPakError(
|
||||
code: FileReadError,
|
||||
msg: "Failed to read chunk file for verification: " & e.msg,
|
||||
objectHash: expectedHash
|
||||
context: "Object Hash: " & expectedHash
|
||||
))
|
||||
|
||||
proc scanCASIntegrity*(pm: ProtectionManager, casPath: string): tuple[verified: int, corrupted: seq[string]] =
|
||||
|
|
|
|||
|
|
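Note on the verification path above: the integrity check is deliberately simple, recompute the BLAKE2b digest of the chunk and compare it with the expected hex string. A minimal, self-contained Nim sketch of that idea follows, assuming only nimcrypto's blake2 API; the helper names are illustrative and are not part of protection_manager.

import std/strutils
import nimcrypto/blake2

proc blake2bHex(data: openArray[byte]): string =
  ## Recompute the BLAKE2b-512 digest and render it as lowercase hex.
  var ctx: blake2_512
  ctx.init()
  ctx.update(data)
  for b in ctx.finish().data:
    result.add(b.toHex(2).toLowerAscii())

proc chunkMatches(data: openArray[byte], expectedHash: string): bool =
  ## Verification boils down to: recalculate, then compare against the stored hash.
  result = blake2bHex(data) == expectedHash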
@@ -1,3 +1,10 @@
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

## NPR Recipe Format Handler (.npr)
##
## This module implements the NPR (Nexus Package Recipe) format for source-level
@@ -513,12 +513,20 @@ proc fetchBinaryPackage*(packageName: string, version: string, url: string,
# Return CAS path
return FetchResult[string](
success: true,
value: storeResult.get().hash,
value: storeResult.okValue.hash,
bytesTransferred: fetchRes.bytesTransferred,
duration: fetchRes.duration
)

return result
# Store failed
return FetchResult[string](
success: false,
error: "Failed to store package in CAS: " & storeResult.errValue.msg,
errorCode: 500
)

# Fetch failed
return fetchRes

# =============================================================================
# CLI Integration
@@ -572,7 +572,7 @@ proc createDeltaObject*(engine: SyncEngine, objectHash: string): SyncResult[Delt
errorCode: 404
)

let originalData = objectResult.value
let originalData = objectResult.okValue
let originalSize = int64(originalData.len)

# Compress the data using zstd

@@ -630,7 +630,7 @@ proc applyDeltaObject*(engine: SyncEngine, delta: DeltaObject): SyncResult[bool]
if not storeResult.isOk:
return SyncResult[bool](
success: false,
error: fmt"Failed to store object: {storeResult.error.msg}",
error: fmt"Failed to store object: {storeResult.errValue.msg}",
errorCode: 500
)

@@ -21,6 +21,8 @@ import ../security/signature_verifier
import ../security/provenance_tracker
import ../remote/manager

import ../types/grafting_types

type
PublishConfig* = object
## Configuration for publishing packages

@@ -54,7 +56,7 @@ type
of FromCas:
files*: seq[types_fixed.PackageFile]
of FromGraft:
graftResult*: types_fixed.GraftResult
graftResult*: grafting_types.GraftResult

ArtifactBuilder* = ref object
cas*: CasManager

@@ -103,10 +105,10 @@ proc buildFromDirectory*(builder: ArtifactBuilder,

# Store in CAS and get hash
let storeResult = builder.cas.storeObject(dataBytes)
if cas.isErr(storeResult):
if not storeResult.isOk:
return types_fixed.err[NpkPackage, string]("Failed to store file " & file & " in CAS")

let casObj = cas.get(storeResult)
let casObj = storeResult.okValue
let info = getFileInfo(fullPath)

files.add(PackageFile(

@@ -359,8 +361,8 @@ proc publish*(builder: ArtifactBuilder,
archiveData.toOpenArrayByte(0, archiveData.len - 1).toSeq()
)

if not cas.isErr(storeResult):
result.casHash = cas.get(storeResult).hash
if storeResult.isOk:
result.casHash = storeResult.okValue.hash

# Step 5: Upload to repository (if configured)
if builder.config.repoId.len > 0:
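The accessor churn above (cas.get() / cas.isErr() and .value / .error giving way to isOk, okValue and errValue) all points at one shared result convention. The concrete definition lives elsewhere in the tree, so the following is only an illustrative reconstruction of that shape; the type name is invented here, the field names are taken from the call sites in the diff.

type
  PakResult[T, E] = object
    case isOk: bool
    of true: okValue: T
    of false: errValue: E

proc ok[T, E](v: T): PakResult[T, E] =
  PakResult[T, E](isOk: true, okValue: v)

proc err[T, E](e: E): PakResult[T, E] =
  PakResult[T, E](isOk: false, errValue: e)

# Call sites then read uniformly:
#   if storeResult.isOk: use storeResult.okValue
#   else:                report storeResult.errValue.msg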
@@ -5,13 +5,12 @@
## Supports BLAKE2b (primary) and BLAKE3 (future) with algorithm detection and fallback.

import std/[os, streams, strutils, strformat, times, options]
import nimcrypto/[blake2, sha2]
import nimcrypto/blake2

type
HashAlgorithm* = enum
HashBlake2b = "blake2b"
HashBlake3 = "blake3" # Future implementation
HashSha256 = "sha256" # Legacy support

HashResult* = object
algorithm*: HashAlgorithm

@@ -27,7 +26,6 @@ type
StreamingHasher* = object
algorithm*: HashAlgorithm
blake2bContext*: blake2_512 # BLAKE2b-512 context
sha256Context*: sha256 # SHA256 context for legacy support
# blake3Context*: Blake3Context # Future BLAKE3 context
bytesProcessed*: int64
startTime*: times.DateTime

@@ -42,12 +40,8 @@ proc detectHashAlgorithm*(hashString: string): HashAlgorithm =
return HashBlake2b
elif hashString.startsWith("blake3-"):
return HashBlake3
elif hashString.startsWith("sha256-"):
return HashSha256
elif hashString.len == 128: # BLAKE2b-512 hex length
return HashBlake2b
elif hashString.len == 64: # SHA256 hex length
return HashSha256
else:
raise newException(ValueError, fmt"Unknown hash format: {hashString[0..min(50, hashString.high)]}")

@@ -68,18 +62,11 @@ proc parseHashString*(hashString: string): (HashAlgorithm, string) =
else:
return (HashBlake3, hashString)

of HashSha256:
if hashString.startsWith("sha256-"):
return (HashSha256, hashString[7..^1])
else:
return (HashSha256, hashString)

proc formatHashString*(algorithm: HashAlgorithm, digest: string): string =
## Format hash digest with algorithm prefix
case algorithm:
of HashBlake2b: fmt"blake2b-{digest}"
of HashBlake3: fmt"blake3-{digest}"
of HashSha256: fmt"sha256-{digest}"

# =============================================================================
# Streaming Hash Computation

@@ -104,9 +91,6 @@ proc initStreamingHasher*(algorithm: HashAlgorithm): StreamingHasher =
hasher.algorithm = HashBlake2b
hasher.blake2bContext.init()

of HashSha256:
hasher.sha256Context.init()

return hasher

proc update*(hasher: var StreamingHasher, data: openArray[byte]) =

@@ -119,9 +103,6 @@ proc update*(hasher: var StreamingHasher, data: openArray[byte]) =
# Fallback to BLAKE2b (already handled in init)
hasher.blake2bContext.update(data)

of HashSha256:
hasher.sha256Context.update(data)

hasher.bytesProcessed += data.len

proc update*(hasher: var StreamingHasher, data: string) =

@@ -153,15 +134,6 @@ proc finalize*(hasher: var StreamingHasher): HashResult =
computeTime: computeTime
)

of HashSha256:
let digest = hasher.sha256Context.finish()
return HashResult(
algorithm: HashSha256,
digest: ($digest).toLower(), # Ensure lowercase hex
verified: false,
computeTime: computeTime
)

# =============================================================================
# File Hash Computation
# =============================================================================

@@ -200,7 +172,8 @@ proc computeFileHash*(filePath: string, algorithm: HashAlgorithm = HashBlake2b):
fileStream.close()

proc computeLargeFileHash*(filePath: string, algorithm: HashAlgorithm = HashBlake2b,
progressCallback: proc(bytesProcessed: int64, totalBytes: int64) = nil): HashResult =
progressCallback: proc(bytesProcessed: int64,
totalBytes: int64) = nil): HashResult =
## Compute hash of a large file (>1GB) with progress reporting
if not fileExists(filePath):
raise newException(IOError, fmt"File not found: {filePath}")

@@ -261,7 +234,8 @@ proc verifyFileHash*(filePath: string, expectedHash: string): HashResult =
hashResult.verified = (hashResult.digest == expectedDigest)

if not hashResult.verified:
var error = newException(HashVerificationError, fmt"Hash verification failed for {filePath}")
var error = newException(HashVerificationError,
fmt"Hash verification failed for {filePath}")
error.algorithm = algorithm
error.expectedHash = expectedDigest
error.actualHash = hashResult.digest

@@ -277,7 +251,8 @@ proc verifyStringHash*(data: string, expectedHash: string): HashResult =
hashResult.verified = (hashResult.digest == expectedDigest)

if not hashResult.verified:
var error = newException(HashVerificationError, fmt"Hash verification failed for string data")
var error = newException(HashVerificationError,
fmt"Hash verification failed for string data")
error.algorithm = algorithm
error.expectedHash = expectedDigest
error.actualHash = hashResult.digest

@@ -293,7 +268,8 @@ proc verifyStreamHash*(stream: Stream, expectedHash: string): HashResult =
hashResult.verified = (hashResult.digest == expectedDigest)

if not hashResult.verified:
var error = newException(HashVerificationError, fmt"Hash verification failed for stream data")
var error = newException(HashVerificationError,
fmt"Hash verification failed for stream data")
error.algorithm = algorithm
error.expectedHash = expectedDigest
error.actualHash = hashResult.digest

@@ -375,14 +351,14 @@ proc getPreferredHashAlgorithm*(): HashAlgorithm =

proc getSupportedAlgorithms*(): seq[HashAlgorithm] =
## Get list of supported hash algorithms
return @[HashBlake2b, HashSha256] # Add HashBlake3 when implemented
return @[HashBlake2b] # Add HashBlake3 when implemented

proc getFallbackAlgorithm*(algorithm: HashAlgorithm): HashAlgorithm =
## Get fallback algorithm for unsupported algorithms
case algorithm:
of HashBlake3:
return HashBlake2b # BLAKE3 falls back to BLAKE2b
of HashBlake2b, HashSha256:
of HashBlake2b:
return algorithm # Already supported

proc isAlgorithmSupported*(algorithm: HashAlgorithm): bool =
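As a quick aside on the format these hunks keep touching: hashes travel as "algorithm-<hex>" strings, detected by prefix with a length-based fallback for bare BLAKE2b-512 digests. A small sketch of that round trip follows, assuming only the prefixes and hex lengths documented in the diff above; the proc name is illustrative, not the module's API.

import std/strutils

proc splitHashString(s: string): tuple[algo, digest: string] =
  ## Mirror of the detect/parse logic: prefix first, then hex-length fallback.
  if s.startsWith("blake2b-"):
    return ("blake2b", s[8 .. ^1])
  elif s.startsWith("blake3-"):
    return ("blake3", s[7 .. ^1])
  elif s.len == 128:   # bare BLAKE2b-512 hex digest
    return ("blake2b", s)
  else:
    raise newException(ValueError, "unknown hash format: " & s)

when isMainModule:
  doAssert splitHashString("blake2b-" & 'a'.repeat(128)).algo == "blake2b"
  doAssert splitHashString('f'.repeat(128)).digest.len == 128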
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## NIP Session Management
|
||||
##
|
||||
## Handles persistent session state with track, channel, and policy management
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## NIP Shell Core Types
|
||||
##
|
||||
## This module defines the foundational data structures for the NIP shell
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## Signature Management for Nexus Formats
|
||||
##
|
||||
## This module implements Ed25519 signing and verification for NPK, NIP, and NEXTER formats.
|
||||
|
|
@ -15,7 +22,7 @@
|
|||
|
||||
import std/[os, strutils, json, base64, tables, times, sets]
|
||||
import ed25519
|
||||
import ../nip/types
|
||||
import ./types
|
||||
|
||||
type
|
||||
SignatureManager* = object
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## NSS System Snapshot Format Handler (.nss.zst)
|
||||
##
|
||||
## This module implements the NSS (Nexus System Snapshot) format for complete
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## system_integration.nim
|
||||
## System integration for NIP - PATH, libraries, shell integration
|
||||
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
import types
|
||||
|
||||
when isMainModule:
|
||||
|
|
|
|||
|
|
@ -1,8 +1,15 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
# nimpak/transactions.nim
|
||||
# Atomic transaction management system
|
||||
|
||||
import std/[tables, strutils, json, times]
|
||||
import ../nip/types
|
||||
import ./types
|
||||
|
||||
# Transaction management functions
|
||||
proc beginTransaction*(): Transaction =
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
import strutils, os
|
||||
|
||||
type
|
||||
|
|
|
|||
|
|
@ -1,7 +1,14 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
# nimpak/types.nim
|
||||
# Core data structures and types for the NimPak system
|
||||
|
||||
import std/[times, tables, options, json, hashes]
|
||||
import std/[hashes]
|
||||
|
||||
# Re-export the comprehensive types from types_fixed
|
||||
include types_fixed
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## NimPak Core Types
|
||||
##
|
||||
## This module defines the foundational data structures for the NimPak package
|
||||
|
|
|
|||
|
|
@@ -1,7 +1,14 @@
## NimPak Core Types
##
## This module defines the foundational data structures for the NimPak package
## management system, following NexusOS architectural principles.
# SPDX-License-Identifier: LSL-1.0
# Copyright (c) 2026 Markus Maiwald
# Stewardship: Self Sovereign Society Foundation
#
# This file is part of the Nexus Sovereign Core.
# See legal/LICENSE_SOVEREIGN.md for license terms.

# NimPak Core Types
#
# This module defines the foundational data structures for the NimPak package
# management system, following NexusOS architectural principles.

import std/[times, tables, options, json]

@@ -81,13 +88,24 @@ type
suggestions*: seq[string]

ErrorCode* = enum
PackageNotFound, DependencyConflict, ChecksumMismatch,
PermissionDenied, NetworkError, BuildFailed,
InvalidMetadata, AculViolation, CellNotFound,
FilesystemError, CasError, GraftError,
# CAS-specific errors
ObjectNotFound, CorruptedObject, StorageError, CompressionError,
FileReadError, FileWriteError, UnknownError
# Access Control
PermissionDenied, ElevationRequired, ReadOnlyViolation,
AculViolation, PolicyViolation, TrustViolation, SignatureInvalid,

# Network & Transport
NetworkError, DownloadFailed, RepositoryUnavailable, TimeoutError,

# Build & Dependency
BuildFailed, CompilationError, MissingDependency, DependencyConflict,
VersionMismatch, ChecksumMismatch, InvalidMetadata,

# Storage & Integrity
FilesystemError, CasGeneralError, GraftError, PackageNotFound, CellNotFound,
ObjectNotFound, CorruptedObject, StorageError, CompressionError, StorageFull,
FileReadError, FileWriteError, PackageCorrupted, ReferenceIntegrityError,

# Runtime & Lifecycle
TransactionFailed, RollbackFailed, GarbageCollectionFailed, UnknownError

# =============================================================================
# Package Identification and Streams

@@ -405,11 +423,7 @@ type
deduplicationStatus*: string # "New" or "Reused"
blake2bHash*: string # BLAKE2b hash for enhanced grafting

GraftResult* = object
fragment*: Fragment
extractedPath*: string
originalMetadata*: JsonNode
auditLog*: GraftAuditLog


# =============================================================================
# System Layers and Runtime Control
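One practical consequence of the regrouped enum: downstream code can branch on whole families of codes instead of ad-hoc lists. The helper below is illustrative only (it is not in the tree) and assumes it sits in a module that imports these types; the bucket names simply follow the group comments above.

proc severityOf(code: ErrorCode): string =
  ## Rough reporting buckets that mirror the enum's group comments.
  case code
  of PermissionDenied, ElevationRequired, ReadOnlyViolation, AculViolation,
     PolicyViolation, TrustViolation, SignatureInvalid:
    result = "security"
  of NetworkError, DownloadFailed, RepositoryUnavailable, TimeoutError:
    result = "transient"
  else:
    result = "failure"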
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## use_flags.nim
|
||||
## USE flag parsing and management for NIP
|
||||
## Supports both simple key-value format and structured formats
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## UTCP (Universal Tool Communication Protocol) Implementation
|
||||
##
|
||||
## This module implements the Universal Tool Communication Protocol for
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_compiler.nim
|
||||
## Compiler flag resolution system for NIP variant management
|
||||
## Resolves domain flags to actual compiler flags with priority ordering
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_database.nim
|
||||
## Database operations for variant management
|
||||
## Extends the package database with variant tracking
|
||||
|
|
@ -11,10 +18,7 @@ type
|
|||
variants*: Table[string, VariantRecord] # fingerprint -> record
|
||||
references*: Table[string, seq[string]] # variant fingerprint -> list of dependent package names (Task 14.2)
|
||||
|
||||
# DEPRECATED: Use Option[VariantRecord] instead
|
||||
VariantQueryResult* {.deprecated: "Use Option[VariantRecord] instead".} = object
|
||||
found*: bool
|
||||
record*: VariantRecord
|
||||
|
||||
|
||||
VariantReferenceInfo* = object
|
||||
## Information about variant references (Task 14.2)
|
||||
|
|
@ -253,19 +257,7 @@ proc queryVariantByFingerprint*(
|
|||
else:
|
||||
return none(VariantRecord)
|
||||
|
||||
proc queryVariantByFingerprintLegacy*(
|
||||
db: VariantDatabase,
|
||||
fingerprint: string
|
||||
): VariantQueryResult {.deprecated: "Use queryVariantByFingerprint which returns Option[VariantRecord]".} =
|
||||
## DEPRECATED: Use queryVariantByFingerprint instead
|
||||
## Look up a variant by its fingerprint (legacy API)
|
||||
if fingerprint in db.variants:
|
||||
return VariantQueryResult(
|
||||
found: true,
|
||||
record: db.variants[fingerprint]
|
||||
)
|
||||
else:
|
||||
return VariantQueryResult(found: false)
|
||||
|
||||
|
||||
proc queryVariantByPath*(
|
||||
db: VariantDatabase,
|
||||
|
|
@ -281,21 +273,7 @@ proc queryVariantByPath*(
|
|||
|
||||
return none(VariantRecord)
|
||||
|
||||
proc queryVariantByPathLegacy*(
|
||||
db: VariantDatabase,
|
||||
installPath: string
|
||||
): VariantQueryResult {.deprecated: "Use queryVariantByPath which returns Option[VariantRecord]".} =
|
||||
## DEPRECATED: Use queryVariantByPath instead
|
||||
## Query variant by installation path (legacy API)
|
||||
|
||||
for variant in db.variants.values:
|
||||
if variant.installPath == installPath:
|
||||
return VariantQueryResult(
|
||||
found: true,
|
||||
record: variant
|
||||
)
|
||||
|
||||
return VariantQueryResult(found: false)
|
||||
|
||||
proc queryVariantsByPackage*(
|
||||
db: VariantDatabase,
|
||||
|
|
@ -320,33 +298,7 @@ proc queryVariantsByPackageVersion*(
|
|||
if variant.packageName == packageName and variant.version == version:
|
||||
result.add(variant)
|
||||
|
||||
proc deleteVariantRecord*(
|
||||
db: VariantDatabase,
|
||||
fingerprint: string
|
||||
): bool {.deprecated: "Use deleteVariantWithReferences to safely handle references".} =
|
||||
## DEPRECATED: Use deleteVariantWithReferences instead
|
||||
## Remove a variant record from the database
|
||||
## WARNING: This does not check for references and may cause dangling references
|
||||
## Returns true if successful, false if variant not found
|
||||
|
||||
# Check for references before deleting
|
||||
let refs = db.getVariantReferences(fingerprint)
|
||||
if refs.len > 0:
|
||||
echo "Warning: Deleting variant with active references: ", refs.join(", ")
|
||||
echo "Consider using deleteVariantWithReferences instead"
|
||||
|
||||
if fingerprint notin db.variants:
|
||||
return false
|
||||
|
||||
db.variants.del(fingerprint)
|
||||
|
||||
# Clean up references
|
||||
if fingerprint in db.references:
|
||||
db.references.del(fingerprint)
|
||||
|
||||
db.saveVariants()
|
||||
|
||||
return true
|
||||
|
||||
proc updateVariantPath*(
|
||||
db: VariantDatabase,
|
||||
|
|
@ -413,12 +365,7 @@ proc findVariantByPath*(
|
|||
# Utility Functions
|
||||
# #############################################################################
|
||||
|
||||
proc `$`*(qr: VariantQueryResult): string {.deprecated.} =
|
||||
## DEPRECATED: String representation of query result (legacy API)
|
||||
if qr.found:
|
||||
result = "Found: " & qr.record.fingerprint
|
||||
else:
|
||||
result = "Not found"
|
||||
|
||||
|
||||
proc prettyPrint*(variant: VariantRecord): string =
|
||||
## Pretty print a variant record
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_domains.nim
|
||||
## Semantic domain definitions for NIP variant system
|
||||
## Defines 9 orthogonal domains with typed constraints
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_fingerprint.nim
|
||||
## Variant fingerprint calculation using BLAKE2b
|
||||
## Provides deterministic content-addressed identifiers for package variants
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_manager.nim
|
||||
## Orchestration layer for NIP variant management
|
||||
## Coordinates all variant operations: creation, querying, validation
|
||||
|
|
@@ -357,7 +364,8 @@ proc hasVariant*(vm: VariantManager, fingerprint: string): bool =

proc deleteVariant*(vm: VariantManager, fingerprint: string): bool =
## Delete a variant from the database
vm.db.deleteVariantRecord(fingerprint)
let (success, _) = vm.db.deleteVariantWithReferences(fingerprint)
return success

proc countVariants*(vm: VariantManager, packageName: string): int =
## Count variants for a package
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_mappings.nim
|
||||
## Maps NIP variant domains to package manager specific flags
|
||||
## Each package can have custom mappings, with fallback to generic mappings
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_migration.nim
|
||||
## Migration utilities for transitioning from legacy USE flags to variant domains
|
||||
## Task 15: Legacy flag translation and migration warnings
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_parser.nim
|
||||
## CLI parser for domain-scoped variant flags
|
||||
## Supports both new domain syntax and legacy USE flags
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_paths.nim
|
||||
## Variant path management for NIP
|
||||
## Generates and validates content-addressed variant installation paths
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_profiles.nim
|
||||
## Profile system for NIP variant management
|
||||
## Loads and merges variant profiles from KDL files
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_types.nim
|
||||
## Core type system for NIP variant management
|
||||
## Defines typed semantic domains and variant fingerprinting
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variant_validator.nim
|
||||
## Domain validation system for NIP variant management
|
||||
## Validates domain configurations and enforces type constraints
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## variants.nim
|
||||
## Typed variant system for deterministic, content-addressed packages
|
||||
## Evolution of USE flags into semantic domains with type safety
|
||||
|
|
|
|||
|
|
@ -1,3 +1,10 @@
|
|||
# SPDX-License-Identifier: LSL-1.0
|
||||
# Copyright (c) 2026 Markus Maiwald
|
||||
# Stewardship: Self Sovereign Society Foundation
|
||||
#
|
||||
# This file is part of the Nexus Sovereign Core.
|
||||
# See legal/LICENSE_SOVEREIGN.md for license terms.
|
||||
|
||||
## nimpak/xdg_enforcer.nim
|
||||
## XDG Base Directory Enforcer for Nippels
|
||||
##
|
||||
|
|
|
|||
src/nip.nim (1257 lines): file diff suppressed because it is too large.
|
|
@ -1,69 +0,0 @@
|
|||
## NIP Archive Handler
|
||||
##
|
||||
## This module handles the creation and extraction of .nip (and .npk) archives.
|
||||
## It enforces the compression strategy:
|
||||
## - Archives: zstd (default/auto) for performance.
|
||||
## - CAS: zstd -19 (handled by cas.nim, not here).
|
||||
|
||||
import std/[os, osproc, strutils, strformat, logging, tempfiles]
|
||||
import nip/manifest_parser
|
||||
|
||||
type
|
||||
ArchiveError* = object of CatchableError
|
||||
|
||||
proc runCmd(cmd: string) =
|
||||
let res = execCmdEx(cmd)
|
||||
if res.exitCode != 0:
|
||||
raise newException(ArchiveError, fmt"Command failed: {cmd}{'\n'}Output: {res.output}")
|
||||
|
||||
proc createArchive*(manifest: PackageManifest, sourceDir: string, outputFile: string) =
|
||||
## Create a .nip archive from a source directory and manifest.
|
||||
## The archive will contain:
|
||||
## - manifest.kdl
|
||||
## - files/ (content of sourceDir)
|
||||
|
||||
info(fmt"Creating archive {outputFile} from {sourceDir}")
|
||||
|
||||
let tempDir = createTempDir("nip_build_", "")
|
||||
defer: removeDir(tempDir)
|
||||
|
||||
# 1. Write manifest to temp root
|
||||
let manifestPath = tempDir / "manifest.kdl"
|
||||
writeFile(manifestPath, serializeManifestToKDL(manifest))
|
||||
|
||||
# 2. Copy source files to temp/files
|
||||
let filesDir = tempDir / "files"
|
||||
createDir(filesDir)
|
||||
copyDirWithPermissions(sourceDir, filesDir)
|
||||
|
||||
# 3. Create Archive
|
||||
# We use tar + zstd.
|
||||
# --zstd tells tar to use zstd. If not supported by tar version, we pipe.
|
||||
# To be safe and explicit about zstd options, we pipe.
|
||||
# cd tempDir && tar -cf - manifest.kdl files/ | zstd -T0 > outputFile
|
||||
# -T0 uses all cores.
|
||||
# No -19 here, just default (level 3 usually) or --auto if we wanted.
|
||||
# Default is good for "superb heuristic".
|
||||
|
||||
let cmd = fmt"tar -C {tempDir.quoteShell} -cf - manifest.kdl files | zstd -T0 -o {outputFile.quoteShell}"
|
||||
runCmd(cmd)
|
||||
|
||||
info(fmt"Archive created successfully: {outputFile}")
|
||||
|
||||
proc extractArchive*(archivePath: string, targetDir: string) =
|
||||
## Extract a .nip archive to targetDir.
|
||||
|
||||
info(fmt"Extracting archive {archivePath} to {targetDir}")
|
||||
createDir(targetDir)
|
||||
|
||||
# zstd -d -c archive | tar -C target -xf -
|
||||
let cmd = fmt"zstd -d -c {archivePath.quoteShell} | tar -C {targetDir.quoteShell} -xf -"
|
||||
runCmd(cmd)
|
||||
|
||||
info("Extraction complete")
|
||||
|
||||
proc verifyArchive*(archivePath: string): bool =
|
||||
## Verify archive integrity (zstd check)
|
||||
let cmd = fmt"zstd -t {archivePath.quoteShell}"
|
||||
let res = execCmdEx(cmd)
|
||||
return res.exitCode == 0
|
||||
src/nip/cas.nim (165 lines)
|
|
@ -1,165 +0,0 @@
|
|||
## Content-Addressable Storage (CAS) system for NimPak
|
||||
##
|
||||
## This module provides the core functionality for storing and retrieving
|
||||
## content-addressed objects using BLAKE2b-512 hashing (with future support for BLAKE3).
|
||||
## Objects are stored in a sharded directory structure for scalability.
|
||||
|
||||
import std/[os, strutils, times, posix]
|
||||
import nimcrypto/hash
|
||||
import nimcrypto/blake2
|
||||
import nip/types
|
||||
|
||||
const
|
||||
DefaultHashAlgorithm* = "blake2b-512" # Default hash algorithm
|
||||
ShardingLevels* = 2 # Number of directory levels for sharding
|
||||
|
||||
type
|
||||
HashAlgorithm* = enum
|
||||
Blake2b512 = "blake2b-512"
|
||||
# Blake3 = "blake3" # Will be added when available in Nimble
|
||||
|
||||
CasObject* = object
|
||||
hash*: Multihash
|
||||
size*: int64
|
||||
compressed*: bool
|
||||
timestamp*: times.Time
|
||||
|
||||
proc calculateHash*(data: string, algorithm: HashAlgorithm = Blake2b512): Multihash =
|
||||
## Calculate the hash of a string using the specified algorithm
|
||||
case algorithm:
|
||||
of Blake2b512:
|
||||
let digest = blake2_512.digest(data)
|
||||
var hexDigest = ""
|
||||
for b in digest.data:
|
||||
hexDigest.add(b.toHex(2).toLowerAscii())
|
||||
result = Multihash(hexDigest)
|
||||
|
||||
proc calculateFileHash*(path: string, algorithm: HashAlgorithm = Blake2b512): Multihash =
|
||||
## Calculate the hash of a file using the specified algorithm
|
||||
if not fileExists(path):
|
||||
raise newException(IOError, "File not found: " & path)
|
||||
|
||||
let data = readFile(path)
|
||||
result = calculateHash(data, algorithm)
|
||||
|
||||
proc getShardPath*(hash: Multihash, levels: int = ShardingLevels): string =
|
||||
## Get the sharded path for a hash
|
||||
## e.g., "ab/cd" for hash "abcdef123456..."
|
||||
let hashStr = string(hash)
|
||||
var parts: seq[string] = @[]
|
||||
|
||||
for i in 0..<levels:
|
||||
if i*2+1 < hashStr.len:
|
||||
parts.add(hashStr[i*2..<i*2+2])
|
||||
else:
|
||||
break
|
||||
|
||||
result = parts.join("/")
|
||||
|
||||
proc storeObject*(data: string, casRoot: string, compress: bool = true): CasObject =
|
||||
## Store data in the CAS and return its hash
|
||||
let hash = calculateHash(data)
|
||||
let shardPath = getShardPath(hash)
|
||||
let fullShardPath = casRoot / shardPath
|
||||
|
||||
# Create shard directories if they don't exist
|
||||
createDir(fullShardPath)
|
||||
|
||||
# Store the object
|
||||
let objectPath = fullShardPath / string(hash)
|
||||
|
||||
# TODO: Add zstd compression when needed
|
||||
writeFile(objectPath, data)
|
||||
|
||||
result = CasObject(
|
||||
hash: hash,
|
||||
size: data.len.int64,
|
||||
compressed: compress,
|
||||
timestamp: getTime()
|
||||
)
|
||||
|
||||
proc retrieveObject*(hash: Multihash, casRoot: string): string =
|
||||
## Retrieve an object from the CAS by its hash
|
||||
let shardPath = getShardPath(hash)
|
||||
let objectPath = casRoot / shardPath / string(hash)
|
||||
|
||||
if not fileExists(objectPath):
|
||||
raise newException(IOError, "Object not found: " & string(hash))
|
||||
|
||||
# TODO: Add zstd decompression when needed
|
||||
result = readFile(objectPath)
|
||||
|
||||
proc verifyObject*(hash: Multihash, data: string): bool =
|
||||
## Verify that data matches its expected hash
|
||||
let calculatedHash = calculateHash(data)
|
||||
result = hash == calculatedHash
|
||||
|
||||
proc initCasManager*(userCasPath: string, systemCasPath: string): bool =
|
||||
## Initialize the CAS manager by creating necessary directories
|
||||
try:
|
||||
createDir(userCasPath)
|
||||
setFilePermissions(userCasPath, {fpUserRead, fpUserWrite, fpUserExec})
|
||||
|
||||
# Only create system CAS if running as root
|
||||
if posix.getuid() == 0:
|
||||
createDir(systemCasPath)
|
||||
setFilePermissions(systemCasPath, {fpUserRead, fpUserWrite, fpUserExec,
|
||||
fpGroupRead, fpGroupExec,
|
||||
fpOthersRead, fpOthersExec})
|
||||
|
||||
result = true
|
||||
result = true
|
||||
except:
|
||||
result = false
|
||||
|
||||
# ============================================================================
|
||||
# Reference Counting / Garbage Collection Support
|
||||
# ============================================================================
|
||||
|
||||
proc getRefPath(casRoot, refType, hash, refId: string): string =
|
||||
## Get path for a reference file: cas/refs/<type>/<hash>/<refId>
|
||||
result = casRoot / "refs" / refType / hash / refId
|
||||
|
||||
proc addReference*(casRoot: string, hash: Multihash, refType, refId: string) =
|
||||
## Add a reference to a CAS object
|
||||
## refType: "npk", "nip", "nexter"
|
||||
## refId: Unique identifier for the reference (e.g. "package-name:version")
|
||||
let path = getRefPath(casRoot, refType, string(hash), refId)
|
||||
createDir(path.parentDir)
|
||||
writeFile(path, "") # Empty file acts as reference
|
||||
|
||||
proc removeReference*(casRoot: string, hash: Multihash, refType, refId: string) =
|
||||
## Remove a reference to a CAS object
|
||||
let path = getRefPath(casRoot, refType, string(hash), refId)
|
||||
if fileExists(path):
|
||||
removeFile(path)
|
||||
# Try to remove parent dir (hash dir) if empty
|
||||
try:
|
||||
removeDir(path.parentDir)
|
||||
except:
|
||||
discard
|
||||
|
||||
proc hasReferences*(casRoot: string, hash: Multihash): bool =
|
||||
## Check if a CAS object has any references
|
||||
# We need to check all refTypes
|
||||
let refsDir = casRoot / "refs"
|
||||
if not dirExists(refsDir): return false
|
||||
|
||||
for kind, path in walkDir(refsDir):
|
||||
if kind == pcDir:
|
||||
let hashDir = path / string(hash)
|
||||
if dirExists(hashDir):
|
||||
# Check if directory is not empty
|
||||
for _ in walkDir(hashDir):
|
||||
return true
|
||||
return false
|
||||
|
||||
when isMainModule:
|
||||
# Simple test
|
||||
echo "Testing CAS functionality..."
|
||||
let testData = "Hello, NexusOS with Content-Addressable Storage!"
|
||||
let objHash = calculateHash(testData)
|
||||
echo "Hash: ", string(objHash)
|
||||
|
||||
# Test sharding
|
||||
echo "Shard path: ", getShardPath(objHash)
|
||||
|
|
@ -1,328 +0,0 @@
|
|||
## Resolve Command - CLI Interface for Dependency Resolution
|
||||
##
|
||||
## This module provides the CLI interface for the dependency resolver,
|
||||
## allowing users to resolve, explain, and inspect package dependencies.
|
||||
|
||||
import strformat
|
||||
import tables
|
||||
import terminal
|
||||
|
||||
# ============================================================================
|
||||
# Type Definitions
|
||||
# ============================================================================
|
||||
|
||||
import ../resolver/orchestrator
|
||||
import ../resolver/variant_types
|
||||
import ../resolver/dependency_graph
|
||||
import ../resolver/conflict_detection
|
||||
import std/[options, times]
|
||||
|
||||
type
|
||||
VersionConstraint* = object
|
||||
operator*: string
|
||||
version*: string
|
||||
|
||||
# ============================================================================
|
||||
# Helper Functions
|
||||
# ============================================================================
|
||||
|
||||
proc loadRepositories*(): seq[Repository] =
|
||||
## Load repositories from configuration
|
||||
result = @[
|
||||
Repository(name: "main", url: "https://packages.nexusos.org/main", priority: 100),
|
||||
Repository(name: "community", url: "https://packages.nexusos.org/community", priority: 50)
|
||||
]
|
||||
|
||||
|
||||
|
||||
proc parseVersionConstraint*(constraint: string): VersionConstraint =
|
||||
## Parse version constraint string
|
||||
result = VersionConstraint(operator: "any", version: constraint)
|
||||
|
||||
proc formatError*(msg: string): string =
|
||||
## Format error message
|
||||
result = fmt"Error: {msg}"
|
||||
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Command: nip resolve
|
||||
# ============================================================================
|
||||
|
||||
proc resolveCommand*(args: seq[string]): int =
|
||||
## Handle 'nip resolve <package>' command
|
||||
|
||||
if args.len < 1:
|
||||
echo "Usage: nip resolve <package> [constraint] [options]"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --use-flags=<flags> Comma-separated USE flags"
|
||||
echo " --libc=<libc> C library (musl, glibc)"
|
||||
echo " --allocator=<alloc> Memory allocator (jemalloc, tcmalloc, default)"
|
||||
echo " --json Output in JSON format"
|
||||
return 1
|
||||
|
||||
let packageName = args[0]
|
||||
var jsonOutput = false
|
||||
|
||||
# Parse arguments
|
||||
for arg in args[1..^1]:
|
||||
if arg == "--json":
|
||||
jsonOutput = true
|
||||
|
||||
try:
|
||||
# Initialize Orchestrator
|
||||
let repos = loadRepositories()
|
||||
let config = defaultConfig()
|
||||
let orchestrator = newResolutionOrchestrator(repos, config)
|
||||
|
||||
# Create demand (default for now)
|
||||
let demand = VariantDemand(
|
||||
packageName: packageName,
|
||||
variantProfile: VariantProfile(hash: "any")
|
||||
)
|
||||
|
||||
# Resolve
|
||||
let result = orchestrator.resolve(packageName, "*", demand)
|
||||
|
||||
if result.isOk:
|
||||
let res = result.value
|
||||
if jsonOutput:
|
||||
echo fmt"""{{
|
||||
"success": true,
|
||||
"package": "{packageName}",
|
||||
"packageCount": {res.packageCount},
|
||||
"resolutionTime": {res.resolutionTime},
|
||||
"cacheHit": {res.cacheHit},
|
||||
"installOrder": []
|
||||
}}"""
|
||||
else:
|
||||
stdout.styledWrite(fgGreen, "✅ Resolution successful!\n")
|
||||
echo ""
|
||||
echo fmt"📦 Package: {packageName}"
|
||||
echo fmt"⏱️ Time: {res.resolutionTime * 1000:.2f}ms"
|
||||
echo fmt"📚 Packages: {res.packageCount}"
|
||||
echo fmt"💾 Cache Hit: {res.cacheHit}"
|
||||
echo ""
|
||||
|
||||
echo "📋 Resolution Plan:"
|
||||
for term in res.installOrder:
|
||||
stdout.styledWrite(fgCyan, fmt" • {term.packageName}")
|
||||
stdout.write(fmt" ({term.version})")
|
||||
stdout.styledWrite(fgYellow, fmt" [{term.source}]")
|
||||
echo ""
|
||||
echo ""
|
||||
|
||||
else:
|
||||
let err = result.error
|
||||
if jsonOutput:
|
||||
echo fmt"""{{
|
||||
"success": false,
|
||||
"error": "{err.details}"
|
||||
}}"""
|
||||
else:
|
||||
stdout.styledWrite(fgRed, "❌ Resolution Failed!\n")
|
||||
echo formatError(err)
|
||||
|
||||
return if result.isOk: 0 else: 1
|
||||
|
||||
except Exception as e:
|
||||
if jsonOutput:
|
||||
echo fmt"""{{
|
||||
"success": false,
|
||||
"error": "{e.msg}"
|
||||
}}"""
|
||||
else:
|
||||
stdout.styledWrite(fgRed, "❌ Error!\n")
|
||||
echo fmt"Error: {e.msg}"
|
||||
return 1
|
||||
|
||||
# ============================================================================
|
||||
# Command: nip explain
|
||||
# ============================================================================
|
||||
|
||||
proc explainCommand*(args: seq[string]): int =
|
||||
## Handle 'nip explain <package>' command
|
||||
|
||||
if args.len < 1:
|
||||
echo "Usage: nip explain <package> [options]"
|
||||
return 1
|
||||
|
||||
let packageName = args[0]
|
||||
var jsonOutput = false
|
||||
|
||||
for arg in args[1..^1]:
|
||||
if arg == "--json":
|
||||
jsonOutput = true
|
||||
|
||||
try:
|
||||
if jsonOutput:
|
||||
echo fmt"""{{
|
||||
"success": true,
|
||||
"package": "{packageName}",
|
||||
"version": "1.0.0",
|
||||
"variant": "default",
|
||||
"buildHash": "blake3-abc123",
|
||||
"source": "main",
|
||||
"dependencyCount": 0,
|
||||
"dependencies": []
|
||||
}}"""
|
||||
else:
|
||||
stdout.styledWrite(fgCyan, fmt"📖 Explaining resolution for: {packageName}\n")
|
||||
echo ""
|
||||
echo "Resolution explanation:"
|
||||
echo fmt" • Package source: main"
|
||||
echo fmt" • Version selected: 1.0.0"
|
||||
echo fmt" • Variant: default"
|
||||
echo fmt" • Dependencies: 0 packages"
|
||||
echo ""
|
||||
|
||||
return 0
|
||||
|
||||
except Exception as e:
|
||||
if jsonOutput:
|
||||
echo fmt"""{{
|
||||
"success": false,
|
||||
"error": "{e.msg}"
|
||||
}}"""
|
||||
else:
|
||||
stdout.styledWrite(fgRed, "❌ Error!\n")
|
||||
echo fmt"Error: {e.msg}"
|
||||
return 1
|
||||
|
||||
# ============================================================================
|
||||
# Command: nip conflicts
|
||||
# ============================================================================
|
||||
|
||||
proc conflictsCommand*(args: seq[string]): int =
|
||||
## Handle 'nip conflicts' command
|
||||
|
||||
var jsonOutput = false
|
||||
|
||||
for arg in args:
|
||||
if arg == "--json":
|
||||
jsonOutput = true
|
||||
|
||||
try:
|
||||
if jsonOutput:
|
||||
echo """{"success": true, "conflicts": []}"""
|
||||
else:
|
||||
stdout.styledWrite(fgGreen, "✅ No conflicts detected!\n")
|
||||
echo ""
|
||||
echo "All installed packages are compatible."
|
||||
echo ""
|
||||
|
||||
return 0
|
||||
|
||||
except Exception as e:
|
||||
if jsonOutput:
|
||||
echo fmt"""{{
|
||||
"success": false,
|
||||
"error": "{e.msg}"
|
||||
}}"""
|
||||
else:
|
||||
stdout.styledWrite(fgRed, "❌ Error!\n")
|
||||
echo fmt"Error: {e.msg}"
|
||||
return 1
|
||||
|
||||
# ============================================================================
|
||||
# Command: nip variants
|
||||
# ============================================================================
|
||||
|
||||
proc variantsCommand*(args: seq[string]): int =
|
||||
## Handle 'nip variants <package>' command
|
||||
|
||||
if args.len < 1:
|
||||
echo "Usage: nip variants <package> [options]"
|
||||
return 1
|
||||
|
||||
let packageName = args[0]
|
||||
var jsonOutput = false
|
||||
|
||||
for arg in args[1..^1]:
|
||||
if arg == "--json":
|
||||
jsonOutput = true
|
||||
|
||||
try:
|
||||
if jsonOutput:
|
||||
echo fmt"""{{
|
||||
"package": "{packageName}",
|
||||
"variants": {{
|
||||
"useFlags": [
|
||||
{{"flag": "ssl", "description": "Enable SSL/TLS support", "default": false}},
|
||||
{{"flag": "http2", "description": "Enable HTTP/2 support", "default": false}}
|
||||
],
|
||||
"libc": [
|
||||
{{"option": "musl", "description": "Lightweight C library", "default": true}},
|
||||
{{"option": "glibc", "description": "GNU C library", "default": false}}
|
||||
],
|
||||
"allocator": [
|
||||
{{"option": "jemalloc", "description": "High-performance allocator", "default": true}},
|
||||
{{"option": "tcmalloc", "description": "Google's thread-caching allocator", "default": false}}
|
||||
]
|
||||
}}
|
||||
}}"""
|
||||
else:
|
||||
stdout.styledWrite(fgCyan, fmt"🎨 Available variants for: {packageName}\n")
|
||||
echo ""
|
||||
echo "USE flags:"
|
||||
echo " • ssl (default) - Enable SSL/TLS support"
|
||||
echo " • http2 - Enable HTTP/2 support"
|
||||
echo ""
|
||||
echo "libc options:"
|
||||
echo " • musl (default) - Lightweight C library"
|
||||
echo " • glibc - GNU C library"
|
||||
echo ""
|
||||
echo "Allocator options:"
|
||||
echo " • jemalloc (default) - High-performance allocator"
|
||||
echo " • tcmalloc - Google's thread-caching allocator"
|
||||
echo ""
|
||||
|
||||
return 0
|
||||
|
||||
except Exception as e:
|
||||
if jsonOutput:
|
||||
echo fmt"""{{
|
||||
"success": false,
|
||||
"error": "{e.msg}"
|
||||
}}"""
|
||||
else:
|
||||
stdout.styledWrite(fgRed, "❌ Error!\n")
|
||||
echo fmt"Error: {e.msg}"
|
||||
return 1
|
||||
|
||||
# ============================================================================
|
||||
# Main CLI Entry Point
|
||||
# ============================================================================
|
||||
|
||||
when isMainModule:
|
||||
import os
|
||||
|
||||
let args = commandLineParams()
|
||||
|
||||
if args.len == 0:
|
||||
echo "NIP Dependency Resolver"
|
||||
echo ""
|
||||
echo "Usage: nip <command> [args]"
|
||||
echo ""
|
||||
echo "Commands:"
|
||||
echo " resolve <package> - Resolve dependencies"
|
||||
echo " explain <package> - Explain resolution decisions"
|
||||
echo " conflicts - Show detected conflicts"
|
||||
echo " variants <package> - Show available variants"
|
||||
echo ""
|
||||
quit(1)
|
||||
|
||||
let command = args[0]
|
||||
let commandArgs = args[1..^1]
|
||||
|
||||
let exitCode = case command:
|
||||
of "resolve": resolveCommand(commandArgs)
|
||||
of "explain": explainCommand(commandArgs)
|
||||
of "conflicts": conflictsCommand(commandArgs)
|
||||
of "variants": variantsCommand(commandArgs)
|
||||
else:
|
||||
echo fmt"Unknown command: {command}"
|
||||
1
|
||||
|
||||
quit(exitCode)
|
||||
|
|
@ -1,85 +0,0 @@
|
|||
import std/[os, strutils, options]
|
||||
import nimpak/packages
|
||||
import nimpak/types
|
||||
import nimpak/cas
|
||||
|
||||
proc runConvertCommand*(args: seq[string]) =
|
||||
if args.len < 2:
|
||||
echo "Usage: nip convert <grafted_package_dir>"
|
||||
quit(1)
|
||||
|
||||
let graftedDir = args[1]
|
||||
|
||||
# Load graft result metadata (simulate loading from graftedDir)
|
||||
# In real implementation, this would parse graft metadata files
|
||||
# Here, we simulate with placeholders for demonstration
|
||||
|
||||
# TODO: Replace with actual loading/parsing of graft metadata
|
||||
let dummyFragment = Fragment(
|
||||
id: PackageId(name: "dummy", version: "0.1.0", stream: Stable),
|
||||
source: Source(
|
||||
url: "https://example.com/dummy-0.1.0.tar.gz",
|
||||
hash: "blake2b-dummyhash",
|
||||
hashAlgorithm: "blake2b",
|
||||
sourceMethod: Http,
|
||||
timestamp: now()
|
||||
),
|
||||
dependencies: @[],
|
||||
buildSystem: Custom,
|
||||
metadata: PackageMetadata(
|
||||
description: "Dummy package for conversion",
|
||||
license: "MIT",
|
||||
maintainer: "dummy@example.com",
|
||||
tags: @[],
|
||||
runtime: RuntimeProfile(
|
||||
libc: Musl,
|
||||
allocator: System,
|
||||
systemdAware: false,
|
||||
reproducible: true,
|
||||
tags: @[]
|
||||
)
|
||||
),
|
||||
acul: AculCompliance(required: false, membership: "", attribution: "", buildLog: "")
|
||||
)
|
||||
|
||||
let dummyAuditLog = GraftAuditLog(
|
||||
timestamp: now(),
|
||||
source: Pacman,
|
||||
packageName: "dummy",
|
||||
version: "0.1.0",
|
||||
downloadedFilename: "dummy-0.1.0.tar.gz",
|
||||
archiveHash: "blake2b-dummyhash",
|
||||
hashAlgorithm: "blake2b",
|
||||
sourceOutput: "Simulated graft source output",
|
||||
downloadUrl: none(string),
|
||||
originalSize: 12345,
|
||||
deduplicationStatus: "New"
|
||||
)
|
||||
|
||||
let graftResult = GraftResult(
|
||||
fragment: dummyFragment,
|
||||
extractedPath: graftedDir,
|
||||
originalMetadata: %*{},
|
||||
auditLog: dummyAuditLog
|
||||
)
|
||||
|
||||
let convertResult = convertGraftToNpk(graftResult)
|
||||
if convertResult.isErr:
|
||||
echo "Conversion failed: ", convertResult.getError().msg
|
||||
quit(1)
|
||||
|
||||
let npk = convertResult.get()
|
||||
|
||||
# Create archive path
|
||||
let archivePath = graftedDir / (npk.metadata.id.name & "-" & npk.metadata.id.version & ".npk")
|
||||
|
||||
let archiveResult = createNpkArchive(npk, archivePath)
|
||||
if archiveResult.isErr:
|
||||
echo "Failed to create NPK archive: ", archiveResult.getError().msg
|
||||
quit(1)
|
||||
|
||||
echo "Conversion successful. NPK archive created at: ", archivePath
|
||||
|
||||
# Entry point for the command
|
||||
when isMainModule:
|
||||
runConvertCommand(commandLineParams())
|
||||
|
|
@ -1,433 +0,0 @@
|
|||
## nip/commands/verify.nim
|
||||
## Implementation of nip verify command for package integrity verification
|
||||
##
|
||||
## This module implements the nip verify <package|--all> command that provides
|
||||
## comprehensive package integrity verification including hash and signature checks.
|
||||
|
||||
import std/[os, strutils, times, json, sequtils, strformat, algorithm, tables]
|
||||
import ../../nimpak/security/hash_verifier
|
||||
import ../../nimpak/cli/core
|
||||
|
||||
type
|
||||
VerifyOptions* = object
|
||||
target*: string # Package name or "--all"
|
||||
checkSignatures*: bool # Verify digital signatures
|
||||
checkHashes*: bool # Verify file hashes
|
||||
verbose*: bool # Verbose output
|
||||
outputFormat*: OutputFormat # Output format
|
||||
autoRepair*: bool # Attempt automatic repair
|
||||
showDetails*: bool # Show detailed verification info
|
||||
|
||||
VerificationSummary* = object
|
||||
totalPackages*: int
|
||||
verifiedPackages*: int
|
||||
failedPackages*: int
|
||||
skippedPackages*: int
|
||||
integrityPassed*: int
|
||||
integrityFailed*: int
|
||||
signaturesPassed*: int
|
||||
signaturesFailed*: int
|
||||
duration*: float
|
||||
timestamp*: times.DateTime
|
||||
|
||||
SimpleVerificationResult* = object
|
||||
packageName*: string
|
||||
success*: bool
|
||||
message*: string
|
||||
checkType*: string
|
||||
duration*: float
|
||||
|
||||
proc parseVerifyOptions*(args: seq[string]): VerifyOptions =
|
||||
## Parse nip verify command arguments
|
||||
var options = VerifyOptions(
|
||||
target: "",
|
||||
checkSignatures: true,
|
||||
checkHashes: true,
|
||||
verbose: false,
|
||||
outputFormat: OutputHuman,
|
||||
autoRepair: false,
|
||||
showDetails: false
|
||||
)
|
||||
|
||||
if args.len == 0:
|
||||
options.target = "--all"
|
||||
return options
|
||||
|
||||
var i = 0
|
||||
while i < args.len:
|
||||
case args[i]:
|
||||
of "--all":
|
||||
options.target = "--all"
|
||||
of "--no-signatures":
|
||||
options.checkSignatures = false
|
||||
of "--no-hashes":
|
||||
options.checkHashes = false
|
||||
of "--signatures-only":
|
||||
options.checkHashes = false
|
||||
options.checkSignatures = true
|
||||
of "--hashes-only":
|
||||
options.checkSignatures = false
|
||||
options.checkHashes = true
|
||||
of "--verbose", "-v":
|
||||
options.verbose = true
|
||||
of "--details":
|
||||
options.showDetails = true
|
||||
of "--auto-repair":
|
||||
options.autoRepair = true
|
||||
of "--output":
|
||||
if i + 1 < args.len:
|
||||
case args[i + 1].toLower():
|
||||
of "json": options.outputFormat = OutputJson
|
||||
of "yaml": options.outputFormat = OutputYaml
|
||||
of "kdl": options.outputFormat = OutputKdl
|
||||
else: options.outputFormat = OutputHuman
|
||||
i += 1
|
||||
else:
|
||||
# Assume it's a package name
|
||||
if options.target == "":
|
||||
options.target = args[i]
|
||||
i += 1
|
||||
|
||||
# Default to --all if no target specified
|
||||
if options.target == "":
|
||||
options.target = "--all"
|
||||
|
||||
return options

proc displayVerificationResult*(result: SimpleVerificationResult, options: VerifyOptions) =
  ## Display a single verification result in human-readable format
  let statusSymbol = if result.success: success("✅") else: error("❌")

  echo fmt"{statusSymbol} {result.checkType}: {result.packageName}"

  if not result.success or options.verbose:
    echo fmt"  {result.message}"

  if result.duration > 0.0:
    echo fmt"  Duration: {result.duration:.3f}s"

  echo ""

proc displayVerificationSummary*(summary: VerificationSummary, options: VerifyOptions) =
  ## Display verification summary
  echo bold("📋 Verification Summary")
  echo "=".repeat(40)
  echo "Timestamp: " & $summary.timestamp
  echo fmt"Duration: {summary.duration:.2f}s"
  echo ""

  echo fmt"Packages: {summary.totalPackages} total, {summary.verifiedPackages} verified, {summary.failedPackages} failed"

  if options.checkHashes:
    echo fmt"Hash Checks: {summary.integrityPassed} passed, {summary.integrityFailed} failed"

  if options.checkSignatures:
    echo fmt"Signature Checks: {summary.signaturesPassed} passed, {summary.signaturesFailed} failed"

  echo ""

  # Overall status
  let overallSuccess = summary.failedPackages == 0
  let statusSymbol = if overallSuccess: success("✅") else: error("❌")
  let statusText = if overallSuccess: "PASSED" else: "FAILED"

  echo fmt"Overall Status: {statusSymbol} {statusText}"

proc verifyPackageHash*(packageName: string, packagePath: string): SimpleVerificationResult =
  ## Verify hash of a single package
  let startTime = cpuTime()

  try:
    if not fileExists(packagePath):
      return SimpleVerificationResult(
        packageName: packageName,
        success: false,
        message: fmt"Package file not found: {packagePath}",
        checkType: "Hash",
        duration: cpuTime() - startTime
      )

    # For now, just check that the file exists and can be hashed.
    # In a real implementation, the digest would be compared against a stored hash.
    discard computeFileHash(packagePath, HashBlake2b)

    return SimpleVerificationResult(
      packageName: packageName,
      success: true,
      message: fmt"Package hash verified: {packageName}",
      checkType: "Hash",
      duration: cpuTime() - startTime
    )

  except Exception as e:
    return SimpleVerificationResult(
      packageName: packageName,
      success: false,
      message: fmt"Hash verification error: {e.msg}",
      checkType: "Hash",
      duration: cpuTime() - startTime
    )
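
# Sketch (assumption, not part of the current implementation): once package
# metadata records an expected digest, the comparison could look like this.
# `expected` is a hypothetical value; it is assumed that computeFileHash
# returns the digest in a form comparable with `==`.
proc hashMatches(packagePath: string, expected: auto): bool =
  result = computeFileHash(packagePath, HashBlake2b) == expected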

proc verifySpecificPackage*(packageName: string, options: VerifyOptions): seq[SimpleVerificationResult] =
  ## Verify a specific package
  var results: seq[SimpleVerificationResult] = @[]

  if options.verbose:
    showInfo(fmt"Verifying package: {packageName}")

  # Find package file
  let packagePath = fmt"/Programs/{packageName}/current/{packageName}.npk"
  if not fileExists(packagePath):
    # Try to find any version
    let packageDir = fmt"/Programs/{packageName}"
    if dirExists(packageDir):
      var foundVersion = false
      for versionDir in walkDirs(packageDir / "*"):
        let versionPackagePath = versionDir / (packageName & ".npk")
        if fileExists(versionPackagePath):
          if options.checkHashes:
            results.add(verifyPackageHash(packageName, versionPackagePath))
          foundVersion = true
          break

      if not foundVersion:
        results.add(SimpleVerificationResult(
          packageName: packageName,
          success: false,
          message: fmt"Package file not found for {packageName}",
          checkType: "Hash",
          duration: 0.0
        ))
    else:
      results.add(SimpleVerificationResult(
        packageName: packageName,
        success: false,
        message: fmt"Package directory not found: {packageName}",
        checkType: "Hash",
        duration: 0.0
      ))
  else:
    if options.checkHashes:
      results.add(verifyPackageHash(packageName, packagePath))

  return results

proc verifyAllPackages*(options: VerifyOptions): seq[SimpleVerificationResult] =
  ## Verify all installed packages
  var results: seq[SimpleVerificationResult] = @[]

  if options.verbose:
    showInfo("Verifying all installed packages...")

  # Scan /Programs directory for packages
  if not dirExists("/Programs"):
    results.add(SimpleVerificationResult(
      packageName: "system",
      success: false,
      message: "/Programs directory not found",
      checkType: "System",
      duration: 0.0
    ))
    return results

  var packageCount = 0
  for packageDir in walkDirs("/Programs/*"):
    let packageName = extractFilename(packageDir)
    packageCount += 1

    if options.verbose:
      showInfo(fmt"Verifying package {packageCount}: {packageName}")

    # Look for package files in version directories
    var foundPackage = false
    for versionDir in walkDirs(packageDir / "*"):
      let packageFile = versionDir / (packageName & ".npk")
      if fileExists(packageFile):
        foundPackage = true

        # Hash verification
        if options.checkHashes:
          results.add(verifyPackageHash(packageName, packageFile))

        break # Only verify the first found version

    if not foundPackage:
      results.add(SimpleVerificationResult(
        packageName: packageName,
        success: false,
        message: fmt"No package file found for {packageName}",
        checkType: "Hash",
        duration: 0.0
      ))

  return results

proc calculateVerificationSummary*(results: seq[SimpleVerificationResult], duration: float): VerificationSummary =
  ## Calculate verification summary from results
  var summary = VerificationSummary(
    totalPackages: 0,
    verifiedPackages: 0,
    failedPackages: 0,
    skippedPackages: 0,
    integrityPassed: 0,
    integrityFailed: 0,
    signaturesPassed: 0,
    signaturesFailed: 0,
    duration: duration,
    timestamp: now()
  )

  var packageNames: seq[string] = @[]

  for res in results:
    # Count unique packages
    if res.packageName notin packageNames and res.packageName != "system":
      packageNames.add(res.packageName)

    # Count by check type
    if res.checkType == "Hash":
      if res.success:
        summary.integrityPassed += 1
      else:
        summary.integrityFailed += 1
    elif res.checkType == "Signature":
      if res.success:
        summary.signaturesPassed += 1
      else:
        summary.signaturesFailed += 1

  summary.totalPackages = packageNames.len

  # Calculate verified/failed packages
  var packageResults: Table[string, bool] = initTable[string, bool]()
  for res in results:
    if res.packageName != "system":
      if res.packageName in packageResults:
        # If any check fails for a package, mark it as failed
        packageResults[res.packageName] = packageResults[res.packageName] and res.success
      else:
        packageResults[res.packageName] = res.success

  for packageName, success in packageResults.pairs:
    if success:
      summary.verifiedPackages += 1
    else:
      summary.failedPackages += 1

  return summary
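
# Aggregation sketch (illustrative): two hash checks for the same package where
# one fails leave that package counted as failed.
when isMainModule:
  let demo = calculateVerificationSummary(@[
    SimpleVerificationResult(packageName: "demo", success: true, checkType: "Hash"),
    SimpleVerificationResult(packageName: "demo", success: false, checkType: "Hash")
  ], 0.0)
  doAssert demo.totalPackages == 1
  doAssert demo.failedPackages == 1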

proc attemptAutoRepair*(results: seq[SimpleVerificationResult], options: VerifyOptions): seq[string] =
  ## Attempt automatic repair of failed verifications
  var repairActions: seq[string] = @[]

  if not options.autoRepair:
    return repairActions

  showInfo("Attempting automatic repair of failed verifications...")

  for res in results:
    if not res.success:
      if res.checkType == "Hash":
        # For hash failures, we could attempt to re-download or restore from backup
        repairActions.add(fmt"Hash failure for {res.packageName}: Consider reinstalling package")
      elif res.checkType == "Signature":
        # For signature failures, we could attempt to update keyrings
        repairActions.add(fmt"Signature failure for {res.packageName}: Consider updating keyrings")

  if repairActions.len > 0:
    showWarning(fmt"Auto-repair identified {repairActions.len} potential actions (manual intervention required)")
    for action in repairActions:
      echo fmt"  • {action}"

  return repairActions

proc nipVerifyCommand*(args: seq[string]): CommandResult =
  ## Main implementation of nip verify command
  let startTime = cpuTime()

  try:
    let options = parseVerifyOptions(args)

    if options.verbose:
      showInfo(fmt"Starting verification: {options.target}")
      if not options.checkHashes:
        showInfo("Hash verification disabled")
      if not options.checkSignatures:
        showInfo("Signature verification disabled")

    # Run verification
    var results: seq[SimpleVerificationResult] = @[]

    if options.target == "--all" or options.target == "all":
      results = verifyAllPackages(options)
    else:
      results = verifySpecificPackage(options.target, options)

    let duration = cpuTime() - startTime
    let summary = calculateVerificationSummary(results, duration)

    # Display results
    case options.outputFormat:
    of OutputHuman:
      if options.verbose or results.len <= 20: # Show individual results for small sets
        for res in results:
          displayVerificationResult(res, options)

      displayVerificationSummary(summary, options)

      # Show auto-repair suggestions
      if summary.failedPackages > 0:
        let repairActions = attemptAutoRepair(results, options)
        if repairActions.len == 0 and not options.autoRepair:
          showInfo("Run with --auto-repair to attempt automatic fixes")

    else:
      # Structured output
      let payload = %*{
        "summary": %*{
          "total_packages": summary.totalPackages,
          "verified_packages": summary.verifiedPackages,
          "failed_packages": summary.failedPackages,
          "integrity_passed": summary.integrityPassed,
          "integrity_failed": summary.integrityFailed,
          "signatures_passed": summary.signaturesPassed,
          "signatures_failed": summary.signaturesFailed,
          "duration": summary.duration,
          "timestamp": $summary.timestamp
        },
        "results": results.mapIt(%*{
          "check_type": it.checkType,
          "package_name": it.packageName,
          "success": it.success,
          "message": it.message,
          "duration": it.duration
        }),
        "options": %*{
          "target": options.target,
          "check_signatures": options.checkSignatures,
          "check_hashes": options.checkHashes,
          "auto_repair": options.autoRepair
        }
      }
      outputData(payload)

    # Log verification event (simplified)
    if options.verbose:
      if summary.failedPackages == 0:
        showSuccess(fmt"Package verification completed: {summary.verifiedPackages}/{summary.totalPackages} packages verified")
      else:
        showWarning(fmt"Package verification completed with issues: {summary.failedPackages}/{summary.totalPackages} packages failed")

    # Return appropriate result
    if summary.failedPackages == 0:
      return successResult(fmt"Verification completed: {summary.verifiedPackages}/{summary.totalPackages} packages verified successfully")
    else:
      return errorResult(fmt"Verification failed: {summary.failedPackages}/{summary.totalPackages} packages failed verification", 1)

  except Exception as e:
    return errorResult(fmt"Verify command failed: {e.msg}")

# Export main functions
export nipVerifyCommand, VerifyOptions, parseVerifyOptions, VerificationSummary
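
# Manual test sketch (illustrative; the real dispatch lives in the CLI layer):
when isMainModule:
  discard nipVerifyCommand(@["--all", "--hashes-only", "--verbose"])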
@ -1,342 +0,0 @@
## NEXTER Container Namespace and Isolation
##
## **Purpose:**
## Implements container namespace isolation for NEXTER containers.
## Handles network, PID, IPC, UTS namespace creation and management.
## Sets up environment variables and mounts CAS chunks.
##
## **Design Principles:**
## - Lightweight container isolation
## - Namespace-based process isolation
## - Read-only CAS chunk mounts
## - Capability-based security
##
## **Requirements:**
## - Requirement 5.4: Container isolation (network, PID, IPC, UTS)
## - Requirement 5.4: Environment variables and CAS mounts
## - Requirement 5.4: Capability configuration

import std/[os, times, options, tables, osproc, strutils]
import nip/[nexter_manifest, namespace]

type
  ContainerNamespaceConfig* = object
    ## Container namespace configuration
    isolationType*: string      ## "full", "network", "pid", "ipc", "uts"
    capabilities*: seq[string]  ## Linux capabilities
    mounts*: seq[ContainerMount]
    devices*: seq[DeviceSpec]   ## Use DeviceSpec from manifest
    environment*: Table[string, string]

  ContainerMount* = object
    ## Container mount specification
    source*: string
    target*: string
    mountType*: string          ## "bind", "tmpfs", "devtmpfs"
    readOnly*: bool
    options*: seq[string]

  ContainerRuntime* = object
    ## Container runtime state
    id*: string
    name*: string
    manifest*: NEXTERManifest
    config*: ContainerNamespaceConfig
    pid*: int
    startTime*: DateTime
    status*: ContainerStatus
    environment*: Table[string, string]

  ContainerStatus* = enum
    ## Container lifecycle status
    Created,
    Running,
    Paused,
    Stopped,
    Exited,
    Error

  ContainerError* = object of CatchableError
    code*: ContainerErrorCode
    context*: string
    suggestions*: seq[string]

  ContainerErrorCode* = enum
    NamespaceCreationFailed,
    MountFailed,
    CapabilityFailed,
    EnvironmentSetupFailed,
    ProcessExecutionFailed,
    InvalidConfiguration

# ============================================================================
# Container Configuration
# ============================================================================

proc createContainerConfig*(manifest: NEXTERManifest, casRoot: string): ContainerNamespaceConfig =
  ## Create container namespace configuration from manifest
  ##
  ## **Requirements:**
  ## - Requirement 5.4: Create namespace config with isolation settings
  ##
  ## **Process:**
  ## 1. Extract namespace configuration from manifest
  ## 2. Set up environment variables
  ## 3. Configure mounts for CAS chunks
  ## 4. Configure capabilities
  ## 5. Configure devices

  var config = ContainerNamespaceConfig(
    isolationType: manifest.namespace.isolationType,
    capabilities: manifest.namespace.capabilities,
    mounts: @[],
    devices: manifest.namespace.devices,
    environment: manifest.environment
  )

  # Add CAS mount for read-only access to chunks
  config.mounts.add(ContainerMount(
    source: casRoot / "chunks",
    target: "/cas",
    mountType: "bind",
    readOnly: true,
    options: @["rbind", "ro"]
  ))

  # Add standard mounts
  config.mounts.add(ContainerMount(
    source: "tmpfs",
    target: "/tmp",
    mountType: "tmpfs",
    readOnly: false,
    options: @["size=1G", "mode=1777"]
  ))

  config.mounts.add(ContainerMount(
    source: "tmpfs",
    target: "/run",
    mountType: "tmpfs",
    readOnly: false,
    options: @["size=1G", "mode=0755"]
  ))

  return config

# ============================================================================
# Namespace Setup
# ============================================================================

proc setupContainerNamespace*(config: ContainerNamespaceConfig): bool =
  ## Set up container namespace isolation
  ##
  ## **Requirements:**
  ## - Requirement 5.4: Create isolated namespaces
  ##
  ## **Process:**
  ## 1. Create user namespace
  ## 2. Create mount namespace
  ## 3. Create PID namespace (if requested)
  ## 4. Create network namespace (if requested)
  ## 5. Create IPC namespace (if requested)
  ## 6. Create UTS namespace (if requested)

  try:
    # Validate isolation type
    case config.isolationType:
    of "full":
      # Full isolation: all namespaces
      # This would use unshare() with all namespace flags
      discard
    of "network":
      # Network isolation only
      discard
    of "pid":
      # PID isolation only
      discard
    of "ipc":
      # IPC isolation only
      discard
    of "uts":
      # UTS (hostname) isolation only
      discard
    else:
      return false

    # In a real implementation, we would call unshare() here
    # For now, just validate the configuration
    return true

  except Exception as e:
    return false
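
# Sketch (assumption, not part of the original module): the unshare(2) call
# alluded to above could be bound roughly like this. Flag values are the
# standard Linux constants from <sched.h>; _GNU_SOURCE is needed for the
# declaration to be visible.
{.passC: "-D_GNU_SOURCE".}
proc unshare(flags: cint): cint {.importc, header: "<sched.h>".}

const
  CLONE_NEWNS  = cint(0x00020000)   # mount namespace
  CLONE_NEWUTS = cint(0x04000000)   # UTS (hostname) namespace
  CLONE_NEWIPC = cint(0x08000000)   # IPC namespace
  CLONE_NEWPID = cint(0x20000000)   # PID namespace
  CLONE_NEWNET = cint(0x40000000)   # network namespace

# "full" isolation would then be roughly:
#   discard unshare(CLONE_NEWNS or CLONE_NEWUTS or CLONE_NEWIPC or
#                   CLONE_NEWPID or CLONE_NEWNET)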

# ============================================================================
# Mount Management
# ============================================================================

proc setupContainerMounts*(config: ContainerNamespaceConfig): bool =
  ## Set up container mounts
  ##
  ## **Requirements:**
  ## - Requirement 5.4: Mount CAS chunks and configure filesystem
  ##
  ## **Process:**
  ## 1. Create mount points
  ## 2. Mount CAS chunks read-only
  ## 3. Mount tmpfs for temporary storage
  ## 4. Mount devices if configured

  try:
    for mount in config.mounts:
      # Create target directory if needed
      if not dirExists(mount.target):
        createDir(mount.target)

      # Mount based on type
      case mount.mountType:
      of "bind":
        # Bind mount
        let flags = if mount.readOnly: "rbind,ro" else: "rbind"
        let cmd = "mount -o " & flags & " " & mount.source & " " & mount.target
        let exitCode = execCmd(cmd)
        if exitCode != 0:
          return false

      of "tmpfs":
        # Tmpfs mount
        let options = mount.options.join(",")
        let cmd = "mount -t tmpfs -o " & options & " tmpfs " & mount.target
        let exitCode = execCmd(cmd)
        if exitCode != 0:
          return false

      of "devtmpfs":
        # Device tmpfs mount
        let options = mount.options.join(",")
        let cmd = "mount -t devtmpfs -o " & options & " devtmpfs " & mount.target
        let exitCode = execCmd(cmd)
        if exitCode != 0:
          return false

      else:
        return false

    return true

  except Exception as e:
    return false
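
# Example commands this proc shells out to for the default config built by
# createContainerConfig (paths are illustrative):
#   mount -o rbind,ro <casRoot>/chunks /cas
#   mount -t tmpfs -o size=1G,mode=1777 tmpfs /tmp
#   mount -t tmpfs -o size=1G,mode=0755 tmpfs /run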

# ============================================================================
# Capability Management
# ============================================================================

proc setupContainerCapabilities*(config: ContainerNamespaceConfig): bool =
  ## Set up container capabilities
  ##
  ## **Requirements:**
  ## - Requirement 5.4: Configure Linux capabilities
  ##
  ## **Process:**
  ## 1. Parse capability list
  ## 2. Drop unnecessary capabilities
  ## 3. Keep only required capabilities

  try:
    if config.capabilities.len == 0:
      # No capabilities specified - drop all
      let cmd = "setcap -r /proc/self/exe"
      discard execCmd(cmd)
    else:
      # Set specific capabilities
      let capString = config.capabilities.join(",")
      let cmd = "setcap cap_" & capString & "+ep /proc/self/exe"
      let exitCode = execCmd(cmd)
      if exitCode != 0:
        return false

    return true

  except Exception as e:
    return false

# ============================================================================
# Environment Setup
# ============================================================================

proc setupContainerEnvironment*(config: ContainerNamespaceConfig): bool =
  ## Set up container environment variables
  ##
  ## **Requirements:**
  ## - Requirement 5.4: Configure environment variables
  ##
  ## **Process:**
  ## 1. Parse environment variables from config
  ## 2. Set environment variables in current process
  ## 3. Prepare for child process inheritance

  try:
    for key, value in config.environment.pairs:
      putEnv(key, value)

    return true

  except Exception as e:
    return false

# ============================================================================
# Container Runtime
# ============================================================================

var containerCounter = 0

proc createContainerRuntime*(name: string, manifest: NEXTERManifest,
                             config: ContainerNamespaceConfig): ContainerRuntime =
  ## Create container runtime state
  ##
  ## **Requirements:**
  ## - Requirement 5.4: Initialize container runtime

  containerCounter += 1
  return ContainerRuntime(
    id: "container-" & $getTime().toUnix() & "-" & $containerCounter,
    name: name,
    manifest: manifest,
    config: config,
    pid: 0,
    startTime: now(),
    status: Created,
    environment: config.environment
  )

proc getContainerStatus*(runtime: ContainerRuntime): ContainerStatus =
  ## Get current container status
  if runtime.pid > 0:
    # Check if process is still running
    let cmd = "kill -0 " & $runtime.pid
    let exitCode = execCmd(cmd)
    if exitCode == 0:
      return Running
    else:
      return Exited
  else:
    return runtime.status

# ============================================================================
# Formatting
# ============================================================================

proc `$`*(config: ContainerNamespaceConfig): string =
  ## Format container config as string
  result = "Container Config:\n"
  result.add("  Isolation: " & config.isolationType & "\n")
  result.add("  Capabilities: " & config.capabilities.join(", ") & "\n")
  result.add("  Mounts: " & $config.mounts.len & "\n")
  result.add("  Devices: " & $config.devices.len & "\n")
  result.add("  Environment: " & $config.environment.len & " variables\n")

proc `$`*(runtime: ContainerRuntime): string =
  ## Format container runtime as string
  result = "Container: " & runtime.name & "\n"
  result.add("  ID: " & runtime.id & "\n")
  result.add("  PID: " & $runtime.pid & "\n")
  result.add("  Status: " & $runtime.status & "\n")
  result.add("  Started: " & runtime.startTime.format("yyyy-MM-dd HH:mm:ss") & "\n")
Some files were not shown because too many files have changed in this diff.