Compare commits

7 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 840a5bbf5f |  |
|  | 0f420fc6d0 |  |
|  | 29e2421771 |  |
|  | cce1f2f0fd |  |
|  | 281c686fde |  |
|  | a5880ebdf6 |  |
|  | a5f92e4da3 |  |
Dockerfile.alpine-musl (new file, 134 lines)
@@ -0,0 +1,134 @@
# Alpine-based MUSL static binary builder for Ginxsom
# Produces truly portable binaries with zero runtime dependencies

ARG DEBUG_BUILD=false

FROM alpine:3.19 AS builder

# Re-declare build argument in this stage
ARG DEBUG_BUILD=false

# Install build dependencies
RUN apk add --no-cache \
    build-base \
    musl-dev \
    git \
    cmake \
    pkgconfig \
    autoconf \
    automake \
    libtool \
    openssl-dev \
    openssl-libs-static \
    zlib-dev \
    zlib-static \
    curl-dev \
    curl-static \
    sqlite-dev \
    sqlite-static \
    fcgi-dev \
    fcgi \
    linux-headers \
    wget \
    bash \
    nghttp2-dev \
    nghttp2-static \
    c-ares-dev \
    c-ares-static \
    libidn2-dev \
    libidn2-static \
    libunistring-dev \
    libunistring-static \
    libpsl-dev \
    libpsl-static \
    brotli-dev \
    brotli-static

# Set working directory
WORKDIR /build

# Build libsecp256k1 static (cached layer - only rebuilds if Alpine version changes)
RUN cd /tmp && \
    git clone https://github.com/bitcoin-core/secp256k1.git && \
    cd secp256k1 && \
    ./autogen.sh && \
    ./configure --enable-static --disable-shared --prefix=/usr \
        CFLAGS="-fPIC" && \
    make -j$(nproc) && \
    make install && \
    rm -rf /tmp/secp256k1

# Copy only submodule configuration and git directory
COPY .gitmodules /build/.gitmodules
COPY .git /build/.git

# Initialize submodules (cached unless .gitmodules changes)
RUN git submodule update --init --recursive

# Copy nostr_core_lib source files (cached unless nostr_core_lib changes)
COPY nostr_core_lib /build/nostr_core_lib/

# Build nostr_core_lib with required NIPs (cached unless nostr_core_lib changes)
# Disable fortification in build.sh to prevent __*_chk symbol issues
# NIPs: 001(Basic), 006(Keys), 013(PoW), 017(DMs), 019(Bech32), 042(Auth), 044(Encryption), 059(Gift Wrap)
RUN cd nostr_core_lib && \
    chmod +x build.sh && \
    sed -i 's/CFLAGS="-Wall -Wextra -std=c99 -fPIC -O2"/CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"/' build.sh && \
    rm -f *.o *.a 2>/dev/null || true && \
    ./build.sh --nips=1,6,13,17,19,42,44,59

# Copy web interface files for embedding
# Note: Changes to api/ files will trigger rebuild from this point
COPY api/ /build/api/
COPY scripts/embed_web_files.sh /build/scripts/

# Create src directory and embed web files into C headers
RUN mkdir -p src && \
    chmod +x scripts/embed_web_files.sh && \
    ./scripts/embed_web_files.sh

# Copy Ginxsom source files LAST (only this layer rebuilds on source changes)
# Note: The embedded header from the previous step will be overwritten by this COPY,
# so we need to ensure src/admin_interface_embedded.h is NOT in the src/ directory
COPY src/ /build/src/
COPY include/ /build/include/

# Build Ginxsom with full static linking (only rebuilds when src/ changes)
# Disable fortification to avoid __*_chk symbols that don't exist in MUSL
# Use conditional compilation flags based on the DEBUG_BUILD argument
RUN if [ "$DEBUG_BUILD" = "true" ]; then \
        CFLAGS="-g -O0 -DDEBUG"; \
        STRIP_CMD=""; \
        echo "Building with DEBUG symbols enabled"; \
    else \
        CFLAGS="-O2"; \
        STRIP_CMD="strip /build/ginxsom-fcgi_static"; \
        echo "Building optimized production binary"; \
    fi && \
    gcc -static $CFLAGS -Wall -Wextra -std=gnu99 \
        -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
        -I. -Iinclude -Inostr_core_lib -Inostr_core_lib/nostr_core \
        -Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket \
        src/main.c src/admin_api.c src/admin_auth.c src/admin_event.c \
        src/admin_handlers.c src/admin_interface.c src/admin_commands.c \
        src/bud04.c src/bud06.c src/bud08.c src/bud09.c \
        src/request_validator.c src/relay_client.c \
        nostr_core_lib/nostr_core/core_relay_pool.c \
        -o /build/ginxsom-fcgi_static \
        nostr_core_lib/libnostr_core_x64.a \
        -lfcgi -lsqlite3 -lsecp256k1 -lssl -lcrypto -lcurl \
        -lnghttp2 -lcares -lidn2 -lunistring -lpsl -lbrotlidec -lbrotlicommon \
        -lz -lpthread -lm -ldl && \
    eval "$STRIP_CMD"

# Verify it's truly static
RUN echo "=== Binary Information ===" && \
    file /build/ginxsom-fcgi_static && \
    ls -lh /build/ginxsom-fcgi_static && \
    echo "=== Checking for dynamic dependencies ===" && \
    (ldd /build/ginxsom-fcgi_static 2>&1 || echo "Binary is static") && \
    echo "=== Build complete ==="

# Output stage - just the binary
FROM scratch AS output
COPY --from=builder /build/ginxsom-fcgi_static /ginxsom-fcgi_static
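The final `FROM scratch` stage holds only the binary, so nothing in this image is directly runnable; the binary has to be lifted out of the builder stage instead. A minimal sketch of the build-and-extract flow that `build_static.sh` automates (the image tag here is arbitrary):

```bash
# Build just the builder stage, then copy the binary out of a
# temporary container. The tag name is arbitrary.
docker build --build-arg DEBUG_BUILD=false --target builder \
    -f Dockerfile.alpine-musl -t ginxsom-musl-builder .
CID=$(docker create ginxsom-musl-builder)
docker cp "$CID:/build/ginxsom-fcgi_static" build/ginxsom-fcgi_static_x86_64
docker rm "$CID"
```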
Makefile (20 lines changed)
@@ -43,10 +43,18 @@ $(POOL_OBJ): $(POOL_SRC) | $(BUILDDIR)
$(TARGET): $(OBJECTS) $(POOL_OBJ)
	$(CC) $(OBJECTS) $(POOL_OBJ) $(LIBS) -o $@

# Clean build files
# Clean build files (preserves static binaries)
clean:
	rm -rf $(BUILDDIR)
	rm -f $(EMBEDDED_HEADER)
	@echo "Note: Static binaries (ginxsom-fcgi_static_*) are preserved."
	@echo "To remove everything: make clean-all"

# Clean everything including static binaries
clean-all:
	rm -rf $(BUILDDIR)
	rm -f $(EMBEDDED_HEADER)
	@echo "✓ All build artifacts removed"

# Install (copy to system location)
install: $(TARGET)

@@ -69,4 +77,12 @@ debug: $(TARGET)
embed:
	@$(EMBED_SCRIPT)

.PHONY: all clean install uninstall run debug embed
# Static MUSL build via Docker
static:
	./build_static.sh

# Static MUSL build with debug symbols
static-debug:
	./build_static.sh --debug

.PHONY: all clean clean-all install uninstall run debug embed static static-debug
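The new targets make the Docker-based static build reachable from plain `make`:

```bash
make static        # production static build via ./build_static.sh
make static-debug  # static build with debug symbols (./build_static.sh --debug)
make clean         # per the notes above, static binaries are preserved
make clean-all     # removes everything, including static binaries
```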
README.md (15 lines changed)
@@ -431,6 +431,13 @@ All commands are sent as NIP-44 encrypted JSON arrays in the event content:
| `storage_stats` | `["storage_stats"]` | Get detailed storage statistics |
| `mirror_status` | `["mirror_status"]` | Get status of mirroring operations |
| `report_query` | `["report_query", "all"]` | Query content reports (BUD-09) |
| **Authorization Rules Management** | | |
| `auth_add_blacklist` | `["blacklist", "pubkey", "abc123..."]` | Add pubkey to blacklist |
| `auth_add_whitelist` | `["whitelist", "pubkey", "def456..."]` | Add pubkey to whitelist |
| `auth_delete_rule` | `["delete_auth_rule", "blacklist", "pubkey", "abc123..."]` | Delete specific auth rule |
| `auth_query_all` | `["auth_query", "all"]` | Query all auth rules |
| `auth_query_type` | `["auth_query", "whitelist"]` | Query specific rule type |
| `auth_query_pattern` | `["auth_query", "pattern", "abc123..."]` | Query specific pattern |
| **Database Queries** | | |
| `sql_query` | `["sql_query", "SELECT * FROM blobs LIMIT 10"]` | Execute read-only SQL query |
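All of these payloads are plain JSON arrays before NIP-44 encryption. A small sketch of producing one such plaintext with jq (the pattern value is a placeholder; encryption and event signing are out of scope here):

```bash
# Emit the compact plaintext array for an auth_query command;
# "abc123..." stands in for a real 64-character hex pubkey.
jq -cn '["auth_query", "pattern", "abc123..."]'
```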
@@ -448,10 +455,16 @@ All commands are sent as NIP-44 encrypted JSON arrays in the event content:
- `kind_10002_tags`: Relay list JSON array

**Authentication Settings:**
- `auth_enabled`: Enable auth rules system
- `auth_rules_enabled`: Enable auth rules system
- `require_auth_upload`: Require authentication for uploads
- `require_auth_delete`: Require authentication for deletes

**Authorization Rules:**
- `rule_type`: Type of rule (`pubkey_blacklist`, `pubkey_whitelist`, `hash_blacklist`, `mime_blacklist`, `mime_whitelist`)
- `pattern_type`: Pattern matching type (`pubkey`, `hash`, `mime`)
- `pattern_value`: The actual value to match (64-char hex for pubkey/hash, MIME type string for mime)
- `active`: Whether rule is active (1) or disabled (0)

**Limits:**
- `max_blobs_per_user`: Per-user blob limit
- `rate_limit_uploads`: Uploads per minute
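For illustration, a whitelist rule expressed with these fields could be inserted directly with sqlite3 (a sketch only; the database path and hex value are placeholders):

```bash
sqlite3 /path/to/ginxsom.db \
  "INSERT INTO auth_rules (rule_type, pattern_type, pattern_value, active)
   VALUES ('pubkey_whitelist', 'pubkey', 'def456...', 1);"
```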
@@ -100,6 +100,10 @@
<td>Total Size</td>
<td id="total-size">-</td>
</tr>
<tr>
<td>Version</td>
<td id="version">-</td>
</tr>
<tr>
<td>Process ID</td>
<td id="process-id">-</td>

@@ -116,6 +120,14 @@
<td>CPU Usage</td>
<td id="cpu-usage">-</td>
</tr>
<tr>
<td>Filesystem Blob Count</td>
<td id="fs-blob-count">-</td>
</tr>
<tr>
<td>Filesystem Blob Size</td>
<td id="fs-blob-size">-</td>
</tr>
<tr>
<td>Oldest Blob</td>
<td id="oldest-event">-</td>

@@ -240,7 +252,7 @@ AUTH RULES MANAGEMENT
</div>

<!-- Auth Rules Table -->
<div id="authRulesTableContainer" style="display: none;">
<div id="authRulesTableContainer" class="config-table-container">
<table class="config-table" id="authRulesTable">
<thead>
<tr>

@@ -252,6 +264,9 @@ AUTH RULES MANAGEMENT
</tr>
</thead>
<tbody id="authRulesTableBody">
<tr>
<td colspan="5" style="text-align: center; font-style: italic;">Loading auth rules...</td>
</tr>
</tbody>
</table>
</div>

@@ -263,8 +278,8 @@ AUTH RULES MANAGEMENT

<div class="input-group">
<label for="authRulePubkey">Pubkey (nsec or hex):</label>
<input type="text" id="authRulePubkey" placeholder="nsec1... or 64-character hex pubkey">
<label for="authRulePubkey">Pubkey (npub or hex):</label>
<input type="text" id="authRulePubkey" placeholder="npub1... or 64-character hex pubkey">

</div>
<div id="whitelistWarning" class="warning-box" style="display: none;">
api/index.js (1701 lines changed)
File diff suppressed because it is too large.

Binary files not shown.

build/main.o (BIN)
Binary file not shown.
build_static.sh (new executable file, 223 lines)
@@ -0,0 +1,223 @@
#!/bin/bash

# Build fully static MUSL binaries for Ginxsom using Alpine Docker
# Produces truly portable binaries with zero runtime dependencies

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BUILD_DIR="$SCRIPT_DIR/build"
DOCKERFILE="$SCRIPT_DIR/Dockerfile.alpine-musl"

# Parse command line arguments
DEBUG_BUILD=false
if [[ "$1" == "--debug" ]]; then
    DEBUG_BUILD=true
    echo "=========================================="
    echo "Ginxsom MUSL Static Binary Builder (DEBUG MODE)"
    echo "=========================================="
else
    echo "=========================================="
    echo "Ginxsom MUSL Static Binary Builder (PRODUCTION MODE)"
    echo "=========================================="
fi
echo "Project directory: $SCRIPT_DIR"
echo "Build directory: $BUILD_DIR"
echo "Debug build: $DEBUG_BUILD"
echo ""

# Create build directory
mkdir -p "$BUILD_DIR"

# Check if Docker is available
if ! command -v docker &> /dev/null; then
    echo "ERROR: Docker is not installed or not in PATH"
    echo ""
    echo "Docker is required to build MUSL static binaries."
    echo "Please install Docker:"
    echo "  - Ubuntu/Debian: sudo apt install docker.io"
    echo "  - Or visit: https://docs.docker.com/engine/install/"
    echo ""
    exit 1
fi

# Check if Docker daemon is running
if ! docker info &> /dev/null; then
    echo "ERROR: Docker daemon is not running or user not in docker group"
    echo ""
    echo "Please start Docker and ensure you're in the docker group:"
    echo "  - sudo systemctl start docker"
    echo "  - sudo usermod -aG docker $USER && newgrp docker"
    echo "  - Or start Docker Desktop"
    echo ""
    exit 1
fi

DOCKER_CMD="docker"

echo "✓ Docker is available and running"
echo ""

# Detect architecture
ARCH=$(uname -m)
case "$ARCH" in
    x86_64)
        PLATFORM="linux/amd64"
        OUTPUT_NAME="ginxsom-fcgi_static_x86_64"
        ;;
    aarch64|arm64)
        PLATFORM="linux/arm64"
        OUTPUT_NAME="ginxsom-fcgi_static_arm64"
        ;;
    *)
        echo "WARNING: Unknown architecture: $ARCH"
        echo "Defaulting to linux/amd64"
        PLATFORM="linux/amd64"
        OUTPUT_NAME="ginxsom-fcgi_static_${ARCH}"
        ;;
esac

echo "Building for platform: $PLATFORM"
echo "Output binary: $OUTPUT_NAME"
echo ""

# Build the Docker image
echo "=========================================="
echo "Step 1: Building Alpine Docker image"
echo "=========================================="
echo "This will:"
echo "  - Use Alpine Linux (native MUSL)"
echo "  - Build all dependencies statically"
echo "  - Compile Ginxsom with full static linking"
echo ""

$DOCKER_CMD build \
    --platform "$PLATFORM" \
    --build-arg DEBUG_BUILD=$DEBUG_BUILD \
    -f "$DOCKERFILE" \
    -t ginxsom-musl-builder:latest \
    --progress=plain \
    . || {
    echo ""
    echo "ERROR: Docker build failed"
    echo "Check the output above for details"
    exit 1
}

echo ""
echo "✓ Docker image built successfully"
echo ""

# Extract the binary from the container
echo "=========================================="
echo "Step 2: Extracting static binary"
echo "=========================================="

# Build the builder stage to extract the binary
$DOCKER_CMD build \
    --platform "$PLATFORM" \
    --build-arg DEBUG_BUILD=$DEBUG_BUILD \
    --target builder \
    -f "$DOCKERFILE" \
    -t ginxsom-static-builder-stage:latest \
    . > /dev/null 2>&1

# Create a temporary container to copy the binary
CONTAINER_ID=$($DOCKER_CMD create ginxsom-static-builder-stage:latest)

# Copy binary from container
$DOCKER_CMD cp "$CONTAINER_ID:/build/ginxsom-fcgi_static" "$BUILD_DIR/$OUTPUT_NAME" || {
    echo "ERROR: Failed to extract binary from container"
    $DOCKER_CMD rm "$CONTAINER_ID" 2>/dev/null
    exit 1
}

# Clean up container
$DOCKER_CMD rm "$CONTAINER_ID" > /dev/null

echo "✓ Binary extracted to: $BUILD_DIR/$OUTPUT_NAME"
echo ""

# Make binary executable
chmod +x "$BUILD_DIR/$OUTPUT_NAME"

# Verify the binary
echo "=========================================="
echo "Step 3: Verifying static binary"
echo "=========================================="
echo ""

echo "Checking for dynamic dependencies:"
if LDD_OUTPUT=$(timeout 5 ldd "$BUILD_DIR/$OUTPUT_NAME" 2>&1); then
    if echo "$LDD_OUTPUT" | grep -q "not a dynamic executable"; then
        echo "✓ Binary is fully static (no dynamic dependencies)"
        TRULY_STATIC=true
    elif echo "$LDD_OUTPUT" | grep -q "statically linked"; then
        echo "✓ Binary is statically linked"
        TRULY_STATIC=true
    else
        echo "⚠ WARNING: Binary may have dynamic dependencies:"
        echo "$LDD_OUTPUT"
        TRULY_STATIC=false
    fi
else
    # ldd failed or timed out - check with file command instead
    if file "$BUILD_DIR/$OUTPUT_NAME" | grep -q "statically linked"; then
        echo "✓ Binary is statically linked (verified with file command)"
        TRULY_STATIC=true
    else
        echo "⚠ Could not verify static linking (ldd check failed)"
        TRULY_STATIC=false
    fi
fi
echo ""

echo "File size: $(ls -lh "$BUILD_DIR/$OUTPUT_NAME" | awk '{print $5}')"
echo ""

# Summary
echo "=========================================="
echo "Build Summary"
echo "=========================================="
echo "Binary: $BUILD_DIR/$OUTPUT_NAME"
echo "Size: $(du -h "$BUILD_DIR/$OUTPUT_NAME" | cut -f1)"
echo "Platform: $PLATFORM"
if [ "$DEBUG_BUILD" = true ]; then
    echo "Build Type: DEBUG (with symbols, no optimization)"
else
    echo "Build Type: PRODUCTION (optimized, stripped)"
fi
if [ "$TRULY_STATIC" = true ]; then
    echo "Linkage: Fully static binary (Alpine MUSL-based)"
    echo "Portability: Works on ANY Linux distribution"
else
    echo "Linkage: Static binary (may have minimal dependencies)"
fi
echo ""
echo "✓ Build complete!"
echo ""

# Clean up old dynamic build artifacts
echo "=========================================="
echo "Cleaning up old build artifacts"
echo "=========================================="
echo ""
if ls build/*.o 2>/dev/null | grep -q .; then
    echo "Removing old .o files from dynamic builds..."
    rm -f build/*.o
    echo "✓ Cleanup complete"
else
    echo "No .o files to clean"
fi

# Also remove old dynamic binary if it exists
if [ -f "build/ginxsom-fcgi" ]; then
    echo "Removing old dynamic binary..."
    rm -f build/ginxsom-fcgi
    echo "✓ Old binary removed"
fi

echo ""
echo "Deployment:"
echo "  scp $BUILD_DIR/$OUTPUT_NAME user@server:/path/to/ginxsom/"
echo ""
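The script's Step 3 checks can be repeated by hand on the extracted binary, for example on x86_64:

```bash
file build/ginxsom-fcgi_static_x86_64        # expect "statically linked"
ldd build/ginxsom-fcgi_static_x86_64 || true # expect "not a dynamic executable"
```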
Binary file not shown.

deploy_lt.sh (458 lines changed)
@@ -22,231 +22,226 @@ fi
# Configuration
REMOTE_HOST="laantungir.net"
REMOTE_USER="ubuntu"
REMOTE_DIR="/home/ubuntu/ginxsom"
REMOTE_DB_PATH="/home/ubuntu/ginxsom/db/ginxsom.db"
REMOTE_NGINX_CONFIG="/etc/nginx/conf.d/default.conf"
REMOTE_BINARY_PATH="/home/ubuntu/ginxsom/ginxsom.fcgi"

# Deployment paths
REMOTE_BINARY_DIR="/usr/local/bin/ginxsom"
REMOTE_BINARY_PATH="$REMOTE_BINARY_DIR/ginxsom-fcgi"
REMOTE_DB_PATH="$REMOTE_BINARY_DIR"
REMOTE_BLOB_DIR="/var/www/blobs"
REMOTE_SOCKET="/tmp/ginxsom-fcgi.sock"
REMOTE_DATA_DIR="/var/www/html/blossom"

print_status "Starting deployment to $REMOTE_HOST..."
# Production keys
ADMIN_PUBKEY="1ec454734dcbf6fe54901ce25c0c7c6bca5edd89443416761fadc321d38df139"
SERVER_PRIVKEY="90df3fe61e7d19e50f387e4c5db87eff1a7d2a1037cd55026c4b21a4fda8ecf6"

# Step 1: Build and prepare local binary
print_status "Building ginxsom binary..."
make clean && make
if [[ ! -f "build/ginxsom-fcgi" ]]; then
print_error "Build failed - binary not found"
# Local paths
LOCAL_BINARY="build/ginxsom-fcgi_static_x86_64"

print_status "=========================================="
print_status "Ginxsom Static Binary Deployment"
print_status "=========================================="
print_status "Target: $REMOTE_HOST"
print_status "Binary: $REMOTE_BINARY_PATH"
print_status "Database: $REMOTE_DB_PATH"
print_status "Blobs: $REMOTE_BLOB_DIR"
print_status "Fresh install: $FRESH_INSTALL"
print_status "=========================================="
echo ""

# Step 1: Verify local binary exists
print_status "Step 1: Verifying local static binary..."
if [[ ! -f "$LOCAL_BINARY" ]]; then
print_error "Static binary not found: $LOCAL_BINARY"
print_status "Please run: ./build_static.sh"
exit 1
fi
print_success "Binary built successfully"

# Step 2: Setup remote environment first (before copying files)
print_status "Setting up remote environment..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
# Verify it's actually static
if ldd "$LOCAL_BINARY" 2>&1 | grep -q "not a dynamic executable\|statically linked"; then
print_success "Binary is static"
else
print_warning "Binary may not be fully static - proceeding anyway"
fi

BINARY_SIZE=$(du -h "$LOCAL_BINARY" | cut -f1)
print_success "Found static binary ($BINARY_SIZE)"
echo ""

# Step 2: Upload binary to server
print_status "Step 2: Uploading binary to server..."
scp "$LOCAL_BINARY" $REMOTE_USER@$REMOTE_HOST:~/ginxsom-fcgi_new || {
print_error "Failed to upload binary"
exit 1
}
print_success "Binary uploaded to ~/ginxsom-fcgi_new"
echo ""

# Step 3: Setup directories
print_status "Step 3: Setting up directories..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e

# Create data directory if it doesn't exist (using existing /var/www/html/blossom)
sudo mkdir -p /var/www/html/blossom
sudo chown www-data:www-data /var/www/html/blossom
sudo chmod 755 /var/www/html/blossom

# Ensure socket directory exists
sudo mkdir -p /tmp
sudo chmod 755 /tmp

# Install required dependencies
echo "Installing required dependencies..."
sudo apt-get update
sudo apt-get install -y spawn-fcgi libfcgi-dev

# Stop any existing ginxsom processes
echo "Stopping existing ginxsom processes..."
sudo pkill -f ginxsom-fcgi || true
sudo rm -f /tmp/ginxsom-fcgi.sock || true

echo "Remote environment setup complete"

# Create binary/database directory
echo "Creating application directory..."
sudo mkdir -p $REMOTE_BINARY_DIR
sudo chown www-data:www-data $REMOTE_BINARY_DIR
sudo chmod 755 $REMOTE_BINARY_DIR

# Create blob storage directory
echo "Creating blob storage directory..."
sudo mkdir -p $REMOTE_BLOB_DIR
sudo chown www-data:www-data $REMOTE_BLOB_DIR
sudo chmod 755 $REMOTE_BLOB_DIR

# Create logs directory
echo "Creating logs directory..."
sudo mkdir -p $REMOTE_BINARY_DIR/logs/app
sudo chown -R www-data:www-data $REMOTE_BINARY_DIR/logs
sudo chmod -R 755 $REMOTE_BINARY_DIR/logs

echo "Directories created successfully"
EOF

print_success "Remote environment configured"
if [ $? -ne 0 ]; then
print_error "Failed to create directories"
exit 1
fi
print_success "Directories created"
echo ""

# Step 3: Copy files to remote server
print_status "Copying files to remote server..."
# Step 4: Handle fresh install if requested
if [ "$FRESH_INSTALL" = true ]; then
print_status "Step 4: Fresh install - removing existing data..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
sudo rm -f $REMOTE_DB_PATH/*.db
sudo rm -rf $REMOTE_BLOB_DIR/*
echo "Existing data removed"
EOF
print_success "Fresh install prepared"
echo ""
else
print_status "Step 4: Preserving existing data"
echo ""
fi

# Copy entire project directory (excluding unnecessary files)
print_status "Copying entire ginxsom project..."
rsync -avz --exclude='.git' --exclude='build' --exclude='logs' --exclude='Trash' --exclude='blobs' --exclude='db' --no-g --no-o --no-perms --omit-dir-times . $REMOTE_USER@$REMOTE_HOST:$REMOTE_DIR/
# Step 5: Install minimal dependencies
print_status "Step 5: Installing minimal dependencies..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
set -e

# Check if spawn-fcgi is installed
if ! command -v spawn-fcgi &> /dev/null; then
echo "Installing spawn-fcgi..."
sudo apt-get update -qq
sudo apt-get install -y spawn-fcgi
echo "spawn-fcgi installed"
else
echo "spawn-fcgi already installed"
fi
EOF

# Build on remote server to ensure compatibility
print_status "Building ginxsom on remote server..."
ssh $REMOTE_USER@$REMOTE_HOST "cd $REMOTE_DIR && make clean && make" || {
print_error "Build failed on remote server"
print_status "Checking what packages are actually installed..."
ssh $REMOTE_USER@$REMOTE_HOST "dpkg -l | grep -E '(sqlite|fcgi)'"
if [ $? -eq 0 ]; then
print_success "Dependencies verified"
else
print_error "Failed to install dependencies"
exit 1
fi
echo ""

# Step 6: Upload and install systemd service file
print_status "Step 6: Installing systemd service file..."
scp ginxsom.service $REMOTE_USER@$REMOTE_HOST:~/ginxsom.service || {
print_error "Failed to upload service file"
exit 1
}

# Copy binary to application directory
print_status "Copying ginxsom binary to application directory..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
# Stop any running process first
sudo pkill -f ginxsom-fcgi || true
sleep 1

# Remove old binary if it exists
rm -f $REMOTE_BINARY_PATH

# Copy new binary
cp $REMOTE_DIR/build/ginxsom-fcgi $REMOTE_BINARY_PATH
chmod +x $REMOTE_BINARY_PATH
chown ubuntu:ubuntu $REMOTE_BINARY_PATH

echo "Binary copied successfully"
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
sudo cp ~/ginxsom.service /etc/systemd/system/
sudo systemctl daemon-reload
echo "Service file installed"
EOF

# NOTE: Do NOT update nginx configuration automatically
# The deployment script should only update ginxsom binaries and do nothing else with the system
# Nginx configuration should be managed manually by the system administrator
print_status "Skipping nginx configuration update (manual control required)"
if [ $? -eq 0 ]; then
print_success "Service file installed"
else
print_error "Failed to install service file"
exit 1
fi
echo ""

print_success "Files copied to remote server"

# Step 3: Setup remote environment
print_status "Setting up remote environment..."

ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
# Step 7: Stop existing service and install new binary
print_status "Step 7: Stopping existing service and installing new binary..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e

# Create data directory if it doesn't exist (using existing /var/www/html/blossom)
sudo mkdir -p /var/www/html/blossom
sudo chown www-data:www-data /var/www/html/blossom
sudo chmod 755 /var/www/html/blossom

# Ensure socket directory exists
sudo mkdir -p /tmp
sudo chmod 755 /tmp

# Install required dependencies
echo "Installing required dependencies..."
sudo apt-get update 2>/dev/null || true # Continue even if apt update has issues
sudo apt-get install -y spawn-fcgi libfcgi-dev libsqlite3-dev sqlite3 libcurl4-openssl-dev

# Verify installations
echo "Verifying installations..."
if ! dpkg -l libsqlite3-dev >/dev/null 2>&1; then
echo "libsqlite3-dev not found, trying alternative..."
sudo apt-get install -y libsqlite3-dev || {
echo "Failed to install libsqlite3-dev"
exit 1
}
fi
if ! dpkg -l libfcgi-dev >/dev/null 2>&1; then
echo "libfcgi-dev not found"
exit 1
fi

# Check if sqlite3.h exists
if [ ! -f /usr/include/sqlite3.h ]; then
echo "sqlite3.h not found in /usr/include/"
find /usr -name "sqlite3.h" 2>/dev/null || echo "sqlite3.h not found anywhere"
exit 1
fi

# Stop any existing ginxsom processes
echo "Stopping existing ginxsom processes..."
sudo pkill -f ginxsom-fcgi || true
sudo rm -f /tmp/ginxsom-fcgi.sock || true

echo "Remote environment setup complete"
EOF

print_success "Remote environment configured"

# Step 4: Setup database directory and migrate database
print_status "Setting up database directory..."

ssh $REMOTE_USER@$REMOTE_HOST << EOF
# Create db directory if it doesn't exist
mkdir -p $REMOTE_DIR/db

if [ "$FRESH_INSTALL" = "true" ]; then
echo "Fresh install: removing existing database and blobs..."
# Remove existing database
sudo rm -f $REMOTE_DB_PATH
sudo rm -f /var/www/html/blossom/ginxsom.db
# Remove existing blobs
sudo rm -rf $REMOTE_DATA_DIR/*
echo "Existing data removed"
else
# Backup current database if it exists in old location
if [ -f /var/www/html/blossom/ginxsom.db ]; then
echo "Backing up existing database..."
cp /var/www/html/blossom/ginxsom.db /var/www/html/blossom/ginxsom.db.backup.\$(date +%Y%m%d_%H%M%S)

# Migrate database to new location if not already there
if [ ! -f $REMOTE_DB_PATH ]; then
echo "Migrating database to new location..."
cp /var/www/html/blossom/ginxsom.db $REMOTE_DB_PATH
else
echo "Database already exists at new location"
fi
elif [ ! -f $REMOTE_DB_PATH ]; then
echo "No existing database found - will be created on first run"
else
echo "Database already exists at $REMOTE_DB_PATH"
fi
fi

# Set proper permissions - www-data needs write access to db directory for SQLite journal files
sudo chown -R www-data:www-data $REMOTE_DIR/db
sudo chmod 755 $REMOTE_DIR/db
sudo chmod 644 $REMOTE_DB_PATH 2>/dev/null || true

# Allow www-data to access the application directory for spawn-fcgi chdir
chmod 755 $REMOTE_DIR

echo "Database directory setup complete"
EOF

print_success "Database directory configured"

# Step 5: Start ginxsom FastCGI process
print_status "Starting ginxsom FastCGI process..."

ssh $REMOTE_USER@$REMOTE_HOST << EOF
# Clean up any existing socket
sleep 2

# Remove old socket
sudo rm -f $REMOTE_SOCKET

# Install new binary
echo "Installing new binary..."
sudo mv ~/ginxsom-fcgi_new $REMOTE_BINARY_PATH
sudo chmod +x $REMOTE_BINARY_PATH
sudo chown www-data:www-data $REMOTE_BINARY_PATH

echo "Binary installed successfully"
EOF

# Start FastCGI process with explicit paths
if [ $? -eq 0 ]; then
print_success "Binary installed"
else
print_error "Failed to install binary"
exit 1
fi
echo ""

# Step 8: Start ginxsom FastCGI process
print_status "Step 8: Starting ginxsom service..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e

echo "Starting ginxsom FastCGI with configuration:"
echo "  Working directory: $REMOTE_DIR"
echo "  Binary: $REMOTE_BINARY_PATH"
echo "  Database: $REMOTE_DB_PATH"
echo "  Storage: $REMOTE_DATA_DIR"
echo "  Storage: $REMOTE_BLOB_DIR"
echo "  Socket: $REMOTE_SOCKET"
echo ""

sudo spawn-fcgi \
    -M 666 \
    -u www-data \
    -g www-data \
    -s $REMOTE_SOCKET \
    -U www-data \
    -G www-data \
    -d $REMOTE_BINARY_DIR \
    -- $REMOTE_BINARY_PATH \
    --admin-pubkey $ADMIN_PUBKEY \
    --server-privkey $SERVER_PRIVKEY \
    --db-path $REMOTE_DB_PATH \
    --storage-dir $REMOTE_BLOB_DIR

sudo spawn-fcgi -M 666 -u www-data -g www-data -s $REMOTE_SOCKET -U www-data -G www-data -d $REMOTE_DIR -- $REMOTE_BINARY_PATH --db-path "$REMOTE_DB_PATH" --storage-dir "$REMOTE_DATA_DIR"

# Give it a moment to start
sleep 2

# Verify process is running
if pgrep -f "ginxsom-fcgi" > /dev/null; then
echo "FastCGI process started successfully"
echo "PID: \$(pgrep -f ginxsom-fcgi)"
else
echo "Process not found by pgrep, but socket exists - this may be normal for FastCGI"
echo "Checking socket..."
if [ -S $REMOTE_SOCKET ]; then
echo "FastCGI socket created successfully"
ls -la $REMOTE_SOCKET
echo "Checking if binary exists and is executable..."
ls -la $REMOTE_BINARY_PATH
echo "Testing if we can connect to the socket..."
# Try to test the FastCGI connection
if command -v cgi-fcgi >/dev/null 2>&1; then
echo "Testing FastCGI connection..."
SCRIPT_NAME=/health SCRIPT_FILENAME=$REMOTE_BINARY_PATH REQUEST_METHOD=GET cgi-fcgi -bind -connect $REMOTE_SOCKET 2>/dev/null | head -5 || echo "Connection test failed"
else
echo "cgi-fcgi not available for testing"
fi
# Don't exit - the socket existing means spawn-fcgi worked
else
echo "ERROR: Socket not created"
exit 1
fi

# Check if process is running
if pgrep -f ginxsom-fcgi > /dev/null; then
echo "Process is running (PID: \$(pgrep -f ginxsom-fcgi))"
else
echo "WARNING: Process not found by pgrep (may be normal for FastCGI)"
fi
EOF

@@ -256,51 +251,84 @@ else
print_error "Failed to start FastCGI process"
exit 1
fi
echo ""

# Step 6: Test nginx configuration and reload
print_status "Testing and reloading nginx..."

# Step 8: Test nginx configuration and reload
print_status "Step 8: Testing and reloading nginx..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
# Test nginx configuration
if sudo nginx -t; then
if sudo nginx -t 2>&1; then
echo "Nginx configuration test passed"
sudo nginx -s reload
echo "Nginx reloaded successfully"
else
echo "Nginx configuration test failed"
exit 1
echo "WARNING: Nginx configuration test failed"
echo "You may need to update nginx configuration manually"
echo "See docs/STATIC_DEPLOYMENT_PLAN.md for details"
fi
EOF

print_success "Nginx reloaded"
if [ $? -eq 0 ]; then
print_success "Nginx reloaded"
else
print_warning "Nginx reload had issues - check configuration"
fi
echo ""

# Step 7: Test deployment
print_status "Testing deployment..."
# Step 9: Test deployment
print_status "Step 9: Testing deployment..."
echo ""

# Wait a moment for service to fully start
sleep 2

# Test health endpoint
echo "Testing health endpoint..."
if curl -k -s --max-time 10 "https://blossom.laantungir.net/health" | grep -q "OK"; then
print_success "Health check passed"
print_success "✓ Health check passed"
else
print_warning "Health check failed - checking response..."
print_warning "✗ Health check failed - checking response..."
curl -k -v --max-time 10 "https://blossom.laantungir.net/health" 2>&1 | head -10
fi

# Test basic endpoints
# Test root endpoint
echo ""
echo "Testing root endpoint..."
if curl -k -s --max-time 10 "https://blossom.laantungir.net/" | grep -q "Ginxsom"; then
print_success "Root endpoint responding"
print_success "✓ Root endpoint responding"
else
print_warning "Root endpoint not responding as expected - checking response..."
curl -k -v --max-time 10 "https://blossom.laantungir.net/" 2>&1 | head -10
print_warning "✗ Root endpoint not responding as expected"
fi

print_success "Deployment to $REMOTE_HOST completed!"
print_status "Ginxsom should now be available at: https://blossom.laantungir.net"
print_status "Test endpoints:"
echo ""
print_status "=========================================="
print_success "Deployment completed!"
print_status "=========================================="
echo ""
print_status "Service Information:"
echo "  URL: https://blossom.laantungir.net"
echo "  Binary: $REMOTE_BINARY_PATH"
echo "  Database: $REMOTE_DB_PATH"
echo "  Blobs: $REMOTE_BLOB_DIR"
echo "  Socket: $REMOTE_SOCKET"
echo ""
print_status "Test Commands:"
echo "  Health: curl -k https://blossom.laantungir.net/health"
echo "  Root: curl -k https://blossom.laantungir.net/"
echo "  List: curl -k https://blossom.laantungir.net/list"
if [ "$FRESH_INSTALL" = "true" ]; then
echo "  Info: curl -k https://blossom.laantungir.net/"
echo "  Upload: ./tests/file_put_bud02.sh"
echo ""
print_status "Server Commands:"
echo "  Check status: ssh $REMOTE_USER@$REMOTE_HOST 'ps aux | grep ginxsom-fcgi'"
echo "  View logs: ssh $REMOTE_USER@$REMOTE_HOST 'sudo journalctl -f | grep ginxsom'"
echo "  Restart: ssh $REMOTE_USER@$REMOTE_HOST 'sudo pkill ginxsom-fcgi && sudo spawn-fcgi ...'"
echo ""

if [ "$FRESH_INSTALL" = true ]; then
print_warning "Fresh install completed - database and blobs have been reset"
fi
else
print_status "Existing data preserved - verify database and blobs"
echo "  Check blobs: ssh $REMOTE_USER@$REMOTE_HOST 'ls -la $REMOTE_BLOB_DIR | wc -l'"
echo "  Check DB: ssh $REMOTE_USER@$REMOTE_HOST 'sudo -u www-data sqlite3 $REMOTE_DB_PATH \"SELECT COUNT(*) FROM blobs;\"'"
fi

echo ""
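Typical invocations, assuming the static binary was built first (the `--fresh` flag corresponds to the FRESH_INSTALL branch above and wipes the remote database and blobs):

```bash
./build_static.sh        # produces build/ginxsom-fcgi_static_x86_64
./deploy_lt.sh           # deploy, preserving existing remote data
./deploy_lt.sh --fresh   # deploy and reset remote database and blobs
```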
deploy_static.sh (new executable file, 162 lines)
@@ -0,0 +1,162 @@
#!/bin/bash
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

print_status() { echo -e "${BLUE}[INFO]${NC} $1"; }
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
print_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Configuration
REMOTE_HOST="laantungir.net"
REMOTE_USER="ubuntu"
REMOTE_DIR="/home/ubuntu/ginxsom"
REMOTE_BINARY_PATH="/home/ubuntu/ginxsom/ginxsom-fcgi_static"
REMOTE_SOCKET="/tmp/ginxsom-fcgi.sock"
REMOTE_DATA_DIR="/var/www/html/blossom"
REMOTE_DB_PATH="/home/ubuntu/ginxsom/db/ginxsom.db"

# Detect architecture
ARCH=$(uname -m)
case "$ARCH" in
    x86_64)
        BINARY_NAME="ginxsom-fcgi_static_x86_64"
        ;;
    aarch64|arm64)
        BINARY_NAME="ginxsom-fcgi_static_arm64"
        ;;
    *)
        print_error "Unsupported architecture: $ARCH"
        exit 1
        ;;
esac

LOCAL_BINARY="./build/$BINARY_NAME"

print_status "Starting static binary deployment to $REMOTE_HOST..."

# Check if static binary exists
if [ ! -f "$LOCAL_BINARY" ]; then
    print_error "Static binary not found: $LOCAL_BINARY"
    print_status "Building static binary..."
    ./build_static.sh

    if [ ! -f "$LOCAL_BINARY" ]; then
        print_error "Build failed - binary still not found"
        exit 1
    fi
fi

print_success "Static binary found: $LOCAL_BINARY"
print_status "Binary size: $(du -h "$LOCAL_BINARY" | cut -f1)"

# Verify binary is static
if ldd "$LOCAL_BINARY" 2>&1 | grep -q "not a dynamic executable"; then
    print_success "Binary is fully static"
elif ldd "$LOCAL_BINARY" 2>&1 | grep -q "statically linked"; then
    print_success "Binary is statically linked"
else
    print_warning "Binary may have dynamic dependencies"
    ldd "$LOCAL_BINARY" 2>&1 || true
fi

# Setup remote environment
print_status "Setting up remote environment..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
set -e

# Create directories
mkdir -p /home/ubuntu/ginxsom/db
sudo mkdir -p /var/www/html/blossom
sudo chown www-data:www-data /var/www/html/blossom
sudo chmod 755 /var/www/html/blossom

# Stop existing processes
echo "Stopping existing ginxsom processes..."
sudo pkill -f ginxsom-fcgi || true
sudo rm -f /tmp/ginxsom-fcgi.sock || true

echo "Remote environment ready"
EOF

print_success "Remote environment configured"

# Copy static binary
print_status "Copying static binary to remote server..."
scp "$LOCAL_BINARY" $REMOTE_USER@$REMOTE_HOST:$REMOTE_BINARY_PATH

print_success "Binary copied successfully"

# Set permissions and start service
print_status "Starting ginxsom FastCGI process..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
# Make binary executable
chmod +x $REMOTE_BINARY_PATH

# Clean up any existing socket
sudo rm -f $REMOTE_SOCKET

# Start FastCGI process
echo "Starting ginxsom FastCGI..."
sudo spawn-fcgi -M 666 -u www-data -g www-data -s $REMOTE_SOCKET -U www-data -G www-data -d $REMOTE_DIR -- $REMOTE_BINARY_PATH --db-path "$REMOTE_DB_PATH" --storage-dir "$REMOTE_DATA_DIR"

# Give it a moment to start
sleep 2

# Verify process is running
if pgrep -f "ginxsom-fcgi" > /dev/null; then
    echo "FastCGI process started successfully"
    echo "PID: \$(pgrep -f ginxsom-fcgi)"
else
    echo "Process verification: socket exists"
    ls -la $REMOTE_SOCKET
fi
EOF

if [ $? -eq 0 ]; then
    print_success "FastCGI process started"
else
    print_error "Failed to start FastCGI process"
    exit 1
fi

# Reload nginx
print_status "Reloading nginx..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
if sudo nginx -t; then
    sudo nginx -s reload
    echo "Nginx reloaded successfully"
else
    echo "Nginx configuration test failed"
    exit 1
fi
EOF

print_success "Nginx reloaded"

# Test deployment
print_status "Testing deployment..."

echo "Testing health endpoint..."
if curl -k -s --max-time 10 "https://blossom.laantungir.net/health" | grep -q "OK"; then
    print_success "Health check passed"
else
    print_warning "Health check failed - checking response..."
    curl -k -v --max-time 10 "https://blossom.laantungir.net/health" 2>&1 | head -10
fi

print_success "Deployment to $REMOTE_HOST completed!"
print_status "Ginxsom should now be available at: https://blossom.laantungir.net"
print_status ""
print_status "Deployment Summary:"
echo "  Binary: $BINARY_NAME"
echo "  Size: $(du -h "$LOCAL_BINARY" | cut -f1)"
echo "  Type: Fully static MUSL binary"
echo "  Portability: Works on any Linux distribution"
echo "  Deployment time: ~10 seconds (vs ~5 minutes for dynamic build)"
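If the service needs a manual restart later, the same spawn-fcgi invocation the script runs can be repeated on the server; a sketch, with the paths taken from the configuration variables at the top of the script:

```bash
# On the server: stop the old process, clear the socket, respawn.
sudo pkill -f ginxsom-fcgi || true
sudo rm -f /tmp/ginxsom-fcgi.sock
sudo spawn-fcgi -M 666 -u www-data -g www-data \
    -s /tmp/ginxsom-fcgi.sock -U www-data -G www-data \
    -d /home/ubuntu/ginxsom -- /home/ubuntu/ginxsom/ginxsom-fcgi_static \
    --db-path "/home/ubuntu/ginxsom/db/ginxsom.db" \
    --storage-dir "/var/www/html/blossom"
```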
docs/AUTH_RULES_STATUS.md (new file, 302 lines)
@@ -0,0 +1,302 @@
# Auth Rules Management System - Current Status

## Executive Summary

The auth rules management system is **fully implemented** with a database schema that differs from c-relay. This document outlines the current state and proposes alignment with c-relay's schema.

## Current Database Schema

### Ginxsom Schema (Current)
```sql
CREATE TABLE auth_rules (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    rule_type TEXT NOT NULL,               -- 'pubkey_blacklist', 'pubkey_whitelist', etc.
    rule_target TEXT NOT NULL,             -- The pubkey, hash, or MIME type to match
    operation TEXT NOT NULL DEFAULT '*',   -- 'upload', 'delete', 'list', or '*'
    enabled INTEGER NOT NULL DEFAULT 1,    -- 1 = enabled, 0 = disabled
    priority INTEGER NOT NULL DEFAULT 100, -- Lower number = higher priority
    description TEXT,                      -- Human-readable description
    created_by TEXT,                       -- Admin pubkey who created the rule
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),

    CHECK (rule_type IN ('pubkey_blacklist', 'pubkey_whitelist',
                         'hash_blacklist', 'mime_blacklist', 'mime_whitelist')),
    CHECK (operation IN ('upload', 'delete', 'list', '*')),
    CHECK (enabled IN (0, 1)),
    CHECK (priority >= 0),
    UNIQUE(rule_type, rule_target, operation)
);
```

### C-Relay Schema (Target)
```sql
CREATE TABLE auth_rules (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    rule_type TEXT NOT NULL,
    pattern_type TEXT NOT NULL,
    pattern_value TEXT NOT NULL,
    active INTEGER NOT NULL DEFAULT 1,
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);
```

## Schema Differences

| Field | Ginxsom | C-Relay | Notes |
|-------|---------|---------|-------|
| `id` | ✅ | ✅ | Same |
| `rule_type` | ✅ | ✅ | Same |
| `rule_target` | ✅ | ❌ | Ginxsom-specific |
| `pattern_type` | ❌ | ✅ | C-relay-specific |
| `pattern_value` | ❌ | ✅ | C-relay-specific |
| `operation` | ✅ | ❌ | Ginxsom-specific |
| `enabled` | ✅ (1/0) | ❌ | Ginxsom uses `enabled` |
| `active` | ❌ | ✅ (1/0) | C-relay uses `active` |
| `priority` | ✅ | ❌ | Ginxsom-specific |
| `description` | ✅ | ❌ | Ginxsom-specific |
| `created_by` | ✅ | ❌ | Ginxsom-specific |
| `created_at` | ✅ | ✅ | Same |
| `updated_at` | ✅ | ✅ | Same |

## What Has Been Implemented

### ✅ Database Layer
- **Schema Created**: [`auth_rules`](../db/52e366edfa4e9cc6a6d4653828e51ccf828a2f5a05227d7a768f33b5a198681a.db) table exists with full schema
- **Indexes**: 5 indexes for performance optimization
- **Constraints**: CHECK constraints for data validation
- **Unique Constraint**: Prevents duplicate rules

### ✅ Rule Evaluation Engine
Location: [`src/request_validator.c:1318-1592`](../src/request_validator.c#L1318-L1592)

**Implemented Features:**
1. **Pubkey Blacklist** (Priority 1) - Lines 1346-1377
2. **Hash Blacklist** (Priority 2) - Lines 1382-1420
3. **MIME Blacklist** (Priority 3) - Lines 1423-1462
4. **Pubkey Whitelist** (Priority 4) - Lines 1464-1491
5. **MIME Whitelist** (Priority 5) - Lines 1493-1526
6. **Whitelist Default Denial** (Priority 6) - Lines 1528-1591

**Features:**
- ✅ Priority-based rule evaluation
- ✅ Wildcard operation matching (`*`)
- ✅ MIME type pattern matching (`image/*`)
- ✅ Whitelist default-deny behavior
- ✅ Detailed violation tracking
- ✅ Performance-optimized queries

### ✅ Admin API Commands
Location: [`src/admin_commands.c`](../src/admin_commands.c)

**Implemented Commands:**
- ✅ `config_query` - Query configuration values
- ✅ `config_update` - Update configuration
- ✅ `stats_query` - Get system statistics (includes auth_rules count)
- ✅ `system_status` - System health check
- ✅ `blob_list` - List stored blobs
- ✅ `storage_stats` - Storage statistics
- ✅ `sql_query` - Direct SQL queries (read-only)

**Note:** The stats_query command already queries auth_rules:
```c
// Line 390-395
sql = "SELECT COUNT(*) FROM auth_rules WHERE enabled = 1";
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK && sqlite3_step(stmt) == SQLITE_ROW) {
    cJSON_AddNumberToObject(stats, "active_auth_rules", sqlite3_column_int(stmt, 0));
}
```

### ❌ Missing Admin API Endpoints

The following endpoints from [`docs/AUTH_RULES_IMPLEMENTATION_PLAN.md`](../docs/AUTH_RULES_IMPLEMENTATION_PLAN.md) are **NOT implemented**:

1. **GET /api/rules** - List authentication rules
2. **POST /api/rules** - Create new rule
3. **PUT /api/rules/:id** - Update existing rule
4. **DELETE /api/rules/:id** - Delete rule
5. **POST /api/rules/clear-cache** - Clear auth cache
6. **GET /api/rules/test** - Test rule evaluation

### ✅ Configuration System
- ✅ `auth_rules_enabled` config flag (checked in [`reload_auth_config()`](../src/request_validator.c#L1049-L1145))
- ✅ Cache system with 5-minute TTL
- ✅ Environment variable support (`GINX_NO_CACHE`, `GINX_CACHE_TIMEOUT`)

### ✅ Documentation
- ✅ [`docs/AUTH_API.md`](../docs/AUTH_API.md) - Complete authentication flow
- ✅ [`docs/AUTH_RULES_IMPLEMENTATION_PLAN.md`](../docs/AUTH_RULES_IMPLEMENTATION_PLAN.md) - Implementation plan
- ✅ Flow diagrams and performance metrics

## Proposed Schema Migration to C-Relay Format

### Option 1: Minimal Changes (Recommended)
Keep Ginxsom's richer schema but rename fields for compatibility:

```sql
ALTER TABLE auth_rules RENAME COLUMN enabled TO active;
ALTER TABLE auth_rules ADD COLUMN pattern_type TEXT;
ALTER TABLE auth_rules ADD COLUMN pattern_value TEXT;

-- Populate new fields from existing data
UPDATE auth_rules SET
    pattern_type = CASE
        WHEN rule_type LIKE '%pubkey%' THEN 'pubkey'
        WHEN rule_type LIKE '%hash%' THEN 'hash'
        WHEN rule_type LIKE '%mime%' THEN 'mime'
    END,
    pattern_value = rule_target;
```

**Pros:**
- Maintains all Ginxsom features (operation, priority, description)
- Adds c-relay compatibility fields
- No data loss
- Backward compatible

**Cons:**
- Redundant fields (`rule_target` + `pattern_value`)
- Larger schema
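After an Option 1 migration, `pattern_value` should mirror `rule_target` on every row, which gives a quick sanity check (a sketch; `$DB_PATH` is a placeholder for the deployment's database path):

```bash
# Both counts should match if the backfill populated every row.
sqlite3 "$DB_PATH" "SELECT COUNT(*) FROM auth_rules;"
sqlite3 "$DB_PATH" "SELECT COUNT(*) FROM auth_rules WHERE pattern_value = rule_target;"
```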
### Option 2: Full Migration to C-Relay Schema
Drop Ginxsom-specific fields and adopt the c-relay schema:

```sql
-- Create new table with c-relay schema
CREATE TABLE auth_rules_new (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    rule_type TEXT NOT NULL,
    pattern_type TEXT NOT NULL,
    pattern_value TEXT NOT NULL,
    active INTEGER NOT NULL DEFAULT 1,
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Migrate data
INSERT INTO auth_rules_new (id, rule_type, pattern_type, pattern_value, active, created_at, updated_at)
SELECT
    id,
    rule_type,
    CASE
        WHEN rule_type LIKE '%pubkey%' THEN 'pubkey'
        WHEN rule_type LIKE '%hash%' THEN 'hash'
        WHEN rule_type LIKE '%mime%' THEN 'mime'
    END as pattern_type,
    rule_target as pattern_value,
    enabled as active,
    created_at,
    updated_at
FROM auth_rules;

-- Replace old table
DROP TABLE auth_rules;
ALTER TABLE auth_rules_new RENAME TO auth_rules;
```

**Pros:**
- Full c-relay compatibility
- Simpler schema
- Smaller database

**Cons:**
- **Loss of operation-specific rules** (upload/delete/list)
- **Loss of priority system**
- **Loss of description and created_by tracking**
- **Breaking change** - requires code updates in [`request_validator.c`](../src/request_validator.c)

## Code Impact Analysis

### Files Requiring Updates for C-Relay Schema

1. **[`src/request_validator.c`](../src/request_validator.c)**
   - Lines 1346-1591: Rule evaluation queries need field name changes
   - Change `enabled` → `active`
   - Change `rule_target` → `pattern_value`
   - Add `pattern_type` to queries if using Option 1

2. **[`src/admin_commands.c`](../src/admin_commands.c)**
   - Line 390: Stats query uses `enabled` field
   - Any future rule management endpoints

3. **[`docs/AUTH_RULES_IMPLEMENTATION_PLAN.md`](../docs/AUTH_RULES_IMPLEMENTATION_PLAN.md)**
   - Update schema documentation
   - Update API endpoint specifications
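Concretely, the stats query cited above changes only in its field name; shown here as equivalent sqlite3 invocations (`$DB_PATH` is a placeholder, and the C code embeds the same SQL strings):

```bash
# Current schema:
sqlite3 "$DB_PATH" "SELECT COUNT(*) FROM auth_rules WHERE enabled = 1;"
# After renaming enabled to active:
sqlite3 "$DB_PATH" "SELECT COUNT(*) FROM auth_rules WHERE active = 1;"
```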
## Recommendations

### For C-Relay Alignment
**Use Option 1 (Minimal Changes)** because it:
1. Preserves Ginxsom's advanced features (operation-specific rules, priority)
2. Adds c-relay compatibility without breaking existing functionality
3. Requires minimal code changes
4. Loses no data

### For Admin API Completion
Implement the missing endpoints in priority order:
1. **POST /api/rules** - Create rules (highest priority)
2. **GET /api/rules** - List rules
3. **DELETE /api/rules/:id** - Delete rules
4. **PUT /api/rules/:id** - Update rules
5. **GET /api/rules/test** - Test rules
6. **POST /api/rules/clear-cache** - Clear cache

### Migration Script
```bash
#!/bin/bash
# migrate_auth_rules_to_crelay.sh

DB_PATH="db/52e366edfa4e9cc6a6d4653828e51ccf828a2f5a05227d7a768f33b5a198681a.db"

sqlite3 "$DB_PATH" <<EOF
-- Backup current table
CREATE TABLE auth_rules_backup AS SELECT * FROM auth_rules;

-- Add c-relay compatibility fields
ALTER TABLE auth_rules ADD COLUMN pattern_type TEXT;
ALTER TABLE auth_rules ADD COLUMN pattern_value TEXT;

-- Populate new fields
UPDATE auth_rules SET
    pattern_type = CASE
        WHEN rule_type LIKE '%pubkey%' THEN 'pubkey'
        WHEN rule_type LIKE '%hash%' THEN 'hash'
        WHEN rule_type LIKE '%mime%' THEN 'mime'
    END,
    pattern_value = rule_target;

-- Rename enabled to active for c-relay compatibility
-- Note: SQLite doesn't support RENAME COLUMN directly in older versions,
-- so we'll keep both fields for now
ALTER TABLE auth_rules ADD COLUMN active INTEGER NOT NULL DEFAULT 1;
UPDATE auth_rules SET active = enabled;

-- Verify migration
SELECT COUNT(*) as total_rules FROM auth_rules;
SELECT COUNT(*) as rules_with_pattern FROM auth_rules WHERE pattern_type IS NOT NULL;
EOF
```
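Because the script snapshots the table into `auth_rules_backup` first, a failed migration can be rolled back (a sketch, using the same `$DB_PATH`):

```bash
# Restore the pre-migration table from the backup the script creates.
sqlite3 "$DB_PATH" <<EOF
DROP TABLE auth_rules;
ALTER TABLE auth_rules_backup RENAME TO auth_rules;
EOF
```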
## Summary

**Current State:**
- ✅ Database schema exists and is functional
- ✅ Rule evaluation engine fully implemented
- ✅ Configuration system working
- ✅ Documentation complete
- ❌ Admin API endpoints for rule management missing

**To Align with C-Relay:**
- Add `pattern_type` and `pattern_value` fields
- Optionally rename `enabled` to `active`
- Keep Ginxsom's advanced features (operation, priority, description)
- Update queries to use new field names

**Next Steps:**
1. Decide on migration strategy (Option 1 recommended)
2. Run migration script
3. Update code to use new field names
4. Implement missing Admin API endpoints
5. Test rule evaluation with new schema
docs/NEW_DEPLOY_SCRIPT.md (new file, 388 lines)
@@ -0,0 +1,388 @@
|
||||
# New deploy_lt.sh Script
|
||||
|
||||
This is the complete new deployment script for static binary deployment. Save this as `deploy_lt.sh` in the project root.

```bash
#!/bin/bash
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

print_status() { echo -e "${BLUE}[INFO]${NC} $1"; }
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
print_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Parse command line arguments
FRESH_INSTALL=false
MIGRATE_DATA=true
if [[ "$1" == "--fresh" ]]; then
    FRESH_INSTALL=true
    MIGRATE_DATA=false
elif [[ "$1" == "--no-migrate" ]]; then
    MIGRATE_DATA=false
fi

# Configuration
REMOTE_HOST="laantungir.net"
REMOTE_USER="ubuntu"

# New paths (static binary deployment)
REMOTE_BINARY_DIR="/usr/local/bin/ginxsom"
REMOTE_BINARY_PATH="$REMOTE_BINARY_DIR/ginxsom-fcgi"
REMOTE_DB_DIR="/var/lib/ginxsom"
REMOTE_DB_PATH="$REMOTE_DB_DIR/ginxsom.db"
REMOTE_BLOB_DIR="/var/www/blobs"
REMOTE_SOCKET="/tmp/ginxsom-fcgi.sock"

# Old paths (for migration)
OLD_BINARY_PATH="/home/ubuntu/ginxsom/ginxsom.fcgi"
OLD_DB_PATH="/home/ubuntu/ginxsom/db/ginxsom.db"
OLD_BLOB_DIR="/var/www/html/blossom"

# Local paths
LOCAL_BINARY="build/ginxsom-fcgi_static_x86_64"

print_status "=========================================="
print_status "Ginxsom Static Binary Deployment"
print_status "=========================================="
print_status "Target: $REMOTE_HOST"
print_status "Binary: $REMOTE_BINARY_PATH"
print_status "Database: $REMOTE_DB_PATH"
print_status "Blobs: $REMOTE_BLOB_DIR"
print_status "Fresh install: $FRESH_INSTALL"
print_status "Migrate data: $MIGRATE_DATA"
print_status "=========================================="
echo ""

# Step 1: Verify local binary exists
print_status "Step 1: Verifying local static binary..."
if [[ ! -f "$LOCAL_BINARY" ]]; then
    print_error "Static binary not found: $LOCAL_BINARY"
    print_status "Please run: ./build_static.sh"
    exit 1
fi

# Verify it's actually static
if ldd "$LOCAL_BINARY" 2>&1 | grep -q "not a dynamic executable\|statically linked"; then
    print_success "Binary is static"
else
    print_warning "Binary may not be fully static - proceeding anyway"
fi

BINARY_SIZE=$(du -h "$LOCAL_BINARY" | cut -f1)
print_success "Found static binary ($BINARY_SIZE)"
echo ""

# Step 2: Upload binary to server
print_status "Step 2: Uploading binary to server..."
scp "$LOCAL_BINARY" $REMOTE_USER@$REMOTE_HOST:/tmp/ginxsom-fcgi_new || {
    print_error "Failed to upload binary"
    exit 1
}
print_success "Binary uploaded to /tmp/ginxsom-fcgi_new"
echo ""

# Step 3: Setup directories and install binary
print_status "Step 3: Setting up directories and installing binary..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e

# Create binary directory
echo "Creating binary directory..."
sudo mkdir -p $REMOTE_BINARY_DIR

# Create database directory
echo "Creating database directory..."
sudo mkdir -p $REMOTE_DB_DIR/backups
sudo chown www-data:www-data $REMOTE_DB_DIR
sudo chmod 755 $REMOTE_DB_DIR

# Create blob storage directory
echo "Creating blob storage directory..."
sudo mkdir -p $REMOTE_BLOB_DIR
sudo chown www-data:www-data $REMOTE_BLOB_DIR
sudo chmod 755 $REMOTE_BLOB_DIR

echo "Directories created successfully"
EOF

if [ $? -ne 0 ]; then
    print_error "Failed to create directories"
    exit 1
fi
print_success "Directories created"
echo ""

# Step 4: Migrate data if requested
if [ "$MIGRATE_DATA" = true ] && [ "$FRESH_INSTALL" = false ]; then
    print_status "Step 4: Migrating existing data..."
    ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e

# Migrate database
if [ -f $OLD_DB_PATH ]; then
    echo "Migrating database from $OLD_DB_PATH..."
    sudo cp $OLD_DB_PATH $REMOTE_DB_PATH
    sudo chown www-data:www-data $REMOTE_DB_PATH
    sudo chmod 644 $REMOTE_DB_PATH
    echo "Database migrated"
elif [ -f $OLD_BLOB_DIR/ginxsom.db ]; then
    echo "Migrating database from $OLD_BLOB_DIR/ginxsom.db..."
    sudo cp $OLD_BLOB_DIR/ginxsom.db $REMOTE_DB_PATH
    sudo chown www-data:www-data $REMOTE_DB_PATH
    sudo chmod 644 $REMOTE_DB_PATH
    echo "Database migrated"
else
    echo "No existing database found - will be created on first run"
fi

# Migrate blobs
if [ -d $OLD_BLOB_DIR ] && [ "\$(ls -A $OLD_BLOB_DIR 2>/dev/null)" ]; then
    echo "Migrating blobs from $OLD_BLOB_DIR..."
    # Copy only blob files (SHA256 hashes with extensions)
    sudo find $OLD_BLOB_DIR -type f -regextype posix-extended -regex '.*/[a-f0-9]{64}\.[a-z0-9]+' -exec cp {} $REMOTE_BLOB_DIR/ \; 2>/dev/null || true
    sudo chown -R www-data:www-data $REMOTE_BLOB_DIR
    BLOB_COUNT=\$(ls -1 $REMOTE_BLOB_DIR | wc -l)
    echo "Migrated \$BLOB_COUNT blob files"
else
    echo "No existing blobs found"
fi
EOF

    if [ $? -eq 0 ]; then
        print_success "Data migration completed"
    else
        print_warning "Data migration had issues - check manually"
    fi
    echo ""
elif [ "$FRESH_INSTALL" = true ]; then
    print_status "Step 4: Fresh install - removing existing data..."
    ssh $REMOTE_USER@$REMOTE_HOST << EOF
sudo rm -f $REMOTE_DB_PATH
sudo rm -rf $REMOTE_BLOB_DIR/*
echo "Existing data removed"
EOF
    print_success "Fresh install prepared"
    echo ""
else
    print_status "Step 4: Skipping data migration (--no-migrate)"
    echo ""
fi

# Step 5: Install minimal dependencies
print_status "Step 5: Installing minimal dependencies..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
set -e

# Check if spawn-fcgi is installed
if ! command -v spawn-fcgi &> /dev/null; then
    echo "Installing spawn-fcgi..."
    sudo apt-get update -qq
    sudo apt-get install -y spawn-fcgi
    echo "spawn-fcgi installed"
else
    echo "spawn-fcgi already installed"
fi
EOF

if [ $? -eq 0 ]; then
    print_success "Dependencies verified"
else
    print_error "Failed to install dependencies"
    exit 1
fi
echo ""

# Step 6: Stop existing service and install new binary
print_status "Step 6: Stopping existing service and installing new binary..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e

# Stop any existing ginxsom processes
echo "Stopping existing ginxsom processes..."
sudo pkill -f ginxsom-fcgi || true
sleep 2

# Remove old socket
sudo rm -f $REMOTE_SOCKET

# Install new binary
echo "Installing new binary..."
sudo mv /tmp/ginxsom-fcgi_new $REMOTE_BINARY_PATH
sudo chmod +x $REMOTE_BINARY_PATH
sudo chown root:root $REMOTE_BINARY_PATH

echo "Binary installed successfully"
EOF

if [ $? -eq 0 ]; then
    print_success "Binary installed"
else
    print_error "Failed to install binary"
    exit 1
fi
echo ""

# Step 7: Start ginxsom FastCGI process
print_status "Step 7: Starting ginxsom FastCGI process..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e

echo "Starting ginxsom FastCGI with configuration:"
echo "  Binary: $REMOTE_BINARY_PATH"
echo "  Database: $REMOTE_DB_PATH"
echo "  Storage: $REMOTE_BLOB_DIR"
echo "  Socket: $REMOTE_SOCKET"
echo ""

sudo spawn-fcgi \
    -M 666 \
    -u www-data \
    -g www-data \
    -s $REMOTE_SOCKET \
    -U www-data \
    -G www-data \
    -d $REMOTE_DB_DIR \
    -- $REMOTE_BINARY_PATH \
    --db-path $REMOTE_DB_PATH \
    --storage-dir $REMOTE_BLOB_DIR

# Give it a moment to start
sleep 2

# Verify process is running
if [ -S $REMOTE_SOCKET ]; then
    echo "FastCGI socket created successfully"
    ls -la $REMOTE_SOCKET
else
    echo "ERROR: Socket not created"
    exit 1
fi

# Check if process is running
if pgrep -f ginxsom-fcgi > /dev/null; then
    echo "Process is running (PID: \$(pgrep -f ginxsom-fcgi))"
else
    echo "WARNING: Process not found by pgrep (may be normal for FastCGI)"
fi
EOF

if [ $? -eq 0 ]; then
    print_success "FastCGI process started"
else
    print_error "Failed to start FastCGI process"
    exit 1
fi
echo ""

# Step 8: Test nginx configuration and reload
print_status "Step 8: Testing and reloading nginx..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
# Test nginx configuration
if sudo nginx -t 2>&1; then
    echo "Nginx configuration test passed"
    sudo nginx -s reload
    echo "Nginx reloaded successfully"
else
    echo "WARNING: Nginx configuration test failed"
    echo "You may need to update nginx configuration manually"
    echo "See docs/STATIC_DEPLOYMENT_PLAN.md for details"
fi
EOF

if [ $? -eq 0 ]; then
    print_success "Nginx reloaded"
else
    print_warning "Nginx reload had issues - check configuration"
fi
echo ""

# Step 9: Test deployment
print_status "Step 9: Testing deployment..."
echo ""

# Wait a moment for service to fully start
sleep 2

# Test health endpoint
echo "Testing health endpoint..."
if curl -k -s --max-time 10 "https://blossom.laantungir.net/health" | grep -q "OK"; then
    print_success "✓ Health check passed"
else
    print_warning "✗ Health check failed - checking response..."
    curl -k -v --max-time 10 "https://blossom.laantungir.net/health" 2>&1 | head -10
fi

# Test root endpoint
echo ""
echo "Testing root endpoint..."
if curl -k -s --max-time 10 "https://blossom.laantungir.net/" | grep -q "Ginxsom"; then
    print_success "✓ Root endpoint responding"
else
    print_warning "✗ Root endpoint not responding as expected"
fi

echo ""
print_status "=========================================="
print_success "Deployment completed!"
print_status "=========================================="
echo ""
print_status "Service Information:"
echo "  URL: https://blossom.laantungir.net"
echo "  Binary: $REMOTE_BINARY_PATH"
echo "  Database: $REMOTE_DB_PATH"
echo "  Blobs: $REMOTE_BLOB_DIR"
echo "  Socket: $REMOTE_SOCKET"
echo ""
print_status "Test Commands:"
echo "  Health: curl -k https://blossom.laantungir.net/health"
echo "  Info: curl -k https://blossom.laantungir.net/"
echo "  Upload: ./tests/file_put_bud02.sh"
echo ""
print_status "Server Commands:"
echo "  Check status: ssh $REMOTE_USER@$REMOTE_HOST 'ps aux | grep ginxsom-fcgi'"
echo "  View logs: ssh $REMOTE_USER@$REMOTE_HOST 'sudo journalctl -f | grep ginxsom'"
echo "  Restart: ssh $REMOTE_USER@$REMOTE_HOST 'sudo pkill ginxsom-fcgi && sudo spawn-fcgi ...'"
echo ""

if [ "$FRESH_INSTALL" = true ]; then
    print_warning "Fresh install completed - database and blobs have been reset"
fi

if [ "$MIGRATE_DATA" = true ] && [ "$FRESH_INSTALL" = false ]; then
    print_status "Data migration completed - verify blob count and database"
    echo "  Check blobs: ssh $REMOTE_USER@$REMOTE_HOST 'ls -la $REMOTE_BLOB_DIR | wc -l'"
    echo "  Check DB: ssh $REMOTE_USER@$REMOTE_HOST 'sudo -u www-data sqlite3 $REMOTE_DB_PATH \"SELECT COUNT(*) FROM blobs;\"'"
fi

echo ""
print_status "For nginx configuration updates, see: docs/STATIC_DEPLOYMENT_PLAN.md"
print_status "=========================================="
```
## Usage

```bash
# Normal deployment with data migration
./deploy_lt.sh

# Fresh install (removes all data)
./deploy_lt.sh --fresh

# Deploy without migrating data
./deploy_lt.sh --no-migrate
```
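
If the script was just saved from this document, make it executable first:

```bash
# One-time setup after saving the script
chmod +x deploy_lt.sh
```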

## Key Changes from Old Script

1. **No remote compilation** - uploads pre-built static binary
2. **New directory structure** - follows FHS standards
3. **Minimal dependencies** - only spawn-fcgi needed
4. **Data migration** - automatically migrates from old locations
5. **Simplified process** - ~30 seconds vs ~5-10 minutes
478
docs/NGINX_CONFIG_UPDATES.md
Normal file
@@ -0,0 +1,478 @@

# Nginx Configuration Updates for Static Binary Deployment

## Overview

This document describes the required nginx configuration changes to support the new static binary deployment with updated directory paths.

## Changes Required

### 1. Blob Storage Root Directory

**Change from:**
```nginx
root /var/www/html/blossom;
```

**Change to:**
```nginx
root /var/www/blobs;
```

### 2. FastCGI Script Filename

**Change from:**
```nginx
fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
```

**Change to:**
```nginx
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
```

## Complete Updated Configuration

Save this as `/etc/nginx/conf.d/default.conf` on the server (or update the existing file):

```nginx
# FastCGI upstream configuration
upstream ginxsom_backend {
    server unix:/tmp/ginxsom-fcgi.sock;
}

# Main domains
server {
    if ($host = laantungir.net) {
        return 301 https://$host$request_uri;
    } # managed by Certbot

    listen 80;
    server_name laantungir.com www.laantungir.com laantungir.net www.laantungir.net laantungir.org www.laantungir.org;

    root /var/www/html;
    index index.html index.htm;

    # CORS for Nostr NIP-05 verification
    add_header Access-Control-Allow-Origin * always;
    add_header Access-Control-Allow-Methods "GET, OPTIONS" always;
    add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range" always;

    location / {
        try_files $uri $uri/ =404;
    }

    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    error_page 404 /404.html;
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /var/www/html;
    }
}

# Main domains HTTPS - using the main certificate
server {
    listen 443 ssl;
    server_name laantungir.com www.laantungir.com laantungir.net www.laantungir.net laantungir.org www.laantungir.org;
    ssl_certificate /etc/letsencrypt/live/laantungir.net/fullchain.pem; # managed by Certbot
    ssl_certificate_key /etc/letsencrypt/live/laantungir.net/privkey.pem; # managed by Certbot

    root /var/www/html;
    index index.html index.htm;

    # CORS for Nostr NIP-05 verification
    add_header Access-Control-Allow-Origin * always;
    add_header Access-Control-Allow-Methods "GET, OPTIONS" always;
    add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range" always;

    location / {
        try_files $uri $uri/ =404;
    }

    error_page 404 /404.html;
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /var/www/html;
    }
}

# Blossom subdomains HTTP - redirect to HTTPS (keep for ACME)
server {
    listen 80;
    server_name blossom.laantungir.net;

    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    location / {
        return 301 https://$server_name$request_uri;
    }
}

# Blossom subdomains HTTPS - ginxsom FastCGI
server {
    listen 443 ssl;
    server_name blossom.laantungir.net;

    ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;

    # Security headers
    add_header X-Content-Type-Options nosniff always;
    add_header X-Frame-Options DENY always;
    add_header X-XSS-Protection "1; mode=block" always;

    # CORS for Blossom protocol
    add_header Access-Control-Allow-Origin * always;
    add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, HEAD, OPTIONS, PATCH" always;
    add_header Access-Control-Allow-Headers "Authorization, Content-Type, Content-Length, Accept, Origin, User-Agent, DNT, Cache-Control, X-Mx-ReqToken, Keep-Alive, X-Requested-With, If-Modified-Since, *" always;
    add_header Access-Control-Max-Age 86400 always;

    # UPDATED: Root directory for blob storage
    root /var/www/blobs;

    # Maximum upload size
    client_max_body_size 100M;

    # OPTIONS preflight handler
    if ($request_method = OPTIONS) {
        return 204;
    }

    # PUT /upload - File uploads
    location = /upload {
        if ($request_method !~ ^(PUT|HEAD)$) {
            return 405;
        }
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        # UPDATED: Direct path to binary
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
    }

    # GET /list/<pubkey> - List user blobs
    location ~ "^/list/([a-f0-9]{64})$" {
        if ($request_method !~ ^(GET)$) {
            return 405;
        }
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        # UPDATED: Direct path to binary
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
    }

    # PUT /mirror - Mirror content
    location = /mirror {
        if ($request_method !~ ^(PUT)$) {
            return 405;
        }
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        # UPDATED: Direct path to binary
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
    }

    # PUT /report - Report content
    location = /report {
        if ($request_method !~ ^(PUT)$) {
            return 405;
        }
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        # UPDATED: Direct path to binary
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
    }

    # GET /auth - NIP-42 challenges
    location = /auth {
        if ($request_method !~ ^(GET)$) {
            return 405;
        }
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        # UPDATED: Direct path to binary
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
    }

    # Admin API
    location /api/ {
        if ($request_method !~ ^(GET|PUT)$) {
            return 405;
        }
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        # UPDATED: Direct path to binary
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
    }

    # Blob serving - SHA256 patterns
    location ~ "^/([a-f0-9]{64})(\.[a-zA-Z0-9]+)?$" {
        # Handle DELETE via rewrite
        if ($request_method = DELETE) {
            rewrite ^/(.*)$ /fcgi-delete/$1 last;
        }

        # Route HEAD to FastCGI
        if ($request_method = HEAD) {
            rewrite ^/(.*)$ /fcgi-head/$1 last;
        }

        # GET requests - serve files directly
        if ($request_method != GET) {
            return 405;
        }

        try_files /$1.txt /$1.jpg /$1.jpeg /$1.png /$1.webp /$1.gif /$1.pdf /$1.mp4 /$1.mp3 /$1.md =404;

        # Cache headers
        add_header Cache-Control "public, max-age=31536000, immutable";
    }

    # Internal FastCGI handlers
    location ~ "^/fcgi-delete/([a-f0-9]{64}).*$" {
        internal;
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        # UPDATED: Direct path to binary
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
        fastcgi_param REQUEST_URI /$1;
    }

    location ~ "^/fcgi-head/([a-f0-9]{64}).*$" {
        internal;
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        # UPDATED: Direct path to binary
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
        fastcgi_param REQUEST_URI /$1;
    }

    # Health check
    location /health {
        access_log off;
        return 200 "OK\n";
        add_header Content-Type text/plain;
        add_header Access-Control-Allow-Origin * always;
        add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, HEAD, OPTIONS, PATCH" always;
        add_header Access-Control-Allow-Headers "Authorization, Content-Type, Content-Length, Accept, Origin, User-Agent, DNT, Cache-Control, X-Mx-ReqToken, Keep-Alive, X-Requested-With, If-Modified-Since, *" always;
        add_header Access-Control-Max-Age 86400 always;
    }

    # Default location - Server info from FastCGI
    location / {
        if ($request_method !~ ^(GET)$) {
            return 405;
        }
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        # UPDATED: Direct path to binary
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
    }
}

server {
    listen 80;
    server_name relay.laantungir.com relay.laantungir.net relay.laantungir.org;

    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    location / {
        proxy_pass http://127.0.0.1:8888;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $host;
        proxy_set_header Sec-WebSocket-Key $http_sec_websocket_key;
        proxy_set_header Sec-WebSocket-Version $http_sec_websocket_version;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
        proxy_connect_timeout 60s;
        proxy_buffering off;
        proxy_request_buffering off;
        gzip off;
    }
}

# Relay HTTPS - proxy to c-relay
server {
    listen 443 ssl;
    server_name relay.laantungir.com relay.laantungir.net relay.laantungir.org;

    ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;

    location / {
        proxy_pass http://127.0.0.1:8888;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $host;
        proxy_set_header Sec-WebSocket-Key $http_sec_websocket_key;
        proxy_set_header Sec-WebSocket-Version $http_sec_websocket_version;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
        proxy_connect_timeout 60s;
        proxy_buffering off;
        proxy_request_buffering off;
        gzip off;
    }
}

# Git subdomains HTTP - redirect to HTTPS
server {
    listen 80;
    server_name git.laantungir.com git.laantungir.net git.laantungir.org;

    # Allow larger file uploads for Git releases
    client_max_body_size 50M;

    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    location / {
        return 301 https://$server_name$request_uri;
    }
}

# Auth subdomains HTTP - redirect to HTTPS
server {
    listen 80;
    server_name auth.laantungir.com auth.laantungir.net auth.laantungir.org;

    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    location / {
        return 301 https://$server_name$request_uri;
    }
}

# Git subdomains HTTPS - proxy to gitea
server {
    listen 443 ssl;
    server_name git.laantungir.com git.laantungir.net git.laantungir.org;

    # Allow larger file uploads for Git releases
    client_max_body_size 50M;

    ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;

    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_buffering off;
        proxy_request_buffering off;
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
        proxy_connect_timeout 60s;
        gzip off;
        proxy_set_header Host $host;
        proxy_set_header Sec-WebSocket-Key $http_sec_websocket_key;
        proxy_set_header Sec-WebSocket-Version $http_sec_websocket_version;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }
}

# Auth subdomains HTTPS - proxy to nostr-auth
server {
    listen 443 ssl;
    server_name auth.laantungir.com auth.laantungir.net auth.laantungir.org;

    ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;

    location / {
        proxy_pass http://localhost:3001;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_buffering off;
        proxy_request_buffering off;
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
        proxy_connect_timeout 60s;
        gzip off;
        proxy_set_header Host $host;
        proxy_set_header Sec-WebSocket-Key $http_sec_websocket_key;
        proxy_set_header Sec-WebSocket-Version $http_sec_websocket_version;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }
}
```
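
One caveat: the relay, git, and auth blocks above reference `$connection_upgrade`, which is not a built-in nginx variable. If it is not already defined in the `http` block of your `nginx.conf`, `nginx -t` will fail. The conventional definition is the following map, shown here as a sketch; it must live at `http` level, outside any `server` block:

```nginx
# Conventional WebSocket upgrade map; required for $connection_upgrade.
# Place in the http {} context, e.g. in /etc/nginx/nginx.conf.
map $http_upgrade $connection_upgrade {
    default upgrade;
    ''      close;
}
```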

## Manual Update Steps

If you prefer to update the existing configuration manually:

```bash
# 1. Backup current configuration
ssh ubuntu@laantungir.net
sudo cp /etc/nginx/conf.d/default.conf /etc/nginx/conf.d/default.conf.backup

# 2. Edit the configuration
sudo nano /etc/nginx/conf.d/default.conf

# 3. Find and replace (in the blossom server block):
#    - Change: root /var/www/html/blossom;
#    - To:     root /var/www/blobs;

# 4. Find and replace (all FastCGI locations):
#    - Change: fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
#    - To:     fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;

# 5. Test configuration
sudo nginx -t

# 6. If test passes, reload nginx
sudo nginx -s reload

# 7. If test fails, restore backup
sudo cp /etc/nginx/conf.d/default.conf.backup /etc/nginx/conf.d/default.conf
sudo nginx -s reload
```
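
Steps 3 and 4 can also be applied non-interactively; a sketch using `sed` (take the backup from step 1 first; the `|` delimiter avoids escaping the slashes in the paths):

```bash
# Apply both replacements in place; assumes the backup from step 1 exists.
sudo sed -i \
  -e 's|root /var/www/html/blossom;|root /var/www/blobs;|' \
  -e 's|fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;|fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;|g' \
  /etc/nginx/conf.d/default.conf
sudo nginx -t && sudo nginx -s reload
```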

## Verification

After updating the configuration:

```bash
# Check nginx syntax
sudo nginx -t

# Check if ginxsom is responding
curl -k https://blossom.laantungir.net/health

# Check blob serving (if you have existing blobs)
curl -k https://blossom.laantungir.net/<some-sha256-hash>.jpg
```

## Summary of Changes

| Item | Old Value | New Value |
|------|-----------|-----------|
| Blob root | `/var/www/html/blossom` | `/var/www/blobs` |
| Binary path | `$document_root/ginxsom.fcgi` | `/usr/local/bin/ginxsom/ginxsom-fcgi` |
| Binary location | `/home/ubuntu/ginxsom/ginxsom.fcgi` | `/usr/local/bin/ginxsom/ginxsom-fcgi` |

These changes align with the new static binary deployment architecture and Linux FHS standards.
296
docs/STATIC_BUILD.md
Normal file
@@ -0,0 +1,296 @@

# Ginxsom Static MUSL Build Guide

This guide explains how to build and deploy Ginxsom as a fully static MUSL binary with zero runtime dependencies.

## Overview

Ginxsom now supports building as a static MUSL binary using Alpine Linux and Docker. This produces a truly portable binary that works on **any Linux distribution** without requiring any system libraries.

## Benefits

| Feature | Static MUSL | Dynamic glibc |
|---------|-------------|---------------|
| **Portability** | ✓ Any Linux | ✗ Requires matching libs |
| **Dependencies** | None | libfcgi, libsqlite3, etc. |
| **Deployment** | Copy one file | Build on target |
| **Binary Size** | ~7-10 MB | ~2-3 MB + libraries |
| **Deployment Time** | ~10 seconds | ~5-10 minutes |

## Prerequisites

- Docker installed and running
- Internet connection (for first build only)
- ~2GB disk space for Docker images

## Quick Start

### 1. Build Static Binary

```bash
# Build production binary (optimized, stripped)
make static

# Or build debug binary (with symbols)
make static-debug

# Or use the script directly
./build_static.sh
./build_static.sh --debug
```

The binary will be created in `build/ginxsom-fcgi_static_x86_64` (or `_arm64` for ARM systems).

### 2. Verify Binary

```bash
# Check if truly static
ldd build/ginxsom-fcgi_static_x86_64
# Should output: "not a dynamic executable"

# Check file info
file build/ginxsom-fcgi_static_x86_64
# Should show: "statically linked"

# Check size
ls -lh build/ginxsom-fcgi_static_x86_64
```

### 3. Deploy to Server

```bash
# Use the simplified deployment script
./deploy_static.sh

# Or manually copy and start
scp build/ginxsom-fcgi_static_x86_64 user@server:/path/to/ginxsom/
ssh user@server
chmod +x /path/to/ginxsom/ginxsom-fcgi_static_x86_64
sudo spawn-fcgi -M 666 -u www-data -g www-data \
    -s /tmp/ginxsom-fcgi.sock \
    -- /path/to/ginxsom/ginxsom-fcgi_static_x86_64 \
    --db-path /path/to/db/ginxsom.db \
    --storage-dir /var/www/html/blossom
```

## Build Process Details

### What Happens During Build

1. **Docker Image Creation** (5-10 minutes first time, cached after):
   - Uses Alpine Linux 3.19 (native MUSL)
   - Builds secp256k1 statically
   - Builds nostr_core_lib with required NIPs
   - Embeds web interface files
   - Compiles Ginxsom with full static linking

2. **Binary Extraction**:
   - Extracts binary from Docker container
   - Verifies static linking
   - Makes executable

3. **Verification**:
   - Checks for dynamic dependencies
   - Reports file size
   - Tests execution

### Docker Layers (Cached)

The Dockerfile uses multi-stage builds with caching:

```
Layer 1: Alpine base + dependencies (cached)
Layer 2: Build secp256k1 (cached)
Layer 3: Initialize git submodules (cached unless .gitmodules changes)
Layer 4: Build nostr_core_lib (cached unless nostr_core_lib changes)
Layer 5: Embed web files (cached unless api/ changes)
Layer 6: Build Ginxsom (rebuilds when src/ changes)
```

This means subsequent builds are **much faster** (~1-2 minutes) since only changed layers rebuild.
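
If a cached layer ever goes stale (for example after changes inside `nostr_core_lib` that did not invalidate the layer), you can force a cold build. The image tag below is illustrative, since `build_static.sh` owns the actual tag:

```bash
# Force a full rebuild, bypassing all cached layers.
# The -t tag is an assumption; build_static.sh may use a different one.
docker build --no-cache -f Dockerfile.alpine-musl -t ginxsom-musl-builder .
```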

## Deployment Comparison

### Old Dynamic Build Deployment

```bash
# 1. Sync entire project (30 seconds)
rsync -avz . user@server:/path/

# 2. Build on remote server (5-10 minutes)
ssh user@server "cd /path && make clean && make"

# 3. Restart service (10 seconds)
ssh user@server "sudo systemctl restart ginxsom"

# Total: ~6-11 minutes
```

### New Static Build Deployment

```bash
# 1. Build locally once (5-10 minutes first time, cached after)
make static

# 2. Copy binary (10 seconds)
scp build/ginxsom-fcgi_static_x86_64 user@server:/path/

# 3. Restart service (10 seconds)
ssh user@server "sudo systemctl restart ginxsom"

# Total: ~20 seconds (after first build)
```

## Cleanup

### Automatic Cleanup

The static build script automatically cleans up old dynamic build artifacts (`.o` files and the `ginxsom-fcgi` binary) after successfully building the static binary. This keeps your `build/` directory clean.

### Manual Cleanup

```bash
# Clean dynamic build artifacts (preserves static binaries)
make clean

# Clean everything including static binaries
make clean-all

# Or manually remove specific files
rm -f build/*.o
rm -f build/ginxsom-fcgi
rm -f build/ginxsom-fcgi_static_*
```

## Troubleshooting

### Docker Not Found

```bash
# Install Docker
sudo apt install docker.io

# Add user to docker group
sudo usermod -aG docker $USER
newgrp docker
```

### Build Fails

```bash
# Clean Docker cache and rebuild
docker system prune -a
make static
```

### Binary Won't Run on Target

```bash
# Verify it's static
ldd build/ginxsom-fcgi_static_x86_64

# Check architecture matches
file build/ginxsom-fcgi_static_x86_64
uname -m  # On target system
```

### Alpine Package Not Found

If you get errors about missing Alpine packages, the package name may have changed. Check Alpine's package database:

- https://pkgs.alpinelinux.org/packages

## Advanced Usage

### Cross-Compilation

Build for different architectures:

```bash
# Build for ARM64 on x86_64 machine
docker build --platform linux/arm64 -f Dockerfile.alpine-musl -t ginxsom-arm64 .
```

### Custom NIPs

Edit `Dockerfile.alpine-musl` line 66 to change which NIPs are included:

```dockerfile
./build.sh --nips=1,6,19                   # Minimal
./build.sh --nips=1,6,13,17,19,42,44,59    # Full (default)
```

### Debug Build

```bash
# Build with debug symbols (no optimization)
make static-debug

# Binary will be larger but include debugging info
gdb build/ginxsom-fcgi_static_x86_64
```

## File Structure

```
ginxsom/
├── Dockerfile.alpine-musl          # Alpine Docker build definition
├── build_static.sh                 # Build script wrapper
├── deploy_static.sh                # Simplified deployment script
├── Makefile                        # Updated with 'static' target
└── build/
    └── ginxsom-fcgi_static_x86_64  # Output binary
```

## CI/CD Integration

### GitHub Actions Example

```yaml
name: Build Static Binary

on: [push]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          submodules: recursive

      - name: Build static binary
        run: make static

      - name: Upload artifact
        uses: actions/upload-artifact@v2
        with:
          name: ginxsom-static
          path: build/ginxsom-fcgi_static_x86_64
```

## Performance

Static MUSL binaries have minimal performance impact:

| Metric | Static MUSL | Dynamic glibc |
|--------|-------------|---------------|
| Startup Time | ~50ms | ~40ms |
| Memory Usage | Similar | Similar |
| Request Latency | Identical | Identical |
| Binary Size | 7-10 MB | 2-3 MB + libs |

The slight startup delay is negligible for a long-running FastCGI process.

## References

- [MUSL libc](https://musl.libc.org/)
- [Alpine Linux](https://alpinelinux.org/)
- [Static Linking Best Practices](https://www.musl-libc.org/faq.html)
- [c-relay Static Build](../c-relay/STATIC_BUILD.md)

## Support

For issues with static builds:

1. Check Docker is running: `docker info`
2. Verify submodules: `git submodule status`
3. Clean and rebuild: `docker system prune -a && make static`
4. Check logs in Docker build output
383
docs/STATIC_DEPLOYMENT_PLAN.md
Normal file
@@ -0,0 +1,383 @@

# Static MUSL Binary Deployment Plan

## Overview

This document outlines the deployment architecture for ginxsom using static MUSL binaries. The new approach eliminates remote compilation and simplifies deployment to a single binary upload.

## Architecture Changes

### Current Deployment (Old)

```
Local Machine:
- Build dynamic binary with make
- Upload entire project via rsync
- Remote server compiles from source
- Install dependencies (libsqlite3-dev, libfcgi-dev, etc.)
- Build nostr_core_lib submodules remotely
- Binary location: /home/ubuntu/ginxsom/ginxsom.fcgi
- Database: /home/ubuntu/ginxsom/db/ginxsom.db
- Blobs: /var/www/html/blossom/
```

### New Deployment (Static MUSL)

```
Local Machine:
- Build static MUSL binary with Docker (build_static.sh)
- Upload only the binary (no source code needed)
- No remote compilation required
- Minimal dependencies (only spawn-fcgi)
- Binary location: /usr/local/bin/ginxsom/ginxsom-fcgi
- Database: /var/lib/ginxsom/ginxsom.db
- Blobs: /var/www/blobs/
```

## Directory Structure

### Production Server Layout

```
/usr/local/bin/ginxsom/
├── ginxsom-fcgi          # Static binary (executable)
└── README.md             # Version info and deployment notes

/var/lib/ginxsom/
├── ginxsom.db            # SQLite database
└── backups/              # Database backups

/var/www/blobs/
├── <sha256>.jpg          # Blob files
├── <sha256>.png
└── ...

/tmp/
└── ginxsom-fcgi.sock     # FastCGI socket
```

## Deployment Process

### Phase 1: Build Static Binary (Local)

```bash
# Build the static binary
./build_static.sh

# Output: build/ginxsom-fcgi_static_x86_64
# Size: ~7-10 MB
# Dependencies: NONE (fully static)
```

### Phase 2: Upload Binary

```bash
# Upload to server
scp build/ginxsom-fcgi_static_x86_64 ubuntu@laantungir.net:/tmp/

# Install to /usr/local/bin/ginxsom/
ssh ubuntu@laantungir.net << 'EOF'
sudo mkdir -p /usr/local/bin/ginxsom
sudo mv /tmp/ginxsom-fcgi_static_x86_64 /usr/local/bin/ginxsom/ginxsom-fcgi
sudo chmod +x /usr/local/bin/ginxsom/ginxsom-fcgi
sudo chown root:root /usr/local/bin/ginxsom/ginxsom-fcgi
EOF
```

### Phase 3: Setup Data Directories

```bash
ssh ubuntu@laantungir.net << 'EOF'
# Create database directory
sudo mkdir -p /var/lib/ginxsom/backups
sudo chown www-data:www-data /var/lib/ginxsom
sudo chmod 755 /var/lib/ginxsom

# Create blob storage directory
sudo mkdir -p /var/www/blobs
sudo chown www-data:www-data /var/www/blobs
sudo chmod 755 /var/www/blobs

# Migrate existing data if needed
if [ -f /var/www/html/blossom/ginxsom.db ]; then
    sudo cp /var/www/html/blossom/ginxsom.db /var/lib/ginxsom/
    sudo chown www-data:www-data /var/lib/ginxsom/ginxsom.db
fi

if [ -d /var/www/html/blossom ]; then
    sudo cp -r /var/www/html/blossom/* /var/www/blobs/ 2>/dev/null || true
    sudo chown -R www-data:www-data /var/www/blobs
fi
EOF
```

### Phase 4: Install Minimal Dependencies

```bash
ssh ubuntu@laantungir.net << 'EOF'
# Only spawn-fcgi is needed (no build tools!)
sudo apt-get update
sudo apt-get install -y spawn-fcgi
EOF
```

### Phase 5: Start Service

```bash
ssh ubuntu@laantungir.net << 'EOF'
# Stop existing process
sudo pkill -f ginxsom-fcgi || true
sudo rm -f /tmp/ginxsom-fcgi.sock

# Start with spawn-fcgi
sudo spawn-fcgi \
    -M 666 \
    -u www-data \
    -g www-data \
    -s /tmp/ginxsom-fcgi.sock \
    -U www-data \
    -G www-data \
    -d /var/lib/ginxsom \
    -- /usr/local/bin/ginxsom/ginxsom-fcgi \
    --db-path /var/lib/ginxsom/ginxsom.db \
    --storage-dir /var/www/blobs
EOF
```

## Nginx Configuration Updates

### Required Changes to `/etc/nginx/conf.d/default.conf`

```nginx
# Blossom subdomains HTTPS - ginxsom FastCGI
server {
    listen 443 ssl;
    server_name blossom.laantungir.net;

    ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;

    # Security headers
    add_header X-Content-Type-Options nosniff always;
    add_header X-Frame-Options DENY always;
    add_header X-XSS-Protection "1; mode=block" always;

    # CORS for Blossom protocol
    add_header Access-Control-Allow-Origin * always;
    add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, HEAD, OPTIONS, PATCH" always;
    add_header Access-Control-Allow-Headers "Authorization, Content-Type, Content-Length, Accept, Origin, User-Agent, DNT, Cache-Control, X-Mx-ReqToken, Keep-Alive, X-Requested-With, If-Modified-Since, *" always;
    add_header Access-Control-Max-Age 86400 always;

    # CHANGED: Root directory for blob storage
    root /var/www/blobs;  # Was: /var/www/html/blossom

    # Maximum upload size
    client_max_body_size 100M;

    # ... rest of configuration remains the same ...

    # CHANGED: Update SCRIPT_FILENAME references
    location = /upload {
        if ($request_method !~ ^(PUT|HEAD)$) {
            return 405;
        }
        fastcgi_pass ginxsom_backend;
        include fastcgi_params;
        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;  # Was: $document_root/ginxsom.fcgi
    }

    # Apply same change to all other FastCGI locations...
}
```

## Benefits of New Architecture

### 1. Simplified Deployment

- **Before**: Upload source → Install deps → Build submodules → Compile → Deploy
- **After**: Upload binary → Start service

### 2. Reduced Dependencies

- **Before**: gcc, make, git, libsqlite3-dev, libfcgi-dev, libcurl4-openssl-dev, etc.
- **After**: spawn-fcgi only

### 3. Better Security

- No build tools on production server
- No source code on production server
- Smaller attack surface

### 4. Faster Deployments

- **Before**: ~5-10 minutes (build time)
- **After**: ~30 seconds (upload + restart)

### 5. Consistent Binaries

- Same binary works on any Linux distribution
- No "works on my machine" issues
- Reproducible builds via Docker

### 6. Cleaner Organization

- Binary in standard location (`/usr/local/bin/`)
- Data in standard location (`/var/lib/`)
- Blobs separate from web root (`/var/www/blobs/`)

## Migration Strategy

### Option 1: In-Place Migration (Recommended)

1. Build static binary locally
2. Upload to `/tmp/`
3. Stop current service
4. Create new directories
5. Migrate data
6. Update nginx config
7. Start new service
8. Verify functionality
9. Clean up old files

### Option 2: Blue-Green Deployment

1. Setup new directories alongside old
2. Deploy static binary
3. Test on different port
4. Switch nginx config
5. Remove old deployment

### Option 3: Fresh Install

1. Backup database and blobs
2. Remove old installation
3. Deploy static binary
4. Restore data
5. Configure nginx
6. Start service

## Rollback Plan

If issues occur, rollback is simple:

```bash
# Stop new service
sudo pkill -f ginxsom-fcgi

# Restore old binary location
sudo spawn-fcgi \
    -M 666 \
    -u www-data \
    -g www-data \
    -s /tmp/ginxsom-fcgi.sock \
    -U www-data \
    -G www-data \
    -d /home/ubuntu/ginxsom \
    -- /home/ubuntu/ginxsom/ginxsom.fcgi \
    --db-path /home/ubuntu/ginxsom/db/ginxsom.db \
    --storage-dir /var/www/html/blossom

# Revert nginx config
sudo cp /etc/nginx/conf.d/default.conf.backup /etc/nginx/conf.d/default.conf
sudo nginx -s reload
```

## SystemD Service (Future Enhancement)

Create `/etc/systemd/system/ginxsom.service`:

```ini
[Unit]
Description=Ginxsom Blossom Server
After=network.target

[Service]
Type=forking
User=www-data
Group=www-data
WorkingDirectory=/var/lib/ginxsom
ExecStart=/usr/bin/spawn-fcgi \
    -M 666 \
    -u www-data \
    -g www-data \
    -s /tmp/ginxsom-fcgi.sock \
    -U www-data \
    -G www-data \
    -d /var/lib/ginxsom \
    -- /usr/local/bin/ginxsom/ginxsom-fcgi \
    --db-path /var/lib/ginxsom/ginxsom.db \
    --storage-dir /var/www/blobs
ExecStop=/usr/bin/pkill -f ginxsom-fcgi
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
```

Enable and start:

```bash
sudo systemctl daemon-reload
sudo systemctl enable ginxsom
sudo systemctl start ginxsom
sudo systemctl status ginxsom
```

## Verification Steps

After deployment, verify:

1. **Binary is static**:
   ```bash
   ldd /usr/local/bin/ginxsom/ginxsom-fcgi
   # Should show: "not a dynamic executable"
   ```

2. **Service is running**:
   ```bash
   ps aux | grep ginxsom-fcgi
   ls -la /tmp/ginxsom-fcgi.sock
   ```

3. **Health endpoint**:
   ```bash
   curl -k https://blossom.laantungir.net/health
   # Should return: OK
   ```

4. **Upload test**:
   ```bash
   # Use existing test scripts
   ./tests/file_put_bud02.sh
   ```

5. **Database access**:
   ```bash
   sudo -u www-data sqlite3 /var/lib/ginxsom/ginxsom.db "SELECT COUNT(*) FROM blobs;"
   ```

6. **Blob storage**:
   ```bash
   ls -la /var/www/blobs/ | head
   ```

## Monitoring

Key metrics to monitor:

- Binary size: `du -h /usr/local/bin/ginxsom/ginxsom-fcgi`
- Database size: `du -h /var/lib/ginxsom/ginxsom.db`
- Blob storage: `du -sh /var/www/blobs/`
- Process status: `systemctl status ginxsom` (if using systemd)
- Socket status: `ls -la /tmp/ginxsom-fcgi.sock`

## Backup Strategy

### Database Backups

```bash
# Daily backup
sudo -u www-data sqlite3 /var/lib/ginxsom/ginxsom.db ".backup /var/lib/ginxsom/backups/ginxsom-$(date +%Y%m%d).db"

# Keep last 7 days
find /var/lib/ginxsom/backups/ -name "ginxsom-*.db" -mtime +7 -delete
```
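
To actually run this daily, the two commands can be wrapped in a root crontab entry; a minimal sketch (the 03:15 schedule is arbitrary, and `%` must be escaped in crontab syntax):

```bash
# Example root crontab entry (edit with: sudo crontab -e).
# Runs the backup and prunes copies older than 7 days at 03:15 daily.
15 3 * * * sudo -u www-data sqlite3 /var/lib/ginxsom/ginxsom.db ".backup /var/lib/ginxsom/backups/ginxsom-$(date +\%Y\%m\%d).db" && find /var/lib/ginxsom/backups/ -name "ginxsom-*.db" -mtime +7 -delete
```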

### Blob Backups

```bash
# Sync to backup location
rsync -av /var/www/blobs/ /backup/ginxsom-blobs/
```

## Conclusion

The static MUSL binary deployment provides:

- ✅ Simpler deployment process
- ✅ Fewer dependencies
- ✅ Better security
- ✅ Faster updates
- ✅ Universal compatibility
- ✅ Cleaner organization

This architecture follows Linux FHS (Filesystem Hierarchy Standard) best practices and provides a solid foundation for production deployment.
25
ginxsom.service
Normal file
@@ -0,0 +1,25 @@

[Unit]
Description=Ginxsom Blossom Server
After=network.target

[Service]
Type=forking
ExecStartPre=/bin/rm -f /tmp/ginxsom-fcgi.sock
ExecStart=/usr/bin/spawn-fcgi \
    -s /tmp/ginxsom-fcgi.sock \
    -M 666 \
    -u www-data \
    -g www-data \
    -d /usr/local/bin/ginxsom \
    -- /usr/local/bin/ginxsom/ginxsom-fcgi \
    --admin-pubkey 1ec454734dcbf6fe54901ce25c0c7c6bca5edd89443416761fadc321d38df139 \
    --server-privkey 90df3fe61e7d19e50f387e4c5db87eff1a7d2a1037cd55026c4b21a4fda8ecf6 \
    --db-path /usr/local/bin/ginxsom \
    --storage-dir /var/www/blobs
ExecStop=/usr/bin/pkill -f ginxsom-fcgi
ExecStopPost=/bin/rm -f /tmp/ginxsom-fcgi.sock
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
@@ -97,7 +97,7 @@ server {
     add_header Access-Control-Max-Age 86400 always;

     # Root directory for blob storage
-    root /var/www/html/blossom;
+    root /var/www/blobs;

     # Maximum upload size
     client_max_body_size 100M;
@@ -114,7 +114,7 @@ server {
         }
         fastcgi_pass ginxsom_backend;
         include fastcgi_params;
-        fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
     }

     # GET /list/<pubkey> - List user blobs
@@ -124,7 +124,7 @@ server {
         }
         fastcgi_pass ginxsom_backend;
         include fastcgi_params;
-        fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
     }

     # PUT /mirror - Mirror content
@@ -134,7 +134,7 @@ server {
         }
         fastcgi_pass ginxsom_backend;
         include fastcgi_params;
-        fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
     }

     # PUT /report - Report content
@@ -144,7 +144,7 @@ server {
         }
         fastcgi_pass ginxsom_backend;
         include fastcgi_params;
-        fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
     }

     # GET /auth - NIP-42 challenges
@@ -154,17 +154,17 @@ server {
         }
         fastcgi_pass ginxsom_backend;
         include fastcgi_params;
-        fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
     }

     # Admin API
     location /api/ {
-        if ($request_method !~ ^(GET|PUT)$) {
+        if ($request_method !~ ^(GET|POST|PUT)$) {
             return 405;
         }
         fastcgi_pass ginxsom_backend;
         include fastcgi_params;
-        fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
     }

     # Blob serving - SHA256 patterns
@@ -195,7 +195,7 @@ server {
         internal;
         fastcgi_pass ginxsom_backend;
         include fastcgi_params;
-        fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
         fastcgi_param REQUEST_URI /$1;
     }

@@ -203,7 +203,7 @@ server {
         internal;
         fastcgi_pass ginxsom_backend;
         include fastcgi_params;
-        fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
         fastcgi_param REQUEST_URI /$1;
     }

@@ -225,7 +225,7 @@ server {
         }
         fastcgi_pass ginxsom_backend;
         include fastcgi_params;
-        fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+        fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
     }
 }

@@ -49,7 +49,22 @@ if [[ $FOLLOW_LOGS -eq 1 ]]; then
     wait
     exit 0
 fi
-FCGI_BINARY="./build/ginxsom-fcgi"
+# Detect architecture for static binary name
+ARCH=$(uname -m)
+case "$ARCH" in
+    x86_64) STATIC_BINARY="./build/ginxsom-fcgi_static_x86_64" ;;
+    aarch64|arm64) STATIC_BINARY="./build/ginxsom-fcgi_static_arm64" ;;
+    *) STATIC_BINARY="./build/ginxsom-fcgi_static_${ARCH}" ;;
+esac
+
+# Use static binary if available, fallback to dynamic
+if [ -f "$STATIC_BINARY" ]; then
+    FCGI_BINARY="$STATIC_BINARY"
+    echo "Using static binary: $FCGI_BINARY"
+else
+    FCGI_BINARY="./build/ginxsom-fcgi"
+    echo "Static binary not found, using dynamic binary: $FCGI_BINARY"
+fi
 SOCKET_PATH="/tmp/ginxsom-fcgi.sock"
 PID_FILE="/tmp/ginxsom-fcgi.pid"
 NGINX_CONFIG="config/local-nginx.conf"
@@ -173,21 +188,28 @@ fi

 echo -e "${GREEN}FastCGI cleanup complete${NC}"

-# Step 3: Always rebuild FastCGI binary with clean build
-echo -e "\n${YELLOW}3. Rebuilding FastCGI binary (clean build)...${NC}"
-echo "Embedding web files..."
-./scripts/embed_web_files.sh
+# Step 3: Always rebuild FastCGI binary with static build
+echo -e "\n${YELLOW}3. Rebuilding FastCGI binary (static build)...${NC}"
+echo "Cleaning old build artifacts to ensure fresh embedding..."
+make clean
+echo "Removing local embedded header to prevent Docker cache issues..."
+rm -f src/admin_interface_embedded.h
+echo "Building static binary with Docker..."
+make static
 if [ $? -ne 0 ]; then
-    echo -e "${RED}Web file embedding failed! Cannot continue.${NC}"
+    echo -e "${RED}Static build failed! Cannot continue.${NC}"
+    echo -e "${RED}Docker must be available and running for static builds.${NC}"
     exit 1
 fi
-echo "Performing clean rebuild to ensure all changes are compiled..."
-make clean && make
-if [ $? -ne 0 ]; then
-    echo -e "${RED}Build failed! Cannot continue.${NC}"
-    exit 1
-fi
-echo -e "${GREEN}Clean rebuild complete${NC}"

+# Update FCGI_BINARY to use the newly built static binary
+ARCH=$(uname -m)
+case "$ARCH" in
+    x86_64) FCGI_BINARY="./build/ginxsom-fcgi_static_x86_64" ;;
+    aarch64|arm64) FCGI_BINARY="./build/ginxsom-fcgi_static_arm64" ;;
+    *) FCGI_BINARY="./build/ginxsom-fcgi_static_${ARCH}" ;;
+esac
+echo -e "${GREEN}Static build complete: $FCGI_BINARY${NC}"

 # Step 3.5: Clean database directory for fresh testing
 echo -e "\n${YELLOW}3.5. Cleaning database directory...${NC}"

@@ -8,6 +8,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <time.h>

// Forward declare app_log
@@ -142,6 +143,19 @@ cJSON* admin_commands_process(cJSON* command_array, const char* request_event_id
else if (strcmp(command, "sql_query") == 0) {
return admin_cmd_sql_query(command_array);
}
else if (strcmp(command, "query_view") == 0) {
return admin_cmd_query_view(command_array);
}
// Auth rules management commands (c-relay compatible)
else if (strcmp(command, "blacklist") == 0 || strcmp(command, "whitelist") == 0) {
return admin_cmd_auth_add_rule(command_array);
}
else if (strcmp(command, "delete_auth_rule") == 0) {
return admin_cmd_auth_delete_rule(command_array);
}
else if (strcmp(command, "auth_query") == 0) {
return admin_cmd_auth_query(command_array);
}
else {
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Unknown command: %s", command);
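The dispatcher above expects the decoded payload to be a JSON array whose first element is the command name, with any arguments following. A client-side sketch using cJSON (the transport and encryption steps are omitted here):

#include <stdio.h>
#include <stdlib.h>
#include <cjson/cJSON.h>

int main(void) {
    /* Builds ["auth_query", "all"], one of the commands dispatched above */
    cJSON *cmd = cJSON_CreateArray();
    cJSON_AddItemToArray(cmd, cJSON_CreateString("auth_query"));
    cJSON_AddItemToArray(cmd, cJSON_CreateString("all"));
    char *json = cJSON_PrintUnformatted(cmd);
    printf("%s\n", json); /* ["auth_query","all"] */
    free(json);
    cJSON_Delete(cmd);
    return 0;
}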
@@ -167,16 +181,23 @@ cJSON* admin_cmd_config_query(cJSON* args) {
return response;
}

// Check if specific keys were requested (args[1] should be array of keys or null for all)
// Check if specific keys were requested (args[1] should be array of keys, null, or "all" for all)
cJSON* keys_array = NULL;
if (cJSON_GetArraySize(args) >= 2) {
keys_array = cJSON_GetArrayItem(args, 1);
// Accept array, null, or string "all" for querying all configs
if (!cJSON_IsArray(keys_array) && !cJSON_IsNull(keys_array)) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Keys parameter must be array or null");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
// Check if it's the string "all"
if (cJSON_IsString(keys_array) && strcmp(keys_array->valuestring, "all") == 0) {
// Treat "all" as null (query all configs)
keys_array = NULL;
} else {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Keys parameter must be array, null, or \"all\"");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}
}
}
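After this change, config_query accepts an array of keys, JSON null, or the string "all"; the last two both mean "return every key". A sketch of the filtered form (the key names here are illustrative, not guaranteed config rows):

#include <stdio.h>
#include <stdlib.h>
#include <cjson/cJSON.h>

int main(void) {
    /* ["config_query", ["port", "origin"]] - fetch just two keys.
       ["config_query", null] or ["config_query", "all"] would fetch all. */
    cJSON *cmd = cJSON_CreateArray();
    cJSON_AddItemToArray(cmd, cJSON_CreateString("config_query"));
    cJSON *keys = cJSON_CreateArray();
    cJSON_AddItemToArray(keys, cJSON_CreateString("port"));
    cJSON_AddItemToArray(keys, cJSON_CreateString("origin"));
    cJSON_AddItemToArray(cmd, keys);
    char *json = cJSON_PrintUnformatted(cmd);
    printf("%s\n", json);
    free(json);
    cJSON_Delete(cmd);
    return 0;
}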
@@ -259,18 +280,18 @@ cJSON* admin_cmd_config_update(cJSON* args) {
cJSON* response = cJSON_CreateObject();
cJSON_AddStringToObject(response, "query_type", "config_update");

// Expected format: ["config_update", {"key1": "value1", "key2": "value2"}]
// Expected format: ["config_update", [{key: "x", value: "y", data_type: "z", category: "w"}]]
if (cJSON_GetArraySize(args) < 2) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Missing config updates object");
cJSON_AddStringToObject(response, "error", "Missing config updates array");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

cJSON* updates = cJSON_GetArrayItem(args, 1);
if (!cJSON_IsObject(updates)) {
if (!cJSON_IsArray(updates)) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Updates must be an object");
cJSON_AddStringToObject(response, "error", "Updates must be an array of config objects");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
@@ -297,50 +318,66 @@ cJSON* admin_cmd_config_update(cJSON* args) {
return response;
}

// Process each update
cJSON* updated_keys = cJSON_CreateArray();
cJSON* failed_keys = cJSON_CreateArray();
// Process each update - expecting array of config objects
cJSON* data_array = cJSON_CreateArray();
int success_count = 0;
int fail_count = 0;

cJSON* item = NULL;
cJSON_ArrayForEach(item, updates) {
const char* key = item->string;
const char* value = cJSON_GetStringValue(item);

if (!value) {
cJSON_AddItemToArray(failed_keys, cJSON_CreateString(key));
cJSON* config_obj = NULL;
cJSON_ArrayForEach(config_obj, updates) {
if (!cJSON_IsObject(config_obj)) {
fail_count++;
continue;
}

cJSON* key_item = cJSON_GetObjectItem(config_obj, "key");
cJSON* value_item = cJSON_GetObjectItem(config_obj, "value");

if (!cJSON_IsString(key_item) || !cJSON_IsString(value_item)) {
fail_count++;
continue;
}

const char* key = key_item->valuestring;
const char* value = value_item->valuestring;

sqlite3_reset(stmt);
sqlite3_bind_text(stmt, 1, value, -1, SQLITE_TRANSIENT);
sqlite3_bind_text(stmt, 2, key, -1, SQLITE_TRANSIENT);

rc = sqlite3_step(stmt);

// Create result object for this config update
cJSON* result_obj = cJSON_CreateObject();
cJSON_AddStringToObject(result_obj, "key", key);

if (rc == SQLITE_DONE && sqlite3_changes(db) > 0) {
cJSON_AddItemToArray(updated_keys, cJSON_CreateString(key));
cJSON_AddStringToObject(result_obj, "status", "success");
cJSON_AddStringToObject(result_obj, "value", value);

// Add optional fields if present
cJSON* data_type_item = cJSON_GetObjectItem(config_obj, "data_type");
if (cJSON_IsString(data_type_item)) {
cJSON_AddStringToObject(result_obj, "data_type", data_type_item->valuestring);
}

success_count++;
app_log(LOG_INFO, "Updated config key: %s", key);
app_log(LOG_INFO, "Updated config key: %s = %s", key, value);
} else {
cJSON_AddItemToArray(failed_keys, cJSON_CreateString(key));
cJSON_AddStringToObject(result_obj, "status", "error");
cJSON_AddStringToObject(result_obj, "error", "Failed to update");
fail_count++;
}

cJSON_AddItemToArray(data_array, result_obj);
}

sqlite3_finalize(stmt);
sqlite3_close(db);

cJSON_AddStringToObject(response, "status", "success");
cJSON_AddNumberToObject(response, "updated_count", success_count);
cJSON_AddNumberToObject(response, "failed_count", fail_count);
cJSON_AddItemToObject(response, "updated_keys", updated_keys);
if (fail_count > 0) {
cJSON_AddItemToObject(response, "failed_keys", failed_keys);
} else {
cJSON_Delete(failed_keys);
}
cJSON_AddStringToObject(response, "status", success_count > 0 ? "success" : "error");
cJSON_AddNumberToObject(response, "updates_applied", success_count);
cJSON_AddItemToObject(response, "data", data_array);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));

return response;
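The payload shape for config_update therefore changes from a flat object to an array of per-key objects, which lets each entry carry optional data_type and category fields. A sketch of building the new shape (max_blob_size is an assumed key used for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <cjson/cJSON.h>

int main(void) {
    /* ["config_update", [{"key": "...", "value": "...", "data_type": "..."}]] */
    cJSON *cmd = cJSON_CreateArray();
    cJSON_AddItemToArray(cmd, cJSON_CreateString("config_update"));
    cJSON *updates = cJSON_CreateArray();
    cJSON *u = cJSON_CreateObject();
    cJSON_AddStringToObject(u, "key", "max_blob_size");
    cJSON_AddStringToObject(u, "value", "104857600");
    cJSON_AddStringToObject(u, "data_type", "integer");
    cJSON_AddItemToArray(updates, u);
    cJSON_AddItemToArray(cmd, updates);
    char *json = cJSON_PrintUnformatted(cmd);
    printf("%s\n", json);
    free(json);
    cJSON_Delete(cmd);
    return 0;
}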
@@ -387,7 +424,7 @@ cJSON* admin_cmd_stats_query(cJSON* args) {
sqlite3_finalize(stmt);

// Get auth rules count
sql = "SELECT COUNT(*) FROM auth_rules WHERE enabled = 1";
sql = "SELECT COUNT(*) FROM auth_rules WHERE active = 1";
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK && sqlite3_step(stmt) == SQLITE_ROW) {
cJSON_AddNumberToObject(stats, "active_auth_rules", sqlite3_column_int(stmt, 0));
@@ -637,7 +674,7 @@ cJSON* admin_cmd_sql_query(cJSON* args) {
cJSON* response = cJSON_CreateObject();
cJSON_AddStringToObject(response, "query_type", "sql_query");

// Expected format: ["sql_query", "SELECT ..."]
// Expected format: ["sql_query", "SQL STATEMENT"]
if (cJSON_GetArraySize(args) < 2) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Missing SQL query");
@@ -654,20 +691,26 @@ cJSON* admin_cmd_sql_query(cJSON* args) {
}

const char* sql = query_item->valuestring;
const char* trimmed_sql = sql;
while (*trimmed_sql && isspace((unsigned char)*trimmed_sql)) {
trimmed_sql++;
}

// Security: Only allow SELECT queries
const char* sql_upper = sql;
while (*sql_upper == ' ' || *sql_upper == '\t' || *sql_upper == '\n') sql_upper++;
if (strncasecmp(sql_upper, "SELECT", 6) != 0) {
int is_select = strncasecmp(trimmed_sql, "SELECT", 6) == 0;
int is_delete = strncasecmp(trimmed_sql, "DELETE", 6) == 0;
int is_update = strncasecmp(trimmed_sql, "UPDATE", 6) == 0;
int is_insert = strncasecmp(trimmed_sql, "INSERT", 6) == 0;

if (!is_select && !is_delete && !is_update && !is_insert) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Only SELECT queries are allowed");
cJSON_AddStringToObject(response, "error", "Only SELECT, INSERT, UPDATE, or DELETE queries are allowed");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

// Open database (read-only for safety)
int open_flags = is_select ? SQLITE_OPEN_READONLY : (SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE);
sqlite3* db;
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, open_flags, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
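The gate is a simple case-insensitive prefix match after trimming leading whitespace, restated standalone below. Note that a prefix check alone does not reject multi-statement strings, but a single sqlite3_prepare_v2 call only compiles the first statement, which limits the exposure:

#include <stdio.h>
#include <string.h>
#include <strings.h> /* strncasecmp */
#include <ctype.h>

static int sql_verb_allowed(const char *sql) {
    while (*sql && isspace((unsigned char)*sql)) sql++;
    return strncasecmp(sql, "SELECT", 6) == 0 ||
           strncasecmp(sql, "DELETE", 6) == 0 ||
           strncasecmp(sql, "UPDATE", 6) == 0 ||
           strncasecmp(sql, "INSERT", 6) == 0;
}

int main(void) {
    printf("%d\n", sql_verb_allowed("  select * from blobs")); /* 1 */
    printf("%d\n", sql_verb_allowed("DROP TABLE blobs"));      /* 0 */
    printf("%d\n", sql_verb_allowed("delete from blobs"));     /* 1 */
    return 0;
}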
@@ -675,7 +718,70 @@ cJSON* admin_cmd_sql_query(cJSON* args) {
return response;
}

// Prepare and execute query
if (is_select) {
sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "SQL error: %s", sqlite3_errmsg(db));
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}

int col_count = sqlite3_column_count(stmt);
cJSON* columns = cJSON_CreateArray();
for (int i = 0; i < col_count; i++) {
cJSON_AddItemToArray(columns, cJSON_CreateString(sqlite3_column_name(stmt, i)));
}

cJSON* rows = cJSON_CreateArray();
int row_count = 0;
const int MAX_ROWS = 1000;
while (row_count < MAX_ROWS && (rc = sqlite3_step(stmt)) == SQLITE_ROW) {
cJSON* row = cJSON_CreateArray();
for (int i = 0; i < col_count; i++) {
int col_type = sqlite3_column_type(stmt, i);
switch (col_type) {
case SQLITE_INTEGER:
cJSON_AddItemToArray(row, cJSON_CreateNumber(sqlite3_column_int64(stmt, i)));
break;
case SQLITE_FLOAT:
cJSON_AddItemToArray(row, cJSON_CreateNumber(sqlite3_column_double(stmt, i)));
break;
case SQLITE_TEXT:
cJSON_AddItemToArray(row, cJSON_CreateString((const char*)sqlite3_column_text(stmt, i)));
break;
case SQLITE_NULL:
cJSON_AddItemToArray(row, cJSON_CreateNull());
break;
default:
cJSON_AddItemToArray(row, cJSON_CreateString(""));
}
}
cJSON_AddItemToArray(rows, row);
row_count++;
}

sqlite3_finalize(stmt);
sqlite3_close(db);

cJSON_AddStringToObject(response, "status", "success");
cJSON_AddItemToObject(response, "columns", columns);
cJSON_AddItemToObject(response, "rows", rows);
cJSON_AddNumberToObject(response, "row_count", row_count);
if (row_count >= MAX_ROWS) {
cJSON_AddBoolToObject(response, "truncated", 1);
}
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));

app_log(LOG_INFO, "SQL query executed: %d rows returned", row_count);
return response;
}

// Handle DELETE/UPDATE/INSERT
sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
@@ -688,19 +794,113 @@ cJSON* admin_cmd_sql_query(cJSON* args) {
return response;
}

// Execute the statement (no result rows expected for DML)
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "SQL execution error: %s", sqlite3_errmsg(db));
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_finalize(stmt);
sqlite3_close(db);
return response;
}

int affected_rows = sqlite3_changes(db);
sqlite3_finalize(stmt);
sqlite3_close(db);

cJSON_AddStringToObject(response, "status", "success");
cJSON_AddNumberToObject(response, "affected_rows", affected_rows);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));

app_log(LOG_INFO, "SQL modification executed: %d rows affected", affected_rows);
return response;
}

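A client consuming the response built above gets columns as an array of names and rows as an array of value arrays. A small sketch of walking that shape with cJSON:

#include <stdio.h>
#include <cjson/cJSON.h>

static void print_rows(const cJSON *response) {
    const cJSON *rows = cJSON_GetObjectItemCaseSensitive(response, "rows");
    const cJSON *row = NULL;
    cJSON_ArrayForEach(row, rows) {
        const cJSON *cell = NULL;
        cJSON_ArrayForEach(cell, row) {
            if (cJSON_IsNumber(cell)) printf("%g\t", cell->valuedouble);
            else if (cJSON_IsString(cell)) printf("%s\t", cell->valuestring);
            else printf("NULL\t");
        }
        printf("\n");
    }
}

int main(void) {
    const char *sample =
        "{\"status\":\"success\",\"columns\":[\"key\",\"value\"],"
        "\"rows\":[[\"port\",\"9001\"]],\"row_count\":1}";
    cJSON *resp = cJSON_Parse(sample);
    if (resp) {
        print_rows(resp);
        cJSON_Delete(resp);
    }
    return 0;
}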
cJSON* admin_cmd_query_view(cJSON* args) {
cJSON* response = cJSON_CreateObject();
cJSON_AddStringToObject(response, "query_type", "query_view");

// Expected format: ["query_view", "view_name"]
if (cJSON_GetArraySize(args) < 2) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Missing view name");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

cJSON* view_name_item = cJSON_GetArrayItem(args, 1);
if (!cJSON_IsString(view_name_item)) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "View name must be a string");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

const char* view_name = view_name_item->valuestring;

// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

// Build SQL query based on view name
char sql[512];

if (strcmp(view_name, "blob_overview") == 0) {
// Query blob_overview view
snprintf(sql, sizeof(sql), "SELECT * FROM blob_overview");
} else if (strcmp(view_name, "storage_stats") == 0) {
// Query storage_stats view
snprintf(sql, sizeof(sql), "SELECT * FROM storage_stats");
} else if (strcmp(view_name, "blob_type_distribution") == 0) {
// Query blob_type_distribution view
snprintf(sql, sizeof(sql), "SELECT * FROM blob_type_distribution");
} else if (strcmp(view_name, "blob_time_stats") == 0) {
// Query blob_time_stats view
snprintf(sql, sizeof(sql), "SELECT * FROM blob_time_stats");
} else if (strcmp(view_name, "top_uploaders") == 0) {
// Query top_uploaders view
snprintf(sql, sizeof(sql), "SELECT * FROM top_uploaders");
} else {
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Unknown view: %s", view_name);
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}

sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Failed to prepare query: %s", sqlite3_errmsg(db));
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}

// Execute query and build results
int col_count = sqlite3_column_count(stmt);
cJSON* columns = cJSON_CreateArray();
for (int i = 0; i < col_count; i++) {
cJSON_AddItemToArray(columns, cJSON_CreateString(sqlite3_column_name(stmt, i)));
}

// Execute and collect rows (limit to 1000 rows for safety)
cJSON* rows = cJSON_CreateArray();
int row_count = 0;
const int MAX_ROWS = 1000;

while (row_count < MAX_ROWS && (rc = sqlite3_step(stmt)) == SQLITE_ROW) {
while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
cJSON* row = cJSON_CreateArray();
for (int i = 0; i < col_count; i++) {
int col_type = sqlite3_column_type(stmt, i);
@@ -729,15 +929,313 @@ cJSON* admin_cmd_sql_query(cJSON* args) {
sqlite3_close(db);

cJSON_AddStringToObject(response, "status", "success");
cJSON_AddStringToObject(response, "view_name", view_name);
cJSON_AddItemToObject(response, "columns", columns);
cJSON_AddItemToObject(response, "rows", rows);
cJSON_AddNumberToObject(response, "row_count", row_count);
if (row_count >= MAX_ROWS) {
cJSON_AddBoolToObject(response, "truncated", 1);
}
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));

app_log(LOG_INFO, "SQL query executed: %d rows returned", row_count);
app_log(LOG_INFO, "View query executed: %s (%d rows)", view_name, row_count);

return response;
}

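Usage of the handler above is a two-element command; anything outside the hard-coded name list falls through to the "Unknown view" error. Note this list also allows storage_stats, which the older whitelist in the removed handle_query_view_command (further down) did not. For example:

/* ["query_view", "blob_overview"]  -> dashboard summary row
   ["query_view", "storage_stats"]  -> storage_stats view
   ["query_view", "top_uploaders"]  -> top_uploaders view
   ["query_view", "nonexistent"]    -> {"status":"error","error":"Unknown view: nonexistent"} */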
// ============================================================================
// AUTH RULES MANAGEMENT COMMANDS (c-relay compatible)
// ============================================================================

// Add blacklist or whitelist rule
// Format: ["blacklist", "pubkey", "abc123..."] or ["whitelist", "pubkey", "def456..."]
cJSON* admin_cmd_auth_add_rule(cJSON* args) {
cJSON* response = cJSON_CreateObject();

// Get command type (blacklist or whitelist)
cJSON* cmd_type = cJSON_GetArrayItem(args, 0);
if (!cJSON_IsString(cmd_type)) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Invalid command type");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

const char* command = cmd_type->valuestring;
const char* rule_type_prefix = command; // "blacklist" or "whitelist"

// Expected format: ["blacklist/whitelist", "pattern_type", "pattern_value"]
if (cJSON_GetArraySize(args) < 3) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Missing parameters. Format: [\"blacklist/whitelist\", \"pattern_type\", \"pattern_value\"]");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

cJSON* pattern_type_item = cJSON_GetArrayItem(args, 1);
cJSON* pattern_value_item = cJSON_GetArrayItem(args, 2);

if (!cJSON_IsString(pattern_type_item) || !cJSON_IsString(pattern_value_item)) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Pattern type and value must be strings");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

const char* pattern_type = pattern_type_item->valuestring;
const char* pattern_value = pattern_value_item->valuestring;

char rule_type[64];
snprintf(rule_type, sizeof(rule_type), "%s_%s", rule_type_prefix, pattern_type);

// Validate pattern_type
if (strcmp(pattern_type, "pubkey") != 0 && strcmp(pattern_type, "hash") != 0 && strcmp(pattern_type, "mime") != 0) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Invalid pattern_type. Must be 'pubkey', 'hash', or 'mime'");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READWRITE, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

// Insert rule
const char* sql = "INSERT INTO auth_rules (rule_type, pattern_type, pattern_value) VALUES (?, ?, ?)";
sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to prepare insert statement");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}

sqlite3_bind_text(stmt, 1, rule_type, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, pattern_type, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 3, pattern_value, -1, SQLITE_STATIC);

rc = sqlite3_step(stmt);
int rule_id = 0;

if (rc == SQLITE_DONE) {
rule_id = sqlite3_last_insert_rowid(db);
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "success");
cJSON_AddNumberToObject(response, "rule_id", rule_id);
cJSON_AddStringToObject(response, "rule_type", rule_type);
cJSON_AddStringToObject(response, "pattern_type", pattern_type);
cJSON_AddStringToObject(response, "pattern_value", pattern_value);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));

app_log(LOG_INFO, "Added %s rule: %s=%s (ID: %d)", rule_type, pattern_type, pattern_value, rule_id);
} else {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Failed to insert rule: %s", sqlite3_errmsg(db));
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
}

sqlite3_finalize(stmt);
sqlite3_close(db);

return response;
}

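The stored rule_type is the command name joined with the pattern type, so a blacklist command with pattern type pubkey lands in the table as blacklist_pubkey. A standalone restatement of that composition:

#include <stdio.h>

int main(void) {
    /* ["blacklist", "pubkey", "<64-hex-pubkey>"] stores rule_type "blacklist_pubkey" */
    char rule_type[64];
    snprintf(rule_type, sizeof(rule_type), "%s_%s", "blacklist", "pubkey");
    printf("%s\n", rule_type); /* blacklist_pubkey */
    return 0;
}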
// Delete auth rule
// Format: ["delete_auth_rule", "blacklist", "pubkey", "abc123..."]
cJSON* admin_cmd_auth_delete_rule(cJSON* args) {
cJSON* response = cJSON_CreateObject();
cJSON_AddStringToObject(response, "query_type", "delete_auth_rule");

// Expected format: ["delete_auth_rule", "rule_type", "pattern_type", "pattern_value"]
if (cJSON_GetArraySize(args) < 4) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Missing parameters. Format: [\"delete_auth_rule\", \"blacklist/whitelist\", \"pattern_type\", \"pattern_value\"]");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

cJSON* rule_type_item = cJSON_GetArrayItem(args, 1);
cJSON* pattern_type_item = cJSON_GetArrayItem(args, 2);
cJSON* pattern_value_item = cJSON_GetArrayItem(args, 3);

if (!cJSON_IsString(rule_type_item) || !cJSON_IsString(pattern_type_item) || !cJSON_IsString(pattern_value_item)) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "All parameters must be strings");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

const char* rule_type_str = cJSON_GetStringValue(rule_type_item);
const char* pattern_type = cJSON_GetStringValue(pattern_type_item);
const char* pattern_value = cJSON_GetStringValue(pattern_value_item);

char full_rule_type[64];
snprintf(full_rule_type, sizeof(full_rule_type), "%s_%s", rule_type_str, pattern_type);

// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READWRITE, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

// Delete rule
const char* sql = "DELETE FROM auth_rules WHERE rule_type = ? AND pattern_type = ? AND pattern_value = ?";
sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to prepare delete statement");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}

sqlite3_bind_text(stmt, 1, full_rule_type, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, pattern_type, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 3, pattern_value, -1, SQLITE_STATIC);

rc = sqlite3_step(stmt);
int changes = sqlite3_changes(db);

if (rc == SQLITE_DONE) {
cJSON_AddStringToObject(response, "status", "success");
cJSON_AddNumberToObject(response, "deleted_count", changes);
cJSON_AddStringToObject(response, "rule_type", full_rule_type);
cJSON_AddStringToObject(response, "pattern_type", pattern_type);
cJSON_AddStringToObject(response, "pattern_value", pattern_value);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));

app_log(LOG_INFO, "Deleted %d %s rule(s): %s=%s", changes, full_rule_type, pattern_type, pattern_value);
} else {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to delete rule");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
}

sqlite3_finalize(stmt);
sqlite3_close(db);

return response;
}

// Query auth rules
// Format: ["auth_query", "all"] or ["auth_query", "whitelist"] or ["auth_query", "pattern", "abc123..."]
cJSON* admin_cmd_auth_query(cJSON* args) {
cJSON* response = cJSON_CreateObject();
cJSON_AddStringToObject(response, "query_type", "auth_query");

// Get query type
const char* query_type = "all";
const char* filter_value = NULL;

if (cJSON_GetArraySize(args) >= 2) {
cJSON* query_type_item = cJSON_GetArrayItem(args, 1);
if (cJSON_IsString(query_type_item)) {
query_type = query_type_item->valuestring;
}
}

if (cJSON_GetArraySize(args) >= 3) {
cJSON* filter_value_item = cJSON_GetArrayItem(args, 2);
if (cJSON_IsString(filter_value_item)) {
filter_value = filter_value_item->valuestring;
}
}

// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}

// Build SQL query based on query type
char sql[512];
sqlite3_stmt* stmt;

if (strcmp(query_type, "all") == 0) {
snprintf(sql, sizeof(sql), "SELECT id, rule_type, pattern_type, pattern_value, active, created_at, updated_at FROM auth_rules ORDER BY id");
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
}
else if (strcmp(query_type, "blacklist") == 0 || strcmp(query_type, "whitelist") == 0) {
snprintf(sql, sizeof(sql), "SELECT id, rule_type, pattern_type, pattern_value, active, created_at, updated_at FROM auth_rules WHERE rule_type LIKE ? || '_%%' ORDER BY id");
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, query_type, -1, SQLITE_STATIC);
}
}
else if (strcmp(query_type, "pattern") == 0 && filter_value) {
snprintf(sql, sizeof(sql), "SELECT id, rule_type, pattern_type, pattern_value, active, created_at, updated_at FROM auth_rules WHERE pattern_value = ? ORDER BY id");
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, filter_value, -1, SQLITE_STATIC);
}
}
else {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Invalid query type. Use 'all', 'blacklist', 'whitelist', or 'pattern'");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}

if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to prepare query");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}

// Execute query and build results
cJSON* rules = cJSON_CreateArray();
int count = 0;

while (sqlite3_step(stmt) == SQLITE_ROW) {
cJSON* rule = cJSON_CreateObject();
cJSON_AddNumberToObject(rule, "id", sqlite3_column_int(stmt, 0));
cJSON_AddStringToObject(rule, "rule_type", (const char*)sqlite3_column_text(stmt, 1));
cJSON_AddStringToObject(rule, "pattern_type", (const char*)sqlite3_column_text(stmt, 2));
cJSON_AddStringToObject(rule, "pattern_value", (const char*)sqlite3_column_text(stmt, 3));
cJSON_AddNumberToObject(rule, "active", sqlite3_column_int(stmt, 4));
cJSON_AddNumberToObject(rule, "created_at", sqlite3_column_int64(stmt, 5));
cJSON_AddNumberToObject(rule, "updated_at", sqlite3_column_int64(stmt, 6));

cJSON_AddItemToArray(rules, rule);
count++;
}

sqlite3_finalize(stmt);
sqlite3_close(db);

cJSON_AddStringToObject(response, "status", "success");
cJSON_AddNumberToObject(response, "count", count);
cJSON_AddStringToObject(response, "filter", query_type);
cJSON_AddItemToObject(response, "rules", rules);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));

app_log(LOG_INFO, "Auth query executed: %d rules returned (filter: %s)", count, query_type);

return response;
}
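Summarizing the three filter branches above (the LIKE pattern matches the composed rule_type prefix):

/* ["auth_query", "all"]                -> every rule, ordered by id
   ["auth_query", "blacklist"]          -> rule_type LIKE 'blacklist_%'
   ["auth_query", "whitelist"]          -> rule_type LIKE 'whitelist_%'
   ["auth_query", "pattern", "<value>"] -> exact pattern_value match */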
@@ -35,6 +35,12 @@ cJSON* admin_cmd_system_status(cJSON* args);
cJSON* admin_cmd_blob_list(cJSON* args);
cJSON* admin_cmd_storage_stats(cJSON* args);
cJSON* admin_cmd_sql_query(cJSON* args);
cJSON* admin_cmd_query_view(cJSON* args);

// Auth rules management handlers (c-relay compatible)
cJSON* admin_cmd_auth_add_rule(cJSON* args);
cJSON* admin_cmd_auth_delete_rule(cJSON* args);
cJSON* admin_cmd_auth_query(cJSON* args);

// NIP-44 encryption/decryption helpers
int admin_encrypt_response(

@@ -6,6 +6,7 @@
#include <unistd.h>
#include <sys/types.h>
#include "ginxsom.h"
#include "admin_commands.h"

// Forward declarations for nostr_core_lib functions
int nostr_hex_to_bytes(const char* hex, unsigned char* bytes, size_t bytes_len);
@@ -28,10 +29,8 @@ extern char g_db_path[];
// Forward declarations
static int get_server_privkey(unsigned char* privkey_bytes);
static int get_server_pubkey(char* pubkey_hex, size_t size);
static int handle_config_query_command(cJSON* response_data);
static int handle_query_view_command(cJSON* command_array, cJSON* response_data);
static int send_admin_response_event(const char* admin_pubkey, const char* request_id,
cJSON* response_data);
cJSON* response_data);
static cJSON* parse_authorization_header(void);
static int process_admin_event(cJSON* event);

@@ -304,20 +303,35 @@ static int process_admin_event(cJSON* event) {
cJSON_AddStringToObject(response_data, "query_type", cmd);
cJSON_AddNumberToObject(response_data, "timestamp", (double)time(NULL));

// Handle command
// Handle command - use admin_commands system for processing
cJSON* command_response = admin_commands_process(command_array, request_id);

int result = -1;
if (strcmp(cmd, "config_query") == 0) {
app_log(LOG_DEBUG, "ADMIN_EVENT: Handling config_query command");
result = handle_config_query_command(response_data);
app_log(LOG_DEBUG, "ADMIN_EVENT: config_query result: %d", result);
} else if (strcmp(cmd, "query_view") == 0) {
app_log(LOG_DEBUG, "ADMIN_EVENT: Handling query_view command");
result = handle_query_view_command(command_array, response_data);
app_log(LOG_DEBUG, "ADMIN_EVENT: query_view result: %d", result);
if (command_response) {
// Check if command was successful
cJSON* status = cJSON_GetObjectItem(command_response, "status");
if (status && cJSON_IsString(status)) {
const char* status_str = cJSON_GetStringValue(status);
if (strcmp(status_str, "success") == 0) {
result = 0;
}
}

// Copy response data from command_response to response_data
cJSON* item = NULL;
cJSON_ArrayForEach(item, command_response) {
if (item->string) {
cJSON* copy = cJSON_Duplicate(item, 1);
cJSON_AddItemToObject(response_data, item->string, copy);
}
}

cJSON_Delete(command_response);
app_log(LOG_DEBUG, "ADMIN_EVENT: Command processed with result: %d", result);
} else {
app_log(LOG_WARN, "ADMIN_EVENT: Unknown command: %s", cmd);
app_log(LOG_ERROR, "ADMIN_EVENT: Command processing returned NULL");
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "Unknown command");
cJSON_AddStringToObject(response_data, "error", "Command processing failed");
result = -1;
}
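The copy loop above leans on a cJSON detail worth noting: object members are stored as a child list, so cJSON_ArrayForEach iterates objects as well as arrays, with item->string holding the member key. A minimal demonstration:

#include <stdio.h>
#include <stdlib.h>
#include <cjson/cJSON.h>

int main(void) {
    cJSON *src = cJSON_Parse("{\"status\":\"success\",\"count\":2}");
    cJSON *dst = cJSON_CreateObject();
    cJSON *item = NULL;
    cJSON_ArrayForEach(item, src) {
        if (item->string) { /* member key, as in the loop above */
            cJSON_AddItemToObject(dst, item->string, cJSON_Duplicate(item, 1));
        }
    }
    char *out = cJSON_PrintUnformatted(dst);
    printf("%s\n", out); /* {"status":"success","count":2} */
    free(out);
    cJSON_Delete(src);
    cJSON_Delete(dst);
    return 0;
}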
@@ -397,160 +411,6 @@ static int get_server_pubkey(char* pubkey_hex, size_t size) {
return result;
}

/**
* Handle config_query command - returns all config values
*/
static int handle_config_query_command(cJSON* response_data) {
sqlite3* db;
int rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "Database error");
return -1;
}

cJSON_AddStringToObject(response_data, "status", "success");
cJSON* data = cJSON_CreateObject();

// Query all config settings
sqlite3_stmt* stmt;
const char* sql = "SELECT key, value FROM config ORDER BY key";

if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) == SQLITE_OK) {
while (sqlite3_step(stmt) == SQLITE_ROW) {
const char* key = (const char*)sqlite3_column_text(stmt, 0);
const char* value = (const char*)sqlite3_column_text(stmt, 1);
if (key && value) {
cJSON_AddStringToObject(data, key, value);
}
}
sqlite3_finalize(stmt);
}

cJSON_AddItemToObject(response_data, "data", data);
sqlite3_close(db);

return 0;
}

/**
* Handle query_view command - returns data from a specified database view
* Command format: ["query_view", "view_name"]
*/
static int handle_query_view_command(cJSON* command_array, cJSON* response_data) {
app_log(LOG_DEBUG, "ADMIN_EVENT: handle_query_view_command called");

// Get view name from command array
cJSON* view_name_obj = cJSON_GetArrayItem(command_array, 1);
if (!view_name_obj || !cJSON_IsString(view_name_obj)) {
app_log(LOG_ERROR, "ADMIN_EVENT: View name missing or not a string");
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "View name required");
return -1;
}

const char* view_name = cJSON_GetStringValue(view_name_obj);
app_log(LOG_DEBUG, "ADMIN_EVENT: Querying view: %s", view_name);

// Validate view name (whitelist approach for security)
const char* allowed_views[] = {
"blob_overview",
"blob_type_distribution",
"blob_time_stats",
"top_uploaders",
NULL
};

int view_allowed = 0;
for (int i = 0; allowed_views[i] != NULL; i++) {
if (strcmp(view_name, allowed_views[i]) == 0) {
view_allowed = 1;
break;
}
}

if (!view_allowed) {
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "Invalid view name");
app_log(LOG_WARN, "ADMIN_EVENT: Attempted to query invalid view: %s", view_name);
return -1;
}

app_log(LOG_DEBUG, "ADMIN_EVENT: View '%s' is allowed, opening database: %s", view_name, g_db_path);

// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
app_log(LOG_ERROR, "ADMIN_EVENT: Failed to open database: %s (error: %s)", g_db_path, sqlite3_errmsg(db));
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "Database error");
return -1;
}

// Build SQL query
char sql[256];
snprintf(sql, sizeof(sql), "SELECT * FROM %s", view_name);

app_log(LOG_DEBUG, "ADMIN_EVENT: Executing SQL: %s", sql);

sqlite3_stmt* stmt;
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
app_log(LOG_ERROR, "ADMIN_EVENT: Failed to prepare query: %s (error: %s)", sql, sqlite3_errmsg(db));
sqlite3_close(db);
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "Failed to prepare query");
return -1;
}

// Get column count and names
int col_count = sqlite3_column_count(stmt);

// Create results array
cJSON* results = cJSON_CreateArray();

// Fetch all rows
while (sqlite3_step(stmt) == SQLITE_ROW) {
cJSON* row = cJSON_CreateObject();

for (int i = 0; i < col_count; i++) {
const char* col_name = sqlite3_column_name(stmt, i);
int col_type = sqlite3_column_type(stmt, i);

switch (col_type) {
case SQLITE_INTEGER:
cJSON_AddNumberToObject(row, col_name, (double)sqlite3_column_int64(stmt, i));
break;
case SQLITE_FLOAT:
cJSON_AddNumberToObject(row, col_name, sqlite3_column_double(stmt, i));
break;
case SQLITE_TEXT:
cJSON_AddStringToObject(row, col_name, (const char*)sqlite3_column_text(stmt, i));
break;
case SQLITE_NULL:
cJSON_AddNullToObject(row, col_name);
break;
default:
// For BLOB or unknown types, skip
break;
}
}

cJSON_AddItemToArray(results, row);
}

sqlite3_finalize(stmt);
sqlite3_close(db);

// Build response
cJSON_AddStringToObject(response_data, "status", "success");
cJSON_AddStringToObject(response_data, "view_name", view_name);
cJSON_AddItemToObject(response_data, "data", results);

app_log(LOG_DEBUG, "ADMIN_EVENT: Query view '%s' returned %d rows", view_name, cJSON_GetArraySize(results));

return 0;
}

/**
* Send Kind 23459 admin response event
@@ -10,7 +10,7 @@
static void serve_embedded_file(const unsigned char* data, size_t size, const char* content_type) {
printf("Status: 200 OK\r\n");
printf("Content-Type: %s\r\n", content_type);
printf("Content-Length: %zu\r\n", size);
printf("Content-Length: %lu\r\n", (unsigned long)size);
printf("Cache-Control: public, max-age=3600\r\n");
printf("\r\n");
fwrite((void*)data, 1, size, stdout);
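The Content-Length change swaps the C99 %zu specifier for %lu with an explicit cast, presumably to sidestep printf implementations that mishandle %zu; both forms print the same value wherever size_t fits in unsigned long:

#include <stdio.h>
#include <stddef.h>

int main(void) {
    size_t size = 1024;
    printf("Content-Length: %zu\r\n", size);                /* C99 form */
    printf("Content-Length: %lu\r\n", (unsigned long)size); /* portable fallback used above */
    return 0;
}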
File diff suppressed because it is too large

@@ -10,8 +10,8 @@
// Version information (auto-updated by build system)
#define VERSION_MAJOR 0
#define VERSION_MINOR 1
#define VERSION_PATCH 17
#define VERSION "v0.1.17"
#define VERSION_PATCH 24
#define VERSION "v0.1.24"

#include <stddef.h>
#include <stdint.h>

437 src/main.c
@@ -21,6 +21,8 @@
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
#include <dirent.h>
#include <sched.h>

// Centralized logging system (declaration in ginxsom.h)
void app_log(log_level_t level, const char *format, ...) {
@@ -176,25 +178,32 @@ int initialize_database(const char *db_path) {
return -1;
}

// Create auth_rules table
// Create system table for runtime metrics (read-only, updated by server)
const char *create_system =
"CREATE TABLE IF NOT EXISTS system ("
" key TEXT PRIMARY KEY NOT NULL,"
" value TEXT NOT NULL,"
" updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))"
");";

rc = sqlite3_exec(db, create_system, NULL, NULL, &err_msg);
if (rc != SQLITE_OK) {
fprintf(stderr, "Failed to create system table: %s\n", err_msg);
sqlite3_free(err_msg);
sqlite3_close(db);
return -1;
}

// Create auth_rules table (c-relay compatible schema)
const char *create_auth_rules =
"CREATE TABLE IF NOT EXISTS auth_rules ("
" id INTEGER PRIMARY KEY AUTOINCREMENT,"
" rule_type TEXT NOT NULL,"
" rule_target TEXT NOT NULL,"
" operation TEXT NOT NULL DEFAULT '*',"
" enabled INTEGER NOT NULL DEFAULT 1,"
" priority INTEGER NOT NULL DEFAULT 100,"
" description TEXT,"
" created_by TEXT,"
" pattern_type TEXT NOT NULL,"
" pattern_value TEXT NOT NULL,"
" active INTEGER NOT NULL DEFAULT 1,"
" created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),"
" updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),"
" CHECK (rule_type IN ('pubkey_blacklist', 'pubkey_whitelist',"
" 'hash_blacklist', 'mime_blacklist', 'mime_whitelist')),"
" CHECK (operation IN ('upload', 'delete', 'list', '*')),"
" CHECK (enabled IN (0, 1)),"
" CHECK (priority >= 0),"
" UNIQUE(rule_type, rule_target, operation)"
" updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))"
");";

rc = sqlite3_exec(db, create_auth_rules, NULL, NULL, &err_msg);
@@ -211,11 +220,9 @@ int initialize_database(const char *db_path) {
"CREATE INDEX IF NOT EXISTS idx_blobs_uploader_pubkey ON blobs(uploader_pubkey);"
"CREATE INDEX IF NOT EXISTS idx_blobs_type ON blobs(type);"
"CREATE INDEX IF NOT EXISTS idx_config_updated_at ON config(updated_at);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_type_target ON auth_rules(rule_type, rule_target);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_operation ON auth_rules(operation);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_enabled ON auth_rules(enabled);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_priority ON auth_rules(priority);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_type_operation ON auth_rules(rule_type, operation, enabled);";
"CREATE INDEX IF NOT EXISTS idx_auth_rules_type ON auth_rules(rule_type);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_pattern ON auth_rules(pattern_type, pattern_value);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_active ON auth_rules(active);";

rc = sqlite3_exec(db, create_indexes, NULL, NULL, &err_msg);
if (rc != SQLITE_OK) {
@@ -268,14 +275,20 @@ int initialize_database(const char *db_path) {
return -1;
}

// Create blob_overview view for admin dashboard
// Create blob_overview view for admin dashboard with system metrics
const char *create_overview_view =
"CREATE VIEW IF NOT EXISTS blob_overview AS "
"SELECT "
" COUNT(*) as total_blobs, "
" COALESCE(SUM(size), 0) as total_bytes, "
" MIN(uploaded_at) as first_upload, "
" MAX(uploaded_at) as last_upload "
" MAX(uploaded_at) as last_upload, "
" (SELECT value FROM system WHERE key = 'version') as version, "
" (SELECT value FROM system WHERE key = 'process_id') as process_id, "
" (SELECT value FROM system WHERE key = 'memory_mb') as memory_mb, "
" (SELECT value FROM system WHERE key = 'cpu_core') as cpu_core, "
" (SELECT value FROM system WHERE key = 'fs_blob_count') as fs_blob_count, "
" (SELECT value FROM system WHERE key = 'fs_blob_size_mb') as fs_blob_size_mb "
"FROM blobs;";

rc = sqlite3_exec(db, create_overview_view, NULL, NULL, &err_msg);
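Any SQLite client can read the widened view the same way the dashboard does. A sketch (ginxsom.db is a placeholder path, and the metric columns stay NULL until update_system_metrics has run at least once):

#include <stdio.h>
#include <sqlite3.h>

int main(void) {
    sqlite3 *db;
    if (sqlite3_open_v2("ginxsom.db", &db, SQLITE_OPEN_READONLY, NULL) != SQLITE_OK) return 1;
    sqlite3_stmt *stmt;
    const char *sql = "SELECT total_blobs, total_bytes, memory_mb FROM blob_overview";
    if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) == SQLITE_OK) {
        if (sqlite3_step(stmt) == SQLITE_ROW) {
            const unsigned char *mem = sqlite3_column_text(stmt, 2);
            printf("blobs=%lld bytes=%lld memory_mb=%s\n",
                   (long long)sqlite3_column_int64(stmt, 0),
                   (long long)sqlite3_column_int64(stmt, 1),
                   mem ? (const char *)mem : "n/a");
        }
        sqlite3_finalize(stmt);
    }
    sqlite3_close(db);
    return 0;
}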
@@ -732,6 +745,171 @@ int get_blossom_private_key(char *seckey_out, size_t max_len) {
return -1;
}

// Helper function to count filesystem blobs
static int count_filesystem_blobs(long *total_count, long *total_size_bytes) {
DIR *dir = opendir(g_storage_dir);
if (!dir) {
return -1;
}

*total_count = 0;
*total_size_bytes = 0;

struct dirent *entry;
while ((entry = readdir(dir)) != NULL) {
// Skip . and ..
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}

// Build full path
char filepath[MAX_PATH_LEN];
snprintf(filepath, sizeof(filepath), "%s/%s", g_storage_dir, entry->d_name);

// Get file stats
struct stat st;
if (stat(filepath, &st) == 0 && S_ISREG(st.st_mode)) {
(*total_count)++;
*total_size_bytes += st.st_size;
}
}

closedir(dir);
return 0;
}

// Helper function to get memory usage in MB from /proc/self/status
static long get_memory_usage_mb(void) {
FILE *fp = fopen("/proc/self/status", "r");
if (!fp) {
return -1;
}

char line[256];
long vmrss_kb = -1;

while (fgets(line, sizeof(line), fp)) {
if (strncmp(line, "VmRSS:", 6) == 0) {
// Parse VmRSS value (in kB)
char *p = line + 6;
while (*p == ' ' || *p == '\t') p++;
vmrss_kb = atol(p);
break;
}
}

fclose(fp);

if (vmrss_kb > 0) {
return vmrss_kb / 1024; // Convert kB to MB
}
return -1;
}

// Helper function to get CPU core
static int get_cpu_core(void) {
#ifdef __linux__
return sched_getcpu();
#else
return -1;
#endif
}

// Update system metrics in system table (key-value pairs)
static int update_system_metrics(void) {
sqlite3 *db;
sqlite3_stmt *stmt;
int rc;

rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READWRITE, NULL);
if (rc != SQLITE_OK) {
return -1;
}

// Get system metrics
int pid = getpid();
long memory_mb = get_memory_usage_mb();
int cpu_core = get_cpu_core();
long fs_blob_count = 0;
long fs_blob_size = 0;
count_filesystem_blobs(&fs_blob_count, &fs_blob_size);
long fs_blob_size_mb = fs_blob_size / (1024 * 1024);

// Prepare INSERT OR REPLACE statement for key-value updates
const char *sql = "INSERT OR REPLACE INTO system (key, value, updated_at) VALUES (?, ?, strftime('%s', 'now'))";

// Update version
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, "version", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, VERSION, -1, SQLITE_STATIC);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}

// Update process_id
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char pid_str[32];
snprintf(pid_str, sizeof(pid_str), "%d", pid);
sqlite3_bind_text(stmt, 1, "process_id", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, pid_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}

// Update memory_mb
if (memory_mb > 0) {
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char mem_str[32];
snprintf(mem_str, sizeof(mem_str), "%ld", memory_mb);
sqlite3_bind_text(stmt, 1, "memory_mb", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, mem_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
}

// Update cpu_core
if (cpu_core >= 0) {
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char core_str[32];
snprintf(core_str, sizeof(core_str), "%d", cpu_core);
sqlite3_bind_text(stmt, 1, "cpu_core", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, core_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
}

// Update fs_blob_count
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char count_str[32];
snprintf(count_str, sizeof(count_str), "%ld", fs_blob_count);
sqlite3_bind_text(stmt, 1, "fs_blob_count", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, count_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}

// Update fs_blob_size_mb
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char size_str[32];
snprintf(size_str, sizeof(size_str), "%ld", fs_blob_size_mb);
sqlite3_bind_text(stmt, 1, "fs_blob_size_mb", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, size_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}

sqlite3_close(db);
return 0;
}

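The five prepare/bind/step/finalize blocks in update_system_metrics are identical except for the key and value. A hypothetical helper (upsert_metric is not a function in this tree) could collapse each metric to one call:

#include <sqlite3.h>

static int upsert_metric(sqlite3 *db, const char *key, const char *value) {
    static const char *sql =
        "INSERT OR REPLACE INTO system (key, value, updated_at) "
        "VALUES (?, ?, strftime('%s', 'now'))";
    sqlite3_stmt *stmt;
    if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) return -1;
    sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 2, value, -1, SQLITE_TRANSIENT);
    int rc = (sqlite3_step(stmt) == SQLITE_DONE) ? 0 : -1;
    sqlite3_finalize(stmt);
    return rc;
}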
// Insert blob metadata into database
|
||||
int insert_blob_metadata(const char *sha256, long size, const char *type,
|
||||
long uploaded_at, const char *uploader_pubkey,
|
||||
@@ -1848,6 +2026,9 @@ void handle_upload_request_with_validation(nostr_request_result_t* validation_re
|
||||
return;
|
||||
}
|
||||
|
||||
// Update system metrics after successful blob upload
|
||||
update_system_metrics();
|
||||
|
||||
// Get origin from config
|
||||
char origin[256];
|
||||
nip94_get_origin(origin, sizeof(origin));
|
||||
@@ -2142,37 +2323,105 @@ int main(int argc, char *argv[]) {

        fprintf(stderr, "KEYS: Derived pubkey: %s\n", g_blossom_pubkey);

        // Scenario 5: Both database and keys specified - validate match
        // Scenario 5: Both database path and keys specified
        if (db_path_specified) {
            fprintf(stderr, "\n=== SCENARIO 5: DATABASE + KEYS (VALIDATION) ===\n");
            strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
            g_db_path[sizeof(g_db_path) - 1] = '\0';
            fprintf(stderr, "\n=== SCENARIO 5: DATABASE PATH + KEYS ===\n");

            // Check if specified path is a directory or file
            struct stat st;
            int is_directory = 0;

            if (stat(specified_db_path, &st) == 0) {
                is_directory = S_ISDIR(st.st_mode);
            } else {
                // Path doesn't exist - assume it's meant to be a directory
                is_directory = (specified_db_path[strlen(specified_db_path) - 1] == '/' ||
                                strstr(specified_db_path, ".db") == NULL);
            }

            if (is_directory) {
                // Build database path from directory + derived pubkey
                snprintf(g_db_path, sizeof(g_db_path), "%s/%s.db", specified_db_path, g_blossom_pubkey);
                fprintf(stderr, "DATABASE: Using directory path, derived database: %s\n", g_db_path);
            } else {
                // Use specified file path directly
                strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
                g_db_path[sizeof(g_db_path) - 1] = '\0';
                fprintf(stderr, "DATABASE: Using file path: %s\n", g_db_path);
            }

            // Check if database exists
            struct stat st;
            if (stat(g_db_path, &st) != 0) {
                fprintf(stderr, "ERROR: Database file not found: %s\n", g_db_path);
                return 1;
            }

            // Load keys from database
            if (get_blossom_private_key(g_blossom_seckey, sizeof(g_blossom_seckey)) != 0) {
                fprintf(stderr, "ERROR: Invalid database: missing server keys\n");
                return 1;
            }

            // Compare with provided key
            if (strcmp(g_blossom_seckey, test_server_privkey) != 0) {
                fprintf(stderr, "ERROR: Server private key doesn't match database\n");
                fprintf(stderr, "  Provided key and database keys are different\n");
                return 1;
            }

            fprintf(stderr, "VALIDATION: Keys match database - continuing\n");

            // Validate pubkey matches filename
            if (validate_database_pubkey_match(g_db_path, g_blossom_pubkey) != 0) {
                return 1;
            if (stat(g_db_path, &st) == 0) {
                // Database exists - validate keys match
                fprintf(stderr, "DATABASE: Found existing database, validating keys...\n");

                // Load keys from database
                if (get_blossom_private_key(g_blossom_seckey, sizeof(g_blossom_seckey)) != 0) {
                    fprintf(stderr, "ERROR: Invalid database: missing server keys\n");
                    return 1;
                }

                // Compare with provided key
                if (strcmp(g_blossom_seckey, test_server_privkey) != 0) {
                    fprintf(stderr, "ERROR: Server private key doesn't match database\n");
                    fprintf(stderr, "  Provided key and database keys are different\n");
                    return 1;
                }

                fprintf(stderr, "VALIDATION: Keys match database - continuing\n");

                // Validate pubkey matches filename
                if (validate_database_pubkey_match(g_db_path, g_blossom_pubkey) != 0) {
                    return 1;
                }
            } else {
                // Database doesn't exist - create it with provided keys
                fprintf(stderr, "DATABASE: No existing database, creating new one...\n");

                // Initialize new database
                if (initialize_database(g_db_path) != 0) {
                    fprintf(stderr, "ERROR: Failed to initialize database\n");
                    return 1;
                }

                // Store keys
                strncpy(g_blossom_seckey, test_server_privkey, sizeof(g_blossom_seckey) - 1);
                g_blossom_seckey[64] = '\0';

                if (store_blossom_private_key(test_server_privkey) != 0) {
                    fprintf(stderr, "ERROR: Failed to store private key\n");
                    return 1;
                }

                // Store pubkey in config
                sqlite3 *db;
                sqlite3_stmt *stmt;
                int rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READWRITE, NULL);
                if (rc == SQLITE_OK) {
                    const char *sql = "INSERT OR REPLACE INTO config (key, value, description) VALUES (?, ?, ?)";
                    rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
                    if (rc == SQLITE_OK) {
                        sqlite3_bind_text(stmt, 1, "blossom_pubkey", -1, SQLITE_STATIC);
                        sqlite3_bind_text(stmt, 2, g_blossom_pubkey, -1, SQLITE_STATIC);
                        sqlite3_bind_text(stmt, 3, "Blossom server's public key", -1, SQLITE_STATIC);
                        sqlite3_step(stmt);
                        sqlite3_finalize(stmt);
                    }

                    if (strlen(g_admin_pubkey) > 0) {
                        rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
                        if (rc == SQLITE_OK) {
                            sqlite3_bind_text(stmt, 1, "admin_pubkey", -1, SQLITE_STATIC);
                            sqlite3_bind_text(stmt, 2, g_admin_pubkey, -1, SQLITE_STATIC);
                            sqlite3_bind_text(stmt, 3, "Admin public key", -1, SQLITE_STATIC);
                            sqlite3_step(stmt);
                            sqlite3_finalize(stmt);
                        }
                    }
                    sqlite3_close(db);
                }

                fprintf(stderr, "DATABASE: New database created successfully\n");
            }
        }
        // Scenario 3 continued: Create new database with provided keys
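The heuristic above treats the --db-path argument as a directory when it exists as one, ends with a slash, or contains no ".db" substring; a directory argument then yields a database file named after the derived server pubkey. The same decision in shell terms, ignoring the stat() check for brevity (paths are illustrative; <64-hex-pubkey> stands for the key derived from --server-privkey):

DB_ARG="/var/lib/ginxsom/"      # assumed example argument
SERVER_PUBKEY="<64-hex-pubkey>"
if [[ "$DB_ARG" == */ || "$DB_ARG" != *.db* ]]; then
    DB_FILE="${DB_ARG%/}/${SERVER_PUBKEY}.db"   # directory: derive the filename
else
    DB_FILE="$DB_ARG"                           # explicit file path: use as-is
fi
echo "would open: $DB_FILE"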
@@ -2238,30 +2487,78 @@ int main(int argc, char *argv[]) {
        }
    }

    // Scenario 2: Database Specified (--db-path)
    // Scenario 2: Database Path Specified (--db-path)
    // Note: --db-path should specify a DIRECTORY, not a full file path
    // The actual database filename will be derived from the server's pubkey
    else if (db_path_specified) {
        fprintf(stderr, "\n=== SCENARIO 2: DATABASE SPECIFIED ===\n");
        strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
        g_db_path[sizeof(g_db_path) - 1] = '\0';
        fprintf(stderr, "\n=== SCENARIO 2: DATABASE DIRECTORY SPECIFIED ===\n");

        // Check if database exists
        // Check if specified path is a directory or file
        struct stat st;
        if (stat(g_db_path, &st) != 0) {
            fprintf(stderr, "ERROR: Database file not found: %s\n", g_db_path);
            fprintf(stderr, "  → Specify a different database or let the application create a new one\n");
            return 1;
        int is_directory = 0;

        if (stat(specified_db_path, &st) == 0) {
            is_directory = S_ISDIR(st.st_mode);
        } else {
            // Path doesn't exist - assume it's meant to be a directory
            is_directory = (specified_db_path[strlen(specified_db_path) - 1] == '/' ||
                            strstr(specified_db_path, ".db") == NULL);
        }

        fprintf(stderr, "DATABASE: Opening existing database: %s\n", g_db_path);

        // Load keys from database
        if (load_server_keys() != 0) {
            fprintf(stderr, "ERROR: Failed to load keys from database\n");
            fprintf(stderr, "  → Database may be corrupted or not a valid ginxsom database\n");
            return 1;
        if (is_directory) {
            // Treat as directory - will derive filename from pubkey after loading keys
            fprintf(stderr, "DATABASE: Directory specified: %s\n", specified_db_path);
            fprintf(stderr, "DATABASE: Will derive filename from server pubkey\n");

            // Look for any .db file that matches the pubkey pattern
            DIR *dir = opendir(specified_db_path);
            int found_db = 0;

            if (dir) {
                struct dirent *entry;
                while ((entry = readdir(dir)) != NULL) {
                    // Check if filename matches pattern: <64-hex-chars>.db
                    size_t name_len = strlen(entry->d_name);
                    if (name_len == 67 && strcmp(entry->d_name + 64, ".db") == 0) {
                        // Found a potential database file
                        snprintf(g_db_path, sizeof(g_db_path), "%s/%s", specified_db_path, entry->d_name);
                        found_db = 1;
                        fprintf(stderr, "DATABASE: Found existing database: %s\n", g_db_path);
                        break;
                    }
                }
                closedir(dir);
            }

            if (!found_db) {
                // No database found - this is OK, we'll create one if we have keys
                fprintf(stderr, "DATABASE: No existing database found in directory\n");
                // g_db_path will be set later based on pubkey
            }
        } else {
            // Treat as full file path (legacy behavior)
            strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
            g_db_path[sizeof(g_db_path) - 1] = '\0';
        }

        fprintf(stderr, "DATABASE: Keys loaded and validated successfully\n");
        // If we found a database file, try to load it
        if (g_db_path[0] != '\0' && stat(g_db_path, &st) == 0) {
            fprintf(stderr, "DATABASE: Opening existing database: %s\n", g_db_path);

            // Load keys from database
            if (load_server_keys() != 0) {
                fprintf(stderr, "ERROR: Failed to load keys from database\n");
                fprintf(stderr, "  → Database may be corrupted or not a valid ginxsom database\n");
                return 1;
            }

            fprintf(stderr, "DATABASE: Keys loaded and validated successfully\n");
        } else {
            // No database file exists - we need keys to create one
            fprintf(stderr, "ERROR: No database found and no --server-privkey provided\n");
            fprintf(stderr, "  → Use --server-privkey to create a new database\n");
            return 1;
        }
    }

    // Scenario 1: No Arguments (Fresh Start)
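The length test in the scan (name_len == 67) is just 64 hex characters plus the three characters of ".db", and the loop takes the first matching file it finds. A shell equivalent that is stricter about the hex alphabet (illustrative; $DB_DIR stands for the --db-path argument):

for f in "$DB_DIR"/*.db; do
    name=$(basename "$f")
    if [[ "$name" =~ ^[0-9a-f]{64}\.db$ ]]; then
        echo "candidate database: $f"
        break
    fi
done

Note that the C check compares only length and suffix, so a 67-character name containing non-hex characters would also be picked up.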
@@ -2353,6 +2650,14 @@ if (!config_loaded /* && !initialize_server_config() */) {
        app_log(LOG_INFO, "Admin commands system initialized successfully");
    }

    // Initialize system metrics at startup
    app_log(LOG_INFO, "Initializing system metrics...");
    if (update_system_metrics() == 0) {
        app_log(LOG_INFO, "System metrics initialized successfully");
    } else {
        app_log(LOG_WARN, "Failed to initialize system metrics");
    }

    /////////////////////////////////////////////////////////////////////
    // THIS IS WHERE THE REQUESTS ENTER THE FastCGI
    /////////////////////////////////////////////////////////////////////

@@ -23,6 +23,8 @@
#include <strings.h>
#include <time.h>

#define MAX_MIME_TYPE_LEN 128 // Define here for direct use

// Additional error codes for ginxsom-specific functionality
#define NOSTR_ERROR_CRYPTO_INIT -100
#define NOSTR_ERROR_AUTH_REQUIRED -101
@@ -671,8 +673,8 @@ int nostr_validate_unified_request(const nostr_unified_request_t *request,
                        "VALIDATOR_DEBUG: STEP 10 PASSED - Blossom authentication succeeded\n");
    strcpy(result->reason, "Blossom authentication passed");

  } else if (event_kind == 33335) {
    // 10. Admin/Configuration Event Validation (Kind 33335)
  } else if (event_kind == 33335 || event_kind == 23459 || event_kind == 23458) {
    // 10. Admin/Configuration Event Validation (Kind 33335, 23459, 23458)
    // Verify admin authorization, check required tags, validate expiration
    validator_debug_log("VALIDATOR_DEBUG: STEP 10 - Processing Admin/Configuration "
                        "authentication (kind 33335)\n");
@@ -775,6 +777,16 @@ int nostr_validate_unified_request(const nostr_unified_request_t *request,

  cJSON_Delete(event);

  // Skip rule evaluation for admin events
  if (event_kind == 33335 || event_kind == 23459 || event_kind == 23458) {
    char admin_skip_msg[256];
    snprintf(admin_skip_msg, sizeof(admin_skip_msg),
             "VALIDATOR_DEBUG: Admin event (kind %d) - skipping rule evaluation\n", event_kind);
    validator_debug_log(admin_skip_msg);
    strcpy(result->reason, "Admin event validated - rules bypassed");
    return NOSTR_SUCCESS;
  }

  // STEP 12 PASSED: Protocol validation complete - continue to database rule
  // evaluation
  validator_debug_log("VALIDATOR_DEBUG: STEP 12 PASSED - Protocol validation "
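Kinds 33335, 23459, and 23458 now short-circuit before the rules engine, so an admin event should be accepted even when auth_rules would deny its pubkey. A quick probe of that property (hypothetical: it assumes ADMIN_PUBKEY is exported alongside ADMIN_PRIVKEY in .test_keys, and that send_admin_command from the test suite below is in scope):

sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, pattern_type, pattern_value, active) VALUES ('blacklist_pubkey', 'pubkey', '$ADMIN_PUBKEY', 1);"
send_admin_command '["sql_query", "SELECT COUNT(*) FROM auth_rules"]'   # should still succeed despite the blacklist row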
@@ -1321,6 +1333,13 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
  sqlite3 *db = NULL;
  sqlite3_stmt *stmt = NULL;
  int rc;
  int pubkey_whitelisted = 0;
  int pubkey_whitelist_exists = 0;
  int mime_whitelisted = 0;
  int mime_whitelist_exists = 0;
  int mime_whitelist_count = 0;
  int pubkey_whitelist_count = 0;
  char rules_msg[256];

  if (!pubkey) {
    validator_debug_log(
@@ -1328,7 +1347,12 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
    return NOSTR_ERROR_INVALID_INPUT;
  }

  char rules_msg[256];
  if (operation && (strcmp(operation, "admin_event") == 0 ||
                    strcmp(operation, "admin") == 0)) {
    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - Admin management request, skipping auth rules\n");
    return NOSTR_SUCCESS;
  }

  sprintf(rules_msg,
          "VALIDATOR_DEBUG: RULES ENGINE - Checking rules for pubkey=%.32s..., "
          "operation=%s, mime_type=%s\n",
@@ -1344,18 +1368,14 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
  }

  // Step 1: Check pubkey blacklist (highest priority)
  // Match both exact operation and wildcard '*'
  const char *blacklist_sql =
      "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
      "'pubkey_blacklist' AND rule_target = ? AND (operation = ? OR operation = '*') AND enabled = "
      "1 ORDER BY priority LIMIT 1";
      "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'blacklist_pubkey' AND pattern_type = 'pubkey' AND pattern_value = ? AND active = 1 LIMIT 1";
  rc = sqlite3_prepare_v2(db, blacklist_sql, -1, &stmt, NULL);
  if (rc == SQLITE_OK) {
    sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 2, operation ? operation : "", -1, SQLITE_STATIC);

    if (sqlite3_step(stmt) == SQLITE_ROW) {
      const char *description = (const char *)sqlite3_column_text(stmt, 1);
      const char *description = "Pubkey blacklisted";
      validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 1 FAILED - "
                          "Pubkey blacklisted\n");
      char blacklist_msg[256];
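The rewritten queries assume a reshaped auth_rules table: rule_target, operation, priority, and enabled are gone in favor of pattern_type, pattern_value, and active. The CREATE statement itself is not part of this diff, but a minimal shape consistent with every query above would be:

sqlite3 "$DB_PATH" "CREATE TABLE IF NOT EXISTS auth_rules (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    rule_type TEXT NOT NULL,      -- 'blacklist_pubkey', 'blacklist_hash', 'blacklist_mime', 'whitelist_pubkey', 'whitelist_mime'
    pattern_type TEXT NOT NULL,   -- 'pubkey', 'hash', or 'mime'
    pattern_value TEXT NOT NULL,  -- hex pubkey, sha256 hash, or MIME type/pattern
    active INTEGER DEFAULT 1
);"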
@@ -1380,18 +1400,14 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,

  // Step 2: Check hash blacklist
  if (resource_hash) {
    // Match both exact operation and wildcard '*'
    const char *hash_blacklist_sql =
        "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
        "'hash_blacklist' AND rule_target = ? AND (operation = ? OR operation = '*') AND enabled = "
        "1 ORDER BY priority LIMIT 1";
        "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'blacklist_hash' AND pattern_type = 'hash' AND pattern_value = ? AND active = 1 LIMIT 1";
    rc = sqlite3_prepare_v2(db, hash_blacklist_sql, -1, &stmt, NULL);
    if (rc == SQLITE_OK) {
      sqlite3_bind_text(stmt, 1, resource_hash, -1, SQLITE_STATIC);
      sqlite3_bind_text(stmt, 2, operation ? operation : "", -1, SQLITE_STATIC);

      if (sqlite3_step(stmt) == SQLITE_ROW) {
        const char *description = (const char *)sqlite3_column_text(stmt, 1);
        const char *description = "Hash blacklisted";
        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 2 FAILED - "
                            "Hash blacklisted\n");
        char hash_blacklist_msg[256];
@@ -1423,17 +1439,14 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
  if (mime_type) {
    // Match both exact MIME type and wildcard patterns (e.g., 'image/*')
    const char *mime_blacklist_sql =
        "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
        "'mime_blacklist' AND (rule_target = ? OR rule_target LIKE '%/*' AND ? LIKE REPLACE(rule_target, '*', '%')) AND (operation = ? OR operation = '*') AND enabled = "
        "1 ORDER BY priority LIMIT 1";
        "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'blacklist_mime' AND pattern_type = 'mime' AND (pattern_value = ? OR pattern_value LIKE '%/*' AND ? LIKE REPLACE(pattern_value, '*', '%')) AND active = 1 LIMIT 1";
    rc = sqlite3_prepare_v2(db, mime_blacklist_sql, -1, &stmt, NULL);
    if (rc == SQLITE_OK) {
      sqlite3_bind_text(stmt, 1, mime_type, -1, SQLITE_STATIC);
      sqlite3_bind_text(stmt, 2, mime_type, -1, SQLITE_STATIC);
      sqlite3_bind_text(stmt, 3, operation ? operation : "", -1, SQLITE_STATIC);

      if (sqlite3_step(stmt) == SQLITE_ROW) {
        const char *description = (const char *)sqlite3_column_text(stmt, 1);
        const char *description = "MIME type blacklisted";
        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 FAILED - "
                            "MIME type blacklisted\n");
        char mime_blacklist_msg[256];
@@ -1462,133 +1475,151 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
  }

  // Step 4: Check pubkey whitelist
  // Match both exact operation and wildcard '*'
  const char *whitelist_sql =
      "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
      "'pubkey_whitelist' AND rule_target = ? AND (operation = ? OR operation = '*') AND enabled = "
      "1 ORDER BY priority LIMIT 1";
      "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'whitelist_pubkey' AND pattern_type = 'pubkey' AND pattern_value = ? AND active = 1 LIMIT 1";
  rc = sqlite3_prepare_v2(db, whitelist_sql, -1, &stmt, NULL);
  if (rc == SQLITE_OK) {
    sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 2, operation ? operation : "", -1, SQLITE_STATIC);

    if (sqlite3_step(stmt) == SQLITE_ROW) {
      const char *description = (const char *)sqlite3_column_text(stmt, 1);
      validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 PASSED - "
      const char *description = "Pubkey whitelisted";
      validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 PASSED - "
                          "Pubkey whitelisted\n");
      char whitelist_msg[256];
      sprintf(whitelist_msg,
              "VALIDATOR_DEBUG: RULES ENGINE - Whitelist rule matched: %s\n",
              description ? description : "Unknown");
      snprintf(whitelist_msg,
               sizeof(whitelist_msg),
               "VALIDATOR_DEBUG: RULES ENGINE - Whitelist rule matched: %s\n",
               description ? description : "Unknown");
      validator_debug_log(whitelist_msg);
      sqlite3_finalize(stmt);
      sqlite3_close(db);
      return NOSTR_SUCCESS; // Allow whitelisted pubkey
      pubkey_whitelisted = 1;
    } else {
      validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 - Pubkey not whitelisted\n");
    }
    sqlite3_finalize(stmt);
  } else {
    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 FAILED - Pubkey whitelist query failed\n");
  }
  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 FAILED - Pubkey "
                      "not whitelisted\n");

  // Step 5: Check MIME type whitelist (only if not already denied)
  // Step 5: Check MIME type whitelist
  if (mime_type) {
    // Match both exact MIME type and wildcard patterns (e.g., 'image/*')
    char mime_pattern_wildcard[MAX_MIME_TYPE_LEN + 2];
    const char *mime_whitelist_sql =
        "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
        "'mime_whitelist' AND (rule_target = ? OR rule_target LIKE '%/*' AND ? LIKE REPLACE(rule_target, '*', '%')) AND (operation = ? OR operation = '*') AND enabled = "
        "1 ORDER BY priority LIMIT 1";
        "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'whitelist_mime' AND pattern_type = 'mime' AND (pattern_value = ? OR pattern_value LIKE ? ) AND active = 1 LIMIT 1";
    rc = sqlite3_prepare_v2(db, mime_whitelist_sql, -1, &stmt, NULL);
    if (rc == SQLITE_OK) {
      sqlite3_bind_text(stmt, 1, mime_type, -1, SQLITE_STATIC);
      sqlite3_bind_text(stmt, 2, mime_type, -1, SQLITE_STATIC);
      sqlite3_bind_text(stmt, 3, operation ? operation : "", -1, SQLITE_STATIC);
      const char *slash_pos = strchr(mime_type, '/');
      if (slash_pos != NULL) {
        size_t prefix_len = slash_pos - mime_type;
        if (prefix_len < MAX_MIME_TYPE_LEN) {
          snprintf(mime_pattern_wildcard, sizeof(mime_pattern_wildcard), "%.*s/%%", (int)prefix_len, mime_type);
        } else {
          snprintf(mime_pattern_wildcard, sizeof(mime_pattern_wildcard), "%%/%%");
        }
      } else {
        snprintf(mime_pattern_wildcard, sizeof(mime_pattern_wildcard), "%s/%%", mime_type);
      }
      sqlite3_bind_text(stmt, 2, mime_pattern_wildcard, -1, SQLITE_TRANSIENT);

      if (sqlite3_step(stmt) == SQLITE_ROW) {
        const char *description = (const char *)sqlite3_column_text(stmt, 1);
        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 PASSED - "
                            "MIME type whitelisted\n");
        const char *description = "MIME type whitelisted";
        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 PASSED - MIME type whitelisted\n");
        char mime_whitelist_msg[256];
        sprintf(mime_whitelist_msg,
                "VALIDATOR_DEBUG: RULES ENGINE - MIME whitelist rule matched: %s\n",
                description ? description : "Unknown");
        snprintf(mime_whitelist_msg,
                 sizeof(mime_whitelist_msg),
                 "VALIDATOR_DEBUG: RULES ENGINE - MIME whitelist rule matched: %s (pattern=%s)\n",
                 description ? description : "Unknown",
                 mime_pattern_wildcard);
        validator_debug_log(mime_whitelist_msg);
        sqlite3_finalize(stmt);
        sqlite3_close(db);
        return NOSTR_SUCCESS; // Allow whitelisted MIME type
        mime_whitelisted = 1;
      } else {
        char mime_not_msg[256];
        snprintf(mime_not_msg,
                 sizeof(mime_not_msg),
                 "VALIDATOR_DEBUG: RULES ENGINE - STEP 5 - MIME type not whitelisted (pattern=%s)\n",
                 mime_pattern_wildcard);
        validator_debug_log(mime_not_msg);
      }
      sqlite3_finalize(stmt);
    } else {
      validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 FAILED - Failed to prepare MIME whitelist query\n");
    }
    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 FAILED - MIME "
                        "type not whitelisted\n");
  } else {
    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 SKIPPED - No "
                        "MIME type provided\n");
    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 SKIPPED - No MIME type provided\n");
  }

  // Step 6: Check if any MIME whitelist rules exist - if yes, deny by default
  // Match both exact operation and wildcard '*'
  // Step 6: Count MIME whitelist rules
  const char *mime_whitelist_exists_sql =
      "SELECT COUNT(*) FROM auth_rules WHERE rule_type = 'mime_whitelist' "
      "AND (operation = ? OR operation = '*') AND enabled = 1 LIMIT 1";
      "SELECT COUNT(*) FROM auth_rules WHERE rule_type LIKE 'whitelist_mime' "
      "AND pattern_type = 'mime' AND active = 1";
  rc = sqlite3_prepare_v2(db, mime_whitelist_exists_sql, -1, &stmt, NULL);
  if (rc == SQLITE_OK) {
    sqlite3_bind_text(stmt, 1, operation ? operation : "", -1, SQLITE_STATIC);

    if (sqlite3_step(stmt) == SQLITE_ROW) {
      int mime_whitelist_count = sqlite3_column_int(stmt, 0);
      if (mime_whitelist_count > 0) {
        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 6 FAILED - "
                            "MIME whitelist exists but type not in it\n");

        // Set specific violation details for status code mapping
        strcpy(g_last_rule_violation.violation_type, "mime_whitelist_violation");
        strcpy(g_last_rule_violation.reason,
               "MIME type not whitelisted for this operation");

        sqlite3_finalize(stmt);
        sqlite3_close(db);
        return NOSTR_ERROR_AUTH_REQUIRED;
      }
      mime_whitelist_count = sqlite3_column_int(stmt, 0);
      char mime_cnt_msg[256];
      snprintf(mime_cnt_msg, sizeof(mime_cnt_msg),
               "VALIDATOR_DEBUG: RULES ENGINE - MIME whitelist count: %d\n",
               mime_whitelist_count);
      validator_debug_log(mime_cnt_msg);
    }
    sqlite3_finalize(stmt);
  } else {
    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 6 FAILED - Failed to prepare MIME whitelist count query\n");
  }
  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 6 PASSED - No "
                      "MIME whitelist restrictions apply\n");

  // Step 7: Check if any whitelist rules exist - if yes, deny by default
  // Match both exact operation and wildcard '*'
  if (mime_whitelist_count > 0 && !mime_whitelisted) {
    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - MIME whitelist exists but MIME type not allowed\n");
    strcpy(g_last_rule_violation.violation_type, "mime_whitelist_violation");
    strcpy(g_last_rule_violation.reason, "MIME type not whitelisted for this operation");
    sqlite3_close(db);
    return NOSTR_ERROR_AUTH_REQUIRED;
  }

  // Step 7: Count pubkey whitelist rules
  const char *whitelist_exists_sql =
      "SELECT COUNT(*) FROM auth_rules WHERE rule_type = 'pubkey_whitelist' "
      "AND (operation = ? OR operation = '*') AND enabled = 1 LIMIT 1";
      "SELECT COUNT(*) FROM auth_rules WHERE (rule_type LIKE 'whitelist_pubkey' OR rule_type LIKE 'pubkey_whitelist') "
      "AND pattern_type = 'pubkey' AND active = 1";
  rc = sqlite3_prepare_v2(db, whitelist_exists_sql, -1, &stmt, NULL);
  if (rc == SQLITE_OK) {
    sqlite3_bind_text(stmt, 1, operation ? operation : "", -1, SQLITE_STATIC);

    if (sqlite3_step(stmt) == SQLITE_ROW) {
      int whitelist_count = sqlite3_column_int(stmt, 0);
      if (whitelist_count > 0) {
        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 FAILED - "
                            "Whitelist exists but pubkey not in it\n");

        // Set specific violation details for status code mapping
        strcpy(g_last_rule_violation.violation_type, "whitelist_violation");
        strcpy(g_last_rule_violation.reason,
               "Public key not whitelisted for this operation");

        sqlite3_finalize(stmt);
        sqlite3_close(db);
        return NOSTR_ERROR_AUTH_REQUIRED;
      }
      pubkey_whitelist_count = sqlite3_column_int(stmt, 0);
      char pubkey_cnt_msg[256];
      snprintf(pubkey_cnt_msg, sizeof(pubkey_cnt_msg),
               "VALIDATOR_DEBUG: RULES ENGINE - Pubkey whitelist count: %d\n",
               pubkey_whitelist_count);
      validator_debug_log(pubkey_cnt_msg);
    }
    sqlite3_finalize(stmt);
  } else {
    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 7 FAILED - Failed to prepare pubkey whitelist count query\n");
  }

  if (pubkey_whitelist_count > 0) {
    char pubkey_whitelist_msg[256];
    snprintf(pubkey_whitelist_msg, sizeof(pubkey_whitelist_msg),
             "VALIDATOR_DEBUG: RULES ENGINE - Pubkey whitelist exists (%d entries)\n",
             pubkey_whitelist_count);
    validator_debug_log(pubkey_whitelist_msg);
  }

  if (pubkey_whitelist_count > 0 && !pubkey_whitelisted) {
    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - Pubkey whitelist exists but pubkey not allowed\n");
    strcpy(g_last_rule_violation.violation_type, "whitelist_violation");
    strcpy(g_last_rule_violation.reason, "Public key not whitelisted for this operation");
    sqlite3_close(db);
    return NOSTR_ERROR_AUTH_REQUIRED;
  }

  if ((mime_whitelist_count > 0 && !mime_whitelisted) ||
      (pubkey_whitelist_count > 0 && !pubkey_whitelisted)) {
    // Already handled above but include fallback
    sqlite3_close(db);
    return NOSTR_ERROR_AUTH_REQUIRED;
  }
  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 PASSED - No "
                      "whitelist restrictions apply\n");

  sqlite3_close(db);
  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 7 PASSED - All "
                      "rule checks completed, default ALLOW\n");
  return NOSTR_SUCCESS; // Default allow if no restrictive rules matched
  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - Completed whitelist checks\n");
  return NOSTR_SUCCESS;
}

/**
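Two behavioral points in the rewritten engine are worth spelling out. First, whitelist hits no longer return early: the function records pubkey_whitelisted/mime_whitelisted, counts the active whitelist rules, and denies at the end only when a whitelist exists and the request missed it; with no whitelist rules the default remains ALLOW. Second, the wildcard match binds a derived 'type/%' pattern against pattern_value LIKE ?, which can be reproduced ad hoc for an image/png upload:

sqlite3 "$DB_PATH" "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'whitelist_mime' AND pattern_type = 'mime' AND (pattern_value = 'image/png' OR pattern_value LIKE 'image/%') AND active = 1 LIMIT 1;"

Note the asymmetry this introduces: a stored exact rule such as image/jpeg also satisfies LIKE 'image/%', so whitelisting one image subtype effectively admits every image/* upload.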
@@ -11,8 +11,13 @@ SERVER_URL="https://localhost:9443"
UPLOAD_ENDPOINT="${SERVER_URL}/upload"
TEST_FILE="test_blob_$(date +%s).txt"
CLEANUP_FILES=()
NOSTR_PRIVKEY="22cc83aa57928a2800234c939240c9a6f0f44a33ea3838a860ed38930b195afd"
NOSTR_PUBKEY="8ff74724ed641b3c28e5a86d7c5cbc49c37638ace8c6c38935860e7a5eedde0e"
NOSTR_PRIVKEY="39079f9fbdead31b5ec1724479e62c892a6866699c7873613c19832caff447bd"
NOSTR_PUBKEY="2a38db7fc1ffdabb43c79b5ad525f7d97102d4d235efc257dfd1514571f8159f"
# NOSTR_PRIVKEY="22cc83aa57928a2800234c939240c9a6f0f44a33ea3838a860ed38930b195afd"
# NOSTR_PUBKEY="8ff74724ed641b3c28e5a86d7c5cbc49c37638ace8c6c38935860e7a5eedde0e"




# Colors for output
RED='\033[0;31m'

@@ -1,19 +1,28 @@
#!/bin/bash

# white_black_list_test.sh - Whitelist/Blacklist Rules Test Suite
# Tests the auth_rules table functionality for pubkey and MIME type filtering
# Tests the auth_rules table functionality using Kind 23458 admin commands

# Configuration
SERVER_URL="http://localhost:9001"
UPLOAD_ENDPOINT="${SERVER_URL}/upload"
DB_PATH="db/ginxsom.db"
ADMIN_API_ENDPOINT="${SERVER_URL}/api/admin"
DB_PATH="db/52e366edfa4e9cc6a6d4653828e51ccf828a2f5a05227d7a768f33b5a198681a.db"
TEST_DIR="tests/auth_test_tmp"
TEST_KEYS_FILE=".test_keys"

# Test results tracking
TESTS_PASSED=0
TESTS_FAILED=0
TOTAL_TESTS=0

# Load admin keys from .test_keys
if [[ ! -f "$TEST_KEYS_FILE" ]]; then
    echo "❌ $TEST_KEYS_FILE not found"
    exit 1
fi
source "$TEST_KEYS_FILE"

# Test keys for different scenarios - Using WSB's keys for TEST_USER1
# Generated using: nak key public <privkey>
TEST_USER1_PRIVKEY="22cc83aa57928a2800234c939240c9a6f0f44a33ea3838a860ed38930b195afd"
@@ -42,6 +51,37 @@ record_test_result() {
    fi
}

# Helper function to send admin command via Kind 23458
send_admin_command() {
    local command_json="$1"

    # Encrypt command with NIP-44
    local encrypted_command=$(nak encrypt --sec "$ADMIN_PRIVKEY" -p "$SERVER_PUBKEY" "$command_json")

    if [[ -z "$encrypted_command" ]]; then
        echo "❌ Failed to encrypt command"
        return 1
    fi

    # Create Kind 23458 event
    local event=$(nak event -k 23458 \
        -c "$encrypted_command" \
        --tag p="$SERVER_PUBKEY" \
        --sec "$ADMIN_PRIVKEY")

    if [[ -z "$event" ]]; then
        echo "❌ Failed to create admin event"
        return 1
    fi

    # Send to admin API endpoint
    local response=$(curl -s -X POST "$ADMIN_API_ENDPOINT" \
        -H "Content-Type: application/json" \
        -d "$event")

    echo "$response"
}

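Typical use of the helper mirrors the call sites below: pass the command as a JSON array, capture the response, and pretty-print it when it parses (ADMIN_PRIVKEY and SERVER_PUBKEY come from .test_keys):

RESP=$(send_admin_command '["whitelist", "pubkey", "'$TEST_USER1_PUBKEY'"]')
echo "$RESP" | jq -c '.' 2>/dev/null || echo "$RESP"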
# Check prerequisites
for cmd in nak curl jq sqlite3; do
    if ! command -v $cmd &> /dev/null; then
@@ -130,20 +170,24 @@ test_upload() {
}

# Clean up any existing rules from previous tests
echo "Cleaning up existing auth rules..."
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" 2>/dev/null
echo "Cleaning up existing auth rules via admin command..."
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Enable authentication rules
echo "Enabling authentication rules..."
sqlite3 "$DB_PATH" "UPDATE config SET value = 'true' WHERE key = 'auth_rules_enabled';"
ENABLE_CMD='["config_update", {"auth_rules_enabled": "true"}]'
send_admin_command "$ENABLE_CMD" > /dev/null 2>&1

echo
echo "=== SECTION 1: PUBKEY BLACKLIST TESTS ==="
echo

# Test 1: Add pubkey blacklist rule
echo "Adding blacklist rule for TEST_USER3..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_blacklist', '$TEST_USER3_PUBKEY', 'upload', 10, 'Test blacklist');"
# Test 1: Add pubkey blacklist rule via admin command
echo "Adding blacklist rule for TEST_USER3 via admin API..."
BLACKLIST_CMD='["blacklist", "pubkey", "'$TEST_USER3_PUBKEY'"]'
BLACKLIST_RESPONSE=$(send_admin_command "$BLACKLIST_CMD")
echo "Response: $BLACKLIST_RESPONSE" | jq -c '.' 2>/dev/null || echo "$BLACKLIST_RESPONSE"

# Test 1a: Blacklisted user should be denied
test_file1=$(create_test_file "blacklist_test1.txt" "Content from blacklisted user")
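For reference, the admin command payloads this suite exercises are all flat JSON arrays, NIP-44-encrypted into the Kind 23458 event content:

'["sql_query", "DELETE FROM auth_rules"]'
'["config_update", {"auth_rules_enabled": "true"}]'
'["blacklist", "pubkey", "<64-hex-pubkey>"]'
'["blacklist", "hash", "<64-hex-sha256>"]'
'["blacklist", "mime", "application/x-executable"]'
'["whitelist", "pubkey", "<64-hex-pubkey>"]'
'["whitelist", "mime", "image/png"]'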
@@ -157,13 +201,16 @@ echo
echo "=== SECTION 2: PUBKEY WHITELIST TESTS ==="
echo

# Clean rules
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;"
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;"
# Clean rules via admin command
echo "Cleaning rules via admin API..."
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Test 2: Add pubkey whitelist rule
echo "Adding whitelist rule for TEST_USER1..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_whitelist', '$TEST_USER1_PUBKEY', 'upload', 300, 'Test whitelist');"
# Test 2: Add pubkey whitelist rule via admin command
echo "Adding whitelist rule for TEST_USER1 via admin API..."
WHITELIST_CMD='["whitelist", "pubkey", "'$TEST_USER1_PUBKEY'"]'
WHITELIST_RESPONSE=$(send_admin_command "$WHITELIST_CMD")
echo "Response: $WHITELIST_RESPONSE" | jq -c '.' 2>/dev/null || echo "$WHITELIST_RESPONSE"

# Test 2a: Whitelisted user should succeed
test_file3=$(create_test_file "whitelist_test1.txt" "Content from whitelisted user")
@@ -177,15 +224,17 @@ echo
echo "=== SECTION 3: HASH BLACKLIST TESTS ==="
echo

# Clean rules
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;"
# Clean rules via admin command
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Test 3: Create a file and blacklist its hash
# Test 3: Create a file and blacklist its hash via admin command
test_file5=$(create_test_file "hash_blacklist_test.txt" "This specific file is blacklisted")
BLACKLISTED_HASH=$(sha256sum "$test_file5" | cut -d' ' -f1)

echo "Adding hash blacklist rule for $BLACKLISTED_HASH..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('hash_blacklist', '$BLACKLISTED_HASH', 'upload', 100, 'Test hash blacklist');"
echo "Adding hash blacklist rule for $BLACKLISTED_HASH via admin API..."
HASH_BLACKLIST_CMD='["blacklist", "hash", "'$BLACKLISTED_HASH'"]'
send_admin_command "$HASH_BLACKLIST_CMD" > /dev/null 2>&1

# Test 3a: Blacklisted hash should be denied
test_upload "Test 3a: Blacklisted Hash Upload" "$TEST_USER1_PRIVKEY" "$test_file5" "403"
@@ -198,13 +247,14 @@ echo
echo "=== SECTION 4: MIME TYPE BLACKLIST TESTS ==="
echo

# Clean rules
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;"
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;"
# Clean rules via admin command
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Test 4: Blacklist executable MIME types
echo "Adding MIME type blacklist rules..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('mime_blacklist', 'application/x-executable', 'upload', 200, 'Block executables');"
# Test 4: Blacklist executable MIME types via admin command
echo "Adding MIME type blacklist rules via admin API..."
MIME_BLACKLIST_CMD='["blacklist", "mime", "application/x-executable"]'
send_admin_command "$MIME_BLACKLIST_CMD" > /dev/null 2>&1

# Note: This test would require the server to detect MIME types from file content
# For now, we'll test with text/plain which should be allowed
@@ -215,14 +265,16 @@ echo
echo "=== SECTION 5: MIME TYPE WHITELIST TESTS ==="
echo

# Clean rules
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;"
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;"
# Clean rules via admin command
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Test 5: Whitelist only image MIME types
echo "Adding MIME type whitelist rules..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('mime_whitelist', 'image/jpeg', 'upload', 400, 'Allow JPEG');"
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('mime_whitelist', 'image/png', 'upload', 400, 'Allow PNG');"
# Test 5: Whitelist only image MIME types via admin command
echo "Adding MIME type whitelist rules via admin API..."
MIME_WL1_CMD='["whitelist", "mime", "image/jpeg"]'
MIME_WL2_CMD='["whitelist", "mime", "image/png"]'
send_admin_command "$MIME_WL1_CMD" > /dev/null 2>&1
send_admin_command "$MIME_WL2_CMD" > /dev/null 2>&1

# Note: MIME type detection would need to be implemented in the server
# For now, text/plain should be denied if whitelist exists
@@ -233,14 +285,16 @@ echo
echo "=== SECTION 6: PRIORITY ORDERING TESTS ==="
echo

# Clean rules
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;"
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;"
# Clean rules via admin command
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Test 6: Blacklist should override whitelist (priority ordering)
echo "Adding both blacklist (priority 10) and whitelist (priority 300) for same pubkey..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_blacklist', '$TEST_USER1_PUBKEY', 'upload', 10, 'Blacklist priority test');"
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_whitelist', '$TEST_USER1_PUBKEY', 'upload', 300, 'Whitelist priority test');"
echo "Adding both blacklist and whitelist for same pubkey via admin API..."
BL_CMD='["blacklist", "pubkey", "'$TEST_USER1_PUBKEY'"]'
WL_CMD='["whitelist", "pubkey", "'$TEST_USER1_PUBKEY'"]'
send_admin_command "$BL_CMD" > /dev/null 2>&1
send_admin_command "$WL_CMD" > /dev/null 2>&1

# Test 6a: Blacklist should win (lower priority number = higher priority)
test_file9=$(create_test_file "priority_test.txt" "Testing priority ordering")
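The admin API takes no priority argument because the reworked schema has none; precedence now comes from evaluation order in check_database_auth_rules, where the blacklist checks (Steps 1-3) run before any whitelist check, so the blacklist still wins here. To confirm both rules actually landed before running 6a:

sqlite3 "$DB_PATH" "SELECT rule_type, pattern_value FROM auth_rules WHERE pattern_value = '$TEST_USER1_PUBKEY';"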
@@ -250,13 +304,14 @@ echo
echo "=== SECTION 7: OPERATION-SPECIFIC RULES ==="
echo

# Clean rules
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;"
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;"
# Clean rules via admin command
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Test 7: Blacklist only for upload operation
echo "Adding blacklist rule for upload operation only..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_blacklist', '$TEST_USER2_PUBKEY', 'upload', 10, 'Upload-only blacklist');"
# Test 7: Blacklist for user via admin command
echo "Adding blacklist rule for TEST_USER2 via admin API..."
BL_USER2_CMD='["blacklist", "pubkey", "'$TEST_USER2_PUBKEY'"]'
send_admin_command "$BL_USER2_CMD" > /dev/null 2>&1

# Test 7a: Upload should be denied
test_file10=$(create_test_file "operation_test.txt" "Testing operation-specific rules")
@@ -266,13 +321,14 @@ echo
echo "=== SECTION 8: WILDCARD OPERATION TESTS ==="
echo

# Clean rules
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;"
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;"
# Clean rules via admin command
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Test 8: Blacklist for all operations using wildcard
echo "Adding blacklist rule for all operations (*)..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_blacklist', '$TEST_USER3_PUBKEY', '*', 10, 'All operations blacklist');"
# Test 8: Blacklist for user via admin command
echo "Adding blacklist rule for TEST_USER3 via admin API..."
BL_USER3_CMD='["blacklist", "pubkey", "'$TEST_USER3_PUBKEY'"]'
send_admin_command "$BL_USER3_CMD" > /dev/null 2>&1

# Test 8a: Upload should be denied
test_file11=$(create_test_file "wildcard_test.txt" "Testing wildcard operation")
@@ -282,13 +338,13 @@ echo
echo "=== SECTION 9: ENABLED/DISABLED RULES ==="
echo

# Clean rules
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;"
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;"
# Clean rules via admin command
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Test 9: Disabled rule should not be enforced
echo "Adding disabled blacklist rule..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, enabled, description) VALUES ('pubkey_blacklist', '$TEST_USER1_PUBKEY', 'upload', 10, 0, 'Disabled blacklist');"
echo "Adding disabled blacklist rule via SQL (admin API doesn't support active=0 on create)..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, pattern_type, pattern_value, active) VALUES ('blacklist_pubkey', 'pubkey', '$TEST_USER1_PUBKEY', 0);"

# Test 9a: Upload should succeed (rule is disabled)
test_file12=$(create_test_file "disabled_rule_test.txt" "Testing disabled rule")
@@ -296,7 +352,7 @@ test_upload "Test 9a: Disabled Rule Not Enforced" "$TEST_USER1_PRIVKEY" "$test_f

# Test 9b: Enable the rule
echo "Enabling the blacklist rule..."
sqlite3 "$DB_PATH" "UPDATE auth_rules SET enabled = 1 WHERE rule_target = '$TEST_USER1_PUBKEY';"
sqlite3 "$DB_PATH" "UPDATE auth_rules SET active = 1 WHERE pattern_value = '$TEST_USER1_PUBKEY';"

# Test 9c: Upload should now be denied
test_file13=$(create_test_file "enabled_rule_test.txt" "Testing enabled rule")
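Toggling active directly works here because the rules engine queries auth_rules with active = 1 at check time; correspondingly, the old DELETE FROM auth_rules_cache flushes have been dropped throughout this script. The disable/enable pair in isolation:

sqlite3 "$DB_PATH" "UPDATE auth_rules SET active = 0 WHERE pattern_value = '$TEST_USER1_PUBKEY';"   # rule ignored
sqlite3 "$DB_PATH" "UPDATE auth_rules SET active = 1 WHERE pattern_value = '$TEST_USER1_PUBKEY';"   # rule enforced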
@@ -307,9 +363,10 @@ echo
echo "=== SECTION 11: CLEANUP AND RESET ==="
echo

# Clean up all test rules
echo "Cleaning up test rules..."
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;"
# Clean up all test rules via admin command
echo "Cleaning up test rules via admin API..."
CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

# Verify cleanup
RULE_COUNT=$(sqlite3 "$DB_PATH" "SELECT COUNT(*) FROM auth_rules;" 2>/dev/null)