2 Commits

Author      SHA1        Message                                       Date
Your Name   281c686fde  v0.1.20 - Fixed auth white and black lists    2025-12-16 06:54:26 -04:00
Your Name   a5880ebdf6  v0.1.19 - Lots of remote hosting fixes        2025-12-13 14:53:25 -04:00

29 changed files with 34056 additions and 7135 deletions

View File

@@ -78,6 +78,7 @@ RUN cd nostr_core_lib && \
     ./build.sh --nips=1,6,13,17,19,42,44,59
 # Copy web interface files for embedding
+# Note: Changes to api/ files will trigger rebuild from this point
 COPY api/ /build/api/
 COPY scripts/embed_web_files.sh /build/scripts/
@@ -87,6 +88,8 @@ RUN mkdir -p src && \
     ./scripts/embed_web_files.sh
 # Copy Ginxsom source files LAST (only this layer rebuilds on source changes)
+# Note: The embedded header from previous step will be overwritten by this COPY
+# So we need to ensure src/admin_interface_embedded.h is NOT in src/ directory
 COPY src/ /build/src/
 COPY include/ /build/include/
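The two notes above are about Docker layer-cache ordering: because `COPY api/` comes before `COPY src/`, a source-only change reuses the embedded-web-files layers and only rebuilds from `COPY src/` onward. A quick way to confirm the cache behaves as intended (a sketch; the `ginxsom` tag and BuildKit's plain progress output are assumptions, not part of this commit):

```bash
# Touch only a C source file, rebuild, and check which steps were cached.
# With BuildKit, --progress=plain prints one "#N CACHED" line per reused layer;
# everything before "COPY src/ /build/src/" should stay cached.
touch src/main.c
docker build --progress=plain -t ginxsom . 2>&1 | grep -E 'CACHED|COPY'
```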

View File

@@ -431,6 +431,13 @@ All commands are sent as NIP-44 encrypted JSON arrays in the event content:
 | `storage_stats` | `["storage_stats"]` | Get detailed storage statistics |
 | `mirror_status` | `["mirror_status"]` | Get status of mirroring operations |
 | `report_query` | `["report_query", "all"]` | Query content reports (BUD-09) |
+| **Authorization Rules Management** |
+| `auth_add_blacklist` | `["blacklist", "pubkey", "abc123..."]` | Add pubkey to blacklist |
+| `auth_add_whitelist` | `["whitelist", "pubkey", "def456..."]` | Add pubkey to whitelist |
+| `auth_delete_rule` | `["delete_auth_rule", "blacklist", "pubkey", "abc123..."]` | Delete specific auth rule |
+| `auth_query_all` | `["auth_query", "all"]` | Query all auth rules |
+| `auth_query_type` | `["auth_query", "whitelist"]` | Query specific rule type |
+| `auth_query_pattern` | `["auth_query", "pattern", "abc123..."]` | Query specific pattern |
 | **Database Queries** |
 | `sql_query` | `["sql_query", "SELECT * FROM blobs LIMIT 10"]` | Execute read-only SQL query |
@@ -448,10 +455,16 @@ All commands are sent as NIP-44 encrypted JSON arrays in the event content:
 - `kind_10002_tags`: Relay list JSON array
 **Authentication Settings:**
-- `auth_enabled`: Enable auth rules system
+- `auth_rules_enabled`: Enable auth rules system
 - `require_auth_upload`: Require authentication for uploads
 - `require_auth_delete`: Require authentication for deletes
+**Authorization Rules:**
+- `rule_type`: Type of rule (`pubkey_blacklist`, `pubkey_whitelist`, `hash_blacklist`, `mime_blacklist`, `mime_whitelist`)
+- `pattern_type`: Pattern matching type (`pubkey`, `hash`, `mime`)
+- `pattern_value`: The actual value to match (64-char hex for pubkey/hash, MIME type string for mime)
+- `active`: Whether rule is active (1) or disabled (0)
 **Limits:**
 - `max_blobs_per_user`: Per-user blob limit
 - `rate_limit_uploads`: Uploads per minute
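For orientation, the payloads in the table above are plain JSON arrays before NIP-44 encryption. A minimal sketch of composing one from the shell (jq only builds the plaintext array; encrypting it into the event content and signing are left to your Nostr tooling, and the pubkey is a placeholder):

```bash
# Build the plaintext command array for adding a pubkey to the whitelist.
PUBKEY="def456..."  # placeholder 64-char hex pubkey
jq -cn --arg pk "$PUBKEY" '["whitelist", "pubkey", $pk]'
# Output: ["whitelist","pubkey","def456..."]
# This JSON string becomes the event content after NIP-44 encryption.
```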

View File

@@ -100,6 +100,10 @@
 <td>Total Size</td>
 <td id="total-size">-</td>
 </tr>
+<tr>
+<td>Version</td>
+<td id="version">-</td>
+</tr>
 <tr>
 <td>Process ID</td>
 <td id="process-id">-</td>
@@ -116,6 +120,14 @@
 <td>CPU Usage</td>
 <td id="cpu-usage">-</td>
 </tr>
+<tr>
+<td>Filesystem Blob Count</td>
+<td id="fs-blob-count">-</td>
+</tr>
+<tr>
+<td>Filesystem Blob Size</td>
+<td id="fs-blob-size">-</td>
+</tr>
 <tr>
 <td>Oldest Blob</td>
 <td id="oldest-event">-</td>

View File

@@ -3889,40 +3889,69 @@ function handleViewQueryResponse(viewName, responseData) {
 // Route to appropriate handler based on view name
 switch (viewName) {
 case 'blob_overview':
-if (responseData.data && Array.isArray(responseData.data) && responseData.data.length > 0) {
-const overviewData = responseData.data[0];
+if (responseData.rows && Array.isArray(responseData.rows) && responseData.rows.length > 0) {
+// Convert row array to object using column names
+const overviewData = {};
+responseData.columns.forEach((col, idx) => {
+overviewData[col] = responseData.rows[0][idx];
+});
 populateStatsOverview(overviewData);
 // Update chart with total blobs count
 const currentTotal = overviewData.total_blobs;
 if (currentTotal !== undefined) {
-// Calculate new blobs since last update for chart
-if (previousTotalBlobs > 0) {
+// Initialize previousTotalBlobs on first load
+if (previousTotalBlobs === 0) {
+previousTotalBlobs = currentTotal;
+console.log(`Initialized previousTotalBlobs to ${currentTotal}`);
+} else {
+// Calculate new blobs since last update for chart
 const newBlobs = currentTotal - previousTotalBlobs;
 if (newBlobs > 0 && eventRateChart) {
 console.log(`Adding ${newBlobs} new blobs to rate chart (${currentTotal} - ${previousTotalBlobs})`);
 eventRateChart.addValue(newBlobs);
 }
+// Update previous total for next calculation
+previousTotalBlobs = currentTotal;
 }
-// Update previous total for next calculation
-previousTotalBlobs = currentTotal;
 }
 }
 break;
 case 'blob_type_distribution':
-if (responseData.data && Array.isArray(responseData.data)) {
-populateStatsKinds(responseData.data);
+if (responseData.rows && Array.isArray(responseData.rows)) {
+// Convert rows to array of objects
+const typeData = responseData.rows.map(row => {
+const obj = {};
+responseData.columns.forEach((col, idx) => {
+obj[col] = row[idx];
+});
+return obj;
+});
+populateStatsKinds(typeData);
 }
 break;
 case 'blob_time_stats':
-if (responseData.data && Array.isArray(responseData.data) && responseData.data.length > 0) {
-populateStatsTime(responseData.data[0]);
+if (responseData.rows && Array.isArray(responseData.rows) && responseData.rows.length > 0) {
+// Convert row array to object using column names
+const timeData = {};
+responseData.columns.forEach((col, idx) => {
+timeData[col] = responseData.rows[0][idx];
+});
+populateStatsTime(timeData);
 }
 break;
 case 'top_uploaders':
-if (responseData.data && Array.isArray(responseData.data)) {
-populateStatsPubkeys(responseData.data);
+if (responseData.rows && Array.isArray(responseData.rows)) {
+// Convert rows to array of objects
+const uploadersData = responseData.rows.map(row => {
+const obj = {};
+responseData.columns.forEach((col, idx) => {
+obj[col] = row[idx];
+});
+return obj;
+});
+populateStatsPubkeys(uploadersData);
 }
 break;
 default:
@@ -4143,12 +4172,32 @@ function populateStatsOverview(data) {
 if (!data) return;
 // Update individual cells with flash animation for changed values
-// Backend sends: total_bytes, total_blobs, first_upload, last_upload
+// Backend sends: total_bytes, total_blobs, first_upload, last_upload, version, process_id, memory_mb, cpu_core, fs_blob_count, fs_blob_size_mb
 updateStatsCell('db-size', data.total_bytes ? formatFileSize(data.total_bytes) : '-');
 updateStatsCell('total-size', data.total_bytes ? formatFileSize(data.total_bytes) : '-');
 updateStatsCell('total-events', data.total_blobs || '-');
 updateStatsCell('oldest-event', data.first_upload ? formatTimestamp(data.first_upload) : '-');
 updateStatsCell('newest-event', data.last_upload ? formatTimestamp(data.last_upload) : '-');
+// System metrics from system table
+if (data.version) {
+updateStatsCell('version', data.version);
+}
+if (data.process_id) {
+updateStatsCell('process-id', data.process_id);
+}
+if (data.memory_mb) {
+updateStatsCell('memory-usage', data.memory_mb + ' MB');
+}
+if (data.cpu_core) {
+updateStatsCell('cpu-core', 'Core ' + data.cpu_core);
+}
+if (data.fs_blob_count !== undefined) {
+updateStatsCell('fs-blob-count', data.fs_blob_count);
+}
+if (data.fs_blob_size_mb !== undefined) {
+updateStatsCell('fs-blob-size', data.fs_blob_size_mb + ' MB');
+}
 }
 // Populate event kinds distribution table
@@ -4526,6 +4575,14 @@ function updateStatsCell(cellId, newValue) {
 // Start polling for statistics (every 10 seconds)
 function startStatsPolling() {
 console.log('=== STARTING STATISTICS POLLING ===');
+// Initialize the event rate chart if not already initialized
+if (!eventRateChart) {
+console.log('Initializing event rate chart from startStatsPolling...');
+setTimeout(() => {
+initializeEventRateChart();
+}, 1000); // Delay to ensure text_graph.js is loaded
+}
 console.log('Current page:', currentPage);
 console.log('Is logged in:', isLoggedIn);
 console.log('User pubkey:', userPubkey);
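Each case above repeats the same columns/rows-to-object conversion. The identical transform is handy when inspecting responses outside the browser; a sketch with jq (the sample input is invented, but its shape matches what these handlers consume):

```bash
# Convert a {columns, rows} payload into an array of objects,
# mirroring the conversion the JS handlers perform per row.
echo '{"columns":["mime_type","count"],"rows":[["image/png",12],["video/mp4",3]]}' |
jq '.columns as $cols | .rows | map([$cols, .] | transpose | map({(.[0]): .[1]}) | add)'
# Output: [{"mime_type":"image/png","count":12},{"mime_type":"video/mp4","count":3}]
```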

6 binary files changed (contents not shown)

debug.log (24290 lines)

File diff suppressed because it is too large

View File

@@ -22,278 +22,226 @@ fi
 # Configuration
 REMOTE_HOST="laantungir.net"
 REMOTE_USER="ubuntu"
-REMOTE_DIR="/home/ubuntu/ginxsom"
-REMOTE_DB_PATH="/home/ubuntu/ginxsom/db/ginxsom.db"
-REMOTE_NGINX_CONFIG="/etc/nginx/conf.d/default.conf"
-REMOTE_BINARY_PATH="/home/ubuntu/ginxsom/ginxsom.fcgi"
+# Deployment paths
+REMOTE_BINARY_DIR="/usr/local/bin/ginxsom"
+REMOTE_BINARY_PATH="$REMOTE_BINARY_DIR/ginxsom-fcgi"
+REMOTE_DB_PATH="$REMOTE_BINARY_DIR"
+REMOTE_BLOB_DIR="/var/www/blobs"
 REMOTE_SOCKET="/tmp/ginxsom-fcgi.sock"
-REMOTE_DATA_DIR="/var/www/html/blossom"
-print_status "Starting deployment to $REMOTE_HOST..."
-# Step 1: Build and prepare local binary
-print_status "Building ginxsom binary..."
-make clean && make
-if [[ ! -f "build/ginxsom-fcgi" ]]; then
-print_error "Build failed - binary not found"
+# Production keys
+ADMIN_PUBKEY="1ec454734dcbf6fe54901ce25c0c7c6bca5edd89443416761fadc321d38df139"
+SERVER_PRIVKEY="90df3fe61e7d19e50f387e4c5db87eff1a7d2a1037cd55026c4b21a4fda8ecf6"
+# Local paths
+LOCAL_BINARY="build/ginxsom-fcgi_static_x86_64"
+print_status "=========================================="
+print_status "Ginxsom Static Binary Deployment"
+print_status "=========================================="
+print_status "Target: $REMOTE_HOST"
+print_status "Binary: $REMOTE_BINARY_PATH"
+print_status "Database: $REMOTE_DB_PATH"
+print_status "Blobs: $REMOTE_BLOB_DIR"
+print_status "Fresh install: $FRESH_INSTALL"
+print_status "=========================================="
+echo ""
+# Step 1: Verify local binary exists
+print_status "Step 1: Verifying local static binary..."
+if [[ ! -f "$LOCAL_BINARY" ]]; then
+print_error "Static binary not found: $LOCAL_BINARY"
+print_status "Please run: ./build_static.sh"
 exit 1
 fi
-print_success "Binary built successfully"
-# Step 2: Setup remote environment first (before copying files)
-print_status "Setting up remote environment..."
-ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
+# Verify it's actually static
+if ldd "$LOCAL_BINARY" 2>&1 | grep -q "not a dynamic executable\|statically linked"; then
+print_success "Binary is static"
+else
+print_warning "Binary may not be fully static - proceeding anyway"
+fi
+BINARY_SIZE=$(du -h "$LOCAL_BINARY" | cut -f1)
+print_success "Found static binary ($BINARY_SIZE)"
+echo ""
+# Step 2: Upload binary to server
+print_status "Step 2: Uploading binary to server..."
+scp "$LOCAL_BINARY" $REMOTE_USER@$REMOTE_HOST:~/ginxsom-fcgi_new || {
+print_error "Failed to upload binary"
+exit 1
+}
+print_success "Binary uploaded to ~/ginxsom-fcgi_new"
+echo ""
+# Step 3: Setup directories
+print_status "Step 3: Setting up directories..."
+ssh $REMOTE_USER@$REMOTE_HOST << EOF
 set -e
-# Create data directory if it doesn't exist (using existing /var/www/html/blossom)
-sudo mkdir -p /var/www/html/blossom
-sudo chown www-data:www-data /var/www/html/blossom
-sudo chmod 755 /var/www/html/blossom
-# Ensure socket directory exists
-sudo mkdir -p /tmp
-sudo chmod 755 /tmp
-# Install required dependencies
-echo "Installing required dependencies..."
-sudo apt-get update
-sudo apt-get install -y spawn-fcgi libfcgi-dev
-# Stop any existing ginxsom processes
-echo "Stopping existing ginxsom processes..."
-sudo pkill -f ginxsom-fcgi || true
-sudo rm -f /tmp/ginxsom-fcgi.sock || true
-echo "Remote environment setup complete"
-EOF
-print_success "Remote environment configured"
-# Step 3: Copy files to remote server
-print_status "Copying files to remote server..."
-# Copy entire project directory (excluding unnecessary files)
-# Note: We include .git and .gitmodules to allow submodule initialization on remote
-print_status "Copying entire ginxsom project..."
-rsync -avz --exclude='build' --exclude='logs' --exclude='Trash' --exclude='blobs' --exclude='db' --no-g --no-o --no-perms --omit-dir-times . $REMOTE_USER@$REMOTE_HOST:$REMOTE_DIR/
-# Initialize git submodules on remote server
-print_status "Initializing git submodules on remote server..."
-ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
-cd /home/ubuntu/ginxsom
-# Check if .git exists
-if [ ! -d .git ]; then
-echo "ERROR: .git directory not found - git repository not copied"
-exit 1
-fi
-# Check if .gitmodules exists
-if [ ! -f .gitmodules ]; then
-echo "ERROR: .gitmodules file not found"
-exit 1
-fi
-echo "Initializing git submodules..."
-git submodule update --init --recursive
-# Verify submodule was initialized
-if [ ! -f nostr_core_lib/cjson/cJSON.h ]; then
-echo "ERROR: Submodule initialization failed - cJSON.h not found"
-echo "Checking nostr_core_lib directory:"
-ls -la nostr_core_lib/ || echo "nostr_core_lib directory not found"
-exit 1
-fi
-echo "Submodules initialized successfully"
-# Build nostr_core_lib
-echo "Building nostr_core_lib..."
-cd nostr_core_lib
-./build.sh
-if [ $? -ne 0 ]; then
-echo "ERROR: Failed to build nostr_core_lib"
-exit 1
-fi
-echo "nostr_core_lib built successfully"
+# Create binary/database directory
+echo "Creating application directory..."
+sudo mkdir -p $REMOTE_BINARY_DIR
+sudo chown www-data:www-data $REMOTE_BINARY_DIR
+sudo chmod 755 $REMOTE_BINARY_DIR
+# Create blob storage directory
+echo "Creating blob storage directory..."
+sudo mkdir -p $REMOTE_BLOB_DIR
+sudo chown www-data:www-data $REMOTE_BLOB_DIR
+sudo chmod 755 $REMOTE_BLOB_DIR
+# Create logs directory
+echo "Creating logs directory..."
+sudo mkdir -p $REMOTE_BINARY_DIR/logs/app
+sudo chown -R www-data:www-data $REMOTE_BINARY_DIR/logs
+sudo chmod -R 755 $REMOTE_BINARY_DIR/logs
+echo "Directories created successfully"
 EOF
 if [ $? -ne 0 ]; then
-print_error "Failed to initialize git submodules or build nostr_core_lib"
+print_error "Failed to create directories"
 exit 1
 fi
-# Build on remote server to ensure compatibility
-print_status "Building ginxsom on remote server..."
-ssh $REMOTE_USER@$REMOTE_HOST "cd $REMOTE_DIR && make clean && make" || {
-print_error "Build failed on remote server"
-print_status "Checking what packages are actually installed..."
-ssh $REMOTE_USER@$REMOTE_HOST "dpkg -l | grep -E '(sqlite|fcgi)'"
+print_success "Directories created"
+echo ""
+# Step 4: Handle fresh install if requested
+if [ "$FRESH_INSTALL" = true ]; then
+print_status "Step 4: Fresh install - removing existing data..."
+ssh $REMOTE_USER@$REMOTE_HOST << EOF
+sudo rm -f $REMOTE_DB_PATH/*.db
+sudo rm -rf $REMOTE_BLOB_DIR/*
+echo "Existing data removed"
+EOF
+print_success "Fresh install prepared"
+echo ""
+else
+print_status "Step 4: Preserving existing data"
+echo ""
+fi
+# Step 5: Install minimal dependencies
+print_status "Step 5: Installing minimal dependencies..."
+ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
+set -e
+# Check if spawn-fcgi is installed
+if ! command -v spawn-fcgi &> /dev/null; then
+echo "Installing spawn-fcgi..."
+sudo apt-get update -qq
+sudo apt-get install -y spawn-fcgi
+echo "spawn-fcgi installed"
+else
+echo "spawn-fcgi already installed"
+fi
+EOF
+if [ $? -eq 0 ]; then
+print_success "Dependencies verified"
+else
+print_error "Failed to install dependencies"
+exit 1
+fi
+echo ""
+# Step 6: Upload and install systemd service file
+print_status "Step 6: Installing systemd service file..."
+scp ginxsom.service $REMOTE_USER@$REMOTE_HOST:~/ginxsom.service || {
+print_error "Failed to upload service file"
 exit 1
 }
-# Copy binary to application directory
-print_status "Copying ginxsom binary to application directory..."
-ssh $REMOTE_USER@$REMOTE_HOST << EOF
-# Stop any running process first
-sudo pkill -f ginxsom-fcgi || true
-sleep 1
-# Remove old binary if it exists
-rm -f $REMOTE_BINARY_PATH
-# Copy new binary
-cp $REMOTE_DIR/build/ginxsom-fcgi $REMOTE_BINARY_PATH
-chmod +x $REMOTE_BINARY_PATH
-chown ubuntu:ubuntu $REMOTE_BINARY_PATH
-echo "Binary copied successfully"
+ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
+sudo cp ~/ginxsom.service /etc/systemd/system/
+sudo systemctl daemon-reload
+echo "Service file installed"
 EOF
-# NOTE: Do NOT update nginx configuration automatically
-# The deployment script should only update ginxsom binaries and do nothing else with the system
-# Nginx configuration should be managed manually by the system administrator
-print_status "Skipping nginx configuration update (manual control required)"
-print_success "Files copied to remote server"
-# Step 3: Setup remote environment
-print_status "Setting up remote environment..."
-ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
+if [ $? -eq 0 ]; then
+print_success "Service file installed"
+else
+print_error "Failed to install service file"
+exit 1
+fi
+echo ""
+# Step 7: Stop existing service and install new binary
+print_status "Step 7: Stopping existing service and installing new binary..."
+ssh $REMOTE_USER@$REMOTE_HOST << EOF
 set -e
-# Create data directory if it doesn't exist (using existing /var/www/html/blossom)
-sudo mkdir -p /var/www/html/blossom
-sudo chown www-data:www-data /var/www/html/blossom
-sudo chmod 755 /var/www/html/blossom
-# Ensure socket directory exists
-sudo mkdir -p /tmp
-sudo chmod 755 /tmp
-# Install required dependencies
-echo "Installing required dependencies..."
-sudo apt-get update 2>/dev/null || true # Continue even if apt update has issues
-sudo apt-get install -y spawn-fcgi libfcgi-dev libsqlite3-dev sqlite3 libcurl4-openssl-dev
-# Verify installations
-echo "Verifying installations..."
-if ! dpkg -l libsqlite3-dev >/dev/null 2>&1; then
-echo "libsqlite3-dev not found, trying alternative..."
-sudo apt-get install -y libsqlite3-dev || {
-echo "Failed to install libsqlite3-dev"
-exit 1
-}
-fi
-if ! dpkg -l libfcgi-dev >/dev/null 2>&1; then
-echo "libfcgi-dev not found"
-exit 1
-fi
-# Check if sqlite3.h exists
-if [ ! -f /usr/include/sqlite3.h ]; then
-echo "sqlite3.h not found in /usr/include/"
-find /usr -name "sqlite3.h" 2>/dev/null || echo "sqlite3.h not found anywhere"
-exit 1
-fi
 # Stop any existing ginxsom processes
 echo "Stopping existing ginxsom processes..."
 sudo pkill -f ginxsom-fcgi || true
-sudo rm -f /tmp/ginxsom-fcgi.sock || true
-echo "Remote environment setup complete"
-EOF
-print_success "Remote environment configured"
-# Step 4: Setup database directory and migrate database
-print_status "Setting up database directory..."
-ssh $REMOTE_USER@$REMOTE_HOST << EOF
-# Create db directory if it doesn't exist
-mkdir -p $REMOTE_DIR/db
-if [ "$FRESH_INSTALL" = "true" ]; then
-echo "Fresh install: removing existing database and blobs..."
-# Remove existing database
-sudo rm -f $REMOTE_DB_PATH
-sudo rm -f /var/www/html/blossom/ginxsom.db
-# Remove existing blobs
-sudo rm -rf $REMOTE_DATA_DIR/*
-echo "Existing data removed"
-else
-# Backup current database if it exists in old location
-if [ -f /var/www/html/blossom/ginxsom.db ]; then
-echo "Backing up existing database..."
-cp /var/www/html/blossom/ginxsom.db /var/www/html/blossom/ginxsom.db.backup.\$(date +%Y%m%d_%H%M%S)
-# Migrate database to new location if not already there
-if [ ! -f $REMOTE_DB_PATH ]; then
-echo "Migrating database to new location..."
-cp /var/www/html/blossom/ginxsom.db $REMOTE_DB_PATH
-else
-echo "Database already exists at new location"
-fi
-elif [ ! -f $REMOTE_DB_PATH ]; then
-echo "No existing database found - will be created on first run"
-else
-echo "Database already exists at $REMOTE_DB_PATH"
-fi
-fi
-# Set proper permissions - www-data needs write access to db directory for SQLite journal files
-sudo chown -R www-data:www-data $REMOTE_DIR/db
-sudo chmod 755 $REMOTE_DIR/db
-sudo chmod 644 $REMOTE_DB_PATH 2>/dev/null || true
-# Allow www-data to access the application directory for spawn-fcgi chdir
-chmod 755 $REMOTE_DIR
-echo "Database directory setup complete"
-EOF
-print_success "Database directory configured"
-# Step 5: Start ginxsom FastCGI process
-print_status "Starting ginxsom FastCGI process..."
-ssh $REMOTE_USER@$REMOTE_HOST << EOF
-# Clean up any existing socket
+sleep 2
+# Remove old socket
 sudo rm -f $REMOTE_SOCKET
-# Start FastCGI process with explicit paths
+# Install new binary
+echo "Installing new binary..."
+sudo mv ~/ginxsom-fcgi_new $REMOTE_BINARY_PATH
+sudo chmod +x $REMOTE_BINARY_PATH
+sudo chown www-data:www-data $REMOTE_BINARY_PATH
+echo "Binary installed successfully"
+EOF
+if [ $? -eq 0 ]; then
+print_success "Binary installed"
+else
+print_error "Failed to install binary"
+exit 1
+fi
+echo ""
+# Step 8: Start ginxsom FastCGI process
+print_status "Step 8: Starting ginxsom service..."
+ssh $REMOTE_USER@$REMOTE_HOST << EOF
+set -e
 echo "Starting ginxsom FastCGI with configuration:"
-echo " Working directory: $REMOTE_DIR"
 echo " Binary: $REMOTE_BINARY_PATH"
 echo " Database: $REMOTE_DB_PATH"
-echo " Storage: $REMOTE_DATA_DIR"
-sudo spawn-fcgi -M 666 -u www-data -g www-data -s $REMOTE_SOCKET -U www-data -G www-data -d $REMOTE_DIR -- $REMOTE_BINARY_PATH --db-path "$REMOTE_DB_PATH" --storage-dir "$REMOTE_DATA_DIR"
+echo " Storage: $REMOTE_BLOB_DIR"
+echo " Socket: $REMOTE_SOCKET"
+echo ""
+sudo spawn-fcgi \
+-M 666 \
+-u www-data \
+-g www-data \
+-s $REMOTE_SOCKET \
+-U www-data \
+-G www-data \
+-d $REMOTE_BINARY_DIR \
+-- $REMOTE_BINARY_PATH \
+--admin-pubkey $ADMIN_PUBKEY \
+--server-privkey $SERVER_PRIVKEY \
+--db-path $REMOTE_DB_PATH \
+--storage-dir $REMOTE_BLOB_DIR
 # Give it a moment to start
 sleep 2
 # Verify process is running
-if pgrep -f "ginxsom-fcgi" > /dev/null; then
-echo "FastCGI process started successfully"
-echo "PID: \$(pgrep -f ginxsom-fcgi)"
-else
-echo "Process not found by pgrep, but socket exists - this may be normal for FastCGI"
-echo "Checking socket..."
+if [ -S $REMOTE_SOCKET ]; then
+echo "FastCGI socket created successfully"
 ls -la $REMOTE_SOCKET
-echo "Checking if binary exists and is executable..."
-ls -la $REMOTE_BINARY_PATH
-echo "Testing if we can connect to the socket..."
-# Try to test the FastCGI connection
-if command -v cgi-fcgi >/dev/null 2>&1; then
-echo "Testing FastCGI connection..."
-SCRIPT_NAME=/health SCRIPT_FILENAME=$REMOTE_BINARY_PATH REQUEST_METHOD=GET cgi-fcgi -bind -connect $REMOTE_SOCKET 2>/dev/null | head -5 || echo "Connection test failed"
-else
-echo "cgi-fcgi not available for testing"
-fi
+else
+echo "ERROR: Socket not created"
+exit 1
+fi
+# Check if process is running
+if pgrep -f ginxsom-fcgi > /dev/null; then
+echo "Process is running (PID: \$(pgrep -f ginxsom-fcgi))"
+else
+echo "WARNING: Process not found by pgrep (may be normal for FastCGI)"
+# Don't exit - the socket existing means spawn-fcgi worked
 fi
 EOF
@@ -303,51 +251,84 @@ else
print_error "Failed to start FastCGI process" print_error "Failed to start FastCGI process"
exit 1 exit 1
fi fi
echo ""
# Step 6: Test nginx configuration and reload # Step 8: Test nginx configuration and reload
print_status "Testing and reloading nginx..." print_status "Step 8: Testing and reloading nginx..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF' ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
# Test nginx configuration # Test nginx configuration
if sudo nginx -t; then if sudo nginx -t 2>&1; then
echo "Nginx configuration test passed" echo "Nginx configuration test passed"
sudo nginx -s reload sudo nginx -s reload
echo "Nginx reloaded successfully" echo "Nginx reloaded successfully"
else else
echo "Nginx configuration test failed" echo "WARNING: Nginx configuration test failed"
exit 1 echo "You may need to update nginx configuration manually"
echo "See docs/STATIC_DEPLOYMENT_PLAN.md for details"
fi fi
EOF EOF
print_success "Nginx reloaded" if [ $? -eq 0 ]; then
print_success "Nginx reloaded"
else
print_warning "Nginx reload had issues - check configuration"
fi
echo ""
# Step 7: Test deployment # Step 9: Test deployment
print_status "Testing deployment..." print_status "Step 9: Testing deployment..."
echo ""
# Wait a moment for service to fully start
sleep 2
# Test health endpoint # Test health endpoint
echo "Testing health endpoint..." echo "Testing health endpoint..."
if curl -k -s --max-time 10 "https://blossom.laantungir.net/health" | grep -q "OK"; then if curl -k -s --max-time 10 "https://blossom.laantungir.net/health" | grep -q "OK"; then
print_success "Health check passed" print_success "Health check passed"
else else
print_warning "Health check failed - checking response..." print_warning "Health check failed - checking response..."
curl -k -v --max-time 10 "https://blossom.laantungir.net/health" 2>&1 | head -10 curl -k -v --max-time 10 "https://blossom.laantungir.net/health" 2>&1 | head -10
fi fi
# Test basic endpoints # Test root endpoint
echo ""
echo "Testing root endpoint..." echo "Testing root endpoint..."
if curl -k -s --max-time 10 "https://blossom.laantungir.net/" | grep -q "Ginxsom"; then if curl -k -s --max-time 10 "https://blossom.laantungir.net/" | grep -q "Ginxsom"; then
print_success "Root endpoint responding" print_success "Root endpoint responding"
else else
print_warning "Root endpoint not responding as expected - checking response..." print_warning "Root endpoint not responding as expected"
curl -k -v --max-time 10 "https://blossom.laantungir.net/" 2>&1 | head -10
fi fi
print_success "Deployment to $REMOTE_HOST completed!" echo ""
print_status "Ginxsom should now be available at: https://blossom.laantungir.net" print_status "=========================================="
print_status "Test endpoints:" print_success "Deployment completed!"
print_status "=========================================="
echo ""
print_status "Service Information:"
echo " URL: https://blossom.laantungir.net"
echo " Binary: $REMOTE_BINARY_PATH"
echo " Database: $REMOTE_DB_PATH"
echo " Blobs: $REMOTE_BLOB_DIR"
echo " Socket: $REMOTE_SOCKET"
echo ""
print_status "Test Commands:"
echo " Health: curl -k https://blossom.laantungir.net/health" echo " Health: curl -k https://blossom.laantungir.net/health"
echo " Root: curl -k https://blossom.laantungir.net/" echo " Info: curl -k https://blossom.laantungir.net/"
echo " List: curl -k https://blossom.laantungir.net/list" echo " Upload: ./tests/file_put_bud02.sh"
if [ "$FRESH_INSTALL" = "true" ]; then echo ""
print_status "Server Commands:"
echo " Check status: ssh $REMOTE_USER@$REMOTE_HOST 'ps aux | grep ginxsom-fcgi'"
echo " View logs: ssh $REMOTE_USER@$REMOTE_HOST 'sudo journalctl -f | grep ginxsom'"
echo " Restart: ssh $REMOTE_USER@$REMOTE_HOST 'sudo pkill ginxsom-fcgi && sudo spawn-fcgi ...'"
echo ""
if [ "$FRESH_INSTALL" = true ]; then
print_warning "Fresh install completed - database and blobs have been reset" print_warning "Fresh install completed - database and blobs have been reset"
fi else
print_status "Existing data preserved - verify database and blobs"
echo " Check blobs: ssh $REMOTE_USER@$REMOTE_HOST 'ls -la $REMOTE_BLOB_DIR | wc -l'"
echo " Check DB: ssh $REMOTE_USER@$REMOTE_HOST 'sudo -u www-data sqlite3 $REMOTE_DB_PATH \"SELECT COUNT(*) FROM blobs;\"'"
fi
echo ""

docs/AUTH_RULES_STATUS.md (new file, 302 lines)
View File

@@ -0,0 +1,302 @@
# Auth Rules Management System - Current Status
## Executive Summary
The auth rules engine (evaluation, configuration, caching) is **fully implemented**, but its database schema differs from c-relay's and the rule-management admin API endpoints are still missing. This document outlines the current state and proposes alignment with c-relay's schema.
## Current Database Schema
### Ginxsom Schema (Current)
```sql
CREATE TABLE auth_rules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
rule_type TEXT NOT NULL, -- 'pubkey_blacklist', 'pubkey_whitelist', etc.
rule_target TEXT NOT NULL, -- The pubkey, hash, or MIME type to match
operation TEXT NOT NULL DEFAULT '*', -- 'upload', 'delete', 'list', or '*'
enabled INTEGER NOT NULL DEFAULT 1, -- 1 = enabled, 0 = disabled
    priority INTEGER NOT NULL DEFAULT 100, -- Lower number = higher priority
description TEXT, -- Human-readable description
created_by TEXT, -- Admin pubkey who created the rule
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
CHECK (rule_type IN ('pubkey_blacklist', 'pubkey_whitelist',
'hash_blacklist', 'mime_blacklist', 'mime_whitelist')),
CHECK (operation IN ('upload', 'delete', 'list', '*')),
CHECK (enabled IN (0, 1)),
CHECK (priority >= 0),
UNIQUE(rule_type, rule_target, operation)
);
```
### C-Relay Schema (Target)
```sql
CREATE TABLE auth_rules (
id INTEGER PRIMARY KEY AUTOINCREMENT,
rule_type TEXT NOT NULL,
pattern_type TEXT NOT NULL,
pattern_value TEXT NOT NULL,
active INTEGER NOT NULL DEFAULT 1,
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);
```
## Schema Differences
| Field | Ginxsom | C-Relay | Notes |
|-------|---------|---------|-------|
| `id` | ✅ | ✅ | Same |
| `rule_type` | ✅ | ✅ | Same |
| `rule_target` | ✅ | ❌ | Ginxsom-specific |
| `pattern_type` | ❌ | ✅ | C-relay-specific |
| `pattern_value` | ❌ | ✅ | C-relay-specific |
| `operation` | ✅ | ❌ | Ginxsom-specific |
| `enabled` | ✅ (1/0) | ❌ | Ginxsom uses `enabled` |
| `active` | ❌ | ✅ (1/0) | C-relay uses `active` |
| `priority` | ✅ | ❌ | Ginxsom-specific |
| `description` | ✅ | ❌ | Ginxsom-specific |
| `created_by` | ✅ | ❌ | Ginxsom-specific |
| `created_at` | ✅ | ✅ | Same |
| `updated_at` | ✅ | ✅ | Same |
## What Has Been Implemented
### ✅ Database Layer
- **Schema Created**: [`auth_rules`](../db/52e366edfa4e9cc6a6d4653828e51ccf828a2f5a05227d7a768f33b5a198681a.db) table exists with full schema
- **Indexes**: 5 indexes for performance optimization
- **Constraints**: CHECK constraints for data validation
- **Unique Constraint**: Prevents duplicate rules
### ✅ Rule Evaluation Engine
Location: [`src/request_validator.c:1318-1592`](../src/request_validator.c#L1318-L1592)
**Implemented Features:**
1. **Pubkey Blacklist** (Priority 1) - Lines 1346-1377
2. **Hash Blacklist** (Priority 2) - Lines 1382-1420
3. **MIME Blacklist** (Priority 3) - Lines 1423-1462
4. **Pubkey Whitelist** (Priority 4) - Lines 1464-1491
5. **MIME Whitelist** (Priority 5) - Lines 1493-1526
6. **Whitelist Default Denial** (Priority 6) - Lines 1528-1591
**Features:**
- ✅ Priority-based rule evaluation
- ✅ Wildcard operation matching (`*`)
- ✅ MIME type pattern matching (`image/*`)
- ✅ Whitelist default-deny behavior
- ✅ Detailed violation tracking
- ✅ Performance-optimized queries
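Because evaluation walks these classes in a fixed priority order, the effective policy can be previewed straight from the database; a sketch against the current Ginxsom schema (the database path is a placeholder):

```bash
# List active rules in roughly the order the evaluator considers them.
DB="db/ginxsom.db"  # placeholder; use your actual database file
sqlite3 -header -column "$DB" \
  "SELECT priority, rule_type, rule_target, operation
     FROM auth_rules
    WHERE enabled = 1
    ORDER BY priority ASC, id ASC;"
```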
### ✅ Admin API Commands
Location: [`src/admin_commands.c`](../src/admin_commands.c)
**Implemented Commands:**
- `config_query` - Query configuration values
- `config_update` - Update configuration
- `stats_query` - Get system statistics (includes auth_rules count)
- `system_status` - System health check
- `blob_list` - List stored blobs
- `storage_stats` - Storage statistics
- `sql_query` - Direct SQL queries (read-only)
**Note:** The stats_query command already queries auth_rules:
```c
// Line 390-395
sql = "SELECT COUNT(*) FROM auth_rules WHERE enabled = 1";
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK && sqlite3_step(stmt) == SQLITE_ROW) {
cJSON_AddNumberToObject(stats, "active_auth_rules", sqlite3_column_int(stmt, 0));
}
```
### ❌ Missing Admin API Endpoints
The following endpoints from [`docs/AUTH_RULES_IMPLEMENTATION_PLAN.md`](../docs/AUTH_RULES_IMPLEMENTATION_PLAN.md) are **NOT implemented**:
1. **GET /api/rules** - List authentication rules
2. **POST /api/rules** - Create new rule
3. **PUT /api/rules/:id** - Update existing rule
4. **DELETE /api/rules/:id** - Delete rule
5. **POST /api/rules/clear-cache** - Clear auth cache
6. **GET /api/rules/test** - Test rule evaluation
### ✅ Configuration System
- ✅ `auth_rules_enabled` config flag (checked in [`reload_auth_config()`](../src/request_validator.c#L1049-L1145))
- ✅ Cache system with 5-minute TTL
- ✅ Environment variable support (`GINX_NO_CACHE`, `GINX_CACHE_TIMEOUT`)
### ✅ Documentation
- ✅ [`docs/AUTH_API.md`](../docs/AUTH_API.md) - Complete authentication flow
- ✅ [`docs/AUTH_RULES_IMPLEMENTATION_PLAN.md`](../docs/AUTH_RULES_IMPLEMENTATION_PLAN.md) - Implementation plan
- ✅ Flow diagrams and performance metrics
## Proposed Schema Migration to C-Relay Format
### Option 1: Minimal Changes (Recommended)
Keep Ginxsom's richer schema but rename fields for compatibility:
```sql
ALTER TABLE auth_rules RENAME COLUMN enabled TO active;
ALTER TABLE auth_rules ADD COLUMN pattern_type TEXT;
ALTER TABLE auth_rules ADD COLUMN pattern_value TEXT;
-- Populate new fields from existing data
UPDATE auth_rules SET
pattern_type = CASE
WHEN rule_type LIKE '%pubkey%' THEN 'pubkey'
WHEN rule_type LIKE '%hash%' THEN 'hash'
WHEN rule_type LIKE '%mime%' THEN 'mime'
END,
pattern_value = rule_target;
```
**Pros:**
- Maintains all Ginxsom features (operation, priority, description)
- Adds c-relay compatibility fields
- No data loss
- Backward compatible
**Cons:**
- Redundant fields (`rule_target` + `pattern_value`)
- Larger schema
### Option 2: Full Migration to C-Relay Schema
Drop Ginxsom-specific fields and adopt c-relay schema:
```sql
-- Create new table with c-relay schema
CREATE TABLE auth_rules_new (
id INTEGER PRIMARY KEY AUTOINCREMENT,
rule_type TEXT NOT NULL,
pattern_type TEXT NOT NULL,
pattern_value TEXT NOT NULL,
active INTEGER NOT NULL DEFAULT 1,
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);
-- Migrate data
INSERT INTO auth_rules_new (id, rule_type, pattern_type, pattern_value, active, created_at, updated_at)
SELECT
id,
rule_type,
CASE
WHEN rule_type LIKE '%pubkey%' THEN 'pubkey'
WHEN rule_type LIKE '%hash%' THEN 'hash'
WHEN rule_type LIKE '%mime%' THEN 'mime'
END as pattern_type,
rule_target as pattern_value,
enabled as active,
created_at,
updated_at
FROM auth_rules;
-- Replace old table
DROP TABLE auth_rules;
ALTER TABLE auth_rules_new RENAME TO auth_rules;
```
**Pros:**
- Full c-relay compatibility
- Simpler schema
- Smaller database
**Cons:**
- **Loss of operation-specific rules** (upload/delete/list)
- **Loss of priority system**
- **Loss of description and created_by tracking**
- **Breaking change** - requires code updates in [`request_validator.c`](../src/request_validator.c)
## Code Impact Analysis
### Files Requiring Updates for C-Relay Schema
1. **[`src/request_validator.c`](../src/request_validator.c)**
- Lines 1346-1591: Rule evaluation queries need field name changes
   - Change `enabled` → `active`
   - Change `rule_target` → `pattern_value`
- Add `pattern_type` to queries if using Option 1
2. **[`src/admin_commands.c`](../src/admin_commands.c)**
- Line 390: Stats query uses `enabled` field
- Any future rule management endpoints
3. **[`docs/AUTH_RULES_IMPLEMENTATION_PLAN.md`](../docs/AUTH_RULES_IMPLEMENTATION_PLAN.md)**
- Update schema documentation
- Update API endpoint specifications
## Recommendations
### For C-Relay Alignment
**Use Option 1 (Minimal Changes)** because:
1. Preserves Ginxsom's advanced features (operation-specific rules, priority)
2. Adds c-relay compatibility without breaking existing functionality
3. Minimal code changes required
4. No data loss
### For Admin API Completion
Implement the missing endpoints in priority order:
1. **POST /api/rules** - Create rules (highest priority)
2. **GET /api/rules** - List rules
3. **DELETE /api/rules/:id** - Delete rules
4. **PUT /api/rules/:id** - Update rules
5. **GET /api/rules/test** - Test rules
6. **POST /api/rules/clear-cache** - Clear cache
### Migration Script
```bash
#!/bin/bash
# migrate_auth_rules_to_crelay.sh
DB_PATH="db/52e366edfa4e9cc6a6d4653828e51ccf828a2f5a05227d7a768f33b5a198681a.db"
sqlite3 "$DB_PATH" <<EOF
-- Backup current table
CREATE TABLE auth_rules_backup AS SELECT * FROM auth_rules;
-- Add c-relay compatibility fields
ALTER TABLE auth_rules ADD COLUMN pattern_type TEXT;
ALTER TABLE auth_rules ADD COLUMN pattern_value TEXT;
-- Populate new fields
UPDATE auth_rules SET
pattern_type = CASE
WHEN rule_type LIKE '%pubkey%' THEN 'pubkey'
WHEN rule_type LIKE '%hash%' THEN 'hash'
WHEN rule_type LIKE '%mime%' THEN 'mime'
END,
pattern_value = rule_target;
-- Rename enabled to active for c-relay compatibility
-- Note: SQLite doesn't support RENAME COLUMN directly in older versions
-- So we'll keep both fields for now
ALTER TABLE auth_rules ADD COLUMN active INTEGER NOT NULL DEFAULT 1;
UPDATE auth_rules SET active = enabled;
-- Verify migration
SELECT COUNT(*) as total_rules FROM auth_rules;
SELECT COUNT(*) as rules_with_pattern FROM auth_rules WHERE pattern_type IS NOT NULL;
EOF
```
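Because the script snapshots the table into `auth_rules_backup` before touching it, rolling back is cheap if the migrated schema misbehaves (a sketch; note that `CREATE TABLE ... AS` does not copy indexes or CHECK constraints, so those need recreating afterwards):

```bash
#!/bin/bash
# rollback_auth_rules_migration.sh - restore the pre-migration table
DB_PATH="db/52e366edfa4e9cc6a6d4653828e51ccf828a2f5a05227d7a768f33b5a198681a.db"
sqlite3 "$DB_PATH" <<EOF
DROP TABLE auth_rules;
ALTER TABLE auth_rules_backup RENAME TO auth_rules;
-- Indexes and CHECK constraints from the original schema must be recreated here.
SELECT COUNT(*) AS restored_rules FROM auth_rules;
EOF
```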
## Summary
**Current State:**
- ✅ Database schema exists and is functional
- ✅ Rule evaluation engine fully implemented
- ✅ Configuration system working
- ✅ Documentation complete
- ❌ Admin API endpoints for rule management missing
**To Align with C-Relay:**
- Add `pattern_type` and `pattern_value` fields
- Optionally rename `enabled` to `active`
- Keep Ginxsom's advanced features (operation, priority, description)
- Update queries to use new field names
**Next Steps:**
1. Decide on migration strategy (Option 1 recommended)
2. Run migration script
3. Update code to use new field names
4. Implement missing Admin API endpoints
5. Test rule evaluation with new schema

docs/NEW_DEPLOY_SCRIPT.md (new file, 388 lines)
View File

@@ -0,0 +1,388 @@
# New deploy_lt.sh Script
This is the complete new deployment script for static binary deployment. Save this as `deploy_lt.sh` in the project root.
```bash
#!/bin/bash
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
print_status() { echo -e "${BLUE}[INFO]${NC} $1"; }
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Parse command line arguments
FRESH_INSTALL=false
MIGRATE_DATA=true
if [[ "$1" == "--fresh" ]]; then
FRESH_INSTALL=true
MIGRATE_DATA=false
elif [[ "$1" == "--no-migrate" ]]; then
MIGRATE_DATA=false
fi
# Configuration
REMOTE_HOST="laantungir.net"
REMOTE_USER="ubuntu"
# New paths (static binary deployment)
REMOTE_BINARY_DIR="/usr/local/bin/ginxsom"
REMOTE_BINARY_PATH="$REMOTE_BINARY_DIR/ginxsom-fcgi"
REMOTE_DB_DIR="/var/lib/ginxsom"
REMOTE_DB_PATH="$REMOTE_DB_DIR/ginxsom.db"
REMOTE_BLOB_DIR="/var/www/blobs"
REMOTE_SOCKET="/tmp/ginxsom-fcgi.sock"
# Old paths (for migration)
OLD_BINARY_PATH="/home/ubuntu/ginxsom/ginxsom.fcgi"
OLD_DB_PATH="/home/ubuntu/ginxsom/db/ginxsom.db"
OLD_BLOB_DIR="/var/www/html/blossom"
# Local paths
LOCAL_BINARY="build/ginxsom-fcgi_static_x86_64"
print_status "=========================================="
print_status "Ginxsom Static Binary Deployment"
print_status "=========================================="
print_status "Target: $REMOTE_HOST"
print_status "Binary: $REMOTE_BINARY_PATH"
print_status "Database: $REMOTE_DB_PATH"
print_status "Blobs: $REMOTE_BLOB_DIR"
print_status "Fresh install: $FRESH_INSTALL"
print_status "Migrate data: $MIGRATE_DATA"
print_status "=========================================="
echo ""
# Step 1: Verify local binary exists
print_status "Step 1: Verifying local static binary..."
if [[ ! -f "$LOCAL_BINARY" ]]; then
print_error "Static binary not found: $LOCAL_BINARY"
print_status "Please run: ./build_static.sh"
exit 1
fi
# Verify it's actually static
if ldd "$LOCAL_BINARY" 2>&1 | grep -q "not a dynamic executable\|statically linked"; then
print_success "Binary is static"
else
print_warning "Binary may not be fully static - proceeding anyway"
fi
BINARY_SIZE=$(du -h "$LOCAL_BINARY" | cut -f1)
print_success "Found static binary ($BINARY_SIZE)"
echo ""
# Step 2: Upload binary to server
print_status "Step 2: Uploading binary to server..."
scp "$LOCAL_BINARY" $REMOTE_USER@$REMOTE_HOST:/tmp/ginxsom-fcgi_new || {
print_error "Failed to upload binary"
exit 1
}
print_success "Binary uploaded to /tmp/ginxsom-fcgi_new"
echo ""
# Step 3: Setup directories and install binary
print_status "Step 3: Setting up directories and installing binary..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e
# Create binary directory
echo "Creating binary directory..."
sudo mkdir -p $REMOTE_BINARY_DIR
# Create database directory
echo "Creating database directory..."
sudo mkdir -p $REMOTE_DB_DIR/backups
sudo chown www-data:www-data $REMOTE_DB_DIR
sudo chmod 755 $REMOTE_DB_DIR
# Create blob storage directory
echo "Creating blob storage directory..."
sudo mkdir -p $REMOTE_BLOB_DIR
sudo chown www-data:www-data $REMOTE_BLOB_DIR
sudo chmod 755 $REMOTE_BLOB_DIR
echo "Directories created successfully"
EOF
if [ $? -ne 0 ]; then
print_error "Failed to create directories"
exit 1
fi
print_success "Directories created"
echo ""
# Step 4: Migrate data if requested
if [ "$MIGRATE_DATA" = true ] && [ "$FRESH_INSTALL" = false ]; then
print_status "Step 4: Migrating existing data..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e
# Migrate database
if [ -f $OLD_DB_PATH ]; then
echo "Migrating database from $OLD_DB_PATH..."
sudo cp $OLD_DB_PATH $REMOTE_DB_PATH
sudo chown www-data:www-data $REMOTE_DB_PATH
sudo chmod 644 $REMOTE_DB_PATH
echo "Database migrated"
elif [ -f $OLD_BLOB_DIR/ginxsom.db ]; then
echo "Migrating database from $OLD_BLOB_DIR/ginxsom.db..."
sudo cp $OLD_BLOB_DIR/ginxsom.db $REMOTE_DB_PATH
sudo chown www-data:www-data $REMOTE_DB_PATH
sudo chmod 644 $REMOTE_DB_PATH
echo "Database migrated"
else
echo "No existing database found - will be created on first run"
fi
# Migrate blobs
if [ -d $OLD_BLOB_DIR ] && [ "\$(ls -A $OLD_BLOB_DIR 2>/dev/null)" ]; then
echo "Migrating blobs from $OLD_BLOB_DIR..."
# Copy only blob files (SHA256 hashes with extensions)
sudo find $OLD_BLOB_DIR -type f -regextype posix-extended -regex '.*/[a-f0-9]{64}\.[a-z0-9]+' -exec cp {} $REMOTE_BLOB_DIR/ \; 2>/dev/null || true
sudo chown -R www-data:www-data $REMOTE_BLOB_DIR
BLOB_COUNT=\$(ls -1 $REMOTE_BLOB_DIR | wc -l)
echo "Migrated \$BLOB_COUNT blob files"
else
echo "No existing blobs found"
fi
EOF
if [ $? -eq 0 ]; then
print_success "Data migration completed"
else
print_warning "Data migration had issues - check manually"
fi
echo ""
elif [ "$FRESH_INSTALL" = true ]; then
print_status "Step 4: Fresh install - removing existing data..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
sudo rm -f $REMOTE_DB_PATH
sudo rm -rf $REMOTE_BLOB_DIR/*
echo "Existing data removed"
EOF
print_success "Fresh install prepared"
echo ""
else
print_status "Step 4: Skipping data migration (--no-migrate)"
echo ""
fi
# Step 5: Install minimal dependencies
print_status "Step 5: Installing minimal dependencies..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
set -e
# Check if spawn-fcgi is installed
if ! command -v spawn-fcgi &> /dev/null; then
echo "Installing spawn-fcgi..."
sudo apt-get update -qq
sudo apt-get install -y spawn-fcgi
echo "spawn-fcgi installed"
else
echo "spawn-fcgi already installed"
fi
EOF
if [ $? -eq 0 ]; then
print_success "Dependencies verified"
else
print_error "Failed to install dependencies"
exit 1
fi
echo ""
# Step 6: Stop existing service and install new binary
print_status "Step 6: Stopping existing service and installing new binary..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e
# Stop any existing ginxsom processes
echo "Stopping existing ginxsom processes..."
sudo pkill -f ginxsom-fcgi || true
sleep 2
# Remove old socket
sudo rm -f $REMOTE_SOCKET
# Install new binary
echo "Installing new binary..."
sudo mv /tmp/ginxsom-fcgi_new $REMOTE_BINARY_PATH
sudo chmod +x $REMOTE_BINARY_PATH
sudo chown root:root $REMOTE_BINARY_PATH
echo "Binary installed successfully"
EOF
if [ $? -eq 0 ]; then
print_success "Binary installed"
else
print_error "Failed to install binary"
exit 1
fi
echo ""
# Step 7: Start ginxsom FastCGI process
print_status "Step 7: Starting ginxsom FastCGI process..."
ssh $REMOTE_USER@$REMOTE_HOST << EOF
set -e
echo "Starting ginxsom FastCGI with configuration:"
echo " Binary: $REMOTE_BINARY_PATH"
echo " Database: $REMOTE_DB_PATH"
echo " Storage: $REMOTE_BLOB_DIR"
echo " Socket: $REMOTE_SOCKET"
echo ""
sudo spawn-fcgi \
-M 666 \
-u www-data \
-g www-data \
-s $REMOTE_SOCKET \
-U www-data \
-G www-data \
-d $REMOTE_DB_DIR \
-- $REMOTE_BINARY_PATH \
--db-path $REMOTE_DB_PATH \
--storage-dir $REMOTE_BLOB_DIR
# Give it a moment to start
sleep 2
# Verify process is running
if [ -S $REMOTE_SOCKET ]; then
echo "FastCGI socket created successfully"
ls -la $REMOTE_SOCKET
else
echo "ERROR: Socket not created"
exit 1
fi
# Check if process is running
if pgrep -f ginxsom-fcgi > /dev/null; then
echo "Process is running (PID: \$(pgrep -f ginxsom-fcgi))"
else
echo "WARNING: Process not found by pgrep (may be normal for FastCGI)"
fi
EOF
if [ $? -eq 0 ]; then
print_success "FastCGI process started"
else
print_error "Failed to start FastCGI process"
exit 1
fi
echo ""
# Step 8: Test nginx configuration and reload
print_status "Step 8: Testing and reloading nginx..."
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
# Test nginx configuration
if sudo nginx -t 2>&1; then
echo "Nginx configuration test passed"
sudo nginx -s reload
echo "Nginx reloaded successfully"
else
echo "WARNING: Nginx configuration test failed"
echo "You may need to update nginx configuration manually"
echo "See docs/STATIC_DEPLOYMENT_PLAN.md for details"
fi
EOF
if [ $? -eq 0 ]; then
print_success "Nginx reloaded"
else
print_warning "Nginx reload had issues - check configuration"
fi
echo ""
# Step 9: Test deployment
print_status "Step 9: Testing deployment..."
echo ""
# Wait a moment for service to fully start
sleep 2
# Test health endpoint
echo "Testing health endpoint..."
if curl -k -s --max-time 10 "https://blossom.laantungir.net/health" | grep -q "OK"; then
print_success "✓ Health check passed"
else
print_warning "✗ Health check failed - checking response..."
curl -k -v --max-time 10 "https://blossom.laantungir.net/health" 2>&1 | head -10
fi
# Test root endpoint
echo ""
echo "Testing root endpoint..."
if curl -k -s --max-time 10 "https://blossom.laantungir.net/" | grep -q "Ginxsom"; then
print_success "✓ Root endpoint responding"
else
print_warning "✗ Root endpoint not responding as expected"
fi
echo ""
print_status "=========================================="
print_success "Deployment completed!"
print_status "=========================================="
echo ""
print_status "Service Information:"
echo " URL: https://blossom.laantungir.net"
echo " Binary: $REMOTE_BINARY_PATH"
echo " Database: $REMOTE_DB_PATH"
echo " Blobs: $REMOTE_BLOB_DIR"
echo " Socket: $REMOTE_SOCKET"
echo ""
print_status "Test Commands:"
echo " Health: curl -k https://blossom.laantungir.net/health"
echo " Info: curl -k https://blossom.laantungir.net/"
echo " Upload: ./tests/file_put_bud02.sh"
echo ""
print_status "Server Commands:"
echo " Check status: ssh $REMOTE_USER@$REMOTE_HOST 'ps aux | grep ginxsom-fcgi'"
echo " View logs: ssh $REMOTE_USER@$REMOTE_HOST 'sudo journalctl -f | grep ginxsom'"
echo " Restart: ssh $REMOTE_USER@$REMOTE_HOST 'sudo pkill ginxsom-fcgi && sudo spawn-fcgi ...'"
echo ""
if [ "$FRESH_INSTALL" = true ]; then
print_warning "Fresh install completed - database and blobs have been reset"
fi
if [ "$MIGRATE_DATA" = true ] && [ "$FRESH_INSTALL" = false ]; then
print_status "Data migration completed - verify blob count and database"
echo " Check blobs: ssh $REMOTE_USER@$REMOTE_HOST 'ls -la $REMOTE_BLOB_DIR | wc -l'"
echo " Check DB: ssh $REMOTE_USER@$REMOTE_HOST 'sudo -u www-data sqlite3 $REMOTE_DB_PATH \"SELECT COUNT(*) FROM blobs;\"'"
fi
echo ""
print_status "For nginx configuration updates, see: docs/STATIC_DEPLOYMENT_PLAN.md"
print_status "=========================================="
```
## Usage
```bash
# Normal deployment with data migration
./deploy_lt.sh
# Fresh install (removes all data)
./deploy_lt.sh --fresh
# Deploy without migrating data
./deploy_lt.sh --no-migrate
```
## Key Changes from Old Script
1. **No remote compilation** - uploads pre-built static binary
2. **New directory structure** - follows FHS standards
3. **Minimal dependencies** - only spawn-fcgi needed
4. **Data migration** - automatically migrates from old locations
5. **Simplified process** - ~30 seconds vs ~5-10 minutes
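Since every later step assumes the binary really is static, it is worth running the script's own verification locally before each deploy; the check below is lifted from the script so it can be run standalone:

```bash
# Pre-flight: confirm the static binary exists and has no dynamic deps.
LOCAL_BINARY="build/ginxsom-fcgi_static_x86_64"
[ -f "$LOCAL_BINARY" ] || { echo "Missing $LOCAL_BINARY - run ./build_static.sh"; exit 1; }
if ldd "$LOCAL_BINARY" 2>&1 | grep -q "not a dynamic executable\|statically linked"; then
    echo "OK: static binary, $(du -h "$LOCAL_BINARY" | cut -f1)"
else
    echo "WARNING: binary may not be fully static"
fi
```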

View File

@@ -0,0 +1,478 @@
# Nginx Configuration Updates for Static Binary Deployment
## Overview
This document describes the required nginx configuration changes to support the new static binary deployment with updated directory paths.
## Changes Required
### 1. Blob Storage Root Directory
**Change from:**
```nginx
root /var/www/html/blossom;
```
**Change to:**
```nginx
root /var/www/blobs;
```
### 2. FastCGI Script Filename
**Change from:**
```nginx
fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
```
**Change to:**
```nginx
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
```
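After applying both edits, one check per change confirms nginx picked them up: `/health` is answered by nginx itself, while `/upload` must reach the FastCGI binary at its new path (a sketch; without a signed authorization event the upload probe should return an auth-style error rather than a 502):

```bash
# Validate and reload the edited configuration.
sudo nginx -t && sudo nginx -s reload

# /health is served directly by nginx (expects "OK").
curl -k -s https://blossom.laantungir.net/health

# /upload is proxied to the ginxsom-fcgi binary; a 502 here would mean
# SCRIPT_FILENAME or the socket is wrong, while any 4xx means the binary answered.
curl -k -s -o /dev/null -w '%{http_code}\n' -X PUT https://blossom.laantungir.net/upload
```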
## Complete Updated Configuration
Save this as `/etc/nginx/conf.d/default.conf` on the server (or update the existing file):
```nginx
# FastCGI upstream configuration
upstream ginxsom_backend {
server unix:/tmp/ginxsom-fcgi.sock;
}
# Main domains
server {
if ($host = laantungir.net) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80;
server_name laantungir.com www.laantungir.com laantungir.net www.laantungir.net laantungir.org www.laantungir.org;
root /var/www/html;
index index.html index.htm;
# CORS for Nostr NIP-05 verification
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, OPTIONS" always;
add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range" always;
location / {
try_files $uri $uri/ =404;
}
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
error_page 404 /404.html;
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /var/www/html;
}
}
# Main domains HTTPS - using the main certificate
server {
listen 443 ssl;
server_name laantungir.com www.laantungir.com laantungir.net www.laantungir.net laantungir.org www.laantungir.org;
ssl_certificate /etc/letsencrypt/live/laantungir.net/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/laantungir.net/privkey.pem; # managed by Certbot
root /var/www/html;
index index.html index.htm;
# CORS for Nostr NIP-05 verification
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, OPTIONS" always;
add_header Access-Control-Allow-Headers "DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range" always;
location / {
try_files $uri $uri/ =404;
}
error_page 404 /404.html;
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /var/www/html;
}
}
# Blossom subdomains HTTP - redirect to HTTPS (keep for ACME)
server {
listen 80;
server_name blossom.laantungir.net;
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
location / {
return 301 https://$server_name$request_uri;
}
}
# Blossom subdomains HTTPS - ginxsom FastCGI
server {
listen 443 ssl;
server_name blossom.laantungir.net;
ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;
# Security headers
add_header X-Content-Type-Options nosniff always;
add_header X-Frame-Options DENY always;
add_header X-XSS-Protection "1; mode=block" always;
# CORS for Blossom protocol
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, HEAD, OPTIONS, PATCH" always;
add_header Access-Control-Allow-Headers "Authorization, Content-Type, Content-Length, Accept, Origin, User-Agent, DNT, Cache-Control, X-Mx-ReqToken, Keep-Alive, X-Requested-With, If-Modified-Since, *" always;
add_header Access-Control-Max-Age 86400 always;
# UPDATED: Root directory for blob storage
root /var/www/blobs;
# Maximum upload size
client_max_body_size 100M;
# OPTIONS preflight handler
if ($request_method = OPTIONS) {
return 204;
}
# PUT /upload - File uploads
location = /upload {
if ($request_method !~ ^(PUT|HEAD)$) {
return 405;
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
# UPDATED: Direct path to binary
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# GET /list/<pubkey> - List user blobs
location ~ "^/list/([a-f0-9]{64})$" {
if ($request_method !~ ^(GET)$) {
return 405;
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
# UPDATED: Direct path to binary
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# PUT /mirror - Mirror content
location = /mirror {
if ($request_method !~ ^(PUT)$) {
return 405;
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
# UPDATED: Direct path to binary
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# PUT /report - Report content
location = /report {
if ($request_method !~ ^(PUT)$) {
return 405;
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
# UPDATED: Direct path to binary
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# GET /auth - NIP-42 challenges
location = /auth {
if ($request_method !~ ^(GET)$) {
return 405;
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
# UPDATED: Direct path to binary
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# Admin API
location /api/ {
if ($request_method !~ ^(GET|PUT)$) {
return 405;
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
# UPDATED: Direct path to binary
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# Blob serving - SHA256 patterns
location ~ "^/([a-f0-9]{64})(\.[a-zA-Z0-9]+)?$" {
# Handle DELETE via rewrite
if ($request_method = DELETE) {
rewrite ^/(.*)$ /fcgi-delete/$1 last;
}
# Route HEAD to FastCGI
if ($request_method = HEAD) {
rewrite ^/(.*)$ /fcgi-head/$1 last;
}
# GET requests - serve files directly
if ($request_method != GET) {
return 405;
}
try_files /$1.txt /$1.jpg /$1.jpeg /$1.png /$1.webp /$1.gif /$1.pdf /$1.mp4 /$1.mp3 /$1.md =404;
# Cache headers
add_header Cache-Control "public, max-age=31536000, immutable";
}
# Internal FastCGI handlers
location ~ "^/fcgi-delete/([a-f0-9]{64}).*$" {
internal;
fastcgi_pass ginxsom_backend;
include fastcgi_params;
# UPDATED: Direct path to binary
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
fastcgi_param REQUEST_URI /$1;
}
location ~ "^/fcgi-head/([a-f0-9]{64}).*$" {
internal;
fastcgi_pass ginxsom_backend;
include fastcgi_params;
# UPDATED: Direct path to binary
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
fastcgi_param REQUEST_URI /$1;
}
# Health check
location /health {
access_log off;
return 200 "OK\n";
add_header Content-Type text/plain;
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, HEAD, OPTIONS, PATCH" always;
add_header Access-Control-Allow-Headers "Authorization, Content-Type, Content-Length, Accept, Origin, User-Agent, DNT, Cache-Control, X-Mx-ReqToken, Keep-Alive, X-Requested-With, If-Modified-Since, *" always;
add_header Access-Control-Max-Age 86400 always;
}
# Default location - Server info from FastCGI
location / {
if ($request_method !~ ^(GET)$) {
return 405;
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
# UPDATED: Direct path to binary
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
}
server {
listen 80;
server_name relay.laantungir.com relay.laantungir.net relay.laantungir.org;
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
location / {
proxy_pass http://127.0.0.1:8888;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header Sec-WebSocket-Key $http_sec_websocket_key;
proxy_set_header Sec-WebSocket-Version $http_sec_websocket_version;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
proxy_read_timeout 86400s;
proxy_send_timeout 86400s;
proxy_connect_timeout 60s;
proxy_buffering off;
proxy_request_buffering off;
gzip off;
}
}
# Relay HTTPS - proxy to c-relay
server {
listen 443 ssl;
server_name relay.laantungir.com relay.laantungir.net relay.laantungir.org;
ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;
location / {
proxy_pass http://127.0.0.1:8888;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $host;
proxy_set_header Sec-WebSocket-Key $http_sec_websocket_key;
proxy_set_header Sec-WebSocket-Version $http_sec_websocket_version;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
proxy_read_timeout 86400s;
proxy_send_timeout 86400s;
proxy_connect_timeout 60s;
proxy_buffering off;
proxy_request_buffering off;
gzip off;
}
}
# Git subdomains HTTP - redirect to HTTPS
server {
listen 80;
server_name git.laantungir.com git.laantungir.net git.laantungir.org;
# Allow larger file uploads for Git releases
client_max_body_size 50M;
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
location / {
return 301 https://$server_name$request_uri;
}
}
# Auth subdomains HTTP - redirect to HTTPS
server {
listen 80;
server_name auth.laantungir.com auth.laantungir.net auth.laantungir.org;
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
location / {
return 301 https://$server_name$request_uri;
}
}
# Git subdomains HTTPS - proxy to gitea
server {
listen 443 ssl;
server_name git.laantungir.com git.laantungir.net git.laantungir.org;
# Allow larger file uploads for Git releases
client_max_body_size 50M;
ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;
location / {
proxy_pass http://localhost:3000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_buffering off;
proxy_request_buffering off;
proxy_read_timeout 86400s;
proxy_send_timeout 86400s;
proxy_connect_timeout 60s;
gzip off;
proxy_set_header Host $host;
proxy_set_header Sec-WebSocket-Key $http_sec_websocket_key;
proxy_set_header Sec-WebSocket-Version $http_sec_websocket_version;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
}
}
# Auth subdomains HTTPS - proxy to nostr-auth
server {
listen 443 ssl;
server_name auth.laantungir.com auth.laantungir.net auth.laantungir.org;
ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;
location / {
proxy_pass http://localhost:3001;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_buffering off;
proxy_request_buffering off;
proxy_read_timeout 86400s;
proxy_send_timeout 86400s;
proxy_connect_timeout 60s;
gzip off;
proxy_set_header Host $host;
proxy_set_header Sec-WebSocket-Key $http_sec_websocket_key;
proxy_set_header Sec-WebSocket-Version $http_sec_websocket_version;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
}
}
```
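With this configuration in place, a few `curl` probes make a quick smoke test of the method routing (a sketch; the blob hash is a placeholder and the expected responses assume the config above is live):

```bash
# OPTIONS preflight short-circuits with 204 and the CORS headers
curl -ski -X OPTIONS https://blossom.laantungir.net/upload | head -n 1

# GET on a blob hash is served straight from /var/www/blobs via try_files
curl -ski https://blossom.laantungir.net/<sha256>.jpg | head -n 1

# POST is not in /upload's allowed set (PUT|HEAD), so expect 405
curl -ski -X POST https://blossom.laantungir.net/upload | head -n 1
```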
## Manual Update Steps
If you prefer to update the existing configuration manually:
```bash
# 1. Backup current configuration
ssh ubuntu@laantungir.net
sudo cp /etc/nginx/conf.d/default.conf /etc/nginx/conf.d/default.conf.backup
# 2. Edit the configuration
sudo nano /etc/nginx/conf.d/default.conf
# 3. Find and replace (in the blossom server block):
# - Change: root /var/www/html/blossom;
# - To: root /var/www/blobs;
# 4. Find and replace (all FastCGI locations):
# - Change: fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
# - To: fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
# 5. Test configuration
sudo nginx -t
# 6. If test passes, reload nginx
sudo nginx -s reload
# 7. If test fails, restore backup
sudo cp /etc/nginx/conf.d/default.conf.backup /etc/nginx/conf.d/default.conf
sudo nginx -s reload
```
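Steps 3 and 4 can also be scripted with `sed` instead of editing in nano (a sketch; it assumes the old values appear verbatim, so run the backup from step 1 first):

```bash
# Apply both replacements in place, then test and reload
sudo sed -i \
  -e 's|root /var/www/html/blossom;|root /var/www/blobs;|' \
  -e 's|fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;|fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;|' \
  /etc/nginx/conf.d/default.conf
sudo nginx -t && sudo nginx -s reload
```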
## Verification
After updating the configuration:
```bash
# Check nginx syntax
sudo nginx -t
# Check if ginxsom is responding
curl -k https://blossom.laantungir.net/health
# Check blob serving (if you have existing blobs)
curl -k https://blossom.laantungir.net/<some-sha256-hash>.jpg
```
## Summary of Changes
| Item | Old Value | New Value |
|------|-----------|-----------|
| Blob root | `/var/www/html/blossom` | `/var/www/blobs` |
| Binary path | `$document_root/ginxsom.fcgi` | `/usr/local/bin/ginxsom/ginxsom-fcgi` |
| Binary location | `/home/ubuntu/ginxsom/ginxsom.fcgi` | `/usr/local/bin/ginxsom/ginxsom-fcgi` |
These changes align with the new static binary deployment architecture and Linux FHS standards.
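A final grep confirms no stale references survived the edit (it should print nothing once both replacements are applied):

```bash
grep -nE 'ginxsom\.fcgi|/var/www/html/blossom' /etc/nginx/conf.d/default.conf
```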

View File

@@ -0,0 +1,383 @@
# Static MUSL Binary Deployment Plan
## Overview
This document outlines the deployment architecture for ginxsom using static MUSL binaries. The new approach eliminates remote compilation and simplifies deployment to a single binary upload.
## Architecture Changes
### Current Deployment (Old)
```
Local Machine:
- Build dynamic binary with make
- Upload entire project via rsync
- Remote server compiles from source
- Install dependencies (libsqlite3-dev, libfcgi-dev, etc.)
- Build nostr_core_lib submodules remotely
- Binary location: /home/ubuntu/ginxsom/ginxsom.fcgi
- Database: /home/ubuntu/ginxsom/db/ginxsom.db
- Blobs: /var/www/html/blossom/
```
### New Deployment (Static MUSL)
```
Local Machine:
- Build static MUSL binary with Docker (build_static.sh)
- Upload only the binary (no source code needed)
- No remote compilation required
- Minimal dependencies (only spawn-fcgi)
- Binary location: /usr/local/bin/ginxsom/ginxsom-fcgi
- Database: /var/lib/ginxsom/ginxsom.db
- Blobs: /var/www/blobs/
```
## Directory Structure
### Production Server Layout
```
/usr/local/bin/ginxsom/
├── ginxsom-fcgi # Static binary (executable)
└── README.md # Version info and deployment notes
/var/lib/ginxsom/
├── ginxsom.db # SQLite database
└── backups/ # Database backups
/var/www/blobs/
├── <sha256>.jpg # Blob files
├── <sha256>.png
└── ...
/tmp/
└── ginxsom-fcgi.sock # FastCGI socket
```
## Deployment Process
### Phase 1: Build Static Binary (Local)
```bash
# Build the static binary
./build_static.sh
# Output: build/ginxsom-fcgi_static_x86_64
# Size: ~7-10 MB
# Dependencies: NONE (fully static)
```
### Phase 2: Upload Binary
```bash
# Upload to server
scp build/ginxsom-fcgi_static_x86_64 ubuntu@laantungir.net:/tmp/
# Install to /usr/local/bin/ginxsom/
ssh ubuntu@laantungir.net << 'EOF'
sudo mkdir -p /usr/local/bin/ginxsom
sudo mv /tmp/ginxsom-fcgi_static_x86_64 /usr/local/bin/ginxsom/ginxsom-fcgi
sudo chmod +x /usr/local/bin/ginxsom/ginxsom-fcgi
sudo chown root:root /usr/local/bin/ginxsom/ginxsom-fcgi
EOF
```
### Phase 3: Setup Data Directories
```bash
ssh ubuntu@laantungir.net << 'EOF'
# Create database directory
sudo mkdir -p /var/lib/ginxsom/backups
sudo chown www-data:www-data /var/lib/ginxsom
sudo chmod 755 /var/lib/ginxsom
# Create blob storage directory
sudo mkdir -p /var/www/blobs
sudo chown www-data:www-data /var/www/blobs
sudo chmod 755 /var/www/blobs
# Migrate existing data if needed
if [ -f /var/www/html/blossom/ginxsom.db ]; then
sudo cp /var/www/html/blossom/ginxsom.db /var/lib/ginxsom/
sudo chown www-data:www-data /var/lib/ginxsom/ginxsom.db
fi
if [ -d /var/www/html/blossom ]; then
sudo cp -r /var/www/html/blossom/* /var/www/blobs/ 2>/dev/null || true
sudo chown -R www-data:www-data /var/www/blobs
fi
EOF
```
### Phase 4: Install Minimal Dependencies
```bash
ssh ubuntu@laantungir.net << 'EOF'
# Only spawn-fcgi is needed (no build tools!)
sudo apt-get update
sudo apt-get install -y spawn-fcgi
EOF
```
### Phase 5: Start Service
```bash
ssh ubuntu@laantungir.net << 'EOF'
# Stop existing process
sudo pkill -f ginxsom-fcgi || true
sudo rm -f /tmp/ginxsom-fcgi.sock
# Start with spawn-fcgi
sudo spawn-fcgi \
-M 666 \
-u www-data \
-g www-data \
-s /tmp/ginxsom-fcgi.sock \
-U www-data \
-G www-data \
-d /var/lib/ginxsom \
-- /usr/local/bin/ginxsom/ginxsom-fcgi \
--db-path /var/lib/ginxsom/ginxsom.db \
--storage-dir /var/www/blobs
EOF
```
## Nginx Configuration Updates
### Required Changes to `/etc/nginx/conf.d/default.conf`
```nginx
# Blossom subdomains HTTPS - ginxsom FastCGI
server {
listen 443 ssl;
server_name blossom.laantungir.net;
ssl_certificate /etc/letsencrypt/live/git.laantungir.net/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/git.laantungir.net/privkey.pem;
# Security headers
add_header X-Content-Type-Options nosniff always;
add_header X-Frame-Options DENY always;
add_header X-XSS-Protection "1; mode=block" always;
# CORS for Blossom protocol
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, HEAD, OPTIONS, PATCH" always;
add_header Access-Control-Allow-Headers "Authorization, Content-Type, Content-Length, Accept, Origin, User-Agent, DNT, Cache-Control, X-Mx-ReqToken, Keep-Alive, X-Requested-With, If-Modified-Since, *" always;
add_header Access-Control-Max-Age 86400 always;
# CHANGED: Root directory for blob storage
root /var/www/blobs; # Was: /var/www/html/blossom
# Maximum upload size
client_max_body_size 100M;
# ... rest of configuration remains the same ...
# CHANGED: Update SCRIPT_FILENAME references
location = /upload {
if ($request_method !~ ^(PUT|HEAD)$) {
return 405;
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi; # Was: $document_root/ginxsom.fcgi
}
# Apply same change to all other FastCGI locations...
}
```
## Benefits of New Architecture
### 1. Simplified Deployment
- **Before**: Upload source → Install deps → Build submodules → Compile → Deploy
- **After**: Upload binary → Start service
### 2. Reduced Dependencies
- **Before**: gcc, make, git, libsqlite3-dev, libfcgi-dev, libcurl4-openssl-dev, etc.
- **After**: spawn-fcgi only
### 3. Better Security
- No build tools on production server
- No source code on production server
- Smaller attack surface
### 4. Faster Deployments
- **Before**: ~5-10 minutes (build time)
- **After**: ~30 seconds (upload + restart)
### 5. Consistent Binaries
- Same binary works on any Linux distribution
- No "works on my machine" issues
- Reproducible builds via Docker
### 6. Cleaner Organization
- Binary in standard location (`/usr/local/bin/`)
- Data in standard location (`/var/lib/`)
- Blobs separate from web root (`/var/www/blobs/`)
## Migration Strategy
### Option 1: In-Place Migration (Recommended)
1. Build static binary locally
2. Upload to `/tmp/`
3. Stop current service
4. Create new directories
5. Migrate data
6. Update nginx config
7. Start new service
8. Verify functionality
9. Clean up old files
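Condensed into a script, Option 1 looks roughly like this (a sketch reusing the Phase 2-5 commands above; review before running):

```bash
# Build locally, then push and install in one pass
./build_static.sh
scp build/ginxsom-fcgi_static_x86_64 ubuntu@laantungir.net:/tmp/
ssh ubuntu@laantungir.net << 'EOF'
sudo pkill -f ginxsom-fcgi || true
sudo mkdir -p /usr/local/bin/ginxsom /var/lib/ginxsom/backups /var/www/blobs
sudo mv /tmp/ginxsom-fcgi_static_x86_64 /usr/local/bin/ginxsom/ginxsom-fcgi
sudo chmod +x /usr/local/bin/ginxsom/ginxsom-fcgi
# Migrate data and update nginx as in Phases 3-4, then restart per Phase 5
EOF
```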
### Option 2: Blue-Green Deployment
1. Setup new directories alongside old
2. Deploy static binary
3. Test on different port
4. Switch nginx config
5. Remove old deployment
### Option 3: Fresh Install
1. Backup database and blobs
2. Remove old installation
3. Deploy static binary
4. Restore data
5. Configure nginx
6. Start service
## Rollback Plan
If issues occur, rollback is simple:
```bash
# Stop new service
sudo pkill -f ginxsom-fcgi
# Restore old binary location
sudo spawn-fcgi \
-M 666 \
-u www-data \
-g www-data \
-s /tmp/ginxsom-fcgi.sock \
-U www-data \
-G www-data \
-d /home/ubuntu/ginxsom \
-- /home/ubuntu/ginxsom/ginxsom.fcgi \
--db-path /home/ubuntu/ginxsom/db/ginxsom.db \
--storage-dir /var/www/html/blossom
# Revert nginx config
sudo cp /etc/nginx/conf.d/default.conf.backup /etc/nginx/conf.d/default.conf
sudo nginx -s reload
```
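After rolling back, the health probe from the verification steps confirms the old binary is serving again:

```bash
curl -k https://blossom.laantungir.net/health
# Expected: OK
```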
## SystemD Service (Future Enhancement)
Create `/etc/systemd/system/ginxsom.service`:
```ini
[Unit]
Description=Ginxsom Blossom Server
After=network.target
[Service]
Type=forking
User=www-data
Group=www-data
WorkingDirectory=/var/lib/ginxsom
ExecStart=/usr/bin/spawn-fcgi \
-M 666 \
-u www-data \
-g www-data \
-s /tmp/ginxsom-fcgi.sock \
-U www-data \
-G www-data \
-d /var/lib/ginxsom \
-- /usr/local/bin/ginxsom/ginxsom-fcgi \
--db-path /var/lib/ginxsom/ginxsom.db \
--storage-dir /var/www/blobs
ExecStop=/usr/bin/pkill -f ginxsom-fcgi
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
```
Enable and start:
```bash
sudo systemctl daemon-reload
sudo systemctl enable ginxsom
sudo systemctl start ginxsom
sudo systemctl status ginxsom
```
## Verification Steps
After deployment, verify:
1. **Binary is static**:
```bash
ldd /usr/local/bin/ginxsom/ginxsom-fcgi
# Should show: "not a dynamic executable"
```
2. **Service is running**:
```bash
ps aux | grep ginxsom-fcgi
ls -la /tmp/ginxsom-fcgi.sock
```
3. **Health endpoint**:
```bash
curl -k https://blossom.laantungir.net/health
# Should return: OK
```
4. **Upload test**:
```bash
# Use existing test scripts
./tests/file_put_bud02.sh
```
5. **Database access**:
```bash
sudo -u www-data sqlite3 /var/lib/ginxsom/ginxsom.db "SELECT COUNT(*) FROM blobs;"
```
6. **Blob storage**:
```bash
ls -la /var/www/blobs/ | head
```
## Monitoring
Key metrics to monitor:
- Binary size: `du -h /usr/local/bin/ginxsom/ginxsom-fcgi`
- Database size: `du -h /var/lib/ginxsom/ginxsom.db`
- Blob storage: `du -sh /var/www/blobs/`
- Process status: `systemctl status ginxsom` (if using systemd)
- Socket status: `ls -la /tmp/ginxsom-fcgi.sock`
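These checks combine naturally into a one-shot status script (a sketch; paths as above):

```bash
#!/bin/sh
# ginxsom-status.sh - snapshot of the metrics listed above
du -h /usr/local/bin/ginxsom/ginxsom-fcgi
du -h /var/lib/ginxsom/ginxsom.db
du -sh /var/www/blobs/
ls -la /tmp/ginxsom-fcgi.sock 2>/dev/null || echo "socket missing"
pgrep -fa ginxsom-fcgi || echo "process not running"
```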
## Backup Strategy
### Database Backups
```bash
# Daily backup
sudo -u www-data sqlite3 /var/lib/ginxsom/ginxsom.db ".backup /var/lib/ginxsom/backups/ginxsom-$(date +%Y%m%d).db"
# Keep last 7 days
find /var/lib/ginxsom/backups/ -name "ginxsom-*.db" -mtime +7 -delete
```
### Blob Backups
```bash
# Sync to backup location
rsync -av /var/www/blobs/ /backup/ginxsom-blobs/
```
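Both jobs can be driven from root's crontab (a sketch; the times are arbitrary, and note that `%` must be escaped in crontab entries):

```bash
# m h dom mon dow  command
15 3 * * * sudo -u www-data sqlite3 /var/lib/ginxsom/ginxsom.db ".backup /var/lib/ginxsom/backups/ginxsom-$(date +\%Y\%m\%d).db"
30 3 * * * find /var/lib/ginxsom/backups/ -name "ginxsom-*.db" -mtime +7 -delete
45 3 * * * rsync -a /var/www/blobs/ /backup/ginxsom-blobs/
```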
## Conclusion
The static MUSL binary deployment provides:
- ✅ Simpler deployment process
- ✅ Fewer dependencies
- ✅ Better security
- ✅ Faster updates
- ✅ Universal compatibility
- ✅ Cleaner organization
This architecture follows Linux FHS (Filesystem Hierarchy Standard) best practices and provides a solid foundation for production deployment.

ginxsom.service Normal file
View File

@@ -0,0 +1,25 @@
[Unit]
Description=Ginxsom Blossom Server
After=network.target
[Service]
Type=forking
ExecStartPre=/bin/rm -f /tmp/ginxsom-fcgi.sock
ExecStart=/usr/bin/spawn-fcgi \
-s /tmp/ginxsom-fcgi.sock \
-M 666 \
-u www-data \
-g www-data \
-d /usr/local/bin/ginxsom \
-- /usr/local/bin/ginxsom/ginxsom-fcgi \
--admin-pubkey 1ec454734dcbf6fe54901ce25c0c7c6bca5edd89443416761fadc321d38df139 \
--server-privkey 90df3fe61e7d19e50f387e4c5db87eff1a7d2a1037cd55026c4b21a4fda8ecf6 \
--db-path /usr/local/bin/ginxsom \
--storage-dir /var/www/blobs
ExecStop=/usr/bin/pkill -f ginxsom-fcgi
ExecStopPost=/bin/rm -f /tmp/ginxsom-fcgi.sock
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target

View File

@@ -97,7 +97,7 @@ server {
add_header Access-Control-Max-Age 86400 always;
# Root directory for blob storage
-root /var/www/html/blossom;
+root /var/www/blobs;
# Maximum upload size
client_max_body_size 100M;
@@ -114,7 +114,7 @@ server {
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
-fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# GET /list/<pubkey> - List user blobs
@@ -124,7 +124,7 @@ server {
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
-fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# PUT /mirror - Mirror content
@@ -134,7 +134,7 @@ server {
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
-fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# PUT /report - Report content
@@ -144,7 +144,7 @@ server {
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
-fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# GET /auth - NIP-42 challenges
@@ -154,17 +154,17 @@ server {
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
-fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# Admin API
location /api/ {
-if ($request_method !~ ^(GET|PUT)$) {
+if ($request_method !~ ^(GET|POST|PUT)$) {
return 405;
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
-fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
# Blob serving - SHA256 patterns
@@ -195,7 +195,7 @@ server {
internal;
fastcgi_pass ginxsom_backend;
include fastcgi_params;
-fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
fastcgi_param REQUEST_URI /$1;
}
@@ -203,7 +203,7 @@ server {
internal;
fastcgi_pass ginxsom_backend;
include fastcgi_params;
-fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
fastcgi_param REQUEST_URI /$1;
}
@@ -225,7 +225,7 @@ server {
}
fastcgi_pass ginxsom_backend;
include fastcgi_params;
-fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
+fastcgi_param SCRIPT_FILENAME /usr/local/bin/ginxsom/ginxsom-fcgi;
}
}

View File

@@ -190,6 +190,10 @@ echo -e "${GREEN}FastCGI cleanup complete${NC}"
# Step 3: Always rebuild FastCGI binary with static build
echo -e "\n${YELLOW}3. Rebuilding FastCGI binary (static build)...${NC}"
+echo "Cleaning old build artifacts to ensure fresh embedding..."
+make clean
+echo "Removing local embedded header to prevent Docker cache issues..."
+rm -f src/admin_interface_embedded.h
echo "Building static binary with Docker..."
make static
if [ $? -ne 0 ]; then

View File

@@ -8,6 +8,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <ctype.h>
#include <time.h>
// Forward declare app_log
@@ -142,6 +143,19 @@ cJSON* admin_commands_process(cJSON* command_array, const char* request_event_id
else if (strcmp(command, "sql_query") == 0) { else if (strcmp(command, "sql_query") == 0) {
return admin_cmd_sql_query(command_array); return admin_cmd_sql_query(command_array);
} }
else if (strcmp(command, "query_view") == 0) {
return admin_cmd_query_view(command_array);
}
// Auth rules management commands (c-relay compatible)
else if (strcmp(command, "blacklist") == 0 || strcmp(command, "whitelist") == 0) {
return admin_cmd_auth_add_rule(command_array);
}
else if (strcmp(command, "delete_auth_rule") == 0) {
return admin_cmd_auth_delete_rule(command_array);
}
else if (strcmp(command, "auth_query") == 0) {
return admin_cmd_auth_query(command_array);
}
else { else {
char error_msg[256]; char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Unknown command: %s", command); snprintf(error_msg, sizeof(error_msg), "Unknown command: %s", command);
@@ -387,7 +401,7 @@ cJSON* admin_cmd_stats_query(cJSON* args) {
sqlite3_finalize(stmt);
// Get auth rules count
-sql = "SELECT COUNT(*) FROM auth_rules WHERE enabled = 1";
+sql = "SELECT COUNT(*) FROM auth_rules WHERE active = 1";
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK && sqlite3_step(stmt) == SQLITE_ROW) {
cJSON_AddNumberToObject(stats, "active_auth_rules", sqlite3_column_int(stmt, 0));
@@ -637,7 +651,7 @@ cJSON* admin_cmd_sql_query(cJSON* args) {
cJSON* response = cJSON_CreateObject();
cJSON_AddStringToObject(response, "query_type", "sql_query");
-// Expected format: ["sql_query", "SELECT ..."]
+// Expected format: ["sql_query", "SQL STATEMENT"]
if (cJSON_GetArraySize(args) < 2) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Missing SQL query");
@@ -654,20 +668,26 @@ cJSON* admin_cmd_sql_query(cJSON* args) {
}
const char* sql = query_item->valuestring;
+const char* trimmed_sql = sql;
+while (*trimmed_sql && isspace((unsigned char)*trimmed_sql)) {
+trimmed_sql++;
+}
-// Security: Only allow SELECT queries
-const char* sql_upper = sql;
-while (*sql_upper == ' ' || *sql_upper == '\t' || *sql_upper == '\n') sql_upper++;
-if (strncasecmp(sql_upper, "SELECT", 6) != 0) {
+int is_select = strncasecmp(trimmed_sql, "SELECT", 6) == 0;
+int is_delete = strncasecmp(trimmed_sql, "DELETE", 6) == 0;
+int is_update = strncasecmp(trimmed_sql, "UPDATE", 6) == 0;
+int is_insert = strncasecmp(trimmed_sql, "INSERT", 6) == 0;
+if (!is_select && !is_delete && !is_update && !is_insert) {
cJSON_AddStringToObject(response, "status", "error");
-cJSON_AddStringToObject(response, "error", "Only SELECT queries are allowed");
+cJSON_AddStringToObject(response, "error", "Only SELECT, INSERT, UPDATE, or DELETE queries are allowed");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
-// Open database (read-only for safety)
+int open_flags = is_select ? SQLITE_OPEN_READONLY : (SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE);
sqlite3* db;
-int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
+int rc = sqlite3_open_v2(g_admin_state.db_path, &db, open_flags, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
@@ -675,7 +695,70 @@
return response;
}
-// Prepare and execute query
+if (is_select) {
sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "SQL error: %s", sqlite3_errmsg(db));
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}
int col_count = sqlite3_column_count(stmt);
cJSON* columns = cJSON_CreateArray();
for (int i = 0; i < col_count; i++) {
cJSON_AddItemToArray(columns, cJSON_CreateString(sqlite3_column_name(stmt, i)));
}
cJSON* rows = cJSON_CreateArray();
int row_count = 0;
const int MAX_ROWS = 1000;
while (row_count < MAX_ROWS && (rc = sqlite3_step(stmt)) == SQLITE_ROW) {
cJSON* row = cJSON_CreateArray();
for (int i = 0; i < col_count; i++) {
int col_type = sqlite3_column_type(stmt, i);
switch (col_type) {
case SQLITE_INTEGER:
cJSON_AddItemToArray(row, cJSON_CreateNumber(sqlite3_column_int64(stmt, i)));
break;
case SQLITE_FLOAT:
cJSON_AddItemToArray(row, cJSON_CreateNumber(sqlite3_column_double(stmt, i)));
break;
case SQLITE_TEXT:
cJSON_AddItemToArray(row, cJSON_CreateString((const char*)sqlite3_column_text(stmt, i)));
break;
case SQLITE_NULL:
cJSON_AddItemToArray(row, cJSON_CreateNull());
break;
default:
cJSON_AddItemToArray(row, cJSON_CreateString(""));
}
}
cJSON_AddItemToArray(rows, row);
row_count++;
}
sqlite3_finalize(stmt);
sqlite3_close(db);
cJSON_AddStringToObject(response, "status", "success");
cJSON_AddItemToObject(response, "columns", columns);
cJSON_AddItemToObject(response, "rows", rows);
cJSON_AddNumberToObject(response, "row_count", row_count);
if (row_count >= MAX_ROWS) {
cJSON_AddBoolToObject(response, "truncated", 1);
}
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
app_log(LOG_INFO, "SQL query executed: %d rows returned", row_count);
return response;
}
+// Handle DELETE/UPDATE/INSERT
sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
@@ -688,19 +771,113 @@ cJSON* admin_cmd_sql_query(cJSON* args) {
return response;
}
-// Get column names
+rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "SQL execution error: %s", sqlite3_errmsg(db));
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_finalize(stmt);
sqlite3_close(db);
return response;
}
int affected_rows = sqlite3_changes(db);
sqlite3_finalize(stmt);
sqlite3_close(db);
cJSON_AddStringToObject(response, "status", "success");
cJSON_AddNumberToObject(response, "affected_rows", affected_rows);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
app_log(LOG_INFO, "SQL modification executed: %d rows affected", affected_rows);
return response;
}
cJSON* admin_cmd_query_view(cJSON* args) {
cJSON* response = cJSON_CreateObject();
cJSON_AddStringToObject(response, "query_type", "query_view");
// Expected format: ["query_view", "view_name"]
if (cJSON_GetArraySize(args) < 2) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Missing view name");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
cJSON* view_name_item = cJSON_GetArrayItem(args, 1);
if (!cJSON_IsString(view_name_item)) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "View name must be a string");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
const char* view_name = view_name_item->valuestring;
// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
// Build SQL query based on view name
char sql[512];
if (strcmp(view_name, "blob_overview") == 0) {
// Query blob_overview view
snprintf(sql, sizeof(sql), "SELECT * FROM blob_overview");
} else if (strcmp(view_name, "storage_stats") == 0) {
// Query storage_stats view
snprintf(sql, sizeof(sql), "SELECT * FROM storage_stats");
} else if (strcmp(view_name, "blob_type_distribution") == 0) {
// Query blob_type_distribution view
snprintf(sql, sizeof(sql), "SELECT * FROM blob_type_distribution");
} else if (strcmp(view_name, "blob_time_stats") == 0) {
// Query blob_time_stats view
snprintf(sql, sizeof(sql), "SELECT * FROM blob_time_stats");
} else if (strcmp(view_name, "top_uploaders") == 0) {
// Query top_uploaders view
snprintf(sql, sizeof(sql), "SELECT * FROM top_uploaders");
} else {
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Unknown view: %s", view_name);
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}
sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Failed to prepare query: %s", sqlite3_errmsg(db));
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}
// Execute query and build results
int col_count = sqlite3_column_count(stmt);
cJSON* columns = cJSON_CreateArray();
for (int i = 0; i < col_count; i++) {
cJSON_AddItemToArray(columns, cJSON_CreateString(sqlite3_column_name(stmt, i)));
}
-// Execute and collect rows (limit to 1000 rows for safety)
cJSON* rows = cJSON_CreateArray();
int row_count = 0;
-const int MAX_ROWS = 1000;
-while (row_count < MAX_ROWS && (rc = sqlite3_step(stmt)) == SQLITE_ROW) {
+while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
cJSON* row = cJSON_CreateArray();
for (int i = 0; i < col_count; i++) {
int col_type = sqlite3_column_type(stmt, i);
@@ -729,15 +906,313 @@ cJSON* admin_cmd_sql_query(cJSON* args) {
sqlite3_close(db);
cJSON_AddStringToObject(response, "status", "success");
+cJSON_AddStringToObject(response, "view_name", view_name);
cJSON_AddItemToObject(response, "columns", columns);
cJSON_AddItemToObject(response, "rows", rows);
cJSON_AddNumberToObject(response, "row_count", row_count);
-if (row_count >= MAX_ROWS) {
-cJSON_AddBoolToObject(response, "truncated", 1);
-}
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
-app_log(LOG_INFO, "SQL query executed: %d rows returned", row_count);
+app_log(LOG_INFO, "View query executed: %s (%d rows)", view_name, row_count);
return response;
}
// ============================================================================
// AUTH RULES MANAGEMENT COMMANDS (c-relay compatible)
// ============================================================================
// Add blacklist or whitelist rule
// Format: ["blacklist", "pubkey", "abc123..."] or ["whitelist", "pubkey", "def456..."]
cJSON* admin_cmd_auth_add_rule(cJSON* args) {
cJSON* response = cJSON_CreateObject();
// Get command type (blacklist or whitelist)
cJSON* cmd_type = cJSON_GetArrayItem(args, 0);
if (!cJSON_IsString(cmd_type)) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Invalid command type");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
const char* command = cmd_type->valuestring;
const char* rule_type_prefix = command; // "blacklist" or "whitelist"
// Expected format: ["blacklist/whitelist", "pattern_type", "pattern_value"]
if (cJSON_GetArraySize(args) < 3) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Missing parameters. Format: [\"blacklist/whitelist\", \"pattern_type\", \"pattern_value\"]");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
cJSON* pattern_type_item = cJSON_GetArrayItem(args, 1);
cJSON* pattern_value_item = cJSON_GetArrayItem(args, 2);
if (!cJSON_IsString(pattern_type_item) || !cJSON_IsString(pattern_value_item)) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Pattern type and value must be strings");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
const char* pattern_type = pattern_type_item->valuestring;
const char* pattern_value = pattern_value_item->valuestring;
char rule_type[64];
snprintf(rule_type, sizeof(rule_type), "%s_%s", rule_type_prefix, pattern_type);
// Validate pattern_type
if (strcmp(pattern_type, "pubkey") != 0 && strcmp(pattern_type, "hash") != 0 && strcmp(pattern_type, "mime") != 0) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Invalid pattern_type. Must be 'pubkey', 'hash', or 'mime'");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READWRITE, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
// Insert rule
const char* sql = "INSERT INTO auth_rules (rule_type, pattern_type, pattern_value) VALUES (?, ?, ?)";
sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to prepare insert statement");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}
sqlite3_bind_text(stmt, 1, rule_type, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, pattern_type, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 3, pattern_value, -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
int rule_id = 0;
if (rc == SQLITE_DONE) {
rule_id = sqlite3_last_insert_rowid(db);
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "success");
cJSON_AddNumberToObject(response, "rule_id", rule_id);
cJSON_AddStringToObject(response, "rule_type", rule_type);
cJSON_AddStringToObject(response, "pattern_type", pattern_type);
cJSON_AddStringToObject(response, "pattern_value", pattern_value);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
app_log(LOG_INFO, "Added %s rule: %s=%s (ID: %d)", rule_type, pattern_type, pattern_value, rule_id);
} else {
cJSON_AddStringToObject(response, "query_type", "auth_add_rule");
cJSON_AddStringToObject(response, "status", "error");
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Failed to insert rule: %s", sqlite3_errmsg(db));
cJSON_AddStringToObject(response, "error", error_msg);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
}
sqlite3_finalize(stmt);
sqlite3_close(db);
return response;
}
// Delete auth rule
// Format: ["delete_auth_rule", "blacklist", "pubkey", "abc123..."]
cJSON* admin_cmd_auth_delete_rule(cJSON* args) {
cJSON* response = cJSON_CreateObject();
cJSON_AddStringToObject(response, "query_type", "delete_auth_rule");
// Expected format: ["delete_auth_rule", "rule_type", "pattern_type", "pattern_value"]
if (cJSON_GetArraySize(args) < 4) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Missing parameters. Format: [\"delete_auth_rule\", \"blacklist/whitelist\", \"pattern_type\", \"pattern_value\"]");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
cJSON* rule_type_item = cJSON_GetArrayItem(args, 1);
cJSON* pattern_type_item = cJSON_GetArrayItem(args, 2);
cJSON* pattern_value_item = cJSON_GetArrayItem(args, 3);
if (!cJSON_IsString(rule_type_item) || !cJSON_IsString(pattern_type_item) || !cJSON_IsString(pattern_value_item)) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "All parameters must be strings");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
const char* rule_type_str = cJSON_GetStringValue(rule_type_item);
const char* pattern_type = cJSON_GetStringValue(pattern_type_item);
const char* pattern_value = cJSON_GetStringValue(pattern_value_item);
char full_rule_type[64];
snprintf(full_rule_type, sizeof(full_rule_type), "%s_%s", rule_type_str, pattern_type);
// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READWRITE, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
// Delete rule
const char* sql = "DELETE FROM auth_rules WHERE rule_type = ? AND pattern_type = ? AND pattern_value = ?";
sqlite3_stmt* stmt;
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to prepare delete statement");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}
sqlite3_bind_text(stmt, 1, full_rule_type, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, pattern_type, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 3, pattern_value, -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
int changes = sqlite3_changes(db);
if (rc == SQLITE_DONE) {
cJSON_AddStringToObject(response, "status", "success");
cJSON_AddNumberToObject(response, "deleted_count", changes);
cJSON_AddStringToObject(response, "rule_type", full_rule_type);
cJSON_AddStringToObject(response, "pattern_type", pattern_type);
cJSON_AddStringToObject(response, "pattern_value", pattern_value);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
app_log(LOG_INFO, "Deleted %d %s rule(s): %s=%s", changes, full_rule_type, pattern_type, pattern_value);
} else {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to delete rule");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
}
sqlite3_finalize(stmt);
sqlite3_close(db);
return response;
}
// Query auth rules
// Format: ["auth_query", "all"] or ["auth_query", "whitelist"] or ["auth_query", "pattern", "abc123..."]
cJSON* admin_cmd_auth_query(cJSON* args) {
cJSON* response = cJSON_CreateObject();
cJSON_AddStringToObject(response, "query_type", "auth_query");
// Get query type
const char* query_type = "all";
const char* filter_value = NULL;
if (cJSON_GetArraySize(args) >= 2) {
cJSON* query_type_item = cJSON_GetArrayItem(args, 1);
if (cJSON_IsString(query_type_item)) {
query_type = query_type_item->valuestring;
}
}
if (cJSON_GetArraySize(args) >= 3) {
cJSON* filter_value_item = cJSON_GetArrayItem(args, 2);
if (cJSON_IsString(filter_value_item)) {
filter_value = filter_value_item->valuestring;
}
}
// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to open database");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
return response;
}
// Build SQL query based on query type
char sql[512];
sqlite3_stmt* stmt;
if (strcmp(query_type, "all") == 0) {
snprintf(sql, sizeof(sql), "SELECT id, rule_type, pattern_type, pattern_value, active, created_at, updated_at FROM auth_rules ORDER BY id");
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
}
else if (strcmp(query_type, "blacklist") == 0 || strcmp(query_type, "whitelist") == 0) {
snprintf(sql, sizeof(sql), "SELECT id, rule_type, pattern_type, pattern_value, active, created_at, updated_at FROM auth_rules WHERE rule_type LIKE ? || '_%%' ORDER BY id");
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, query_type, -1, SQLITE_STATIC);
}
}
else if (strcmp(query_type, "pattern") == 0 && filter_value) {
snprintf(sql, sizeof(sql), "SELECT id, rule_type, pattern_type, pattern_value, active, created_at, updated_at FROM auth_rules WHERE pattern_value = ? ORDER BY id");
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, filter_value, -1, SQLITE_STATIC);
}
}
else {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Invalid query type. Use 'all', 'blacklist', 'whitelist', or 'pattern'");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response, "status", "error");
cJSON_AddStringToObject(response, "error", "Failed to prepare query");
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
sqlite3_close(db);
return response;
}
// Execute query and build results
cJSON* rules = cJSON_CreateArray();
int count = 0;
while (sqlite3_step(stmt) == SQLITE_ROW) {
cJSON* rule = cJSON_CreateObject();
cJSON_AddNumberToObject(rule, "id", sqlite3_column_int(stmt, 0));
cJSON_AddStringToObject(rule, "rule_type", (const char*)sqlite3_column_text(stmt, 1));
cJSON_AddStringToObject(rule, "pattern_type", (const char*)sqlite3_column_text(stmt, 2));
cJSON_AddStringToObject(rule, "pattern_value", (const char*)sqlite3_column_text(stmt, 3));
cJSON_AddNumberToObject(rule, "active", sqlite3_column_int(stmt, 4));
cJSON_AddNumberToObject(rule, "created_at", sqlite3_column_int64(stmt, 5));
cJSON_AddNumberToObject(rule, "updated_at", sqlite3_column_int64(stmt, 6));
cJSON_AddItemToArray(rules, rule);
count++;
}
sqlite3_finalize(stmt);
sqlite3_close(db);
cJSON_AddStringToObject(response, "status", "success");
cJSON_AddNumberToObject(response, "count", count);
cJSON_AddStringToObject(response, "filter", query_type);
cJSON_AddItemToObject(response, "rules", rules);
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
app_log(LOG_INFO, "Auth query executed: %d rules returned (filter: %s)", count, query_type);
return response;
}

View File

@@ -35,6 +35,12 @@ cJSON* admin_cmd_system_status(cJSON* args);
cJSON* admin_cmd_blob_list(cJSON* args);
cJSON* admin_cmd_storage_stats(cJSON* args);
cJSON* admin_cmd_sql_query(cJSON* args);
+cJSON* admin_cmd_query_view(cJSON* args);
+// Auth rules management handlers (c-relay compatible)
+cJSON* admin_cmd_auth_add_rule(cJSON* args);
+cJSON* admin_cmd_auth_delete_rule(cJSON* args);
+cJSON* admin_cmd_auth_query(cJSON* args);
// NIP-44 encryption/decryption helpers
int admin_encrypt_response(

View File

@@ -6,6 +6,7 @@
#include <unistd.h>
#include <sys/types.h>
#include "ginxsom.h"
+#include "admin_commands.h"
// Forward declarations for nostr_core_lib functions
int nostr_hex_to_bytes(const char* hex, unsigned char* bytes, size_t bytes_len);
@@ -28,10 +29,8 @@ extern char g_db_path[];
// Forward declarations
static int get_server_privkey(unsigned char* privkey_bytes);
static int get_server_pubkey(char* pubkey_hex, size_t size);
-static int handle_config_query_command(cJSON* response_data);
-static int handle_query_view_command(cJSON* command_array, cJSON* response_data);
static int send_admin_response_event(const char* admin_pubkey, const char* request_id,
cJSON* response_data);
static cJSON* parse_authorization_header(void);
static int process_admin_event(cJSON* event);
@@ -304,20 +303,35 @@ static int process_admin_event(cJSON* event) {
cJSON_AddStringToObject(response_data, "query_type", cmd);
cJSON_AddNumberToObject(response_data, "timestamp", (double)time(NULL));
-// Handle command
+// Handle command - use admin_commands system for processing
+cJSON* command_response = admin_commands_process(command_array, request_id);
int result = -1;
-if (strcmp(cmd, "config_query") == 0) {
-app_log(LOG_DEBUG, "ADMIN_EVENT: Handling config_query command");
-result = handle_config_query_command(response_data);
-app_log(LOG_DEBUG, "ADMIN_EVENT: config_query result: %d", result);
-} else if (strcmp(cmd, "query_view") == 0) {
-app_log(LOG_DEBUG, "ADMIN_EVENT: Handling query_view command");
-result = handle_query_view_command(command_array, response_data);
-app_log(LOG_DEBUG, "ADMIN_EVENT: query_view result: %d", result);
+if (command_response) {
+// Check if command was successful
+cJSON* status = cJSON_GetObjectItem(command_response, "status");
+if (status && cJSON_IsString(status)) {
+const char* status_str = cJSON_GetStringValue(status);
+if (strcmp(status_str, "success") == 0) {
+result = 0;
+}
+}
+// Copy response data from command_response to response_data
+cJSON* item = NULL;
+cJSON_ArrayForEach(item, command_response) {
+if (item->string) {
+cJSON* copy = cJSON_Duplicate(item, 1);
+cJSON_AddItemToObject(response_data, item->string, copy);
+}
+}
+cJSON_Delete(command_response);
+app_log(LOG_DEBUG, "ADMIN_EVENT: Command processed with result: %d", result);
} else {
-app_log(LOG_WARN, "ADMIN_EVENT: Unknown command: %s", cmd);
+app_log(LOG_ERROR, "ADMIN_EVENT: Command processing returned NULL");
cJSON_AddStringToObject(response_data, "status", "error");
-cJSON_AddStringToObject(response_data, "error", "Unknown command");
+cJSON_AddStringToObject(response_data, "error", "Command processing failed");
result = -1;
}
@@ -397,160 +411,6 @@ static int get_server_pubkey(char* pubkey_hex, size_t size) {
return result;
}
/**
* Handle config_query command - returns all config values
*/
static int handle_config_query_command(cJSON* response_data) {
sqlite3* db;
int rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "Database error");
return -1;
}
cJSON_AddStringToObject(response_data, "status", "success");
cJSON* data = cJSON_CreateObject();
// Query all config settings
sqlite3_stmt* stmt;
const char* sql = "SELECT key, value FROM config ORDER BY key";
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) == SQLITE_OK) {
while (sqlite3_step(stmt) == SQLITE_ROW) {
const char* key = (const char*)sqlite3_column_text(stmt, 0);
const char* value = (const char*)sqlite3_column_text(stmt, 1);
if (key && value) {
cJSON_AddStringToObject(data, key, value);
}
}
sqlite3_finalize(stmt);
}
cJSON_AddItemToObject(response_data, "data", data);
sqlite3_close(db);
return 0;
}
/**
* Handle query_view command - returns data from a specified database view
* Command format: ["query_view", "view_name"]
*/
static int handle_query_view_command(cJSON* command_array, cJSON* response_data) {
app_log(LOG_DEBUG, "ADMIN_EVENT: handle_query_view_command called");
// Get view name from command array
cJSON* view_name_obj = cJSON_GetArrayItem(command_array, 1);
if (!view_name_obj || !cJSON_IsString(view_name_obj)) {
app_log(LOG_ERROR, "ADMIN_EVENT: View name missing or not a string");
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "View name required");
return -1;
}
const char* view_name = cJSON_GetStringValue(view_name_obj);
app_log(LOG_DEBUG, "ADMIN_EVENT: Querying view: %s", view_name);
// Validate view name (whitelist approach for security)
const char* allowed_views[] = {
"blob_overview",
"blob_type_distribution",
"blob_time_stats",
"top_uploaders",
NULL
};
int view_allowed = 0;
for (int i = 0; allowed_views[i] != NULL; i++) {
if (strcmp(view_name, allowed_views[i]) == 0) {
view_allowed = 1;
break;
}
}
if (!view_allowed) {
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "Invalid view name");
app_log(LOG_WARN, "ADMIN_EVENT: Attempted to query invalid view: %s", view_name);
return -1;
}
app_log(LOG_DEBUG, "ADMIN_EVENT: View '%s' is allowed, opening database: %s", view_name, g_db_path);
// Open database
sqlite3* db;
int rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
app_log(LOG_ERROR, "ADMIN_EVENT: Failed to open database: %s (error: %s)", g_db_path, sqlite3_errmsg(db));
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "Database error");
return -1;
}
// Build SQL query
char sql[256];
snprintf(sql, sizeof(sql), "SELECT * FROM %s", view_name);
app_log(LOG_DEBUG, "ADMIN_EVENT: Executing SQL: %s", sql);
sqlite3_stmt* stmt;
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
app_log(LOG_ERROR, "ADMIN_EVENT: Failed to prepare query: %s (error: %s)", sql, sqlite3_errmsg(db));
sqlite3_close(db);
cJSON_AddStringToObject(response_data, "status", "error");
cJSON_AddStringToObject(response_data, "error", "Failed to prepare query");
return -1;
}
// Get column count and names
int col_count = sqlite3_column_count(stmt);
// Create results array
cJSON* results = cJSON_CreateArray();
// Fetch all rows
while (sqlite3_step(stmt) == SQLITE_ROW) {
cJSON* row = cJSON_CreateObject();
for (int i = 0; i < col_count; i++) {
const char* col_name = sqlite3_column_name(stmt, i);
int col_type = sqlite3_column_type(stmt, i);
switch (col_type) {
case SQLITE_INTEGER:
cJSON_AddNumberToObject(row, col_name, (double)sqlite3_column_int64(stmt, i));
break;
case SQLITE_FLOAT:
cJSON_AddNumberToObject(row, col_name, sqlite3_column_double(stmt, i));
break;
case SQLITE_TEXT:
cJSON_AddStringToObject(row, col_name, (const char*)sqlite3_column_text(stmt, i));
break;
case SQLITE_NULL:
cJSON_AddNullToObject(row, col_name);
break;
default:
// For BLOB or unknown types, skip
break;
}
}
cJSON_AddItemToArray(results, row);
}
sqlite3_finalize(stmt);
sqlite3_close(db);
// Build response
cJSON_AddStringToObject(response_data, "status", "success");
cJSON_AddStringToObject(response_data, "view_name", view_name);
cJSON_AddItemToObject(response_data, "data", results);
app_log(LOG_DEBUG, "ADMIN_EVENT: Query view '%s' returned %d rows", view_name, cJSON_GetArraySize(results));
return 0;
}
/**
* Send Kind 23459 admin response event

View File

@@ -10,7 +10,7 @@
static void serve_embedded_file(const unsigned char* data, size_t size, const char* content_type) {
printf("Status: 200 OK\r\n");
printf("Content-Type: %s\r\n", content_type);
-printf("Content-Length: %zu\r\n", size);
+printf("Content-Length: %lu\r\n", (unsigned long)size);
printf("Cache-Control: public, max-age=3600\r\n");
printf("\r\n");
fwrite((void*)data, 1, size, stdout);

File diff suppressed because it is too large

View File

@@ -10,8 +10,8 @@
// Version information (auto-updated by build system)
#define VERSION_MAJOR 0
#define VERSION_MINOR 1
-#define VERSION_PATCH 18
-#define VERSION "v0.1.18"
+#define VERSION_PATCH 20
+#define VERSION "v0.1.20"
#include <stddef.h>
#include <stdint.h>

View File

@@ -21,6 +21,8 @@
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
+#include <dirent.h>
+#include <sched.h>
// Centralized logging system (declaration in ginxsom.h)
void app_log(log_level_t level, const char *format, ...) {
@@ -176,25 +178,32 @@ int initialize_database(const char *db_path) {
     return -1;
   }

+  // Create system table for runtime metrics (read-only, updated by server)
+  const char *create_system =
+      "CREATE TABLE IF NOT EXISTS system ("
+      "  key TEXT PRIMARY KEY NOT NULL,"
+      "  value TEXT NOT NULL,"
+      "  updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))"
+      ");";
+  rc = sqlite3_exec(db, create_system, NULL, NULL, &err_msg);
+  if (rc != SQLITE_OK) {
+    fprintf(stderr, "Failed to create system table: %s\n", err_msg);
+    sqlite3_free(err_msg);
+    sqlite3_close(db);
+    return -1;
+  }
+
-  // Create auth_rules table
+  // Create auth_rules table (c-relay compatible schema)
   const char *create_auth_rules =
       "CREATE TABLE IF NOT EXISTS auth_rules ("
       "  id INTEGER PRIMARY KEY AUTOINCREMENT,"
       "  rule_type TEXT NOT NULL,"
-      "  rule_target TEXT NOT NULL,"
-      "  operation TEXT NOT NULL DEFAULT '*',"
-      "  enabled INTEGER NOT NULL DEFAULT 1,"
-      "  priority INTEGER NOT NULL DEFAULT 100,"
-      "  description TEXT,"
-      "  created_by TEXT,"
+      "  pattern_type TEXT NOT NULL,"
+      "  pattern_value TEXT NOT NULL,"
+      "  active INTEGER NOT NULL DEFAULT 1,"
       "  created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),"
-      "  updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),"
-      "  CHECK (rule_type IN ('pubkey_blacklist', 'pubkey_whitelist',"
-      "         'hash_blacklist', 'mime_blacklist', 'mime_whitelist')),"
-      "  CHECK (operation IN ('upload', 'delete', 'list', '*')),"
-      "  CHECK (enabled IN (0, 1)),"
-      "  CHECK (priority >= 0),"
-      "  UNIQUE(rule_type, rule_target, operation)"
+      "  updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))"
       ");";

   rc = sqlite3_exec(db, create_auth_rules, NULL, NULL, &err_msg);
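Note: under the simplified schema above, a rule is just (rule_type, pattern_type, pattern_value, active). A minimal sketch of inserting one from C, assuming an open sqlite3 *db handle (the helper name add_auth_rule is illustrative, not part of this commit):

// Sketch: insert an auth rule under the new c-relay compatible schema.
static int add_auth_rule(sqlite3 *db, const char *rule_type,
                         const char *pattern_type, const char *pattern_value) {
  sqlite3_stmt *stmt;
  const char *sql =
      "INSERT INTO auth_rules (rule_type, pattern_type, pattern_value, active) "
      "VALUES (?, ?, ?, 1);";
  if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) return -1;
  sqlite3_bind_text(stmt, 1, rule_type, -1, SQLITE_STATIC);
  sqlite3_bind_text(stmt, 2, pattern_type, -1, SQLITE_STATIC);
  sqlite3_bind_text(stmt, 3, pattern_value, -1, SQLITE_STATIC);
  int rc = sqlite3_step(stmt);
  sqlite3_finalize(stmt);
  return (rc == SQLITE_DONE) ? 0 : -1;
}
// e.g. add_auth_rule(db, "blacklist_pubkey", "pubkey", "<64-hex-pubkey>");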
@@ -211,11 +220,9 @@ int initialize_database(const char *db_path) {
"CREATE INDEX IF NOT EXISTS idx_blobs_uploader_pubkey ON blobs(uploader_pubkey);" "CREATE INDEX IF NOT EXISTS idx_blobs_uploader_pubkey ON blobs(uploader_pubkey);"
"CREATE INDEX IF NOT EXISTS idx_blobs_type ON blobs(type);" "CREATE INDEX IF NOT EXISTS idx_blobs_type ON blobs(type);"
"CREATE INDEX IF NOT EXISTS idx_config_updated_at ON config(updated_at);" "CREATE INDEX IF NOT EXISTS idx_config_updated_at ON config(updated_at);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_type_target ON auth_rules(rule_type, rule_target);" "CREATE INDEX IF NOT EXISTS idx_auth_rules_type ON auth_rules(rule_type);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_operation ON auth_rules(operation);" "CREATE INDEX IF NOT EXISTS idx_auth_rules_pattern ON auth_rules(pattern_type, pattern_value);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_enabled ON auth_rules(enabled);" "CREATE INDEX IF NOT EXISTS idx_auth_rules_active ON auth_rules(active);";
"CREATE INDEX IF NOT EXISTS idx_auth_rules_priority ON auth_rules(priority);"
"CREATE INDEX IF NOT EXISTS idx_auth_rules_type_operation ON auth_rules(rule_type, operation, enabled);";
rc = sqlite3_exec(db, create_indexes, NULL, NULL, &err_msg); rc = sqlite3_exec(db, create_indexes, NULL, NULL, &err_msg);
if (rc != SQLITE_OK) { if (rc != SQLITE_OK) {
@@ -268,14 +275,20 @@ int initialize_database(const char *db_path) {
     return -1;
   }

-  // Create blob_overview view for admin dashboard
+  // Create blob_overview view for admin dashboard with system metrics
   const char *create_overview_view =
       "CREATE VIEW IF NOT EXISTS blob_overview AS "
       "SELECT "
       "  COUNT(*) as total_blobs, "
       "  COALESCE(SUM(size), 0) as total_bytes, "
       "  MIN(uploaded_at) as first_upload, "
-      "  MAX(uploaded_at) as last_upload "
+      "  MAX(uploaded_at) as last_upload, "
+      "  (SELECT value FROM system WHERE key = 'version') as version, "
+      "  (SELECT value FROM system WHERE key = 'process_id') as process_id, "
+      "  (SELECT value FROM system WHERE key = 'memory_mb') as memory_mb, "
+      "  (SELECT value FROM system WHERE key = 'cpu_core') as cpu_core, "
+      "  (SELECT value FROM system WHERE key = 'fs_blob_count') as fs_blob_count, "
+      "  (SELECT value FROM system WHERE key = 'fs_blob_size_mb') as fs_blob_size_mb "
       "FROM blobs;";

   rc = sqlite3_exec(db, create_overview_view, NULL, NULL, &err_msg);
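Because the view folds the system key/value rows into columns, a single SELECT now returns blob totals and runtime metrics together. A hedged sketch of reading it from C (function name print_overview is illustrative):

// Sketch: read blob_overview, which now carries the system metrics columns.
static void print_overview(sqlite3 *db) {
  sqlite3_stmt *stmt;
  const char *sql =
      "SELECT total_blobs, total_bytes, version, memory_mb FROM blob_overview;";
  if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) return;
  if (sqlite3_step(stmt) == SQLITE_ROW) {
    const unsigned char *ver = sqlite3_column_text(stmt, 2);
    const unsigned char *mem = sqlite3_column_text(stmt, 3);
    printf("blobs=%d bytes=%lld version=%s memory_mb=%s\n",
           sqlite3_column_int(stmt, 0),
           (long long)sqlite3_column_int64(stmt, 1),
           ver ? (const char *)ver : "?",  // NULL until the server writes metrics
           mem ? (const char *)mem : "?");
  }
  sqlite3_finalize(stmt);
}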
@@ -732,6 +745,171 @@ int get_blossom_private_key(char *seckey_out, size_t max_len) {
   return -1;
 }
// Helper function to count filesystem blobs
static int count_filesystem_blobs(long *total_count, long *total_size_bytes) {
DIR *dir = opendir(g_storage_dir);
if (!dir) {
return -1;
}
*total_count = 0;
*total_size_bytes = 0;
struct dirent *entry;
while ((entry = readdir(dir)) != NULL) {
// Skip . and ..
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}
// Build full path
char filepath[MAX_PATH_LEN];
snprintf(filepath, sizeof(filepath), "%s/%s", g_storage_dir, entry->d_name);
// Get file stats
struct stat st;
if (stat(filepath, &st) == 0 && S_ISREG(st.st_mode)) {
(*total_count)++;
*total_size_bytes += st.st_size;
}
}
closedir(dir);
return 0;
}
// Helper function to get memory usage in MB from /proc/self/status
static long get_memory_usage_mb(void) {
FILE *fp = fopen("/proc/self/status", "r");
if (!fp) {
return -1;
}
char line[256];
long vmrss_kb = -1;
while (fgets(line, sizeof(line), fp)) {
if (strncmp(line, "VmRSS:", 6) == 0) {
// Parse VmRSS value (in kB)
char *p = line + 6;
while (*p == ' ' || *p == '\t') p++;
vmrss_kb = atol(p);
break;
}
}
fclose(fp);
if (vmrss_kb > 0) {
return vmrss_kb / 1024; // Convert kB to MB
}
return -1;
}
// Helper function to get CPU core
static int get_cpu_core(void) {
#ifdef __linux__
return sched_getcpu();
#else
return -1;
#endif
}
// Update system metrics in system table (key-value pairs)
static int update_system_metrics(void) {
sqlite3 *db;
sqlite3_stmt *stmt;
int rc;
rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READWRITE, NULL);
if (rc != SQLITE_OK) {
return -1;
}
// Get system metrics
int pid = getpid();
long memory_mb = get_memory_usage_mb();
int cpu_core = get_cpu_core();
long fs_blob_count = 0;
long fs_blob_size = 0;
count_filesystem_blobs(&fs_blob_count, &fs_blob_size);
long fs_blob_size_mb = fs_blob_size / (1024 * 1024);
// Prepare INSERT OR REPLACE statement for key-value updates
const char *sql = "INSERT OR REPLACE INTO system (key, value, updated_at) VALUES (?, ?, strftime('%s', 'now'))";
// Update version
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, "version", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, VERSION, -1, SQLITE_STATIC);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
// Update process_id
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char pid_str[32];
snprintf(pid_str, sizeof(pid_str), "%d", pid);
sqlite3_bind_text(stmt, 1, "process_id", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, pid_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
// Update memory_mb
if (memory_mb > 0) {
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char mem_str[32];
snprintf(mem_str, sizeof(mem_str), "%ld", memory_mb);
sqlite3_bind_text(stmt, 1, "memory_mb", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, mem_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
}
// Update cpu_core
if (cpu_core >= 0) {
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char core_str[32];
snprintf(core_str, sizeof(core_str), "%d", cpu_core);
sqlite3_bind_text(stmt, 1, "cpu_core", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, core_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
}
// Update fs_blob_count
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char count_str[32];
snprintf(count_str, sizeof(count_str), "%ld", fs_blob_count);
sqlite3_bind_text(stmt, 1, "fs_blob_count", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, count_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
// Update fs_blob_size_mb
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
char size_str[32];
snprintf(size_str, sizeof(size_str), "%ld", fs_blob_size_mb);
sqlite3_bind_text(stmt, 1, "fs_blob_size_mb", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, size_str, -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
sqlite3_close(db);
return 0;
}
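The six prepare/bind/step blocks in update_system_metrics() above differ only in the key and value they write; they could be collapsed into a small helper (a sketch only, not part of this commit; set_system_value is a hypothetical name):

// Sketch: upsert a single metric, using the same SQL as update_system_metrics().
static void set_system_value(sqlite3 *db, const char *key, const char *value) {
  sqlite3_stmt *stmt;
  const char *sql = "INSERT OR REPLACE INTO system (key, value, updated_at) "
                    "VALUES (?, ?, strftime('%s', 'now'))";
  if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) return;
  sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
  sqlite3_bind_text(stmt, 2, value, -1, SQLITE_TRANSIENT);
  sqlite3_step(stmt);
  sqlite3_finalize(stmt);
}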
 // Insert blob metadata into database
 int insert_blob_metadata(const char *sha256, long size, const char *type,
                          long uploaded_at, const char *uploader_pubkey,
@@ -1848,6 +2026,9 @@ void handle_upload_request_with_validation(nostr_request_result_t* validation_re
     return;
   }

+  // Update system metrics after successful blob upload
+  update_system_metrics();
+
   // Get origin from config
   char origin[256];
   nip94_get_origin(origin, sizeof(origin));
@@ -2142,37 +2323,105 @@ int main(int argc, char *argv[]) {
fprintf(stderr, "KEYS: Derived pubkey: %s\n", g_blossom_pubkey); fprintf(stderr, "KEYS: Derived pubkey: %s\n", g_blossom_pubkey);
// Scenario 5: Both database and keys specified - validate match // Scenario 5: Both database path and keys specified
if (db_path_specified) { if (db_path_specified) {
fprintf(stderr, "\n=== SCENARIO 5: DATABASE + KEYS (VALIDATION) ===\n"); fprintf(stderr, "\n=== SCENARIO 5: DATABASE PATH + KEYS ===\n");
strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
g_db_path[sizeof(g_db_path) - 1] = '\0'; // Check if specified path is a directory or file
struct stat st;
int is_directory = 0;
if (stat(specified_db_path, &st) == 0) {
is_directory = S_ISDIR(st.st_mode);
} else {
// Path doesn't exist - assume it's meant to be a directory
is_directory = (specified_db_path[strlen(specified_db_path) - 1] == '/' ||
strstr(specified_db_path, ".db") == NULL);
}
if (is_directory) {
// Build database path from directory + derived pubkey
snprintf(g_db_path, sizeof(g_db_path), "%s/%s.db", specified_db_path, g_blossom_pubkey);
fprintf(stderr, "DATABASE: Using directory path, derived database: %s\n", g_db_path);
} else {
// Use specified file path directly
strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
g_db_path[sizeof(g_db_path) - 1] = '\0';
fprintf(stderr, "DATABASE: Using file path: %s\n", g_db_path);
}
// Check if database exists // Check if database exists
struct stat st; if (stat(g_db_path, &st) == 0) {
if (stat(g_db_path, &st) != 0) { // Database exists - validate keys match
fprintf(stderr, "ERROR: Database file not found: %s\n", g_db_path); fprintf(stderr, "DATABASE: Found existing database, validating keys...\n");
return 1;
} // Load keys from database
if (get_blossom_private_key(g_blossom_seckey, sizeof(g_blossom_seckey)) != 0) {
// Load keys from database fprintf(stderr, "ERROR: Invalid database: missing server keys\n");
if (get_blossom_private_key(g_blossom_seckey, sizeof(g_blossom_seckey)) != 0) { return 1;
fprintf(stderr, "ERROR: Invalid database: missing server keys\n"); }
return 1;
} // Compare with provided key
if (strcmp(g_blossom_seckey, test_server_privkey) != 0) {
// Compare with provided key fprintf(stderr, "ERROR: Server private key doesn't match database\n");
if (strcmp(g_blossom_seckey, test_server_privkey) != 0) { fprintf(stderr, " Provided key and database keys are different\n");
fprintf(stderr, "ERROR: Server private key doesn't match database\n"); return 1;
fprintf(stderr, " Provided key and database keys are different\n"); }
return 1;
} fprintf(stderr, "VALIDATION: Keys match database - continuing\n");
fprintf(stderr, "VALIDATION: Keys match database - continuing\n"); // Validate pubkey matches filename
if (validate_database_pubkey_match(g_db_path, g_blossom_pubkey) != 0) {
// Validate pubkey matches filename return 1;
if (validate_database_pubkey_match(g_db_path, g_blossom_pubkey) != 0) { }
return 1; } else {
// Database doesn't exist - create it with provided keys
fprintf(stderr, "DATABASE: No existing database, creating new one...\n");
// Initialize new database
if (initialize_database(g_db_path) != 0) {
fprintf(stderr, "ERROR: Failed to initialize database\n");
return 1;
}
// Store keys
strncpy(g_blossom_seckey, test_server_privkey, sizeof(g_blossom_seckey) - 1);
g_blossom_seckey[64] = '\0';
if (store_blossom_private_key(test_server_privkey) != 0) {
fprintf(stderr, "ERROR: Failed to store private key\n");
return 1;
}
// Store pubkey in config
sqlite3 *db;
sqlite3_stmt *stmt;
int rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READWRITE, NULL);
if (rc == SQLITE_OK) {
const char *sql = "INSERT OR REPLACE INTO config (key, value, description) VALUES (?, ?, ?)";
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, "blossom_pubkey", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, g_blossom_pubkey, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 3, "Blossom server's public key", -1, SQLITE_STATIC);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
if (strlen(g_admin_pubkey) > 0) {
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, "admin_pubkey", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, g_admin_pubkey, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 3, "Admin public key", -1, SQLITE_STATIC);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
}
sqlite3_close(db);
}
fprintf(stderr, "DATABASE: New database created successfully\n");
} }
} }
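Note: with this change, passing a directory (e.g. db/) together with --server-privkey derives the database name from the server pubkey, yielding db/<64-hex-pubkey>.db, while passing an explicit .db file path keeps the old file-path behavior.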
 // Scenario 3 continued: Create new database with provided keys
@@ -2238,30 +2487,78 @@ int main(int argc, char *argv[]) {
   }
 }

-  // Scenario 2: Database Specified (--db-path)
+  // Scenario 2: Database Path Specified (--db-path)
+  // Note: --db-path should specify a DIRECTORY, not a full file path
+  // The actual database filename will be derived from the server's pubkey
   else if (db_path_specified) {
-    fprintf(stderr, "\n=== SCENARIO 2: DATABASE SPECIFIED ===\n");
-    strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
-    g_db_path[sizeof(g_db_path) - 1] = '\0';
+    fprintf(stderr, "\n=== SCENARIO 2: DATABASE DIRECTORY SPECIFIED ===\n");

-    // Check if database exists
-    struct stat st;
-    if (stat(g_db_path, &st) != 0) {
-      fprintf(stderr, "ERROR: Database file not found: %s\n", g_db_path);
-      fprintf(stderr, " → Specify a different database or let the application create a new one\n");
-      return 1;
-    }
+    // Check if specified path is a directory or file
+    struct stat st;
+    int is_directory = 0;
+    if (stat(specified_db_path, &st) == 0) {
+      is_directory = S_ISDIR(st.st_mode);
+    } else {
+      // Path doesn't exist - assume it's meant to be a directory
+      is_directory = (specified_db_path[strlen(specified_db_path) - 1] == '/' ||
+                      strstr(specified_db_path, ".db") == NULL);
+    }

-    fprintf(stderr, "DATABASE: Opening existing database: %s\n", g_db_path);
+    if (is_directory) {
+      // Treat as directory - will derive filename from pubkey after loading keys
+      fprintf(stderr, "DATABASE: Directory specified: %s\n", specified_db_path);
+      fprintf(stderr, "DATABASE: Will derive filename from server pubkey\n");

-    // Load keys from database
-    if (load_server_keys() != 0) {
-      fprintf(stderr, "ERROR: Failed to load keys from database\n");
-      fprintf(stderr, " → Database may be corrupted or not a valid ginxsom database\n");
-      return 1;
-    }
+      // Look for any .db file that matches the pubkey pattern
+      DIR *dir = opendir(specified_db_path);
+      int found_db = 0;
+      if (dir) {
+        struct dirent *entry;
+        while ((entry = readdir(dir)) != NULL) {
+          // Check if filename matches pattern: <64-hex-chars>.db
+          size_t name_len = strlen(entry->d_name);
+          if (name_len == 67 && strcmp(entry->d_name + 64, ".db") == 0) {
+            // Found a potential database file
+            snprintf(g_db_path, sizeof(g_db_path), "%s/%s", specified_db_path, entry->d_name);
+            found_db = 1;
+            fprintf(stderr, "DATABASE: Found existing database: %s\n", g_db_path);
+            break;
+          }
+        }
+        closedir(dir);
+      }
+
+      if (!found_db) {
+        // No database found - this is OK, we'll create one if we have keys
+        fprintf(stderr, "DATABASE: No existing database found in directory\n");
+        // g_db_path will be set later based on pubkey
+      }
+    } else {
+      // Treat as full file path (legacy behavior)
+      strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
+      g_db_path[sizeof(g_db_path) - 1] = '\0';
+    }

-    fprintf(stderr, "DATABASE: Keys loaded and validated successfully\n");
+    // If we found a database file, try to load it
+    if (g_db_path[0] != '\0' && stat(g_db_path, &st) == 0) {
+      fprintf(stderr, "DATABASE: Opening existing database: %s\n", g_db_path);
+
+      // Load keys from database
+      if (load_server_keys() != 0) {
+        fprintf(stderr, "ERROR: Failed to load keys from database\n");
+        fprintf(stderr, " → Database may be corrupted or not a valid ginxsom database\n");
+        return 1;
+      }
+
+      fprintf(stderr, "DATABASE: Keys loaded and validated successfully\n");
+    } else {
+      // No database file exists - we need keys to create one
+      fprintf(stderr, "ERROR: No database found and no --server-privkey provided\n");
+      fprintf(stderr, " → Use --server-privkey to create a new database\n");
+      return 1;
+    }
   }

 // Scenario 1: No Arguments (Fresh Start)
@@ -2353,6 +2650,14 @@ if (!config_loaded /* && !initialize_server_config() */) {
   app_log(LOG_INFO, "Admin commands system initialized successfully");
 }

+// Initialize system metrics at startup
+app_log(LOG_INFO, "Initializing system metrics...");
+if (update_system_metrics() == 0) {
+  app_log(LOG_INFO, "System metrics initialized successfully");
+} else {
+  app_log(LOG_WARN, "Failed to initialize system metrics");
+}

 /////////////////////////////////////////////////////////////////////
 // THIS IS WHERE THE REQUESTS ENTER THE FastCGI
 /////////////////////////////////////////////////////////////////////

View File

@@ -23,6 +23,8 @@
 #include <strings.h>
 #include <time.h>

+#define MAX_MIME_TYPE_LEN 128 // Define here for direct use

 // Additional error codes for ginxsom-specific functionality
 #define NOSTR_ERROR_CRYPTO_INIT -100
 #define NOSTR_ERROR_AUTH_REQUIRED -101
@@ -671,8 +673,8 @@ int nostr_validate_unified_request(const nostr_unified_request_t *request,
"VALIDATOR_DEBUG: STEP 10 PASSED - Blossom authentication succeeded\n"); "VALIDATOR_DEBUG: STEP 10 PASSED - Blossom authentication succeeded\n");
strcpy(result->reason, "Blossom authentication passed"); strcpy(result->reason, "Blossom authentication passed");
} else if (event_kind == 33335) { } else if (event_kind == 33335 || event_kind == 23459 || event_kind == 23458) {
// 10. Admin/Configuration Event Validation (Kind 33335) // 10. Admin/Configuration Event Validation (Kind 33335, 23459, 23458)
// Verify admin authorization, check required tags, validate expiration // Verify admin authorization, check required tags, validate expiration
validator_debug_log("VALIDATOR_DEBUG: STEP 10 - Processing Admin/Configuration " validator_debug_log("VALIDATOR_DEBUG: STEP 10 - Processing Admin/Configuration "
"authentication (kind 33335)\n"); "authentication (kind 33335)\n");
@@ -775,6 +777,16 @@ int nostr_validate_unified_request(const nostr_unified_request_t *request,
   cJSON_Delete(event);

+  // Skip rule evaluation for admin events
+  if (event_kind == 33335 || event_kind == 23459 || event_kind == 23458) {
+    char admin_skip_msg[256];
+    snprintf(admin_skip_msg, sizeof(admin_skip_msg),
+             "VALIDATOR_DEBUG: Admin event (kind %d) - skipping rule evaluation\n", event_kind);
+    validator_debug_log(admin_skip_msg);
+    strcpy(result->reason, "Admin event validated - rules bypassed");
+    return NOSTR_SUCCESS;
+  }

   // STEP 12 PASSED: Protocol validation complete - continue to database rule
   // evaluation
   validator_debug_log("VALIDATOR_DEBUG: STEP 12 PASSED - Protocol validation "
@@ -1321,6 +1333,13 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
   sqlite3 *db = NULL;
   sqlite3_stmt *stmt = NULL;
   int rc;
+  int pubkey_whitelisted = 0;
+  int pubkey_whitelist_exists = 0;
+  int mime_whitelisted = 0;
+  int mime_whitelist_exists = 0;
+  int mime_whitelist_count = 0;
+  int pubkey_whitelist_count = 0;
+  char rules_msg[256];

   if (!pubkey) {
     validator_debug_log(
@@ -1328,7 +1347,12 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
     return NOSTR_ERROR_INVALID_INPUT;
   }

-  char rules_msg[256];
+  if (operation && (strcmp(operation, "admin_event") == 0 ||
+                    strcmp(operation, "admin") == 0)) {
+    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - Admin management request, skipping auth rules\n");
+    return NOSTR_SUCCESS;
+  }
+
   sprintf(rules_msg,
           "VALIDATOR_DEBUG: RULES ENGINE - Checking rules for pubkey=%.32s..., "
           "operation=%s, mime_type=%s\n",
@@ -1344,18 +1368,14 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
   }

   // Step 1: Check pubkey blacklist (highest priority)
-  // Match both exact operation and wildcard '*'
   const char *blacklist_sql =
-      "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
-      "'pubkey_blacklist' AND rule_target = ? AND (operation = ? OR operation = '*') AND enabled = "
-      "1 ORDER BY priority LIMIT 1";
+      "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'blacklist_pubkey' AND pattern_type = 'pubkey' AND pattern_value = ? AND active = 1 LIMIT 1";

   rc = sqlite3_prepare_v2(db, blacklist_sql, -1, &stmt, NULL);
   if (rc == SQLITE_OK) {
     sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
-    sqlite3_bind_text(stmt, 2, operation ? operation : "", -1, SQLITE_STATIC);

     if (sqlite3_step(stmt) == SQLITE_ROW) {
-      const char *description = (const char *)sqlite3_column_text(stmt, 1);
+      const char *description = "Pubkey blacklisted";
       validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 1 FAILED - "
                           "Pubkey blacklisted\n");
       char blacklist_msg[256];
@@ -1380,18 +1400,14 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
   // Step 2: Check hash blacklist
   if (resource_hash) {
-    // Match both exact operation and wildcard '*'
     const char *hash_blacklist_sql =
-        "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
-        "'hash_blacklist' AND rule_target = ? AND (operation = ? OR operation = '*') AND enabled = "
-        "1 ORDER BY priority LIMIT 1";
+        "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'blacklist_hash' AND pattern_type = 'hash' AND pattern_value = ? AND active = 1 LIMIT 1";

     rc = sqlite3_prepare_v2(db, hash_blacklist_sql, -1, &stmt, NULL);
     if (rc == SQLITE_OK) {
       sqlite3_bind_text(stmt, 1, resource_hash, -1, SQLITE_STATIC);
-      sqlite3_bind_text(stmt, 2, operation ? operation : "", -1, SQLITE_STATIC);

       if (sqlite3_step(stmt) == SQLITE_ROW) {
-        const char *description = (const char *)sqlite3_column_text(stmt, 1);
+        const char *description = "Hash blacklisted";
         validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 2 FAILED - "
                             "Hash blacklisted\n");
         char hash_blacklist_msg[256];
@@ -1423,17 +1439,14 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
   if (mime_type) {
     // Match both exact MIME type and wildcard patterns (e.g., 'image/*')
     const char *mime_blacklist_sql =
-        "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
-        "'mime_blacklist' AND (rule_target = ? OR rule_target LIKE '%/*' AND ? LIKE REPLACE(rule_target, '*', '%')) AND (operation = ? OR operation = '*') AND enabled = "
-        "1 ORDER BY priority LIMIT 1";
+        "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'blacklist_mime' AND pattern_type = 'mime' AND (pattern_value = ? OR pattern_value LIKE '%/*' AND ? LIKE REPLACE(pattern_value, '*', '%')) AND active = 1 LIMIT 1";

     rc = sqlite3_prepare_v2(db, mime_blacklist_sql, -1, &stmt, NULL);
     if (rc == SQLITE_OK) {
       sqlite3_bind_text(stmt, 1, mime_type, -1, SQLITE_STATIC);
       sqlite3_bind_text(stmt, 2, mime_type, -1, SQLITE_STATIC);
-      sqlite3_bind_text(stmt, 3, operation ? operation : "", -1, SQLITE_STATIC);

       if (sqlite3_step(stmt) == SQLITE_ROW) {
-        const char *description = (const char *)sqlite3_column_text(stmt, 1);
+        const char *description = "MIME type blacklisted";
         validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 FAILED - "
                             "MIME type blacklisted\n");
         char mime_blacklist_msg[256];
@@ -1462,133 +1475,151 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
   }

   // Step 4: Check pubkey whitelist
-  // Match both exact operation and wildcard '*'
   const char *whitelist_sql =
-      "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
-      "'pubkey_whitelist' AND rule_target = ? AND (operation = ? OR operation = '*') AND enabled = "
-      "1 ORDER BY priority LIMIT 1";
+      "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'whitelist_pubkey' AND pattern_type = 'pubkey' AND pattern_value = ? AND active = 1 LIMIT 1";

   rc = sqlite3_prepare_v2(db, whitelist_sql, -1, &stmt, NULL);
   if (rc == SQLITE_OK) {
     sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
-    sqlite3_bind_text(stmt, 2, operation ? operation : "", -1, SQLITE_STATIC);

     if (sqlite3_step(stmt) == SQLITE_ROW) {
-      const char *description = (const char *)sqlite3_column_text(stmt, 1);
-      validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 PASSED - "
+      const char *description = "Pubkey whitelisted";
+      validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 PASSED - "
                           "Pubkey whitelisted\n");
       char whitelist_msg[256];
-      sprintf(whitelist_msg,
-              "VALIDATOR_DEBUG: RULES ENGINE - Whitelist rule matched: %s\n",
-              description ? description : "Unknown");
+      snprintf(whitelist_msg,
+               sizeof(whitelist_msg),
+               "VALIDATOR_DEBUG: RULES ENGINE - Whitelist rule matched: %s\n",
+               description ? description : "Unknown");
       validator_debug_log(whitelist_msg);
-      sqlite3_finalize(stmt);
-      sqlite3_close(db);
-      return NOSTR_SUCCESS; // Allow whitelisted pubkey
+      pubkey_whitelisted = 1;
+    } else {
+      validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 - Pubkey not whitelisted\n");
     }
     sqlite3_finalize(stmt);
+  } else {
+    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 FAILED - Pubkey whitelist query failed\n");
   }
-  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 FAILED - Pubkey "
-                      "not whitelisted\n");

-  // Step 5: Check MIME type whitelist (only if not already denied)
+  // Step 5: Check MIME type whitelist
   if (mime_type) {
-    // Match both exact MIME type and wildcard patterns (e.g., 'image/*')
+    char mime_pattern_wildcard[MAX_MIME_TYPE_LEN + 2];
     const char *mime_whitelist_sql =
-        "SELECT rule_type, description FROM auth_rules WHERE rule_type = "
-        "'mime_whitelist' AND (rule_target = ? OR rule_target LIKE '%/*' AND ? LIKE REPLACE(rule_target, '*', '%')) AND (operation = ? OR operation = '*') AND enabled = "
-        "1 ORDER BY priority LIMIT 1";
+        "SELECT rule_type FROM auth_rules WHERE rule_type LIKE 'whitelist_mime' AND pattern_type = 'mime' AND (pattern_value = ? OR pattern_value LIKE ? ) AND active = 1 LIMIT 1";

     rc = sqlite3_prepare_v2(db, mime_whitelist_sql, -1, &stmt, NULL);
     if (rc == SQLITE_OK) {
       sqlite3_bind_text(stmt, 1, mime_type, -1, SQLITE_STATIC);
-      sqlite3_bind_text(stmt, 2, mime_type, -1, SQLITE_STATIC);
-      sqlite3_bind_text(stmt, 3, operation ? operation : "", -1, SQLITE_STATIC);
+      const char *slash_pos = strchr(mime_type, '/');
+      if (slash_pos != NULL) {
+        size_t prefix_len = slash_pos - mime_type;
+        if (prefix_len < MAX_MIME_TYPE_LEN) {
+          snprintf(mime_pattern_wildcard, sizeof(mime_pattern_wildcard), "%.*s/%%", (int)prefix_len, mime_type);
+        } else {
+          snprintf(mime_pattern_wildcard, sizeof(mime_pattern_wildcard), "%%/%%");
+        }
+      } else {
+        snprintf(mime_pattern_wildcard, sizeof(mime_pattern_wildcard), "%s/%%", mime_type);
+      }
+      sqlite3_bind_text(stmt, 2, mime_pattern_wildcard, -1, SQLITE_TRANSIENT);

       if (sqlite3_step(stmt) == SQLITE_ROW) {
-        const char *description = (const char *)sqlite3_column_text(stmt, 1);
-        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 PASSED - "
-                            "MIME type whitelisted\n");
+        const char *description = "MIME type whitelisted";
+        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 PASSED - MIME type whitelisted\n");
         char mime_whitelist_msg[256];
-        sprintf(mime_whitelist_msg,
-                "VALIDATOR_DEBUG: RULES ENGINE - MIME whitelist rule matched: %s\n",
-                description ? description : "Unknown");
+        snprintf(mime_whitelist_msg,
+                 sizeof(mime_whitelist_msg),
+                 "VALIDATOR_DEBUG: RULES ENGINE - MIME whitelist rule matched: %s (pattern=%s)\n",
+                 description ? description : "Unknown",
+                 mime_pattern_wildcard);
         validator_debug_log(mime_whitelist_msg);
-        sqlite3_finalize(stmt);
-        sqlite3_close(db);
-        return NOSTR_SUCCESS; // Allow whitelisted MIME type
+        mime_whitelisted = 1;
+      } else {
+        char mime_not_msg[256];
+        snprintf(mime_not_msg,
+                 sizeof(mime_not_msg),
+                 "VALIDATOR_DEBUG: RULES ENGINE - STEP 5 - MIME type not whitelisted (pattern=%s)\n",
+                 mime_pattern_wildcard);
+        validator_debug_log(mime_not_msg);
       }
       sqlite3_finalize(stmt);
+    } else {
+      validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 FAILED - Failed to prepare MIME whitelist query\n");
     }
-    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 FAILED - MIME "
-                        "type not whitelisted\n");
   } else {
-    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 SKIPPED - No "
-                        "MIME type provided\n");
+    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 SKIPPED - No MIME type provided\n");
   }

-  // Step 6: Check if any MIME whitelist rules exist - if yes, deny by default
-  // Match both exact operation and wildcard '*'
+  // Step 6: Count MIME whitelist rules
   const char *mime_whitelist_exists_sql =
-      "SELECT COUNT(*) FROM auth_rules WHERE rule_type = 'mime_whitelist' "
-      "AND (operation = ? OR operation = '*') AND enabled = 1 LIMIT 1";
+      "SELECT COUNT(*) FROM auth_rules WHERE rule_type LIKE 'whitelist_mime' "
+      "AND pattern_type = 'mime' AND active = 1";

   rc = sqlite3_prepare_v2(db, mime_whitelist_exists_sql, -1, &stmt, NULL);
   if (rc == SQLITE_OK) {
-    sqlite3_bind_text(stmt, 1, operation ? operation : "", -1, SQLITE_STATIC);
     if (sqlite3_step(stmt) == SQLITE_ROW) {
-      int mime_whitelist_count = sqlite3_column_int(stmt, 0);
-      if (mime_whitelist_count > 0) {
-        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 6 FAILED - "
-                            "MIME whitelist exists but type not in it\n");
-
-        // Set specific violation details for status code mapping
-        strcpy(g_last_rule_violation.violation_type, "mime_whitelist_violation");
-        strcpy(g_last_rule_violation.reason,
-               "MIME type not whitelisted for this operation");
-        sqlite3_finalize(stmt);
-        sqlite3_close(db);
-        return NOSTR_ERROR_AUTH_REQUIRED;
-      }
+      mime_whitelist_count = sqlite3_column_int(stmt, 0);
+      char mime_cnt_msg[256];
+      snprintf(mime_cnt_msg, sizeof(mime_cnt_msg),
+               "VALIDATOR_DEBUG: RULES ENGINE - MIME whitelist count: %d\n",
+               mime_whitelist_count);
+      validator_debug_log(mime_cnt_msg);
     }
     sqlite3_finalize(stmt);
+  } else {
+    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 6 FAILED - Failed to prepare MIME whitelist count query\n");
   }
-  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 6 PASSED - No "
-                      "MIME whitelist restrictions apply\n");

-  // Step 7: Check if any whitelist rules exist - if yes, deny by default
-  // Match both exact operation and wildcard '*'
+  if (mime_whitelist_count > 0 && !mime_whitelisted) {
+    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - MIME whitelist exists but MIME type not allowed\n");
+    strcpy(g_last_rule_violation.violation_type, "mime_whitelist_violation");
+    strcpy(g_last_rule_violation.reason, "MIME type not whitelisted for this operation");
+    sqlite3_close(db);
+    return NOSTR_ERROR_AUTH_REQUIRED;
+  }
+
+  // Step 7: Count pubkey whitelist rules
   const char *whitelist_exists_sql =
-      "SELECT COUNT(*) FROM auth_rules WHERE rule_type = 'pubkey_whitelist' "
-      "AND (operation = ? OR operation = '*') AND enabled = 1 LIMIT 1";
+      "SELECT COUNT(*) FROM auth_rules WHERE (rule_type LIKE 'whitelist_pubkey' OR rule_type LIKE 'pubkey_whitelist') "
+      "AND pattern_type = 'pubkey' AND active = 1";

   rc = sqlite3_prepare_v2(db, whitelist_exists_sql, -1, &stmt, NULL);
   if (rc == SQLITE_OK) {
-    sqlite3_bind_text(stmt, 1, operation ? operation : "", -1, SQLITE_STATIC);
     if (sqlite3_step(stmt) == SQLITE_ROW) {
-      int whitelist_count = sqlite3_column_int(stmt, 0);
-      if (whitelist_count > 0) {
-        validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 FAILED - "
-                            "Whitelist exists but pubkey not in it\n");
-
-        // Set specific violation details for status code mapping
-        strcpy(g_last_rule_violation.violation_type, "whitelist_violation");
-        strcpy(g_last_rule_violation.reason,
-               "Public key not whitelisted for this operation");
-        sqlite3_finalize(stmt);
-        sqlite3_close(db);
-        return NOSTR_ERROR_AUTH_REQUIRED;
-      }
+      pubkey_whitelist_count = sqlite3_column_int(stmt, 0);
+      char pubkey_cnt_msg[256];
+      snprintf(pubkey_cnt_msg, sizeof(pubkey_cnt_msg),
+               "VALIDATOR_DEBUG: RULES ENGINE - Pubkey whitelist count: %d\n",
+               pubkey_whitelist_count);
+      validator_debug_log(pubkey_cnt_msg);
     }
     sqlite3_finalize(stmt);
+  } else {
+    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 7 FAILED - Failed to prepare pubkey whitelist count query\n");
+  }
+
+  if (pubkey_whitelist_count > 0) {
+    char pubkey_whitelist_msg[256];
+    snprintf(pubkey_whitelist_msg, sizeof(pubkey_whitelist_msg),
+             "VALIDATOR_DEBUG: RULES ENGINE - Pubkey whitelist exists (%d entries)\n",
+             pubkey_whitelist_count);
+    validator_debug_log(pubkey_whitelist_msg);
+  }
+
+  if (pubkey_whitelist_count > 0 && !pubkey_whitelisted) {
+    validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - Pubkey whitelist exists but pubkey not allowed\n");
+    strcpy(g_last_rule_violation.violation_type, "whitelist_violation");
+    strcpy(g_last_rule_violation.reason, "Public key not whitelisted for this operation");
+    sqlite3_close(db);
+    return NOSTR_ERROR_AUTH_REQUIRED;
+  }
+
+  if ((mime_whitelist_count > 0 && !mime_whitelisted) ||
+      (pubkey_whitelist_count > 0 && !pubkey_whitelisted)) {
+    // Already handled above but include fallback
+    sqlite3_close(db);
+    return NOSTR_ERROR_AUTH_REQUIRED;
   }
-  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 PASSED - No "
-                      "whitelist restrictions apply\n");

   sqlite3_close(db);
-  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 7 PASSED - All "
-                      "rule checks completed, default ALLOW\n");
-  return NOSTR_SUCCESS; // Default allow if no restrictive rules matched
+  validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - Completed whitelist checks\n");
+  return NOSTR_SUCCESS;
 }
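A worked example of the wildcard match above: for an upload of image/png the code derives the LIKE pattern image/%, and the clause pattern_value LIKE ? then matches a stored whitelist row whose pattern_value is image/* (the SQL % wildcard also covers the literal '*'). A standalone sketch of just the pattern construction, mirroring the snprintf logic above:

#include <stdio.h>
#include <string.h>

#define MAX_MIME_TYPE_LEN 128

int main(void) {
  const char *mime_type = "image/png"; // example input
  char pattern[MAX_MIME_TYPE_LEN + 2];
  const char *slash = strchr(mime_type, '/');
  if (slash != NULL) {
    // Keep the type prefix, replace the subtype with the SQL wildcard.
    snprintf(pattern, sizeof(pattern), "%.*s/%%",
             (int)(slash - mime_type), mime_type);
  } else {
    snprintf(pattern, sizeof(pattern), "%s/%%", mime_type);
  }
  printf("%s\n", pattern); // prints "image/%"
  return 0;
}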
/**

View File

@@ -1,19 +1,28 @@
 #!/bin/bash
 # white_black_list_test.sh - Whitelist/Blacklist Rules Test Suite
-# Tests the auth_rules table functionality for pubkey and MIME type filtering
+# Tests the auth_rules table functionality using Kind 23458 admin commands

 # Configuration
 SERVER_URL="http://localhost:9001"
 UPLOAD_ENDPOINT="${SERVER_URL}/upload"
-DB_PATH="db/ginxsom.db"
+ADMIN_API_ENDPOINT="${SERVER_URL}/api/admin"
+DB_PATH="db/52e366edfa4e9cc6a6d4653828e51ccf828a2f5a05227d7a768f33b5a198681a.db"
 TEST_DIR="tests/auth_test_tmp"
+TEST_KEYS_FILE=".test_keys"

 # Test results tracking
 TESTS_PASSED=0
 TESTS_FAILED=0
 TOTAL_TESTS=0

+# Load admin keys from .test_keys
+if [[ ! -f "$TEST_KEYS_FILE" ]]; then
+  echo "$TEST_KEYS_FILE not found"
+  exit 1
+fi
+source "$TEST_KEYS_FILE"

 # Test keys for different scenarios - Using WSB's keys for TEST_USER1
 # Generated using: nak key public <privkey>
 TEST_USER1_PRIVKEY="22cc83aa57928a2800234c939240c9a6f0f44a33ea3838a860ed38930b195afd"
@@ -42,6 +51,37 @@ record_test_result() {
   fi
 }
# Helper function to send admin command via Kind 23458
send_admin_command() {
local command_json="$1"
# Encrypt command with NIP-44
local encrypted_command=$(nak encrypt --sec "$ADMIN_PRIVKEY" -p "$SERVER_PUBKEY" "$command_json")
if [[ -z "$encrypted_command" ]]; then
echo "❌ Failed to encrypt command"
return 1
fi
# Create Kind 23458 event
local event=$(nak event -k 23458 \
-c "$encrypted_command" \
--tag p="$SERVER_PUBKEY" \
--sec "$ADMIN_PRIVKEY")
if [[ -z "$event" ]]; then
echo "❌ Failed to create admin event"
return 1
fi
# Send to admin API endpoint
local response=$(curl -s -X POST "$ADMIN_API_ENDPOINT" \
-H "Content-Type: application/json" \
-d "$event")
echo "$response"
}
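Usage mirrors the sections below, e.g. send_admin_command '["blacklist", "pubkey", "'$TEST_USER3_PUBKEY'"]'; the function prints the server's response body, so callers can capture it with command substitution.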
 # Check prerequisites
 for cmd in nak curl jq sqlite3; do
   if ! command -v $cmd &> /dev/null; then
@@ -130,20 +170,24 @@ test_upload() {
 }

 # Clean up any existing rules from previous tests
-echo "Cleaning up existing auth rules..."
-sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" 2>/dev/null
+echo "Cleaning up existing auth rules via admin command..."
+CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
+send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1

 # Enable authentication rules
 echo "Enabling authentication rules..."
-sqlite3 "$DB_PATH" "UPDATE config SET value = 'true' WHERE key = 'auth_rules_enabled';"
+ENABLE_CMD='["config_update", {"auth_rules_enabled": "true"}]'
+send_admin_command "$ENABLE_CMD" > /dev/null 2>&1

 echo
 echo "=== SECTION 1: PUBKEY BLACKLIST TESTS ==="
 echo

-# Test 1: Add pubkey blacklist rule
-echo "Adding blacklist rule for TEST_USER3..."
-sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_blacklist', '$TEST_USER3_PUBKEY', 'upload', 10, 'Test blacklist');"
+# Test 1: Add pubkey blacklist rule via admin command
+echo "Adding blacklist rule for TEST_USER3 via admin API..."
+BLACKLIST_CMD='["blacklist", "pubkey", "'$TEST_USER3_PUBKEY'"]'
+BLACKLIST_RESPONSE=$(send_admin_command "$BLACKLIST_CMD")
+echo "Response: $BLACKLIST_RESPONSE" | jq -c '.' 2>/dev/null || echo "$BLACKLIST_RESPONSE"

 # Test 1a: Blacklisted user should be denied
 test_file1=$(create_test_file "blacklist_test1.txt" "Content from blacklisted user")
@@ -157,13 +201,16 @@ echo
echo "=== SECTION 2: PUBKEY WHITELIST TESTS ===" echo "=== SECTION 2: PUBKEY WHITELIST TESTS ==="
echo echo
# Clean rules # Clean rules via admin command
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" echo "Cleaning rules via admin API..."
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;" CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1
# Test 2: Add pubkey whitelist rule # Test 2: Add pubkey whitelist rule via admin command
echo "Adding whitelist rule for TEST_USER1..." echo "Adding whitelist rule for TEST_USER1 via admin API..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_whitelist', '$TEST_USER1_PUBKEY', 'upload', 300, 'Test whitelist');" WHITELIST_CMD='["whitelist", "pubkey", "'$TEST_USER1_PUBKEY'"]'
WHITELIST_RESPONSE=$(send_admin_command "$WHITELIST_CMD")
echo "Response: $WHITELIST_RESPONSE" | jq -c '.' 2>/dev/null || echo "$WHITELIST_RESPONSE"
# Test 2a: Whitelisted user should succeed # Test 2a: Whitelisted user should succeed
test_file3=$(create_test_file "whitelist_test1.txt" "Content from whitelisted user") test_file3=$(create_test_file "whitelist_test1.txt" "Content from whitelisted user")
@@ -177,15 +224,17 @@ echo
echo "=== SECTION 3: HASH BLACKLIST TESTS ===" echo "=== SECTION 3: HASH BLACKLIST TESTS ==="
echo echo
# Clean rules # Clean rules via admin command
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1
# Test 3: Create a file and blacklist its hash # Test 3: Create a file and blacklist its hash via admin command
test_file5=$(create_test_file "hash_blacklist_test.txt" "This specific file is blacklisted") test_file5=$(create_test_file "hash_blacklist_test.txt" "This specific file is blacklisted")
BLACKLISTED_HASH=$(sha256sum "$test_file5" | cut -d' ' -f1) BLACKLISTED_HASH=$(sha256sum "$test_file5" | cut -d' ' -f1)
echo "Adding hash blacklist rule for $BLACKLISTED_HASH..." echo "Adding hash blacklist rule for $BLACKLISTED_HASH via admin API..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('hash_blacklist', '$BLACKLISTED_HASH', 'upload', 100, 'Test hash blacklist');" HASH_BLACKLIST_CMD='["blacklist", "hash", "'$BLACKLISTED_HASH'"]'
send_admin_command "$HASH_BLACKLIST_CMD" > /dev/null 2>&1
# Test 3a: Blacklisted hash should be denied # Test 3a: Blacklisted hash should be denied
test_upload "Test 3a: Blacklisted Hash Upload" "$TEST_USER1_PRIVKEY" "$test_file5" "403" test_upload "Test 3a: Blacklisted Hash Upload" "$TEST_USER1_PRIVKEY" "$test_file5" "403"
@@ -198,13 +247,14 @@ echo
echo "=== SECTION 4: MIME TYPE BLACKLIST TESTS ===" echo "=== SECTION 4: MIME TYPE BLACKLIST TESTS ==="
echo echo
# Clean rules # Clean rules via admin command
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;" send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1
# Test 4: Blacklist executable MIME types # Test 4: Blacklist executable MIME types via admin command
echo "Adding MIME type blacklist rules..." echo "Adding MIME type blacklist rules via admin API..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('mime_blacklist', 'application/x-executable', 'upload', 200, 'Block executables');" MIME_BLACKLIST_CMD='["blacklist", "mime", "application/x-executable"]'
send_admin_command "$MIME_BLACKLIST_CMD" > /dev/null 2>&1
# Note: This test would require the server to detect MIME types from file content # Note: This test would require the server to detect MIME types from file content
# For now, we'll test with text/plain which should be allowed # For now, we'll test with text/plain which should be allowed
@@ -215,14 +265,16 @@ echo
echo "=== SECTION 5: MIME TYPE WHITELIST TESTS ===" echo "=== SECTION 5: MIME TYPE WHITELIST TESTS ==="
echo echo
# Clean rules # Clean rules via admin command
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;" send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1
# Test 5: Whitelist only image MIME types # Test 5: Whitelist only image MIME types via admin command
echo "Adding MIME type whitelist rules..." echo "Adding MIME type whitelist rules via admin API..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('mime_whitelist', 'image/jpeg', 'upload', 400, 'Allow JPEG');" MIME_WL1_CMD='["whitelist", "mime", "image/jpeg"]'
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('mime_whitelist', 'image/png', 'upload', 400, 'Allow PNG');" MIME_WL2_CMD='["whitelist", "mime", "image/png"]'
send_admin_command "$MIME_WL1_CMD" > /dev/null 2>&1
send_admin_command "$MIME_WL2_CMD" > /dev/null 2>&1
# Note: MIME type detection would need to be implemented in the server # Note: MIME type detection would need to be implemented in the server
# For now, text/plain should be denied if whitelist exists # For now, text/plain should be denied if whitelist exists
@@ -233,14 +285,16 @@ echo
echo "=== SECTION 6: PRIORITY ORDERING TESTS ===" echo "=== SECTION 6: PRIORITY ORDERING TESTS ==="
echo echo
# Clean rules # Clean rules via admin command
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;" send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1
# Test 6: Blacklist should override whitelist (priority ordering) # Test 6: Blacklist should override whitelist (priority ordering)
echo "Adding both blacklist (priority 10) and whitelist (priority 300) for same pubkey..." echo "Adding both blacklist and whitelist for same pubkey via admin API..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_blacklist', '$TEST_USER1_PUBKEY', 'upload', 10, 'Blacklist priority test');" BL_CMD='["blacklist", "pubkey", "'$TEST_USER1_PUBKEY'"]'
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_whitelist', '$TEST_USER1_PUBKEY', 'upload', 300, 'Whitelist priority test');" WL_CMD='["whitelist", "pubkey", "'$TEST_USER1_PUBKEY'"]'
send_admin_command "$BL_CMD" > /dev/null 2>&1
send_admin_command "$WL_CMD" > /dev/null 2>&1
# Test 6a: Blacklist should win (lower priority number = higher priority) # Test 6a: Blacklist should win (lower priority number = higher priority)
test_file9=$(create_test_file "priority_test.txt" "Testing priority ordering") test_file9=$(create_test_file "priority_test.txt" "Testing priority ordering")
@@ -250,13 +304,14 @@ echo
echo "=== SECTION 7: OPERATION-SPECIFIC RULES ===" echo "=== SECTION 7: OPERATION-SPECIFIC RULES ==="
echo echo
# Clean rules # Clean rules via admin command
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;" send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1
# Test 7: Blacklist only for upload operation # Test 7: Blacklist for user via admin command
echo "Adding blacklist rule for upload operation only..." echo "Adding blacklist rule for TEST_USER2 via admin API..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_blacklist', '$TEST_USER2_PUBKEY', 'upload', 10, 'Upload-only blacklist');" BL_USER2_CMD='["blacklist", "pubkey", "'$TEST_USER2_PUBKEY'"]'
send_admin_command "$BL_USER2_CMD" > /dev/null 2>&1
# Test 7a: Upload should be denied # Test 7a: Upload should be denied
test_file10=$(create_test_file "operation_test.txt" "Testing operation-specific rules") test_file10=$(create_test_file "operation_test.txt" "Testing operation-specific rules")
@@ -266,13 +321,14 @@ echo
echo "=== SECTION 8: WILDCARD OPERATION TESTS ===" echo "=== SECTION 8: WILDCARD OPERATION TESTS ==="
echo echo
# Clean rules # Clean rules via admin command
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;" send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1
# Test 8: Blacklist for all operations using wildcard # Test 8: Blacklist for user via admin command
echo "Adding blacklist rule for all operations (*)..." echo "Adding blacklist rule for TEST_USER3 via admin API..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, description) VALUES ('pubkey_blacklist', '$TEST_USER3_PUBKEY', '*', 10, 'All operations blacklist');" BL_USER3_CMD='["blacklist", "pubkey", "'$TEST_USER3_PUBKEY'"]'
send_admin_command "$BL_USER3_CMD" > /dev/null 2>&1
# Test 8a: Upload should be denied # Test 8a: Upload should be denied
test_file11=$(create_test_file "wildcard_test.txt" "Testing wildcard operation") test_file11=$(create_test_file "wildcard_test.txt" "Testing wildcard operation")
@@ -282,13 +338,13 @@ echo
echo "=== SECTION 9: ENABLED/DISABLED RULES ===" echo "=== SECTION 9: ENABLED/DISABLED RULES ==="
echo echo
# Clean rules # Clean rules via admin command
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
sqlite3 "$DB_PATH" "DELETE FROM auth_rules_cache;" send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1
# Test 9: Disabled rule should not be enforced # Test 9: Disabled rule should not be enforced
echo "Adding disabled blacklist rule..." echo "Adding disabled blacklist rule via SQL (admin API doesn't support active=0 on create)..."
sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, rule_target, operation, priority, enabled, description) VALUES ('pubkey_blacklist', '$TEST_USER1_PUBKEY', 'upload', 10, 0, 'Disabled blacklist');" sqlite3 "$DB_PATH" "INSERT INTO auth_rules (rule_type, pattern_type, pattern_value, active) VALUES ('blacklist_pubkey', 'pubkey', '$TEST_USER1_PUBKEY', 0);"
# Test 9a: Upload should succeed (rule is disabled) # Test 9a: Upload should succeed (rule is disabled)
test_file12=$(create_test_file "disabled_rule_test.txt" "Testing disabled rule") test_file12=$(create_test_file "disabled_rule_test.txt" "Testing disabled rule")
@@ -296,7 +352,7 @@ test_upload "Test 9a: Disabled Rule Not Enforced" "$TEST_USER1_PRIVKEY" "$test_f
 # Test 9b: Enable the rule
 echo "Enabling the blacklist rule..."
-sqlite3 "$DB_PATH" "UPDATE auth_rules SET enabled = 1 WHERE rule_target = '$TEST_USER1_PUBKEY';"
+sqlite3 "$DB_PATH" "UPDATE auth_rules SET active = 1 WHERE pattern_value = '$TEST_USER1_PUBKEY';"

 # Test 9c: Upload should now be denied
 test_file13=$(create_test_file "enabled_rule_test.txt" "Testing enabled rule")
@@ -307,9 +363,10 @@ echo
echo "=== SECTION 11: CLEANUP AND RESET ===" echo "=== SECTION 11: CLEANUP AND RESET ==="
echo echo
# Clean up all test rules # Clean up all test rules via admin command
echo "Cleaning up test rules..." echo "Cleaning up test rules via admin API..."
sqlite3 "$DB_PATH" "DELETE FROM auth_rules;" CLEANUP_CMD='["sql_query", "DELETE FROM auth_rules"]'
send_admin_command "$CLEANUP_CMD" > /dev/null 2>&1
# Verify cleanup # Verify cleanup
RULE_COUNT=$(sqlite3 "$DB_PATH" "SELECT COUNT(*) FROM auth_rules;" 2>/dev/null) RULE_COUNT=$(sqlite3 "$DB_PATH" "SELECT COUNT(*) FROM auth_rules;" 2>/dev/null)