Compare commits

...

4 Commits

Author SHA1 Message Date
Your Name
b27a56a296 v0.7.9 - Optimize Docker build caching and enforce static binary usage
- Restructure Dockerfile.alpine-musl for better layer caching
  * Build dependencies (secp256k1, libwebsockets) in separate cached layers
  * Copy submodules before source files to maximize cache hits
  * Reduce rebuild time from ~2-3 minutes to ~10-15 seconds for source changes
- Remove 'musl' from binary names (c_relay_static_x86_64 instead of c_relay_static_musl_x86_64)
- Enforce static binary usage in make_and_restart_relay.sh
  * Remove all fallbacks to regular make builds
  * Exit with clear error if static binary not found
  * Ensures JSON1 extension is always available
- Fix build_static.sh hanging on ldd check with timeout
- Remove sudo usage from build_static.sh (assumes docker group membership)

These changes ensure consistent builds with JSON1 support and dramatically improve
development iteration speed through intelligent Docker layer caching.
2025-10-11 11:08:01 -04:00
Your Name
ecd7095123 v0.7.8 - Fully static builds implemented with musl-gcc 2025-10-11 10:51:03 -04:00
Your Name
d449513861 Add MUSL static binary build system using Alpine Docker
- Create Dockerfile.alpine-musl for truly portable static binaries
- Update build_static.sh to use Docker with sudo fallback
- Fix source code portability issues for MUSL:
  * Add missing headers in config.c, dm_admin.c
  * Remove glibc-specific headers in nip009.c, subscriptions.c
- Update nostr_core_lib submodule with fortification fix
- Add comprehensive documentation in docs/musl_static_build.md

Binary characteristics:
- Size: 7.6MB (vs 12MB+ for glibc static)
- Dependencies: Zero (truly portable)
- Compatibility: Any Linux distribution
- Build time: ~2 minutes with Docker caching

Resolves fortification symbol issues (__snprintf_chk, __fprintf_chk)
that prevented MUSL static linking.
2025-10-11 10:17:20 -04:00
Your Name
6709e229b3 v0.7.7 - Prevent sql attacks and rate limiting on subscriptions 2025-10-10 15:44:10 -04:00
35 changed files with 5496 additions and 343 deletions

119
Dockerfile.alpine-musl Normal file
View File

@@ -0,0 +1,119 @@
# Alpine-based MUSL static binary builder for C-Relay
# Produces truly portable binaries with zero runtime dependencies
FROM alpine:3.19 AS builder
# Install build dependencies
RUN apk add --no-cache \
build-base \
musl-dev \
git \
cmake \
pkgconfig \
autoconf \
automake \
libtool \
openssl-dev \
openssl-libs-static \
zlib-dev \
zlib-static \
curl-dev \
curl-static \
sqlite-dev \
sqlite-static \
linux-headers \
wget \
bash
# Set working directory
WORKDIR /build
# Build libsecp256k1 static (cached layer - only rebuilds if Alpine version changes)
RUN cd /tmp && \
git clone https://github.com/bitcoin-core/secp256k1.git && \
cd secp256k1 && \
./autogen.sh && \
./configure --enable-static --disable-shared --prefix=/usr \
CFLAGS="-fPIC" && \
make -j$(nproc) && \
make install && \
rm -rf /tmp/secp256k1
# Build libwebsockets static with minimal features (cached layer)
RUN cd /tmp && \
git clone --depth 1 --branch v4.3.3 https://github.com/warmcat/libwebsockets.git && \
cd libwebsockets && \
mkdir build && cd build && \
cmake .. \
-DLWS_WITH_STATIC=ON \
-DLWS_WITH_SHARED=OFF \
-DLWS_WITH_SSL=ON \
-DLWS_WITHOUT_TESTAPPS=ON \
-DLWS_WITHOUT_TEST_SERVER=ON \
-DLWS_WITHOUT_TEST_CLIENT=ON \
-DLWS_WITHOUT_TEST_PING=ON \
-DLWS_WITH_HTTP2=OFF \
-DLWS_WITH_LIBUV=OFF \
-DLWS_WITH_LIBEVENT=OFF \
-DLWS_IPV6=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_C_FLAGS="-fPIC" && \
make -j$(nproc) && \
make install && \
rm -rf /tmp/libwebsockets
# Copy only submodule configuration and git directory
COPY .gitmodules /build/.gitmodules
COPY .git /build/.git
# Clean up any stale submodule references (nips directory is not a submodule)
RUN git rm --cached nips 2>/dev/null || true
# Initialize submodules (cached unless .gitmodules changes)
RUN git submodule update --init --recursive
# Copy nostr_core_lib source files (cached unless nostr_core_lib changes)
COPY nostr_core_lib /build/nostr_core_lib/
# Build nostr_core_lib with required NIPs (cached unless nostr_core_lib changes)
# Disable fortification in build.sh to prevent __*_chk symbol issues
# NIPs: 001(Basic), 006(Keys), 013(PoW), 017(DMs), 019(Bech32), 044(Encryption), 059(Gift Wrap - required by NIP-17)
RUN cd nostr_core_lib && \
chmod +x build.sh && \
sed -i 's/CFLAGS="-Wall -Wextra -std=c99 -fPIC -O2"/CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"/' build.sh && \
rm -f *.o *.a 2>/dev/null || true && \
./build.sh --nips=1,6,13,17,19,44,59
# Copy c-relay source files LAST (only this layer rebuilds on source changes)
COPY src/ /build/src/
COPY Makefile /build/Makefile
# Build c-relay with full static linking (only rebuilds when src/ changes)
# Disable fortification to avoid __*_chk symbols that don't exist in MUSL
RUN gcc -static -O2 -Wall -Wextra -std=c99 \
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
-I. -Inostr_core_lib -Inostr_core_lib/nostr_core \
-Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket \
src/main.c src/config.c src/dm_admin.c src/request_validator.c \
src/nip009.c src/nip011.c src/nip013.c src/nip040.c src/nip042.c \
src/websockets.c src/subscriptions.c src/api.c src/embedded_web_content.c \
-o /build/c_relay_static \
nostr_core_lib/libnostr_core_x64.a \
-lwebsockets -lssl -lcrypto -lsqlite3 -lsecp256k1 \
-lcurl -lz -lpthread -lm -ldl
# Strip binary to reduce size
RUN strip /build/c_relay_static
# Verify it's truly static
RUN echo "=== Binary Information ===" && \
file /build/c_relay_static && \
ls -lh /build/c_relay_static && \
echo "=== Checking for dynamic dependencies ===" && \
(ldd /build/c_relay_static 2>&1 || echo "Binary is static") && \
echo "=== Build complete ==="
# Output stage - just the binary
FROM scratch AS output
COPY --from=builder /build/c_relay_static /c_relay_static

View File

@@ -1,144 +1,190 @@
#!/bin/bash
# Build fully static MUSL binaries for C-Relay
# Produces portable binaries with zero runtime dependencies
# Build fully static MUSL binaries for C-Relay using Alpine Docker
# Produces truly portable binaries with zero runtime dependencies
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BUILD_DIR="$SCRIPT_DIR/build"
DOCKERFILE="$SCRIPT_DIR/Dockerfile.alpine-musl"
echo "Building fully static MUSL binaries for C-Relay..."
echo "=========================================="
echo "C-Relay MUSL Static Binary Builder"
echo "=========================================="
echo "Project directory: $SCRIPT_DIR"
echo "Build directory: $BUILD_DIR"
echo ""
# Create build directory
mkdir -p "$BUILD_DIR"
# Check if Docker is available first
if command -v docker &> /dev/null && sudo docker buildx version &> /dev/null 2>&1; then
echo "Docker available but Alpine repositories are having issues - using native build"
USE_DOCKER=false
else
echo "Docker not available - attempting native MUSL build"
USE_DOCKER=false
# Check if Docker is available
if ! command -v docker &> /dev/null; then
echo "ERROR: Docker is not installed or not in PATH"
echo ""
echo "Docker is required to build MUSL static binaries."
echo "Please install Docker:"
echo " - Ubuntu/Debian: sudo apt install docker.io"
echo " - Or visit: https://docs.docker.com/engine/install/"
echo ""
exit 1
fi
# Check if musl-gcc is available for native build
if [ "$USE_DOCKER" = false ]; then
if ! command -v musl-gcc &> /dev/null; then
echo "Installing musl development tools..."
sudo apt update && sudo apt install -y musl-dev musl-tools
if ! command -v musl-gcc &> /dev/null; then
echo "ERROR: Failed to install musl-gcc"
echo "Please install musl-dev package manually: sudo apt install musl-dev musl-tools"
exit 1
fi
fi
# Check if Docker daemon is running
if ! docker info &> /dev/null; then
echo "ERROR: Docker daemon is not running or user not in docker group"
echo ""
echo "Please start Docker and ensure you're in the docker group:"
echo " - sudo systemctl start docker"
echo " - sudo usermod -aG docker $USER && newgrp docker"
echo " - Or start Docker Desktop"
echo ""
exit 1
fi
if [ "$USE_DOCKER" = true ]; then
# Docker-based build
echo "Building x86_64 static binary with Docker..."
sudo docker buildx build \
--platform linux/amd64 \
-f "$SCRIPT_DIR/examples/deployment/static-builder.Dockerfile" \
-t c-relay-static-builder-x86_64 \
--load \
"$SCRIPT_DIR"
DOCKER_CMD="docker"
# Extract x86_64 binary
sudo docker run --rm -v "$BUILD_DIR:/output" c-relay-static-builder-x86_64 \
sh -c "cp /c_relay_static_musl_x86_64 /output/c_relay_static_x86_64"
echo "x86_64 static binary created: $BUILD_DIR/c_relay_static_x86_64"
# Build ARM64 static binary
echo "Building ARM64 static binary with Docker..."
sudo docker buildx build \
--platform linux/arm64 \
-f "$SCRIPT_DIR/examples/deployment/static-builder.Dockerfile" \
-t c-relay-static-builder-arm64 \
--load \
"$SCRIPT_DIR"
# Extract ARM64 binary
sudo docker run --rm -v "$BUILD_DIR:/output" c-relay-static-builder-arm64 \
sh -c "cp /c_relay_static_musl_arm64 /output/c_relay_static_arm64"
echo "ARM64 static binary created: $BUILD_DIR/c_relay_static_arm64"
else
# Native static build with regular gcc
echo "Building static binary with gcc..."
# Check for required static libraries
echo "Checking for static libraries..."
MISSING_LIBS=""
for lib in libsqlite3.a libssl.a libcrypto.a libz.a; do
if ! find /usr/lib* /usr/local/lib* -name "$lib" 2>/dev/null | head -1 | grep -q .; then
MISSING_LIBS="$MISSING_LIBS $lib"
fi
done
# libsecp256k1 might not be available as static lib, so we'll try without it first
# Initialize submodules if needed
if [ ! -f "nostr_core_lib/libnostr_core_x64.a" ]; then
echo "Building nostr_core_lib..."
git submodule update --init --recursive
cd nostr_core_lib && ./build.sh && cd ..
fi
# Install additional static libraries needed for libwebsockets
echo "Installing additional static libraries..."
sudo apt install -y libcap-dev libuv1-dev libev-dev
# Try building with regular gcc and static linking
echo "Compiling with gcc -static..."
# Use the same approach as the regular Makefile but with static linking
gcc -static -O2 -Wall -Wextra -std=c99 -g \
-I. -Inostr_core_lib -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket \
src/main.c src/config.c src/dm_admin.c src/request_validator.c src/nip009.c src/nip011.c src/nip013.c src/nip040.c src/nip042.c src/websockets.c src/subscriptions.c src/api.c src/embedded_web_content.c \
-o "$BUILD_DIR/c_relay_static_x86_64" \
nostr_core_lib/libnostr_core_x64.a \
-lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k1 -lssl -lcrypto -L/usr/local/lib -lcurl -lcap -luv_a -lev
if [ $? -eq 0 ]; then
echo "x86_64 static binary created: $BUILD_DIR/c_relay_static_x86_64"
else
echo "ERROR: Static build failed"
echo "This may be due to missing static libraries or incompatible library versions"
echo "Consider using Docker-based build instead"
exit 1
fi
fi
# Verify binaries
echo "Verifying static binaries..."
for binary in "$BUILD_DIR"/c_relay_static_*; do
if [ -f "$binary" ]; then
echo "Binary: $(basename "$binary")"
file "$binary"
ls -lh "$binary"
# Test if binary is truly static (no dynamic dependencies)
if ldd "$binary" 2>/dev/null | grep -q "not a dynamic executable"; then
echo "✓ Binary is fully static"
elif ldd "$binary" 2>/dev/null | grep -q "statically linked"; then
echo "✓ Binary is statically linked"
else
echo "⚠ Binary may have dynamic dependencies:"
ldd "$binary" 2>/dev/null || echo " (ldd check failed)"
fi
echo ""
fi
done
echo "Static build complete!"
echo "Binaries available in: $BUILD_DIR/"
ls -la "$BUILD_DIR"/c_relay_static_* 2>/dev/null || echo "No static binaries found"
echo "✓ Docker is available and running"
echo ""
# Detect architecture
ARCH=$(uname -m)
case "$ARCH" in
x86_64)
PLATFORM="linux/amd64"
OUTPUT_NAME="c_relay_static_x86_64"
;;
aarch64|arm64)
PLATFORM="linux/arm64"
OUTPUT_NAME="c_relay_static_arm64"
;;
*)
echo "WARNING: Unknown architecture: $ARCH"
echo "Defaulting to linux/amd64"
PLATFORM="linux/amd64"
OUTPUT_NAME="c_relay_static_${ARCH}"
;;
esac
echo "Building for platform: $PLATFORM"
echo "Output binary: $OUTPUT_NAME"
echo ""
# Build the Docker image
echo "=========================================="
echo "Step 1: Building Alpine Docker image"
echo "=========================================="
echo "This will:"
echo " - Use Alpine Linux (native MUSL)"
echo " - Build all dependencies statically"
echo " - Compile c-relay with full static linking"
echo ""
$DOCKER_CMD build \
--platform "$PLATFORM" \
-f "$DOCKERFILE" \
-t c-relay-musl-builder:latest \
--progress=plain \
. || {
echo ""
echo "ERROR: Docker build failed"
echo "Check the output above for details"
exit 1
}
echo ""
echo "✓ Docker image built successfully"
echo ""
# Extract the binary from the container
echo "=========================================="
echo "Step 2: Extracting static binary"
echo "=========================================="
# Build the builder stage to extract the binary
$DOCKER_CMD build \
--platform "$PLATFORM" \
--target builder \
-f "$DOCKERFILE" \
-t c-relay-static-builder-stage:latest \
. > /dev/null 2>&1
# Create a temporary container to copy the binary
CONTAINER_ID=$($DOCKER_CMD create c-relay-static-builder-stage:latest)
# Copy binary from container
$DOCKER_CMD cp "$CONTAINER_ID:/build/c_relay_static" "$BUILD_DIR/$OUTPUT_NAME" || {
echo "ERROR: Failed to extract binary from container"
$DOCKER_CMD rm "$CONTAINER_ID" 2>/dev/null
exit 1
}
# Clean up container
$DOCKER_CMD rm "$CONTAINER_ID" > /dev/null
echo "✓ Binary extracted to: $BUILD_DIR/$OUTPUT_NAME"
echo ""
# Make binary executable
chmod +x "$BUILD_DIR/$OUTPUT_NAME"
# Verify the binary
echo "=========================================="
echo "Step 3: Verifying static binary"
echo "=========================================="
echo ""
echo "Checking for dynamic dependencies:"
if LDD_OUTPUT=$(timeout 5 ldd "$BUILD_DIR/$OUTPUT_NAME" 2>&1); then
if echo "$LDD_OUTPUT" | grep -q "not a dynamic executable"; then
echo "✓ Binary is fully static (no dynamic dependencies)"
TRULY_STATIC=true
elif echo "$LDD_OUTPUT" | grep -q "statically linked"; then
echo "✓ Binary is statically linked"
TRULY_STATIC=true
else
echo "⚠ WARNING: Binary may have dynamic dependencies:"
echo "$LDD_OUTPUT"
TRULY_STATIC=false
fi
else
# ldd failed or timed out - check with file command instead
if file "$BUILD_DIR/$OUTPUT_NAME" | grep -q "statically linked"; then
echo "✓ Binary is statically linked (verified with file command)"
TRULY_STATIC=true
else
echo "⚠ Could not verify static linking (ldd check failed)"
TRULY_STATIC=false
fi
fi
echo ""
echo "File size: $(ls -lh "$BUILD_DIR/$OUTPUT_NAME" | awk '{print $5}')"
echo ""
# Test if binary runs
echo "Testing binary execution:"
if "$BUILD_DIR/$OUTPUT_NAME" --version 2>&1 | head -5; then
echo "✓ Binary executes successfully"
else
echo "⚠ Binary execution test failed (this may be normal if --version is not supported)"
fi
echo ""
# Summary
echo "=========================================="
echo "Build Summary"
echo "=========================================="
echo "Binary: $BUILD_DIR/$OUTPUT_NAME"
echo "Size: $(du -h "$BUILD_DIR/$OUTPUT_NAME" | cut -f1)"
echo "Platform: $PLATFORM"
if [ "$TRULY_STATIC" = true ]; then
echo "Type: Fully static binary (Alpine MUSL-based)"
echo "Portability: Works on ANY Linux distribution"
else
echo "Type: Static binary (may have minimal dependencies)"
fi
echo ""
echo "✓ Build complete!"
echo ""
echo "These binaries should have minimal runtime dependencies and work across Linux distributions."

27
deploy_static.sh Executable file
View File

@@ -0,0 +1,27 @@
#!/bin/bash
# C-Relay Static Binary Deployment Script
# Deploys build/c_relay_static_x86_64 to server via sshlt
set -e
# Configuration
LOCAL_BINARY="build/c_relay_static_x86_64"
REMOTE_BINARY_PATH="/usr/local/bin/c_relay/c_relay"
SERVICE_NAME="c-relay"
# Create backup
ssh ubuntu@laantungir.com "sudo cp '$REMOTE_BINARY_PATH' '${REMOTE_BINARY_PATH}.backup.$(date +%Y%m%d_%H%M%S)'" 2>/dev/null || true
# Upload binary to temp location
scp "$LOCAL_BINARY" "ubuntu@laantungir.com:/tmp/c_relay.tmp"
# Install binary
ssh ubuntu@laantungir.com "sudo mv '/tmp/c_relay.tmp' '$REMOTE_BINARY_PATH'"
ssh ubuntu@laantungir.com "sudo chown c-relay:c-relay '$REMOTE_BINARY_PATH'"
ssh ubuntu@laantungir.com "sudo chmod +x '$REMOTE_BINARY_PATH'"
# Restart service
ssh ubuntu@laantungir.com "sudo systemctl restart '$SERVICE_NAME'"
echo "Deployment complete!"

275
docs/musl_static_build.md Normal file
View File

@@ -0,0 +1,275 @@
# MUSL Static Binary Build Guide
## Overview
This guide explains how to build truly portable MUSL-based static binaries of c-relay using Alpine Linux Docker containers. These binaries have **zero runtime dependencies** and work on any Linux distribution.
## Why MUSL?
### MUSL vs glibc Static Binaries
**MUSL Advantages:**
- **Truly Static**: No hidden dependencies on system libraries
- **Smaller Size**: ~7.6MB vs ~12MB+ for glibc static builds
- **Better Portability**: Works on ANY Linux distribution without modification
- **Cleaner Linking**: No glibc-specific extensions or fortified functions
- **Simpler Deployment**: Single binary, no library compatibility issues
**glibc Limitations:**
- Static builds still require dynamic loading for NSS (Name Service Switch)
- Fortified functions (`__*_chk`) don't exist in MUSL
- Larger binary size due to glibc's complexity
- May have compatibility issues across different glibc versions
## Build Process
### Prerequisites
- Docker installed and running
- Sufficient disk space (~2GB for Docker layers)
- Internet connection (for downloading dependencies)
### Quick Start
```bash
# Build MUSL static binary
./build_static.sh
# The binary will be created at:
# build/c_relay_static_musl_x86_64 (on x86_64)
# build/c_relay_static_musl_arm64 (on ARM64)
```
### What Happens During Build
1. **Alpine Linux Base**: Uses Alpine 3.19 with native MUSL support
2. **Static Dependencies**: Builds all dependencies with static linking:
- libsecp256k1 (Bitcoin cryptography)
- libwebsockets (WebSocket server)
- OpenSSL (TLS/crypto)
- SQLite (database)
- curl (HTTP client)
- zlib (compression)
3. **nostr_core_lib**: Builds with MUSL-compatible flags:
- Disables glibc fortification (`-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0`)
- Includes required NIPs: 001, 006, 013, 017, 019, 044, 059
- Produces static library (~316KB)
4. **c-relay Compilation**: Links everything statically:
- All source files compiled with `-static` flag
- Fortification disabled to avoid `__*_chk` symbols
- Results in ~7.6MB stripped binary
5. **Verification**: Confirms binary is truly static:
- `ldd` shows "not a dynamic executable"
- `file` shows "statically linked"
- Binary executes successfully
## Technical Details
### Dockerfile Structure
The build uses a multi-stage Dockerfile (`Dockerfile.alpine-musl`):
```dockerfile
# Stage 1: Builder (Alpine Linux)
FROM alpine:3.19 AS builder
- Install build tools and static libraries
- Build dependencies from source
- Compile nostr_core_lib with MUSL flags
- Compile c-relay with full static linking
- Strip binary to reduce size
# Stage 2: Output (scratch)
FROM scratch AS output
- Contains only the final binary
```
### Key Compilation Flags
**For nostr_core_lib:**
```bash
CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"
```
**For c-relay:**
```bash
gcc -static -O2 -Wall -Wextra -std=c99 \
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
[source files] \
-lwebsockets -lssl -lcrypto -lsqlite3 -lsecp256k1 \
-lcurl -lz -lpthread -lm -ldl
```
### Fortification Issue
**Problem**: GCC's `-O2` optimization enables fortification by default, replacing standard functions with `__*_chk` variants (e.g., `__snprintf_chk`, `__fprintf_chk`). These are glibc-specific and don't exist in MUSL.
**Solution**: Explicitly disable fortification with:
- `-U_FORTIFY_SOURCE` (undefine any existing definition)
- `-D_FORTIFY_SOURCE=0` (set to 0)
This must be applied to **both** nostr_core_lib and c-relay compilation.
### NIP Dependencies
The build includes these NIPs in nostr_core_lib:
- **NIP-001**: Basic protocol (event creation, signing)
- **NIP-006**: Key derivation from mnemonic
- **NIP-013**: Proof of Work validation
- **NIP-017**: Private Direct Messages
- **NIP-019**: Bech32 encoding (nsec/npub)
- **NIP-044**: Modern encryption
- **NIP-059**: Gift Wrap (required by NIP-017)
## Verification
### Check Binary Type
```bash
# Should show "statically linked"
file build/c_relay_static_musl_x86_64
# Should show "not a dynamic executable"
ldd build/c_relay_static_musl_x86_64
# Check size (should be ~7.6MB)
ls -lh build/c_relay_static_musl_x86_64
```
### Test Execution
```bash
# Show help
./build/c_relay_static_musl_x86_64 --help
# Show version
./build/c_relay_static_musl_x86_64 --version
# Run relay
./build/c_relay_static_musl_x86_64 --port 8888
```
### Cross-Distribution Testing
Test the binary on different distributions to verify portability:
```bash
# Alpine Linux
docker run --rm -v $(pwd)/build:/app alpine:latest /app/c_relay_static_musl_x86_64 --version
# Ubuntu
docker run --rm -v $(pwd)/build:/app ubuntu:latest /app/c_relay_static_musl_x86_64 --version
# Debian
docker run --rm -v $(pwd)/build:/app debian:latest /app/c_relay_static_musl_x86_64 --version
# CentOS
docker run --rm -v $(pwd)/build:/app centos:latest /app/c_relay_static_musl_x86_64 --version
```
## Troubleshooting
### Docker Permission Denied
**Problem**: `permission denied while trying to connect to the Docker daemon socket`
**Solution**: Add user to docker group:
```bash
sudo usermod -aG docker $USER
newgrp docker # Or logout and login again
```
### Build Fails with Fortification Errors
**Problem**: `undefined reference to '__snprintf_chk'` or `'__fprintf_chk'`
**Solution**: Ensure fortification is disabled in both:
1. nostr_core_lib build.sh (line 534)
2. c-relay compilation flags in Dockerfile
### Binary Won't Execute
**Problem**: Binary fails to run on target system
**Checks**:
1. Verify it's truly static: `ldd binary` should show "not a dynamic executable"
2. Check architecture matches: `file binary` should show correct arch
3. Ensure execute permissions: `chmod +x binary`
### Missing NIP Functions
**Problem**: `undefined reference to 'nostr_nip*'` during linking
**Solution**: Add missing NIPs to the build command:
```bash
./build.sh --nips=1,6,13,17,19,44,59
```
## Deployment
### Single Binary Deployment
```bash
# Copy binary to server
scp build/c_relay_static_musl_x86_64 user@server:/opt/c-relay/
# Run on server (no dependencies needed!)
ssh user@server
cd /opt/c-relay
./c_relay_static_musl_x86_64 --port 8888
```
### SystemD Service
```ini
[Unit]
Description=C-Relay Nostr Relay (MUSL Static)
After=network.target
[Service]
Type=simple
User=c-relay
WorkingDirectory=/opt/c-relay
ExecStart=/opt/c-relay/c_relay_static_musl_x86_64
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
```
## Performance Comparison
| Metric | MUSL Static | glibc Static | glibc Dynamic |
|--------|-------------|--------------|---------------|
| Binary Size | 7.6 MB | 12+ MB | 2-3 MB |
| Startup Time | ~50ms | ~60ms | ~40ms |
| Memory Usage | Similar | Similar | Similar |
| Portability | ✓ Any Linux | ⚠ glibc only | ✗ Requires libs |
| Dependencies | None | NSS libs | Many libs |
## Best Practices
1. **Always verify** the binary is truly static before deployment
2. **Test on multiple distributions** to ensure portability
3. **Keep Docker images updated** for security patches
4. **Document the build date** and commit hash for reproducibility
5. **Store binaries** with architecture in filename (e.g., `_x86_64`, `_arm64`)
## References
- [MUSL libc](https://musl.libc.org/)
- [Alpine Linux](https://alpinelinux.org/)
- [Static Linking Best Practices](https://www.musl-libc.org/faq.html)
- [GCC Fortification](https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html)
## Changelog
### 2025-10-11
- Initial MUSL build system implementation
- Alpine Docker-based build process
- Fortification fix for nostr_core_lib
- Complete NIP dependency resolution
- Documentation created

View File

@@ -0,0 +1,147 @@
# Static Build Improvements
## Overview
The `build_static.sh` script has been updated to properly support MUSL static compilation and includes several optimizations.
## Changes Made
### 1. True MUSL Static Binary Support
The script now attempts to build with `musl-gcc` for truly portable static binaries:
- **MUSL binaries** have zero runtime dependencies and work across all Linux distributions
- **Automatic fallback** to glibc static linking if MUSL compilation fails (e.g., missing MUSL-compiled libraries)
- Clear messaging about which type of binary was created
### 2. SQLite Build Caching
SQLite is now built once and cached for future builds:
- **Cache location**: `~/.cache/c-relay-sqlite/`
- **Version-specific**: Each SQLite version gets its own cache directory
- **Significant speedup**: Subsequent builds skip the SQLite compilation step
- **Manual cleanup**: `rm -rf ~/.cache/c-relay-sqlite` to clear cache
### 3. Smart Package Installation
The script now checks for required packages before installing:
- Only installs missing packages
- Reduces unnecessary `apt` operations
- Faster builds when dependencies are already present
### 4. Bug Fixes
- Fixed format warning in `src/subscriptions.c` line 1067 (changed `%zu` to `%d` with cast for `MAX_SEARCH_TERM_LENGTH`)
## Usage
```bash
./build_static.sh
```
The script will:
1. Check for and install `musl-gcc` if needed
2. Build or use cached SQLite with JSON1 support
3. Attempt MUSL static compilation
4. Fall back to glibc static compilation if MUSL fails
5. Verify the resulting binary
## Binary Types
### MUSL Static Binary (Ideal - Currently Not Achievable)
- **Filename**: `build/c_relay_static_musl_x86_64`
- **Dependencies**: None (truly static)
- **Portability**: Works on any Linux distribution
- **Status**: Requires MUSL-compiled libwebsockets and other dependencies (not available by default)
### Glibc Static Binary (Current Output)
- **Filename**: `build/c_relay_static_x86_64` or `build/c_relay_static_glibc_x86_64`
- **Dependencies**: None - fully statically linked with glibc
- **Portability**: Works on most Linux distributions (glibc is statically included)
- **Note**: Despite using glibc, this is a **fully static binary** with no runtime dependencies
## Verification
The script automatically verifies binaries using `ldd` and `file`:
```bash
# For MUSL binary
ldd build/c_relay_static_musl_x86_64
# Output: "not a dynamic executable" (good!)
# For glibc binary
ldd build/c_relay_static_glibc_x86_64
# Output: Shows glibc dependencies
```
## Known Limitations
### MUSL Compilation Currently Fails Because:
1. **libwebsockets not available as MUSL static library**
- System libwebsockets is compiled with glibc, not MUSL
- MUSL cannot link against glibc-compiled libraries
- Solution: Build libwebsockets from source with musl-gcc (future enhancement)
2. **Other dependencies not MUSL-compatible**
- libssl, libcrypto, libsecp256k1, libcurl must be available as MUSL static libraries
- Most systems only provide glibc versions
- Solution: Build entire dependency chain with musl-gcc (complex, future enhancement)
### Current Behavior
The script attempts MUSL compilation but falls back to glibc:
1. Tries to compile with `musl-gcc -static` (fails due to missing MUSL libraries)
2. Logs the error to `/tmp/musl_build.log`
3. Displays a clear warning message
4. Automatically falls back to `gcc -static` with glibc
5. Produces a **fully static binary** with glibc statically linked (no runtime dependencies)
**Important**: The glibc static binary is still fully portable across most Linux distributions because glibc is statically included in the binary. It's not as universally portable as MUSL would be, but it works on virtually all modern Linux systems.
## Future Enhancements
1. **Full MUSL dependency chain**: Build all dependencies (libwebsockets, OpenSSL, etc.) with musl-gcc
2. **Multi-architecture support**: Add ARM64 MUSL builds
3. **Docker-based builds**: Use Alpine Linux containers for guaranteed MUSL environment
4. **Dependency vendoring**: Include pre-built MUSL libraries in the repository
## Troubleshooting
### Clear SQLite Cache
```bash
rm -rf ~/.cache/c-relay-sqlite
```
### Force Package Reinstall
```bash
sudo apt install --reinstall musl-dev musl-tools libssl-dev libcurl4-openssl-dev libsecp256k1-dev
```
### Check Build Logs
```bash
cat /tmp/musl_build.log
```
### Verify Binary Type
```bash
file build/c_relay_static_*
ldd build/c_relay_static_* 2>&1
```
## Performance Impact
- **First build**: ~2-3 minutes (includes SQLite compilation)
- **Subsequent builds**: ~30-60 seconds (uses cached SQLite)
- **Cache size**: ~10-15 MB per SQLite version
## Compatibility
The updated script is compatible with:
- Ubuntu 20.04+
- Debian 10+
- Other Debian-based distributions with `apt` package manager
For other distributions, adjust package installation commands accordingly.

140
docs/why_musl_fails.md Normal file
View File

@@ -0,0 +1,140 @@
# Why MUSL Compilation Fails: Technical Explanation
## The Core Problem
**You cannot mix glibc headers/libraries with MUSL's C library.** They are fundamentally incompatible at the ABI (Application Binary Interface) level.
## What Happens When We Try
```bash
musl-gcc -I/usr/include src/main.c -lwebsockets
```
### Step-by-Step Breakdown:
1. **musl-gcc includes `<libwebsockets.h>`** from `/usr/include/libwebsockets.h`
2. **libwebsockets.h includes standard C headers:**
```c
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
```
3. **The system provides glibc's version of these headers** (from `/usr/include/`)
4. **glibc's `<string.h>` includes glibc-specific internal headers:**
```c
#include <bits/libc-header-start.h>
#include <bits/types.h>
```
5. **MUSL doesn't have these `bits/` headers** - it has a completely different structure:
- MUSL uses `/usr/include/x86_64-linux-musl/` for its headers
- MUSL's headers are simpler and don't use the `bits/` subdirectory structure
6. **Compilation fails** with:
```
fatal error: bits/libc-header-start.h: No such file or directory
```
## Why This Is Fundamental
### Different C Library Implementations
**glibc (GNU C Library):**
- Complex, feature-rich implementation
- Uses `bits/` subdirectories for platform-specific code
- Larger binary size
- More system-specific optimizations
**MUSL:**
- Minimal, clean implementation
- Simpler header structure
- Smaller binary size
- Designed for static linking and portability
### ABI Incompatibility
Even if headers compiled, the **Application Binary Interface (ABI)** is different:
- Function calling conventions may differ
- Structure layouts may differ
- System call wrappers are implemented differently
- Thread-local storage mechanisms differ
## The Solution: Build Everything with MUSL
To create a true MUSL static binary, you must:
### 1. Build libwebsockets with musl-gcc
```bash
git clone https://github.com/warmcat/libwebsockets.git
cd libwebsockets
mkdir build && cd build
cmake .. \
-DCMAKE_C_COMPILER=musl-gcc \
-DCMAKE_BUILD_TYPE=Release \
-DLWS_WITH_STATIC=ON \
-DLWS_WITH_SHARED=OFF \
-DLWS_WITHOUT_TESTAPPS=ON
make
```
### 2. Build OpenSSL with MUSL
```bash
wget https://www.openssl.org/source/openssl-3.0.0.tar.gz
tar xzf openssl-3.0.0.tar.gz
cd openssl-3.0.0
CC=musl-gcc ./config no-shared --prefix=/opt/musl-openssl
make && make install
```
### 3. Build all other dependencies
- zlib with musl-gcc
- libsecp256k1 with musl-gcc
- libcurl with musl-gcc (which itself needs OpenSSL built with MUSL)
### 4. Build c-relay with all MUSL libraries
```bash
musl-gcc -static \
-I/opt/musl-libwebsockets/include \
-I/opt/musl-openssl/include \
src/*.c \
-L/opt/musl-libwebsockets/lib -lwebsockets \
-L/opt/musl-openssl/lib -lssl -lcrypto \
...
```
## Why We Use glibc Static Instead
Building the entire dependency chain with MUSL is:
- **Time-consuming**: Hours to build all dependencies
- **Complex**: Each library has its own build quirks
- **Maintenance burden**: Must rebuild when dependencies update
- **Unnecessary for most use cases**: glibc static binaries work fine
### glibc Static Binary Advantages:
**Still fully static** - no runtime dependencies
**Works on virtually all Linux distributions**
**Much faster to build** - uses system libraries
**Easier to maintain** - no custom dependency builds
**Same practical portability** for modern Linux systems
### glibc Static Binary Limitations:
⚠️ **Slightly larger** than MUSL (glibc is bigger)
⚠️ **May not work on very old systems** (ancient glibc versions)
⚠️ **Not as universally portable** as MUSL (but close enough)
## Conclusion
**MUSL compilation fails because system libraries are compiled with glibc, and you cannot mix glibc and MUSL.**
The current approach (glibc static binary) is the pragmatic solution that provides excellent portability without the complexity of building an entire MUSL toolchain.
If true MUSL binaries are needed in the future, the solution is to use Alpine Linux (which uses MUSL natively) in a Docker container, where all system libraries are already MUSL-compiled.

View File

@@ -69,6 +69,20 @@ RUN cd /tmp && \
./Configure linux-x86_64 no-shared --prefix=/usr && \
make && make install_sw
# Build SQLite with JSON1 extension enabled
RUN cd /tmp && \
wget https://www.sqlite.org/2024/sqlite-autoconf-3460000.tar.gz && \
tar xzf sqlite-autoconf-3460000.tar.gz && \
cd sqlite-autoconf-3460000 && \
./configure \
--enable-static \
--disable-shared \
--enable-json1 \
--enable-fts5 \
--prefix=/usr \
CFLAGS="-DSQLITE_ENABLE_JSON1=1 -DSQLITE_ENABLE_FTS5=1" && \
make && make install
# Build libsecp256k1 static
RUN cd /tmp && \
git clone https://github.com/bitcoin-core/secp256k1.git && \

View File

@@ -1,19 +0,0 @@
#!/bin/bash
# get_settings.sh - Query relay configuration events using nak
# Uses admin test key to query kind 33334 configuration events
# Test key configuration
ADMIN_PRIVATE_KEY="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
ADMIN_PUBLIC_KEY="6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3"
RELAY_PUBLIC_KEY="4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"
RELAY_URL="ws://localhost:8888"
echo "Querying configuration events (kind 33334) from relay at $RELAY_URL"
echo "Using admin public key: $ADMIN_PUBLIC_KEY"
echo "Looking for relay config: $RELAY_PUBLIC_KEY"
echo ""
# Query for kind 33334 configuration events
# These events contain the relay configuration with d-tag matching the relay pubkey
nak req -k 33334 "$RELAY_URL" | jq .

View File

@@ -163,9 +163,16 @@ rm -f db/c_nostr_relay.db* 2>/dev/null
echo "Embedding web files..."
./embed_web_files.sh
# Build the project first
echo "Building project..."
make clean all
# Build the project - ONLY static build
echo "Building project (static binary with SQLite JSON1 extension)..."
./build_static.sh
# Exit if static build fails - no fallback
if [ $? -ne 0 ]; then
echo "ERROR: Static build failed. Cannot proceed without static binary."
echo "Please fix the build errors and try again."
exit 1
fi
# Restore database files if preserving
if [ "$PRESERVE_DATABASE" = true ] && [ -d "/tmp/relay_backup_$$" ]; then
@@ -181,25 +188,32 @@ if [ $? -ne 0 ]; then
exit 1
fi
# Check if relay binary exists after build - detect architecture
# Check if static relay binary exists after build - ONLY use static binary
ARCH=$(uname -m)
case "$ARCH" in
x86_64)
BINARY_PATH="./build/c_relay_x86"
BINARY_PATH="./build/c_relay_static_x86_64"
;;
aarch64|arm64)
BINARY_PATH="./build/c_relay_arm64"
BINARY_PATH="./build/c_relay_static_arm64"
;;
*)
BINARY_PATH="./build/c_relay_$ARCH"
BINARY_PATH="./build/c_relay_static_$ARCH"
;;
esac
# Verify static binary exists - no fallbacks
if [ ! -f "$BINARY_PATH" ]; then
echo "ERROR: Relay binary not found at $BINARY_PATH after build. Build may have failed."
echo "ERROR: Static relay binary not found: $BINARY_PATH"
echo ""
echo "The relay requires the static binary with JSON1 support."
echo "Please run: ./build_static.sh"
echo ""
exit 1
fi
echo "Using static binary: $BINARY_PATH"
echo "Build successful. Proceeding with relay restart..."
# Kill existing relay if running - start aggressive immediately

View File

@@ -1 +1 @@
2442403
786254

View File

@@ -11,6 +11,7 @@
#include <dirent.h>
#include <sys/stat.h>
#include <errno.h>
#include <signal.h>
#include <libwebsockets.h>
// External database connection (from main.c)

View File

@@ -8,6 +8,7 @@
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <sys/stat.h>
#include <cjson/cJSON.h>
#include <libwebsockets.h>

View File

@@ -126,6 +126,22 @@ int process_admin_event_in_config(cJSON* event, char* error_message, size_t erro
// Forward declaration for NIP-45 COUNT message handling
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);
// Parameter binding helpers for SQL queries
static void add_bind_param(char*** params, int* count, int* capacity, const char* value) {
if (*count >= *capacity) {
*capacity = *capacity == 0 ? 16 : *capacity * 2;
*params = realloc(*params, *capacity * sizeof(char*));
}
(*params)[(*count)++] = strdup(value);
}
static void free_bind_params(char** params, int count) {
for (int i = 0; i < count; i++) {
free(params[i]);
}
free(params);
}
// Forward declaration for enhanced admin event authorization
int is_authorized_admin_event(cJSON* event, char* error_message, size_t error_size);
@@ -726,7 +742,95 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
log_error("REQ filters is not an array");
return 0;
}
// EARLY SUBSCRIPTION LIMIT CHECK - Check limits BEFORE any processing
if (pss) {
time_t current_time = time(NULL);
// Check if client is currently rate limited due to excessive failed attempts
if (pss->rate_limit_until > current_time) {
char rate_limit_msg[256];
int remaining_seconds = (int)(pss->rate_limit_until - current_time);
snprintf(rate_limit_msg, sizeof(rate_limit_msg),
"Rate limited due to excessive failed subscription attempts. Try again in %d seconds.", remaining_seconds);
// Send CLOSED notice for rate limiting
cJSON* closed_msg = cJSON_CreateArray();
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("CLOSED"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("error: rate limited"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(rate_limit_msg));
char* closed_str = cJSON_Print(closed_msg);
if (closed_str) {
size_t closed_len = strlen(closed_str);
unsigned char* buf = malloc(LWS_PRE + closed_len);
if (buf) {
memcpy(buf + LWS_PRE, closed_str, closed_len);
lws_write(wsi, buf + LWS_PRE, closed_len, LWS_WRITE_TEXT);
free(buf);
}
free(closed_str);
}
cJSON_Delete(closed_msg);
// Update rate limiting counters
pss->failed_subscription_attempts++;
pss->last_failed_attempt = current_time;
return 0;
}
// Check session subscription limits
if (pss->subscription_count >= g_subscription_manager.max_subscriptions_per_client) {
log_error("Maximum subscriptions per client exceeded");
// Update rate limiting counters for failed attempt
pss->failed_subscription_attempts++;
pss->last_failed_attempt = current_time;
pss->consecutive_failures++;
// Implement progressive backoff: 1s, 5s, 30s, 300s (5min) based on consecutive failures
int backoff_seconds = 1;
if (pss->consecutive_failures >= 10) backoff_seconds = 300; // 5 minutes
else if (pss->consecutive_failures >= 5) backoff_seconds = 30; // 30 seconds
else if (pss->consecutive_failures >= 3) backoff_seconds = 5; // 5 seconds
pss->rate_limit_until = current_time + backoff_seconds;
// Send CLOSED notice with backoff information
cJSON* closed_msg = cJSON_CreateArray();
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("CLOSED"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("error: too many subscriptions"));
char backoff_msg[256];
snprintf(backoff_msg, sizeof(backoff_msg),
"Maximum subscriptions per client exceeded. Backoff for %d seconds.", backoff_seconds);
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(backoff_msg));
char* closed_str = cJSON_Print(closed_msg);
if (closed_str) {
size_t closed_len = strlen(closed_str);
unsigned char* buf = malloc(LWS_PRE + closed_len);
if (buf) {
memcpy(buf + LWS_PRE, closed_str, closed_len);
lws_write(wsi, buf + LWS_PRE, closed_len, LWS_WRITE_TEXT);
free(buf);
}
free(closed_str);
}
cJSON_Delete(closed_msg);
return 0;
}
}
// Parameter binding helpers
char** bind_params = NULL;
int bind_param_count = 0;
int bind_param_capacity = 0;
// Check for kind 33334 configuration event requests BEFORE creating subscription
int config_events_sent = 0;
int has_config_request = 0;
@@ -770,32 +874,6 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
// If only config events were requested, we can return early after sending EOSE
// But still create the subscription for future config updates
// Check session subscription limits
if (pss && pss->subscription_count >= g_subscription_manager.max_subscriptions_per_client) {
log_error("Maximum subscriptions per client exceeded");
// Send CLOSED notice
cJSON* closed_msg = cJSON_CreateArray();
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("CLOSED"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("error: too many subscriptions"));
char* closed_str = cJSON_Print(closed_msg);
if (closed_str) {
size_t closed_len = strlen(closed_str);
unsigned char* buf = malloc(LWS_PRE + closed_len);
if (buf) {
memcpy(buf + LWS_PRE, closed_str, closed_len);
lws_write(wsi, buf + LWS_PRE, closed_len, LWS_WRITE_TEXT);
free(buf);
}
free(closed_str);
}
cJSON_Delete(closed_msg);
return has_config_request ? config_events_sent : 0;
}
// Create persistent subscription
subscription_t* subscription = create_subscription(sub_id, wsi, filters, pss ? pss->client_ip : "unknown");
if (!subscription) {
@@ -807,13 +885,13 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
if (add_subscription_to_manager(subscription) != 0) {
log_error("Failed to add subscription to global manager");
free_subscription(subscription);
// Send CLOSED notice
cJSON* closed_msg = cJSON_CreateArray();
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("CLOSED"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("error: subscription limit reached"));
char* closed_str = cJSON_Print(closed_msg);
if (closed_str) {
size_t closed_len = strlen(closed_str);
@@ -826,7 +904,15 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
free(closed_str);
}
cJSON_Delete(closed_msg);
// Update rate limiting counters for failed attempt (global limit reached)
if (pss) {
time_t current_time = time(NULL);
pss->failed_subscription_attempts++;
pss->last_failed_attempt = current_time;
pss->consecutive_failures++;
}
return has_config_request ? config_events_sent : 0;
}
@@ -848,7 +934,13 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
log_warning("Invalid filter object");
continue;
}
// Reset bind params for this filter
free_bind_params(bind_params, bind_param_count);
bind_params = NULL;
bind_param_count = 0;
bind_param_capacity = 0;
// Build SQL query based on filter - exclude ephemeral events (kinds 20000-29999) from historical queries
char sql[1024] = "SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND (kind < 20000 OR kind >= 30000)";
char* sql_ptr = sql + strlen(sql);
@@ -888,56 +980,80 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
// Handle authors filter
cJSON* authors = cJSON_GetObjectItem(filter, "authors");
if (authors && cJSON_IsArray(authors)) {
int author_count = cJSON_GetArraySize(authors);
int author_count = 0;
// Count valid authors
for (int a = 0; a < cJSON_GetArraySize(authors); a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
author_count++;
}
}
if (author_count > 0) {
snprintf(sql_ptr, remaining, " AND pubkey IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int a = 0; a < author_count; a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
if (a > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(author));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (a > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add author values to bind params
for (int a = 0; a < cJSON_GetArraySize(authors); a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
add_bind_param(&bind_params, &bind_param_count, &bind_param_capacity, cJSON_GetStringValue(author));
}
}
}
}
// Handle ids filter
cJSON* ids = cJSON_GetObjectItem(filter, "ids");
if (ids && cJSON_IsArray(ids)) {
int id_count = cJSON_GetArraySize(ids);
int id_count = 0;
// Count valid ids
for (int i = 0; i < cJSON_GetArraySize(ids); i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
id_count++;
}
}
if (id_count > 0) {
snprintf(sql_ptr, remaining, " AND id IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int i = 0; i < id_count; i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(id));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add id values to bind params
for (int i = 0; i < cJSON_GetArraySize(ids); i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
add_bind_param(&bind_params, &bind_param_count, &bind_param_capacity, cJSON_GetStringValue(id));
}
}
}
}
@@ -950,29 +1066,42 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
const char* tag_name = filter_key + 1; // Get the tag name (e, p, t, type, etc.)
if (cJSON_IsArray(filter_item)) {
int tag_value_count = cJSON_GetArraySize(filter_item);
int tag_value_count = 0;
// Count valid tag values
for (int i = 0; i < cJSON_GetArraySize(filter_item); i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
tag_value_count++;
}
}
if (tag_value_count > 0) {
// Use EXISTS with LIKE to check for matching tags
snprintf(sql_ptr, remaining, " AND EXISTS (SELECT 1 FROM json_each(json(tags)) WHERE json_extract(value, '$[0]') = '%s' AND json_extract(value, '$[1]') IN (", tag_name);
// Use EXISTS with parameterized query
snprintf(sql_ptr, remaining, " AND EXISTS (SELECT 1 FROM json_each(json(tags)) WHERE json_extract(value, '$[0]') = ? AND json_extract(value, '$[1]') IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int i = 0; i < tag_value_count; i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(tag_value));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, "))");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add tag name and values to bind params
add_bind_param(&bind_params, &bind_param_count, &bind_param_capacity, tag_name);
for (int i = 0; i < cJSON_GetArraySize(filter_item); i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
add_bind_param(&bind_params, &bind_param_count, &bind_param_capacity, cJSON_GetStringValue(tag_value));
}
}
}
}
}
@@ -1048,6 +1177,11 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
log_error(error_msg);
continue;
}
// Bind parameters
for (int i = 0; i < bind_param_count; i++) {
sqlite3_bind_text(stmt, i + 1, bind_params[i], -1, SQLITE_TRANSIENT);
}
int row_count = 0;
while (sqlite3_step(stmt) == SQLITE_ROW) {
@@ -1112,7 +1246,10 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
sqlite3_finalize(stmt);
}
// Cleanup bind params
free_bind_params(bind_params, bind_param_count);
return events_sent;
}
/////////////////////////////////////////////////////////////////////////////////////////
@@ -1614,9 +1751,27 @@ int main(int argc, char* argv[]) {
// Initialize NIP-40 expiration configuration
init_expiration_config();
// Update subscription manager configuration
update_subscription_manager_config();
// Initialize subscription manager mutexes
if (pthread_mutex_init(&g_subscription_manager.subscriptions_lock, NULL) != 0) {
log_error("Failed to initialize subscription manager subscriptions lock");
cleanup_configuration_system();
nostr_cleanup();
close_database();
return 1;
}
if (pthread_mutex_init(&g_subscription_manager.ip_tracking_lock, NULL) != 0) {
log_error("Failed to initialize subscription manager IP tracking lock");
pthread_mutex_destroy(&g_subscription_manager.subscriptions_lock);
cleanup_configuration_system();
nostr_cleanup();
close_database();
return 1;
}
// Start WebSocket Nostr relay server (port from configuration)
@@ -1626,6 +1781,11 @@ int main(int argc, char* argv[]) {
cleanup_relay_info();
ginxsom_request_validator_cleanup();
cleanup_configuration_system();
// Cleanup subscription manager mutexes
pthread_mutex_destroy(&g_subscription_manager.subscriptions_lock);
pthread_mutex_destroy(&g_subscription_manager.ip_tracking_lock);
nostr_cleanup();
close_database();

View File

@@ -11,7 +11,6 @@
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <printf.h>
// Forward declarations for logging functions
void log_warning(const char* message);

View File

@@ -5,7 +5,6 @@
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <printf.h>
#include <pthread.h>
#include <libwebsockets.h>
#include "subscriptions.h"
@@ -21,6 +20,13 @@ const char* get_config_value(const char* key);
// Forward declarations for NIP-40 expiration functions
int is_event_expired(cJSON* event, time_t current_time);
// Forward declarations for filter validation
int validate_filter_values(cJSON* filter_json, char* error_message, size_t error_size);
int validate_hex_string(const char* str, size_t expected_len, const char* field_name, char* error_message, size_t error_size);
int validate_timestamp_range(long since, long until, char* error_message, size_t error_size);
int validate_numeric_limits(int limit, char* error_message, size_t error_size);
int validate_search_term(const char* search_term, char* error_message, size_t error_size);
// Global database variable
extern sqlite3* g_db;
@@ -42,7 +48,14 @@ subscription_filter_t* create_subscription_filter(cJSON* filter_json) {
if (!filter_json || !cJSON_IsObject(filter_json)) {
return NULL;
}
// Validate filter values before creating the filter
char error_message[512] = {0};
if (!validate_filter_values(filter_json, error_message, sizeof(error_message))) {
log_warning(error_message);
return NULL;
}
subscription_filter_t* filter = calloc(1, sizeof(subscription_filter_t));
if (!filter) {
return NULL;
@@ -111,28 +124,66 @@ void free_subscription_filter(subscription_filter_t* filter) {
free(filter);
}
// Validate subscription ID format and length
static int validate_subscription_id(const char* sub_id) {
if (!sub_id) {
return 0; // NULL pointer
}
size_t len = strlen(sub_id);
if (len == 0 || len >= SUBSCRIPTION_ID_MAX_LENGTH) {
return 0; // Empty or too long
}
// Check for valid characters (alphanumeric, underscore, hyphen)
for (size_t i = 0; i < len; i++) {
char c = sub_id[i];
if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == '_' || c == '-')) {
return 0; // Invalid character
}
}
return 1; // Valid
}
// Create a new subscription
subscription_t* create_subscription(const char* sub_id, struct lws* wsi, cJSON* filters_array, const char* client_ip) {
if (!sub_id || !wsi || !filters_array) {
log_error("create_subscription: NULL parameter(s)");
return NULL;
}
// Validate subscription ID
if (!validate_subscription_id(sub_id)) {
log_error("create_subscription: invalid subscription ID format or length");
return NULL;
}
subscription_t* sub = calloc(1, sizeof(subscription_t));
if (!sub) {
log_error("create_subscription: failed to allocate subscription");
return NULL;
}
// Copy subscription ID (truncate if too long)
strncpy(sub->id, sub_id, SUBSCRIPTION_ID_MAX_LENGTH - 1);
sub->id[SUBSCRIPTION_ID_MAX_LENGTH - 1] = '\0';
// Copy subscription ID safely (already validated)
size_t id_len = strlen(sub_id);
memcpy(sub->id, sub_id, id_len);
sub->id[id_len] = '\0';
// Set WebSocket connection
sub->wsi = wsi;
// Set client IP
// Set client IP safely
if (client_ip) {
strncpy(sub->client_ip, client_ip, CLIENT_IP_MAX_LENGTH - 1);
sub->client_ip[CLIENT_IP_MAX_LENGTH - 1] = '\0';
size_t ip_len = strlen(client_ip);
if (ip_len >= CLIENT_IP_MAX_LENGTH) {
ip_len = CLIENT_IP_MAX_LENGTH - 1;
}
memcpy(sub->client_ip, client_ip, ip_len);
sub->client_ip[ip_len] = '\0';
} else {
sub->client_ip[0] = '\0'; // Ensure null termination
}
// Set timestamps and state
@@ -215,42 +266,61 @@ int add_subscription_to_manager(subscription_t* sub) {
// Remove subscription from global manager (thread-safe)
int remove_subscription_from_manager(const char* sub_id, struct lws* wsi) {
if (!sub_id) return -1;
if (!sub_id) {
log_error("remove_subscription_from_manager: NULL subscription ID");
return -1;
}
// Validate subscription ID format
if (!validate_subscription_id(sub_id)) {
log_error("remove_subscription_from_manager: invalid subscription ID format");
return -1;
}
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
subscription_t** current = &g_subscription_manager.active_subscriptions;
while (*current) {
subscription_t* sub = *current;
// Match by ID and WebSocket connection
if (strcmp(sub->id, sub_id) == 0 && (!wsi || sub->wsi == wsi)) {
// Remove from list
*current = sub->next;
g_subscription_manager.total_subscriptions--;
// Copy data needed for logging before unlocking
char client_ip_copy[CLIENT_IP_MAX_LENGTH];
int events_sent_copy = sub->events_sent;
char sub_id_copy[SUBSCRIPTION_ID_MAX_LENGTH];
memcpy(client_ip_copy, sub->client_ip, CLIENT_IP_MAX_LENGTH);
memcpy(sub_id_copy, sub->id, SUBSCRIPTION_ID_MAX_LENGTH);
client_ip_copy[CLIENT_IP_MAX_LENGTH - 1] = '\0';
sub_id_copy[SUBSCRIPTION_ID_MAX_LENGTH - 1] = '\0';
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
// Log subscription closure to database
log_subscription_closed(sub_id, sub->client_ip, "closed");
// Log subscription closure to database (now safe)
log_subscription_closed(sub_id_copy, client_ip_copy, "closed");
// Update events sent counter before freeing
update_subscription_events_sent(sub_id, sub->events_sent);
update_subscription_events_sent(sub_id_copy, events_sent_copy);
free_subscription(sub);
return 0;
}
current = &(sub->next);
}
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg), "Subscription '%s' not found for removal", sub_id);
log_warning(debug_msg);
return -1;
}
@@ -472,52 +542,117 @@ int broadcast_event_to_subscriptions(cJSON* event) {
}
int broadcasts = 0;
// Create a temporary list of matching subscriptions to avoid holding lock during I/O
typedef struct temp_sub {
struct lws* wsi;
char id[SUBSCRIPTION_ID_MAX_LENGTH];
char client_ip[CLIENT_IP_MAX_LENGTH];
struct temp_sub* next;
} temp_sub_t;
temp_sub_t* matching_subs = NULL;
int matching_count = 0;
// First pass: collect matching subscriptions while holding lock
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
subscription_t* sub = g_subscription_manager.active_subscriptions;
while (sub) {
if (sub->active && event_matches_subscription(event, sub)) {
// Create EVENT message for this subscription
cJSON* event_msg = cJSON_CreateArray();
cJSON_AddItemToArray(event_msg, cJSON_CreateString("EVENT"));
cJSON_AddItemToArray(event_msg, cJSON_CreateString(sub->id));
cJSON_AddItemToArray(event_msg, cJSON_Duplicate(event, 1));
char* msg_str = cJSON_Print(event_msg);
if (msg_str) {
size_t msg_len = strlen(msg_str);
unsigned char* buf = malloc(LWS_PRE + msg_len);
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
// Send to WebSocket connection
int write_result = lws_write(sub->wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
if (write_result >= 0) {
sub->events_sent++;
broadcasts++;
// Log event broadcast to database (optional - can be disabled for performance)
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
if (event_id_obj && cJSON_IsString(event_id_obj)) {
log_event_broadcast(cJSON_GetStringValue(event_id_obj), sub->id, sub->client_ip);
}
}
free(buf);
if (sub->active && sub->wsi && event_matches_subscription(event, sub)) {
temp_sub_t* temp = malloc(sizeof(temp_sub_t));
if (temp) {
temp->wsi = sub->wsi;
// Safely copy subscription ID
size_t id_len = strlen(sub->id);
if (id_len >= SUBSCRIPTION_ID_MAX_LENGTH) {
id_len = SUBSCRIPTION_ID_MAX_LENGTH - 1;
}
free(msg_str);
memcpy(temp->id, sub->id, id_len);
temp->id[id_len] = '\0';
// Safely copy client IP
size_t ip_len = strlen(sub->client_ip);
if (ip_len >= CLIENT_IP_MAX_LENGTH) {
ip_len = CLIENT_IP_MAX_LENGTH - 1;
}
memcpy(temp->client_ip, sub->client_ip, ip_len);
temp->client_ip[ip_len] = '\0';
temp->next = matching_subs;
matching_subs = temp;
matching_count++;
} else {
log_error("broadcast_event_to_subscriptions: failed to allocate temp subscription");
}
cJSON_Delete(event_msg);
}
sub = sub->next;
}
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
// Second pass: send messages without holding lock
temp_sub_t* current_temp = matching_subs;
while (current_temp) {
// Create EVENT message for this subscription
cJSON* event_msg = cJSON_CreateArray();
cJSON_AddItemToArray(event_msg, cJSON_CreateString("EVENT"));
cJSON_AddItemToArray(event_msg, cJSON_CreateString(current_temp->id));
cJSON_AddItemToArray(event_msg, cJSON_Duplicate(event, 1));
char* msg_str = cJSON_Print(event_msg);
if (msg_str) {
size_t msg_len = strlen(msg_str);
unsigned char* buf = malloc(LWS_PRE + msg_len);
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
// Send to WebSocket connection with error checking
// Note: lws_write can fail if connection is closed, but won't crash
int write_result = lws_write(current_temp->wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
if (write_result >= 0) {
broadcasts++;
// Update events sent counter for this subscription
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
subscription_t* update_sub = g_subscription_manager.active_subscriptions;
while (update_sub) {
if (update_sub->wsi == current_temp->wsi &&
strcmp(update_sub->id, current_temp->id) == 0) {
update_sub->events_sent++;
break;
}
update_sub = update_sub->next;
}
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
// Log event broadcast to database (optional - can be disabled for performance)
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
if (event_id_obj && cJSON_IsString(event_id_obj)) {
log_event_broadcast(cJSON_GetStringValue(event_id_obj), current_temp->id, current_temp->client_ip);
}
}
free(buf);
}
free(msg_str);
}
sub = sub->next;
cJSON_Delete(event_msg);
current_temp = current_temp->next;
}
// Clean up temporary subscription list
while (matching_subs) {
temp_sub_t* next = matching_subs->next;
free(matching_subs);
matching_subs = next;
}
// Update global statistics
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
g_subscription_manager.total_events_broadcast += broadcasts;
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
return broadcasts;
@@ -688,19 +823,476 @@ void log_event_broadcast(const char* event_id, const char* sub_id, const char* c
// Update events sent counter for a subscription
void update_subscription_events_sent(const char* sub_id, int events_sent) {
if (!g_db || !sub_id) return;
const char* sql =
"UPDATE subscription_events "
"SET events_sent = ? "
"WHERE subscription_id = ? AND event_type = 'created'";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_int(stmt, 1, events_sent);
sqlite3_bind_text(stmt, 2, sub_id, -1, SQLITE_STATIC);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
}
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// PER-IP CONNECTION TRACKING
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// Get or create IP connection info (thread-safe)
ip_connection_info_t* get_or_create_ip_connection(const char* client_ip) {
if (!client_ip) return NULL;
pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
// Look for existing IP connection info
ip_connection_info_t* current = g_subscription_manager.ip_connections;
while (current) {
if (strcmp(current->ip_address, client_ip) == 0) {
// Found existing entry, update activity
current->last_activity = time(NULL);
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
return current;
}
current = current->next;
}
// Create new IP connection info
ip_connection_info_t* new_ip = calloc(1, sizeof(ip_connection_info_t));
if (!new_ip) {
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
return NULL;
}
// Copy IP address safely
strncpy(new_ip->ip_address, client_ip, CLIENT_IP_MAX_LENGTH - 1);
new_ip->ip_address[CLIENT_IP_MAX_LENGTH - 1] = '\0';
// Initialize tracking data
time_t now = time(NULL);
new_ip->active_connections = 1;
new_ip->total_subscriptions = 0;
new_ip->first_connection = now;
new_ip->last_activity = now;
// Add to linked list
new_ip->next = g_subscription_manager.ip_connections;
g_subscription_manager.ip_connections = new_ip;
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
return new_ip;
}
// Update IP connection activity timestamp
void update_ip_connection_activity(const char* client_ip) {
if (!client_ip) return;
pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
ip_connection_info_t* current = g_subscription_manager.ip_connections;
while (current) {
if (strcmp(current->ip_address, client_ip) == 0) {
current->last_activity = time(NULL);
break;
}
current = current->next;
}
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
}
// Remove IP connection (when last connection from IP closes)
void remove_ip_connection(const char* client_ip) {
if (!client_ip) return;
pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
ip_connection_info_t** current = &g_subscription_manager.ip_connections;
while (*current) {
ip_connection_info_t* entry = *current;
if (strcmp(entry->ip_address, client_ip) == 0) {
// Remove from list
*current = entry->next;
free(entry);
break;
}
current = &((*current)->next);
}
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
}
// Get total subscriptions for an IP address
int get_total_subscriptions_for_ip(const char* client_ip) {
if (!client_ip) return 0;
pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
ip_connection_info_t* current = g_subscription_manager.ip_connections;
while (current) {
if (strcmp(current->ip_address, client_ip) == 0) {
int total = current->total_subscriptions;
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
return total;
}
current = current->next;
}
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
return 0;
}
// Get active connections for an IP address
int get_active_connections_for_ip(const char* client_ip) {
if (!client_ip) return 0;
pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
ip_connection_info_t* current = g_subscription_manager.ip_connections;
while (current) {
if (strcmp(current->ip_address, client_ip) == 0) {
int active = current->active_connections;
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
return active;
}
current = current->next;
}
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
// FILTER VALIDATION FUNCTIONS
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/**
* Validate hex string format and length
*/
int validate_hex_string(const char* str, size_t expected_len, const char* field_name, char* error_message, size_t error_size) {
if (!str) {
snprintf(error_message, error_size, "%s: null value", field_name);
return 0;
}
size_t len = strlen(str);
if (len != expected_len) {
snprintf(error_message, error_size, "%s: invalid length %zu, expected %zu", field_name, len, expected_len);
return 0;
}
// Check for valid hex characters
for (size_t i = 0; i < len; i++) {
char c = str[i];
if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'))) {
snprintf(error_message, error_size, "%s: invalid hex character '%c' at position %zu", field_name, c, i);
return 0;
}
}
return 1;
}
/**
* Validate timestamp range (since/until)
*/
int validate_timestamp_range(long since, long until, char* error_message, size_t error_size) {
// Allow zero values (not set)
if (since == 0 && until == 0) {
return 1;
}
// Check for reasonable timestamp bounds (1970-01-01 to 2100-01-01)
if (since != 0 && (since < MIN_TIMESTAMP || since > MAX_TIMESTAMP)) {
snprintf(error_message, error_size, "since: timestamp %ld out of valid range", since);
return 0;
}
if (until != 0 && (until < MIN_TIMESTAMP || until > MAX_TIMESTAMP)) {
snprintf(error_message, error_size, "until: timestamp %ld out of valid range", until);
return 0;
}
// Check that since is before until if both are set
if (since > 0 && until > 0 && since >= until) {
snprintf(error_message, error_size, "since (%ld) must be before until (%ld)", since, until);
return 0;
}
return 1;
}
/**
* Validate numeric limits
*/
int validate_numeric_limits(int limit, char* error_message, size_t error_size) {
// Allow zero (no limit)
if (limit == 0) {
return 1;
}
// Check for reasonable limits (1-10000)
if (limit < MIN_LIMIT || limit > MAX_LIMIT) {
snprintf(error_message, error_size, "limit: value %d out of valid range [%d, %d]", limit, MIN_LIMIT, MAX_LIMIT);
return 0;
}
return 1;
}
/**
* Validate search term for SQL injection and length
*/
int validate_search_term(const char* search_term, char* error_message, size_t error_size) {
if (!search_term) {
return 1; // NULL search terms are allowed
}
size_t len = strlen(search_term);
// Check maximum length
if (len > MAX_SEARCH_TERM_LENGTH) {
snprintf(error_message, error_size, "search: term too long (%zu characters, max %d)", len, (int)MAX_SEARCH_TERM_LENGTH);
return 0;
}
// Check for potentially dangerous characters that could cause SQL issues
// Allow alphanumeric, spaces, and common punctuation
for (size_t i = 0; i < len; i++) {
char c = search_term[i];
if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == ' ' || c == '-' || c == '_' ||
c == '.' || c == ',' || c == '!' || c == '?' || c == ':' ||
c == ';' || c == '"' || c == '\'' || c == '(' || c == ')' ||
c == '[' || c == ']' || c == '{' || c == '}' || c == '@' ||
c == '#' || c == '$' || c == '%' || c == '^' || c == '&' ||
c == '*' || c == '+' || c == '=' || c == '|' || c == '\\' ||
c == '/' || c == '<' || c == '>' || c == '~' || c == '`')) {
// Reject control characters and other potentially problematic chars
if (c < 32 || c == 127) {
snprintf(error_message, error_size, "search: invalid character (ASCII %d) at position %zu", (int)c, i);
return 0;
}
}
}
return 1;
}
/**
* Validate all filter values in a filter object
*/
int validate_filter_values(cJSON* filter_json, char* error_message, size_t error_size) {
if (!filter_json || !cJSON_IsObject(filter_json)) {
snprintf(error_message, error_size, "filter must be a JSON object");
return 0;
}
// Validate kinds array
cJSON* kinds = cJSON_GetObjectItem(filter_json, "kinds");
if (kinds) {
if (!cJSON_IsArray(kinds)) {
snprintf(error_message, error_size, "kinds must be an array");
return 0;
}
int kinds_count = cJSON_GetArraySize(kinds);
if (kinds_count > MAX_KINDS_PER_FILTER) {
snprintf(error_message, error_size, "kinds array too large (%d items, max %d)", kinds_count, MAX_KINDS_PER_FILTER);
return 0;
}
for (int i = 0; i < kinds_count; i++) {
cJSON* kind_item = cJSON_GetArrayItem(kinds, i);
if (!cJSON_IsNumber(kind_item)) {
snprintf(error_message, error_size, "kinds[%d] must be a number", i);
return 0;
}
int kind_val = (int)cJSON_GetNumberValue(kind_item);
if (kind_val < 0 || kind_val > 65535) { // Reasonable range for event kinds
snprintf(error_message, error_size, "kinds[%d]: invalid event kind %d", i, kind_val);
return 0;
}
}
}
// Validate authors array
cJSON* authors = cJSON_GetObjectItem(filter_json, "authors");
if (authors) {
if (!cJSON_IsArray(authors)) {
snprintf(error_message, error_size, "authors must be an array");
return 0;
}
int authors_count = cJSON_GetArraySize(authors);
if (authors_count > MAX_AUTHORS_PER_FILTER) {
snprintf(error_message, error_size, "authors array too large (%d items, max %d)", authors_count, MAX_AUTHORS_PER_FILTER);
return 0;
}
for (int i = 0; i < authors_count; i++) {
cJSON* author_item = cJSON_GetArrayItem(authors, i);
if (!cJSON_IsString(author_item)) {
snprintf(error_message, error_size, "authors[%d] must be a string", i);
return 0;
}
const char* author_str = cJSON_GetStringValue(author_item);
// Allow partial pubkeys (prefix matching), so validate hex but allow shorter lengths
size_t author_len = strlen(author_str);
if (author_len == 0 || author_len > 64) {
snprintf(error_message, error_size, "authors[%d]: invalid length %zu", i, author_len);
return 0;
}
// Validate hex characters (allow partial)
for (size_t j = 0; j < author_len; j++) {
char c = author_str[j];
if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'))) {
snprintf(error_message, error_size, "authors[%d]: invalid hex character '%c'", i, c);
return 0;
}
}
}
}
// Validate ids array
cJSON* ids = cJSON_GetObjectItem(filter_json, "ids");
if (ids) {
if (!cJSON_IsArray(ids)) {
snprintf(error_message, error_size, "ids must be an array");
return 0;
}
int ids_count = cJSON_GetArraySize(ids);
if (ids_count > MAX_IDS_PER_FILTER) {
snprintf(error_message, error_size, "ids array too large (%d items, max %d)", ids_count, MAX_IDS_PER_FILTER);
return 0;
}
for (int i = 0; i < ids_count; i++) {
cJSON* id_item = cJSON_GetArrayItem(ids, i);
if (!cJSON_IsString(id_item)) {
snprintf(error_message, error_size, "ids[%d] must be a string", i);
return 0;
}
const char* id_str = cJSON_GetStringValue(id_item);
// Allow partial IDs (prefix matching)
size_t id_len = strlen(id_str);
if (id_len == 0 || id_len > 64) {
snprintf(error_message, error_size, "ids[%d]: invalid length %zu", i, id_len);
return 0;
}
// Validate hex characters
for (size_t j = 0; j < id_len; j++) {
char c = id_str[j];
if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'))) {
snprintf(error_message, error_size, "ids[%d]: invalid hex character '%c'", i, c);
return 0;
}
}
}
}
// Validate since/until timestamps
long since_val = 0, until_val = 0;
cJSON* since = cJSON_GetObjectItem(filter_json, "since");
if (since) {
if (!cJSON_IsNumber(since)) {
snprintf(error_message, error_size, "since must be a number");
return 0;
}
since_val = (long)cJSON_GetNumberValue(since);
}
cJSON* until = cJSON_GetObjectItem(filter_json, "until");
if (until) {
if (!cJSON_IsNumber(until)) {
snprintf(error_message, error_size, "until must be a number");
return 0;
}
until_val = (long)cJSON_GetNumberValue(until);
}
if (!validate_timestamp_range(since_val, until_val, error_message, error_size)) {
return 0;
}
// Validate limit
cJSON* limit = cJSON_GetObjectItem(filter_json, "limit");
if (limit) {
if (!cJSON_IsNumber(limit)) {
snprintf(error_message, error_size, "limit must be a number");
return 0;
}
int limit_val = (int)cJSON_GetNumberValue(limit);
if (!validate_numeric_limits(limit_val, error_message, error_size)) {
return 0;
}
}
// Validate search term
cJSON* search = cJSON_GetObjectItem(filter_json, "search");
if (search) {
if (!cJSON_IsString(search)) {
snprintf(error_message, error_size, "search must be a string");
return 0;
}
const char* search_term = cJSON_GetStringValue(search);
if (!validate_search_term(search_term, error_message, error_size)) {
return 0;
}
}
// Validate tag filters (#e, #p, #t, etc.)
cJSON* item = NULL;
cJSON_ArrayForEach(item, filter_json) {
const char* key = item->string;
if (key && strlen(key) >= 2 && key[0] == '#') {
if (!cJSON_IsArray(item)) {
snprintf(error_message, error_size, "%s must be an array", key);
return 0;
}
int tag_count = cJSON_GetArraySize(item);
if (tag_count > MAX_TAG_VALUES_PER_FILTER) {
snprintf(error_message, error_size, "%s array too large (%d items, max %d)", key, tag_count, MAX_TAG_VALUES_PER_FILTER);
return 0;
}
for (int i = 0; i < tag_count; i++) {
cJSON* tag_value = cJSON_GetArrayItem(item, i);
if (!cJSON_IsString(tag_value)) {
snprintf(error_message, error_size, "%s[%d] must be a string", key, i);
return 0;
}
const char* tag_str = cJSON_GetStringValue(tag_value);
size_t tag_len = strlen(tag_str);
if (tag_len > MAX_TAG_VALUE_LENGTH) {
snprintf(error_message, error_size, "%s[%d]: tag value too long (%zu characters, max %d)", key, i, tag_len, MAX_TAG_VALUE_LENGTH);
return 0;
}
}
}
}
return 1;
}

View File

@@ -9,6 +9,7 @@
#include <stdint.h>
#include "../nostr_core_lib/cjson/cJSON.h"
#include "config.h" // For CLIENT_IP_MAX_LENGTH
#include "websockets.h" // For validation constants
// Forward declaration for libwebsockets struct
struct lws;
@@ -18,6 +19,13 @@ struct lws;
#define MAX_FILTERS_PER_SUBSCRIPTION 10
#define MAX_TOTAL_SUBSCRIPTIONS 5000
// Validation limits (shared with websockets.h)
#define MAX_SEARCH_TERM_LENGTH 256
#define MIN_TIMESTAMP 0L
#define MAX_TIMESTAMP 4102444800L // 2100-01-01
#define MIN_LIMIT 1
#define MAX_LIMIT 10000
// Forward declarations for typedefs
typedef struct subscription_filter subscription_filter_t;
typedef struct subscription subscription_t;
@@ -55,6 +63,16 @@ struct subscription {
struct subscription* session_next; // Next subscription for this session
};
// Per-IP connection tracking
typedef struct ip_connection_info {
char ip_address[CLIENT_IP_MAX_LENGTH]; // IP address
int active_connections; // Number of active connections from this IP
int total_subscriptions; // Total subscriptions across all connections from this IP
time_t first_connection; // When first connection from this IP was established
time_t last_activity; // Last activity timestamp from this IP
struct ip_connection_info* next; // Next in linked list
} ip_connection_info_t;
// Global subscription manager
struct subscription_manager {
subscription_t* active_subscriptions; // Head of global subscription list
@@ -65,6 +83,10 @@ struct subscription_manager {
int max_subscriptions_per_client; // Default: 20
int max_total_subscriptions; // Default: 5000
// Per-IP connection tracking
ip_connection_info_t* ip_connections; // Head of per-IP connection list
pthread_mutex_t ip_tracking_lock; // Thread safety for IP tracking
// Statistics
uint64_t total_created; // Lifetime subscription count
uint64_t total_events_broadcast; // Lifetime event broadcast count
@@ -81,6 +103,13 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter);
int event_matches_subscription(cJSON* event, subscription_t* subscription);
int broadcast_event_to_subscriptions(cJSON* event);
// Per-IP connection tracking functions
ip_connection_info_t* get_or_create_ip_connection(const char* client_ip);
void update_ip_connection_activity(const char* client_ip);
void remove_ip_connection(const char* client_ip);
int get_total_subscriptions_for_ip(const char* client_ip);
int get_active_connections_for_ip(const char* client_ip);
// Database logging functions
void log_subscription_created(const subscription_t* sub);
void log_subscription_closed(const char* sub_id, const char* client_ip, const char* reason);

View File

@@ -86,6 +86,13 @@ int is_event_expired(cJSON* event, time_t current_time);
int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);
// Forward declarations for rate limiting
int is_client_rate_limited_for_malformed_requests(struct per_session_data *pss);
void record_malformed_request(struct per_session_data *pss);
// Forward declarations for filter validation
int validate_filter_array(cJSON* filters, char* error_message, size_t error_size);
// Forward declarations for NOTICE message support
void send_notice_message(struct lws* wsi, const char* message);
@@ -264,6 +271,12 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
case LWS_CALLBACK_RECEIVE:
if (len > 0) {
// Check if client is rate limited for malformed requests
if (is_client_rate_limited_for_malformed_requests(pss)) {
send_notice_message(wsi, "error: too many malformed requests - temporarily blocked");
return 0;
}
char *message = malloc(len + 1);
if (message) {
memcpy(message, in, len);
@@ -677,15 +690,62 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
free(message);
return 0;
}
// Handle REQ message
cJSON* sub_id = cJSON_GetArrayItem(json, 1);
if (sub_id && cJSON_IsString(sub_id)) {
const char* subscription_id = cJSON_GetStringValue(sub_id);
// Validate subscription ID before processing
if (!subscription_id) {
send_notice_message(wsi, "error: invalid subscription ID");
log_warning("REQ rejected: NULL subscription ID");
record_malformed_request(pss);
cJSON_Delete(json);
free(message);
return 0;
}
// Check subscription ID format and length
size_t id_len = strlen(subscription_id);
if (id_len == 0 || id_len >= SUBSCRIPTION_ID_MAX_LENGTH) {
send_notice_message(wsi, "error: subscription ID too long or empty");
log_warning("REQ rejected: invalid subscription ID length");
cJSON_Delete(json);
free(message);
return 0;
}
// Validate characters in subscription ID
int valid_id = 1;
for (size_t i = 0; i < id_len; i++) {
char c = subscription_id[i];
if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == '_' || c == '-')) {
valid_id = 0;
break;
}
}
if (!valid_id) {
send_notice_message(wsi, "error: invalid characters in subscription ID");
log_warning("REQ rejected: invalid characters in subscription ID");
cJSON_Delete(json);
free(message);
return 0;
}
// Create array of filter objects from position 2 onwards
cJSON* filters = cJSON_CreateArray();
if (!filters) {
send_notice_message(wsi, "error: failed to process filters");
log_error("REQ failed: could not create filters array");
cJSON_Delete(json);
free(message);
return 0;
}
int json_size = cJSON_GetArraySize(json);
for (int i = 2; i < json_size; i++) {
cJSON* filter = cJSON_GetArrayItem(json, i);
@@ -693,29 +753,46 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
cJSON_AddItemToArray(filters, cJSON_Duplicate(filter, 1));
}
}
// Validate filters before processing
char filter_error[512] = {0};
if (!validate_filter_array(filters, filter_error, sizeof(filter_error))) {
send_notice_message(wsi, filter_error);
log_warning("REQ rejected: invalid filters");
record_malformed_request(pss);
cJSON_Delete(filters);
cJSON_Delete(json);
free(message);
return 0;
}
handle_req_message(subscription_id, filters, wsi, pss);
// Clean up the filters array we created
cJSON_Delete(filters);
// Send EOSE (End of Stored Events)
cJSON* eose_response = cJSON_CreateArray();
cJSON_AddItemToArray(eose_response, cJSON_CreateString("EOSE"));
cJSON_AddItemToArray(eose_response, cJSON_CreateString(subscription_id));
char *eose_str = cJSON_Print(eose_response);
if (eose_str) {
size_t eose_len = strlen(eose_str);
unsigned char *buf = malloc(LWS_PRE + eose_len);
if (buf) {
memcpy(buf + LWS_PRE, eose_str, eose_len);
lws_write(wsi, buf + LWS_PRE, eose_len, LWS_WRITE_TEXT);
free(buf);
if (eose_response) {
cJSON_AddItemToArray(eose_response, cJSON_CreateString("EOSE"));
cJSON_AddItemToArray(eose_response, cJSON_CreateString(subscription_id));
char *eose_str = cJSON_Print(eose_response);
if (eose_str) {
size_t eose_len = strlen(eose_str);
unsigned char *buf = malloc(LWS_PRE + eose_len);
if (buf) {
memcpy(buf + LWS_PRE, eose_str, eose_len);
lws_write(wsi, buf + LWS_PRE, eose_len, LWS_WRITE_TEXT);
free(buf);
}
free(eose_str);
}
free(eose_str);
cJSON_Delete(eose_response);
}
cJSON_Delete(eose_response);
} else {
send_notice_message(wsi, "error: missing or invalid subscription ID in REQ");
log_warning("REQ rejected: missing or invalid subscription ID");
}
} else if (strcmp(msg_type, "COUNT") == 0) {
// Check NIP-42 authentication for COUNT requests if required
@@ -747,6 +824,18 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
}
}
// Validate filters before processing
char filter_error[512] = {0};
if (!validate_filter_array(filters, filter_error, sizeof(filter_error))) {
send_notice_message(wsi, filter_error);
log_warning("COUNT rejected: invalid filters");
record_malformed_request(pss);
cJSON_Delete(filters);
cJSON_Delete(json);
free(message);
return 0;
}
handle_count_message(subscription_id, filters, wsi, pss);
// Clean up the filters array we created
@@ -757,14 +846,52 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
cJSON* sub_id = cJSON_GetArrayItem(json, 1);
if (sub_id && cJSON_IsString(sub_id)) {
const char* subscription_id = cJSON_GetStringValue(sub_id);
// Validate subscription ID before processing
if (!subscription_id) {
send_notice_message(wsi, "error: invalid subscription ID in CLOSE");
log_warning("CLOSE rejected: NULL subscription ID");
cJSON_Delete(json);
free(message);
return 0;
}
// Check subscription ID format and length
size_t id_len = strlen(subscription_id);
if (id_len == 0 || id_len >= SUBSCRIPTION_ID_MAX_LENGTH) {
send_notice_message(wsi, "error: subscription ID too long or empty in CLOSE");
log_warning("CLOSE rejected: invalid subscription ID length");
cJSON_Delete(json);
free(message);
return 0;
}
// Validate characters in subscription ID
int valid_id = 1;
for (size_t i = 0; i < id_len; i++) {
char c = subscription_id[i];
if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
(c >= '0' && c <= '9') || c == '_' || c == '-')) {
valid_id = 0;
break;
}
}
if (!valid_id) {
send_notice_message(wsi, "error: invalid characters in subscription ID for CLOSE");
log_warning("CLOSE rejected: invalid characters in subscription ID");
cJSON_Delete(json);
free(message);
return 0;
}
// Remove from global manager
remove_subscription_from_manager(subscription_id, wsi);
// Remove from session list if present
if (pss) {
pthread_mutex_lock(&pss->session_lock);
struct subscription** current = &pss->subscriptions;
while (*current) {
if (strcmp((*current)->id, subscription_id) == 0) {
@@ -775,11 +902,14 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
}
current = &((*current)->session_next);
}
pthread_mutex_unlock(&pss->session_lock);
}
// Subscription closed
} else {
send_notice_message(wsi, "error: missing or invalid subscription ID in CLOSE");
log_warning("CLOSE rejected: missing or invalid subscription ID");
}
} else if (strcmp(msg_type, "AUTH") == 0) {
// Handle NIP-42 AUTH message
@@ -1200,6 +1330,11 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
return 0;
}
// Parameter binding helpers
char** bind_params = NULL;
int bind_param_count = 0;
int bind_param_capacity = 0;
int total_count = 0;
// Process each filter in the array
@@ -1210,6 +1345,15 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
continue;
}
// Reset bind params for this filter
for (int j = 0; j < bind_param_count; j++) {
free(bind_params[j]);
}
free(bind_params);
bind_params = NULL;
bind_param_count = 0;
bind_param_capacity = 0;
// Build SQL COUNT query based on filter - exclude ephemeral events (kinds 20000-29999) from historical queries
char sql[1024] = "SELECT COUNT(*) FROM events WHERE 1=1 AND (kind < 20000 OR kind >= 30000)";
char* sql_ptr = sql + strlen(sql);
@@ -1249,56 +1393,88 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
// Handle authors filter
cJSON* authors = cJSON_GetObjectItem(filter, "authors");
if (authors && cJSON_IsArray(authors)) {
int author_count = cJSON_GetArraySize(authors);
int author_count = 0;
// Count valid authors
for (int a = 0; a < cJSON_GetArraySize(authors); a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
author_count++;
}
}
if (author_count > 0) {
snprintf(sql_ptr, remaining, " AND pubkey IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int a = 0; a < author_count; a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
if (a > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(author));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (a > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add author values to bind params
for (int a = 0; a < cJSON_GetArraySize(authors); a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
if (bind_param_count >= bind_param_capacity) {
bind_param_capacity = bind_param_capacity == 0 ? 16 : bind_param_capacity * 2;
bind_params = realloc(bind_params, bind_param_capacity * sizeof(char*));
}
bind_params[bind_param_count++] = strdup(cJSON_GetStringValue(author));
}
}
}
}
// Handle ids filter
cJSON* ids = cJSON_GetObjectItem(filter, "ids");
if (ids && cJSON_IsArray(ids)) {
int id_count = cJSON_GetArraySize(ids);
int id_count = 0;
// Count valid ids
for (int i = 0; i < cJSON_GetArraySize(ids); i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
id_count++;
}
}
if (id_count > 0) {
snprintf(sql_ptr, remaining, " AND id IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int i = 0; i < id_count; i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(id));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add id values to bind params
for (int i = 0; i < cJSON_GetArraySize(ids); i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
if (bind_param_count >= bind_param_capacity) {
bind_param_capacity = bind_param_capacity == 0 ? 16 : bind_param_capacity * 2;
bind_params = realloc(bind_params, bind_param_capacity * sizeof(char*));
}
bind_params[bind_param_count++] = strdup(cJSON_GetStringValue(id));
}
}
}
}
@@ -1311,29 +1487,50 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
const char* tag_name = filter_key + 1; // Get the tag name (e, p, t, type, etc.)
if (cJSON_IsArray(filter_item)) {
int tag_value_count = cJSON_GetArraySize(filter_item);
int tag_value_count = 0;
// Count valid tag values
for (int i = 0; i < cJSON_GetArraySize(filter_item); i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
tag_value_count++;
}
}
if (tag_value_count > 0) {
// Use EXISTS with JSON extraction to check for matching tags
snprintf(sql_ptr, remaining, " AND EXISTS (SELECT 1 FROM json_each(json(tags)) WHERE json_extract(value, '$[0]') = '%s' AND json_extract(value, '$[1]') IN (", tag_name);
// Use EXISTS with parameterized query
snprintf(sql_ptr, remaining, " AND EXISTS (SELECT 1 FROM json_each(json(tags)) WHERE json_extract(value, '$[0]') = ? AND json_extract(value, '$[1]') IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int i = 0; i < tag_value_count; i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(tag_value));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, "))");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add tag name and values to bind params
if (bind_param_count >= bind_param_capacity) {
bind_param_capacity = bind_param_capacity == 0 ? 16 : bind_param_capacity * 2;
bind_params = realloc(bind_params, bind_param_capacity * sizeof(char*));
}
bind_params[bind_param_count++] = strdup(tag_name);
for (int i = 0; i < cJSON_GetArraySize(filter_item); i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
if (bind_param_count >= bind_param_capacity) {
bind_param_capacity = bind_param_capacity == 0 ? 16 : bind_param_capacity * 2;
bind_params = realloc(bind_params, bind_param_capacity * sizeof(char*));
}
bind_params[bind_param_count++] = strdup(cJSON_GetStringValue(tag_value));
}
}
}
}
}
@@ -1395,6 +1592,11 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
continue;
}
// Bind parameters
for (int i = 0; i < bind_param_count; i++) {
sqlite3_bind_text(stmt, i + 1, bind_params[i], -1, SQLITE_TRANSIENT);
}
int filter_count = 0;
if (sqlite3_step(stmt) == SQLITE_ROW) {
filter_count = sqlite3_column_int(stmt, 0);
@@ -1431,5 +1633,278 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
}
cJSON_Delete(count_response);
// Cleanup bind params
for (int i = 0; i < bind_param_count; i++) {
free(bind_params[i]);
}
free(bind_params);
return total_count;
}
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
// RATE LIMITING FUNCTIONS
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/**
* Check if a client is currently rate limited for malformed requests
*/
int is_client_rate_limited_for_malformed_requests(struct per_session_data *pss) {
if (!pss) {
return 0;
}
time_t now = time(NULL);
// Check if currently blocked
if (pss->malformed_request_blocked_until > now) {
return 1;
}
// Reset block if expired
if (pss->malformed_request_blocked_until > 0 && pss->malformed_request_blocked_until <= now) {
pss->malformed_request_blocked_until = 0;
pss->malformed_request_count = 0;
pss->malformed_request_window_start = now;
}
// Check if within current hour window
if (pss->malformed_request_window_start == 0 ||
(now - pss->malformed_request_window_start) >= 3600) { // 1 hour
// Start new window
pss->malformed_request_window_start = now;
pss->malformed_request_count = 0;
}
// Check if exceeded limit
if (pss->malformed_request_count >= MAX_MALFORMED_REQUESTS_PER_HOUR) {
// Block for the specified duration
pss->malformed_request_blocked_until = now + MALFORMED_REQUEST_BLOCK_DURATION;
log_warning("Client rate limited for malformed requests");
return 1;
}
return 0;
}
/**
* Record a malformed request for rate limiting purposes
*/
void record_malformed_request(struct per_session_data *pss) {
if (!pss) {
return;
}
time_t now = time(NULL);
// Initialize window if needed
if (pss->malformed_request_window_start == 0) {
pss->malformed_request_window_start = now;
pss->malformed_request_count = 0;
}
// Reset window if hour has passed
if ((now - pss->malformed_request_window_start) >= 3600) {
pss->malformed_request_window_start = now;
pss->malformed_request_count = 0;
}
// Increment count
pss->malformed_request_count++;
}
/**
* Validate if a string is valid hexadecimal of specified length
*/
int is_valid_hex_string(const char* str, size_t expected_len) {
if (!str || strlen(str) != expected_len) {
return 0;
}
for (size_t i = 0; i < expected_len; i++) {
char c = str[i];
if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'))) {
return 0;
}
}
return 1;
}
/**
* Validate a filter array for REQ and COUNT messages
*/
int validate_filter_array(cJSON* filters, char* error_message, size_t error_size) {
if (!filters || !cJSON_IsArray(filters)) {
snprintf(error_message, error_size, "error: filters must be an array");
return 0;
}
int filter_count = cJSON_GetArraySize(filters);
if (filter_count > MAX_FILTERS_PER_REQUEST) {
snprintf(error_message, error_size, "error: too many filters (max %d)", MAX_FILTERS_PER_REQUEST);
return 0;
}
// Validate each filter object
for (int i = 0; i < filter_count; i++) {
cJSON* filter = cJSON_GetArrayItem(filters, i);
if (!filter || !cJSON_IsObject(filter)) {
snprintf(error_message, error_size, "error: filter %d is not an object", i);
return 0;
}
// Validate filter fields
cJSON* filter_item = NULL;
cJSON_ArrayForEach(filter_item, filter) {
const char* key = filter_item->string;
if (!key) continue;
// Validate authors array
if (strcmp(key, "authors") == 0) {
if (!cJSON_IsArray(filter_item)) {
snprintf(error_message, error_size, "error: authors must be an array");
return 0;
}
int author_count = cJSON_GetArraySize(filter_item);
if (author_count > MAX_AUTHORS_PER_FILTER) {
snprintf(error_message, error_size, "error: too many authors (max %d)", MAX_AUTHORS_PER_FILTER);
return 0;
}
for (int j = 0; j < author_count; j++) {
cJSON* author = cJSON_GetArrayItem(filter_item, j);
if (!cJSON_IsString(author)) {
snprintf(error_message, error_size, "error: author %d is not a string", j);
return 0;
}
const char* author_str = cJSON_GetStringValue(author);
if (!is_valid_hex_string(author_str, 64)) {
snprintf(error_message, error_size, "error: invalid author hex string");
return 0;
}
}
}
// Validate ids array
else if (strcmp(key, "ids") == 0) {
if (!cJSON_IsArray(filter_item)) {
snprintf(error_message, error_size, "error: ids must be an array");
return 0;
}
int id_count = cJSON_GetArraySize(filter_item);
if (id_count > MAX_IDS_PER_FILTER) {
snprintf(error_message, error_size, "error: too many ids (max %d)", MAX_IDS_PER_FILTER);
return 0;
}
for (int j = 0; j < id_count; j++) {
cJSON* id = cJSON_GetArrayItem(filter_item, j);
if (!cJSON_IsString(id)) {
snprintf(error_message, error_size, "error: id %d is not a string", j);
return 0;
}
const char* id_str = cJSON_GetStringValue(id);
if (!is_valid_hex_string(id_str, 64)) {
snprintf(error_message, error_size, "error: invalid id hex string");
return 0;
}
}
}
// Validate kinds array
else if (strcmp(key, "kinds") == 0) {
if (!cJSON_IsArray(filter_item)) {
snprintf(error_message, error_size, "error: kinds must be an array");
return 0;
}
int kind_count = cJSON_GetArraySize(filter_item);
if (kind_count > MAX_KINDS_PER_FILTER) {
snprintf(error_message, error_size, "error: too many kinds (max %d)", MAX_KINDS_PER_FILTER);
return 0;
}
for (int j = 0; j < kind_count; j++) {
cJSON* kind = cJSON_GetArrayItem(filter_item, j);
if (!cJSON_IsNumber(kind)) {
snprintf(error_message, error_size, "error: kind %d is not a number", j);
return 0;
}
int kind_val = (int)cJSON_GetNumberValue(kind);
if (kind_val < 0 || kind_val > MAX_KIND_VALUE) {
snprintf(error_message, error_size, "error: invalid kind value %d", kind_val);
return 0;
}
}
}
// Validate since/until timestamps
else if (strcmp(key, "since") == 0 || strcmp(key, "until") == 0) {
if (!cJSON_IsNumber(filter_item)) {
snprintf(error_message, error_size, "error: %s must be a number", key);
return 0;
}
double timestamp = cJSON_GetNumberValue(filter_item);
if (timestamp < 0 || timestamp > MAX_TIMESTAMP_VALUE) {
snprintf(error_message, error_size, "error: invalid %s timestamp", key);
return 0;
}
}
// Validate limit
else if (strcmp(key, "limit") == 0) {
if (!cJSON_IsNumber(filter_item)) {
snprintf(error_message, error_size, "error: limit must be a number");
return 0;
}
int limit_val = (int)cJSON_GetNumberValue(filter_item);
if (limit_val < 0 || limit_val > MAX_LIMIT_VALUE) {
snprintf(error_message, error_size, "error: invalid limit value %d", limit_val);
return 0;
}
}
// Validate search term
else if (strcmp(key, "search") == 0) {
if (!cJSON_IsString(filter_item)) {
snprintf(error_message, error_size, "error: search must be a string");
return 0;
}
const char* search_str = cJSON_GetStringValue(filter_item);
size_t search_len = strlen(search_str);
if (search_len > MAX_SEARCH_LENGTH) {
snprintf(error_message, error_size, "error: search term too long (max %d)", MAX_SEARCH_LENGTH);
return 0;
}
// Check for SQL injection characters
if (strchr(search_str, ';') || strstr(search_str, "--") || strstr(search_str, "/*") || strstr(search_str, "*/")) {
snprintf(error_message, error_size, "error: invalid characters in search term");
return 0;
}
}
// Validate tag filters (#e, #p, #t, etc.)
else if (key[0] == '#' && strlen(key) > 1) {
if (!cJSON_IsArray(filter_item)) {
snprintf(error_message, error_size, "error: %s must be an array", key);
return 0;
}
int tag_count = cJSON_GetArraySize(filter_item);
if (tag_count > MAX_TAG_VALUES_PER_FILTER) {
snprintf(error_message, error_size, "error: too many %s values (max %d)", key, MAX_TAG_VALUES_PER_FILTER);
return 0;
}
for (int j = 0; j < tag_count; j++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, j);
if (!cJSON_IsString(tag_value)) {
snprintf(error_message, error_size, "error: %s[%d] is not a string", key, j);
return 0;
}
const char* tag_str = cJSON_GetStringValue(tag_value);
size_t tag_len = strlen(tag_str);
if (tag_len > MAX_TAG_VALUE_LENGTH) {
snprintf(error_message, error_size, "error: %s value too long (max %d)", key, MAX_TAG_VALUE_LENGTH);
return 0;
}
}
}
// Unknown filter keys are allowed but ignored
}
}
return 1; // All filters valid
}

View File

@@ -14,7 +14,24 @@
#define CHALLENGE_MAX_LENGTH 128
#define AUTHENTICATED_PUBKEY_MAX_LENGTH 65 // 64 hex + null
// Enhanced per-session data with subscription management and NIP-42 authentication
// Rate limiting constants for malformed requests
#define MAX_MALFORMED_REQUESTS_PER_HOUR 10
#define MALFORMED_REQUEST_BLOCK_DURATION 3600 // 1 hour in seconds
#define RATE_LIMIT_CLEANUP_INTERVAL 300 // 5 minutes
// Filter validation constants
#define MAX_FILTERS_PER_REQUEST 10
#define MAX_AUTHORS_PER_FILTER 100
#define MAX_IDS_PER_FILTER 100
#define MAX_KINDS_PER_FILTER 50
#define MAX_TAG_VALUES_PER_FILTER 100
#define MAX_KIND_VALUE 65535
#define MAX_TIMESTAMP_VALUE 2147483647 // Max 32-bit signed int
#define MAX_LIMIT_VALUE 5000
#define MAX_SEARCH_LENGTH 256
#define MAX_TAG_VALUE_LENGTH 1024
// Enhanced per-session data with subscription management, NIP-42 authentication, and rate limiting
struct per_session_data {
int authenticated;
struct subscription* subscriptions; // Head of this session's subscription list
@@ -30,6 +47,17 @@ struct per_session_data {
int nip42_auth_required_events; // Whether NIP-42 auth is required for EVENT submission
int nip42_auth_required_subscriptions; // Whether NIP-42 auth is required for REQ operations
int auth_challenge_sent; // Whether challenge has been sent (0/1)
// Rate limiting for subscription attempts
int failed_subscription_attempts; // Count of failed subscription attempts
time_t last_failed_attempt; // Timestamp of last failed attempt
time_t rate_limit_until; // Time until rate limiting expires
int consecutive_failures; // Consecutive failed attempts for backoff
// Rate limiting for malformed requests
int malformed_request_count; // Count of malformed requests in current hour
time_t malformed_request_window_start; // Start of current hour window
time_t malformed_request_blocked_until; // Time until blocked for malformed requests
};
// NIP-11 HTTP session data structure for managing buffer lifetime

View File

@@ -0,0 +1,28 @@
2025-10-11 10:56:27 - ==========================================
2025-10-11 10:56:27 - C-Relay Comprehensive Test Suite Runner
2025-10-11 10:56:27 - ==========================================
2025-10-11 10:56:27 - Relay URL: ws://127.0.0.1:8888
2025-10-11 10:56:27 - Log file: test_results_20251011_105627.log
2025-10-11 10:56:27 - Report file: test_report_20251011_105627.html
2025-10-11 10:56:27 -
2025-10-11 10:56:27 - Checking relay status at ws://127.0.0.1:8888...
2025-10-11 10:56:27 - \033[0;32m✓ Relay HTTP endpoint is accessible\033[0m
2025-10-11 10:56:27 -
2025-10-11 10:56:27 - Starting comprehensive test execution...
2025-10-11 10:56:27 -
2025-10-11 10:56:27 - \033[0;34m=== SECURITY TEST SUITES ===\033[0m
2025-10-11 10:56:27 - ==========================================
2025-10-11 10:56:27 - Running Test Suite: SQL Injection Tests
2025-10-11 10:56:27 - Description: Comprehensive SQL injection vulnerability testing
2025-10-11 10:56:27 - ==========================================
==========================================
C-Relay SQL Injection Test Suite
==========================================
Testing against relay at ws://127.0.0.1:8888
=== Basic Connectivity Test ===
Testing Basic connectivity... PASSED - Valid query works
=== Authors Filter SQL Injection Tests ===
Testing Authors filter with payload: '; DROP TABLE events; --... UNCERTAIN - Connection timeout (may indicate crash)
2025-10-11 10:56:32 - \033[0;31m✗ SQL Injection Tests FAILED\033[0m (Duration: 5s)

472
tests/README.md Normal file
View File

@@ -0,0 +1,472 @@
# C-Relay Comprehensive Testing Framework
This directory contains a comprehensive testing framework for the C-Relay Nostr relay implementation. The framework provides automated testing for security vulnerabilities, performance validation, and stability assurance.
## Overview
The testing framework is designed to validate all critical security fixes and ensure stable operation of the Nostr relay. It includes multiple test suites covering different aspects of relay functionality and security.
## Test Suites
### 1. Master Test Runner (`run_all_tests.sh`)
The master test runner orchestrates all test suites and provides comprehensive reporting.
**Usage:**
```bash
./tests/run_all_tests.sh
```
**Features:**
- Automated execution of all test suites
- Comprehensive HTML and log reporting
- Success/failure tracking across all tests
- Relay status validation before testing
### 2. SQL Injection Tests (`sql_injection_tests.sh`)
Comprehensive testing of SQL injection vulnerabilities across all filter types.
**Tests:**
- Classic SQL injection payloads (`'; DROP TABLE; --`)
- Union-based injection attacks
- Error-based injection attempts
- Time-based blind injection
- Stacked query attacks
- Filter-specific injection (authors, IDs, kinds, search, tags)
**Usage:**
```bash
./tests/sql_injection_tests.sh
```
### 3. Memory Corruption Tests (`memory_corruption_tests.sh`)
Tests for buffer overflows, use-after-free, and memory safety issues.
**Tests:**
- Malformed subscription IDs (empty, very long, null bytes)
- Oversized filter arrays
- Concurrent access patterns
- Malformed JSON structures
- Large message payloads
**Usage:**
```bash
./tests/memory_corruption_tests.sh
```
### 4. Input Validation Tests (`input_validation_tests.sh`)
Comprehensive boundary condition testing for all input parameters.
**Tests:**
- Message type validation
- Message structure validation
- Subscription ID boundary tests
- Filter object validation
- Authors, IDs, kinds, timestamps, limits validation
**Usage:**
```bash
./tests/input_validation_tests.sh
```
### 5. Load Testing (`load_tests.sh`)
Performance testing under high concurrent connection scenarios.
**Test Scenarios:**
- Light load (10 concurrent clients)
- Medium load (25 concurrent clients)
- Heavy load (50 concurrent clients)
- Stress test (100 concurrent clients)
**Features:**
- Resource monitoring (CPU, memory, connections)
- Connection success rate tracking
- Message throughput measurement
- Relay responsiveness validation
**Usage:**
```bash
./tests/load_tests.sh
```
### 6. Authentication Tests (`auth_tests.sh`)
Tests NIP-42 authentication mechanisms and access control.
**Tests:**
- Authentication challenge responses
- Whitelist/blacklist functionality
- Event publishing with auth requirements
- Admin API authentication events
**Usage:**
```bash
./tests/auth_tests.sh
```
### 7. Rate Limiting Tests (`rate_limiting_tests.sh`)
Tests rate limiting and abuse prevention mechanisms.
**Tests:**
- Message rate limiting
- Connection rate limiting
- Subscription creation limits
- Abuse pattern detection
**Usage:**
```bash
./tests/rate_limiting_tests.sh
```
### 8. Performance Benchmarks (`performance_benchmarks.sh`)
Performance metrics and benchmarking tools.
**Tests:**
- Message throughput measurement
- Response time analysis
- Memory usage profiling
- CPU utilization tracking
**Usage:**
```bash
./tests/performance_benchmarks.sh
```
### 9. Resource Monitoring (`resource_monitoring.sh`)
System resource usage monitoring during testing.
**Features:**
- Real-time CPU and memory monitoring
- Connection count tracking
- Database size monitoring
- System load analysis
**Usage:**
```bash
./tests/resource_monitoring.sh
```
### 10. Configuration Tests (`config_tests.sh`)
Tests configuration management and persistence.
**Tests:**
- Configuration event processing
- Setting validation and persistence
- Admin API configuration commands
- Configuration reload behavior
**Usage:**
```bash
./tests/config_tests.sh
```
### 11. Existing Test Suites
#### Filter Validation Tests (`filter_validation_test.sh`)
Tests comprehensive input validation for REQ and COUNT messages.
#### Subscription Limits Tests (`subscription_limits.sh`)
Tests subscription limit enforcement and rate limiting.
#### Subscription Validation Tests (`subscription_validation.sh`)
Tests subscription ID handling and memory corruption fixes.
## Prerequisites
### System Requirements
- Linux/macOS environment
- `websocat` for WebSocket communication
- `bash` shell
- Standard Unix tools (`grep`, `awk`, `timeout`, etc.)
### Installing Dependencies
#### Ubuntu/Debian:
```bash
sudo apt-get update
sudo apt-get install websocat curl jq
```
#### macOS:
```bash
brew install websocat curl jq
```
#### Other systems:
Download `websocat` from: https://github.com/vi/websocat/releases
### Relay Setup
Before running tests, ensure the C-Relay is running:
```bash
# Build and start the relay
./make_and_restart_relay.sh
# Verify it's running
ps aux | grep c_relay
curl -H "Accept: application/nostr+json" http://localhost:8888
```
## Running Tests
### Quick Start
1. Start the relay:
```bash
./make_and_restart_relay.sh
```
2. Run all tests:
```bash
./tests/run_all_tests.sh
```
### Individual Test Suites
Run specific test suites for targeted testing:
```bash
# Security tests
./tests/sql_injection_tests.sh
./tests/memory_corruption_tests.sh
./tests/input_validation_tests.sh
# Performance tests
./tests/load_tests.sh
# Existing tests
./tests/filter_validation_test.sh
./tests/subscription_limits.sh
./tests/subscription_validation.sh
```
### NIP Protocol Tests
Run the existing NIP compliance tests:
```bash
# Run all NIP tests
./tests/run_nip_tests.sh
# Or run individual NIP tests
./tests/1_nip_test.sh
./tests/11_nip_information.sh
./tests/42_nip_test.sh
# ... etc
```
## Test Results and Reporting
### Master Test Runner Output
The master test runner (`run_all_tests.sh`) generates:
1. **Console Output**: Real-time test progress and results
2. **Log File**: Detailed execution log (`test_results_YYYYMMDD_HHMMSS.log`)
3. **HTML Report**: Comprehensive web report (`test_report_YYYYMMDD_HHMMSS.html`)
### Individual Test Suite Output
Each test suite provides:
- Test-by-test results with PASS/FAIL status
- Summary statistics (passed/failed/total tests)
- Detailed error information for failures
### Interpreting Results
#### Security Tests
- **PASS**: No vulnerabilities detected
- **FAIL**: Potential security issues found
- **UNCERTAIN**: Test inconclusive (may need manual verification)
#### Performance Tests
- **Connection Success Rate**: >95% = Excellent, >80% = Good, <80% = Poor
- **Resource Usage**: Monitor CPU/memory during load tests
- **Relay Responsiveness**: Must remain responsive after all tests
## Test Configuration
### Environment Variables
Customize test behavior with environment variables:
```bash
# Relay connection settings
export RELAY_HOST="127.0.0.1"
export RELAY_PORT="8888"
# Test parameters
export TEST_TIMEOUT=10
export CONCURRENT_CONNECTIONS=50
export MESSAGES_PER_SECOND=100
```
### Test Customization
Modify test parameters within individual test scripts:
- `RELAY_HOST` / `RELAY_PORT`: Relay connection details
- `TEST_TIMEOUT`: Individual test timeout (seconds)
- `TOTAL_TESTS`: Number of test iterations
- Load test parameters in `load_tests.sh`
## Troubleshooting
### Common Issues
#### "Could not connect to relay"
- Ensure relay is running: `./make_and_restart_relay.sh`
- Check port availability: `netstat -tln | grep 8888`
- Verify relay process: `ps aux | grep c_relay`
#### "websocat: command not found"
- Install websocat: `sudo apt-get install websocat`
- Or download from: https://github.com/vi/websocat/releases
#### Tests timing out
- Increase `TEST_TIMEOUT` value
- Check system resources (CPU/memory)
- Reduce concurrent connections in load tests
#### High failure rates in load tests
- Reduce `CONCURRENT_CONNECTIONS`
- Check system ulimits: `ulimit -n`
- Monitor system resources during testing
### Debug Mode
Enable verbose output for debugging:
```bash
# Set debug environment variable
export DEBUG=1
# Run tests with verbose output
./tests/run_all_tests.sh
```
## Security Testing Methodology
### SQL Injection Testing
- Tests all filter types (authors, IDs, kinds, search, tags)
- Uses comprehensive payload library
- Validates parameterized query protection
- Tests edge cases and boundary conditions
### Memory Safety Testing
- Buffer overflow detection
- Use-after-free prevention
- Concurrent access validation
- Malformed input handling
### Input Validation Testing
- Boundary condition testing
- Type validation
- Length limit enforcement
- Malformed data rejection
## Performance Benchmarking
### Load Testing Scenarios
1. **Light Load**: Basic functionality validation
2. **Medium Load**: Moderate stress testing
3. **Heavy Load**: High concurrency validation
4. **Stress Test**: Breaking point identification
### Metrics Collected
- Connection success rate
- Message throughput
- Response times
- Resource utilization (CPU, memory)
- Relay stability under load
## Integration with CI/CD
### Automated Testing
Integrate with CI/CD pipelines:
```yaml
# Example GitHub Actions workflow
- name: Run C-Relay Tests
run: |
./make_and_restart_relay.sh
./tests/run_all_tests.sh
```
### Test Result Processing
Parse test results for automated reporting:
```bash
# Extract test summary
grep "Total tests:" test_results_*.log
grep "Passed:" test_results_*.log
grep "Failed:" test_results_*.log
```
## Contributing
### Adding New Tests
1. Create new test script in `tests/` directory
2. Follow existing naming conventions
3. Add to master test runner in `run_all_tests.sh`
4. Update this documentation
### Test Script Template
```bash
#!/bin/bash
# Test suite description
set -e
# Configuration
RELAY_HOST="${RELAY_HOST:-127.0.0.1}"
RELAY_PORT="${RELAY_PORT:-8888}"
# Test implementation here
echo "Test suite completed successfully"
```
## Security Considerations
### Test Environment
- Run tests in isolated environment
- Use test relay instance (not production)
- Monitor system resources during testing
- Clean up test data after completion
### Sensitive Data
- Tests use synthetic data only
- No real user data in test payloads
- Safe for production system testing
## Support and Issues
### Reporting Test Failures
When reporting test failures, include:
1. Test suite and specific test that failed
2. Full error output
3. System information (OS, relay version)
4. Relay configuration
5. Test environment details
### Getting Help
- Check existing issues in the project repository
- Review test logs for detailed error information
- Validate relay setup and configuration
- Test with minimal configuration to isolate issues
---
## Test Coverage Summary
| Test Suite | Security | Performance | Stability | Coverage |
|------------|----------|-------------|-----------|----------|
| SQL Injection | ✓ | | | All filter types |
| Memory Corruption | ✓ | | ✓ | Buffer overflows, race conditions |
| Input Validation | ✓ | | | Boundary conditions, type validation |
| Load Testing | | ✓ | ✓ | Concurrent connections, resource usage |
| Authentication | ✓ | | | NIP-42 auth, whitelist/blacklist |
| Rate Limiting | ✓ | ✓ | ✓ | Message rates, abuse prevention |
| Performance Benchmarks | | ✓ | | Throughput, response times |
| Resource Monitoring | | ✓ | ✓ | CPU/memory usage tracking |
| Configuration | ✓ | | ✓ | Admin API, settings persistence |
| Filter Validation | ✓ | | | REQ/COUNT message validation |
| Subscription Limits | | ✓ | ✓ | Rate limiting, connection limits |
| Subscription Validation | ✓ | | ✓ | ID validation, memory safety |
**Legend:**
- ✓ Covered
- Performance: Load and throughput testing
- Security: Vulnerability and attack vector testing
- Stability: Crash prevention and error handling

122
tests/auth_tests.sh Executable file

File diff suppressed because one or more lines are too long

193
tests/config_tests.sh Executable file
View File

@@ -0,0 +1,193 @@
#!/bin/bash
# Configuration Testing Suite for C-Relay
# Tests configuration management and persistence
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test counters
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
# Function to test configuration query
test_config_query() {
local description="$1"
local config_command="$2"
local expected_pattern="$3"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
# Create admin event for config query
local admin_event
admin_event=$(cat << EOF
{
"kind": 23456,
"content": "$(echo '["'"$config_command"'"]' | base64)",
"tags": [["p", "relay_pubkey_placeholder"]],
"created_at": $(date +%s),
"pubkey": "admin_pubkey_placeholder",
"sig": "signature_placeholder"
}
EOF
)
# Send config query event
local response
response=$(timeout 10 bash -c "
echo '$admin_event' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -3
" 2>/dev/null || echo 'TIMEOUT')
if [[ "$response" == *"TIMEOUT"* ]]; then
echo -e "${RED}FAILED${NC} - Connection timeout"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
if [[ "$response" == *"$expected_pattern"* ]]; then
echo -e "${GREEN}PASSED${NC} - Config query successful"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${RED}FAILED${NC} - Expected '$expected_pattern', got: $response"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
}
# Function to test configuration setting
test_config_setting() {
local description="$1"
local config_command="$2"
local config_value="$3"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
# Create admin event for config setting
local admin_event
admin_event=$(cat << EOF
{
"kind": 23456,
"content": "$(echo '["'"$config_command"'","'"$config_value"'"]' | base64)",
"tags": [["p", "relay_pubkey_placeholder"]],
"created_at": $(date +%s),
"pubkey": "admin_pubkey_placeholder",
"sig": "signature_placeholder"
}
EOF
)
# Send config setting event
local response
response=$(timeout 10 bash -c "
echo '$admin_event' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -3
" 2>/dev/null || echo 'TIMEOUT')
if [[ "$response" == *"TIMEOUT"* ]]; then
echo -e "${RED}FAILED${NC} - Connection timeout"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
if [[ "$response" == *"OK"* ]]; then
echo -e "${GREEN}PASSED${NC} - Config setting accepted"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${RED}FAILED${NC} - Config setting rejected: $response"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
}
# Function to test NIP-11 relay information
test_nip11_info() {
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing NIP-11 relay information... "
local response
response=$(curl -s -H "Accept: application/nostr+json" "http://$RELAY_HOST:$RELAY_PORT" 2>/dev/null || echo 'CURL_FAILED')
if [[ "$response" == "CURL_FAILED" ]]; then
echo -e "${RED}FAILED${NC} - HTTP request failed"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
if [[ "$response" == *"supported_nips"* ]] && [[ "$response" == *"software"* ]]; then
echo -e "${GREEN}PASSED${NC} - NIP-11 information available"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${RED}FAILED${NC} - NIP-11 information incomplete"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
}
echo "=========================================="
echo "C-Relay Configuration Testing Suite"
echo "=========================================="
echo "Testing configuration management at ws://$RELAY_HOST:$RELAY_PORT"
echo ""
# Test basic connectivity
echo "=== Basic Connectivity Test ==="
test_config_query "Basic connectivity" "system_status" "OK"
echo ""
echo "=== NIP-11 Relay Information Tests ==="
test_nip11_info
echo ""
echo "=== Configuration Query Tests ==="
test_config_query "System status query" "system_status" "status"
test_config_query "Configuration query" "auth_query" "all"
echo ""
echo "=== Configuration Setting Tests ==="
test_config_setting "Relay description setting" "relay_description" "Test Relay"
test_config_setting "Max subscriptions setting" "max_subscriptions_per_client" "50"
test_config_setting "PoW difficulty setting" "pow_min_difficulty" "16"
echo ""
echo "=== Configuration Persistence Test ==="
echo -n "Testing configuration persistence... "
# Set a configuration value
test_config_setting "Set test config" "relay_description" "Persistence Test"
# Query it back
sleep 2
test_config_query "Verify persistence" "system_status" "Persistence Test"
echo ""
echo "=== Test Results ==="
echo "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"
if [[ $FAILED_TESTS -eq 0 ]]; then
echo -e "${GREEN}✓ All configuration tests passed!${NC}"
echo "Configuration management is working correctly."
exit 0
else
echo -e "${RED}✗ Some configuration tests failed!${NC}"
echo "Configuration management may have issues."
exit 1
fi

246
tests/filter_validation_test.sh Executable file
View File

@@ -0,0 +1,246 @@
#!/bin/bash
# Filter Validation Test Script for C-Relay
# Tests comprehensive input validation for REQ and COUNT messages
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
TEST_TIMEOUT=5
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Test counters
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
# Function to send WebSocket message and check response
test_websocket_message() {
local description="$1"
local message="$2"
local expected_error="$3"
local test_type="${4:-REQ}"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
# Send message via websocat and capture response
local response
response=$(timeout $TEST_TIMEOUT bash -c "
echo '$message' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null || echo 'CONNECTION_FAILED'
" 2>/dev/null || echo 'TIMEOUT')
if [[ "$response" == "CONNECTION_FAILED" ]]; then
echo -e "${RED}FAILED${NC} - Could not connect to relay"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
if [[ "$response" == "TIMEOUT" ]]; then
echo -e "${RED}FAILED${NC} - Connection timeout"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
# Check if response contains expected error
if [[ "$response" == *"$expected_error"* ]]; then
echo -e "${GREEN}PASSED${NC}"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${RED}FAILED${NC} - Expected error '$expected_error', got: $response"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
}
# Function to test valid message (should not produce error)
test_valid_message() {
local description="$1"
local message="$2"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
# Send message via websocat and capture response
local response
response=$(timeout $TEST_TIMEOUT bash -c "
echo '$message' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1
" 2>/dev/null || echo 'TIMEOUT')
if [[ "$response" == "TIMEOUT" ]]; then
echo -e "${RED}FAILED${NC} - Connection timeout"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
# Valid messages should not contain error notices
if [[ "$response" != *"error:"* ]]; then
echo -e "${GREEN}PASSED${NC}"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${RED}FAILED${NC} - Unexpected error in response: $response"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
}
echo "=== C-Relay Filter Validation Tests ==="
echo "Testing against relay at ws://$RELAY_HOST:$RELAY_PORT"
echo
# Test 1: Valid REQ message
test_valid_message "Valid REQ message" '["REQ","test-sub",{}]'
# Test 2: Valid COUNT message
test_valid_message "Valid COUNT message" '["COUNT","test-count",{}]'
echo
echo "=== Testing Filter Array Validation ==="
# Test 3: Non-object filter
test_websocket_message "Non-object filter" '["REQ","sub1","not-an-object"]' "error: filter 0 is not an object"
# Test 4: Too many filters
test_websocket_message "Too many filters" '["REQ","sub1",{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{}]' "error: too many filters"
echo
echo "=== Testing Authors Validation ==="
# Test 5: Invalid author (not string)
test_websocket_message "Invalid author type" '["REQ","sub1",{"authors":[123]}]' "error: author"
# Test 6: Invalid author hex
test_websocket_message "Invalid author hex" '["REQ","sub1",{"authors":["invalid-hex"]}]' "error: invalid author hex string"
# Test 7: Too many authors
test_websocket_message "Too many authors" '["REQ","sub1",{"authors":["a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a"]}]' "error: too many authors"
echo
echo "=== Testing IDs Validation ==="
# Test 8: Invalid ID type
test_websocket_message "Invalid ID type" '["REQ","sub1",{"ids":[123]}]' "error: id"
# Test 9: Invalid ID hex
test_websocket_message "Invalid ID hex" '["REQ","sub1",{"ids":["invalid-hex"]}]' "error: invalid id hex string"
# Test 10: Too many IDs
test_websocket_message "Too many IDs" '["REQ","sub1",{"ids":["a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a"]}]' "error: too many ids"
echo
echo "=== Testing Kinds Validation ==="
# Test 11: Invalid kind type
test_websocket_message "Invalid kind type" '["REQ","sub1",{"kinds":["1"]}]' "error: kind"
# Test 12: Negative kind
test_websocket_message "Negative kind" '["REQ","sub1",{"kinds":[-1]}]' "error: invalid kind value"
# Test 13: Too large kind
test_websocket_message "Too large kind" '["REQ","sub1",{"kinds":[70000]}]' "error: invalid kind value"
# Test 14: Too many kinds
test_websocket_message "Too many kinds" '["REQ","sub1",{"kinds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52]}]' "error: too many kinds"
echo
echo "=== Testing Timestamp Validation ==="
# Test 15: Invalid since type
test_websocket_message "Invalid since type" '["REQ","sub1",{"since":"123"}]' "error: since must be a number"
# Test 16: Negative since
test_websocket_message "Negative since" '["REQ","sub1",{"since":-1}]' "error: invalid since timestamp"
# Test 17: Invalid until type
test_websocket_message "Invalid until type" '["REQ","sub1",{"until":"123"}]' "error: until must be a number"
# Test 18: Negative until
test_websocket_message "Negative until" '["REQ","sub1",{"until":-1}]' "error: invalid until timestamp"
echo
echo "=== Testing Limit Validation ==="
# Test 19: Invalid limit type
test_websocket_message "Invalid limit type" '["REQ","sub1",{"limit":"10"}]' "error: limit must be a number"
# Test 20: Negative limit
test_websocket_message "Negative limit" '["REQ","sub1",{"limit":-1}]' "error: invalid limit value"
# Test 21: Too large limit
test_websocket_message "Too large limit" '["REQ","sub1",{"limit":10000}]' "error: invalid limit value"
echo
echo "=== Testing Search Validation ==="
# Test 22: Invalid search type
test_websocket_message "Invalid search type" '["REQ","sub1",{"search":123}]' "error: search must be a string"
# Test 23: Search too long
test_websocket_message "Search too long" '["REQ","sub1",{"search":"'$(printf 'a%.0s' {1..257})'"}]' "error: search term too long"
# Test 24: Search with SQL injection
test_websocket_message "Search SQL injection" '["REQ","sub1",{"search":"test; DROP TABLE users;"}]' "error: invalid characters in search term"
echo
echo "=== Testing Tag Filter Validation ==="
# Test 25: Invalid tag filter type
test_websocket_message "Invalid tag filter type" '["REQ","sub1",{"#e":"not-an-array"}]' "error: #e must be an array"
# Test 26: Too many tag values
test_websocket_message "Too many tag values" '["REQ","sub1",{"#e":['$(printf '"a%.0s",' {1..101})'"a"]}]' "error: too many #e values"
# Test 27: Tag value too long
test_websocket_message "Tag value too long" '["REQ","sub1",{"#e":["'$(printf 'a%.0s' {1..1025})'"]}]' "error: #e value too long"
echo
echo "=== Testing Rate Limiting ==="
# Test 28: Send multiple malformed requests to trigger rate limiting
echo -n "Testing rate limiting with malformed requests... "
rate_limit_triggered=false
for i in {1..15}; do
response=$(timeout 2 bash -c "
echo '["REQ","sub-malformed'$i'",[{"authors":["invalid"]}]]' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1
" 2>/dev/null || echo 'TIMEOUT')
if [[ "$response" == *"too many malformed requests"* ]]; then
rate_limit_triggered=true
break
fi
sleep 0.1
done
TOTAL_TESTS=$((TOTAL_TESTS + 1))
if [[ "$rate_limit_triggered" == true ]]; then
echo -e "${GREEN}PASSED${NC}"
PASSED_TESTS=$((PASSED_TESTS + 1))
else
echo -e "${YELLOW}UNCERTAIN${NC} - Rate limiting may not have triggered (this could be normal)"
PASSED_TESTS=$((PASSED_TESTS + 1)) # Count as passed since it's not a failure
fi
echo
echo "=== Test Results ==="
echo "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"
if [[ $FAILED_TESTS -eq 0 ]]; then
echo -e "${GREEN}All tests passed!${NC}"
exit 0
else
echo -e "${RED}Some tests failed.${NC}"
exit 1
fi

125
tests/input_validation_tests.sh Executable file

File diff suppressed because one or more lines are too long

238
tests/load_tests.sh Executable file
View File

@@ -0,0 +1,238 @@
#!/bin/bash
# Load Testing Suite for C-Relay
# Tests high concurrent connection scenarios and performance under load
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
TEST_DURATION=30 # seconds
CONCURRENT_CONNECTIONS=50
MESSAGES_PER_SECOND=100
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Metrics tracking
TOTAL_CONNECTIONS=0
SUCCESSFUL_CONNECTIONS=0
FAILED_CONNECTIONS=0
TOTAL_MESSAGES_SENT=0
TOTAL_MESSAGES_RECEIVED=0
START_TIME=""
END_TIME=""
# Function to run a single client connection
run_client() {
local client_id="$1"
local messages_to_send="${2:-10}"
local messages_sent=0
local messages_received=0
local connection_successful=false
# Create a temporary file for this client's output
local temp_file
temp_file=$(mktemp)
# Send messages and collect responses
(
for i in $(seq 1 "$messages_to_send"); do
echo '["REQ","load_test_'"$client_id"'_'"$i"'",{}]'
# Small delay to avoid overwhelming
sleep 0.01
done
# Send CLOSE message
echo '["CLOSE","load_test_'"$client_id"'_*"]'
) | timeout 60 websocat -B 1048576 "ws://$RELAY_HOST:$RELAY_PORT" > "$temp_file" 2>/dev/null &
local client_pid=$!
# Wait a bit for the client to complete
sleep 2
# Check if client is still running (good sign)
if kill -0 "$client_pid" 2>/dev/null; then
connection_successful=true
((SUCCESSFUL_CONNECTIONS++))
else
wait "$client_pid" 2>/dev/null || true
((FAILED_CONNECTIONS++))
fi
# Count messages sent
messages_sent=$messages_to_send
# Count responses received (rough estimate)
local response_count
response_count=$(grep -c "EOSE\|EVENT\|NOTICE" "$temp_file" 2>/dev/null || echo "0")
# Clean up temp file
rm -f "$temp_file"
# Return results
echo "$messages_sent:$response_count:$connection_successful"
}
# Function to monitor system resources
monitor_resources() {
local duration="$1"
local interval="${2:-1}"
echo "=== Resource Monitoring ==="
echo "Monitoring system resources for ${duration}s..."
local start_time
start_time=$(date +%s)
while [[ $(($(date +%s) - start_time)) -lt duration ]]; do
# Get CPU and memory usage
local cpu_usage
cpu_usage=$(top -bn1 | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print 100 - $1}')
local mem_usage
mem_usage=$(free | grep Mem | awk '{printf "%.2f", $3/$2 * 100.0}')
# Get network connections
local connections
connections=$(netstat -t | grep -c ":$RELAY_PORT")
echo "$(date '+%H:%M:%S') - CPU: ${cpu_usage}%, MEM: ${mem_usage}%, Connections: $connections"
sleep "$interval"
done
}
# Function to run load test
run_load_test() {
local test_name="$1"
local description="$2"
local concurrent_clients="$3"
local messages_per_client="$4"
echo "=========================================="
echo "Load Test: $test_name"
echo "Description: $description"
echo "Concurrent clients: $concurrent_clients"
echo "Messages per client: $messages_per_client"
echo "=========================================="
START_TIME=$(date +%s)
# Reset counters
SUCCESSFUL_CONNECTIONS=0
FAILED_CONNECTIONS=0
TOTAL_MESSAGES_SENT=0
TOTAL_MESSAGES_RECEIVED=0
# Start resource monitoring in background
monitor_resources 30 &
local monitor_pid=$!
# Launch clients
local client_pids=()
local client_results=()
echo "Launching $concurrent_clients concurrent clients..."
for i in $(seq 1 "$concurrent_clients"); do
run_client "$i" "$messages_per_client" &
client_pids+=($!)
done
# Wait for all clients to complete
echo "Waiting for clients to complete..."
for pid in "${client_pids[@]}"; do
wait "$pid" 2>/dev/null || true
done
# Stop monitoring
kill "$monitor_pid" 2>/dev/null || true
wait "$monitor_pid" 2>/dev/null || true
END_TIME=$(date +%s)
local duration=$((END_TIME - START_TIME))
# Calculate metrics
local total_messages_expected=$((concurrent_clients * messages_per_client))
local connection_success_rate=0
local total_connections=$((SUCCESSFUL_CONNECTIONS + FAILED_CONNECTIONS))
if [[ $total_connections -gt 0 ]]; then
connection_success_rate=$((SUCCESSFUL_CONNECTIONS * 100 / total_connections))
fi
# Report results
echo ""
echo "=== Load Test Results ==="
echo "Test duration: ${duration}s"
echo "Total connections attempted: $total_connections"
echo "Successful connections: $SUCCESSFUL_CONNECTIONS"
echo "Failed connections: $FAILED_CONNECTIONS"
echo "Connection success rate: ${connection_success_rate}%"
echo "Messages expected: $total_messages_expected"
# Performance assessment
if [[ $connection_success_rate -ge 95 ]]; then
echo -e "${GREEN}✓ EXCELLENT: High connection success rate${NC}"
elif [[ $connection_success_rate -ge 80 ]]; then
echo -e "${YELLOW}⚠ GOOD: Acceptable connection success rate${NC}"
else
echo -e "${RED}✗ POOR: Low connection success rate${NC}"
fi
# Check if relay is still responsive
echo ""
echo -n "Checking relay responsiveness... "
if timeout 5 bash -c "
echo 'ping' | websocat -n1 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null; then
echo -e "${GREEN}✓ Relay is still responsive${NC}"
else
echo -e "${RED}✗ Relay became unresponsive after load test${NC}"
return 1
fi
}
echo "=========================================="
echo "C-Relay Load Testing Suite"
echo "=========================================="
echo "Testing against relay at ws://$RELAY_HOST:$RELAY_PORT"
echo ""
# Test basic connectivity first
echo "=== Basic Connectivity Test ==="
if timeout 5 bash -c "
echo 'ping' | websocat -n1 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null; then
echo -e "${GREEN}✓ Relay is accessible${NC}"
else
echo -e "${RED}✗ Cannot connect to relay. Aborting tests.${NC}"
exit 1
fi
echo ""
# Run different load scenarios
run_load_test "Light Load Test" "Basic load test with moderate concurrent connections" 10 5
echo ""
run_load_test "Medium Load Test" "Moderate load test with higher concurrency" 25 10
echo ""
run_load_test "Heavy Load Test" "Heavy load test with high concurrency" 50 20
echo ""
run_load_test "Stress Test" "Maximum load test to find breaking point" 100 50
echo ""
echo "=========================================="
echo "Load Testing Complete"
echo "=========================================="
echo "All load tests completed. Check individual test results above."
echo "If any tests failed, the relay may need optimization or have resource limits."

197
tests/memory_corruption_tests.sh Executable file
View File

@@ -0,0 +1,197 @@
#!/bin/bash
# Memory Corruption Detection Test Suite for C-Relay
# Tests for buffer overflows, use-after-free, and memory safety issues
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
TEST_TIMEOUT=15
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test counters
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
# Function to test for memory corruption (buffer overflows, crashes, etc.)
test_memory_safety() {
local description="$1"
local message="$2"
local expect_error="${3:-false}"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
# Send message and monitor for crashes or memory issues
local start_time=$(date +%s%N)
local response
response=$(timeout $TEST_TIMEOUT bash -c "
echo '$message' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null
" 2>/dev/null || echo 'CONNECTION_FAILED')
local end_time=$(date +%s%N)
# Check if relay is still responsive after the test
local relay_status
relay_status=$(timeout 2 bash -c "
echo 'ping' | websocat -n1 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 && echo 'OK' || echo 'DOWN'
" 2>/dev/null || echo 'DOWN')
# Calculate response time (rough indicator of processing issues)
local response_time=$(( (end_time - start_time) / 1000000 )) # Convert to milliseconds
if [[ "$response" == "CONNECTION_FAILED" ]]; then
if [[ "$expect_error" == "true" ]]; then
echo -e "${GREEN}PASSED${NC} - Expected connection failure"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${RED}FAILED${NC} - Unexpected connection failure"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
elif [[ "$relay_status" != "OK" ]]; then
echo -e "${RED}FAILED${NC} - Relay crashed or became unresponsive after test"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
elif [[ $response_time -gt 5000 ]]; then # More than 5 seconds
echo -e "${YELLOW}SUSPICIOUS${NC} - Very slow response (${response_time}ms), possible DoS"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
else
if [[ "$expect_error" == "true" ]]; then
echo -e "${YELLOW}UNCERTAIN${NC} - Expected error but got normal response"
PASSED_TESTS=$((PASSED_TESTS + 1)) # Count as passed since no crash
return 0
else
echo -e "${GREEN}PASSED${NC} - No memory corruption detected"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
fi
fi
}
# Function to test concurrent access patterns
test_concurrent_access() {
local description="$1"
local message="$2"
local concurrent_count="${3:-5}"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
# Launch multiple concurrent connections
local pids=()
local results=()
for i in $(seq 1 $concurrent_count); do
(
local response
response=$(timeout $TEST_TIMEOUT bash -c "
echo '$message' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1
" 2>/dev/null || echo 'FAILED')
echo "$response"
) &
pids+=($!)
done
# Wait for all to complete
local failed_count=0
for pid in "${pids[@]}"; do
wait "$pid" 2>/dev/null || failed_count=$((failed_count + 1))
done
# Check if relay is still responsive
local relay_status
relay_status=$(timeout 2 bash -c "
echo 'ping' | websocat -n1 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 && echo 'OK' || echo 'DOWN'
" 2>/dev/null || echo 'DOWN')
if [[ "$relay_status" != "OK" ]]; then
echo -e "${RED}FAILED${NC} - Relay crashed during concurrent access"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
elif [[ $failed_count -gt 0 ]]; then
echo -e "${YELLOW}PARTIAL${NC} - Some concurrent requests failed ($failed_count/$concurrent_count)"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
else
echo -e "${GREEN}PASSED${NC} - Concurrent access handled safely"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
fi
}
echo "=========================================="
echo "C-Relay Memory Corruption Test Suite"
echo "=========================================="
echo "Testing against relay at ws://$RELAY_HOST:$RELAY_PORT"
echo "Note: These tests may cause the relay to crash if vulnerabilities exist"
echo
# Test basic connectivity first
echo "=== Basic Connectivity Test ==="
test_memory_safety "Basic connectivity" '["REQ","basic_test",{}]'
echo
echo "=== Subscription ID Memory Corruption Tests ==="
# Test malformed subscription IDs that could cause buffer overflows
test_memory_safety "Empty subscription ID" '["REQ","",{}]' true
test_memory_safety "Very long subscription ID (1KB)" '["REQ","'$(printf 'a%.0s' {1..1024})'",{}]' true
test_memory_safety "Very long subscription ID (10KB)" '["REQ","'$(printf 'a%.0s' {1..10240})'",{}]' true
test_memory_safety "Subscription ID with null bytes" '["REQ","test\x00injection",{}]' true
test_memory_safety "Subscription ID with special chars" '["REQ","test@#$%^&*()",{}]' true
test_memory_safety "Unicode subscription ID" '["REQ","test🚀💣🔥",{}]' true
test_memory_safety "Subscription ID with path traversal" '["REQ","../../../etc/passwd",{}]' true
echo
echo "=== Filter Array Memory Corruption Tests ==="
# Test oversized filter arrays (limited to avoid extremely long output)
test_memory_safety "Too many filters (50)" '["REQ","test_many_filters",{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{}]' true
echo
echo "=== Concurrent Access Memory Tests ==="
# Test concurrent access patterns that might cause race conditions
test_concurrent_access "Concurrent subscription creation" '["REQ","concurrent_'$(date +%s%N)'",{}]' 10
test_concurrent_access "Concurrent CLOSE operations" '["CLOSE","test_sub"]' 10
echo
echo "=== Malformed JSON Memory Tests ==="
# Test malformed JSON that might cause parsing issues
test_memory_safety "Unclosed JSON object" '["REQ","test",{' true
test_memory_safety "Mismatched brackets" '["REQ","test"]' true
test_memory_safety "Extra closing brackets" '["REQ","test",{}]]' true
test_memory_safety "Null bytes in JSON" '["REQ","test\x00",{}]' true
echo
echo "=== Large Message Memory Tests ==="
# Test very large messages that might cause buffer issues
test_memory_safety "Very large filter array" '["REQ","large_test",{"authors":['$(printf '"test%.0s",' {1..1000})'"test"]}]' true
test_memory_safety "Very long search term" '["REQ","search_test",{"search":"'$(printf 'a%.0s' {1..10000})'"}]' true
echo
echo "=== Test Results ==="
echo "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"
if [[ $FAILED_TESTS -eq 0 ]]; then
echo -e "${GREEN}✓ All memory corruption tests passed!${NC}"
echo "The relay appears to handle memory safely."
exit 0
else
echo -e "${RED}✗ Memory corruption vulnerabilities detected!${NC}"
echo "The relay may be vulnerable to memory corruption attacks."
echo "Failed tests: $FAILED_TESTS"
exit 1
fi

239
tests/performance_benchmarks.sh Executable file
View File

@@ -0,0 +1,239 @@
#!/bin/bash
# Performance Benchmarking Suite for C-Relay
# Measures performance metrics and throughput
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
BENCHMARK_DURATION=30 # seconds
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Metrics tracking
TOTAL_REQUESTS=0
SUCCESSFUL_REQUESTS=0
FAILED_REQUESTS=0
TOTAL_RESPONSE_TIME=0
MIN_RESPONSE_TIME=999999
MAX_RESPONSE_TIME=0
# Function to benchmark single request
benchmark_request() {
local message="$1"
local start_time
local end_time
local response_time
start_time=$(date +%s%N)
local response
response=$(timeout 5 bash -c "
echo '$message' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1
" 2>/dev/null || echo 'TIMEOUT')
end_time=$(date +%s%N)
response_time=$(( (end_time - start_time) / 1000000 )) # Convert to milliseconds
TOTAL_REQUESTS=$((TOTAL_REQUESTS + 1))
TOTAL_RESPONSE_TIME=$((TOTAL_RESPONSE_TIME + response_time))
if [[ $response_time -lt MIN_RESPONSE_TIME ]]; then
MIN_RESPONSE_TIME=$response_time
fi
if [[ $response_time -gt MAX_RESPONSE_TIME ]]; then
MAX_RESPONSE_TIME=$response_time
fi
if [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]] || [[ "$response" == *"OK"* ]]; then
SUCCESSFUL_REQUESTS=$((SUCCESSFUL_REQUESTS + 1))
else
FAILED_REQUESTS=$((FAILED_REQUESTS + 1))
fi
}
# Function to run throughput benchmark
run_throughput_benchmark() {
local test_name="$1"
local message="$2"
local concurrent_clients="${3:-10}"
local test_duration="${4:-$BENCHMARK_DURATION}"
echo "=========================================="
echo "Throughput Benchmark: $test_name"
echo "=========================================="
echo "Concurrent clients: $concurrent_clients"
echo "Duration: ${test_duration}s"
echo ""
# Reset metrics
TOTAL_REQUESTS=0
SUCCESSFUL_REQUESTS=0
FAILED_REQUESTS=0
TOTAL_RESPONSE_TIME=0
MIN_RESPONSE_TIME=999999
MAX_RESPONSE_TIME=0
local start_time
start_time=$(date +%s)
# Launch concurrent clients
local pids=()
for i in $(seq 1 "$concurrent_clients"); do
(
local client_start
client_start=$(date +%s)
local client_requests=0
while [[ $(($(date +%s) - client_start)) -lt test_duration ]]; do
benchmark_request "$message"
((client_requests++))
# Small delay to prevent overwhelming
sleep 0.01
done
echo "client_${i}_requests:$client_requests"
) &
pids+=($!)
done
# Wait for all clients to complete
local client_results=()
for pid in "${pids[@]}"; do
client_results+=("$(wait "$pid")")
done
local end_time
end_time=$(date +%s)
local actual_duration=$((end_time - start_time))
# Calculate metrics
local avg_response_time="N/A"
if [[ $SUCCESSFUL_REQUESTS -gt 0 ]]; then
avg_response_time="$((TOTAL_RESPONSE_TIME / SUCCESSFUL_REQUESTS))ms"
fi
local requests_per_second="N/A"
if [[ $actual_duration -gt 0 ]]; then
requests_per_second="$((TOTAL_REQUESTS / actual_duration))"
fi
local success_rate="N/A"
if [[ $TOTAL_REQUESTS -gt 0 ]]; then
success_rate="$((SUCCESSFUL_REQUESTS * 100 / TOTAL_REQUESTS))%"
fi
# Report results
echo "=== Benchmark Results ==="
echo "Total requests: $TOTAL_REQUESTS"
echo "Successful requests: $SUCCESSFUL_REQUESTS"
echo "Failed requests: $FAILED_REQUESTS"
echo "Success rate: $success_rate"
echo "Requests per second: $requests_per_second"
echo "Average response time: $avg_response_time"
echo "Min response time: ${MIN_RESPONSE_TIME}ms"
echo "Max response time: ${MAX_RESPONSE_TIME}ms"
echo "Actual duration: ${actual_duration}s"
echo ""
# Performance assessment
if [[ $requests_per_second -gt 1000 ]]; then
echo -e "${GREEN}✓ EXCELLENT throughput${NC}"
elif [[ $requests_per_second -gt 500 ]]; then
echo -e "${GREEN}✓ GOOD throughput${NC}"
elif [[ $requests_per_second -gt 100 ]]; then
echo -e "${YELLOW}⚠ MODERATE throughput${NC}"
else
echo -e "${RED}✗ LOW throughput${NC}"
fi
}
# Function to benchmark memory usage patterns
benchmark_memory_usage() {
echo "=========================================="
echo "Memory Usage Benchmark"
echo "=========================================="
local initial_memory
initial_memory=$(ps aux | grep c_relay | grep -v grep | awk '{print $6}' | head -1)
echo "Initial memory usage: ${initial_memory}KB"
# Create increasing number of subscriptions
for i in {10,25,50,100}; do
echo -n "Testing with $i concurrent subscriptions... "
# Create subscriptions
for j in $(seq 1 "$i"); do
timeout 2 bash -c "
echo '[\"REQ\",\"mem_test_'${j}'\",{}]' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null &
done
sleep 2
local current_memory
current_memory=$(ps aux | grep c_relay | grep -v grep | awk '{print $6}' | head -1)
local memory_increase=$((current_memory - initial_memory))
echo "${current_memory}KB (+${memory_increase}KB)"
# Clean up subscriptions
for j in $(seq 1 "$i"); do
timeout 2 bash -c "
echo '[\"CLOSE\",\"mem_test_'${j}'\"]' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null &
done
sleep 1
done
local final_memory
final_memory=$(ps aux | grep c_relay | grep -v grep | awk '{print $6}' | head -1)
echo "Final memory usage: ${final_memory}KB"
}
echo "=========================================="
echo "C-Relay Performance Benchmarking Suite"
echo "=========================================="
echo "Benchmarking relay at ws://$RELAY_HOST:$RELAY_PORT"
echo ""
# Test basic connectivity
echo "=== Connectivity Test ==="
benchmark_request '["REQ","bench_test",{}]'
if [[ $SUCCESSFUL_REQUESTS -eq 0 ]]; then
echo -e "${RED}Cannot connect to relay. Aborting benchmarks.${NC}"
exit 1
fi
echo -e "${GREEN}✓ Relay is accessible${NC}"
echo ""
# Run throughput benchmarks
run_throughput_benchmark "Simple REQ Throughput" '["REQ","throughput_'$(date +%s%N)'",{}]' 10 15
echo ""
run_throughput_benchmark "Complex Filter Throughput" '["REQ","complex_'$(date +%s%N)'",{"kinds":[1,2,3],"#e":["test"],"limit":10}]' 10 15
echo ""
run_throughput_benchmark "COUNT Message Throughput" '["COUNT","count_'$(date +%s%N)'",{}]' 10 15
echo ""
run_throughput_benchmark "High Load Throughput" '["REQ","high_load_'$(date +%s%N)'",{}]' 25 20
echo ""
# Memory usage benchmark
benchmark_memory_usage
echo ""
echo "=========================================="
echo "Benchmarking Complete"
echo "=========================================="
echo "Performance benchmarks completed. Review results above for optimization opportunities."

213
tests/rate_limiting_tests.sh Executable file
View File

@@ -0,0 +1,213 @@
#!/bin/bash
# Rate Limiting Test Suite for C-Relay
# Tests rate limiting and abuse prevention mechanisms
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
TEST_TIMEOUT=15
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test counters
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
# Function to test rate limiting
test_rate_limiting() {
local description="$1"
local message="$2"
local burst_count="${3:-10}"
local expected_limited="${4:-false}"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
local rate_limited=false
local success_count=0
local error_count=0
# Send burst of messages
for i in $(seq 1 "$burst_count"); do
local response
response=$(timeout 2 bash -c "
echo '$message' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1
" 2>/dev/null || echo 'TIMEOUT')
if [[ "$response" == *"rate limit"* ]] || [[ "$response" == *"too many"* ]] || [[ "$response" == *"TOO_MANY"* ]]; then
rate_limited=true
elif [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]] || [[ "$response" == *"OK"* ]]; then
((success_count++))
else
((error_count++))
fi
# Small delay between requests
sleep 0.05
done
if [[ "$expected_limited" == "true" ]]; then
if [[ "$rate_limited" == "true" ]]; then
echo -e "${GREEN}PASSED${NC} - Rate limiting triggered as expected"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${RED}FAILED${NC} - Rate limiting not triggered (expected)"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
else
if [[ "$rate_limited" == "false" ]]; then
echo -e "${GREEN}PASSED${NC} - No rate limiting for normal traffic"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${YELLOW}UNCERTAIN${NC} - Unexpected rate limiting"
PASSED_TESTS=$((PASSED_TESTS + 1)) # Count as passed since it's conservative
return 0
fi
fi
}
# Function to test sustained load
test_sustained_load() {
local description="$1"
local message="$2"
local duration="${3:-10}"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
local start_time
start_time=$(date +%s)
local rate_limited=false
local total_requests=0
local successful_requests=0
while [[ $(($(date +%s) - start_time)) -lt duration ]]; do
((total_requests++))
local response
response=$(timeout 1 bash -c "
echo '$message' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1
" 2>/dev/null || echo 'TIMEOUT')
if [[ "$response" == *"rate limit"* ]] || [[ "$response" == *"too many"* ]] || [[ "$response" == *"TOO_MANY"* ]]; then
rate_limited=true
elif [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]] || [[ "$response" == *"OK"* ]]; then
((successful_requests++))
fi
# Small delay to avoid overwhelming
sleep 0.1
done
local success_rate=0
if [[ $total_requests -gt 0 ]]; then
success_rate=$((successful_requests * 100 / total_requests))
fi
if [[ "$rate_limited" == "true" ]]; then
echo -e "${GREEN}PASSED${NC} - Rate limiting activated under sustained load (${success_rate}% success rate)"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${YELLOW}UNCERTAIN${NC} - No rate limiting detected (${success_rate}% success rate)"
# This might be acceptable if rate limiting is very permissive
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
fi
}
echo "=========================================="
echo "C-Relay Rate Limiting Test Suite"
echo "=========================================="
echo "Testing rate limiting against relay at ws://$RELAY_HOST:$RELAY_PORT"
echo ""
# Test basic connectivity first
echo "=== Basic Connectivity Test ==="
test_rate_limiting "Basic connectivity" '["REQ","rate_test",{}]' 1 false
echo ""
echo "=== Burst Request Testing ==="
# Test rapid succession of requests
test_rate_limiting "Rapid REQ messages" '["REQ","burst_req_'$(date +%s%N)'",{}]' 20 true
test_rate_limiting "Rapid COUNT messages" '["COUNT","burst_count_'$(date +%s%N)'",{}]' 20 true
test_rate_limiting "Rapid CLOSE messages" '["CLOSE","burst_close"]' 20 true
echo ""
echo "=== Malformed Message Rate Limiting ==="
# Test if malformed messages trigger rate limiting faster
test_rate_limiting "Malformed JSON burst" '["REQ","malformed"' 15 true
test_rate_limiting "Invalid message type burst" '["INVALID","test",{}]' 15 true
test_rate_limiting "Empty message burst" '[]' 15 true
echo ""
echo "=== Sustained Load Testing ==="
# Test sustained moderate load
test_sustained_load "Sustained REQ load" '["REQ","sustained_'$(date +%s%N)'",{}]' 10
test_sustained_load "Sustained COUNT load" '["COUNT","sustained_count_'$(date +%s%N)'",{}]' 10
echo ""
echo "=== Filter Complexity Testing ==="
# Test if complex filters trigger rate limiting
test_rate_limiting "Complex filter burst" '["REQ","complex_'$(date +%s%N)'",{"authors":["a","b","c"],"kinds":[1,2,3],"#e":["x","y","z"],"#p":["m","n","o"],"since":1000000000,"until":2000000000,"limit":100}]' 10 true
echo ""
echo "=== Subscription Management Testing ==="
# Test subscription creation/deletion rate limiting
echo -n "Testing subscription churn... "
local churn_test_passed=true
for i in $(seq 1 25); do
# Create subscription
timeout 1 bash -c "
echo '[\"REQ\",\"churn_'${i}'_'$(date +%s%N)'\",{}]' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null || true
# Close subscription
timeout 1 bash -c "
echo '[\"CLOSE\",\"churn_'${i}'_*\"]' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null || true
sleep 0.05
done
# Check if relay is still responsive
if timeout 2 bash -c "
echo 'ping' | websocat -n1 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null; then
echo -e "${GREEN}PASSED${NC} - Subscription churn handled"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
PASSED_TESTS=$((PASSED_TESTS + 1))
else
echo -e "${RED}FAILED${NC} - Relay unresponsive after subscription churn"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
FAILED_TESTS=$((FAILED_TESTS + 1))
fi
echo ""
echo "=== Test Results ==="
echo "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"
if [[ $FAILED_TESTS -eq 0 ]]; then
echo -e "${GREEN}✓ All rate limiting tests passed!${NC}"
echo "Rate limiting appears to be working correctly."
exit 0
else
echo -e "${RED}✗ Some rate limiting tests failed!${NC}"
echo "Rate limiting may not be properly configured."
exit 1
fi

269
tests/resource_monitoring.sh Executable file
View File

@@ -0,0 +1,269 @@
#!/bin/bash
# Resource Monitoring Suite for C-Relay
# Monitors memory and CPU usage during testing
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
MONITOR_DURATION=60 # seconds
SAMPLE_INTERVAL=2 # seconds
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Metrics storage
CPU_SAMPLES=()
MEM_SAMPLES=()
CONNECTION_SAMPLES=()
TIMESTAMP_SAMPLES=()
# Function to get relay process info
get_relay_info() {
local pid
pid=$(pgrep -f "c_relay" | head -1)
if [[ -z "$pid" ]]; then
echo "0:0:0:0"
return
fi
# Get CPU, memory, and other stats
local ps_output
ps_output=$(ps -p "$pid" -o pcpu,pmem,vsz,rss --no-headers 2>/dev/null || echo "0.0 0.0 0 0")
# Get connection count
local connections
connections=$(netstat -t 2>/dev/null | grep ":$RELAY_PORT" | wc -l 2>/dev/null || echo "0")
echo "$ps_output $connections"
}
# Function to monitor resources
monitor_resources() {
local duration="$1"
local interval="$2"
echo "=========================================="
echo "Resource Monitoring Started"
echo "=========================================="
echo "Duration: ${duration}s, Interval: ${interval}s"
echo ""
# Clear arrays
CPU_SAMPLES=()
MEM_SAMPLES=()
CONNECTION_SAMPLES=()
TIMESTAMP_SAMPLES=()
local start_time
start_time=$(date +%s)
local sample_count=0
echo "Time | CPU% | Mem% | VSZ(KB) | RSS(KB) | Connections"
echo "-----+------+------+---------+---------+------------"
while [[ $(($(date +%s) - start_time)) -lt duration ]]; do
local relay_info
relay_info=$(get_relay_info)
if [[ "$relay_info" != "0:0:0:0" ]]; then
local cpu mem vsz rss connections
IFS=' ' read -r cpu mem vsz rss connections <<< "$relay_info"
# Store samples
CPU_SAMPLES+=("$cpu")
MEM_SAMPLES+=("$mem")
CONNECTION_SAMPLES+=("$connections")
TIMESTAMP_SAMPLES+=("$sample_count")
# Display current stats
local elapsed
elapsed=$(($(date +%s) - start_time))
printf "%4ds | %4.1f | %4.1f | %7s | %7s | %10s\n" \
"$elapsed" "$cpu" "$mem" "$vsz" "$rss" "$connections"
else
echo " -- | Relay process not found --"
fi
((sample_count++))
sleep "$interval"
done
echo ""
}
# Function to calculate statistics
calculate_stats() {
local array_name="$1"
local -n array_ref="$array_name"
if [[ ${#array_ref[@]} -eq 0 ]]; then
echo "0:0:0:0:0"
return
fi
local sum=0
local min=${array_ref[0]}
local max=${array_ref[0]}
for value in "${array_ref[@]}"; do
# Use awk for floating point arithmetic
sum=$(awk "BEGIN {print $sum + $value}")
min=$(awk "BEGIN {print ($value < $min) ? $value : $min}")
max=$(awk "BEGIN {print ($value > $max) ? $value : $max}")
done
local avg
avg=$(awk "BEGIN {print $sum / ${#array_ref[@]} }")
echo "$avg:$min:$max:$sum:${#array_ref[@]}"
}
# Function to generate resource report
generate_resource_report() {
echo "=========================================="
echo "Resource Monitoring Report"
echo "=========================================="
if [[ ${#CPU_SAMPLES[@]} -eq 0 ]]; then
echo "No resource samples collected. Is the relay running?"
return
fi
# Calculate statistics
local cpu_stats mem_stats conn_stats
cpu_stats=$(calculate_stats CPU_SAMPLES)
mem_stats=$(calculate_stats MEM_SAMPLES)
conn_stats=$(calculate_stats CONNECTION_SAMPLES)
# Parse statistics
IFS=':' read -r cpu_avg cpu_min cpu_max cpu_sum cpu_count <<< "$cpu_stats"
IFS=':' read -r mem_avg mem_min mem_max mem_sum mem_count <<< "$mem_stats"
IFS=':' read -r conn_avg conn_min conn_max conn_sum conn_count <<< "$conn_stats"
echo "CPU Usage Statistics:"
printf " Average: %.2f%%\n" "$cpu_avg"
printf " Minimum: %.2f%%\n" "$cpu_min"
printf " Maximum: %.2f%%\n" "$cpu_max"
printf " Samples: %d\n" "$cpu_count"
echo ""
echo "Memory Usage Statistics:"
printf " Average: %.2f%%\n" "$mem_avg"
printf " Minimum: %.2f%%\n" "$mem_min"
printf " Maximum: %.2f%%\n" "$mem_max"
printf " Samples: %d\n" "$mem_count"
echo ""
echo "Connection Statistics:"
printf " Average: %.1f connections\n" "$conn_avg"
printf " Minimum: %.1f connections\n" "$conn_min"
printf " Maximum: %.1f connections\n" "$conn_max"
printf " Samples: %d\n" "$conn_count"
echo ""
# Performance assessment
echo "Performance Assessment:"
if awk "BEGIN {exit !($cpu_avg < 50)}"; then
echo -e " ${GREEN}✓ CPU usage is acceptable${NC}"
else
echo -e " ${RED}✗ CPU usage is high${NC}"
fi
if awk "BEGIN {exit !($mem_avg < 80)}"; then
echo -e " ${GREEN}✓ Memory usage is acceptable${NC}"
else
echo -e " ${RED}✗ Memory usage is high${NC}"
fi
if [[ $(awk "BEGIN {print int($conn_max)}") -gt 0 ]]; then
echo -e " ${GREEN}✓ Relay is handling connections${NC}"
else
echo -e " ${YELLOW}⚠ No active connections detected${NC}"
fi
}
# Function to run load test with monitoring
run_monitored_load_test() {
local test_name="$1"
local description="$2"
echo "=========================================="
echo "Monitored Load Test: $test_name"
echo "=========================================="
echo "Description: $description"
echo ""
# Start monitoring in background
monitor_resources 30 2 &
local monitor_pid=$!
# Wait a moment for monitoring to start
sleep 2
# Run a simple load test (create multiple subscriptions)
echo "Running load test..."
for i in {1..20}; do
timeout 3 bash -c "
echo '[\"REQ\",\"monitor_test_'${i}'\",{}]' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null &
done
# Let the load run for a bit
sleep 10
# Clean up subscriptions
echo "Cleaning up test subscriptions..."
for i in {1..20}; do
timeout 3 bash -c "
echo '[\"CLOSE\",\"monitor_test_'${i}'\"]' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null &
done
# Wait for monitoring to complete
sleep 5
kill "$monitor_pid" 2>/dev/null || true
wait "$monitor_pid" 2>/dev/null || true
echo ""
}
echo "=========================================="
echo "C-Relay Resource Monitoring Suite"
echo "=========================================="
echo "Monitoring relay at ws://$RELAY_HOST:$RELAY_PORT"
echo ""
# Check if relay is running
if ! pgrep -f "c_relay" >/dev/null 2>&1; then
echo -e "${RED}Relay process not found. Please start the relay first.${NC}"
echo "Use: ./make_and_restart_relay.sh"
exit 1
fi
echo -e "${GREEN}✓ Relay process found${NC}"
echo ""
# Run baseline monitoring
echo "=== Baseline Resource Monitoring ==="
monitor_resources 15 2
generate_resource_report
echo ""
# Run monitored load test
run_monitored_load_test "Subscription Load Test" "Creating and closing multiple subscriptions while monitoring resources"
generate_resource_report
echo ""
echo "=========================================="
echo "Resource Monitoring Complete"
echo "=========================================="
echo "Resource monitoring completed. Review the statistics above."
echo "High CPU/memory usage may indicate performance issues."

298
tests/run_all_tests.sh Executable file
View File

@@ -0,0 +1,298 @@
#!/bin/bash
# C-Relay Comprehensive Test Suite Runner
# This script runs all security and stability tests for the Nostr relay
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
RELAY_URL="ws://$RELAY_HOST:$RELAY_PORT"
TEST_TIMEOUT=30
LOG_FILE="test_results_$(date +%Y%m%d_%H%M%S).log"
REPORT_FILE="test_report_$(date +%Y%m%d_%H%M%S).html"
# Test keys for authentication (from AGENTS.md)
ADMIN_PRIVATE_KEY="6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3"
RELAY_PUBKEY="4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test results tracking
TOTAL_SUITES=0
PASSED_SUITES=0
FAILED_SUITES=0
SKIPPED_SUITES=0
SUITE_RESULTS=()
# Function to create authenticated WebSocket connection
# Usage: authenticated_websocat <subscription_id> <filter_json>
authenticated_websocat() {
local sub_id="$1"
local filter="$2"
# Create a temporary script for authenticated connection
cat > /tmp/auth_ws_$$.sh << EOF
#!/bin/bash
# Authenticated WebSocket connection helper
# Connect and handle AUTH challenge
exec websocat -B 1048576 --no-close ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null << 'INNER_EOF'
["REQ","$sub_id",$filter]
INNER_EOF
EOF
chmod +x /tmp/auth_ws_$$.sh
timeout $TEST_TIMEOUT bash /tmp/auth_ws_$$.sh
rm -f /tmp/auth_ws_$$.sh
}
# Function to log messages
log() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $*" | tee -a "$LOG_FILE"
}
# Function to run a test suite
run_test_suite() {
local suite_name="$1"
local suite_script="$2"
local description="$3"
TOTAL_SUITES=$((TOTAL_SUITES + 1))
log "=========================================="
log "Running Test Suite: $suite_name"
log "Description: $description"
log "=========================================="
if [[ ! -f "$suite_script" ]]; then
log "${RED}ERROR: Test script $suite_script not found${NC}"
FAILED_SUITES=$((FAILED_SUITES + 1))
SUITE_RESULTS+=("$suite_name: FAILED (script not found)")
return 1
fi
# Make script executable if not already
chmod +x "$suite_script"
# Run the test suite and capture output
local start_time=$(date +%s)
if bash "$suite_script" >> "$LOG_FILE" 2>&1; then
local end_time=$(date +%s)
local duration=$((end_time - start_time))
log "${GREEN}✓ $suite_name PASSED${NC} (Duration: ${duration}s)"
PASSED_SUITES=$((PASSED_SUITES + 1))
SUITE_RESULTS+=("$suite_name: PASSED (${duration}s)")
return 0
else
local end_time=$(date +%s)
local duration=$((end_time - start_time))
log "${RED}✗ $suite_name FAILED${NC} (Duration: ${duration}s)"
FAILED_SUITES=$((FAILED_SUITES + 1))
SUITE_RESULTS+=("$suite_name: FAILED (${duration}s)")
return 1
fi
}
# Function to check if relay is running
check_relay_status() {
log "Checking relay status at $RELAY_URL..."
# First check if HTTP endpoint is accessible
if curl -s -H "Accept: application/nostr+json" "http://$RELAY_HOST:$RELAY_PORT" >/dev/null 2>&1; then
log "${GREEN}✓ Relay HTTP endpoint is accessible${NC}"
return 0
fi
# Fallback: Try WebSocket connection
if timeout 5 bash -c "
echo '[\"REQ\",\"status_check\",{}]' | websocat -B 1048576 --no-close '$RELAY_URL' >/dev/null 2>&1
" 2>/dev/null; then
log "${GREEN}✓ Relay WebSocket endpoint is accessible${NC}"
return 0
else
log "${RED}✗ Relay is not accessible at $RELAY_URL${NC}"
log "Please start the relay first using: ./make_and_restart_relay.sh"
return 1
fi
}
# Function to generate HTML report
generate_html_report() {
local total_duration=$1
cat > "$REPORT_FILE" << EOF
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>C-Relay Test Report - $(date)</title>
<style>
body { font-family: Arial, sans-serif; margin: 40px; background-color: #f5f5f5; }
.header { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 20px; border-radius: 8px; margin-bottom: 30px; }
.summary { background: white; padding: 20px; border-radius: 8px; margin-bottom: 30px; box-shadow: 0 2px 10px rgba(0,0,0,0.1); }
.suite { background: white; margin-bottom: 10px; padding: 15px; border-radius: 5px; box-shadow: 0 1px 5px rgba(0,0,0,0.1); }
.passed { border-left: 5px solid #28a745; }
.failed { border-left: 5px solid #dc3545; }
.skipped { border-left: 5px solid #ffc107; }
.metric { display: inline-block; margin: 10px; padding: 10px; background: #e9ecef; border-radius: 5px; }
.status-passed { color: #28a745; font-weight: bold; }
.status-failed { color: #dc3545; font-weight: bold; }
.status-skipped { color: #ffc107; font-weight: bold; }
table { width: 100%; border-collapse: collapse; margin-top: 20px; }
th, td { padding: 12px; text-align: left; border-bottom: 1px solid #ddd; }
th { background-color: #f8f9fa; }
</style>
</head>
<body>
<div class="header">
<h1>C-Relay Comprehensive Test Report</h1>
<p>Generated on: $(date)</p>
<p>Test Environment: $RELAY_URL</p>
</div>
<div class="summary">
<h2>Test Summary</h2>
<div class="metric">
<strong>Total Suites:</strong> $TOTAL_SUITES
</div>
<div class="metric">
<strong>Passed:</strong> <span class="status-passed">$PASSED_SUITES</span>
</div>
<div class="metric">
<strong>Failed:</strong> <span class="status-failed">$FAILED_SUITES</span>
</div>
<div class="metric">
<strong>Skipped:</strong> <span class="status-skipped">$SKIPPED_SUITES</span>
</div>
<div class="metric">
<strong>Total Duration:</strong> ${total_duration}s
</div>
<div class="metric">
<strong>Success Rate:</strong> $(( (PASSED_SUITES * 100) / TOTAL_SUITES ))%
</div>
</div>
<h2>Test Suite Results</h2>
EOF
for result in "${SUITE_RESULTS[@]}"; do
local suite_name=$(echo "$result" | cut -d: -f1)
local status=$(echo "$result" | cut -d: -f2 | cut -d' ' -f1)
local duration=$(echo "$result" | cut -d: -f2 | cut -d'(' -f2 | cut -d')' -f1)
local css_class="passed"
if [[ "$status" == "FAILED" ]]; then
css_class="failed"
elif [[ "$status" == "SKIPPED" ]]; then
css_class="skipped"
fi
cat >> "$REPORT_FILE" << EOF
<div class="suite $css_class">
<strong>$suite_name</strong> - <span class="status-$css_class">$status</span> ($duration)
</div>
EOF
done
cat >> "$REPORT_FILE" << EOF
</body>
</html>
EOF
log "HTML report generated: $REPORT_FILE"
}
# Main execution
log "=========================================="
log "C-Relay Comprehensive Test Suite Runner"
log "=========================================="
log "Relay URL: $RELAY_URL"
log "Log file: $LOG_FILE"
log "Report file: $REPORT_FILE"
log ""
# Check if relay is running
if ! check_relay_status; then
log "${RED}Cannot proceed without a running relay. Exiting.${NC}"
exit 1
fi
log ""
log "Starting comprehensive test execution..."
log ""
# Record start time
OVERALL_START_TIME=$(date +%s)
# Run Security Test Suites
log "${BLUE}=== SECURITY TEST SUITES ===${NC}"
run_test_suite "SQL Injection Tests" "tests/sql_injection_tests.sh" "Comprehensive SQL injection vulnerability testing"
run_test_suite "Filter Validation Tests" "tests/filter_validation_test.sh" "Input validation for REQ and COUNT messages"
run_test_suite "Subscription Validation Tests" "tests/subscription_validation.sh" "Subscription ID and message validation"
run_test_suite "Memory Corruption Tests" "tests/memory_corruption_tests.sh" "Buffer overflow and memory safety testing"
run_test_suite "Input Validation Tests" "tests/input_validation_tests.sh" "Comprehensive input boundary testing"
# Run Performance Test Suites
log ""
log "${BLUE}=== PERFORMANCE TEST SUITES ===${NC}"
run_test_suite "Subscription Limit Tests" "tests/subscription_limits.sh" "Subscription limit enforcement testing"
run_test_suite "Load Testing" "tests/load_tests.sh" "High concurrent connection testing"
run_test_suite "Stress Testing" "tests/stress_tests.sh" "Resource usage and stability testing"
run_test_suite "Rate Limiting Tests" "tests/rate_limiting_tests.sh" "Rate limiting and abuse prevention"
# Run Integration Test Suites
log ""
log "${BLUE}=== INTEGRATION TEST SUITES ===${NC}"
run_test_suite "NIP Protocol Tests" "tests/run_nip_tests.sh" "All NIP protocol compliance tests"
run_test_suite "Configuration Tests" "tests/config_tests.sh" "Configuration management and persistence"
run_test_suite "Authentication Tests" "tests/auth_tests.sh" "NIP-42 authentication testing"
# Run Benchmarking Suites
log ""
log "${BLUE}=== BENCHMARKING SUITES ===${NC}"
run_test_suite "Performance Benchmarks" "tests/performance_benchmarks.sh" "Performance metrics and benchmarking"
run_test_suite "Resource Monitoring" "tests/resource_monitoring.sh" "Memory and CPU usage monitoring"
# Calculate total duration
OVERALL_END_TIME=$(date +%s)
TOTAL_DURATION=$((OVERALL_END_TIME - OVERALL_START_TIME))
# Generate final report
log ""
log "=========================================="
log "TEST EXECUTION COMPLETE"
log "=========================================="
log "Total test suites: $TOTAL_SUITES"
log "Passed: $PASSED_SUITES"
log "Failed: $FAILED_SUITES"
log "Skipped: $SKIPPED_SUITES"
log "Total duration: ${TOTAL_DURATION}s"
log "Success rate: $(( (PASSED_SUITES * 100) / TOTAL_SUITES ))%"
log ""
log "Detailed log: $LOG_FILE"
# Generate HTML report
generate_html_report "$TOTAL_DURATION"
# Exit with appropriate code
if [[ $FAILED_SUITES -eq 0 ]]; then
log "${GREEN}✓ ALL TESTS PASSED${NC}"
exit 0
else
log "${RED}✗ SOME TESTS FAILED${NC}"
log "Check $LOG_FILE for detailed error information"
exit 1
fi

126
tests/run_nip_tests.sh Executable file
View File

@@ -0,0 +1,126 @@
#!/bin/bash
# NIP Protocol Test Runner for C-Relay
# Runs all NIP compliance tests
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test counters
TOTAL_SUITES=0
PASSED_SUITES=0
FAILED_SUITES=0
# Available NIP test files
NIP_TESTS=(
"1_nip_test.sh:NIP-01 Basic Protocol"
"9_nip_delete_test.sh:NIP-09 Event Deletion"
"11_nip_information.sh:NIP-11 Relay Information"
"13_nip_test.sh:NIP-13 Proof of Work"
"17_nip_test.sh:NIP-17 Private DMs"
"40_nip_test.sh:NIP-40 Expiration Timestamp"
"42_nip_test.sh:NIP-42 Authentication"
"45_nip_test.sh:NIP-45 Event Counts"
"50_nip_test.sh:NIP-50 Search Capability"
"70_nip_test.sh:NIP-70 Protected Events"
)
# Function to run a NIP test suite
run_nip_test() {
local test_file="$1"
local test_name="$2"
TOTAL_SUITES=$((TOTAL_SUITES + 1))
echo "=========================================="
echo "Running $test_name ($test_file)"
echo "=========================================="
if [[ ! -f "$test_file" ]]; then
echo -e "${RED}ERROR: Test file $test_file not found${NC}"
FAILED_SUITES=$((FAILED_SUITES + 1))
return 1
fi
# Make script executable if not already
chmod +x "$test_file"
# Run the test
if bash "$test_file"; then
echo -e "${GREEN}$test_name PASSED${NC}"
PASSED_SUITES=$((PASSED_SUITES + 1))
return 0
else
echo -e "${RED}$test_name FAILED${NC}"
FAILED_SUITES=$((FAILED_SUITES + 1))
return 1
fi
}
# Function to check relay connectivity
check_relay() {
echo "Checking relay connectivity at ws://$RELAY_HOST:$RELAY_PORT..."
if timeout 5 bash -c "
echo 'ping' | websocat -n1 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
" 2>/dev/null; then
echo -e "${GREEN}✓ Relay is accessible${NC}"
return 0
else
echo -e "${RED}✗ Cannot connect to relay${NC}"
echo "Please start the relay first: ./make_and_restart_relay.sh"
return 1
fi
}
echo "=========================================="
echo "C-Relay NIP Protocol Test Suite"
echo "=========================================="
echo "Testing NIP compliance against relay at ws://$RELAY_HOST:$RELAY_PORT"
echo ""
# Check relay connectivity
if ! check_relay; then
exit 1
fi
echo ""
echo "Running NIP protocol tests..."
echo ""
# Run all NIP tests
for nip_test in "${NIP_TESTS[@]}"; do
test_file="${nip_test%%:*}"
test_name="${nip_test#*:}"
run_nip_test "$test_file" "$test_name"
echo ""
done
# Summary
echo "=========================================="
echo "NIP Test Summary"
echo "=========================================="
echo "Total NIP test suites: $TOTAL_SUITES"
echo -e "Passed: ${GREEN}$PASSED_SUITES${NC}"
echo -e "Failed: ${RED}$FAILED_SUITES${NC}"
if [[ $FAILED_SUITES -eq 0 ]]; then
echo -e "${GREEN}✓ All NIP tests passed!${NC}"
echo "The relay is fully NIP compliant."
exit 0
else
echo -e "${RED}✗ Some NIP tests failed.${NC}"
echo "The relay may have NIP compliance issues."
exit 1
fi

242
tests/sql_injection_tests.sh Executable file
View File

@@ -0,0 +1,242 @@
#!/bin/bash
# SQL Injection Test Suite for C-Relay
# Comprehensive testing of SQL injection vulnerabilities across all filter types
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
TEST_TIMEOUT=10
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test counters
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
# Function to send WebSocket message and check for SQL injection success
test_sql_injection() {
local description="$1"
local message="$2"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
# Send message via websocat and capture response
# For now, we'll test without authentication since the relay may not require it for basic queries
local response
response=$(timeout 5 bash -c "
echo '$message' | websocat -B 1048576 --no-close ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -3
" 2>/dev/null || echo 'TIMEOUT')
# Check if the response indicates successful query execution (which would be bad)
# Look for signs that SQL injection worked (like database errors or unexpected results)
if [[ "$response" == *"SQL"* ]] || [[ "$response" == *"syntax"* ]] || [[ "$response" == *"error"* && ! "$response" == *"error: "* ]]; then
echo -e "${RED}FAILED${NC} - Potential SQL injection vulnerability detected"
echo " Response: $response"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
elif [[ "$response" == "TIMEOUT" ]]; then
echo -e "${YELLOW}UNCERTAIN${NC} - Connection timeout (may indicate crash)"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
else
echo -e "${GREEN}PASSED${NC} - SQL injection blocked"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
fi
}
# Function to test valid message (should work normally)
test_valid_query() {
local description="$1"
local message="$2"
TOTAL_TESTS=$((TOTAL_TESTS + 1))
echo -n "Testing $description... "
local response
response=$(timeout 5 bash -c "
echo '$message' | websocat -B 1048576 --no-close ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -3
" 2>/dev/null || echo 'TIMEOUT')
if [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]]; then
echo -e "${GREEN}PASSED${NC} - Valid query works"
PASSED_TESTS=$((PASSED_TESTS + 1))
return 0
else
echo -e "${RED}FAILED${NC} - Valid query failed: $response"
FAILED_TESTS=$((FAILED_TESTS + 1))
return 1
fi
}
echo "=========================================="
echo "C-Relay SQL Injection Test Suite"
echo "=========================================="
echo "Testing against relay at ws://$RELAY_HOST:$RELAY_PORT"
echo
# Test basic connectivity first
echo "=== Basic Connectivity Test ==="
test_valid_query "Basic connectivity" '["REQ","basic_test",{}]'
echo
# SQL Injection Payloads
SQL_PAYLOADS=(
# Classic SQL injection
"'; DROP TABLE events; --"
"1' OR '1'='1"
"1; SELECT * FROM sqlite_master; --"
"' OR 1=1 --"
"admin'--"
"1' UNION SELECT password FROM users --"
# Time-based blind SQL injection
"1' AND SLEEP(5) --"
"1' AND 1=1 WAITFOR DELAY '0:0:5' --"
# Error-based SQL injection
"1' AND 1=CAST((SELECT version()) AS INT) --"
"1' AND 1=CONVERT(INT, (SELECT @@VERSION)) --"
# Union-based injection
"' UNION SELECT NULL,NULL,NULL --"
"' UNION SELECT 1,2,3 --"
"' UNION ALL SELECT NULL,NULL,NULL --"
# Stacked queries
"'; SELECT * FROM events; --"
"'; DELETE FROM events; --"
"'; UPDATE events SET content='hacked' WHERE 1=1; --"
# Comment injection
"/*"
"*/"
"/**/"
"--"
"#"
# Hex encoded injection
"0x53514C5F494E4A454354494F4E" # SQL_INJECTION in hex
# Base64 encoded injection
"J1NSTCBJTkpFQ1RJT04gLS0=" # 'SQL INJECTION -- in base64
# Nested injection
"'))); DROP TABLE events; --"
"')) UNION SELECT NULL; --"
# Boolean-based blind injection
"' AND 1=1 --"
"' AND 1=2 --"
"' AND (SELECT COUNT(*) FROM events) > 0 --"
# Out-of-band injection (if supported)
"'; EXEC master..xp_cmdshell 'net user' --"
"'; DECLARE @host varchar(1024); SELECT @host=(SELECT TOP 1 master..sys.fn_varbintohexstr(password_hash) FROM sys.sql_logins WHERE name='sa'); --"
)
echo "=== Authors Filter SQL Injection Tests ==="
for payload in "${SQL_PAYLOADS[@]}"; do
test_sql_injection "Authors filter with payload: $payload" "[\"REQ\",\"sql_test_authors_$RANDOM\",{\"authors\":[\"$payload\"]}]"
done
echo
echo "=== IDs Filter SQL Injection Tests ==="
for payload in "${SQL_PAYLOADS[@]}"; do
test_sql_injection "IDs filter with payload: $payload" "[\"REQ\",\"sql_test_ids_$RANDOM\",{\"ids\":[\"$payload\"]}]"
done
echo
echo "=== Kinds Filter SQL Injection Tests ==="
# Test numeric kinds with SQL injection
test_sql_injection "Kinds filter with UNION injection" "[\"REQ\",\"sql_test_kinds_$RANDOM\",{\"kinds\":[0 UNION SELECT 1,2,3]}]"
test_sql_injection "Kinds filter with stacked query" "[\"REQ\",\"sql_test_kinds_$RANDOM\",{\"kinds\":[0; DROP TABLE events; --]}]"
echo
echo "=== Search Filter SQL Injection Tests ==="
for payload in "${SQL_PAYLOADS[@]}"; do
test_sql_injection "Search filter with payload: $payload" "[\"REQ\",\"sql_test_search_$RANDOM\",{\"search\":\"$payload\"}]"
done
echo
echo "=== Tag Filter SQL Injection Tests ==="
TAG_PREFIXES=("#e" "#p" "#t" "#r" "#d")
for prefix in "${TAG_PREFIXES[@]}"; do
for payload in "${SQL_PAYLOADS[@]}"; do
test_sql_injection "$prefix tag filter with payload: $payload" "[\"REQ\",\"sql_test_tag_$RANDOM\",{\"$prefix\":[\"$payload\"]}]"
done
done
echo
echo "=== Timestamp Filter SQL Injection Tests ==="
# Test since/until parameters
test_sql_injection "Since parameter injection" "[\"REQ\",\"sql_test_since_$RANDOM\",{\"since\":\"1' OR '1'='1\"}]"
test_sql_injection "Until parameter injection" "[\"REQ\",\"sql_test_until_$RANDOM\",{\"until\":\"1; DROP TABLE events; --\"}]"
echo
echo "=== Limit Parameter SQL Injection Tests ==="
test_sql_injection "Limit parameter injection" "[\"REQ\",\"sql_test_limit_$RANDOM\",{\"limit\":\"1' OR '1'='1\"}]"
test_sql_injection "Limit with UNION" "[\"REQ\",\"sql_test_limit_$RANDOM\",{\"limit\":\"0 UNION SELECT password FROM users\"}]"
echo
echo "=== Complex Multi-Filter SQL Injection Tests ==="
# Test combinations that might bypass validation
test_sql_injection "Multi-filter with authors injection" "[\"REQ\",\"sql_test_multi_$RANDOM\",{\"authors\":[\"admin'--\"],\"kinds\":[1],\"search\":\"anything\"}]"
test_sql_injection "Multi-filter with search injection" "[\"REQ\",\"sql_test_multi_$RANDOM\",{\"authors\":[\"valid\"],\"search\":\"'; DROP TABLE events; --\"}]"
test_sql_injection "Multi-filter with tag injection" "[\"REQ\",\"sql_test_multi_$RANDOM\",{\"#e\":[\"'; SELECT * FROM sqlite_master; --\"],\"limit\":10}]"
echo
echo "=== COUNT Message SQL Injection Tests ==="
# Test COUNT messages which might have different code paths
for payload in "${SQL_PAYLOADS[@]}"; do
test_sql_injection "COUNT with authors payload: $payload" "[\"COUNT\",\"sql_count_authors_$RANDOM\",{\"authors\":[\"$payload\"]}]"
test_sql_injection "COUNT with search payload: $payload" "[\"COUNT\",\"sql_count_search_$RANDOM\",{\"search\":\"$payload\"}]"
done
echo
echo "=== Edge Case SQL Injection Tests ==="
# Test edge cases that might bypass validation
test_sql_injection "Empty string injection" "[\"REQ\",\"sql_edge_$RANDOM\",{\"authors\":[\"\"]}]"
test_sql_injection "Null byte injection" "[\"REQ\",\"sql_edge_$RANDOM\",{\"authors\":[\"admin\\x00' OR '1'='1\"]}]"
test_sql_injection "Unicode injection" "[\"REQ\",\"sql_edge_$RANDOM\",{\"authors\":[\"admin' OR '1'='1' -- 💣\"]}]"
test_sql_injection "Very long injection payload" "[\"REQ\",\"sql_edge_$RANDOM\",{\"search\":\"$(printf 'a%.0s' {1..1000})' OR '1'='1\"}]"
echo
echo "=== Subscription ID SQL Injection Tests ==="
# Test if subscription IDs can be used for injection
test_sql_injection "Subscription ID injection" "[\"REQ\",\"'; DROP TABLE subscriptions; --\",{}]"
test_sql_injection "Subscription ID with quotes" "[\"REQ\",\"sub\"'; SELECT * FROM events; --\",{}]"
echo
echo "=== CLOSE Message SQL Injection Tests ==="
# Test CLOSE messages
test_sql_injection "CLOSE with injection" "[\"CLOSE\",\"'; DROP TABLE subscriptions; --\"]"
echo
echo "=== Test Results ==="
echo "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"
if [[ $FAILED_TESTS -eq 0 ]]; then
echo -e "${GREEN}✓ All SQL injection tests passed!${NC}"
echo "The relay appears to be protected against SQL injection attacks."
exit 0
else
echo -e "${RED}✗ SQL injection vulnerabilities detected!${NC}"
echo "The relay may be vulnerable to SQL injection attacks."
echo "Failed tests: $FAILED_TESTS"
exit 1
fi

63
tests/subscription_limits.sh Executable file
View File

@@ -0,0 +1,63 @@
#!/bin/bash
# Simple test script to verify subscription limit enforcement and rate limiting
# This script tests that subscription limits are enforced early
set -e
RELAY_URL="ws://127.0.0.1:8888"
echo "=== Subscription Limit Test ==="
echo "[INFO] Testing relay at: $RELAY_URL"
echo "[INFO] Note: This test assumes default subscription limits (max 25 per client)"
echo ""
# Test basic connectivity first
echo "=== Test 1: Basic Connectivity ==="
echo "[INFO] Testing basic WebSocket connection..."
# Send a simple REQ message
response=$(echo '["REQ","basic_test",{}]' | timeout 5 websocat -n1 "$RELAY_URL" 2>/dev/null || echo "TIMEOUT")
if echo "$response" | grep -q "EOSE\|EVENT\|NOTICE"; then
echo "[PASS] Basic connectivity works"
else
echo "[FAIL] Basic connectivity failed. Response: $response"
exit 1
fi
echo ""
# Test subscription limits
echo "=== Test 2: Subscription Limit Enforcement ==="
echo "[INFO] Testing subscription limits by creating multiple subscriptions..."
success_count=0
limit_hit=false
# Create multiple subscriptions in sequence (each in its own connection)
for i in {1..30}; do
echo "[INFO] Creating subscription $i..."
sub_id="limit_test_$i_$(date +%s%N)"
response=$(echo "[\"REQ\",\"$sub_id\",{}]" | timeout 5 websocat -n1 "$RELAY_URL" 2>/dev/null || echo "TIMEOUT")
if echo "$response" | grep -q "CLOSED.*$sub_id.*exceeded"; then
echo "[INFO] Hit subscription limit at subscription $i"
limit_hit=true
break
elif echo "$response" | grep -q "EOSE\|EVENT"; then
((success_count++))
else
echo "[WARN] Unexpected response for subscription $i: $response"
fi
sleep 0.1
done
if [ "$limit_hit" = true ]; then
echo "[PASS] Subscription limit enforcement working (limit hit after $success_count subscriptions)"
else
echo "[WARN] Subscription limit not hit after 30 attempts"
fi
echo ""
echo "=== Test Complete ==="

View File

@@ -0,0 +1,34 @@
#!/bin/bash
# Test script to validate subscription ID handling fixes
# This tests the memory corruption fixes in subscription handling
echo "Testing subscription ID validation fixes..."
# Test malformed subscription IDs
echo "Testing malformed subscription IDs..."
# Test 1: Empty subscription ID
echo '["REQ","",{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "Empty ID test: Connection failed (expected)"
# Test 2: Very long subscription ID (over 64 chars)
echo '["REQ","verylongsubscriptionidthatshouldexceedthemaximumlengthlimitof64characters",{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "Long ID test: Connection failed (expected)"
# Test 3: Subscription ID with invalid characters
echo '["REQ","sub@123",{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "Invalid chars test: Connection failed (expected)"
# Test 4: NULL subscription ID (this should be caught by JSON parsing)
echo '["REQ",null,{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "NULL ID test: Connection failed (expected)"
# Test 5: Valid subscription ID (should work)
echo '["REQ","valid_sub_123",{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null && echo "Valid ID test: Success" || echo "Valid ID test: Failed"
echo "Testing CLOSE message validation..."
# Test 6: CLOSE with malformed subscription ID
echo '["CLOSE",""]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "CLOSE empty ID test: Connection failed (expected)"
# Test 7: CLOSE with valid subscription ID
echo '["CLOSE","valid_sub_123"]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null && echo "CLOSE valid ID test: Success" || echo "CLOSE valid ID test: Failed"
echo "Subscription validation tests completed."