Compare commits
27 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6c38aaebf3 | ||
|
|
18b0ac44bf | ||
|
|
b6749eff2f | ||
|
|
c73a103280 | ||
|
|
a5d194f730 | ||
|
|
6320436b88 | ||
|
|
87325927ed | ||
|
|
4435cdf5b6 | ||
|
|
b041654611 | ||
|
|
e833dcefd4 | ||
|
|
29680f0ee8 | ||
|
|
670329700c | ||
|
|
62e17af311 | ||
|
|
e3938a2c85 | ||
|
|
49ffc3d99e | ||
|
|
34bb1c34a2 | ||
|
|
b27a56a296 | ||
|
|
ecd7095123 | ||
|
|
d449513861 | ||
|
|
6709e229b3 | ||
|
|
00a8f16262 | ||
|
|
00d16f8615 | ||
|
|
c90676d2b2 | ||
|
|
b89c011ad5 | ||
|
|
c3de31aa88 | ||
|
|
b6df0be865 | ||
|
|
a89f84f76e |
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -1,3 +1,6 @@
|
||||
[submodule "nostr_core_lib"]
|
||||
path = nostr_core_lib
|
||||
url = https://git.laantungir.net/laantungir/nostr_core_lib.git
|
||||
[submodule "c_utils_lib"]
|
||||
path = c_utils_lib
|
||||
url = ssh://git@git.laantungir.net:2222/laantungir/c_utils_lib.git
|
||||
|
||||
@@ -2,6 +2,6 @@
|
||||
description: "Brief description of what this command does"
|
||||
---
|
||||
|
||||
Run build_and_push.sh, and supply a good git commit message. For example:
|
||||
Run increment_and_push.sh, and supply a good git commit message. For example:
|
||||
|
||||
./build_and_push.sh "Fixed the bug with nip05 implementation"
|
||||
./increment_and_push.sh "Fixed the bug with nip05 implementation"
|
||||
Binary file not shown.
Binary file not shown.
139
Dockerfile.alpine-musl
Normal file
139
Dockerfile.alpine-musl
Normal file
@@ -0,0 +1,139 @@
|
||||
# Alpine-based MUSL static binary builder for C-Relay
|
||||
# Produces truly portable binaries with zero runtime dependencies
|
||||
|
||||
ARG DEBUG_BUILD=false
|
||||
|
||||
FROM alpine:3.19 AS builder
|
||||
|
||||
# Install build dependencies
|
||||
RUN apk add --no-cache \
|
||||
build-base \
|
||||
musl-dev \
|
||||
git \
|
||||
cmake \
|
||||
pkgconfig \
|
||||
autoconf \
|
||||
automake \
|
||||
libtool \
|
||||
openssl-dev \
|
||||
openssl-libs-static \
|
||||
zlib-dev \
|
||||
zlib-static \
|
||||
curl-dev \
|
||||
curl-static \
|
||||
sqlite-dev \
|
||||
sqlite-static \
|
||||
linux-headers \
|
||||
wget \
|
||||
bash
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /build
|
||||
|
||||
# Build libsecp256k1 static (cached layer - only rebuilds if Alpine version changes)
|
||||
RUN cd /tmp && \
|
||||
git clone https://github.com/bitcoin-core/secp256k1.git && \
|
||||
cd secp256k1 && \
|
||||
./autogen.sh && \
|
||||
./configure --enable-static --disable-shared --prefix=/usr \
|
||||
CFLAGS="-fPIC" && \
|
||||
make -j$(nproc) && \
|
||||
make install && \
|
||||
rm -rf /tmp/secp256k1
|
||||
|
||||
# Build libwebsockets static with minimal features (cached layer)
|
||||
RUN cd /tmp && \
|
||||
git clone --depth 1 --branch v4.3.3 https://github.com/warmcat/libwebsockets.git && \
|
||||
cd libwebsockets && \
|
||||
mkdir build && cd build && \
|
||||
cmake .. \
|
||||
-DLWS_WITH_STATIC=ON \
|
||||
-DLWS_WITH_SHARED=OFF \
|
||||
-DLWS_WITH_SSL=ON \
|
||||
-DLWS_WITHOUT_TESTAPPS=ON \
|
||||
-DLWS_WITHOUT_TEST_SERVER=ON \
|
||||
-DLWS_WITHOUT_TEST_CLIENT=ON \
|
||||
-DLWS_WITHOUT_TEST_PING=ON \
|
||||
-DLWS_WITH_HTTP2=OFF \
|
||||
-DLWS_WITH_LIBUV=OFF \
|
||||
-DLWS_WITH_LIBEVENT=OFF \
|
||||
-DLWS_IPV6=ON \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_INSTALL_PREFIX=/usr \
|
||||
-DCMAKE_C_FLAGS="-fPIC" && \
|
||||
make -j$(nproc) && \
|
||||
make install && \
|
||||
rm -rf /tmp/libwebsockets
|
||||
|
||||
# Copy only submodule configuration and git directory
|
||||
COPY .gitmodules /build/.gitmodules
|
||||
COPY .git /build/.git
|
||||
|
||||
# Clean up any stale submodule references (nips directory is not a submodule)
|
||||
RUN git rm --cached nips 2>/dev/null || true
|
||||
|
||||
# Initialize submodules (cached unless .gitmodules changes)
|
||||
RUN git submodule update --init --recursive
|
||||
|
||||
# Copy nostr_core_lib source files (cached unless nostr_core_lib changes)
|
||||
COPY nostr_core_lib /build/nostr_core_lib/
|
||||
|
||||
# Copy c_utils_lib source files (cached unless c_utils_lib changes)
|
||||
COPY c_utils_lib /build/c_utils_lib/
|
||||
|
||||
# Build c_utils_lib with MUSL-compatible flags (cached unless c_utils_lib changes)
|
||||
RUN cd c_utils_lib && \
|
||||
sed -i 's/CFLAGS = -Wall -Wextra -std=c99 -O2 -g/CFLAGS = -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -O2 -g/' Makefile && \
|
||||
make clean && \
|
||||
make
|
||||
|
||||
# Build nostr_core_lib with required NIPs (cached unless nostr_core_lib changes)
|
||||
# Disable fortification in build.sh to prevent __*_chk symbol issues
|
||||
# NIPs: 001(Basic), 006(Keys), 013(PoW), 017(DMs), 019(Bech32), 044(Encryption), 059(Gift Wrap - required by NIP-17)
|
||||
RUN cd nostr_core_lib && \
|
||||
chmod +x build.sh && \
|
||||
sed -i 's/CFLAGS="-Wall -Wextra -std=c99 -fPIC -O2"/CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"/' build.sh && \
|
||||
rm -f *.o *.a 2>/dev/null || true && \
|
||||
./build.sh --nips=1,6,13,17,19,44,59
|
||||
|
||||
# Copy c-relay source files LAST (only this layer rebuilds on source changes)
|
||||
COPY src/ /build/src/
|
||||
COPY Makefile /build/Makefile
|
||||
|
||||
# Build c-relay with full static linking (only rebuilds when src/ changes)
|
||||
# Disable fortification to avoid __*_chk symbols that don't exist in MUSL
|
||||
# Use conditional compilation flags based on DEBUG_BUILD argument
|
||||
RUN if [ "$DEBUG_BUILD" = "true" ]; then \
|
||||
CFLAGS="-g -O0 -DDEBUG"; \
|
||||
STRIP_CMD=""; \
|
||||
echo "Building with DEBUG symbols enabled"; \
|
||||
else \
|
||||
CFLAGS="-O2"; \
|
||||
STRIP_CMD="strip /build/c_relay_static"; \
|
||||
echo "Building optimized production binary"; \
|
||||
fi && \
|
||||
gcc -static $CFLAGS -Wall -Wextra -std=c99 \
|
||||
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
|
||||
-I. -Ic_utils_lib/src -Inostr_core_lib -Inostr_core_lib/nostr_core \
|
||||
-Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket \
|
||||
src/main.c src/config.c src/dm_admin.c src/request_validator.c \
|
||||
src/nip009.c src/nip011.c src/nip013.c src/nip040.c src/nip042.c \
|
||||
src/websockets.c src/subscriptions.c src/api.c src/embedded_web_content.c \
|
||||
-o /build/c_relay_static \
|
||||
c_utils_lib/libc_utils.a \
|
||||
nostr_core_lib/libnostr_core_x64.a \
|
||||
-lwebsockets -lssl -lcrypto -lsqlite3 -lsecp256k1 \
|
||||
-lcurl -lz -lpthread -lm -ldl && \
|
||||
eval "$STRIP_CMD"
|
||||
|
||||
# Verify it's truly static
|
||||
RUN echo "=== Binary Information ===" && \
|
||||
file /build/c_relay_static && \
|
||||
ls -lh /build/c_relay_static && \
|
||||
echo "=== Checking for dynamic dependencies ===" && \
|
||||
(ldd /build/c_relay_static 2>&1 || echo "Binary is static") && \
|
||||
echo "=== Build complete ==="
|
||||
|
||||
# Output stage - just the binary
|
||||
FROM scratch AS output
|
||||
COPY --from=builder /build/c_relay_static /c_relay_static
|
||||
48
Makefile
48
Makefile
@@ -2,8 +2,8 @@
|
||||
|
||||
CC = gcc
|
||||
CFLAGS = -Wall -Wextra -std=c99 -g -O2
|
||||
INCLUDES = -I. -Inostr_core_lib -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket
|
||||
LIBS = -lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k1 -lssl -lcrypto -L/usr/local/lib -lcurl
|
||||
INCLUDES = -I. -Ic_utils_lib/src -Inostr_core_lib -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket
|
||||
LIBS = -lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k1 -lssl -lcrypto -L/usr/local/lib -lcurl -Lc_utils_lib -lc_utils
|
||||
|
||||
# Build directory
|
||||
BUILD_DIR = build
|
||||
@@ -11,6 +11,7 @@ BUILD_DIR = build
|
||||
# Source files
|
||||
MAIN_SRC = src/main.c src/config.c src/dm_admin.c src/request_validator.c src/nip009.c src/nip011.c src/nip013.c src/nip040.c src/nip042.c src/websockets.c src/subscriptions.c src/api.c src/embedded_web_content.c
|
||||
NOSTR_CORE_LIB = nostr_core_lib/libnostr_core_x64.a
|
||||
C_UTILS_LIB = c_utils_lib/libc_utils.a
|
||||
|
||||
# Architecture detection
|
||||
ARCH = $(shell uname -m)
|
||||
@@ -32,9 +33,16 @@ $(BUILD_DIR):
|
||||
mkdir -p $(BUILD_DIR)
|
||||
|
||||
# Check if nostr_core_lib is built
|
||||
# Explicitly specify NIPs to ensure NIP-44 (encryption) is included
|
||||
# NIPs: 1 (basic), 6 (keys), 13 (PoW), 17 (DMs), 19 (bech32), 44 (encryption), 59 (gift wrap)
|
||||
$(NOSTR_CORE_LIB):
|
||||
@echo "Building nostr_core_lib..."
|
||||
cd nostr_core_lib && ./build.sh
|
||||
@echo "Building nostr_core_lib with required NIPs (including NIP-44 for encryption)..."
|
||||
cd nostr_core_lib && ./build.sh --nips=1,6,13,17,19,44,59
|
||||
|
||||
# Check if c_utils_lib is built
|
||||
$(C_UTILS_LIB):
|
||||
@echo "Building c_utils_lib..."
|
||||
cd c_utils_lib && ./build.sh lib
|
||||
|
||||
# Update main.h version information (requires main.h to exist)
|
||||
src/main.h:
|
||||
@@ -73,18 +81,18 @@ force-version:
|
||||
@$(MAKE) src/main.h
|
||||
|
||||
# Build the relay
|
||||
$(TARGET): $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
|
||||
$(TARGET): $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB) $(C_UTILS_LIB)
|
||||
@echo "Compiling C-Relay for architecture: $(ARCH)"
|
||||
$(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(TARGET) $(NOSTR_CORE_LIB) $(LIBS)
|
||||
$(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(TARGET) $(NOSTR_CORE_LIB) $(C_UTILS_LIB) $(LIBS)
|
||||
@echo "Build complete: $(TARGET)"
|
||||
|
||||
# Build for specific architectures
|
||||
x86: $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
|
||||
x86: $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB) $(C_UTILS_LIB)
|
||||
@echo "Building C-Relay for x86_64..."
|
||||
$(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_x86 $(NOSTR_CORE_LIB) $(LIBS)
|
||||
$(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_x86 $(NOSTR_CORE_LIB) $(C_UTILS_LIB) $(LIBS)
|
||||
@echo "Build complete: $(BUILD_DIR)/c_relay_x86"
|
||||
|
||||
arm64: $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
|
||||
arm64: $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB) $(C_UTILS_LIB)
|
||||
@echo "Cross-compiling C-Relay for ARM64..."
|
||||
@if ! command -v aarch64-linux-gnu-gcc >/dev/null 2>&1; then \
|
||||
echo "ERROR: ARM64 cross-compiler not found."; \
|
||||
@@ -108,7 +116,7 @@ arm64: $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
|
||||
fi
|
||||
@echo "Using aarch64-linux-gnu-gcc with ARM64 libraries..."
|
||||
PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig:/usr/share/pkgconfig \
|
||||
aarch64-linux-gnu-gcc $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_arm64 $(NOSTR_CORE_LIB) \
|
||||
aarch64-linux-gnu-gcc $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_arm64 $(NOSTR_CORE_LIB) $(C_UTILS_LIB) \
|
||||
-L/usr/lib/aarch64-linux-gnu $(LIBS)
|
||||
@echo "Build complete: $(BUILD_DIR)/c_relay_arm64"
|
||||
|
||||
@@ -159,9 +167,10 @@ clean:
|
||||
rm -rf $(BUILD_DIR)
|
||||
@echo "Clean complete"
|
||||
|
||||
# Clean everything including nostr_core_lib
|
||||
# Clean everything including nostr_core_lib and c_utils_lib
|
||||
clean-all: clean
|
||||
cd nostr_core_lib && make clean 2>/dev/null || true
|
||||
cd c_utils_lib && make clean 2>/dev/null || true
|
||||
|
||||
# Install dependencies (Ubuntu/Debian)
|
||||
install-deps:
|
||||
@@ -197,4 +206,21 @@ help:
|
||||
@echo " make init-db # Set up database"
|
||||
@echo " make force-version # Force regenerate main.h from git"
|
||||
|
||||
# Build fully static MUSL binaries using Docker
|
||||
static-musl-x86_64:
|
||||
@echo "Building fully static MUSL binary for x86_64..."
|
||||
docker buildx build --platform linux/amd64 -f examples/deployment/static-builder.Dockerfile -t c-relay-static-builder-x86_64 --load .
|
||||
docker run --rm -v $(PWD)/build:/output c-relay-static-builder-x86_64 sh -c "cp /c_relay_static_musl_x86_64 /output/"
|
||||
@echo "Static binary created: build/c_relay_static_musl_x86_64"
|
||||
|
||||
static-musl-arm64:
|
||||
@echo "Building fully static MUSL binary for ARM64..."
|
||||
docker buildx build --platform linux/arm64 -f examples/deployment/static-builder.Dockerfile -t c-relay-static-builder-arm64 --load .
|
||||
docker run --rm -v $(PWD)/build:/output c-relay-static-builder-arm64 sh -c "cp /c_relay_static_musl_x86_64 /output/c_relay_static_musl_arm64"
|
||||
@echo "Static binary created: build/c_relay_static_musl_arm64"
|
||||
|
||||
static-musl: static-musl-x86_64 static-musl-arm64
|
||||
@echo "Built static MUSL binaries for both architectures"
|
||||
|
||||
.PHONY: static-musl-x86_64 static-musl-arm64 static-musl
|
||||
.PHONY: all x86 arm64 test init-db clean clean-all install-deps install-cross-tools install-arm64-deps check-toolchain help force-version
|
||||
68
README.md
68
README.md
@@ -1,6 +1,6 @@
|
||||
# C-Nostr Relay
|
||||
|
||||
A high-performance Nostr relay implemented in C with SQLite backend, featuring a revolutionary **zero-configuration** approach using event-based configuration management.
|
||||
A high-performance Nostr relay implemented in C with SQLite backend, featuring nostr event-based management.
|
||||
|
||||
## Supported NIPs
|
||||
|
||||
@@ -22,6 +22,69 @@ Do NOT modify the formatting, add emojis, or change the text. Keep the simple fo
|
||||
- [x] NIP-50: Keywords filter
|
||||
- [x] NIP-70: Protected Events
|
||||
|
||||
## Quick Start
|
||||
|
||||
Get your C-Relay up and running in minutes with a static binary (no dependencies required):
|
||||
|
||||
### 1. Download Static Binary
|
||||
|
||||
Download the latest static release from the [releases page](https://git.laantungir.net/laantungir/c-relay/releases):
|
||||
|
||||
```bash
|
||||
# Static binary - works on all Linux distributions (no dependencies)
|
||||
wget https://git.laantungir.net/laantungir/c-relay/releases/download/v0.6.0/c-relay-v0.6.0-linux-x86_64-static
|
||||
chmod +x c-relay-v0.6.0-linux-x86_64-static
|
||||
mv c-relay-v0.6.0-linux-x86_64-static c-relay
|
||||
```
|
||||
|
||||
### 2. Start the Relay
|
||||
|
||||
Simply run the binary - no configuration files needed:
|
||||
|
||||
```bash
|
||||
./c-relay
|
||||
```
|
||||
|
||||
On first startup, you'll see:
|
||||
- **Admin Private Key**: Save this securely! You'll need it for administration
|
||||
- **Relay Public Key**: Your relay's identity on the Nostr network
|
||||
- **Port Information**: Default is 8888, or the next available port
|
||||
|
||||
### 3. Access the Web Interface
|
||||
|
||||
Open your browser and navigate to:
|
||||
```
|
||||
http://localhost:8888/api/
|
||||
```
|
||||
|
||||
The web interface provides:
|
||||
- Real-time configuration management
|
||||
- Database statistics dashboard
|
||||
- Auth rules management
|
||||
- Secure admin authentication with your Nostr identity
|
||||
|
||||
### 4. Test Your Relay
|
||||
|
||||
Test basic connectivity:
|
||||
```bash
|
||||
# Test WebSocket connection
|
||||
curl -H "Accept: application/nostr+json" http://localhost:8888
|
||||
|
||||
# Test with a Nostr client
|
||||
# Add ws://localhost:8888 to your client's relay list
|
||||
```
|
||||
|
||||
### 5. Configure Your Relay (Optional)
|
||||
|
||||
Use the web interface or send admin commands to customize:
|
||||
- Relay name and description
|
||||
- Authentication rules (whitelist/blacklist)
|
||||
- Connection limits
|
||||
- Proof-of-work requirements
|
||||
|
||||
**That's it!** Your relay is now running with zero configuration required. The event-based configuration system means you can adjust all settings through the web interface or admin API without editing config files.
|
||||
|
||||
|
||||
## Web Admin Interface
|
||||
|
||||
C-Relay includes a **built-in web-based administration interface** accessible at `http://localhost:8888/api/`. The interface provides:
|
||||
@@ -34,6 +97,7 @@ C-Relay includes a **built-in web-based administration interface** accessible at
|
||||
|
||||
The web interface serves embedded static files with no external dependencies and includes proper CORS headers for browser compatibility.
|
||||
|
||||
|
||||
## Administrator API
|
||||
|
||||
C-Relay uses an innovative **event-based administration system** where all configuration and management commands are sent as signed Nostr events using the admin private key generated during first startup. All admin commands use **NIP-44 encrypted command arrays** for security and compatibility.
|
||||
@@ -269,7 +333,7 @@ All admin commands return **signed EVENT responses** via WebSocket following sta
|
||||
|
||||
In addition to the above admin API, c-relay allows the administrator to direct message the relay to get information or control some settings. As long as the administrator is signed in with any nostr client that allows sending nip-17 direct messages (DMs), they can control the relay.
|
||||
|
||||
The is possible because the relay is a full nostr citizen with it's own private and public key.
|
||||
The is possible because the relay is a full nostr citizen with it's own private and public key, and it knows the administrator's public key.
|
||||
|
||||
|
||||
|
||||
|
||||
58
api/embedded.html
Normal file
58
api/embedded.html
Normal file
@@ -0,0 +1,58 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Embedded NOSTR_LOGIN_LITE</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
||||
margin: 0;
|
||||
padding: 40px;
|
||||
background: white;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
min-height: 100vh;
|
||||
}
|
||||
|
||||
.container {
|
||||
max-width: 400px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
#login-container {
|
||||
/* No styling - let embedded modal blend seamlessly */
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<div id="login-container"></div>
|
||||
</div>
|
||||
|
||||
<script src="../lite/nostr.bundle.js"></script>
|
||||
<script src="../lite/nostr-lite.js"></script>
|
||||
|
||||
<script>
|
||||
document.addEventListener('DOMContentLoaded', async () => {
|
||||
await window.NOSTR_LOGIN_LITE.init({
|
||||
theme:'default',
|
||||
methods: {
|
||||
extension: true,
|
||||
local: true,
|
||||
seedphrase: true,
|
||||
readonly: true,
|
||||
connect: true,
|
||||
remote: true,
|
||||
otp: true
|
||||
}
|
||||
});
|
||||
|
||||
window.NOSTR_LOGIN_LITE.embed('#login-container', {
|
||||
seamless: true
|
||||
});
|
||||
});
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
386
api/index.css
386
api/index.css
@@ -6,7 +6,7 @@
|
||||
--muted-color: #dddddd;
|
||||
--border-color: var(--muted-color);
|
||||
--font-family: "Courier New", Courier, monospace;
|
||||
--border-radius: 15px;
|
||||
--border-radius: 5px;
|
||||
--border-width: 1px;
|
||||
|
||||
/* Floating Tab Variables (8) */
|
||||
@@ -22,6 +22,23 @@
|
||||
--tab-border-opacity-logged-in: 0.1;
|
||||
}
|
||||
|
||||
/* Dark Mode Overrides */
|
||||
body.dark-mode {
|
||||
--primary-color: #ffffff;
|
||||
--secondary-color: #000000;
|
||||
--accent-color: #ff0000;
|
||||
--muted-color: #222222;
|
||||
--border-color: var(--muted-color);
|
||||
|
||||
|
||||
--tab-bg-logged-out: #000000;
|
||||
--tab-color-logged-out: #ffffff;
|
||||
--tab-border-logged-out: #ffffff;
|
||||
--tab-bg-logged-in: #000000;
|
||||
--tab-color-logged-in: #ffffff;
|
||||
--tab-border-logged-in: #00ffff;
|
||||
}
|
||||
|
||||
* {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
@@ -33,11 +50,229 @@ body {
|
||||
background-color: var(--secondary-color);
|
||||
color: var(--primary-color);
|
||||
/* line-height: 1.4; */
|
||||
padding: 20px;
|
||||
padding: 0;
|
||||
max-width: none;
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
/* Header Styles */
|
||||
.main-header {
|
||||
background-color: var(--secondary-color);
|
||||
|
||||
padding: 15px 20px;
|
||||
z-index: 100;
|
||||
max-width: 1200px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
.header-content {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.header-title {
|
||||
margin: 0;
|
||||
font-size: 24px;
|
||||
font-weight: normal;
|
||||
color: var(--primary-color);
|
||||
border: none;
|
||||
padding: 0;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.relay-info {
|
||||
text-align: center;
|
||||
flex: 1;
|
||||
}
|
||||
|
||||
.relay-name {
|
||||
font-size: 14px;
|
||||
font-weight: bold;
|
||||
color: var(--primary-color);
|
||||
margin-bottom: 2px;
|
||||
}
|
||||
|
||||
.relay-pubkey-container {
|
||||
border: 1px solid transparent;
|
||||
border-radius: var(--border-radius);
|
||||
padding: 4px;
|
||||
margin-top: 4px;
|
||||
cursor: pointer;
|
||||
transition: border-color 0.2s ease;
|
||||
background-color: var(--secondary-color);
|
||||
}
|
||||
|
||||
.relay-pubkey-container:hover {
|
||||
border-color: var(--border-color);
|
||||
}
|
||||
|
||||
.relay-pubkey-container.copied {
|
||||
border-color: var(--accent-color);
|
||||
animation: flash-accent 0.5s ease-in-out;
|
||||
}
|
||||
|
||||
.relay-pubkey {
|
||||
font-size: 8px;
|
||||
color: var(--primary-color);
|
||||
font-family: "Courier New", Courier, monospace;
|
||||
line-height: 1.2;
|
||||
white-space: pre-line;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
@keyframes flash-accent {
|
||||
0% { border-color: var(--accent-color); }
|
||||
50% { border-color: var(--accent-color); }
|
||||
100% { border-color: transparent; }
|
||||
}
|
||||
|
||||
.relay-description {
|
||||
font-size: 10px;
|
||||
color: var(--primary-color);
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
.header-title {
|
||||
margin: 0;
|
||||
font-size: 24px;
|
||||
font-weight: bolder;
|
||||
color: var(--primary-color);
|
||||
border: none;
|
||||
padding: 0;
|
||||
text-align: left;
|
||||
display: flex;
|
||||
gap: 2px;
|
||||
}
|
||||
|
||||
.relay-letter {
|
||||
position: relative;
|
||||
display: inline-block;
|
||||
transition: all 0.05s ease;
|
||||
}
|
||||
|
||||
.relay-letter.underlined::after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
bottom: -2px;
|
||||
left: 0;
|
||||
right: 0;
|
||||
height: 2px;
|
||||
background-color: var(--accent-color);
|
||||
}
|
||||
|
||||
.header-user-name {
|
||||
display: block;
|
||||
font-weight: 500;
|
||||
color: var(--primary-color);
|
||||
font-size: 10px;
|
||||
text-align: center;
|
||||
margin-top: 4px;
|
||||
}
|
||||
|
||||
.profile-area {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
position: relative;
|
||||
cursor: pointer;
|
||||
padding: 8px 12px;
|
||||
border-radius: var(--border-radius);
|
||||
transition: background-color 0.2s ease;
|
||||
margin-left: auto;
|
||||
}
|
||||
|
||||
.admin-label {
|
||||
font-size: 10px;
|
||||
color: var(--primary-color);
|
||||
font-weight: normal;
|
||||
margin-bottom: 4px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.profile-container {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
gap: 4px;
|
||||
}
|
||||
|
||||
.profile-area:hover {
|
||||
background-color: rgba(0, 0, 0, 0.05);
|
||||
}
|
||||
|
||||
.profile-info {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 10px;
|
||||
}
|
||||
|
||||
.header-user-image {
|
||||
width: 48px; /* 50% larger than 32px */
|
||||
height: 48px; /* 50% larger than 32px */
|
||||
border-radius: var(--border-radius); /* Curved corners like other elements */
|
||||
object-fit: cover;
|
||||
border: 2px solid transparent; /* Invisible border */
|
||||
background-color: var(--secondary-color);
|
||||
}
|
||||
|
||||
|
||||
.logout-dropdown {
|
||||
position: absolute;
|
||||
top: 100%;
|
||||
right: 0;
|
||||
background-color: var(--secondary-color);
|
||||
border: var(--border-width) solid var(--border-color);
|
||||
border-radius: var(--border-radius);
|
||||
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
|
||||
min-width: 120px;
|
||||
z-index: 200;
|
||||
margin-top: 4px;
|
||||
}
|
||||
|
||||
.logout-btn {
|
||||
width: 100%;
|
||||
padding: 5px 10px;
|
||||
background: none;
|
||||
border: none;
|
||||
color: var(--primary-color);
|
||||
text-align: left;
|
||||
cursor: pointer;
|
||||
font-size: 10px;
|
||||
font-family: var(--font-family);
|
||||
border-radius: var(--border-radius);
|
||||
transition: background-color 0.2s ease;
|
||||
}
|
||||
|
||||
.logout-btn:hover {
|
||||
background-color: rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
|
||||
/* Login Modal Styles */
|
||||
.login-modal-overlay {
|
||||
position: fixed;
|
||||
top: 0;
|
||||
left: 0;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
background-color: rgba(0, 0, 0, 0.8);
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
z-index: 1000;
|
||||
}
|
||||
|
||||
.login-modal-content {
|
||||
background-color: var(--secondary-color);
|
||||
border: var(--border-width) solid var(--border-color);
|
||||
border-radius: var(--border-radius);
|
||||
padding: 30px;
|
||||
max-width: 400px;
|
||||
width: 90%;
|
||||
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.3);
|
||||
}
|
||||
|
||||
h1 {
|
||||
border-bottom: var(--border-width) solid var(--border-color);
|
||||
padding-bottom: 10px;
|
||||
@@ -124,10 +359,44 @@ button:active {
|
||||
}
|
||||
|
||||
button:disabled {
|
||||
background-color: #ccc;
|
||||
color: var(--muted-color);
|
||||
background-color: var(--muted-color);
|
||||
color: var(--primary-color);
|
||||
cursor: not-allowed;
|
||||
border-color: #ccc;
|
||||
border-color: var(--muted-color);
|
||||
}
|
||||
|
||||
/* Flash animation for refresh button */
|
||||
@keyframes flash-red {
|
||||
0% { border-color: var(--border-color); }
|
||||
50% { border-color: var(--accent-color); }
|
||||
100% { border-color: var(--border-color); }
|
||||
}
|
||||
|
||||
.flash-red {
|
||||
animation: flash-red 1s ease-in-out;
|
||||
}
|
||||
|
||||
/* Flash animation for updated statistics values */
|
||||
@keyframes flash-value {
|
||||
0% { color: var(--primary-color); }
|
||||
50% { color: var(--accent-color); }
|
||||
100% { color: var(--primary-color); }
|
||||
}
|
||||
|
||||
.flash-value {
|
||||
animation: flash-value 1s ease-in-out;
|
||||
}
|
||||
|
||||
/* Npub links styling */
|
||||
.npub-link {
|
||||
color: var(--primary-color);
|
||||
text-decoration: none;
|
||||
font-weight: normal;
|
||||
transition: color 0.2s ease;
|
||||
}
|
||||
|
||||
.npub-link:hover {
|
||||
color: var(--accent-color);
|
||||
}
|
||||
|
||||
.status {
|
||||
@@ -161,6 +430,7 @@ button:disabled {
|
||||
border-color: var(--accent-color);
|
||||
}
|
||||
|
||||
|
||||
.config-table {
|
||||
border: 1px solid var(--border-color);
|
||||
border-radius: var(--border-radius);
|
||||
@@ -180,6 +450,10 @@ button:disabled {
|
||||
font-size: 10px;
|
||||
}
|
||||
|
||||
.config-table tbody tr:hover {
|
||||
background-color: rgba(0, 0, 0, 0.05);
|
||||
}
|
||||
|
||||
.config-table-container {
|
||||
overflow-x: auto;
|
||||
max-width: 100%;
|
||||
@@ -187,12 +461,13 @@ button:disabled {
|
||||
|
||||
.config-table th {
|
||||
font-weight: bold;
|
||||
height: 40px; /* Double the default height */
|
||||
line-height: 40px; /* Center text vertically */
|
||||
height: 24px; /* Base height for tbody rows */
|
||||
line-height: 24px; /* Center text vertically */
|
||||
}
|
||||
|
||||
.config-table tr:hover {
|
||||
background-color: var(--muted-color);
|
||||
.config-table td {
|
||||
height: 16px; /* 50% taller than tbody rows would be */
|
||||
line-height: 16px; /* Center text vertically */
|
||||
}
|
||||
|
||||
/* Inline config value inputs - remove borders and padding to fit seamlessly in table cells */
|
||||
@@ -218,9 +493,13 @@ button:disabled {
|
||||
.config-actions-cell {
|
||||
cursor: pointer;
|
||||
transition: all 0.2s ease;
|
||||
text-align: center;
|
||||
text-align: center !important;
|
||||
font-weight: bold;
|
||||
vertical-align: middle;
|
||||
width: 60px;
|
||||
min-width: 60px;
|
||||
max-width: 60px;
|
||||
padding: 8px 4px;
|
||||
}
|
||||
|
||||
.config-actions-cell:hover {
|
||||
@@ -282,12 +561,21 @@ button:disabled {
|
||||
|
||||
.user-info-container {
|
||||
display: flex;
|
||||
align-items: flex-start;
|
||||
gap: 20px;
|
||||
flex-direction: column;
|
||||
gap: 15px;
|
||||
}
|
||||
|
||||
.user-details {
|
||||
flex: 1;
|
||||
order: -1; /* Show user details first when logged in */
|
||||
}
|
||||
|
||||
.login-section {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.logout-section {
|
||||
display: flex;
|
||||
justify-content: flex-end;
|
||||
}
|
||||
|
||||
.login-logout-btn {
|
||||
@@ -334,6 +622,31 @@ button:disabled {
|
||||
margin: 5px 0;
|
||||
}
|
||||
|
||||
/* User profile header with image */
|
||||
.user-profile-header {
|
||||
display: flex;
|
||||
align-items: flex-start;
|
||||
gap: 15px;
|
||||
}
|
||||
|
||||
.user-image-container {
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.user-profile-image {
|
||||
width: 60px;
|
||||
height: 60px;
|
||||
border-radius: var(--border-radius);
|
||||
object-fit: cover;
|
||||
border: 2px solid var(--border-color);
|
||||
background-color: var(--bg-color);
|
||||
}
|
||||
|
||||
.user-text-info {
|
||||
flex: 1;
|
||||
min-width: 0; /* Allow text to wrap */
|
||||
}
|
||||
|
||||
.hidden {
|
||||
display: none;
|
||||
}
|
||||
@@ -347,6 +660,40 @@ button:disabled {
|
||||
padding-bottom: 10px;
|
||||
}
|
||||
|
||||
.countdown-btn {
|
||||
width: auto;
|
||||
min-width: 40px;
|
||||
padding: 8px 12px;
|
||||
background: var(--secondary-color);
|
||||
color: var(--primary-color);
|
||||
border: var(--border-width) solid var(--border-color);
|
||||
border-radius: var(--border-radius);
|
||||
font-family: var(--font-family);
|
||||
font-size: 10px;
|
||||
/* font-weight: bold; */
|
||||
cursor: pointer;
|
||||
transition: all 0.2s ease;
|
||||
margin-left: auto;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.countdown-btn:hover::after {
|
||||
content: "countdown";
|
||||
position: absolute;
|
||||
top: -30px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
background: var(--primary-color);
|
||||
color: var(--secondary-color);
|
||||
padding: 4px 8px;
|
||||
border-radius: 4px;
|
||||
font-size: 12px;
|
||||
font-weight: normal;
|
||||
white-space: nowrap;
|
||||
z-index: 1000;
|
||||
border: 1px solid var(--border-color);
|
||||
}
|
||||
|
||||
.auth-rules-controls {
|
||||
margin-bottom: 15px;
|
||||
}
|
||||
@@ -458,19 +805,6 @@ button:disabled {
|
||||
transition: all 0.2s ease;
|
||||
}
|
||||
|
||||
/* Main Sections Wrapper */
|
||||
.main-sections-wrapper {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
gap: var(--border-width);
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
|
||||
.flex-section {
|
||||
flex: 1;
|
||||
min-width: 300px;
|
||||
}
|
||||
|
||||
@media (max-width: 700px) {
|
||||
body {
|
||||
padding: 10px;
|
||||
|
||||
336
api/index.html
336
api/index.html
@@ -4,90 +4,165 @@
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>C-Relay Admin API</title>
|
||||
<title>C-Relay Admin</title>
|
||||
<link rel="stylesheet" href="/api/index.css">
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<h1>C-RELAY ADMIN API</h1>
|
||||
<!-- Header with title and profile display -->
|
||||
<div class="section">
|
||||
|
||||
<!-- Main Sections Wrapper -->
|
||||
<div class="main-sections-wrapper">
|
||||
|
||||
<!-- Persistent Authentication Header - Always Visible -->
|
||||
<div id="persistent-auth-container" class="section flex-section">
|
||||
<div class="user-info-container">
|
||||
<button type="button" id="login-logout-btn" class="login-logout-btn">LOGIN</button>
|
||||
<div class="user-details" id="persistent-user-details" style="display: none;">
|
||||
<div><strong>Name:</strong> <span id="persistent-user-name">Loading...</span></div>
|
||||
<div><strong>Public Key:</strong>
|
||||
<div class="user-pubkey" id="persistent-user-pubkey">Loading...</div>
|
||||
<div class="header-content">
|
||||
<div class="header-title">
|
||||
<span class="relay-letter" data-letter="R">R</span>
|
||||
<span class="relay-letter" data-letter="E">E</span>
|
||||
<span class="relay-letter" data-letter="L">L</span>
|
||||
<span class="relay-letter" data-letter="A">A</span>
|
||||
<span class="relay-letter" data-letter="Y">Y</span>
|
||||
</div>
|
||||
<div class="relay-info">
|
||||
<div id="relay-name" class="relay-name">C-Relay</div>
|
||||
<div id="relay-description" class="relay-description">Loading...</div>
|
||||
<div id="relay-pubkey-container" class="relay-pubkey-container">
|
||||
<div id="relay-pubkey" class="relay-pubkey">Loading...</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="profile-area" id="profile-area" style="display: none;">
|
||||
<div class="admin-label">admin</div>
|
||||
<div class="profile-container">
|
||||
<img id="header-user-image" class="header-user-image" alt="Profile" style="display: none;">
|
||||
<span id="header-user-name" class="header-user-name">Loading...</span>
|
||||
</div>
|
||||
<!-- Logout dropdown -->
|
||||
<div class="logout-dropdown" id="logout-dropdown" style="display: none;">
|
||||
<button type="button" id="dark-mode-btn" class="logout-btn">🌙 DARK MODE</button>
|
||||
<button type="button" id="logout-btn" class="logout-btn">LOGOUT</button>
|
||||
</div>
|
||||
<div><strong>About:</strong> <span id="persistent-user-about">Loading...</span></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<!-- Login Modal Overlay -->
|
||||
<div id="login-modal" class="login-modal-overlay" style="display: none;">
|
||||
<div class="login-modal-content">
|
||||
<div id="login-modal-container"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Login Section -->
|
||||
<div id="login-section" class="flex-section">
|
||||
<div class="section">
|
||||
<h2>NOSTR AUTHENTICATION</h2>
|
||||
<p id="login-instructions">Please login with your Nostr identity to access the admin interface.</p>
|
||||
<!-- nostr-lite login UI will be injected here -->
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Relay Connection Section -->
|
||||
<div id="relay-connection-section" class="flex-section">
|
||||
<div class="section">
|
||||
<h2>RELAY CONNECTION</h2>
|
||||
|
||||
<div class="input-group">
|
||||
<label for="relay-connection-url">Relay URL:</label>
|
||||
<input type="text" id="relay-connection-url" value="ws://localhost:8888"
|
||||
placeholder="ws://localhost:8888 or wss://relay.example.com">
|
||||
</div>
|
||||
|
||||
<div class="input-group">
|
||||
<label for="relay-pubkey-manual">Relay Pubkey (if not available via NIP-11):</label>
|
||||
<input type="text" id="relay-pubkey-manual" placeholder="64-character hex pubkey"
|
||||
pattern="[0-9a-fA-F]{64}" title="64-character hexadecimal public key">
|
||||
|
||||
</div>
|
||||
|
||||
<div class="inline-buttons">
|
||||
<button type="button" id="connect-relay-btn">CONNECT TO RELAY</button>
|
||||
<button type="button" id="disconnect-relay-btn" disabled>DISCONNECT</button>
|
||||
<button type="button" id="restart-relay-btn" disabled>RESTART RELAY</button>
|
||||
</div>
|
||||
|
||||
<div class="status disconnected" id="relay-connection-status">NOT CONNECTED</div>
|
||||
|
||||
<!-- Relay Information Display -->
|
||||
<div id="relay-info-display" class="hidden">
|
||||
<h3>Relay Information (NIP-11)</h3>
|
||||
<table class="config-table" id="relay-info-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Property</th>
|
||||
<th>Value</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="relay-info-table-body">
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
<!-- DATABASE STATISTICS Section -->
|
||||
<div class="section flex-section" id="databaseStatisticsSection" style="display: none;">
|
||||
<div class="section-header">
|
||||
<h2>DATABASE STATISTICS</h2>
|
||||
<button type="button" id="refresh-stats-btn" class="countdown-btn"></button>
|
||||
</div>
|
||||
|
||||
|
||||
<!-- Database Overview Table -->
|
||||
<div class="input-group">
|
||||
<div class="config-table-container">
|
||||
<table class="config-table" id="stats-overview-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Metric</th>
|
||||
<th>Value</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="stats-overview-table-body">
|
||||
<tr>
|
||||
<td>Database Size</td>
|
||||
<td id="db-size">-</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total Events</td>
|
||||
<td id="total-events">-</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Oldest Event</td>
|
||||
<td id="oldest-event">-</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Newest Event</td>
|
||||
<td id="newest-event">-</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Event Kind Distribution Table -->
|
||||
<div class="input-group">
|
||||
<label>Event Kind Distribution:</label>
|
||||
<div class="config-table-container">
|
||||
<table class="config-table" id="stats-kinds-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Event Kind</th>
|
||||
<th>Count</th>
|
||||
<th>Percentage</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="stats-kinds-table-body">
|
||||
<tr>
|
||||
<td colspan="3" style="text-align: center; font-style: italic;">No data loaded</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div> <!-- End Main Sections Wrapper -->
|
||||
|
||||
<!-- Time-based Statistics Table -->
|
||||
<div class="input-group">
|
||||
<label>Time-based Statistics:</label>
|
||||
<div class="config-table-container">
|
||||
<table class="config-table" id="stats-time-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Period</th>
|
||||
<th>Events</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="stats-time-table-body">
|
||||
<tr>
|
||||
<td>Last 24 Hours</td>
|
||||
<td id="events-24h">-</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Last 7 Days</td>
|
||||
<td id="events-7d">-</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Last 30 Days</td>
|
||||
<td id="events-30d">-</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Top Pubkeys Table -->
|
||||
<div class="input-group">
|
||||
<label>Top Pubkeys by Event Count:</label>
|
||||
<div class="config-table-container">
|
||||
<table class="config-table" id="stats-pubkeys-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Rank</th>
|
||||
<th>Pubkey</th>
|
||||
<th>Event Count</th>
|
||||
<th>Percentage</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="stats-pubkeys-table-body">
|
||||
<tr>
|
||||
<td colspan="4" style="text-align: center; font-style: italic;">No data loaded</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<!-- Testing Section -->
|
||||
<div id="div_config" class="section flex-section" style="display: none;">
|
||||
@@ -129,7 +204,6 @@
|
||||
<th>Rule Type</th>
|
||||
<th>Pattern Type</th>
|
||||
<th>Pattern Value</th>
|
||||
<th>Action</th>
|
||||
<th>Status</th>
|
||||
<th>Actions</th>
|
||||
</tr>
|
||||
@@ -173,132 +247,6 @@
|
||||
|
||||
|
||||
|
||||
<!-- DATABASE STATISTICS Section -->
|
||||
<div class="section">
|
||||
<div class="section-header">
|
||||
<h2>DATABASE STATISTICS</h2>
|
||||
</div>
|
||||
|
||||
|
||||
<!-- Database Overview Table -->
|
||||
<div class="input-group">
|
||||
<label>Database Overview:</label>
|
||||
<div class="config-table-container">
|
||||
<table class="config-table" id="stats-overview-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Metric</th>
|
||||
<th>Value</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="stats-overview-table-body">
|
||||
<tr>
|
||||
<td>Database Size</td>
|
||||
<td id="db-size">-</td>
|
||||
<td>Current database file size</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Total Events</td>
|
||||
<td id="total-events">-</td>
|
||||
<td>Total number of events stored</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Oldest Event</td>
|
||||
<td id="oldest-event">-</td>
|
||||
<td>Timestamp of oldest event</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Newest Event</td>
|
||||
<td id="newest-event">-</td>
|
||||
<td>Timestamp of newest event</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Event Kind Distribution Table -->
|
||||
<div class="input-group">
|
||||
<label>Event Kind Distribution:</label>
|
||||
<div class="config-table-container">
|
||||
<table class="config-table" id="stats-kinds-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Event Kind</th>
|
||||
<th>Count</th>
|
||||
<th>Percentage</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="stats-kinds-table-body">
|
||||
<tr>
|
||||
<td colspan="3" style="text-align: center; font-style: italic;">No data loaded</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Time-based Statistics Table -->
|
||||
<div class="input-group">
|
||||
<label>Time-based Statistics:</label>
|
||||
<div class="config-table-container">
|
||||
<table class="config-table" id="stats-time-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Period</th>
|
||||
<th>Events</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="stats-time-table-body">
|
||||
<tr>
|
||||
<td>Last 24 Hours</td>
|
||||
<td id="events-24h">-</td>
|
||||
<td>Events in the last day</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Last 7 Days</td>
|
||||
<td id="events-7d">-</td>
|
||||
<td>Events in the last week</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Last 30 Days</td>
|
||||
<td id="events-30d">-</td>
|
||||
<td>Events in the last month</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Top Pubkeys Table -->
|
||||
<div class="input-group">
|
||||
<label>Top Pubkeys by Event Count:</label>
|
||||
<div class="config-table-container">
|
||||
<table class="config-table" id="stats-pubkeys-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Rank</th>
|
||||
<th>Pubkey</th>
|
||||
<th>Event Count</th>
|
||||
<th>Percentage</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="stats-pubkeys-table-body">
|
||||
<tr>
|
||||
<td colspan="4" style="text-align: center; font-style: italic;">No data loaded</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Refresh Button -->
|
||||
<div class="input-group">
|
||||
<button type="button" id="refresh-stats-btn">REFRESH STATISTICS</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- NIP-17 DIRECT MESSAGES Section -->
|
||||
<div class="section" id="nip17DMSection" style="display: none;">
|
||||
|
||||
6731
api/index.js
6731
api/index.js
File diff suppressed because it is too large
Load Diff
207
build_static.sh
Executable file
207
build_static.sh
Executable file
@@ -0,0 +1,207 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Build fully static MUSL binaries for C-Relay using Alpine Docker
|
||||
# Produces truly portable binaries with zero runtime dependencies
|
||||
|
||||
set -e
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
BUILD_DIR="$SCRIPT_DIR/build"
|
||||
DOCKERFILE="$SCRIPT_DIR/Dockerfile.alpine-musl"
|
||||
|
||||
# Parse command line arguments
|
||||
DEBUG_BUILD=false
|
||||
if [[ "$1" == "--debug" ]]; then
|
||||
DEBUG_BUILD=true
|
||||
echo "=========================================="
|
||||
echo "C-Relay MUSL Static Binary Builder (DEBUG MODE)"
|
||||
echo "=========================================="
|
||||
else
|
||||
echo "=========================================="
|
||||
echo "C-Relay MUSL Static Binary Builder (PRODUCTION MODE)"
|
||||
echo "=========================================="
|
||||
fi
|
||||
echo "Project directory: $SCRIPT_DIR"
|
||||
echo "Build directory: $BUILD_DIR"
|
||||
echo "Debug build: $DEBUG_BUILD"
|
||||
echo ""
|
||||
|
||||
# Create build directory
|
||||
mkdir -p "$BUILD_DIR"
|
||||
|
||||
# Check if Docker is available
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo "ERROR: Docker is not installed or not in PATH"
|
||||
echo ""
|
||||
echo "Docker is required to build MUSL static binaries."
|
||||
echo "Please install Docker:"
|
||||
echo " - Ubuntu/Debian: sudo apt install docker.io"
|
||||
echo " - Or visit: https://docs.docker.com/engine/install/"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if Docker daemon is running
|
||||
if ! docker info &> /dev/null; then
|
||||
echo "ERROR: Docker daemon is not running or user not in docker group"
|
||||
echo ""
|
||||
echo "Please start Docker and ensure you're in the docker group:"
|
||||
echo " - sudo systemctl start docker"
|
||||
echo " - sudo usermod -aG docker $USER && newgrp docker"
|
||||
echo " - Or start Docker Desktop"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DOCKER_CMD="docker"
|
||||
|
||||
echo "✓ Docker is available and running"
|
||||
echo ""
|
||||
|
||||
# Detect architecture
|
||||
ARCH=$(uname -m)
|
||||
case "$ARCH" in
|
||||
x86_64)
|
||||
PLATFORM="linux/amd64"
|
||||
OUTPUT_NAME="c_relay_static_x86_64"
|
||||
;;
|
||||
aarch64|arm64)
|
||||
PLATFORM="linux/arm64"
|
||||
OUTPUT_NAME="c_relay_static_arm64"
|
||||
;;
|
||||
*)
|
||||
echo "WARNING: Unknown architecture: $ARCH"
|
||||
echo "Defaulting to linux/amd64"
|
||||
PLATFORM="linux/amd64"
|
||||
OUTPUT_NAME="c_relay_static_${ARCH}"
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "Building for platform: $PLATFORM"
|
||||
echo "Output binary: $OUTPUT_NAME"
|
||||
echo ""
|
||||
|
||||
# Build the Docker image
|
||||
echo "=========================================="
|
||||
echo "Step 1: Building Alpine Docker image"
|
||||
echo "=========================================="
|
||||
echo "This will:"
|
||||
echo " - Use Alpine Linux (native MUSL)"
|
||||
echo " - Build all dependencies statically"
|
||||
echo " - Compile c-relay with full static linking"
|
||||
echo ""
|
||||
|
||||
$DOCKER_CMD build \
|
||||
--platform "$PLATFORM" \
|
||||
--build-arg DEBUG_BUILD=$DEBUG_BUILD \
|
||||
-f "$DOCKERFILE" \
|
||||
-t c-relay-musl-builder:latest \
|
||||
--progress=plain \
|
||||
. || {
|
||||
echo ""
|
||||
echo "ERROR: Docker build failed"
|
||||
echo "Check the output above for details"
|
||||
exit 1
|
||||
}
|
||||
|
||||
echo ""
|
||||
echo "✓ Docker image built successfully"
|
||||
echo ""
|
||||
|
||||
# Extract the binary from the container
|
||||
echo "=========================================="
|
||||
echo "Step 2: Extracting static binary"
|
||||
echo "=========================================="
|
||||
|
||||
# Build the builder stage to extract the binary
|
||||
$DOCKER_CMD build \
|
||||
--platform "$PLATFORM" \
|
||||
--build-arg DEBUG_BUILD=$DEBUG_BUILD \
|
||||
--target builder \
|
||||
-f "$DOCKERFILE" \
|
||||
-t c-relay-static-builder-stage:latest \
|
||||
. > /dev/null 2>&1
|
||||
|
||||
# Create a temporary container to copy the binary
|
||||
CONTAINER_ID=$($DOCKER_CMD create c-relay-static-builder-stage:latest)
|
||||
|
||||
# Copy binary from container
|
||||
$DOCKER_CMD cp "$CONTAINER_ID:/build/c_relay_static" "$BUILD_DIR/$OUTPUT_NAME" || {
|
||||
echo "ERROR: Failed to extract binary from container"
|
||||
$DOCKER_CMD rm "$CONTAINER_ID" 2>/dev/null
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Clean up container
|
||||
$DOCKER_CMD rm "$CONTAINER_ID" > /dev/null
|
||||
|
||||
echo "✓ Binary extracted to: $BUILD_DIR/$OUTPUT_NAME"
|
||||
echo ""
|
||||
|
||||
# Make binary executable
|
||||
chmod +x "$BUILD_DIR/$OUTPUT_NAME"
|
||||
|
||||
# Verify the binary
|
||||
echo "=========================================="
|
||||
echo "Step 3: Verifying static binary"
|
||||
echo "=========================================="
|
||||
echo ""
|
||||
|
||||
echo "Checking for dynamic dependencies:"
|
||||
if LDD_OUTPUT=$(timeout 5 ldd "$BUILD_DIR/$OUTPUT_NAME" 2>&1); then
|
||||
if echo "$LDD_OUTPUT" | grep -q "not a dynamic executable"; then
|
||||
echo "✓ Binary is fully static (no dynamic dependencies)"
|
||||
TRULY_STATIC=true
|
||||
elif echo "$LDD_OUTPUT" | grep -q "statically linked"; then
|
||||
echo "✓ Binary is statically linked"
|
||||
TRULY_STATIC=true
|
||||
else
|
||||
echo "⚠ WARNING: Binary may have dynamic dependencies:"
|
||||
echo "$LDD_OUTPUT"
|
||||
TRULY_STATIC=false
|
||||
fi
|
||||
else
|
||||
# ldd failed or timed out - check with file command instead
|
||||
if file "$BUILD_DIR/$OUTPUT_NAME" | grep -q "statically linked"; then
|
||||
echo "✓ Binary is statically linked (verified with file command)"
|
||||
TRULY_STATIC=true
|
||||
else
|
||||
echo "⚠ Could not verify static linking (ldd check failed)"
|
||||
TRULY_STATIC=false
|
||||
fi
|
||||
fi
|
||||
echo ""
|
||||
|
||||
echo "File size: $(ls -lh "$BUILD_DIR/$OUTPUT_NAME" | awk '{print $5}')"
|
||||
echo ""
|
||||
|
||||
# Test if binary runs
|
||||
echo "Testing binary execution:"
|
||||
if "$BUILD_DIR/$OUTPUT_NAME" --version 2>&1 | head -5; then
|
||||
echo "✓ Binary executes successfully"
|
||||
else
|
||||
echo "⚠ Binary execution test failed (this may be normal if --version is not supported)"
|
||||
fi
|
||||
echo ""
|
||||
|
||||
# Summary
|
||||
echo "=========================================="
|
||||
echo "Build Summary"
|
||||
echo "=========================================="
|
||||
echo "Binary: $BUILD_DIR/$OUTPUT_NAME"
|
||||
echo "Size: $(du -h "$BUILD_DIR/$OUTPUT_NAME" | cut -f1)"
|
||||
echo "Platform: $PLATFORM"
|
||||
if [ "$DEBUG_BUILD" = true ]; then
|
||||
echo "Build Type: DEBUG (with symbols, no optimization)"
|
||||
else
|
||||
echo "Build Type: PRODUCTION (optimized, stripped)"
|
||||
fi
|
||||
if [ "$TRULY_STATIC" = true ]; then
|
||||
echo "Linkage: Fully static binary (Alpine MUSL-based)"
|
||||
echo "Portability: Works on ANY Linux distribution"
|
||||
else
|
||||
echo "Linkage: Static binary (may have minimal dependencies)"
|
||||
fi
|
||||
echo ""
|
||||
echo "✓ Build complete!"
|
||||
echo ""
|
||||
1
c_utils_lib
Submodule
1
c_utils_lib
Submodule
Submodule c_utils_lib added at 442facd7e3
28
deploy_static.sh
Executable file
28
deploy_static.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
|
||||
# C-Relay Static Binary Deployment Script
|
||||
# Deploys build/c_relay_static_x86_64 to server via ssh
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
LOCAL_BINARY="build/c_relay_static_x86_64"
|
||||
REMOTE_BINARY_PATH="/usr/local/bin/c_relay/c_relay"
|
||||
SERVICE_NAME="c-relay"
|
||||
|
||||
# Create backup
|
||||
ssh ubuntu@laantungir.com "sudo cp '$REMOTE_BINARY_PATH' '${REMOTE_BINARY_PATH}.backup.$(date +%Y%m%d_%H%M%S)'" 2>/dev/null || true
|
||||
|
||||
# Upload binary to temp location
|
||||
scp "$LOCAL_BINARY" "ubuntu@laantungir.com:/tmp/c_relay.tmp"
|
||||
|
||||
# Install binary
|
||||
ssh ubuntu@laantungir.com "sudo mv '/tmp/c_relay.tmp' '$REMOTE_BINARY_PATH'"
|
||||
ssh ubuntu@laantungir.com "sudo chown c-relay:c-relay '$REMOTE_BINARY_PATH'"
|
||||
ssh ubuntu@laantungir.com "sudo chmod +x '$REMOTE_BINARY_PATH'"
|
||||
|
||||
# Reload systemd and restart service
|
||||
ssh ubuntu@laantungir.com "sudo systemctl daemon-reload"
|
||||
ssh ubuntu@laantungir.com "sudo systemctl restart '$SERVICE_NAME'"
|
||||
|
||||
echo "Deployment complete!"
|
||||
457
docs/c_utils_lib_architecture.md
Normal file
457
docs/c_utils_lib_architecture.md
Normal file
@@ -0,0 +1,457 @@
|
||||
# c_utils_lib Architecture Plan
|
||||
|
||||
## Overview
|
||||
|
||||
`c_utils_lib` is a standalone C utility library designed to provide reusable, general-purpose functions for C projects. It serves as a learning repository and a practical toolkit for common C programming tasks.
|
||||
|
||||
## Design Philosophy
|
||||
|
||||
1. **Zero External Dependencies**: Only standard C library dependencies
|
||||
2. **Modular Design**: Each utility is independent and can be used separately
|
||||
3. **Learning-Oriented**: Well-documented code suitable for learning C
|
||||
4. **Production-Ready**: Battle-tested utilities from real projects
|
||||
5. **Cross-Platform**: Works on Linux, macOS, and other POSIX systems
|
||||
|
||||
## Repository Structure
|
||||
|
||||
```
|
||||
c_utils_lib/
|
||||
├── README.md # Main documentation
|
||||
├── LICENSE # MIT License
|
||||
├── VERSION # Current version (e.g., v0.1.0)
|
||||
├── build.sh # Build script
|
||||
├── Makefile # Build system
|
||||
├── .gitignore # Git ignore rules
|
||||
│
|
||||
├── include/ # Public headers
|
||||
│ ├── c_utils.h # Main header (includes all utilities)
|
||||
│ ├── debug.h # Debug/logging system
|
||||
│ ├── version.h # Version utilities
|
||||
│ ├── string_utils.h # String utilities (future)
|
||||
│ └── memory_utils.h # Memory utilities (future)
|
||||
│
|
||||
├── src/ # Implementation files
|
||||
│ ├── debug.c # Debug system implementation
|
||||
│ ├── version.c # Version utilities implementation
|
||||
│ ├── string_utils.c # String utilities (future)
|
||||
│ └── memory_utils.c # Memory utilities (future)
|
||||
│
|
||||
├── examples/ # Usage examples
|
||||
│ ├── debug_example.c # Debug system example
|
||||
│ ├── version_example.c # Version utilities example
|
||||
│ └── Makefile # Examples build system
|
||||
│
|
||||
├── tests/ # Unit tests
|
||||
│ ├── test_debug.c # Debug system tests
|
||||
│ ├── test_version.c # Version utilities tests
|
||||
│ ├── run_tests.sh # Test runner
|
||||
│ └── Makefile # Tests build system
|
||||
│
|
||||
└── docs/ # Additional documentation
|
||||
├── API.md # Complete API reference
|
||||
├── INTEGRATION.md # How to integrate into projects
|
||||
├── VERSIONING.md # Versioning system guide
|
||||
└── CONTRIBUTING.md # Contribution guidelines
|
||||
```
|
||||
|
||||
## Initial Utilities (v0.1.0)
|
||||
|
||||
### 1. Debug System (`debug.h`, `debug.c`)
|
||||
|
||||
**Purpose**: Unified logging and debugging system with configurable verbosity levels.
|
||||
|
||||
**Features**:
|
||||
- 5 debug levels: NONE, ERROR, WARN, INFO, DEBUG, TRACE
|
||||
- Timestamp formatting
|
||||
- File/line information at TRACE level
|
||||
- Macro-based API for zero-cost when disabled
|
||||
- Thread-safe (future enhancement)
|
||||
|
||||
**API**:
|
||||
```c
|
||||
// Initialization
|
||||
void debug_init(int level);
|
||||
|
||||
// Logging macros
|
||||
DEBUG_ERROR(format, ...);
|
||||
DEBUG_WARN(format, ...);
|
||||
DEBUG_INFO(format, ...);
|
||||
DEBUG_LOG(format, ...);
|
||||
DEBUG_TRACE(format, ...);
|
||||
|
||||
// Global debug level
|
||||
extern debug_level_t g_debug_level;
|
||||
```
|
||||
|
||||
**Usage Example**:
|
||||
```c
|
||||
#include <c_utils/debug.h>
|
||||
|
||||
int main() {
|
||||
debug_init(DEBUG_LEVEL_INFO);
|
||||
DEBUG_INFO("Application started");
|
||||
DEBUG_ERROR("Critical error: %s", error_msg);
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Version Utilities (`version.h`, `version.c`)
|
||||
|
||||
**Purpose**: Reusable versioning system for C projects using git tags.
|
||||
|
||||
**Features**:
|
||||
- Automatic version extraction from git tags
|
||||
- Semantic versioning support (MAJOR.MINOR.PATCH)
|
||||
- Version comparison functions
|
||||
- Header file generation for embedding version info
|
||||
- Build number tracking
|
||||
|
||||
**API**:
|
||||
```c
|
||||
// Version structure
|
||||
typedef struct {
|
||||
int major;
|
||||
int minor;
|
||||
int patch;
|
||||
char* git_hash;
|
||||
char* build_date;
|
||||
} version_info_t;
|
||||
|
||||
// Get version from git
|
||||
int version_get_from_git(version_info_t* version);
|
||||
|
||||
// Generate version header file
|
||||
int version_generate_header(const char* output_path, const char* prefix);
|
||||
|
||||
// Compare versions
|
||||
int version_compare(version_info_t* v1, version_info_t* v2);
|
||||
|
||||
// Format version string
|
||||
char* version_to_string(version_info_t* version);
|
||||
```
|
||||
|
||||
**Usage Example**:
|
||||
```c
|
||||
#include <c_utils/version.h>
|
||||
|
||||
// In your build system:
|
||||
version_generate_header("src/version.h", "MY_APP");
|
||||
|
||||
// In your code:
|
||||
#include "version.h"
|
||||
printf("Version: %s\n", MY_APP_VERSION);
|
||||
```
|
||||
|
||||
**Integration with Projects**:
|
||||
```bash
|
||||
# In project Makefile
|
||||
version.h:
|
||||
c_utils_lib/bin/generate_version src/version.h MY_PROJECT
|
||||
```
|
||||
|
||||
## Build System
|
||||
|
||||
### Static Library Output
|
||||
|
||||
```
|
||||
libc_utils.a # Static library for linking
|
||||
```
|
||||
|
||||
### Build Targets
|
||||
|
||||
```bash
|
||||
make # Build static library
|
||||
make examples # Build examples
|
||||
make test # Run tests
|
||||
make install # Install to system (optional)
|
||||
make clean # Clean build artifacts
|
||||
```
|
||||
|
||||
### Build Script (`build.sh`)
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# Simplified build script similar to nostr_core_lib
|
||||
|
||||
case "$1" in
|
||||
lib|"")
|
||||
make
|
||||
;;
|
||||
examples)
|
||||
make examples
|
||||
;;
|
||||
test)
|
||||
make test
|
||||
;;
|
||||
clean)
|
||||
make clean
|
||||
;;
|
||||
install)
|
||||
make install
|
||||
;;
|
||||
*)
|
||||
echo "Usage: ./build.sh [lib|examples|test|clean|install]"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
```
|
||||
|
||||
## Versioning System Design
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **Git Tags as Source of Truth**
|
||||
- Version tags: `v0.1.0`, `v0.2.0`, etc.
|
||||
- Follows semantic versioning
|
||||
|
||||
2. **Automatic Header Generation**
|
||||
- Script reads git tags
|
||||
- Generates header with version macros
|
||||
- Includes build date and git hash
|
||||
|
||||
3. **Reusable Across Projects**
|
||||
- Each project calls `version_generate_header()`
|
||||
- Customizable prefix (e.g., `C_RELAY_VERSION`, `NOSTR_CORE_VERSION`)
|
||||
- No hardcoded version numbers in source
|
||||
|
||||
### Example Generated Header
|
||||
|
||||
```c
|
||||
// Auto-generated by c_utils_lib version system
|
||||
#ifndef MY_PROJECT_VERSION_H
|
||||
#define MY_PROJECT_VERSION_H
|
||||
|
||||
#define MY_PROJECT_VERSION "v0.1.0"
|
||||
#define MY_PROJECT_VERSION_MAJOR 0
|
||||
#define MY_PROJECT_VERSION_MINOR 1
|
||||
#define MY_PROJECT_VERSION_PATCH 0
|
||||
#define MY_PROJECT_GIT_HASH "a1b2c3d"
|
||||
#define MY_PROJECT_BUILD_DATE "2025-10-15"
|
||||
|
||||
#endif
|
||||
```
|
||||
|
||||
### Integration Pattern
|
||||
|
||||
```makefile
|
||||
# In consuming project's Makefile
|
||||
VERSION_SCRIPT = c_utils_lib/bin/generate_version
|
||||
|
||||
src/version.h: .git/refs/tags/*
|
||||
$(VERSION_SCRIPT) src/version.h MY_PROJECT
|
||||
|
||||
my_app: src/version.h src/main.c
|
||||
$(CC) src/main.c -o my_app -Ic_utils_lib/include -Lc_utils_lib -lc_utils
|
||||
```
|
||||
|
||||
## Future Utilities (Roadmap)
|
||||
|
||||
### String Utilities (`string_utils.h`)
|
||||
- Safe string operations (bounds checking)
|
||||
- String trimming, splitting, joining
|
||||
- Case conversion
|
||||
- Pattern matching helpers
|
||||
|
||||
### Memory Utilities (`memory_utils.h`)
|
||||
- Safe allocation wrappers
|
||||
- Memory pool management
|
||||
- Leak detection helpers (debug builds)
|
||||
- Arena allocators
|
||||
|
||||
### Configuration Utilities (`config_utils.h`)
|
||||
- INI file parsing
|
||||
- JSON configuration (using cJSON)
|
||||
- Environment variable helpers
|
||||
- Command-line argument parsing
|
||||
|
||||
### File Utilities (`file_utils.h`)
|
||||
- Safe file operations
|
||||
- Directory traversal
|
||||
- Path manipulation
|
||||
- File watching (inotify wrapper)
|
||||
|
||||
### Time Utilities (`time_utils.h`)
|
||||
- Timestamp formatting
|
||||
- Duration calculations
|
||||
- Timer utilities
|
||||
- Rate limiting helpers
|
||||
|
||||
## Integration Guide
|
||||
|
||||
### As Git Submodule
|
||||
|
||||
```bash
|
||||
# In your project
|
||||
git submodule add https://github.com/yourusername/c_utils_lib.git
|
||||
git submodule update --init --recursive
|
||||
|
||||
# Build the library
|
||||
cd c_utils_lib && ./build.sh lib && cd ..
|
||||
|
||||
# Update your Makefile
|
||||
INCLUDES += -Ic_utils_lib/include
|
||||
LIBS += -Lc_utils_lib -lc_utils
|
||||
```
|
||||
|
||||
### In Your Makefile
|
||||
|
||||
```makefile
|
||||
# Check if c_utils_lib is built
|
||||
c_utils_lib/libc_utils.a:
|
||||
cd c_utils_lib && ./build.sh lib
|
||||
|
||||
# Link against it
|
||||
my_app: c_utils_lib/libc_utils.a src/main.c
|
||||
$(CC) src/main.c -o my_app \
|
||||
-Ic_utils_lib/include \
|
||||
-Lc_utils_lib -lc_utils
|
||||
```
|
||||
|
||||
### In Your Code
|
||||
|
||||
```c
|
||||
// Option 1: Include everything
|
||||
#include <c_utils/c_utils.h>
|
||||
|
||||
// Option 2: Include specific utilities
|
||||
#include <c_utils/debug.h>
|
||||
#include <c_utils/version.h>
|
||||
|
||||
int main() {
|
||||
debug_init(DEBUG_LEVEL_INFO);
|
||||
DEBUG_INFO("Starting application version %s", MY_APP_VERSION);
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
## Migration Plan for c-relay
|
||||
|
||||
### Phase 1: Extract Debug System
|
||||
1. Create `c_utils_lib` repository
|
||||
2. Move [`debug.c`](../src/debug.c) and [`debug.h`](../src/debug.h)
|
||||
3. Create build system
|
||||
4. Add basic tests
|
||||
|
||||
### Phase 2: Add Versioning System
|
||||
1. Extract version generation logic from c-relay
|
||||
2. Create reusable version utilities
|
||||
3. Update c-relay to use new system
|
||||
4. Update nostr_core_lib to use new system
|
||||
|
||||
### Phase 3: Add as Submodule
|
||||
1. Add `c_utils_lib` as submodule to c-relay
|
||||
2. Update c-relay Makefile
|
||||
3. Update includes in c-relay source files
|
||||
4. Remove old debug files from c-relay
|
||||
|
||||
### Phase 4: Documentation & Examples
|
||||
1. Create comprehensive README
|
||||
2. Add usage examples
|
||||
3. Write integration guide
|
||||
4. Document API
|
||||
|
||||
## Benefits
|
||||
|
||||
### For c-relay
|
||||
- Cleaner separation of concerns
|
||||
- Reusable utilities across projects
|
||||
- Easier to maintain and test
|
||||
- Consistent logging across codebase
|
||||
|
||||
### For Learning C
|
||||
- Real-world utility implementations
|
||||
- Best practices examples
|
||||
- Modular design patterns
|
||||
- Build system examples
|
||||
|
||||
### For Future Projects
|
||||
- Drop-in utility library
|
||||
- Proven, tested code
|
||||
- Consistent patterns
|
||||
- Time savings
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
- Test each utility independently
|
||||
- Mock external dependencies
|
||||
- Edge case coverage
|
||||
- Memory leak detection (valgrind)
|
||||
|
||||
### Integration Tests
|
||||
- Test with real projects (c-relay, nostr_core_lib)
|
||||
- Cross-platform testing
|
||||
- Performance benchmarks
|
||||
|
||||
### Continuous Integration
|
||||
- GitHub Actions for automated testing
|
||||
- Multiple compiler versions (gcc, clang)
|
||||
- Multiple platforms (Linux, macOS)
|
||||
- Static analysis (cppcheck, clang-tidy)
|
||||
|
||||
## Documentation Standards
|
||||
|
||||
### Code Documentation
|
||||
- Doxygen-style comments
|
||||
- Function purpose and parameters
|
||||
- Return value descriptions
|
||||
- Usage examples in comments
|
||||
|
||||
### API Documentation
|
||||
- Complete API reference in `docs/API.md`
|
||||
- Usage examples for each function
|
||||
- Common patterns and best practices
|
||||
- Migration guides
|
||||
|
||||
### Learning Resources
|
||||
- Detailed explanations of implementations
|
||||
- Links to relevant C standards
|
||||
- Common pitfalls and how to avoid them
|
||||
- Performance considerations
|
||||
|
||||
## License
|
||||
|
||||
MIT License - permissive and suitable for learning and commercial use.
|
||||
|
||||
## Version History
|
||||
|
||||
- **v0.1.0** (Planned)
|
||||
- Initial release
|
||||
- Debug system
|
||||
- Version utilities
|
||||
- Basic documentation
|
||||
|
||||
- **v0.2.0** (Future)
|
||||
- String utilities
|
||||
- Memory utilities
|
||||
- Enhanced documentation
|
||||
|
||||
- **v0.3.0** (Future)
|
||||
- Configuration utilities
|
||||
- File utilities
|
||||
- Time utilities
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. ✅ Successfully integrated into c-relay
|
||||
2. ✅ Successfully integrated into nostr_core_lib
|
||||
3. ✅ All tests passing
|
||||
4. ✅ Documentation complete
|
||||
5. ✅ Examples working
|
||||
6. ✅ Zero external dependencies (except standard library)
|
||||
7. ✅ Cross-platform compatibility verified
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. Create repository structure
|
||||
2. Implement debug system
|
||||
3. Implement version utilities
|
||||
4. Create build system
|
||||
5. Write tests
|
||||
6. Create documentation
|
||||
7. Integrate into c-relay
|
||||
8. Publish to GitHub
|
||||
|
||||
---
|
||||
|
||||
**Note**: This is a living document. Update as the library evolves and new utilities are added.
|
||||
621
docs/c_utils_lib_implementation_plan.md
Normal file
621
docs/c_utils_lib_implementation_plan.md
Normal file
@@ -0,0 +1,621 @@
|
||||
# c_utils_lib Implementation Plan
|
||||
|
||||
## Overview
|
||||
|
||||
This document provides a step-by-step implementation plan for creating the `c_utils_lib` library and integrating it into the c-relay project.
|
||||
|
||||
## Phase 1: Repository Setup & Structure
|
||||
|
||||
### Step 1.1: Create Repository Structure
|
||||
|
||||
**Location**: Create outside c-relay project (sibling directory)
|
||||
|
||||
```bash
|
||||
# Create directory structure
|
||||
mkdir -p c_utils_lib/{include,src,examples,tests,docs,bin}
|
||||
cd c_utils_lib
|
||||
|
||||
# Create subdirectories
|
||||
mkdir -p include/c_utils
|
||||
mkdir -p tests/results
|
||||
```
|
||||
|
||||
### Step 1.2: Initialize Git Repository
|
||||
|
||||
```bash
|
||||
cd c_utils_lib
|
||||
git init
|
||||
git branch -M main
|
||||
```
|
||||
|
||||
### Step 1.3: Create Core Files
|
||||
|
||||
**Files to create**:
|
||||
1. `README.md` - Main documentation
|
||||
2. `LICENSE` - MIT License
|
||||
3. `VERSION` - Version file (v0.1.0)
|
||||
4. `.gitignore` - Git ignore rules
|
||||
5. `Makefile` - Build system
|
||||
6. `build.sh` - Build script
|
||||
|
||||
## Phase 2: Debug System Implementation
|
||||
|
||||
### Step 2.1: Move Debug Files
|
||||
|
||||
**Source files** (from c-relay):
|
||||
- `src/debug.c` → `c_utils_lib/src/debug.c`
|
||||
- `src/debug.h` → `c_utils_lib/include/c_utils/debug.h`
|
||||
|
||||
**Modifications needed**:
|
||||
1. Update header guard in `debug.h`:
|
||||
```c
|
||||
#ifndef C_UTILS_DEBUG_H
|
||||
#define C_UTILS_DEBUG_H
|
||||
```
|
||||
|
||||
2. No namespace changes needed (keep simple API)
|
||||
|
||||
3. Add header documentation:
|
||||
```c
|
||||
/**
|
||||
* @file debug.h
|
||||
* @brief Debug and logging system with configurable verbosity levels
|
||||
*
|
||||
* Provides a simple, efficient logging system with 5 levels:
|
||||
* - ERROR: Critical errors
|
||||
* - WARN: Warnings
|
||||
* - INFO: Informational messages
|
||||
* - DEBUG: Debug messages
|
||||
* - TRACE: Detailed trace with file:line info
|
||||
*/
|
||||
```
|
||||
|
||||
### Step 2.2: Create Main Header
|
||||
|
||||
**File**: `include/c_utils/c_utils.h`
|
||||
|
||||
```c
|
||||
#ifndef C_UTILS_H
|
||||
#define C_UTILS_H
|
||||
|
||||
/**
|
||||
* @file c_utils.h
|
||||
* @brief Main header for c_utils_lib - includes all utilities
|
||||
*
|
||||
* Include this header to access all c_utils_lib functionality.
|
||||
* Alternatively, include specific headers for modular usage.
|
||||
*/
|
||||
|
||||
// Version information
|
||||
#define C_UTILS_VERSION "v0.1.0"
|
||||
#define C_UTILS_VERSION_MAJOR 0
|
||||
#define C_UTILS_VERSION_MINOR 1
|
||||
#define C_UTILS_VERSION_PATCH 0
|
||||
|
||||
// Include all utilities
|
||||
#include "debug.h"
|
||||
#include "version.h"
|
||||
|
||||
#endif /* C_UTILS_H */
|
||||
```
|
||||
|
||||
## Phase 3: Version Utilities Implementation
|
||||
|
||||
### Step 3.1: Design Version API
|
||||
|
||||
**File**: `include/c_utils/version.h`
|
||||
|
||||
```c
|
||||
#ifndef C_UTILS_VERSION_H
|
||||
#define C_UTILS_VERSION_H
|
||||
|
||||
#include <time.h>
|
||||
|
||||
/**
|
||||
* @brief Version information structure
|
||||
*/
|
||||
typedef struct {
|
||||
int major;
|
||||
int minor;
|
||||
int patch;
|
||||
char git_hash[41]; // SHA-1 hash (40 chars + null)
|
||||
char build_date[32]; // ISO 8601 format
|
||||
char version_string[64]; // "vX.Y.Z" format
|
||||
} version_info_t;
|
||||
|
||||
/**
|
||||
* @brief Extract version from git tags
|
||||
* @param version Output version structure
|
||||
* @return 0 on success, -1 on error
|
||||
*/
|
||||
int version_get_from_git(version_info_t* version);
|
||||
|
||||
/**
|
||||
* @brief Generate version header file for a project
|
||||
* @param output_path Path to output header file
|
||||
* @param prefix Prefix for macros (e.g., "MY_APP")
|
||||
* @return 0 on success, -1 on error
|
||||
*/
|
||||
int version_generate_header(const char* output_path, const char* prefix);
|
||||
|
||||
/**
|
||||
* @brief Compare two versions
|
||||
* @return -1 if v1 < v2, 0 if equal, 1 if v1 > v2
|
||||
*/
|
||||
int version_compare(const version_info_t* v1, const version_info_t* v2);
|
||||
|
||||
/**
|
||||
* @brief Format version as string
|
||||
* @param version Version structure
|
||||
* @param buffer Output buffer
|
||||
* @param buffer_size Size of output buffer
|
||||
* @return Number of characters written
|
||||
*/
|
||||
int version_to_string(const version_info_t* version, char* buffer, size_t buffer_size);
|
||||
|
||||
#endif /* C_UTILS_VERSION_H */
|
||||
```
|
||||
|
||||
### Step 3.2: Implement Version Utilities
|
||||
|
||||
**File**: `src/version.c`
|
||||
|
||||
Key functions to implement:
|
||||
1. `version_get_from_git()` - Execute `git describe --tags` and parse
|
||||
2. `version_generate_header()` - Generate header file with macros
|
||||
3. `version_compare()` - Semantic version comparison
|
||||
4. `version_to_string()` - Format version string
|
||||
|
||||
### Step 3.3: Create Version Generation Script
|
||||
|
||||
**File**: `bin/generate_version`
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# Generate version header for a project
|
||||
|
||||
OUTPUT_FILE="$1"
|
||||
PREFIX="$2"
|
||||
|
||||
if [ -z "$OUTPUT_FILE" ] || [ -z "$PREFIX" ]; then
|
||||
echo "Usage: $0 <output_file> <prefix>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get version from git
|
||||
if [ -d .git ]; then
|
||||
VERSION=$(git describe --tags --always 2>/dev/null || echo "v0.0.0")
|
||||
GIT_HASH=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
|
||||
else
|
||||
VERSION="v0.0.0"
|
||||
GIT_HASH="unknown"
|
||||
fi
|
||||
|
||||
# Parse version
|
||||
CLEAN_VERSION=$(echo "$VERSION" | sed 's/^v//' | cut -d- -f1)
|
||||
MAJOR=$(echo "$CLEAN_VERSION" | cut -d. -f1)
|
||||
MINOR=$(echo "$CLEAN_VERSION" | cut -d. -f2)
|
||||
PATCH=$(echo "$CLEAN_VERSION" | cut -d. -f3)
|
||||
BUILD_DATE=$(date -u +"%Y-%m-%d %H:%M:%S UTC")
|
||||
|
||||
# Generate header
|
||||
cat > "$OUTPUT_FILE" << EOF
|
||||
/* Auto-generated by c_utils_lib version system */
|
||||
/* DO NOT EDIT - This file is automatically generated */
|
||||
|
||||
#ifndef ${PREFIX}_VERSION_H
|
||||
#define ${PREFIX}_VERSION_H
|
||||
|
||||
#define ${PREFIX}_VERSION "v${CLEAN_VERSION}"
|
||||
#define ${PREFIX}_VERSION_MAJOR ${MAJOR}
|
||||
#define ${PREFIX}_VERSION_MINOR ${MINOR}
|
||||
#define ${PREFIX}_VERSION_PATCH ${PATCH}
|
||||
#define ${PREFIX}_GIT_HASH "${GIT_HASH}"
|
||||
#define ${PREFIX}_BUILD_DATE "${BUILD_DATE}"
|
||||
|
||||
#endif /* ${PREFIX}_VERSION_H */
|
||||
EOF
|
||||
|
||||
echo "Generated $OUTPUT_FILE with version v${CLEAN_VERSION}"
|
||||
```
|
||||
|
||||
## Phase 4: Build System
|
||||
|
||||
### Step 4.1: Create Makefile
|
||||
|
||||
**File**: `Makefile`
|
||||
|
||||
```makefile
|
||||
# c_utils_lib Makefile
|
||||
|
||||
CC = gcc
|
||||
AR = ar
|
||||
CFLAGS = -Wall -Wextra -std=c99 -O2 -g
|
||||
INCLUDES = -Iinclude
|
||||
|
||||
# Directories
|
||||
SRC_DIR = src
|
||||
INCLUDE_DIR = include
|
||||
BUILD_DIR = build
|
||||
EXAMPLES_DIR = examples
|
||||
TESTS_DIR = tests
|
||||
|
||||
# Source files
|
||||
SOURCES = $(wildcard $(SRC_DIR)/*.c)
|
||||
OBJECTS = $(SOURCES:$(SRC_DIR)/%.c=$(BUILD_DIR)/%.o)
|
||||
|
||||
# Output library
|
||||
LIBRARY = libc_utils.a
|
||||
|
||||
# Default target
|
||||
all: $(LIBRARY)
|
||||
|
||||
# Create build directory
|
||||
$(BUILD_DIR):
|
||||
mkdir -p $(BUILD_DIR)
|
||||
|
||||
# Compile source files
|
||||
$(BUILD_DIR)/%.o: $(SRC_DIR)/%.c | $(BUILD_DIR)
|
||||
$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@
|
||||
|
||||
# Create static library
|
||||
$(LIBRARY): $(OBJECTS)
|
||||
$(AR) rcs $@ $^
|
||||
@echo "Built $(LIBRARY)"
|
||||
|
||||
# Build examples
|
||||
examples: $(LIBRARY)
|
||||
$(MAKE) -C $(EXAMPLES_DIR)
|
||||
|
||||
# Run tests
|
||||
test: $(LIBRARY)
|
||||
$(MAKE) -C $(TESTS_DIR)
|
||||
$(TESTS_DIR)/run_tests.sh
|
||||
|
||||
# Install to system (optional)
|
||||
install: $(LIBRARY)
|
||||
install -d /usr/local/lib
|
||||
install -m 644 $(LIBRARY) /usr/local/lib/
|
||||
install -d /usr/local/include/c_utils
|
||||
install -m 644 $(INCLUDE_DIR)/c_utils/*.h /usr/local/include/c_utils/
|
||||
@echo "Installed to /usr/local"
|
||||
|
||||
# Uninstall from system
|
||||
uninstall:
|
||||
rm -f /usr/local/lib/$(LIBRARY)
|
||||
rm -rf /usr/local/include/c_utils
|
||||
@echo "Uninstalled from /usr/local"
|
||||
|
||||
# Clean build artifacts
|
||||
clean:
|
||||
rm -rf $(BUILD_DIR) $(LIBRARY)
|
||||
$(MAKE) -C $(EXAMPLES_DIR) clean 2>/dev/null || true
|
||||
$(MAKE) -C $(TESTS_DIR) clean 2>/dev/null || true
|
||||
|
||||
# Help
|
||||
help:
|
||||
@echo "c_utils_lib Build System"
|
||||
@echo ""
|
||||
@echo "Targets:"
|
||||
@echo " all Build static library (default)"
|
||||
@echo " examples Build examples"
|
||||
@echo " test Run tests"
|
||||
@echo " install Install to /usr/local"
|
||||
@echo " uninstall Remove from /usr/local"
|
||||
@echo " clean Clean build artifacts"
|
||||
@echo " help Show this help"
|
||||
|
||||
.PHONY: all examples test install uninstall clean help
|
||||
```
|
||||
|
||||
### Step 4.2: Create Build Script
|
||||
|
||||
**File**: `build.sh`
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# c_utils_lib build script
|
||||
|
||||
set -e
|
||||
|
||||
case "$1" in
|
||||
lib|"")
|
||||
echo "Building c_utils_lib..."
|
||||
make
|
||||
;;
|
||||
examples)
|
||||
echo "Building examples..."
|
||||
make examples
|
||||
;;
|
||||
test)
|
||||
echo "Running tests..."
|
||||
make test
|
||||
;;
|
||||
clean)
|
||||
echo "Cleaning..."
|
||||
make clean
|
||||
;;
|
||||
install)
|
||||
echo "Installing..."
|
||||
make install
|
||||
;;
|
||||
*)
|
||||
echo "Usage: ./build.sh [lib|examples|test|clean|install]"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "Done!"
|
||||
```
|
||||
|
||||
## Phase 5: Examples & Tests
|
||||
|
||||
### Step 5.1: Create Debug Example
|
||||
|
||||
**File**: `examples/debug_example.c`
|
||||
|
||||
```c
|
||||
#include <c_utils/debug.h>
|
||||
|
||||
int main() {
|
||||
// Initialize with INFO level
|
||||
debug_init(DEBUG_LEVEL_INFO);
|
||||
|
||||
DEBUG_INFO("Application started");
|
||||
DEBUG_WARN("This is a warning");
|
||||
DEBUG_ERROR("This is an error");
|
||||
|
||||
// This won't print (level too high)
|
||||
DEBUG_LOG("This debug message won't show");
|
||||
|
||||
// Change level to DEBUG
|
||||
g_debug_level = DEBUG_LEVEL_DEBUG;
|
||||
DEBUG_LOG("Now debug messages show");
|
||||
|
||||
// Change to TRACE to see file:line info
|
||||
g_debug_level = DEBUG_LEVEL_TRACE;
|
||||
DEBUG_TRACE("Trace with file:line information");
|
||||
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
### Step 5.2: Create Version Example
|
||||
|
||||
**File**: `examples/version_example.c`
|
||||
|
||||
```c
|
||||
#include <c_utils/version.h>
|
||||
#include <stdio.h>
|
||||
|
||||
int main() {
|
||||
version_info_t version;
|
||||
|
||||
// Get version from git
|
||||
if (version_get_from_git(&version) == 0) {
|
||||
char version_str[64];
|
||||
version_to_string(&version, version_str, sizeof(version_str));
|
||||
|
||||
printf("Version: %s\n", version_str);
|
||||
printf("Git Hash: %s\n", version.git_hash);
|
||||
printf("Build Date: %s\n", version.build_date);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
### Step 5.3: Create Test Suite
|
||||
|
||||
**File**: `tests/test_debug.c`
|
||||
|
||||
```c
|
||||
#include <c_utils/debug.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
int test_debug_init() {
|
||||
debug_init(DEBUG_LEVEL_INFO);
|
||||
return (g_debug_level == DEBUG_LEVEL_INFO) ? 0 : -1;
|
||||
}
|
||||
|
||||
int test_debug_levels() {
|
||||
// Test that higher levels don't print at lower settings
|
||||
debug_init(DEBUG_LEVEL_ERROR);
|
||||
// Would need to capture stdout to verify
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main() {
|
||||
int failed = 0;
|
||||
|
||||
printf("Running debug tests...\n");
|
||||
|
||||
if (test_debug_init() != 0) {
|
||||
printf("FAIL: test_debug_init\n");
|
||||
failed++;
|
||||
} else {
|
||||
printf("PASS: test_debug_init\n");
|
||||
}
|
||||
|
||||
if (test_debug_levels() != 0) {
|
||||
printf("FAIL: test_debug_levels\n");
|
||||
failed++;
|
||||
} else {
|
||||
printf("PASS: test_debug_levels\n");
|
||||
}
|
||||
|
||||
return failed;
|
||||
}
|
||||
```
|
||||
|
||||
## Phase 6: Documentation
|
||||
|
||||
### Step 6.1: Create README.md
|
||||
|
||||
Key sections:
|
||||
1. Overview and purpose
|
||||
2. Quick start guide
|
||||
3. Installation instructions
|
||||
4. Usage examples
|
||||
5. API reference (brief)
|
||||
6. Integration guide
|
||||
7. Contributing guidelines
|
||||
8. License
|
||||
|
||||
### Step 6.2: Create API Documentation
|
||||
|
||||
**File**: `docs/API.md`
|
||||
|
||||
Complete API reference with:
|
||||
- Function signatures
|
||||
- Parameter descriptions
|
||||
- Return values
|
||||
- Usage examples
|
||||
- Common patterns
|
||||
|
||||
### Step 6.3: Create Integration Guide
|
||||
|
||||
**File**: `docs/INTEGRATION.md`
|
||||
|
||||
How to integrate into projects:
|
||||
1. As git submodule
|
||||
2. Makefile integration
|
||||
3. Code examples
|
||||
4. Migration from standalone utilities
|
||||
|
||||
## Phase 7: Integration with c-relay
|
||||
|
||||
### Step 7.1: Add as Submodule
|
||||
|
||||
```bash
|
||||
cd /path/to/c-relay
|
||||
git submodule add <repo-url> c_utils_lib
|
||||
git submodule update --init --recursive
|
||||
```
|
||||
|
||||
### Step 7.2: Update c-relay Makefile
|
||||
|
||||
```makefile
|
||||
# Add to c-relay Makefile
|
||||
C_UTILS_LIB = c_utils_lib/libc_utils.a
|
||||
|
||||
# Update includes
|
||||
INCLUDES += -Ic_utils_lib/include
|
||||
|
||||
# Update libs
|
||||
LIBS += -Lc_utils_lib -lc_utils
|
||||
|
||||
# Add dependency
|
||||
$(C_UTILS_LIB):
|
||||
cd c_utils_lib && ./build.sh lib
|
||||
|
||||
# Update main target
|
||||
$(TARGET): $(C_UTILS_LIB) ...
|
||||
```
|
||||
|
||||
### Step 7.3: Update c-relay Source Files
|
||||
|
||||
**Changes needed**:
|
||||
|
||||
1. Update includes:
|
||||
```c
|
||||
// Old
|
||||
#include "debug.h"
|
||||
|
||||
// New
|
||||
#include <c_utils/debug.h>
|
||||
```
|
||||
|
||||
2. Remove old debug files:
|
||||
```bash
|
||||
git rm src/debug.c src/debug.h
|
||||
```
|
||||
|
||||
3. Update all files that use debug system:
|
||||
- `src/main.c`
|
||||
- `src/config.c`
|
||||
- `src/dm_admin.c`
|
||||
- `src/websockets.c`
|
||||
- `src/subscriptions.c`
|
||||
- Any other files using DEBUG_* macros
|
||||
|
||||
### Step 7.4: Test Integration
|
||||
|
||||
```bash
|
||||
cd c-relay
|
||||
make clean
|
||||
make
|
||||
./make_and_restart_relay.sh
|
||||
```
|
||||
|
||||
Verify:
|
||||
- Compilation succeeds
|
||||
- Debug output works correctly
|
||||
- No functionality regressions
|
||||
|
||||
## Phase 8: Version System Integration
|
||||
|
||||
### Step 8.1: Update c-relay Makefile for Versioning
|
||||
|
||||
```makefile
|
||||
# Add version generation
|
||||
src/version.h: .git/refs/tags/*
|
||||
c_utils_lib/bin/generate_version src/version.h C_RELAY
|
||||
|
||||
# Add dependency
|
||||
$(TARGET): src/version.h ...
|
||||
```
|
||||
|
||||
### Step 8.2: Update c-relay to Use Generated Version
|
||||
|
||||
Replace hardcoded version in `src/main.h` with:
|
||||
```c
|
||||
#include "version.h"
|
||||
// Use C_RELAY_VERSION instead of hardcoded VERSION
|
||||
```
|
||||
|
||||
## Timeline Estimate
|
||||
|
||||
- **Phase 1**: Repository Setup - 1 hour
|
||||
- **Phase 2**: Debug System - 2 hours
|
||||
- **Phase 3**: Version Utilities - 4 hours
|
||||
- **Phase 4**: Build System - 2 hours
|
||||
- **Phase 5**: Examples & Tests - 3 hours
|
||||
- **Phase 6**: Documentation - 3 hours
|
||||
- **Phase 7**: c-relay Integration - 2 hours
|
||||
- **Phase 8**: Version Integration - 2 hours
|
||||
|
||||
**Total**: ~19 hours
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] c_utils_lib builds successfully
|
||||
- [ ] All tests pass
|
||||
- [ ] Examples compile and run
|
||||
- [ ] c-relay integrates successfully
|
||||
- [ ] Debug output works in c-relay
|
||||
- [ ] Version generation works
|
||||
- [ ] Documentation complete
|
||||
- [ ] No regressions in c-relay functionality
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. Review this plan with stakeholders
|
||||
2. Create repository structure
|
||||
3. Implement debug system
|
||||
4. Implement version utilities
|
||||
5. Create build system
|
||||
6. Write tests and examples
|
||||
7. Create documentation
|
||||
8. Integrate into c-relay
|
||||
9. Test thoroughly
|
||||
10. Publish to GitHub
|
||||
|
||||
## Notes
|
||||
|
||||
- Keep the API simple and intuitive
|
||||
- Focus on zero external dependencies
|
||||
- Prioritize learning value in code comments
|
||||
- Make integration as easy as possible
|
||||
- Document everything thoroughly
|
||||
562
docs/debug_system.md
Normal file
562
docs/debug_system.md
Normal file
@@ -0,0 +1,562 @@
|
||||
# Simple Debug System Proposal
|
||||
|
||||
## Overview
|
||||
|
||||
A minimal debug system with 6 levels (0-5) controlled by a single `--debug-level` flag. TRACE level (5) automatically includes file:line information for ALL messages. Uses compile-time macros to ensure **zero performance impact and zero size increase** in production builds.
|
||||
|
||||
## Debug Levels
|
||||
|
||||
```c
|
||||
typedef enum {
|
||||
DEBUG_LEVEL_NONE = 0, // Production: no debug output
|
||||
DEBUG_LEVEL_ERROR = 1, // Errors only
|
||||
DEBUG_LEVEL_WARN = 2, // Errors + Warnings
|
||||
DEBUG_LEVEL_INFO = 3, // Errors + Warnings + Info
|
||||
DEBUG_LEVEL_DEBUG = 4, // All above + Debug messages
|
||||
DEBUG_LEVEL_TRACE = 5 // All above + Trace (very verbose)
|
||||
} debug_level_t;
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Production (default - no debug output)
|
||||
./c_relay_x86
|
||||
|
||||
# Show errors only
|
||||
./c_relay_x86 --debug-level=1
|
||||
|
||||
# Show errors and warnings
|
||||
./c_relay_x86 --debug-level=2
|
||||
|
||||
# Show errors, warnings, and info (recommended for development)
|
||||
./c_relay_x86 --debug-level=3
|
||||
|
||||
# Show all debug messages
|
||||
./c_relay_x86 --debug-level=4
|
||||
|
||||
# Show everything including trace with file:line (very verbose)
|
||||
./c_relay_x86 --debug-level=5
|
||||
```
|
||||
|
||||
## Implementation
|
||||
|
||||
### 1. Header File (`src/debug.h`)
|
||||
|
||||
```c
|
||||
#ifndef DEBUG_H
|
||||
#define DEBUG_H
|
||||
|
||||
#include <stdio.h>
|
||||
#include <time.h>
|
||||
|
||||
// Debug levels
|
||||
typedef enum {
|
||||
DEBUG_LEVEL_NONE = 0,
|
||||
DEBUG_LEVEL_ERROR = 1,
|
||||
DEBUG_LEVEL_WARN = 2,
|
||||
DEBUG_LEVEL_INFO = 3,
|
||||
DEBUG_LEVEL_DEBUG = 4,
|
||||
DEBUG_LEVEL_TRACE = 5
|
||||
} debug_level_t;
|
||||
|
||||
// Global debug level (set at runtime via CLI)
|
||||
extern debug_level_t g_debug_level;
|
||||
|
||||
// Initialize debug system
|
||||
void debug_init(int level);
|
||||
|
||||
// Core logging function
|
||||
void debug_log(debug_level_t level, const char* file, int line, const char* format, ...);
|
||||
|
||||
// Convenience macros that check level before calling
|
||||
// Note: TRACE level (5) and above include file:line information for ALL messages
|
||||
#define DEBUG_ERROR(...) \
|
||||
do { if (g_debug_level >= DEBUG_LEVEL_ERROR) debug_log(DEBUG_LEVEL_ERROR, __FILE__, __LINE__, __VA_ARGS__); } while(0)
|
||||
|
||||
#define DEBUG_WARN(...) \
|
||||
do { if (g_debug_level >= DEBUG_LEVEL_WARN) debug_log(DEBUG_LEVEL_WARN, __FILE__, __LINE__, __VA_ARGS__); } while(0)
|
||||
|
||||
#define DEBUG_INFO(...) \
|
||||
do { if (g_debug_level >= DEBUG_LEVEL_INFO) debug_log(DEBUG_LEVEL_INFO, __FILE__, __LINE__, __VA_ARGS__); } while(0)
|
||||
|
||||
#define DEBUG_LOG(...) \
|
||||
do { if (g_debug_level >= DEBUG_LEVEL_DEBUG) debug_log(DEBUG_LEVEL_DEBUG, __FILE__, __LINE__, __VA_ARGS__); } while(0)
|
||||
|
||||
#define DEBUG_TRACE(...) \
|
||||
do { if (g_debug_level >= DEBUG_LEVEL_TRACE) debug_log(DEBUG_LEVEL_TRACE, __FILE__, __LINE__, __VA_ARGS__); } while(0)
|
||||
|
||||
#endif /* DEBUG_H */
|
||||
```
|
||||
|
||||
### 2. Implementation File (`src/debug.c`)
|
||||
|
||||
```c
|
||||
#include "debug.h"
|
||||
#include <stdarg.h>
|
||||
#include <string.h>
|
||||
|
||||
// Global debug level (default: no debug output)
|
||||
debug_level_t g_debug_level = DEBUG_LEVEL_NONE;
|
||||
|
||||
void debug_init(int level) {
|
||||
if (level < 0) level = 0;
|
||||
if (level > 5) level = 5;
|
||||
g_debug_level = (debug_level_t)level;
|
||||
}
|
||||
|
||||
void debug_log(debug_level_t level, const char* file, int line, const char* format, ...) {
|
||||
// Get timestamp
|
||||
time_t now = time(NULL);
|
||||
struct tm* tm_info = localtime(&now);
|
||||
char timestamp[32];
|
||||
strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);
|
||||
|
||||
// Get level string
|
||||
const char* level_str = "UNKNOWN";
|
||||
switch (level) {
|
||||
case DEBUG_LEVEL_ERROR: level_str = "ERROR"; break;
|
||||
case DEBUG_LEVEL_WARN: level_str = "WARN "; break;
|
||||
case DEBUG_LEVEL_INFO: level_str = "INFO "; break;
|
||||
case DEBUG_LEVEL_DEBUG: level_str = "DEBUG"; break;
|
||||
case DEBUG_LEVEL_TRACE: level_str = "TRACE"; break;
|
||||
default: break;
|
||||
}
|
||||
|
||||
// Print prefix with timestamp and level
|
||||
printf("[%s] [%s] ", timestamp, level_str);
|
||||
|
||||
// Print source location when debug level is TRACE (5) or higher
|
||||
if (file && g_debug_level >= DEBUG_LEVEL_TRACE) {
|
||||
// Extract just the filename (not full path)
|
||||
const char* filename = strrchr(file, '/');
|
||||
filename = filename ? filename + 1 : file;
|
||||
printf("[%s:%d] ", filename, line);
|
||||
}
|
||||
|
||||
// Print message
|
||||
va_list args;
|
||||
va_start(args, format);
|
||||
vprintf(format, args);
|
||||
va_end(args);
|
||||
|
||||
printf("\n");
|
||||
fflush(stdout);
|
||||
}
|
||||
```
|
||||
|
||||
### 3. CLI Argument Parsing (add to `src/main.c`)
|
||||
|
||||
```c
|
||||
// In main() function, add to argument parsing:
|
||||
|
||||
int debug_level = 0; // Default: no debug output
|
||||
|
||||
for (int i = 1; i < argc; i++) {
|
||||
if (strncmp(argv[i], "--debug-level=", 14) == 0) {
|
||||
debug_level = atoi(argv[i] + 14);
|
||||
if (debug_level < 0) debug_level = 0;
|
||||
if (debug_level > 5) debug_level = 5;
|
||||
}
|
||||
// ... other arguments ...
|
||||
}
|
||||
|
||||
// Initialize debug system
|
||||
debug_init(debug_level);
|
||||
```
|
||||
|
||||
### 4. Update Makefile
|
||||
|
||||
```makefile
|
||||
# Add debug.c to source files
|
||||
MAIN_SRC = src/main.c src/config.c src/debug.c src/dm_admin.c src/request_validator.c ...
|
||||
```
|
||||
|
||||
## Migration Strategy
|
||||
|
||||
### Keep Existing Functions
|
||||
|
||||
The existing `log_*` functions can remain as wrappers:
|
||||
|
||||
```c
|
||||
// src/main.c - Update existing functions
|
||||
// Note: These don't include file:line since they're wrappers
|
||||
void log_info(const char* message) {
|
||||
if (g_debug_level >= DEBUG_LEVEL_INFO) {
|
||||
debug_log(DEBUG_LEVEL_INFO, NULL, 0, "%s", message);
|
||||
}
|
||||
}
|
||||
|
||||
void log_error(const char* message) {
|
||||
if (g_debug_level >= DEBUG_LEVEL_ERROR) {
|
||||
debug_log(DEBUG_LEVEL_ERROR, NULL, 0, "%s", message);
|
||||
}
|
||||
}
|
||||
|
||||
void log_warning(const char* message) {
|
||||
if (g_debug_level >= DEBUG_LEVEL_WARN) {
|
||||
debug_log(DEBUG_LEVEL_WARN, NULL, 0, "%s", message);
|
||||
}
|
||||
}
|
||||
|
||||
void log_success(const char* message) {
|
||||
if (g_debug_level >= DEBUG_LEVEL_INFO) {
|
||||
debug_log(DEBUG_LEVEL_INFO, NULL, 0, "✓ %s", message);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Gradual Migration
|
||||
|
||||
Gradually replace log calls with debug macros:
|
||||
|
||||
```c
|
||||
// Before:
|
||||
log_info("Starting WebSocket relay server");
|
||||
|
||||
// After:
|
||||
DEBUG_INFO("Starting WebSocket relay server");
|
||||
|
||||
// Before:
|
||||
log_error("Failed to initialize database");
|
||||
|
||||
// After:
|
||||
DEBUG_ERROR("Failed to initialize database");
|
||||
```
|
||||
|
||||
### Add New Debug Levels
|
||||
|
||||
Add debug and trace messages where needed:
|
||||
|
||||
```c
|
||||
// Detailed debugging
|
||||
DEBUG_LOG("Processing subscription: %s", sub_id);
|
||||
DEBUG_LOG("Filter count: %d", filter_count);
|
||||
|
||||
// Very verbose tracing
|
||||
DEBUG_TRACE("Entering handle_req_message()");
|
||||
DEBUG_TRACE("Subscription ID validated: %s", sub_id);
|
||||
DEBUG_TRACE("Exiting handle_req_message()");
|
||||
```
|
||||
## Manual Guards for Expensive Operations
|
||||
|
||||
### The Problem
|
||||
|
||||
Debug macros use **runtime checks**, which means function arguments are always evaluated:
|
||||
|
||||
```c
|
||||
// ❌ BAD: Database query executes even when debug level is 0
|
||||
DEBUG_LOG("Count: %d", expensive_database_query());
|
||||
```
|
||||
|
||||
The `expensive_database_query()` will **always execute** because function arguments are evaluated before the `if` check inside the macro.
|
||||
|
||||
### The Solution: Manual Guards
|
||||
|
||||
For expensive operations (database queries, file I/O, complex calculations), use manual guards:
|
||||
|
||||
```c
|
||||
// ✅ GOOD: Query only executes when debugging is enabled
|
||||
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
|
||||
int count = expensive_database_query();
|
||||
DEBUG_LOG("Count: %d", count);
|
||||
}
|
||||
```
|
||||
|
||||
### Standardized Comment Format
|
||||
|
||||
To make temporary debug guards easy to find and remove, use this standardized format:
|
||||
|
||||
```c
|
||||
// DEBUG_GUARD_START
|
||||
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
|
||||
// Expensive operation here
|
||||
sqlite3_stmt* stmt;
|
||||
const char* sql = "SELECT COUNT(*) FROM events";
|
||||
int count = 0;
|
||||
if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) == SQLITE_OK) {
|
||||
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
count = sqlite3_column_int(stmt, 0);
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
}
|
||||
DEBUG_LOG("Event count: %d", count);
|
||||
}
|
||||
// DEBUG_GUARD_END
|
||||
```
|
||||
|
||||
### Easy Removal
|
||||
|
||||
When you're done debugging, find and remove all temporary guards:
|
||||
|
||||
```bash
|
||||
# Find all debug guards
|
||||
grep -n "DEBUG_GUARD_START" src/*.c
|
||||
|
||||
# Remove guards with sed (between START and END markers)
|
||||
sed -i '/DEBUG_GUARD_START/,/DEBUG_GUARD_END/d' src/config.c
|
||||
```
|
||||
|
||||
Or use a simple script:
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# remove_debug_guards.sh
|
||||
for file in src/*.c; do
|
||||
sed -i '/DEBUG_GUARD_START/,/DEBUG_GUARD_END/d' "$file"
|
||||
echo "Removed debug guards from $file"
|
||||
done
|
||||
```
|
||||
|
||||
### When to Use Manual Guards
|
||||
|
||||
Use manual guards for:
|
||||
- ✅ Database queries
|
||||
- ✅ File I/O operations
|
||||
- ✅ Network requests
|
||||
- ✅ Complex calculations
|
||||
- ✅ Memory allocations for debug data
|
||||
- ✅ String formatting with multiple operations
|
||||
|
||||
Don't need guards for:
|
||||
- ❌ Simple variable access
|
||||
- ❌ Basic arithmetic
|
||||
- ❌ String literals
|
||||
- ❌ Function calls that are already cheap
|
||||
|
||||
### Example: Database Query Guard
|
||||
|
||||
```c
|
||||
// DEBUG_GUARD_START
|
||||
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
|
||||
sqlite3_stmt* count_stmt;
|
||||
const char* count_sql = "SELECT COUNT(*) FROM config";
|
||||
int config_count = 0;
|
||||
|
||||
if (sqlite3_prepare_v2(g_db, count_sql, -1, &count_stmt, NULL) == SQLITE_OK) {
|
||||
if (sqlite3_step(count_stmt) == SQLITE_ROW) {
|
||||
config_count = sqlite3_column_int(count_stmt, 0);
|
||||
}
|
||||
sqlite3_finalize(count_stmt);
|
||||
}
|
||||
|
||||
DEBUG_LOG("Config table has %d rows", config_count);
|
||||
}
|
||||
// DEBUG_GUARD_END
|
||||
```
|
||||
|
||||
### Example: Complex String Formatting Guard
|
||||
|
||||
```c
|
||||
// DEBUG_GUARD_START
|
||||
if (g_debug_level >= DEBUG_LEVEL_TRACE) {
|
||||
char filter_str[1024] = {0};
|
||||
int offset = 0;
|
||||
|
||||
for (int i = 0; i < filter_count && offset < sizeof(filter_str) - 1; i++) {
|
||||
offset += snprintf(filter_str + offset, sizeof(filter_str) - offset,
|
||||
"Filter %d: kind=%d, author=%s; ",
|
||||
i, filters[i].kind, filters[i].author);
|
||||
}
|
||||
|
||||
DEBUG_TRACE("Processing filters: %s", filter_str);
|
||||
}
|
||||
// DEBUG_GUARD_END
|
||||
```
|
||||
|
||||
### Alternative: Compile-Time Guards
|
||||
|
||||
For permanent debug code that should be completely removed in production builds, use compile-time guards:
|
||||
|
||||
```c
|
||||
#ifdef ENABLE_DEBUG_CODE
|
||||
// This code is completely removed when ENABLE_DEBUG_CODE is not defined
|
||||
int count = expensive_database_query();
|
||||
DEBUG_LOG("Count: %d", count);
|
||||
#endif
|
||||
```
|
||||
|
||||
Build with debug code:
|
||||
```bash
|
||||
make CFLAGS="-DENABLE_DEBUG_CODE"
|
||||
```
|
||||
|
||||
Build without debug code (production):
|
||||
```bash
|
||||
make # No debug code compiled in
|
||||
```
|
||||
|
||||
### Best Practices
|
||||
|
||||
1. **Always use standardized markers** (`DEBUG_GUARD_START`/`DEBUG_GUARD_END`) for temporary guards
|
||||
2. **Add a comment** explaining what you're debugging
|
||||
3. **Remove guards** when debugging is complete
|
||||
4. **Use compile-time guards** for permanent debug infrastructure
|
||||
5. **Keep guards simple** - one guard per logical debug operation
|
||||
|
||||
|
||||
## Performance Impact
|
||||
|
||||
### Runtime Check
|
||||
|
||||
The macros include a runtime check:
|
||||
|
||||
```c
|
||||
#define DEBUG_INFO(...) \
|
||||
do { if (g_debug_level >= DEBUG_LEVEL_INFO) debug_log(DEBUG_LEVEL_INFO, NULL, 0, __VA_ARGS__); } while(0)
|
||||
```
|
||||
|
||||
**Cost**: One integer comparison per debug statement (~1 CPU cycle)
|
||||
|
||||
**Impact**: Negligible - the comparison is faster than a function call
|
||||
|
||||
**Note**: Only `DEBUG_TRACE` includes `__FILE__` and `__LINE__`, which are compile-time constants with no runtime overhead.
|
||||
|
||||
### When Debug Level is 0 (Production)
|
||||
|
||||
```c
|
||||
// With g_debug_level = 0:
|
||||
DEBUG_INFO("Starting server");
|
||||
|
||||
// Becomes:
|
||||
if (0 >= 3) debug_log(...); // Never executes
|
||||
|
||||
// Compiler optimizes to:
|
||||
// (nothing - branch is eliminated)
|
||||
```
|
||||
|
||||
**Result**: Modern compilers (gcc -O2 or higher) will completely eliminate the dead code branch.
|
||||
|
||||
### Size Impact
|
||||
|
||||
**Test Case**: 100 debug statements in code
|
||||
|
||||
**Without optimization** (`-O0`):
|
||||
- Binary size increase: ~2KB (branch instructions)
|
||||
- Runtime cost: 100 comparisons per execution
|
||||
|
||||
**With optimization** (`-O2` or `-O3`):
|
||||
- Binary size increase: **0 bytes** (dead code eliminated when g_debug_level = 0)
|
||||
- Runtime cost: **0 cycles** (branches removed by compiler)
|
||||
|
||||
### Verification
|
||||
|
||||
You can verify the optimization with:
|
||||
|
||||
```bash
|
||||
# Compile with optimization
|
||||
gcc -O2 -c debug_test.c -o debug_test.o
|
||||
|
||||
# Disassemble and check
|
||||
objdump -d debug_test.o | grep -A 10 "debug_log"
|
||||
```
|
||||
|
||||
When `g_debug_level = 0` (constant), you'll see the compiler has removed all debug calls.
|
||||
|
||||
## Example Output
|
||||
|
||||
### Level 0 (Production)
|
||||
```
|
||||
(no output)
|
||||
```
|
||||
|
||||
### Level 1 (Errors Only)
|
||||
```
|
||||
[2025-01-12 14:30:15] [ERROR] Failed to open database: permission denied
|
||||
[2025-01-12 14:30:20] [ERROR] WebSocket connection failed: port in use
|
||||
```
|
||||
|
||||
### Level 2 (Errors + Warnings)
|
||||
```
|
||||
[2025-01-12 14:30:15] [ERROR] Failed to open database: permission denied
|
||||
[2025-01-12 14:30:16] [WARN ] Port 8888 unavailable, trying 8889
|
||||
[2025-01-12 14:30:17] [WARN ] Configuration key 'relay_name' not found, using default
|
||||
```
|
||||
|
||||
### Level 3 (Errors + Warnings + Info)
|
||||
```
|
||||
[2025-01-12 14:30:15] [INFO ] Initializing C-Relay v0.4.6
|
||||
[2025-01-12 14:30:15] [INFO ] Loading configuration from database
|
||||
[2025-01-12 14:30:15] [ERROR] Failed to open database: permission denied
|
||||
[2025-01-12 14:30:16] [WARN ] Port 8888 unavailable, trying 8889
|
||||
[2025-01-12 14:30:17] [INFO ] WebSocket relay started on ws://127.0.0.1:8889
|
||||
```
|
||||
|
||||
### Level 4 (All Debug Messages)
|
||||
```
|
||||
[2025-01-12 14:30:15] [INFO ] Initializing C-Relay v0.4.6
|
||||
[2025-01-12 14:30:15] [DEBUG] Opening database: build/abc123...def.db
|
||||
[2025-01-12 14:30:15] [DEBUG] Executing schema initialization
|
||||
[2025-01-12 14:30:15] [INFO ] SQLite WAL mode enabled
|
||||
[2025-01-12 14:30:16] [DEBUG] Attempting to bind to port 8888
|
||||
[2025-01-12 14:30:16] [WARN ] Port 8888 unavailable, trying 8889
|
||||
[2025-01-12 14:30:17] [DEBUG] Successfully bound to port 8889
|
||||
[2025-01-12 14:30:17] [INFO ] WebSocket relay started on ws://127.0.0.1:8889
|
||||
```
|
||||
|
||||
### Level 5 (Everything Including file:line for ALL messages)
|
||||
```
|
||||
[2025-01-12 14:30:15] [INFO ] [main.c:1607] Initializing C-Relay v0.4.6
|
||||
[2025-01-12 14:30:15] [DEBUG] [main.c:348] Opening database: build/abc123...def.db
|
||||
[2025-01-12 14:30:15] [TRACE] [main.c:330] Entering init_database()
|
||||
[2025-01-12 14:30:15] [ERROR] [config.c:125] Database locked
|
||||
```
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
### Step 1: Create Files (5 minutes)
|
||||
|
||||
1. Create `src/debug.h` with the header code above
|
||||
2. Create `src/debug.c` with the implementation code above
|
||||
3. Update `Makefile` to include `src/debug.c` in `MAIN_SRC`
|
||||
|
||||
### Step 2: Add CLI Parsing (5 minutes)
|
||||
|
||||
Add `--debug-level` argument parsing to `main()` in `src/main.c`
|
||||
|
||||
### Step 3: Update Existing Functions (5 minutes)
|
||||
|
||||
Update the existing `log_*` functions to use the new debug macros
|
||||
|
||||
### Step 4: Test (5 minutes)
|
||||
|
||||
```bash
|
||||
# Build
|
||||
make clean && make
|
||||
|
||||
# Test different levels
|
||||
./build/c_relay_x86 # No output
|
||||
./build/c_relay_x86 --debug-level=1 # Errors only
|
||||
./build/c_relay_x86 --debug-level=3 # Info + warnings + errors
|
||||
./build/c_relay_x86 --debug-level=4 # All debug messages
|
||||
./build/c_relay_x86 --debug-level=5 # Everything, with file:line on ALL messages
|
||||
```
|
||||
|
||||
### Step 5: Gradual Migration (Ongoing)
|
||||
|
||||
As you work on different parts of the code, replace `log_*` calls with `DEBUG_*` macros and add new debug/trace statements where helpful.
|
||||
|
||||
## Benefits
|
||||
|
||||
✅ **Simple**: Single flag, 6 levels, easy to understand
|
||||
✅ **Zero Overhead**: Compiler optimizes away unused debug code
|
||||
✅ **Zero Size Impact**: No binary size increase in production
|
||||
✅ **Backward Compatible**: Existing `log_*` functions still work
|
||||
✅ **Easy Migration**: Gradual replacement of log calls
|
||||
✅ **Flexible**: Can add detailed debugging without affecting production
|
||||
|
||||
## Total Implementation Time
|
||||
|
||||
**~20 minutes** for basic implementation
|
||||
**Ongoing** for gradual migration of existing log calls
|
||||
|
||||
## Recommendation
|
||||
|
||||
This is the simplest possible debug system that provides:
|
||||
- Multiple debug levels for different verbosity
|
||||
- Zero performance impact in production
|
||||
- Zero binary size increase
|
||||
- Easy to use and understand
|
||||
- Backward compatible with existing code
|
||||
|
||||
Start with the basic implementation, test it, then gradually migrate existing log calls and add new debug statements as needed.
|
||||
@@ -1,358 +0,0 @@
|
||||
# Event-Based Configuration System Implementation Plan
|
||||
|
||||
## Overview
|
||||
|
||||
This document provides a detailed implementation plan for transitioning the C Nostr Relay from command line arguments and file-based configuration to a pure event-based configuration system using kind 33334 Nostr events stored directly in the database.
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 0: File Structure Preparation ✅ COMPLETED
|
||||
|
||||
#### 0.1 Backup and Prepare Files ✅ COMPLETED
|
||||
**Actions:**
|
||||
1. ✅ Rename `src/config.c` to `src/config.c.old` - DONE
|
||||
2. ✅ Rename `src/config.h` to `src/config.h.old` - DONE
|
||||
3. ✅ Create new empty `src/config.c` and `src/config.h` - DONE
|
||||
4. ✅ Create new `src/default_config_event.h` - DONE
|
||||
|
||||
### Phase 1: Database Schema and Core Infrastructure ✅ COMPLETED
|
||||
|
||||
#### 1.1 Update Database Naming System ✅ COMPLETED
|
||||
**File:** `src/main.c`, new `src/config.c`, new `src/config.h`
|
||||
|
||||
```c
|
||||
// New functions implemented: ✅
|
||||
char* get_database_name_from_relay_pubkey(const char* relay_pubkey);
|
||||
int create_database_with_relay_pubkey(const char* relay_pubkey);
|
||||
```
|
||||
|
||||
**Changes Completed:** ✅
|
||||
- ✅ Create completely new `src/config.c` and `src/config.h` files
|
||||
- ✅ Rename old files to `src/config.c.old` and `src/config.h.old`
|
||||
- ✅ Modify `init_database()` to use relay pubkey for database naming
|
||||
- ✅ Use `nostr_core_lib` functions for all keypair generation
|
||||
- ✅ Database path: `./<relay_pubkey>.nrdb`
|
||||
- ✅ Remove all database path command line argument handling
|
||||
|
||||
#### 1.2 Configuration Event Storage ✅ COMPLETED
|
||||
**File:** new `src/config.c`, new `src/default_config_event.h`
|
||||
|
||||
```c
|
||||
// Configuration functions implemented: ✅
|
||||
int store_config_event_in_database(const cJSON* event);
|
||||
cJSON* load_config_event_from_database(const char* relay_pubkey);
|
||||
```
|
||||
|
||||
**Changes Completed:** ✅
|
||||
- ✅ Create new `src/default_config_event.h` for default configuration values
|
||||
- ✅ Add functions to store/retrieve kind 33334 events from events table
|
||||
- ✅ Use `nostr_core_lib` functions for all event validation
|
||||
- ✅ Clean separation: default config values isolated in header file
|
||||
- ✅ Remove existing config table dependencies
|
||||
|
||||
### Phase 2: Event Processing Integration ✅ COMPLETED
|
||||
|
||||
#### 2.1 Real-time Configuration Processing ✅ COMPLETED
|
||||
**File:** `src/main.c` (event processing functions)
|
||||
|
||||
**Integration Points:** ✅ IMPLEMENTED
|
||||
```c
|
||||
// In existing event processing loop: ✅ IMPLEMENTED
|
||||
// Added kind 33334 event detection in main event loop
|
||||
if (kind_num == 33334) {
|
||||
if (handle_configuration_event(event, error_message, sizeof(error_message)) == 0) {
|
||||
// Configuration event processed successfully
|
||||
}
|
||||
}
|
||||
|
||||
// Configuration event processing implemented: ✅
|
||||
int process_configuration_event(const cJSON* event);
|
||||
int handle_configuration_event(cJSON* event, char* error_message, size_t error_size);
|
||||
```
|
||||
|
||||
#### 2.2 Configuration Application System ⚠️ PARTIALLY COMPLETED
|
||||
**File:** `src/config.c`
|
||||
|
||||
**Status:** Configuration access functions implemented, field handlers need completion
|
||||
```c
|
||||
// Configuration access implemented: ✅
|
||||
const char* get_config_value(const char* key);
|
||||
int get_config_int(const char* key, int default_value);
|
||||
int get_config_bool(const char* key, int default_value);
|
||||
|
||||
// Field handlers need implementation: ⏳ IN PROGRESS
|
||||
// Need to implement specific apply functions for runtime changes
|
||||
```
|
||||
|
||||
### Phase 3: First-Time Startup System ✅ COMPLETED
|
||||
|
||||
#### 3.1 Key Generation and Initial Setup ✅ COMPLETED
|
||||
**File:** new `src/config.c`, `src/default_config_event.h`
|
||||
|
||||
**Status:** ✅ FULLY IMPLEMENTED with secure /dev/urandom + nostr_core_lib validation
|
||||
|
||||
```c
|
||||
int first_time_startup_sequence() {
|
||||
// 1. Generate admin keypair using nostr_core_lib
|
||||
unsigned char admin_privkey_bytes[32];
|
||||
char admin_privkey[65], admin_pubkey[65];
|
||||
|
||||
if (nostr_generate_private_key(admin_privkey_bytes) != 0) {
|
||||
return -1;
|
||||
}
|
||||
nostr_bytes_to_hex(admin_privkey_bytes, 32, admin_privkey);
|
||||
|
||||
unsigned char admin_pubkey_bytes[32];
|
||||
if (nostr_ec_public_key_from_private_key(admin_privkey_bytes, admin_pubkey_bytes) != 0) {
|
||||
return -1;
|
||||
}
|
||||
nostr_bytes_to_hex(admin_pubkey_bytes, 32, admin_pubkey);
|
||||
|
||||
// 2. Generate relay keypair using nostr_core_lib
|
||||
unsigned char relay_privkey_bytes[32];
|
||||
char relay_privkey[65], relay_pubkey[65];
|
||||
|
||||
if (nostr_generate_private_key(relay_privkey_bytes) != 0) {
|
||||
return -1;
|
||||
}
|
||||
nostr_bytes_to_hex(relay_privkey_bytes, 32, relay_privkey);
|
||||
|
||||
unsigned char relay_pubkey_bytes[32];
|
||||
if (nostr_ec_public_key_from_private_key(relay_privkey_bytes, relay_pubkey_bytes) != 0) {
|
||||
return -1;
|
||||
}
|
||||
nostr_bytes_to_hex(relay_pubkey_bytes, 32, relay_pubkey);
|
||||
|
||||
// 3. Create database with relay pubkey name
|
||||
if (create_database_with_relay_pubkey(relay_pubkey) != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// 4. Create initial configuration event using defaults from header
|
||||
cJSON* config_event = create_default_config_event(admin_privkey_bytes, relay_privkey, relay_pubkey);
|
||||
|
||||
// 5. Store configuration event in database
|
||||
store_config_event_in_database(config_event);
|
||||
|
||||
// 6. Print admin private key for user to save
|
||||
printf("=== SAVE THIS ADMIN PRIVATE KEY ===\n");
|
||||
printf("Admin Private Key: %s\n", admin_privkey);
|
||||
printf("===================================\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
#### 3.2 Database Detection Logic ✅ COMPLETED
|
||||
**File:** `src/main.c`
|
||||
|
||||
**Status:** ✅ FULLY IMPLEMENTED
|
||||
```c
|
||||
// Implemented functions: ✅
|
||||
char** find_existing_nrdb_files(void);
|
||||
char* extract_pubkey_from_filename(const char* filename);
|
||||
int is_first_time_startup(void);
|
||||
int first_time_startup_sequence(void);
|
||||
int startup_existing_relay(const char* relay_pubkey);
|
||||
```
|
||||
|
||||
### Phase 4: Legacy System Removal ✅ PARTIALLY COMPLETED
|
||||
|
||||
#### 4.1 Remove Command Line Arguments ✅ COMPLETED
|
||||
**File:** `src/main.c`
|
||||
|
||||
**Status:** ✅ COMPLETED
|
||||
- ✅ All argument parsing logic removed except --help and --version
|
||||
- ✅ `--port`, `--config-dir`, `--config-file`, `--database-path` handling removed
|
||||
- ✅ Environment variable override systems removed
|
||||
- ✅ Clean help and version functions implemented
|
||||
|
||||
#### 4.2 Remove Configuration File System ✅ COMPLETED
|
||||
**File:** `src/config.c`
|
||||
|
||||
**Status:** ✅ COMPLETED - New file created from scratch
|
||||
- ✅ All legacy file-based configuration functions removed
|
||||
- ✅ XDG configuration directory logic removed
|
||||
- ✅ Pure event-based system implemented
|
||||
|
||||
#### 4.3 Remove Legacy Database Tables ⏳ PENDING
|
||||
**File:** `src/sql_schema.h`
|
||||
|
||||
**Status:** ⏳ NEEDS COMPLETION
|
||||
```sql
|
||||
-- Still need to remove these tables:
|
||||
DROP TABLE IF EXISTS config;
|
||||
DROP TABLE IF EXISTS config_history;
|
||||
DROP TABLE IF EXISTS config_file_cache;
|
||||
DROP VIEW IF EXISTS active_config;
|
||||
```
|
||||
|
||||
### Phase 5: Configuration Management
|
||||
|
||||
#### 5.1 Configuration Field Mapping
|
||||
**File:** `src/config.c`
|
||||
|
||||
```c
|
||||
// Map configuration tags to current system
|
||||
static const config_field_handler_t config_handlers[] = {
|
||||
{"auth_enabled", 0, apply_auth_enabled},
|
||||
{"relay_port", 1, apply_relay_port}, // requires restart
|
||||
{"max_connections", 0, apply_max_connections},
|
||||
{"relay_description", 0, apply_relay_description},
|
||||
{"relay_contact", 0, apply_relay_contact},
|
||||
{"relay_pubkey", 1, apply_relay_pubkey}, // requires restart
|
||||
{"relay_privkey", 1, apply_relay_privkey}, // requires restart
|
||||
{"pow_min_difficulty", 0, apply_pow_difficulty},
|
||||
{"nip40_expiration_enabled", 0, apply_expiration_enabled},
|
||||
{"max_subscriptions_per_client", 0, apply_max_subscriptions},
|
||||
{"max_event_tags", 0, apply_max_event_tags},
|
||||
{"max_content_length", 0, apply_max_content_length},
|
||||
{"default_limit", 0, apply_default_limit},
|
||||
{"max_limit", 0, apply_max_limit},
|
||||
// ... etc
|
||||
};
|
||||
```
|
||||
|
||||
#### 5.2 Startup Configuration Loading
|
||||
**File:** `src/main.c`
|
||||
|
||||
```c
|
||||
int startup_existing_relay(const char* relay_pubkey) {
|
||||
// 1. Open database
|
||||
if (init_database_with_pubkey(relay_pubkey) != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// 2. Load configuration event from database
|
||||
cJSON* config_event = load_config_event_from_database(relay_pubkey);
|
||||
if (!config_event) {
|
||||
log_error("No configuration event found in database");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// 3. Apply all configuration from event
|
||||
if (apply_configuration_from_event(config_event) != 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// 4. Continue with normal startup
|
||||
return start_relay_services();
|
||||
}
|
||||
```
|
||||
|
||||
## Implementation Order - PROGRESS STATUS
|
||||
|
||||
### Step 1: Core Infrastructure ✅ COMPLETED
|
||||
1. ✅ Implement database naming with relay pubkey
|
||||
2. ✅ Add key generation functions using `nostr_core_lib`
|
||||
3. ✅ Create configuration event storage/retrieval functions
|
||||
4. ✅ Test basic event creation and storage
|
||||
|
||||
### Step 2: Event Processing Integration ✅ MOSTLY COMPLETED
|
||||
1. ✅ Add kind 33334 event detection to event processing loop
|
||||
2. ✅ Implement configuration event validation
|
||||
3. ⚠️ Create configuration application handlers (basic access implemented, runtime handlers pending)
|
||||
4. ⏳ Test real-time configuration updates (infrastructure ready)
|
||||
|
||||
### Step 3: First-Time Startup ✅ COMPLETED
|
||||
1. ✅ Implement first-time startup detection
|
||||
2. ✅ Add automatic key generation and database creation
|
||||
3. ✅ Create default configuration event generation
|
||||
4. ✅ Test complete first-time startup flow
|
||||
|
||||
### Step 4: Legacy Removal ⚠️ MOSTLY COMPLETED
|
||||
1. ✅ Remove command line argument parsing
|
||||
2. ✅ Remove configuration file system
|
||||
3. ⏳ Remove legacy database tables (pending)
|
||||
4. ✅ Update all references to use event-based config
|
||||
|
||||
### Step 5: Testing and Validation ⚠️ PARTIALLY COMPLETED
|
||||
1. ✅ Test complete startup flow (first time and existing)
|
||||
2. ⏳ Test configuration updates via events (infrastructure ready)
|
||||
3. ⚠️ Test error handling and recovery (basic error handling implemented)
|
||||
4. ⏳ Performance testing and optimization (pending)
|
||||
|
||||
## Migration Strategy
|
||||
|
||||
### For Existing Installations
|
||||
Since the new system uses a completely different approach:
|
||||
|
||||
1. **No Automatic Migration**: The new system starts fresh
|
||||
2. **Manual Migration**: Users can manually copy configuration values
|
||||
3. **Documentation**: Provide clear migration instructions
|
||||
4. **Coexistence**: Old and new systems use different database names
|
||||
|
||||
### Migration Steps for Users
|
||||
1. Stop existing relay
|
||||
2. Note current configuration values
|
||||
3. Start new relay (generates keys and new database)
|
||||
4. Create kind 33334 event with desired configuration using admin private key
|
||||
5. Send event to relay to update configuration
|
||||
|
||||
## Testing Requirements
|
||||
|
||||
### Unit Tests
|
||||
- Key generation functions
|
||||
- Configuration event creation and validation
|
||||
- Database naming logic
|
||||
- Configuration application handlers
|
||||
|
||||
### Integration Tests
|
||||
- Complete first-time startup flow
|
||||
- Configuration update via events
|
||||
- Error handling scenarios
|
||||
- Database operations
|
||||
|
||||
### Performance Tests
|
||||
- Startup time comparison
|
||||
- Configuration update response time
|
||||
- Memory usage analysis
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **Admin Private Key**: Never stored, only printed once
|
||||
2. **Event Validation**: All configuration events must be signed by admin
|
||||
3. **Database Security**: Relay database contains relay private key
|
||||
4. **Key Generation**: Use `nostr_core_lib` for cryptographically secure generation
|
||||
|
||||
## Files to Modify
|
||||
|
||||
### Major Changes
|
||||
- `src/main.c` - Startup logic, event processing, argument removal
|
||||
- `src/config.c` - Complete rewrite for event-based configuration
|
||||
- `src/config.h` - Update function signatures and structures
|
||||
- `src/sql_schema.h` - Remove config tables
|
||||
|
||||
### Minor Changes
|
||||
- `Makefile` - Remove any config file generation
|
||||
- `systemd/` - Update service files if needed
|
||||
- Documentation updates
|
||||
|
||||
## Backwards Compatibility
|
||||
|
||||
**Breaking Changes:**
|
||||
- Command line arguments removed (except --help, --version)
|
||||
- Configuration files no longer used
|
||||
- Database naming scheme changed
|
||||
- Configuration table removed
|
||||
|
||||
**Migration Required:** This is a breaking change that requires manual migration for existing installations.
|
||||
|
||||
## Success Criteria - CURRENT STATUS
|
||||
|
||||
1. ✅ **Zero Command Line Arguments**: Relay starts with just `./c-relay`
|
||||
2. ✅ **Automatic First-Time Setup**: Generates keys and database automatically
|
||||
3. ⚠️ **Real-Time Configuration**: Infrastructure ready, handlers need completion
|
||||
4. ✅ **Single Database File**: All configuration and data in one `.nrdb` file
|
||||
5. ⚠️ **Admin Control**: Event processing implemented, signature validation ready
|
||||
6. ⚠️ **Clean Codebase**: Most legacy code removed, database tables cleanup pending
|
||||
|
||||
## Risk Mitigation
|
||||
|
||||
1. **Backup Strategy**: Document manual backup procedures for relay database
|
||||
2. **Key Loss Recovery**: Document recovery procedures if admin key is lost
|
||||
3. **Testing Coverage**: Comprehensive test suite before deployment
|
||||
4. **Rollback Plan**: Keep old version available during transition period
|
||||
5. **Documentation**: Comprehensive user and developer documentation
|
||||
|
||||
This implementation plan provides a clear path from the current system to the new event-based configuration architecture while maintaining security and reliability.
|
||||
275
docs/musl_static_build.md
Normal file
275
docs/musl_static_build.md
Normal file
@@ -0,0 +1,275 @@
|
||||
# MUSL Static Binary Build Guide
|
||||
|
||||
## Overview
|
||||
|
||||
This guide explains how to build truly portable MUSL-based static binaries of c-relay using Alpine Linux Docker containers. These binaries have **zero runtime dependencies** and work on any Linux distribution.
|
||||
|
||||
## Why MUSL?
|
||||
|
||||
### MUSL vs glibc Static Binaries
|
||||
|
||||
**MUSL Advantages:**
|
||||
- **Truly Static**: No hidden dependencies on system libraries
|
||||
- **Smaller Size**: ~7.6MB vs ~12MB+ for glibc static builds
|
||||
- **Better Portability**: Works on ANY Linux distribution without modification
|
||||
- **Cleaner Linking**: No glibc-specific extensions or fortified functions
|
||||
- **Simpler Deployment**: Single binary, no library compatibility issues
|
||||
|
||||
**glibc Limitations:**
|
||||
- Static builds still require dynamic loading for NSS (Name Service Switch)
|
||||
- Fortified functions (`__*_chk`) don't exist in MUSL
|
||||
- Larger binary size due to glibc's complexity
|
||||
- May have compatibility issues across different glibc versions
|
||||
|
||||
## Build Process
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Docker installed and running
|
||||
- Sufficient disk space (~2GB for Docker layers)
|
||||
- Internet connection (for downloading dependencies)
|
||||
|
||||
### Quick Start
|
||||
|
||||
```bash
|
||||
# Build MUSL static binary
|
||||
./build_static.sh
|
||||
|
||||
# The binary will be created at:
|
||||
# build/c_relay_static_musl_x86_64 (on x86_64)
|
||||
# build/c_relay_static_musl_arm64 (on ARM64)
|
||||
```
|
||||
|
||||
### What Happens During Build
|
||||
|
||||
1. **Alpine Linux Base**: Uses Alpine 3.19 with native MUSL support
|
||||
2. **Static Dependencies**: Builds all dependencies with static linking:
|
||||
- libsecp256k1 (Bitcoin cryptography)
|
||||
- libwebsockets (WebSocket server)
|
||||
- OpenSSL (TLS/crypto)
|
||||
- SQLite (database)
|
||||
- curl (HTTP client)
|
||||
- zlib (compression)
|
||||
|
||||
3. **nostr_core_lib**: Builds with MUSL-compatible flags:
|
||||
- Disables glibc fortification (`-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0`)
|
||||
- Includes required NIPs: 001, 006, 013, 017, 019, 044, 059
|
||||
- Produces static library (~316KB)
|
||||
|
||||
4. **c-relay Compilation**: Links everything statically:
|
||||
- All source files compiled with `-static` flag
|
||||
- Fortification disabled to avoid `__*_chk` symbols
|
||||
- Results in ~7.6MB stripped binary
|
||||
|
||||
5. **Verification**: Confirms binary is truly static:
|
||||
- `ldd` shows "not a dynamic executable"
|
||||
- `file` shows "statically linked"
|
||||
- Binary executes successfully
|
||||
|
||||
## Technical Details
|
||||
|
||||
### Dockerfile Structure
|
||||
|
||||
The build uses a multi-stage Dockerfile (`Dockerfile.alpine-musl`):
|
||||
|
||||
```dockerfile
|
||||
# Stage 1: Builder (Alpine Linux)
|
||||
FROM alpine:3.19 AS builder
|
||||
- Install build tools and static libraries
|
||||
- Build dependencies from source
|
||||
- Compile nostr_core_lib with MUSL flags
|
||||
- Compile c-relay with full static linking
|
||||
- Strip binary to reduce size
|
||||
|
||||
# Stage 2: Output (scratch)
|
||||
FROM scratch AS output
|
||||
- Contains only the final binary
|
||||
```
|
||||
|
||||
### Key Compilation Flags
|
||||
|
||||
**For nostr_core_lib:**
|
||||
```bash
|
||||
CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"
|
||||
```
|
||||
|
||||
**For c-relay:**
|
||||
```bash
|
||||
gcc -static -O2 -Wall -Wextra -std=c99 \
|
||||
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
|
||||
[source files] \
|
||||
-lwebsockets -lssl -lcrypto -lsqlite3 -lsecp256k1 \
|
||||
-lcurl -lz -lpthread -lm -ldl
|
||||
```
|
||||
|
||||
### Fortification Issue
|
||||
|
||||
**Problem**: GCC's `-O2` optimization enables fortification by default, replacing standard functions with `__*_chk` variants (e.g., `__snprintf_chk`, `__fprintf_chk`). These are glibc-specific and don't exist in MUSL.
|
||||
|
||||
**Solution**: Explicitly disable fortification with:
|
||||
- `-U_FORTIFY_SOURCE` (undefine any existing definition)
|
||||
- `-D_FORTIFY_SOURCE=0` (set to 0)
|
||||
|
||||
This must be applied to **both** nostr_core_lib and c-relay compilation.
|
||||
|
||||
### NIP Dependencies
|
||||
|
||||
The build includes these NIPs in nostr_core_lib:
|
||||
- **NIP-001**: Basic protocol (event creation, signing)
|
||||
- **NIP-006**: Key derivation from mnemonic
|
||||
- **NIP-013**: Proof of Work validation
|
||||
- **NIP-017**: Private Direct Messages
|
||||
- **NIP-019**: Bech32 encoding (nsec/npub)
|
||||
- **NIP-044**: Modern encryption
|
||||
- **NIP-059**: Gift Wrap (required by NIP-017)
|
||||
|
||||
## Verification
|
||||
|
||||
### Check Binary Type
|
||||
|
||||
```bash
|
||||
# Should show "statically linked"
|
||||
file build/c_relay_static_musl_x86_64
|
||||
|
||||
# Should show "not a dynamic executable"
|
||||
ldd build/c_relay_static_musl_x86_64
|
||||
|
||||
# Check size (should be ~7.6MB)
|
||||
ls -lh build/c_relay_static_musl_x86_64
|
||||
```
|
||||
|
||||
### Test Execution
|
||||
|
||||
```bash
|
||||
# Show help
|
||||
./build/c_relay_static_musl_x86_64 --help
|
||||
|
||||
# Show version
|
||||
./build/c_relay_static_musl_x86_64 --version
|
||||
|
||||
# Run relay
|
||||
./build/c_relay_static_musl_x86_64 --port 8888
|
||||
```
|
||||
|
||||
### Cross-Distribution Testing
|
||||
|
||||
Test the binary on different distributions to verify portability:
|
||||
|
||||
```bash
|
||||
# Alpine Linux
|
||||
docker run --rm -v $(pwd)/build:/app alpine:latest /app/c_relay_static_musl_x86_64 --version
|
||||
|
||||
# Ubuntu
|
||||
docker run --rm -v $(pwd)/build:/app ubuntu:latest /app/c_relay_static_musl_x86_64 --version
|
||||
|
||||
# Debian
|
||||
docker run --rm -v $(pwd)/build:/app debian:latest /app/c_relay_static_musl_x86_64 --version
|
||||
|
||||
# CentOS
|
||||
docker run --rm -v $(pwd)/build:/app centos:latest /app/c_relay_static_musl_x86_64 --version
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Docker Permission Denied
|
||||
|
||||
**Problem**: `permission denied while trying to connect to the Docker daemon socket`
|
||||
|
||||
**Solution**: Add user to docker group:
|
||||
```bash
|
||||
sudo usermod -aG docker $USER
|
||||
newgrp docker # Or logout and login again
|
||||
```
|
||||
|
||||
### Build Fails with Fortification Errors
|
||||
|
||||
**Problem**: `undefined reference to '__snprintf_chk'` or `'__fprintf_chk'`
|
||||
|
||||
**Solution**: Ensure fortification is disabled in both:
|
||||
1. nostr_core_lib build.sh (line 534)
|
||||
2. c-relay compilation flags in Dockerfile
|
||||
|
||||
### Binary Won't Execute
|
||||
|
||||
**Problem**: Binary fails to run on target system
|
||||
|
||||
**Checks**:
|
||||
1. Verify it's truly static: `ldd binary` should show "not a dynamic executable"
|
||||
2. Check architecture matches: `file binary` should show correct arch
|
||||
3. Ensure execute permissions: `chmod +x binary`
|
||||
|
||||
### Missing NIP Functions
|
||||
|
||||
**Problem**: `undefined reference to 'nostr_nip*'` during linking
|
||||
|
||||
**Solution**: Add missing NIPs to the build command:
|
||||
```bash
|
||||
./build.sh --nips=1,6,13,17,19,44,59
|
||||
```
|
||||
|
||||
## Deployment
|
||||
|
||||
### Single Binary Deployment
|
||||
|
||||
```bash
|
||||
# Copy binary to server
|
||||
scp build/c_relay_static_musl_x86_64 user@server:/opt/c-relay/
|
||||
|
||||
# Run on server (no dependencies needed!)
|
||||
ssh user@server
|
||||
cd /opt/c-relay
|
||||
./c_relay_static_musl_x86_64 --port 8888
|
||||
```
|
||||
|
||||
### SystemD Service
|
||||
|
||||
```ini
|
||||
[Unit]
|
||||
Description=C-Relay Nostr Relay (MUSL Static)
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=c-relay
|
||||
WorkingDirectory=/opt/c-relay
|
||||
ExecStart=/opt/c-relay/c_relay_static_musl_x86_64
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
## Performance Comparison
|
||||
|
||||
| Metric | MUSL Static | glibc Static | glibc Dynamic |
|
||||
|--------|-------------|--------------|---------------|
|
||||
| Binary Size | 7.6 MB | 12+ MB | 2-3 MB |
|
||||
| Startup Time | ~50ms | ~60ms | ~40ms |
|
||||
| Memory Usage | Similar | Similar | Similar |
|
||||
| Portability | ✓ Any Linux | ⚠ glibc only | ✗ Requires libs |
|
||||
| Dependencies | None | NSS libs | Many libs |
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Always verify** the binary is truly static before deployment
|
||||
2. **Test on multiple distributions** to ensure portability
|
||||
3. **Keep Docker images updated** for security patches
|
||||
4. **Document the build date** and commit hash for reproducibility
|
||||
5. **Store binaries** with architecture in filename (e.g., `_x86_64`, `_arm64`)
|
||||
|
||||
## References
|
||||
|
||||
- [MUSL libc](https://musl.libc.org/)
|
||||
- [Alpine Linux](https://alpinelinux.org/)
|
||||
- [Static Linking Best Practices](https://www.musl-libc.org/faq.html)
|
||||
- [GCC Fortification](https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html)
|
||||
|
||||
## Changelog
|
||||
|
||||
### 2025-10-11
|
||||
- Initial MUSL build system implementation
|
||||
- Alpine Docker-based build process
|
||||
- Fortification fix for nostr_core_lib
|
||||
- Complete NIP dependency resolution
|
||||
- Documentation created
|
||||
@@ -1,128 +0,0 @@
|
||||
# Startup Configuration Design Analysis
|
||||
|
||||
## Review of startup_config_design.md
|
||||
|
||||
### Key Design Principles Identified
|
||||
|
||||
1. **Zero Command Line Arguments**: Complete elimination of CLI arguments for true "quick start"
|
||||
2. **Event-Based Configuration**: Configuration stored as Nostr event (kind 33334) in events table
|
||||
3. **Self-Contained Database**: Database named after relay pubkey (`<pubkey>.nrdb`)
|
||||
4. **First-Time Setup**: Automatic key generation and initial configuration creation
|
||||
5. **Configuration Consistency**: Always read from event, never from hardcoded defaults
|
||||
|
||||
### Implementation Gaps and Specifications Needed
|
||||
|
||||
#### 1. Key Generation Process
|
||||
**Specification:**
|
||||
```
|
||||
First Startup Key Generation:
|
||||
1. Generate all keys on first startup (admin private/public, relay private/public)
|
||||
2. Use nostr_core_lib for key generation entropy
|
||||
3. Keys are encoded in hex format
|
||||
4. Print admin private key to stdout for user to save (never stored)
|
||||
5. Store admin public key, relay private key, and relay public key in configuration event
|
||||
6. Admin can later change the 33334 event to alter stored keys
|
||||
```
|
||||
|
||||
#### 2. Database Naming and Location
|
||||
**Specification:**
|
||||
```
|
||||
Database Naming:
|
||||
1. Database is named using relay pubkey: ./<relay_pubkey>.nrdb
|
||||
2. Database path structure: ./<relay_pubkey>.nrdb
|
||||
3. If database creation fails, program quits (can't run without database)
|
||||
4. c_nostr_relay.db should never exist in a new system
|
||||
```
|
||||
|
||||
#### 3. Configuration Event Structure (Kind 33334)
|
||||
**Specification:**
|
||||
```
|
||||
Event Structure:
|
||||
- Kind: 33334 (parameterized replaceable event)
|
||||
- Event validation: Use nostr_core_lib to validate event
|
||||
- Event content field: "C Nostr Relay Configuration" (descriptive text)
|
||||
- Configuration update mechanism: TBD
|
||||
- Complete tag structure provided in configuration section below
|
||||
```
|
||||
|
||||
|
||||
|
||||
#### 4. Configuration Change Monitoring
|
||||
**Configuration Monitoring System:**
|
||||
```
|
||||
Every event that is received is checked to see if it is a kind 33334 event from the admin pubkey.
|
||||
If so, it is processed as a configuration update.
|
||||
```
|
||||
|
||||
#### 5. Error Handling and Recovery
|
||||
**Specification:**
|
||||
```
|
||||
Error Recovery Priority:
|
||||
1. Try to load latest valid config event
|
||||
2. Generate new default configuration event if none exists
|
||||
3. Exit with error if all recovery attempts fail
|
||||
|
||||
Note: There is only ever one configuration event (parameterized replaceable event),
|
||||
so no fallback to previous versions.
|
||||
```
|
||||
|
||||
### Design Clarifications
|
||||
|
||||
**Key Management:**
|
||||
- Admin private key is never stored, only printed once at first startup
|
||||
- Single admin system (no multi-admin support)
|
||||
- No key rotation support
|
||||
|
||||
**Configuration Management:**
|
||||
- No configuration versioning/timestamping
|
||||
- No automatic backup of configuration events
|
||||
- Configuration events are not broadcastable to other relays
|
||||
- Future: Auth system to restrict admin access to configuration events
|
||||
|
||||
---
|
||||
|
||||
## Complete Current Configuration Structure
|
||||
|
||||
Based on analysis of [`src/config.c`](src/config.c:753-795), here is the complete current configuration structure that will be converted to event tags:
|
||||
|
||||
### Complete Event Structure Example
|
||||
```json
|
||||
{
|
||||
"kind": 33334,
|
||||
"created_at": 1725661483,
|
||||
"tags": [
|
||||
["d", "<relay_pubkey>"],
|
||||
["auth_enabled", "false"],
|
||||
["relay_port", "8888"],
|
||||
["max_connections", "100"],
|
||||
|
||||
["relay_description", "High-performance C Nostr relay with SQLite storage"],
|
||||
["relay_contact", ""],
|
||||
["relay_pubkey", "<relay_public_key>"],
|
||||
["relay_privkey", "<relay_private_key>"],
|
||||
["relay_software", "https://git.laantungir.net/laantungir/c-relay.git"],
|
||||
["relay_version", "v1.0.0"],
|
||||
|
||||
["pow_min_difficulty", "0"],
|
||||
["pow_mode", "basic"],
|
||||
["nip40_expiration_enabled", "true"],
|
||||
["nip40_expiration_strict", "true"],
|
||||
["nip40_expiration_filter", "true"],
|
||||
["nip40_expiration_grace_period", "300"],
|
||||
["max_subscriptions_per_client", "25"],
|
||||
["max_total_subscriptions", "5000"],
|
||||
["max_filters_per_subscription", "10"],
|
||||
["max_event_tags", "100"],
|
||||
["max_content_length", "8196"],
|
||||
["max_message_length", "16384"],
|
||||
["default_limit", "500"],
|
||||
["max_limit", "5000"]
|
||||
],
|
||||
"content": "C Nostr Relay Configuration",
|
||||
"pubkey": "<admin_public_key>",
|
||||
"id": "<computed_event_id>",
|
||||
"sig": "<event_signature>"
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** The `admin_pubkey` tag is omitted as it's redundant with the event's `pubkey` field.
|
||||
1090
docs/startup_flows_complete.md
Normal file
1090
docs/startup_flows_complete.md
Normal file
File diff suppressed because it is too large
Load Diff
147
docs/static_build_improvements.md
Normal file
147
docs/static_build_improvements.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# Static Build Improvements
|
||||
|
||||
## Overview
|
||||
|
||||
The `build_static.sh` script has been updated to properly support MUSL static compilation and includes several optimizations.
|
||||
|
||||
## Changes Made
|
||||
|
||||
### 1. True MUSL Static Binary Support
|
||||
|
||||
The script now attempts to build with `musl-gcc` for truly portable static binaries:
|
||||
|
||||
- **MUSL binaries** have zero runtime dependencies and work across all Linux distributions
|
||||
- **Automatic fallback** to glibc static linking if MUSL compilation fails (e.g., missing MUSL-compiled libraries)
|
||||
- Clear messaging about which type of binary was created
|
||||
|
||||
### 2. SQLite Build Caching
|
||||
|
||||
SQLite is now built once and cached for future builds:
|
||||
|
||||
- **Cache location**: `~/.cache/c-relay-sqlite/`
|
||||
- **Version-specific**: Each SQLite version gets its own cache directory
|
||||
- **Significant speedup**: Subsequent builds skip the SQLite compilation step
|
||||
- **Manual cleanup**: `rm -rf ~/.cache/c-relay-sqlite` to clear cache
|
||||
|
||||
### 3. Smart Package Installation
|
||||
|
||||
The script now checks for required packages before installing:
|
||||
|
||||
- Only installs missing packages
|
||||
- Reduces unnecessary `apt` operations
|
||||
- Faster builds when dependencies are already present
|
||||
|
||||
### 4. Bug Fixes
|
||||
|
||||
- Fixed format warning in `src/subscriptions.c` line 1067 (changed `%zu` to `%d` with cast for `MAX_SEARCH_TERM_LENGTH`)
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
./build_static.sh
|
||||
```
|
||||
|
||||
The script will:
|
||||
1. Check for and install `musl-gcc` if needed
|
||||
2. Build or use cached SQLite with JSON1 support
|
||||
3. Attempt MUSL static compilation
|
||||
4. Fall back to glibc static compilation if MUSL fails
|
||||
5. Verify the resulting binary
|
||||
|
||||
## Binary Types
|
||||
|
||||
### MUSL Static Binary (Ideal - Currently Not Achievable)
|
||||
- **Filename**: `build/c_relay_static_musl_x86_64`
|
||||
- **Dependencies**: None (truly static)
|
||||
- **Portability**: Works on any Linux distribution
|
||||
- **Status**: Requires MUSL-compiled libwebsockets and other dependencies (not available by default)
|
||||
|
||||
### Glibc Static Binary (Current Output)
|
||||
- **Filename**: `build/c_relay_static_x86_64` or `build/c_relay_static_glibc_x86_64`
|
||||
- **Dependencies**: None - fully statically linked with glibc
|
||||
- **Portability**: Works on most Linux distributions (glibc is statically included)
|
||||
- **Note**: Despite using glibc, this is a **fully static binary** with no runtime dependencies
|
||||
|
||||
## Verification
|
||||
|
||||
The script automatically verifies binaries using `ldd` and `file`:
|
||||
|
||||
```bash
|
||||
# For MUSL binary
|
||||
ldd build/c_relay_static_musl_x86_64
|
||||
# Output: "not a dynamic executable" (good!)
|
||||
|
||||
# For glibc binary
|
||||
ldd build/c_relay_static_glibc_x86_64
|
||||
# Output: "not a dynamic executable" (glibc is statically linked into the binary)
|
||||
```
|
||||
|
||||
## Known Limitations
|
||||
|
||||
### MUSL Compilation Currently Fails Because:
|
||||
|
||||
1. **libwebsockets not available as MUSL static library**
|
||||
- System libwebsockets is compiled with glibc, not MUSL
|
||||
- MUSL cannot link against glibc-compiled libraries
|
||||
- Solution: Build libwebsockets from source with musl-gcc (future enhancement)
|
||||
|
||||
2. **Other dependencies not MUSL-compatible**
|
||||
- libssl, libcrypto, libsecp256k1, libcurl must be available as MUSL static libraries
|
||||
- Most systems only provide glibc versions
|
||||
- Solution: Build entire dependency chain with musl-gcc (complex, future enhancement)
|
||||
|
||||
### Current Behavior
|
||||
|
||||
The script attempts MUSL compilation but falls back to glibc:
|
||||
1. Tries to compile with `musl-gcc -static` (fails due to missing MUSL libraries)
|
||||
2. Logs the error to `/tmp/musl_build.log`
|
||||
3. Displays a clear warning message
|
||||
4. Automatically falls back to `gcc -static` with glibc
|
||||
5. Produces a **fully static binary** with glibc statically linked (no runtime dependencies)
|
||||
|
||||
**Important**: The glibc static binary is still fully portable across most Linux distributions because glibc is statically included in the binary. It's not as universally portable as MUSL would be, but it works on virtually all modern Linux systems.
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
1. **Full MUSL dependency chain**: Build all dependencies (libwebsockets, OpenSSL, etc.) with musl-gcc
|
||||
2. **Multi-architecture support**: Add ARM64 MUSL builds
|
||||
3. **Docker-based builds**: Use Alpine Linux containers for guaranteed MUSL environment
|
||||
4. **Dependency vendoring**: Include pre-built MUSL libraries in the repository
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Clear SQLite Cache
|
||||
```bash
|
||||
rm -rf ~/.cache/c-relay-sqlite
|
||||
```
|
||||
|
||||
### Force Package Reinstall
|
||||
```bash
|
||||
sudo apt install --reinstall musl-dev musl-tools libssl-dev libcurl4-openssl-dev libsecp256k1-dev
|
||||
```
|
||||
|
||||
### Check Build Logs
|
||||
```bash
|
||||
cat /tmp/musl_build.log
|
||||
```
|
||||
|
||||
### Verify Binary Type
|
||||
```bash
|
||||
file build/c_relay_static_*
|
||||
ldd build/c_relay_static_* 2>&1
|
||||
```
|
||||
|
||||
## Performance Impact
|
||||
|
||||
- **First build**: ~2-3 minutes (includes SQLite compilation)
|
||||
- **Subsequent builds**: ~30-60 seconds (uses cached SQLite)
|
||||
- **Cache size**: ~10-15 MB per SQLite version
|
||||
|
||||
## Compatibility
|
||||
|
||||
The updated script is compatible with:
|
||||
- Ubuntu 20.04+
|
||||
- Debian 10+
|
||||
- Other Debian-based distributions with `apt` package manager
|
||||
|
||||
For other distributions, adjust package installation commands accordingly.
|
||||
427
docs/unified_startup_design.md
Normal file
427
docs/unified_startup_design.md
Normal file
@@ -0,0 +1,427 @@
|
||||
# Unified Startup Sequence Design
|
||||
|
||||
## Overview
|
||||
|
||||
This document describes the new unified startup sequence where all config values are created first, then CLI overrides are applied as a separate atomic operation. This eliminates the current 3-step incremental building process.
|
||||
|
||||
## Current Problems
|
||||
|
||||
1. **Incremental Config Building**: Config is built in 3 steps:
|
||||
- Step 1: `populate_default_config_values()` - adds defaults
|
||||
- Step 2: CLI overrides applied via `update_config_in_table()`
|
||||
- Step 3: `add_pubkeys_to_config_table()` - adds generated keys
|
||||
|
||||
2. **Race Conditions**: Cache can be refreshed between steps, causing incomplete config reads
|
||||
|
||||
3. **Complexity**: Multiple code paths for first-time vs restart scenarios
|
||||
|
||||
## New Design Principles
|
||||
|
||||
1. **Atomic Config Creation**: All config values created in single transaction
|
||||
2. **Separate Override Phase**: CLI overrides applied after complete config exists
|
||||
3. **Unified Code Path**: Same logic for first-time and restart scenarios
|
||||
4. **Cache Safety**: Cache only loaded after config is complete
|
||||
|
||||
---
|
||||
|
||||
## Scenario 1: First-Time Startup (No Database)
|
||||
|
||||
### Sequence
|
||||
|
||||
```
|
||||
1. Key Generation Phase
|
||||
├─ generate_random_private_key_bytes() → admin_privkey_bytes
|
||||
├─ nostr_bytes_to_hex() → admin_privkey (hex)
|
||||
├─ nostr_ec_public_key_from_private_key() → admin_pubkey_bytes
|
||||
├─ nostr_bytes_to_hex() → admin_pubkey (hex)
|
||||
├─ generate_random_private_key_bytes() → relay_privkey_bytes
|
||||
├─ nostr_bytes_to_hex() → relay_privkey (hex)
|
||||
├─ nostr_ec_public_key_from_private_key() → relay_pubkey_bytes
|
||||
└─ nostr_bytes_to_hex() → relay_pubkey (hex)
|
||||
|
||||
2. Database Creation Phase
|
||||
├─ create_database_with_relay_pubkey(relay_pubkey)
|
||||
│ └─ Sets g_database_path = "<relay_pubkey>.db"
|
||||
└─ init_database(g_database_path)
|
||||
└─ Creates database with embedded schema (includes config table)
|
||||
|
||||
3. Complete Config Population Phase (ATOMIC)
|
||||
├─ BEGIN TRANSACTION
|
||||
├─ populate_all_config_values_atomic()
|
||||
│ ├─ Insert ALL default config values from DEFAULT_CONFIG_VALUES[]
|
||||
│ ├─ Insert admin_pubkey
|
||||
│ └─ Insert relay_pubkey
|
||||
└─ COMMIT TRANSACTION
|
||||
|
||||
4. CLI Override Phase (ATOMIC)
|
||||
├─ BEGIN TRANSACTION
|
||||
├─ apply_cli_overrides()
|
||||
│ ├─ IF cli_options.port_override > 0:
|
||||
│ │ └─ UPDATE config SET value = ? WHERE key = 'relay_port'
|
||||
│ ├─ IF cli_options.admin_pubkey_override[0]:
|
||||
│ │ └─ UPDATE config SET value = ? WHERE key = 'admin_pubkey'
|
||||
│ └─ IF cli_options.relay_privkey_override[0]:
|
||||
│ └─ UPDATE config SET value = ? WHERE key = 'relay_privkey'
|
||||
└─ COMMIT TRANSACTION
|
||||
|
||||
5. Secure Key Storage Phase
|
||||
└─ store_relay_private_key(relay_privkey)
|
||||
└─ INSERT INTO relay_seckey (private_key_hex) VALUES (?)
|
||||
|
||||
6. Cache Initialization Phase
|
||||
└─ refresh_unified_cache_from_table()
|
||||
└─ Loads complete config into g_unified_cache
|
||||
```
|
||||
|
||||
### Function Call Sequence
|
||||
|
||||
```c
|
||||
// In main.c - first_time_startup branch
|
||||
if (is_first_time_startup()) {
|
||||
// 1. Key Generation
|
||||
first_time_startup_sequence(&cli_options);
|
||||
// → Generates keys, stores in g_unified_cache
|
||||
// → Sets g_database_path
|
||||
// → Does NOT populate config yet
|
||||
|
||||
// 2. Database Creation
|
||||
init_database(g_database_path);
|
||||
// → Creates database with schema
|
||||
|
||||
// 3. Complete Config Population (NEW FUNCTION)
|
||||
populate_all_config_values_atomic(&cli_options);
|
||||
// → Inserts ALL defaults + pubkeys in single transaction
|
||||
// → Does NOT apply CLI overrides yet
|
||||
|
||||
// 4. CLI Override Phase (NEW FUNCTION)
|
||||
apply_cli_overrides_atomic(&cli_options);
|
||||
// → Updates config table with CLI overrides
|
||||
// → Separate transaction after complete config exists
|
||||
|
||||
// 5. Secure Key Storage
|
||||
store_relay_private_key(relay_privkey);
|
||||
|
||||
// 6. Cache Initialization
|
||||
refresh_unified_cache_from_table();
|
||||
}
|
||||
```
|
||||
|
||||
### New Functions Needed
|
||||
|
||||
```c
|
||||
// In config.c
|
||||
int populate_all_config_values_atomic(const cli_options_t* cli_options) {
|
||||
// BEGIN TRANSACTION
|
||||
// Insert ALL defaults from DEFAULT_CONFIG_VALUES[]
|
||||
// Insert admin_pubkey from g_unified_cache
|
||||
// Insert relay_pubkey from g_unified_cache
|
||||
// COMMIT TRANSACTION
|
||||
return 0;
|
||||
}
|
||||
|
||||
int apply_cli_overrides_atomic(const cli_options_t* cli_options) {
|
||||
// BEGIN TRANSACTION
|
||||
// IF port_override: UPDATE config SET value = ? WHERE key = 'relay_port'
|
||||
// IF admin_pubkey_override: UPDATE config SET value = ? WHERE key = 'admin_pubkey'
|
||||
// IF relay_privkey_override: UPDATE config SET value = ? WHERE key = 'relay_privkey'
|
||||
// COMMIT TRANSACTION
|
||||
// invalidate_config_cache()
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Scenario 2: Restart with Existing Database + CLI Options
|
||||
|
||||
### Sequence
|
||||
|
||||
```
|
||||
1. Database Discovery Phase
|
||||
├─ find_existing_db_files() → ["<relay_pubkey>.db"]
|
||||
├─ extract_pubkey_from_filename() → relay_pubkey
|
||||
└─ Sets g_database_path = "<relay_pubkey>.db"
|
||||
|
||||
2. Database Initialization Phase
|
||||
└─ init_database(g_database_path)
|
||||
└─ Opens existing database
|
||||
|
||||
3. Config Validation Phase
|
||||
└─ validate_config_table_completeness()
|
||||
├─ Check if all required keys exist
|
||||
└─ IF missing keys: populate_missing_config_values()
|
||||
|
||||
4. CLI Override Phase (ATOMIC)
|
||||
├─ BEGIN TRANSACTION
|
||||
├─ apply_cli_overrides()
|
||||
│ └─ UPDATE config SET value = ? WHERE key = ?
|
||||
└─ COMMIT TRANSACTION
|
||||
|
||||
5. Cache Initialization Phase
|
||||
└─ refresh_unified_cache_from_table()
|
||||
└─ Loads complete config into g_unified_cache
|
||||
```
|
||||
|
||||
### Function Call Sequence
|
||||
|
||||
```c
|
||||
// In main.c - existing relay branch
|
||||
else {
|
||||
// 1. Database Discovery
|
||||
char** existing_files = find_existing_db_files();
|
||||
char* relay_pubkey = extract_pubkey_from_filename(existing_files[0]);
|
||||
startup_existing_relay(relay_pubkey);
|
||||
// → Sets g_database_path
|
||||
|
||||
// 2. Database Initialization
|
||||
init_database(g_database_path);
|
||||
|
||||
// 3. Config Validation (NEW FUNCTION)
|
||||
validate_config_table_completeness();
|
||||
// → Checks for missing keys
|
||||
// → Populates any missing defaults
|
||||
|
||||
// 4. CLI Override Phase (REUSE FUNCTION)
|
||||
if (has_cli_overrides(&cli_options)) {
|
||||
apply_cli_overrides_atomic(&cli_options);
|
||||
}
|
||||
|
||||
// 5. Cache Initialization
|
||||
refresh_unified_cache_from_table();
|
||||
}
|
||||
```
|
||||
|
||||
### New Functions Needed
|
||||
|
||||
```c
|
||||
// In config.c
|
||||
int validate_config_table_completeness(void) {
|
||||
// Check if all DEFAULT_CONFIG_VALUES keys exist
|
||||
// IF missing: populate_missing_config_values()
|
||||
return 0;
|
||||
}
|
||||
|
||||
int populate_missing_config_values(void) {
|
||||
// BEGIN TRANSACTION
|
||||
// For each key in DEFAULT_CONFIG_VALUES:
|
||||
// IF NOT EXISTS: INSERT INTO config
|
||||
// COMMIT TRANSACTION
|
||||
return 0;
|
||||
}
|
||||
|
||||
int has_cli_overrides(const cli_options_t* cli_options) {
|
||||
return (cli_options->port_override > 0 ||
|
||||
cli_options->admin_pubkey_override[0] != '\0' ||
|
||||
cli_options->relay_privkey_override[0] != '\0');
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Scenario 3: Restart with Existing Database + No CLI Options
|
||||
|
||||
### Sequence
|
||||
|
||||
```
|
||||
1. Database Discovery Phase
|
||||
├─ find_existing_db_files() → ["<relay_pubkey>.db"]
|
||||
├─ extract_pubkey_from_filename() → relay_pubkey
|
||||
└─ Sets g_database_path = "<relay_pubkey>.db"
|
||||
|
||||
2. Database Initialization Phase
|
||||
└─ init_database(g_database_path)
|
||||
└─ Opens existing database
|
||||
|
||||
3. Config Validation Phase
|
||||
└─ validate_config_table_completeness()
|
||||
├─ Check if all required keys exist
|
||||
└─ IF missing keys: populate_missing_config_values()
|
||||
|
||||
4. Cache Initialization Phase (IMMEDIATE)
|
||||
└─ refresh_unified_cache_from_table()
|
||||
└─ Loads complete config into g_unified_cache
|
||||
```
|
||||
|
||||
### Function Call Sequence
|
||||
|
||||
```c
|
||||
// In main.c - existing relay branch (no CLI overrides)
|
||||
else {
|
||||
// 1. Database Discovery
|
||||
char** existing_files = find_existing_db_files();
|
||||
char* relay_pubkey = extract_pubkey_from_filename(existing_files[0]);
|
||||
startup_existing_relay(relay_pubkey);
|
||||
|
||||
// 2. Database Initialization
|
||||
init_database(g_database_path);
|
||||
|
||||
// 3. Config Validation
|
||||
validate_config_table_completeness();
|
||||
|
||||
// 4. Cache Initialization (IMMEDIATE - no overrides to apply)
|
||||
refresh_unified_cache_from_table();
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Key Improvements
|
||||
|
||||
### 1. Atomic Config Creation
|
||||
|
||||
**Before:**
|
||||
```c
|
||||
populate_default_config_values(); // Step 1
|
||||
update_config_in_table("relay_port", port_str); // Step 2
|
||||
add_pubkeys_to_config_table(); // Step 3
|
||||
```
|
||||
|
||||
**After:**
|
||||
```c
|
||||
populate_all_config_values_atomic(&cli_options); // Single transaction
|
||||
apply_cli_overrides_atomic(&cli_options); // Separate transaction
|
||||
```
|
||||
|
||||
### 2. Elimination of Race Conditions
|
||||
|
||||
**Before:**
|
||||
- Cache could refresh between steps 1-3
|
||||
- Incomplete config could be read
|
||||
|
||||
**After:**
|
||||
- Config created atomically
|
||||
- Cache only refreshed after complete config exists
|
||||
|
||||
### 3. Unified Code Path
|
||||
|
||||
**Before:**
|
||||
- Different logic for first-time vs restart
|
||||
- `populate_default_config_values()` vs `add_pubkeys_to_config_table()`
|
||||
|
||||
**After:**
|
||||
- Same validation logic for both scenarios
|
||||
- `validate_config_table_completeness()` handles both cases
|
||||
|
||||
### 4. Clear Separation of Concerns
|
||||
|
||||
**Before:**
|
||||
- CLI overrides mixed with default population
|
||||
- Unclear when overrides are applied
|
||||
|
||||
**After:**
|
||||
- Phase 1: Complete config creation
|
||||
- Phase 2: CLI overrides (if any)
|
||||
- Phase 3: Cache initialization
|
||||
|
||||
---
|
||||
|
||||
## Implementation Changes Required
|
||||
|
||||
### 1. New Functions in config.c
|
||||
|
||||
```c
|
||||
// Atomic config population for first-time startup
|
||||
int populate_all_config_values_atomic(const cli_options_t* cli_options);
|
||||
|
||||
// Atomic CLI override application
|
||||
int apply_cli_overrides_atomic(const cli_options_t* cli_options);
|
||||
|
||||
// Config validation for existing databases
|
||||
int validate_config_table_completeness(void);
|
||||
int populate_missing_config_values(void);
|
||||
|
||||
// Helper function
|
||||
int has_cli_overrides(const cli_options_t* cli_options);
|
||||
```
|
||||
|
||||
### 2. Modified Functions in config.c
|
||||
|
||||
```c
|
||||
// Simplify to only generate keys and set database path
|
||||
int first_time_startup_sequence(const cli_options_t* cli_options);
|
||||
|
||||
// Remove config population logic
|
||||
int add_pubkeys_to_config_table(void); // DEPRECATED - logic moved to populate_all_config_values_atomic()
|
||||
```
|
||||
|
||||
### 3. Modified Startup Flow in main.c
|
||||
|
||||
```c
|
||||
// First-time startup
|
||||
if (is_first_time_startup()) {
|
||||
first_time_startup_sequence(&cli_options);
|
||||
init_database(g_database_path);
|
||||
populate_all_config_values_atomic(&cli_options); // NEW
|
||||
apply_cli_overrides_atomic(&cli_options); // NEW
|
||||
store_relay_private_key(relay_privkey);
|
||||
refresh_unified_cache_from_table();
|
||||
}
|
||||
|
||||
// Existing relay
|
||||
else {
|
||||
startup_existing_relay(relay_pubkey);
|
||||
init_database(g_database_path);
|
||||
validate_config_table_completeness(); // NEW
|
||||
if (has_cli_overrides(&cli_options)) {
|
||||
apply_cli_overrides_atomic(&cli_options); // NEW
|
||||
}
|
||||
refresh_unified_cache_from_table();
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Benefits
|
||||
|
||||
1. **Atomicity**: Config creation is atomic - no partial states
|
||||
2. **Simplicity**: Clear phases with single responsibility
|
||||
3. **Safety**: Cache only loaded after complete config exists
|
||||
4. **Consistency**: Same validation logic for all scenarios
|
||||
5. **Maintainability**: Easier to understand and modify
|
||||
6. **Testability**: Each phase can be tested independently
|
||||
|
||||
---
|
||||
|
||||
## Migration Path
|
||||
|
||||
1. Implement new functions in config.c
|
||||
2. Update main.c startup flow
|
||||
3. Test first-time startup scenario
|
||||
4. Test restart with CLI overrides
|
||||
5. Test restart without CLI overrides
|
||||
6. Remove deprecated functions
|
||||
7. Update documentation
|
||||
|
||||
---
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Test Cases
|
||||
|
||||
1. **First-time startup with defaults**
|
||||
- Verify all config values created atomically
|
||||
- Verify cache loads complete config
|
||||
|
||||
2. **First-time startup with port override**
|
||||
- Verify defaults created first
|
||||
- Verify port override applied second
|
||||
- Verify cache reflects override
|
||||
|
||||
3. **Restart with complete config**
|
||||
- Verify no config changes
|
||||
- Verify cache loads immediately
|
||||
|
||||
4. **Restart with missing config keys**
|
||||
- Verify missing keys populated
|
||||
- Verify existing keys unchanged
|
||||
|
||||
5. **Restart with CLI overrides**
|
||||
- Verify overrides applied atomically
|
||||
- Verify cache invalidated and refreshed
|
||||
|
||||
### Validation Points
|
||||
|
||||
- Config table row count after each phase
|
||||
- Cache validity state after each phase
|
||||
- Transaction boundaries (BEGIN/COMMIT)
|
||||
- Error handling for failed transactions
|
||||
746
docs/unified_startup_implementation_plan.md
Normal file
746
docs/unified_startup_implementation_plan.md
Normal file
@@ -0,0 +1,746 @@
|
||||
# Unified Startup Implementation Plan
|
||||
|
||||
## Overview
|
||||
|
||||
This document provides a detailed implementation plan for refactoring the startup sequence to use atomic config creation followed by CLI overrides. This plan breaks down the work into discrete, testable steps.
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Create New Functions in config.c
|
||||
|
||||
### Step 1.1: Implement `populate_all_config_values_atomic()`
|
||||
|
||||
**Location**: `src/config.c`
|
||||
|
||||
**Purpose**: Create complete config table in single transaction for first-time startup
|
||||
|
||||
**Function Signature**:
|
||||
```c
|
||||
int populate_all_config_values_atomic(const cli_options_t* cli_options);
|
||||
```
|
||||
|
||||
**Implementation Details**:
|
||||
```c
|
||||
int populate_all_config_values_atomic(const cli_options_t* cli_options) {
|
||||
if (!g_database) {
|
||||
DEBUG_ERROR("Database not initialized");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Begin transaction
|
||||
char* err_msg = NULL;
|
||||
int rc = sqlite3_exec(g_database, "BEGIN TRANSACTION;", NULL, NULL, &err_msg);
|
||||
if (rc != SQLITE_OK) {
|
||||
DEBUG_ERROR("Failed to begin transaction: %s", err_msg);
|
||||
sqlite3_free(err_msg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Prepare INSERT statement
|
||||
sqlite3_stmt* stmt = NULL;
|
||||
const char* sql = "INSERT INTO config (key, value) VALUES (?, ?)";
|
||||
rc = sqlite3_prepare_v2(g_database, sql, -1, &stmt, NULL);
|
||||
if (rc != SQLITE_OK) {
|
||||
DEBUG_ERROR("Failed to prepare statement: %s", sqlite3_errmsg(g_database));
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Insert all default config values
|
||||
for (size_t i = 0; i < sizeof(DEFAULT_CONFIG_VALUES) / sizeof(DEFAULT_CONFIG_VALUES[0]); i++) {
|
||||
sqlite3_reset(stmt);
|
||||
sqlite3_bind_text(stmt, 1, DEFAULT_CONFIG_VALUES[i].key, -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 2, DEFAULT_CONFIG_VALUES[i].value, -1, SQLITE_STATIC);
|
||||
|
||||
rc = sqlite3_step(stmt);
|
||||
if (rc != SQLITE_DONE) {
|
||||
DEBUG_ERROR("Failed to insert config key '%s': %s",
|
||||
DEFAULT_CONFIG_VALUES[i].key, sqlite3_errmsg(g_database));
|
||||
sqlite3_finalize(stmt);
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
// Insert admin_pubkey from cache
|
||||
sqlite3_reset(stmt);
|
||||
sqlite3_bind_text(stmt, 1, "admin_pubkey", -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 2, g_unified_cache.admin_pubkey, -1, SQLITE_STATIC);
|
||||
rc = sqlite3_step(stmt);
|
||||
if (rc != SQLITE_DONE) {
|
||||
DEBUG_ERROR("Failed to insert admin_pubkey: %s", sqlite3_errmsg(g_database));
|
||||
sqlite3_finalize(stmt);
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Insert relay_pubkey from cache
|
||||
sqlite3_reset(stmt);
|
||||
sqlite3_bind_text(stmt, 1, "relay_pubkey", -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 2, g_unified_cache.relay_pubkey, -1, SQLITE_STATIC);
|
||||
rc = sqlite3_step(stmt);
|
||||
if (rc != SQLITE_DONE) {
|
||||
DEBUG_ERROR("Failed to insert relay_pubkey: %s", sqlite3_errmsg(g_database));
|
||||
sqlite3_finalize(stmt);
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
|
||||
sqlite3_finalize(stmt);
|
||||
|
||||
// Commit transaction
|
||||
rc = sqlite3_exec(g_database, "COMMIT;", NULL, NULL, &err_msg);
|
||||
if (rc != SQLITE_OK) {
|
||||
DEBUG_ERROR("Failed to commit transaction: %s", err_msg);
|
||||
sqlite3_free(err_msg);
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
|
||||
DEBUG_INFO("Successfully populated all config values atomically");
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
**Testing**:
|
||||
- Verify transaction atomicity (all or nothing)
|
||||
- Verify all DEFAULT_CONFIG_VALUES inserted
|
||||
- Verify admin_pubkey and relay_pubkey inserted
|
||||
- Verify error handling on failure
|
||||
|
||||
---
|
||||
|
||||
### Step 1.2: Implement `apply_cli_overrides_atomic()`
|
||||
|
||||
**Location**: `src/config.c`
|
||||
|
||||
**Purpose**: Apply CLI overrides to existing config table in single transaction
|
||||
|
||||
**Function Signature**:
|
||||
```c
|
||||
int apply_cli_overrides_atomic(const cli_options_t* cli_options);
|
||||
```
|
||||
|
||||
**Implementation Details**:
|
||||
```c
|
||||
int apply_cli_overrides_atomic(const cli_options_t* cli_options) {
|
||||
if (!g_database) {
|
||||
DEBUG_ERROR("Database not initialized");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!cli_options) {
|
||||
DEBUG_ERROR("CLI options is NULL");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Check if any overrides exist
|
||||
bool has_overrides = false;
|
||||
if (cli_options->port_override > 0) has_overrides = true;
|
||||
if (cli_options->admin_pubkey_override[0] != '\0') has_overrides = true;
|
||||
if (cli_options->relay_privkey_override[0] != '\0') has_overrides = true;
|
||||
|
||||
if (!has_overrides) {
|
||||
DEBUG_INFO("No CLI overrides to apply");
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Begin transaction
|
||||
char* err_msg = NULL;
|
||||
int rc = sqlite3_exec(g_database, "BEGIN TRANSACTION;", NULL, NULL, &err_msg);
|
||||
if (rc != SQLITE_OK) {
|
||||
DEBUG_ERROR("Failed to begin transaction: %s", err_msg);
|
||||
sqlite3_free(err_msg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Prepare UPDATE statement
|
||||
sqlite3_stmt* stmt = NULL;
|
||||
const char* sql = "UPDATE config SET value = ? WHERE key = ?";
|
||||
rc = sqlite3_prepare_v2(g_database, sql, -1, &stmt, NULL);
|
||||
if (rc != SQLITE_OK) {
|
||||
DEBUG_ERROR("Failed to prepare statement: %s", sqlite3_errmsg(g_database));
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Apply port override
|
||||
if (cli_options->port_override > 0) {
|
||||
char port_str[16];
|
||||
snprintf(port_str, sizeof(port_str), "%d", cli_options->port_override);
|
||||
|
||||
sqlite3_reset(stmt);
|
||||
sqlite3_bind_text(stmt, 1, port_str, -1, SQLITE_TRANSIENT);
|
||||
sqlite3_bind_text(stmt, 2, "relay_port", -1, SQLITE_STATIC);
|
||||
|
||||
rc = sqlite3_step(stmt);
|
||||
if (rc != SQLITE_DONE) {
|
||||
DEBUG_ERROR("Failed to update relay_port: %s", sqlite3_errmsg(g_database));
|
||||
sqlite3_finalize(stmt);
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
DEBUG_INFO("Applied CLI override: relay_port = %s", port_str);
|
||||
}
|
||||
|
||||
// Apply admin_pubkey override
|
||||
if (cli_options->admin_pubkey_override[0] != '\0') {
|
||||
sqlite3_reset(stmt);
|
||||
sqlite3_bind_text(stmt, 1, cli_options->admin_pubkey_override, -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 2, "admin_pubkey", -1, SQLITE_STATIC);
|
||||
|
||||
rc = sqlite3_step(stmt);
|
||||
if (rc != SQLITE_DONE) {
|
||||
DEBUG_ERROR("Failed to update admin_pubkey: %s", sqlite3_errmsg(g_database));
|
||||
sqlite3_finalize(stmt);
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
DEBUG_INFO("Applied CLI override: admin_pubkey");
|
||||
}
|
||||
|
||||
// Apply relay_privkey override
|
||||
if (cli_options->relay_privkey_override[0] != '\0') {
|
||||
sqlite3_reset(stmt);
|
||||
sqlite3_bind_text(stmt, 1, cli_options->relay_privkey_override, -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 2, "relay_privkey", -1, SQLITE_STATIC);
|
||||
|
||||
rc = sqlite3_step(stmt);
|
||||
if (rc != SQLITE_DONE) {
|
||||
DEBUG_ERROR("Failed to update relay_privkey: %s", sqlite3_errmsg(g_database));
|
||||
sqlite3_finalize(stmt);
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
DEBUG_INFO("Applied CLI override: relay_privkey");
|
||||
}
|
||||
|
||||
sqlite3_finalize(stmt);
|
||||
|
||||
// Commit transaction
|
||||
rc = sqlite3_exec(g_database, "COMMIT;", NULL, NULL, &err_msg);
|
||||
if (rc != SQLITE_OK) {
|
||||
DEBUG_ERROR("Failed to commit transaction: %s", err_msg);
|
||||
sqlite3_free(err_msg);
|
||||
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Invalidate cache to force refresh
|
||||
invalidate_config_cache();
|
||||
|
||||
DEBUG_INFO("Successfully applied CLI overrides atomically");
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
**Testing**:
|
||||
- Verify transaction atomicity
|
||||
- Verify each override type (port, admin_pubkey, relay_privkey)
|
||||
- Verify cache invalidation after overrides
|
||||
- Verify no-op when no overrides present
|
||||
|
||||
---
|
||||
|
||||
### Step 1.3: Implement `validate_config_table_completeness()`
|
||||
|
||||
**Location**: `src/config.c`
|
||||
|
||||
**Purpose**: Validate config table has all required keys, populate missing ones
|
||||
|
||||
**Function Signature**:
|
||||
```c
|
||||
int validate_config_table_completeness(void);
|
||||
```
|
||||
|
||||
**Implementation Details**:
|
||||
```c
|
||||
int validate_config_table_completeness(void) {
|
||||
if (!g_database) {
|
||||
DEBUG_ERROR("Database not initialized");
|
||||
return -1;
|
||||
}
|
||||
|
||||
DEBUG_INFO("Validating config table completeness");
|
||||
|
||||
// Check each default config key
|
||||
for (size_t i = 0; i < sizeof(DEFAULT_CONFIG_VALUES) / sizeof(DEFAULT_CONFIG_VALUES[0]); i++) {
|
||||
const char* key = DEFAULT_CONFIG_VALUES[i].key;
|
||||
|
||||
// Check if key exists
|
||||
sqlite3_stmt* stmt = NULL;
|
||||
const char* sql = "SELECT COUNT(*) FROM config WHERE key = ?";
|
||||
int rc = sqlite3_prepare_v2(g_database, sql, -1, &stmt, NULL);
|
||||
if (rc != SQLITE_OK) {
|
||||
DEBUG_ERROR("Failed to prepare statement: %s", sqlite3_errmsg(g_database));
|
||||
return -1;
|
||||
}
|
||||
|
||||
sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
|
||||
rc = sqlite3_step(stmt);
|
||||
|
||||
int count = 0;
|
||||
if (rc == SQLITE_ROW) {
|
||||
count = sqlite3_column_int(stmt, 0);
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
|
||||
// If key missing, populate it
|
||||
if (count == 0) {
|
||||
DEBUG_WARN("Config key '%s' missing, populating with default", key);
|
||||
rc = populate_missing_config_key(key, DEFAULT_CONFIG_VALUES[i].value);
|
||||
if (rc != 0) {
|
||||
DEBUG_ERROR("Failed to populate missing key '%s'", key);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DEBUG_INFO("Config table validation complete");
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
**Helper Function**:
|
||||
```c
|
||||
static int populate_missing_config_key(const char* key, const char* value) {
|
||||
sqlite3_stmt* stmt = NULL;
|
||||
const char* sql = "INSERT INTO config (key, value) VALUES (?, ?)";
|
||||
|
||||
int rc = sqlite3_prepare_v2(g_database, sql, -1, &stmt, NULL);
|
||||
if (rc != SQLITE_OK) {
|
||||
DEBUG_ERROR("Failed to prepare statement: %s", sqlite3_errmsg(g_database));
|
||||
return -1;
|
||||
}
|
||||
|
||||
sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 2, value, -1, SQLITE_STATIC);
|
||||
|
||||
rc = sqlite3_step(stmt);
|
||||
sqlite3_finalize(stmt);
|
||||
|
||||
if (rc != SQLITE_DONE) {
|
||||
DEBUG_ERROR("Failed to insert config key '%s': %s", key, sqlite3_errmsg(g_database));
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
**Testing**:
|
||||
- Verify detection of missing keys
|
||||
- Verify population of missing keys with defaults
|
||||
- Verify no changes when all keys present
|
||||
- Verify error handling
|
||||
|
||||
---
|
||||
|
||||
### Step 1.4: Implement `has_cli_overrides()`
|
||||
|
||||
**Location**: `src/config.c`
|
||||
|
||||
**Purpose**: Check if any CLI overrides are present
|
||||
|
||||
**Function Signature**:
|
||||
```c
|
||||
bool has_cli_overrides(const cli_options_t* cli_options);
|
||||
```
|
||||
|
||||
**Implementation Details**:
|
||||
```c
|
||||
bool has_cli_overrides(const cli_options_t* cli_options) {
|
||||
if (!cli_options) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return (cli_options->port_override > 0 ||
|
||||
cli_options->admin_pubkey_override[0] != '\0' ||
|
||||
cli_options->relay_privkey_override[0] != '\0');
|
||||
}
|
||||
```
|
||||
|
||||
**Testing**:
|
||||
- Verify returns true when any override present
|
||||
- Verify returns false when no overrides
|
||||
- Verify NULL safety
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Update Function Declarations in config.h
|
||||
|
||||
### Step 2.1: Add New Function Declarations
|
||||
|
||||
**Location**: `src/config.h`
|
||||
|
||||
**Changes**:
|
||||
```c
|
||||
// Add after existing function declarations
|
||||
|
||||
// Atomic config population for first-time startup
|
||||
int populate_all_config_values_atomic(const cli_options_t* cli_options);
|
||||
|
||||
// Atomic CLI override application
|
||||
int apply_cli_overrides_atomic(const cli_options_t* cli_options);
|
||||
|
||||
// Config validation for existing databases
|
||||
int validate_config_table_completeness(void);
|
||||
|
||||
// Helper function to check for CLI overrides
|
||||
bool has_cli_overrides(const cli_options_t* cli_options);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: Refactor Startup Flow in main.c
|
||||
|
||||
### Step 3.1: Update First-Time Startup Branch
|
||||
|
||||
**Location**: `src/main.c` (around lines 1624-1740)
|
||||
|
||||
**Current Code**:
|
||||
```c
|
||||
if (is_first_time_startup()) {
|
||||
first_time_startup_sequence(&cli_options);
|
||||
init_database(g_database_path);
|
||||
|
||||
// Current incremental approach
|
||||
populate_default_config_values();
|
||||
if (cli_options.port_override > 0) {
|
||||
char port_str[16];
|
||||
snprintf(port_str, sizeof(port_str), "%d", cli_options.port_override);
|
||||
update_config_in_table("relay_port", port_str);
|
||||
}
|
||||
add_pubkeys_to_config_table();
|
||||
|
||||
store_relay_private_key(relay_privkey);
|
||||
refresh_unified_cache_from_table();
|
||||
}
|
||||
```
|
||||
|
||||
**New Code**:
|
||||
```c
|
||||
if (is_first_time_startup()) {
|
||||
// 1. Generate keys and set database path
|
||||
first_time_startup_sequence(&cli_options);
|
||||
|
||||
// 2. Create database with schema
|
||||
init_database(g_database_path);
|
||||
|
||||
// 3. Populate ALL config values atomically (defaults + pubkeys)
|
||||
if (populate_all_config_values_atomic(&cli_options) != 0) {
|
||||
DEBUG_ERROR("Failed to populate config values");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
// 4. Apply CLI overrides atomically (separate transaction)
|
||||
if (apply_cli_overrides_atomic(&cli_options) != 0) {
|
||||
DEBUG_ERROR("Failed to apply CLI overrides");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
// 5. Store relay private key securely
|
||||
store_relay_private_key(relay_privkey);
|
||||
|
||||
// 6. Load complete config into cache
|
||||
refresh_unified_cache_from_table();
|
||||
}
|
||||
```
|
||||
|
||||
**Testing**:
|
||||
- Verify first-time startup creates complete config
|
||||
- Verify CLI overrides applied correctly
|
||||
- Verify cache loads complete config
|
||||
- Verify error handling at each step
|
||||
|
||||
---
|
||||
|
||||
### Step 3.2: Update Existing Relay Startup Branch
|
||||
|
||||
**Location**: `src/main.c` (around lines 1741-1928)
|
||||
|
||||
**Current Code**:
|
||||
```c
|
||||
else {
|
||||
char** existing_files = find_existing_db_files();
|
||||
char* relay_pubkey = extract_pubkey_from_filename(existing_files[0]);
|
||||
startup_existing_relay(relay_pubkey);
|
||||
|
||||
init_database(g_database_path);
|
||||
|
||||
// Current approach - unclear when overrides applied
|
||||
populate_default_config_values();
|
||||
if (cli_options.port_override > 0) {
|
||||
// ... override logic ...
|
||||
}
|
||||
|
||||
refresh_unified_cache_from_table();
|
||||
}
|
||||
```
|
||||
|
||||
**New Code**:
|
||||
```c
|
||||
else {
|
||||
// 1. Discover existing database
|
||||
char** existing_files = find_existing_db_files();
|
||||
if (!existing_files || !existing_files[0]) {
|
||||
DEBUG_ERROR("No existing database files found");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
char* relay_pubkey = extract_pubkey_from_filename(existing_files[0]);
|
||||
startup_existing_relay(relay_pubkey);
|
||||
|
||||
// 2. Open existing database
|
||||
init_database(g_database_path);
|
||||
|
||||
// 3. Validate config table completeness (populate missing keys)
|
||||
if (validate_config_table_completeness() != 0) {
|
||||
DEBUG_ERROR("Failed to validate config table");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
// 4. Apply CLI overrides if present (separate transaction)
|
||||
if (has_cli_overrides(&cli_options)) {
|
||||
if (apply_cli_overrides_atomic(&cli_options) != 0) {
|
||||
DEBUG_ERROR("Failed to apply CLI overrides");
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Load complete config into cache
|
||||
refresh_unified_cache_from_table();
|
||||
}
|
||||
```
|
||||
|
||||
**Testing**:
|
||||
- Verify existing relay startup with complete config
|
||||
- Verify missing keys populated
|
||||
- Verify CLI overrides applied when present
|
||||
- Verify no changes when no overrides
|
||||
- Verify cache loads correctly
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Deprecate Old Functions
|
||||
|
||||
### Step 4.1: Mark Functions as Deprecated
|
||||
|
||||
**Location**: `src/config.c`
|
||||
|
||||
**Functions to Deprecate**:
|
||||
1. `populate_default_config_values()` - replaced by `populate_all_config_values_atomic()`
|
||||
2. `add_pubkeys_to_config_table()` - logic moved to `populate_all_config_values_atomic()`
|
||||
|
||||
**Changes**:
|
||||
```c
|
||||
// Mark as deprecated in comments
|
||||
// DEPRECATED: Use populate_all_config_values_atomic() instead
|
||||
// This function will be removed in a future version
|
||||
int populate_default_config_values(void) {
|
||||
// ... existing implementation ...
|
||||
}
|
||||
|
||||
// DEPRECATED: Use populate_all_config_values_atomic() instead
|
||||
// This function will be removed in a future version
|
||||
int add_pubkeys_to_config_table(void) {
|
||||
// ... existing implementation ...
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
1. **Test `populate_all_config_values_atomic()`**
|
||||
- Test with valid cli_options
|
||||
- Test transaction rollback on error
|
||||
- Test all config keys inserted
|
||||
- Test pubkeys inserted correctly
|
||||
|
||||
2. **Test `apply_cli_overrides_atomic()`**
|
||||
- Test port override
|
||||
- Test admin_pubkey override
|
||||
- Test relay_privkey override
|
||||
- Test multiple overrides
|
||||
- Test no overrides
|
||||
- Test transaction rollback on error
|
||||
|
||||
3. **Test `validate_config_table_completeness()`**
|
||||
- Test with complete config
|
||||
- Test with missing keys
|
||||
- Test population of missing keys
|
||||
|
||||
4. **Test `has_cli_overrides()`**
|
||||
- Test with each override type
|
||||
- Test with no overrides
|
||||
- Test with NULL cli_options
|
||||
|
||||
### Integration Tests
|
||||
|
||||
1. **First-Time Startup**
|
||||
```bash
|
||||
# Clean environment
|
||||
rm -f *.db
|
||||
|
||||
# Start relay with defaults
|
||||
./build/c_relay_x86
|
||||
|
||||
# Verify config table complete
|
||||
sqlite3 <relay_pubkey>.db "SELECT COUNT(*) FROM config;"
|
||||
# Expected: 20+ rows (all defaults + pubkeys)
|
||||
|
||||
# Verify cache loaded
|
||||
# Check relay.log for cache refresh message
|
||||
```
|
||||
|
||||
2. **First-Time Startup with CLI Overrides**
|
||||
```bash
|
||||
# Clean environment
|
||||
rm -f *.db
|
||||
|
||||
# Start relay with port override
|
||||
./build/c_relay_x86 --port 9999
|
||||
|
||||
# Verify port override applied
|
||||
sqlite3 <relay_pubkey>.db "SELECT value FROM config WHERE key='relay_port';"
|
||||
# Expected: 9999
|
||||
```
|
||||
|
||||
3. **Restart with Existing Database**
|
||||
```bash
|
||||
# Start relay (creates database)
|
||||
./build/c_relay_x86
|
||||
|
||||
# Stop relay
|
||||
pkill -f c_relay_
|
||||
|
||||
# Restart relay
|
||||
./build/c_relay_x86
|
||||
|
||||
# Verify config unchanged
|
||||
# Check relay.log for validation message
|
||||
```
|
||||
|
||||
4. **Restart with CLI Overrides**
|
||||
```bash
|
||||
# Start relay (creates database)
|
||||
./build/c_relay_x86
|
||||
|
||||
# Stop relay
|
||||
pkill -f c_relay_
|
||||
|
||||
# Restart with port override
|
||||
./build/c_relay_x86 --port 9999
|
||||
|
||||
# Verify port override applied
|
||||
sqlite3 <relay_pubkey>.db "SELECT value FROM config WHERE key='relay_port';"
|
||||
# Expected: 9999
|
||||
```
|
||||
|
||||
### Regression Tests
|
||||
|
||||
Run existing test suite to ensure no breakage:
|
||||
```bash
|
||||
./tests/run_all_tests.sh
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase 6: Documentation Updates
|
||||
|
||||
### Files to Update
|
||||
|
||||
1. **docs/configuration_guide.md**
|
||||
- Update startup sequence description
|
||||
- Document new atomic config creation
|
||||
- Document CLI override behavior
|
||||
|
||||
2. **docs/startup_flows_complete.md**
|
||||
- Update with new flow diagrams
|
||||
- Document new function calls
|
||||
|
||||
3. **README.md**
|
||||
- Update CLI options documentation
|
||||
- Document override behavior
|
||||
|
||||
---
|
||||
|
||||
## Implementation Timeline
|
||||
|
||||
### Week 1: Core Functions
|
||||
- Day 1-2: Implement `populate_all_config_values_atomic()`
|
||||
- Day 3-4: Implement `apply_cli_overrides_atomic()`
|
||||
- Day 5: Implement `validate_config_table_completeness()` and `has_cli_overrides()`
|
||||
|
||||
### Week 2: Integration
|
||||
- Day 1-2: Update main.c startup flow
|
||||
- Day 3-4: Testing and bug fixes
|
||||
- Day 5: Documentation updates
|
||||
|
||||
### Week 3: Cleanup
|
||||
- Day 1-2: Deprecate old functions
|
||||
- Day 3-4: Final testing and validation
|
||||
- Day 5: Code review and merge
|
||||
|
||||
---
|
||||
|
||||
## Risk Mitigation
|
||||
|
||||
### Potential Issues
|
||||
|
||||
1. **Database Lock Contention**
|
||||
- Risk: Multiple transactions could cause locks
|
||||
- Mitigation: Use BEGIN IMMEDIATE for write transactions
|
||||
|
||||
2. **Cache Invalidation Timing**
|
||||
- Risk: Cache could be read before overrides applied
|
||||
- Mitigation: Invalidate cache immediately after overrides
|
||||
|
||||
3. **Backward Compatibility**
|
||||
- Risk: Existing databases might have incomplete config
|
||||
- Mitigation: `validate_config_table_completeness()` handles this
|
||||
|
||||
4. **Transaction Rollback**
|
||||
- Risk: Partial config on error
|
||||
- Mitigation: All operations in transactions with proper rollback
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. ✅ All config values created atomically in first-time startup
|
||||
2. ✅ CLI overrides applied in separate atomic transaction
|
||||
3. ✅ Existing databases validated and missing keys populated
|
||||
4. ✅ Cache only loaded after complete config exists
|
||||
5. ✅ All existing tests pass
|
||||
6. ✅ No race conditions in config creation
|
||||
7. ✅ Clear separation between config creation and override phases
|
||||
|
||||
---
|
||||
|
||||
## Rollback Plan
|
||||
|
||||
If issues arise during implementation:
|
||||
|
||||
1. **Revert main.c changes** - restore original startup flow
|
||||
2. **Keep new functions** - they can coexist with old code
|
||||
3. **Add feature flag** - allow toggling between old and new behavior
|
||||
4. **Gradual migration** - enable new behavior per scenario
|
||||
|
||||
```c
|
||||
// Feature flag approach
|
||||
#define USE_ATOMIC_CONFIG_CREATION 1
|
||||
|
||||
#if USE_ATOMIC_CONFIG_CREATION
|
||||
// New atomic approach
|
||||
populate_all_config_values_atomic(&cli_options);
|
||||
apply_cli_overrides_atomic(&cli_options);
|
||||
#else
|
||||
// Old incremental approach
|
||||
populate_default_config_values();
|
||||
// ... existing code ...
|
||||
#endif
|
||||
```
|
||||
150
examples/deployment/static-builder.Dockerfile
Normal file
150
examples/deployment/static-builder.Dockerfile
Normal file
@@ -0,0 +1,150 @@
|
||||
# MUSL-based fully static C-Relay builder
|
||||
# Produces portable binaries with zero runtime dependencies
|
||||
|
||||
FROM alpine:latest AS builder
|
||||
|
||||
# Add alternative mirrors and install build dependencies with retry
|
||||
RUN echo "http://dl-cdn.alpinelinux.org/alpine/v3.22/main" > /etc/apk/repositories && \
|
||||
echo "http://dl-cdn.alpinelinux.org/alpine/v3.22/community" >> /etc/apk/repositories && \
|
||||
echo "http://mirror.leaseweb.com/alpine/v3.22/main" >> /etc/apk/repositories && \
|
||||
echo "http://mirror.leaseweb.com/alpine/v3.22/community" >> /etc/apk/repositories && \
|
||||
apk update --no-cache || (sleep 5 && apk update --no-cache) || (sleep 10 && apk update --no-cache)
|
||||
|
||||
# Install build dependencies with retry logic
|
||||
RUN apk add --no-cache \
|
||||
build-base \
|
||||
musl-dev \
|
||||
git \
|
||||
cmake \
|
||||
pkgconfig \
|
||||
autoconf \
|
||||
automake \
|
||||
libtool \
|
||||
openssl-dev \
|
||||
openssl-libs-static \
|
||||
zlib-dev \
|
||||
zlib-static \
|
||||
curl-dev \
|
||||
curl-static \
|
||||
sqlite-dev \
|
||||
sqlite-static \
|
||||
linux-headers || \
|
||||
(sleep 10 && apk add --no-cache \
|
||||
build-base \
|
||||
musl-dev \
|
||||
git \
|
||||
cmake \
|
||||
pkgconfig \
|
||||
autoconf \
|
||||
automake \
|
||||
libtool \
|
||||
openssl-dev \
|
||||
openssl-libs-static \
|
||||
zlib-dev \
|
||||
zlib-static \
|
||||
curl-dev \
|
||||
curl-static \
|
||||
sqlite-dev \
|
||||
sqlite-static \
|
||||
linux-headers)
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /build
|
||||
|
||||
# Build zlib static (if needed)
|
||||
RUN if [ ! -f /usr/lib/libz.a ]; then \
|
||||
cd /tmp && \
|
||||
wget https://zlib.net/zlib-1.3.1.tar.gz && \
|
||||
tar xzf zlib-1.3.1.tar.gz && \
|
||||
cd zlib-1.3.1 && \
|
||||
./configure --static --prefix=/usr && \
|
||||
make && make install; \
|
||||
fi
|
||||
|
||||
# Build OpenSSL static
|
||||
RUN cd /tmp && \
|
||||
wget https://www.openssl.org/source/openssl-3.0.13.tar.gz && \
|
||||
tar xzf openssl-3.0.13.tar.gz && \
|
||||
cd openssl-3.0.13 && \
|
||||
./Configure linux-x86_64 no-shared --prefix=/usr && \
|
||||
make && make install_sw
|
||||
|
||||
# Build SQLite with JSON1 extension enabled
|
||||
RUN cd /tmp && \
|
||||
wget https://www.sqlite.org/2024/sqlite-autoconf-3460000.tar.gz && \
|
||||
tar xzf sqlite-autoconf-3460000.tar.gz && \
|
||||
cd sqlite-autoconf-3460000 && \
|
||||
./configure \
|
||||
--enable-static \
|
||||
--disable-shared \
|
||||
--enable-json1 \
|
||||
--enable-fts5 \
|
||||
--prefix=/usr \
|
||||
CFLAGS="-DSQLITE_ENABLE_JSON1=1 -DSQLITE_ENABLE_FTS5=1" && \
|
||||
make && make install
|
||||
|
||||
# Build libsecp256k1 static
|
||||
RUN cd /tmp && \
|
||||
git clone https://github.com/bitcoin-core/secp256k1.git && \
|
||||
cd secp256k1 && \
|
||||
./autogen.sh && \
|
||||
./configure --enable-static --disable-shared --prefix=/usr && \
|
||||
make && make install
|
||||
|
||||
# Build libwebsockets static with OpenSSL
|
||||
RUN cd /tmp && \
|
||||
git clone https://github.com/warmcat/libwebsockets.git && \
|
||||
cd libwebsockets && \
|
||||
mkdir build && cd build && \
|
||||
cmake .. \
|
||||
-DLWS_WITH_STATIC=ON \
|
||||
-DLWS_WITH_SHARED=OFF \
|
||||
-DLWS_WITH_SSL=ON \
|
||||
-DLWS_OPENSSL_LIBRARIES="/usr/lib/libssl.a;/usr/lib/libcrypto.a" \
|
||||
-DLWS_OPENSSL_INCLUDE_DIRS="/usr/include" \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-DCMAKE_INSTALL_PREFIX=/usr && \
|
||||
make && make install
|
||||
|
||||
# Build curl static (minimal features)
|
||||
RUN cd /tmp && \
|
||||
wget https://curl.se/download/curl-8.6.0.tar.gz && \
|
||||
tar xzf curl-8.6.0.tar.gz && \
|
||||
cd curl-8.6.0 && \
|
||||
./configure \
|
||||
--disable-shared \
|
||||
--enable-static \
|
||||
--disable-ldap \
|
||||
--without-libidn2 \
|
||||
--without-brotli \
|
||||
--without-zstd \
|
||||
--without-rtmp \
|
||||
--without-libpsl \
|
||||
--without-krb5 \
|
||||
--with-openssl \
|
||||
--prefix=/usr && \
|
||||
make && make install
|
||||
|
||||
# Copy c-relay source
|
||||
COPY . /build/
|
||||
|
||||
# Initialize submodules
|
||||
RUN git submodule update --init --recursive
|
||||
|
||||
# Build nostr_core_lib
|
||||
RUN cd nostr_core_lib && ./build.sh
|
||||
|
||||
# Build c-relay static
|
||||
RUN make clean && \
|
||||
CC="musl-gcc -static" \
|
||||
CFLAGS="-O2 -Wall -Wextra -std=c99 -g" \
|
||||
LDFLAGS="-static -Wl,--whole-archive -lpthread -Wl,--no-whole-archive" \
|
||||
LIBS="-lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -lsecp256k1 -lssl -lcrypto -lcurl" \
|
||||
make
|
||||
|
||||
# Strip binary for size
|
||||
RUN strip build/c_relay_x86
|
||||
|
||||
# Multi-stage build to produce minimal output
|
||||
FROM scratch AS output
|
||||
COPY --from=builder /build/build/c_relay_x86 /c_relay_static_musl_x86_64
|
||||
@@ -1,19 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# get_settings.sh - Query relay configuration events using nak
|
||||
# Uses admin test key to query kind 33334 configuration events
|
||||
|
||||
# Test key configuration
|
||||
ADMIN_PRIVATE_KEY="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
|
||||
ADMIN_PUBLIC_KEY="6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3"
|
||||
RELAY_PUBLIC_KEY="4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"
|
||||
RELAY_URL="ws://localhost:8888"
|
||||
|
||||
echo "Querying configuration events (kind 33334) from relay at $RELAY_URL"
|
||||
echo "Using admin public key: $ADMIN_PUBLIC_KEY"
|
||||
echo "Looking for relay config: $RELAY_PUBLIC_KEY"
|
||||
echo ""
|
||||
|
||||
# Query for kind 33334 configuration events
|
||||
# These events contain the relay configuration with d-tag matching the relay pubkey
|
||||
nak req -k 33334 "$RELAY_URL" | jq .
|
||||
@@ -17,6 +17,29 @@ print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
|
||||
COMMIT_MESSAGE=""
|
||||
RELEASE_MODE=false
|
||||
|
||||
show_usage() {
|
||||
echo "C-Relay Increment and Push Script"
|
||||
echo ""
|
||||
echo "Usage:"
|
||||
echo " $0 \"commit message\" - Default: increment patch, commit & push"
|
||||
echo " $0 -r \"commit message\" - Release: increment minor, create release"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 \"Fixed event validation bug\""
|
||||
echo " $0 --release \"Major release with new features\""
|
||||
echo ""
|
||||
echo "Default Mode (patch increment):"
|
||||
echo " - Increment patch version (v1.2.3 → v1.2.4)"
|
||||
echo " - Git add, commit with message, and push"
|
||||
echo ""
|
||||
echo "Release Mode (-r flag):"
|
||||
echo " - Increment minor version, zero patch (v1.2.3 → v1.3.0)"
|
||||
echo " - Git add, commit, push, and create Gitea release"
|
||||
echo ""
|
||||
echo "Requirements for Release Mode:"
|
||||
echo " - Gitea token in ~/.gitea_token for release uploads"
|
||||
}
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
@@ -38,32 +61,6 @@ while [[ $# -gt 0 ]]; do
|
||||
esac
|
||||
done
|
||||
|
||||
show_usage() {
|
||||
echo "C-Relay Build and Push Script"
|
||||
echo ""
|
||||
echo "Usage:"
|
||||
echo " $0 \"commit message\" - Default: compile, increment patch, commit & push"
|
||||
echo " $0 -r \"commit message\" - Release: compile x86+arm64, increment minor, create release"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 \"Fixed event validation bug\""
|
||||
echo " $0 --release \"Major release with new features\""
|
||||
echo ""
|
||||
echo "Default Mode (patch increment):"
|
||||
echo " - Compile C-Relay"
|
||||
echo " - Increment patch version (v1.2.3 → v1.2.4)"
|
||||
echo " - Git add, commit with message, and push"
|
||||
echo ""
|
||||
echo "Release Mode (-r flag):"
|
||||
echo " - Compile C-Relay for x86_64 and arm64"
|
||||
echo " - Increment minor version, zero patch (v1.2.3 → v1.3.0)"
|
||||
echo " - Git add, commit, push, and create Gitea release"
|
||||
echo ""
|
||||
echo "Requirements for Release Mode:"
|
||||
echo " - For ARM64 builds: make install-arm64-deps (optional - will build x86_64 only if missing)"
|
||||
echo " - Gitea token in ~/.gitea_token for release uploads"
|
||||
}
|
||||
|
||||
# Validate inputs
|
||||
if [[ -z "$COMMIT_MESSAGE" ]]; then
|
||||
print_error "Commit message is required"
|
||||
@@ -83,19 +80,19 @@ check_git_repo() {
|
||||
# Function to get current version and increment appropriately
|
||||
increment_version() {
|
||||
local increment_type="$1" # "patch" or "minor"
|
||||
|
||||
|
||||
print_status "Getting current version..."
|
||||
|
||||
|
||||
# Get the highest version tag (not chronologically latest)
|
||||
LATEST_TAG=$(git tag -l 'v*.*.*' | sort -V | tail -n 1 || echo "")
|
||||
if [[ -z "$LATEST_TAG" ]]; then
|
||||
LATEST_TAG="v0.0.0"
|
||||
print_warning "No version tags found, starting from $LATEST_TAG"
|
||||
fi
|
||||
|
||||
|
||||
# Extract version components (remove 'v' prefix)
|
||||
VERSION=${LATEST_TAG#v}
|
||||
|
||||
|
||||
# Parse major.minor.patch using regex
|
||||
if [[ $VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
|
||||
MAJOR=${BASH_REMATCH[1]}
|
||||
@@ -106,7 +103,7 @@ increment_version() {
|
||||
print_error "Expected format: v0.1.0"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Increment version based on type
|
||||
if [[ "$increment_type" == "minor" ]]; then
|
||||
# Minor release: increment minor, zero patch
|
||||
@@ -120,85 +117,18 @@ increment_version() {
|
||||
NEW_VERSION="v${MAJOR}.${MINOR}.${NEW_PATCH}"
|
||||
print_status "Default mode: incrementing patch version"
|
||||
fi
|
||||
|
||||
|
||||
print_status "Current version: $LATEST_TAG"
|
||||
print_status "New version: $NEW_VERSION"
|
||||
|
||||
|
||||
# Export for use in other functions
|
||||
export NEW_VERSION
|
||||
}
|
||||
|
||||
# Function to compile the C-Relay project
|
||||
compile_project() {
|
||||
print_status "Compiling C-Relay..."
|
||||
|
||||
# Clean previous build
|
||||
if make clean > /dev/null 2>&1; then
|
||||
print_success "Cleaned previous build"
|
||||
else
|
||||
print_warning "Clean failed or no Makefile found"
|
||||
fi
|
||||
|
||||
# Force regenerate main.h to pick up new tags
|
||||
if make force-version > /dev/null 2>&1; then
|
||||
print_success "Regenerated main.h"
|
||||
else
|
||||
print_warning "Failed to regenerate main.h"
|
||||
fi
|
||||
|
||||
# Compile the project
|
||||
if make > /dev/null 2>&1; then
|
||||
print_success "C-Relay compiled successfully"
|
||||
else
|
||||
print_error "Compilation failed"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to build release binaries
|
||||
build_release_binaries() {
|
||||
print_status "Building release binaries..."
|
||||
|
||||
# Build x86_64 version
|
||||
print_status "Building x86_64 version..."
|
||||
make clean > /dev/null 2>&1
|
||||
if make x86 > /dev/null 2>&1; then
|
||||
if [[ -f "build/c_relay_x86" ]]; then
|
||||
cp build/c_relay_x86 c-relay-x86_64
|
||||
print_success "x86_64 binary created: c-relay-x86_64"
|
||||
else
|
||||
print_error "x86_64 binary not found after compilation"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
print_error "x86_64 build failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Try to build ARM64 version
|
||||
print_status "Attempting ARM64 build..."
|
||||
make clean > /dev/null 2>&1
|
||||
if make arm64 > /dev/null 2>&1; then
|
||||
if [[ -f "build/c_relay_arm64" ]]; then
|
||||
cp build/c_relay_arm64 c-relay-arm64
|
||||
print_success "ARM64 binary created: c-relay-arm64"
|
||||
else
|
||||
print_warning "ARM64 binary not found after compilation"
|
||||
fi
|
||||
else
|
||||
print_warning "ARM64 build failed - ARM64 cross-compilation not properly set up"
|
||||
print_status "Only x86_64 binary will be included in release"
|
||||
fi
|
||||
|
||||
# Restore normal build
|
||||
make clean > /dev/null 2>&1
|
||||
make > /dev/null 2>&1
|
||||
}
|
||||
|
||||
# Function to commit and push changes
|
||||
git_commit_and_push() {
|
||||
print_status "Preparing git commit..."
|
||||
|
||||
|
||||
# Stage all changes
|
||||
if git add . > /dev/null 2>&1; then
|
||||
print_success "Staged all changes"
|
||||
@@ -206,7 +136,7 @@ git_commit_and_push() {
|
||||
print_error "Failed to stage changes"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Check if there are changes to commit
|
||||
if git diff --staged --quiet; then
|
||||
print_warning "No changes to commit"
|
||||
@@ -219,14 +149,14 @@ git_commit_and_push() {
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Create new git tag
|
||||
if git tag "$NEW_VERSION" > /dev/null 2>&1; then
|
||||
print_success "Created tag: $NEW_VERSION"
|
||||
else
|
||||
print_warning "Tag $NEW_VERSION already exists"
|
||||
fi
|
||||
|
||||
|
||||
# Push changes and tags
|
||||
print_status "Pushing to remote repository..."
|
||||
if git push > /dev/null 2>&1; then
|
||||
@@ -235,7 +165,7 @@ git_commit_and_push() {
|
||||
print_error "Failed to push changes"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Push only the new tag to avoid conflicts with existing tags
|
||||
if git push origin "$NEW_VERSION" > /dev/null 2>&1; then
|
||||
print_success "Pushed tag: $NEW_VERSION"
|
||||
@@ -253,7 +183,7 @@ git_commit_and_push() {
|
||||
# Function to commit and push changes without creating a tag (tag already created)
|
||||
git_commit_and_push_no_tag() {
|
||||
print_status "Preparing git commit..."
|
||||
|
||||
|
||||
# Stage all changes
|
||||
if git add . > /dev/null 2>&1; then
|
||||
print_success "Staged all changes"
|
||||
@@ -261,7 +191,7 @@ git_commit_and_push_no_tag() {
|
||||
print_error "Failed to stage changes"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Check if there are changes to commit
|
||||
if git diff --staged --quiet; then
|
||||
print_warning "No changes to commit"
|
||||
@@ -274,7 +204,7 @@ git_commit_and_push_no_tag() {
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# Push changes and tags
|
||||
print_status "Pushing to remote repository..."
|
||||
if git push > /dev/null 2>&1; then
|
||||
@@ -283,7 +213,7 @@ git_commit_and_push_no_tag() {
|
||||
print_error "Failed to push changes"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Push only the new tag to avoid conflicts with existing tags
|
||||
if git push origin "$NEW_VERSION" > /dev/null 2>&1; then
|
||||
print_success "Pushed tag: $NEW_VERSION"
|
||||
@@ -301,40 +231,40 @@ git_commit_and_push_no_tag() {
|
||||
# Function to create Gitea release
|
||||
create_gitea_release() {
|
||||
print_status "Creating Gitea release..."
|
||||
|
||||
|
||||
# Check for Gitea token
|
||||
if [[ ! -f "$HOME/.gitea_token" ]]; then
|
||||
print_warning "No ~/.gitea_token found. Skipping release creation."
|
||||
print_warning "Create ~/.gitea_token with your Gitea access token to enable releases."
|
||||
return 0
|
||||
fi
|
||||
|
||||
|
||||
local token=$(cat "$HOME/.gitea_token" | tr -d '\n\r')
|
||||
local api_url="https://git.laantungir.net/api/v1/repos/laantungir/c-relay"
|
||||
|
||||
|
||||
# Create release
|
||||
print_status "Creating release $NEW_VERSION..."
|
||||
local response=$(curl -s -X POST "$api_url/releases" \
|
||||
-H "Authorization: token $token" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"tag_name\": \"$NEW_VERSION\", \"name\": \"$NEW_VERSION\", \"body\": \"$COMMIT_MESSAGE\"}")
|
||||
|
||||
|
||||
if echo "$response" | grep -q '"id"'; then
|
||||
print_success "Created release $NEW_VERSION"
|
||||
upload_release_binaries "$api_url" "$token"
|
||||
return 0
|
||||
elif echo "$response" | grep -q "already exists"; then
|
||||
print_warning "Release $NEW_VERSION already exists"
|
||||
upload_release_binaries "$api_url" "$token"
|
||||
return 0
|
||||
else
|
||||
print_error "Failed to create release $NEW_VERSION"
|
||||
print_error "Response: $response"
|
||||
|
||||
|
||||
# Try to check if the release exists anyway
|
||||
print_status "Checking if release exists..."
|
||||
local check_response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
|
||||
if echo "$check_response" | grep -q '"id"'; then
|
||||
print_warning "Release exists but creation response was unexpected"
|
||||
upload_release_binaries "$api_url" "$token"
|
||||
return 0
|
||||
else
|
||||
print_error "Release does not exist and creation failed"
|
||||
return 1
|
||||
@@ -342,78 +272,19 @@ create_gitea_release() {
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to upload release binaries
|
||||
upload_release_binaries() {
|
||||
local api_url="$1"
|
||||
local token="$2"
|
||||
|
||||
# Get release ID with more robust parsing
|
||||
print_status "Getting release ID for $NEW_VERSION..."
|
||||
local response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
|
||||
local release_id=$(echo "$response" | grep -o '"id":[0-9]*' | head -n1 | cut -d: -f2)
|
||||
|
||||
if [[ -z "$release_id" ]]; then
|
||||
print_error "Could not get release ID for $NEW_VERSION"
|
||||
print_error "API Response: $response"
|
||||
|
||||
# Try to list all releases to debug
|
||||
print_status "Available releases:"
|
||||
curl -s -H "Authorization: token $token" "$api_url/releases" | grep -o '"tag_name":"[^"]*"' | head -5
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_success "Found release ID: $release_id"
|
||||
|
||||
# Upload x86_64 binary
|
||||
if [[ -f "c-relay-x86_64" ]]; then
|
||||
print_status "Uploading x86_64 binary..."
|
||||
if curl -s -X POST "$api_url/releases/$release_id/assets" \
|
||||
-H "Authorization: token $token" \
|
||||
-F "attachment=@c-relay-x86_64;filename=c-relay-${NEW_VERSION}-linux-x86_64" > /dev/null; then
|
||||
print_success "Uploaded x86_64 binary"
|
||||
else
|
||||
print_warning "Failed to upload x86_64 binary"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Upload ARM64 binary
|
||||
if [[ -f "c-relay-arm64" ]]; then
|
||||
print_status "Uploading ARM64 binary..."
|
||||
if curl -s -X POST "$api_url/releases/$release_id/assets" \
|
||||
-H "Authorization: token $token" \
|
||||
-F "attachment=@c-relay-arm64;filename=c-relay-${NEW_VERSION}-linux-arm64" > /dev/null; then
|
||||
print_success "Uploaded ARM64 binary"
|
||||
else
|
||||
print_warning "Failed to upload ARM64 binary"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to clean up release binaries
|
||||
cleanup_release_binaries() {
|
||||
if [[ -f "c-relay-x86_64" ]]; then
|
||||
rm -f c-relay-x86_64
|
||||
print_status "Cleaned up x86_64 binary"
|
||||
fi
|
||||
if [[ -f "c-relay-arm64" ]]; then
|
||||
rm -f c-relay-arm64
|
||||
print_status "Cleaned up ARM64 binary"
|
||||
fi
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
print_status "C-Relay Build and Push Script"
|
||||
|
||||
print_status "C-Relay Increment and Push Script"
|
||||
|
||||
# Check prerequisites
|
||||
check_git_repo
|
||||
|
||||
|
||||
if [[ "$RELEASE_MODE" == true ]]; then
|
||||
print_status "=== RELEASE MODE ==="
|
||||
|
||||
|
||||
# Increment minor version for releases
|
||||
increment_version "minor"
|
||||
|
||||
|
||||
# Create new git tag BEFORE compilation so version.h picks it up
|
||||
if git tag "$NEW_VERSION" > /dev/null 2>&1; then
|
||||
print_success "Created tag: $NEW_VERSION"
|
||||
@@ -422,31 +293,23 @@ main() {
|
||||
git tag -d "$NEW_VERSION" > /dev/null 2>&1
|
||||
git tag "$NEW_VERSION" > /dev/null 2>&1
|
||||
fi
|
||||
|
||||
# Compile project first (will now pick up the new tag)
|
||||
compile_project
|
||||
|
||||
# Build release binaries
|
||||
build_release_binaries
|
||||
|
||||
|
||||
# Commit and push (but skip tag creation since we already did it)
|
||||
git_commit_and_push_no_tag
|
||||
|
||||
# Create Gitea release with binaries
|
||||
create_gitea_release
|
||||
|
||||
# Cleanup
|
||||
cleanup_release_binaries
|
||||
|
||||
print_success "Release $NEW_VERSION completed successfully!"
|
||||
print_status "Binaries uploaded to Gitea release"
|
||||
|
||||
|
||||
# Create Gitea release
|
||||
if create_gitea_release; then
|
||||
print_success "Release $NEW_VERSION completed successfully!"
|
||||
else
|
||||
print_error "Release creation failed"
|
||||
fi
|
||||
|
||||
else
|
||||
print_status "=== DEFAULT MODE ==="
|
||||
|
||||
|
||||
# Increment patch version for regular commits
|
||||
increment_version "patch"
|
||||
|
||||
|
||||
# Create new git tag BEFORE compilation so version.h picks it up
|
||||
if git tag "$NEW_VERSION" > /dev/null 2>&1; then
|
||||
print_success "Created tag: $NEW_VERSION"
|
||||
@@ -455,17 +318,14 @@ main() {
|
||||
git tag -d "$NEW_VERSION" > /dev/null 2>&1
|
||||
git tag "$NEW_VERSION" > /dev/null 2>&1
|
||||
fi
|
||||
|
||||
# Compile project (will now pick up the new tag)
|
||||
compile_project
|
||||
|
||||
|
||||
# Commit and push (but skip tag creation since we already did it)
|
||||
git_commit_and_push_no_tag
|
||||
|
||||
print_success "Build and push completed successfully!"
|
||||
|
||||
print_success "Increment and push completed successfully!"
|
||||
print_status "Version $NEW_VERSION pushed to repository"
|
||||
fi
|
||||
}
|
||||
|
||||
# Execute main function
|
||||
main
|
||||
main
|
||||
@@ -12,6 +12,7 @@ USE_TEST_KEYS=false
|
||||
ADMIN_KEY=""
|
||||
RELAY_KEY=""
|
||||
PORT_OVERRIDE=""
|
||||
DEBUG_LEVEL="5"
|
||||
|
||||
# Key validation function
|
||||
validate_hex_key() {
|
||||
@@ -71,6 +72,34 @@ while [[ $# -gt 0 ]]; do
|
||||
USE_TEST_KEYS=true
|
||||
shift
|
||||
;;
|
||||
--debug-level=*)
|
||||
DEBUG_LEVEL="${1#*=}"
|
||||
shift
|
||||
;;
|
||||
-d=*)
|
||||
DEBUG_LEVEL="${1#*=}"
|
||||
shift
|
||||
;;
|
||||
--debug-level)
|
||||
if [ -z "$2" ]; then
|
||||
echo "ERROR: Debug level option requires a value"
|
||||
HELP=true
|
||||
shift
|
||||
else
|
||||
DEBUG_LEVEL="$2"
|
||||
shift 2
|
||||
fi
|
||||
;;
|
||||
-d)
|
||||
if [ -z "$2" ]; then
|
||||
echo "ERROR: Debug level option requires a value"
|
||||
HELP=true
|
||||
shift
|
||||
else
|
||||
DEBUG_LEVEL="$2"
|
||||
shift 2
|
||||
fi
|
||||
;;
|
||||
--help|-h)
|
||||
HELP=true
|
||||
shift
|
||||
@@ -104,6 +133,14 @@ if [ -n "$PORT_OVERRIDE" ]; then
|
||||
fi
|
||||
fi
|
||||
|
||||
# Validate debug level if provided
|
||||
if [ -n "$DEBUG_LEVEL" ]; then
|
||||
if ! [[ "$DEBUG_LEVEL" =~ ^[0-5]$ ]]; then
|
||||
echo "ERROR: Debug level must be 0-5, got: $DEBUG_LEVEL"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Show help
|
||||
if [ "$HELP" = true ]; then
|
||||
echo "Usage: $0 [OPTIONS]"
|
||||
@@ -112,6 +149,7 @@ if [ "$HELP" = true ]; then
|
||||
echo " -a, --admin-key <hex> 64-character hex admin private key"
|
||||
echo " -r, --relay-key <hex> 64-character hex relay private key"
|
||||
echo " -p, --port <port> Custom port override (default: 8888)"
|
||||
echo " -d, --debug-level <0-5> Set debug level: 0=none, 1=errors, 2=warnings, 3=info, 4=debug, 5=trace"
|
||||
echo " --preserve-database Keep existing database files (don't delete for fresh start)"
|
||||
echo " --test-keys, -t Use deterministic test keys for development (admin: all 'a's, relay: all '1's)"
|
||||
echo " --help, -h Show this help message"
|
||||
@@ -125,6 +163,8 @@ if [ "$HELP" = true ]; then
|
||||
echo " $0 # Fresh start with random keys"
|
||||
echo " $0 -a <admin-hex> -r <relay-hex> # Use custom keys"
|
||||
echo " $0 -a <admin-hex> -p 9000 # Custom admin key on port 9000"
|
||||
echo " $0 --debug-level=3 # Start with debug level 3 (info)"
|
||||
echo " $0 -d=5 # Start with debug level 5 (trace)"
|
||||
echo " $0 --preserve-database # Preserve existing database and keys"
|
||||
echo " $0 --test-keys # Use test keys for consistent development"
|
||||
echo " $0 -t --preserve-database # Use test keys and preserve database"
|
||||
@@ -137,22 +177,15 @@ fi
|
||||
|
||||
# Handle database file cleanup for fresh start
|
||||
if [ "$PRESERVE_DATABASE" = false ]; then
|
||||
if ls *.db >/dev/null 2>&1 || ls build/*.db >/dev/null 2>&1; then
|
||||
echo "Removing existing database files to trigger fresh key generation..."
|
||||
rm -f *.db build/*.db
|
||||
if ls *.db* >/dev/null 2>&1 || ls build/*.db* >/dev/null 2>&1; then
|
||||
echo "Removing existing database files (including WAL/SHM) to trigger fresh key generation..."
|
||||
rm -f *.db* build/*.db*
|
||||
echo "✓ Database files removed - will generate new keys and database"
|
||||
else
|
||||
echo "No existing database found - will generate fresh setup"
|
||||
fi
|
||||
else
|
||||
echo "Preserving existing database files as requested"
|
||||
# Back up database files before clean build
|
||||
if ls build/*.db >/dev/null 2>&1; then
|
||||
echo "Backing up existing database files..."
|
||||
mkdir -p /tmp/relay_backup_$$
|
||||
cp build/*.db* /tmp/relay_backup_$$/ 2>/dev/null || true
|
||||
echo "Database files backed up to temporary location"
|
||||
fi
|
||||
echo "Preserving existing database files (build process does not touch database files)"
|
||||
fi
|
||||
|
||||
# Clean up legacy files that are no longer used
|
||||
@@ -163,16 +196,15 @@ rm -f db/c_nostr_relay.db* 2>/dev/null
|
||||
echo "Embedding web files..."
|
||||
./embed_web_files.sh
|
||||
|
||||
# Build the project first
|
||||
echo "Building project..."
|
||||
make clean all
|
||||
# Build the project - ONLY static build
|
||||
echo "Building project (static binary with SQLite JSON1 extension)..."
|
||||
./build_static.sh
|
||||
|
||||
# Restore database files if preserving
|
||||
if [ "$PRESERVE_DATABASE" = true ] && [ -d "/tmp/relay_backup_$$" ]; then
|
||||
echo "Restoring preserved database files..."
|
||||
cp /tmp/relay_backup_$$/*.db* build/ 2>/dev/null || true
|
||||
rm -rf /tmp/relay_backup_$$
|
||||
echo "Database files restored to build directory"
|
||||
# Exit if static build fails - no fallback
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR: Static build failed. Cannot proceed without static binary."
|
||||
echo "Please fix the build errors and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if build was successful
|
||||
@@ -181,25 +213,32 @@ if [ $? -ne 0 ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if relay binary exists after build - detect architecture
|
||||
# Check if static relay binary exists after build - ONLY use static binary
|
||||
ARCH=$(uname -m)
|
||||
case "$ARCH" in
|
||||
x86_64)
|
||||
BINARY_PATH="./build/c_relay_x86"
|
||||
BINARY_PATH="./build/c_relay_static_x86_64"
|
||||
;;
|
||||
aarch64|arm64)
|
||||
BINARY_PATH="./build/c_relay_arm64"
|
||||
BINARY_PATH="./build/c_relay_static_arm64"
|
||||
;;
|
||||
*)
|
||||
BINARY_PATH="./build/c_relay_$ARCH"
|
||||
BINARY_PATH="./build/c_relay_static_$ARCH"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Verify static binary exists - no fallbacks
|
||||
if [ ! -f "$BINARY_PATH" ]; then
|
||||
echo "ERROR: Relay binary not found at $BINARY_PATH after build. Build may have failed."
|
||||
echo "ERROR: Static relay binary not found: $BINARY_PATH"
|
||||
echo ""
|
||||
echo "The relay requires the static binary with JSON1 support."
|
||||
echo "Please run: ./build_static.sh"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Using static binary: $BINARY_PATH"
|
||||
|
||||
echo "Build successful. Proceeding with relay restart..."
|
||||
|
||||
# Kill existing relay if running - start aggressive immediately
|
||||
@@ -281,19 +320,24 @@ if [ -n "$PORT_OVERRIDE" ]; then
|
||||
echo "Using custom port: $PORT_OVERRIDE"
|
||||
fi
|
||||
|
||||
if [ -n "$DEBUG_LEVEL" ]; then
|
||||
RELAY_ARGS="$RELAY_ARGS --debug-level=$DEBUG_LEVEL"
|
||||
echo "Using debug level: $DEBUG_LEVEL"
|
||||
fi
|
||||
|
||||
# Change to build directory before starting relay so database files are created there
|
||||
cd build
|
||||
# Start relay in background and capture its PID
|
||||
if [ "$USE_TEST_KEYS" = true ]; then
|
||||
echo "Using deterministic test keys for development..."
|
||||
./$(basename $BINARY_PATH) -a 6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3 -r 1111111111111111111111111111111111111111111111111111111111111111 --strict-port > ../relay.log 2>&1 &
|
||||
./$(basename $BINARY_PATH) -a 6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3 -r 1111111111111111111111111111111111111111111111111111111111111111 --debug-level=$DEBUG_LEVEL --strict-port > ../relay.log 2>&1 &
|
||||
elif [ -n "$RELAY_ARGS" ]; then
|
||||
echo "Starting relay with custom configuration..."
|
||||
./$(basename $BINARY_PATH) $RELAY_ARGS --strict-port > ../relay.log 2>&1 &
|
||||
./$(basename $BINARY_PATH) $RELAY_ARGS --debug-level=$DEBUG_LEVEL --strict-port > ../relay.log 2>&1 &
|
||||
else
|
||||
# No command line arguments needed for random key generation
|
||||
echo "Starting relay with random key generation..."
|
||||
./$(basename $BINARY_PATH) --strict-port > ../relay.log 2>&1 &
|
||||
./$(basename $BINARY_PATH) --debug-level=$DEBUG_LEVEL --strict-port > ../relay.log 2>&1 &
|
||||
fi
|
||||
RELAY_PID=$!
|
||||
# Change back to original directory
|
||||
|
||||
Submodule nostr_core_lib updated: c0784fc890...5066ba8dd0
78
notes.txt
Normal file
78
notes.txt
Normal file
@@ -0,0 +1,78 @@
|
||||
|
||||
Task Completed
|
||||
Perfect! The core dump limit is now set correctly. You can see both:
|
||||
|
||||
LimitCORE=infinity
|
||||
LimitCORESoft=infinity
|
||||
What happens next:
|
||||
The relay is now configured to generate core dumps when it crashes
|
||||
Wait for the next crash to occur (based on your logs, it seems to happen intermittently)
|
||||
When it crashes, the core dump will be automatically captured by systemd
|
||||
After the next crash, analyze it:
|
||||
# List all core dumps (most recent first)
|
||||
sudo coredumpctl list
|
||||
|
||||
# View info about the most recent c-relay crash
|
||||
sudo coredumpctl info c-relay
|
||||
|
||||
# Load the core dump in gdb for detailed analysis
|
||||
sudo coredumpctl gdb c-relay
|
||||
|
||||
Inside gdb, run these commands:
|
||||
(gdb) bt full # Full backtrace with all variables
|
||||
(gdb) info threads # Show all threads
|
||||
(gdb) thread apply all bt # Backtrace for all threads
|
||||
(gdb) frame 0 # Go to crash frame
|
||||
(gdb) info locals # Show local variables
|
||||
(gdb) list # Show source code around crash
|
||||
|
||||
The backtrace will show you exactly which line of code caused the segmentation fault and the state of all variables at that moment, which should pinpoint whether it's the subscription management issue, buffer overflow, or something else.
|
||||
|
||||
You're all set up now - just wait for the next crash and then run the coredumpctl commands to analyze it.
|
||||
|
||||
|
||||
--------------------------
|
||||
|
||||
|
||||
Even simpler: Use this one-liner
|
||||
# Start relay and immediately attach gdb
|
||||
cd /usr/local/bin/c_relay
|
||||
sudo -u c-relay ./c_relay --debug-level=5 & sleep 2 && sudo gdb -p $(pgrep c_relay)
|
||||
|
||||
|
||||
|
||||
How to View the Logs
|
||||
Check systemd journal:
|
||||
# View all c-relay logs
|
||||
sudo journalctl -u c-relay
|
||||
|
||||
# View recent logs (last 50 lines)
|
||||
sudo journalctl -u c-relay -n 50
|
||||
|
||||
# Follow logs in real-time
|
||||
sudo journalctl -u c-relay -f
|
||||
|
||||
# View logs since last boot
|
||||
sudo journalctl -u c-relay -b
|
||||
|
||||
Check if service is running:
|
||||
|
||||
|
||||
|
||||
To immediately trim the syslog file size:
|
||||
|
||||
Safe Syslog Truncation
|
||||
Stop syslog service first:
|
||||
sudo systemctl stop rsyslog
|
||||
|
||||
Truncate the syslog file:
|
||||
sudo truncate -s 0 /var/log/syslog
|
||||
|
||||
Restart syslog service:
|
||||
sudo systemctl start rsyslog
|
||||
sudo systemctl status rsyslog
|
||||
|
||||
|
||||
sudo -u c-relay ./c_relay --debug-level=5 -r 85d0b37e2ae822966dcadd06b2dc9368cde73865f90ea4d44f8b57d47ef0820a -a 1ec454734dcbf6fe54901ce25c0c7c6bca5edd89443416761fadc321d38df139
|
||||
|
||||
./c_relay_static_x86_64 -p 7889 --debug-level=5 -r 85d0b37e2ae822966dcadd06b2dc9368cde73865f90ea4d44f8b57d47ef0820a -a 1ec454734dcbf6fe54901ce25c0c7c6bca5edd89443416761fadc321d38df139
|
||||
22
src/api.c
22
src/api.c
@@ -10,21 +10,13 @@
|
||||
#include "api.h"
|
||||
#include "embedded_web_content.h"
|
||||
#include "config.h"
|
||||
|
||||
|
||||
// Forward declarations for logging functions
|
||||
void log_info(const char* message);
|
||||
void log_success(const char* message);
|
||||
void log_error(const char* message);
|
||||
void log_warning(const char* message);
|
||||
#include "debug.h"
|
||||
|
||||
// Forward declarations for database functions
|
||||
int store_event(cJSON* event);
|
||||
|
||||
// Handle HTTP request for embedded files (assumes GET)
|
||||
int handle_embedded_file_request(struct lws* wsi, const char* requested_uri) {
|
||||
log_info("Handling embedded file request");
|
||||
|
||||
const char* file_path;
|
||||
|
||||
// Handle /api requests
|
||||
@@ -37,7 +29,7 @@ int handle_embedded_file_request(struct lws* wsi, const char* requested_uri) {
|
||||
snprintf(temp_path, sizeof(temp_path), "/%s", requested_uri + 5); // Add leading slash
|
||||
file_path = temp_path;
|
||||
} else {
|
||||
log_warning("Embedded file request without /api prefix");
|
||||
DEBUG_WARN("Embedded file request without /api prefix");
|
||||
lws_return_http_status(wsi, HTTP_STATUS_NOT_FOUND, NULL);
|
||||
return -1;
|
||||
}
|
||||
@@ -45,7 +37,7 @@ int handle_embedded_file_request(struct lws* wsi, const char* requested_uri) {
|
||||
// Get embedded file
|
||||
embedded_file_t* file = get_embedded_file(file_path);
|
||||
if (!file) {
|
||||
log_warning("Embedded file not found");
|
||||
DEBUG_WARN("Embedded file not found");
|
||||
lws_return_http_status(wsi, HTTP_STATUS_NOT_FOUND, NULL);
|
||||
return -1;
|
||||
}
|
||||
@@ -53,7 +45,7 @@ int handle_embedded_file_request(struct lws* wsi, const char* requested_uri) {
|
||||
// Allocate session data
|
||||
struct embedded_file_session_data* session_data = malloc(sizeof(struct embedded_file_session_data));
|
||||
if (!session_data) {
|
||||
log_error("Failed to allocate embedded file session data");
|
||||
DEBUG_ERROR("Failed to allocate embedded file session data");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -124,7 +116,6 @@ int handle_embedded_file_request(struct lws* wsi, const char* requested_uri) {
|
||||
// Request callback for body transmission
|
||||
lws_callback_on_writable(wsi);
|
||||
|
||||
log_success("Embedded file headers sent, body transmission scheduled");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -138,7 +129,7 @@ int handle_embedded_file_writeable(struct lws* wsi) {
|
||||
// Allocate buffer for data transmission
|
||||
unsigned char *buf = malloc(LWS_PRE + session_data->size);
|
||||
if (!buf) {
|
||||
log_error("Failed to allocate buffer for embedded file transmission");
|
||||
DEBUG_ERROR("Failed to allocate buffer for embedded file transmission");
|
||||
free(session_data);
|
||||
lws_set_wsi_user(wsi, NULL);
|
||||
return -1;
|
||||
@@ -154,7 +145,7 @@ int handle_embedded_file_writeable(struct lws* wsi) {
|
||||
free(buf);
|
||||
|
||||
if (write_result < 0) {
|
||||
log_error("Failed to write embedded file data");
|
||||
DEBUG_ERROR("Failed to write embedded file data");
|
||||
free(session_data);
|
||||
lws_set_wsi_user(wsi, NULL);
|
||||
return -1;
|
||||
@@ -165,6 +156,5 @@ int handle_embedded_file_writeable(struct lws* wsi) {
|
||||
free(session_data);
|
||||
lws_set_wsi_user(wsi, NULL);
|
||||
|
||||
log_success("Embedded file served successfully");
|
||||
return 0;
|
||||
}
|
||||
|
||||
1917
src/config.c
1917
src/config.c
File diff suppressed because it is too large
Load Diff
86
src/config.h
86
src/config.h
@@ -27,89 +27,15 @@ struct lws;
|
||||
// Database path for event-based config
|
||||
extern char g_database_path[512];
|
||||
|
||||
// Unified configuration cache structure (consolidates all caching systems)
|
||||
typedef struct {
|
||||
// Critical keys (frequently accessed)
|
||||
char admin_pubkey[65];
|
||||
char relay_pubkey[65];
|
||||
|
||||
// Auth config (from request_validator)
|
||||
int auth_required;
|
||||
long max_file_size;
|
||||
int admin_enabled;
|
||||
int nip42_mode;
|
||||
int nip42_challenge_timeout;
|
||||
int nip42_time_tolerance;
|
||||
int nip70_protected_events_enabled;
|
||||
|
||||
// Static buffer for config values (replaces static buffers in get_config_value functions)
|
||||
char temp_buffer[CONFIG_VALUE_MAX_LENGTH];
|
||||
|
||||
// NIP-11 relay information (migrated from g_relay_info in main.c)
|
||||
struct {
|
||||
char name[RELAY_NAME_MAX_LENGTH];
|
||||
char description[RELAY_DESCRIPTION_MAX_LENGTH];
|
||||
char banner[RELAY_URL_MAX_LENGTH];
|
||||
char icon[RELAY_URL_MAX_LENGTH];
|
||||
char pubkey[RELAY_PUBKEY_MAX_LENGTH];
|
||||
char contact[RELAY_CONTACT_MAX_LENGTH];
|
||||
char software[RELAY_URL_MAX_LENGTH];
|
||||
char version[64];
|
||||
char privacy_policy[RELAY_URL_MAX_LENGTH];
|
||||
char terms_of_service[RELAY_URL_MAX_LENGTH];
|
||||
// Raw string values for parsing into cJSON arrays
|
||||
char supported_nips_str[CONFIG_VALUE_MAX_LENGTH];
|
||||
char language_tags_str[CONFIG_VALUE_MAX_LENGTH];
|
||||
char relay_countries_str[CONFIG_VALUE_MAX_LENGTH];
|
||||
// Parsed cJSON arrays
|
||||
cJSON* supported_nips;
|
||||
cJSON* limitation;
|
||||
cJSON* retention;
|
||||
cJSON* relay_countries;
|
||||
cJSON* language_tags;
|
||||
cJSON* tags;
|
||||
char posting_policy[RELAY_URL_MAX_LENGTH];
|
||||
cJSON* fees;
|
||||
char payments_url[RELAY_URL_MAX_LENGTH];
|
||||
} relay_info;
|
||||
|
||||
// NIP-13 PoW configuration (migrated from g_pow_config in main.c)
|
||||
struct {
|
||||
int enabled;
|
||||
int min_pow_difficulty;
|
||||
int validation_flags;
|
||||
int require_nonce_tag;
|
||||
int reject_lower_targets;
|
||||
int strict_format;
|
||||
int anti_spam_mode;
|
||||
} pow_config;
|
||||
|
||||
// NIP-40 Expiration configuration (migrated from g_expiration_config in main.c)
|
||||
struct {
|
||||
int enabled;
|
||||
int strict_mode;
|
||||
int filter_responses;
|
||||
int delete_expired;
|
||||
long grace_period;
|
||||
} expiration_config;
|
||||
|
||||
// Cache management
|
||||
time_t cache_expires;
|
||||
int cache_valid;
|
||||
pthread_mutex_t cache_lock;
|
||||
} unified_config_cache_t;
|
||||
|
||||
// Command line options structure for first-time startup
|
||||
typedef struct {
|
||||
int port_override; // -1 = not set, >0 = port value
|
||||
char admin_pubkey_override[65]; // Empty string = not set, 64-char hex = override
|
||||
char relay_privkey_override[65]; // Empty string = not set, 64-char hex = override
|
||||
int strict_port; // 0 = allow port increment, 1 = fail if exact port unavailable
|
||||
int debug_level; // 0-5, default 0 (no debug output)
|
||||
} cli_options_t;
|
||||
|
||||
// Global unified configuration cache
|
||||
extern unified_config_cache_t g_unified_cache;
|
||||
|
||||
// Core configuration functions (temporary compatibility)
|
||||
int init_configuration_system(const char* config_dir_override, const char* config_file_override);
|
||||
void cleanup_configuration_system(void);
|
||||
@@ -137,8 +63,8 @@ int get_config_bool(const char* key, int default_value);
|
||||
|
||||
// First-time startup functions
|
||||
int is_first_time_startup(void);
|
||||
int first_time_startup_sequence(const cli_options_t* cli_options);
|
||||
int startup_existing_relay(const char* relay_pubkey);
|
||||
int first_time_startup_sequence(const cli_options_t* cli_options, char* admin_pubkey_out, char* relay_pubkey_out, char* relay_privkey_out);
|
||||
int startup_existing_relay(const char* relay_pubkey, const cli_options_t* cli_options);
|
||||
|
||||
// Configuration application functions
|
||||
int apply_configuration_from_event(const cJSON* event);
|
||||
@@ -168,6 +94,7 @@ int set_config_value_in_table(const char* key, const char* value, const char* da
|
||||
const char* description, const char* category, int requires_restart);
|
||||
int update_config_in_table(const char* key, const char* value);
|
||||
int populate_default_config_values(void);
|
||||
int populate_all_config_values_atomic(const char* admin_pubkey, const char* relay_pubkey);
|
||||
int add_pubkeys_to_config_table(void);
|
||||
|
||||
// Admin event processing functions (updated with WebSocket support)
|
||||
@@ -187,7 +114,7 @@ cJSON* build_query_response(const char* query_type, cJSON* results_array, int to
|
||||
|
||||
// Auth rules management functions
|
||||
int add_auth_rule_from_config(const char* rule_type, const char* pattern_type,
|
||||
const char* pattern_value, const char* action);
|
||||
const char* pattern_value);
|
||||
int remove_auth_rule_from_config(const char* rule_type, const char* pattern_type,
|
||||
const char* pattern_value);
|
||||
|
||||
@@ -211,6 +138,9 @@ int populate_config_table_from_event(const cJSON* event);
|
||||
int process_startup_config_event(const cJSON* event);
|
||||
int process_startup_config_event_with_fallback(const cJSON* event);
|
||||
|
||||
// Atomic CLI override application
|
||||
int apply_cli_overrides_atomic(const cli_options_t* cli_options);
|
||||
|
||||
// Dynamic event generation functions for WebSocket configuration fetching
|
||||
cJSON* generate_config_event_from_table(void);
|
||||
int req_filter_requests_config_events(const cJSON* filter);
|
||||
|
||||
@@ -28,6 +28,8 @@ static const struct {
|
||||
{"nip42_auth_required_subscriptions", "false"},
|
||||
{"nip42_auth_required_kinds", "4,14"}, // Default: DM kinds require auth
|
||||
{"nip42_challenge_expiration", "600"}, // 10 minutes
|
||||
{"nip42_challenge_timeout", "600"}, // Challenge timeout (seconds)
|
||||
{"nip42_time_tolerance", "300"}, // Time tolerance (seconds)
|
||||
|
||||
// NIP-70 Protected Events
|
||||
{"nip70_protected_events_enabled", "false"},
|
||||
|
||||
206
src/dm_admin.c
206
src/dm_admin.c
@@ -1,5 +1,6 @@
|
||||
#define _GNU_SOURCE
|
||||
#include "config.h"
|
||||
#include "debug.h"
|
||||
#include "../nostr_core_lib/nostr_core/nostr_core.h"
|
||||
#include "../nostr_core_lib/nostr_core/nip017.h"
|
||||
#include "../nostr_core_lib/nostr_core/nip044.h"
|
||||
@@ -8,18 +9,13 @@
|
||||
#include <string.h>
|
||||
#include <strings.h>
|
||||
#include <unistd.h>
|
||||
#include <sys/stat.h>
|
||||
#include <cjson/cJSON.h>
|
||||
#include <libwebsockets.h>
|
||||
|
||||
// External database connection (from main.c)
|
||||
extern sqlite3* g_db;
|
||||
|
||||
// Logging functions (defined in main.c)
|
||||
extern void log_info(const char* message);
|
||||
extern void log_success(const char* message);
|
||||
extern void log_warning(const char* message);
|
||||
extern void log_error(const char* message);
|
||||
|
||||
// Forward declarations for unified handlers
|
||||
extern int handle_auth_query_unified(cJSON* event, const char* query_type, char* error_message, size_t error_size, struct lws* wsi);
|
||||
extern int handle_config_query_unified(cJSON* event, const char* query_type, char* error_message, size_t error_size, struct lws* wsi);
|
||||
@@ -34,11 +30,9 @@ extern const char* get_first_tag_name(cJSON* event);
|
||||
extern const char* get_tag_value(cJSON* event, const char* tag_name, int value_index);
|
||||
|
||||
// Forward declarations for config functions
|
||||
extern const char* get_relay_pubkey_cached(void);
|
||||
extern char* get_relay_private_key(void);
|
||||
extern const char* get_config_value(const char* key);
|
||||
extern int get_config_bool(const char* key, int default_value);
|
||||
extern const char* get_admin_pubkey_cached(void);
|
||||
|
||||
// Forward declarations for database functions
|
||||
extern int store_event(cJSON* event);
|
||||
@@ -136,14 +130,14 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
|
||||
// This handles commands sent as direct JSON arrays, not wrapped in inner events
|
||||
int process_dm_admin_command(cJSON* command_array, cJSON* event, char* error_message, size_t error_size, struct lws* wsi) {
|
||||
if (!command_array || !cJSON_IsArray(command_array) || !event) {
|
||||
log_error("DM Admin: Invalid command array or event");
|
||||
DEBUG_ERROR("DM Admin: Invalid command array or event");
|
||||
snprintf(error_message, error_size, "invalid: null command array or event");
|
||||
return -1;
|
||||
}
|
||||
|
||||
int array_size = cJSON_GetArraySize(command_array);
|
||||
if (array_size < 1) {
|
||||
log_error("DM Admin: Empty command array");
|
||||
DEBUG_ERROR("DM Admin: Empty command array");
|
||||
snprintf(error_message, error_size, "invalid: empty command array");
|
||||
return -1;
|
||||
}
|
||||
@@ -151,15 +145,12 @@ int process_dm_admin_command(cJSON* command_array, cJSON* event, char* error_mes
|
||||
// Get the command type from the first element
|
||||
cJSON* command_item = cJSON_GetArrayItem(command_array, 0);
|
||||
if (!command_item || !cJSON_IsString(command_item)) {
|
||||
log_error("DM Admin: First element is not a string command");
|
||||
DEBUG_ERROR("DM Admin: First element is not a string command");
|
||||
snprintf(error_message, error_size, "invalid: command must be a string");
|
||||
return -1;
|
||||
}
|
||||
|
||||
const char* command_type = cJSON_GetStringValue(command_item);
|
||||
log_info("DM Admin: Processing command");
|
||||
printf(" Command: %s\n", command_type);
|
||||
printf(" Parameters: %d\n", array_size - 1);
|
||||
|
||||
// Create synthetic tags from the command array for unified handler compatibility
|
||||
cJSON* synthetic_tags = cJSON_CreateArray();
|
||||
@@ -211,20 +202,18 @@ int process_dm_admin_command(cJSON* command_array, cJSON* event, char* error_mes
|
||||
if (strcmp(command_type, "auth_query") == 0) {
|
||||
const char* query_type = get_tag_value(event, "auth_query", 1);
|
||||
if (!query_type) {
|
||||
log_error("DM Admin: Missing auth_query type parameter");
|
||||
DEBUG_ERROR("DM Admin: Missing auth_query type parameter");
|
||||
snprintf(error_message, error_size, "invalid: missing auth_query type");
|
||||
} else {
|
||||
printf(" Query type: %s\n", query_type);
|
||||
result = handle_auth_query_unified(event, query_type, error_message, error_size, wsi);
|
||||
}
|
||||
}
|
||||
else if (strcmp(command_type, "config_query") == 0) {
|
||||
const char* query_type = get_tag_value(event, "config_query", 1);
|
||||
if (!query_type) {
|
||||
log_error("DM Admin: Missing config_query type parameter");
|
||||
DEBUG_ERROR("DM Admin: Missing config_query type parameter");
|
||||
snprintf(error_message, error_size, "invalid: missing config_query type");
|
||||
} else {
|
||||
printf(" Query type: %s\n", query_type);
|
||||
result = handle_config_query_unified(event, query_type, error_message, error_size, wsi);
|
||||
}
|
||||
}
|
||||
@@ -232,10 +221,9 @@ int process_dm_admin_command(cJSON* command_array, cJSON* event, char* error_mes
|
||||
const char* config_key = get_tag_value(event, "config_set", 1);
|
||||
const char* config_value = get_tag_value(event, "config_set", 2);
|
||||
if (!config_key || !config_value) {
|
||||
log_error("DM Admin: Missing config_set parameters");
|
||||
DEBUG_ERROR("DM Admin: Missing config_set parameters");
|
||||
snprintf(error_message, error_size, "invalid: missing config_set key or value");
|
||||
} else {
|
||||
printf(" Key: %s, Value: %s\n", config_key, config_value);
|
||||
result = handle_config_set_unified(event, config_key, config_value, error_message, error_size, wsi);
|
||||
}
|
||||
}
|
||||
@@ -245,10 +233,9 @@ int process_dm_admin_command(cJSON* command_array, cJSON* event, char* error_mes
|
||||
else if (strcmp(command_type, "system_command") == 0) {
|
||||
const char* command = get_tag_value(event, "system_command", 1);
|
||||
if (!command) {
|
||||
log_error("DM Admin: Missing system_command type parameter");
|
||||
DEBUG_ERROR("DM Admin: Missing system_command type parameter");
|
||||
snprintf(error_message, error_size, "invalid: missing system_command type");
|
||||
} else {
|
||||
printf(" System command: %s\n", command);
|
||||
result = handle_system_command_unified(event, command, error_message, error_size, wsi);
|
||||
}
|
||||
}
|
||||
@@ -256,19 +243,16 @@ int process_dm_admin_command(cJSON* command_array, cJSON* event, char* error_mes
|
||||
result = handle_stats_query_unified(event, error_message, error_size, wsi);
|
||||
}
|
||||
else if (strcmp(command_type, "whitelist") == 0 || strcmp(command_type, "blacklist") == 0) {
|
||||
printf(" Rule type: %s\n", command_type);
|
||||
result = handle_auth_rule_modification_unified(event, error_message, error_size, wsi);
|
||||
}
|
||||
else {
|
||||
log_error("DM Admin: Unknown command type");
|
||||
DEBUG_ERROR("DM Admin: Unknown command type");
|
||||
printf(" Unknown command: %s\n", command_type);
|
||||
snprintf(error_message, error_size, "invalid: unknown DM command type '%s'", command_type);
|
||||
}
|
||||
|
||||
if (result == 0) {
|
||||
log_success("DM Admin: Command processed successfully");
|
||||
} else {
|
||||
log_error("DM Admin: Command processing failed");
|
||||
if (result != 0) {
|
||||
DEBUG_ERROR("DM Admin: Command processing failed");
|
||||
}
|
||||
|
||||
return result;
|
||||
@@ -291,11 +275,6 @@ int parse_config_command(const char* message, char* key, char* value) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
log_info("DEBUG: Parsing config command");
|
||||
char debug_msg[256];
|
||||
snprintf(debug_msg, sizeof(debug_msg), "DEBUG: Input message: '%.100s'", message);
|
||||
log_info(debug_msg);
|
||||
|
||||
// Clean up the message - convert to lowercase and trim
|
||||
char clean_msg[512];
|
||||
size_t msg_len = strlen(message);
|
||||
@@ -408,7 +387,6 @@ int parse_config_command(const char* message, char* key, char* value) {
|
||||
}
|
||||
}
|
||||
|
||||
log_info("DEBUG: No config command pattern matched");
|
||||
return 0; // No pattern matched
|
||||
}
|
||||
|
||||
@@ -593,7 +571,6 @@ void cleanup_expired_pending_changes(void) {
|
||||
while (current) {
|
||||
pending_config_change_t* next = current->next;
|
||||
if (now - current->timestamp > CONFIG_CHANGE_TIMEOUT) {
|
||||
log_info("Cleaning up expired config change request");
|
||||
remove_pending_change(current);
|
||||
}
|
||||
current = next;
|
||||
@@ -603,21 +580,15 @@ void cleanup_expired_pending_changes(void) {
|
||||
// Apply a configuration change to the database
|
||||
int apply_config_change(const char* key, const char* value) {
|
||||
if (!key || !value) {
|
||||
log_error("DEBUG: apply_config_change called with NULL key or value");
|
||||
return -1;
|
||||
}
|
||||
|
||||
extern sqlite3* g_db;
|
||||
if (!g_db) {
|
||||
log_error("Database not available for config change");
|
||||
DEBUG_ERROR("Database not available for config change");
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_info("DEBUG: Applying config change");
|
||||
char debug_msg[256];
|
||||
snprintf(debug_msg, sizeof(debug_msg), "DEBUG: Key='%s', Value='%s'", key, value);
|
||||
log_info(debug_msg);
|
||||
|
||||
// Normalize boolean values
|
||||
char normalized_value[256];
|
||||
strncpy(normalized_value, value, sizeof(normalized_value) - 1);
|
||||
@@ -630,11 +601,6 @@ int apply_config_change(const char* key, const char* value) {
|
||||
strcpy(normalized_value, "false");
|
||||
}
|
||||
|
||||
log_info("DEBUG: Normalized value");
|
||||
char norm_msg[256];
|
||||
snprintf(norm_msg, sizeof(norm_msg), "DEBUG: Normalized value='%s'", normalized_value);
|
||||
log_info(norm_msg);
|
||||
|
||||
// Determine the data type based on the configuration key
|
||||
const char* data_type = "string"; // Default to string
|
||||
for (int i = 0; known_configs[i].key != NULL; i++) {
|
||||
@@ -654,35 +620,27 @@ int apply_config_change(const char* key, const char* value) {
|
||||
sqlite3_stmt* stmt;
|
||||
const char* sql = "INSERT OR REPLACE INTO config (key, value, data_type) VALUES (?, ?, ?)";
|
||||
|
||||
log_info("DEBUG: Preparing SQL statement");
|
||||
if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) != SQLITE_OK) {
|
||||
log_error("Failed to prepare config update statement");
|
||||
DEBUG_ERROR("Failed to prepare config update statement");
|
||||
const char* err_msg = sqlite3_errmsg(g_db);
|
||||
log_error(err_msg);
|
||||
DEBUG_ERROR(err_msg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_info("DEBUG: Binding parameters");
|
||||
sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 2, normalized_value, -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 3, data_type, -1, SQLITE_STATIC);
|
||||
|
||||
log_info("DEBUG: Executing SQL statement");
|
||||
int result = sqlite3_step(stmt);
|
||||
if (result != SQLITE_DONE) {
|
||||
log_error("Failed to update configuration in database");
|
||||
DEBUG_ERROR("Failed to update configuration in database");
|
||||
const char* err_msg = sqlite3_errmsg(g_db);
|
||||
log_error(err_msg);
|
||||
DEBUG_ERROR(err_msg);
|
||||
sqlite3_finalize(stmt);
|
||||
return -1;
|
||||
}
|
||||
|
||||
sqlite3_finalize(stmt);
|
||||
|
||||
log_info("DEBUG: SQL execution successful");
|
||||
char log_msg[512];
|
||||
snprintf(log_msg, sizeof(log_msg), "Configuration updated: %s = %s", key, normalized_value);
|
||||
log_success(log_msg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -784,11 +742,9 @@ int handle_config_confirmation(const char* admin_pubkey, const char* response) {
|
||||
|
||||
if (is_yes) {
|
||||
// Apply the configuration change
|
||||
log_info("DEBUG: Applying configuration change");
|
||||
int result = apply_config_change(change->config_key, change->new_value);
|
||||
if (result == 0) {
|
||||
// Send success response
|
||||
log_info("DEBUG: Configuration change applied successfully, sending success response");
|
||||
char success_msg[1024];
|
||||
snprintf(success_msg, sizeof(success_msg),
|
||||
"✅ Configuration Updated\n"
|
||||
@@ -803,10 +759,7 @@ int handle_config_confirmation(const char* admin_pubkey, const char* response) {
|
||||
char error_msg[256];
|
||||
int send_result = send_nip17_response(admin_pubkey, success_msg, error_msg, sizeof(error_msg));
|
||||
if (send_result != 0) {
|
||||
log_error("DEBUG: Failed to send success response");
|
||||
log_error(error_msg);
|
||||
} else {
|
||||
log_success("DEBUG: Success response sent");
|
||||
DEBUG_ERROR(error_msg);
|
||||
}
|
||||
|
||||
// Remove the pending change
|
||||
@@ -814,7 +767,6 @@ int handle_config_confirmation(const char* admin_pubkey, const char* response) {
|
||||
return 1; // Success
|
||||
} else {
|
||||
// Send error response
|
||||
log_error("DEBUG: Configuration change failed, sending error response");
|
||||
char error_msg[1024];
|
||||
snprintf(error_msg, sizeof(error_msg),
|
||||
"❌ Configuration Update Failed\n"
|
||||
@@ -829,10 +781,7 @@ int handle_config_confirmation(const char* admin_pubkey, const char* response) {
|
||||
char send_error_msg[256];
|
||||
int send_result = send_nip17_response(admin_pubkey, error_msg, send_error_msg, sizeof(send_error_msg));
|
||||
if (send_result != 0) {
|
||||
log_error("DEBUG: Failed to send error response");
|
||||
log_error(send_error_msg);
|
||||
} else {
|
||||
log_success("DEBUG: Error response sent");
|
||||
DEBUG_ERROR(send_error_msg);
|
||||
}
|
||||
|
||||
// Remove the pending change
|
||||
@@ -929,21 +878,14 @@ int process_config_change_request(const char* admin_pubkey, const char* message)
|
||||
}
|
||||
|
||||
// Generate and send confirmation message
|
||||
log_info("DEBUG: Generating confirmation message");
|
||||
char* confirmation = generate_config_change_confirmation(key, current_value, value);
|
||||
if (confirmation) {
|
||||
log_info("DEBUG: Confirmation message generated, sending response");
|
||||
char error_msg[256];
|
||||
int send_result = send_nip17_response(admin_pubkey, confirmation, error_msg, sizeof(error_msg));
|
||||
if (send_result == 0) {
|
||||
log_success("DEBUG: Confirmation response sent successfully");
|
||||
} else {
|
||||
log_error("DEBUG: Failed to send confirmation response");
|
||||
log_error(error_msg);
|
||||
if (send_result != 0) {
|
||||
DEBUG_ERROR(error_msg);
|
||||
}
|
||||
free(confirmation);
|
||||
} else {
|
||||
log_error("DEBUG: Failed to generate confirmation message");
|
||||
}
|
||||
|
||||
free(change_id);
|
||||
@@ -954,12 +896,10 @@ int process_config_change_request(const char* admin_pubkey, const char* message)
|
||||
char* generate_stats_json(void) {
|
||||
extern sqlite3* g_db;
|
||||
if (!g_db) {
|
||||
log_error("Database not available for stats generation");
|
||||
DEBUG_ERROR("Database not available for stats generation");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
log_info("Generating stats JSON from database");
|
||||
|
||||
// Build response with database statistics
|
||||
cJSON* response = cJSON_CreateObject();
|
||||
cJSON_AddStringToObject(response, "query_type", "stats_query");
|
||||
@@ -1059,10 +999,8 @@ char* generate_stats_json(void) {
|
||||
char* json_string = cJSON_Print(response);
|
||||
cJSON_Delete(response);
|
||||
|
||||
if (json_string) {
|
||||
log_success("Stats JSON generated successfully");
|
||||
} else {
|
||||
log_error("Failed to generate stats JSON");
|
||||
if (!json_string) {
|
||||
DEBUG_ERROR("Failed to generate stats JSON");
|
||||
}
|
||||
|
||||
return json_string;
|
||||
@@ -1079,7 +1017,7 @@ int send_nip17_response(const char* sender_pubkey, const char* response_content,
|
||||
}
|
||||
|
||||
// Get relay keys for signing
|
||||
const char* relay_pubkey = get_relay_pubkey_cached();
|
||||
const char* relay_pubkey = get_config_value("relay_pubkey");
|
||||
char* relay_privkey_hex = get_relay_private_key();
|
||||
if (!relay_pubkey || !relay_privkey_hex) {
|
||||
if (relay_privkey_hex) free(relay_privkey_hex);
|
||||
@@ -1142,30 +1080,36 @@ int send_nip17_response(const char* sender_pubkey, const char* response_content,
|
||||
strcmp(cJSON_GetStringValue(tag_name), "p") == 0) {
|
||||
// Replace the p tag value with the correct user pubkey
|
||||
cJSON_ReplaceItemInArray(tag, 1, cJSON_CreateString(sender_pubkey));
|
||||
log_info("NIP-17: Fixed p tag in response gift wrap");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Store the gift wrap in database
|
||||
// Broadcast FIRST before storing (broadcasting needs the event intact)
|
||||
// Make a copy for broadcasting to avoid use-after-free issues
|
||||
cJSON* gift_wrap_copy = cJSON_Duplicate(gift_wraps[0], 1);
|
||||
if (!gift_wrap_copy) {
|
||||
cJSON_Delete(gift_wraps[0]);
|
||||
strncpy(error_message, "NIP-17: Failed to duplicate gift wrap for broadcast", error_size - 1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Broadcast the copy to active subscriptions
|
||||
broadcast_event_to_subscriptions(gift_wrap_copy);
|
||||
|
||||
// Store the original in database
|
||||
int store_result = store_event(gift_wraps[0]);
|
||||
|
||||
// Clean up both copies
|
||||
cJSON_Delete(gift_wrap_copy);
|
||||
cJSON_Delete(gift_wraps[0]);
|
||||
|
||||
if (store_result != 0) {
|
||||
cJSON_Delete(gift_wraps[0]);
|
||||
strncpy(error_message, "NIP-17: Failed to store response gift wrap", error_size - 1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Broadcast the response event to active subscriptions
|
||||
int broadcast_count = broadcast_event_to_subscriptions(gift_wraps[0]);
|
||||
char debug_broadcast_msg[128];
|
||||
snprintf(debug_broadcast_msg, sizeof(debug_broadcast_msg),
|
||||
"NIP-17: Response broadcast to %d subscriptions", broadcast_count);
|
||||
log_info(debug_broadcast_msg);
|
||||
|
||||
cJSON_Delete(gift_wraps[0]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1173,14 +1117,14 @@ int send_nip17_response(const char* sender_pubkey, const char* response_content,
|
||||
char* generate_config_text(void) {
|
||||
extern sqlite3* g_db;
|
||||
if (!g_db) {
|
||||
log_error("NIP-17: Database not available for config query");
|
||||
DEBUG_ERROR("NIP-17: Database not available for config query");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Build comprehensive config text from database
|
||||
char* config_text = malloc(8192);
|
||||
if (!config_text) {
|
||||
log_error("NIP-17: Failed to allocate memory for config text");
|
||||
DEBUG_ERROR("NIP-17: Failed to allocate memory for config text");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -1206,7 +1150,7 @@ char* generate_config_text(void) {
|
||||
sqlite3_finalize(stmt);
|
||||
} else {
|
||||
free(config_text);
|
||||
log_error("NIP-17: Failed to query config from database");
|
||||
DEBUG_ERROR("NIP-17: Failed to query config from database");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -1221,7 +1165,7 @@ char* generate_config_text(void) {
|
||||
char* generate_stats_text(void) {
|
||||
char* stats_json = generate_stats_json();
|
||||
if (!stats_json) {
|
||||
log_error("NIP-17: Failed to generate stats for plain text command");
|
||||
DEBUG_ERROR("NIP-17: Failed to generate stats for plain text command");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -1405,7 +1349,7 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
|
||||
// Convert hex private key to bytes
|
||||
unsigned char relay_privkey[32];
|
||||
if (nostr_hex_to_bytes(relay_privkey_hex, relay_privkey, sizeof(relay_privkey)) != 0) {
|
||||
log_error("NIP-17: Failed to convert relay private key from hex");
|
||||
DEBUG_ERROR("NIP-17: Failed to convert relay private key from hex");
|
||||
free(relay_privkey_hex);
|
||||
strncpy(error_message, "NIP-17: Failed to convert relay private key", error_size - 1);
|
||||
return NULL;
|
||||
@@ -1413,16 +1357,15 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
|
||||
free(relay_privkey_hex);
|
||||
|
||||
// Step 3: Decrypt and parse inner event using library function
|
||||
log_info("NIP-17: Attempting to decrypt gift wrap with nostr_nip17_receive_dm");
|
||||
cJSON* inner_dm = nostr_nip17_receive_dm(gift_wrap_event, relay_privkey);
|
||||
if (!inner_dm) {
|
||||
log_error("NIP-17: nostr_nip17_receive_dm returned NULL");
|
||||
DEBUG_ERROR("NIP-17: nostr_nip17_receive_dm returned NULL");
|
||||
// Debug: Print the gift wrap event
|
||||
char* gift_wrap_debug = cJSON_Print(gift_wrap_event);
|
||||
if (gift_wrap_debug) {
|
||||
char debug_msg[1024];
|
||||
snprintf(debug_msg, sizeof(debug_msg), "NIP-17: Gift wrap event: %.500s", gift_wrap_debug);
|
||||
log_error(debug_msg);
|
||||
DEBUG_ERROR(debug_msg);
|
||||
free(gift_wrap_debug);
|
||||
}
|
||||
// Debug: Check if private key is valid
|
||||
@@ -1431,14 +1374,10 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
|
||||
sprintf(privkey_hex + (i * 2), "%02x", relay_privkey[i]);
|
||||
}
|
||||
privkey_hex[64] = '\0';
|
||||
char privkey_msg[128];
|
||||
snprintf(privkey_msg, sizeof(privkey_msg), "NIP-17: Using relay private key: %.16s...", privkey_hex);
|
||||
log_info(privkey_msg);
|
||||
|
||||
strncpy(error_message, "NIP-17: Failed to decrypt and parse inner DM event", error_size - 1);
|
||||
return NULL;
|
||||
}
|
||||
log_info("NIP-17: Successfully decrypted gift wrap");
|
||||
|
||||
// Step 4: Process admin command
|
||||
int result = process_nip17_admin_command(inner_dm, error_message, error_size, wsi);
|
||||
@@ -1468,7 +1407,6 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
|
||||
// If it's a plain text stats or config command, don't create additional response
|
||||
if (strstr(content_lower, "stats") != NULL || strstr(content_lower, "statistics") != NULL ||
|
||||
strstr(content_lower, "config") != NULL || strstr(content_lower, "configuration") != NULL) {
|
||||
log_info("NIP-17: Plain text command already handled response, skipping generic response");
|
||||
cJSON_Delete(inner_dm);
|
||||
return NULL; // No additional response needed
|
||||
}
|
||||
@@ -1478,7 +1416,6 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
|
||||
if (command_array && cJSON_IsArray(command_array) && cJSON_GetArraySize(command_array) > 0) {
|
||||
cJSON* first_item = cJSON_GetArrayItem(command_array, 0);
|
||||
if (cJSON_IsString(first_item) && strcmp(cJSON_GetStringValue(first_item), "stats") == 0) {
|
||||
log_info("NIP-17: JSON stats command already handled response, skipping generic response");
|
||||
cJSON_Delete(command_array);
|
||||
cJSON_Delete(inner_dm);
|
||||
return NULL; // No additional response needed
|
||||
@@ -1488,7 +1425,6 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
|
||||
}
|
||||
} else if (result > 0) {
|
||||
// Command was handled and response was sent, don't create generic response
|
||||
log_info("NIP-17: Command handled with custom response, skipping generic response");
|
||||
cJSON_Delete(inner_dm);
|
||||
return NULL;
|
||||
|
||||
@@ -1503,7 +1439,7 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
|
||||
"[\"command_processed\", \"success\", \"%s\"]", "NIP-17 admin command executed");
|
||||
|
||||
// Get relay pubkey for creating DM event
|
||||
const char* relay_pubkey = get_relay_pubkey_cached();
|
||||
const char* relay_pubkey = get_config_value("relay_pubkey");
|
||||
if (relay_pubkey) {
|
||||
cJSON* success_dm = nostr_nip17_create_chat_event(
|
||||
response_content, // message content
|
||||
@@ -1591,9 +1527,9 @@ int is_nip17_gift_wrap_for_relay(cJSON* event) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
const char* relay_pubkey = get_relay_pubkey_cached();
|
||||
const char* relay_pubkey = get_config_value("relay_pubkey");
|
||||
if (!relay_pubkey) {
|
||||
log_error("NIP-17: Could not get relay pubkey for validation");
|
||||
DEBUG_ERROR("NIP-17: Could not get relay pubkey for validation");
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1634,20 +1570,14 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
|
||||
// Check if sender is admin before processing any commands
|
||||
cJSON* sender_pubkey_obj = cJSON_GetObjectItem(dm_event, "pubkey");
|
||||
if (!sender_pubkey_obj || !cJSON_IsString(sender_pubkey_obj)) {
|
||||
log_info("NIP-17: DM missing sender pubkey, treating as user DM");
|
||||
return 0; // Not an error, just treat as user DM
|
||||
}
|
||||
const char* sender_pubkey = cJSON_GetStringValue(sender_pubkey_obj);
|
||||
|
||||
// Check if sender is admin
|
||||
const char* admin_pubkey = get_admin_pubkey_cached();
|
||||
const char* admin_pubkey = get_config_value("admin_pubkey");
|
||||
int is_admin = admin_pubkey && strlen(admin_pubkey) > 0 && strcmp(sender_pubkey, admin_pubkey) == 0;
|
||||
|
||||
log_info("NIP-17: Processing admin command from DM content");
|
||||
char log_msg[256];
|
||||
snprintf(log_msg, sizeof(log_msg), "NIP-17: Received DM content: '%.50s'%s", dm_content, strlen(dm_content) > 50 ? "..." : "");
|
||||
log_info(log_msg);
|
||||
|
||||
// Parse DM content as JSON array of commands
|
||||
cJSON* command_array = cJSON_Parse(dm_content);
|
||||
if (!command_array || !cJSON_IsArray(command_array)) {
|
||||
@@ -1669,9 +1599,6 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
|
||||
|
||||
// Check for stats commands
|
||||
if (strstr(content_lower, "stats") != NULL || strstr(content_lower, "statistics") != NULL) {
|
||||
log_info("NIP-17: Recognized plain text 'stats' command from admin");
|
||||
log_info("NIP-17: Action: Generate and send relay statistics");
|
||||
|
||||
char* stats_text = generate_stats_text();
|
||||
if (!stats_text) {
|
||||
return -1;
|
||||
@@ -1682,18 +1609,14 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
|
||||
free(stats_text);
|
||||
|
||||
if (result != 0) {
|
||||
log_error(error_msg);
|
||||
DEBUG_ERROR(error_msg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_success("NIP-17: Stats command processed successfully, response sent");
|
||||
|
||||
return 0;
|
||||
}
|
||||
// Check for config commands
|
||||
else if (strstr(content_lower, "config") != NULL || strstr(content_lower, "configuration") != NULL) {
|
||||
log_info("NIP-17: Recognized plain text 'config' command from admin");
|
||||
log_info("NIP-17: Action: Generate and send relay configuration");
|
||||
|
||||
char* config_text = generate_config_text();
|
||||
if (!config_text) {
|
||||
return -1;
|
||||
@@ -1704,11 +1627,10 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
|
||||
free(config_text);
|
||||
|
||||
if (result != 0) {
|
||||
log_error(error_msg);
|
||||
DEBUG_ERROR(error_msg);
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_success("NIP-17: Config command processed successfully, response sent");
|
||||
|
||||
return 0;
|
||||
}
|
||||
else {
|
||||
@@ -1716,7 +1638,7 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
|
||||
int confirmation_result = handle_config_confirmation(sender_pubkey, dm_content);
|
||||
if (confirmation_result != 0) {
|
||||
if (confirmation_result > 0) {
|
||||
log_success("NIP-17: Configuration confirmation processed successfully");
|
||||
// Configuration confirmation processed successfully
|
||||
} else if (confirmation_result == -2) {
|
||||
// No pending changes
|
||||
char no_pending_msg[256];
|
||||
@@ -1737,20 +1659,17 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
|
||||
int config_result = process_config_change_request(sender_pubkey, dm_content);
|
||||
if (config_result != 0) {
|
||||
if (config_result > 0) {
|
||||
log_success("NIP-17: Configuration change request processed successfully");
|
||||
return 1; // Return positive value to indicate response was handled
|
||||
} else {
|
||||
log_error("NIP-17: Configuration change request failed");
|
||||
DEBUG_ERROR("NIP-17: Configuration change request failed");
|
||||
return -1; // Return error to prevent generic success response
|
||||
}
|
||||
}
|
||||
|
||||
log_info("NIP-17: Plain text content from admin not recognized as command, treating as user DM");
|
||||
return 0; // Admin sent unrecognized plain text, treat as user DM
|
||||
}
|
||||
} else {
|
||||
// Not admin, treat as user DM
|
||||
log_info("NIP-17: Content is not JSON array and sender is not admin, treating as user DM");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@@ -1759,8 +1678,6 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
|
||||
if (cJSON_GetArraySize(command_array) > 0) {
|
||||
cJSON* first_item = cJSON_GetArrayItem(command_array, 0);
|
||||
if (cJSON_IsString(first_item) && strcmp(cJSON_GetStringValue(first_item), "stats") == 0) {
|
||||
log_info("NIP-17: Processing 'stats' command directly");
|
||||
|
||||
// Get sender pubkey for response
|
||||
cJSON* sender_pubkey_obj = cJSON_GetObjectItem(dm_event, "pubkey");
|
||||
if (!sender_pubkey_obj || !cJSON_IsString(sender_pubkey_obj)) {
|
||||
@@ -1784,12 +1701,11 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
|
||||
cJSON_Delete(command_array);
|
||||
|
||||
if (result != 0) {
|
||||
log_error(error_msg);
|
||||
DEBUG_ERROR(error_msg);
|
||||
strncpy(error_message, error_msg, error_size - 1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_success("NIP-17: Stats command processed successfully");
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because one or more lines are too long
1027
src/main.c
1027
src/main.c
File diff suppressed because it is too large
Load Diff
28
src/nip009.c
28
src/nip009.c
@@ -6,16 +6,13 @@
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
#include <cjson/cJSON.h>
|
||||
#include "debug.h"
|
||||
#include <sqlite3.h>
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
#include <stdio.h>
|
||||
#include <printf.h>
|
||||
|
||||
// Forward declarations for logging functions
|
||||
void log_warning(const char* message);
|
||||
void log_info(const char* message);
|
||||
|
||||
// Forward declaration for database functions
|
||||
int store_event(cJSON* event);
|
||||
@@ -140,13 +137,9 @@ int handle_deletion_request(cJSON* event, char* error_message, size_t error_size
|
||||
|
||||
// Store the deletion request itself (it should be kept according to NIP-09)
|
||||
if (store_event(event) != 0) {
|
||||
log_warning("Failed to store deletion request event");
|
||||
DEBUG_WARN("Failed to store deletion request event");
|
||||
}
|
||||
|
||||
char debug_msg[256];
|
||||
snprintf(debug_msg, sizeof(debug_msg), "Deletion request processed: %d events deleted", deleted_count);
|
||||
log_info(debug_msg);
|
||||
|
||||
|
||||
error_message[0] = '\0'; // Success - empty error message
|
||||
return 0;
|
||||
}
|
||||
@@ -196,10 +189,6 @@ int delete_events_by_id(const char* requester_pubkey, cJSON* event_ids) {
|
||||
|
||||
if (sqlite3_step(delete_stmt) == SQLITE_DONE && sqlite3_changes(g_db) > 0) {
|
||||
deleted_count++;
|
||||
|
||||
char debug_msg[128];
|
||||
snprintf(debug_msg, sizeof(debug_msg), "Deleted event by ID: %.16s...", id);
|
||||
log_info(debug_msg);
|
||||
}
|
||||
sqlite3_finalize(delete_stmt);
|
||||
}
|
||||
@@ -207,13 +196,10 @@ int delete_events_by_id(const char* requester_pubkey, cJSON* event_ids) {
|
||||
sqlite3_finalize(check_stmt);
|
||||
char warning_msg[128];
|
||||
snprintf(warning_msg, sizeof(warning_msg), "Unauthorized deletion attempt for event: %.16s...", id);
|
||||
log_warning(warning_msg);
|
||||
DEBUG_WARN(warning_msg);
|
||||
}
|
||||
} else {
|
||||
sqlite3_finalize(check_stmt);
|
||||
char debug_msg[128];
|
||||
snprintf(debug_msg, sizeof(debug_msg), "Event not found for deletion: %.16s...", id);
|
||||
log_info(debug_msg);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -256,7 +242,7 @@ int delete_events_by_address(const char* requester_pubkey, cJSON* addresses, lon
|
||||
free(addr_copy);
|
||||
char warning_msg[128];
|
||||
snprintf(warning_msg, sizeof(warning_msg), "Unauthorized deletion attempt for address: %.32s...", addr);
|
||||
log_warning(warning_msg);
|
||||
DEBUG_WARN(warning_msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -287,10 +273,6 @@ int delete_events_by_address(const char* requester_pubkey, cJSON* addresses, lon
|
||||
int changes = sqlite3_changes(g_db);
|
||||
if (changes > 0) {
|
||||
deleted_count += changes;
|
||||
|
||||
char debug_msg[128];
|
||||
snprintf(debug_msg, sizeof(debug_msg), "Deleted %d events by address: %.32s...", changes, addr);
|
||||
log_info(debug_msg);
|
||||
}
|
||||
}
|
||||
sqlite3_finalize(delete_stmt);
|
||||
|
||||
665
src/nip011.c
665
src/nip011.c
@@ -1,6 +1,7 @@
|
||||
// NIP-11 Relay Information Document module
|
||||
#define _GNU_SOURCE
|
||||
#include <stdio.h>
|
||||
#include "debug.h"
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <pthread.h>
|
||||
@@ -8,19 +9,13 @@
|
||||
#include "../nostr_core_lib/cjson/cJSON.h"
|
||||
#include "config.h"
|
||||
|
||||
// Forward declarations for logging functions
|
||||
void log_info(const char* message);
|
||||
void log_success(const char* message);
|
||||
void log_error(const char* message);
|
||||
void log_warning(const char* message);
|
||||
|
||||
// Forward declarations for configuration functions
|
||||
const char* get_config_value(const char* key);
|
||||
int get_config_int(const char* key, int default_value);
|
||||
int get_config_bool(const char* key, int default_value);
|
||||
|
||||
// Forward declarations for global cache access
|
||||
extern unified_config_cache_t g_unified_cache;
|
||||
// NIP-11 relay information is now managed directly from config table
|
||||
|
||||
// Forward declarations for constants (defined in config.h and other headers)
|
||||
#define HTTP_STATUS_OK 200
|
||||
@@ -36,566 +31,255 @@ extern unified_config_cache_t g_unified_cache;
|
||||
|
||||
// Helper function to parse comma-separated string into cJSON array
|
||||
cJSON* parse_comma_separated_array(const char* csv_string) {
|
||||
log_info("parse_comma_separated_array called");
|
||||
if (!csv_string || strlen(csv_string) == 0) {
|
||||
log_info("Empty or null csv_string, returning empty array");
|
||||
return cJSON_CreateArray();
|
||||
}
|
||||
|
||||
log_info("Creating cJSON array");
|
||||
cJSON* array = cJSON_CreateArray();
|
||||
if (!array) {
|
||||
log_info("Failed to create cJSON array");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
log_info("Duplicating csv_string");
|
||||
char* csv_copy = strdup(csv_string);
|
||||
if (!csv_copy) {
|
||||
log_info("Failed to duplicate csv_string");
|
||||
cJSON_Delete(array);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
log_info("Starting token parsing");
|
||||
char* token = strtok(csv_copy, ",");
|
||||
while (token) {
|
||||
log_info("Processing token");
|
||||
// Trim whitespace
|
||||
while (*token == ' ') token++;
|
||||
char* end = token + strlen(token) - 1;
|
||||
while (end > token && *end == ' ') *end-- = '\0';
|
||||
|
||||
if (strlen(token) > 0) {
|
||||
log_info("Token has content, parsing");
|
||||
// Try to parse as number first (for supported_nips)
|
||||
char* endptr;
|
||||
long num = strtol(token, &endptr, 10);
|
||||
if (*endptr == '\0') {
|
||||
log_info("Token is number, adding to array");
|
||||
// It's a number
|
||||
cJSON_AddItemToArray(array, cJSON_CreateNumber(num));
|
||||
} else {
|
||||
log_info("Token is string, adding to array");
|
||||
// It's a string
|
||||
cJSON_AddItemToArray(array, cJSON_CreateString(token));
|
||||
}
|
||||
} else {
|
||||
log_info("Token is empty, skipping");
|
||||
}
|
||||
token = strtok(NULL, ",");
|
||||
}
|
||||
|
||||
log_info("Freeing csv_copy");
|
||||
free(csv_copy);
|
||||
log_info("Returning parsed array");
|
||||
return array;
|
||||
}
|
||||
|
||||
// Initialize relay information using configuration system
|
||||
void init_relay_info() {
|
||||
log_info("Initializing relay information from configuration...");
|
||||
|
||||
// Get all config values first (without holding mutex to avoid deadlock)
|
||||
// Note: These may be dynamically allocated strings that need to be freed
|
||||
log_info("Fetching relay configuration values...");
|
||||
const char* relay_name = get_config_value("relay_name");
|
||||
log_info("relay_name fetched");
|
||||
const char* relay_description = get_config_value("relay_description");
|
||||
log_info("relay_description fetched");
|
||||
const char* relay_software = get_config_value("relay_software");
|
||||
log_info("relay_software fetched");
|
||||
const char* relay_version = get_config_value("relay_version");
|
||||
log_info("relay_version fetched");
|
||||
const char* relay_contact = get_config_value("relay_contact");
|
||||
log_info("relay_contact fetched");
|
||||
const char* relay_pubkey = get_config_value("relay_pubkey");
|
||||
log_info("relay_pubkey fetched");
|
||||
const char* supported_nips_csv = get_config_value("supported_nips");
|
||||
log_info("supported_nips fetched");
|
||||
const char* language_tags_csv = get_config_value("language_tags");
|
||||
log_info("language_tags fetched");
|
||||
const char* relay_countries_csv = get_config_value("relay_countries");
|
||||
log_info("relay_countries fetched");
|
||||
const char* posting_policy = get_config_value("posting_policy");
|
||||
log_info("posting_policy fetched");
|
||||
const char* payments_url = get_config_value("payments_url");
|
||||
log_info("payments_url fetched");
|
||||
|
||||
// Get config values for limitations
|
||||
log_info("Fetching limitation configuration values...");
|
||||
int max_message_length = get_config_int("max_message_length", 16384);
|
||||
log_info("max_message_length fetched");
|
||||
int max_subscriptions_per_client = get_config_int("max_subscriptions_per_client", 20);
|
||||
log_info("max_subscriptions_per_client fetched");
|
||||
int max_limit = get_config_int("max_limit", 5000);
|
||||
log_info("max_limit fetched");
|
||||
int max_event_tags = get_config_int("max_event_tags", 100);
|
||||
log_info("max_event_tags fetched");
|
||||
int max_content_length = get_config_int("max_content_length", 8196);
|
||||
log_info("max_content_length fetched");
|
||||
int default_limit = get_config_int("default_limit", 500);
|
||||
log_info("default_limit fetched");
|
||||
int admin_enabled = get_config_bool("admin_enabled", 0);
|
||||
log_info("admin_enabled fetched");
|
||||
|
||||
pthread_mutex_lock(&g_unified_cache.cache_lock);
|
||||
|
||||
// Update relay information fields
|
||||
log_info("Storing string values in cache...");
|
||||
if (relay_name) {
|
||||
log_info("Storing relay_name");
|
||||
strncpy(g_unified_cache.relay_info.name, relay_name, sizeof(g_unified_cache.relay_info.name) - 1);
|
||||
free((char*)relay_name); // Free dynamically allocated string
|
||||
log_info("relay_name stored and freed");
|
||||
} else {
|
||||
log_info("Using default relay_name");
|
||||
strncpy(g_unified_cache.relay_info.name, "C Nostr Relay", sizeof(g_unified_cache.relay_info.name) - 1);
|
||||
}
|
||||
|
||||
if (relay_description) {
|
||||
log_info("Storing relay_description");
|
||||
strncpy(g_unified_cache.relay_info.description, relay_description, sizeof(g_unified_cache.relay_info.description) - 1);
|
||||
free((char*)relay_description); // Free dynamically allocated string
|
||||
log_info("relay_description stored and freed");
|
||||
} else {
|
||||
log_info("Using default relay_description");
|
||||
strncpy(g_unified_cache.relay_info.description, "A high-performance Nostr relay implemented in C with SQLite storage", sizeof(g_unified_cache.relay_info.description) - 1);
|
||||
}
|
||||
|
||||
if (relay_software) {
|
||||
log_info("Storing relay_software");
|
||||
strncpy(g_unified_cache.relay_info.software, relay_software, sizeof(g_unified_cache.relay_info.software) - 1);
|
||||
free((char*)relay_software); // Free dynamically allocated string
|
||||
log_info("relay_software stored and freed");
|
||||
} else {
|
||||
log_info("Using default relay_software");
|
||||
strncpy(g_unified_cache.relay_info.software, "https://git.laantungir.net/laantungir/c-relay.git", sizeof(g_unified_cache.relay_info.software) - 1);
|
||||
}
|
||||
|
||||
if (relay_version) {
|
||||
log_info("Storing relay_version");
|
||||
strncpy(g_unified_cache.relay_info.version, relay_version, sizeof(g_unified_cache.relay_info.version) - 1);
|
||||
free((char*)relay_version); // Free dynamically allocated string
|
||||
log_info("relay_version stored and freed");
|
||||
} else {
|
||||
log_info("Using default relay_version");
|
||||
strncpy(g_unified_cache.relay_info.version, "0.2.0", sizeof(g_unified_cache.relay_info.version) - 1);
|
||||
}
|
||||
|
||||
if (relay_contact) {
|
||||
log_info("Storing relay_contact");
|
||||
strncpy(g_unified_cache.relay_info.contact, relay_contact, sizeof(g_unified_cache.relay_info.contact) - 1);
|
||||
free((char*)relay_contact); // Free dynamically allocated string
|
||||
log_info("relay_contact stored and freed");
|
||||
}
|
||||
|
||||
if (relay_pubkey) {
|
||||
log_info("Storing relay_pubkey");
|
||||
strncpy(g_unified_cache.relay_info.pubkey, relay_pubkey, sizeof(g_unified_cache.relay_info.pubkey) - 1);
|
||||
free((char*)relay_pubkey); // Free dynamically allocated string
|
||||
log_info("relay_pubkey stored and freed");
|
||||
}
|
||||
|
||||
if (posting_policy) {
|
||||
log_info("Storing posting_policy");
|
||||
strncpy(g_unified_cache.relay_info.posting_policy, posting_policy, sizeof(g_unified_cache.relay_info.posting_policy) - 1);
|
||||
free((char*)posting_policy); // Free dynamically allocated string
|
||||
log_info("posting_policy stored and freed");
|
||||
}
|
||||
|
||||
if (payments_url) {
|
||||
log_info("Storing payments_url");
|
||||
strncpy(g_unified_cache.relay_info.payments_url, payments_url, sizeof(g_unified_cache.relay_info.payments_url) - 1);
|
||||
free((char*)payments_url); // Free dynamically allocated string
|
||||
log_info("payments_url stored and freed");
|
||||
}
|
||||
|
||||
// Initialize supported NIPs array from config
|
||||
log_info("Initializing supported_nips array");
|
||||
if (supported_nips_csv) {
|
||||
log_info("Parsing supported_nips from config");
|
||||
g_unified_cache.relay_info.supported_nips = parse_comma_separated_array(supported_nips_csv);
|
||||
log_info("supported_nips parsed successfully");
|
||||
free((char*)supported_nips_csv); // Free dynamically allocated string
|
||||
log_info("supported_nips_csv freed");
|
||||
} else {
|
||||
log_info("Using default supported_nips");
|
||||
// Fallback to default supported NIPs
|
||||
g_unified_cache.relay_info.supported_nips = cJSON_CreateArray();
|
||||
if (g_unified_cache.relay_info.supported_nips) {
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(1)); // NIP-01: Basic protocol
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(9)); // NIP-09: Event deletion
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(11)); // NIP-11: Relay information
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(13)); // NIP-13: Proof of Work
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(15)); // NIP-15: EOSE
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(20)); // NIP-20: Command results
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(40)); // NIP-40: Expiration Timestamp
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(42)); // NIP-42: Authentication
|
||||
}
|
||||
log_info("Default supported_nips created");
|
||||
}
|
||||
|
||||
// Initialize server limitations using configuration
|
||||
log_info("Initializing server limitations");
|
||||
g_unified_cache.relay_info.limitation = cJSON_CreateObject();
|
||||
if (g_unified_cache.relay_info.limitation) {
|
||||
log_info("Adding limitation fields");
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_message_length", max_message_length);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_subscriptions", max_subscriptions_per_client);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_limit", max_limit);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_subid_length", SUBSCRIPTION_ID_MAX_LENGTH);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_event_tags", max_event_tags);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_content_length", max_content_length);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "min_pow_difficulty", g_unified_cache.pow_config.min_pow_difficulty);
|
||||
cJSON_AddBoolToObject(g_unified_cache.relay_info.limitation, "auth_required", admin_enabled ? cJSON_True : cJSON_False);
|
||||
cJSON_AddBoolToObject(g_unified_cache.relay_info.limitation, "payment_required", cJSON_False);
|
||||
cJSON_AddBoolToObject(g_unified_cache.relay_info.limitation, "restricted_writes", cJSON_False);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "created_at_lower_limit", 0);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "created_at_upper_limit", 2147483647);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "default_limit", default_limit);
|
||||
log_info("Limitation fields added");
|
||||
} else {
|
||||
log_info("Failed to create limitation object");
|
||||
}
|
||||
|
||||
// Initialize empty retention policies (can be configured later)
|
||||
log_info("Initializing retention policies");
|
||||
g_unified_cache.relay_info.retention = cJSON_CreateArray();
|
||||
|
||||
// Initialize language tags from config
|
||||
log_info("Initializing language_tags");
|
||||
if (language_tags_csv) {
|
||||
log_info("Parsing language_tags from config");
|
||||
g_unified_cache.relay_info.language_tags = parse_comma_separated_array(language_tags_csv);
|
||||
log_info("language_tags parsed successfully");
|
||||
free((char*)language_tags_csv); // Free dynamically allocated string
|
||||
log_info("language_tags_csv freed");
|
||||
} else {
|
||||
log_info("Using default language_tags");
|
||||
// Fallback to global
|
||||
g_unified_cache.relay_info.language_tags = cJSON_CreateArray();
|
||||
if (g_unified_cache.relay_info.language_tags) {
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.language_tags, cJSON_CreateString("*"));
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize relay countries from config
|
||||
log_info("Initializing relay_countries");
|
||||
if (relay_countries_csv) {
|
||||
log_info("Parsing relay_countries from config");
|
||||
g_unified_cache.relay_info.relay_countries = parse_comma_separated_array(relay_countries_csv);
|
||||
log_info("relay_countries parsed successfully");
|
||||
free((char*)relay_countries_csv); // Free dynamically allocated string
|
||||
log_info("relay_countries_csv freed");
|
||||
} else {
|
||||
log_info("Using default relay_countries");
|
||||
// Fallback to global
|
||||
g_unified_cache.relay_info.relay_countries = cJSON_CreateArray();
|
||||
if (g_unified_cache.relay_info.relay_countries) {
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.relay_countries, cJSON_CreateString("*"));
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize content tags as empty array
|
||||
log_info("Initializing tags");
|
||||
g_unified_cache.relay_info.tags = cJSON_CreateArray();
|
||||
|
||||
// Initialize fees as empty object (no payment required by default)
|
||||
log_info("Initializing fees");
|
||||
g_unified_cache.relay_info.fees = cJSON_CreateObject();
|
||||
|
||||
log_info("Unlocking cache mutex");
|
||||
pthread_mutex_unlock(&g_unified_cache.cache_lock);
|
||||
|
||||
log_success("Relay information initialized with default values");
|
||||
// NIP-11 relay information is now generated dynamically from config table
|
||||
// No initialization needed - data is fetched directly from database when requested
|
||||
}
|
||||
|
||||
// Clean up relay information JSON objects
|
||||
void cleanup_relay_info() {
|
||||
pthread_mutex_lock(&g_unified_cache.cache_lock);
|
||||
if (g_unified_cache.relay_info.supported_nips) {
|
||||
cJSON_Delete(g_unified_cache.relay_info.supported_nips);
|
||||
g_unified_cache.relay_info.supported_nips = NULL;
|
||||
}
|
||||
if (g_unified_cache.relay_info.limitation) {
|
||||
cJSON_Delete(g_unified_cache.relay_info.limitation);
|
||||
g_unified_cache.relay_info.limitation = NULL;
|
||||
}
|
||||
if (g_unified_cache.relay_info.retention) {
|
||||
cJSON_Delete(g_unified_cache.relay_info.retention);
|
||||
g_unified_cache.relay_info.retention = NULL;
|
||||
}
|
||||
if (g_unified_cache.relay_info.language_tags) {
|
||||
cJSON_Delete(g_unified_cache.relay_info.language_tags);
|
||||
g_unified_cache.relay_info.language_tags = NULL;
|
||||
}
|
||||
if (g_unified_cache.relay_info.relay_countries) {
|
||||
cJSON_Delete(g_unified_cache.relay_info.relay_countries);
|
||||
g_unified_cache.relay_info.relay_countries = NULL;
|
||||
}
|
||||
if (g_unified_cache.relay_info.tags) {
|
||||
cJSON_Delete(g_unified_cache.relay_info.tags);
|
||||
g_unified_cache.relay_info.tags = NULL;
|
||||
}
|
||||
if (g_unified_cache.relay_info.fees) {
|
||||
cJSON_Delete(g_unified_cache.relay_info.fees);
|
||||
g_unified_cache.relay_info.fees = NULL;
|
||||
}
|
||||
pthread_mutex_unlock(&g_unified_cache.cache_lock);
|
||||
// NIP-11 relay information is now generated dynamically from config table
|
||||
// No cleanup needed - data is fetched directly from database when requested
|
||||
}
|
||||
|
||||
// Generate NIP-11 compliant JSON document
|
||||
cJSON* generate_relay_info_json() {
|
||||
cJSON* info = cJSON_CreateObject();
|
||||
if (!info) {
|
||||
log_error("Failed to create relay info JSON object");
|
||||
DEBUG_ERROR("Failed to create relay info JSON object");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&g_unified_cache.cache_lock);
|
||||
// Get all config values directly from database
|
||||
const char* relay_name = get_config_value("relay_name");
|
||||
const char* relay_description = get_config_value("relay_description");
|
||||
const char* relay_banner = get_config_value("relay_banner");
|
||||
const char* relay_icon = get_config_value("relay_icon");
|
||||
const char* relay_pubkey = get_config_value("relay_pubkey");
|
||||
const char* relay_contact = get_config_value("relay_contact");
|
||||
const char* supported_nips_csv = get_config_value("supported_nips");
|
||||
const char* relay_software = get_config_value("relay_software");
|
||||
const char* relay_version = get_config_value("relay_version");
|
||||
const char* privacy_policy = get_config_value("privacy_policy");
|
||||
const char* terms_of_service = get_config_value("terms_of_service");
|
||||
const char* posting_policy = get_config_value("posting_policy");
|
||||
const char* language_tags_csv = get_config_value("language_tags");
|
||||
const char* relay_countries_csv = get_config_value("relay_countries");
|
||||
const char* payments_url = get_config_value("payments_url");
|
||||
|
||||
// Defensive reinit: if relay_info appears empty (cache refresh wiped it), rebuild it directly from table
|
||||
if (strlen(g_unified_cache.relay_info.name) == 0 &&
|
||||
strlen(g_unified_cache.relay_info.description) == 0 &&
|
||||
strlen(g_unified_cache.relay_info.software) == 0) {
|
||||
log_warning("NIP-11 relay_info appears empty, rebuilding directly from config table");
|
||||
|
||||
// Rebuild relay_info directly from config table to avoid circular cache dependency
|
||||
// Get values directly from table (similar to init_relay_info but without cache calls)
|
||||
const char* relay_name = get_config_value_from_table("relay_name");
|
||||
if (relay_name) {
|
||||
strncpy(g_unified_cache.relay_info.name, relay_name, sizeof(g_unified_cache.relay_info.name) - 1);
|
||||
free((char*)relay_name);
|
||||
} else {
|
||||
strncpy(g_unified_cache.relay_info.name, "C Nostr Relay", sizeof(g_unified_cache.relay_info.name) - 1);
|
||||
}
|
||||
|
||||
const char* relay_description = get_config_value_from_table("relay_description");
|
||||
if (relay_description) {
|
||||
strncpy(g_unified_cache.relay_info.description, relay_description, sizeof(g_unified_cache.relay_info.description) - 1);
|
||||
free((char*)relay_description);
|
||||
} else {
|
||||
strncpy(g_unified_cache.relay_info.description, "A high-performance Nostr relay implemented in C with SQLite storage", sizeof(g_unified_cache.relay_info.description) - 1);
|
||||
}
|
||||
|
||||
const char* relay_software = get_config_value_from_table("relay_software");
|
||||
if (relay_software) {
|
||||
strncpy(g_unified_cache.relay_info.software, relay_software, sizeof(g_unified_cache.relay_info.software) - 1);
|
||||
free((char*)relay_software);
|
||||
} else {
|
||||
strncpy(g_unified_cache.relay_info.software, "https://git.laantungir.net/laantungir/c-relay.git", sizeof(g_unified_cache.relay_info.software) - 1);
|
||||
}
|
||||
|
||||
const char* relay_version = get_config_value_from_table("relay_version");
|
||||
if (relay_version) {
|
||||
strncpy(g_unified_cache.relay_info.version, relay_version, sizeof(g_unified_cache.relay_info.version) - 1);
|
||||
free((char*)relay_version);
|
||||
} else {
|
||||
strncpy(g_unified_cache.relay_info.version, "0.2.0", sizeof(g_unified_cache.relay_info.version) - 1);
|
||||
}
|
||||
|
||||
const char* relay_contact = get_config_value_from_table("relay_contact");
|
||||
if (relay_contact) {
|
||||
strncpy(g_unified_cache.relay_info.contact, relay_contact, sizeof(g_unified_cache.relay_info.contact) - 1);
|
||||
free((char*)relay_contact);
|
||||
}
|
||||
|
||||
const char* relay_pubkey = get_config_value_from_table("relay_pubkey");
|
||||
if (relay_pubkey) {
|
||||
strncpy(g_unified_cache.relay_info.pubkey, relay_pubkey, sizeof(g_unified_cache.relay_info.pubkey) - 1);
|
||||
free((char*)relay_pubkey);
|
||||
}
|
||||
|
||||
const char* posting_policy = get_config_value_from_table("posting_policy");
|
||||
if (posting_policy) {
|
||||
strncpy(g_unified_cache.relay_info.posting_policy, posting_policy, sizeof(g_unified_cache.relay_info.posting_policy) - 1);
|
||||
free((char*)posting_policy);
|
||||
}
|
||||
|
||||
const char* payments_url = get_config_value_from_table("payments_url");
|
||||
if (payments_url) {
|
||||
strncpy(g_unified_cache.relay_info.payments_url, payments_url, sizeof(g_unified_cache.relay_info.payments_url) - 1);
|
||||
free((char*)payments_url);
|
||||
}
|
||||
|
||||
// Rebuild supported_nips array
|
||||
const char* supported_nips_csv = get_config_value_from_table("supported_nips");
|
||||
if (supported_nips_csv) {
|
||||
g_unified_cache.relay_info.supported_nips = parse_comma_separated_array(supported_nips_csv);
|
||||
free((char*)supported_nips_csv);
|
||||
} else {
|
||||
g_unified_cache.relay_info.supported_nips = cJSON_CreateArray();
|
||||
if (g_unified_cache.relay_info.supported_nips) {
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(1));
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(9));
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(11));
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(13));
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(15));
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(20));
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(40));
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(42));
|
||||
}
|
||||
}
|
||||
|
||||
// Rebuild limitation object
|
||||
int max_message_length = 16384;
|
||||
const char* max_msg_str = get_config_value_from_table("max_message_length");
|
||||
if (max_msg_str) {
|
||||
max_message_length = atoi(max_msg_str);
|
||||
free((char*)max_msg_str);
|
||||
}
|
||||
|
||||
int max_subscriptions_per_client = 20;
|
||||
const char* max_subs_str = get_config_value_from_table("max_subscriptions_per_client");
|
||||
if (max_subs_str) {
|
||||
max_subscriptions_per_client = atoi(max_subs_str);
|
||||
free((char*)max_subs_str);
|
||||
}
|
||||
|
||||
int max_limit = 5000;
|
||||
const char* max_limit_str = get_config_value_from_table("max_limit");
|
||||
if (max_limit_str) {
|
||||
max_limit = atoi(max_limit_str);
|
||||
free((char*)max_limit_str);
|
||||
}
|
||||
|
||||
int max_event_tags = 100;
|
||||
const char* max_tags_str = get_config_value_from_table("max_event_tags");
|
||||
if (max_tags_str) {
|
||||
max_event_tags = atoi(max_tags_str);
|
||||
free((char*)max_tags_str);
|
||||
}
|
||||
|
||||
int max_content_length = 8196;
|
||||
const char* max_content_str = get_config_value_from_table("max_content_length");
|
||||
if (max_content_str) {
|
||||
max_content_length = atoi(max_content_str);
|
||||
free((char*)max_content_str);
|
||||
}
|
||||
|
||||
int default_limit = 500;
|
||||
const char* default_limit_str = get_config_value_from_table("default_limit");
|
||||
if (default_limit_str) {
|
||||
default_limit = atoi(default_limit_str);
|
||||
free((char*)default_limit_str);
|
||||
}
|
||||
|
||||
int admin_enabled = 0;
|
||||
const char* admin_enabled_str = get_config_value_from_table("admin_enabled");
|
||||
if (admin_enabled_str) {
|
||||
admin_enabled = (strcmp(admin_enabled_str, "true") == 0) ? 1 : 0;
|
||||
free((char*)admin_enabled_str);
|
||||
}
|
||||
|
||||
g_unified_cache.relay_info.limitation = cJSON_CreateObject();
|
||||
if (g_unified_cache.relay_info.limitation) {
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_message_length", max_message_length);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_subscriptions", max_subscriptions_per_client);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_limit", max_limit);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_subid_length", SUBSCRIPTION_ID_MAX_LENGTH);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_event_tags", max_event_tags);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_content_length", max_content_length);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "min_pow_difficulty", g_unified_cache.pow_config.min_pow_difficulty);
|
||||
cJSON_AddBoolToObject(g_unified_cache.relay_info.limitation, "auth_required", admin_enabled ? cJSON_True : cJSON_False);
|
||||
cJSON_AddBoolToObject(g_unified_cache.relay_info.limitation, "payment_required", cJSON_False);
|
||||
cJSON_AddBoolToObject(g_unified_cache.relay_info.limitation, "restricted_writes", cJSON_False);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "created_at_lower_limit", 0);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "created_at_upper_limit", 2147483647);
|
||||
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "default_limit", default_limit);
|
||||
}
|
||||
|
||||
// Rebuild other arrays (empty for now)
|
||||
g_unified_cache.relay_info.retention = cJSON_CreateArray();
|
||||
g_unified_cache.relay_info.language_tags = cJSON_CreateArray();
|
||||
if (g_unified_cache.relay_info.language_tags) {
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.language_tags, cJSON_CreateString("*"));
|
||||
}
|
||||
g_unified_cache.relay_info.relay_countries = cJSON_CreateArray();
|
||||
if (g_unified_cache.relay_info.relay_countries) {
|
||||
cJSON_AddItemToArray(g_unified_cache.relay_info.relay_countries, cJSON_CreateString("*"));
|
||||
}
|
||||
g_unified_cache.relay_info.tags = cJSON_CreateArray();
|
||||
g_unified_cache.relay_info.fees = cJSON_CreateObject();
|
||||
|
||||
log_info("NIP-11 relay_info rebuilt directly from config table");
|
||||
}
|
||||
// Get config values for limitations
|
||||
int max_message_length = get_config_int("max_message_length", 16384);
|
||||
int max_subscriptions_per_client = get_config_int("max_subscriptions_per_client", 20);
|
||||
int max_limit = get_config_int("max_limit", 5000);
|
||||
int max_event_tags = get_config_int("max_event_tags", 100);
|
||||
int max_content_length = get_config_int("max_content_length", 8196);
|
||||
int default_limit = get_config_int("default_limit", 500);
|
||||
int min_pow_difficulty = get_config_int("pow_min_difficulty", 0);
|
||||
int admin_enabled = get_config_bool("admin_enabled", 0);
|
||||
|
||||
// Add basic relay information
|
||||
if (strlen(g_unified_cache.relay_info.name) > 0) {
|
||||
cJSON_AddStringToObject(info, "name", g_unified_cache.relay_info.name);
|
||||
if (relay_name && strlen(relay_name) > 0) {
|
||||
cJSON_AddStringToObject(info, "name", relay_name);
|
||||
free((char*)relay_name);
|
||||
} else {
|
||||
cJSON_AddStringToObject(info, "name", "C Nostr Relay");
|
||||
}
|
||||
if (strlen(g_unified_cache.relay_info.description) > 0) {
|
||||
cJSON_AddStringToObject(info, "description", g_unified_cache.relay_info.description);
|
||||
|
||||
if (relay_description && strlen(relay_description) > 0) {
|
||||
cJSON_AddStringToObject(info, "description", relay_description);
|
||||
free((char*)relay_description);
|
||||
} else {
|
||||
cJSON_AddStringToObject(info, "description", "A high-performance Nostr relay implemented in C with SQLite storage");
|
||||
}
|
||||
if (strlen(g_unified_cache.relay_info.banner) > 0) {
|
||||
cJSON_AddStringToObject(info, "banner", g_unified_cache.relay_info.banner);
|
||||
|
||||
if (relay_banner && strlen(relay_banner) > 0) {
|
||||
cJSON_AddStringToObject(info, "banner", relay_banner);
|
||||
free((char*)relay_banner);
|
||||
}
|
||||
if (strlen(g_unified_cache.relay_info.icon) > 0) {
|
||||
cJSON_AddStringToObject(info, "icon", g_unified_cache.relay_info.icon);
|
||||
|
||||
if (relay_icon && strlen(relay_icon) > 0) {
|
||||
cJSON_AddStringToObject(info, "icon", relay_icon);
|
||||
free((char*)relay_icon);
|
||||
}
|
||||
if (strlen(g_unified_cache.relay_info.pubkey) > 0) {
|
||||
cJSON_AddStringToObject(info, "pubkey", g_unified_cache.relay_info.pubkey);
|
||||
|
||||
if (relay_pubkey && strlen(relay_pubkey) > 0) {
|
||||
cJSON_AddStringToObject(info, "pubkey", relay_pubkey);
|
||||
free((char*)relay_pubkey);
|
||||
}
|
||||
if (strlen(g_unified_cache.relay_info.contact) > 0) {
|
||||
cJSON_AddStringToObject(info, "contact", g_unified_cache.relay_info.contact);
|
||||
|
||||
if (relay_contact && strlen(relay_contact) > 0) {
|
||||
cJSON_AddStringToObject(info, "contact", relay_contact);
|
||||
free((char*)relay_contact);
|
||||
}
|
||||
|
||||
|
||||
// Add supported NIPs
|
||||
if (g_unified_cache.relay_info.supported_nips) {
|
||||
cJSON_AddItemToObject(info, "supported_nips", cJSON_Duplicate(g_unified_cache.relay_info.supported_nips, 1));
|
||||
if (supported_nips_csv && strlen(supported_nips_csv) > 0) {
|
||||
cJSON* supported_nips = parse_comma_separated_array(supported_nips_csv);
|
||||
if (supported_nips) {
|
||||
cJSON_AddItemToObject(info, "supported_nips", supported_nips);
|
||||
}
|
||||
free((char*)supported_nips_csv);
|
||||
} else {
|
||||
// Default supported NIPs
|
||||
cJSON* supported_nips = cJSON_CreateArray();
|
||||
if (supported_nips) {
|
||||
cJSON_AddItemToArray(supported_nips, cJSON_CreateNumber(1)); // NIP-01: Basic protocol
|
||||
cJSON_AddItemToArray(supported_nips, cJSON_CreateNumber(9)); // NIP-09: Event deletion
|
||||
cJSON_AddItemToArray(supported_nips, cJSON_CreateNumber(11)); // NIP-11: Relay information
|
||||
cJSON_AddItemToArray(supported_nips, cJSON_CreateNumber(13)); // NIP-13: Proof of Work
|
||||
cJSON_AddItemToArray(supported_nips, cJSON_CreateNumber(15)); // NIP-15: EOSE
|
||||
cJSON_AddItemToArray(supported_nips, cJSON_CreateNumber(20)); // NIP-20: Command results
|
||||
cJSON_AddItemToArray(supported_nips, cJSON_CreateNumber(40)); // NIP-40: Expiration Timestamp
|
||||
cJSON_AddItemToArray(supported_nips, cJSON_CreateNumber(42)); // NIP-42: Authentication
|
||||
cJSON_AddItemToObject(info, "supported_nips", supported_nips);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Add software information
|
||||
if (strlen(g_unified_cache.relay_info.software) > 0) {
|
||||
cJSON_AddStringToObject(info, "software", g_unified_cache.relay_info.software);
|
||||
if (relay_software && strlen(relay_software) > 0) {
|
||||
cJSON_AddStringToObject(info, "software", relay_software);
|
||||
free((char*)relay_software);
|
||||
} else {
|
||||
cJSON_AddStringToObject(info, "software", "https://git.laantungir.net/laantungir/c-relay.git");
|
||||
}
|
||||
if (strlen(g_unified_cache.relay_info.version) > 0) {
|
||||
cJSON_AddStringToObject(info, "version", g_unified_cache.relay_info.version);
|
||||
|
||||
if (relay_version && strlen(relay_version) > 0) {
|
||||
cJSON_AddStringToObject(info, "version", relay_version);
|
||||
free((char*)relay_version);
|
||||
} else {
|
||||
cJSON_AddStringToObject(info, "version", "0.2.0");
|
||||
}
|
||||
|
||||
|
||||
// Add policies
|
||||
if (strlen(g_unified_cache.relay_info.privacy_policy) > 0) {
|
||||
cJSON_AddStringToObject(info, "privacy_policy", g_unified_cache.relay_info.privacy_policy);
|
||||
if (privacy_policy && strlen(privacy_policy) > 0) {
|
||||
cJSON_AddStringToObject(info, "privacy_policy", privacy_policy);
|
||||
free((char*)privacy_policy);
|
||||
}
|
||||
if (strlen(g_unified_cache.relay_info.terms_of_service) > 0) {
|
||||
cJSON_AddStringToObject(info, "terms_of_service", g_unified_cache.relay_info.terms_of_service);
|
||||
|
||||
if (terms_of_service && strlen(terms_of_service) > 0) {
|
||||
cJSON_AddStringToObject(info, "terms_of_service", terms_of_service);
|
||||
free((char*)terms_of_service);
|
||||
}
|
||||
if (strlen(g_unified_cache.relay_info.posting_policy) > 0) {
|
||||
cJSON_AddStringToObject(info, "posting_policy", g_unified_cache.relay_info.posting_policy);
|
||||
|
||||
if (posting_policy && strlen(posting_policy) > 0) {
|
||||
cJSON_AddStringToObject(info, "posting_policy", posting_policy);
|
||||
free((char*)posting_policy);
|
||||
}
|
||||
|
||||
|
||||
// Add server limitations
|
||||
if (g_unified_cache.relay_info.limitation) {
|
||||
cJSON_AddItemToObject(info, "limitation", cJSON_Duplicate(g_unified_cache.relay_info.limitation, 1));
|
||||
cJSON* limitation = cJSON_CreateObject();
|
||||
if (limitation) {
|
||||
cJSON_AddNumberToObject(limitation, "max_message_length", max_message_length);
|
||||
cJSON_AddNumberToObject(limitation, "max_subscriptions", max_subscriptions_per_client);
|
||||
cJSON_AddNumberToObject(limitation, "max_limit", max_limit);
|
||||
cJSON_AddNumberToObject(limitation, "max_subid_length", SUBSCRIPTION_ID_MAX_LENGTH);
|
||||
cJSON_AddNumberToObject(limitation, "max_event_tags", max_event_tags);
|
||||
cJSON_AddNumberToObject(limitation, "max_content_length", max_content_length);
|
||||
cJSON_AddNumberToObject(limitation, "min_pow_difficulty", min_pow_difficulty);
|
||||
cJSON_AddBoolToObject(limitation, "auth_required", admin_enabled ? cJSON_True : cJSON_False);
|
||||
cJSON_AddBoolToObject(limitation, "payment_required", cJSON_False);
|
||||
cJSON_AddBoolToObject(limitation, "restricted_writes", cJSON_False);
|
||||
cJSON_AddNumberToObject(limitation, "created_at_lower_limit", 0);
|
||||
cJSON_AddNumberToObject(limitation, "created_at_upper_limit", 2147483647);
|
||||
cJSON_AddNumberToObject(limitation, "default_limit", default_limit);
|
||||
cJSON_AddItemToObject(info, "limitation", limitation);
|
||||
}
|
||||
|
||||
// Add retention policies if configured
|
||||
if (g_unified_cache.relay_info.retention && cJSON_GetArraySize(g_unified_cache.relay_info.retention) > 0) {
|
||||
cJSON_AddItemToObject(info, "retention", cJSON_Duplicate(g_unified_cache.relay_info.retention, 1));
|
||||
|
||||
// Add retention policies (empty array for now)
|
||||
cJSON* retention = cJSON_CreateArray();
|
||||
if (retention) {
|
||||
cJSON_AddItemToObject(info, "retention", retention);
|
||||
}
|
||||
|
||||
|
||||
// Add geographical and language information
|
||||
if (g_unified_cache.relay_info.relay_countries) {
|
||||
cJSON_AddItemToObject(info, "relay_countries", cJSON_Duplicate(g_unified_cache.relay_info.relay_countries, 1));
|
||||
if (relay_countries_csv && strlen(relay_countries_csv) > 0) {
|
||||
cJSON* relay_countries = parse_comma_separated_array(relay_countries_csv);
|
||||
if (relay_countries) {
|
||||
cJSON_AddItemToObject(info, "relay_countries", relay_countries);
|
||||
}
|
||||
free((char*)relay_countries_csv);
|
||||
} else {
|
||||
cJSON* relay_countries = cJSON_CreateArray();
|
||||
if (relay_countries) {
|
||||
cJSON_AddItemToArray(relay_countries, cJSON_CreateString("*"));
|
||||
cJSON_AddItemToObject(info, "relay_countries", relay_countries);
|
||||
}
|
||||
}
|
||||
if (g_unified_cache.relay_info.language_tags) {
|
||||
cJSON_AddItemToObject(info, "language_tags", cJSON_Duplicate(g_unified_cache.relay_info.language_tags, 1));
|
||||
|
||||
if (language_tags_csv && strlen(language_tags_csv) > 0) {
|
||||
cJSON* language_tags = parse_comma_separated_array(language_tags_csv);
|
||||
if (language_tags) {
|
||||
cJSON_AddItemToObject(info, "language_tags", language_tags);
|
||||
}
|
||||
free((char*)language_tags_csv);
|
||||
} else {
|
||||
cJSON* language_tags = cJSON_CreateArray();
|
||||
if (language_tags) {
|
||||
cJSON_AddItemToArray(language_tags, cJSON_CreateString("*"));
|
||||
cJSON_AddItemToObject(info, "language_tags", language_tags);
|
||||
}
|
||||
}
|
||||
if (g_unified_cache.relay_info.tags && cJSON_GetArraySize(g_unified_cache.relay_info.tags) > 0) {
|
||||
cJSON_AddItemToObject(info, "tags", cJSON_Duplicate(g_unified_cache.relay_info.tags, 1));
|
||||
|
||||
// Add content tags (empty array)
|
||||
cJSON* tags = cJSON_CreateArray();
|
||||
if (tags) {
|
||||
cJSON_AddItemToObject(info, "tags", tags);
|
||||
}
|
||||
|
||||
|
||||
// Add payment information if configured
|
||||
if (strlen(g_unified_cache.relay_info.payments_url) > 0) {
|
||||
cJSON_AddStringToObject(info, "payments_url", g_unified_cache.relay_info.payments_url);
|
||||
if (payments_url && strlen(payments_url) > 0) {
|
||||
cJSON_AddStringToObject(info, "payments_url", payments_url);
|
||||
free((char*)payments_url);
|
||||
}
|
||||
if (g_unified_cache.relay_info.fees && cJSON_GetObjectItem(g_unified_cache.relay_info.fees, "admission")) {
|
||||
cJSON_AddItemToObject(info, "fees", cJSON_Duplicate(g_unified_cache.relay_info.fees, 1));
|
||||
|
||||
// Add fees (empty object - no payment required by default)
|
||||
cJSON* fees = cJSON_CreateObject();
|
||||
if (fees) {
|
||||
cJSON_AddItemToObject(info, "fees", fees);
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&g_unified_cache.cache_lock);
|
||||
|
||||
|
||||
return info;
|
||||
}
|
||||
|
||||
@@ -610,7 +294,6 @@ struct nip11_session_data {
|
||||
|
||||
// Handle NIP-11 HTTP request with proper asynchronous buffer management
|
||||
int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
|
||||
log_info("Handling NIP-11 relay information request");
|
||||
|
||||
// Check if client accepts application/nostr+json
|
||||
int accepts_nostr_json = 0;
|
||||
@@ -621,7 +304,7 @@ int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
|
||||
}
|
||||
|
||||
if (!accepts_nostr_json) {
|
||||
log_warning("HTTP request without proper Accept header for NIP-11");
|
||||
DEBUG_WARN("HTTP request without proper Accept header for NIP-11");
|
||||
// Return 406 Not Acceptable
|
||||
unsigned char buf[LWS_PRE + 256];
|
||||
unsigned char *p = &buf[LWS_PRE];
|
||||
@@ -647,7 +330,7 @@ int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
|
||||
// Generate relay information JSON
|
||||
cJSON* info_json = generate_relay_info_json();
|
||||
if (!info_json) {
|
||||
log_error("Failed to generate relay info JSON");
|
||||
DEBUG_ERROR("Failed to generate relay info JSON");
|
||||
unsigned char buf[LWS_PRE + 256];
|
||||
unsigned char *p = &buf[LWS_PRE];
|
||||
unsigned char *start = p;
|
||||
@@ -673,7 +356,7 @@ int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
|
||||
cJSON_Delete(info_json);
|
||||
|
||||
if (!json_string) {
|
||||
log_error("Failed to serialize relay info JSON");
|
||||
DEBUG_ERROR("Failed to serialize relay info JSON");
|
||||
unsigned char buf[LWS_PRE + 256];
|
||||
unsigned char *p = &buf[LWS_PRE];
|
||||
unsigned char *start = p;
|
||||
@@ -696,14 +379,11 @@ int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
|
||||
}
|
||||
|
||||
size_t json_len = strlen(json_string);
|
||||
log_info("Generated NIP-11 JSON");
|
||||
printf(" JSON length: %zu bytes\n", json_len);
|
||||
printf(" JSON preview: %.100s%s\n", json_string, json_len > 100 ? "..." : "");
|
||||
|
||||
// Allocate session data to manage buffer lifetime across callbacks
|
||||
struct nip11_session_data* session_data = malloc(sizeof(struct nip11_session_data));
|
||||
if (!session_data) {
|
||||
log_error("Failed to allocate NIP-11 session data");
|
||||
DEBUG_ERROR("Failed to allocate NIP-11 session data");
|
||||
free(json_string);
|
||||
return -1;
|
||||
}
|
||||
@@ -791,8 +471,7 @@ int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
|
||||
|
||||
// Request callback for body transmission
|
||||
lws_callback_on_writable(wsi);
|
||||
|
||||
log_success("NIP-11 headers sent, body transmission scheduled");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
124
src/nip013.c
124
src/nip013.c
@@ -1,5 +1,6 @@
|
||||
// NIP-13 Proof of Work validation module
|
||||
#include <stdio.h>
|
||||
#include "debug.h"
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <pthread.h>
|
||||
@@ -8,86 +9,39 @@
|
||||
#include "../nostr_core_lib/nostr_core/nip013.h"
|
||||
#include "config.h"
|
||||
|
||||
// Forward declarations for logging functions
|
||||
void log_info(const char* message);
|
||||
void log_success(const char* message);
|
||||
void log_error(const char* message);
|
||||
void log_warning(const char* message);
|
||||
|
||||
// NIP-13 PoW configuration structure
|
||||
struct pow_config {
|
||||
int enabled; // 0 = disabled, 1 = enabled
|
||||
int min_pow_difficulty; // Minimum required difficulty (0 = no requirement)
|
||||
int validation_flags; // Bitflags for validation options
|
||||
int require_nonce_tag; // 1 = require nonce tag presence
|
||||
int reject_lower_targets; // 1 = reject if committed < actual difficulty
|
||||
int strict_format; // 1 = enforce strict nonce tag format
|
||||
int anti_spam_mode; // 1 = full anti-spam validation
|
||||
};
|
||||
// Configuration functions from config.c
|
||||
extern int get_config_bool(const char* key, int default_value);
|
||||
extern int get_config_int(const char* key, int default_value);
|
||||
extern const char* get_config_value(const char* key);
|
||||
|
||||
// Initialize PoW configuration using configuration system
|
||||
void init_pow_config() {
|
||||
log_info("Initializing NIP-13 Proof of Work configuration");
|
||||
|
||||
// Get all config values first (without holding mutex to avoid deadlock)
|
||||
int pow_enabled = get_config_bool("pow_enabled", 1);
|
||||
int pow_min_difficulty = get_config_int("pow_min_difficulty", 0);
|
||||
const char* pow_mode = get_config_value("pow_mode");
|
||||
|
||||
pthread_mutex_lock(&g_unified_cache.cache_lock);
|
||||
|
||||
// Load PoW settings from configuration system
|
||||
g_unified_cache.pow_config.enabled = pow_enabled;
|
||||
g_unified_cache.pow_config.min_pow_difficulty = pow_min_difficulty;
|
||||
|
||||
// Configure PoW mode
|
||||
if (pow_mode) {
|
||||
if (strcmp(pow_mode, "strict") == 0) {
|
||||
g_unified_cache.pow_config.validation_flags = NOSTR_POW_VALIDATE_ANTI_SPAM | NOSTR_POW_STRICT_FORMAT;
|
||||
g_unified_cache.pow_config.require_nonce_tag = 1;
|
||||
g_unified_cache.pow_config.reject_lower_targets = 1;
|
||||
g_unified_cache.pow_config.strict_format = 1;
|
||||
g_unified_cache.pow_config.anti_spam_mode = 1;
|
||||
log_info("PoW configured in strict anti-spam mode");
|
||||
} else if (strcmp(pow_mode, "full") == 0) {
|
||||
g_unified_cache.pow_config.validation_flags = NOSTR_POW_VALIDATE_FULL;
|
||||
g_unified_cache.pow_config.require_nonce_tag = 1;
|
||||
log_info("PoW configured in full validation mode");
|
||||
} else if (strcmp(pow_mode, "basic") == 0) {
|
||||
g_unified_cache.pow_config.validation_flags = NOSTR_POW_VALIDATE_BASIC;
|
||||
log_info("PoW configured in basic validation mode");
|
||||
} else if (strcmp(pow_mode, "disabled") == 0) {
|
||||
g_unified_cache.pow_config.enabled = 0;
|
||||
log_info("PoW validation disabled via configuration");
|
||||
}
|
||||
free((char*)pow_mode); // Free dynamically allocated string
|
||||
} else {
|
||||
// Default to basic mode
|
||||
g_unified_cache.pow_config.validation_flags = NOSTR_POW_VALIDATE_BASIC;
|
||||
log_info("PoW configured in basic validation mode (default)");
|
||||
}
|
||||
|
||||
// Log final configuration
|
||||
char config_msg[512];
|
||||
snprintf(config_msg, sizeof(config_msg),
|
||||
"PoW Configuration: enabled=%s, min_difficulty=%d, validation_flags=0x%x, mode=%s",
|
||||
g_unified_cache.pow_config.enabled ? "true" : "false",
|
||||
g_unified_cache.pow_config.min_pow_difficulty,
|
||||
g_unified_cache.pow_config.validation_flags,
|
||||
g_unified_cache.pow_config.anti_spam_mode ? "anti-spam" :
|
||||
(g_unified_cache.pow_config.validation_flags & NOSTR_POW_VALIDATE_FULL) ? "full" : "basic");
|
||||
log_info(config_msg);
|
||||
|
||||
pthread_mutex_unlock(&g_unified_cache.cache_lock);
|
||||
// Configuration is now handled directly through database queries
|
||||
// No cache initialization needed
|
||||
}
|
||||
|
||||
// Validate event Proof of Work according to NIP-13
|
||||
int validate_event_pow(cJSON* event, char* error_message, size_t error_size) {
|
||||
pthread_mutex_lock(&g_unified_cache.cache_lock);
|
||||
int enabled = g_unified_cache.pow_config.enabled;
|
||||
int min_pow_difficulty = g_unified_cache.pow_config.min_pow_difficulty;
|
||||
int validation_flags = g_unified_cache.pow_config.validation_flags;
|
||||
pthread_mutex_unlock(&g_unified_cache.cache_lock);
|
||||
// Get PoW configuration directly from database
|
||||
int enabled = get_config_bool("pow_enabled", 1);
|
||||
int min_pow_difficulty = get_config_int("pow_min_difficulty", 0);
|
||||
const char* pow_mode = get_config_value("pow_mode");
|
||||
|
||||
// Determine validation flags based on mode
|
||||
int validation_flags = NOSTR_POW_VALIDATE_BASIC; // Default
|
||||
if (pow_mode) {
|
||||
if (strcmp(pow_mode, "strict") == 0) {
|
||||
validation_flags = NOSTR_POW_VALIDATE_ANTI_SPAM | NOSTR_POW_STRICT_FORMAT;
|
||||
} else if (strcmp(pow_mode, "full") == 0) {
|
||||
validation_flags = NOSTR_POW_VALIDATE_FULL;
|
||||
} else if (strcmp(pow_mode, "basic") == 0) {
|
||||
validation_flags = NOSTR_POW_VALIDATE_BASIC;
|
||||
} else if (strcmp(pow_mode, "disabled") == 0) {
|
||||
enabled = 0;
|
||||
}
|
||||
free((char*)pow_mode);
|
||||
}
|
||||
|
||||
if (!enabled) {
|
||||
return 0; // PoW validation disabled
|
||||
@@ -138,54 +92,42 @@ int validate_event_pow(cJSON* event, char* error_message, size_t error_size) {
|
||||
snprintf(error_message, error_size,
|
||||
"pow: insufficient difficulty: %d < %d",
|
||||
pow_result.actual_difficulty, min_pow_difficulty);
|
||||
log_warning("Event rejected: insufficient PoW difficulty");
|
||||
DEBUG_WARN("Event rejected: insufficient PoW difficulty");
|
||||
break;
|
||||
case NOSTR_ERROR_NIP13_NO_NONCE_TAG:
|
||||
// This should not happen with min_difficulty=0 after our check above
|
||||
if (min_pow_difficulty > 0) {
|
||||
snprintf(error_message, error_size, "pow: missing required nonce tag");
|
||||
log_warning("Event rejected: missing nonce tag");
|
||||
DEBUG_WARN("Event rejected: missing nonce tag");
|
||||
} else {
|
||||
return 0; // Allow when min_difficulty=0
|
||||
}
|
||||
break;
|
||||
case NOSTR_ERROR_NIP13_INVALID_NONCE_TAG:
|
||||
snprintf(error_message, error_size, "pow: invalid nonce tag format");
|
||||
log_warning("Event rejected: invalid nonce tag format");
|
||||
DEBUG_WARN("Event rejected: invalid nonce tag format");
|
||||
break;
|
||||
case NOSTR_ERROR_NIP13_TARGET_MISMATCH:
|
||||
snprintf(error_message, error_size,
|
||||
"pow: committed target (%d) lower than minimum (%d)",
|
||||
pow_result.committed_target, min_pow_difficulty);
|
||||
log_warning("Event rejected: committed target too low (anti-spam protection)");
|
||||
DEBUG_WARN("Event rejected: committed target too low (anti-spam protection)");
|
||||
break;
|
||||
case NOSTR_ERROR_NIP13_CALCULATION:
|
||||
snprintf(error_message, error_size, "pow: difficulty calculation failed");
|
||||
log_error("PoW difficulty calculation error");
|
||||
DEBUG_ERROR("PoW difficulty calculation error");
|
||||
break;
|
||||
case NOSTR_ERROR_EVENT_INVALID_ID:
|
||||
snprintf(error_message, error_size, "pow: invalid event ID format");
|
||||
log_warning("Event rejected: invalid event ID for PoW calculation");
|
||||
DEBUG_WARN("Event rejected: invalid event ID for PoW calculation");
|
||||
break;
|
||||
default:
|
||||
snprintf(error_message, error_size, "pow: validation failed - %s",
|
||||
strlen(pow_result.error_detail) > 0 ? pow_result.error_detail : "unknown error");
|
||||
log_warning("Event rejected: PoW validation failed");
|
||||
DEBUG_WARN("Event rejected: PoW validation failed");
|
||||
}
|
||||
return validation_result;
|
||||
}
|
||||
|
||||
// Log successful PoW validation (only if minimum difficulty is required)
|
||||
if (min_pow_difficulty > 0 || pow_result.has_nonce_tag) {
|
||||
char debug_msg[256];
|
||||
snprintf(debug_msg, sizeof(debug_msg),
|
||||
"PoW validated: difficulty=%d, target=%d, nonce=%llu%s",
|
||||
pow_result.actual_difficulty,
|
||||
pow_result.committed_target,
|
||||
(unsigned long long)pow_result.nonce_value,
|
||||
pow_result.has_nonce_tag ? "" : " (no nonce tag)");
|
||||
log_info(debug_msg);
|
||||
}
|
||||
|
||||
return 0; // Success
|
||||
}
|
||||
26
src/nip040.c
26
src/nip040.c
@@ -1,5 +1,6 @@
|
||||
#define _GNU_SOURCE
|
||||
#include <stdio.h>
|
||||
#include "debug.h"
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <time.h>
|
||||
@@ -28,13 +29,9 @@ struct expiration_config g_expiration_config = {
|
||||
.grace_period = 1 // 1 second grace period for testing (was 300)
|
||||
};
|
||||
|
||||
// Forward declarations for logging functions
|
||||
void log_info(const char* message);
|
||||
void log_warning(const char* message);
|
||||
|
||||
// Initialize expiration configuration using configuration system
|
||||
void init_expiration_config() {
|
||||
log_info("Initializing NIP-40 Expiration Timestamp configuration");
|
||||
|
||||
// Get all config values first (without holding mutex to avoid deadlock)
|
||||
int expiration_enabled = get_config_bool("expiration_enabled", 1);
|
||||
@@ -52,19 +49,10 @@ void init_expiration_config() {
|
||||
|
||||
// Validate grace period bounds
|
||||
if (g_expiration_config.grace_period < 0 || g_expiration_config.grace_period > 86400) {
|
||||
log_warning("Invalid grace period, using default of 300 seconds");
|
||||
DEBUG_WARN("Invalid grace period, using default of 300 seconds");
|
||||
g_expiration_config.grace_period = 300;
|
||||
}
|
||||
|
||||
// Log final configuration
|
||||
char config_msg[512];
|
||||
snprintf(config_msg, sizeof(config_msg),
|
||||
"Expiration Configuration: enabled=%s, strict_mode=%s, filter_responses=%s, grace_period=%ld seconds",
|
||||
g_expiration_config.enabled ? "true" : "false",
|
||||
g_expiration_config.strict_mode ? "true" : "false",
|
||||
g_expiration_config.filter_responses ? "true" : "false",
|
||||
g_expiration_config.grace_period);
|
||||
log_info(config_msg);
|
||||
}
|
||||
|
||||
// Extract expiration timestamp from event tags
|
||||
@@ -104,7 +92,7 @@ long extract_expiration_timestamp(cJSON* tags) {
|
||||
char debug_msg[256];
|
||||
snprintf(debug_msg, sizeof(debug_msg),
|
||||
"Ignoring malformed expiration tag value: '%.32s'", value);
|
||||
log_warning(debug_msg);
|
||||
DEBUG_WARN(debug_msg);
|
||||
continue; // Ignore malformed expiration tag
|
||||
}
|
||||
|
||||
@@ -158,14 +146,10 @@ int validate_event_expiration(cJSON* event, char* error_message, size_t error_si
|
||||
snprintf(error_message, error_size,
|
||||
"invalid: event expired (expiration=%ld, current=%ld, grace=%ld)",
|
||||
expiration_ts, (long)current_time, g_expiration_config.grace_period);
|
||||
log_warning("Event rejected: expired timestamp");
|
||||
DEBUG_WARN("Event rejected: expired timestamp");
|
||||
return -1;
|
||||
} else {
|
||||
// In non-strict mode, log but allow expired events
|
||||
char debug_msg[256];
|
||||
snprintf(debug_msg, sizeof(debug_msg),
|
||||
"Accepting expired event (strict_mode disabled)");
|
||||
log_info(debug_msg);
|
||||
// In non-strict mode, allow expired events
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
23
src/nip042.c
23
src/nip042.c
@@ -6,17 +6,13 @@
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
#include <pthread.h>
|
||||
#include "debug.h"
|
||||
#include <cjson/cJSON.h>
|
||||
#include <libwebsockets.h>
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
// Forward declarations for logging functions
|
||||
void log_error(const char* message);
|
||||
void log_info(const char* message);
|
||||
void log_warning(const char* message);
|
||||
void log_success(const char* message);
|
||||
|
||||
// Forward declaration for notice message function
|
||||
void send_notice_message(struct lws* wsi, const char* message);
|
||||
@@ -52,7 +48,7 @@ void send_nip42_auth_challenge(struct lws* wsi, struct per_session_data* pss) {
|
||||
// Generate challenge using existing request_validator function
|
||||
char challenge[65];
|
||||
if (nostr_nip42_generate_challenge(challenge, sizeof(challenge)) != 0) {
|
||||
log_error("Failed to generate NIP-42 challenge");
|
||||
DEBUG_ERROR("Failed to generate NIP-42 challenge");
|
||||
send_notice_message(wsi, "Authentication temporarily unavailable");
|
||||
return;
|
||||
}
|
||||
@@ -83,10 +79,6 @@ void send_nip42_auth_challenge(struct lws* wsi, struct per_session_data* pss) {
|
||||
free(msg_str);
|
||||
}
|
||||
cJSON_Delete(auth_msg);
|
||||
|
||||
char debug_msg[128];
|
||||
snprintf(debug_msg, sizeof(debug_msg), "NIP-42 auth challenge sent: %.16s...", challenge);
|
||||
log_info(debug_msg);
|
||||
}
|
||||
|
||||
// Handle NIP-42 signed authentication event from client
|
||||
@@ -112,7 +104,7 @@ void handle_nip42_auth_signed_event(struct lws* wsi, struct per_session_data* ps
|
||||
if (current_time > challenge_expires) {
|
||||
free(event_json);
|
||||
send_notice_message(wsi, "Authentication challenge expired, please retry");
|
||||
log_warning("NIP-42 authentication failed: challenge expired");
|
||||
DEBUG_WARN("NIP-42 authentication failed: challenge expired");
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -152,18 +144,13 @@ void handle_nip42_auth_signed_event(struct lws* wsi, struct per_session_data* ps
|
||||
pss->auth_challenge_sent = 0;
|
||||
pthread_mutex_unlock(&pss->session_lock);
|
||||
|
||||
char success_msg[256];
|
||||
snprintf(success_msg, sizeof(success_msg),
|
||||
"NIP-42 authentication successful for pubkey: %.16s...", authenticated_pubkey);
|
||||
log_success(success_msg);
|
||||
|
||||
send_notice_message(wsi, "NIP-42 authentication successful");
|
||||
} else {
|
||||
// Authentication failed
|
||||
char error_msg[256];
|
||||
snprintf(error_msg, sizeof(error_msg),
|
||||
"NIP-42 authentication failed (error code: %d)", result);
|
||||
log_warning(error_msg);
|
||||
DEBUG_WARN(error_msg);
|
||||
|
||||
send_notice_message(wsi, "NIP-42 authentication failed - invalid signature or challenge");
|
||||
}
|
||||
@@ -175,6 +162,6 @@ void handle_nip42_auth_challenge_response(struct lws* wsi, struct per_session_da
|
||||
|
||||
// NIP-42 doesn't typically use challenge responses from client to server
|
||||
// This is reserved for potential future use or protocol extensions
|
||||
log_warning("Received unexpected challenge response from client (not part of standard NIP-42 flow)");
|
||||
DEBUG_WARN("Received unexpected challenge response from client (not part of standard NIP-42 flow)");
|
||||
send_notice_message(wsi, "Challenge responses are not supported - please send signed authentication event");
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include "../nostr_core_lib/nostr_core/nip013.h" // NIP-13: Proof of Work
|
||||
#include "../nostr_core_lib/nostr_core/nostr_common.h"
|
||||
#include "../nostr_core_lib/nostr_core/utils.h"
|
||||
#include "debug.h" // C-relay debug system
|
||||
#include "config.h" // C-relay configuration system
|
||||
#include <sqlite3.h>
|
||||
#include <stdio.h>
|
||||
@@ -139,21 +140,6 @@ struct {
|
||||
char reason[500]; // specific reason string
|
||||
} g_last_rule_violation = {0};
|
||||
|
||||
/**
|
||||
* Helper function for consistent debug logging to main relay.log file
|
||||
*/
|
||||
static void validator_debug_log(const char *message) {
|
||||
FILE *relay_log = fopen("relay.log", "a");
|
||||
if (relay_log) {
|
||||
// Use same format as main logging system
|
||||
time_t now = time(NULL);
|
||||
struct tm *tm_info = localtime(&now);
|
||||
char timestamp[20];
|
||||
strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);
|
||||
fprintf(relay_log, "[%s] [DEBUG] %s", timestamp, message);
|
||||
fclose(relay_log);
|
||||
}
|
||||
}
|
||||
|
||||
//=============================================================================
|
||||
// FORWARD DECLARATIONS
|
||||
@@ -188,16 +174,12 @@ int ginxsom_request_validator_init(const char *db_path, const char *app_name) {
|
||||
|
||||
// Initialize nostr_core_lib if not already done
|
||||
if (nostr_crypto_init() != NOSTR_SUCCESS) {
|
||||
validator_debug_log(
|
||||
"VALIDATOR: Failed to initialize nostr crypto system\n");
|
||||
return NOSTR_ERROR_CRYPTO_INIT;
|
||||
}
|
||||
|
||||
// Load initial configuration from database
|
||||
int result = reload_auth_config();
|
||||
if (result != NOSTR_SUCCESS) {
|
||||
validator_debug_log(
|
||||
"VALIDATOR: Failed to load configuration from database\n");
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -215,8 +197,6 @@ int ginxsom_request_validator_init(const char *db_path, const char *app_name) {
|
||||
g_challenge_manager.last_cleanup = time(NULL);
|
||||
|
||||
g_validator_initialized = 1;
|
||||
validator_debug_log(
|
||||
"VALIDATOR: Request validator initialized successfully\n");
|
||||
return NOSTR_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -257,20 +237,17 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
|
||||
|
||||
// 1. Null Pointer Checks - Reject malformed requests instantly
|
||||
if (!json_string || json_length == 0) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 1 FAILED - Null input\n");
|
||||
return NOSTR_ERROR_INVALID_INPUT;
|
||||
}
|
||||
|
||||
// 2. Initialization Check - Verify system is properly initialized
|
||||
if (!g_validator_initialized) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 2 FAILED - Validator not initialized\n");
|
||||
return NOSTR_ERROR_INVALID_INPUT;
|
||||
}
|
||||
|
||||
// 3. Parse JSON string to cJSON event object
|
||||
cJSON *event = cJSON_ParseWithLength(json_string, json_length);
|
||||
if (!event) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 3 FAILED - Failed to parse JSON event\n");
|
||||
return NOSTR_ERROR_INVALID_INPUT;
|
||||
}
|
||||
|
||||
@@ -290,20 +267,14 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
|
||||
!tags || !cJSON_IsArray(tags) ||
|
||||
!content || !cJSON_IsString(content) ||
|
||||
!sig || !cJSON_IsString(sig)) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 4 FAILED - Invalid event structure\n");
|
||||
cJSON_Delete(event);
|
||||
return NOSTR_ERROR_INVALID_INPUT;
|
||||
}
|
||||
|
||||
int event_kind = (int)cJSON_GetNumberValue(kind);
|
||||
|
||||
|
||||
// 5. Check configuration using unified cache
|
||||
int auth_required = nostr_auth_rules_enabled();
|
||||
|
||||
char config_msg[256];
|
||||
sprintf(config_msg, "VALIDATOR_DEBUG: STEP 5 PASSED - Event kind: %d, auth_required: %d\n",
|
||||
event_kind, auth_required);
|
||||
validator_debug_log(config_msg);
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// PHASE 2: NOSTR EVENT VALIDATION
|
||||
@@ -312,39 +283,42 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
|
||||
// 6. Nostr Event Structure Validation using nostr_core_lib
|
||||
int validation_result = nostr_validate_event(event);
|
||||
if (validation_result != NOSTR_SUCCESS) {
|
||||
char validation_msg[256];
|
||||
sprintf(validation_msg, "VALIDATOR_DEBUG: STEP 6 FAILED - NOSTR event validation failed (error=%d)\n",
|
||||
validation_result);
|
||||
validator_debug_log(validation_msg);
|
||||
cJSON_Delete(event);
|
||||
return validation_result;
|
||||
}
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 6 PASSED - Event structure and signature valid\n");
|
||||
|
||||
// 7. Extract pubkey for rule evaluation
|
||||
const char *event_pubkey = cJSON_GetStringValue(pubkey);
|
||||
if (!event_pubkey || strlen(event_pubkey) != 64) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 7 FAILED - Invalid pubkey format\n");
|
||||
cJSON_Delete(event);
|
||||
return NOSTR_ERROR_EVENT_INVALID_PUBKEY;
|
||||
}
|
||||
|
||||
char pubkey_msg[256];
|
||||
sprintf(pubkey_msg, "VALIDATOR_DEBUG: STEP 7 PASSED - Extracted pubkey: %.16s...\n", event_pubkey);
|
||||
validator_debug_log(pubkey_msg);
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// PHASE 3: EVENT KIND SPECIFIC VALIDATION
|
||||
// PHASE 3: ADMIN EVENT BYPASS CHECK
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
// 8. Handle NIP-42 authentication challenge events (kind 22242)
|
||||
// 8. Check if this is a kind 23456 admin event from authorized admin
|
||||
// This must happen AFTER signature validation but BEFORE auth rules
|
||||
if (event_kind == 23456) {
|
||||
const char* admin_pubkey = get_config_value("admin_pubkey");
|
||||
if (admin_pubkey && strcmp(event_pubkey, admin_pubkey) == 0) {
|
||||
// Valid admin event - bypass remaining validation
|
||||
cJSON_Delete(event);
|
||||
return NOSTR_SUCCESS;
|
||||
}
|
||||
// Not from admin - continue with normal validation
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// PHASE 4: EVENT KIND SPECIFIC VALIDATION
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
// 9. Handle NIP-42 authentication challenge events (kind 22242)
|
||||
if (event_kind == 22242) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 8 - Processing NIP-42 challenge response\n");
|
||||
|
||||
// Check NIP-42 mode using unified cache
|
||||
const char* nip42_enabled = get_config_value("nip42_auth_enabled");
|
||||
if (nip42_enabled && strcmp(nip42_enabled, "false") == 0) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 8 FAILED - NIP-42 is disabled\n");
|
||||
free((char*)nip42_enabled);
|
||||
cJSON_Delete(event);
|
||||
return NOSTR_ERROR_NIP42_DISABLED;
|
||||
@@ -353,21 +327,18 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
|
||||
|
||||
// TODO: Implement full NIP-42 challenge validation
|
||||
// For now, accept all valid NIP-42 events
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 8 PASSED - NIP-42 challenge response accepted\n");
|
||||
cJSON_Delete(event);
|
||||
return NOSTR_SUCCESS;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// PHASE 4: AUTHENTICATION RULES (Database Queries)
|
||||
// PHASE 5: AUTHENTICATION RULES (Database Queries)
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
// 9. Check if authentication rules are enabled
|
||||
// 10. Check if authentication rules are enabled
|
||||
if (!auth_required) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 9 - Authentication disabled, skipping database auth rules\n");
|
||||
} else {
|
||||
// 10. Check database authentication rules (only if auth enabled)
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 10 - Checking database authentication rules\n");
|
||||
// 11. Check database authentication rules (only if auth enabled)
|
||||
|
||||
// Create operation string with event kind for more specific rule matching
|
||||
char operation_str[64];
|
||||
@@ -379,69 +350,46 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
|
||||
// If generic event check fails, try specific event kind check
|
||||
rules_result = check_database_auth_rules(event_pubkey, operation_str, NULL);
|
||||
if (rules_result != NOSTR_SUCCESS) {
|
||||
char rules_msg[256];
|
||||
sprintf(rules_msg, "VALIDATOR_DEBUG: STEP 10 FAILED - Database rules denied request (kind=%d)\n", event_kind);
|
||||
validator_debug_log(rules_msg);
|
||||
cJSON_Delete(event);
|
||||
return rules_result;
|
||||
}
|
||||
}
|
||||
|
||||
char rules_success_msg[256];
|
||||
sprintf(rules_success_msg, "VALIDATOR_DEBUG: STEP 10 PASSED - Database rules allow request (kind=%d)\n", event_kind);
|
||||
validator_debug_log(rules_success_msg);
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// PHASE 5: ADDITIONAL VALIDATIONS (C-relay specific)
|
||||
// PHASE 6: ADDITIONAL VALIDATIONS (C-relay specific)
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
// 11. NIP-13 Proof of Work validation
|
||||
pthread_mutex_lock(&g_unified_cache.cache_lock);
|
||||
int pow_enabled = g_unified_cache.pow_config.enabled;
|
||||
int pow_min_difficulty = g_unified_cache.pow_config.min_pow_difficulty;
|
||||
int pow_validation_flags = g_unified_cache.pow_config.validation_flags;
|
||||
pthread_mutex_unlock(&g_unified_cache.cache_lock);
|
||||
|
||||
// 12. NIP-13 Proof of Work validation
|
||||
int pow_enabled = get_config_bool("pow_enabled", 0);
|
||||
int pow_min_difficulty = get_config_int("pow_min_difficulty", 0);
|
||||
int pow_validation_flags = get_config_int("pow_validation_flags", 1);
|
||||
|
||||
if (pow_enabled && pow_min_difficulty > 0) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 11 - Validating NIP-13 Proof of Work\n");
|
||||
|
||||
nostr_pow_result_t pow_result;
|
||||
int pow_validation_result = nostr_validate_pow(event, pow_min_difficulty,
|
||||
pow_validation_flags, &pow_result);
|
||||
|
||||
|
||||
if (pow_validation_result != NOSTR_SUCCESS) {
|
||||
char pow_msg[256];
|
||||
sprintf(pow_msg, "VALIDATOR_DEBUG: STEP 11 FAILED - PoW validation failed (error=%d, difficulty=%d/%d)\n",
|
||||
pow_validation_result, pow_result.actual_difficulty, pow_min_difficulty);
|
||||
validator_debug_log(pow_msg);
|
||||
cJSON_Delete(event);
|
||||
return pow_validation_result;
|
||||
}
|
||||
|
||||
char pow_success_msg[256];
|
||||
sprintf(pow_success_msg, "VALIDATOR_DEBUG: STEP 11 PASSED - PoW validated (difficulty=%d, target=%d)\n",
|
||||
pow_result.actual_difficulty, pow_result.committed_target);
|
||||
validator_debug_log(pow_success_msg);
|
||||
} else {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 11 SKIPPED - PoW validation disabled or min_difficulty=0\n");
|
||||
}
|
||||
|
||||
// 12. NIP-40 Expiration validation
|
||||
// 13. NIP-40 Expiration validation
|
||||
// Always check expiration tags if present (following NIP-40 specification)
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 12 - Starting NIP-40 Expiration validation\n");
|
||||
|
||||
|
||||
cJSON *expiration_tag = NULL;
|
||||
cJSON *tags_array = cJSON_GetObjectItem(event, "tags");
|
||||
|
||||
|
||||
if (tags_array && cJSON_IsArray(tags_array)) {
|
||||
cJSON *tag = NULL;
|
||||
cJSON_ArrayForEach(tag, tags_array) {
|
||||
if (!cJSON_IsArray(tag)) continue;
|
||||
|
||||
|
||||
cJSON *tag_name = cJSON_GetArrayItem(tag, 0);
|
||||
if (!tag_name || !cJSON_IsString(tag_name)) continue;
|
||||
|
||||
|
||||
const char *tag_name_str = cJSON_GetStringValue(tag_name);
|
||||
if (strcmp(tag_name_str, "expiration") == 0) {
|
||||
cJSON *tag_value = cJSON_GetArrayItem(tag, 1);
|
||||
@@ -452,57 +400,40 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (expiration_tag) {
|
||||
const char *expiration_str = cJSON_GetStringValue(expiration_tag);
|
||||
|
||||
|
||||
// Validate that the expiration string contains only digits (and optional leading whitespace)
|
||||
const char* p = expiration_str;
|
||||
|
||||
|
||||
// Skip leading whitespace
|
||||
while (*p == ' ' || *p == '\t') p++;
|
||||
|
||||
|
||||
// Check if we have at least one digit
|
||||
if (*p == '\0') {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 12 SKIPPED - Empty expiration tag value, ignoring\n");
|
||||
} else {
|
||||
// Validate that all remaining characters are digits
|
||||
const char* digit_start = p;
|
||||
while (*p >= '0' && *p <= '9') p++;
|
||||
|
||||
|
||||
// If we didn't consume the entire string or found no digits, it's malformed
|
||||
if (*p != '\0' || p == digit_start) {
|
||||
char malformed_msg[256];
|
||||
sprintf(malformed_msg, "VALIDATOR_DEBUG: STEP 12 SKIPPED - Malformed expiration tag value '%.32s', ignoring\n",
|
||||
expiration_str);
|
||||
validator_debug_log(malformed_msg);
|
||||
} else {
|
||||
// Valid numeric string, parse and check expiration
|
||||
time_t expiration_time = (time_t)atol(expiration_str);
|
||||
time_t now = time(NULL);
|
||||
int grace_period = get_config_int("nip40_expiration_grace_period", 60);
|
||||
|
||||
|
||||
if (expiration_time > 0 && now > expiration_time + grace_period) {
|
||||
char exp_msg[256];
|
||||
sprintf(exp_msg, "VALIDATOR_DEBUG: STEP 12 FAILED - Event expired (now=%ld, exp=%ld, grace=%d)\n",
|
||||
(long)now, (long)expiration_time, grace_period);
|
||||
validator_debug_log(exp_msg);
|
||||
cJSON_Delete(event);
|
||||
return NOSTR_ERROR_EVENT_EXPIRED;
|
||||
}
|
||||
|
||||
char exp_success_msg[256];
|
||||
sprintf(exp_success_msg, "VALIDATOR_DEBUG: STEP 12 PASSED - Event not expired (exp=%ld, now=%ld)\n",
|
||||
(long)expiration_time, (long)now);
|
||||
validator_debug_log(exp_success_msg);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 12 SKIPPED - No expiration tag found\n");
|
||||
}
|
||||
|
||||
// All validations passed
|
||||
validator_debug_log("VALIDATOR_DEBUG: STEP 13 PASSED - All validations complete, event ACCEPTED\n");
|
||||
cJSON_Delete(event);
|
||||
return NOSTR_SUCCESS;
|
||||
}
|
||||
@@ -573,12 +504,10 @@ void nostr_request_result_free_file_data(nostr_request_result_t *result) {
|
||||
|
||||
|
||||
/**
|
||||
* Force cache refresh - use unified cache system
|
||||
* Force cache refresh - cache no longer exists, function kept for compatibility
|
||||
*/
|
||||
void nostr_request_validator_force_cache_refresh(void) {
|
||||
// Use unified cache refresh from config.c
|
||||
force_config_cache_refresh();
|
||||
validator_debug_log("VALIDATOR: Cache forcibly invalidated via unified cache\n");
|
||||
// Cache no longer exists - direct database queries are used
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -586,7 +515,6 @@ void nostr_request_validator_force_cache_refresh(void) {
|
||||
*/
|
||||
static int reload_auth_config(void) {
|
||||
// Configuration is now handled by the unified cache in config.c
|
||||
validator_debug_log("VALIDATOR: Using unified cache system for configuration\n");
|
||||
return NOSTR_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -598,60 +526,45 @@ static int reload_auth_config(void) {
|
||||
* Check database authentication rules for the request
|
||||
* Implements the 6-step rule evaluation engine from AUTH_API.md
|
||||
*/
|
||||
int check_database_auth_rules(const char *pubkey, const char *operation,
|
||||
int check_database_auth_rules(const char *pubkey, const char *operation __attribute__((unused)),
|
||||
const char *resource_hash) {
|
||||
sqlite3 *db = NULL;
|
||||
sqlite3_stmt *stmt = NULL;
|
||||
int rc;
|
||||
|
||||
DEBUG_TRACE("Checking auth rules for pubkey: %s", pubkey);
|
||||
|
||||
if (!pubkey) {
|
||||
validator_debug_log(
|
||||
"VALIDATOR_DEBUG: RULES ENGINE - Missing pubkey for rule evaluation\n");
|
||||
return NOSTR_ERROR_INVALID_INPUT;
|
||||
}
|
||||
|
||||
char rules_msg[256];
|
||||
sprintf(rules_msg,
|
||||
"VALIDATOR_DEBUG: RULES ENGINE - Checking rules for pubkey=%.32s..., "
|
||||
"operation=%s\n",
|
||||
pubkey, operation ? operation : "NULL");
|
||||
validator_debug_log(rules_msg);
|
||||
|
||||
// Open database using global database path
|
||||
if (strlen(g_database_path) == 0) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - No database path available\n");
|
||||
return NOSTR_SUCCESS; // Default allow on DB error
|
||||
}
|
||||
|
||||
|
||||
rc = sqlite3_open_v2(g_database_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||
if (rc != SQLITE_OK) {
|
||||
validator_debug_log(
|
||||
"VALIDATOR_DEBUG: RULES ENGINE - Failed to open database\n");
|
||||
return NOSTR_SUCCESS; // Default allow on DB error
|
||||
}
|
||||
|
||||
// Step 1: Check pubkey blacklist (highest priority)
|
||||
const char *blacklist_sql =
|
||||
"SELECT rule_type, action FROM auth_rules WHERE rule_type = "
|
||||
"'blacklist' AND pattern_type = 'pubkey' AND pattern_value = ? LIMIT 1";
|
||||
"SELECT rule_type FROM auth_rules WHERE rule_type = "
|
||||
"'blacklist' AND pattern_type = 'pubkey' AND pattern_value = ? AND active = 1 LIMIT 1";
|
||||
DEBUG_TRACE("Blacklist SQL: %s", blacklist_sql);
|
||||
rc = sqlite3_prepare_v2(db, blacklist_sql, -1, &stmt, NULL);
|
||||
if (rc == SQLITE_OK) {
|
||||
sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
|
||||
|
||||
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
const char *action = (const char *)sqlite3_column_text(stmt, 1);
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 1 FAILED - "
|
||||
"Pubkey blacklisted\n");
|
||||
char blacklist_msg[256];
|
||||
sprintf(blacklist_msg,
|
||||
"VALIDATOR_DEBUG: RULES ENGINE - Blacklist rule matched: action=%s\n",
|
||||
action ? action : "deny");
|
||||
validator_debug_log(blacklist_msg);
|
||||
int step_result = sqlite3_step(stmt);
|
||||
DEBUG_TRACE("Blacklist query result: %s", step_result == SQLITE_ROW ? "FOUND" : "NOT_FOUND");
|
||||
|
||||
if (step_result == SQLITE_ROW) {
|
||||
DEBUG_TRACE("BLACKLIST HIT: Denying access for pubkey: %s", pubkey);
|
||||
// Set specific violation details for status code mapping
|
||||
strcpy(g_last_rule_violation.violation_type, "pubkey_blacklist");
|
||||
sprintf(g_last_rule_violation.reason, "Public key blacklisted: %s",
|
||||
action ? action : "PUBKEY_BLACKLIST");
|
||||
sprintf(g_last_rule_violation.reason, "Public key blacklisted");
|
||||
|
||||
sqlite3_finalize(stmt);
|
||||
sqlite3_close(db);
|
||||
@@ -659,33 +572,20 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
}
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 1 PASSED - Pubkey "
|
||||
"not blacklisted\n");
|
||||
|
||||
// Step 2: Check hash blacklist
|
||||
if (resource_hash) {
|
||||
const char *hash_blacklist_sql =
|
||||
"SELECT rule_type, action FROM auth_rules WHERE rule_type = "
|
||||
"'blacklist' AND pattern_type = 'hash' AND pattern_value = ? LIMIT 1";
|
||||
"SELECT rule_type FROM auth_rules WHERE rule_type = "
|
||||
"'blacklist' AND pattern_type = 'hash' AND pattern_value = ? AND active = 1 LIMIT 1";
|
||||
rc = sqlite3_prepare_v2(db, hash_blacklist_sql, -1, &stmt, NULL);
|
||||
if (rc == SQLITE_OK) {
|
||||
sqlite3_bind_text(stmt, 1, resource_hash, -1, SQLITE_STATIC);
|
||||
|
||||
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
const char *action = (const char *)sqlite3_column_text(stmt, 1);
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 2 FAILED - "
|
||||
"Hash blacklisted\n");
|
||||
char hash_blacklist_msg[256];
|
||||
sprintf(
|
||||
hash_blacklist_msg,
|
||||
"VALIDATOR_DEBUG: RULES ENGINE - Hash blacklist rule matched: action=%s\n",
|
||||
action ? action : "deny");
|
||||
validator_debug_log(hash_blacklist_msg);
|
||||
|
||||
// Set specific violation details for status code mapping
|
||||
strcpy(g_last_rule_violation.violation_type, "hash_blacklist");
|
||||
sprintf(g_last_rule_violation.reason, "File hash blacklisted: %s",
|
||||
action ? action : "HASH_BLACKLIST");
|
||||
sprintf(g_last_rule_violation.reason, "File hash blacklisted");
|
||||
|
||||
sqlite3_finalize(stmt);
|
||||
sqlite3_close(db);
|
||||
@@ -693,51 +593,33 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
}
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 2 PASSED - Hash "
|
||||
"not blacklisted\n");
|
||||
} else {
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 2 SKIPPED - No "
|
||||
"resource hash provided\n");
|
||||
}
|
||||
|
||||
// Step 3: Check pubkey whitelist
|
||||
const char *whitelist_sql =
|
||||
"SELECT rule_type, action FROM auth_rules WHERE rule_type = "
|
||||
"'whitelist' AND pattern_type = 'pubkey' AND pattern_value = ? LIMIT 1";
|
||||
"SELECT rule_type FROM auth_rules WHERE rule_type = "
|
||||
"'whitelist' AND pattern_type = 'pubkey' AND pattern_value = ? AND active = 1 LIMIT 1";
|
||||
rc = sqlite3_prepare_v2(db, whitelist_sql, -1, &stmt, NULL);
|
||||
if (rc == SQLITE_OK) {
|
||||
sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
|
||||
|
||||
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
const char *action = (const char *)sqlite3_column_text(stmt, 1);
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 PASSED - "
|
||||
"Pubkey whitelisted\n");
|
||||
char whitelist_msg[256];
|
||||
sprintf(whitelist_msg,
|
||||
"VALIDATOR_DEBUG: RULES ENGINE - Whitelist rule matched: action=%s\n",
|
||||
action ? action : "allow");
|
||||
validator_debug_log(whitelist_msg);
|
||||
sqlite3_finalize(stmt);
|
||||
sqlite3_close(db);
|
||||
return NOSTR_SUCCESS; // Allow whitelisted pubkey
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
}
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 FAILED - Pubkey "
|
||||
"not whitelisted\n");
|
||||
|
||||
// Step 4: Check if any whitelist rules exist - if yes, deny by default
|
||||
const char *whitelist_exists_sql =
|
||||
"SELECT COUNT(*) FROM auth_rules WHERE rule_type = 'whitelist' "
|
||||
"AND pattern_type = 'pubkey' LIMIT 1";
|
||||
"AND pattern_type = 'pubkey' AND active = 1 LIMIT 1";
|
||||
rc = sqlite3_prepare_v2(db, whitelist_exists_sql, -1, &stmt, NULL);
|
||||
if (rc == SQLITE_OK) {
|
||||
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||
int whitelist_count = sqlite3_column_int(stmt, 0);
|
||||
if (whitelist_count > 0) {
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 FAILED - "
|
||||
"Whitelist exists but pubkey not in it\n");
|
||||
|
||||
// Set specific violation details for status code mapping
|
||||
strcpy(g_last_rule_violation.violation_type, "whitelist_violation");
|
||||
strcpy(g_last_rule_violation.reason,
|
||||
@@ -750,12 +632,8 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
|
||||
}
|
||||
sqlite3_finalize(stmt);
|
||||
}
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 PASSED - No "
|
||||
"whitelist restrictions apply\n");
|
||||
|
||||
sqlite3_close(db);
|
||||
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 PASSED - All "
|
||||
"rule checks completed, default ALLOW\n");
|
||||
return NOSTR_SUCCESS; // Default allow if no restrictive rules matched
|
||||
}
|
||||
|
||||
@@ -821,11 +699,6 @@ static void cleanup_expired_challenges(void) {
|
||||
}
|
||||
|
||||
g_challenge_manager.last_cleanup = now;
|
||||
|
||||
char cleanup_msg[256];
|
||||
sprintf(cleanup_msg, "NIP-42: Cleaned up challenges, %d active remaining\n",
|
||||
active_count);
|
||||
validator_debug_log(cleanup_msg);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -877,12 +750,6 @@ static int store_challenge(const char *challenge_id, const char *client_ip) {
|
||||
entry->expires_at = now + g_challenge_manager.timeout_seconds;
|
||||
entry->active = 1;
|
||||
|
||||
char store_msg[256];
|
||||
sprintf(store_msg,
|
||||
"NIP-42: Stored challenge %.16s... (expires in %d seconds)\n",
|
||||
challenge_id, g_challenge_manager.timeout_seconds);
|
||||
validator_debug_log(store_msg);
|
||||
|
||||
return NOSTR_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
@@ -142,8 +142,6 @@ CREATE TABLE auth_rules (\n\
|
||||
rule_type TEXT NOT NULL CHECK (rule_type IN ('whitelist', 'blacklist', 'rate_limit', 'auth_required')),\n\
|
||||
pattern_type TEXT NOT NULL CHECK (pattern_type IN ('pubkey', 'kind', 'ip', 'global')),\n\
|
||||
pattern_value TEXT,\n\
|
||||
action TEXT NOT NULL CHECK (action IN ('allow', 'deny', 'require_auth', 'rate_limit')),\n\
|
||||
parameters TEXT, -- JSON parameters for rate limiting, etc.\n\
|
||||
active INTEGER NOT NULL DEFAULT 1,\n\
|
||||
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
|
||||
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))\n\
|
||||
@@ -180,34 +178,6 @@ BEGIN\n\
|
||||
UPDATE config SET updated_at = strftime('%s', 'now') WHERE key = NEW.key;\n\
|
||||
END;\n\
|
||||
\n\
|
||||
-- Insert default configuration values\n\
|
||||
INSERT INTO config (key, value, data_type, description, category, requires_restart) VALUES\n\
|
||||
('relay_description', 'A C Nostr Relay', 'string', 'Relay description', 'general', 0),\n\
|
||||
('relay_contact', '', 'string', 'Relay contact information', 'general', 0),\n\
|
||||
('relay_software', 'https://github.com/laanwj/c-relay', 'string', 'Relay software URL', 'general', 0),\n\
|
||||
('relay_version', '1.0.0', 'string', 'Relay version', 'general', 0),\n\
|
||||
('relay_port', '8888', 'integer', 'Relay port number', 'network', 1),\n\
|
||||
('max_connections', '1000', 'integer', 'Maximum concurrent connections', 'network', 1),\n\
|
||||
('auth_enabled', 'false', 'boolean', 'Enable NIP-42 authentication', 'auth', 0),\n\
|
||||
('nip42_auth_required_events', 'false', 'boolean', 'Require auth for event publishing', 'auth', 0),\n\
|
||||
('nip42_auth_required_subscriptions', 'false', 'boolean', 'Require auth for subscriptions', 'auth', 0),\n\
|
||||
('nip42_auth_required_kinds', '[]', 'json', 'Event kinds requiring authentication', 'auth', 0),\n\
|
||||
('nip42_challenge_expiration', '600', 'integer', 'Auth challenge expiration seconds', 'auth', 0),\n\
|
||||
('pow_min_difficulty', '0', 'integer', 'Minimum proof-of-work difficulty', 'validation', 0),\n\
|
||||
('pow_mode', 'optional', 'string', 'Proof-of-work mode', 'validation', 0),\n\
|
||||
('nip40_expiration_enabled', 'true', 'boolean', 'Enable event expiration', 'validation', 0),\n\
|
||||
('nip40_expiration_strict', 'false', 'boolean', 'Strict expiration mode', 'validation', 0),\n\
|
||||
('nip40_expiration_filter', 'true', 'boolean', 'Filter expired events in queries', 'validation', 0),\n\
|
||||
('nip40_expiration_grace_period', '60', 'integer', 'Expiration grace period seconds', 'validation', 0),\n\
|
||||
('max_subscriptions_per_client', '25', 'integer', 'Maximum subscriptions per client', 'limits', 0),\n\
|
||||
('max_total_subscriptions', '1000', 'integer', 'Maximum total subscriptions', 'limits', 0),\n\
|
||||
('max_filters_per_subscription', '10', 'integer', 'Maximum filters per subscription', 'limits', 0),\n\
|
||||
('max_event_tags', '2000', 'integer', 'Maximum tags per event', 'limits', 0),\n\
|
||||
('max_content_length', '100000', 'integer', 'Maximum event content length', 'limits', 0),\n\
|
||||
('max_message_length', '131072', 'integer', 'Maximum WebSocket message length', 'limits', 0),\n\
|
||||
('default_limit', '100', 'integer', 'Default query limit', 'limits', 0),\n\
|
||||
('max_limit', '5000', 'integer', 'Maximum query limit', 'limits', 0);\n\
|
||||
\n\
|
||||
-- Persistent Subscriptions Logging Tables (Phase 2)\n\
|
||||
-- Optional database logging for subscription analytics and debugging\n\
|
||||
\n\
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -9,6 +9,7 @@
|
||||
#include <stdint.h>
|
||||
#include "../nostr_core_lib/cjson/cJSON.h"
|
||||
#include "config.h" // For CLIENT_IP_MAX_LENGTH
|
||||
#include "websockets.h" // For validation constants
|
||||
|
||||
// Forward declaration for libwebsockets struct
|
||||
struct lws;
|
||||
@@ -18,6 +19,13 @@ struct lws;
|
||||
#define MAX_FILTERS_PER_SUBSCRIPTION 10
|
||||
#define MAX_TOTAL_SUBSCRIPTIONS 5000
|
||||
|
||||
// Validation limits (shared with websockets.h)
|
||||
#define MAX_SEARCH_TERM_LENGTH 256
|
||||
#define MIN_TIMESTAMP 0L
|
||||
#define MAX_TIMESTAMP 4102444800L // 2100-01-01
|
||||
#define MIN_LIMIT 1
|
||||
#define MAX_LIMIT 10000
|
||||
|
||||
// Forward declarations for typedefs
|
||||
typedef struct subscription_filter subscription_filter_t;
|
||||
typedef struct subscription subscription_t;
|
||||
@@ -55,6 +63,16 @@ struct subscription {
|
||||
struct subscription* session_next; // Next subscription for this session
|
||||
};
|
||||
|
||||
// Per-IP connection tracking
|
||||
typedef struct ip_connection_info {
|
||||
char ip_address[CLIENT_IP_MAX_LENGTH]; // IP address
|
||||
int active_connections; // Number of active connections from this IP
|
||||
int total_subscriptions; // Total subscriptions across all connections from this IP
|
||||
time_t first_connection; // When first connection from this IP was established
|
||||
time_t last_activity; // Last activity timestamp from this IP
|
||||
struct ip_connection_info* next; // Next in linked list
|
||||
} ip_connection_info_t;
|
||||
|
||||
// Global subscription manager
|
||||
struct subscription_manager {
|
||||
subscription_t* active_subscriptions; // Head of global subscription list
|
||||
@@ -65,6 +83,10 @@ struct subscription_manager {
|
||||
int max_subscriptions_per_client; // Default: 20
|
||||
int max_total_subscriptions; // Default: 5000
|
||||
|
||||
// Per-IP connection tracking
|
||||
ip_connection_info_t* ip_connections; // Head of per-IP connection list
|
||||
pthread_mutex_t ip_tracking_lock; // Thread safety for IP tracking
|
||||
|
||||
// Statistics
|
||||
uint64_t total_created; // Lifetime subscription count
|
||||
uint64_t total_events_broadcast; // Lifetime event broadcast count
|
||||
@@ -81,6 +103,13 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter);
|
||||
int event_matches_subscription(cJSON* event, subscription_t* subscription);
|
||||
int broadcast_event_to_subscriptions(cJSON* event);
|
||||
|
||||
// Per-IP connection tracking functions
|
||||
ip_connection_info_t* get_or_create_ip_connection(const char* client_ip);
|
||||
void update_ip_connection_activity(const char* client_ip);
|
||||
void remove_ip_connection(const char* client_ip);
|
||||
int get_total_subscriptions_for_ip(const char* client_ip);
|
||||
int get_active_connections_for_ip(const char* client_ip);
|
||||
|
||||
// Database logging functions
|
||||
void log_subscription_created(const subscription_t* sub);
|
||||
void log_subscription_closed(const char* sub_id, const char* client_ip, const char* reason);
|
||||
|
||||
1035
src/websockets.c
1035
src/websockets.c
File diff suppressed because it is too large
Load Diff
@@ -14,7 +14,24 @@
|
||||
#define CHALLENGE_MAX_LENGTH 128
|
||||
#define AUTHENTICATED_PUBKEY_MAX_LENGTH 65 // 64 hex + null
|
||||
|
||||
// Enhanced per-session data with subscription management and NIP-42 authentication
|
||||
// Rate limiting constants for malformed requests
|
||||
#define MAX_MALFORMED_REQUESTS_PER_HOUR 10
|
||||
#define MALFORMED_REQUEST_BLOCK_DURATION 3600 // 1 hour in seconds
|
||||
#define RATE_LIMIT_CLEANUP_INTERVAL 300 // 5 minutes
|
||||
|
||||
// Filter validation constants
|
||||
#define MAX_FILTERS_PER_REQUEST 10
|
||||
#define MAX_AUTHORS_PER_FILTER 100
|
||||
#define MAX_IDS_PER_FILTER 100
|
||||
#define MAX_KINDS_PER_FILTER 50
|
||||
#define MAX_TAG_VALUES_PER_FILTER 100
|
||||
#define MAX_KIND_VALUE 65535
|
||||
#define MAX_TIMESTAMP_VALUE 2147483647 // Max 32-bit signed int
|
||||
#define MAX_LIMIT_VALUE 5000
|
||||
#define MAX_SEARCH_LENGTH 256
|
||||
#define MAX_TAG_VALUE_LENGTH 1024
|
||||
|
||||
// Enhanced per-session data with subscription management, NIP-42 authentication, and rate limiting
|
||||
struct per_session_data {
|
||||
int authenticated;
|
||||
struct subscription* subscriptions; // Head of this session's subscription list
|
||||
@@ -30,6 +47,17 @@ struct per_session_data {
|
||||
int nip42_auth_required_events; // Whether NIP-42 auth is required for EVENT submission
|
||||
int nip42_auth_required_subscriptions; // Whether NIP-42 auth is required for REQ operations
|
||||
int auth_challenge_sent; // Whether challenge has been sent (0/1)
|
||||
|
||||
// Rate limiting for subscription attempts
|
||||
int failed_subscription_attempts; // Count of failed subscription attempts
|
||||
time_t last_failed_attempt; // Timestamp of last failed attempt
|
||||
time_t rate_limit_until; // Time until rate limiting expires
|
||||
int consecutive_failures; // Consecutive failed attempts for backoff
|
||||
|
||||
// Rate limiting for malformed requests
|
||||
int malformed_request_count; // Count of malformed requests in current hour
|
||||
time_t malformed_request_window_start; // Start of current hour window
|
||||
time_t malformed_request_blocked_until; // Time until blocked for malformed requests
|
||||
};
|
||||
|
||||
// NIP-11 HTTP session data structure for managing buffer lifetime
|
||||
|
||||
@@ -9,7 +9,8 @@ Type=simple
|
||||
User=c-relay
|
||||
Group=c-relay
|
||||
WorkingDirectory=/opt/c-relay
|
||||
ExecStart=/opt/c-relay/c_relay_x86
|
||||
Environment=DEBUG_LEVEL=0
|
||||
ExecStart=/opt/c-relay/c_relay_x86 --debug-level=$DEBUG_LEVEL
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
StandardOutput=journal
|
||||
|
||||
@@ -28,7 +28,7 @@ echo "✓ nak command found"
|
||||
|
||||
# Check if relay is running by testing connection
|
||||
echo "Testing relay connection..."
|
||||
if ! timeout 5 bash -c "</dev/tcp/localhost/8888" 2>/dev/null; then
|
||||
if ! timeout 5 nc -z localhost 8888 2>/dev/null; then
|
||||
echo "ERROR: Relay does not appear to be running on localhost:8888"
|
||||
echo "Please start the relay first with: ./make_and_restart_relay.sh"
|
||||
exit 1
|
||||
|
||||
472
tests/README.md
Normal file
472
tests/README.md
Normal file
@@ -0,0 +1,472 @@
|
||||
# C-Relay Comprehensive Testing Framework
|
||||
|
||||
This directory contains a comprehensive testing framework for the C-Relay Nostr relay implementation. The framework provides automated testing for security vulnerabilities, performance validation, and stability assurance.
|
||||
|
||||
## Overview
|
||||
|
||||
The testing framework is designed to validate all critical security fixes and ensure stable operation of the Nostr relay. It includes multiple test suites covering different aspects of relay functionality and security.
|
||||
|
||||
## Test Suites
|
||||
|
||||
### 1. Master Test Runner (`run_all_tests.sh`)
|
||||
The master test runner orchestrates all test suites and provides comprehensive reporting.
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/run_all_tests.sh
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- Automated execution of all test suites
|
||||
- Comprehensive HTML and log reporting
|
||||
- Success/failure tracking across all tests
|
||||
- Relay status validation before testing
|
||||
|
||||
### 2. SQL Injection Tests (`sql_injection_tests.sh`)
|
||||
Comprehensive testing of SQL injection vulnerabilities across all filter types.
|
||||
|
||||
**Tests:**
|
||||
- Classic SQL injection payloads (`'; DROP TABLE; --`)
|
||||
- Union-based injection attacks
|
||||
- Error-based injection attempts
|
||||
- Time-based blind injection
|
||||
- Stacked query attacks
|
||||
- Filter-specific injection (authors, IDs, kinds, search, tags)
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/sql_injection_tests.sh
|
||||
```
|
||||
|
||||
### 3. Memory Corruption Tests (`memory_corruption_tests.sh`)
|
||||
Tests for buffer overflows, use-after-free, and memory safety issues.
|
||||
|
||||
**Tests:**
|
||||
- Malformed subscription IDs (empty, very long, null bytes)
|
||||
- Oversized filter arrays
|
||||
- Concurrent access patterns
|
||||
- Malformed JSON structures
|
||||
- Large message payloads
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/memory_corruption_tests.sh
|
||||
```
|
||||
|
||||
### 4. Input Validation Tests (`input_validation_tests.sh`)
|
||||
Comprehensive boundary condition testing for all input parameters.
|
||||
|
||||
**Tests:**
|
||||
- Message type validation
|
||||
- Message structure validation
|
||||
- Subscription ID boundary tests
|
||||
- Filter object validation
|
||||
- Authors, IDs, kinds, timestamps, limits validation
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/input_validation_tests.sh
|
||||
```
|
||||
|
||||
### 5. Load Testing (`load_tests.sh`)
|
||||
Performance testing under high concurrent connection scenarios.
|
||||
|
||||
**Test Scenarios:**
|
||||
- Light load (10 concurrent clients)
|
||||
- Medium load (25 concurrent clients)
|
||||
- Heavy load (50 concurrent clients)
|
||||
- Stress test (100 concurrent clients)
|
||||
|
||||
**Features:**
|
||||
- Resource monitoring (CPU, memory, connections)
|
||||
- Connection success rate tracking
|
||||
- Message throughput measurement
|
||||
- Relay responsiveness validation
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/load_tests.sh
|
||||
```
|
||||
|
||||
### 6. Authentication Tests (`auth_tests.sh`)
|
||||
Tests NIP-42 authentication mechanisms and access control.
|
||||
|
||||
**Tests:**
|
||||
- Authentication challenge responses
|
||||
- Whitelist/blacklist functionality
|
||||
- Event publishing with auth requirements
|
||||
- Admin API authentication events
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/auth_tests.sh
|
||||
```
|
||||
|
||||
### 7. Rate Limiting Tests (`rate_limiting_tests.sh`)
|
||||
Tests rate limiting and abuse prevention mechanisms.
|
||||
|
||||
**Tests:**
|
||||
- Message rate limiting
|
||||
- Connection rate limiting
|
||||
- Subscription creation limits
|
||||
- Abuse pattern detection
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/rate_limiting_tests.sh
|
||||
```
|
||||
|
||||
### 8. Performance Benchmarks (`performance_benchmarks.sh`)
|
||||
Performance metrics and benchmarking tools.
|
||||
|
||||
**Tests:**
|
||||
- Message throughput measurement
|
||||
- Response time analysis
|
||||
- Memory usage profiling
|
||||
- CPU utilization tracking
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/performance_benchmarks.sh
|
||||
```
|
||||
|
||||
### 9. Resource Monitoring (`resource_monitoring.sh`)
|
||||
System resource usage monitoring during testing.
|
||||
|
||||
**Features:**
|
||||
- Real-time CPU and memory monitoring
|
||||
- Connection count tracking
|
||||
- Database size monitoring
|
||||
- System load analysis
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/resource_monitoring.sh
|
||||
```
|
||||
|
||||
### 10. Configuration Tests (`config_tests.sh`)
|
||||
Tests configuration management and persistence.
|
||||
|
||||
**Tests:**
|
||||
- Configuration event processing
|
||||
- Setting validation and persistence
|
||||
- Admin API configuration commands
|
||||
- Configuration reload behavior
|
||||
|
||||
**Usage:**
|
||||
```bash
|
||||
./tests/config_tests.sh
|
||||
```
|
||||
|
||||
### 11. Existing Test Suites
|
||||
|
||||
#### Filter Validation Tests (`filter_validation_test.sh`)
|
||||
Tests comprehensive input validation for REQ and COUNT messages.
|
||||
|
||||
#### Subscription Limits Tests (`subscription_limits.sh`)
|
||||
Tests subscription limit enforcement and rate limiting.
|
||||
|
||||
#### Subscription Validation Tests (`subscription_validation.sh`)
|
||||
Tests subscription ID handling and memory corruption fixes.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### System Requirements
|
||||
- Linux/macOS environment
|
||||
- `websocat` for WebSocket communication
|
||||
- `bash` shell
|
||||
- Standard Unix tools (`grep`, `awk`, `timeout`, etc.)
|
||||
|
||||
### Installing Dependencies
|
||||
|
||||
#### Ubuntu/Debian:
|
||||
```bash
|
||||
sudo apt-get update
|
||||
sudo apt-get install websocat curl jq
|
||||
```
|
||||
|
||||
#### macOS:
|
||||
```bash
|
||||
brew install websocat curl jq
|
||||
```
|
||||
|
||||
#### Other systems:
|
||||
Download `websocat` from: https://github.com/vi/websocat/releases
|
||||
|
||||
### Relay Setup
|
||||
Before running tests, ensure the C-Relay is running:
|
||||
|
||||
```bash
|
||||
# Build and start the relay
|
||||
./make_and_restart_relay.sh
|
||||
|
||||
# Verify it's running
|
||||
ps aux | grep c_relay
|
||||
curl -H "Accept: application/nostr+json" http://localhost:8888
|
||||
```
|
||||
|
||||
## Running Tests
|
||||
|
||||
### Quick Start
|
||||
1. Start the relay:
|
||||
```bash
|
||||
./make_and_restart_relay.sh
|
||||
```
|
||||
|
||||
2. Run all tests:
|
||||
```bash
|
||||
./tests/run_all_tests.sh
|
||||
```
|
||||
|
||||
### Individual Test Suites
|
||||
Run specific test suites for targeted testing:
|
||||
|
||||
```bash
|
||||
# Security tests
|
||||
./tests/sql_injection_tests.sh
|
||||
./tests/memory_corruption_tests.sh
|
||||
./tests/input_validation_tests.sh
|
||||
|
||||
# Performance tests
|
||||
./tests/load_tests.sh
|
||||
|
||||
# Existing tests
|
||||
./tests/filter_validation_test.sh
|
||||
./tests/subscription_limits.sh
|
||||
./tests/subscription_validation.sh
|
||||
```
|
||||
|
||||
### NIP Protocol Tests
|
||||
Run the existing NIP compliance tests:
|
||||
|
||||
```bash
|
||||
# Run all NIP tests
|
||||
./tests/run_nip_tests.sh
|
||||
|
||||
# Or run individual NIP tests
|
||||
./tests/1_nip_test.sh
|
||||
./tests/11_nip_information.sh
|
||||
./tests/42_nip_test.sh
|
||||
# ... etc
|
||||
```
|
||||
|
||||
## Test Results and Reporting
|
||||
|
||||
### Master Test Runner Output
|
||||
The master test runner (`run_all_tests.sh`) generates:
|
||||
|
||||
1. **Console Output**: Real-time test progress and results
|
||||
2. **Log File**: Detailed execution log (`test_results_YYYYMMDD_HHMMSS.log`)
|
||||
3. **HTML Report**: Comprehensive web report (`test_report_YYYYMMDD_HHMMSS.html`)
|
||||
|
||||
### Individual Test Suite Output
|
||||
Each test suite provides:
|
||||
- Test-by-test results with PASS/FAIL status
|
||||
- Summary statistics (passed/failed/total tests)
|
||||
- Detailed error information for failures
|
||||
|
||||
### Interpreting Results
|
||||
|
||||
#### Security Tests
|
||||
- **PASS**: No vulnerabilities detected
|
||||
- **FAIL**: Potential security issues found
|
||||
- **UNCERTAIN**: Test inconclusive (may need manual verification)
|
||||
|
||||
#### Performance Tests
|
||||
- **Connection Success Rate**: >95% = Excellent, >80% = Good, <80% = Poor
|
||||
- **Resource Usage**: Monitor CPU/memory during load tests
|
||||
- **Relay Responsiveness**: Must remain responsive after all tests
|
||||
|
||||
## Test Configuration
|
||||
|
||||
### Environment Variables
|
||||
Customize test behavior with environment variables:
|
||||
|
||||
```bash
|
||||
# Relay connection settings
|
||||
export RELAY_HOST="127.0.0.1"
|
||||
export RELAY_PORT="8888"
|
||||
|
||||
# Test parameters
|
||||
export TEST_TIMEOUT=10
|
||||
export CONCURRENT_CONNECTIONS=50
|
||||
export MESSAGES_PER_SECOND=100
|
||||
```
|
||||
|
||||
### Test Customization
|
||||
Modify test parameters within individual test scripts:
|
||||
|
||||
- `RELAY_HOST` / `RELAY_PORT`: Relay connection details
|
||||
- `TEST_TIMEOUT`: Individual test timeout (seconds)
|
||||
- `TOTAL_TESTS`: Number of test iterations
|
||||
- Load test parameters in `load_tests.sh`
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
#### "Could not connect to relay"
|
||||
- Ensure relay is running: `./make_and_restart_relay.sh`
|
||||
- Check port availability: `netstat -tln | grep 8888`
|
||||
- Verify relay process: `ps aux | grep c_relay`
|
||||
|
||||
#### "websocat: command not found"
|
||||
- Install websocat: `sudo apt-get install websocat`
|
||||
- Or download from: https://github.com/vi/websocat/releases
|
||||
|
||||
#### Tests timing out
|
||||
- Increase `TEST_TIMEOUT` value
|
||||
- Check system resources (CPU/memory)
|
||||
- Reduce concurrent connections in load tests
|
||||
|
||||
#### High failure rates in load tests
|
||||
- Reduce `CONCURRENT_CONNECTIONS`
|
||||
- Check system ulimits: `ulimit -n`
|
||||
- Monitor system resources during testing
|
||||
|
||||
### Debug Mode
|
||||
Enable verbose output for debugging:
|
||||
|
||||
```bash
|
||||
# Set debug environment variable
|
||||
export DEBUG=1
|
||||
|
||||
# Run tests with verbose output
|
||||
./tests/run_all_tests.sh
|
||||
```
|
||||
|
||||
## Security Testing Methodology
|
||||
|
||||
### SQL Injection Testing
|
||||
- Tests all filter types (authors, IDs, kinds, search, tags)
|
||||
- Uses comprehensive payload library
|
||||
- Validates parameterized query protection
|
||||
- Tests edge cases and boundary conditions
|
||||
|
||||
### Memory Safety Testing
|
||||
- Buffer overflow detection
|
||||
- Use-after-free prevention
|
||||
- Concurrent access validation
|
||||
- Malformed input handling
|
||||
|
||||
### Input Validation Testing
|
||||
- Boundary condition testing
|
||||
- Type validation
|
||||
- Length limit enforcement
|
||||
- Malformed data rejection
|
||||
|
||||
## Performance Benchmarking
|
||||
|
||||
### Load Testing Scenarios
|
||||
1. **Light Load**: Basic functionality validation
|
||||
2. **Medium Load**: Moderate stress testing
|
||||
3. **Heavy Load**: High concurrency validation
|
||||
4. **Stress Test**: Breaking point identification
|
||||
|
||||
### Metrics Collected
|
||||
- Connection success rate
|
||||
- Message throughput
|
||||
- Response times
|
||||
- Resource utilization (CPU, memory)
|
||||
- Relay stability under load
|
||||
|
||||
## Integration with CI/CD
|
||||
|
||||
### Automated Testing
|
||||
Integrate with CI/CD pipelines:
|
||||
|
||||
```yaml
|
||||
# Example GitHub Actions workflow
|
||||
- name: Run C-Relay Tests
|
||||
run: |
|
||||
./make_and_restart_relay.sh
|
||||
./tests/run_all_tests.sh
|
||||
```
|
||||
|
||||
### Test Result Processing
|
||||
Parse test results for automated reporting:
|
||||
|
||||
```bash
|
||||
# Extract test summary
|
||||
grep "Total tests:" test_results_*.log
|
||||
grep "Passed:" test_results_*.log
|
||||
grep "Failed:" test_results_*.log
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
### Adding New Tests
|
||||
1. Create new test script in `tests/` directory
|
||||
2. Follow existing naming conventions
|
||||
3. Add to master test runner in `run_all_tests.sh`
|
||||
4. Update this documentation
|
||||
|
||||
### Test Script Template
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# Test suite description
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
RELAY_HOST="${RELAY_HOST:-127.0.0.1}"
|
||||
RELAY_PORT="${RELAY_PORT:-8888}"
|
||||
|
||||
# Test implementation here
|
||||
|
||||
echo "Test suite completed successfully"
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
### Test Environment
|
||||
- Run tests in isolated environment
|
||||
- Use test relay instance (not production)
|
||||
- Monitor system resources during testing
|
||||
- Clean up test data after completion
|
||||
|
||||
### Sensitive Data
|
||||
- Tests use synthetic data only
|
||||
- No real user data in test payloads
|
||||
- Safe for production system testing
|
||||
|
||||
## Support and Issues
|
||||
|
||||
### Reporting Test Failures
|
||||
When reporting test failures, include:
|
||||
1. Test suite and specific test that failed
|
||||
2. Full error output
|
||||
3. System information (OS, relay version)
|
||||
4. Relay configuration
|
||||
5. Test environment details
|
||||
|
||||
### Getting Help
|
||||
- Check existing issues in the project repository
|
||||
- Review test logs for detailed error information
|
||||
- Validate relay setup and configuration
|
||||
- Test with minimal configuration to isolate issues
|
||||
|
||||
---
|
||||
|
||||
## Test Coverage Summary
|
||||
|
||||
| Test Suite | Security | Performance | Stability | Coverage |
|
||||
|------------|----------|-------------|-----------|----------|
|
||||
| SQL Injection | ✓ | | | All filter types |
|
||||
| Memory Corruption | ✓ | | ✓ | Buffer overflows, race conditions |
|
||||
| Input Validation | ✓ | | | Boundary conditions, type validation |
|
||||
| Load Testing | | ✓ | ✓ | Concurrent connections, resource usage |
|
||||
| Authentication | ✓ | | | NIP-42 auth, whitelist/blacklist |
|
||||
| Rate Limiting | ✓ | ✓ | ✓ | Message rates, abuse prevention |
|
||||
| Performance Benchmarks | | ✓ | | Throughput, response times |
|
||||
| Resource Monitoring | | ✓ | ✓ | CPU/memory usage tracking |
|
||||
| Configuration | ✓ | | ✓ | Admin API, settings persistence |
|
||||
| Filter Validation | ✓ | | | REQ/COUNT message validation |
|
||||
| Subscription Limits | | ✓ | ✓ | Rate limiting, connection limits |
|
||||
| Subscription Validation | ✓ | | ✓ | ID validation, memory safety |
|
||||
|
||||
**Legend:**
|
||||
- ✓ Covered
|
||||
- Performance: Load and throughput testing
|
||||
- Security: Vulnerability and attack vector testing
|
||||
- Stability: Crash prevention and error handling
|
||||
118
tests/auth_tests.sh
Executable file
118
tests/auth_tests.sh
Executable file
File diff suppressed because one or more lines are too long
189
tests/config_tests.sh
Executable file
189
tests/config_tests.sh
Executable file
@@ -0,0 +1,189 @@
|
||||
#!/bin/bash

# Configuration Testing Suite for C-Relay
# Tests configuration management and persistence.
#
# NOTE: 'set -e' is deliberately NOT used here.  The test helpers return
# non-zero on failure so the pass/fail counters and the final summary can
# still run; with 'set -e' the first failing test would abort the whole
# script before the summary is printed.

# Configuration (overridable from the environment, matching the documented
# RELAY_HOST / RELAY_PORT variables and the other test suites)
RELAY_HOST="${RELAY_HOST:-127.0.0.1}"
RELAY_PORT="${RELAY_PORT:-8888}"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Test counters (updated by the test helper functions below)
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
|
||||
|
||||
# Function to test configuration query
|
||||
# Query a configuration value via an admin event (kind 23456) and check that
# the relay's reply contains an expected pattern.
#
#   $1 - human-readable test description
#   $2 - config command to base64-encode into the event content
#   $3 - substring that must appear in the relay's response
#
# Returns 0 on PASS, 1 on FAIL; updates the global test counters.
test_config_query() {
    local description="$1"
    local config_command="$2"
    local expected_pattern="$3"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing $description... "

    # Build the admin event.  NOTE(review): pubkey/sig are placeholders, so a
    # relay that verifies signatures will reject this event - confirm whether
    # the relay under test skips verification for admin events.
    local admin_event
    admin_event=$(cat << EOF
{
    "kind": 23456,
    "content": "$(echo '["'"$config_command"'"]' | base64)",
    "tags": [["p", "relay_pubkey_placeholder"]],
    "created_at": $(date +%s),
    "pubkey": "admin_pubkey_placeholder",
    "sig": "signature_placeholder"
}
EOF
)

    # Send the event and keep the first few reply lines.  Bug fix: the old
    # "|| echo 'TIMEOUT'" fallback could never fire because 'head' is the
    # last command in the pipeline and exits 0 even when timeout kills
    # websocat - an empty capture is the real timeout/no-reply signal.
    local response
    response=$(echo "$admin_event" | timeout 10 websocat -B 1048576 "ws://$RELAY_HOST:$RELAY_PORT" 2>/dev/null | head -3)

    if [[ -z "$response" ]]; then
        echo -e "${RED}FAILED${NC} - No response from relay (timeout or connection refused)"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi

    if [[ "$response" == *"$expected_pattern"* ]]; then
        echo -e "${GREEN}PASSED${NC} - Config query successful"
        PASSED_TESTS=$((PASSED_TESTS + 1))
        return 0
    else
        echo -e "${RED}FAILED${NC} - Expected '$expected_pattern', got: $response"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi
}
|
||||
|
||||
# Function to test configuration setting
|
||||
# Set a configuration value via an admin event (kind 23456) and check that
# the relay acknowledges it with an OK response.
#
#   $1 - human-readable test description
#   $2 - config command to base64-encode into the event content
#   $3 - value to set
#
# Returns 0 on PASS, 1 on FAIL; updates the global test counters.
test_config_setting() {
    local description="$1"
    local config_command="$2"
    local config_value="$3"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing $description... "

    # Build the admin event.  NOTE(review): pubkey/sig are placeholders, so a
    # relay that verifies signatures will reject this event.
    local admin_event
    admin_event=$(cat << EOF
{
    "kind": 23456,
    "content": "$(echo '["'"$config_command"'","'"$config_value"'"]' | base64)",
    "tags": [["p", "relay_pubkey_placeholder"]],
    "created_at": $(date +%s),
    "pubkey": "admin_pubkey_placeholder",
    "sig": "signature_placeholder"
}
EOF
)

    # Send the event and keep the first few reply lines.  Bug fix: the old
    # "|| echo 'TIMEOUT'" fallback could never fire because 'head' is the
    # last command in the pipeline and exits 0 even when timeout kills
    # websocat - an empty capture is the real timeout/no-reply signal.
    local response
    response=$(echo "$admin_event" | timeout 10 websocat -B 1048576 "ws://$RELAY_HOST:$RELAY_PORT" 2>/dev/null | head -3)

    if [[ -z "$response" ]]; then
        echo -e "${RED}FAILED${NC} - No response from relay (timeout or connection refused)"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi

    if [[ "$response" == *"OK"* ]]; then
        echo -e "${GREEN}PASSED${NC} - Config setting accepted"
        PASSED_TESTS=$((PASSED_TESTS + 1))
        return 0
    else
        echo -e "${RED}FAILED${NC} - Config setting rejected: $response"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi
}
|
||||
|
||||
# Function to test NIP-11 relay information
|
||||
# Fetch the relay's NIP-11 information document over plain HTTP and verify
# that it advertises both 'supported_nips' and 'software'.
# Returns 0 on PASS, 1 on FAIL; updates the global test counters.
test_nip11_info() {
    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing NIP-11 relay information... "

    # NIP-11 documents are served when the client sends this Accept header.
    local body
    body=$(curl -s -H "Accept: application/nostr+json" "http://$RELAY_HOST:$RELAY_PORT" 2>/dev/null || echo 'CURL_FAILED')

    # Guard clause: the HTTP request itself failed.
    if [[ "$body" == "CURL_FAILED" ]]; then
        echo -e "${RED}FAILED${NC} - HTTP request failed"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi

    # Both required fields must be present for the document to count.
    if [[ "$body" != *"supported_nips"* || "$body" != *"software"* ]]; then
        echo -e "${RED}FAILED${NC} - NIP-11 information incomplete"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi

    echo -e "${GREEN}PASSED${NC} - NIP-11 information available"
    PASSED_TESTS=$((PASSED_TESTS + 1))
    return 0
}
|
||||
|
||||
echo "=========================================="
echo "C-Relay Configuration Testing Suite"
echo "=========================================="
echo "Testing configuration management at ws://$RELAY_HOST:$RELAY_PORT"
echo ""

# Test basic connectivity
echo "=== Basic Connectivity Test ==="
test_config_query "Basic connectivity" "system_status" "OK"
echo ""

echo "=== NIP-11 Relay Information Tests ==="
test_nip11_info
echo ""

echo "=== Configuration Query Tests ==="
test_config_query "System status query" "system_status" "status"
test_config_query "Configuration query" "auth_query" "all"
echo ""

echo "=== Configuration Setting Tests ==="
test_config_setting "Relay description setting" "relay_description" "Test Relay"
test_config_setting "Max subscriptions setting" "max_subscriptions_per_client" "50"
test_config_setting "PoW difficulty setting" "pow_min_difficulty" "16"
echo ""

echo "=== Configuration Persistence Test ==="
# Bug fix: the dangling 'echo -n "Testing configuration persistence... "'
# was removed - the helpers print their own "Testing ..." lines, so the old
# half-finished line garbled the output.
# Set a configuration value, then query it back after a short delay.
test_config_setting "Set test config" "relay_description" "Persistence Test"
sleep 2  # give the relay a moment to persist the new value
test_config_query "Verify persistence" "system_status" "Persistence Test"
echo ""

echo "=== Test Results ==="
echo "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"

if [[ $FAILED_TESTS -eq 0 ]]; then
    echo -e "${GREEN}✓ All configuration tests passed!${NC}"
    echo "Configuration management is working correctly."
    exit 0
else
    echo -e "${RED}✗ Some configuration tests failed!${NC}"
    echo "Configuration management may have issues."
    exit 1
fi
|
||||
48
tests/debug_perf.sh
Executable file
48
tests/debug_perf.sh
Executable file
@@ -0,0 +1,48 @@
|
||||
#!/bin/bash

# Debug harness for performance_benchmarks.sh: exercises benchmark_request
# once directly, then drives it from a background subshell for ~3 seconds
# the same way the real load-test clients do.

source ./performance_benchmarks.sh

echo "Testing benchmark_request function..."
result=$(benchmark_request '["REQ","test",{}]')
echo "Result: $result"

echo "Testing full client subprocess..."
(
    start_ts=$(date +%s)
    req_count=0
    total_ms=0
    ok_count=0
    min_ms=999999
    max_ms=0

    # Keep issuing requests until 3 seconds have elapsed.
    while [[ $(($(date +%s) - start_ts)) -lt 3 ]]; do
        result=$(benchmark_request '["REQ","test",{}]')
        # benchmark_request reports "response_time_ms:success_flag".
        IFS=':' read -r elapsed ok <<< "$result"

        total_ms=$((total_ms + elapsed))
        req_count=$((req_count + 1))

        if [[ "$ok" == "1" ]]; then
            ok_count=$((ok_count + 1))
        fi

        # Track best and worst response times seen so far.
        if [[ $elapsed -lt $min_ms ]]; then
            min_ms=$elapsed
        fi
        if [[ $elapsed -gt $max_ms ]]; then
            max_ms=$elapsed
        fi

        echo "Request $req_count: ${elapsed}ms, success=$ok"
        sleep 0.1
    done

    # Final summary line in the same colon-separated format the load tests parse.
    echo "$req_count:$ok_count:$total_ms:$min_ms:$max_ms"
) &
pid=$!
echo "Waiting for client..."
wait "$pid"
echo "Client finished."
|
||||
242
tests/filter_validation_test.sh
Executable file
242
tests/filter_validation_test.sh
Executable file
@@ -0,0 +1,242 @@
|
||||
#!/bin/bash

# Filter Validation Test Script for C-Relay
# Tests comprehensive input validation for REQ and COUNT messages.
#
# NOTE: 'set -e' is deliberately NOT used here.  The test helpers return
# non-zero on failure so the pass/fail counters and the final summary can
# still run; with 'set -e' the first failing test would abort the whole
# script before the summary is printed.

# Configuration (overridable from the environment, matching the documented
# RELAY_HOST / RELAY_PORT / TEST_TIMEOUT variables)
RELAY_HOST="${RELAY_HOST:-127.0.0.1}"
RELAY_PORT="${RELAY_PORT:-8888}"
TEST_TIMEOUT="${TEST_TIMEOUT:-5}"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Test counters (updated by the test helper functions below)
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
|
||||
|
||||
# Function to send WebSocket message and check response
|
||||
# Send one message to the relay and require a specific error substring in
# the reply.
#
#   $1 - test description
#   $2 - raw JSON message to send
#   $3 - error substring expected in the relay's response
#   $4 - optional message-type label (currently unused; kept so existing
#        callers that pass it keep working)
#
# Returns 0 on PASS, 1 on FAIL; updates the global test counters.
test_websocket_message() {
    local description="$1"
    local message="$2"
    local expected_error="$3"
    local test_type="${4:-REQ}"  # reserved for future per-type handling

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing $description... "

    # If websocat cannot connect, or is killed by timeout, the fallback
    # appends CONNECTION_FAILED to whatever was captured.  Bug fix: the old
    # separate check for a literal "TIMEOUT" response was unreachable -
    # nothing in this pipeline ever emits that marker.
    local response
    response=$(echo "$message" | timeout $TEST_TIMEOUT websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null || echo 'CONNECTION_FAILED')

    # Check the expected error first: a timeout that fires after the relay
    # already sent its error NOTICE still counts as a pass.
    if [[ "$response" == *"$expected_error"* ]]; then
        echo -e "${GREEN}PASSED${NC}"
        PASSED_TESTS=$((PASSED_TESTS + 1))
        return 0
    fi

    # Substring match (not exact) so a partial reply followed by the
    # connection-failure marker is still reported as a connection problem.
    if [[ "$response" == *"CONNECTION_FAILED"* ]]; then
        echo -e "${RED}FAILED${NC} - Could not connect to relay"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi

    echo -e "${RED}FAILED${NC} - Expected error '$expected_error', got: $response"
    FAILED_TESTS=$((FAILED_TESTS + 1))
    return 1
}
|
||||
|
||||
# Function to test valid message (should not produce error)
|
||||
# Send one message that should be ACCEPTED and verify the relay replies
# without an error notice.
#
#   $1 - test description
#   $2 - raw JSON message to send
#
# Returns 0 on PASS, 1 on FAIL; updates the global test counters.
test_valid_message() {
    local description="$1"
    local message="$2"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing $description... "

    # Capture only the first reply line.  Bug fix: the old "|| echo 'TIMEOUT'"
    # fallback could never fire because 'head' is the last command in the
    # pipeline and exits 0 even when timeout kills websocat.
    local response
    response=$(echo "$message" | timeout $TEST_TIMEOUT websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1)

    # Bug fix: an empty response used to count as PASSED because it contains
    # no "error:" text - so with the relay down, every valid-message test
    # silently passed.  Treat no reply as a failure.
    if [[ -z "$response" ]]; then
        echo -e "${RED}FAILED${NC} - No response from relay (timeout or connection refused)"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi

    # Valid messages should not produce error notices.
    if [[ "$response" != *"error:"* ]]; then
        echo -e "${GREEN}PASSED${NC}"
        PASSED_TESTS=$((PASSED_TESTS + 1))
        return 0
    else
        echo -e "${RED}FAILED${NC} - Unexpected error in response: $response"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    fi
}
|
||||
|
||||
echo "=== C-Relay Filter Validation Tests ==="
|
||||
echo "Testing against relay at ws://$RELAY_HOST:$RELAY_PORT"
|
||||
echo
|
||||
|
||||
# Test 1: Valid REQ message
|
||||
test_valid_message "Valid REQ message" '["REQ","test-sub",{}]'
|
||||
|
||||
# Test 2: Valid COUNT message
|
||||
test_valid_message "Valid COUNT message" '["COUNT","test-count",{}]'
|
||||
|
||||
echo
|
||||
echo "=== Testing Filter Array Validation ==="
|
||||
|
||||
# Test 3: Non-object filter
|
||||
test_websocket_message "Non-object filter" '["REQ","sub1","not-an-object"]' "error: filter 0 is not an object"
|
||||
|
||||
# Test 4: Too many filters
|
||||
test_websocket_message "Too many filters" '["REQ","sub1",{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{}]' "error: too many filters"
|
||||
|
||||
echo
|
||||
echo "=== Testing Authors Validation ==="
|
||||
|
||||
# Test 5: Invalid author (not string)
|
||||
test_websocket_message "Invalid author type" '["REQ","sub1",{"authors":[123]}]' "error: author"
|
||||
|
||||
# Test 6: Invalid author hex
|
||||
test_websocket_message "Invalid author hex" '["REQ","sub1",{"authors":["invalid-hex"]}]' "error: invalid author hex string"
|
||||
|
||||
# Test 7: Too many authors
|
||||
test_websocket_message "Too many authors" '["REQ","sub1",{"authors":["a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a"]}]' "error: too many authors"
|
||||
|
||||
echo
|
||||
echo "=== Testing IDs Validation ==="
|
||||
|
||||
# Test 8: Invalid ID type
|
||||
test_websocket_message "Invalid ID type" '["REQ","sub1",{"ids":[123]}]' "error: id"
|
||||
|
||||
# Test 9: Invalid ID hex
|
||||
test_websocket_message "Invalid ID hex" '["REQ","sub1",{"ids":["invalid-hex"]}]' "error: invalid id hex string"
|
||||
|
||||
# Test 10: Too many IDs
|
||||
test_websocket_message "Too many IDs" '["REQ","sub1",{"ids":["a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a","a"]}]' "error: too many ids"
|
||||
|
||||
echo
|
||||
echo "=== Testing Kinds Validation ==="
|
||||
|
||||
# Test 11: Invalid kind type
|
||||
test_websocket_message "Invalid kind type" '["REQ","sub1",{"kinds":["1"]}]' "error: kind"
|
||||
|
||||
# Test 12: Negative kind
|
||||
test_websocket_message "Negative kind" '["REQ","sub1",{"kinds":[-1]}]' "error: invalid kind value"
|
||||
|
||||
# Test 13: Too large kind
|
||||
test_websocket_message "Too large kind" '["REQ","sub1",{"kinds":[70000]}]' "error: invalid kind value"
|
||||
|
||||
# Test 14: Too many kinds
|
||||
test_websocket_message "Too many kinds" '["REQ","sub1",{"kinds":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52]}]' "error: too many kinds"
|
||||
|
||||
echo
|
||||
echo "=== Testing Timestamp Validation ==="
|
||||
|
||||
# Test 15: Invalid since type
|
||||
test_websocket_message "Invalid since type" '["REQ","sub1",{"since":"123"}]' "error: since must be a number"
|
||||
|
||||
# Test 16: Negative since
|
||||
test_websocket_message "Negative since" '["REQ","sub1",{"since":-1}]' "error: invalid since timestamp"
|
||||
|
||||
# Test 17: Invalid until type
|
||||
test_websocket_message "Invalid until type" '["REQ","sub1",{"until":"123"}]' "error: until must be a number"
|
||||
|
||||
# Test 18: Negative until
|
||||
test_websocket_message "Negative until" '["REQ","sub1",{"until":-1}]' "error: invalid until timestamp"
|
||||
|
||||
echo
|
||||
echo "=== Testing Limit Validation ==="
|
||||
|
||||
# Test 19: Invalid limit type
|
||||
test_websocket_message "Invalid limit type" '["REQ","sub1",{"limit":"10"}]' "error: limit must be a number"
|
||||
|
||||
# Test 20: Negative limit
|
||||
test_websocket_message "Negative limit" '["REQ","sub1",{"limit":-1}]' "error: invalid limit value"
|
||||
|
||||
# Test 21: Too large limit
|
||||
test_websocket_message "Too large limit" '["REQ","sub1",{"limit":10000}]' "error: invalid limit value"
|
||||
|
||||
echo
|
||||
echo "=== Testing Search Validation ==="
|
||||
|
||||
# Test 22: Invalid search type
|
||||
test_websocket_message "Invalid search type" '["REQ","sub1",{"search":123}]' "error: search must be a string"
|
||||
|
||||
# Test 23: Search too long
|
||||
test_websocket_message "Search too long" '["REQ","sub1",{"search":"'$(printf 'a%.0s' {1..257})'"}]' "error: search term too long"
|
||||
|
||||
# Test 24: Search with SQL injection
|
||||
test_websocket_message "Search SQL injection" '["REQ","sub1",{"search":"test; DROP TABLE users;"}]' "error: invalid characters in search term"
|
||||
|
||||
echo
|
||||
echo "=== Testing Tag Filter Validation ==="
|
||||
|
||||
# Test 25: Invalid tag filter type
|
||||
test_websocket_message "Invalid tag filter type" '["REQ","sub1",{"#e":"not-an-array"}]' "error: #e must be an array"
|
||||
|
||||
# Test 26: Too many tag values
|
||||
test_websocket_message "Too many tag values" '["REQ","sub1",{"#e":['$(printf '"a%.0s",' {1..101})'"a"]}]' "error: too many #e values"
|
||||
|
||||
# Test 27: Tag value too long
|
||||
test_websocket_message "Tag value too long" '["REQ","sub1",{"#e":["'$(printf 'a%.0s' {1..1025})'"]}]' "error: #e value too long"
|
||||
|
||||
echo
|
||||
echo "=== Testing Rate Limiting ==="
|
||||
|
||||
# Test 28: Send multiple malformed requests to trigger rate limiting
|
||||
echo -n "Testing rate limiting with malformed requests... "
|
||||
rate_limit_triggered=false
|
||||
for i in {1..15}; do
|
||||
response=$(timeout 2 bash -c "
|
||||
echo '["REQ","sub-malformed'$i'",[{"authors":["invalid"]}]]' | websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1
|
||||
" 2>/dev/null || echo 'TIMEOUT')
|
||||
|
||||
if [[ "$response" == *"too many malformed requests"* ]]; then
|
||||
rate_limit_triggered=true
|
||||
break
|
||||
fi
|
||||
sleep 0.1
|
||||
done
|
||||
|
||||
TOTAL_TESTS=$((TOTAL_TESTS + 1))
|
||||
if [[ "$rate_limit_triggered" == true ]]; then
|
||||
echo -e "${GREEN}PASSED${NC}"
|
||||
PASSED_TESTS=$((PASSED_TESTS + 1))
|
||||
else
|
||||
echo -e "${YELLOW}UNCERTAIN${NC} - Rate limiting may not have triggered (this could be normal)"
|
||||
PASSED_TESTS=$((PASSED_TESTS + 1)) # Count as passed since it's not a failure
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "=== Test Results ==="
|
||||
echo "Total tests: $TOTAL_TESTS"
|
||||
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
|
||||
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"
|
||||
|
||||
if [[ $FAILED_TESTS -eq 0 ]]; then
|
||||
echo -e "${GREEN}All tests passed!${NC}"
|
||||
exit 0
|
||||
else
|
||||
echo -e "${RED}Some tests failed.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
181
tests/input_validation_tests.sh
Executable file
181
tests/input_validation_tests.sh
Executable file
@@ -0,0 +1,181 @@
|
||||
#!/bin/bash

# Input Validation Test Suite for C-Relay
# Comprehensive testing of input boundary conditions and malformed data.
#
# NOTE: 'set -e' is deliberately NOT used here.  The test helper returns
# non-zero on failure so the pass/fail counters and the final summary can
# still run; with 'set -e' the first failing test would abort the whole
# script before the summary is printed.

# Configuration (overridable from the environment, matching the documented
# RELAY_HOST / RELAY_PORT / TEST_TIMEOUT variables)
RELAY_HOST="${RELAY_HOST:-127.0.0.1}"
RELAY_PORT="${RELAY_PORT:-8888}"
TEST_TIMEOUT="${TEST_TIMEOUT:-10}"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Test counters (updated by test_input_validation)
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
|
||||
|
||||
# Function to test input validation
|
||||
# Send one message and check the relay's handling against expectations.
#
#   $1 - test description
#   $2 - raw JSON message to send
#   $3 - "true" if the input is valid and must be accepted (default "false",
#        meaning the input is malformed and must be rejected)
#
# Returns 0 on PASS, 1 on FAIL; updates the global test counters.
test_input_validation() {
    local description="$1"
    local message="$2"
    local expect_success="${3:-false}"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing $description... "

    # Capture the first few reply lines.  Bug fix: the old "|| echo 'TIMEOUT'"
    # fallback could never fire because 'head' ends the pipeline and exits 0
    # even when timeout kills websocat - an empty capture is the real
    # timeout / connection-closed signal and is handled per-branch below.
    local response
    response=$(echo "$message" | timeout $TEST_TIMEOUT websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -3)

    if [[ "$expect_success" == "true" ]]; then
        # Valid input must get EOSE, EVENT, COUNT, or a non-error NOTICE.
        # An empty response (relay down or timed out) falls through to FAILED.
        if [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]] || [[ "$response" == *"COUNT"* ]] || [[ "$response" == *"NOTICE"* && ! "$response" == *"error:"* ]]; then
            echo -e "${GREEN}PASSED${NC} - Input accepted correctly"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        else
            echo -e "${RED}FAILED${NC} - Valid input rejected: $response"
            FAILED_TESTS=$((FAILED_TESTS + 1))
            return 1
        fi
    else
        # Invalid input should yield an error NOTICE, any NOTICE, or a closed
        # connection (empty response).  Bug fix: the old checks for literal
        # "TIMEOUT" / "CONNECTION_FAILED" markers were dead code - nothing in
        # the pipeline above can produce either string.
        if [[ "$response" == *"error:"* ]] || [[ "$response" == *"NOTICE"* ]] || [[ -z "$response" ]]; then
            echo -e "${GREEN}PASSED${NC} - Invalid input properly rejected"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        else
            echo -e "${RED}FAILED${NC} - Invalid input not rejected: $response"
            FAILED_TESTS=$((FAILED_TESTS + 1))
            return 1
        fi
    fi
}
|
||||
|
||||
echo "=========================================="
|
||||
echo "C-Relay Input Validation Test Suite"
|
||||
echo "=========================================="
|
||||
echo "Testing against relay at ws://$RELAY_HOST:$RELAY_PORT"
|
||||
echo
|
||||
|
||||
# Test basic connectivity first
|
||||
echo "=== Basic Connectivity Test ==="
|
||||
test_input_validation "Basic connectivity" '["REQ","basic_test",{}]' true
|
||||
echo
|
||||
|
||||
echo "=== Message Type Validation ==="
|
||||
# Test invalid message types
|
||||
test_input_validation "Invalid message type - string" '["INVALID","test",{}]' false
|
||||
test_input_validation "Invalid message type - number" '[123,"test",{}]' false
|
||||
test_input_validation "Invalid message type - null" '[null,"test",{}]' false
|
||||
test_input_validation "Invalid message type - object" '[{"type":"invalid"},"test",{}]' false
|
||||
test_input_validation "Empty message type" '["","test",{}]' false
|
||||
test_input_validation "Very long message type" '["'$(printf 'a%.0s' {1..1000})'","test",{}]' false
|
||||
echo
|
||||
|
||||
echo "=== Message Structure Validation ==="
|
||||
# Test malformed message structures
|
||||
test_input_validation "Too few arguments" '["REQ"]' false
|
||||
test_input_validation "Too many arguments" '["REQ","test",{}, "extra"]' false
|
||||
test_input_validation "Non-array message" '"not an array"' false
|
||||
test_input_validation "Empty array" '[]' false
|
||||
test_input_validation "Nested arrays incorrectly" '[["REQ","test",{}]]' false
|
||||
echo
|
||||
|
||||
echo "=== Subscription ID Boundary Tests ==="
|
||||
# Test subscription ID limits
|
||||
test_input_validation "Valid subscription ID" '["REQ","valid_sub_123",{}]' true
|
||||
test_input_validation "Empty subscription ID" '["REQ","",{}]' false
|
||||
test_input_validation "Subscription ID with spaces" '["REQ","sub with spaces",{}]' false
|
||||
test_input_validation "Subscription ID with newlines" '["REQ","sub\nwith\nlines",{}]' false
|
||||
test_input_validation "Subscription ID with tabs" '["REQ","sub\twith\ttabs",{}]' false
|
||||
test_input_validation "Subscription ID with control chars" '["REQ","sub\x01\x02",{}]' false
|
||||
test_input_validation "Unicode subscription ID" '["REQ","test🚀",{}]' false
|
||||
test_input_validation "Very long subscription ID" '["REQ","'$(printf 'a%.0s' {1..200})'",{}]' false
|
||||
echo
|
||||
|
||||
echo "=== Filter Object Validation ==="
|
||||
# Test filter object structure
|
||||
test_input_validation "Valid empty filter" '["REQ","test",{}]' true
|
||||
test_input_validation "Non-object filter" '["REQ","test","not an object"]' false
|
||||
test_input_validation "Null filter" '["REQ","test",null]' false
|
||||
test_input_validation "Array filter" '["REQ","test",[]]' false
|
||||
test_input_validation "Filter with invalid keys" '["REQ","test",{"invalid_key":"value"}]' true
|
||||
echo
|
||||
|
||||
echo "=== Authors Field Validation ==="
|
||||
# Test authors field with valid 64-char hex pubkey
|
||||
test_input_validation "Valid authors array" '["REQ","test",{"authors":["0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"]}]' true
|
||||
test_input_validation "Empty authors array" '["REQ","test",{"authors":[]}]' true
|
||||
test_input_validation "Non-array authors" '["REQ","test",{"authors":"not an array"}]' false
|
||||
test_input_validation "Invalid hex in authors" '["REQ","test",{"authors":["invalid_hex"]}]' false
|
||||
test_input_validation "Short pubkey in authors" '["REQ","test",{"authors":["0123456789abcdef"]}]' false
|
||||
echo
|
||||
|
||||
echo "=== IDs Field Validation ==="
|
||||
# Test ids field
|
||||
test_input_validation "Valid ids array" '["REQ","test",{"ids":["0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"]}]' true
|
||||
test_input_validation "Empty ids array" '["REQ","test",{"ids":[]}]' true
|
||||
test_input_validation "Non-array ids" '["REQ","test",{"ids":"not an array"}]' false
|
||||
echo
|
||||
|
||||
echo "=== Kinds Field Validation ==="
|
||||
# Test kinds field
|
||||
test_input_validation "Valid kinds array" '["REQ","test",{"kinds":[1,2,3]}]' true
|
||||
test_input_validation "Empty kinds array" '["REQ","test",{"kinds":[]}]' true
|
||||
test_input_validation "Non-array kinds" '["REQ","test",{"kinds":"not an array"}]' false
|
||||
test_input_validation "String in kinds" '["REQ","test",{"kinds":["1"]}]' false
|
||||
echo
|
||||
|
||||
echo "=== Timestamp Field Validation ==="
|
||||
# Test timestamp fields
|
||||
test_input_validation "Valid since timestamp" '["REQ","test",{"since":1234567890}]' true
|
||||
test_input_validation "Valid until timestamp" '["REQ","test",{"until":1234567890}]' true
|
||||
test_input_validation "String since timestamp" '["REQ","test",{"since":"1234567890"}]' false
|
||||
test_input_validation "Negative timestamp" '["REQ","test",{"since":-1}]' false
|
||||
echo
|
||||
|
||||
echo "=== Limit Field Validation ==="
|
||||
# Test limit field
|
||||
test_input_validation "Valid limit" '["REQ","test",{"limit":100}]' true
|
||||
test_input_validation "Zero limit" '["REQ","test",{"limit":0}]' true
|
||||
test_input_validation "String limit" '["REQ","test",{"limit":"100"}]' false
|
||||
test_input_validation "Negative limit" '["REQ","test",{"limit":-1}]' false
|
||||
echo
|
||||
|
||||
echo "=== Multiple Filters ==="
|
||||
# Test multiple filters
|
||||
test_input_validation "Two valid filters" '["REQ","test",{"kinds":[1]},{"kinds":[2]}]' true
|
||||
test_input_validation "Many filters" '["REQ","test",{},{},{},{},{}]' true
|
||||
echo
|
||||
|
||||
echo "=== Test Results ==="
|
||||
echo "Total tests: $TOTAL_TESTS"
|
||||
echo "Passed: $PASSED_TESTS"
|
||||
echo "Failed: $FAILED_TESTS"
|
||||
|
||||
if [ $FAILED_TESTS -eq 0 ]; then
|
||||
echo -e "${GREEN}✓ All input validation tests passed!${NC}"
|
||||
echo "The relay properly validates input."
|
||||
exit 0
|
||||
else
|
||||
echo -e "${RED}✗ Some input validation tests failed${NC}"
|
||||
echo "The relay may have input validation issues."
|
||||
exit 1
|
||||
fi
|
||||
239
tests/load_tests.sh
Executable file
239
tests/load_tests.sh
Executable file
@@ -0,0 +1,239 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Load Testing Suite for C-Relay
|
||||
# Tests high concurrent connection scenarios and performance under load
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
RELAY_HOST="127.0.0.1"
|
||||
RELAY_PORT="8888"
|
||||
TEST_DURATION=30 # seconds
|
||||
CONCURRENT_CONNECTIONS=50
|
||||
MESSAGES_PER_SECOND=100
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Metrics tracking
|
||||
TOTAL_CONNECTIONS=0
|
||||
SUCCESSFUL_CONNECTIONS=0
|
||||
FAILED_CONNECTIONS=0
|
||||
TOTAL_MESSAGES_SENT=0
|
||||
TOTAL_MESSAGES_RECEIVED=0
|
||||
START_TIME=""
|
||||
END_TIME=""
|
||||
|
||||
# Function to run a single client connection
|
||||
# Simulate one websocket client: send N REQ messages then a CLOSE,
# and report "sent:received:success" on stdout.
#
# $1 = client id (used in subscription names), $2 = messages to send (default 10)
run_client() {
    local client_id="$1"
    local messages_to_send="${2:-10}"

    local connection_successful=false

    # Temp file to capture the relay's responses for this client.
    local temp_file
    temp_file=$(mktemp)

    # Bug fix: guard the pipeline with `|| exit_code=$?`. Under `set -e`
    # a non-zero websocat exit would previously abort the whole script
    # before the exit code could be inspected.
    local exit_code=0
    (
        for i in $(seq 1 "$messages_to_send"); do
            echo '["REQ","load_test_'"$client_id"'_'"$i"'",{}]'
            # Small delay to avoid overwhelming the relay.
            sleep 0.01
        done
        # Send CLOSE message.
        echo '["CLOSE","load_test_'"$client_id"'_*"]'
    ) | timeout 30 websocat -B 1048576 "ws://$RELAY_HOST:$RELAY_PORT" > "$temp_file" 2>/dev/null || exit_code=$?

    # Exit code 0 means the connection completed normally.
    if [[ $exit_code -eq 0 ]]; then
        connection_successful=true
    fi

    # Rough count of relay responses (EOSE/EVENT/NOTICE lines).
    local response_count
    response_count=$(grep -c "EOSE\|EVENT\|NOTICE" "$temp_file" 2>/dev/null || echo "0")

    rm -f "$temp_file"

    # Result format consumed by run_load_test: sent:received:success
    echo "$messages_to_send:$response_count:$connection_successful"
}
|
||||
|
||||
# Function to monitor system resources
|
||||
# Periodically print CPU, memory, and relay-port connection stats.
# $1 = total seconds to monitor, $2 = seconds between samples (default 1).
monitor_resources() {
    local run_for="$1"
    local pause="${2:-1}"

    echo "=== Resource Monitoring ==="
    echo "Monitoring system resources for ${run_for}s..."

    # Loop until this wall-clock deadline.
    local deadline=$(( $(date +%s) + run_for ))

    while (( $(date +%s) < deadline )); do
        # Overall CPU utilisation, derived from top's idle percentage.
        local cpu_usage
        cpu_usage=$(top -bn1 | grep "Cpu(s)" | sed "s/.*, *\([0-9.]*\)%* id.*/\1/" | awk '{print 100 - $1}')

        # Memory utilisation as used/total percent.
        local mem_usage
        mem_usage=$(free | grep Mem | awk '{printf "%.2f", $3/$2 * 100.0}')

        # TCP connections touching the relay port.
        local connections
        connections=$(netstat -t | grep -c ":$RELAY_PORT")

        echo "$(date '+%H:%M:%S') - CPU: ${cpu_usage}%, MEM: ${mem_usage}%, Connections: $connections"

        sleep "$pause"
    done
}
|
||||
|
||||
# Function to run load test
|
||||
# Run one load scenario: launch clients, aggregate their "sent:received:success"
# results, and report connection/message metrics. Returns 1 if the relay is no
# longer responsive afterwards.
#
# $1 = test name, $2 = description,
# $3 = number of clients, $4 = messages each client sends
run_load_test() {
    local test_name="$1"
    local description="$2"
    local concurrent_clients="$3"
    local messages_per_client="$4"

    echo "=========================================="
    echo "Load Test: $test_name"
    echo "Description: $description"
    echo "Concurrent clients: $concurrent_clients"
    echo "Messages per client: $messages_per_client"
    echo "=========================================="

    START_TIME=$(date +%s)

    # Reset script-level counters for this scenario.
    SUCCESSFUL_CONNECTIONS=0
    FAILED_CONNECTIONS=0
    TOTAL_MESSAGES_SENT=0
    TOTAL_MESSAGES_RECEIVED=0

    # Launch clients sequentially for now (simpler debugging).
    local client_results=()

    echo "Launching $concurrent_clients clients..."

    local i result
    for i in $(seq 1 "$concurrent_clients"); do
        result=$(run_client "$i" "$messages_per_client")
        client_results+=("$result")
        TOTAL_CONNECTIONS=$((TOTAL_CONNECTIONS + 1))
    done

    echo "All clients completed. Processing results..."

    END_TIME=$(date +%s)
    local duration=$((END_TIME - START_TIME))

    # Tally per-client results. Loop variables are local so they do not
    # leak into the script's global namespace (they previously did).
    local successful_connections=0
    local failed_connections=0
    local total_messages_sent=0
    local total_messages_received=0

    local messages_sent messages_received connection_successful
    for result in "${client_results[@]}"; do
        IFS=':' read -r messages_sent messages_received connection_successful <<< "$result"
        if [[ "$connection_successful" == "true" ]]; then
            successful_connections=$((successful_connections + 1))
        else
            failed_connections=$((failed_connections + 1))
        fi
        total_messages_sent=$((total_messages_sent + messages_sent))
        total_messages_received=$((total_messages_received + messages_received))
    done

    # Bug fix: publish the tallies into the script-level counters, which
    # were previously reset above but never updated anywhere.
    SUCCESSFUL_CONNECTIONS=$successful_connections
    FAILED_CONNECTIONS=$failed_connections
    TOTAL_MESSAGES_SENT=$total_messages_sent
    TOTAL_MESSAGES_RECEIVED=$total_messages_received

    # Derived metrics.
    local total_messages_expected=$((concurrent_clients * messages_per_client))
    local connection_success_rate=0
    if [[ $TOTAL_CONNECTIONS -gt 0 ]]; then
        connection_success_rate=$((successful_connections * 100 / TOTAL_CONNECTIONS))
    fi

    # Report results.
    echo ""
    echo "=== Load Test Results ==="
    echo "Test duration: ${duration}s"
    echo "Total connections attempted: $TOTAL_CONNECTIONS"
    echo "Successful connections: $successful_connections"
    echo "Failed connections: $failed_connections"
    echo "Connection success rate: ${connection_success_rate}%"
    echo "Messages expected: $total_messages_expected"
    echo "Messages sent: $total_messages_sent"
    echo "Messages received: $total_messages_received"

    # Qualitative assessment of the connection success rate.
    if [[ $connection_success_rate -ge 95 ]]; then
        echo -e "${GREEN}✓ EXCELLENT: High connection success rate${NC}"
    elif [[ $connection_success_rate -ge 80 ]]; then
        echo -e "${YELLOW}⚠ GOOD: Acceptable connection success rate${NC}"
    else
        echo -e "${RED}✗ POOR: Low connection success rate${NC}"
    fi

    # Make sure the relay survived the load.
    echo ""
    echo -n "Checking relay responsiveness... "
    if echo 'ping' | timeout 5 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1; then
        echo -e "${GREEN}✓ Relay is still responsive${NC}"
    else
        echo -e "${RED}✗ Relay became unresponsive after load test${NC}"
        return 1
    fi
}
|
||||
|
||||
# Only run main code if script is executed directly (not sourced)
|
||||
# Entry point: run the full load-test matrix when executed directly
# (skipped when this file is sourced by another test script).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    printf '%s\n' \
        "==========================================" \
        "C-Relay Load Testing Suite" \
        "==========================================" \
        "Testing against relay at ws://$RELAY_HOST:$RELAY_PORT" \
        ""

    # Bail out early if the relay is not reachable at all.
    printf '%s\n' "=== Basic Connectivity Test ==="
    if ! echo 'ping' | timeout 5 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1; then
        echo -e "${RED}✗ Cannot connect to relay. Aborting tests.${NC}"
        exit 1
    fi
    echo -e "${GREEN}✓ Relay is accessible${NC}"
    printf '\n'

    # Scenarios of increasing intensity.
    run_load_test "Light Load Test" "Basic load test with moderate concurrent connections" 10 5
    printf '\n'
    run_load_test "Medium Load Test" "Moderate load test with higher concurrency" 25 10
    printf '\n'
    run_load_test "Heavy Load Test" "Heavy load test with high concurrency" 50 20
    printf '\n'
    run_load_test "Stress Test" "Maximum load test to find breaking point" 100 50
    printf '\n'

    printf '%s\n' \
        "==========================================" \
        "Load Testing Complete" \
        "==========================================" \
        "All load tests completed. Check individual test results above." \
        "If any tests failed, the relay may need optimization or have resource limits."
fi
|
||||
199
tests/memory_corruption_tests.sh
Executable file
199
tests/memory_corruption_tests.sh
Executable file
@@ -0,0 +1,199 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Memory Corruption Detection Test Suite for C-Relay
|
||||
# Tests for buffer overflows, use-after-free, and memory safety issues
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
RELAY_HOST="127.0.0.1"
|
||||
RELAY_PORT="8888"
|
||||
TEST_TIMEOUT=15
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Test counters
|
||||
TOTAL_TESTS=0
|
||||
PASSED_TESTS=0
|
||||
FAILED_TESTS=0
|
||||
|
||||
# Function to test for memory corruption (buffer overflows, crashes, etc.)
|
||||
# Send one message to the relay and check for signs of memory corruption:
# a crash (relay unresponsive afterwards), an unexpected connection
# failure, or a pathologically slow response.
#
# $1 = description, $2 = raw message to send,
# $3 = "true" if a connection failure/error is the expected outcome
test_memory_safety() {
    local description="$1"
    local message="$2"
    local expect_error="${3:-false}"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing $description... "

    # Time the request. Declarations are split from assignments (SC2155):
    # `local x=$(cmd)` always returns 0 and silently masks failures.
    # The `|| echo` keeps `set -e` from aborting on a failed send.
    local start_time end_time response
    start_time=$(date +%s%N)
    response=$(echo "$message" | timeout $TEST_TIMEOUT websocat ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'CONNECTION_FAILED')
    end_time=$(date +%s%N)

    # Probe the relay with a fresh REQ to see whether it survived the test.
    local relay_status ping_response
    ping_response=$(echo '["REQ","ping_test_'$RANDOM'",{}]' | timeout 2 websocat ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || true)
    if [[ -n "$ping_response" ]]; then
        relay_status="OK"
    else
        relay_status="DOWN"
    fi

    # Nanoseconds -> milliseconds; a rough indicator of processing issues.
    local response_time=$(( (end_time - start_time) / 1000000 ))

    if [[ "$response" == "CONNECTION_FAILED" ]]; then
        if [[ "$expect_error" == "true" ]]; then
            echo -e "${GREEN}PASSED${NC} - Expected connection failure"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        else
            echo -e "${RED}FAILED${NC} - Unexpected connection failure"
            FAILED_TESTS=$((FAILED_TESTS + 1))
            return 1
        fi
    elif [[ "$relay_status" != "OK" ]]; then
        echo -e "${RED}FAILED${NC} - Relay crashed or became unresponsive after test"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    elif [[ $response_time -gt 5000 ]]; then  # more than 5 seconds
        echo -e "${YELLOW}SUSPICIOUS${NC} - Very slow response (${response_time}ms), possible DoS"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    else
        if [[ "$expect_error" == "true" ]]; then
            echo -e "${YELLOW}UNCERTAIN${NC} - Expected error but got normal response"
            # No crash either way, so count as passed.
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        else
            echo -e "${GREEN}PASSED${NC} - No memory corruption detected"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        fi
    fi
}
|
||||
|
||||
# Function to test concurrent access patterns
|
||||
# Fire the same message from several concurrent connections and verify the
# relay neither crashes nor drops requests (race-condition smoke test).
#
# $1 = description, $2 = message each client sends,
# $3 = number of concurrent clients (default 5)
test_concurrent_access() {
    local description="$1"
    local message="$2"
    local concurrent_count="${3:-5}"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing $description... "

    # Launch the clients in background subshells.
    # (The unused `results` array from the original was removed.)
    local pids=()
    local i
    for i in $(seq 1 "$concurrent_count"); do
        (
            local response
            response=$(echo "$message" | timeout $TEST_TIMEOUT websocat ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'FAILED')
            echo "$response"
        ) &
        pids+=($!)
    done

    # Wait for every client; count the ones that exited non-zero.
    local failed_count=0
    local pid
    for pid in "${pids[@]}"; do
        wait "$pid" 2>/dev/null || failed_count=$((failed_count + 1))
    done

    # Probe the relay to confirm it is still alive.
    local relay_status ping_response
    ping_response=$(echo '["REQ","ping_test_'$RANDOM'",{}]' | timeout 2 websocat ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || true)
    if [[ -n "$ping_response" ]]; then
        relay_status="OK"
    else
        relay_status="DOWN"
    fi

    if [[ "$relay_status" != "OK" ]]; then
        echo -e "${RED}FAILED${NC} - Relay crashed during concurrent access"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    elif [[ $failed_count -gt 0 ]]; then
        echo -e "${YELLOW}PARTIAL${NC} - Some concurrent requests failed ($failed_count/$concurrent_count)"
        FAILED_TESTS=$((FAILED_TESTS + 1))
        return 1
    else
        echo -e "${GREEN}PASSED${NC} - Concurrent access handled safely"
        PASSED_TESTS=$((PASSED_TESTS + 1))
        return 0
    fi
}
|
||||
|
||||
# Banner.
printf '%s\n' \
    "==========================================" \
    "C-Relay Memory Corruption Test Suite" \
    "==========================================" \
    "Testing against relay at ws://$RELAY_HOST:$RELAY_PORT" \
    "Note: These tests may cause the relay to crash if vulnerabilities exist" \
    ""

# Make sure the relay answers at all before probing it.
printf '%s\n' "=== Basic Connectivity Test ==="
test_memory_safety "Basic connectivity" '["REQ","basic_test",{}]'
printf '\n'

# Malformed subscription IDs that could trigger buffer overflows.
printf '%s\n' "=== Subscription ID Memory Corruption Tests ==="
test_memory_safety "Empty subscription ID" '["REQ","",{}]' true
test_memory_safety "Very long subscription ID (1KB)" '["REQ","'$(printf 'a%.0s' {1..1024})'",{}]' true
test_memory_safety "Very long subscription ID (10KB)" '["REQ","'$(printf 'a%.0s' {1..10240})'",{}]' true
test_memory_safety "Subscription ID with null bytes" '["REQ","test\x00injection",{}]' true
test_memory_safety "Subscription ID with special chars" '["REQ","test@#$%^&*()",{}]' true
test_memory_safety "Unicode subscription ID" '["REQ","test🚀💣🔥",{}]' true
test_memory_safety "Subscription ID with path traversal" '["REQ","../../../etc/passwd",{}]' true
printf '\n'

# Oversized filter arrays (kept modest to limit output noise).
printf '%s\n' "=== Filter Array Memory Corruption Tests ==="
test_memory_safety "Too many filters (50)" '["REQ","test_many_filters",{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{} ,{}]' true
printf '\n'

# Race-condition probes.
printf '%s\n' "=== Concurrent Access Memory Tests ==="
test_concurrent_access "Concurrent subscription creation" '["REQ","concurrent_'$(date +%s%N)'",{}]' 10
test_concurrent_access "Concurrent CLOSE operations" '["CLOSE","test_sub"]' 10
printf '\n'

# Broken JSON that may confuse the parser.
printf '%s\n' "=== Malformed JSON Memory Tests ==="
test_memory_safety "Unclosed JSON object" '["REQ","test",{' true
test_memory_safety "Mismatched brackets" '["REQ","test"]' true
test_memory_safety "Extra closing brackets" '["REQ","test",{}]]' true
test_memory_safety "Null bytes in JSON" '["REQ","test\x00",{}]' true
printf '\n'

# Oversized payloads that may overflow fixed buffers.
printf '%s\n' "=== Large Message Memory Tests ==="
test_memory_safety "Very large filter array" '["REQ","large_test",{"authors":['$(printf '"test%.0s",' {1..1000})'"test"]}]' true
test_memory_safety "Very long search term" '["REQ","search_test",{"search":"'$(printf 'a%.0s' {1..10000})'"}]' true
printf '\n'

# Summary and exit status.
printf '%s\n' "=== Test Results ==="
printf '%s\n' "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"

if [[ $FAILED_TESTS -ne 0 ]]; then
    echo -e "${RED}✗ Memory corruption vulnerabilities detected!${NC}"
    printf '%s\n' "The relay may be vulnerable to memory corruption attacks." "Failed tests: $FAILED_TESTS"
    exit 1
fi

echo -e "${GREEN}✓ All memory corruption tests passed!${NC}"
printf '%s\n' "The relay appears to handle memory safely."
exit 0
|
||||
279
tests/performance_benchmarks.sh
Executable file
279
tests/performance_benchmarks.sh
Executable file
@@ -0,0 +1,279 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Performance Benchmarking Suite for C-Relay
|
||||
# Measures performance metrics and throughput
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
RELAY_HOST="127.0.0.1"
|
||||
RELAY_PORT="8888"
|
||||
BENCHMARK_DURATION=30 # seconds
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Metrics tracking
|
||||
TOTAL_REQUESTS=0
|
||||
SUCCESSFUL_REQUESTS=0
|
||||
FAILED_REQUESTS=0
|
||||
TOTAL_RESPONSE_TIME=0
|
||||
MIN_RESPONSE_TIME=999999
|
||||
MAX_RESPONSE_TIME=0
|
||||
|
||||
# Function to benchmark single request
|
||||
# Time a single request/response round-trip against the relay.
# Prints "response_time_ms:success_flag"; success_flag is 1 when the relay
# answered with EOSE/EVENT/OK, else 0.
benchmark_request() {
    local message="$1"

    local t0 t1 reply
    t0=$(date +%s%N)
    reply=$(echo "$message" | timeout 5 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'TIMEOUT')
    t1=$(date +%s%N)

    # Nanoseconds -> milliseconds.
    local elapsed_ms=$(( (t1 - t0) / 1000000 ))

    local ok=0
    case "$reply" in
        *EOSE*|*EVENT*|*OK*) ok=1 ;;
    esac

    # Return: response_time:success
    echo "$elapsed_ms:$ok"
}
|
||||
|
||||
# Function to run throughput benchmark
|
||||
# Drive sustained concurrent traffic at the relay and report throughput,
# latency, and success-rate metrics.
#
# $1 = test name, $2 = message template sent by every client,
# $3 = concurrent clients (default 10), $4 = duration in seconds
run_throughput_benchmark() {
    local test_name="$1"
    local message="$2"
    local concurrent_clients="${3:-10}"
    local test_duration="${4:-$BENCHMARK_DURATION}"

    echo "=========================================="
    echo "Throughput Benchmark: $test_name"
    echo "=========================================="
    echo "Concurrent clients: $concurrent_clients"
    echo "Duration: ${test_duration}s"
    echo ""

    # Reset script-level metrics.
    TOTAL_REQUESTS=0
    SUCCESSFUL_REQUESTS=0
    FAILED_REQUESTS=0
    TOTAL_RESPONSE_TIME=0
    MIN_RESPONSE_TIME=999999
    MAX_RESPONSE_TIME=0

    local start_time
    start_time=$(date +%s)

    # Each background client writes its result line to its own temp file.
    # Bug fix: the previous implementation used `result=$(wait "$pid")`,
    # but `wait` produces no output, so every client result was empty and
    # all aggregated metrics were wrong.
    local result_dir
    result_dir=$(mktemp -d)

    local pids=()
    local i
    for i in $(seq 1 "$concurrent_clients"); do
        (
            local client_start
            client_start=$(date +%s)
            local client_requests=0
            local client_total_response_time=0
            local client_successful_requests=0
            local client_min_time=999999
            local client_max_time=0

            while [[ $(($(date +%s) - client_start)) -lt test_duration ]]; do
                local result response_time success
                result=$(benchmark_request "$message")
                IFS=':' read -r response_time success <<< "$result"

                client_total_response_time=$((client_total_response_time + response_time))
                client_requests=$((client_requests + 1))

                if [[ "$success" == "1" ]]; then
                    client_successful_requests=$((client_successful_requests + 1))
                fi
                if [[ $response_time -lt client_min_time ]]; then
                    client_min_time=$response_time
                fi
                if [[ $response_time -gt client_max_time ]]; then
                    client_max_time=$response_time
                fi

                # Small delay to prevent overwhelming the relay.
                sleep 0.01
            done

            # requests:successful:total_response_time:min_time:max_time
            echo "$client_requests:$client_successful_requests:$client_total_response_time:$client_min_time:$client_max_time" > "$result_dir/$i"
        ) &
        pids+=($!)
    done

    # Wait for all clients, then read their result files.
    local pid
    for pid in "${pids[@]}"; do
        wait "$pid" 2>/dev/null || true
    done

    local client_results=()
    for i in $(seq 1 "$concurrent_clients"); do
        if [[ -f "$result_dir/$i" ]]; then
            client_results+=("$(cat "$result_dir/$i")")
        fi
    done
    rm -rf "$result_dir"

    local end_time
    end_time=$(date +%s)
    local actual_duration=$((end_time - start_time))

    # Aggregate per-client metrics.
    local total_requests=0
    local successful_requests=0
    local total_response_time=0
    local min_response_time=999999
    local max_response_time=0

    local client_result client_requests client_successful client_total_time client_min_time client_max_time
    for client_result in "${client_results[@]}"; do
        IFS=':' read -r client_requests client_successful client_total_time client_min_time client_max_time <<< "$client_result"

        total_requests=$((total_requests + client_requests))
        successful_requests=$((successful_requests + client_successful))
        total_response_time=$((total_response_time + client_total_time))

        if [[ $client_min_time -lt min_response_time ]]; then
            min_response_time=$client_min_time
        fi
        if [[ $client_max_time -gt max_response_time ]]; then
            max_response_time=$client_max_time
        fi
    done

    # Derived metrics; "N/A" is kept when a denominator is zero.
    local avg_response_time="N/A"
    if [[ $successful_requests -gt 0 ]]; then
        avg_response_time="$((total_response_time / successful_requests))ms"
    fi

    local requests_per_second="N/A"
    if [[ $actual_duration -gt 0 ]]; then
        requests_per_second="$((total_requests / actual_duration))"
    fi

    local success_rate="N/A"
    if [[ $total_requests -gt 0 ]]; then
        success_rate="$((successful_requests * 100 / total_requests))%"
    fi

    local failed_requests=$((total_requests - successful_requests))

    # Report results.
    echo "=== Benchmark Results ==="
    echo "Total requests: $total_requests"
    echo "Successful requests: $successful_requests"
    echo "Failed requests: $failed_requests"
    echo "Success rate: $success_rate"
    echo "Requests per second: $requests_per_second"
    echo "Average response time: $avg_response_time"
    echo "Min response time: ${min_response_time}ms"
    echo "Max response time: ${max_response_time}ms"
    echo "Actual duration: ${actual_duration}s"
    echo ""

    # Bug fix: guard the numeric comparison — requests_per_second can be the
    # string "N/A", which made the old `-gt` tests raise arithmetic errors.
    if [[ "$requests_per_second" == "N/A" ]]; then
        echo -e "${RED}✗ LOW throughput${NC}"
    elif [[ $requests_per_second -gt 1000 ]]; then
        echo -e "${GREEN}✓ EXCELLENT throughput${NC}"
    elif [[ $requests_per_second -gt 500 ]]; then
        echo -e "${GREEN}✓ GOOD throughput${NC}"
    elif [[ $requests_per_second -gt 100 ]]; then
        echo -e "${YELLOW}⚠ MODERATE throughput${NC}"
    else
        echo -e "${RED}✗ LOW throughput${NC}"
    fi
}
|
||||
|
||||
# Function to benchmark memory usage patterns
|
||||
# Track the relay's resident memory (RSS column from ps, in KB) while opening
# and closing an increasing number of subscriptions.
benchmark_memory_usage() {
    echo "=========================================="
    echo "Memory Usage Benchmark"
    echo "=========================================="

    # Bug fix: default to 0 when no relay process is found — otherwise the
    # arithmetic below fails on an empty string and `set -e` aborts.
    local initial_memory
    initial_memory=$(ps aux | grep c_relay | grep -v grep | awk '{print $6}' | head -1)
    initial_memory=${initial_memory:-0}

    echo "Initial memory usage: ${initial_memory}KB"

    # Ramp up the number of live subscriptions and sample memory each step.
    local i j
    for i in 10 25 50 100; do
        echo -n "Testing with $i concurrent subscriptions... "

        # Create subscriptions in background clients.
        for j in $(seq 1 "$i"); do
            echo "[\"REQ\",\"mem_test_${j}\",{}]" | timeout 2 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 &
        done

        sleep 2

        local current_memory
        current_memory=$(ps aux | grep c_relay | grep -v grep | awk '{print $6}' | head -1)
        current_memory=${current_memory:-0}
        local memory_increase=$((current_memory - initial_memory))

        echo "${current_memory}KB (+${memory_increase}KB)"

        # Tear the subscriptions back down before the next step.
        for j in $(seq 1 "$i"); do
            echo "[\"CLOSE\",\"mem_test_${j}\"]" | timeout 2 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 &
        done

        sleep 1
    done

    local final_memory
    final_memory=$(ps aux | grep c_relay | grep -v grep | awk '{print $6}' | head -1)
    final_memory=${final_memory:-0}
    echo "Final memory usage: ${final_memory}KB"
}
|
||||
|
||||
# Only run main code if script is executed directly (not sourced)
|
||||
# Entry point: run all benchmarks when executed directly (not sourced).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    printf '%s\n' \
        "==========================================" \
        "C-Relay Performance Benchmarking Suite" \
        "==========================================" \
        "Benchmarking relay at ws://$RELAY_HOST:$RELAY_PORT" \
        ""

    # Abort early when the relay cannot be reached.
    printf '%s\n' "=== Connectivity Test ==="
    connectivity_result=$(benchmark_request '["REQ","bench_test",{}]')
    IFS=':' read -r response_time success <<< "$connectivity_result"
    if [[ "$success" != "1" ]]; then
        echo -e "${RED}Cannot connect to relay. Aborting benchmarks.${NC}"
        exit 1
    fi
    echo -e "${GREEN}✓ Relay is accessible${NC}"
    printf '\n'

    # Throughput scenarios of increasing load/complexity.
    run_throughput_benchmark "Simple REQ Throughput" '["REQ","throughput_'$(date +%s%N)'",{}]' 10 15
    printf '\n'
    run_throughput_benchmark "Complex Filter Throughput" '["REQ","complex_'$(date +%s%N)'",{"kinds":[1,2,3],"#e":["test"],"limit":10}]' 10 15
    printf '\n'
    run_throughput_benchmark "COUNT Message Throughput" '["REQ","count_'$(date +%s%N)'",{}]' 10 15
    printf '\n'
    run_throughput_benchmark "High Load Throughput" '["REQ","high_load_'$(date +%s%N)'",{}]' 25 20
    printf '\n'

    # Memory growth under subscription churn.
    benchmark_memory_usage
    printf '\n'

    printf '%s\n' \
        "==========================================" \
        "Benchmarking Complete" \
        "==========================================" \
        "Performance benchmarks completed. Review results above for optimization opportunities."
fi
|
||||
40
tests/post_events.sh
Executable file
40
tests/post_events.sh
Executable file
@@ -0,0 +1,40 @@
|
||||
#!/bin/bash

# Post a kind-1 test event to the relay once per second, rotating through
# three hard-coded secret keys. Each event's content embeds the current
# timestamp. Stop with Ctrl+C.

# Secret keys cycled round-robin.
SECRET_KEYS=(
    "3fdd8227a920c2385559400b2b14e464f22e80df312a73cc7a86e1d7e91d608f"
    "a156011cd65b71f84b4a488ac81687f2aed57e490b31c28f58195d787030db60"
    "1618aaa21f5bd45c5ffede0d9a60556db67d4a046900e5f66b0bae5c01c801fb"
)

RELAY_URL="ws://localhost:8888"
KEY_INDEX=0

printf '%s\n' "Starting event posting test to $RELAY_URL" "Press Ctrl+C to stop"

while :; do
    TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S UTC")
    CURRENT_KEY=${SECRET_KEYS[$KEY_INDEX]}
    CONTENT="Test event at $TIMESTAMP"

    echo "[$TIMESTAMP] Posting event with key ${KEY_INDEX}: ${CURRENT_KEY:0:16}..."

    # Sign and publish via the `nak` CLI.
    nak event -c "$CONTENT" --sec "$CURRENT_KEY" "$RELAY_URL"

    # Rotate to the next key, wrapping around.
    KEY_INDEX=$(( (KEY_INDEX + 1) % ${#SECRET_KEYS[@]} ))

    sleep 1
done
|
||||
203
tests/rate_limiting_tests.sh
Executable file
203
tests/rate_limiting_tests.sh
Executable file
@@ -0,0 +1,203 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Rate Limiting Test Suite for C-Relay
|
||||
# Tests rate limiting and abuse prevention mechanisms
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
RELAY_HOST="127.0.0.1"
|
||||
RELAY_PORT="8888"
|
||||
TEST_TIMEOUT=15
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Test counters
|
||||
TOTAL_TESTS=0
|
||||
PASSED_TESTS=0
|
||||
FAILED_TESTS=0
|
||||
|
||||
# Function to test rate limiting
|
||||
# Send a burst of identical messages and check whether the relay starts
# rate-limiting them.
#
# $1 = description, $2 = message, $3 = burst size (default 10),
# $4 = "true" when rate limiting is the expected outcome
test_rate_limiting() {
    local description="$1"
    local message="$2"
    local burst_count="${3:-10}"
    local expected_limited="${4:-false}"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing $description... "

    local rate_limited=false
    local success_count=0
    local error_count=0

    # Bug fix: increments use `var=$((var + 1))` instead of `((var++))` —
    # the latter returns a non-zero status when the variable is 0, which
    # aborted the entire script under `set -e` on the first increment.
    local i response
    for i in $(seq 1 "$burst_count"); do
        response=$(echo "$message" | timeout 2 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'TIMEOUT')

        if [[ "$response" == *"rate limit"* ]] || [[ "$response" == *"too many"* ]] || [[ "$response" == *"TOO_MANY"* ]]; then
            rate_limited=true
        elif [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]] || [[ "$response" == *"OK"* ]]; then
            success_count=$((success_count + 1))
        else
            error_count=$((error_count + 1))
        fi

        # Small delay between requests.
        sleep 0.05
    done

    if [[ "$expected_limited" == "true" ]]; then
        if [[ "$rate_limited" == "true" ]]; then
            echo -e "${GREEN}PASSED${NC} - Rate limiting triggered as expected"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        else
            echo -e "${RED}FAILED${NC} - Rate limiting not triggered (expected)"
            FAILED_TESTS=$((FAILED_TESTS + 1))
            return 1
        fi
    else
        if [[ "$rate_limited" == "false" ]]; then
            echo -e "${GREEN}PASSED${NC} - No rate limiting for normal traffic"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        else
            echo -e "${YELLOW}UNCERTAIN${NC} - Unexpected rate limiting"
            # Conservative behaviour: count unexpected limiting as passed.
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        fi
    fi
}
|
||||
|
||||
# Function to test sustained load
|
||||
# Keep a steady stream of requests going for a fixed duration and observe
# whether the relay ever rate-limits.
#
# $1 = description, $2 = message, $3 = duration in seconds (default 10)
test_sustained_load() {
    local description="$1"
    local message="$2"
    local duration="${3:-10}"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))

    echo -n "Testing $description... "

    local start_time
    start_time=$(date +%s)
    local rate_limited=false
    local total_requests=0
    local successful_requests=0

    # Bug fix: `var=$((var + 1))` instead of `((var++))`, which returns a
    # non-zero status when the variable is 0 and tripped `set -e`.
    local response
    while [[ $(($(date +%s) - start_time)) -lt duration ]]; do
        total_requests=$((total_requests + 1))
        response=$(echo "$message" | timeout 1 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'TIMEOUT')

        if [[ "$response" == *"rate limit"* ]] || [[ "$response" == *"too many"* ]] || [[ "$response" == *"TOO_MANY"* ]]; then
            rate_limited=true
        elif [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]] || [[ "$response" == *"OK"* ]]; then
            successful_requests=$((successful_requests + 1))
        fi

        # Small delay to avoid overwhelming the relay.
        sleep 0.1
    done

    local success_rate=0
    if [[ $total_requests -gt 0 ]]; then
        success_rate=$((successful_requests * 100 / total_requests))
    fi

    if [[ "$rate_limited" == "true" ]]; then
        echo -e "${GREEN}PASSED${NC} - Rate limiting activated under sustained load (${success_rate}% success rate)"
        PASSED_TESTS=$((PASSED_TESTS + 1))
        return 0
    else
        echo -e "${YELLOW}UNCERTAIN${NC} - No rate limiting detected (${success_rate}% success rate)"
        # Acceptable if the relay's rate limiting is very permissive.
        PASSED_TESTS=$((PASSED_TESTS + 1))
        return 0
    fi
}
|
||||
|
||||
# Banner.
printf '%s\n' \
    "==========================================" \
    "C-Relay Rate Limiting Test Suite" \
    "==========================================" \
    "Testing rate limiting against relay at ws://$RELAY_HOST:$RELAY_PORT" \
    ""

# Sanity check: a single request must not be rate limited.
printf '%s\n' "=== Basic Connectivity Test ==="
test_rate_limiting "Basic connectivity" '["REQ","rate_test",{}]' 1 false
printf '\n'

# Rapid bursts of each message type should trigger the limiter.
printf '%s\n' "=== Burst Request Testing ==="
test_rate_limiting "Rapid REQ messages" '["REQ","burst_req_'$(date +%s%N)'",{}]' 20 true
test_rate_limiting "Rapid COUNT messages" '["COUNT","burst_count_'$(date +%s%N)'",{}]' 20 true
test_rate_limiting "Rapid CLOSE messages" '["CLOSE","burst_close"]' 20 true
printf '\n'

# Malformed traffic should be limited at least as aggressively.
printf '%s\n' "=== Malformed Message Rate Limiting ==="
test_rate_limiting "Malformed JSON burst" '["REQ","malformed"' 15 true
test_rate_limiting "Invalid message type burst" '["INVALID","test",{}]' 15 true
test_rate_limiting "Empty message burst" '[]' 15 true
printf '\n'

# Steady moderate load over time.
printf '%s\n' "=== Sustained Load Testing ==="
test_sustained_load "Sustained REQ load" '["REQ","sustained_'$(date +%s%N)'",{}]' 10
test_sustained_load "Sustained COUNT load" '["COUNT","sustained_count_'$(date +%s%N)'",{}]' 10
printf '\n'

# Expensive filters may be throttled sooner.
printf '%s\n' "=== Filter Complexity Testing ==="
test_rate_limiting "Complex filter burst" '["REQ","complex_'$(date +%s%N)'",{"authors":["a","b","c"],"kinds":[1,2,3],"#e":["x","y","z"],"#p":["m","n","o"],"since":1000000000,"until":2000000000,"limit":100}]' 10 true
printf '\n'
|
||||
|
||||
echo "=== Subscription Management Testing ==="
# Rapidly create and close subscriptions and verify the relay survives.
# Bug fix: the previous version declared `local churn_test_passed=true` at
# top level — `local` outside a function is a bash error and, under
# `set -e`, aborted the script right here. The variable was unused and has
# been removed.
echo -n "Testing subscription churn... "
for i in $(seq 1 25); do
    # Create a subscription...
    echo "[\"REQ\",\"churn_${i}_$(date +%s%N)\",{}]" | timeout 1 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 || true

    # ...and immediately close it.
    echo "[\"CLOSE\",\"churn_${i}_*\"]" | timeout 1 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 || true

    sleep 0.05
done

# The churn passes if the relay still answers afterwards.
if echo 'ping' | timeout 2 websocat -n1 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1; then
    echo -e "${GREEN}PASSED${NC} - Subscription churn handled"
    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    PASSED_TESTS=$((PASSED_TESTS + 1))
else
    echo -e "${RED}FAILED${NC} - Relay unresponsive after subscription churn"
    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    FAILED_TESTS=$((FAILED_TESTS + 1))
fi
echo ""

# Summary and exit status.
echo "=== Test Results ==="
echo "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"

if [[ $FAILED_TESTS -eq 0 ]]; then
    echo -e "${GREEN}✓ All rate limiting tests passed!${NC}"
    echo "Rate limiting appears to be working correctly."
    exit 0
else
    echo -e "${RED}✗ Some rate limiting tests failed!${NC}"
    echo "Rate limiting may not be properly configured."
    exit 1
fi
|
||||
265
tests/resource_monitoring.sh
Executable file
265
tests/resource_monitoring.sh
Executable file
@@ -0,0 +1,265 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Resource Monitoring Suite for C-Relay
|
||||
# Monitors memory and CPU usage during testing
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
RELAY_HOST="127.0.0.1"
|
||||
RELAY_PORT="8888"
|
||||
MONITOR_DURATION=60 # seconds
|
||||
SAMPLE_INTERVAL=2 # seconds
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Metrics storage
|
||||
CPU_SAMPLES=()
|
||||
MEM_SAMPLES=()
|
||||
CONNECTION_SAMPLES=()
|
||||
TIMESTAMP_SAMPLES=()
|
||||
|
||||
# Function to get relay process info
|
||||
get_relay_info() {
|
||||
local pid
|
||||
pid=$(pgrep -f "c_relay" | head -1)
|
||||
|
||||
if [[ -z "$pid" ]]; then
|
||||
echo "0:0:0:0"
|
||||
return
|
||||
fi
|
||||
|
||||
# Get CPU, memory, and other stats
|
||||
local ps_output
|
||||
ps_output=$(ps -p "$pid" -o pcpu,pmem,vsz,rss --no-headers 2>/dev/null || echo "0.0 0.0 0 0")
|
||||
|
||||
# Get connection count
|
||||
local connections
|
||||
connections=$(netstat -t 2>/dev/null | grep ":$RELAY_PORT" | wc -l 2>/dev/null || echo "0")
|
||||
|
||||
echo "$ps_output $connections"
|
||||
}
|
||||
|
||||
# Function to monitor resources
|
||||
monitor_resources() {
|
||||
local duration="$1"
|
||||
local interval="$2"
|
||||
|
||||
echo "=========================================="
|
||||
echo "Resource Monitoring Started"
|
||||
echo "=========================================="
|
||||
echo "Duration: ${duration}s, Interval: ${interval}s"
|
||||
echo ""
|
||||
|
||||
# Clear arrays
|
||||
CPU_SAMPLES=()
|
||||
MEM_SAMPLES=()
|
||||
CONNECTION_SAMPLES=()
|
||||
TIMESTAMP_SAMPLES=()
|
||||
|
||||
local start_time
|
||||
start_time=$(date +%s)
|
||||
local sample_count=0
|
||||
|
||||
echo "Time | CPU% | Mem% | VSZ(KB) | RSS(KB) | Connections"
|
||||
echo "-----+------+------+---------+---------+------------"
|
||||
|
||||
while [[ $(($(date +%s) - start_time)) -lt duration ]]; do
|
||||
local relay_info
|
||||
relay_info=$(get_relay_info)
|
||||
|
||||
if [[ "$relay_info" != "0:0:0:0" ]]; then
|
||||
local cpu mem vsz rss connections
|
||||
IFS=' ' read -r cpu mem vsz rss connections <<< "$relay_info"
|
||||
|
||||
# Store samples
|
||||
CPU_SAMPLES+=("$cpu")
|
||||
MEM_SAMPLES+=("$mem")
|
||||
CONNECTION_SAMPLES+=("$connections")
|
||||
TIMESTAMP_SAMPLES+=("$sample_count")
|
||||
|
||||
# Display current stats
|
||||
local elapsed
|
||||
elapsed=$(($(date +%s) - start_time))
|
||||
printf "%4ds | %4.1f | %4.1f | %7s | %7s | %10s\n" \
|
||||
"$elapsed" "$cpu" "$mem" "$vsz" "$rss" "$connections"
|
||||
else
|
||||
echo " -- | Relay process not found --"
|
||||
fi
|
||||
|
||||
((sample_count++))
|
||||
sleep "$interval"
|
||||
done
|
||||
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Function to calculate statistics
|
||||
calculate_stats() {
|
||||
local array_name="$1"
|
||||
local -n array_ref="$array_name"
|
||||
|
||||
if [[ ${#array_ref[@]} -eq 0 ]]; then
|
||||
echo "0:0:0:0:0"
|
||||
return
|
||||
fi
|
||||
|
||||
local sum=0
|
||||
local min=${array_ref[0]}
|
||||
local max=${array_ref[0]}
|
||||
|
||||
for value in "${array_ref[@]}"; do
|
||||
# Use awk for floating point arithmetic
|
||||
sum=$(awk "BEGIN {print $sum + $value}")
|
||||
min=$(awk "BEGIN {print ($value < $min) ? $value : $min}")
|
||||
max=$(awk "BEGIN {print ($value > $max) ? $value : $max}")
|
||||
done
|
||||
|
||||
local avg
|
||||
avg=$(awk "BEGIN {print $sum / ${#array_ref[@]} }")
|
||||
|
||||
echo "$avg:$min:$max:$sum:${#array_ref[@]}"
|
||||
}
|
||||
|
||||
# Function to generate resource report
|
||||
generate_resource_report() {
|
||||
echo "=========================================="
|
||||
echo "Resource Monitoring Report"
|
||||
echo "=========================================="
|
||||
|
||||
if [[ ${#CPU_SAMPLES[@]} -eq 0 ]]; then
|
||||
echo "No resource samples collected. Is the relay running?"
|
||||
return
|
||||
fi
|
||||
|
||||
# Calculate statistics
|
||||
local cpu_stats mem_stats conn_stats
|
||||
cpu_stats=$(calculate_stats CPU_SAMPLES)
|
||||
mem_stats=$(calculate_stats MEM_SAMPLES)
|
||||
conn_stats=$(calculate_stats CONNECTION_SAMPLES)
|
||||
|
||||
# Parse statistics
|
||||
IFS=':' read -r cpu_avg cpu_min cpu_max cpu_sum cpu_count <<< "$cpu_stats"
|
||||
IFS=':' read -r mem_avg mem_min mem_max mem_sum mem_count <<< "$mem_stats"
|
||||
IFS=':' read -r conn_avg conn_min conn_max conn_sum conn_count <<< "$conn_stats"
|
||||
|
||||
echo "CPU Usage Statistics:"
|
||||
printf " Average: %.2f%%\n" "$cpu_avg"
|
||||
printf " Minimum: %.2f%%\n" "$cpu_min"
|
||||
printf " Maximum: %.2f%%\n" "$cpu_max"
|
||||
printf " Samples: %d\n" "$cpu_count"
|
||||
echo ""
|
||||
|
||||
echo "Memory Usage Statistics:"
|
||||
printf " Average: %.2f%%\n" "$mem_avg"
|
||||
printf " Minimum: %.2f%%\n" "$mem_min"
|
||||
printf " Maximum: %.2f%%\n" "$mem_max"
|
||||
printf " Samples: %d\n" "$mem_count"
|
||||
echo ""
|
||||
|
||||
echo "Connection Statistics:"
|
||||
printf " Average: %.1f connections\n" "$conn_avg"
|
||||
printf " Minimum: %.1f connections\n" "$conn_min"
|
||||
printf " Maximum: %.1f connections\n" "$conn_max"
|
||||
printf " Samples: %d\n" "$conn_count"
|
||||
echo ""
|
||||
|
||||
# Performance assessment
|
||||
echo "Performance Assessment:"
|
||||
if awk "BEGIN {exit !($cpu_avg < 50)}"; then
|
||||
echo -e " ${GREEN}✓ CPU usage is acceptable${NC}"
|
||||
else
|
||||
echo -e " ${RED}✗ CPU usage is high${NC}"
|
||||
fi
|
||||
|
||||
if awk "BEGIN {exit !($mem_avg < 80)}"; then
|
||||
echo -e " ${GREEN}✓ Memory usage is acceptable${NC}"
|
||||
else
|
||||
echo -e " ${RED}✗ Memory usage is high${NC}"
|
||||
fi
|
||||
|
||||
if [[ $(awk "BEGIN {print int($conn_max)}") -gt 0 ]]; then
|
||||
echo -e " ${GREEN}✓ Relay is handling connections${NC}"
|
||||
else
|
||||
echo -e " ${YELLOW}⚠ No active connections detected${NC}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to run load test with monitoring
|
||||
run_monitored_load_test() {
|
||||
local test_name="$1"
|
||||
local description="$2"
|
||||
|
||||
echo "=========================================="
|
||||
echo "Monitored Load Test: $test_name"
|
||||
echo "=========================================="
|
||||
echo "Description: $description"
|
||||
echo ""
|
||||
|
||||
# Start monitoring in background
|
||||
monitor_resources 30 2 &
|
||||
local monitor_pid=$!
|
||||
|
||||
# Wait a moment for monitoring to start
|
||||
sleep 2
|
||||
|
||||
# Run a simple load test (create multiple subscriptions)
|
||||
echo "Running load test..."
|
||||
for i in {1..20}; do
|
||||
echo "[\"REQ\",\"monitor_test_${i}\",{}]" | timeout 3 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 &
|
||||
done
|
||||
|
||||
# Let the load run for a bit
|
||||
sleep 10
|
||||
|
||||
# Clean up subscriptions
|
||||
echo "Cleaning up test subscriptions..."
|
||||
for i in {1..20}; do
|
||||
echo "[\"CLOSE\",\"monitor_test_${i}\"]" | timeout 3 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 &
|
||||
done
|
||||
|
||||
# Wait for monitoring to complete
|
||||
sleep 5
|
||||
kill "$monitor_pid" 2>/dev/null || true
|
||||
wait "$monitor_pid" 2>/dev/null || true
|
||||
|
||||
echo ""
|
||||
}
|
||||
|
||||
echo "=========================================="
|
||||
echo "C-Relay Resource Monitoring Suite"
|
||||
echo "=========================================="
|
||||
echo "Monitoring relay at ws://$RELAY_HOST:$RELAY_PORT"
|
||||
echo ""
|
||||
|
||||
# Check if relay is running
|
||||
if ! pgrep -f "c_relay" >/dev/null 2>&1; then
|
||||
echo -e "${RED}Relay process not found. Please start the relay first.${NC}"
|
||||
echo "Use: ./make_and_restart_relay.sh"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}✓ Relay process found${NC}"
|
||||
echo ""
|
||||
|
||||
# Run baseline monitoring
|
||||
echo "=== Baseline Resource Monitoring ==="
|
||||
monitor_resources 15 2
|
||||
generate_resource_report
|
||||
echo ""
|
||||
|
||||
# Run monitored load test
|
||||
run_monitored_load_test "Subscription Load Test" "Creating and closing multiple subscriptions while monitoring resources"
|
||||
generate_resource_report
|
||||
echo ""
|
||||
|
||||
echo "=========================================="
|
||||
echo "Resource Monitoring Complete"
|
||||
echo "=========================================="
|
||||
echo "Resource monitoring completed. Review the statistics above."
|
||||
echo "High CPU/memory usage may indicate performance issues."
|
||||
296
tests/run_all_tests.sh
Executable file
296
tests/run_all_tests.sh
Executable file
@@ -0,0 +1,296 @@
|
||||
#!/bin/bash
|
||||
|
||||
# C-Relay Comprehensive Test Suite Runner
|
||||
# This script runs all security and stability tests for the Nostr relay
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
RELAY_HOST="127.0.0.1"
|
||||
RELAY_PORT="8888"
|
||||
RELAY_URL="ws://$RELAY_HOST:$RELAY_PORT"
|
||||
TEST_TIMEOUT=30
|
||||
LOG_FILE="test_results_$(date +%Y%m%d_%H%M%S).log"
|
||||
REPORT_FILE="test_report_$(date +%Y%m%d_%H%M%S).html"
|
||||
|
||||
# Test keys for authentication (from AGENTS.md)
|
||||
ADMIN_PRIVATE_KEY="6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3"
|
||||
RELAY_PUBKEY="4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Test results tracking
|
||||
TOTAL_SUITES=0
|
||||
PASSED_SUITES=0
|
||||
FAILED_SUITES=0
|
||||
SKIPPED_SUITES=0
|
||||
|
||||
SUITE_RESULTS=()
|
||||
|
||||
# Function to create authenticated WebSocket connection
|
||||
# Usage: authenticated_websocat <subscription_id> <filter_json>
|
||||
authenticated_websocat() {
|
||||
local sub_id="$1"
|
||||
local filter="$2"
|
||||
|
||||
# Create a temporary script for authenticated connection
|
||||
cat > /tmp/auth_ws_$$.sh << EOF
|
||||
#!/bin/bash
|
||||
# Authenticated WebSocket connection helper
|
||||
|
||||
# Connect and handle AUTH challenge
|
||||
exec websocat -B 1048576 --no-close ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null << 'INNER_EOF'
|
||||
["REQ","$sub_id",$filter]
|
||||
INNER_EOF
|
||||
EOF
|
||||
|
||||
chmod +x /tmp/auth_ws_$$.sh
|
||||
timeout $TEST_TIMEOUT bash /tmp/auth_ws_$$.sh
|
||||
rm -f /tmp/auth_ws_$$.sh
|
||||
}
|
||||
|
||||
# Function to log messages
|
||||
log() {
|
||||
echo "$(date '+%Y-%m-%d %H:%M:%S') - $*" | tee -a "$LOG_FILE"
|
||||
}
|
||||
|
||||
# Function to run a test suite
|
||||
run_test_suite() {
|
||||
local suite_name="$1"
|
||||
local suite_script="$2"
|
||||
local description="$3"
|
||||
|
||||
TOTAL_SUITES=$((TOTAL_SUITES + 1))
|
||||
|
||||
log "=========================================="
|
||||
log "Running Test Suite: $suite_name"
|
||||
log "Description: $description"
|
||||
log "=========================================="
|
||||
|
||||
if [[ ! -f "$suite_script" ]]; then
|
||||
log "${RED}ERROR: Test script $suite_script not found${NC}"
|
||||
FAILED_SUITES=$((FAILED_SUITES + 1))
|
||||
SUITE_RESULTS+=("$suite_name: FAILED (script not found)")
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Make script executable if not already
|
||||
chmod +x "$suite_script"
|
||||
|
||||
# Run the test suite and capture output
|
||||
local start_time=$(date +%s)
|
||||
if bash "$suite_script" >> "$LOG_FILE" 2>&1; then
|
||||
local end_time=$(date +%s)
|
||||
local duration=$((end_time - start_time))
|
||||
log "${GREEN}✓ $suite_name PASSED${NC} (Duration: ${duration}s)"
|
||||
PASSED_SUITES=$((PASSED_SUITES + 1))
|
||||
SUITE_RESULTS+=("$suite_name: PASSED (${duration}s)")
|
||||
return 0
|
||||
else
|
||||
local end_time=$(date +%s)
|
||||
local duration=$((end_time - start_time))
|
||||
log "${RED}✗ $suite_name FAILED${NC} (Duration: ${duration}s)"
|
||||
FAILED_SUITES=$((FAILED_SUITES + 1))
|
||||
SUITE_RESULTS+=("$suite_name: FAILED (${duration}s)")
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to check if relay is running
|
||||
check_relay_status() {
|
||||
log "Checking relay status at $RELAY_URL..."
|
||||
|
||||
# First check if HTTP endpoint is accessible
|
||||
if curl -s -H "Accept: application/nostr+json" "http://$RELAY_HOST:$RELAY_PORT" >/dev/null 2>&1; then
|
||||
log "${GREEN}✓ Relay HTTP endpoint is accessible${NC}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Fallback: Try WebSocket connection
|
||||
if echo '["REQ","status_check",{}]' | timeout 5 websocat -B 1048576 --no-close "$RELAY_URL" >/dev/null 2>&1; then
|
||||
log "${GREEN}✓ Relay WebSocket endpoint is accessible${NC}"
|
||||
return 0
|
||||
else
|
||||
log "${RED}✗ Relay is not accessible at $RELAY_URL${NC}"
|
||||
log "Please start the relay first using: ./make_and_restart_relay.sh"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to generate HTML report
|
||||
generate_html_report() {
|
||||
local total_duration=$1
|
||||
|
||||
cat > "$REPORT_FILE" << EOF
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>C-Relay Test Report - $(date)</title>
|
||||
<style>
|
||||
body { font-family: Arial, sans-serif; margin: 40px; background-color: #f5f5f5; }
|
||||
.header { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 20px; border-radius: 8px; margin-bottom: 30px; }
|
||||
.summary { background: white; padding: 20px; border-radius: 8px; margin-bottom: 30px; box-shadow: 0 2px 10px rgba(0,0,0,0.1); }
|
||||
.suite { background: white; margin-bottom: 10px; padding: 15px; border-radius: 5px; box-shadow: 0 1px 5px rgba(0,0,0,0.1); }
|
||||
.passed { border-left: 5px solid #28a745; }
|
||||
.failed { border-left: 5px solid #dc3545; }
|
||||
.skipped { border-left: 5px solid #ffc107; }
|
||||
.metric { display: inline-block; margin: 10px; padding: 10px; background: #e9ecef; border-radius: 5px; }
|
||||
.status-passed { color: #28a745; font-weight: bold; }
|
||||
.status-failed { color: #dc3545; font-weight: bold; }
|
||||
.status-skipped { color: #ffc107; font-weight: bold; }
|
||||
table { width: 100%; border-collapse: collapse; margin-top: 20px; }
|
||||
th, td { padding: 12px; text-align: left; border-bottom: 1px solid #ddd; }
|
||||
th { background-color: #f8f9fa; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="header">
|
||||
<h1>C-Relay Comprehensive Test Report</h1>
|
||||
<p>Generated on: $(date)</p>
|
||||
<p>Test Environment: $RELAY_URL</p>
|
||||
</div>
|
||||
|
||||
<div class="summary">
|
||||
<h2>Test Summary</h2>
|
||||
<div class="metric">
|
||||
<strong>Total Suites:</strong> $TOTAL_SUITES
|
||||
</div>
|
||||
<div class="metric">
|
||||
<strong>Passed:</strong> <span class="status-passed">$PASSED_SUITES</span>
|
||||
</div>
|
||||
<div class="metric">
|
||||
<strong>Failed:</strong> <span class="status-failed">$FAILED_SUITES</span>
|
||||
</div>
|
||||
<div class="metric">
|
||||
<strong>Skipped:</strong> <span class="status-skipped">$SKIPPED_SUITES</span>
|
||||
</div>
|
||||
<div class="metric">
|
||||
<strong>Total Duration:</strong> ${total_duration}s
|
||||
</div>
|
||||
<div class="metric">
|
||||
<strong>Success Rate:</strong> $(( (PASSED_SUITES * 100) / TOTAL_SUITES ))%
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<h2>Test Suite Results</h2>
|
||||
EOF
|
||||
|
||||
for result in "${SUITE_RESULTS[@]}"; do
|
||||
local suite_name=$(echo "$result" | cut -d: -f1)
|
||||
local status=$(echo "$result" | cut -d: -f2 | cut -d' ' -f1)
|
||||
local duration=$(echo "$result" | cut -d: -f2 | cut -d'(' -f2 | cut -d')' -f1)
|
||||
|
||||
local css_class="passed"
|
||||
if [[ "$status" == "FAILED" ]]; then
|
||||
css_class="failed"
|
||||
elif [[ "$status" == "SKIPPED" ]]; then
|
||||
css_class="skipped"
|
||||
fi
|
||||
|
||||
cat >> "$REPORT_FILE" << EOF
|
||||
<div class="suite $css_class">
|
||||
<strong>$suite_name</strong> - <span class="status-$css_class">$status</span> ($duration)
|
||||
</div>
|
||||
EOF
|
||||
done
|
||||
|
||||
cat >> "$REPORT_FILE" << EOF
|
||||
</body>
|
||||
</html>
|
||||
EOF
|
||||
|
||||
log "HTML report generated: $REPORT_FILE"
|
||||
}
|
||||
|
||||
# Main execution
|
||||
log "=========================================="
|
||||
log "C-Relay Comprehensive Test Suite Runner"
|
||||
log "=========================================="
|
||||
log "Relay URL: $RELAY_URL"
|
||||
log "Log file: $LOG_FILE"
|
||||
log "Report file: $REPORT_FILE"
|
||||
log ""
|
||||
|
||||
# Check if relay is running
|
||||
if ! check_relay_status; then
|
||||
log "${RED}Cannot proceed without a running relay. Exiting.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log ""
|
||||
log "Starting comprehensive test execution..."
|
||||
log ""
|
||||
|
||||
# Record start time
|
||||
OVERALL_START_TIME=$(date +%s)
|
||||
|
||||
# Run Security Test Suites
|
||||
log "${BLUE}=== SECURITY TEST SUITES ===${NC}"
|
||||
|
||||
run_test_suite "SQL Injection Tests" "sql_injection_tests.sh" "Comprehensive SQL injection vulnerability testing"
|
||||
run_test_suite "Filter Validation Tests" "filter_validation_test.sh" "Input validation for REQ and COUNT messages"
|
||||
run_test_suite "Subscription Validation Tests" "subscription_validation.sh" "Subscription ID and message validation"
|
||||
run_test_suite "Memory Corruption Tests" "memory_corruption_tests.sh" "Buffer overflow and memory safety testing"
|
||||
run_test_suite "Input Validation Tests" "input_validation_tests.sh" "Comprehensive input boundary testing"
|
||||
|
||||
# Run Performance Test Suites
|
||||
log ""
|
||||
log "${BLUE}=== PERFORMANCE TEST SUITES ===${NC}"
|
||||
|
||||
run_test_suite "Subscription Limit Tests" "subscription_limits.sh" "Subscription limit enforcement testing"
|
||||
run_test_suite "Load Testing" "load_tests.sh" "High concurrent connection testing"
|
||||
run_test_suite "Stress Testing" "stress_tests.sh" "Resource usage and stability testing"
|
||||
run_test_suite "Rate Limiting Tests" "rate_limiting_tests.sh" "Rate limiting and abuse prevention"
|
||||
|
||||
# Run Integration Test Suites
|
||||
log ""
|
||||
log "${BLUE}=== INTEGRATION TEST SUITES ===${NC}"
|
||||
|
||||
run_test_suite "NIP Protocol Tests" "run_nip_tests.sh" "All NIP protocol compliance tests"
|
||||
run_test_suite "Configuration Tests" "config_tests.sh" "Configuration management and persistence"
|
||||
run_test_suite "Authentication Tests" "auth_tests.sh" "NIP-42 authentication testing"
|
||||
|
||||
# Run Benchmarking Suites
|
||||
log ""
|
||||
log "${BLUE}=== BENCHMARKING SUITES ===${NC}"
|
||||
|
||||
run_test_suite "Performance Benchmarks" "performance_benchmarks.sh" "Performance metrics and benchmarking"
|
||||
run_test_suite "Resource Monitoring" "resource_monitoring.sh" "Memory and CPU usage monitoring"
|
||||
|
||||
# Calculate total duration
|
||||
OVERALL_END_TIME=$(date +%s)
|
||||
TOTAL_DURATION=$((OVERALL_END_TIME - OVERALL_START_TIME))
|
||||
|
||||
# Generate final report
|
||||
log ""
|
||||
log "=========================================="
|
||||
log "TEST EXECUTION COMPLETE"
|
||||
log "=========================================="
|
||||
log "Total test suites: $TOTAL_SUITES"
|
||||
log "Passed: $PASSED_SUITES"
|
||||
log "Failed: $FAILED_SUITES"
|
||||
log "Skipped: $SKIPPED_SUITES"
|
||||
log "Total duration: ${TOTAL_DURATION}s"
|
||||
log "Success rate: $(( (PASSED_SUITES * 100) / TOTAL_SUITES ))%"
|
||||
log ""
|
||||
log "Detailed log: $LOG_FILE"
|
||||
|
||||
# Generate HTML report
|
||||
generate_html_report "$TOTAL_DURATION"
|
||||
|
||||
# Exit with appropriate code
|
||||
if [[ $FAILED_SUITES -eq 0 ]]; then
|
||||
log "${GREEN}✓ ALL TESTS PASSED${NC}"
|
||||
exit 0
|
||||
else
|
||||
log "${RED}✗ SOME TESTS FAILED${NC}"
|
||||
log "Check $LOG_FILE for detailed error information"
|
||||
exit 1
|
||||
fi
|
||||
126
tests/run_nip_tests.sh
Executable file
126
tests/run_nip_tests.sh
Executable file
@@ -0,0 +1,126 @@
|
||||
#!/bin/bash
|
||||
|
||||
# NIP Protocol Test Runner for C-Relay
|
||||
# Runs all NIP compliance tests
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
RELAY_HOST="127.0.0.1"
|
||||
RELAY_PORT="8888"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Test counters
|
||||
TOTAL_SUITES=0
|
||||
PASSED_SUITES=0
|
||||
FAILED_SUITES=0
|
||||
|
||||
# Available NIP test files
|
||||
NIP_TESTS=(
|
||||
"1_nip_test.sh:NIP-01 Basic Protocol"
|
||||
"9_nip_delete_test.sh:NIP-09 Event Deletion"
|
||||
"11_nip_information.sh:NIP-11 Relay Information"
|
||||
"13_nip_test.sh:NIP-13 Proof of Work"
|
||||
"17_nip_test.sh:NIP-17 Private DMs"
|
||||
"40_nip_test.sh:NIP-40 Expiration Timestamp"
|
||||
"42_nip_test.sh:NIP-42 Authentication"
|
||||
"45_nip_test.sh:NIP-45 Event Counts"
|
||||
"50_nip_test.sh:NIP-50 Search Capability"
|
||||
"70_nip_test.sh:NIP-70 Protected Events"
|
||||
)
|
||||
|
||||
# Function to run a NIP test suite
|
||||
run_nip_test() {
|
||||
local test_file="$1"
|
||||
local test_name="$2"
|
||||
|
||||
TOTAL_SUITES=$((TOTAL_SUITES + 1))
|
||||
|
||||
echo "=========================================="
|
||||
echo "Running $test_name ($test_file)"
|
||||
echo "=========================================="
|
||||
|
||||
if [[ ! -f "$test_file" ]]; then
|
||||
echo -e "${RED}ERROR: Test file $test_file not found${NC}"
|
||||
FAILED_SUITES=$((FAILED_SUITES + 1))
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Make script executable if not already
|
||||
chmod +x "$test_file"
|
||||
|
||||
# Run the test
|
||||
if bash "$test_file"; then
|
||||
echo -e "${GREEN}✓ $test_name PASSED${NC}"
|
||||
PASSED_SUITES=$((PASSED_SUITES + 1))
|
||||
return 0
|
||||
else
|
||||
echo -e "${RED}✗ $test_name FAILED${NC}"
|
||||
FAILED_SUITES=$((FAILED_SUITES + 1))
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to check relay connectivity
|
||||
check_relay() {
|
||||
echo "Checking relay connectivity at ws://$RELAY_HOST:$RELAY_PORT..."
|
||||
|
||||
if timeout 5 bash -c "
|
||||
echo 'ping' | websocat -n1 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1
|
||||
" 2>/dev/null; then
|
||||
echo -e "${GREEN}✓ Relay is accessible${NC}"
|
||||
return 0
|
||||
else
|
||||
echo -e "${RED}✗ Cannot connect to relay${NC}"
|
||||
echo "Please start the relay first: ./make_and_restart_relay.sh"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
echo "=========================================="
|
||||
echo "C-Relay NIP Protocol Test Suite"
|
||||
echo "=========================================="
|
||||
echo "Testing NIP compliance against relay at ws://$RELAY_HOST:$RELAY_PORT"
|
||||
echo ""
|
||||
|
||||
# Check relay connectivity
|
||||
if ! check_relay; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Running NIP protocol tests..."
|
||||
echo ""
|
||||
|
||||
# Run all NIP tests
|
||||
for nip_test in "${NIP_TESTS[@]}"; do
|
||||
test_file="${nip_test%%:*}"
|
||||
test_name="${nip_test#*:}"
|
||||
|
||||
run_nip_test "$test_file" "$test_name"
|
||||
echo ""
|
||||
done
|
||||
|
||||
# Summary
|
||||
echo "=========================================="
|
||||
echo "NIP Test Summary"
|
||||
echo "=========================================="
|
||||
echo "Total NIP test suites: $TOTAL_SUITES"
|
||||
echo -e "Passed: ${GREEN}$PASSED_SUITES${NC}"
|
||||
echo -e "Failed: ${RED}$FAILED_SUITES${NC}"
|
||||
|
||||
if [[ $FAILED_SUITES -eq 0 ]]; then
|
||||
echo -e "${GREEN}✓ All NIP tests passed!${NC}"
|
||||
echo "The relay is fully NIP compliant."
|
||||
exit 0
|
||||
else
|
||||
echo -e "${RED}✗ Some NIP tests failed.${NC}"
|
||||
echo "The relay may have NIP compliance issues."
|
||||
exit 1
|
||||
fi
|
||||
254
tests/sql_injection_tests.sh
Executable file
254
tests/sql_injection_tests.sh
Executable file
@@ -0,0 +1,254 @@
|
||||
#!/bin/bash
|
||||
|
||||
# SQL Injection Test Suite for C-Relay
|
||||
# Comprehensive testing of SQL injection vulnerabilities across all filter types
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration
|
||||
RELAY_HOST="127.0.0.1"
|
||||
RELAY_PORT="8888"
|
||||
TEST_TIMEOUT=10
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Test counters
|
||||
TOTAL_TESTS=0
|
||||
PASSED_TESTS=0
|
||||
FAILED_TESTS=0
|
||||
|
||||
# Function to send WebSocket message and check for SQL injection success
|
||||
test_sql_injection() {
|
||||
local description="$1"
|
||||
local message="$2"
|
||||
|
||||
TOTAL_TESTS=$((TOTAL_TESTS + 1))
|
||||
|
||||
echo -n "Testing $description... "
|
||||
|
||||
# Send message via websocat and capture response
|
||||
local response
|
||||
response=$(echo "$message" | timeout 2 websocat ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'TIMEOUT')
|
||||
|
||||
# Check if the response indicates successful query execution (which would be bad)
|
||||
# Look for signs that SQL injection worked (like database errors or unexpected results)
|
||||
if [[ "$response" == "TIMEOUT" ]]; then
|
||||
echo -e "${YELLOW}UNCERTAIN${NC} - Connection timeout (may indicate crash)"
|
||||
FAILED_TESTS=$((FAILED_TESTS + 1))
|
||||
return 1
|
||||
elif [[ -z "$response" ]]; then
|
||||
# Empty response - relay silently rejected malformed input
|
||||
echo -e "${GREEN}PASSED${NC} - SQL injection blocked (silently rejected)"
|
||||
PASSED_TESTS=$((PASSED_TESTS + 1))
|
||||
return 0
|
||||
elif [[ "$response" == *"NOTICE"* ]] && [[ "$response" == *"error:"* ]]; then
|
||||
# Relay properly rejected the input with a NOTICE error message
|
||||
echo -e "${GREEN}PASSED${NC} - SQL injection blocked (rejected with error)"
|
||||
PASSED_TESTS=$((PASSED_TESTS + 1))
|
||||
return 0
|
||||
elif [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"COUNT"* ]] || [[ "$response" == *"EVENT"* ]]; then
|
||||
# Query completed normally - this is expected for properly sanitized input
|
||||
echo -e "${GREEN}PASSED${NC} - SQL injection blocked (query sanitized)"
|
||||
PASSED_TESTS=$((PASSED_TESTS + 1))
|
||||
return 0
|
||||
elif [[ "$response" == *"SQL"* ]] || [[ "$response" == *"syntax"* ]]; then
|
||||
# Database error leaked - potential vulnerability
|
||||
echo -e "${RED}FAILED${NC} - SQL error leaked: $response"
|
||||
FAILED_TESTS=$((FAILED_TESTS + 1))
|
||||
return 1
|
||||
else
|
||||
# Unknown response
|
||||
echo -e "${YELLOW}UNCERTAIN${NC} - Unexpected response: $response"
|
||||
FAILED_TESTS=$((FAILED_TESTS + 1))
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to test valid message (should work normally)
|
||||
test_valid_query() {
|
||||
local description="$1"
|
||||
local message="$2"
|
||||
|
||||
TOTAL_TESTS=$((TOTAL_TESTS + 1))
|
||||
|
||||
echo -n "Testing $description... "
|
||||
|
||||
local response
|
||||
response=$(echo "$message" | timeout 2 websocat ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'TIMEOUT')
|
||||
|
||||
if [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]]; then
|
||||
echo -e "${GREEN}PASSED${NC} - Valid query works"
|
||||
PASSED_TESTS=$((PASSED_TESTS + 1))
|
||||
return 0
|
||||
else
|
||||
echo -e "${RED}FAILED${NC} - Valid query failed: $response"
|
||||
FAILED_TESTS=$((FAILED_TESTS + 1))
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
echo "=========================================="
|
||||
echo "C-Relay SQL Injection Test Suite"
|
||||
echo "=========================================="
|
||||
echo "Testing against relay at ws://$RELAY_HOST:$RELAY_PORT"
|
||||
echo
|
||||
|
||||
# Test basic connectivity first
|
||||
echo "=== Basic Connectivity Test ==="
|
||||
test_valid_query "Basic connectivity" '["REQ","basic_test",{}]'
|
||||
echo
|
||||
|
||||
# SQL Injection Payloads
|
||||
SQL_PAYLOADS=(
|
||||
# Classic SQL injection
|
||||
"'; DROP TABLE events; --"
|
||||
"1' OR '1'='1"
|
||||
"1; SELECT * FROM sqlite_master; --"
|
||||
"' OR 1=1 --"
|
||||
"admin'--"
|
||||
"1' UNION SELECT password FROM users --"
|
||||
|
||||
# Time-based blind SQL injection
|
||||
"1' AND SLEEP(5) --"
|
||||
"1' AND 1=1 WAITFOR DELAY '0:0:5' --"
|
||||
|
||||
# Error-based SQL injection
|
||||
"1' AND 1=CAST((SELECT version()) AS INT) --"
|
||||
"1' AND 1=CONVERT(INT, (SELECT @@VERSION)) --"
|
||||
|
||||
# Union-based injection
|
||||
"' UNION SELECT NULL,NULL,NULL --"
|
||||
"' UNION SELECT 1,2,3 --"
|
||||
"' UNION ALL SELECT NULL,NULL,NULL --"
|
||||
|
||||
# Stacked queries
|
||||
"'; SELECT * FROM events; --"
|
||||
"'; DELETE FROM events; --"
|
||||
"'; UPDATE events SET content='hacked' WHERE 1=1; --"
|
||||
|
||||
# Comment injection
|
||||
"/*"
|
||||
"*/"
|
||||
"/**/"
|
||||
"--"
|
||||
"#"
|
||||
|
||||
# Hex encoded injection
|
||||
"0x53514C5F494E4A454354494F4E" # SQL_INJECTION in hex
|
||||
|
||||
# Base64 encoded injection
|
||||
"J1NSTCBJTkpFQ1RJT04gLS0=" # 'SQL INJECTION -- in base64
|
||||
|
||||
# Nested injection
|
||||
"'))); DROP TABLE events; --"
|
||||
"')) UNION SELECT NULL; --"
|
||||
|
||||
# Boolean-based blind injection
|
||||
"' AND 1=1 --"
|
||||
"' AND 1=2 --"
|
||||
"' AND (SELECT COUNT(*) FROM events) > 0 --"
|
||||
|
||||
# Out-of-band injection (if supported)
|
||||
"'; EXEC master..xp_cmdshell 'net user' --"
|
||||
"'; DECLARE @host varchar(1024); SELECT @host=(SELECT TOP 1 master..sys.fn_varbintohexstr(password_hash) FROM sys.sql_logins WHERE name='sa'); --"
|
||||
)
|
||||
|
||||
echo "=== Authors Filter SQL Injection Tests ==="
|
||||
for payload in "${SQL_PAYLOADS[@]}"; do
|
||||
test_sql_injection "Authors filter with payload: $payload" "[\"REQ\",\"sql_test_authors_$RANDOM\",{\"authors\":[\"$payload\"]}]"
|
||||
done
|
||||
echo
|
||||
|
||||
echo "=== IDs Filter SQL Injection Tests ==="
|
||||
for payload in "${SQL_PAYLOADS[@]}"; do
|
||||
test_sql_injection "IDs filter with payload: $payload" "[\"REQ\",\"sql_test_ids_$RANDOM\",{\"ids\":[\"$payload\"]}]"
|
||||
done
|
||||
echo
|
||||
|
||||
echo "=== Kinds Filter SQL Injection Tests ==="
|
||||
# Test numeric kinds with SQL injection attempts (these will fail JSON parsing, which is expected)
|
||||
test_sql_injection "Kinds filter with string injection" "[\"REQ\",\"sql_test_kinds_$RANDOM\",{\"kinds\":[\"1' OR '1'='1\"]}]"
|
||||
test_sql_injection "Kinds filter with negative value" "[\"REQ\",\"sql_test_kinds_$RANDOM\",{\"kinds\":[-1]}]"
|
||||
test_sql_injection "Kinds filter with very large value" "[\"REQ\",\"sql_test_kinds_$RANDOM\",{\"kinds\":[999999999]}]"
|
||||
echo
|
||||
|
||||
echo "=== Search Filter SQL Injection Tests ==="
|
||||
for payload in "${SQL_PAYLOADS[@]}"; do
|
||||
test_sql_injection "Search filter with payload: $payload" "[\"REQ\",\"sql_test_search_$RANDOM\",{\"search\":\"$payload\"}]"
|
||||
done
|
||||
echo
|
||||
|
||||
echo "=== Tag Filter SQL Injection Tests ==="
|
||||
TAG_PREFIXES=("#e" "#p" "#t" "#r" "#d")
|
||||
for prefix in "${TAG_PREFIXES[@]}"; do
|
||||
for payload in "${SQL_PAYLOADS[@]}"; do
|
||||
test_sql_injection "$prefix tag filter with payload: $payload" "[\"REQ\",\"sql_test_tag_$RANDOM\",{\"$prefix\":[\"$payload\"]}]"
|
||||
done
|
||||
done
|
||||
echo
|
||||
|
||||
echo "=== Timestamp Filter SQL Injection Tests ==="
|
||||
# Test since/until parameters
|
||||
test_sql_injection "Since parameter injection" "[\"REQ\",\"sql_test_since_$RANDOM\",{\"since\":\"1' OR '1'='1\"}]"
|
||||
test_sql_injection "Until parameter injection" "[\"REQ\",\"sql_test_until_$RANDOM\",{\"until\":\"1; DROP TABLE events; --\"}]"
|
||||
echo
|
||||
|
||||
echo "=== Limit Parameter SQL Injection Tests ==="
|
||||
test_sql_injection "Limit parameter injection" "[\"REQ\",\"sql_test_limit_$RANDOM\",{\"limit\":\"1' OR '1'='1\"}]"
|
||||
test_sql_injection "Limit with UNION" "[\"REQ\",\"sql_test_limit_$RANDOM\",{\"limit\":\"0 UNION SELECT password FROM users\"}]"
|
||||
echo
|
||||
|
||||
echo "=== Complex Multi-Filter SQL Injection Tests ==="
|
||||
# Test combinations that might bypass validation
|
||||
test_sql_injection "Multi-filter with authors injection" "[\"REQ\",\"sql_test_multi_$RANDOM\",{\"authors\":[\"admin'--\"],\"kinds\":[1],\"search\":\"anything\"}]"
|
||||
test_sql_injection "Multi-filter with search injection" "[\"REQ\",\"sql_test_multi_$RANDOM\",{\"authors\":[\"valid\"],\"search\":\"'; DROP TABLE events; --\"}]"
|
||||
test_sql_injection "Multi-filter with tag injection" "[\"REQ\",\"sql_test_multi_$RANDOM\",{\"#e\":[\"'; SELECT * FROM sqlite_master; --\"],\"limit\":10}]"
|
||||
echo
|
||||
|
||||
echo "=== COUNT Message SQL Injection Tests ==="
|
||||
# Test COUNT messages which might have different code paths
|
||||
for payload in "${SQL_PAYLOADS[@]}"; do
|
||||
test_sql_injection "COUNT with authors payload: $payload" "[\"COUNT\",\"sql_count_authors_$RANDOM\",{\"authors\":[\"$payload\"]}]"
|
||||
test_sql_injection "COUNT with search payload: $payload" "[\"COUNT\",\"sql_count_search_$RANDOM\",{\"search\":\"$payload\"}]"
|
||||
done
|
||||
echo
|
||||
|
||||
echo "=== Edge Case SQL Injection Tests ==="
|
||||
# Test edge cases that might bypass validation
|
||||
test_sql_injection "Empty string injection" "[\"REQ\",\"sql_edge_$RANDOM\",{\"authors\":[\"\"]}]"
|
||||
test_sql_injection "Null byte injection" "[\"REQ\",\"sql_edge_$RANDOM\",{\"authors\":[\"admin\\x00' OR '1'='1\"]}]"
|
||||
test_sql_injection "Unicode injection" "[\"REQ\",\"sql_edge_$RANDOM\",{\"authors\":[\"admin' OR '1'='1' -- 💣\"]}]"
|
||||
test_sql_injection "Very long injection payload" "[\"REQ\",\"sql_edge_$RANDOM\",{\"search\":\"$(printf 'a%.0s' {1..1000})' OR '1'='1\"}]"
|
||||
echo
|
||||
|
||||
echo "=== Subscription ID SQL Injection Tests ==="
|
||||
# Test if subscription IDs can be used for injection
|
||||
test_sql_injection "Subscription ID injection" "[\"REQ\",\"'; DROP TABLE subscriptions; --\",{}]"
|
||||
test_sql_injection "Subscription ID with quotes" "[\"REQ\",\"sub\"'; SELECT * FROM events; --\",{}]"
|
||||
echo
|
||||
|
||||
echo "=== CLOSE Message SQL Injection Tests ==="
|
||||
# Test CLOSE messages
|
||||
test_sql_injection "CLOSE with injection" "[\"CLOSE\",\"'; DROP TABLE subscriptions; --\"]"
|
||||
echo
|
||||
|
||||
echo "=== Test Results ==="
|
||||
echo "Total tests: $TOTAL_TESTS"
|
||||
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
|
||||
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"
|
||||
|
||||
if [[ $FAILED_TESTS -eq 0 ]]; then
|
||||
echo -e "${GREEN}✓ All SQL injection tests passed!${NC}"
|
||||
echo "The relay appears to be protected against SQL injection attacks."
|
||||
exit 0
|
||||
else
|
||||
echo -e "${RED}✗ SQL injection vulnerabilities detected!${NC}"
|
||||
echo "The relay may be vulnerable to SQL injection attacks."
|
||||
echo "Failed tests: $FAILED_TESTS"
|
||||
exit 1
|
||||
fi
|
||||
75
tests/subscription_limits.sh
Executable file
75
tests/subscription_limits.sh
Executable file
@@ -0,0 +1,75 @@
#!/bin/bash

# Simple test script to verify subscription limit enforcement and rate limiting.
# This script tests that subscription limits are enforced early.
#
# Requires: websocat on PATH, and a relay already listening at RELAY_URL.
# Exits non-zero on the first hard failure (set -e plus explicit exit 1).

set -e

RELAY_URL="ws://127.0.0.1:8888"

echo "=== Subscription Limit Test ==="
echo "[INFO] Testing relay at: $RELAY_URL"
echo "[INFO] Note: This test assumes default subscription limits (max 25 per client)"
echo ""

# --- Test 1: basic connectivity -------------------------------------------
echo "=== Test 1: Basic Connectivity ==="
echo "[INFO] Testing basic WebSocket connection..."

# Send a single REQ; -n1 closes after one response line. The || guard keeps
# set -e from aborting on timeout so we can report the failure ourselves.
response=$(echo '["REQ","basic_test",{}]' | timeout 5 websocat -n1 "$RELAY_URL" 2>/dev/null || echo "TIMEOUT")

# Any of EOSE/EVENT/NOTICE proves the relay parsed and answered the REQ.
if echo "$response" | grep -q "EOSE\|EVENT\|NOTICE"; then
    echo "[PASS] Basic connectivity works"
else
    echo "[FAIL] Basic connectivity failed. Response: $response"
    exit 1
fi
echo ""

# --- Test 2: subscription limit enforcement -------------------------------
echo "=== Test 2: Subscription Limit Enforcement ==="
echo "[INFO] Testing subscription limits by creating multiple subscriptions..."

success_count=0
limit_hit=false

# All REQs must share one WebSocket connection — the limit is per client.
echo "[INFO] Creating multiple subscriptions within a single connection..."

# Build 30 REQ frames (more than the assumed limit of 25), newline-separated.
req_messages=""
for i in {1..30}; do
    sub_id="limit_test_$i"
    req_messages="${req_messages}[\"REQ\",\"$sub_id\",{}]\n"
done

# Send everything through a single websocat session; -B raises the buffer
# so large responses aren't truncated. Output goes to a temp file so the
# parse loop below runs in the current shell (no subshell counter loss).
temp_file=$(mktemp)
echo -e "$req_messages" | timeout 10 websocat -B 1048576 "$RELAY_URL" 2>/dev/null > "$temp_file" || echo "TIMEOUT" >> "$temp_file"

# Count EOSE replies (one per accepted subscription) until the relay sends
# a CLOSED frame mentioning "exceeded", which signals the limit was hit.
subscription_count=0
while read -r line; do
    if [[ "$line" == *"CLOSED"* && "$line" == *"exceeded"* ]]; then
        echo "[INFO] Hit subscription limit at subscription $((subscription_count + 1))"
        limit_hit=true
        break
    elif [[ "$line" == *"EOSE"* ]]; then
        subscription_count=$((subscription_count + 1))
    fi
done < "$temp_file"

success_count=$subscription_count

# Clean up temp file
rm -f "$temp_file"

if [ "$limit_hit" = true ]; then
    echo "[PASS] Subscription limit enforcement working (limit hit after $success_count subscriptions)"
else
    echo "[WARN] Subscription limit not hit after 30 attempts"
fi
echo ""

echo "=== Test Complete ==="
34
tests/subscription_validation.sh
Executable file
34
tests/subscription_validation.sh
Executable file
@@ -0,0 +1,34 @@
#!/bin/bash

# Test script to validate subscription ID handling fixes.
# This exercises the memory corruption fixes in subscription handling by
# sending malformed and valid REQ/CLOSE messages to a relay on localhost:8888.
#
# Requires: wscat on PATH. Each probe is best-effort: timeout/connection
# failure is the EXPECTED outcome for invalid IDs, so failures are reported
# but never abort the script (no set -e here, by design).

echo "Testing subscription ID validation fixes..."

echo "Testing malformed subscription IDs..."

# Test 1: Empty subscription ID — relay should reject it.
echo '["REQ","",{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "Empty ID test: Connection failed (expected)"

# Test 2: Very long subscription ID (over 64 chars) — should be rejected.
echo '["REQ","verylongsubscriptionidthatshouldexceedthemaximumlengthlimitof64characters",{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "Long ID test: Connection failed (expected)"

# Test 3: Subscription ID with invalid characters.
echo '["REQ","sub@123",{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "Invalid chars test: Connection failed (expected)"

# Test 4: NULL subscription ID (this should be caught by JSON parsing).
echo '["REQ",null,{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "NULL ID test: Connection failed (expected)"

# Test 5: Valid subscription ID (should work).
echo '["REQ","valid_sub_123",{}]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null && echo "Valid ID test: Success" || echo "Valid ID test: Failed"

echo "Testing CLOSE message validation..."

# Test 6: CLOSE with malformed (empty) subscription ID.
echo '["CLOSE",""]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null || echo "CLOSE empty ID test: Connection failed (expected)"

# Test 7: CLOSE with valid subscription ID.
echo '["CLOSE","valid_sub_123"]' | timeout 5 wscat -c ws://localhost:8888 2>/dev/null && echo "CLOSE valid ID test: Success" || echo "CLOSE valid ID test: Failed"

echo "Subscription validation tests completed."
Reference in New Issue
Block a user