Compare commits

22 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 580aec7d57 | |
| | 54b91af76c | |
| | 6d9b4efb7e | |
| | 6f51f445b7 | |
| | 6de9518de7 | |
| | 517cc020c7 | |
| | 2c699652b0 | |
| | 2e4ffc0e79 | |
| | 70c91ec858 | |
| | b7c4609c2d | |
| | 7f69367666 | |
| | fa17aa1f78 | |
| | 7e560b4247 | |
| | 9a29ea51e3 | |
| | 6c10713e18 | |
| | b810982a17 | |
| | 23c95fd2ea | |
| | e96957f91b | |
| | de3e7c75a5 | |
| | 646adac981 | |
| | 2d93c2f819 | |
| | ce7f7ad11b | |
**.gitignore** (vendored, 7 lines changed)

@@ -1,2 +1,9 @@

```
nostr_core_lib/
nips/
build/
relay.log
relay.pid
Trash/
src/version.h
dev-config/
db/
```
**.roo/rules-code/rules.md** (new file, 1 line)

@@ -0,0 +1 @@

Use ./make_and_restart_relay.sh instead of make to build the project.
**Makefile** (183 lines changed)

@@ -5,40 +5,173 @@ CFLAGS = -Wall -Wextra -std=c99 -g -O2

```
INCLUDES = -I. -Inostr_core_lib -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket
LIBS = -lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k1 -lssl -lcrypto -L/usr/local/lib -lcurl

# Build directory
BUILD_DIR = build

# Source files
MAIN_SRC = src/main.c
MAIN_SRC = src/main.c src/config.c
NOSTR_CORE_LIB = nostr_core_lib/libnostr_core_x64.a

# Target binary
TARGET = src/main
# Architecture detection
ARCH = $(shell uname -m)
ifeq ($(ARCH),x86_64)
    TARGET = $(BUILD_DIR)/c_relay_x86
else ifeq ($(ARCH),aarch64)
    TARGET = $(BUILD_DIR)/c_relay_arm64
else ifeq ($(ARCH),arm64)
    TARGET = $(BUILD_DIR)/c_relay_arm64
else
    TARGET = $(BUILD_DIR)/c_relay_$(ARCH)
endif

# Default target
all: $(TARGET)

# Create build directory
$(BUILD_DIR):
    mkdir -p $(BUILD_DIR)

# Check if nostr_core_lib is built
$(NOSTR_CORE_LIB):
    @echo "Building nostr_core_lib..."
    cd nostr_core_lib && ./build.sh

# Generate version.h from git tags
src/version.h:
    @if [ -d .git ]; then \
        echo "Generating version.h from git tags..."; \
        RAW_VERSION=$$(git describe --tags --always 2>/dev/null || echo "unknown"); \
        if echo "$$RAW_VERSION" | grep -q "^v[0-9]"; then \
            CLEAN_VERSION=$$(echo "$$RAW_VERSION" | sed 's/^v//' | cut -d- -f1); \
            VERSION="v$$CLEAN_VERSION"; \
            MAJOR=$$(echo "$$CLEAN_VERSION" | cut -d. -f1); \
            MINOR=$$(echo "$$CLEAN_VERSION" | cut -d. -f2); \
            PATCH=$$(echo "$$CLEAN_VERSION" | cut -d. -f3); \
        else \
            VERSION="v0.0.0"; \
            MAJOR=0; MINOR=0; PATCH=0; \
        fi; \
        echo "/* Auto-generated version information */" > src/version.h; \
        echo "#ifndef VERSION_H" >> src/version.h; \
        echo "#define VERSION_H" >> src/version.h; \
        echo "" >> src/version.h; \
        echo "#define VERSION \"$$VERSION\"" >> src/version.h; \
        echo "#define VERSION_MAJOR $$MAJOR" >> src/version.h; \
        echo "#define VERSION_MINOR $$MINOR" >> src/version.h; \
        echo "#define VERSION_PATCH $$PATCH" >> src/version.h; \
        echo "" >> src/version.h; \
        echo "#endif /* VERSION_H */" >> src/version.h; \
        echo "Generated version.h with clean version: $$VERSION"; \
    elif [ ! -f src/version.h ]; then \
        echo "Git not available and version.h missing, creating fallback version.h..."; \
        VERSION="v0.0.0"; \
        echo "/* Auto-generated version information */" > src/version.h; \
        echo "#ifndef VERSION_H" >> src/version.h; \
        echo "#define VERSION_H" >> src/version.h; \
        echo "" >> src/version.h; \
        echo "#define VERSION \"$$VERSION\"" >> src/version.h; \
        echo "#define VERSION_MAJOR 0" >> src/version.h; \
        echo "#define VERSION_MINOR 0" >> src/version.h; \
        echo "#define VERSION_PATCH 0" >> src/version.h; \
        echo "" >> src/version.h; \
        echo "#endif /* VERSION_H */" >> src/version.h; \
        echo "Created fallback version.h with version: $$VERSION"; \
    else \
        echo "Git not available, preserving existing version.h"; \
    fi

# Force version.h regeneration (useful for development)
force-version:
    @echo "Force regenerating version.h..."
    @rm -f src/version.h
    @$(MAKE) src/version.h

# Build the relay
$(TARGET): $(MAIN_SRC) $(NOSTR_CORE_LIB)
    @echo "Compiling C-Relay..."
$(TARGET): $(BUILD_DIR) src/version.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
    @echo "Compiling C-Relay for architecture: $(ARCH)"
    $(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(TARGET) $(NOSTR_CORE_LIB) $(LIBS)
    @echo "Build complete: $(TARGET)"

# Build for specific architectures
x86: $(BUILD_DIR) src/version.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
    @echo "Building C-Relay for x86_64..."
    $(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_x86 $(NOSTR_CORE_LIB) $(LIBS)
    @echo "Build complete: $(BUILD_DIR)/c_relay_x86"

arm64: $(BUILD_DIR) src/version.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
    @echo "Cross-compiling C-Relay for ARM64..."
    @if ! command -v aarch64-linux-gnu-gcc >/dev/null 2>&1; then \
        echo "ERROR: ARM64 cross-compiler not found."; \
        echo "Install with: make install-cross-tools"; \
        echo "Or install manually: sudo apt install gcc-aarch64-linux-gnu"; \
        exit 1; \
    fi
    @echo "Checking for ARM64 development libraries..."
    @if ! dpkg -l | grep -q "libssl-dev:arm64\|libsqlite3-dev:arm64"; then \
        echo "ERROR: ARM64 libraries not found. Cross-compilation requires ARM64 versions of:"; \
        echo "  - libssl-dev:arm64"; \
        echo "  - libsqlite3-dev:arm64"; \
        echo "  - libwebsockets-dev:arm64"; \
        echo "  - libsecp256k1-dev:arm64"; \
        echo "  - zlib1g-dev:arm64"; \
        echo "  - libcurl4-openssl-dev:arm64"; \
        echo ""; \
        echo "Install ARM64 libraries with: make install-arm64-deps"; \
        echo "Or use Docker for cross-platform builds."; \
        exit 1; \
    fi
    @echo "Using aarch64-linux-gnu-gcc with ARM64 libraries..."
    PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig:/usr/share/pkgconfig \
    aarch64-linux-gnu-gcc $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_arm64 $(NOSTR_CORE_LIB) \
        -L/usr/lib/aarch64-linux-gnu $(LIBS)
    @echo "Build complete: $(BUILD_DIR)/c_relay_arm64"

# Install ARM64 cross-compilation dependencies
install-arm64-deps:
    @echo "Installing ARM64 cross-compilation dependencies..."
    @echo "This requires adding ARM64 architecture and installing cross-libraries..."
    sudo dpkg --add-architecture arm64
    sudo apt update
    sudo apt install -y \
        gcc-aarch64-linux-gnu \
        libc6-dev-arm64-cross \
        libssl-dev:arm64 \
        libsqlite3-dev:arm64 \
        zlib1g-dev:arm64 \
        libcurl4-openssl-dev:arm64
    @echo "Note: libwebsockets-dev:arm64 and libsecp256k1-dev:arm64 may need manual building"

# Install cross-compilation tools
install-cross-tools:
    @echo "Installing cross-compilation tools..."
    sudo apt update
    sudo apt install -y gcc-aarch64-linux-gnu libc6-dev-arm64-cross

# Check what architectures we can actually build
check-toolchain:
    @echo "Checking available toolchains:"
    @echo "Native compiler: $(shell $(CC) --version | head -1)"
    @if command -v aarch64-linux-gnu-gcc >/dev/null 2>&1; then \
        echo "ARM64 cross-compiler: $(shell aarch64-linux-gnu-gcc --version | head -1)"; \
    else \
        echo "ARM64 cross-compiler: NOT INSTALLED (install with 'make install-cross-tools')"; \
    fi

# Run tests
test: $(TARGET)
    @echo "Running tests..."
    ./tests/1_nip_test.sh

# Initialize database
# Initialize database (now handled automatically when server starts)
init-db:
    @echo "Initializing database..."
    ./db/init.sh --force
    @echo "Database initialization is now handled automatically when the server starts."
    @echo "The schema is embedded in the binary - no external files needed."
    @echo "To manually recreate database: rm -f db/c_nostr_relay.db && ./build/c_relay_x86"

# Clean build artifacts
clean:
    rm -f $(TARGET)
    rm -rf $(BUILD_DIR)
    rm -f src/version.h
    @echo "Clean complete"

# Clean everything including nostr_core_lib
```

@@ -56,17 +189,27 @@ help:

```
    @echo "C-Relay Build System"
    @echo ""
    @echo "Targets:"
    @echo "  all           Build the relay (default)"
    @echo "  test          Build and run tests"
    @echo "  init-db       Initialize the database"
    @echo "  clean         Clean build artifacts"
    @echo "  clean-all     Clean everything including dependencies"
    @echo "  install-deps  Install system dependencies"
    @echo "  help          Show this help"
    @echo "  all                  Build the relay for current architecture (default)"
    @echo "  x86                  Build specifically for x86_64"
    @echo "  arm64                Build for ARM64 (requires cross-compilation setup)"
    @echo "  test                 Build and run tests"
    @echo "  init-db              Initialize the database"
    @echo "  clean                Clean build artifacts"
    @echo "  clean-all            Clean everything including dependencies"
    @echo "  install-deps         Install system dependencies"
    @echo "  install-cross-tools  Install basic ARM64 cross-compiler"
    @echo "  install-arm64-deps   Install ARM64 cross-compilation libraries"
    @echo "  check-toolchain      Check available compilers"
    @echo "  help                 Show this help"
    @echo ""
    @echo "Usage:"
    @echo "  make          # Build the relay"
    @echo "  make test     # Run tests"
    @echo "  make init-db  # Set up database"
    @echo "  make                     # Build the relay for current arch"
    @echo "  make x86                 # Build for x86_64"
    @echo "  make arm64               # Build for ARM64 (fails if cross-compilation not set up)"
    @echo "  make install-arm64-deps  # Install full ARM64 cross-compilation setup"
    @echo "  make check-toolchain     # Check what compilers are available"
    @echo "  make test                # Run tests"
    @echo "  make init-db             # Set up database"
    @echo "  make force-version       # Force regenerate version.h from git"

.PHONY: all test init-db clean clean-all install-deps help
.PHONY: all x86 arm64 test init-db clean clean-all install-deps install-cross-tools install-arm64-deps check-toolchain help force-version
```
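As a quick orientation to the new architecture-aware targets (commands taken from the Makefile's own help text; output paths from the target definitions above), a typical cross-build session looks like:

```bash
make check-toolchain      # report native and ARM64 compilers
make                      # build for the current architecture
make install-arm64-deps   # one-time ARM64 cross-compilation setup
make arm64                # produces build/c_relay_arm64
```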
**README.md** (18 lines changed)

@@ -1,4 +1,22 @@

A nostr relay in C with sqlite on the back end.

<!--
NOTE FOR ASSISTANTS: When updating the NIPs checklist below, ONLY change [ ] to [x] to mark as complete.
Do NOT modify the formatting, add emojis, or change the text. Keep the simple format consistent.
-->

### [NIPs](https://github.com/nostr-protocol/nips)

- [x] NIP-01: Basic protocol flow implementation
- [x] NIP-09: Event deletion
- [x] NIP-11: Relay information document
- [x] NIP-13: Proof of Work
- [x] NIP-15: End of Stored Events Notice
- [x] NIP-20: Command Results
- [x] NIP-33: Parameterized Replaceable Events
- [x] NIP-40: Expiration Timestamp
- [ ] NIP-42: Authentication of clients to relays
- [ ] NIP-45: Counting results.
- [ ] NIP-50: Keywords filter.
- [ ] NIP-70: Protected Events
**build_and_push.sh** (new executable file, 471 lines)

@@ -0,0 +1,471 @@

```bash
#!/bin/bash
set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

print_status() { echo -e "${BLUE}[INFO]${NC} $1"; }
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
print_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Global variables
COMMIT_MESSAGE=""
RELEASE_MODE=false

# Usage text (defined before the argument-parsing loop so -h/--help can call it)
show_usage() {
    echo "C-Relay Build and Push Script"
    echo ""
    echo "Usage:"
    echo "  $0 \"commit message\"     - Default: compile, increment patch, commit & push"
    echo "  $0 -r \"commit message\"  - Release: compile x86+arm64, increment minor, create release"
    echo ""
    echo "Examples:"
    echo "  $0 \"Fixed event validation bug\""
    echo "  $0 --release \"Major release with new features\""
    echo ""
    echo "Default Mode (patch increment):"
    echo "  - Compile C-Relay"
    echo "  - Increment patch version (v1.2.3 → v1.2.4)"
    echo "  - Git add, commit with message, and push"
    echo ""
    echo "Release Mode (-r flag):"
    echo "  - Compile C-Relay for x86_64 and arm64"
    echo "  - Increment minor version, zero patch (v1.2.3 → v1.3.0)"
    echo "  - Git add, commit, push, and create Gitea release"
    echo ""
    echo "Requirements for Release Mode:"
    echo "  - For ARM64 builds: make install-arm64-deps (optional - will build x86_64 only if missing)"
    echo "  - Gitea token in ~/.gitea_token for release uploads"
}

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -r|--release)
            RELEASE_MODE=true
            shift
            ;;
        -h|--help)
            show_usage
            exit 0
            ;;
        *)
            # First non-flag argument is the commit message
            if [[ -z "$COMMIT_MESSAGE" ]]; then
                COMMIT_MESSAGE="$1"
            fi
            shift
            ;;
    esac
done

# Validate inputs
if [[ -z "$COMMIT_MESSAGE" ]]; then
    print_error "Commit message is required"
    echo ""
    show_usage
    exit 1
fi

# Check if we're in a git repository
check_git_repo() {
    if ! git rev-parse --git-dir > /dev/null 2>&1; then
        print_error "Not in a git repository"
        exit 1
    fi
}

# Function to get current version and increment appropriately
increment_version() {
    local increment_type="$1"  # "patch" or "minor"

    print_status "Getting current version..."

    # Get the highest version tag (not chronologically latest)
    LATEST_TAG=$(git tag -l 'v*.*.*' | sort -V | tail -n 1 || echo "")
    if [[ -z "$LATEST_TAG" ]]; then
        LATEST_TAG="v0.0.0"
        print_warning "No version tags found, starting from $LATEST_TAG"
    fi

    # Extract version components (remove 'v' prefix)
    VERSION=${LATEST_TAG#v}

    # Parse major.minor.patch using regex
    if [[ $VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
        MAJOR=${BASH_REMATCH[1]}
        MINOR=${BASH_REMATCH[2]}
        PATCH=${BASH_REMATCH[3]}
    else
        print_error "Invalid version format in tag: $LATEST_TAG"
        print_error "Expected format: v0.1.0"
        exit 1
    fi

    # Increment version based on type
    if [[ "$increment_type" == "minor" ]]; then
        # Minor release: increment minor, zero patch
        NEW_MINOR=$((MINOR + 1))
        NEW_PATCH=0
        NEW_VERSION="v${MAJOR}.${NEW_MINOR}.${NEW_PATCH}"
        print_status "Release mode: incrementing minor version"
    else
        # Default: increment patch
        NEW_PATCH=$((PATCH + 1))
        NEW_VERSION="v${MAJOR}.${MINOR}.${NEW_PATCH}"
        print_status "Default mode: incrementing patch version"
    fi

    print_status "Current version: $LATEST_TAG"
    print_status "New version: $NEW_VERSION"

    # Export for use in other functions
    export NEW_VERSION
}

# Function to compile the C-Relay project
compile_project() {
    print_status "Compiling C-Relay..."

    # Clean previous build
    if make clean > /dev/null 2>&1; then
        print_success "Cleaned previous build"
    else
        print_warning "Clean failed or no Makefile found"
    fi

    # Force regenerate version.h to pick up new tags
    if make force-version > /dev/null 2>&1; then
        print_success "Regenerated version.h"
    else
        print_warning "Failed to regenerate version.h"
    fi

    # Compile the project
    if make > /dev/null 2>&1; then
        print_success "C-Relay compiled successfully"
    else
        print_error "Compilation failed"
        exit 1
    fi
}

# Function to build release binaries
build_release_binaries() {
    print_status "Building release binaries..."

    # Build x86_64 version
    print_status "Building x86_64 version..."
    make clean > /dev/null 2>&1
    if make x86 > /dev/null 2>&1; then
        if [[ -f "build/c_relay_x86" ]]; then
            cp build/c_relay_x86 c-relay-x86_64
            print_success "x86_64 binary created: c-relay-x86_64"
        else
            print_error "x86_64 binary not found after compilation"
            exit 1
        fi
    else
        print_error "x86_64 build failed"
        exit 1
    fi

    # Try to build ARM64 version
    print_status "Attempting ARM64 build..."
    make clean > /dev/null 2>&1
    if make arm64 > /dev/null 2>&1; then
        if [[ -f "build/c_relay_arm64" ]]; then
            cp build/c_relay_arm64 c-relay-arm64
            print_success "ARM64 binary created: c-relay-arm64"
        else
            print_warning "ARM64 binary not found after compilation"
        fi
    else
        print_warning "ARM64 build failed - ARM64 cross-compilation not properly set up"
        print_status "Only x86_64 binary will be included in release"
    fi

    # Restore normal build
    make clean > /dev/null 2>&1
    make > /dev/null 2>&1
}

# Function to commit and push changes
# (kept for reference; main() below uses the no-tag variant in both modes)
git_commit_and_push() {
    print_status "Preparing git commit..."

    # Stage all changes
    if git add . > /dev/null 2>&1; then
        print_success "Staged all changes"
    else
        print_error "Failed to stage changes"
        exit 1
    fi

    # Check if there are changes to commit
    if git diff --staged --quiet; then
        print_warning "No changes to commit"
    else
        # Commit changes
        if git commit -m "$NEW_VERSION - $COMMIT_MESSAGE" > /dev/null 2>&1; then
            print_success "Committed changes"
        else
            print_error "Failed to commit changes"
            exit 1
        fi
    fi

    # Create new git tag
    if git tag "$NEW_VERSION" > /dev/null 2>&1; then
        print_success "Created tag: $NEW_VERSION"
    else
        print_warning "Tag $NEW_VERSION already exists"
    fi

    # Push changes and tags
    print_status "Pushing to remote repository..."
    if git push > /dev/null 2>&1; then
        print_success "Pushed changes"
    else
        print_error "Failed to push changes"
        exit 1
    fi

    # Push only the new tag to avoid conflicts with existing tags
    if git push origin "$NEW_VERSION" > /dev/null 2>&1; then
        print_success "Pushed tag: $NEW_VERSION"
    else
        print_warning "Tag push failed, trying force push..."
        if git push --force origin "$NEW_VERSION" > /dev/null 2>&1; then
            print_success "Force-pushed updated tag: $NEW_VERSION"
        else
            print_error "Failed to push tag: $NEW_VERSION"
            exit 1
        fi
    fi
}

# Function to commit and push changes without creating a tag (tag already created)
git_commit_and_push_no_tag() {
    print_status "Preparing git commit..."

    # Stage all changes
    if git add . > /dev/null 2>&1; then
        print_success "Staged all changes"
    else
        print_error "Failed to stage changes"
        exit 1
    fi

    # Check if there are changes to commit
    if git diff --staged --quiet; then
        print_warning "No changes to commit"
    else
        # Commit changes
        if git commit -m "$NEW_VERSION - $COMMIT_MESSAGE" > /dev/null 2>&1; then
            print_success "Committed changes"
        else
            print_error "Failed to commit changes"
            exit 1
        fi
    fi

    # Push changes and tags
    print_status "Pushing to remote repository..."
    if git push > /dev/null 2>&1; then
        print_success "Pushed changes"
    else
        print_error "Failed to push changes"
        exit 1
    fi

    # Push only the new tag to avoid conflicts with existing tags
    if git push origin "$NEW_VERSION" > /dev/null 2>&1; then
        print_success "Pushed tag: $NEW_VERSION"
    else
        print_warning "Tag push failed, trying force push..."
        if git push --force origin "$NEW_VERSION" > /dev/null 2>&1; then
            print_success "Force-pushed updated tag: $NEW_VERSION"
        else
            print_error "Failed to push tag: $NEW_VERSION"
            exit 1
        fi
    fi
}

# Function to create Gitea release
create_gitea_release() {
    print_status "Creating Gitea release..."

    # Check for Gitea token
    if [[ ! -f "$HOME/.gitea_token" ]]; then
        print_warning "No ~/.gitea_token found. Skipping release creation."
        print_warning "Create ~/.gitea_token with your Gitea access token to enable releases."
        return 0
    fi

    local token=$(cat "$HOME/.gitea_token" | tr -d '\n\r')
    local api_url="https://git.laantungir.net/api/v1/repos/laantungir/c-relay"

    # Create release
    print_status "Creating release $NEW_VERSION..."
    local response=$(curl -s -X POST "$api_url/releases" \
        -H "Authorization: token $token" \
        -H "Content-Type: application/json" \
        -d "{\"tag_name\": \"$NEW_VERSION\", \"name\": \"$NEW_VERSION\", \"body\": \"$COMMIT_MESSAGE\"}")

    if echo "$response" | grep -q '"id"'; then
        print_success "Created release $NEW_VERSION"
        upload_release_binaries "$api_url" "$token"
    elif echo "$response" | grep -q "already exists"; then
        print_warning "Release $NEW_VERSION already exists"
        upload_release_binaries "$api_url" "$token"
    else
        print_error "Failed to create release $NEW_VERSION"
        print_error "Response: $response"

        # Try to check if the release exists anyway
        print_status "Checking if release exists..."
        local check_response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
        if echo "$check_response" | grep -q '"id"'; then
            print_warning "Release exists but creation response was unexpected"
            upload_release_binaries "$api_url" "$token"
        else
            print_error "Release does not exist and creation failed"
            return 1
        fi
    fi
}

# Function to upload release binaries
upload_release_binaries() {
    local api_url="$1"
    local token="$2"

    # Get release ID with more robust parsing
    print_status "Getting release ID for $NEW_VERSION..."
    local response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
    local release_id=$(echo "$response" | grep -o '"id":[0-9]*' | head -n1 | cut -d: -f2)

    if [[ -z "$release_id" ]]; then
        print_error "Could not get release ID for $NEW_VERSION"
        print_error "API Response: $response"

        # Try to list all releases to debug
        print_status "Available releases:"
        curl -s -H "Authorization: token $token" "$api_url/releases" | grep -o '"tag_name":"[^"]*"' | head -5
        return 1
    fi

    print_success "Found release ID: $release_id"

    # Upload x86_64 binary
    if [[ -f "c-relay-x86_64" ]]; then
        print_status "Uploading x86_64 binary..."
        if curl -s -X POST "$api_url/releases/$release_id/assets" \
            -H "Authorization: token $token" \
            -F "attachment=@c-relay-x86_64;filename=c-relay-${NEW_VERSION}-linux-x86_64" > /dev/null; then
            print_success "Uploaded x86_64 binary"
        else
            print_warning "Failed to upload x86_64 binary"
        fi
    fi

    # Upload ARM64 binary
    if [[ -f "c-relay-arm64" ]]; then
        print_status "Uploading ARM64 binary..."
        if curl -s -X POST "$api_url/releases/$release_id/assets" \
            -H "Authorization: token $token" \
            -F "attachment=@c-relay-arm64;filename=c-relay-${NEW_VERSION}-linux-arm64" > /dev/null; then
            print_success "Uploaded ARM64 binary"
        else
            print_warning "Failed to upload ARM64 binary"
        fi
    fi
}

# Function to clean up release binaries
cleanup_release_binaries() {
    if [[ -f "c-relay-x86_64" ]]; then
        rm -f c-relay-x86_64
        print_status "Cleaned up x86_64 binary"
    fi
    if [[ -f "c-relay-arm64" ]]; then
        rm -f c-relay-arm64
        print_status "Cleaned up ARM64 binary"
    fi
}

# Main execution
main() {
    print_status "C-Relay Build and Push Script"

    # Check prerequisites
    check_git_repo

    if [[ "$RELEASE_MODE" == true ]]; then
        print_status "=== RELEASE MODE ==="

        # Increment minor version for releases
        increment_version "minor"

        # Create new git tag BEFORE compilation so version.h picks it up
        if git tag "$NEW_VERSION" > /dev/null 2>&1; then
            print_success "Created tag: $NEW_VERSION"
        else
            print_warning "Tag $NEW_VERSION already exists, removing and recreating..."
            git tag -d "$NEW_VERSION" > /dev/null 2>&1
            git tag "$NEW_VERSION" > /dev/null 2>&1
        fi

        # Compile project first (will now pick up the new tag)
        compile_project

        # Build release binaries
        build_release_binaries

        # Commit and push (but skip tag creation since we already did it)
        git_commit_and_push_no_tag

        # Create Gitea release with binaries
        create_gitea_release

        # Cleanup
        cleanup_release_binaries

        print_success "Release $NEW_VERSION completed successfully!"
        print_status "Binaries uploaded to Gitea release"

    else
        print_status "=== DEFAULT MODE ==="

        # Increment patch version for regular commits
        increment_version "patch"

        # Create new git tag BEFORE compilation so version.h picks it up
        if git tag "$NEW_VERSION" > /dev/null 2>&1; then
            print_success "Created tag: $NEW_VERSION"
        else
            print_warning "Tag $NEW_VERSION already exists, removing and recreating..."
            git tag -d "$NEW_VERSION" > /dev/null 2>&1
            git tag "$NEW_VERSION" > /dev/null 2>&1
        fi

        # Compile project (will now pick up the new tag)
        compile_project

        # Commit and push (but skip tag creation since we already did it)
        git_commit_and_push_no_tag

        print_success "Build and push completed successfully!"
        print_status "Version $NEW_VERSION pushed to repository"
    fi
}

# Execute main function
main
```
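Release mode reads the Gitea API token from `~/.gitea_token`. A plausible one-time setup (the token value is a placeholder, not a real credential):

```bash
# Store the Gitea access token where the script expects it
echo "YOUR_GITEA_TOKEN" > ~/.gitea_token
chmod 600 ~/.gitea_token   # keep the token readable only by the owner
```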
**db/README.md** (229 lines changed)

@@ -1,228 +1 @@

# C Nostr Relay Database

This directory contains the SQLite database schema and initialization scripts for the C Nostr Relay implementation.

## Files

- **`schema.sql`** - Complete database schema based on nostr-rs-relay v18
- **`init.sh`** - Database initialization script
- **`c_nostr_relay.db`** - SQLite database file (created after running init.sh)

## Quick Start

1. **Initialize the database:**
   ```bash
   cd db
   ./init.sh
   ```

2. **Force reinitialize (removes existing database):**
   ```bash
   ./init.sh --force
   ```

3. **Initialize with optimization and info:**
   ```bash
   ./init.sh --info --optimize
   ```

## Database Schema

The schema is fully compatible with the Nostr protocol and includes:

### Core Tables

- **`event`** - Main event storage with all Nostr event data
- **`tag`** - Denormalized tag index for efficient queries
- **`user_verification`** - NIP-05 verification tracking
- **`account`** - User account management (optional)
- **`invoice`** - Lightning payment tracking (optional)

### Key Features

- ✅ **NIP-01 compliant** - Full basic protocol support
- ✅ **Replaceable events** - Supports kinds 0, 3, 10000-19999
- ✅ **Parameterized replaceable** - Supports kinds 30000-39999 with `d` tags
- ✅ **Event deletion** - NIP-09 soft deletion with `hidden` column
- ✅ **Event expiration** - NIP-40 automatic cleanup
- ✅ **Authentication** - NIP-42 client authentication
- ✅ **NIP-05 verification** - Domain-based identity verification
- ✅ **Performance optimized** - Comprehensive indexing strategy

### Schema Version

Current version: **v18** (compatible with nostr-rs-relay v18)

## Database Structure

### Event Storage
```sql
CREATE TABLE event (
    id INTEGER PRIMARY KEY,
    event_hash BLOB NOT NULL,      -- 32-byte SHA256 hash
    first_seen INTEGER NOT NULL,   -- relay receive timestamp
    created_at INTEGER NOT NULL,   -- event creation timestamp
    expires_at INTEGER,            -- NIP-40 expiration
    author BLOB NOT NULL,          -- 32-byte pubkey
    delegated_by BLOB,             -- NIP-26 delegator
    kind INTEGER NOT NULL,         -- event kind
    hidden INTEGER DEFAULT FALSE,  -- soft deletion flag
    content TEXT NOT NULL          -- complete JSON event
);
```

### Tag Indexing
```sql
CREATE TABLE tag (
    id INTEGER PRIMARY KEY,
    event_id INTEGER NOT NULL,
    name TEXT,                     -- tag name ("e", "p", etc.)
    value TEXT,                    -- tag value
    created_at INTEGER NOT NULL,   -- denormalized for performance
    kind INTEGER NOT NULL          -- denormalized for performance
);
```

## Performance Features

### Optimized Indexes
- **Hash-based lookups** - `event_hash_index` for O(1) event retrieval
- **Author queries** - `author_index`, `author_created_at_index`
- **Kind filtering** - `kind_index`, `kind_created_at_index`
- **Tag searching** - `tag_covering_index` for efficient tag queries
- **Composite queries** - Multi-column indexes for complex filters

### Query Optimization
- **Denormalized tags** - Includes `kind` and `created_at` in tag table
- **Binary storage** - BLOBs for hex data (pubkeys, hashes); see the sketch after this list
- **WAL mode** - Write-Ahead Logging for concurrent access
- **Automatic cleanup** - Triggers for data integrity
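To make the BLOB point concrete, here is a minimal sketch (not taken from the relay's source; the helper names are hypothetical) that decodes hex identifiers into raw bytes and binds them with the sqlite3 C API:

```c
#include <sqlite3.h>
#include <stdio.h>
#include <string.h>

/* Decode a hex string into out_len raw bytes; returns 0 on success. */
static int hex_to_bytes(const char *hex, unsigned char *out, size_t out_len) {
    if (strlen(hex) != out_len * 2) return -1;
    for (size_t i = 0; i < out_len; i++)
        if (sscanf(hex + 2 * i, "%2hhx", &out[i]) != 1) return -1;
    return 0;
}

/* Insert an event row, storing the hash and pubkey as 32-byte BLOBs. */
int insert_event(sqlite3 *db, const char *hash_hex, const char *pubkey_hex,
                 long long created_at, int kind, const char *json) {
    unsigned char hash[32], author[32];
    if (hex_to_bytes(hash_hex, hash, 32) || hex_to_bytes(pubkey_hex, author, 32))
        return SQLITE_MISUSE;

    sqlite3_stmt *stmt;
    const char *sql =
        "INSERT INTO event (event_hash, first_seen, created_at, author, kind, content) "
        "VALUES (?, strftime('%s','now'), ?, ?, ?, ?);";
    int rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) return rc;

    sqlite3_bind_blob(stmt, 1, hash, 32, SQLITE_STATIC);
    sqlite3_bind_int64(stmt, 2, created_at);
    sqlite3_bind_blob(stmt, 3, author, 32, SQLITE_STATIC);
    sqlite3_bind_int(stmt, 4, kind);
    sqlite3_bind_text(stmt, 5, json, -1, SQLITE_STATIC);

    rc = sqlite3_step(stmt);  /* SQLITE_DONE on success */
    sqlite3_finalize(stmt);
    return rc == SQLITE_DONE ? SQLITE_OK : rc;
}
```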
## Usage Examples

### Basic Operations

1. **Insert an event:**
   ```sql
   INSERT INTO event (event_hash, first_seen, created_at, author, kind, content)
   VALUES (?, ?, ?, ?, ?, ?);
   ```

2. **Query by author:**
   ```sql
   SELECT content FROM event
   WHERE author = ? AND hidden != TRUE
   ORDER BY created_at DESC;
   ```

3. **Filter by tags:**
   ```sql
   SELECT e.content FROM event e
   JOIN tag t ON e.id = t.event_id
   WHERE t.name = 'p' AND t.value = ? AND e.hidden != TRUE;
   ```

### Advanced Queries

1. **Get replaceable event (latest only):**
   ```sql
   SELECT content FROM event
   WHERE author = ? AND kind = ? AND hidden != TRUE
   ORDER BY created_at DESC LIMIT 1;
   ```

2. **Tag-based filtering (NIP-01 filters):**
   ```sql
   SELECT e.content FROM event e
   WHERE e.id IN (
       SELECT t.event_id FROM tag t
       WHERE t.name = ? AND t.value IN (?, ?, ?)
   ) AND e.hidden != TRUE;
   ```

## Maintenance

### Regular Operations

1. **Check database integrity:**
   ```bash
   sqlite3 c_nostr_relay.db "PRAGMA integrity_check;"
   ```

2. **Optimize database:**
   ```bash
   sqlite3 c_nostr_relay.db "PRAGMA optimize; VACUUM; ANALYZE;"
   ```

3. **Clean expired events:**
   ```sql
   DELETE FROM event WHERE expires_at <= strftime('%s', 'now');
   ```

### Monitoring

1. **Database size:**
   ```bash
   ls -lh c_nostr_relay.db
   ```

2. **Table statistics:**
   ```sql
   SELECT name, COUNT(*) as count FROM (
       SELECT 'events' as name FROM event UNION ALL
       SELECT 'tags' as name FROM tag UNION ALL
       SELECT 'verifications' as name FROM user_verification
   ) GROUP BY name;
   ```

## Migration Support

The schema includes a migration system for future updates:

```sql
CREATE TABLE schema_info (
    version INTEGER PRIMARY KEY,
    applied_at INTEGER NOT NULL,
    description TEXT
);
```
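For instance (an illustrative row, not taken from the project's migrations; the version number and description are hypothetical), applying a schema update would be recorded like this:

```sql
-- Record that a hypothetical migration 19 was applied
INSERT INTO schema_info (version, applied_at, description)
VALUES (19, strftime('%s', 'now'), 'Add NIP-42 auth tables');

-- Check the latest applied schema version
SELECT MAX(version) FROM schema_info;
```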
## Security Considerations

1. **Input validation** - Always validate event JSON and signatures
2. **Rate limiting** - Implement at application level
3. **Access control** - Use `account` table for permissions
4. **Backup strategy** - Regular database backups recommended

## Compatibility

- **SQLite version** - Requires SQLite 3.8.0+
- **nostr-rs-relay** - Schema compatible with v18
- **NIPs supported** - 01, 02, 05, 09, 10, 11, 26, 40, 42
- **C libraries** - Compatible with sqlite3 C API

## Troubleshooting

### Common Issues

1. **Database locked error:**
   - Ensure proper connection closing in your C code
   - Check for long-running transactions

2. **Performance issues:**
   - Run `PRAGMA optimize;` regularly
   - Consider `VACUUM` if database grew significantly

3. **Schema errors:**
   - Verify SQLite version compatibility
   - Check foreign key constraints

### Getting Help

- Check the main project README for C implementation details
- Review nostr-rs-relay documentation for reference implementation
- Consult Nostr NIPs for protocol specifications

## License

This database schema is part of the C Nostr Relay project and follows the same license terms.

The 228 lines above were removed; the file was reduced to a single line:

Only README.md will remain

Binary file not shown.
Binary file not shown.
Binary file not shown.
**db/init.sh** (deleted, 234 lines)

@@ -1,234 +0,0 @@

```bash
#!/bin/bash

# C Nostr Relay Database Initialization Script
# Creates and initializes the SQLite database with proper schema

set -e  # Exit on any error

# Configuration
DB_DIR="$(dirname "$0")"
DB_NAME="c_nostr_relay.db"
DB_PATH="${DB_DIR}/${DB_NAME}"
SCHEMA_FILE="${DB_DIR}/schema.sql"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'  # No Color

# Logging functions
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Check if SQLite3 is installed
check_sqlite() {
    if ! command -v sqlite3 &> /dev/null; then
        log_error "sqlite3 is not installed. Please install it first:"
        echo "  Ubuntu/Debian: sudo apt-get install sqlite3"
        echo "  CentOS/RHEL:   sudo yum install sqlite"
        echo "  macOS:         brew install sqlite3"
        exit 1
    fi

    local version=$(sqlite3 --version | cut -d' ' -f1)
    log_info "Using SQLite version: $version"
}

# Create database directory if it doesn't exist
create_db_directory() {
    if [ ! -d "$DB_DIR" ]; then
        log_info "Creating database directory: $DB_DIR"
        mkdir -p "$DB_DIR"
    fi
}

# Backup existing database if it exists
backup_existing_db() {
    if [ -f "$DB_PATH" ]; then
        local backup_path="${DB_PATH}.backup.$(date +%Y%m%d_%H%M%S)"
        log_warning "Existing database found. Creating backup: $backup_path"
        cp "$DB_PATH" "$backup_path"
    fi
}

# Initialize the database with schema
init_database() {
    log_info "Initializing database: $DB_PATH"

    if [ ! -f "$SCHEMA_FILE" ]; then
        log_error "Schema file not found: $SCHEMA_FILE"
        exit 1
    fi

    # Remove existing database if --force flag is used
    if [ "$1" = "--force" ] && [ -f "$DB_PATH" ]; then
        log_warning "Force flag detected. Removing existing database."
        rm -f "$DB_PATH"
    fi

    # Create the database and apply schema
    log_info "Applying schema from: $SCHEMA_FILE"
    if sqlite3 "$DB_PATH" < "$SCHEMA_FILE"; then
        log_success "Database schema applied successfully"
    else
        log_error "Failed to apply database schema"
        exit 1
    fi
}

# Verify database integrity
verify_database() {
    log_info "Verifying database integrity..."

    # Check if database file exists and is not empty
    if [ ! -s "$DB_PATH" ]; then
        log_error "Database file is empty or doesn't exist"
        exit 1
    fi

    # Run SQLite integrity check
    local integrity_result=$(sqlite3 "$DB_PATH" "PRAGMA integrity_check;")
    if [ "$integrity_result" = "ok" ]; then
        log_success "Database integrity check passed"
    else
        log_error "Database integrity check failed: $integrity_result"
        exit 1
    fi

    # Verify schema version
    local schema_version=$(sqlite3 "$DB_PATH" "PRAGMA user_version;")
    log_info "Database schema version: $schema_version"

    # Check that main tables exist
    local table_count=$(sqlite3 "$DB_PATH" "SELECT count(*) FROM sqlite_master WHERE type='table' AND name IN ('events', 'schema_info');")
    if [ "$table_count" -eq 2 ]; then
        log_success "Core tables created successfully"
    else
        log_error "Missing core tables (expected 2, found $table_count)"
        exit 1
    fi
}

# Display database information
show_db_info() {
    log_info "Database Information:"
    echo "  Location: $DB_PATH"
    echo "  Size: $(du -h "$DB_PATH" | cut -f1)"

    log_info "Database Tables:"
    sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;" | sed 's/^/  - /'

    log_info "Database Indexes:"
    sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='index' AND name NOT LIKE 'sqlite_%' ORDER BY name;" | sed 's/^/  - /'

    log_info "Database Views:"
    sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='view' ORDER BY name;" | sed 's/^/  - /'
}

# Run database optimization
optimize_database() {
    log_info "Running database optimization..."
    sqlite3 "$DB_PATH" "PRAGMA optimize; VACUUM; ANALYZE;"
    log_success "Database optimization completed"
}

# Print usage information
print_usage() {
    echo "Usage: $0 [OPTIONS]"
    echo ""
    echo "Initialize SQLite database for C Nostr Relay"
    echo ""
    echo "Options:"
    echo "  --force     Remove existing database before initialization"
    echo "  --info      Show database information after initialization"
    echo "  --optimize  Run database optimization after initialization"
    echo "  --help      Show this help message"
    echo ""
    echo "Examples:"
    echo "  $0                    # Initialize database (with backup if exists)"
    echo "  $0 --force            # Force reinitialize database"
    echo "  $0 --info --optimize  # Initialize with info and optimization"
}

# Main execution
main() {
    local force_flag=false
    local show_info=false
    local optimize=false

    # Parse command line arguments
    while [[ $# -gt 0 ]]; do
        case $1 in
            --force)
                force_flag=true
                shift
                ;;
            --info)
                show_info=true
                shift
                ;;
            --optimize)
                optimize=true
                shift
                ;;
            --help)
                print_usage
                exit 0
                ;;
            *)
                log_error "Unknown option: $1"
                print_usage
                exit 1
                ;;
        esac
    done

    log_info "Starting C Nostr Relay database initialization..."

    # Execute initialization steps
    check_sqlite
    create_db_directory

    if [ "$force_flag" = false ]; then
        backup_existing_db
    fi

    if [ "$force_flag" = true ]; then
        init_database --force
    else
        init_database
    fi

    verify_database

    if [ "$optimize" = true ]; then
        optimize_database
    fi

    if [ "$show_info" = true ]; then
        show_db_info
    fi

    log_success "Database initialization completed successfully!"
    echo ""
    echo "Database ready at: $DB_PATH"
    echo "You can now start your C Nostr Relay application."
}

# Execute main function with all arguments
main "$@"
```
**db/schema.sql** (deleted, 90 lines; the filename header was lost in extraction and is inferred from init.sh's `SCHEMA_FILE`)

@@ -1,90 +0,0 @@

```sql
-- C Nostr Relay Database Schema
-- SQLite schema for storing Nostr events with JSON tags support

-- Schema version tracking
PRAGMA user_version = 2;

-- Enable foreign key support
PRAGMA foreign_keys = ON;

-- Optimize for performance
PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
PRAGMA cache_size = 10000;

-- Core events table with hybrid single-table design
CREATE TABLE events (
    id TEXT PRIMARY KEY,              -- Nostr event ID (hex string)
    pubkey TEXT NOT NULL,             -- Public key of event author (hex string)
    created_at INTEGER NOT NULL,      -- Event creation timestamp (Unix timestamp)
    kind INTEGER NOT NULL,            -- Event kind (0-65535)
    event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
    content TEXT NOT NULL,            -- Event content (text content only)
    sig TEXT NOT NULL,                -- Event signature (hex string)
    tags JSON NOT NULL DEFAULT '[]',  -- Event tags as JSON array
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))  -- When relay received event
);

-- Core performance indexes
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);

-- Composite indexes for common query patterns
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
CREATE INDEX idx_events_pubkey_kind ON events(pubkey, kind);

-- Schema information table
CREATE TABLE schema_info (
    key TEXT PRIMARY KEY,
    value TEXT NOT NULL,
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Insert schema metadata
INSERT INTO schema_info (key, value) VALUES
    ('version', '2'),
    ('description', 'Hybrid single-table Nostr relay schema with JSON tags'),
    ('created_at', strftime('%s', 'now'));

-- Helper views for common queries
CREATE VIEW recent_events AS
SELECT id, pubkey, created_at, kind, event_type, content
FROM events
WHERE event_type != 'ephemeral'
ORDER BY created_at DESC
LIMIT 1000;

CREATE VIEW event_stats AS
SELECT
    event_type,
    COUNT(*) as count,
    AVG(length(content)) as avg_content_length,
    MIN(created_at) as earliest,
    MAX(created_at) as latest
FROM events
GROUP BY event_type;

-- Optimization: Trigger for automatic cleanup of ephemeral events older than 1 hour
CREATE TRIGGER cleanup_ephemeral_events
AFTER INSERT ON events
WHEN NEW.event_type = 'ephemeral'
BEGIN
    DELETE FROM events
    WHERE event_type = 'ephemeral'
      AND first_seen < (strftime('%s', 'now') - 3600);
END;

-- Replaceable event handling trigger
CREATE TRIGGER handle_replaceable_events
AFTER INSERT ON events
WHEN NEW.event_type = 'replaceable'
BEGIN
    DELETE FROM events
    WHERE pubkey = NEW.pubkey
      AND kind = NEW.kind
      AND event_type = 'replaceable'
      AND id != NEW.id;
END;
```
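As a quick illustration of the handle_replaceable_events trigger above (sample values, not valid Nostr events): inserting a second replaceable event for the same (pubkey, kind) deletes every other replaceable row for that pair, so only the most recent insert survives; note that the trigger keeps the latest insert even if its created_at is older.

```sql
-- Two metadata (kind 0, replaceable) events from the same author
INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig)
VALUES ('ev1', 'pk1', 100, 0, 'replaceable', '{"name":"old"}', 'sig1');

INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig)
VALUES ('ev2', 'pk1', 200, 0, 'replaceable', '{"name":"new"}', 'sig2');

-- The AFTER INSERT trigger removed 'ev1'; only 'ev2' remains
SELECT id, content FROM events WHERE pubkey = 'pk1' AND kind = 0;
```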
**Deleted file** (337 lines; the filename header was lost in extraction)

@@ -1,337 +0,0 @@

# Advanced Nostr Relay Schema Design

## Overview

This document outlines the design for an advanced multi-table schema that enforces Nostr protocol compliance at the database level, with separate tables for different event types based on their storage and replacement characteristics.

## Event Type Classification

Based on the Nostr specification, events are classified into four categories:

### 1. Regular Events
- **Kinds**: `1000 <= n < 10000` || `4 <= n < 45` || `n == 1` || `n == 2`
- **Storage Policy**: All events stored permanently
- **Examples**: Text notes (1), Reposts (6), Reactions (7), Direct Messages (4)

### 2. Replaceable Events
- **Kinds**: `10000 <= n < 20000` || `n == 0` || `n == 3`
- **Storage Policy**: Only latest per `(pubkey, kind)` combination
- **Replacement Logic**: Latest `created_at`, then lowest `id` lexically
- **Examples**: Metadata (0), Contacts (3), Mute List (10000)

### 3. Ephemeral Events
- **Kinds**: `20000 <= n < 30000`
- **Storage Policy**: Not expected to be stored (optional temporary storage)
- **Examples**: Typing indicators, presence updates, ephemeral messages

### 4. Addressable Events
- **Kinds**: `30000 <= n < 40000`
- **Storage Policy**: Only latest per `(pubkey, kind, d_tag)` combination
- **Replacement Logic**: Same as replaceable events
- **Examples**: Long-form content (30023), Application-specific data

## SQLite JSON Capabilities Research

SQLite provides powerful JSON functions that could be leveraged for tag storage:

### Core JSON Functions
```sql
-- Extract specific values
json_extract(column, '$.path')

-- Iterate through arrays
json_each(json_array_column)

-- Flatten nested structures
json_tree(json_column)

-- Validate JSON structure
json_valid(column)

-- Array operations
json_array_length(column)
json_extract(column, '$[0]')  -- First element
```

### Tag Query Examples

#### Find all 'e' tag references:
```sql
SELECT
    id,
    json_extract(value, '$[1]') as referenced_event_id,
    json_extract(value, '$[2]') as relay_hint,
    json_extract(value, '$[3]') as marker
FROM events, json_each(tags)
WHERE json_extract(value, '$[0]') = 'e';
```

#### Find events with specific hashtags:
```sql
SELECT id, content
FROM events, json_each(tags)
WHERE json_extract(value, '$[0]') = 't'
  AND json_extract(value, '$[1]') = 'bitcoin';
```

#### Extract 'd' tag for addressable events:
```sql
SELECT
    id,
    json_extract(value, '$[1]') as d_tag_value
FROM events, json_each(tags)
WHERE json_extract(value, '$[0]') = 'd'
LIMIT 1;
```

### JSON Functional Indexes
```sql
-- Index on hashtags
CREATE INDEX idx_hashtags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 't';

-- Index on 'd' tags for addressable events
CREATE INDEX idx_d_tags ON events_addressable(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'd';
```

## Proposed Schema Design

### Option 1: Separate Tables with JSON Tags

```sql
-- Regular Events (permanent storage)
CREATE TABLE events_regular (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,
    first_seen INTEGER DEFAULT (strftime('%s', 'now')),
    CONSTRAINT kind_regular CHECK (
        (kind >= 1000 AND kind < 10000) OR
        (kind >= 4 AND kind < 45) OR
        kind = 1 OR kind = 2
    )
);

-- Replaceable Events (latest per pubkey+kind)
CREATE TABLE events_replaceable (
    pubkey TEXT NOT NULL,
    kind INTEGER NOT NULL,
    id TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,
    replaced_at INTEGER DEFAULT (strftime('%s', 'now')),
    PRIMARY KEY (pubkey, kind),
    CONSTRAINT kind_replaceable CHECK (
        (kind >= 10000 AND kind < 20000) OR
        kind = 0 OR kind = 3
    )
);

-- Ephemeral Events (temporary/optional storage)
CREATE TABLE events_ephemeral (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,
    expires_at INTEGER DEFAULT (strftime('%s', 'now', '+1 hour')),
    CONSTRAINT kind_ephemeral CHECK (
        kind >= 20000 AND kind < 30000
    )
);

-- Addressable Events (latest per pubkey+kind+d_tag)
CREATE TABLE events_addressable (
    pubkey TEXT NOT NULL,
    kind INTEGER NOT NULL,
    d_tag TEXT NOT NULL,
    id TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,
    replaced_at INTEGER DEFAULT (strftime('%s', 'now')),
    PRIMARY KEY (pubkey, kind, d_tag),
    CONSTRAINT kind_addressable CHECK (
        kind >= 30000 AND kind < 40000
    )
);
```

### Indexes for Performance

```sql
-- Regular events indexes
CREATE INDEX idx_regular_pubkey ON events_regular(pubkey);
CREATE INDEX idx_regular_kind ON events_regular(kind);
CREATE INDEX idx_regular_created_at ON events_regular(created_at);
CREATE INDEX idx_regular_kind_created_at ON events_regular(kind, created_at);

-- Replaceable events indexes
CREATE INDEX idx_replaceable_created_at ON events_replaceable(created_at);
CREATE INDEX idx_replaceable_id ON events_replaceable(id);

-- Ephemeral events indexes
CREATE INDEX idx_ephemeral_expires_at ON events_ephemeral(expires_at);
CREATE INDEX idx_ephemeral_pubkey ON events_ephemeral(pubkey);

-- Addressable events indexes
CREATE INDEX idx_addressable_created_at ON events_addressable(created_at);
CREATE INDEX idx_addressable_id ON events_addressable(id);

-- JSON tag indexes (examples)
CREATE INDEX idx_regular_e_tags ON events_regular(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'e';

CREATE INDEX idx_regular_p_tags ON events_regular(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'p';
```

### Option 2: Unified Tag Table Approach

```sql
-- Unified tag storage (alternative to JSON)
CREATE TABLE tags_unified (
    event_id TEXT NOT NULL,
    event_type TEXT NOT NULL,    -- 'regular', 'replaceable', 'ephemeral', 'addressable'
    tag_index INTEGER NOT NULL,  -- Position in tag array
    name TEXT NOT NULL,
    value TEXT NOT NULL,
    param_2 TEXT,                -- Third element if present
    param_3 TEXT,                -- Fourth element if present
    param_json TEXT,             -- JSON for additional parameters
    PRIMARY KEY (event_id, tag_index)
);

CREATE INDEX idx_tags_name_value ON tags_unified(name, value);
CREATE INDEX idx_tags_event_type ON tags_unified(event_type);
```

## Implementation Strategy

### 1. Kind Classification Function (C Code)
```c
typedef enum {
    EVENT_TYPE_REGULAR,
    EVENT_TYPE_REPLACEABLE,
    EVENT_TYPE_EPHEMERAL,
    EVENT_TYPE_ADDRESSABLE,
    EVENT_TYPE_INVALID
} event_type_t;

event_type_t classify_event_kind(int kind) {
    if ((kind >= 1000 && kind < 10000) ||
        (kind >= 4 && kind < 45) ||
        kind == 1 || kind == 2) {
        return EVENT_TYPE_REGULAR;
    }

    if ((kind >= 10000 && kind < 20000) ||
        kind == 0 || kind == 3) {
        return EVENT_TYPE_REPLACEABLE;
    }

    if (kind >= 20000 && kind < 30000) {
        return EVENT_TYPE_EPHEMERAL;
    }

    if (kind >= 30000 && kind < 40000) {
        return EVENT_TYPE_ADDRESSABLE;
    }

    return EVENT_TYPE_INVALID;
}
```
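To show how the classification feeds the multi-table design (a sketch with a hypothetical helper, not part of the original document), the enum can map straight to a target table name before the insert statement is built:

```c
/* Map an event type to its storage table (hypothetical helper). */
const char *table_for_event_type(event_type_t type) {
    switch (type) {
        case EVENT_TYPE_REGULAR:     return "events_regular";
        case EVENT_TYPE_REPLACEABLE: return "events_replaceable";
        case EVENT_TYPE_EPHEMERAL:   return "events_ephemeral";
        case EVENT_TYPE_ADDRESSABLE: return "events_addressable";
        default:                     return NULL;  /* reject invalid kinds */
    }
}
```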
### 2. Replacement Logic for Replaceable Events
|
||||
```sql
|
||||
-- Trigger for replaceable events
|
||||
CREATE TRIGGER replace_event_on_insert
|
||||
BEFORE INSERT ON events_replaceable
|
||||
FOR EACH ROW
|
||||
WHEN EXISTS (
|
||||
SELECT 1 FROM events_replaceable
|
||||
WHERE pubkey = NEW.pubkey AND kind = NEW.kind
|
||||
)
|
||||
BEGIN
|
||||
DELETE FROM events_replaceable
|
||||
WHERE pubkey = NEW.pubkey
|
||||
AND kind = NEW.kind
|
||||
AND (
|
||||
created_at < NEW.created_at OR
|
||||
(created_at = NEW.created_at AND id > NEW.id)
|
||||
);
|
||||
END;
|
||||
```
|
||||
|
||||
### 3. D-Tag Extraction for Addressable Events
|
||||
```c
|
||||
char* extract_d_tag(cJSON* tags) {
|
||||
if (!tags || !cJSON_IsArray(tags)) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
cJSON* tag;
|
||||
cJSON_ArrayForEach(tag, tags) {
|
||||
if (cJSON_IsArray(tag) && cJSON_GetArraySize(tag) >= 2) {
|
||||
cJSON* tag_name = cJSON_GetArrayItem(tag, 0);
|
||||
cJSON* tag_value = cJSON_GetArrayItem(tag, 1);
|
||||
|
||||
if (cJSON_IsString(tag_name) && cJSON_IsString(tag_value)) {
|
||||
if (strcmp(cJSON_GetStringValue(tag_name), "d") == 0) {
|
||||
return strdup(cJSON_GetStringValue(tag_value));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return strdup(""); // Default empty d-tag
|
||||
}
|
||||
```
|
||||
|
||||
## Advantages of This Design
|
||||
|
||||
### 1. Protocol Compliance
|
||||
- **Enforced at DB level**: Schema constraints prevent invalid event storage
|
||||
- **Automatic replacement**: Triggers handle replaceable/addressable event logic
|
||||
- **Type safety**: Separate tables ensure correct handling per event type
|
||||
|
||||
### 2. Performance Benefits
|
||||
- **Targeted indexes**: Each table optimized for its access patterns
|
||||
- **Reduced storage**: Ephemeral events can be auto-expired
|
||||
- **Query optimization**: SQLite can optimize queries per table structure
|
||||
|
||||
### 3. JSON Tag Benefits
|
||||
- **Atomic storage**: Tags stored with their event
|
||||
- **Rich querying**: SQLite JSON functions enable complex tag queries
|
||||
- **Schema flexibility**: Can handle arbitrary tag structures
|
||||
- **Functional indexes**: Index specific tag patterns efficiently
|
||||
|
||||
## Migration Strategy
|
||||
|
||||
1. **Phase 1**: Create new schema alongside existing
|
||||
2. **Phase 2**: Implement kind classification and routing logic
|
||||
3. **Phase 3**: Migrate existing data to appropriate tables
|
||||
4. **Phase 4**: Update application logic to use new tables
|
||||
5. **Phase 5**: Drop old schema after verification
|
||||
|
||||
## Next Steps for Implementation
|
||||
|
||||
1. **Prototype JSON performance**: Create test database with sample data
|
||||
2. **Benchmark query patterns**: Compare JSON vs normalized approaches
|
||||
3. **Implement kind classification**: Add routing logic to C code
|
||||
4. **Create migration scripts**: Handle existing data transformation
|
||||
5. **Update test suite**: Verify compliance with new schema
|
||||
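For step 2 above, the comparison can start from a harness as small as the sketch below. It times the `json_each` tag-lookup pattern against a test `events` table with a JSON `tags` column (the shape evaluated throughout these documents); the `bench.db` path, iteration count, and function name are placeholders, not part of the design.

```c
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>
#include <sqlite3.h>

/* Time repeated executions of the json_each tag-lookup pattern. */
static double bench_p_tag_query(sqlite3 *db, const char *target, int iterations) {
    const char *sql =
        "SELECT COUNT(*) FROM events e, json_each(e.tags) t "
        "WHERE json_extract(t.value, '$[0]') = 'p' "
        "  AND json_extract(t.value, '$[1]') = ?";
    sqlite3_stmt *stmt;
    if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) return -1.0;

    struct timespec start, end;
    clock_gettime(CLOCK_MONOTONIC, &start);
    for (int i = 0; i < iterations; i++) {
        sqlite3_bind_text(stmt, 1, target, -1, SQLITE_STATIC);
        while (sqlite3_step(stmt) == SQLITE_ROW) { /* drain result rows */ }
        sqlite3_reset(stmt);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    sqlite3_finalize(stmt);

    return (double)(end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec) / 1e9;
}

int main(void) {
    sqlite3 *db;
    if (sqlite3_open("bench.db", &db) != SQLITE_OK) return 1;
    printf("1000 runs: %.3f s\n", bench_p_tag_query(db, "<hex pubkey>", 1000));
    sqlite3_close(db);
    return 0;
}
```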
280 docs/config_schema_design.md Normal file
@@ -0,0 +1,280 @@
# Database Configuration Schema Design

## Overview

This document outlines the database configuration schema additions for the C Nostr Relay startup config file system. The design follows the Ginxsom admin system approach with signed Nostr events and database storage.

## Schema Version Update

- Current Version: 2
- Target Version: 3
- Update: Add server configuration management tables

## Core Configuration Tables

### 1. `server_config` Table

```sql
-- Server configuration table - core configuration storage
CREATE TABLE server_config (
    key TEXT PRIMARY KEY,                -- Configuration key (unique identifier)
    value TEXT NOT NULL,                 -- Configuration value (stored as string)
    description TEXT,                    -- Human-readable description
    config_type TEXT DEFAULT 'user' CHECK (config_type IN ('system', 'user', 'runtime')),
    data_type TEXT DEFAULT 'string' CHECK (data_type IN ('string', 'integer', 'boolean', 'json')),
    validation_rules TEXT,               -- JSON validation rules (optional)
    is_sensitive INTEGER DEFAULT 0,      -- 1 if value should be masked in logs
    requires_restart INTEGER DEFAULT 0,  -- 1 if change requires server restart
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);
```

**Configuration Types:**
- `system`: Core system settings (admin keys, security)
- `user`: User-configurable settings (relay info, features)
- `runtime`: Dynamic runtime values (statistics, cache)

**Data Types:**
- `string`: Text values
- `integer`: Numeric values
- `boolean`: True/false values (stored as "true"/"false")
- `json`: JSON object/array values
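Since every value lands in the `value` column as TEXT, readers convert on the way out according to `data_type`. A minimal sketch of such an accessor, assuming the relay's usual `g_db` handle; the function name and fallback convention are illustrative:

```c
#include <stdlib.h>
#include <sqlite3.h>

extern sqlite3 *g_db;

/* Read an integer-typed key from server_config; returns `fallback` when
 * the key is missing or not declared with data_type = 'integer'. */
int config_get_int(const char *key, int fallback) {
    const char *sql =
        "SELECT value FROM server_config WHERE key = ? AND data_type = 'integer'";
    sqlite3_stmt *stmt;
    int result = fallback;

    if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) != SQLITE_OK) {
        return fallback;
    }
    sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
    if (sqlite3_step(stmt) == SQLITE_ROW) {
        const unsigned char *text = sqlite3_column_text(stmt, 0);
        if (text) {
            result = (int)strtol((const char *)text, NULL, 10);
        }
    }
    sqlite3_finalize(stmt);
    return result;
}
```

A caller would then write `int port = config_get_int("relay_port", 8888);` rather than touching raw strings.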
### 2. `config_history` Table

```sql
-- Configuration change history table
CREATE TABLE config_history (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    config_key TEXT NOT NULL,            -- Key that was changed
    old_value TEXT,                      -- Previous value (NULL for new keys)
    new_value TEXT NOT NULL,             -- New value
    changed_by TEXT DEFAULT 'system',    -- Who made the change (system/admin/user)
    change_reason TEXT,                  -- Optional reason for change
    changed_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    FOREIGN KEY (config_key) REFERENCES server_config(key)
);
```

### 3. `config_validation_log` Table

```sql
-- Configuration validation errors log
CREATE TABLE config_validation_log (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    config_key TEXT NOT NULL,
    attempted_value TEXT,
    validation_error TEXT NOT NULL,
    error_source TEXT DEFAULT 'validation',  -- validation/parsing/constraint
    attempted_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);
```

### 4. Configuration File Cache Table

```sql
-- Cache for file-based configuration events
CREATE TABLE config_file_cache (
    file_path TEXT PRIMARY KEY,          -- Full path to config file
    file_hash TEXT NOT NULL,             -- SHA256 hash of file content
    event_id TEXT,                       -- Nostr event ID from file
    event_pubkey TEXT,                   -- Admin pubkey that signed event
    loaded_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    validation_status TEXT CHECK (validation_status IN ('valid', 'invalid', 'unverified')),
    validation_error TEXT                -- Error details if invalid
);
```
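The `file_hash` column is what lets startup skip re-parsing an unchanged file. A sketch of computing it, assuming OpenSSL's libcrypto for SHA-256 (the incremental `SHA256_*` API is marked deprecated in OpenSSL 3.x in favor of EVP but still ships; the function name is illustrative):

```c
#include <stdio.h>
#include <openssl/sha.h>

/* Hash a config file's contents into a 64-char hex string (hex_out must
 * hold at least 65 bytes). Returns 0 on success, -1 on I/O failure. */
int hash_config_file(const char *path, char *hex_out) {
    FILE *f = fopen(path, "rb");
    if (!f) return -1;

    SHA256_CTX ctx;
    SHA256_Init(&ctx);

    unsigned char buf[4096];
    size_t n;
    while ((n = fread(buf, 1, sizeof(buf), f)) > 0) {
        SHA256_Update(&ctx, buf, n);
    }
    fclose(f);

    unsigned char digest[SHA256_DIGEST_LENGTH];
    SHA256_Final(digest, &ctx);

    for (int i = 0; i < SHA256_DIGEST_LENGTH; i++) {
        sprintf(hex_out + i * 2, "%02x", digest[i]);
    }
    hex_out[64] = '\0';
    return 0;
}
```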
## Indexes and Performance

```sql
-- Performance indexes for configuration tables
CREATE INDEX idx_server_config_type ON server_config(config_type);
CREATE INDEX idx_server_config_updated ON server_config(updated_at DESC);
CREATE INDEX idx_config_history_key ON config_history(config_key);
CREATE INDEX idx_config_history_time ON config_history(changed_at DESC);
CREATE INDEX idx_config_validation_key ON config_validation_log(config_key);
CREATE INDEX idx_config_validation_time ON config_validation_log(attempted_at DESC);
```

## Triggers

### Update Timestamp Trigger

```sql
-- Trigger to update timestamp on configuration changes.
-- Note: this self-UPDATE does not loop, because SQLite ships with
-- recursive triggers disabled by default.
CREATE TRIGGER update_config_timestamp
AFTER UPDATE ON server_config
BEGIN
    UPDATE server_config SET updated_at = strftime('%s', 'now') WHERE key = NEW.key;
END;
```

### Configuration History Trigger

```sql
-- Trigger to log configuration changes to history
CREATE TRIGGER log_config_changes
AFTER UPDATE ON server_config
WHEN OLD.value != NEW.value
BEGIN
    INSERT INTO config_history (config_key, old_value, new_value, changed_by, change_reason)
    VALUES (NEW.key, OLD.value, NEW.value, 'system', 'configuration update');
END;
```

## Default Configuration Values

### Core System Settings

```sql
INSERT OR IGNORE INTO server_config (key, value, description, config_type, data_type, requires_restart) VALUES
-- Administrative settings
('admin_pubkey', '', 'Authorized admin public key (hex)', 'system', 'string', 1),
('admin_enabled', 'false', 'Enable admin interface', 'system', 'boolean', 1),

-- Server core settings
('relay_port', '8888', 'WebSocket server port', 'user', 'integer', 1),
('database_path', 'db/c_nostr_relay.db', 'SQLite database file path', 'user', 'string', 1),
('max_connections', '100', 'Maximum concurrent connections', 'user', 'integer', 1),

-- NIP-11 Relay Information
('relay_name', 'C Nostr Relay', 'Relay name for NIP-11', 'user', 'string', 0),
('relay_description', 'High-performance C Nostr relay with SQLite storage', 'Relay description', 'user', 'string', 0),
('relay_contact', '', 'Contact information', 'user', 'string', 0),
('relay_pubkey', '', 'Relay public key', 'user', 'string', 0),
('relay_software', 'https://git.laantungir.net/laantungir/c-relay.git', 'Software URL', 'user', 'string', 0),
('relay_version', '0.2.0', 'Software version', 'user', 'string', 0),

-- NIP-13 Proof of Work
('pow_enabled', 'true', 'Enable NIP-13 Proof of Work validation', 'user', 'boolean', 0),
('pow_min_difficulty', '0', 'Minimum PoW difficulty required', 'user', 'integer', 0),
('pow_mode', 'basic', 'PoW validation mode (basic/full/strict)', 'user', 'string', 0),

-- NIP-40 Expiration Timestamp
('expiration_enabled', 'true', 'Enable NIP-40 expiration handling', 'user', 'boolean', 0),
('expiration_strict', 'true', 'Reject expired events on submission', 'user', 'boolean', 0),
('expiration_filter', 'true', 'Filter expired events from responses', 'user', 'boolean', 0),
('expiration_grace_period', '300', 'Grace period for clock skew (seconds)', 'user', 'integer', 0),

-- Subscription limits
('max_subscriptions_per_client', '20', 'Max subscriptions per client', 'user', 'integer', 0),
('max_total_subscriptions', '5000', 'Max total concurrent subscriptions', 'user', 'integer', 0),
('subscription_id_max_length', '64', 'Maximum subscription ID length', 'user', 'integer', 0),

-- Event processing limits
('max_event_tags', '100', 'Maximum tags per event', 'user', 'integer', 0),
('max_content_length', '8196', 'Maximum content length', 'user', 'integer', 0),
('max_message_length', '16384', 'Maximum message length', 'user', 'integer', 0),

-- Performance settings
('default_limit', '500', 'Default query limit', 'user', 'integer', 0),
('max_limit', '5000', 'Maximum query limit', 'user', 'integer', 0);
```

### Runtime Statistics

```sql
INSERT OR IGNORE INTO server_config (key, value, description, config_type, data_type) VALUES
-- Runtime statistics (updated by server)
('server_start_time', '0', 'Server startup timestamp', 'runtime', 'integer'),
('total_events_processed', '0', 'Total events processed', 'runtime', 'integer'),
('total_subscriptions_created', '0', 'Total subscriptions created', 'runtime', 'integer'),
('current_connections', '0', 'Current active connections', 'runtime', 'integer'),
('database_size_bytes', '0', 'Database file size in bytes', 'runtime', 'integer');
```

## Configuration Views

### Active Configuration View

```sql
CREATE VIEW active_config AS
SELECT
    key,
    value,
    description,
    config_type,
    data_type,
    requires_restart,
    updated_at
FROM server_config
WHERE config_type IN ('system', 'user')
ORDER BY config_type, key;
```

### Runtime Statistics View

```sql
CREATE VIEW runtime_stats AS
SELECT
    key,
    value,
    description,
    updated_at
FROM server_config
WHERE config_type = 'runtime'
ORDER BY key;
```

### Configuration Change Summary

```sql
CREATE VIEW recent_config_changes AS
SELECT
    ch.config_key,
    sc.description,
    ch.old_value,
    ch.new_value,
    ch.changed_by,
    ch.change_reason,
    ch.changed_at
FROM config_history ch
JOIN server_config sc ON ch.config_key = sc.key
ORDER BY ch.changed_at DESC
LIMIT 50;
```

## Validation Rules Format

Configuration validation rules are stored as JSON strings in the `validation_rules` column:

```json
{
    "type": "integer",
    "min": 1,
    "max": 65535,
    "required": true
}
```

```json
{
    "type": "string",
    "pattern": "^[0-9a-fA-F]{64}$",
    "required": false,
    "description": "64-character hex string"
}
```

```json
{
    "type": "boolean",
    "required": true
}
```
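A sketch of enforcing the integer rule with cJSON; string-pattern and boolean rules would follow the same shape. The function name and 0/1 return convention are illustrative (adjust the cJSON include path to the local tree):

```c
#include <stdlib.h>
#include <string.h>
#include "cJSON.h"  /* adjust include path to the local tree */

/* Validate a raw string value against a JSON rule like
 * {"type": "integer", "min": 1, "max": 65535}.
 * Returns 1 if the value passes, 0 otherwise. */
int validate_integer_rule(const char *raw_value, const char *rule_json) {
    cJSON *rule = cJSON_Parse(rule_json);
    if (!rule) return 0;

    int ok = 0;
    cJSON *type = cJSON_GetObjectItem(rule, "type");
    if (cJSON_IsString(type) && strcmp(cJSON_GetStringValue(type), "integer") == 0) {
        char *end = NULL;
        long v = strtol(raw_value, &end, 10);
        if (end && *end == '\0') {  /* whole string parsed as a number */
            ok = 1;
            cJSON *min = cJSON_GetObjectItem(rule, "min");
            cJSON *max = cJSON_GetObjectItem(rule, "max");
            if (cJSON_IsNumber(min) && v < (long)cJSON_GetNumberValue(min)) ok = 0;
            if (cJSON_IsNumber(max) && v > (long)cJSON_GetNumberValue(max)) ok = 0;
        }
    }
    cJSON_Delete(rule);
    return ok;
}
```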
## Migration Strategy

1. **Phase 1**: Add configuration tables to existing schema
2. **Phase 2**: Populate with current hardcoded values
3. **Phase 3**: Update application code to read from database
4. **Phase 4**: Add file-based configuration loading
5. **Phase 5**: Remove hardcoded defaults and environment variable fallbacks

## Integration Points

- **Startup**: Load configuration from file → database → apply to application
- **Runtime**: Read configuration values from database cache
- **Updates**: Write changes to database → optionally update file
- **Validation**: Validate all configuration changes before applying
- **History**: Track all configuration changes for audit purposes
493 docs/file_config_design.md Normal file
@@ -0,0 +1,493 @@
# File-Based Configuration Architecture Design

## Overview

This document outlines the XDG-compliant file-based configuration system for the C Nostr Relay, following the Ginxsom admin system approach using signed Nostr events.

## XDG Base Directory Specification Compliance

### File Location Strategy

**Primary Location:**
```
$XDG_CONFIG_HOME/c-relay/c_relay_config_event.json
```

**Fallback Location:**
```
$HOME/.config/c-relay/c_relay_config_event.json
```

**System-wide Fallback:**
```
/etc/c-relay/c_relay_config_event.json
```
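A sketch of walking these three locations in order; this is one plausible shape for the `get_config_file_path` declared among the function signatures later in this document, with an `access()`-based existence check as an illustrative choice:

```c
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Resolve the config file path: $XDG_CONFIG_HOME, then ~/.config,
 * then /etc. Returns 0 and fills `path` if an existing readable file is
 * found, -1 otherwise (the caller may then create the primary location). */
int get_config_file_path(char *path, size_t path_size) {
    const char *name = "c-relay/c_relay_config_event.json";
    const char *xdg = getenv("XDG_CONFIG_HOME");
    const char *home = getenv("HOME");
    char candidate[512];

    if (xdg && *xdg) {
        snprintf(candidate, sizeof(candidate), "%s/%s", xdg, name);
        if (access(candidate, R_OK) == 0) {
            snprintf(path, path_size, "%s", candidate);
            return 0;
        }
    }
    if (home && *home) {
        snprintf(candidate, sizeof(candidate), "%s/.config/%s", home, name);
        if (access(candidate, R_OK) == 0) {
            snprintf(path, path_size, "%s", candidate);
            return 0;
        }
    }
    snprintf(candidate, sizeof(candidate), "/etc/%s", name);
    if (access(candidate, R_OK) == 0) {
        snprintf(path, path_size, "%s", candidate);
        return 0;
    }
    return -1;
}
```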
### Directory Structure

```
$XDG_CONFIG_HOME/c-relay/
├── c_relay_config_event.json          # Main configuration file
├── backup/                            # Configuration backups
│   ├── c_relay_config_event.json.bak
│   └── c_relay_config_event.20241205.json
└── validation/                        # Validation logs
    └── config_validation.log
```

## Configuration File Format

### Signed Nostr Event Structure

The configuration file contains a signed Nostr event (kind 33334) with relay configuration:

```json
{
  "kind": 33334,
  "created_at": 1704067200,
  "tags": [
    ["relay_name", "C Nostr Relay"],
    ["relay_description", "High-performance C Nostr relay with SQLite storage"],
    ["relay_port", "8888"],
    ["database_path", "db/c_nostr_relay.db"],
    ["admin_pubkey", ""],
    ["admin_enabled", "false"],

    ["pow_enabled", "true"],
    ["pow_min_difficulty", "0"],
    ["pow_mode", "basic"],

    ["expiration_enabled", "true"],
    ["expiration_strict", "true"],
    ["expiration_filter", "true"],
    ["expiration_grace_period", "300"],

    ["max_subscriptions_per_client", "20"],
    ["max_total_subscriptions", "5000"],
    ["max_connections", "100"],

    ["relay_contact", ""],
    ["relay_pubkey", ""],
    ["relay_software", "https://git.laantungir.net/laantungir/c-relay.git"],
    ["relay_version", "0.2.0"],

    ["max_event_tags", "100"],
    ["max_content_length", "8196"],
    ["max_message_length", "16384"],
    ["default_limit", "500"],
    ["max_limit", "5000"]
  ],
  "content": "C Nostr Relay configuration event",
  "pubkey": "admin_public_key_hex_64_chars",
  "id": "computed_event_id_hex_64_chars",
  "sig": "computed_signature_hex_128_chars"
}
```

### Event Kind Definition

**Kind 33334**: C Nostr Relay Configuration Event
- Parameterized replaceable event
- Must be signed by authorized admin pubkey
- Contains relay configuration as tags
- Validation required on load

## Configuration Loading Architecture

### Loading Priority Chain

1. **Command Line Arguments** (highest priority)
2. **File-based Configuration** (signed Nostr event)
3. **Database Configuration** (persistent storage)
4. **Environment Variables** (compatibility mode)
5. **Hardcoded Defaults** (fallback)

### Loading Process Flow

```mermaid
flowchart TD
    A[Server Startup] --> B[Get Config File Path]
    B --> C{File Exists?}
    C -->|No| D[Check Database Config]
    C -->|Yes| E[Load & Parse JSON]
    E --> F[Validate Event Structure]
    F --> G{Valid Event?}
    G -->|No| H[Log Error & Use Database]
    G -->|Yes| I[Verify Event Signature]
    I --> J{Signature Valid?}
    J -->|No| K[Log Error & Use Database]
    J -->|Yes| L[Extract Configuration Tags]
    L --> M[Apply to Database]
    M --> N[Apply to Application]
    D --> O[Load from Database]
    H --> O
    K --> O
    O --> P[Apply Environment Variable Overrides]
    P --> Q[Apply Command Line Overrides]
    Q --> N
    N --> R[Server Ready]
```

## C Implementation Architecture

### Core Data Structures

```c
// Configuration file management
typedef struct {
    char file_path[512];
    char file_hash[65];        // SHA256 hash
    time_t last_modified;
    time_t last_loaded;
    int validation_status;     // 0=valid, 1=invalid, 2=unverified
    char validation_error[256];
} config_file_info_t;

// Configuration event structure
typedef struct {
    char event_id[65];
    char pubkey[65];
    char signature[129];
    long created_at;
    int kind;
    cJSON* tags;
    char* content;
} config_event_t;

// Configuration management context
typedef struct {
    config_file_info_t file_info;
    config_event_t event;
    int loaded_from_file;
    int loaded_from_database;
    char admin_pubkey[65];
    time_t load_timestamp;
} config_context_t;
```

### Core Function Signatures

```c
// XDG path resolution
int get_config_file_path(char* path, size_t path_size);
int create_config_directories(const char* config_path);

// File operations
int load_config_from_file(const char* config_path, config_context_t* ctx);
int save_config_to_file(const char* config_path, const config_event_t* event);
int backup_config_file(const char* config_path);

// Event validation
int validate_config_event_structure(const cJSON* event);
int verify_config_event_signature(const config_event_t* event, const char* admin_pubkey);
int validate_config_tag_values(const cJSON* tags);

// Configuration extraction and application
int extract_config_from_tags(const cJSON* tags, config_context_t* ctx);
int apply_config_to_database(const config_context_t* ctx);
int apply_config_to_globals(const config_context_t* ctx);

// File monitoring and updates
int monitor_config_file_changes(const char* config_path);
int reload_config_on_change(config_context_t* ctx);

// Error handling and logging
int log_config_validation_error(const char* config_key, const char* error);
int log_config_load_event(const config_context_t* ctx, const char* source);
```

## Configuration Validation Rules

### Event Structure Validation

1. **Required Fields**: `kind`, `created_at`, `tags`, `content`, `pubkey`, `id`, `sig`
2. **Kind Validation**: Must be exactly 33334
3. **Timestamp Validation**: Must be reasonable (not too old, not in the future)
4. **Tags Format**: Array of string arrays `[["key", "value"], ...]`
5. **Signature Verification**: Must be signed by authorized admin pubkey
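Checks 1-4 reduce to straightforward cJSON inspection; check 5 is handled separately by `verify_config_event_signature`. A sketch (the one-hour future-skew allowance is an illustrative choice, not a spec value):

```c
#include <time.h>
#include "cJSON.h"  /* adjust include path to the local tree */

/* Structural checks 1-4 above; returns 0 on success, -1 on the first
 * failed check. Tags must be two-element string arrays per the config
 * tag format shown earlier. */
int validate_config_event_structure(const cJSON *event) {
    static const char *required[] =
        {"kind", "created_at", "tags", "content", "pubkey", "id", "sig"};
    for (size_t i = 0; i < sizeof(required) / sizeof(required[0]); i++) {
        if (!cJSON_GetObjectItem(event, required[i])) return -1;
    }

    cJSON *kind = cJSON_GetObjectItem(event, "kind");
    if (!cJSON_IsNumber(kind) || (int)cJSON_GetNumberValue(kind) != 33334) return -1;

    cJSON *created_at = cJSON_GetObjectItem(event, "created_at");
    if (!cJSON_IsNumber(created_at)) return -1;
    if ((time_t)cJSON_GetNumberValue(created_at) > time(NULL) + 3600) return -1;

    cJSON *tags = cJSON_GetObjectItem(event, "tags");
    if (!cJSON_IsArray(tags)) return -1;
    cJSON *tag;
    cJSON_ArrayForEach(tag, tags) {
        if (!cJSON_IsArray(tag) || cJSON_GetArraySize(tag) < 2) return -1;
    }
    return 0;
}
```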
### Configuration Value Validation

```c
typedef struct {
    char* key;
    char* data_type;        // "string", "integer", "boolean", "json"
    char* validation_rule;  // JSON validation rule
    int required;
    char* default_value;
} config_validation_rule_t;

static config_validation_rule_t validation_rules[] = {
    {"relay_port", "integer", "{\"min\": 1, \"max\": 65535}", 1, "8888"},
    {"pow_min_difficulty", "integer", "{\"min\": 0, \"max\": 64}", 1, "0"},
    {"expiration_grace_period", "integer", "{\"min\": 0, \"max\": 86400}", 1, "300"},
    {"admin_pubkey", "string", "{\"pattern\": \"^[0-9a-fA-F]{64}$\"}", 0, ""},
    {"pow_enabled", "boolean", "{}", 1, "true"},
    // ... more rules
};
```

### Security Validation

1. **Admin Pubkey Verification**: Only configured admin pubkeys can create config events
2. **Event ID Verification**: Event ID must match computed hash
3. **Signature Verification**: Signature must be valid for the event and pubkey
4. **Timestamp Validation**: Prevent replay attacks with old events
5. **File Permission Checks**: Config files should have appropriate permissions

## File Management Features

### Configuration File Operations

**File Creation:**
- Generate initial configuration file with default values
- Sign with admin private key
- Set appropriate file permissions (600 - owner read/write only)

**File Updates:**
- Create backup of existing file
- Validate new configuration
- Atomic file replacement (write to temp, then rename; sketched below)
- Update file metadata cache
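A minimal sketch of the atomic replacement step referenced above. `rename()` is atomic on POSIX filesystems when source and target are on the same filesystem, which the sibling `.tmp` name guarantees; names and error conventions are illustrative:

```c
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <unistd.h>

/* Write `data` to `path` atomically: write a sibling temp file, flush
 * it to disk, then rename over the original. Returns 0 on success. */
int write_config_atomic(const char *path, const char *data) {
    char tmp_path[512];
    snprintf(tmp_path, sizeof(tmp_path), "%s.tmp", path);

    FILE *f = fopen(tmp_path, "w");
    if (!f) return -1;

    if (fputs(data, f) == EOF || fflush(f) != 0 || fsync(fileno(f)) != 0) {
        fclose(f);
        unlink(tmp_path);
        return -1;
    }
    fclose(f);

    if (rename(tmp_path, path) != 0) {  /* atomic replacement */
        unlink(tmp_path);
        return -1;
    }
    return 0;
}
```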
**File Monitoring:**
- Watch for file system changes using inotify (Linux)
- Reload configuration automatically when file changes
- Validate changes before applying
- Log all configuration reload events

### Backup and Recovery

**Automatic Backups:**
```
$XDG_CONFIG_HOME/c-relay/backup/
├── c_relay_config_event.json.bak             # Last working config
├── c_relay_config_event.20241205-143022.json # Timestamped backups
└── c_relay_config_event.20241204-091530.json
```

**Recovery Process:**
1. Detect corrupted or invalid config file
2. Attempt to load from `.bak` backup
3. If backup fails, generate default configuration
4. Log recovery actions for audit

## Integration with Database Schema

### File-Database Synchronization

**On File Load:**
1. Parse and validate file-based configuration
2. Extract configuration values from event tags
3. Update database `server_config` table
4. Record file metadata in `config_file_cache` table
5. Log configuration changes in `config_history` table

**Configuration Priority Resolution:**
```c
char* get_config_value(const char* key, const char* default_value) {
    // Priority: CLI args > File config > DB config > Env vars > Default
    char* value = NULL;

    // 1. Check command line overrides (if implemented)
    value = get_cli_override(key);
    if (value) return value;

    // 2. Check database (file-based config has already been applied here)
    value = get_database_config(key);
    if (value) return value;

    // 3. Check environment variables (compatibility)
    value = get_env_config(key);
    if (value) return value;

    // 4. Return default
    return strdup(default_value);
}
```

## Error Handling and Recovery

### Validation Error Handling

```c
typedef enum {
    CONFIG_ERROR_NONE = 0,
    CONFIG_ERROR_FILE_NOT_FOUND = 1,
    CONFIG_ERROR_PARSE_FAILED = 2,
    CONFIG_ERROR_INVALID_STRUCTURE = 3,
    CONFIG_ERROR_SIGNATURE_INVALID = 4,
    CONFIG_ERROR_UNAUTHORIZED = 5,
    CONFIG_ERROR_VALUE_INVALID = 6,
    CONFIG_ERROR_IO_ERROR = 7
} config_error_t;

typedef struct {
    config_error_t error_code;
    char error_message[256];
    char config_key[64];
    char invalid_value[128];
    time_t error_timestamp;
} config_error_info_t;
```

### Graceful Degradation

**File Load Failure:**
1. Log detailed error information
2. Fall back to database configuration
3. Continue operation with last known good config
4. Set service status to "degraded" mode

**Validation Failure:**
1. Log validation errors with specific details
2. Skip invalid configuration items
3. Use default values for failed items
4. Continue with partial configuration

**Permission Errors:**
1. Log permission issues
2. Attempt to use fallback locations
3. Generate temporary config if needed
4. Alert administrator via logs

## Configuration Update Process

### Safe Configuration Updates

**Atomic Update Process:**
1. Create backup of current configuration
2. Write new configuration to temporary file
3. Validate new configuration completely
4. If valid, rename temporary file to active config
5. Update database with new values
6. Apply changes to running server
7. Log successful update

**Rollback Process:**
1. Detect invalid configuration at startup
2. Restore from backup file
3. Log rollback event
4. Continue with previous working configuration

### Hot Reload Support

**File Change Detection:**
```c
#include <sys/inotify.h>

int monitor_config_file_changes(const char* config_path) {
    // Use inotify on Linux to watch file changes
    int inotify_fd = inotify_init();
    if (inotify_fd < 0) return -1;

    int watch_fd = inotify_add_watch(inotify_fd, config_path, IN_MODIFY | IN_MOVED_TO);
    if (watch_fd < 0) return -1;

    // A dedicated thread then read()s change events from inotify_fd.
    // On change: validate -> apply -> log
    return 0;
}
```

**Runtime Configuration Updates:**
- Reload configuration on file change
- Apply non-restart-required changes immediately
- Queue restart-required changes for next restart
- Notify operators of configuration changes

## Security Considerations

### Access Control

**File Permissions:**
- Config files: 600 (owner read/write only)
- Directories: 700 (owner access only)
- Backup files: 600 (owner read/write only)

**Admin Key Management:**
- Admin private keys never stored in config files
- Only admin pubkeys stored for verification
- Support for multiple admin pubkeys
- Key rotation support

### Signature Validation

**Event Signature Verification:**
```c
int verify_config_event_signature(const config_event_t* event, const char* admin_pubkey) {
    // 1. Reconstruct event for signing (without id and sig)
    // 2. Compute event ID and verify against stored ID
    // 3. Verify signature using admin pubkey
    // 4. Check admin pubkey authorization
    return NOSTR_SUCCESS;
}
```

**Anti-Replay Protection:**
- Configuration events must be newer than the one currently applied
- Event timestamps validated against reasonable bounds
- Configuration history prevents replay attacks
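The newer-than-current rule reduces to one comparison against the config event already applied. A sketch, where `g_config_created_at` is an illustrative global holding the active event's timestamp and the one-hour future bound mirrors the structural check earlier:

```c
#include <time.h>

extern long g_config_created_at;  /* created_at of the config currently applied */

/* Reject replayed, stale, or far-future config events; 0 = accept. */
int check_config_event_freshness(long event_created_at) {
    if (event_created_at <= g_config_created_at) return -1;  /* replay / stale */
    if (event_created_at > (long)time(NULL) + 3600) return -1;  /* too far ahead */
    return 0;
}
```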
## Implementation Phases

### Phase 1: Basic File Support
- XDG path resolution
- File loading and parsing
- Basic validation
- Database integration

### Phase 2: Security Features
- Event signature verification
- Admin pubkey management
- File permission checks
- Error handling

### Phase 3: Advanced Features
- Hot reload support
- Automatic backups
- Configuration utilities
- Interactive setup

### Phase 4: Monitoring & Management
- Configuration change monitoring
- Advanced validation rules
- Configuration audit logging
- Management utilities

## Configuration Generation Utilities

### Interactive Setup Script

```bash
#!/bin/bash
# scripts/setup_config.sh - Interactive configuration setup

create_initial_config() {
    echo "=== C Nostr Relay Initial Configuration ==="

    # Collect basic information
    read -p "Relay name [C Nostr Relay]: " relay_name
    read -p "Admin public key (hex): " admin_pubkey
    read -p "Server port [8888]: " server_port

    # Generate signed configuration event
    ./scripts/generate_config.sh \
        --admin-key "$admin_pubkey" \
        --relay-name "${relay_name:-C Nostr Relay}" \
        --port "${server_port:-8888}" \
        --output "$XDG_CONFIG_HOME/c-relay/c_relay_config_event.json"
}
```

### Configuration Validation Utility

```bash
#!/bin/bash
# scripts/validate_config.sh - Validate configuration file

validate_config_file() {
    local config_file="$1"

    # Check file exists and is readable
    # Validate JSON structure
    # Verify event signature
    # Check configuration values
    # Report validation results
}
```

This comprehensive file-based configuration design provides a robust, secure, and maintainable system that follows industry standards while integrating seamlessly with the existing C Nostr Relay architecture.
@@ -1,416 +0,0 @@
# Final Schema Recommendation: Hybrid Single Table Approach

## Executive Summary

After analyzing the subscription query complexity, **the multi-table approach creates more problems than it solves**. REQ filters don't align with storage semantics - clients filter by kind, author, and tags regardless of event type classification.

**Recommendation: Modified Single Table with Event Type Classification**

## The Multi-Table Problem

### REQ Filter Reality Check
- Clients send: `{"kinds": [1, 0, 30023], "authors": ["pubkey"], "#p": ["target"]}`
- Multi-table requires: 3 separate queries + UNION + complex ordering
- Single table requires: 1 query with simple WHERE conditions

### Query Complexity Explosion
```sql
-- Multi-table nightmare for a simple filter
WITH results AS (
    SELECT * FROM events_regular WHERE kind = 1 AND pubkey = ?
    UNION ALL
    SELECT * FROM events_replaceable WHERE kind = 0 AND pubkey = ?
    UNION ALL
    SELECT * FROM events_addressable WHERE kind = 30023 AND pubkey = ?
)
SELECT r.* FROM results r
JOIN multiple_tag_tables t ON complex_conditions
ORDER BY created_at DESC, id ASC LIMIT ?;

-- vs single-table simplicity
SELECT e.* FROM events e, json_each(e.tags) t
WHERE e.kind IN (1, 0, 30023)
  AND e.pubkey = ?
  AND json_extract(t.value, '$[0]') = 'p'
  AND json_extract(t.value, '$[1]') = ?
ORDER BY e.created_at DESC, e.id ASC LIMIT ?;
```

## Recommended Schema: Hybrid Approach

### Core Design Philosophy
- **Single table for REQ query simplicity**
- **Event type classification for protocol compliance**
- **JSON tags for atomic storage and rich querying**
- **Partial unique constraints for replacement logic**

### Schema Definition

```sql
CREATE TABLE events (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON NOT NULL DEFAULT '[]',
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),

    -- d-tag for addressable events.
    -- NOTE: SQLite generated columns cannot contain subqueries, so the
    -- expression below is illustrative pseudocode; in practice d_tag is
    -- populated by the application or by an insert trigger.
    d_tag TEXT GENERATED ALWAYS AS (
        CASE
            WHEN event_type = 'addressable' THEN
                (SELECT json_extract(t.value, '$[1]')
                 FROM json_each(tags) t
                 WHERE json_extract(t.value, '$[0]') = 'd'
                 LIMIT 1)
            ELSE NULL
        END
    ) STORED,

    -- Replacement tracking
    replaced_at INTEGER

    -- Protocol compliance constraints.
    -- NOTE: SQLite has no partial UNIQUE table constraints; these are
    -- expressed as partial unique indexes instead:
    --   CREATE UNIQUE INDEX unique_replaceable ON events(pubkey, kind)
    --       WHERE event_type = 'replaceable';
    --   CREATE UNIQUE INDEX unique_addressable ON events(pubkey, kind, d_tag)
    --       WHERE event_type = 'addressable' AND d_tag IS NOT NULL;
);
```

### Event Type Classification Function

```sql
-- Lookup view mapping every kind to its event type
CREATE VIEW event_type_lookup AS
SELECT
    CASE
        WHEN (kind >= 1000 AND kind < 10000) OR
             (kind >= 4 AND kind < 45) OR
             kind = 1 OR kind = 2 THEN 'regular'
        WHEN (kind >= 10000 AND kind < 20000) OR
             kind = 0 OR kind = 3 THEN 'replaceable'
        WHEN kind >= 20000 AND kind < 30000 THEN 'ephemeral'
        WHEN kind >= 30000 AND kind < 40000 THEN 'addressable'
        ELSE 'unknown'
    END AS event_type,
    kind
FROM (
    -- Generate all possible kind values for lookup
    WITH RECURSIVE kinds(kind) AS (
        SELECT 0
        UNION ALL
        SELECT kind + 1 FROM kinds WHERE kind < 65535
    )
    SELECT kind FROM kinds
);
```

### Performance Indexes

```sql
-- Core query patterns
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);

-- Composite indexes for common filters
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_type_created_at ON events(event_type, created_at DESC);

-- JSON tag indexes for common patterns.
-- NOTE: SQLite's json_extract does not support the '$[*]' wildcard, so
-- these definitions are shorthand for the intent; in practice tag
-- filters are answered via json_each() in the query itself (as above)
-- or via a separate tag index table.
CREATE INDEX idx_events_e_tags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'e';

CREATE INDEX idx_events_p_tags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'p';

CREATE INDEX idx_events_hashtags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 't';

-- Addressable events d_tag index
CREATE INDEX idx_events_d_tag ON events(d_tag)
WHERE event_type = 'addressable' AND d_tag IS NOT NULL;
```

### Replacement Logic Implementation

#### Replaceable Events Trigger
```sql
CREATE TRIGGER handle_replaceable_events
BEFORE INSERT ON events
FOR EACH ROW
WHEN NEW.event_type = 'replaceable'
BEGIN
    -- Delete older replaceable events with same pubkey+kind
    DELETE FROM events
    WHERE event_type = 'replaceable'
      AND pubkey = NEW.pubkey
      AND kind = NEW.kind
      AND (
          created_at < NEW.created_at OR
          (created_at = NEW.created_at AND id > NEW.id)
      );
END;
```

#### Addressable Events Trigger
```sql
CREATE TRIGGER handle_addressable_events
BEFORE INSERT ON events
FOR EACH ROW
WHEN NEW.event_type = 'addressable'
BEGIN
    -- Delete older addressable events with same pubkey+kind+d_tag
    DELETE FROM events
    WHERE event_type = 'addressable'
      AND pubkey = NEW.pubkey
      AND kind = NEW.kind
      AND d_tag = NEW.d_tag
      AND (
          created_at < NEW.created_at OR
          (created_at = NEW.created_at AND id > NEW.id)
      );
END;
```

## Implementation Strategy

### C Code Integration

#### Event Type Classification
```c
typedef enum {
    EVENT_TYPE_REGULAR,
    EVENT_TYPE_REPLACEABLE,
    EVENT_TYPE_EPHEMERAL,
    EVENT_TYPE_ADDRESSABLE,
    EVENT_TYPE_UNKNOWN
} event_type_t;

event_type_t classify_event_kind(int kind) {
    if ((kind >= 1000 && kind < 10000) ||
        (kind >= 4 && kind < 45) ||
        kind == 1 || kind == 2) {
        return EVENT_TYPE_REGULAR;
    }
    if ((kind >= 10000 && kind < 20000) ||
        kind == 0 || kind == 3) {
        return EVENT_TYPE_REPLACEABLE;
    }
    if (kind >= 20000 && kind < 30000) {
        return EVENT_TYPE_EPHEMERAL;
    }
    if (kind >= 30000 && kind < 40000) {
        return EVENT_TYPE_ADDRESSABLE;
    }
    return EVENT_TYPE_UNKNOWN;
}

const char* event_type_to_string(event_type_t type) {
    switch (type) {
        case EVENT_TYPE_REGULAR: return "regular";
        case EVENT_TYPE_REPLACEABLE: return "replaceable";
        case EVENT_TYPE_EPHEMERAL: return "ephemeral";
        case EVENT_TYPE_ADDRESSABLE: return "addressable";
        default: return "unknown";
    }
}
```

#### Simplified Event Storage
```c
int store_event(cJSON* event) {
    // Extract fields
    cJSON* id = cJSON_GetObjectItem(event, "id");
    cJSON* pubkey = cJSON_GetObjectItem(event, "pubkey");
    cJSON* created_at = cJSON_GetObjectItem(event, "created_at");
    cJSON* kind = cJSON_GetObjectItem(event, "kind");
    cJSON* content = cJSON_GetObjectItem(event, "content");
    cJSON* sig = cJSON_GetObjectItem(event, "sig");

    // Classify event type
    event_type_t type = classify_event_kind((int)cJSON_GetNumberValue(kind));

    // Serialize tags to JSON (empty array when the event has none)
    cJSON* tags = cJSON_GetObjectItem(event, "tags");
    char* tags_json = tags ? cJSON_Print(tags) : strdup("[]");

    // Single INSERT statement - database handles replacement via triggers
    const char* sql =
        "INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?)";

    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        free(tags_json);
        return -1;
    }

    sqlite3_bind_text(stmt, 1, cJSON_GetStringValue(id), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 2, cJSON_GetStringValue(pubkey), -1, SQLITE_STATIC);
    sqlite3_bind_int64(stmt, 3, (sqlite3_int64)cJSON_GetNumberValue(created_at));
    sqlite3_bind_int(stmt, 4, (int)cJSON_GetNumberValue(kind));
    sqlite3_bind_text(stmt, 5, event_type_to_string(type), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 6, cJSON_GetStringValue(content), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 7, cJSON_GetStringValue(sig), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 8, tags_json, -1, SQLITE_TRANSIENT);

    rc = sqlite3_step(stmt);
    sqlite3_finalize(stmt);
    free(tags_json);

    return (rc == SQLITE_DONE) ? 0 : -1;
}
```

#### Simple REQ Query Building
```c
// Build a single query against the events table - much simpler than the
// multi-table approach. GString comes from GLib; the caller binds the
// parameters in order and frees the returned buffer with g_free().
char* build_filter_query(cJSON* filter) {
    GString* query = g_string_new("SELECT * FROM events WHERE 1=1");

    // Handle ids filter
    cJSON* ids = cJSON_GetObjectItem(filter, "ids");
    if (ids && cJSON_IsArray(ids)) {
        g_string_append(query, " AND id IN (");
        for (int i = 0; i < cJSON_GetArraySize(ids); i++) {
            g_string_append(query, i ? ",?" : "?");
        }
        g_string_append(query, ")");
    }

    // Handle authors filter
    cJSON* authors = cJSON_GetObjectItem(filter, "authors");
    if (authors && cJSON_IsArray(authors)) {
        g_string_append(query, " AND pubkey IN (");
        for (int i = 0; i < cJSON_GetArraySize(authors); i++) {
            g_string_append(query, i ? ",?" : "?");
        }
        g_string_append(query, ")");
    }

    // Handle kinds filter
    cJSON* kinds = cJSON_GetObjectItem(filter, "kinds");
    if (kinds && cJSON_IsArray(kinds)) {
        g_string_append(query, " AND kind IN (");
        for (int i = 0; i < cJSON_GetArraySize(kinds); i++) {
            g_string_append(query, i ? ",?" : "?");
        }
        g_string_append(query, ")");
    }

    // Handle tag filters (#e, #p, etc.)
    cJSON* item;
    cJSON_ArrayForEach(item, filter) {
        char* key = item->string;
        if (key && key[0] == '#' && strlen(key) == 2) {
            char tag_name = key[1];
            g_string_append_printf(query,
                " AND EXISTS (SELECT 1 FROM json_each(tags) "
                "WHERE json_extract(value, '$[0]') = '%c' "
                "AND json_extract(value, '$[1]') IN (", tag_name);
            for (int i = 0; i < cJSON_GetArraySize(item); i++) {
                g_string_append(query, i ? ",?" : "?");
            }
            g_string_append(query, "))");
        }
    }

    // Handle time range
    cJSON* since = cJSON_GetObjectItem(filter, "since");
    if (since) {
        g_string_append(query, " AND created_at >= ?");
    }

    cJSON* until = cJSON_GetObjectItem(filter, "until");
    if (until) {
        g_string_append(query, " AND created_at <= ?");
    }

    // Standard ordering and limit
    g_string_append(query, " ORDER BY created_at DESC, id ASC");

    cJSON* limit = cJSON_GetObjectItem(filter, "limit");
    if (limit) {
        g_string_append(query, " LIMIT ?");
    }

    return g_string_free(query, FALSE);
}
```
## Benefits of This Approach

### 1. Query Simplicity
- ✅ Single table = simple REQ queries
- ✅ No UNION complexity
- ✅ Familiar SQL patterns
- ✅ Easy LIMIT and ORDER BY handling

### 2. Protocol Compliance
- ✅ Event type classification enforced
- ✅ Replacement logic via triggers
- ✅ Unique constraints prevent duplicates
- ✅ Proper handling of all event types

### 3. Performance
- ✅ Unified indexes across all events
- ✅ No join overhead for basic queries
- ✅ JSON tag indexes for complex filters
- ✅ Single table scan for cross-kind queries

### 4. Implementation Simplicity
- ✅ Minimal changes from current code
- ✅ Database handles replacement logic
- ✅ Simple event storage function
- ✅ No complex routing logic needed

### 5. Future Flexibility
- ✅ Can add columns for new event types
- ✅ Can split tables later if needed
- ✅ Easy to add new indexes
- ✅ Extensible constraint system

## Migration Path

### Phase 1: Schema Update
1. Add `event_type` column to existing events table
2. Add JSON `tags` column
3. Create classification triggers
4. Add partial unique constraints

### Phase 2: Data Migration
1. Classify existing events by kind
2. Convert existing tag table data to JSON
3. Verify constraint compliance
4. Update indexes

### Phase 3: Code Updates
1. Update event storage to use new schema
2. Simplify REQ query building
3. Remove tag table JOIN logic
4. Test subscription filtering

### Phase 4: Optimization
1. Monitor query performance
2. Add specialized indexes as needed
3. Tune replacement triggers
4. Consider ephemeral event cleanup

## Conclusion

This hybrid approach achieves the best of both worlds:

- **Protocol compliance** through event type classification and constraints
- **Query simplicity** through unified storage
- **Performance** through targeted indexes
- **Implementation ease** through minimal complexity

The multi-table approach, while theoretically cleaner, creates a subscription query nightmare that would significantly burden the implementation. The hybrid single-table approach provides all the benefits with manageable complexity.
@@ -1,326 +0,0 @@
# Implementation Plan: Hybrid Schema Migration

## Overview

Migrating from the current two-table design (event + tag tables) to a single event table with a JSON tags column and event type classification.

## Current Schema → Target Schema

### Current Schema (to be replaced)
```sql
CREATE TABLE event (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL
);

CREATE TABLE tag (
    id TEXT NOT NULL,      -- references event.id
    name TEXT NOT NULL,
    value TEXT NOT NULL,
    parameters TEXT
);
```

### Target Schema (simplified from final recommendation)
```sql
CREATE TABLE events (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON NOT NULL DEFAULT '[]',
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))

    -- Optional: protocol compliance constraints (can be added later).
    -- SQLite expresses these as partial unique indexes, e.g.:
    --   CREATE UNIQUE INDEX unique_replaceable ON events(pubkey, kind)
    --       WHERE event_type = 'replaceable';
    -- The addressable case additionally needs a materialized d_tag
    -- column, since JSONPath filters such as '$[?(@[0]=="d")][1]' are
    -- not supported by SQLite.
);
```

## Implementation Steps

### Phase 1: Update Schema File

**File**: `db/schema.sql`

1. Replace current event table definition
2. Remove tag table completely
3. Add new indexes for performance
4. Add event type classification logic

### Phase 2: Update C Code

**File**: `src/main.c`

1. Add event type classification function
2. Update `store_event()` function to use JSON tags
3. Update `retrieve_event()` function to return JSON tags
4. Remove all tag table related code
5. Update REQ query handling to use JSON tag queries

### Phase 3: Update Database Initialization

**File**: `db/init.sh`

1. Update table count validation (expect 1 table instead of 2)
2. Update schema verification logic

### Phase 4: Update Tests

**File**: `tests/1_nip_test.sh`

1. Verify events are stored with JSON tags
2. Test query functionality with new schema
3. Validate event type classification

### Phase 5: Migration Strategy

Create a migration script to handle existing data (if any).

## Detailed Implementation

### 1. Event Type Classification

```c
// Add to src/main.c
typedef enum {
    EVENT_TYPE_REGULAR,
    EVENT_TYPE_REPLACEABLE,
    EVENT_TYPE_EPHEMERAL,
    EVENT_TYPE_ADDRESSABLE,
    EVENT_TYPE_UNKNOWN
} event_type_t;

event_type_t classify_event_kind(int kind) {
    if ((kind >= 1000 && kind < 10000) ||
        (kind >= 4 && kind < 45) ||
        kind == 1 || kind == 2) {
        return EVENT_TYPE_REGULAR;
    }
    if ((kind >= 10000 && kind < 20000) ||
        kind == 0 || kind == 3) {
        return EVENT_TYPE_REPLACEABLE;
    }
    if (kind >= 20000 && kind < 30000) {
        return EVENT_TYPE_EPHEMERAL;
    }
    if (kind >= 30000 && kind < 40000) {
        return EVENT_TYPE_ADDRESSABLE;
    }
    return EVENT_TYPE_UNKNOWN;
}

const char* event_type_to_string(event_type_t type) {
    switch (type) {
        case EVENT_TYPE_REGULAR: return "regular";
        case EVENT_TYPE_REPLACEABLE: return "replaceable";
        case EVENT_TYPE_EPHEMERAL: return "ephemeral";
        case EVENT_TYPE_ADDRESSABLE: return "addressable";
        default: return "unknown";
    }
}
```

### 2. Updated store_event Function

```c
// Replace existing store_event function
int store_event(cJSON* event) {
    if (!g_db || !event) {
        return -1;
    }

    // Extract event fields
    cJSON* id = cJSON_GetObjectItem(event, "id");
    cJSON* pubkey = cJSON_GetObjectItem(event, "pubkey");
    cJSON* created_at = cJSON_GetObjectItem(event, "created_at");
    cJSON* kind = cJSON_GetObjectItem(event, "kind");
    cJSON* content = cJSON_GetObjectItem(event, "content");
    cJSON* sig = cJSON_GetObjectItem(event, "sig");
    cJSON* tags = cJSON_GetObjectItem(event, "tags");

    if (!id || !pubkey || !created_at || !kind || !content || !sig) {
        log_error("Invalid event - missing required fields");
        return -1;
    }

    // Classify event type
    event_type_t type = classify_event_kind((int)cJSON_GetNumberValue(kind));

    // Serialize tags to JSON (use empty array if no tags)
    char* tags_json = NULL;
    if (tags && cJSON_IsArray(tags)) {
        tags_json = cJSON_Print(tags);
    } else {
        tags_json = strdup("[]");
    }

    if (!tags_json) {
        log_error("Failed to serialize tags to JSON");
        return -1;
    }

    // Single INSERT statement
    const char* sql =
        "INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?)";

    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        log_error("Failed to prepare event insert statement");
        free(tags_json);
        return -1;
    }

    // Bind parameters
    sqlite3_bind_text(stmt, 1, cJSON_GetStringValue(id), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 2, cJSON_GetStringValue(pubkey), -1, SQLITE_STATIC);
    sqlite3_bind_int64(stmt, 3, (sqlite3_int64)cJSON_GetNumberValue(created_at));
    sqlite3_bind_int(stmt, 4, (int)cJSON_GetNumberValue(kind));
    sqlite3_bind_text(stmt, 5, event_type_to_string(type), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 6, cJSON_GetStringValue(content), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 7, cJSON_GetStringValue(sig), -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 8, tags_json, -1, SQLITE_TRANSIENT);

    // Execute statement
    rc = sqlite3_step(stmt);
    sqlite3_finalize(stmt);

    if (rc != SQLITE_DONE) {
        if (rc == SQLITE_CONSTRAINT) {
            log_warning("Event already exists in database");
            free(tags_json);
            return 0; // Not an error, just duplicate
        }
        char error_msg[256];
        snprintf(error_msg, sizeof(error_msg), "Failed to insert event: %s", sqlite3_errmsg(g_db));
        log_error(error_msg);
        free(tags_json);
        return -1;
    }

    free(tags_json);
    log_success("Event stored in database");
    return 0;
}
```

### 3. Updated retrieve_event Function

```c
// Replace existing retrieve_event function
cJSON* retrieve_event(const char* event_id) {
    if (!g_db || !event_id) {
        return NULL;
    }

    const char* sql =
        "SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE id = ?";

    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        return NULL;
    }

    sqlite3_bind_text(stmt, 1, event_id, -1, SQLITE_STATIC);

    cJSON* event = NULL;
    if (sqlite3_step(stmt) == SQLITE_ROW) {
        event = cJSON_CreateObject();

        cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
        cJSON_AddStringToObject(event, "pubkey", (char*)sqlite3_column_text(stmt, 1));
        cJSON_AddNumberToObject(event, "created_at", sqlite3_column_int64(stmt, 2));
        cJSON_AddNumberToObject(event, "kind", sqlite3_column_int(stmt, 3));
        cJSON_AddStringToObject(event, "content", (char*)sqlite3_column_text(stmt, 4));
        cJSON_AddStringToObject(event, "sig", (char*)sqlite3_column_text(stmt, 5));

        // Parse tags JSON
        const char* tags_json = (char*)sqlite3_column_text(stmt, 6);
        if (tags_json) {
            cJSON* tags = cJSON_Parse(tags_json);
            if (tags) {
                cJSON_AddItemToObject(event, "tags", tags);
            } else {
                cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
            }
        } else {
            cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
        }
    }

    sqlite3_finalize(stmt);
    return event;
}
```

## Migration Considerations

### Handling Existing Data

If there's existing data in the current schema:

1. **Export existing events and tags**
2. **Transform tag data to JSON format** (see the sketch below)
3. **Classify events by kind**
4. **Import into new schema**
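Steps 2 and 3 can run almost entirely inside SQLite. A sketch of the tag transformation using the JSON1 aggregate functions; it assumes the new `events` table has already been populated from the old `event` rows, and the old `parameters` column is dropped here for brevity:

```c
#include <stdio.h>
#include <sqlite3.h>

extern sqlite3 *g_db;

/* Fold rows of the old `tag` table into the new JSON `tags` column.
 * json_group_array/json_array come from SQLite's JSON1 extension;
 * events with no tag rows end up with '[]'. Returns 0 on success. */
int migrate_tags_to_json(void) {
    const char *sql =
        "UPDATE events SET tags = COALESCE("
        "  (SELECT json_group_array(json_array(t.name, t.value))"
        "   FROM tag t WHERE t.id = events.id),"
        "  '[]')";
    char *err = NULL;
    if (sqlite3_exec(g_db, sql, NULL, NULL, &err) != SQLITE_OK) {
        fprintf(stderr, "tag migration failed: %s\n", err ? err : "unknown");
        sqlite3_free(err);
        return -1;
    }
    return 0;
}
```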
### Backward Compatibility

- API remains the same - events still have the same JSON structure
- Internal storage changes but external interface is unchanged
- Tests should pass with minimal modifications

## Performance Optimizations

### Essential Indexes

```sql
-- Core performance indexes
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);

-- Composite indexes for common query patterns
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);

-- JSON tag indexes for common tag patterns.
-- NOTE: as in the schema recommendation document, the '$[*]' wildcard
-- is not supported by SQLite's json_extract; these express the intent,
-- with tag filters answered via json_each() in queries.
CREATE INDEX idx_events_e_tags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'e';

CREATE INDEX idx_events_p_tags ON events(
    json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'p';
```

## Next Steps

1. **Switch to code mode** to implement the schema changes
2. **Update db/schema.sql** with new table definition
3. **Modify src/main.c** with new functions
4. **Update db/init.sh** for single table validation
5. **Test with existing test suite**

This approach will provide:
- ✅ Simplified schema management
- ✅ Protocol compliance preparation
- ✅ JSON tag query capabilities
- ✅ Performance optimization opportunities
- ✅ Easy REQ subscription handling

Ready to proceed with implementation?
@@ -1,331 +0,0 @@
# Subscription Query Complexity Analysis

## Overview

This document analyzes how Nostr REQ subscription filters would be implemented across different schema designs, focusing on query complexity, performance implications, and implementation burden.

## Nostr REQ Filter Specification Recap

Clients send REQ messages with filters containing:
- **`ids`**: List of specific event IDs
- **`authors`**: List of pubkeys
- **`kinds`**: List of event kinds
- **`#<letter>`**: Tag filters (e.g., `#e` for event refs, `#p` for pubkey mentions)
- **`since`/`until`**: Time range filters
- **`limit`**: Maximum events to return

### Key Filter Behaviors:
- **Multiple filters = OR logic**: Match any filter (see the sketch below)
- **Within filter = AND logic**: Match all specified conditions
- **Lists = IN logic**: Match any value in the list
- **Tag filters**: Must have at least one matching tag
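The OR-across-filters rule means a REQ with several filters can be served by running one query per filter and de-duplicating by event ID on the way out. A sketch of that outer loop; `build_filter_query` and `execute_query_dedup` stand in for the query machinery discussed in this document and are assumed here to use malloc-style allocation (the GString-based builder shown elsewhere would pair with `g_free` instead):

```c
#include <stdlib.h>
#include "cJSON.h"  /* adjust include path to the local tree */

/* Hypothetical helpers standing in for this document's query layer. */
char *build_filter_query(cJSON *filter);
void execute_query_dedup(const char *sql, cJSON *filter,
                         cJSON *seen_ids, cJSON *results);

/* Multiple filters = OR logic: run each filter independently and merge,
 * skipping event IDs already emitted. */
void handle_req_filters(cJSON *filters, cJSON *results) {
    cJSON *seen_ids = cJSON_CreateObject();  /* event id -> marker */
    cJSON *filter;
    cJSON_ArrayForEach(filter, filters) {
        char *sql = build_filter_query(filter);
        if (sql) {
            execute_query_dedup(sql, filter, seen_ids, results);
            free(sql);
        }
    }
    cJSON_Delete(seen_ids);
}
```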
||||
## Schema Comparison for REQ Handling
|
||||
|
||||
### Current Simple Schema (Single Table)
|
||||
```sql
|
||||
CREATE TABLE event (
|
||||
id TEXT PRIMARY KEY,
|
||||
pubkey TEXT NOT NULL,
|
||||
created_at INTEGER NOT NULL,
|
||||
kind INTEGER NOT NULL,
|
||||
content TEXT NOT NULL,
|
||||
sig TEXT NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE tag (
|
||||
id TEXT NOT NULL, -- event ID
|
||||
name TEXT NOT NULL,
|
||||
value TEXT NOT NULL,
|
||||
parameters TEXT
|
||||
);
|
||||
```
|
||||
|
||||
#### Sample REQ Query Implementation:
|
||||
```sql
|
||||
-- Filter: {"authors": ["pubkey1", "pubkey2"], "kinds": [1, 6], "#p": ["target_pubkey"]}
|
||||
SELECT DISTINCT e.*
|
||||
FROM event e
|
||||
WHERE e.pubkey IN ('pubkey1', 'pubkey2')
|
||||
AND e.kind IN (1, 6)
|
||||
AND EXISTS (
|
||||
SELECT 1 FROM tag t
|
||||
WHERE t.id = e.id AND t.name = 'p' AND t.value = 'target_pubkey'
|
||||
)
|
||||
ORDER BY e.created_at DESC, e.id ASC
|
||||
LIMIT ?;
|
||||
```
|
||||
|
||||
### Multi-Table Schema Challenge
|
||||
|
||||
With separate tables (`events_regular`, `events_replaceable`, `events_ephemeral`, `events_addressable`), a REQ filter could potentially match events across ALL tables.
|
||||
|
||||
#### Problem Example:
|
||||
Filter: `{"kinds": [1, 0, 20001, 30023]}`
|
||||
- Kind 1 → `events_regular`
|
||||
- Kind 0 → `events_replaceable`
|
||||
- Kind 20001 → `events_ephemeral`
|
||||
- Kind 30023 → `events_addressable`
|
||||
|
||||
This requires **4 separate queries + UNION**, significantly complicating the implementation.
|
||||
|
||||
## Multi-Table Query Complexity
|
||||
|
||||
### Scenario 1: Cross-Table Kind Filter
|
||||
```sql
|
||||
-- Filter: {"kinds": [1, 0, 30023]}
|
||||
-- Requires querying 3 different tables
|
||||
|
||||
SELECT id, pubkey, created_at, kind, content, sig FROM events_regular
|
||||
WHERE kind = 1
|
||||
UNION ALL
|
||||
SELECT id, pubkey, created_at, kind, content, sig FROM events_replaceable
|
||||
WHERE kind = 0
|
||||
UNION ALL
|
||||
SELECT id, pubkey, created_at, kind, content, sig FROM events_addressable
|
||||
WHERE kind = 30023
|
||||
ORDER BY created_at DESC, id ASC
|
||||
LIMIT ?;
|
||||
```
|
||||
|
||||
### Scenario 2: Cross-Table Author Filter
|
||||
```sql
|
||||
-- Filter: {"authors": ["pubkey1"]}
|
||||
-- Must check ALL tables for this author
|
||||
|
||||
SELECT id, pubkey, created_at, kind, content, sig FROM events_regular
|
||||
WHERE pubkey = 'pubkey1'
|
||||
UNION ALL
|
||||
SELECT id, pubkey, created_at, kind, content, sig FROM events_replaceable
|
||||
WHERE pubkey = 'pubkey1'
|
||||
UNION ALL
|
||||
SELECT id, pubkey, created_at, kind, content, sig FROM events_ephemeral
|
||||
WHERE pubkey = 'pubkey1'
|
||||
UNION ALL
|
||||
SELECT id, pubkey, created_at, kind, content, sig FROM events_addressable
|
||||
WHERE pubkey = 'pubkey1'
|
||||
ORDER BY created_at DESC, id ASC
|
||||
LIMIT ?;
|
||||
```
|
||||
|
||||
### Scenario 3: Complex Multi-Condition Filter
|
||||
```sql
|
||||
-- Filter: {"authors": ["pubkey1"], "kinds": [1, 0], "#p": ["target"], "since": 1234567890}
|
||||
-- Extremely complex with multiple UNIONs and tag JOINs
|
||||
|
||||
WITH regular_results AS (
|
||||
SELECT DISTINCT r.*
|
||||
FROM events_regular r
|
||||
JOIN tags_regular tr ON r.id = tr.event_id
|
||||
WHERE r.pubkey = 'pubkey1'
|
||||
AND r.kind = 1
|
||||
AND r.created_at >= 1234567890
|
||||
AND tr.name = 'p' AND tr.value = 'target'
|
||||
),
|
||||
replaceable_results AS (
|
||||
SELECT DISTINCT rp.*
|
||||
FROM events_replaceable rp
|
||||
JOIN tags_replaceable trp ON (rp.pubkey, rp.kind) = (trp.event_pubkey, trp.event_kind)
|
||||
WHERE rp.pubkey = 'pubkey1'
|
||||
AND rp.kind = 0
|
||||
AND rp.created_at >= 1234567890
|
||||
AND trp.name = 'p' AND trp.value = 'target'
|
||||
)
|
||||
SELECT * FROM regular_results
|
||||
UNION ALL
|
||||
SELECT * FROM replaceable_results
|
||||
ORDER BY created_at DESC, id ASC
|
||||
LIMIT ?;
|
||||
```
|
||||
|
||||
## Implementation Burden Analysis
|
||||
|
||||
### Single Table Approach
|
||||
```c
|
||||
// Simple - one query builder function
|
||||
char* build_filter_query(cJSON* filters) {
|
||||
// Build single SELECT with WHERE conditions
|
||||
// Single ORDER BY and LIMIT
|
||||
// One execution path
|
||||
}
|
||||
```
|
||||
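A minimal sketch of what such a builder could look like, handling only the `kinds` field (hypothetical helper; `authors`, tag, and time conditions would follow the same append pattern, and real code should bind parameters rather than format literals):

```c
/* Hypothetical sketch: append a kinds IN (...) clause for one filter.
 * Bounds checks are trimmed for brevity. */
#include <stdio.h>
#include <cjson/cJSON.h>

static void build_kinds_query(const cJSON *filter, char *sql, size_t cap) {
    size_t off = (size_t)snprintf(sql, cap,
        "SELECT id, pubkey, created_at, kind, content, sig, tags "
        "FROM events WHERE 1=1");
    const cJSON *kinds = cJSON_GetObjectItemCaseSensitive(filter, "kinds");
    if (cJSON_IsArray(kinds) && cJSON_GetArraySize(kinds) > 0) {
        off += (size_t)snprintf(sql + off, cap - off, " AND kind IN (");
        int first = 1;
        const cJSON *k = NULL;
        cJSON_ArrayForEach(k, kinds) {
            if (!cJSON_IsNumber(k)) continue;   /* skip malformed entries */
            off += (size_t)snprintf(sql + off, cap - off,
                                    first ? "%d" : ",%d", k->valueint);
            first = 0;
        }
        off += (size_t)snprintf(sql + off, cap - off, ")");
    }
    snprintf(sql + off, cap - off,
             " ORDER BY created_at DESC, id ASC LIMIT 500");
}
```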

### Multi-Table Approach
```c
// Complex - requires routing and union logic
char* build_multi_table_query(cJSON* filters) {
    // 1. Analyze kinds to determine which tables to query
    // 2. Split filters per table type
    // 3. Build separate queries for each table
    // 4. Union results with complex ORDER BY
    // 5. Handle LIMIT across UNION (tricky!)
}

typedef struct {
    bool needs_regular;
    bool needs_replaceable;
    bool needs_ephemeral;
    bool needs_addressable;
    cJSON* regular_filter;
    cJSON* replaceable_filter;
    cJSON* ephemeral_filter;
    cJSON* addressable_filter;
} filter_routing_t;
```

### Query Routing Complexity

For each REQ filter, we must:

1. **Analyze kinds** → Determine which tables to query (see the sketch below)
2. **Split filters** → Create per-table filter conditions
3. **Handle tag filters** → Different tag table references per event type
4. **Union results** → Merge with proper ordering
5. **Apply LIMIT** → Complex with UNION queries
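Step 1's kind analysis maps each kind to its storage class using the NIP-01 ranges; a minimal sketch:

```c
/* Sketch: classify a kind per NIP-01 so a filter can be routed to the
 * right table(s). Kinds 0, 3, and 10000-19999 are replaceable,
 * 20000-29999 ephemeral, 30000-39999 addressable, everything else regular. */
typedef enum { EV_REGULAR, EV_REPLACEABLE, EV_EPHEMERAL, EV_ADDRESSABLE } ev_class_t;

static ev_class_t classify_kind(int kind) {
    if (kind == 0 || kind == 3 || (kind >= 10000 && kind < 20000))
        return EV_REPLACEABLE;
    if (kind >= 20000 && kind < 30000)
        return EV_EPHEMERAL;
    if (kind >= 30000 && kind < 40000)
        return EV_ADDRESSABLE;
    return EV_REGULAR;
}
```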

## Performance Implications

### Single Table Advantages:
- ✅ **Single query execution**
- ✅ **One index strategy**
- ✅ **Simple LIMIT handling**
- ✅ **Unified ORDER BY**
- ✅ **No UNION overhead**

### Multi-Table Disadvantages:
- ❌ **Multiple query executions**
- ❌ **UNION sorting overhead**
- ❌ **Complex LIMIT application**
- ❌ **Index fragmentation across tables**
- ❌ **Result set merging complexity**

## Specific REQ Filter Challenges

### 1. LIMIT Handling with UNION
```sql
-- WRONG: the limit applies to each subquery
(SELECT * FROM events_regular WHERE ... LIMIT 100)
UNION ALL
(SELECT * FROM events_replaceable WHERE ... LIMIT 100)
-- Could return 200 events!

-- CORRECT: the limit applies to the final, ordered result
SELECT * FROM (
    SELECT * FROM events_regular WHERE ...
    UNION ALL
    SELECT * FROM events_replaceable WHERE ...
)
ORDER BY created_at DESC, id ASC
LIMIT 100;
-- But this sorts ALL results before limiting!
```

### 2. Tag Filter Complexity
Each event type needs different tag table joins:
- `events_regular` → `tags_regular`
- `events_replaceable` → `tags_replaceable` (with composite key)
- `events_addressable` → `tags_addressable` (with composite key)
- `events_ephemeral` → `tags_ephemeral`

### 3. Subscription State Management
With multiple tables, subscription state becomes complex:
- Which tables does this subscription monitor?
- How to efficiently check new events across tables?
- Different trigger/notification patterns per table

## Alternative: Unified Event View

### Hybrid Approach: Views Over Multi-Tables
```sql
-- Create unified view for queries
CREATE VIEW all_events AS
SELECT
    'regular' as event_type,
    id, pubkey, created_at, kind, content, sig
FROM events_regular
UNION ALL
SELECT
    'replaceable' as event_type,
    id, pubkey, created_at, kind, content, sig
FROM events_replaceable
UNION ALL
SELECT
    'ephemeral' as event_type,
    id, pubkey, created_at, kind, content, sig
FROM events_ephemeral
UNION ALL
SELECT
    'addressable' as event_type,
    id, pubkey, created_at, kind, content, sig
FROM events_addressable;

-- Unified tag view (SQLite concatenates with ||, not CONCAT)
CREATE VIEW all_tags AS
SELECT event_id, name, value, parameters FROM tags_regular
UNION ALL
SELECT event_pubkey || ':' || event_kind AS event_id, name, value, parameters FROM tags_replaceable
UNION ALL
SELECT event_id, name, value, parameters FROM tags_ephemeral
UNION ALL
SELECT event_pubkey || ':' || event_kind || ':' || d_tag, name, value, parameters FROM tags_addressable;
```

### REQ Query Against Views:
```sql
-- Much simpler - back to single-table complexity
SELECT DISTINCT e.*
FROM all_events e
JOIN all_tags t ON e.id = t.event_id
WHERE e.pubkey IN (?)
  AND e.kind IN (?)
  AND t.name = 'p' AND t.value = ?
ORDER BY e.created_at DESC, e.id ASC
LIMIT ?;
```

## Recommendation

**The multi-table approach creates significant subscription query complexity that may outweigh its benefits.**

### Key Issues:
1. **REQ filters don't map to event types** - clients filter by kind, author, and tags, not by storage semantics
2. **UNION query complexity** - much harder to optimize and implement
3. **Subscription management burden** - must monitor multiple tables
4. **Performance uncertainty** - UNION queries may be slower than a single table

### Alternative Recommendation:

**Modified Single Table with Event Type Column:**

```sql
CREATE TABLE events (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    event_type TEXT NOT NULL,   -- 'regular', 'replaceable', 'ephemeral', 'addressable'
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON,

    -- Replaceable event fields
    replaced_at INTEGER,

    -- Addressable event fields
    d_tag TEXT
);

-- Unique constraints per event type. SQLite has no partial UNIQUE
-- table constraints, so partial unique indexes enforce them instead.
CREATE UNIQUE INDEX unique_replaceable
    ON events(pubkey, kind) WHERE event_type = 'replaceable';
CREATE UNIQUE INDEX unique_addressable
    ON events(pubkey, kind, d_tag) WHERE event_type = 'addressable';
```

### Benefits:
- ✅ **Simple REQ queries** - single table, familiar patterns
- ✅ **Type enforcement** - partial unique indexes handle the replacement logic (sketch below)
- ✅ **Performance** - unified indexes, no UNIONs
- ✅ **Implementation simplicity** - minimal changes from current code
- ✅ **Future flexibility** - can split tables later if needed

This approach gets the best of both worlds: protocol compliance through constraints, and query simplicity through unified storage.
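To make the type-enforcement bullet concrete, here is a hedged sketch of a newest-wins insert for a replaceable event, using SQLite's upsert against the partial index above (requires SQLite 3.24+; parameter binding omitted):

```c
/* Hypothetical sketch: insert a replaceable event; if the author already
 * has one of this kind, keep whichever version is newer. */
static const char *REPLACEABLE_UPSERT =
    "INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
    "VALUES (?, ?, ?, ?, 'replaceable', ?, ?, ?) "
    "ON CONFLICT (pubkey, kind) WHERE event_type = 'replaceable' "
    "DO UPDATE SET id = excluded.id, created_at = excluded.created_at, "
    "              content = excluded.content, sig = excluded.sig, tags = excluded.tags "
    "WHERE excluded.created_at > events.created_at;";
```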
@@ -5,6 +5,71 @@
 echo "=== C Nostr Relay Build and Restart Script ==="
 
+# Parse command line arguments
+PRESERVE_CONFIG=false
+HELP=false
+
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        --preserve-config|-p)
+            PRESERVE_CONFIG=true
+            shift
+            ;;
+        --help|-h)
+            HELP=true
+            shift
+            ;;
+        *)
+            echo "Unknown option: $1"
+            HELP=true
+            shift
+            ;;
+    esac
+done
+
+# Show help
+if [ "$HELP" = true ]; then
+    echo "Usage: $0 [OPTIONS]"
+    echo ""
+    echo "Options:"
+    echo "  --preserve-config, -p    Keep existing configuration file (don't regenerate)"
+    echo "  --help, -h               Show this help message"
+    echo ""
+    echo "Development Setup:"
+    echo "  Uses local config directory: ./dev-config/"
+    echo "  This avoids conflicts with production instances using ~/.config/c-relay/"
+    echo ""
+    echo "Default behavior: Automatically regenerates configuration file on each build"
+    echo "                  for development purposes"
+    exit 0
+fi
+
+# Handle configuration file and database regeneration
+# Use local development config directory to avoid conflicts with production
+DEV_CONFIG_DIR="./dev-config"
+CONFIG_FILE="$DEV_CONFIG_DIR/c_relay_config_event.json"
+DB_FILE="./db/c_nostr_relay.db"
+
+# Create development config directory if it doesn't exist
+mkdir -p "$DEV_CONFIG_DIR"
+
+if [ "$PRESERVE_CONFIG" = true ]; then
+    echo "Preserving existing development configuration and database as requested"
+else
+    if [ ! -f "$CONFIG_FILE" ] && [ ! -f "$DB_FILE" ]; then
+        echo "No existing development configuration or database found - will generate fresh setup"
+    fi
+    if [ -f "$CONFIG_FILE" ]; then
+        echo "Removing old development configuration file to trigger regeneration..."
+        rm -f "$CONFIG_FILE"
+        echo "✓ Development configuration file removed - will be regenerated with new keys"
+    fi
+    if [ -f "$DB_FILE" ]; then
+        echo "Removing old database to trigger fresh key generation..."
+        rm -f "$DB_FILE"*  # Remove db file and any WAL/SHM files
+        echo "✓ Database removed - will be recreated with embedded schema and new keys"
+    fi
+fi
+
 # Build the project first
 echo "Building project..."
 make clean all
@@ -15,9 +80,22 @@ if [ $? -ne 0 ]; then
     exit 1
 fi
 
-# Check if relay binary exists after build
-if [ ! -f "./src/main" ]; then
-    echo "ERROR: Relay binary not found after build. Build may have failed."
+# Check if relay binary exists after build - detect architecture
+ARCH=$(uname -m)
+case "$ARCH" in
+    x86_64)
+        BINARY_PATH="./build/c_relay_x86"
+        ;;
+    aarch64|arm64)
+        BINARY_PATH="./build/c_relay_arm64"
+        ;;
+    *)
+        BINARY_PATH="./build/c_relay_$ARCH"
+        ;;
+esac
+
+if [ ! -f "$BINARY_PATH" ]; then
+    echo "ERROR: Relay binary not found at $BINARY_PATH after build. Build may have failed."
     exit 1
 fi
 
@@ -25,7 +103,7 @@ echo "Build successful. Proceeding with relay restart..."
 
 # Kill existing relay if running
 echo "Stopping any existing relay servers..."
-pkill -f "./src/main" 2>/dev/null
+pkill -f "c_relay_" 2>/dev/null
 sleep 2  # Give time for shutdown
 
 # Check if port is still bound
@@ -35,7 +113,7 @@ if lsof -i :8888 >/dev/null 2>&1; then
 fi
 
 # Get any remaining processes
-REMAINING_PIDS=$(pgrep -f "./src/main" || echo "")
+REMAINING_PIDS=$(pgrep -f "c_relay_" || echo "")
 if [ -n "$REMAINING_PIDS" ]; then
     echo "Force killing remaining processes: $REMAINING_PIDS"
     kill -9 $REMAINING_PIDS 2>/dev/null
@@ -47,18 +125,16 @@ fi
 # Clean up PID file
 rm -f relay.pid
 
-# Initialize database if needed
-if [ ! -f "./db/c_nostr_relay.db" ]; then
-    echo "Initializing database..."
-    ./db/init.sh --force >/dev/null 2>&1
-fi
+# Database initialization is now handled automatically by the relay
+# when it starts up with embedded schema
+echo "Database will be initialized automatically on startup if needed"
 
 # Start relay in background with output redirection
 echo "Starting relay server..."
-echo "Debug: Current processes: $(ps aux | grep './src/main' | grep -v grep || echo 'None')"
+echo "Debug: Current processes: $(ps aux | grep 'c_relay_' | grep -v grep || echo 'None')"
 
-# Start relay in background and capture its PID
-./src/main > relay.log 2>&1 &
+# Start relay in background and capture its PID with development config directory
+$BINARY_PATH --config-dir "$DEV_CONFIG_DIR" > relay.log 2>&1 &
 RELAY_PID=$!
 
 echo "Started with PID: $RELAY_PID"
@@ -77,10 +153,25 @@ if ps -p "$RELAY_PID" >/dev/null 2>&1; then
     # Save PID for debugging
     echo $RELAY_PID > relay.pid
 
+    # Check if new keys were generated and display them
+    sleep 1  # Give relay time to write initial logs
+    if grep -q "GENERATED RELAY KEYPAIRS" relay.log 2>/dev/null; then
+        echo "=== IMPORTANT: NEW KEYPAIRS GENERATED ==="
+        echo ""
+        # Extract and display the keypairs section from the log
+        grep -A 12 -B 2 "GENERATED RELAY KEYPAIRS" relay.log | head -n 16
+        echo ""
+        echo "⚠️  SAVE THESE PRIVATE KEYS SECURELY - THEY CONTROL YOUR RELAY!"
+        echo "⚠️  These keys are also logged in relay.log for reference"
+        echo ""
+    fi
+
     echo "=== Relay server running in background ==="
-    echo "To kill relay: pkill -f './src/main'"
-    echo "To check status: ps aux | grep src/main"
+    echo "Development config: $DEV_CONFIG_DIR/"
+    echo "To kill relay: pkill -f 'c_relay_'"
+    echo "To check status: ps aux | grep c_relay_"
    echo "To view logs: tail -f relay.log"
+    echo "Binary: $BINARY_PATH --config-dir $DEV_CONFIG_DIR"
     echo "Ready for Nostr client connections!"
 else
     echo "ERROR: Relay failed to start"
Submodule nostr_core_lib updated: 33129d82fd...55e2a9c68e
240 relay.log
@@ -1,240 +0,0 @@
=== C Nostr Relay Server ===
[SUCCESS] Database connection established
[INFO] Starting relay server...
[INFO] Starting libwebsockets-based Nostr relay server...
[SUCCESS] WebSocket relay started on ws://127.0.0.1:8888
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 5 rows
[INFO] Total events sent: 5
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 17 rows
[INFO] Total events sent: 17
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 7 rows
[INFO] Total events sent: 7
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND pubkey IN ('aa4fc8665f5696e33db7e1a572e3b0f5b3d615837b0f362dcb1c8068b098c7b4') ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 17 rows
[INFO] Total events sent: 17
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND created_at >= 1756983802 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 6 rows
[INFO] Total events sent: 6
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 17 rows
[INFO] Total events sent: 17
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0,1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 8 rows
[INFO] Total events sent: 8
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 1
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 22 rows
[INFO] Total events sent: 22
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 9 rows
[INFO] Total events sent: 9
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND pubkey IN ('aa4fc8665f5696e33db7e1a572e3b0f5b3d615837b0f362dcb1c8068b098c7b4') ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 22 rows
[INFO] Total events sent: 22
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND created_at >= 1756983945 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 9 rows
[INFO] Total events sent: 9
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 22 rows
[INFO] Total events sent: 22
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0,1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 10 rows
[INFO] Total events sent: 10
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 1
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
1170 src/config.c (new file)
File diff suppressed because it is too large
232 src/config.h (new file)
@@ -0,0 +1,232 @@
#ifndef CONFIG_H
#define CONFIG_H

#include <sqlite3.h>
#include <time.h>
#include <stddef.h>
#include <cjson/cJSON.h>

// Configuration system constants
#define CONFIG_KEY_MAX_LENGTH 64
#define CONFIG_VALUE_MAX_LENGTH 512
#define CONFIG_DESCRIPTION_MAX_LENGTH 256
#define CONFIG_XDG_DIR_NAME "c-relay"
#define CONFIG_FILE_NAME "c_relay_config_event.json"
#define CONFIG_ADMIN_PRIVKEY_ENV "C_RELAY_ADMIN_PRIVKEY"
#define CONFIG_RELAY_PRIVKEY_ENV "C_RELAY_PRIVKEY"
#define CONFIG_DIR_OVERRIDE_ENV "C_RELAY_CONFIG_DIR_OVERRIDE"
#define CONFIG_FILE_OVERRIDE_ENV "C_RELAY_CONFIG_FILE_OVERRIDE"
#define NOSTR_PUBKEY_HEX_LENGTH 64
#define NOSTR_PRIVKEY_HEX_LENGTH 64
#define NOSTR_EVENT_ID_HEX_LENGTH 64
#define NOSTR_SIGNATURE_HEX_LENGTH 128

// Protocol and implementation constants (hardcoded - should NOT be configurable)
#define SUBSCRIPTION_ID_MAX_LENGTH 64
#define CLIENT_IP_MAX_LENGTH 64
#define RELAY_NAME_MAX_LENGTH 128
#define RELAY_DESCRIPTION_MAX_LENGTH 1024
#define RELAY_URL_MAX_LENGTH 256
#define RELAY_CONTACT_MAX_LENGTH 128
#define RELAY_PUBKEY_MAX_LENGTH 65

// Default configuration values (used as fallbacks if database config fails)
#define DEFAULT_DATABASE_PATH "db/c_nostr_relay.db"
#define DEFAULT_PORT 8888
#define DEFAULT_HOST "127.0.0.1"
#define MAX_CLIENTS 100
#define MAX_SUBSCRIPTIONS_PER_CLIENT 20
#define MAX_TOTAL_SUBSCRIPTIONS 5000
#define MAX_FILTERS_PER_SUBSCRIPTION 10

// Configuration types
typedef enum {
    CONFIG_TYPE_SYSTEM = 0,
    CONFIG_TYPE_USER = 1,
    CONFIG_TYPE_RUNTIME = 2
} config_type_t;

// Configuration data types
typedef enum {
    CONFIG_DATA_STRING = 0,
    CONFIG_DATA_INTEGER = 1,
    CONFIG_DATA_BOOLEAN = 2,
    CONFIG_DATA_JSON = 3
} config_data_type_t;

// Configuration validation result
typedef enum {
    CONFIG_VALID = 0,
    CONFIG_INVALID_TYPE = 1,
    CONFIG_INVALID_RANGE = 2,
    CONFIG_INVALID_FORMAT = 3,
    CONFIG_MISSING_REQUIRED = 4
} config_validation_result_t;

// Configuration entry structure
typedef struct {
    char key[CONFIG_KEY_MAX_LENGTH];
    char value[CONFIG_VALUE_MAX_LENGTH];
    char description[CONFIG_DESCRIPTION_MAX_LENGTH];
    config_type_t config_type;
    config_data_type_t data_type;
    int is_sensitive;
    int requires_restart;
    time_t created_at;
    time_t updated_at;
} config_entry_t;

// Configuration manager state
typedef struct {
    sqlite3* db;
    sqlite3_stmt* get_config_stmt;
    sqlite3_stmt* set_config_stmt;
    sqlite3_stmt* log_change_stmt;

    // Configuration loading status
    int file_config_loaded;
    int database_config_loaded;
    time_t last_reload;

    // XDG configuration directory
    char config_dir_path[512];
    char config_file_path[600];
} config_manager_t;

// Global configuration manager instance
extern config_manager_t g_config_manager;

// ================================
// CORE CONFIGURATION FUNCTIONS
// ================================

// Initialize configuration system
int init_configuration_system(void);

// Cleanup configuration system
void cleanup_configuration_system(void);

// Load configuration from all sources (file -> database -> defaults)
int load_configuration(void);

// Apply loaded configuration to global variables
int apply_configuration_to_globals(void);

// ================================
// DATABASE CONFIGURATION FUNCTIONS
// ================================

// Initialize database prepared statements
int init_config_database_statements(void);

// Get configuration value from database
int get_database_config(const char* key, char* value, size_t value_size);

// Set configuration value in database
int set_database_config(const char* key, const char* new_value, const char* changed_by);

// Load all configuration from database
int load_config_from_database(void);

// ================================
// FILE CONFIGURATION FUNCTIONS
// ================================

// Get XDG configuration directory path
int get_xdg_config_dir(char* path, size_t path_size);

// Check if configuration file exists
int config_file_exists(void);

// Load configuration from file
int load_config_from_file(void);

// Validate and apply Nostr configuration event
int validate_and_apply_config_event(const cJSON* event);

// Validate Nostr event structure
int validate_nostr_event_structure(const cJSON* event);

// Validate configuration tags array
int validate_config_tags(const cJSON* tags);

// Extract and apply configuration tags to database
int extract_and_apply_config_tags(const cJSON* tags);

// ================================
// CONFIGURATION ACCESS FUNCTIONS
// ================================

// Get configuration value (checks all sources: file -> database -> environment -> defaults)
const char* get_config_value(const char* key);

// Get configuration value as integer
int get_config_int(const char* key, int default_value);

// Get configuration value as boolean
int get_config_bool(const char* key, int default_value);

// Set configuration value (updates database)
int set_config_value(const char* key, const char* value);

// ================================
// CONFIGURATION VALIDATION
// ================================

// Validate configuration value
config_validation_result_t validate_config_value(const char* key, const char* value);

// Log validation error
void log_config_validation_error(const char* key, const char* value, const char* error);

// ================================
// UTILITY FUNCTIONS
// ================================

// Convert config type enum to string
const char* config_type_to_string(config_type_t type);

// Convert config data type enum to string
const char* config_data_type_to_string(config_data_type_t type);

// Convert string to config type enum
config_type_t string_to_config_type(const char* str);

// Convert string to config data type enum
config_data_type_t string_to_config_data_type(const char* str);

// Check if configuration key requires restart
int config_requires_restart(const char* key);

// ================================
// NOSTR EVENT GENERATION FUNCTIONS
// ================================

// Generate configuration file with valid Nostr event if it doesn't exist
int generate_config_file_if_missing(void);

// Create a valid Nostr configuration event from database values
cJSON* create_config_nostr_event(const char* privkey_hex);

// Generate a random private key (32 bytes as hex string)
int generate_random_privkey(char* privkey_hex, size_t buffer_size);

// Derive public key from private key (using secp256k1)
int derive_pubkey_from_privkey(const char* privkey_hex, char* pubkey_hex, size_t buffer_size);

// Create Nostr event ID (SHA256 of serialized event data)
int create_nostr_event_id(const cJSON* event, char* event_id_hex, size_t buffer_size);

// Sign Nostr event (using secp256k1 Schnorr signature)
int sign_nostr_event(const cJSON* event, const char* privkey_hex, char* signature_hex, size_t buffer_size);

// Write configuration event to file
int write_config_event_to_file(const cJSON* event);

// Helper function to generate random private key
int generate_random_private_key(char* privkey_hex, size_t buffer_size);

// Helper function to derive public key from private key
int derive_public_key(const char* privkey_hex, char* pubkey_hex, size_t buffer_size);

#endif // CONFIG_H
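A minimal call-site sketch for this API (hypothetical startup code; the `"port"` key name is illustrative and error handling is trimmed):

```c
#include <stdio.h>
#include "config.h"

int main(void) {
    if (init_configuration_system() != 0) {
        fprintf(stderr, "config init failed\n");
        return 1;
    }
    load_configuration();               /* file -> database -> defaults */
    apply_configuration_to_globals();

    /* "port" is an illustrative key; DEFAULT_PORT is the declared fallback */
    int port = get_config_int("port", DEFAULT_PORT);
    printf("relay will listen on port %d\n", port);

    cleanup_configuration_system();
    return 0;
}
```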
2544 src/main.c
File diff suppressed because it is too large
313 src/sql_schema.h (new file)
@@ -0,0 +1,313 @@
/* Embedded SQL Schema for C Nostr Relay
 * Generated from db/schema.sql - Do not edit manually
 * Schema Version: 3
 */
#ifndef SQL_SCHEMA_H
#define SQL_SCHEMA_H

/* Schema version constant */
#define EMBEDDED_SCHEMA_VERSION "3"

/* Embedded SQL schema as C string literal */
static const char* const EMBEDDED_SCHEMA_SQL =
"-- C Nostr Relay Database Schema\n\
-- SQLite schema for storing Nostr events with JSON tags support\n\
\n\
-- Schema version tracking\n\
PRAGMA user_version = 3;\n\
\n\
-- Enable foreign key support\n\
PRAGMA foreign_keys = ON;\n\
\n\
-- Optimize for performance\n\
PRAGMA journal_mode = WAL;\n\
PRAGMA synchronous = NORMAL;\n\
PRAGMA cache_size = 10000;\n\
\n\
-- Core events table with hybrid single-table design\n\
CREATE TABLE events (\n\
    id TEXT PRIMARY KEY,              -- Nostr event ID (hex string)\n\
    pubkey TEXT NOT NULL,             -- Public key of event author (hex string)\n\
    created_at INTEGER NOT NULL,      -- Event creation timestamp (Unix timestamp)\n\
    kind INTEGER NOT NULL,            -- Event kind (0-65535)\n\
    event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),\n\
    content TEXT NOT NULL,            -- Event content (text content only)\n\
    sig TEXT NOT NULL,                -- Event signature (hex string)\n\
    tags JSON NOT NULL DEFAULT '[]',  -- Event tags as JSON array\n\
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))  -- When relay received event\n\
);\n\
\n\
-- Core performance indexes\n\
CREATE INDEX idx_events_pubkey ON events(pubkey);\n\
CREATE INDEX idx_events_kind ON events(kind);\n\
CREATE INDEX idx_events_created_at ON events(created_at DESC);\n\
CREATE INDEX idx_events_event_type ON events(event_type);\n\
\n\
-- Composite indexes for common query patterns\n\
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);\n\
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);\n\
CREATE INDEX idx_events_pubkey_kind ON events(pubkey, kind);\n\
\n\
-- Schema information table\n\
CREATE TABLE schema_info (\n\
    key TEXT PRIMARY KEY,\n\
    value TEXT NOT NULL,\n\
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))\n\
);\n\
\n\
-- Insert schema metadata\n\
INSERT INTO schema_info (key, value) VALUES\n\
    ('version', '3'),\n\
    ('description', 'Hybrid single-table Nostr relay schema with JSON tags and configuration management'),\n\
    ('created_at', strftime('%s', 'now'));\n\
\n\
-- Helper views for common queries\n\
CREATE VIEW recent_events AS\n\
SELECT id, pubkey, created_at, kind, event_type, content\n\
FROM events\n\
WHERE event_type != 'ephemeral'\n\
ORDER BY created_at DESC\n\
LIMIT 1000;\n\
\n\
CREATE VIEW event_stats AS\n\
SELECT \n\
    event_type,\n\
    COUNT(*) as count,\n\
    AVG(length(content)) as avg_content_length,\n\
    MIN(created_at) as earliest,\n\
    MAX(created_at) as latest\n\
FROM events\n\
GROUP BY event_type;\n\
\n\
-- Optimization: Trigger for automatic cleanup of ephemeral events older than 1 hour\n\
CREATE TRIGGER cleanup_ephemeral_events\n\
AFTER INSERT ON events\n\
WHEN NEW.event_type = 'ephemeral'\n\
BEGIN\n\
    DELETE FROM events \n\
    WHERE event_type = 'ephemeral' \n\
      AND first_seen < (strftime('%s', 'now') - 3600);\n\
END;\n\
\n\
-- Replaceable event handling trigger\n\
CREATE TRIGGER handle_replaceable_events\n\
AFTER INSERT ON events\n\
WHEN NEW.event_type = 'replaceable'\n\
BEGIN\n\
    DELETE FROM events \n\
    WHERE pubkey = NEW.pubkey \n\
      AND kind = NEW.kind \n\
      AND event_type = 'replaceable'\n\
      AND id != NEW.id;\n\
END;\n\
\n\
-- Persistent Subscriptions Logging Tables (Phase 2)\n\
-- Optional database logging for subscription analytics and debugging\n\
\n\
-- Subscription events log\n\
CREATE TABLE subscription_events (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    subscription_id TEXT NOT NULL,    -- Subscription ID from client\n\
    client_ip TEXT NOT NULL,          -- Client IP address\n\
    event_type TEXT NOT NULL CHECK (event_type IN ('created', 'closed', 'expired', 'disconnected')),\n\
    filter_json TEXT,                 -- JSON representation of filters (for created events)\n\
    events_sent INTEGER DEFAULT 0,    -- Number of events sent to this subscription\n\
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    ended_at INTEGER,                 -- When subscription ended (for closed/expired/disconnected)\n\
    duration INTEGER                  -- Computed: ended_at - created_at\n\
);\n\
\n\
-- Subscription metrics summary\n\
CREATE TABLE subscription_metrics (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    date TEXT NOT NULL,                        -- Date (YYYY-MM-DD)\n\
    total_created INTEGER DEFAULT 0,           -- Total subscriptions created\n\
    total_closed INTEGER DEFAULT 0,            -- Total subscriptions closed\n\
    total_events_broadcast INTEGER DEFAULT 0,  -- Total events broadcast\n\
    avg_duration REAL DEFAULT 0,               -- Average subscription duration\n\
    peak_concurrent INTEGER DEFAULT 0,         -- Peak concurrent subscriptions\n\
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    UNIQUE(date)\n\
);\n\
\n\
-- Event broadcasting log (optional, for detailed analytics)\n\
CREATE TABLE event_broadcasts (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    event_id TEXT NOT NULL,           -- Event ID that was broadcast\n\
    subscription_id TEXT NOT NULL,    -- Subscription that received it\n\
    client_ip TEXT NOT NULL,          -- Client IP\n\
    broadcast_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    FOREIGN KEY (event_id) REFERENCES events(id)\n\
);\n\
\n\
-- Indexes for subscription logging performance\n\
CREATE INDEX idx_subscription_events_id ON subscription_events(subscription_id);\n\
CREATE INDEX idx_subscription_events_type ON subscription_events(event_type);\n\
CREATE INDEX idx_subscription_events_created ON subscription_events(created_at DESC);\n\
CREATE INDEX idx_subscription_events_client ON subscription_events(client_ip);\n\
\n\
CREATE INDEX idx_subscription_metrics_date ON subscription_metrics(date DESC);\n\
\n\
CREATE INDEX idx_event_broadcasts_event ON event_broadcasts(event_id);\n\
CREATE INDEX idx_event_broadcasts_sub ON event_broadcasts(subscription_id);\n\
CREATE INDEX idx_event_broadcasts_time ON event_broadcasts(broadcast_at DESC);\n\
\n\
-- Trigger to update subscription duration when ended\n\
CREATE TRIGGER update_subscription_duration\n\
AFTER UPDATE OF ended_at ON subscription_events\n\
WHEN NEW.ended_at IS NOT NULL AND OLD.ended_at IS NULL\n\
BEGIN\n\
    UPDATE subscription_events\n\
    SET duration = NEW.ended_at - NEW.created_at\n\
    WHERE id = NEW.id;\n\
END;\n\
\n\
-- View for subscription analytics\n\
CREATE VIEW subscription_analytics AS\n\
SELECT\n\
    date(created_at, 'unixepoch') as date,\n\
    COUNT(*) as subscriptions_created,\n\
    COUNT(CASE WHEN ended_at IS NOT NULL THEN 1 END) as subscriptions_ended,\n\
    AVG(CASE WHEN duration IS NOT NULL THEN duration END) as avg_duration_seconds,\n\
    MAX(events_sent) as max_events_sent,\n\
    AVG(events_sent) as avg_events_sent,\n\
    COUNT(DISTINCT client_ip) as unique_clients\n\
FROM subscription_events\n\
GROUP BY date(created_at, 'unixepoch')\n\
ORDER BY date DESC;\n\
\n\
-- View for current active subscriptions (from log perspective)\n\
CREATE VIEW active_subscriptions_log AS\n\
SELECT\n\
    subscription_id,\n\
    client_ip,\n\
    filter_json,\n\
    events_sent,\n\
    created_at,\n\
    (strftime('%s', 'now') - created_at) as duration_seconds\n\
FROM subscription_events\n\
WHERE event_type = 'created'\n\
  AND subscription_id NOT IN (\n\
      SELECT subscription_id FROM subscription_events\n\
      WHERE event_type IN ('closed', 'expired', 'disconnected')\n\
  );\n\
\n\
-- ================================\n\
-- CONFIGURATION MANAGEMENT TABLES\n\
-- ================================\n\
\n\
-- Core server configuration table\n\
CREATE TABLE config (\n\
    key TEXT PRIMARY KEY,             -- Configuration key (unique identifier)\n\
    value TEXT NOT NULL,              -- Configuration value (stored as string)\n\
    description TEXT,                 -- Human-readable description\n\
    config_type TEXT DEFAULT 'user' CHECK (config_type IN ('system', 'user', 'runtime')),\n\
    data_type TEXT DEFAULT 'string' CHECK (data_type IN ('string', 'integer', 'boolean', 'json')),\n\
    validation_rules TEXT,            -- JSON validation rules (optional)\n\
    is_sensitive INTEGER DEFAULT 0,   -- 1 if value should be masked in logs\n\
    requires_restart INTEGER DEFAULT 0,  -- 1 if change requires server restart\n\
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))\n\
);\n\
\n\
-- Configuration change history table\n\
CREATE TABLE config_history (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    config_key TEXT NOT NULL,         -- Key that was changed\n\
    old_value TEXT,                   -- Previous value (NULL for new keys)\n\
    new_value TEXT NOT NULL,          -- New value\n\
    changed_by TEXT DEFAULT 'system', -- Who made the change (system/admin/user)\n\
    change_reason TEXT,               -- Optional reason for change\n\
    changed_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    FOREIGN KEY (config_key) REFERENCES config(key)\n\
);\n\
\n\
-- Configuration validation errors log\n\
CREATE TABLE config_validation_log (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    config_key TEXT NOT NULL,\n\
    attempted_value TEXT,\n\
    validation_error TEXT NOT NULL,\n\
    error_source TEXT DEFAULT 'validation',  -- validation/parsing/constraint\n\
    attempted_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))\n\
);\n\
\n\
-- Cache for file-based configuration events\n\
CREATE TABLE config_file_cache (\n\
    file_path TEXT PRIMARY KEY,       -- Full path to config file\n\
    file_hash TEXT NOT NULL,          -- SHA256 hash of file content\n\
    event_id TEXT,                    -- Nostr event ID from file\n\
    event_pubkey TEXT,                -- Admin pubkey that signed event\n\
    loaded_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    validation_status TEXT CHECK (validation_status IN ('valid', 'invalid', 'unverified')),\n\
    validation_error TEXT             -- Error details if invalid\n\
);\n\
\n\
-- Performance indexes for configuration tables\n\
CREATE INDEX idx_config_type ON config(config_type);\n\
CREATE INDEX idx_config_updated ON config(updated_at DESC);\n\
CREATE INDEX idx_config_history_key ON config_history(config_key);\n\
CREATE INDEX idx_config_history_time ON config_history(changed_at DESC);\n\
CREATE INDEX idx_config_validation_key ON config_validation_log(config_key);\n\
CREATE INDEX idx_config_validation_time ON config_validation_log(attempted_at DESC);\n\
\n\
-- Trigger to update timestamp on configuration changes\n\
CREATE TRIGGER update_config_timestamp\n\
AFTER UPDATE ON config\n\
BEGIN\n\
    UPDATE config SET updated_at = strftime('%s', 'now') WHERE key = NEW.key;\n\
END;\n\
\n\
-- Trigger to log configuration changes to history\n\
CREATE TRIGGER log_config_changes\n\
AFTER UPDATE ON config\n\
WHEN OLD.value != NEW.value\n\
BEGIN\n\
    INSERT INTO config_history (config_key, old_value, new_value, changed_by, change_reason)\n\
    VALUES (NEW.key, OLD.value, NEW.value, 'system', 'configuration update');\n\
END;\n\
\n\
-- Active Configuration View\n\
CREATE VIEW active_config AS\n\
SELECT\n\
    key,\n\
    value,\n\
    description,\n\
    config_type,\n\
    data_type,\n\
    requires_restart,\n\
    updated_at\n\
FROM config\n\
WHERE config_type IN ('system', 'user')\n\
ORDER BY config_type, key;\n\
\n\
-- Runtime Statistics View\n\
CREATE VIEW runtime_stats AS\n\
SELECT\n\
    key,\n\
    value,\n\
    description,\n\
    updated_at\n\
FROM config\n\
WHERE config_type = 'runtime'\n\
ORDER BY key;\n\
\n\
-- Configuration Change Summary\n\
CREATE VIEW recent_config_changes AS\n\
SELECT\n\
    ch.config_key,\n\
    sc.description,\n\
    ch.old_value,\n\
    ch.new_value,\n\
    ch.changed_by,\n\
    ch.change_reason,\n\
    ch.changed_at\n\
FROM config_history ch\n\
JOIN config sc ON ch.config_key = sc.key\n\
ORDER BY ch.changed_at DESC\n\
LIMIT 50;\n\
\n\
-- Runtime Statistics (initialized by server on startup)\n\
-- These will be populated when configuration system initializes";

#endif /* SQL_SCHEMA_H */
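A minimal sketch of how such an embedded schema might be applied on first startup (hypothetical call site; `sqlite3_exec()` runs multi-statement SQL):

```c
#include <sqlite3.h>
#include <stdio.h>
#include "sql_schema.h"

/* Run the embedded schema against a freshly created database. */
int apply_embedded_schema(sqlite3 *db) {
    char *err = NULL;
    if (sqlite3_exec(db, EMBEDDED_SCHEMA_SQL, NULL, NULL, &err) != SQLITE_OK) {
        fprintf(stderr, "schema init failed: %s\n", err);
        sqlite3_free(err);
        return -1;
    }
    return 0;
}
```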
217 systemd/README.md (new file)
@@ -0,0 +1,217 @@
# C-Relay Systemd Service
|
||||
|
||||
This directory contains files for running C-Relay as a Linux systemd service.
|
||||
|
||||
## Files
|
||||
|
||||
- **`c-relay.service`** - Systemd service unit file
|
||||
- **`install-systemd.sh`** - Installation script (run as root)
|
||||
- **`uninstall-systemd.sh`** - Uninstallation script (run as root)
|
||||
- **`README.md`** - This documentation file
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Build the relay
|
||||
```bash
|
||||
# From the project root directory
|
||||
make
|
||||
```
|
||||
|
||||
### 2. Install as systemd service
|
||||
```bash
|
||||
# Run the installation script as root
|
||||
sudo ./systemd/install-systemd.sh
|
||||
```
|
||||
|
||||
### 3. Start the service
|
||||
```bash
|
||||
sudo systemctl start c-relay
|
||||
```
|
||||
|
||||
### 4. Check status
|
||||
```bash
|
||||
sudo systemctl status c-relay
|
||||
```
|
||||
|
||||
## Service Details
|
||||
|
||||
### Installation Location
|
||||
- **Binary**: `/opt/c-relay/c_relay_x86`
|
||||
- **Database**: `/opt/c-relay/db/`
|
||||
- **Service File**: `/etc/systemd/system/c-relay.service`
|
||||
|
||||
### User Account
|
||||
- **User**: `c-relay` (system user, no shell access)
|
||||
- **Group**: `c-relay`
|
||||
- **Home Directory**: `/opt/c-relay`
|
||||
|
||||
### Network Configuration
|
||||
- **Default Port**: 8888
|
||||
- **Default Host**: 127.0.0.1 (localhost only)
|
||||
- **WebSocket Endpoint**: `ws://127.0.0.1:8888`
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
Edit `/etc/systemd/system/c-relay.service` to configure:
|
||||
|
||||
```ini
|
||||
Environment=C_RELAY_CONFIG_PRIVKEY=your_private_key_here
|
||||
Environment=C_RELAY_PORT=8888
|
||||
Environment=C_RELAY_HOST=0.0.0.0
|
||||
```
|
||||
|
||||
After editing, reload and restart:
|
||||
```bash
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart c-relay
|
||||
```
|
||||
|
||||
### Security Settings
|
||||
The service runs with enhanced security:
|
||||
- Runs as unprivileged `c-relay` user
|
||||
- No new privileges allowed
|
||||
- Protected system directories
|
||||
- Private temporary directory
|
||||
- Limited file access (only `/opt/c-relay/db` writable)
|
||||
- Network restrictions to IPv4/IPv6 only
|
||||
|
||||
## Service Management
|
||||
|
||||
### Basic Commands
|
||||
```bash
|
||||
# Start service
|
||||
sudo systemctl start c-relay
|
||||
|
||||
# Stop service
|
||||
sudo systemctl stop c-relay
|
||||
|
||||
# Restart service
|
||||
sudo systemctl restart c-relay
|
||||
|
||||
# Enable auto-start on boot
|
||||
sudo systemctl enable c-relay
|
||||
|
||||
# Disable auto-start on boot
|
||||
sudo systemctl disable c-relay
|
||||
|
||||
# Check service status
|
||||
sudo systemctl status c-relay
|
||||
|
||||
# View logs (live)
|
||||
sudo journalctl -u c-relay -f
|
||||
|
||||
# View logs (last 100 lines)
|
||||
sudo journalctl -u c-relay -n 100
|
||||
```
|
||||
|
||||
### Log Management
|
||||
Logs are handled by systemd's journal:
|
||||
```bash
|
||||
# View all logs
|
||||
sudo journalctl -u c-relay
|
||||
|
||||
# View logs from today
|
||||
sudo journalctl -u c-relay --since today
|
||||
|
||||
# View logs with timestamps
|
||||
sudo journalctl -u c-relay --since "1 hour ago" --no-pager
|
||||
```
|
||||
|
||||
## Database Management
|
||||
|
||||
The database is automatically created on first run. Location: `/opt/c-relay/db/c_nostr_relay.db`
|
||||
|
||||
### Backup Database
|
||||
```bash
|
||||
sudo cp /opt/c-relay/db/c_nostr_relay.db /opt/c-relay/db/backup-$(date +%Y%m%d).db
|
||||
```
|
||||
|
||||
### Reset Database
|
||||
```bash
|
||||
sudo systemctl stop c-relay
|
||||
sudo rm /opt/c-relay/db/c_nostr_relay.db*
|
||||
sudo systemctl start c-relay
|
||||
```
|
||||
|
||||
## Updating the Service
|
||||
|
||||
### Update Binary
|
||||
1. Build new version: `make`
|
||||
2. Stop service: `sudo systemctl stop c-relay`
|
||||
3. Replace binary: `sudo cp build/c_relay_x86 /opt/c-relay/`
|
||||
4. Set permissions: `sudo chown c-relay:c-relay /opt/c-relay/c_relay_x86`
|
||||
5. Start service: `sudo systemctl start c-relay`
|
||||
|
||||
### Update Service File
|
||||
1. Stop service: `sudo systemctl stop c-relay`
|
||||
2. Copy new service file: `sudo cp systemd/c-relay.service /etc/systemd/system/`
|
||||
3. Reload systemd: `sudo systemctl daemon-reload`
|
||||
4. Start service: `sudo systemctl start c-relay`
|
||||
|
||||
## Uninstallation

Run the uninstall script to completely remove the service:

```bash
sudo ./systemd/uninstall-systemd.sh
```

This will:

- Stop and disable the service
- Remove the systemd service file
- Optionally remove the installation directory
- Optionally remove the `c-relay` user account
## Troubleshooting

### Service Won't Start

```bash
# Check detailed status
sudo systemctl status c-relay -l

# Check logs for errors
sudo journalctl -u c-relay --no-pager -l
```

### Permission Issues

```bash
# Fix ownership of installation directory
sudo chown -R c-relay:c-relay /opt/c-relay

# Ensure binary is executable
sudo chmod +x /opt/c-relay/c_relay_x86
```

### Port Already in Use

```bash
# Check what's using port 8888
sudo netstat -tulpn | grep :8888

# Or with ss command
sudo ss -tulpn | grep :8888
```

### Database Issues

```bash
# Check database file permissions
ls -la /opt/c-relay/db/

# Check database integrity
sudo -u c-relay sqlite3 /opt/c-relay/db/c_nostr_relay.db "PRAGMA integrity_check;"
```
## Custom Configuration

For advanced configurations, you can:

1. Modify the service file for different ports or settings
2. Use environment files: `/etc/systemd/system/c-relay.service.d/override.conf` (see the drop-in sketch after this list)
3. Configure log rotation with journald settings
4. Set up a reverse proxy (nginx/apache) for HTTPS support
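As a sketch of option 2, a drop-in overrides only the directives it names, so the shipped service file stays untouched; `sudo systemctl edit c-relay` creates and opens the file for you (the values below are illustrative, not defaults):

```ini
# /etc/systemd/system/c-relay.service.d/override.conf
[Service]
Environment=C_RELAY_PORT=8899
Environment=C_RELAY_HOST=0.0.0.0
```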
## Security Considerations

- The service runs as a non-root user with minimal privileges
- Database directory is only writable by the c-relay user
- Consider firewall rules for the relay port
- For internet-facing relays, use a reverse proxy with SSL/TLS (a minimal nginx sketch follows this list)
- Monitor logs for suspicious activity
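A minimal nginx sketch for the reverse-proxy recommendation above, assuming certificates are already provisioned (for example via certbot); the hostname is a placeholder, and the `Upgrade`/`Connection` headers are what carry the WebSocket handshake through to the relay:

```nginx
server {
    listen 443 ssl;
    server_name relay.example.com;  # hypothetical hostname

    ssl_certificate     /etc/letsencrypt/live/relay.example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;

    location / {
        proxy_pass http://127.0.0.1:8888;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
    }
}
```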
systemd/c-relay.service (Normal file, 43 lines)
@@ -0,0 +1,43 @@
[Unit]
Description=C Nostr Relay Server
Documentation=https://github.com/your-repo/c-relay
After=network.target
Wants=network-online.target

[Service]
Type=simple
User=c-relay
Group=c-relay
WorkingDirectory=/opt/c-relay
ExecStart=/opt/c-relay/c_relay_x86
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=c-relay

# Security settings
NoNewPrivileges=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/opt/c-relay/db
PrivateTmp=true
ProtectKernelTunables=true
ProtectKernelModules=true
ProtectControlGroups=true

# Network security
PrivateNetwork=false
RestrictAddressFamilies=AF_INET AF_INET6

# Resource limits
LimitNOFILE=65536
LimitNPROC=4096

# Environment variables (optional)
Environment=C_RELAY_CONFIG_PRIVKEY=
Environment=C_RELAY_PORT=8888
Environment=C_RELAY_HOST=127.0.0.1

[Install]
WantedBy=multi-user.target
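The unit can be sanity-checked before installation; `systemd-analyze verify` flags unknown directives and bad paths (expect a warning about the ExecStart path until the binary has actually been copied to /opt/c-relay):

```bash
systemd-analyze verify systemd/c-relay.service
```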
systemd/install-systemd.sh (Executable file, 92 lines)
@@ -0,0 +1,92 @@
#!/bin/bash

# C-Relay Systemd Service Installation Script
# This script installs the C-Relay as a systemd service

set -e

# Configuration
INSTALL_DIR="/opt/c-relay"
SERVICE_NAME="c-relay"
SERVICE_FILE="c-relay.service"
BINARY_NAME="c_relay_x86"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${GREEN}=== C-Relay Systemd Service Installation ===${NC}"

# Check if running as root
if [[ $EUID -ne 0 ]]; then
    echo -e "${RED}Error: This script must be run as root${NC}"
    echo "Usage: sudo ./install-systemd.sh"
    exit 1
fi

# Check if binary exists (script is in systemd/ subdirectory)
if [ ! -f "../build/$BINARY_NAME" ]; then
    echo -e "${RED}Error: Binary ../build/$BINARY_NAME not found${NC}"
    echo "Please run 'make' from the project root directory first"
    exit 1
fi

# Check if service file exists
if [ ! -f "$SERVICE_FILE" ]; then
    echo -e "${RED}Error: Service file $SERVICE_FILE not found${NC}"
    exit 1
fi

# Create c-relay user if it doesn't exist
if ! id "c-relay" &>/dev/null; then
    echo -e "${YELLOW}Creating c-relay user...${NC}"
    useradd --system --shell /bin/false --home-dir $INSTALL_DIR --create-home c-relay
else
    echo -e "${GREEN}User c-relay already exists${NC}"
fi

# Create installation directory
echo -e "${YELLOW}Creating installation directory...${NC}"
mkdir -p $INSTALL_DIR
mkdir -p $INSTALL_DIR/db

# Copy binary
echo -e "${YELLOW}Installing binary...${NC}"
cp ../build/$BINARY_NAME $INSTALL_DIR/
chmod +x $INSTALL_DIR/$BINARY_NAME

# Set permissions
echo -e "${YELLOW}Setting permissions...${NC}"
chown -R c-relay:c-relay $INSTALL_DIR

# Install systemd service
echo -e "${YELLOW}Installing systemd service...${NC}"
cp $SERVICE_FILE /etc/systemd/system/
systemctl daemon-reload

# Enable service
echo -e "${YELLOW}Enabling service...${NC}"
systemctl enable $SERVICE_NAME

echo -e "${GREEN}=== Installation Complete ===${NC}"
echo
echo -e "${GREEN}Next steps:${NC}"
echo "1. Configure environment variables in /etc/systemd/system/$SERVICE_FILE if needed"
echo "2. Start the service: sudo systemctl start $SERVICE_NAME"
echo "3. Check status: sudo systemctl status $SERVICE_NAME"
echo "4. View logs: sudo journalctl -u $SERVICE_NAME -f"
echo
echo -e "${GREEN}Service commands:${NC}"
echo "  Start:   sudo systemctl start $SERVICE_NAME"
echo "  Stop:    sudo systemctl stop $SERVICE_NAME"
echo "  Restart: sudo systemctl restart $SERVICE_NAME"
echo "  Status:  sudo systemctl status $SERVICE_NAME"
echo "  Logs:    sudo journalctl -u $SERVICE_NAME"
echo
echo -e "${GREEN}Installation directory: $INSTALL_DIR${NC}"
echo -e "${GREEN}Service file: /etc/systemd/system/$SERVICE_FILE${NC}"
echo
echo -e "${YELLOW}Note: The relay will run on port 8888 by default${NC}"
echo -e "${YELLOW}Database will be created automatically in $INSTALL_DIR/db/${NC}"
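Because the script resolves the binary as `../build/$BINARY_NAME`, it expects to be invoked from inside the `systemd/` directory:

```bash
make                       # build from the project root first
cd systemd
sudo ./install-systemd.sh
```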
systemd/uninstall-systemd.sh (Executable file, 86 lines)
@@ -0,0 +1,86 @@
#!/bin/bash

# C-Relay Systemd Service Uninstallation Script
# This script removes the C-Relay systemd service

set -e

# Configuration
INSTALL_DIR="/opt/c-relay"
SERVICE_NAME="c-relay"
SERVICE_FILE="c-relay.service"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${GREEN}=== C-Relay Systemd Service Uninstallation ===${NC}"

# Check if running as root
if [[ $EUID -ne 0 ]]; then
    echo -e "${RED}Error: This script must be run as root${NC}"
    echo "Usage: sudo ./uninstall-systemd.sh"
    exit 1
fi

# Stop service if running
echo -e "${YELLOW}Stopping service...${NC}"
if systemctl is-active --quiet $SERVICE_NAME; then
    systemctl stop $SERVICE_NAME
    echo -e "${GREEN}Service stopped${NC}"
else
    echo -e "${GREEN}Service was not running${NC}"
fi

# Disable service if enabled
echo -e "${YELLOW}Disabling service...${NC}"
if systemctl is-enabled --quiet $SERVICE_NAME; then
    systemctl disable $SERVICE_NAME
    echo -e "${GREEN}Service disabled${NC}"
else
    echo -e "${GREEN}Service was not enabled${NC}"
fi

# Remove systemd service file
echo -e "${YELLOW}Removing service file...${NC}"
if [ -f "/etc/systemd/system/$SERVICE_FILE" ]; then
    rm /etc/systemd/system/$SERVICE_FILE
    systemctl daemon-reload
    echo -e "${GREEN}Service file removed${NC}"
else
    echo -e "${GREEN}Service file was not found${NC}"
fi

# Ask about removing installation directory
echo
echo -e "${YELLOW}Do you want to remove the installation directory $INSTALL_DIR? (y/N)${NC}"
read -r response
if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]]; then
    echo -e "${YELLOW}Removing installation directory...${NC}"
    rm -rf $INSTALL_DIR
    echo -e "${GREEN}Installation directory removed${NC}"
else
    echo -e "${GREEN}Installation directory preserved${NC}"
fi

# Ask about removing c-relay user
echo
echo -e "${YELLOW}Do you want to remove the c-relay user? (y/N)${NC}"
read -r response
if [[ "$response" =~ ^([yY][eE][sS]|[yY])$ ]]; then
    echo -e "${YELLOW}Removing c-relay user...${NC}"
    if id "c-relay" &>/dev/null; then
        userdel c-relay
        echo -e "${GREEN}User c-relay removed${NC}"
    else
        echo -e "${GREEN}User c-relay was not found${NC}"
    fi
else
    echo -e "${GREEN}User c-relay preserved${NC}"
fi

echo
echo -e "${GREEN}=== Uninstallation Complete ===${NC}"
echo -e "${GREEN}C-Relay systemd service has been removed${NC}"
test_combined.db (BIN, Normal file) Binary file not shown.
test_db.db-shm (BIN, Normal file) Binary file not shown.
test_db.db-wal (BIN, Normal file) Binary file not shown.
tests/11_nip_information.sh (Executable file, 432 lines)
@@ -0,0 +1,432 @@
#!/bin/bash

# NIP-11 Relay Information Document Test
# Tests HTTP endpoint for relay information according to NIP-11 specification

set -e  # Exit on any error

# Color constants
RED='\033[31m'
GREEN='\033[32m'
YELLOW='\033[33m'
BLUE='\033[34m'
BOLD='\033[1m'
RESET='\033[0m'

# Test configuration
RELAY_URL="http://127.0.0.1:8888"
RELAY_WS_URL="ws://127.0.0.1:8888"

# Print functions
print_header() {
    echo -e "${BLUE}${BOLD}=== $1 ===${RESET}"
}

print_step() {
    echo -e "${YELLOW}[STEP]${RESET} $1"
}

print_success() {
    echo -e "${GREEN}✓${RESET} $1"
}

print_error() {
    echo -e "${RED}✗${RESET} $1"
}

print_info() {
    echo -e "${BLUE}[INFO]${RESET} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${RESET} $1"
}

# Test functions
test_http_with_correct_header() {
    print_step "Testing HTTP request with correct Accept header"

    local response=""
    local http_code=""

    if command -v curl &> /dev/null; then
        # Use curl to test with proper Accept header
        response=$(curl -s -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
        http_code=$(curl -s -o /dev/null -w "%{http_code}" -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "000")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ "$http_code" == "200" ]]; then
        print_success "HTTP 200 OK received with correct Accept header"

        # Validate JSON response
        if echo "$response" | jq . >/dev/null 2>&1; then
            print_success "Response is valid JSON"
            return 0
        else
            print_error "Response is not valid JSON"
            return 1
        fi
    else
        print_error "Expected HTTP 200, got HTTP $http_code"
        return 1
    fi
}

test_http_without_header() {
    print_step "Testing HTTP request without Accept header (should return 406)"

    local http_code=""

    if command -v curl &> /dev/null; then
        http_code=$(curl -s -o /dev/null -w "%{http_code}" "$RELAY_URL/" 2>/dev/null || echo "000")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ "$http_code" == "406" ]]; then
        print_success "HTTP 406 Not Acceptable received without proper Accept header"
        return 0
    else
        print_error "Expected HTTP 406, got HTTP $http_code"
        return 1
    fi
}

test_http_with_wrong_header() {
    print_step "Testing HTTP request with wrong Accept header (should return 406)"

    local http_code=""

    if command -v curl &> /dev/null; then
        http_code=$(curl -s -o /dev/null -w "%{http_code}" -H "Accept: application/json" "$RELAY_URL/" 2>/dev/null || echo "000")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ "$http_code" == "406" ]]; then
        print_success "HTTP 406 Not Acceptable received with wrong Accept header"
        return 0
    else
        print_error "Expected HTTP 406, got HTTP $http_code"
        return 1
    fi
}

test_cors_headers() {
    print_step "Testing CORS headers presence"

    local headers=""

    if command -v curl &> /dev/null; then
        headers=$(curl -s -I -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    local cors_origin_found=false
    local cors_headers_found=false
    local cors_methods_found=false

    if echo "$headers" | grep -qi "access-control-allow-origin"; then
        cors_origin_found=true
        print_success "Access-Control-Allow-Origin header found"
    fi

    if echo "$headers" | grep -qi "access-control-allow-headers"; then
        cors_headers_found=true
        print_success "Access-Control-Allow-Headers header found"
    fi

    if echo "$headers" | grep -qi "access-control-allow-methods"; then
        cors_methods_found=true
        print_success "Access-Control-Allow-Methods header found"
    fi

    if [[ "$cors_origin_found" == true && "$cors_headers_found" == true && "$cors_methods_found" == true ]]; then
        print_success "All required CORS headers present"
        return 0
    else
        print_error "Missing CORS headers"
        return 1
    fi
}

test_json_structure() {
    print_step "Testing NIP-11 JSON structure and required fields"

    local response=""

    if command -v curl &> /dev/null; then
        response=$(curl -s -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ -z "$response" ]]; then
        print_error "Empty response received"
        return 1
    fi

    # Validate JSON structure using jq
    if ! echo "$response" | jq . >/dev/null 2>&1; then
        print_error "Response is not valid JSON"
        return 1
    fi

    print_success "Valid JSON structure confirmed"

    # Check for required fields
    local required_checks=0
    local total_checks=0

    # Test name field
    ((total_checks++))
    if echo "$response" | jq -e '.name' >/dev/null 2>&1; then
        local name=$(echo "$response" | jq -r '.name')
        print_success "Name field present: $name"
        ((required_checks++))
    else
        print_warning "Name field missing (optional)"
    fi

    # Test supported_nips field (required)
    ((total_checks++))
    if echo "$response" | jq -e '.supported_nips' >/dev/null 2>&1; then
        local nips=$(echo "$response" | jq -r '.supported_nips | @json')
        print_success "Supported NIPs field present: $nips"
        ((required_checks++))

        # Verify NIP-11 is in the supported list
        if echo "$response" | jq -e '.supported_nips | contains([11])' >/dev/null 2>&1; then
            print_success "NIP-11 correctly listed in supported NIPs"
        else
            print_warning "NIP-11 not found in supported NIPs list"
        fi
    else
        print_error "Supported NIPs field missing (should be present)"
    fi

    # Test software field
    ((total_checks++))
    if echo "$response" | jq -e '.software' >/dev/null 2>&1; then
        local software=$(echo "$response" | jq -r '.software')
        print_success "Software field present: $software"
        ((required_checks++))
    else
        print_warning "Software field missing (optional)"
    fi

    # Test version field
    ((total_checks++))
    if echo "$response" | jq -e '.version' >/dev/null 2>&1; then
        local version=$(echo "$response" | jq -r '.version')
        print_success "Version field present: $version"
        ((required_checks++))
    else
        print_warning "Version field missing (optional)"
    fi

    # Test limitation object
    ((total_checks++))
    if echo "$response" | jq -e '.limitation' >/dev/null 2>&1; then
        print_success "Limitation object present"
        ((required_checks++))

        # Check some common limitation fields
        if echo "$response" | jq -e '.limitation.max_message_length' >/dev/null 2>&1; then
            local max_msg=$(echo "$response" | jq -r '.limitation.max_message_length')
            print_info "  max_message_length: $max_msg"
        fi

        if echo "$response" | jq -e '.limitation.max_subscriptions' >/dev/null 2>&1; then
            local max_subs=$(echo "$response" | jq -r '.limitation.max_subscriptions')
            print_info "  max_subscriptions: $max_subs"
        fi
    else
        print_warning "Limitation object missing (recommended)"
    fi

    # Test description field
    if echo "$response" | jq -e '.description' >/dev/null 2>&1; then
        local description=$(echo "$response" | jq -r '.description')
        print_success "Description field present: ${description:0:50}..."
    else
        print_warning "Description field missing (optional)"
    fi

    print_info "JSON structure validation: $required_checks/$total_checks core fields present"
    return 0
}

test_content_type_header() {
    print_step "Testing Content-Type header"

    local headers=""

    if command -v curl &> /dev/null; then
        headers=$(curl -s -I -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if echo "$headers" | grep -qi "content-type.*application/nostr+json"; then
        print_success "Correct Content-Type header: application/nostr+json"
        return 0
    else
        print_warning "Content-Type header not exactly 'application/nostr+json'"
        echo "$headers" | grep -i "content-type" | head -1
        return 1
    fi
}

test_non_root_path() {
    print_step "Testing non-root path (should return 404)"

    local http_code=""

    if command -v curl &> /dev/null; then
        http_code=$(curl -s -o /dev/null -w "%{http_code}" -H "Accept: application/nostr+json" "$RELAY_URL/nonexistent" 2>/dev/null || echo "000")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ "$http_code" == "404" ]]; then
        print_success "HTTP 404 Not Found received for non-root path"
        return 0
    else
        print_error "Expected HTTP 404 for non-root path, got HTTP $http_code"
        return 1
    fi
}

test_websocket_still_works() {
    print_step "Testing that WebSocket functionality still works on same port"

    if ! command -v websocat &> /dev/null; then
        print_warning "websocat not available - skipping WebSocket test"
        return 0
    fi

    # Try to connect to WebSocket and send a simple REQ
    local response=""
    response=$(echo '["REQ","test_ws_nip11",{}]' | timeout 3s websocat "$RELAY_WS_URL" 2>/dev/null || echo "Connection failed")

    if [[ "$response" == *"Connection failed"* ]]; then
        print_error "WebSocket connection failed"
        return 1
    elif [[ "$response" == *"EOSE"* ]]; then
        print_success "WebSocket still functional - received EOSE response"
        return 0
    else
        print_warning "WebSocket response unclear, but connection succeeded"
        return 0
    fi
}

# Main test function
run_nip11_tests() {
    print_header "NIP-11 Relay Information Document Tests"

    # Check dependencies
    print_step "Checking dependencies..."
    if ! command -v curl &> /dev/null; then
        print_error "curl command not found - required for NIP-11 HTTP testing"
        return 1
    fi
    if ! command -v jq &> /dev/null; then
        print_error "jq command not found - required for JSON validation"
        return 1
    fi
    print_success "All dependencies found"

    print_header "PHASE 1: Basic HTTP Functionality"

    # Test 1: Correct Accept header
    if ! test_http_with_correct_header; then
        return 1
    fi

    # Test 2: Missing Accept header
    if ! test_http_without_header; then
        return 1
    fi

    # Test 3: Wrong Accept header
    if ! test_http_with_wrong_header; then
        return 1
    fi

    print_header "PHASE 2: HTTP Headers Validation"

    # Test 4: CORS headers
    if ! test_cors_headers; then
        return 1
    fi

    # Test 5: Content-Type header
    if ! test_content_type_header; then
        return 1
    fi

    print_header "PHASE 3: JSON Structure Validation"

    # Test 6: JSON structure and required fields
    if ! test_json_structure; then
        return 1
    fi

    print_header "PHASE 4: Additional Endpoint Behavior"

    # Test 7: Non-root paths
    if ! test_non_root_path; then
        return 1
    fi

    # Test 8: WebSocket compatibility
    if ! test_websocket_still_works; then
        return 1
    fi

    print_header "PHASE 5: NIP-11 Compliance Summary"

    # Final validation - get the actual response and display it
    print_step "Displaying complete NIP-11 response..."
    local response=""
    if command -v curl &> /dev/null; then
        response=$(curl -s -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
        if [[ -n "$response" ]] && echo "$response" | jq . >/dev/null 2>&1; then
            echo "$response" | jq .
        else
            print_error "Failed to retrieve or parse final response"
        fi
    fi

    print_success "All NIP-11 tests passed!"
    return 0
}

# Main execution
print_header "Starting NIP-11 Relay Information Document Test Suite"
echo

if run_nip11_tests; then
    echo
    print_success "All NIP-11 tests completed successfully!"
    print_info "The C-Relay NIP-11 implementation is fully compliant"
    print_info "✅ HTTP endpoint, Accept header validation, CORS, and JSON structure all working"
    echo
    exit 0
else
    echo
    print_error "Some NIP-11 tests failed"
    exit 1
fi
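For reference, the probe this script automates can be run by hand; the `Accept: application/nostr+json` header is what switches the endpoint from a WebSocket upgrade to the NIP-11 document:

```bash
curl -s -H "Accept: application/nostr+json" http://127.0.0.1:8888/ | jq .
```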
tests/13_nip_test.sh (Executable file, 384 lines)
@@ -0,0 +1,384 @@
#!/bin/bash

# NIP-13 Proof of Work Validation Test Suite for C Nostr Relay
# Tests PoW validation in the relay's event processing pipeline
# Based on nostr_core_lib/tests/nip13_test.c

set -e  # Exit on error

# Color constants
RED='\033[31m'
GREEN='\033[32m'
YELLOW='\033[33m'
BLUE='\033[34m'
BOLD='\033[1m'
RESET='\033[0m'

# Test configuration
RELAY_URL="ws://127.0.0.1:8888"
HTTP_URL="http://127.0.0.1:8888"
TEST_COUNT=0
PASSED_COUNT=0
FAILED_COUNT=0

# Test results tracking
declare -a TEST_RESULTS=()

print_info() {
    echo -e "${BLUE}[INFO]${RESET} $1"
}

print_success() {
    echo -e "${GREEN}${BOLD}[SUCCESS]${RESET} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${RESET} $1"
}

print_error() {
    echo -e "${RED}${BOLD}[ERROR]${RESET} $1"
}

print_test_header() {
    TEST_COUNT=$((TEST_COUNT + 1))
    echo ""
    echo -e "${BOLD}=== TEST $TEST_COUNT: $1 ===${RESET}"
}

record_test_result() {
    local test_name="$1"
    local result="$2"
    local details="$3"

    TEST_RESULTS+=("$test_name|$result|$details")

    if [ "$result" = "PASS" ]; then
        PASSED_COUNT=$((PASSED_COUNT + 1))
        print_success "PASS: $test_name"
    elif [ "$result" = "SKIP" ]; then
        # Skipped tests must not count as failures
        print_warning "SKIP: $test_name ($details)"
    else
        FAILED_COUNT=$((FAILED_COUNT + 1))
        print_error "FAIL: $test_name"
        if [ -n "$details" ]; then
            echo "  Details: $details"
        fi
    fi
}

# Check if relay is running
check_relay_running() {
    print_info "Checking if relay is running..."

    if ! curl -s -H "Accept: application/nostr+json" "$HTTP_URL/" >/dev/null 2>&1; then
        print_error "Relay is not running or not accessible at $HTTP_URL"
        print_info "Please start the relay with: ./make_and_restart_relay.sh"
        exit 1
    fi

    print_success "Relay is running and accessible"
}

# Test NIP-11 relay information includes NIP-13
test_nip11_pow_support() {
    print_test_header "NIP-11 PoW Support Advertisement"

    print_info "Fetching relay information..."
    RELAY_INFO=$(curl -s -H "Accept: application/nostr+json" "$HTTP_URL/")

    echo "Relay Info Response:"
    echo "$RELAY_INFO" | jq '.'
    echo ""

    # Check if NIP-13 is in supported_nips
    if echo "$RELAY_INFO" | jq -e '.supported_nips | index(13)' >/dev/null 2>&1; then
        print_success "✓ NIP-13 found in supported_nips array"
        NIP13_SUPPORTED=true
    else
        print_error "✗ NIP-13 not found in supported_nips array"
        NIP13_SUPPORTED=false
    fi

    # Check if min_pow_difficulty is present
    MIN_POW_DIFF=$(echo "$RELAY_INFO" | jq -r '.limitation.min_pow_difficulty // "missing"')
    if [ "$MIN_POW_DIFF" != "missing" ]; then
        print_success "✓ min_pow_difficulty found: $MIN_POW_DIFF"
        MIN_POW_PRESENT=true
    else
        print_error "✗ min_pow_difficulty not found in limitations"
        MIN_POW_PRESENT=false
    fi

    if [ "$NIP13_SUPPORTED" = true ] && [ "$MIN_POW_PRESENT" = true ]; then
        record_test_result "NIP-11 PoW Support Advertisement" "PASS" "NIP-13 supported, min_pow_difficulty=$MIN_POW_DIFF"
        return 0
    else
        record_test_result "NIP-11 PoW Support Advertisement" "FAIL" "Missing NIP-13 support or min_pow_difficulty"
        return 1
    fi
}

# Test event submission without PoW (should be accepted when min_difficulty=0)
test_event_without_pow() {
    print_test_header "Event Submission Without PoW (min_difficulty=0)"

    # Create a simple event without PoW
    print_info "Generating test event without PoW..."

    # Use nak to generate a simple event
    if ! command -v nak &> /dev/null; then
        print_warning "nak command not found - skipping PoW generation tests"
        record_test_result "Event Submission Without PoW" "SKIP" "nak not available"
        return 0
    fi

    # Generate event without PoW using direct private key
    PRIVATE_KEY="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"
    EVENT_JSON=$(nak event --sec "$PRIVATE_KEY" -c "Test event without PoW" --ts $(date +%s))

    print_info "Generated event:"
    echo "$EVENT_JSON" | jq '.'
    echo ""

    # Send event to relay via WebSocket using websocat
    print_info "Sending event to relay..."

    # Create EVENT message in Nostr format
    EVENT_MESSAGE="[\"EVENT\",$EVENT_JSON]"

    # Send to relay and capture response
    if command -v websocat &> /dev/null; then
        RESPONSE=$(echo "$EVENT_MESSAGE" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")

        print_info "Relay response: $RESPONSE"

        if [[ "$RESPONSE" == *"Connection failed"* ]]; then
            print_error "✗ Failed to connect to relay"
            record_test_result "Event Submission Without PoW" "FAIL" "Connection failed"
            return 1
        elif [[ "$RESPONSE" == *"true"* ]]; then
            print_success "✓ Event without PoW accepted (expected when min_difficulty=0)"
            record_test_result "Event Submission Without PoW" "PASS" "Event accepted as expected"
            return 0
        else
            print_error "✗ Event without PoW rejected (unexpected when min_difficulty=0)"
            record_test_result "Event Submission Without PoW" "FAIL" "Event rejected: $RESPONSE"
            return 1
        fi
    else
        print_error "websocat not found - required for testing"
        record_test_result "Event Submission Without PoW" "SKIP" "websocat not available"
        return 0
    fi
}

# Test event with valid PoW
test_event_with_pow() {
    print_test_header "Event Submission With Valid PoW"

    if ! command -v nak &> /dev/null; then
        print_warning "nak command not found - skipping PoW validation tests"
        record_test_result "Event Submission With Valid PoW" "SKIP" "nak not available"
        return 0
    fi

    print_info "Generating event with PoW difficulty 8..."

    # Generate event with PoW (difficulty 8 for reasonable test time) using direct private key
    PRIVATE_KEY="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"
    POW_EVENT_JSON=$(nak event --sec "$PRIVATE_KEY" -c "Test event with PoW difficulty 8" --pow 8 --ts $(date +%s))

    if [ -z "$POW_EVENT_JSON" ]; then
        print_error "Failed to generate PoW event"
        record_test_result "Event Submission With Valid PoW" "FAIL" "PoW event generation failed"
        return 1
    fi

    print_info "Generated PoW event:"
    echo "$POW_EVENT_JSON" | jq '.'
    echo ""

    # Extract nonce info for verification
    NONCE_TAG=$(echo "$POW_EVENT_JSON" | jq -r '.tags[] | select(.[0] == "nonce") | .[1]' 2>/dev/null || echo "")
    TARGET_DIFF=$(echo "$POW_EVENT_JSON" | jq -r '.tags[] | select(.[0] == "nonce") | .[2]' 2>/dev/null || echo "")

    if [ -n "$NONCE_TAG" ] && [ -n "$TARGET_DIFF" ]; then
        print_info "PoW details: nonce=$NONCE_TAG, target_difficulty=$TARGET_DIFF"
    fi

    # Send event to relay via WebSocket using websocat
    print_info "Sending PoW event to relay..."

    # Create EVENT message in Nostr format
    POW_EVENT_MESSAGE="[\"EVENT\",$POW_EVENT_JSON]"

    # Send to relay and capture response
    if command -v websocat &> /dev/null; then
        RESPONSE=$(echo "$POW_EVENT_MESSAGE" | timeout 10s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")

        print_info "Relay response: $RESPONSE"

        if [[ "$RESPONSE" == *"Connection failed"* ]]; then
            print_error "✗ Failed to connect to relay"
            record_test_result "Event Submission With Valid PoW" "FAIL" "Connection failed"
            return 1
        elif [[ "$RESPONSE" == *"true"* ]]; then
            print_success "✓ Event with valid PoW accepted"
            record_test_result "Event Submission With Valid PoW" "PASS" "PoW event accepted"
            return 0
        else
            print_error "✗ Event with valid PoW rejected"
            record_test_result "Event Submission With Valid PoW" "FAIL" "PoW event rejected: $RESPONSE"
            return 1
        fi
    else
        print_error "websocat not found - required for testing"
        record_test_result "Event Submission With Valid PoW" "SKIP" "websocat not available"
        return 0
    fi
}

# Test relay configuration with environment variables
test_pow_configuration() {
    print_test_header "PoW Configuration Via Environment Variables"

    print_info "Testing different PoW configurations requires relay restart"
    print_info "Current configuration from logs:"

    if [ -f "relay.log" ]; then
        grep "PoW Configuration:" relay.log | tail -1
    else
        print_warning "No relay.log found"
    fi

    # Test current configuration values
    RELAY_INFO=$(curl -s -H "Accept: application/nostr+json" "$HTTP_URL/")
    MIN_POW_DIFF=$(echo "$RELAY_INFO" | jq -r '.limitation.min_pow_difficulty')

    print_info "Current min_pow_difficulty from NIP-11: $MIN_POW_DIFF"

    # For now, just verify the configuration is readable
    if [ "$MIN_POW_DIFF" != "null" ] && [ "$MIN_POW_DIFF" != "missing" ]; then
        print_success "✓ PoW configuration is accessible via NIP-11"
        record_test_result "PoW Configuration Via Environment Variables" "PASS" "min_pow_difficulty=$MIN_POW_DIFF"
        return 0
    else
        print_error "✗ PoW configuration not accessible"
        record_test_result "PoW Configuration Via Environment Variables" "FAIL" "Cannot read min_pow_difficulty"
        return 1
    fi
}

# Test NIP-13 reference event validation
test_nip13_reference_event() {
    print_test_header "NIP-13 Reference Event Validation"

    # This is the official NIP-13 reference event
    NIP13_REF_EVENT='{"id":"000006d8c378af1779d2feebc7603a125d99eca0ccf1085959b307f64e5dd358","pubkey":"a48380f4cfcc1ad5378294fcac36439770f9c878dd880ffa94bb74ea54a6f243","created_at":1651794653,"kind":1,"tags":[["nonce","776797","20"]],"content":"It'\''s just me mining my own business","sig":"284622fc0a3f4f1303455d5175f7ba962a3300d136085b9566801bc2e0699de0c7e31e44c81fb40ad9049173742e904713c3594a1da0fc5d2382a25c11aba977"}'

    print_info "Testing NIP-13 reference event from specification:"
    echo "$NIP13_REF_EVENT" | jq '.'
    echo ""

    # Send reference event to relay via WebSocket using websocat
    print_info "Sending NIP-13 reference event to relay..."

    # Create EVENT message in Nostr format
    REF_EVENT_MESSAGE="[\"EVENT\",$NIP13_REF_EVENT]"

    # Send to relay and capture response
    if command -v websocat &> /dev/null; then
        RESPONSE=$(echo "$REF_EVENT_MESSAGE" | timeout 10s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")

        print_info "Relay response: $RESPONSE"

        if [[ "$RESPONSE" == *"Connection failed"* ]] || [[ -z "$RESPONSE" ]]; then
            print_error "✗ Failed to connect to relay or no response"
            record_test_result "NIP-13 Reference Event Validation" "FAIL" "Connection failed or timeout"
            return 1
        elif [[ "$RESPONSE" == *"true"* ]]; then
            print_success "✓ NIP-13 reference event accepted"
            record_test_result "NIP-13 Reference Event Validation" "PASS" "Reference event accepted"
            return 0
        else
            print_error "✗ NIP-13 reference event rejected"
            record_test_result "NIP-13 Reference Event Validation" "FAIL" "Reference event rejected: $RESPONSE"
            return 1
        fi
    else
        print_error "websocat not found - required for testing"
        record_test_result "NIP-13 Reference Event Validation" "SKIP" "websocat not available"
        return 0
    fi
}

# Print test summary
print_test_summary() {
    echo ""
    echo -e "${BOLD}=== TEST SUMMARY ===${RESET}"
    echo "Total tests run: $TEST_COUNT"
    echo -e "${GREEN}Passed: $PASSED_COUNT${RESET}"
    echo -e "${RED}Failed: $FAILED_COUNT${RESET}"

    if [ $FAILED_COUNT -gt 0 ]; then
        echo ""
        echo -e "${RED}${BOLD}Failed tests:${RESET}"
        for result in "${TEST_RESULTS[@]}"; do
            IFS='|' read -r name status details <<< "$result"
            if [ "$status" = "FAIL" ]; then
                echo -e "  ${RED}✗ $name${RESET}"
                if [ -n "$details" ]; then
                    echo "    $details"
                fi
            fi
        done
    fi

    echo ""
    if [ $FAILED_COUNT -eq 0 ]; then
        echo -e "${GREEN}${BOLD}🎉 ALL TESTS PASSED!${RESET}"
        echo -e "${GREEN}✅ NIP-13 PoW validation is working correctly in the relay${RESET}"
        return 0
    else
        echo -e "${RED}${BOLD}❌ SOME TESTS FAILED${RESET}"
        echo "Please review the output above and check relay logs for more details."
        return 1
    fi
}

# Main test execution
main() {
    echo -e "${BOLD}=== NIP-13 Proof of Work Relay Test Suite ===${RESET}"
    echo "Testing NIP-13 PoW validation in the C Nostr Relay"
    echo "Relay URL: $RELAY_URL"
    echo ""

    # Check prerequisites
    if ! command -v curl &> /dev/null; then
        print_error "curl is required but not installed"
        exit 1
    fi

    if ! command -v jq &> /dev/null; then
        print_error "jq is required but not installed"
        exit 1
    fi

    if ! command -v websocat &> /dev/null; then
        print_warning "websocat not found - WebSocket tests will be skipped"
    fi

    # Run tests ("|| true" keeps "set -e" from aborting the suite on the first
    # failing test; failures are still tallied via record_test_result)
    check_relay_running
    test_nip11_pow_support || true
    test_event_without_pow || true
    test_event_with_pow || true
    test_pow_configuration || true
    test_nip13_reference_event || true

    # Print summary
    print_test_summary
    exit $?
}

# Run main function
main "$@"
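The difficulty that `nak --pow` mines for is the count of leading zero bits in the event id (NIP-13). A quick manual check of a mined event, assuming `nak` and `jq` are installed (the key below is the same throwaway test key the script uses):

```bash
# Mine a kind-1 event at difficulty 8 and show its id plus the nonce tag
nak event --sec 91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe \
    -c "pow demo" --pow 8 | jq '{id, tags}'
```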
@@ -99,6 +99,47 @@ publish_event() {
     fi
 }
 
+# Helper function to publish invalid event and expect rejection
+publish_invalid_event() {
+    local event_json="$1"
+    local description="$2"
+    local expected_error="$3"
+
+    print_info "Publishing invalid $description..."
+
+    # Create EVENT message in Nostr format
+    local event_message="[\"EVENT\",$event_json]"
+
+    # Publish to relay
+    local response=""
+    if command -v websocat &> /dev/null; then
+        response=$(echo "$event_message" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")
+    else
+        print_error "websocat not found - required for testing"
+        return 1
+    fi
+
+    # Check response - should contain "false" and error message
+    if [[ "$response" == *"Connection failed"* ]]; then
+        print_error "Failed to connect to relay for $description"
+        return 1
+    elif [[ "$response" == *"false"* ]]; then
+        # Extract error message
+        local error_msg=$(echo "$response" | grep -o '"[^"]*invalid[^"]*"' | head -1 | sed 's/"//g' 2>/dev/null || echo "rejected")
+        print_success "$description correctly rejected: $error_msg"
+        echo  # Add blank line for readability
+        return 0
+    elif [[ "$response" == *"true"* ]]; then
+        print_error "$description was incorrectly accepted (should have been rejected)"
+        echo  # Add blank line for readability
+        return 1
+    else
+        print_warning "$description response unclear: $response"
+        echo  # Add blank line for readability
+        return 1
+    fi
+}
+
 # Test subscription with filters
 test_subscription() {
     local sub_id="$1"
@@ -211,7 +252,41 @@ run_comprehensive_test() {
     # Brief pause to let events settle
     sleep 2
 
-    print_header "PHASE 2: Testing Subscriptions and Filters"
+    print_header "PHASE 2: Testing Invalid Events (NIP-01 Validation)"
+
+    print_step "Testing various invalid events that should be rejected..."
+
+    # Test 1: Event with invalid JSON structure (malformed)
+    local malformed_event='{"id":"invalid","pubkey":"invalid_pubkey","created_at":"not_a_number","kind":1,"tags":[],"content":"test"}'
+    publish_invalid_event "$malformed_event" "malformed event with invalid created_at" "invalid"
+
+    # Test 2: Event with missing required fields
+    local missing_field_event='{"id":"test123","pubkey":"valid_pubkey","kind":1,"tags":[],"content":"test"}'
+    publish_invalid_event "$missing_field_event" "event missing created_at and sig" "invalid"
+
+    # Test 3: Event with invalid pubkey format (not hex)
+    local invalid_pubkey_event='{"id":"abc123","pubkey":"not_valid_hex_pubkey","created_at":1234567890,"kind":1,"tags":[],"content":"test","sig":"fake_sig"}'
+    publish_invalid_event "$invalid_pubkey_event" "event with invalid pubkey format" "invalid"
+
+    # Test 4: Event with invalid event ID format
+    local invalid_id_event='{"id":"not_64_char_hex","pubkey":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef","created_at":1234567890,"kind":1,"tags":[],"content":"test","sig":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}'
+    publish_invalid_event "$invalid_id_event" "event with invalid ID format" "invalid"
+
+    # Test 5: Event with invalid signature
+    local invalid_sig_event='{"id":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef","pubkey":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef","created_at":1234567890,"kind":1,"tags":[],"content":"test","sig":"invalid_signature_format"}'
    publish_invalid_event "$invalid_sig_event" "event with invalid signature format" "invalid"
+
+    # Test 6: Event with invalid kind (negative)
+    local invalid_kind_event='{"id":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef","pubkey":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef","created_at":1234567890,"kind":-1,"tags":[],"content":"test","sig":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}'
+    publish_invalid_event "$invalid_kind_event" "event with negative kind" "invalid"
+
+    # Test 7: Event with invalid tags format (not array)
+    local invalid_tags_event='{"id":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef","pubkey":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef","created_at":1234567890,"kind":1,"tags":"not_an_array","content":"test","sig":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}'
+    publish_invalid_event "$invalid_tags_event" "event with invalid tags format" "invalid"
+
+    print_success "Invalid event tests completed - all should have been rejected"
+
+    print_header "PHASE 3: Testing Subscriptions and Filters"
 
     # Test subscription filters
     print_step "Testing various subscription filters..."
@@ -240,7 +315,7 @@ run_comprehensive_test() {
     # Test 7: Limit results
     test_subscription "test_limit" '{"kinds":[1],"limit":1}' "Limited to 1 event" "1"
 
-    print_header "PHASE 3: Database Verification"
+    print_header "PHASE 4: Database Verification"
 
     # Check what's actually stored in the database
    print_step "Verifying database contents..."
@@ -265,13 +340,14 @@ run_comprehensive_test() {
 }
 
 # Run the comprehensive test
-print_header "Starting C-Relay Comprehensive Test Suite"
+print_header "Starting C-Relay Comprehensive Test Suite with NIP-01 Validation"
 echo
 
 if run_comprehensive_test; then
     echo
     print_success "All tests completed successfully!"
-    print_info "The C-Relay hybrid schema implementation is working correctly"
+    print_info "The C-Relay with full NIP-01 validation is working correctly"
+    print_info "✅ Event validation, signature verification, and error handling all working"
     echo
     exit 0
 else
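The rejection path exercised by `publish_invalid_event` can also be poked manually; a structurally invalid event should come back with an `["OK",...,false,...]` response (assuming `websocat` is installed and the relay is on its default port):

```bash
echo '["EVENT",{"id":"bad","pubkey":"bad","created_at":1,"kind":1,"tags":[],"content":"x","sig":"bad"}]' \
    | timeout 5s websocat ws://127.0.0.1:8888
```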
tests/40_nip_test.sh (Executable file, 539 lines)
@@ -0,0 +1,539 @@
#!/bin/bash
|
||||
|
||||
# NIP-40 Expiration Timestamp Test Suite for C Nostr Relay
|
||||
# Tests expiration timestamp handling in the relay's event processing pipeline
|
||||
|
||||
set -e # Exit on error
|
||||
|
||||
# Color constants
|
||||
RED='\033[31m'
|
||||
GREEN='\033[32m'
|
||||
YELLOW='\033[33m'
|
||||
BLUE='\033[34m'
|
||||
BOLD='\033[1m'
|
||||
RESET='\033[0m'
|
||||
|
||||
# Test configuration
|
||||
RELAY_URL="ws://127.0.0.1:8888"
|
||||
HTTP_URL="http://127.0.0.1:8888"
|
||||
TEST_COUNT=0
|
||||
PASSED_COUNT=0
|
||||
FAILED_COUNT=0
|
||||
|
||||
# Test results tracking
|
||||
declare -a TEST_RESULTS=()
|
||||
|
||||
print_info() {
|
||||
echo -e "${BLUE}[INFO]${RESET} $1"
|
||||
}
|
||||
|
||||
print_success() {
|
||||
echo -e "${GREEN}${BOLD}[SUCCESS]${RESET} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARNING]${RESET} $1"
|
||||
}
|
||||
|
||||
print_error() {
|
||||
echo -e "${RED}${BOLD}[ERROR]${RESET} $1"
|
||||
}
|
||||
|
||||
print_test_header() {
|
||||
TEST_COUNT=$((TEST_COUNT + 1))
|
||||
echo ""
|
||||
echo -e "${BOLD}=== TEST $TEST_COUNT: $1 ===${RESET}"
|
||||
}
|
||||
|
||||
record_test_result() {
|
||||
local test_name="$1"
|
||||
local result="$2"
|
||||
local details="$3"
|
||||
|
||||
TEST_RESULTS+=("$test_name|$result|$details")
|
||||
|
||||
if [ "$result" = "PASS" ]; then
|
||||
PASSED_COUNT=$((PASSED_COUNT + 1))
|
||||
print_success "PASS: $test_name"
|
||||
else
|
||||
FAILED_COUNT=$((FAILED_COUNT + 1))
|
||||
print_error "FAIL: $test_name"
|
||||
if [ -n "$details" ]; then
|
||||
echo " Details: $details"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Check if relay is running
|
||||
check_relay_running() {
|
||||
print_info "Checking if relay is running..."
|
||||
|
||||
if ! curl -s -H "Accept: application/nostr+json" "$HTTP_URL/" >/dev/null 2>&1; then
|
||||
print_error "Relay is not running or not accessible at $HTTP_URL"
|
||||
print_info "Please start the relay with: ./make_and_restart_relay.sh"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
print_success "Relay is running and accessible"
|
||||
}
|
||||
|
||||
# Test NIP-11 relay information includes NIP-40
|
||||
test_nip11_expiration_support() {
|
||||
print_test_header "NIP-11 Expiration Support Advertisement"
|
||||
|
||||
print_info "Fetching relay information..."
|
||||
RELAY_INFO=$(curl -s -H "Accept: application/nostr+json" "$HTTP_URL/")
|
||||
|
||||
echo "Relay Info Response:"
|
||||
echo "$RELAY_INFO" | jq '.'
|
||||
echo ""
|
||||
|
||||
# Check if NIP-40 is in supported_nips
|
||||
if echo "$RELAY_INFO" | jq -e '.supported_nips | index(40)' >/dev/null 2>&1; then
|
||||
print_success "✓ NIP-40 found in supported_nips array"
|
||||
NIP40_SUPPORTED=true
|
||||
else
|
||||
print_error "✗ NIP-40 not found in supported_nips array"
|
||||
NIP40_SUPPORTED=false
|
||||
fi
|
||||
|
||||
if [ "$NIP40_SUPPORTED" = true ]; then
|
||||
record_test_result "NIP-11 Expiration Support Advertisement" "PASS" "NIP-40 advertised in relay info"
|
||||
return 0
|
||||
else
|
||||
record_test_result "NIP-11 Expiration Support Advertisement" "FAIL" "NIP-40 not advertised"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Helper function to create event with expiration tag
|
||||
create_event_with_expiration() {
|
||||
local content="$1"
|
||||
local expiration_timestamp="$2"
|
||||
local private_key="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"
|
||||
|
||||
if ! command -v nak &> /dev/null; then
|
||||
echo ""
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Create event with expiration tag
|
||||
nak event --sec "$private_key" -c "$content" -t "expiration=$expiration_timestamp" --ts $(date +%s)
|
||||
}
|
||||
|
||||
# Helper function to send event and check response
|
||||
send_event_and_check() {
|
||||
local event_json="$1"
|
||||
local expected_result="$2" # "accept" or "reject"
|
||||
local description="$3"
|
||||
|
||||
if [ -z "$event_json" ]; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Create EVENT message
|
||||
local event_message="[\"EVENT\",$event_json]"
|
||||
|
||||
# Send to relay
|
||||
if command -v websocat &> /dev/null; then
|
||||
local response=$(echo "$event_message" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")
|
||||
|
||||
print_info "Relay response: $response"
|
||||
|
||||
if [[ "$response" == *"Connection failed"* ]]; then
|
||||
print_error "✗ Failed to connect to relay"
|
||||
return 1
|
||||
elif [[ "$expected_result" == "accept" && "$response" == *"true"* ]]; then
|
||||
print_success "✓ $description accepted as expected"
|
||||
return 0
|
||||
elif [[ "$expected_result" == "reject" && "$response" == *"false"* ]]; then
|
||||
print_success "✓ $description rejected as expected"
|
||||
return 0
|
||||
elif [[ "$expected_result" == "accept" && "$response" == *"false"* ]]; then
|
||||
print_error "✗ $description unexpectedly rejected: $response"
|
||||
return 1
|
||||
elif [[ "$expected_result" == "reject" && "$response" == *"true"* ]]; then
|
||||
print_error "✗ $description unexpectedly accepted: $response"
|
||||
return 1
|
||||
else
|
||||
print_warning "? Unclear response for $description: $response"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
print_error "websocat not found - required for testing"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Test event without expiration tag
|
||||
test_event_without_expiration() {
|
||||
print_test_header "Event Submission Without Expiration Tag"
|
||||
|
||||
if ! command -v nak &> /dev/null; then
|
||||
print_warning "nak command not found - skipping expiration tests"
|
||||
record_test_result "Event Submission Without Expiration Tag" "SKIP" "nak not available"
|
||||
return 0
|
||||
fi
|
||||
|
||||
print_info "Creating event without expiration tag..."
|
||||
|
||||
local private_key="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"
|
||||
local event_json=$(nak event --sec "$private_key" -c "Test event without expiration" --ts $(date +%s))
|
||||
|
||||
print_info "Generated event:"
|
||||
echo "$event_json" | jq '.'
|
||||
echo ""
|
||||
|
||||
if send_event_and_check "$event_json" "accept" "Event without expiration tag"; then
|
||||
record_test_result "Event Submission Without Expiration Tag" "PASS" "Non-expiring event accepted"
|
||||
return 0
|
||||
else
|
||||
record_test_result "Event Submission Without Expiration Tag" "FAIL" "Non-expiring event handling failed"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Test event with future expiration (should be accepted)
|
||||
test_event_with_future_expiration() {
|
||||
print_test_header "Event Submission With Future Expiration"
|
||||
|
||||
if ! command -v nak &> /dev/null; then
|
||||
record_test_result "Event Submission With Future Expiration" "SKIP" "nak not available"
|
||||
return 0
|
||||
fi
|
||||
|
||||
print_info "Creating event with future expiration (1 hour from now)..."
|
||||
|
||||
local future_timestamp=$(($(date +%s) + 3600)) # 1 hour from now
|
||||
local event_json=$(create_event_with_expiration "Test event expiring in 1 hour" "$future_timestamp")
|
||||
|
||||
if [ -z "$event_json" ]; then
|
||||
record_test_result "Event Submission With Future Expiration" "FAIL" "Failed to create event"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_info "Generated event (expires at $future_timestamp):"
|
||||
echo "$event_json" | jq '.'
|
||||
echo ""
|
||||
|
||||
if send_event_and_check "$event_json" "accept" "Event with future expiration"; then
|
||||
record_test_result "Event Submission With Future Expiration" "PASS" "Future-expiring event accepted"
|
||||
return 0
|
||||
else
|
||||
record_test_result "Event Submission With Future Expiration" "FAIL" "Future-expiring event rejected"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Test event with past expiration (should be rejected in strict mode)
|
||||
test_event_with_past_expiration() {
|
||||
print_test_header "Event Submission With Past Expiration"
|
||||
|
||||
if ! command -v nak &> /dev/null; then
|
||||
record_test_result "Event Submission With Past Expiration" "SKIP" "nak not available"
|
||||
return 0
|
||||
fi
|
||||
|
||||
print_info "Creating event with past expiration (1 hour ago)..."
|
||||
|
||||
local past_timestamp=$(($(date +%s) - 3600)) # 1 hour ago
|
||||
local event_json=$(create_event_with_expiration "Test event expired 1 hour ago" "$past_timestamp")
|
||||
|
||||
if [ -z "$event_json" ]; then
|
||||
record_test_result "Event Submission With Past Expiration" "FAIL" "Failed to create event"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_info "Generated event (expired at $past_timestamp):"
|
||||
echo "$event_json" | jq '.'
|
||||
echo ""
|
||||
|
||||
# In strict mode (default), this should be rejected
|
||||
if send_event_and_check "$event_json" "reject" "Event with past expiration"; then
|
||||
record_test_result "Event Submission With Past Expiration" "PASS" "Expired event correctly rejected in strict mode"
|
||||
return 0
|
||||
else
|
||||
record_test_result "Event Submission With Past Expiration" "FAIL" "Expired event handling failed"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Test event with expiration within grace period
|
||||
test_event_within_grace_period() {
|
||||
print_test_header "Event Submission Within Grace Period"
|
||||
|
||||
if ! command -v nak &> /dev/null; then
|
||||
record_test_result "Event Submission Within Grace Period" "SKIP" "nak not available"
|
||||
return 0
|
||||
fi
|
||||
|
||||
print_info "Creating event with expiration within grace period (2 minutes ago, grace period is 5 minutes)..."
|
||||
|
||||
local grace_timestamp=$(($(date +%s) - 120)) # 2 minutes ago (within 5 minute grace period)
|
||||
local event_json=$(create_event_with_expiration "Test event within grace period" "$grace_timestamp")
|
||||
|
||||
if [ -z "$event_json" ]; then
|
||||
record_test_result "Event Submission Within Grace Period" "FAIL" "Failed to create event"
|
||||
return 1
|
||||
fi
|
||||
|
||||
print_info "Generated event (expired at $grace_timestamp, within grace period):"
|
||||
echo "$event_json" | jq '.'
|
||||
echo ""
|
||||
|
||||
# Should be accepted due to grace period
|
||||
if send_event_and_check "$event_json" "accept" "Event within grace period"; then
|
||||
record_test_result "Event Submission Within Grace Period" "PASS" "Event within grace period accepted"
|
||||
return 0
|
||||
else
|
||||
record_test_result "Event Submission Within Grace Period" "FAIL" "Grace period handling failed"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Test event filtering in subscriptions
test_expiration_filtering_in_subscriptions() {
    print_test_header "Expiration Filtering in Subscriptions"

    if ! command -v nak &> /dev/null || ! command -v websocat &> /dev/null; then
        record_test_result "Expiration Filtering in Subscriptions" "SKIP" "Required tools not available"
        return 0
    fi

    print_info "Setting up test events for subscription filtering..."

    # First, create a few events with different expiration times
    local private_key="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"

    # Event 1: No expiration (should be returned)
    local event1=$(nak event --sec "$private_key" -c "Event without expiration for filtering test" --ts $(date +%s))

    # Event 2: Future expiration (should be returned)
    local future_timestamp=$(($(date +%s) + 1800)) # 30 minutes from now
    local event2=$(create_event_with_expiration "Event with future expiration for filtering test" "$future_timestamp")

    # Event 3: Past expiration (should NOT be returned if filtering is enabled)
    local past_timestamp=$(($(date +%s) - 3600)) # 1 hour ago
    local event3=$(create_event_with_expiration "Event with past expiration for filtering test" "$past_timestamp")

    print_info "Publishing test events..."

    # Note: We expect event3 to be rejected on submission in strict mode,
    # so we'll create it with a slightly more recent expiration that might get through
    local recent_past=$(($(date +%s) - 600)) # 10 minutes ago (outside grace period)
    local event3_recent=$(create_event_with_expiration "Recently expired event for filtering test" "$recent_past")

    # Try to submit all events (some may be rejected)
    echo "[\"EVENT\",$event1]" | timeout 3s websocat "$RELAY_URL" >/dev/null 2>&1 || true
    echo "[\"EVENT\",$event2]" | timeout 3s websocat "$RELAY_URL" >/dev/null 2>&1 || true
    echo "[\"EVENT\",$event3_recent]" | timeout 3s websocat "$RELAY_URL" >/dev/null 2>&1 || true

    sleep 2 # Let events settle

    print_info "Testing subscription filtering..."

    # Create subscription for recent events
    local req_message='["REQ","filter_test",{"kinds":[1],"limit":10}]'
    local response=$(echo -e "$req_message\n[\"CLOSE\",\"filter_test\"]" | timeout 5s websocat "$RELAY_URL" 2>/dev/null || echo "")

    print_info "Subscription response:"
    echo "$response"
    echo ""

    # Count events that contain our test content
    local no_exp_count=0
    local future_exp_count=0
    local past_exp_count=0

    if echo "$response" | grep -q "Event without expiration for filtering test"; then
        no_exp_count=1
        print_success "✓ Event without expiration found in subscription results"
    fi

    if echo "$response" | grep -q "Event with future expiration for filtering test"; then
        future_exp_count=1
        print_success "✓ Event with future expiration found in subscription results"
    fi

    if echo "$response" | grep -q "Recently expired event for filtering test"; then
        past_exp_count=1
        print_warning "✗ Recently expired event found in subscription results (should be filtered)"
    else
        print_success "✓ Recently expired event properly filtered from subscription results"
    fi

    # Evaluate results
    local expected_events=$((no_exp_count + future_exp_count))
    if [ $expected_events -ge 1 ] && [ $past_exp_count -eq 0 ]; then
        record_test_result "Expiration Filtering in Subscriptions" "PASS" "Expired events properly filtered from subscriptions"
        return 0
    else
        record_test_result "Expiration Filtering in Subscriptions" "FAIL" "Expiration filtering not working properly in subscriptions"
        return 1
    fi
}

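# NIP-01 framing used above: ["REQ","<sub_id>",<filter>] opens a
# subscription, the relay streams ["EVENT","<sub_id>",{...}] messages
# followed by ["EOSE","<sub_id>"], and ["CLOSE","<sub_id>"] tears the
# subscription down again.
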
# Test malformed expiration tags
test_malformed_expiration_tags() {
    print_test_header "Handling of Malformed Expiration Tags"

    if ! command -v nak &> /dev/null; then
        record_test_result "Handling of Malformed Expiration Tags" "SKIP" "nak not available"
        return 0
    fi

    print_info "Testing events with malformed expiration tags..."

    local private_key="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"

    # Test 1: Non-numeric expiration value
    local event1=$(nak event --sec "$private_key" -c "Event with non-numeric expiration" -t "expiration=not_a_number" --ts $(date +%s))

    # Test 2: Empty expiration value
    local event2=$(nak event --sec "$private_key" -c "Event with empty expiration" -t "expiration=" --ts $(date +%s))

    print_info "Testing non-numeric expiration value..."
    if send_event_and_check "$event1" "accept" "Event with non-numeric expiration (should be treated as no expiration)"; then
        print_success "✓ Non-numeric expiration handled gracefully"
        malformed_test1=true
    else
        malformed_test1=false
    fi

    print_info "Testing empty expiration value..."
    if send_event_and_check "$event2" "accept" "Event with empty expiration (should be treated as no expiration)"; then
        print_success "✓ Empty expiration handled gracefully"
        malformed_test2=true
    else
        malformed_test2=false
    fi

    if [ "$malformed_test1" = true ] && [ "$malformed_test2" = true ]; then
        record_test_result "Handling of Malformed Expiration Tags" "PASS" "Malformed expiration tags handled gracefully"
        return 0
    else
        record_test_result "Handling of Malformed Expiration Tags" "FAIL" "Malformed expiration tag handling failed"
        return 1
    fi
}

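# Sketch of the tolerant parsing behaviour this test expects, expressed in
# shell purely for illustration (the relay's actual parser is C, and
# enforce_expiry below is a stand-in, not a real helper):
#   case "$expiration_value" in
#       ''|*[!0-9]*) ;;                            # empty or non-numeric: ignore tag
#       *) enforce_expiry "$expiration_value" ;;   # numeric: apply expiry rules
#   esac
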
# Test configuration via environment variables
test_expiration_configuration() {
    print_test_header "Expiration Configuration Via Environment Variables"

    print_info "Testing expiration configuration from relay logs..."

    if [ -f "relay.log" ]; then
        print_info "Current configuration from logs:"
        grep "Expiration Configuration:" relay.log | tail -1 || print_warning "No expiration configuration found in logs"
    else
        print_warning "No relay.log found"
    fi

    # The relay should be running with default configuration
    print_info "Default configuration should be:"
    print_info " enabled=true"
    print_info " strict_mode=true (rejects expired events on submission)"
    print_info " filter_responses=true (filters expired events from responses)"
    print_info " grace_period=300 seconds (5 minutes)"

    # Test current behavior matches expected default configuration
    print_info "Configuration test based on observed behavior:"

    # Check if NIP-40 is advertised (indicates enabled=true)
    if curl -s -H "Accept: application/nostr+json" "$HTTP_URL/" | jq -e '.supported_nips | index(40)' >/dev/null 2>&1; then
        print_success "✓ NIP-40 support advertised (enabled=true)"
        config_test=true
    else
        print_error "✗ NIP-40 not advertised (may be disabled)"
        config_test=false
    fi

    if [ "$config_test" = true ]; then
        record_test_result "Expiration Configuration Via Environment Variables" "PASS" "Expiration configuration is accessible and working"
        return 0
    else
        record_test_result "Expiration Configuration Via Environment Variables" "FAIL" "Expiration configuration issues detected"
        return 1
    fi
}

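# To exercise non-default settings, the relay would be restarted with the
# relevant environment variables set before re-running this suite. The
# variable names below are hypothetical placeholders for illustration,
# not confirmed relay options:
#   EXPIRATION_STRICT_MODE=false EXPIRATION_GRACE_PERIOD=600 ./relay
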
# Print test summary
print_test_summary() {
    echo ""
    echo -e "${BOLD}=== TEST SUMMARY ===${RESET}"
    echo "Total tests run: $TEST_COUNT"
    echo -e "${GREEN}Passed: $PASSED_COUNT${RESET}"
    echo -e "${RED}Failed: $FAILED_COUNT${RESET}"

    if [ $FAILED_COUNT -gt 0 ]; then
        echo ""
        echo -e "${RED}${BOLD}Failed tests:${RESET}"
        for result in "${TEST_RESULTS[@]}"; do
            IFS='|' read -r name status details <<< "$result"
            if [ "$status" = "FAIL" ]; then
                echo -e " ${RED}✗ $name${RESET}"
                if [ -n "$details" ]; then
                    echo " $details"
                fi
            fi
        done
    fi

    echo ""
    if [ $FAILED_COUNT -eq 0 ]; then
        echo -e "${GREEN}${BOLD}🎉 ALL TESTS PASSED!${RESET}"
        echo -e "${GREEN}✅ NIP-40 Expiration Timestamp support is working correctly in the relay${RESET}"
        return 0
    else
        echo -e "${RED}${BOLD}❌ SOME TESTS FAILED${RESET}"
        echo "Please review the output above and check relay logs for more details."
        return 1
    fi
}

# Main test execution
main() {
    echo -e "${BOLD}=== NIP-40 Expiration Timestamp Relay Test Suite ===${RESET}"
    echo "Testing NIP-40 Expiration Timestamp support in the C Nostr Relay"
    echo "Relay URL: $RELAY_URL"
    echo ""

    # Check prerequisites
    if ! command -v curl &> /dev/null; then
        print_error "curl is required but not installed"
        exit 1
    fi

    if ! command -v jq &> /dev/null; then
        print_error "jq is required but not installed"
        exit 1
    fi

    if ! command -v websocat &> /dev/null; then
        print_warning "websocat not found - WebSocket tests will be skipped"
    fi

    if ! command -v nak &> /dev/null; then
        print_warning "nak not found - Event generation tests will be skipped"
        print_info "Install with: go install github.com/fiatjaf/nak@latest"
    fi

    # Run tests
    check_relay_running
    test_nip11_expiration_support
    test_event_without_expiration
    test_event_with_future_expiration
    test_event_with_past_expiration
    test_event_within_grace_period
    test_expiration_filtering_in_subscriptions
    test_malformed_expiration_tags
    test_expiration_configuration

    # Print summary
    print_test_summary
    exit $?
}

# Run main function
main "$@"
386
tests/9_nip_delete_test.sh
Executable file
@@ -0,0 +1,386 @@
#!/bin/bash

# NIP-09 Event Deletion Request Test for C-Relay
# Tests deletion request functionality - assumes relay is already running
# Based on the pattern from 1_nip_test.sh

set -e

# Color constants
RED='\033[31m'
GREEN='\033[32m'
YELLOW='\033[33m'
BLUE='\033[34m'
BOLD='\033[1m'
RESET='\033[0m'

# Test configuration
RELAY_URL="ws://127.0.0.1:8888"
TEST_PRIVATE_KEY="nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99"

# Print functions
print_header() {
    echo -e "${BLUE}${BOLD}=== $1 ===${RESET}"
}

print_step() {
    echo -e "${YELLOW}[STEP]${RESET} $1"
}

print_success() {
    echo -e "${GREEN}✓${RESET} $1"
}

print_error() {
    echo -e "${RED}✗${RESET} $1"
}

print_info() {
    echo -e "${BLUE}[INFO]${RESET} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${RESET} $1"
}

# Helper function to publish event and extract ID
publish_event() {
    local event_json="$1"
    local description="$2"

    # Status output goes to stderr so that a caller capturing stdout via
    # command substitution receives only the event ID echoed at the end.

    # Extract event ID
    local event_id=$(echo "$event_json" | jq -r '.id' 2>/dev/null)
    if [[ "$event_id" == "null" || -z "$event_id" ]]; then
        print_error "Could not extract event ID from $description" >&2
        return 1
    fi

    print_info "Publishing $description..." >&2

    # Create EVENT message in Nostr format
    local event_message="[\"EVENT\",$event_json]"

    # Publish to relay
    local response=""
    if command -v websocat &> /dev/null; then
        response=$(echo "$event_message" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")
    else
        print_error "websocat not found - required for testing" >&2
        return 1
    fi

    # Check response
    if [[ "$response" == *"Connection failed"* ]]; then
        print_error "Failed to connect to relay for $description" >&2
        return 1
    elif [[ "$response" == *"true"* ]]; then
        print_success "$description uploaded (ID: ${event_id:0:16}...)" >&2
        echo "$event_id"
        return 0
    else
        print_warning "$description might have failed: $response" >&2
        echo ""
        return 1
    fi
}

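# The relay answers each EVENT with ["OK","<event_id>",true|false,"<message>"]
# per NIP-01; the substring match on "true" above keys off that acceptance
# flag rather than parsing the frame.
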
# Helper function to publish deletion request
publish_deletion_request() {
    local deletion_event_json="$1"
    local description="$2"

    # Status output goes to stderr so that a caller capturing stdout via
    # command substitution receives only the event ID echoed at the end.

    # Extract event ID
    local event_id=$(echo "$deletion_event_json" | jq -r '.id' 2>/dev/null)
    if [[ "$event_id" == "null" || -z "$event_id" ]]; then
        print_error "Could not extract event ID from $description" >&2
        return 1
    fi

    print_info "Publishing $description..." >&2

    # Create EVENT message in Nostr format
    local event_message="[\"EVENT\",$deletion_event_json]"

    # Publish to relay
    local response=""
    if command -v websocat &> /dev/null; then
        response=$(echo "$event_message" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")
    else
        print_error "websocat not found - required for testing" >&2
        return 1
    fi

    # Check response
    if [[ "$response" == *"Connection failed"* ]]; then
        print_error "Failed to connect to relay for $description" >&2
        return 1
    elif [[ "$response" == *"true"* ]]; then
        print_success "$description accepted (ID: ${event_id:0:16}...)" >&2
        echo "$event_id"
        return 0
    else
        print_warning "$description might have failed: $response" >&2
        echo ""
        return 1
    fi
}

# Helper function to check if event exists via subscription
check_event_exists() {
    local event_id="$1"
    local sub_id="exists_$(date +%s%N | cut -c1-10)"

    # Create REQ message to query for specific event ID
    local req_message="[\"REQ\",\"$sub_id\",{\"ids\":[\"$event_id\"]}]"

    # Send subscription and collect events
    local response=""
    if command -v websocat &> /dev/null; then
        response=$(echo -e "$req_message\n[\"CLOSE\",\"$sub_id\"]" | timeout 3s websocat "$RELAY_URL" 2>/dev/null || echo "")
    fi

    # Count EVENT responses (grep -c prints 0 itself when nothing matches;
    # '|| true' only absorbs its non-zero exit status)
    local event_count=0
    if [[ -n "$response" ]]; then
        event_count=$(echo "$response" | grep -c "\"EVENT\"" 2>/dev/null || true)
    fi

    echo "$event_count"
}

# Helper function to query events by kind
query_events_by_kind() {
    local kind="$1"
    local sub_id="kind${kind}_$(date +%s%N | cut -c1-10)"

    # Create REQ message to query for events of specific kind
    local req_message="[\"REQ\",\"$sub_id\",{\"kinds\":[$kind]}]"

    # Send subscription and collect events
    local response=""
    if command -v websocat &> /dev/null; then
        response=$(echo -e "$req_message\n[\"CLOSE\",\"$sub_id\"]" | timeout 3s websocat "$RELAY_URL" 2>/dev/null || echo "")
    fi

    # Count EVENT responses (grep -c prints 0 itself when nothing matches;
    # '|| true' only absorbs its non-zero exit status)
    local event_count=0
    if [[ -n "$response" ]]; then
        event_count=$(echo "$response" | grep -c "\"EVENT\"" 2>/dev/null || true)
    fi

    echo "$event_count"
}

# Main test function
run_deletion_test() {
    print_header "NIP-09 Event Deletion Request Test"

    # Check dependencies
    print_step "Checking dependencies..."
    if ! command -v nak &> /dev/null; then
        print_error "nak command not found"
        print_info "Please install nak: go install github.com/fiatjaf/nak@latest"
        return 1
    fi
    if ! command -v websocat &> /dev/null; then
        print_error "websocat command not found"
        print_info "Please install websocat for testing"
        return 1
    fi
    if ! command -v jq &> /dev/null; then
        print_error "jq command not found"
        print_info "Please install jq for JSON processing"
        return 1
    fi
    print_success "All dependencies found"
print_header "PHASE 1: Publishing Events to be Deleted"
|
||||
|
||||
# Create test events that will be deleted
|
||||
print_step "Creating events for deletion testing..."
|
||||
|
||||
# Create regular events (kind 1) - these will be deleted by ID
|
||||
local event1=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Event to be deleted #1" -k 1 --ts $(($(date +%s) - 100)) -t "type=test" -t "phase=deletion" 2>/dev/null)
|
||||
local event2=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Event to be deleted #2" -k 1 --ts $(($(date +%s) - 90)) -t "type=test" -t "phase=deletion" 2>/dev/null)
|
||||
|
||||
# Publish the events
|
||||
event1_id=$(publish_event "$event1" "Event to be deleted #1")
|
||||
if [[ -z "$event1_id" ]]; then
|
||||
print_error "Failed to publish test event #1"
|
||||
return 1
|
||||
fi
|
||||
|
||||
event2_id=$(publish_event "$event2" "Event to be deleted #2")
|
||||
if [[ -z "$event2_id" ]]; then
|
||||
print_error "Failed to publish test event #2"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Create an addressable event (kind 30001) - will be deleted by address
|
||||
local addr_event=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Addressable event to be deleted" -k 30001 --ts $(($(date +%s) - 80)) -t "d=test-delete" -t "type=addressable" 2>/dev/null)
|
||||
|
||||
addr_event_id=$(publish_event "$addr_event" "Addressable event to be deleted")
|
||||
if [[ -z "$addr_event_id" ]]; then
|
||||
print_error "Failed to publish addressable test event"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Create an event by a different author (to test unauthorized deletion)
|
||||
local different_key="nsec1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab"
|
||||
local unauth_event=$(nak event --sec "$different_key" -c "Event by different author" -k 1 --ts $(($(date +%s) - 70)) -t "type=unauthorized" 2>/dev/null)
|
||||
|
||||
unauth_event_id=$(publish_event "$unauth_event" "Event by different author")
|
||||
if [[ -z "$unauth_event_id" ]]; then
|
||||
print_warning "Failed to publish unauthorized test event - continuing anyway"
|
||||
fi
|
||||
|
||||
# Let events settle
|
||||
sleep 2
|
||||
|
||||
print_header "PHASE 2: Testing Event Deletion by ID"
|
||||
|
||||
print_step "Verifying events exist before deletion..."
|
||||
local event1_before=$(check_event_exists "$event1_id")
|
||||
local event2_before=$(check_event_exists "$event2_id")
|
||||
print_info "Event1 exists: $event1_before, Event2 exists: $event2_before"
|
||||
|
||||
# Create deletion request targeting the two events by ID
|
||||
print_step "Creating deletion request for events by ID..."
|
||||
local deletion_by_id=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Deleting events by ID" -k 5 --ts $(date +%s) -e "$event1_id" -e "$event2_id" -t "k=1" 2>/dev/null)
|
||||
|
||||
deletion_id=$(publish_deletion_request "$deletion_by_id" "Deletion request for events by ID")
|
||||
if [[ -z "$deletion_id" ]]; then
|
||||
print_error "Failed to publish deletion request"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Wait for deletion to process
|
||||
sleep 3
|
||||
|
||||
# Check if events were deleted
|
||||
print_step "Verifying events were deleted..."
|
||||
local event1_after=$(check_event_exists "$event1_id")
|
||||
local event2_after=$(check_event_exists "$event2_id")
|
||||
print_info "Event1 exists after deletion: $event1_after, Event2 exists after deletion: $event2_after"
|
||||
|
||||
if [[ "$event1_after" == "0" && "$event2_after" == "0" ]]; then
|
||||
print_success "✓ Events successfully deleted by ID"
|
||||
else
|
||||
print_error "✗ Events were not properly deleted"
|
||||
fi
|
||||
|
||||
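
    # Shape of the kind-5 deletion request built above (illustrative values):
    #   {"kind":5,
    #    "tags":[["e","<event1_id>"],["e","<event2_id>"],["k","1"]],
    #    "content":"Deleting events by ID",...}
    # NIP-09 uses 'e' tags to name the event IDs to delete and optional 'k'
    # tags for the kinds of the targeted events.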
print_header "PHASE 3: Testing Address-based Deletion"
|
||||
|
||||
if [[ -n "$addr_event_id" ]]; then
|
||||
print_step "Verifying addressable event exists before deletion..."
|
||||
local addr_before=$(check_event_exists "$addr_event_id")
|
||||
print_info "Addressable event exists: $addr_before"
|
||||
|
||||
# Create deletion request for addressable event using 'a' tag
|
||||
print_step "Creating deletion request for addressable event..."
|
||||
local test_pubkey=$(echo "$addr_event" | jq -r '.pubkey' 2>/dev/null)
|
||||
local deletion_by_addr=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Deleting addressable event" -k 5 --ts $(date +%s) -t "a=30001:${test_pubkey}:test-delete" -t "k=30001" 2>/dev/null)
|
||||
|
||||
addr_deletion_id=$(publish_deletion_request "$deletion_by_addr" "Deletion request for addressable event")
|
||||
if [[ -n "$addr_deletion_id" ]]; then
|
||||
# Wait for deletion to process
|
||||
sleep 3
|
||||
|
||||
# Check if addressable event was deleted
|
||||
print_step "Verifying addressable event was deleted..."
|
||||
local addr_after=$(check_event_exists "$addr_event_id")
|
||||
print_info "Addressable event exists after deletion: $addr_after"
|
||||
|
||||
if [[ "$addr_after" == "0" ]]; then
|
||||
print_success "✓ Addressable event successfully deleted"
|
||||
else
|
||||
print_error "✗ Addressable event was not properly deleted"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
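
    # NIP-09 addresses replaceable/addressable events with an 'a' tag of the
    # form "<kind>:<pubkey>:<d-tag>", so the request above resolves to
    # "30001:<author pubkey>:test-delete" and covers versions of that
    # addressable event up to the deletion request's created_at.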
print_header "PHASE 4: Testing Unauthorized Deletion"
|
||||
|
||||
if [[ -n "$unauth_event_id" ]]; then
|
||||
print_step "Testing unauthorized deletion attempt..."
|
||||
|
||||
# Try to delete the unauthorized event (should fail)
|
||||
local unauth_deletion=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Attempting unauthorized deletion" -k 5 --ts $(date +%s) -e "$unauth_event_id" -t "k=1" 2>/dev/null)
|
||||
|
||||
unauth_deletion_id=$(publish_deletion_request "$unauth_deletion" "Unauthorized deletion request")
|
||||
if [[ -n "$unauth_deletion_id" ]]; then
|
||||
# Wait for processing
|
||||
sleep 3
|
||||
|
||||
# Check if unauthorized event still exists (should still exist)
|
||||
local unauth_after=$(check_event_exists "$unauth_event_id")
|
||||
print_info "Unauthorized event exists after deletion attempt: $unauth_after"
|
||||
|
||||
if [[ "$unauth_after" == "1" ]]; then
|
||||
print_success "✓ Unauthorized deletion properly rejected - event still exists"
|
||||
else
|
||||
print_error "✗ Unauthorized deletion succeeded - security vulnerability!"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
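
    # NIP-09 only authorises self-deletion: the relay must check that the
    # deletion request's pubkey matches the pubkey of each referenced event
    # before removing anything, which is exactly what this phase probes.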
print_header "PHASE 5: Testing Invalid Deletion Requests"
|
||||
|
||||
print_step "Testing deletion request with no targets..."
|
||||
|
||||
# Create deletion request with no 'e' or 'a' tags (should be rejected)
|
||||
local invalid_deletion='{"id":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef","pubkey":"aa4fc8665f5696e33db7e1a572e3b0f5b3d615837b0f362dcb1c8068b098c7b4","created_at":'$(date +%s)',"kind":5,"tags":[["k","1"]],"content":"Invalid deletion request with no targets","sig":"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"}'
|
||||
|
||||
# Create EVENT message in Nostr format
|
||||
local invalid_message="[\"EVENT\",$invalid_deletion]"
|
||||
|
||||
# Publish to relay
|
||||
local invalid_response=""
|
||||
if command -v websocat &> /dev/null; then
|
||||
invalid_response=$(echo "$invalid_message" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")
|
||||
fi
|
||||
|
||||
# Check response - should be rejected
|
||||
if [[ "$invalid_response" == *"false"* ]]; then
|
||||
print_success "✓ Invalid deletion request properly rejected"
|
||||
elif [[ "$invalid_response" == *"true"* ]]; then
|
||||
print_warning "⚠ Invalid deletion request was accepted (should have been rejected)"
|
||||
else
|
||||
print_info "Invalid deletion request response: $invalid_response"
|
||||
fi
|
||||
|
||||
print_header "PHASE 6: Verification"
|
||||
|
||||
# Verify deletion requests themselves are stored
|
||||
print_step "Verifying deletion requests are stored..."
|
||||
local deletion_count=$(query_events_by_kind 5)
|
||||
print_info "Deletion requests accessible via query: $deletion_count"
|
||||
|
||||
if [[ "$deletion_count" -gt 0 ]]; then
|
||||
print_success "✓ Deletion requests properly stored and queryable"
|
||||
else
|
||||
print_warning "⚠ No deletion requests found via query"
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Run the test
print_header "Starting NIP-09 Event Deletion Request Test Suite"
echo

if run_deletion_test; then
    echo
    print_success "All NIP-09 deletion tests completed successfully!"
    print_info "The C-Relay NIP-09 implementation is working correctly"
    print_info "✅ Event deletion by ID working"
    print_info "✅ Address-based deletion working"
    print_info "✅ Authorization validation working"
    print_info "✅ Invalid deletion rejection working"
    echo
    exit 0
else
    echo
    print_error "Some NIP-09 tests failed"
    exit 1
fi
199
tests/subscribe_all.sh
Executable file
@@ -0,0 +1,199 @@
#!/bin/bash

# Persistent Subscription Test Script
# Subscribes to all events in the relay and prints them as they arrive in real-time
# This tests the persistent subscription functionality of the C-Relay

set -e # Exit on any error

# Color constants
RED='\033[31m'
GREEN='\033[32m'
YELLOW='\033[33m'
BLUE='\033[34m'
BOLD='\033[1m'
RESET='\033[0m'

# Test configuration
RELAY_URL="ws://127.0.0.1:8888"
SUBSCRIPTION_ID="persistent_test_$(date +%s)"

# Print functions
print_header() {
    echo -e "${BLUE}${BOLD}=== $1 ===${RESET}"
}

print_info() {
    echo -e "${BLUE}[INFO]${RESET} $1"
}

print_success() {
    echo -e "${GREEN}✓${RESET} $1"
}

print_error() {
    echo -e "${RED}✗${RESET} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${RESET} $1"
}

print_event() {
    echo -e "${GREEN}[EVENT]${RESET} $1"
}

# Cleanup function
cleanup() {
    print_info "Cleaning up..."
    if [[ -n "$WEBSOCAT_PID" ]]; then
        kill "$WEBSOCAT_PID" 2>/dev/null || true
        wait "$WEBSOCAT_PID" 2>/dev/null || true
    fi

    # Send CLOSE message to clean up subscription on relay
    if command -v websocat &> /dev/null; then
        echo "[\"CLOSE\",\"$SUBSCRIPTION_ID\"]" | timeout 2s websocat "$RELAY_URL" 2>/dev/null || true
    fi

    print_info "Cleanup complete"
    exit 0
}

# Set up signal handlers
trap cleanup SIGINT SIGTERM

# Parse events from relay responses
parse_events() {
    while IFS= read -r line; do
        # Check if this is an EVENT message
        if echo "$line" | jq -e '. | type == "array" and length >= 3 and .[0] == "EVENT"' >/dev/null 2>&1; then
            # Extract event details
            local event_id=$(echo "$line" | jq -r '.[2].id' 2>/dev/null || echo "unknown")
            local event_kind=$(echo "$line" | jq -r '.[2].kind' 2>/dev/null || echo "unknown")
            local event_content=$(echo "$line" | jq -r '.[2].content' 2>/dev/null || echo "")
            local event_pubkey=$(echo "$line" | jq -r '.[2].pubkey' 2>/dev/null || echo "unknown")
            local event_created_at=$(echo "$line" | jq -r '.[2].created_at' 2>/dev/null || echo "unknown")
            local event_tags=$(echo "$line" | jq -r '.[2].tags | length' 2>/dev/null || echo "0")

            # Convert timestamp to readable format
            local readable_time="unknown"
            if [[ "$event_created_at" != "unknown" && "$event_created_at" =~ ^[0-9]+$ ]]; then
                readable_time=$(date -d "@$event_created_at" "+%Y-%m-%d %H:%M:%S" 2>/dev/null || echo "$event_created_at")
            fi

            # Print formatted event
            print_event "Kind: $event_kind | ID: ${event_id:0:16}... | Author: ${event_pubkey:0:16}..."
            echo -e " ${YELLOW}Time:${RESET} $readable_time | ${YELLOW}Tags:${RESET} $event_tags"

            # Show content (truncated if too long)
            if [[ -n "$event_content" ]]; then
                local truncated_content="${event_content:0:100}"
                if [[ ${#event_content} -gt 100 ]]; then
                    truncated_content="${truncated_content}..."
                fi
                echo -e " ${YELLOW}Content:${RESET} $truncated_content"
            fi
            echo # Blank line for readability

        elif echo "$line" | jq -e '. | type == "array" and length >= 2 and .[0] == "EOSE"' >/dev/null 2>&1; then
            # End of stored events
            local sub_id=$(echo "$line" | jq -r '.[1]' 2>/dev/null)
            print_info "End of stored events for subscription: $sub_id"
            print_success "Persistent subscription is now active - waiting for new events..."
            echo

        elif echo "$line" | jq -e '. | type == "array" and length >= 3 and .[0] == "CLOSED"' >/dev/null 2>&1; then
            # Subscription closed
            local sub_id=$(echo "$line" | jq -r '.[1]' 2>/dev/null)
            local reason=$(echo "$line" | jq -r '.[2]' 2>/dev/null)
            print_warning "Subscription $sub_id was closed: $reason"

        elif echo "$line" | jq -e '. | type == "array" and length >= 4 and .[0] == "OK"' >/dev/null 2>&1; then
            # OK response to event publishing
            local event_id=$(echo "$line" | jq -r '.[1]' 2>/dev/null)
            local success=$(echo "$line" | jq -r '.[2]' 2>/dev/null)
            local message=$(echo "$line" | jq -r '.[3]' 2>/dev/null)
            if [[ "$success" == "true" ]]; then
                print_success "Event published: ${event_id:0:16}..."
            else
                print_error "Event publish failed: ${event_id:0:16}... - $message"
            fi

        else
            # Unknown message type - just show it
            print_info "Relay message: $line"
        fi
    done
}

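# Example frames the parser above distinguishes (NIP-01 framing,
# illustrative values only):
#   ["EVENT","<sub_id>",{"id":"...","kind":1,"content":"...",...}]
#   ["EOSE","<sub_id>"]
#   ["CLOSED","<sub_id>","<reason>"]
#   ["OK","<event_id>",true,""]
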
# Main function
main() {
    print_header "Persistent Subscription Test - Subscribe to All Events"

    # Check dependencies
    if ! command -v websocat &> /dev/null; then
        print_error "websocat command not found"
        print_info "Please install websocat for testing"
        return 1
    fi
    if ! command -v jq &> /dev/null; then
        print_error "jq command not found"
        print_info "Please install jq for JSON processing"
        return 1
    fi

    print_info "Subscription ID: $SUBSCRIPTION_ID"
    print_info "Relay URL: $RELAY_URL"
    print_info "Filter: {} (all events)"
    echo

    # Create REQ message to subscribe to all events
    local req_message="[\"REQ\",\"$SUBSCRIPTION_ID\",{}]"

    print_info "Establishing persistent subscription..."
    print_info "Press Ctrl+C to stop and cleanup"
    echo

    # Start websocat connection and keep it open
    {
        echo "$req_message"
        # Keep the connection alive by sleeping indefinitely
        # The connection will receive events as they come in
        while true; do
            sleep 1
        done
    } | websocat "$RELAY_URL" | parse_events &

    # Store the background process ID
    WEBSOCAT_PID=$!

    # Wait for the background process (which runs indefinitely)
    # This will exit when we get a signal (Ctrl+C)
    wait "$WEBSOCAT_PID" 2>/dev/null || true
}

# Usage information
usage() {
    echo "Usage: $0"
    echo
    echo "This script creates a persistent subscription to all events on the relay"
    echo "and displays them in real-time as they arrive. Perfect for testing"
    echo "the persistent subscription functionality."
    echo
    echo "To test:"
    echo "1. Run this script in one terminal"
    echo "2. Run 'tests/1_nip_test.sh' in another terminal"
    echo "3. Watch events appear in real-time in this terminal"
    echo
    echo "Press Ctrl+C to stop and cleanup the subscription."
}

# Handle help flag
if [[ "$1" == "-h" || "$1" == "--help" ]]; then
    usage
    exit 0
fi

# Run main function
main "$@"