Compare commits

11 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 517cc020c7 | |
| | 2c699652b0 | |
| | 2e4ffc0e79 | |
| | 70c91ec858 | |
| | b7c4609c2d | |
| | 7f69367666 | |
| | fa17aa1f78 | |
| | 7e560b4247 | |
| | 9a29ea51e3 | |
| | 6c10713e18 | |
| | b810982a17 | |

.gitignore (vendored): 3 changes
@@ -1,3 +1,6 @@
nostr_core_lib/
nips/
build/
relay.log
Trash/
src/version.h
.roo/rules-code/rules.md (new file): 1 line
@@ -0,0 +1 @@
Use ./make_and_restart_relay.sh instead of make to build the project.
Makefile: 68 changes
@@ -9,7 +9,7 @@ LIBS = -lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k
BUILD_DIR = build

# Source files
MAIN_SRC = src/main.c
MAIN_SRC = src/main.c src/config.c
NOSTR_CORE_LIB = nostr_core_lib/libnostr_core_x64.a

# Architecture detection
@@ -36,19 +36,68 @@ $(NOSTR_CORE_LIB):
    @echo "Building nostr_core_lib..."
    cd nostr_core_lib && ./build.sh

# Generate version.h from git tags
src/version.h:
    @if [ -d .git ]; then \
        echo "Generating version.h from git tags..."; \
        VERSION=$$(git describe --tags --always --dirty 2>/dev/null || echo "unknown"); \
        if echo "$$VERSION" | grep -q "^v[0-9]"; then \
            CLEAN_VERSION=$$(echo "$$VERSION" | sed 's/^v//'); \
            MAJOR=$$(echo "$$CLEAN_VERSION" | cut -d. -f1); \
            MINOR=$$(echo "$$CLEAN_VERSION" | cut -d. -f2); \
            PATCH=$$(echo "$$CLEAN_VERSION" | cut -d. -f3 | cut -d- -f1); \
        else \
            CLEAN_VERSION="0.0.0-$$VERSION"; \
            MAJOR=0; MINOR=0; PATCH=0; \
        fi; \
        echo "/* Auto-generated version information */" > src/version.h; \
        echo "#ifndef VERSION_H" >> src/version.h; \
        echo "#define VERSION_H" >> src/version.h; \
        echo "" >> src/version.h; \
        echo "#define VERSION \"$$VERSION\"" >> src/version.h; \
        echo "#define VERSION_MAJOR $$MAJOR" >> src/version.h; \
        echo "#define VERSION_MINOR $$MINOR" >> src/version.h; \
        echo "#define VERSION_PATCH $$PATCH" >> src/version.h; \
        echo "" >> src/version.h; \
        echo "#endif /* VERSION_H */" >> src/version.h; \
        echo "Generated version.h with version: $$VERSION"; \
    elif [ ! -f src/version.h ]; then \
        echo "Git not available and version.h missing, creating fallback version.h..."; \
        VERSION="unknown"; \
        echo "/* Auto-generated version information */" > src/version.h; \
        echo "#ifndef VERSION_H" >> src/version.h; \
        echo "#define VERSION_H" >> src/version.h; \
        echo "" >> src/version.h; \
        echo "#define VERSION \"$$VERSION\"" >> src/version.h; \
        echo "#define VERSION_MAJOR 0" >> src/version.h; \
        echo "#define VERSION_MINOR 0" >> src/version.h; \
        echo "#define VERSION_PATCH 0" >> src/version.h; \
        echo "" >> src/version.h; \
        echo "#endif /* VERSION_H */" >> src/version.h; \
        echo "Created fallback version.h with version: $$VERSION"; \
    else \
        echo "Git not available, preserving existing version.h"; \
    fi

# Force version.h regeneration (useful for development)
force-version:
    @echo "Force regenerating version.h..."
    @rm -f src/version.h
    @$(MAKE) src/version.h

# Build the relay
$(TARGET): $(BUILD_DIR) $(MAIN_SRC) $(NOSTR_CORE_LIB)
$(TARGET): $(BUILD_DIR) src/version.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
    @echo "Compiling C-Relay for architecture: $(ARCH)"
    $(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(TARGET) $(NOSTR_CORE_LIB) $(LIBS)
    @echo "Build complete: $(TARGET)"

# Build for specific architectures
x86: $(BUILD_DIR) $(MAIN_SRC) $(NOSTR_CORE_LIB)
x86: $(BUILD_DIR) src/version.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
    @echo "Building C-Relay for x86_64..."
    $(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_x86 $(NOSTR_CORE_LIB) $(LIBS)
    @echo "Build complete: $(BUILD_DIR)/c_relay_x86"

arm64: $(BUILD_DIR) $(MAIN_SRC) $(NOSTR_CORE_LIB)
arm64: $(BUILD_DIR) src/version.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB)
    @echo "Cross-compiling C-Relay for ARM64..."
    @if ! command -v aarch64-linux-gnu-gcc >/dev/null 2>&1; then \
        echo "ERROR: ARM64 cross-compiler not found."; \
@@ -112,14 +161,16 @@ test: $(TARGET)
    @echo "Running tests..."
    ./tests/1_nip_test.sh

# Initialize database
# Initialize database (now handled automatically when server starts)
init-db:
    @echo "Initializing database..."
    ./db/init.sh --force
    @echo "Database initialization is now handled automatically when the server starts."
    @echo "The schema is embedded in the binary - no external files needed."
    @echo "To manually recreate database: rm -f db/c_nostr_relay.db && ./build/c_relay_x86"

# Clean build artifacts
clean:
    rm -rf $(BUILD_DIR)
    rm -f src/version.h
    @echo "Clean complete"

# Clean everything including nostr_core_lib
@@ -158,5 +209,6 @@ help:
    @echo " make check-toolchain # Check what compilers are available"
    @echo " make test # Run tests"
    @echo " make init-db # Set up database"
    @echo " make force-version # Force regenerate version.h from git"

.PHONY: all x86 arm64 test init-db clean clean-all install-deps install-cross-tools install-arm64-deps check-toolchain help
.PHONY: all x86 arm64 test init-db clean clean-all install-deps install-cross-tools install-arm64-deps check-toolchain help force-version
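For reference, this is a sketch of the header the `src/version.h` rule above would emit for a checkout tagged `v0.2.0` (illustrative output only; the actual `VERSION` string is whatever `git describe --tags --always --dirty` prints, so it may carry a commit suffix or `-dirty`):

```c
/* Auto-generated version information */
#ifndef VERSION_H
#define VERSION_H

#define VERSION "v0.2.0"
#define VERSION_MAJOR 0
#define VERSION_MINOR 2
#define VERSION_PATCH 0

#endif /* VERSION_H */
```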
README.md: 17 changes
@@ -11,19 +11,12 @@ Do NOT modify the formatting, add emojis, or change the text. Keep the simple fo
- [x] NIP-01: Basic protocol flow implementation
- [x] NIP-09: Event deletion
- [x] NIP-11: Relay information document
- [ ] NIP-12: Generic tag queries
- [ ] NIP-13: Proof of Work
- [x] NIP-13: Proof of Work
- [x] NIP-15: End of Stored Events Notice
- [ ] NIP-16: Event Treatment
- [x] NIP-20: Command Results
- [ ] NIP-22: Event `created_at` Limits
- [ ] NIP-25: Reactions
- [ ] NIP-26: Delegated Event Signing
- [ ] NIP-28: Public Chat
- [ ] NIP-33: Parameterized Replaceable Events
- [ ] NIP-40: Expiration Timestamp
- [x] NIP-33: Parameterized Replaceable Events
- [x] NIP-40: Expiration Timestamp
- [ ] NIP-42: Authentication of clients to relays
- [ ] NIP-45: Counting results. [experimental](#count)
- [ ] NIP-50: Keywords filter. [experimental](#search)
- [ ] NIP-45: Counting results.
- [ ] NIP-50: Keywords filter.
- [ ] NIP-70: Protected Events
@@ -139,6 +139,13 @@ compile_project() {
        print_warning "Clean failed or no Makefile found"
    fi

    # Force regenerate version.h to pick up new tags
    if make force-version > /dev/null 2>&1; then
        print_success "Regenerated version.h"
    else
        print_warning "Failed to regenerate version.h"
    fi

    # Compile the project
    if make > /dev/null 2>&1; then
        print_success "C-Relay compiled successfully"
@@ -229,10 +236,65 @@ git_commit_and_push() {
        exit 1
    fi

    if git push --tags > /dev/null 2>&1; then
        print_success "Pushed tags"
    # Push only the new tag to avoid conflicts with existing tags
    if git push origin "$NEW_VERSION" > /dev/null 2>&1; then
        print_success "Pushed tag: $NEW_VERSION"
    else
        print_warning "Failed to push tags"
        print_warning "Tag push failed, trying force push..."
        if git push --force origin "$NEW_VERSION" > /dev/null 2>&1; then
            print_success "Force-pushed updated tag: $NEW_VERSION"
        else
            print_error "Failed to push tag: $NEW_VERSION"
            exit 1
        fi
    fi
}

# Function to commit and push changes without creating a tag (tag already created)
git_commit_and_push_no_tag() {
    print_status "Preparing git commit..."

    # Stage all changes
    if git add . > /dev/null 2>&1; then
        print_success "Staged all changes"
    else
        print_error "Failed to stage changes"
        exit 1
    fi

    # Check if there are changes to commit
    if git diff --staged --quiet; then
        print_warning "No changes to commit"
    else
        # Commit changes
        if git commit -m "$NEW_VERSION - $COMMIT_MESSAGE" > /dev/null 2>&1; then
            print_success "Committed changes"
        else
            print_error "Failed to commit changes"
            exit 1
        fi
    fi

    # Push changes and tags
    print_status "Pushing to remote repository..."
    if git push > /dev/null 2>&1; then
        print_success "Pushed changes"
    else
        print_error "Failed to push changes"
        exit 1
    fi

    # Push only the new tag to avoid conflicts with existing tags
    if git push origin "$NEW_VERSION" > /dev/null 2>&1; then
        print_success "Pushed tag: $NEW_VERSION"
    else
        print_warning "Tag push failed, trying force push..."
        if git push --force origin "$NEW_VERSION" > /dev/null 2>&1; then
            print_success "Force-pushed updated tag: $NEW_VERSION"
        else
            print_error "Failed to push tag: $NEW_VERSION"
            exit 1
        fi
    fi
}
@@ -352,14 +414,23 @@ main() {
        # Increment minor version for releases
        increment_version "minor"

        # Compile project first
        # Create new git tag BEFORE compilation so version.h picks it up
        if git tag "$NEW_VERSION" > /dev/null 2>&1; then
            print_success "Created tag: $NEW_VERSION"
        else
            print_warning "Tag $NEW_VERSION already exists, removing and recreating..."
            git tag -d "$NEW_VERSION" > /dev/null 2>&1
            git tag "$NEW_VERSION" > /dev/null 2>&1
        fi

        # Compile project first (will now pick up the new tag)
        compile_project

        # Build release binaries
        build_release_binaries

        # Commit and push
        git_commit_and_push
        # Commit and push (but skip tag creation since we already did it)
        git_commit_and_push_no_tag

        # Create Gitea release with binaries
        create_gitea_release
@@ -376,11 +447,20 @@ main() {
        # Increment patch version for regular commits
        increment_version "patch"

        # Compile project
        # Create new git tag BEFORE compilation so version.h picks it up
        if git tag "$NEW_VERSION" > /dev/null 2>&1; then
            print_success "Created tag: $NEW_VERSION"
        else
            print_warning "Tag $NEW_VERSION already exists, removing and recreating..."
            git tag -d "$NEW_VERSION" > /dev/null 2>&1
            git tag "$NEW_VERSION" > /dev/null 2>&1
        fi

        # Compile project (will now pick up the new tag)
        compile_project

        # Commit and push
        git_commit_and_push
        # Commit and push (but skip tag creation since we already did it)
        git_commit_and_push_no_tag

        print_success "Build and push completed successfully!"
        print_status "Version $NEW_VERSION pushed to repository"

db/README.md: 229 changes
@@ -1,228 +1 @@
|
||||
# C Nostr Relay Database
|
||||
|
||||
This directory contains the SQLite database schema and initialization scripts for the C Nostr Relay implementation.
|
||||
|
||||
## Files
|
||||
|
||||
- **`schema.sql`** - Complete database schema based on nostr-rs-relay v18
|
||||
- **`init.sh`** - Database initialization script
|
||||
- **`c_nostr_relay.db`** - SQLite database file (created after running init.sh)
|
||||
|
||||
## Quick Start
|
||||
|
||||
1. **Initialize the database:**
|
||||
```bash
|
||||
cd db
|
||||
./init.sh
|
||||
```
|
||||
|
||||
2. **Force reinitialize (removes existing database):**
|
||||
```bash
|
||||
./init.sh --force
|
||||
```
|
||||
|
||||
3. **Initialize with optimization and info:**
|
||||
```bash
|
||||
./init.sh --info --optimize
|
||||
```
|
||||
|
||||
## Database Schema
|
||||
|
||||
The schema is fully compatible with the Nostr protocol and includes:
|
||||
|
||||
### Core Tables
|
||||
|
||||
- **`event`** - Main event storage with all Nostr event data
|
||||
- **`tag`** - Denormalized tag index for efficient queries
|
||||
- **`user_verification`** - NIP-05 verification tracking
|
||||
- **`account`** - User account management (optional)
|
||||
- **`invoice`** - Lightning payment tracking (optional)
|
||||
|
||||
### Key Features
|
||||
|
||||
- ✅ **NIP-01 compliant** - Full basic protocol support
|
||||
- ✅ **Replaceable events** - Supports kinds 0, 3, 10000-19999
|
||||
- ✅ **Parameterized replaceable** - Supports kinds 30000-39999 with `d` tags
|
||||
- ✅ **Event deletion** - NIP-09 soft deletion with `hidden` column
|
||||
- ✅ **Event expiration** - NIP-40 automatic cleanup
|
||||
- ✅ **Authentication** - NIP-42 client authentication
|
||||
- ✅ **NIP-05 verification** - Domain-based identity verification
|
||||
- ✅ **Performance optimized** - Comprehensive indexing strategy
|
||||
|
||||
### Schema Version
|
||||
|
||||
Current version: **v18** (compatible with nostr-rs-relay v18)
|
||||
|
||||
## Database Structure
|
||||
|
||||
### Event Storage
|
||||
```sql
|
||||
CREATE TABLE event (
|
||||
id INTEGER PRIMARY KEY,
|
||||
event_hash BLOB NOT NULL, -- 32-byte SHA256 hash
|
||||
first_seen INTEGER NOT NULL, -- relay receive timestamp
|
||||
created_at INTEGER NOT NULL, -- event creation timestamp
|
||||
expires_at INTEGER, -- NIP-40 expiration
|
||||
author BLOB NOT NULL, -- 32-byte pubkey
|
||||
delegated_by BLOB, -- NIP-26 delegator
|
||||
kind INTEGER NOT NULL, -- event kind
|
||||
hidden INTEGER DEFAULT FALSE, -- soft deletion flag
|
||||
content TEXT NOT NULL -- complete JSON event
|
||||
);
|
||||
```
|
||||
|
||||
### Tag Indexing
|
||||
```sql
|
||||
CREATE TABLE tag (
|
||||
id INTEGER PRIMARY KEY,
|
||||
event_id INTEGER NOT NULL,
|
||||
name TEXT, -- tag name ("e", "p", etc.)
|
||||
value TEXT, -- tag value
|
||||
created_at INTEGER NOT NULL, -- denormalized for performance
|
||||
kind INTEGER NOT NULL -- denormalized for performance
|
||||
);
|
||||
```
|
||||
|
||||
## Performance Features
|
||||
|
||||
### Optimized Indexes
|
||||
- **Hash-based lookups** - `event_hash_index` for O(1) event retrieval
|
||||
- **Author queries** - `author_index`, `author_created_at_index`
|
||||
- **Kind filtering** - `kind_index`, `kind_created_at_index`
|
||||
- **Tag searching** - `tag_covering_index` for efficient tag queries
|
||||
- **Composite queries** - Multi-column indexes for complex filters
|
||||
|
||||
### Query Optimization
|
||||
- **Denormalized tags** - Includes `kind` and `created_at` in tag table
|
||||
- **Binary storage** - BLOBs for hex data (pubkeys, hashes)
|
||||
- **WAL mode** - Write-Ahead Logging for concurrent access
|
||||
- **Automatic cleanup** - Triggers for data integrity
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Basic Operations
|
||||
|
||||
1. **Insert an event:**
|
||||
```sql
|
||||
INSERT INTO event (event_hash, first_seen, created_at, author, kind, content)
|
||||
VALUES (?, ?, ?, ?, ?, ?);
|
||||
```
|
||||
|
||||
2. **Query by author:**
|
||||
```sql
|
||||
SELECT content FROM event
|
||||
WHERE author = ? AND hidden != TRUE
|
||||
ORDER BY created_at DESC;
|
||||
```
|
||||
|
||||
3. **Filter by tags:**
|
||||
```sql
|
||||
SELECT e.content FROM event e
|
||||
JOIN tag t ON e.id = t.event_id
|
||||
WHERE t.name = 'p' AND t.value = ? AND e.hidden != TRUE;
|
||||
```
|
||||
|
||||
### Advanced Queries
|
||||
|
||||
1. **Get replaceable event (latest only):**
|
||||
```sql
|
||||
SELECT content FROM event
|
||||
WHERE author = ? AND kind = ? AND hidden != TRUE
|
||||
ORDER BY created_at DESC LIMIT 1;
|
||||
```
|
||||
|
||||
2. **Tag-based filtering (NIP-01 filters):**
|
||||
```sql
|
||||
SELECT e.content FROM event e
|
||||
WHERE e.id IN (
|
||||
SELECT t.event_id FROM tag t
|
||||
WHERE t.name = ? AND t.value IN (?, ?, ?)
|
||||
) AND e.hidden != TRUE;
|
||||
```
|
||||
|
||||
## Maintenance
|
||||
|
||||
### Regular Operations
|
||||
|
||||
1. **Check database integrity:**
|
||||
```bash
|
||||
sqlite3 c_nostr_relay.db "PRAGMA integrity_check;"
|
||||
```
|
||||
|
||||
2. **Optimize database:**
|
||||
```bash
|
||||
sqlite3 c_nostr_relay.db "PRAGMA optimize; VACUUM; ANALYZE;"
|
||||
```
|
||||
|
||||
3. **Clean expired events:**
|
||||
```sql
|
||||
DELETE FROM event WHERE expires_at <= strftime('%s', 'now');
|
||||
```
|
||||
|
||||
### Monitoring
|
||||
|
||||
1. **Database size:**
|
||||
```bash
|
||||
ls -lh c_nostr_relay.db
|
||||
```
|
||||
|
||||
2. **Table statistics:**
|
||||
```sql
|
||||
SELECT name, COUNT(*) as count FROM (
|
||||
SELECT 'events' as name FROM event UNION ALL
|
||||
SELECT 'tags' as name FROM tag UNION ALL
|
||||
SELECT 'verifications' as name FROM user_verification
|
||||
) GROUP BY name;
|
||||
```
|
||||
|
||||
## Migration Support
|
||||
|
||||
The schema includes a migration system for future updates:
|
||||
|
||||
```sql
|
||||
CREATE TABLE schema_info (
|
||||
version INTEGER PRIMARY KEY,
|
||||
applied_at INTEGER NOT NULL,
|
||||
description TEXT
|
||||
);
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **Input validation** - Always validate event JSON and signatures
|
||||
2. **Rate limiting** - Implement at application level
|
||||
3. **Access control** - Use `account` table for permissions
|
||||
4. **Backup strategy** - Regular database backups recommended
|
||||
|
||||
## Compatibility
|
||||
|
||||
- **SQLite version** - Requires SQLite 3.8.0+
|
||||
- **nostr-rs-relay** - Schema compatible with v18
|
||||
- **NIPs supported** - 01, 02, 05, 09, 10, 11, 26, 40, 42
|
||||
- **C libraries** - Compatible with sqlite3 C API
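
The schema is described above as compatible with the sqlite3 C API. As an illustration only (this helper is not code from the repository), the "Query by author" example from Basic Operations could be driven from C like this:

```c
#include <stdio.h>
#include <sqlite3.h>

/* Print the JSON of all visible events by one author, newest first.
 * `author` is the 32-byte pubkey BLOB used by this schema. */
int print_events_by_author(sqlite3 *db, const unsigned char *author, int author_len) {
    const char *sql =
        "SELECT content FROM event "
        "WHERE author = ? AND hidden != TRUE "
        "ORDER BY created_at DESC;";
    sqlite3_stmt *stmt = NULL;

    if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
        fprintf(stderr, "prepare failed: %s\n", sqlite3_errmsg(db));
        return -1;
    }
    sqlite3_bind_blob(stmt, 1, author, author_len, SQLITE_STATIC);

    while (sqlite3_step(stmt) == SQLITE_ROW) {
        printf("%s\n", (const char *)sqlite3_column_text(stmt, 0));
    }
    sqlite3_finalize(stmt);
    return 0;
}
```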
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Database locked error:**
|
||||
- Ensure proper connection closing in your C code
|
||||
- Check for long-running transactions
|
||||
|
||||
2. **Performance issues:**
|
||||
- Run `PRAGMA optimize;` regularly
|
||||
- Consider `VACUUM` if database grew significantly
|
||||
|
||||
3. **Schema errors:**
|
||||
- Verify SQLite version compatibility
|
||||
- Check foreign key constraints
|
||||
|
||||
### Getting Help
|
||||
|
||||
- Check the main project README for C implementation details
|
||||
- Review nostr-rs-relay documentation for reference implementation
|
||||
- Consult Nostr NIPs for protocol specifications
|
||||
|
||||
## License
|
||||
|
||||
This database schema is part of the C Nostr Relay project and follows the same license terms.
|
||||
Only README.md will remain

Binary file not shown.
Binary file not shown.
Binary file not shown.

db/init.sh (deleted): 234 lines
@@ -1,234 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# C Nostr Relay Database Initialization Script
|
||||
# Creates and initializes the SQLite database with proper schema
|
||||
|
||||
set -e # Exit on any error
|
||||
|
||||
# Configuration
|
||||
DB_DIR="$(dirname "$0")"
|
||||
DB_NAME="c_nostr_relay.db"
|
||||
DB_PATH="${DB_DIR}/${DB_NAME}"
|
||||
SCHEMA_FILE="${DB_DIR}/schema.sql"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Logging functions
|
||||
log_info() {
|
||||
echo -e "${BLUE}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
log_success() {
|
||||
echo -e "${GREEN}[SUCCESS]${NC} $1"
|
||||
}
|
||||
|
||||
log_warning() {
|
||||
echo -e "${YELLOW}[WARNING]${NC} $1"
|
||||
}
|
||||
|
||||
log_error() {
|
||||
echo -e "${RED}[ERROR]${NC} $1"
|
||||
}
|
||||
|
||||
# Check if SQLite3 is installed
|
||||
check_sqlite() {
|
||||
if ! command -v sqlite3 &> /dev/null; then
|
||||
log_error "sqlite3 is not installed. Please install it first:"
|
||||
echo " Ubuntu/Debian: sudo apt-get install sqlite3"
|
||||
echo " CentOS/RHEL: sudo yum install sqlite"
|
||||
echo " macOS: brew install sqlite3"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
local version=$(sqlite3 --version | cut -d' ' -f1)
|
||||
log_info "Using SQLite version: $version"
|
||||
}
|
||||
|
||||
# Create database directory if it doesn't exist
|
||||
create_db_directory() {
|
||||
if [ ! -d "$DB_DIR" ]; then
|
||||
log_info "Creating database directory: $DB_DIR"
|
||||
mkdir -p "$DB_DIR"
|
||||
fi
|
||||
}
|
||||
|
||||
# Backup existing database if it exists
|
||||
backup_existing_db() {
|
||||
if [ -f "$DB_PATH" ]; then
|
||||
local backup_path="${DB_PATH}.backup.$(date +%Y%m%d_%H%M%S)"
|
||||
log_warning "Existing database found. Creating backup: $backup_path"
|
||||
cp "$DB_PATH" "$backup_path"
|
||||
fi
|
||||
}
|
||||
|
||||
# Initialize the database with schema
|
||||
init_database() {
|
||||
log_info "Initializing database: $DB_PATH"
|
||||
|
||||
if [ ! -f "$SCHEMA_FILE" ]; then
|
||||
log_error "Schema file not found: $SCHEMA_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Remove existing database if --force flag is used
|
||||
if [ "$1" = "--force" ] && [ -f "$DB_PATH" ]; then
|
||||
log_warning "Force flag detected. Removing existing database."
|
||||
rm -f "$DB_PATH"
|
||||
fi
|
||||
|
||||
# Create the database and apply schema
|
||||
log_info "Applying schema from: $SCHEMA_FILE"
|
||||
if sqlite3 "$DB_PATH" < "$SCHEMA_FILE"; then
|
||||
log_success "Database schema applied successfully"
|
||||
else
|
||||
log_error "Failed to apply database schema"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Verify database integrity
|
||||
verify_database() {
|
||||
log_info "Verifying database integrity..."
|
||||
|
||||
# Check if database file exists and is not empty
|
||||
if [ ! -s "$DB_PATH" ]; then
|
||||
log_error "Database file is empty or doesn't exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Run SQLite integrity check
|
||||
local integrity_result=$(sqlite3 "$DB_PATH" "PRAGMA integrity_check;")
|
||||
if [ "$integrity_result" = "ok" ]; then
|
||||
log_success "Database integrity check passed"
|
||||
else
|
||||
log_error "Database integrity check failed: $integrity_result"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Verify schema version
|
||||
local schema_version=$(sqlite3 "$DB_PATH" "PRAGMA user_version;")
|
||||
log_info "Database schema version: $schema_version"
|
||||
|
||||
# Check that main tables exist
|
||||
local table_count=$(sqlite3 "$DB_PATH" "SELECT count(*) FROM sqlite_master WHERE type='table' AND name IN ('events', 'schema_info');")
|
||||
if [ "$table_count" -eq 2 ]; then
|
||||
log_success "Core tables created successfully"
|
||||
else
|
||||
log_error "Missing core tables (expected 2, found $table_count)"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Display database information
|
||||
show_db_info() {
|
||||
log_info "Database Information:"
|
||||
echo " Location: $DB_PATH"
|
||||
echo " Size: $(du -h "$DB_PATH" | cut -f1)"
|
||||
|
||||
log_info "Database Tables:"
|
||||
sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;" | sed 's/^/ - /'
|
||||
|
||||
log_info "Database Indexes:"
|
||||
sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='index' AND name NOT LIKE 'sqlite_%' ORDER BY name;" | sed 's/^/ - /'
|
||||
|
||||
log_info "Database Views:"
|
||||
sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='view' ORDER BY name;" | sed 's/^/ - /'
|
||||
}
|
||||
|
||||
# Run database optimization
|
||||
optimize_database() {
|
||||
log_info "Running database optimization..."
|
||||
sqlite3 "$DB_PATH" "PRAGMA optimize; VACUUM; ANALYZE;"
|
||||
log_success "Database optimization completed"
|
||||
}
|
||||
|
||||
# Print usage information
|
||||
print_usage() {
|
||||
echo "Usage: $0 [OPTIONS]"
|
||||
echo ""
|
||||
echo "Initialize SQLite database for C Nostr Relay"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --force Remove existing database before initialization"
|
||||
echo " --info Show database information after initialization"
|
||||
echo " --optimize Run database optimization after initialization"
|
||||
echo " --help Show this help message"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 # Initialize database (with backup if exists)"
|
||||
echo " $0 --force # Force reinitialize database"
|
||||
echo " $0 --info --optimize # Initialize with info and optimization"
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
local force_flag=false
|
||||
local show_info=false
|
||||
local optimize=false
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--force)
|
||||
force_flag=true
|
||||
shift
|
||||
;;
|
||||
--info)
|
||||
show_info=true
|
||||
shift
|
||||
;;
|
||||
--optimize)
|
||||
optimize=true
|
||||
shift
|
||||
;;
|
||||
--help)
|
||||
print_usage
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
log_error "Unknown option: $1"
|
||||
print_usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
log_info "Starting C Nostr Relay database initialization..."
|
||||
|
||||
# Execute initialization steps
|
||||
check_sqlite
|
||||
create_db_directory
|
||||
|
||||
if [ "$force_flag" = false ]; then
|
||||
backup_existing_db
|
||||
fi
|
||||
|
||||
if [ "$force_flag" = true ]; then
|
||||
init_database --force
|
||||
else
|
||||
init_database
|
||||
fi
|
||||
|
||||
verify_database
|
||||
|
||||
if [ "$optimize" = true ]; then
|
||||
optimize_database
|
||||
fi
|
||||
|
||||
if [ "$show_info" = true ]; then
|
||||
show_db_info
|
||||
fi
|
||||
|
||||
log_success "Database initialization completed successfully!"
|
||||
echo ""
|
||||
echo "Database ready at: $DB_PATH"
|
||||
echo "You can now start your C Nostr Relay application."
|
||||
}
|
||||
|
||||
# Execute main function with all arguments
|
||||
main "$@"
|
||||
db/schema.sql (deleted): 181 lines
@@ -1,181 +0,0 @@
|
||||
-- C Nostr Relay Database Schema
|
||||
-- SQLite schema for storing Nostr events with JSON tags support
|
||||
|
||||
-- Schema version tracking
|
||||
PRAGMA user_version = 2;
|
||||
|
||||
-- Enable foreign key support
|
||||
PRAGMA foreign_keys = ON;
|
||||
|
||||
-- Optimize for performance
|
||||
PRAGMA journal_mode = WAL;
|
||||
PRAGMA synchronous = NORMAL;
|
||||
PRAGMA cache_size = 10000;
|
||||
|
||||
-- Core events table with hybrid single-table design
|
||||
CREATE TABLE events (
|
||||
id TEXT PRIMARY KEY, -- Nostr event ID (hex string)
|
||||
pubkey TEXT NOT NULL, -- Public key of event author (hex string)
|
||||
created_at INTEGER NOT NULL, -- Event creation timestamp (Unix timestamp)
|
||||
kind INTEGER NOT NULL, -- Event kind (0-65535)
|
||||
event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
|
||||
content TEXT NOT NULL, -- Event content (text content only)
|
||||
sig TEXT NOT NULL, -- Event signature (hex string)
|
||||
tags JSON NOT NULL DEFAULT '[]', -- Event tags as JSON array
|
||||
first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now')) -- When relay received event
|
||||
);
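-- Illustrative example only (not part of schema.sql): because tags are stored
-- as a JSON array of arrays, a NIP-01 tag filter such as {"#p": ["<pubkey>"]}
-- can be answered with SQLite's JSON1 extension, e.g.:
--
--   SELECT e.content
--   FROM events e, json_each(e.tags) AS t
--   WHERE json_extract(t.value, '$[0]') = 'p'
--     AND json_extract(t.value, '$[1]') = '<pubkey hex>';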
|
||||
|
||||
-- Core performance indexes
|
||||
CREATE INDEX idx_events_pubkey ON events(pubkey);
|
||||
CREATE INDEX idx_events_kind ON events(kind);
|
||||
CREATE INDEX idx_events_created_at ON events(created_at DESC);
|
||||
CREATE INDEX idx_events_event_type ON events(event_type);
|
||||
|
||||
-- Composite indexes for common query patterns
|
||||
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
|
||||
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
|
||||
CREATE INDEX idx_events_pubkey_kind ON events(pubkey, kind);
|
||||
|
||||
-- Schema information table
|
||||
CREATE TABLE schema_info (
|
||||
key TEXT PRIMARY KEY,
|
||||
value TEXT NOT NULL,
|
||||
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
|
||||
);
|
||||
|
||||
-- Insert schema metadata
|
||||
INSERT INTO schema_info (key, value) VALUES
|
||||
('version', '2'),
|
||||
('description', 'Hybrid single-table Nostr relay schema with JSON tags'),
|
||||
('created_at', strftime('%s', 'now'));
|
||||
|
||||
-- Helper views for common queries
|
||||
CREATE VIEW recent_events AS
|
||||
SELECT id, pubkey, created_at, kind, event_type, content
|
||||
FROM events
|
||||
WHERE event_type != 'ephemeral'
|
||||
ORDER BY created_at DESC
|
||||
LIMIT 1000;
|
||||
|
||||
CREATE VIEW event_stats AS
|
||||
SELECT
|
||||
event_type,
|
||||
COUNT(*) as count,
|
||||
AVG(length(content)) as avg_content_length,
|
||||
MIN(created_at) as earliest,
|
||||
MAX(created_at) as latest
|
||||
FROM events
|
||||
GROUP BY event_type;
|
||||
|
||||
-- Optimization: Trigger for automatic cleanup of ephemeral events older than 1 hour
|
||||
CREATE TRIGGER cleanup_ephemeral_events
|
||||
AFTER INSERT ON events
|
||||
WHEN NEW.event_type = 'ephemeral'
|
||||
BEGIN
|
||||
DELETE FROM events
|
||||
WHERE event_type = 'ephemeral'
|
||||
AND first_seen < (strftime('%s', 'now') - 3600);
|
||||
END;
|
||||
|
||||
-- Replaceable event handling trigger
|
||||
CREATE TRIGGER handle_replaceable_events
|
||||
AFTER INSERT ON events
|
||||
WHEN NEW.event_type = 'replaceable'
|
||||
BEGIN
|
||||
DELETE FROM events
|
||||
WHERE pubkey = NEW.pubkey
|
||||
AND kind = NEW.kind
|
||||
AND event_type = 'replaceable'
|
||||
AND id != NEW.id;
|
||||
END;
|
||||
|
||||
-- Persistent Subscriptions Logging Tables (Phase 2)
|
||||
-- Optional database logging for subscription analytics and debugging
|
||||
|
||||
-- Subscription events log
|
||||
CREATE TABLE subscription_events (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
subscription_id TEXT NOT NULL, -- Subscription ID from client
|
||||
client_ip TEXT NOT NULL, -- Client IP address
|
||||
event_type TEXT NOT NULL CHECK (event_type IN ('created', 'closed', 'expired', 'disconnected')),
|
||||
filter_json TEXT, -- JSON representation of filters (for created events)
|
||||
events_sent INTEGER DEFAULT 0, -- Number of events sent to this subscription
|
||||
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
|
||||
ended_at INTEGER, -- When subscription ended (for closed/expired/disconnected)
|
||||
duration INTEGER -- Computed: ended_at - created_at
|
||||
);
|
||||
|
||||
-- Subscription metrics summary
|
||||
CREATE TABLE subscription_metrics (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
date TEXT NOT NULL, -- Date (YYYY-MM-DD)
|
||||
total_created INTEGER DEFAULT 0, -- Total subscriptions created
|
||||
total_closed INTEGER DEFAULT 0, -- Total subscriptions closed
|
||||
total_events_broadcast INTEGER DEFAULT 0, -- Total events broadcast
|
||||
avg_duration REAL DEFAULT 0, -- Average subscription duration
|
||||
peak_concurrent INTEGER DEFAULT 0, -- Peak concurrent subscriptions
|
||||
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
|
||||
UNIQUE(date)
|
||||
);
|
||||
|
||||
-- Event broadcasting log (optional, for detailed analytics)
|
||||
CREATE TABLE event_broadcasts (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
event_id TEXT NOT NULL, -- Event ID that was broadcast
|
||||
subscription_id TEXT NOT NULL, -- Subscription that received it
|
||||
client_ip TEXT NOT NULL, -- Client IP
|
||||
broadcast_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
|
||||
FOREIGN KEY (event_id) REFERENCES events(id)
|
||||
);
|
||||
|
||||
-- Indexes for subscription logging performance
|
||||
CREATE INDEX idx_subscription_events_id ON subscription_events(subscription_id);
|
||||
CREATE INDEX idx_subscription_events_type ON subscription_events(event_type);
|
||||
CREATE INDEX idx_subscription_events_created ON subscription_events(created_at DESC);
|
||||
CREATE INDEX idx_subscription_events_client ON subscription_events(client_ip);
|
||||
|
||||
CREATE INDEX idx_subscription_metrics_date ON subscription_metrics(date DESC);
|
||||
|
||||
CREATE INDEX idx_event_broadcasts_event ON event_broadcasts(event_id);
|
||||
CREATE INDEX idx_event_broadcasts_sub ON event_broadcasts(subscription_id);
|
||||
CREATE INDEX idx_event_broadcasts_time ON event_broadcasts(broadcast_at DESC);
|
||||
|
||||
-- Trigger to update subscription duration when ended
|
||||
CREATE TRIGGER update_subscription_duration
|
||||
AFTER UPDATE OF ended_at ON subscription_events
|
||||
WHEN NEW.ended_at IS NOT NULL AND OLD.ended_at IS NULL
|
||||
BEGIN
|
||||
UPDATE subscription_events
|
||||
SET duration = NEW.ended_at - NEW.created_at
|
||||
WHERE id = NEW.id;
|
||||
END;
|
||||
|
||||
-- View for subscription analytics
|
||||
CREATE VIEW subscription_analytics AS
|
||||
SELECT
|
||||
date(created_at, 'unixepoch') as date,
|
||||
COUNT(*) as subscriptions_created,
|
||||
COUNT(CASE WHEN ended_at IS NOT NULL THEN 1 END) as subscriptions_ended,
|
||||
AVG(CASE WHEN duration IS NOT NULL THEN duration END) as avg_duration_seconds,
|
||||
MAX(events_sent) as max_events_sent,
|
||||
AVG(events_sent) as avg_events_sent,
|
||||
COUNT(DISTINCT client_ip) as unique_clients
|
||||
FROM subscription_events
|
||||
GROUP BY date(created_at, 'unixepoch')
|
||||
ORDER BY date DESC;
|
||||
|
||||
-- View for current active subscriptions (from log perspective)
|
||||
CREATE VIEW active_subscriptions_log AS
|
||||
SELECT
|
||||
subscription_id,
|
||||
client_ip,
|
||||
filter_json,
|
||||
events_sent,
|
||||
created_at,
|
||||
(strftime('%s', 'now') - created_at) as duration_seconds
|
||||
FROM subscription_events
|
||||
WHERE event_type = 'created'
|
||||
AND subscription_id NOT IN (
|
||||
SELECT subscription_id FROM subscription_events
|
||||
WHERE event_type IN ('closed', 'expired', 'disconnected')
|
||||
);
|
||||
docs/config_schema_design.md (new file): 280 lines
@@ -0,0 +1,280 @@
|
||||
# Database Configuration Schema Design
|
||||
|
||||
## Overview
|
||||
This document outlines the database configuration schema additions for the C Nostr Relay startup config file system. The design follows the Ginxsom admin system approach with signed Nostr events and database storage.
|
||||
|
||||
## Schema Version Update
|
||||
- Current Version: 2
|
||||
- Target Version: 3
|
||||
- Update: Add server configuration management tables
|
||||
|
||||
## Core Configuration Tables
|
||||
|
||||
### 1. `server_config` Table
|
||||
|
||||
```sql
|
||||
-- Server configuration table - core configuration storage
|
||||
CREATE TABLE server_config (
|
||||
key TEXT PRIMARY KEY, -- Configuration key (unique identifier)
|
||||
value TEXT NOT NULL, -- Configuration value (stored as string)
|
||||
description TEXT, -- Human-readable description
|
||||
config_type TEXT DEFAULT 'user' CHECK (config_type IN ('system', 'user', 'runtime')),
|
||||
data_type TEXT DEFAULT 'string' CHECK (data_type IN ('string', 'integer', 'boolean', 'json')),
|
||||
validation_rules TEXT, -- JSON validation rules (optional)
|
||||
is_sensitive INTEGER DEFAULT 0, -- 1 if value should be masked in logs
|
||||
requires_restart INTEGER DEFAULT 0, -- 1 if change requires server restart
|
||||
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
|
||||
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
|
||||
);
|
||||
```
|
||||
|
||||
**Configuration Types:**
|
||||
- `system`: Core system settings (admin keys, security)
|
||||
- `user`: User-configurable settings (relay info, features)
|
||||
- `runtime`: Dynamic runtime values (statistics, cache)
|
||||
|
||||
**Data Types:**
|
||||
- `string`: Text values
|
||||
- `integer`: Numeric values
|
||||
- `boolean`: True/false values (stored as "true"/"false")
|
||||
- `json`: JSON object/array values
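
For illustration only (the helper name is hypothetical and not part of this design), reading one of these typed values back out of `server_config` and converting it according to `data_type` could look like this in C:

```c
#include <stdlib.h>
#include <sqlite3.h>

/* Hypothetical helper: fetch an integer config value, falling back to a
 * default when the key is missing. Values are stored as TEXT, so the
 * data_type column tells the caller how to interpret them. */
static int config_get_int(sqlite3 *db, const char *key, int fallback) {
    const char *sql = "SELECT value FROM server_config "
                      "WHERE key = ? AND data_type = 'integer';";
    sqlite3_stmt *stmt = NULL;
    int result = fallback;

    if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK)
        return fallback;
    sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
    if (sqlite3_step(stmt) == SQLITE_ROW)
        result = atoi((const char *)sqlite3_column_text(stmt, 0));
    sqlite3_finalize(stmt);
    return result;
}

/* Usage: int port = config_get_int(db, "relay_port", 8888); */
```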
|
||||
|
||||
### 2. `config_history` Table
|
||||
|
||||
```sql
|
||||
-- Configuration change history table
|
||||
CREATE TABLE config_history (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
config_key TEXT NOT NULL, -- Key that was changed
|
||||
old_value TEXT, -- Previous value (NULL for new keys)
|
||||
new_value TEXT NOT NULL, -- New value
|
||||
changed_by TEXT DEFAULT 'system', -- Who made the change (system/admin/user)
|
||||
change_reason TEXT, -- Optional reason for change
|
||||
changed_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
|
||||
FOREIGN KEY (config_key) REFERENCES server_config(key)
|
||||
);
|
||||
```
|
||||
|
||||
### 3. `config_validation_log` Table
|
||||
|
||||
```sql
|
||||
-- Configuration validation errors log
|
||||
CREATE TABLE config_validation_log (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
config_key TEXT NOT NULL,
|
||||
attempted_value TEXT,
|
||||
validation_error TEXT NOT NULL,
|
||||
error_source TEXT DEFAULT 'validation', -- validation/parsing/constraint
|
||||
attempted_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
|
||||
);
|
||||
```
|
||||
|
||||
### 4. Configuration File Cache Table
|
||||
|
||||
```sql
|
||||
-- Cache for file-based configuration events
|
||||
CREATE TABLE config_file_cache (
|
||||
file_path TEXT PRIMARY KEY, -- Full path to config file
|
||||
file_hash TEXT NOT NULL, -- SHA256 hash of file content
|
||||
event_id TEXT, -- Nostr event ID from file
|
||||
event_pubkey TEXT, -- Admin pubkey that signed event
|
||||
loaded_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
|
||||
validation_status TEXT CHECK (validation_status IN ('valid', 'invalid', 'unverified')),
|
||||
validation_error TEXT -- Error details if invalid
|
||||
);
|
||||
```
|
||||
|
||||
## Indexes and Performance
|
||||
|
||||
```sql
|
||||
-- Performance indexes for configuration tables
|
||||
CREATE INDEX idx_server_config_type ON server_config(config_type);
|
||||
CREATE INDEX idx_server_config_updated ON server_config(updated_at DESC);
|
||||
CREATE INDEX idx_config_history_key ON config_history(config_key);
|
||||
CREATE INDEX idx_config_history_time ON config_history(changed_at DESC);
|
||||
CREATE INDEX idx_config_validation_key ON config_validation_log(config_key);
|
||||
CREATE INDEX idx_config_validation_time ON config_validation_log(attempted_at DESC);
|
||||
```
|
||||
|
||||
## Triggers
|
||||
|
||||
### Update Timestamp Trigger
|
||||
|
||||
```sql
|
||||
-- Trigger to update timestamp on configuration changes
|
||||
CREATE TRIGGER update_config_timestamp
|
||||
AFTER UPDATE ON server_config
|
||||
BEGIN
|
||||
UPDATE server_config SET updated_at = strftime('%s', 'now') WHERE key = NEW.key;
|
||||
END;
|
||||
```
|
||||
|
||||
### Configuration History Trigger
|
||||
|
||||
```sql
|
||||
-- Trigger to log configuration changes to history
|
||||
CREATE TRIGGER log_config_changes
|
||||
AFTER UPDATE ON server_config
|
||||
WHEN OLD.value != NEW.value
|
||||
BEGIN
|
||||
INSERT INTO config_history (config_key, old_value, new_value, changed_by, change_reason)
|
||||
VALUES (NEW.key, OLD.value, NEW.value, 'system', 'configuration update');
|
||||
END;
|
||||
```
|
||||
|
||||
## Default Configuration Values
|
||||
|
||||
### Core System Settings
|
||||
|
||||
```sql
|
||||
INSERT OR IGNORE INTO server_config (key, value, description, config_type, data_type, requires_restart) VALUES
|
||||
-- Administrative settings
|
||||
('admin_pubkey', '', 'Authorized admin public key (hex)', 'system', 'string', 1),
|
||||
('admin_enabled', 'false', 'Enable admin interface', 'system', 'boolean', 1),
|
||||
|
||||
-- Server core settings
|
||||
('relay_port', '8888', 'WebSocket server port', 'user', 'integer', 1),
|
||||
('database_path', 'db/c_nostr_relay.db', 'SQLite database file path', 'user', 'string', 1),
|
||||
('max_connections', '100', 'Maximum concurrent connections', 'user', 'integer', 1),
|
||||
|
||||
-- NIP-11 Relay Information
|
||||
('relay_name', 'C Nostr Relay', 'Relay name for NIP-11', 'user', 'string', 0),
|
||||
('relay_description', 'High-performance C Nostr relay with SQLite storage', 'Relay description', 'user', 'string', 0),
|
||||
('relay_contact', '', 'Contact information', 'user', 'string', 0),
|
||||
('relay_pubkey', '', 'Relay public key', 'user', 'string', 0),
|
||||
('relay_software', 'https://git.laantungir.net/laantungir/c-relay.git', 'Software URL', 'user', 'string', 0),
|
||||
('relay_version', '0.2.0', 'Software version', 'user', 'string', 0),
|
||||
|
||||
-- NIP-13 Proof of Work
|
||||
('pow_enabled', 'true', 'Enable NIP-13 Proof of Work validation', 'user', 'boolean', 0),
|
||||
('pow_min_difficulty', '0', 'Minimum PoW difficulty required', 'user', 'integer', 0),
|
||||
('pow_mode', 'basic', 'PoW validation mode (basic/full/strict)', 'user', 'string', 0),
|
||||
|
||||
-- NIP-40 Expiration Timestamp
|
||||
('expiration_enabled', 'true', 'Enable NIP-40 expiration handling', 'user', 'boolean', 0),
|
||||
('expiration_strict', 'true', 'Reject expired events on submission', 'user', 'boolean', 0),
|
||||
('expiration_filter', 'true', 'Filter expired events from responses', 'user', 'boolean', 0),
|
||||
('expiration_grace_period', '300', 'Grace period for clock skew (seconds)', 'user', 'integer', 0),
|
||||
|
||||
-- Subscription limits
|
||||
('max_subscriptions_per_client', '20', 'Max subscriptions per client', 'user', 'integer', 0),
|
||||
('max_total_subscriptions', '5000', 'Max total concurrent subscriptions', 'user', 'integer', 0),
|
||||
('subscription_id_max_length', '64', 'Maximum subscription ID length', 'user', 'integer', 0),
|
||||
|
||||
-- Event processing limits
|
||||
('max_event_tags', '100', 'Maximum tags per event', 'user', 'integer', 0),
|
||||
('max_content_length', '8196', 'Maximum content length', 'user', 'integer', 0),
|
||||
('max_message_length', '16384', 'Maximum message length', 'user', 'integer', 0),
|
||||
|
||||
-- Performance settings
|
||||
('default_limit', '500', 'Default query limit', 'user', 'integer', 0),
|
||||
('max_limit', '5000', 'Maximum query limit', 'user', 'integer', 0);
|
||||
```
|
||||
|
||||
### Runtime Statistics
|
||||
|
||||
```sql
|
||||
INSERT OR IGNORE INTO server_config (key, value, description, config_type, data_type) VALUES
|
||||
-- Runtime statistics (updated by server)
|
||||
('server_start_time', '0', 'Server startup timestamp', 'runtime', 'integer'),
|
||||
('total_events_processed', '0', 'Total events processed', 'runtime', 'integer'),
|
||||
('total_subscriptions_created', '0', 'Total subscriptions created', 'runtime', 'integer'),
|
||||
('current_connections', '0', 'Current active connections', 'runtime', 'integer'),
|
||||
('database_size_bytes', '0', 'Database file size in bytes', 'runtime', 'integer');
|
||||
```
|
||||
|
||||
## Configuration Views
|
||||
|
||||
### Active Configuration View
|
||||
|
||||
```sql
|
||||
CREATE VIEW active_config AS
|
||||
SELECT
|
||||
key,
|
||||
value,
|
||||
description,
|
||||
config_type,
|
||||
data_type,
|
||||
requires_restart,
|
||||
updated_at
|
||||
FROM server_config
|
||||
WHERE config_type IN ('system', 'user')
|
||||
ORDER BY config_type, key;
|
||||
```
|
||||
|
||||
### Runtime Statistics View
|
||||
|
||||
```sql
|
||||
CREATE VIEW runtime_stats AS
|
||||
SELECT
|
||||
key,
|
||||
value,
|
||||
description,
|
||||
updated_at
|
||||
FROM server_config
|
||||
WHERE config_type = 'runtime'
|
||||
ORDER BY key;
|
||||
```
|
||||
|
||||
### Configuration Change Summary
|
||||
|
||||
```sql
|
||||
CREATE VIEW recent_config_changes AS
|
||||
SELECT
|
||||
ch.config_key,
|
||||
sc.description,
|
||||
ch.old_value,
|
||||
ch.new_value,
|
||||
ch.changed_by,
|
||||
ch.change_reason,
|
||||
ch.changed_at
|
||||
FROM config_history ch
|
||||
JOIN server_config sc ON ch.config_key = sc.key
|
||||
ORDER BY ch.changed_at DESC
|
||||
LIMIT 50;
|
||||
```
|
||||
|
||||
## Validation Rules Format
|
||||
|
||||
Configuration validation rules are stored as JSON strings in the `validation_rules` column:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "integer",
|
||||
"min": 1,
|
||||
"max": 65535,
|
||||
"required": true
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "string",
|
||||
"pattern": "^[0-9a-fA-F]{64}$",
|
||||
"required": false,
|
||||
"description": "64-character hex string"
|
||||
}
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "boolean",
|
||||
"required": true
|
||||
}
|
||||
```
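
A minimal sketch of how such a rule could be checked in C, using the cJSON library that the companion file-based configuration design already assumes; the function name and the handling of missing bounds are illustrative, not part of the design:

```c
#include <cjson/cJSON.h>

/* Hypothetical sketch: check an integer configuration value against a
 * validation rule such as {"type":"integer","min":1,"max":65535}.
 * Returns 1 when the value passes, 0 otherwise. Absent bounds are ignored. */
static int validate_int_rule(const char *rule_json, long value) {
    cJSON *rule = cJSON_Parse(rule_json);
    if (!rule) return 0;

    int ok = 1;
    const cJSON *min = cJSON_GetObjectItem(rule, "min");
    const cJSON *max = cJSON_GetObjectItem(rule, "max");
    if (cJSON_IsNumber(min) && value < (long)min->valuedouble) ok = 0;
    if (cJSON_IsNumber(max) && value > (long)max->valuedouble) ok = 0;

    cJSON_Delete(rule);
    return ok;
}

/* e.g. validate_int_rule("{\"type\":\"integer\",\"min\":1,\"max\":65535}", 8888) returns 1 */
```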
|
||||
|
||||
## Migration Strategy
|
||||
|
||||
1. **Phase 1**: Add configuration tables to existing schema
|
||||
2. **Phase 2**: Populate with current hardcoded values
|
||||
3. **Phase 3**: Update application code to read from database
|
||||
4. **Phase 4**: Add file-based configuration loading
|
||||
5. **Phase 5**: Remove hardcoded defaults and environment variable fallbacks
|
||||
|
||||
## Integration Points
|
||||
|
||||
- **Startup**: Load configuration from file → database → apply to application
|
||||
- **Runtime**: Read configuration values from database cache
|
||||
- **Updates**: Write changes to database → optionally update file
|
||||
- **Validation**: Validate all configuration changes before applying
|
||||
- **History**: Track all configuration changes for audit purposes
|
||||
docs/file_config_design.md (new file): 493 lines
@@ -0,0 +1,493 @@
|
||||
# File-Based Configuration Architecture Design
|
||||
|
||||
## Overview
|
||||
This document outlines the XDG-compliant file-based configuration system for the C Nostr Relay, following the Ginxsom admin system approach using signed Nostr events.
|
||||
|
||||
## XDG Base Directory Specification Compliance
|
||||
|
||||
### File Location Strategy
|
||||
|
||||
**Primary Location:**
|
||||
```
|
||||
$XDG_CONFIG_HOME/c-relay/c_relay_config_event.json
|
||||
```
|
||||
|
||||
**Fallback Location:**
|
||||
```
|
||||
$HOME/.config/c-relay/c_relay_config_event.json
|
||||
```
|
||||
|
||||
**System-wide Fallback:**
|
||||
```
|
||||
/etc/c-relay/c_relay_config_event.json
|
||||
```
|
||||
|
||||
### Directory Structure
|
||||
```
|
||||
$XDG_CONFIG_HOME/c-relay/
|
||||
├── c_relay_config_event.json # Main configuration file
|
||||
├── backup/ # Configuration backups
|
||||
│ ├── c_relay_config_event.json.bak
|
||||
│ └── c_relay_config_event.20241205.json
|
||||
└── validation/ # Validation logs
|
||||
└── config_validation.log
|
||||
```
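
A minimal sketch of that resolution order, assuming the `get_config_file_path()` signature declared later in this document; a real implementation would presumably also check whether each candidate file actually exists before falling through to the next location:

```c
#include <stdio.h>
#include <stdlib.h>

/* Resolve the config file path: $XDG_CONFIG_HOME, then $HOME/.config,
 * then the system-wide /etc fallback. Returns 0 on success. */
int get_config_file_path(char *path, size_t path_size) {
    const char *xdg = getenv("XDG_CONFIG_HOME");
    const char *home = getenv("HOME");

    if (xdg && *xdg) {
        snprintf(path, path_size, "%s/c-relay/c_relay_config_event.json", xdg);
    } else if (home && *home) {
        snprintf(path, path_size, "%s/.config/c-relay/c_relay_config_event.json", home);
    } else {
        snprintf(path, path_size, "/etc/c-relay/c_relay_config_event.json");
    }
    return 0;
}
```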
|
||||
|
||||
## Configuration File Format
|
||||
|
||||
### Signed Nostr Event Structure
|
||||
|
||||
The configuration file contains a signed Nostr event (kind 33334) with relay configuration:
|
||||
|
||||
```json
|
||||
{
|
||||
"kind": 33334,
|
||||
"created_at": 1704067200,
|
||||
"tags": [
|
||||
["relay_name", "C Nostr Relay"],
|
||||
["relay_description", "High-performance C Nostr relay with SQLite storage"],
|
||||
["relay_port", "8888"],
|
||||
["database_path", "db/c_nostr_relay.db"],
|
||||
["admin_pubkey", ""],
|
||||
["admin_enabled", "false"],
|
||||
|
||||
["pow_enabled", "true"],
|
||||
["pow_min_difficulty", "0"],
|
||||
["pow_mode", "basic"],
|
||||
|
||||
["expiration_enabled", "true"],
|
||||
["expiration_strict", "true"],
|
||||
["expiration_filter", "true"],
|
||||
["expiration_grace_period", "300"],
|
||||
|
||||
["max_subscriptions_per_client", "20"],
|
||||
["max_total_subscriptions", "5000"],
|
||||
["max_connections", "100"],
|
||||
|
||||
["relay_contact", ""],
|
||||
["relay_pubkey", ""],
|
||||
["relay_software", "https://git.laantungir.net/laantungir/c-relay.git"],
|
||||
["relay_version", "0.2.0"],
|
||||
|
||||
["max_event_tags", "100"],
|
||||
["max_content_length", "8196"],
|
||||
["max_message_length", "16384"],
|
||||
["default_limit", "500"],
|
||||
["max_limit", "5000"]
|
||||
],
|
||||
"content": "C Nostr Relay configuration event",
|
||||
"pubkey": "admin_public_key_hex_64_chars",
|
||||
"id": "computed_event_id_hex_64_chars",
|
||||
"sig": "computed_signature_hex_128_chars"
|
||||
}
|
||||
```
|
||||
|
||||
### Event Kind Definition
|
||||
|
||||
**Kind 33334**: C Nostr Relay Configuration Event
|
||||
- Parameterized replaceable event
|
||||
- Must be signed by authorized admin pubkey
|
||||
- Contains relay configuration as tags
|
||||
- Validation required on load
|
||||
|
||||
## Configuration Loading Architecture
|
||||
|
||||
### Loading Priority Chain
|
||||
|
||||
1. **Command Line Arguments** (highest priority)
|
||||
2. **File-based Configuration** (signed Nostr event)
|
||||
3. **Database Configuration** (persistent storage)
|
||||
4. **Environment Variables** (compatibility mode)
|
||||
5. **Hardcoded Defaults** (fallback)
|
||||
|
||||
### Loading Process Flow
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[Server Startup] --> B[Get Config File Path]
|
||||
B --> C{File Exists?}
|
||||
C -->|No| D[Check Database Config]
|
||||
C -->|Yes| E[Load & Parse JSON]
|
||||
E --> F[Validate Event Structure]
|
||||
F --> G{Valid Event?}
|
||||
G -->|No| H[Log Error & Use Database]
|
||||
G -->|Yes| I[Verify Event Signature]
|
||||
I --> J{Signature Valid?}
|
||||
J -->|No| K[Log Error & Use Database]
|
||||
J -->|Yes| L[Extract Configuration Tags]
|
||||
L --> M[Apply to Database]
|
||||
M --> N[Apply to Application]
|
||||
D --> O[Load from Database]
|
||||
H --> O
|
||||
K --> O
|
||||
O --> P[Apply Environment Variable Overrides]
|
||||
P --> Q[Apply Command Line Overrides]
|
||||
Q --> N
|
||||
N --> R[Server Ready]
|
||||
```
|
||||
|
||||
## C Implementation Architecture
|
||||
|
||||
### Core Data Structures
|
||||
|
||||
```c
|
||||
// Configuration file management
|
||||
typedef struct {
|
||||
char file_path[512];
|
||||
char file_hash[65]; // SHA256 hash
|
||||
time_t last_modified;
|
||||
time_t last_loaded;
|
||||
int validation_status; // 0=valid, 1=invalid, 2=unverified
|
||||
char validation_error[256];
|
||||
} config_file_info_t;
|
||||
|
||||
// Configuration event structure
|
||||
typedef struct {
|
||||
char event_id[65];
|
||||
char pubkey[65];
|
||||
char signature[129];
|
||||
long created_at;
|
||||
int kind;
|
||||
cJSON* tags;
|
||||
char* content;
|
||||
} config_event_t;
|
||||
|
||||
// Configuration management context
|
||||
typedef struct {
|
||||
config_file_info_t file_info;
|
||||
config_event_t event;
|
||||
int loaded_from_file;
|
||||
int loaded_from_database;
|
||||
char admin_pubkey[65];
|
||||
time_t load_timestamp;
|
||||
} config_context_t;
|
||||
```
|
||||
|
||||
### Core Function Signatures
|
||||
|
||||
```c
|
||||
// XDG path resolution
|
||||
int get_config_file_path(char* path, size_t path_size);
|
||||
int create_config_directories(const char* config_path);
|
||||
|
||||
// File operations
|
||||
int load_config_from_file(const char* config_path, config_context_t* ctx);
|
||||
int save_config_to_file(const char* config_path, const config_event_t* event);
|
||||
int backup_config_file(const char* config_path);
|
||||
|
||||
// Event validation
|
||||
int validate_config_event_structure(const cJSON* event);
|
||||
int verify_config_event_signature(const config_event_t* event, const char* admin_pubkey);
|
||||
int validate_config_tag_values(const cJSON* tags);
|
||||
|
||||
// Configuration extraction and application
|
||||
int extract_config_from_tags(const cJSON* tags, config_context_t* ctx);
|
||||
int apply_config_to_database(const config_context_t* ctx);
|
||||
int apply_config_to_globals(const config_context_t* ctx);
|
||||
|
||||
// File monitoring and updates
|
||||
int monitor_config_file_changes(const char* config_path);
|
||||
int reload_config_on_change(config_context_t* ctx);
|
||||
|
||||
// Error handling and logging
|
||||
int log_config_validation_error(const char* config_key, const char* error);
|
||||
int log_config_load_event(const config_context_t* ctx, const char* source);
|
||||
```
|
||||
|
||||
## Configuration Validation Rules
|
||||
|
||||
### Event Structure Validation
|
||||
|
||||
1. **Required Fields**: `kind`, `created_at`, `tags`, `content`, `pubkey`, `id`, `sig`
|
||||
2. **Kind Validation**: Must be exactly 33334
|
||||
3. **Timestamp Validation**: Must be reasonable (not too old, not future)
|
||||
4. **Tags Format**: Array of string arrays `[["key", "value"], ...]`
|
||||
5. **Signature Verification**: Must be signed by authorized admin pubkey
|
||||
|
||||
### Configuration Value Validation
|
||||
|
||||
```c
|
||||
typedef struct {
|
||||
char* key;
|
||||
char* data_type; // "string", "integer", "boolean", "json"
|
||||
char* validation_rule; // JSON validation rule
|
||||
int required;
|
||||
char* default_value;
|
||||
} config_validation_rule_t;
|
||||
|
||||
static config_validation_rule_t validation_rules[] = {
|
||||
{"relay_port", "integer", "{\"min\": 1, \"max\": 65535}", 1, "8888"},
|
||||
{"pow_min_difficulty", "integer", "{\"min\": 0, \"max\": 64}", 1, "0"},
|
||||
{"expiration_grace_period", "integer", "{\"min\": 0, \"max\": 86400}", 1, "300"},
|
||||
{"admin_pubkey", "string", "{\"pattern\": \"^[0-9a-fA-F]{64}$\"}", 0, ""},
|
||||
{"pow_enabled", "boolean", "{}", 1, "true"},
|
||||
// ... more rules
|
||||
};
|
||||
```
|
||||
|
||||
### Security Validation
|
||||
|
||||
1. **Admin Pubkey Verification**: Only configured admin pubkeys can create config events
|
||||
2. **Event ID Verification**: Event ID must match computed hash
|
||||
3. **Signature Verification**: Signature must be valid for the event and pubkey
|
||||
4. **Timestamp Validation**: Prevent replay attacks with old events
|
||||
5. **File Permission Checks**: Config files should have appropriate permissions
|
||||
|
||||
## File Management Features
|
||||
|
||||
### Configuration File Operations
|
||||
|
||||
**File Creation:**
|
||||
- Generate initial configuration file with default values
|
||||
- Sign with admin private key
|
||||
- Set appropriate file permissions (600 - owner read/write only)
|
||||
|
||||
**File Updates:**
|
||||
- Create backup of existing file
|
||||
- Validate new configuration
|
||||
- Atomic file replacement (write to temp, then rename; see the sketch after this list)
|
||||
- Update file metadata cache
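
As an illustration of the atomic-replacement step only (backup, validation, and metadata updates are described separately above), a POSIX sketch using mkstemp() and rename(); the function name is hypothetical:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/stat.h>

/* Write new content to a temporary file in the same directory, flush it to
 * disk, then rename() it over the active config file. rename() is atomic on
 * POSIX filesystems, so readers never observe a partially written file. */
static int write_config_atomically(const char *config_path, const char *content) {
    char tmp_path[512];
    snprintf(tmp_path, sizeof(tmp_path), "%s.tmpXXXXXX", config_path);

    int fd = mkstemp(tmp_path);
    if (fd < 0) return -1;
    fchmod(fd, 0600);                      /* match the 600-permissions policy */

    size_t len = strlen(content);
    if (write(fd, content, len) != (ssize_t)len || fsync(fd) != 0) {
        close(fd);
        unlink(tmp_path);
        return -1;
    }
    close(fd);

    if (rename(tmp_path, config_path) != 0) {
        unlink(tmp_path);
        return -1;
    }
    return 0;
}
```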
|
||||
|
||||
**File Monitoring:**
|
||||
- Watch for file system changes using inotify (Linux)
|
||||
- Reload configuration automatically when file changes
|
||||
- Validate changes before applying
|
||||
- Log all configuration reload events
|
||||
|
||||
### Backup and Recovery
|
||||
|
||||
**Automatic Backups:**
|
||||
```
|
||||
$XDG_CONFIG_HOME/c-relay/backup/
|
||||
├── c_relay_config_event.json.bak # Last working config
|
||||
├── c_relay_config_event.20241205-143022.json # Timestamped backups
|
||||
└── c_relay_config_event.20241204-091530.json
|
||||
```
|
||||
|
||||
**Recovery Process:**
|
||||
1. Detect corrupted or invalid config file
|
||||
2. Attempt to load from `.bak` backup
|
||||
3. If backup fails, generate default configuration
|
||||
4. Log recovery actions for audit
|
||||
|
||||
## Integration with Database Schema
|
||||
|
||||
### File-Database Synchronization
|
||||
|
||||
**On File Load:**
|
||||
1. Parse and validate file-based configuration
|
||||
2. Extract configuration values from event tags
|
||||
3. Update database `server_config` table
|
||||
4. Record file metadata in `config_file_cache` table
|
||||
5. Log configuration changes in `config_history` table
|
||||
|
||||
**Configuration Priority Resolution:**
|
||||
```c
|
||||
char* get_config_value(const char* key, const char* default_value) {
|
||||
// Priority: CLI args > File config > DB config > Env vars > Default
|
||||
char* value = NULL;
|
||||
|
||||
// 1. Check command line overrides (if implemented)
|
||||
value = get_cli_override(key);
|
||||
if (value) return value;
|
||||
|
||||
// 2. Check database (updated from file)
|
||||
value = get_database_config(key);
|
||||
if (value) return value;
|
||||
|
||||
// 3. Check environment variables (compatibility)
|
||||
value = get_env_config(key);
|
||||
if (value) return value;
|
||||
|
||||
// 4. Return default
|
||||
return strdup(default_value);
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling and Recovery
|
||||
|
||||
### Validation Error Handling
|
||||
|
||||
```c
|
||||
typedef enum {
|
||||
CONFIG_ERROR_NONE = 0,
|
||||
CONFIG_ERROR_FILE_NOT_FOUND = 1,
|
||||
CONFIG_ERROR_PARSE_FAILED = 2,
|
||||
CONFIG_ERROR_INVALID_STRUCTURE = 3,
|
||||
CONFIG_ERROR_SIGNATURE_INVALID = 4,
|
||||
CONFIG_ERROR_UNAUTHORIZED = 5,
|
||||
CONFIG_ERROR_VALUE_INVALID = 6,
|
||||
CONFIG_ERROR_IO_ERROR = 7
|
||||
} config_error_t;
|
||||
|
||||
typedef struct {
|
||||
config_error_t error_code;
|
||||
char error_message[256];
|
||||
char config_key[64];
|
||||
char invalid_value[128];
|
||||
time_t error_timestamp;
|
||||
} config_error_info_t;
|
||||
```
|
||||
|
||||
### Graceful Degradation

**File Load Failure:**
1. Log detailed error information
2. Fall back to database configuration (sketched after these lists)
3. Continue operation with last known good config
4. Set service status to "degraded" mode

**Validation Failure:**
1. Log validation errors with specific details
2. Skip invalid configuration items
3. Use default values for failed items
4. Continue with partial configuration

**Permission Errors:**
1. Log permission issues
2. Attempt to use fallback locations
3. Generate temporary config if needed
4. Alert administrator via logs
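A rough sketch of the file-load fallback path, using the loader functions declared in `src/config.h`; the `g_config_degraded` flag is an assumption standing in for whatever "degraded" status the relay exposes:

```c
#include "config.h"

// Relay logging helpers (declared in main.c)
void log_warning(const char* message);
void log_error(const char* message);

// Illustrative startup path: prefer the signed config file, fall back to the
// database copy if the file is missing or fails validation.
static int g_config_degraded = 0;   // hypothetical "degraded mode" flag

static int load_configuration_with_fallback(void) {
    if (config_file_exists() && load_config_from_file() == 0) {
        return 0;   // file config loaded and synced into the database
    }

    log_warning("Config file missing or invalid, falling back to database configuration");
    if (load_config_from_database() == 0) {
        g_config_degraded = 1;   // running on last known good values
        return 0;
    }

    log_error("No usable configuration found, continuing with compiled-in defaults");
    g_config_degraded = 1;
    return -1;
}
```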
## Configuration Update Process

### Safe Configuration Updates

**Atomic Update Process:**
1. Create backup of current configuration
2. Write new configuration to temporary file
3. Validate new configuration completely
4. If valid, rename temporary file to active config
5. Update database with new values
6. Apply changes to running server
7. Log successful update

**Rollback Process:**
1. Detect invalid configuration at startup
2. Restore from backup file
3. Log rollback event
4. Continue with previous working configuration
### Hot Reload Support

**File Change Detection:**
```c
#include <sys/inotify.h>
#include <unistd.h>

int monitor_config_file_changes(const char* config_path) {
    // Use inotify on Linux to watch the config file for changes
    int inotify_fd = inotify_init1(IN_CLOEXEC);
    if (inotify_fd < 0) return -1;

    int watch_fd = inotify_add_watch(inotify_fd, config_path, IN_MODIFY | IN_MOVED_TO);
    if (watch_fd < 0) { close(inotify_fd); return -1; }

    // A dedicated thread then read()s events from inotify_fd
    // On change: validate -> apply -> log
    return 0;
}
```
**Runtime Configuration Updates:**
- Reload configuration on file change
- Apply non-restart-required changes immediately
- Queue restart-required changes for the next restart (see the sketch below)
- Notify operators of configuration changes
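A sketch of how a reload might split those two cases using `config_requires_restart()` from `src/config.h`; the pending-restart flag is an assumption:

```c
#include "config.h"   // config_requires_restart(), set_config_value()

// Hypothetical flag; how "pending restart" is surfaced to operators is not specified here.
static int g_restart_pending = 0;

// Apply one reloaded key/value pair, deferring keys that need a restart.
static void apply_reloaded_value(const char* key, const char* value) {
    if (config_requires_restart(key)) {
        g_restart_pending = 1;          // queued until the next restart
        return;
    }
    set_config_value(key, value);       // non-restart keys take effect immediately
}
```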
## Security Considerations

### Access Control

**File Permissions:**
- Config files: 600 (owner read/write only; checked at load time, see the sketch below)
- Directories: 700 (owner access only)
- Backup files: 600 (owner read/write only)
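A minimal sketch of the load-time permission check; whether the relay rejects or merely warns on loose permissions is not specified here, so the hard failure below is an assumption:

```c
#include <sys/stat.h>

// Reject config files that are readable or writable by group/other
// (anything looser than the 600 policy above).
static int check_config_permissions(const char* path) {
    struct stat st;
    if (stat(path, &st) != 0) {
        return -1;                          // missing or unreadable
    }
    if (st.st_mode & (S_IRWXG | S_IRWXO)) {
        return -1;                          // group/other bits set: too open
    }
    return 0;
}
```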
**Admin Key Management:**
- Admin private keys never stored in config files
- Only admin pubkeys stored for verification
- Support for multiple admin pubkeys
- Key rotation support
### Signature Validation

**Event Signature Verification:**
```c
int verify_config_event_signature(const config_event_t* event, const char* admin_pubkey) {
    // 1. Reconstruct the event serialization used for signing (without id and sig)
    // 2. Compute the event ID and verify it against the stored ID
    // 3. Verify the Schnorr signature using the admin pubkey
    // 4. Check that the pubkey is an authorized admin
    return NOSTR_SUCCESS;
}
```
**Anti-Replay Protection:**
- Configuration events must be newer than the currently applied one (see the sketch below)
- Event timestamps validated against reasonable bounds
- Configuration history prevents replay attacks
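A sketch of that freshness check, assuming the relay tracks the `created_at` of the last applied configuration event; the 300-second future-skew bound is illustrative:

```c
#include <time.h>

#define CONFIG_MAX_FUTURE_SKEW 300   /* seconds; illustrative bound */

// Reject config events that are older than the applied one or too far in the future.
static int check_config_event_freshness(long event_created_at, long last_applied_created_at) {
    time_t now = time(NULL);

    if (event_created_at <= last_applied_created_at) {
        return -1;   // replayed or stale configuration event
    }
    if (event_created_at > (long)now + CONFIG_MAX_FUTURE_SKEW) {
        return -1;   // timestamp implausibly far in the future
    }
    return 0;
}
```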
## Implementation Phases

### Phase 1: Basic File Support
- XDG path resolution
- File loading and parsing
- Basic validation
- Database integration

### Phase 2: Security Features
- Event signature verification
- Admin pubkey management
- File permission checks
- Error handling

### Phase 3: Advanced Features
- Hot reload support
- Automatic backups
- Configuration utilities
- Interactive setup

### Phase 4: Monitoring & Management
- Configuration change monitoring
- Advanced validation rules
- Configuration audit logging
- Management utilities
## Configuration Generation Utilities

### Interactive Setup Script

```bash
#!/bin/bash
# scripts/setup_config.sh - Interactive configuration setup

create_initial_config() {
    echo "=== C Nostr Relay Initial Configuration ==="

    # Collect basic information
    read -p "Relay name [C Nostr Relay]: " relay_name
    read -p "Admin public key (hex): " admin_pubkey
    read -p "Server port [8888]: " server_port

    # Generate signed configuration event
    ./scripts/generate_config.sh \
        --admin-key "$admin_pubkey" \
        --relay-name "${relay_name:-C Nostr Relay}" \
        --port "${server_port:-8888}" \
        --output "$XDG_CONFIG_HOME/c-relay/c_relay_config_event.json"
}
```
### Configuration Validation Utility

```bash
#!/bin/bash
# scripts/validate_config.sh - Validate configuration file

validate_config_file() {
    local config_file="$1"

    # Check that the file exists and is readable
    [ -r "$config_file" ] || { echo "ERROR: cannot read $config_file"; return 1; }

    # Validate JSON structure (assumes jq is installed; exits non-zero on malformed JSON)
    jq empty "$config_file" || { echo "ERROR: invalid JSON in $config_file"; return 1; }

    # Verify event signature (delegated to a relay-provided tool)
    # Check configuration values against allowed ranges
    # Report validation results
    echo "OK: $config_file passed basic validation"
}
```
This file-based configuration design provides a robust, secure, and maintainable system: it follows the XDG Base Directory convention and standard Nostr event signing, and integrates with the existing C Nostr Relay database and runtime.
@@ -5,6 +5,53 @@

echo "=== C Nostr Relay Build and Restart Script ==="

# Parse command line arguments
PRESERVE_CONFIG=false
HELP=false

while [[ $# -gt 0 ]]; do
    case $1 in
        --preserve-config|-p)
            PRESERVE_CONFIG=true
            shift
            ;;
        --help|-h)
            HELP=true
            shift
            ;;
        *)
            echo "Unknown option: $1"
            HELP=true
            shift
            ;;
    esac
done

# Show help
if [ "$HELP" = true ]; then
    echo "Usage: $0 [OPTIONS]"
    echo ""
    echo "Options:"
    echo "  --preserve-config    Keep existing configuration file (don't regenerate)"
    echo "  --help, -h           Show this help message"
    echo ""
    echo "Default behavior: Automatically regenerates configuration file on each build"
    echo "                  for development purposes"
    exit 0
fi

# Handle configuration file regeneration
CONFIG_FILE="$HOME/.config/c-relay/c_relay_config_event.json"
if [ "$PRESERVE_CONFIG" = false ] && [ -f "$CONFIG_FILE" ]; then
    echo "Removing old configuration file to trigger regeneration..."
    rm -f "$CONFIG_FILE"
    echo "✓ Configuration file removed - will be regenerated with latest database values"
elif [ "$PRESERVE_CONFIG" = true ] && [ -f "$CONFIG_FILE" ]; then
    echo "Preserving existing configuration file as requested"
elif [ ! -f "$CONFIG_FILE" ]; then
    echo "No existing configuration file found - will generate new one"
fi

# Build the project first
echo "Building project..."
make clean all
@@ -90,6 +137,19 @@ if ps -p "$RELAY_PID" >/dev/null 2>&1; then
    # Save PID for debugging
    echo $RELAY_PID > relay.pid

    # Check if a new private key was generated and display it
    sleep 1  # Give relay time to write initial logs
    if grep -q "GENERATED RELAY ADMIN PRIVATE KEY" relay.log 2>/dev/null; then
        echo "=== IMPORTANT: NEW ADMIN PRIVATE KEY GENERATED ==="
        echo ""
        # Extract and display the private key section from the log
        grep -A 8 -B 2 "GENERATED RELAY ADMIN PRIVATE KEY" relay.log | head -n 12
        echo ""
        echo "⚠️  SAVE THIS PRIVATE KEY SECURELY - IT CONTROLS YOUR RELAY!"
        echo "⚠️  This key is also logged in relay.log for reference"
        echo ""
    fi

    echo "=== Relay server running in background ==="
    echo "To kill relay: pkill -f 'c_relay_'"
    echo "To check status: ps aux | grep c_relay_"
Submodule nostr_core_lib updated: 33129d82fd...55e2a9c68e

12 relay.log
@@ -1,12 +0,0 @@
=== C Nostr Relay Server ===
[SUCCESS] Database connection established
[SUCCESS] Relay information initialized with default values
[INFO] Starting relay server...
[INFO] Starting libwebsockets-based Nostr relay server...
[SUCCESS] WebSocket relay started on ws://127.0.0.1:8888
[INFO] HTTP request received
[INFO] Handling NIP-11 relay information request
[SUCCESS] NIP-11 relay information served successfully
[INFO] HTTP request received
[INFO] Handling NIP-11 relay information request
[WARNING] HTTP request without proper Accept header for NIP-11
1002 src/config.c Normal file
File diff suppressed because it is too large

223 src/config.h Normal file
@@ -0,0 +1,223 @@
|
||||
#ifndef CONFIG_H
|
||||
#define CONFIG_H
|
||||
|
||||
#include <sqlite3.h>
|
||||
#include <time.h>
|
||||
#include <stddef.h>
|
||||
#include <cjson/cJSON.h>
|
||||
|
||||
// Configuration system constants
|
||||
#define CONFIG_KEY_MAX_LENGTH 64
|
||||
#define CONFIG_VALUE_MAX_LENGTH 512
|
||||
#define CONFIG_DESCRIPTION_MAX_LENGTH 256
|
||||
#define CONFIG_XDG_DIR_NAME "c-relay"
|
||||
#define CONFIG_FILE_NAME "c_relay_config_event.json"
|
||||
#define CONFIG_PRIVKEY_ENV "C_RELAY_CONFIG_PRIVKEY"
|
||||
#define NOSTR_PUBKEY_HEX_LENGTH 64
|
||||
#define NOSTR_PRIVKEY_HEX_LENGTH 64
|
||||
#define NOSTR_EVENT_ID_HEX_LENGTH 64
|
||||
#define NOSTR_SIGNATURE_HEX_LENGTH 128
|
||||
|
||||
// Protocol and implementation constants (hardcoded - should NOT be configurable)
|
||||
#define SUBSCRIPTION_ID_MAX_LENGTH 64
|
||||
#define CLIENT_IP_MAX_LENGTH 64
|
||||
#define RELAY_NAME_MAX_LENGTH 128
|
||||
#define RELAY_DESCRIPTION_MAX_LENGTH 1024
|
||||
#define RELAY_URL_MAX_LENGTH 256
|
||||
#define RELAY_CONTACT_MAX_LENGTH 128
|
||||
#define RELAY_PUBKEY_MAX_LENGTH 65
|
||||
|
||||
// Default configuration values (used as fallbacks if database config fails)
|
||||
#define DEFAULT_DATABASE_PATH "db/c_nostr_relay.db"
|
||||
#define DEFAULT_PORT 8888
|
||||
#define DEFAULT_HOST "127.0.0.1"
|
||||
#define MAX_CLIENTS 100
|
||||
#define MAX_SUBSCRIPTIONS_PER_CLIENT 20
|
||||
#define MAX_TOTAL_SUBSCRIPTIONS 5000
|
||||
#define MAX_FILTERS_PER_SUBSCRIPTION 10
|
||||
|
||||
// Configuration types
|
||||
typedef enum {
|
||||
CONFIG_TYPE_SYSTEM = 0,
|
||||
CONFIG_TYPE_USER = 1,
|
||||
CONFIG_TYPE_RUNTIME = 2
|
||||
} config_type_t;
|
||||
|
||||
// Configuration data types
|
||||
typedef enum {
|
||||
CONFIG_DATA_STRING = 0,
|
||||
CONFIG_DATA_INTEGER = 1,
|
||||
CONFIG_DATA_BOOLEAN = 2,
|
||||
CONFIG_DATA_JSON = 3
|
||||
} config_data_type_t;
|
||||
|
||||
// Configuration validation result
|
||||
typedef enum {
|
||||
CONFIG_VALID = 0,
|
||||
CONFIG_INVALID_TYPE = 1,
|
||||
CONFIG_INVALID_RANGE = 2,
|
||||
CONFIG_INVALID_FORMAT = 3,
|
||||
CONFIG_MISSING_REQUIRED = 4
|
||||
} config_validation_result_t;
|
||||
|
||||
// Configuration entry structure
|
||||
typedef struct {
|
||||
char key[CONFIG_KEY_MAX_LENGTH];
|
||||
char value[CONFIG_VALUE_MAX_LENGTH];
|
||||
char description[CONFIG_DESCRIPTION_MAX_LENGTH];
|
||||
config_type_t config_type;
|
||||
config_data_type_t data_type;
|
||||
int is_sensitive;
|
||||
int requires_restart;
|
||||
time_t created_at;
|
||||
time_t updated_at;
|
||||
} config_entry_t;
|
||||
|
||||
// Configuration manager state
|
||||
typedef struct {
|
||||
sqlite3* db;
|
||||
sqlite3_stmt* get_config_stmt;
|
||||
sqlite3_stmt* set_config_stmt;
|
||||
sqlite3_stmt* log_change_stmt;
|
||||
|
||||
// Configuration loading status
|
||||
int file_config_loaded;
|
||||
int database_config_loaded;
|
||||
time_t last_reload;
|
||||
|
||||
// XDG configuration directory
|
||||
char config_dir_path[512];
|
||||
char config_file_path[600];
|
||||
} config_manager_t;
|
||||
|
||||
// Global configuration manager instance
|
||||
extern config_manager_t g_config_manager;
|
||||
|
||||
// ================================
|
||||
// CORE CONFIGURATION FUNCTIONS
|
||||
// ================================
|
||||
|
||||
// Initialize configuration system
|
||||
int init_configuration_system(void);
|
||||
|
||||
// Cleanup configuration system
|
||||
void cleanup_configuration_system(void);
|
||||
|
||||
// Load configuration from all sources (file -> database -> defaults)
|
||||
int load_configuration(void);
|
||||
|
||||
// Apply loaded configuration to global variables
|
||||
int apply_configuration_to_globals(void);
|
||||
|
||||
// ================================
|
||||
// DATABASE CONFIGURATION FUNCTIONS
|
||||
// ================================
|
||||
|
||||
// Initialize database prepared statements
|
||||
int init_config_database_statements(void);
|
||||
|
||||
// Get configuration value from database
|
||||
int get_database_config(const char* key, char* value, size_t value_size);
|
||||
|
||||
// Set configuration value in database
|
||||
int set_database_config(const char* key, const char* new_value, const char* changed_by);
|
||||
|
||||
// Load all configuration from database
|
||||
int load_config_from_database(void);
|
||||
|
||||
// ================================
|
||||
// FILE CONFIGURATION FUNCTIONS
|
||||
// ================================
|
||||
|
||||
// Get XDG configuration directory path
|
||||
int get_xdg_config_dir(char* path, size_t path_size);
|
||||
|
||||
// Check if configuration file exists
|
||||
int config_file_exists(void);
|
||||
|
||||
// Load configuration from file
|
||||
int load_config_from_file(void);
|
||||
|
||||
// Validate and apply Nostr configuration event
|
||||
int validate_and_apply_config_event(const cJSON* event);
|
||||
|
||||
// Validate Nostr event structure
|
||||
int validate_nostr_event_structure(const cJSON* event);
|
||||
|
||||
// Validate configuration tags array
|
||||
int validate_config_tags(const cJSON* tags);
|
||||
|
||||
// Extract and apply configuration tags to database
|
||||
int extract_and_apply_config_tags(const cJSON* tags);
|
||||
|
||||
// ================================
|
||||
// CONFIGURATION ACCESS FUNCTIONS
|
||||
// ================================
|
||||
|
||||
// Get configuration value (checks all sources: file -> database -> environment -> defaults)
|
||||
const char* get_config_value(const char* key);
|
||||
|
||||
// Get configuration value as integer
|
||||
int get_config_int(const char* key, int default_value);
|
||||
|
||||
// Get configuration value as boolean
|
||||
int get_config_bool(const char* key, int default_value);
|
||||
|
||||
// Set configuration value (updates database)
|
||||
int set_config_value(const char* key, const char* value);
|
||||
|
||||
// ================================
|
||||
// CONFIGURATION VALIDATION
|
||||
// ================================
|
||||
|
||||
// Validate configuration value
|
||||
config_validation_result_t validate_config_value(const char* key, const char* value);
|
||||
|
||||
// Log validation error
|
||||
void log_config_validation_error(const char* key, const char* value, const char* error);
|
||||
|
||||
// ================================
|
||||
// UTILITY FUNCTIONS
|
||||
// ================================
|
||||
|
||||
// Convert config type enum to string
|
||||
const char* config_type_to_string(config_type_t type);
|
||||
|
||||
// Convert config data type enum to string
|
||||
const char* config_data_type_to_string(config_data_type_t type);
|
||||
|
||||
// Convert string to config type enum
|
||||
config_type_t string_to_config_type(const char* str);
|
||||
|
||||
// Convert string to config data type enum
|
||||
config_data_type_t string_to_config_data_type(const char* str);
|
||||
|
||||
// Check if configuration key requires restart
|
||||
int config_requires_restart(const char* key);
|
||||
|
||||
// ================================
|
||||
// NOSTR EVENT GENERATION FUNCTIONS
|
||||
// ================================
|
||||
|
||||
// Generate configuration file with valid Nostr event if it doesn't exist
|
||||
int generate_config_file_if_missing(void);
|
||||
|
||||
// Create a valid Nostr configuration event from database values
|
||||
cJSON* create_config_nostr_event(const char* privkey_hex);
|
||||
|
||||
// Generate a random private key (32 bytes as hex string)
|
||||
int generate_random_privkey(char* privkey_hex, size_t buffer_size);
|
||||
|
||||
// Derive public key from private key (using secp256k1)
|
||||
int derive_pubkey_from_privkey(const char* privkey_hex, char* pubkey_hex, size_t buffer_size);
|
||||
|
||||
// Create Nostr event ID (SHA256 of serialized event data)
|
||||
int create_nostr_event_id(const cJSON* event, char* event_id_hex, size_t buffer_size);
|
||||
|
||||
// Sign Nostr event (using secp256k1 Schnorr signature)
|
||||
int sign_nostr_event(const cJSON* event, const char* privkey_hex, char* signature_hex, size_t buffer_size);
|
||||
|
||||
// Write configuration event to file
|
||||
int write_config_event_to_file(const cJSON* event);
|
||||
|
||||
#endif // CONFIG_H
|
||||
585 src/main.c
@@ -14,26 +14,9 @@
|
||||
// Include nostr_core_lib for Nostr functionality
|
||||
#include "../nostr_core_lib/cjson/cJSON.h"
|
||||
#include "../nostr_core_lib/nostr_core/nostr_core.h"
|
||||
|
||||
// Server Configuration
|
||||
#define DEFAULT_PORT 8888
|
||||
#define DEFAULT_HOST "127.0.0.1"
|
||||
#define DATABASE_PATH "db/c_nostr_relay.db"
|
||||
#define MAX_CLIENTS 100
|
||||
|
||||
// Persistent subscription system configuration
|
||||
#define MAX_SUBSCRIPTIONS_PER_CLIENT 20
|
||||
#define MAX_TOTAL_SUBSCRIPTIONS 5000
|
||||
#define MAX_FILTERS_PER_SUBSCRIPTION 10
|
||||
#define SUBSCRIPTION_ID_MAX_LENGTH 64
|
||||
#define CLIENT_IP_MAX_LENGTH 64
|
||||
|
||||
// NIP-11 relay information configuration
|
||||
#define RELAY_NAME_MAX_LENGTH 128
|
||||
#define RELAY_DESCRIPTION_MAX_LENGTH 1024
|
||||
#define RELAY_URL_MAX_LENGTH 256
|
||||
#define RELAY_CONTACT_MAX_LENGTH 128
|
||||
#define RELAY_PUBKEY_MAX_LENGTH 65 // 64 hex chars + null terminator
|
||||
#include "../nostr_core_lib/nostr_core/nip013.h" // NIP-13: Proof of Work
|
||||
#include "config.h" // Configuration management system
|
||||
#include "sql_schema.h" // Embedded database schema
|
||||
|
||||
// Color constants for logging
|
||||
#define RED "\033[31m"
|
||||
@@ -44,7 +27,7 @@
|
||||
#define RESET "\033[0m"
|
||||
|
||||
// Global state
|
||||
static sqlite3* g_db = NULL;
|
||||
sqlite3* g_db = NULL; // Non-static so config.c can access it
|
||||
static int g_server_running = 1;
|
||||
static struct lws_context *ws_context = NULL;
|
||||
|
||||
@@ -74,6 +57,46 @@ struct relay_info {
|
||||
// Global relay information instance
|
||||
static struct relay_info g_relay_info = {0};
|
||||
|
||||
// NIP-13 PoW configuration structure
|
||||
struct pow_config {
|
||||
int enabled; // 0 = disabled, 1 = enabled
|
||||
int min_pow_difficulty; // Minimum required difficulty (0 = no requirement)
|
||||
int validation_flags; // Bitflags for validation options
|
||||
int require_nonce_tag; // 1 = require nonce tag presence
|
||||
int reject_lower_targets; // 1 = reject if committed < actual difficulty
|
||||
int strict_format; // 1 = enforce strict nonce tag format
|
||||
int anti_spam_mode; // 1 = full anti-spam validation
|
||||
};
|
||||
|
||||
// Global PoW configuration instance
|
||||
static struct pow_config g_pow_config = {
|
||||
.enabled = 1, // Enable PoW validation by default
|
||||
.min_pow_difficulty = 0, // No minimum difficulty by default
|
||||
.validation_flags = NOSTR_POW_VALIDATE_BASIC,
|
||||
.require_nonce_tag = 0, // Don't require nonce tags by default
|
||||
.reject_lower_targets = 0, // Allow lower committed targets by default
|
||||
.strict_format = 0, // Relaxed format validation by default
|
||||
.anti_spam_mode = 0 // Basic validation by default
|
||||
};
|
||||
|
||||
// NIP-40 Expiration configuration structure
|
||||
struct expiration_config {
|
||||
int enabled; // 0 = disabled, 1 = enabled
|
||||
int strict_mode; // 1 = reject expired events on submission
|
||||
int filter_responses; // 1 = filter expired events from responses
|
||||
int delete_expired; // 1 = delete expired events from DB (future feature)
|
||||
long grace_period; // Grace period in seconds for clock skew
|
||||
};
|
||||
|
||||
// Global expiration configuration instance
|
||||
static struct expiration_config g_expiration_config = {
|
||||
.enabled = 1, // Enable expiration handling by default
|
||||
.strict_mode = 1, // Reject expired events on submission by default
|
||||
.filter_responses = 1, // Filter expired events from responses by default
|
||||
.delete_expired = 0, // Don't delete by default (keep for audit)
|
||||
.grace_period = 300 // 5 minutes grace period for clock skew
|
||||
};
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
@@ -147,8 +170,8 @@ static subscription_manager_t g_subscription_manager = {
|
||||
.active_subscriptions = NULL,
|
||||
.subscriptions_lock = PTHREAD_MUTEX_INITIALIZER,
|
||||
.total_subscriptions = 0,
|
||||
.max_subscriptions_per_client = MAX_SUBSCRIPTIONS_PER_CLIENT,
|
||||
.max_total_subscriptions = MAX_TOTAL_SUBSCRIPTIONS,
|
||||
.max_subscriptions_per_client = MAX_SUBSCRIPTIONS_PER_CLIENT, // Will be updated from config
|
||||
.max_total_subscriptions = MAX_TOTAL_SUBSCRIPTIONS, // Will be updated from config
|
||||
.total_created = 0,
|
||||
.total_events_broadcast = 0
|
||||
};
|
||||
@@ -159,6 +182,9 @@ void log_success(const char* message);
|
||||
void log_error(const char* message);
|
||||
void log_warning(const char* message);
|
||||
|
||||
// Forward declaration for subscription manager configuration
|
||||
void update_subscription_manager_config(void);
|
||||
|
||||
// Forward declarations for subscription database logging
|
||||
void log_subscription_created(const subscription_t* sub);
|
||||
void log_subscription_closed(const char* sub_id, const char* client_ip, const char* reason);
|
||||
@@ -190,6 +216,16 @@ void cleanup_relay_info();
|
||||
cJSON* generate_relay_info_json();
|
||||
int handle_nip11_http_request(struct lws* wsi, const char* accept_header);
|
||||
|
||||
// Forward declarations for NIP-13 PoW validation
|
||||
void init_pow_config();
|
||||
int validate_event_pow(cJSON* event, char* error_message, size_t error_size);
|
||||
|
||||
// Forward declarations for NIP-40 expiration handling
|
||||
void init_expiration_config();
|
||||
long extract_expiration_timestamp(cJSON* tags);
|
||||
int is_event_expired(cJSON* event, time_t current_time);
|
||||
int validate_event_expiration(cJSON* event, char* error_message, size_t error_size);
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
@@ -624,6 +660,19 @@ int broadcast_event_to_subscriptions(cJSON* event) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Check if event is expired and should not be broadcast (NIP-40)
|
||||
if (g_expiration_config.enabled && g_expiration_config.filter_responses) {
|
||||
time_t current_time = time(NULL);
|
||||
if (is_event_expired(event, current_time)) {
|
||||
char debug_msg[256];
|
||||
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
|
||||
const char* event_id = event_id_obj ? cJSON_GetStringValue(event_id_obj) : "unknown";
|
||||
snprintf(debug_msg, sizeof(debug_msg), "Skipping broadcast of expired event: %.16s", event_id);
|
||||
log_info(debug_msg);
|
||||
return 0; // Don't broadcast expired events
|
||||
}
|
||||
}
|
||||
|
||||
int broadcasts = 0;
|
||||
|
||||
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
|
||||
@@ -891,6 +940,19 @@ void log_warning(const char* message) {
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
// Update subscription manager configuration from config system
|
||||
void update_subscription_manager_config(void) {
|
||||
g_subscription_manager.max_subscriptions_per_client = get_config_int("max_subscriptions_per_client", MAX_SUBSCRIPTIONS_PER_CLIENT);
|
||||
g_subscription_manager.max_total_subscriptions = get_config_int("max_total_subscriptions", MAX_TOTAL_SUBSCRIPTIONS);
|
||||
|
||||
char config_msg[256];
|
||||
snprintf(config_msg, sizeof(config_msg),
|
||||
"Subscription limits: max_per_client=%d, max_total=%d",
|
||||
g_subscription_manager.max_subscriptions_per_client,
|
||||
g_subscription_manager.max_total_subscriptions);
|
||||
log_info(config_msg);
|
||||
}
|
||||
|
||||
// Signal handler for graceful shutdown
|
||||
void signal_handler(int sig) {
|
||||
if (sig == SIGINT || sig == SIGTERM) {
|
||||
@@ -1224,13 +1286,47 @@ int mark_event_as_deleted(const char* event_id, const char* deletion_event_id, c
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Initialize relay information with default values
|
||||
// Initialize relay information using configuration system
|
||||
void init_relay_info() {
|
||||
// Set default relay information
|
||||
strncpy(g_relay_info.name, "C Nostr Relay", sizeof(g_relay_info.name) - 1);
|
||||
strncpy(g_relay_info.description, "A high-performance Nostr relay implemented in C with SQLite storage", sizeof(g_relay_info.description) - 1);
|
||||
strncpy(g_relay_info.software, "https://github.com/teknari/c-relay", sizeof(g_relay_info.software) - 1);
|
||||
strncpy(g_relay_info.version, "0.1.0", sizeof(g_relay_info.version) - 1);
|
||||
// Load relay information from configuration system
|
||||
const char* relay_name = get_config_value("relay_name");
|
||||
if (relay_name) {
|
||||
strncpy(g_relay_info.name, relay_name, sizeof(g_relay_info.name) - 1);
|
||||
} else {
|
||||
strncpy(g_relay_info.name, "C Nostr Relay", sizeof(g_relay_info.name) - 1);
|
||||
}
|
||||
|
||||
const char* relay_description = get_config_value("relay_description");
|
||||
if (relay_description) {
|
||||
strncpy(g_relay_info.description, relay_description, sizeof(g_relay_info.description) - 1);
|
||||
} else {
|
||||
strncpy(g_relay_info.description, "A high-performance Nostr relay implemented in C with SQLite storage", sizeof(g_relay_info.description) - 1);
|
||||
}
|
||||
|
||||
const char* relay_software = get_config_value("relay_software");
|
||||
if (relay_software) {
|
||||
strncpy(g_relay_info.software, relay_software, sizeof(g_relay_info.software) - 1);
|
||||
} else {
|
||||
strncpy(g_relay_info.software, "https://git.laantungir.net/laantungir/c-relay.git", sizeof(g_relay_info.software) - 1);
|
||||
}
|
||||
|
||||
const char* relay_version = get_config_value("relay_version");
|
||||
if (relay_version) {
|
||||
strncpy(g_relay_info.version, relay_version, sizeof(g_relay_info.version) - 1);
|
||||
} else {
|
||||
strncpy(g_relay_info.version, "0.2.0", sizeof(g_relay_info.version) - 1);
|
||||
}
|
||||
|
||||
// Load optional fields
|
||||
const char* relay_contact = get_config_value("relay_contact");
|
||||
if (relay_contact) {
|
||||
strncpy(g_relay_info.contact, relay_contact, sizeof(g_relay_info.contact) - 1);
|
||||
}
|
||||
|
||||
const char* relay_pubkey = get_config_value("relay_pubkey");
|
||||
if (relay_pubkey) {
|
||||
strncpy(g_relay_info.pubkey, relay_pubkey, sizeof(g_relay_info.pubkey) - 1);
|
||||
}
|
||||
|
||||
// Initialize supported NIPs array
|
||||
g_relay_info.supported_nips = cJSON_CreateArray();
|
||||
@@ -1238,26 +1334,28 @@ void init_relay_info() {
|
||||
cJSON_AddItemToArray(g_relay_info.supported_nips, cJSON_CreateNumber(1)); // NIP-01: Basic protocol
|
||||
cJSON_AddItemToArray(g_relay_info.supported_nips, cJSON_CreateNumber(9)); // NIP-09: Event deletion
|
||||
cJSON_AddItemToArray(g_relay_info.supported_nips, cJSON_CreateNumber(11)); // NIP-11: Relay information
|
||||
cJSON_AddItemToArray(g_relay_info.supported_nips, cJSON_CreateNumber(13)); // NIP-13: Proof of Work
|
||||
cJSON_AddItemToArray(g_relay_info.supported_nips, cJSON_CreateNumber(15)); // NIP-15: EOSE
|
||||
cJSON_AddItemToArray(g_relay_info.supported_nips, cJSON_CreateNumber(20)); // NIP-20: Command results
|
||||
cJSON_AddItemToArray(g_relay_info.supported_nips, cJSON_CreateNumber(40)); // NIP-40: Expiration Timestamp
|
||||
}
|
||||
|
||||
// Initialize server limitations
|
||||
// Initialize server limitations using configuration
|
||||
g_relay_info.limitation = cJSON_CreateObject();
|
||||
if (g_relay_info.limitation) {
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_message_length", 16384);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_subscriptions", MAX_SUBSCRIPTIONS_PER_CLIENT);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_limit", 5000);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_message_length", get_config_int("max_message_length", 16384));
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_subscriptions", get_config_int("max_subscriptions_per_client", 20));
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_limit", get_config_int("max_limit", 5000));
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_subid_length", SUBSCRIPTION_ID_MAX_LENGTH);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_event_tags", 100);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_content_length", 8196);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "min_pow_difficulty", 0);
|
||||
cJSON_AddBoolToObject(g_relay_info.limitation, "auth_required", cJSON_False);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_event_tags", get_config_int("max_event_tags", 100));
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "max_content_length", get_config_int("max_content_length", 8196));
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "min_pow_difficulty", g_pow_config.min_pow_difficulty);
|
||||
cJSON_AddBoolToObject(g_relay_info.limitation, "auth_required", get_config_bool("admin_enabled", 0) ? cJSON_True : cJSON_False);
|
||||
cJSON_AddBoolToObject(g_relay_info.limitation, "payment_required", cJSON_False);
|
||||
cJSON_AddBoolToObject(g_relay_info.limitation, "restricted_writes", cJSON_False);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "created_at_lower_limit", 0);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "created_at_upper_limit", 2147483647);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "default_limit", 500);
|
||||
cJSON_AddNumberToObject(g_relay_info.limitation, "default_limit", get_config_int("default_limit", 500));
|
||||
}
|
||||
|
||||
// Initialize empty retention policies (can be configured later)
|
||||
@@ -1564,21 +1662,358 @@ int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
// NIP-13 PROOF OF WORK VALIDATION
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Initialize PoW configuration using configuration system
|
||||
void init_pow_config() {
|
||||
log_info("Initializing NIP-13 Proof of Work configuration");
|
||||
|
||||
// Load PoW settings from configuration system
|
||||
g_pow_config.enabled = get_config_bool("pow_enabled", 1);
|
||||
g_pow_config.min_pow_difficulty = get_config_int("pow_min_difficulty", 0);
|
||||
|
||||
// Get PoW mode from configuration
|
||||
const char* pow_mode = get_config_value("pow_mode");
|
||||
if (pow_mode) {
|
||||
if (strcmp(pow_mode, "strict") == 0) {
|
||||
g_pow_config.validation_flags = NOSTR_POW_VALIDATE_ANTI_SPAM | NOSTR_POW_STRICT_FORMAT;
|
||||
g_pow_config.require_nonce_tag = 1;
|
||||
g_pow_config.reject_lower_targets = 1;
|
||||
g_pow_config.strict_format = 1;
|
||||
g_pow_config.anti_spam_mode = 1;
|
||||
log_info("PoW configured in strict anti-spam mode");
|
||||
} else if (strcmp(pow_mode, "full") == 0) {
|
||||
g_pow_config.validation_flags = NOSTR_POW_VALIDATE_FULL;
|
||||
g_pow_config.require_nonce_tag = 1;
|
||||
log_info("PoW configured in full validation mode");
|
||||
} else if (strcmp(pow_mode, "basic") == 0) {
|
||||
g_pow_config.validation_flags = NOSTR_POW_VALIDATE_BASIC;
|
||||
log_info("PoW configured in basic validation mode");
|
||||
} else if (strcmp(pow_mode, "disabled") == 0) {
|
||||
g_pow_config.enabled = 0;
|
||||
log_info("PoW validation disabled via configuration");
|
||||
}
|
||||
} else {
|
||||
// Default to basic mode
|
||||
g_pow_config.validation_flags = NOSTR_POW_VALIDATE_BASIC;
|
||||
log_info("PoW configured in basic validation mode (default)");
|
||||
}
|
||||
|
||||
// Log final configuration
|
||||
char config_msg[512];
|
||||
snprintf(config_msg, sizeof(config_msg),
|
||||
"PoW Configuration: enabled=%s, min_difficulty=%d, validation_flags=0x%x, mode=%s",
|
||||
g_pow_config.enabled ? "true" : "false",
|
||||
g_pow_config.min_pow_difficulty,
|
||||
g_pow_config.validation_flags,
|
||||
g_pow_config.anti_spam_mode ? "anti-spam" :
|
||||
(g_pow_config.validation_flags & NOSTR_POW_VALIDATE_FULL) ? "full" : "basic");
|
||||
log_info(config_msg);
|
||||
}
|
||||
|
||||
// Validate event Proof of Work according to NIP-13
|
||||
int validate_event_pow(cJSON* event, char* error_message, size_t error_size) {
|
||||
if (!g_pow_config.enabled) {
|
||||
return 0; // PoW validation disabled
|
||||
}
|
||||
|
||||
if (!event) {
|
||||
snprintf(error_message, error_size, "pow: null event");
|
||||
return NOSTR_ERROR_INVALID_INPUT;
|
||||
}
|
||||
|
||||
// If min_pow_difficulty is 0, only validate events that have nonce tags
|
||||
// This allows events without PoW when difficulty requirement is 0
|
||||
if (g_pow_config.min_pow_difficulty == 0) {
|
||||
cJSON* tags = cJSON_GetObjectItem(event, "tags");
|
||||
int has_nonce_tag = 0;
|
||||
|
||||
if (tags && cJSON_IsArray(tags)) {
|
||||
cJSON* tag = NULL;
|
||||
cJSON_ArrayForEach(tag, tags) {
|
||||
if (cJSON_IsArray(tag) && cJSON_GetArraySize(tag) >= 2) {
|
||||
cJSON* tag_name = cJSON_GetArrayItem(tag, 0);
|
||||
if (cJSON_IsString(tag_name)) {
|
||||
const char* name = cJSON_GetStringValue(tag_name);
|
||||
if (name && strcmp(name, "nonce") == 0) {
|
||||
has_nonce_tag = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no minimum difficulty required and no nonce tag, skip PoW validation
|
||||
if (!has_nonce_tag) {
|
||||
return 0; // Accept event without PoW when min_difficulty=0
|
||||
}
|
||||
}
|
||||
|
||||
// Perform PoW validation using nostr_core_lib
|
||||
nostr_pow_result_t pow_result;
|
||||
int validation_result = nostr_validate_pow(event, g_pow_config.min_pow_difficulty,
|
||||
g_pow_config.validation_flags, &pow_result);
|
||||
|
||||
if (validation_result != NOSTR_SUCCESS) {
|
||||
// Handle specific error cases with appropriate messages
|
||||
switch (validation_result) {
|
||||
case NOSTR_ERROR_NIP13_INSUFFICIENT:
|
||||
snprintf(error_message, error_size,
|
||||
"pow: insufficient difficulty: %d < %d",
|
||||
pow_result.actual_difficulty, g_pow_config.min_pow_difficulty);
|
||||
log_warning("Event rejected: insufficient PoW difficulty");
|
||||
break;
|
||||
case NOSTR_ERROR_NIP13_NO_NONCE_TAG:
|
||||
// This should not happen with min_difficulty=0 after our check above
|
||||
if (g_pow_config.min_pow_difficulty > 0) {
|
||||
snprintf(error_message, error_size, "pow: missing required nonce tag");
|
||||
log_warning("Event rejected: missing nonce tag");
|
||||
} else {
|
||||
return 0; // Allow when min_difficulty=0
|
||||
}
|
||||
break;
|
||||
case NOSTR_ERROR_NIP13_INVALID_NONCE_TAG:
|
||||
snprintf(error_message, error_size, "pow: invalid nonce tag format");
|
||||
log_warning("Event rejected: invalid nonce tag format");
|
||||
break;
|
||||
case NOSTR_ERROR_NIP13_TARGET_MISMATCH:
|
||||
snprintf(error_message, error_size,
|
||||
"pow: committed target (%d) lower than minimum (%d)",
|
||||
pow_result.committed_target, g_pow_config.min_pow_difficulty);
|
||||
log_warning("Event rejected: committed target too low (anti-spam protection)");
|
||||
break;
|
||||
case NOSTR_ERROR_NIP13_CALCULATION:
|
||||
snprintf(error_message, error_size, "pow: difficulty calculation failed");
|
||||
log_error("PoW difficulty calculation error");
|
||||
break;
|
||||
case NOSTR_ERROR_EVENT_INVALID_ID:
|
||||
snprintf(error_message, error_size, "pow: invalid event ID format");
|
||||
log_warning("Event rejected: invalid event ID for PoW calculation");
|
||||
break;
|
||||
default:
|
||||
snprintf(error_message, error_size, "pow: validation failed - %s",
|
||||
strlen(pow_result.error_detail) > 0 ? pow_result.error_detail : "unknown error");
|
||||
log_warning("Event rejected: PoW validation failed");
|
||||
}
|
||||
return validation_result;
|
||||
}
|
||||
|
||||
// Log successful PoW validation (only if minimum difficulty is required)
|
||||
if (g_pow_config.min_pow_difficulty > 0 || pow_result.has_nonce_tag) {
|
||||
char debug_msg[256];
|
||||
snprintf(debug_msg, sizeof(debug_msg),
|
||||
"PoW validated: difficulty=%d, target=%d, nonce=%llu%s",
|
||||
pow_result.actual_difficulty,
|
||||
pow_result.committed_target,
|
||||
(unsigned long long)pow_result.nonce_value,
|
||||
pow_result.has_nonce_tag ? "" : " (no nonce tag)");
|
||||
log_info(debug_msg);
|
||||
}
|
||||
|
||||
return 0; // Success
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
// NIP-40 EXPIRATION TIMESTAMP HANDLING
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Initialize expiration configuration using configuration system
|
||||
void init_expiration_config() {
|
||||
log_info("Initializing NIP-40 Expiration Timestamp configuration");
|
||||
|
||||
// Load expiration settings from configuration system
|
||||
g_expiration_config.enabled = get_config_bool("expiration_enabled", 1);
|
||||
g_expiration_config.strict_mode = get_config_bool("expiration_strict", 1);
|
||||
g_expiration_config.filter_responses = get_config_bool("expiration_filter", 1);
|
||||
g_expiration_config.delete_expired = get_config_bool("expiration_delete", 0);
|
||||
g_expiration_config.grace_period = get_config_int("expiration_grace_period", 300);
|
||||
|
||||
// Validate grace period bounds
|
||||
if (g_expiration_config.grace_period < 0 || g_expiration_config.grace_period > 86400) {
|
||||
log_warning("Invalid grace period, using default of 300 seconds");
|
||||
g_expiration_config.grace_period = 300;
|
||||
}
|
||||
|
||||
// Log final configuration
|
||||
char config_msg[512];
|
||||
snprintf(config_msg, sizeof(config_msg),
|
||||
"Expiration Configuration: enabled=%s, strict_mode=%s, filter_responses=%s, grace_period=%ld seconds",
|
||||
g_expiration_config.enabled ? "true" : "false",
|
||||
g_expiration_config.strict_mode ? "true" : "false",
|
||||
g_expiration_config.filter_responses ? "true" : "false",
|
||||
g_expiration_config.grace_period);
|
||||
log_info(config_msg);
|
||||
}
|
||||
|
||||
// Extract expiration timestamp from event tags
|
||||
long extract_expiration_timestamp(cJSON* tags) {
|
||||
if (!tags || !cJSON_IsArray(tags)) {
|
||||
return 0; // No expiration
|
||||
}
|
||||
|
||||
cJSON* tag = NULL;
|
||||
cJSON_ArrayForEach(tag, tags) {
|
||||
if (cJSON_IsArray(tag) && cJSON_GetArraySize(tag) >= 2) {
|
||||
cJSON* tag_name = cJSON_GetArrayItem(tag, 0);
|
||||
cJSON* tag_value = cJSON_GetArrayItem(tag, 1);
|
||||
|
||||
if (cJSON_IsString(tag_name) && cJSON_IsString(tag_value)) {
|
||||
const char* name = cJSON_GetStringValue(tag_name);
|
||||
const char* value = cJSON_GetStringValue(tag_value);
|
||||
|
||||
if (name && value && strcmp(name, "expiration") == 0) {
|
||||
long expiration_ts = atol(value);
|
||||
if (expiration_ts > 0) {
|
||||
return expiration_ts;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0; // No valid expiration tag found
|
||||
}
|
||||
|
||||
// Check if event is currently expired
|
||||
int is_event_expired(cJSON* event, time_t current_time) {
|
||||
if (!event) {
|
||||
return 0; // Invalid event, not expired
|
||||
}
|
||||
|
||||
cJSON* tags = cJSON_GetObjectItem(event, "tags");
|
||||
long expiration_ts = extract_expiration_timestamp(tags);
|
||||
|
||||
if (expiration_ts == 0) {
|
||||
return 0; // No expiration timestamp, not expired
|
||||
}
|
||||
|
||||
// Check if current time exceeds expiration + grace period
|
||||
return (current_time > (expiration_ts + g_expiration_config.grace_period));
|
||||
}
|
||||
|
||||
// Validate event expiration according to NIP-40
|
||||
int validate_event_expiration(cJSON* event, char* error_message, size_t error_size) {
|
||||
if (!g_expiration_config.enabled) {
|
||||
return 0; // Expiration validation disabled
|
||||
}
|
||||
|
||||
if (!event) {
|
||||
snprintf(error_message, error_size, "expiration: null event");
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Check if event is expired
|
||||
time_t current_time = time(NULL);
|
||||
if (is_event_expired(event, current_time)) {
|
||||
if (g_expiration_config.strict_mode) {
|
||||
cJSON* tags = cJSON_GetObjectItem(event, "tags");
|
||||
long expiration_ts = extract_expiration_timestamp(tags);
|
||||
|
||||
snprintf(error_message, error_size,
|
||||
"invalid: event expired (expiration=%ld, current=%ld, grace=%ld)",
|
||||
expiration_ts, (long)current_time, g_expiration_config.grace_period);
|
||||
log_warning("Event rejected: expired timestamp");
|
||||
return -1;
|
||||
} else {
|
||||
// In non-strict mode, log but allow expired events
|
||||
char debug_msg[256];
|
||||
snprintf(debug_msg, sizeof(debug_msg),
|
||||
"Accepting expired event (strict_mode disabled)");
|
||||
log_info(debug_msg);
|
||||
}
|
||||
}
|
||||
|
||||
return 0; // Success
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
// DATABASE FUNCTIONS
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Initialize database connection
|
||||
// Initialize database connection and schema
|
||||
int init_database() {
|
||||
int rc = sqlite3_open(DATABASE_PATH, &g_db);
|
||||
// Use configurable database path, falling back to default
|
||||
const char* db_path = get_config_value("database_path");
|
||||
if (!db_path) {
|
||||
db_path = DEFAULT_DATABASE_PATH;
|
||||
}
|
||||
|
||||
int rc = sqlite3_open(db_path, &g_db);
|
||||
if (rc != SQLITE_OK) {
|
||||
log_error("Cannot open database");
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_success("Database connection established");
|
||||
char success_msg[256];
|
||||
snprintf(success_msg, sizeof(success_msg), "Database connection established: %s", db_path);
|
||||
log_success(success_msg);
|
||||
|
||||
// Check if database is already initialized by looking for the events table
|
||||
const char* check_sql = "SELECT name FROM sqlite_master WHERE type='table' AND name='events'";
|
||||
sqlite3_stmt* check_stmt;
|
||||
rc = sqlite3_prepare_v2(g_db, check_sql, -1, &check_stmt, NULL);
|
||||
if (rc == SQLITE_OK) {
|
||||
int has_events_table = (sqlite3_step(check_stmt) == SQLITE_ROW);
|
||||
sqlite3_finalize(check_stmt);
|
||||
|
||||
if (has_events_table) {
|
||||
log_info("Database schema already exists, skipping initialization");
|
||||
|
||||
// Log existing schema version if available
|
||||
const char* version_sql = "SELECT value FROM schema_info WHERE key = 'version'";
|
||||
sqlite3_stmt* version_stmt;
|
||||
if (sqlite3_prepare_v2(g_db, version_sql, -1, &version_stmt, NULL) == SQLITE_OK) {
|
||||
if (sqlite3_step(version_stmt) == SQLITE_ROW) {
|
||||
const char* db_version = (char*)sqlite3_column_text(version_stmt, 0);
|
||||
char version_msg[256];
|
||||
snprintf(version_msg, sizeof(version_msg), "Existing database schema version: %s",
|
||||
db_version ? db_version : "unknown");
|
||||
log_info(version_msg);
|
||||
} else {
|
||||
log_info("Database exists but no version information found");
|
||||
}
|
||||
sqlite3_finalize(version_stmt);
|
||||
}
|
||||
} else {
|
||||
// Initialize database schema using embedded SQL
|
||||
log_info("Initializing database schema from embedded SQL");
|
||||
|
||||
// Execute the embedded schema SQL
|
||||
char* error_msg = NULL;
|
||||
rc = sqlite3_exec(g_db, EMBEDDED_SCHEMA_SQL, NULL, NULL, &error_msg);
|
||||
if (rc != SQLITE_OK) {
|
||||
char error_log[512];
|
||||
snprintf(error_log, sizeof(error_log), "Failed to initialize database schema: %s",
|
||||
error_msg ? error_msg : "unknown error");
|
||||
log_error(error_log);
|
||||
if (error_msg) {
|
||||
sqlite3_free(error_msg);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_success("Database schema initialized successfully");
|
||||
|
||||
// Log schema version information
|
||||
char version_msg[256];
|
||||
snprintf(version_msg, sizeof(version_msg), "Database schema version: %s",
|
||||
EMBEDDED_SCHEMA_VERSION);
|
||||
log_info(version_msg);
|
||||
}
|
||||
} else {
|
||||
log_error("Failed to check existing database schema");
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1889,7 +2324,7 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
|
||||
}
|
||||
|
||||
// Check session subscription limits
|
||||
if (pss && pss->subscription_count >= MAX_SUBSCRIPTIONS_PER_CLIENT) {
|
||||
if (pss && pss->subscription_count >= g_subscription_manager.max_subscriptions_per_client) {
|
||||
log_error("Maximum subscriptions per client exceeded");
|
||||
|
||||
// Send CLOSED notice
|
||||
@@ -1972,6 +2407,9 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
|
||||
char* sql_ptr = sql + strlen(sql);
|
||||
int remaining = sizeof(sql) - strlen(sql);
|
||||
|
||||
// Note: Expiration filtering will be done at application level
|
||||
// after retrieving events to ensure compatibility with all SQLite versions
|
||||
|
||||
// Handle kinds filter
|
||||
cJSON* kinds = cJSON_GetObjectItem(filter, "kinds");
|
||||
if (kinds && cJSON_IsArray(kinds)) {
|
||||
@@ -2100,6 +2538,16 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
|
||||
}
|
||||
cJSON_AddItemToObject(event, "tags", tags);
|
||||
|
||||
// Check expiration filtering (NIP-40) at application level
|
||||
if (g_expiration_config.enabled && g_expiration_config.filter_responses) {
|
||||
time_t current_time = time(NULL);
|
||||
if (is_event_expired(event, current_time)) {
|
||||
// Skip this expired event
|
||||
cJSON_Delete(event);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// Send EVENT message
|
||||
cJSON* event_msg = cJSON_CreateArray();
|
||||
cJSON_AddItemToArray(event_msg, cJSON_CreateString("EVENT"));
|
||||
@@ -2189,14 +2637,26 @@ int handle_event_message(cJSON* event, char* error_message, size_t error_size) {
|
||||
return signature_result;
|
||||
}
|
||||
|
||||
// Step 3: Complete event validation (combines structure + signature + additional checks)
|
||||
// Step 3: Validate Proof of Work (NIP-13) if enabled
|
||||
int pow_result = validate_event_pow(event, error_message, error_size);
|
||||
if (pow_result != 0) {
|
||||
return pow_result; // PoW validation failed, error message already set
|
||||
}
|
||||
|
||||
// Step 4: Validate expiration timestamp (NIP-40) if enabled
|
||||
int expiration_result = validate_event_expiration(event, error_message, error_size);
|
||||
if (expiration_result != 0) {
|
||||
return expiration_result; // Expiration validation failed, error message already set
|
||||
}
|
||||
|
||||
// Step 5: Complete event validation (combines structure + signature + additional checks)
|
||||
int validation_result = nostr_validate_event(event);
|
||||
if (validation_result != NOSTR_SUCCESS) {
|
||||
snprintf(error_message, error_size, "invalid: complete event validation failed");
|
||||
return validation_result;
|
||||
}
|
||||
|
||||
// Step 4: Check for special event types and handle accordingly
|
||||
// Step 6: Check for special event types and handle accordingly
|
||||
cJSON* kind_obj = cJSON_GetObjectItem(event, "kind");
|
||||
cJSON* pubkey_obj = cJSON_GetObjectItem(event, "pubkey");
|
||||
cJSON* created_at_obj = cJSON_GetObjectItem(event, "created_at");
|
||||
@@ -2236,7 +2696,7 @@ int handle_event_message(cJSON* event, char* error_message, size_t error_size) {
|
||||
}
|
||||
}
|
||||
|
||||
// Step 5: Store event in database
|
||||
// Step 7: Store event in database
|
||||
if (store_event(event) == 0) {
|
||||
error_message[0] = '\0'; // Success - empty error message
|
||||
log_success("Event validated and stored successfully");
|
||||
@@ -2488,7 +2948,7 @@ int start_websocket_relay() {
|
||||
log_info("Starting libwebsockets-based Nostr relay server...");
|
||||
|
||||
memset(&info, 0, sizeof(info));
|
||||
info.port = DEFAULT_PORT;
|
||||
info.port = get_config_int("relay_port", DEFAULT_PORT);
|
||||
info.protocols = protocols;
|
||||
info.gid = -1;
|
||||
info.uid = -1;
|
||||
@@ -2514,7 +2974,9 @@ int start_websocket_relay() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
log_success("WebSocket relay started on ws://127.0.0.1:8888");
|
||||
char startup_msg[256];
|
||||
snprintf(startup_msg, sizeof(startup_msg), "WebSocket relay started on ws://127.0.0.1:%d", info.port);
|
||||
log_success(startup_msg);
|
||||
|
||||
// Main event loop with proper signal handling
|
||||
while (g_server_running) {
|
||||
@@ -2570,6 +3032,12 @@ int main(int argc, char* argv[]) {
|
||||
log_error("Invalid port number");
|
||||
return 1;
|
||||
}
|
||||
// Store port in configuration system
|
||||
char port_str[16];
|
||||
snprintf(port_str, sizeof(port_str), "%d", port);
|
||||
set_database_config("relay_port", port_str, "command_line");
|
||||
// Re-apply configuration to make sure global variables are updated
|
||||
apply_configuration_to_globals();
|
||||
} else {
|
||||
log_error("Port argument requires a value");
|
||||
return 1;
|
||||
@@ -2587,22 +3055,40 @@ int main(int argc, char* argv[]) {
|
||||
|
||||
printf(BLUE BOLD "=== C Nostr Relay Server ===" RESET "\n");
|
||||
|
||||
// Initialize database
|
||||
// Initialize database FIRST (required for configuration system)
|
||||
if (init_database() != 0) {
|
||||
log_error("Failed to initialize database");
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Initialize nostr library
|
||||
// Initialize nostr library BEFORE configuration system
|
||||
// (required for Nostr event generation in config files)
|
||||
if (nostr_init() != 0) {
|
||||
log_error("Failed to initialize nostr library");
|
||||
close_database();
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Initialize configuration system (loads file + database + applies to globals)
|
||||
if (init_configuration_system() != 0) {
|
||||
log_error("Failed to initialize configuration system");
|
||||
nostr_cleanup();
|
||||
close_database();
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Initialize NIP-11 relay information
|
||||
init_relay_info();
|
||||
|
||||
// Initialize NIP-13 PoW configuration
|
||||
init_pow_config();
|
||||
|
||||
// Initialize NIP-40 expiration configuration
|
||||
init_expiration_config();
|
||||
|
||||
// Update subscription manager configuration
|
||||
update_subscription_manager_config();
|
||||
|
||||
log_info("Starting relay server...");
|
||||
|
||||
// Start WebSocket Nostr relay server
|
||||
@@ -2610,6 +3096,7 @@ int main(int argc, char* argv[]) {
|
||||
|
||||
// Cleanup
|
||||
cleanup_relay_info();
|
||||
cleanup_configuration_system();
|
||||
nostr_cleanup();
|
||||
close_database();
|
||||
|
||||
|
||||
313 src/sql_schema.h Normal file
@@ -0,0 +1,313 @@
|
||||
/* Embedded SQL Schema for C Nostr Relay
|
||||
* Generated from db/schema.sql - Do not edit manually
|
||||
* Schema Version: 3
|
||||
*/
|
||||
#ifndef SQL_SCHEMA_H
|
||||
#define SQL_SCHEMA_H
|
||||
|
||||
/* Schema version constant */
|
||||
#define EMBEDDED_SCHEMA_VERSION "3"
|
||||
|
||||
/* Embedded SQL schema as C string literal */
|
||||
static const char* const EMBEDDED_SCHEMA_SQL =
|
||||
"-- C Nostr Relay Database Schema\n\
|
||||
-- SQLite schema for storing Nostr events with JSON tags support\n\
|
||||
\n\
|
||||
-- Schema version tracking\n\
|
||||
PRAGMA user_version = 3;\n\
|
||||
\n\
|
||||
-- Enable foreign key support\n\
|
||||
PRAGMA foreign_keys = ON;\n\
|
||||
\n\
|
||||
-- Optimize for performance\n\
|
||||
PRAGMA journal_mode = WAL;\n\
PRAGMA synchronous = NORMAL;\n\
PRAGMA cache_size = 10000;\n\
\n\
-- Core events table with hybrid single-table design\n\
CREATE TABLE events (\n\
    id TEXT PRIMARY KEY,  -- Nostr event ID (hex string)\n\
    pubkey TEXT NOT NULL,  -- Public key of event author (hex string)\n\
    created_at INTEGER NOT NULL,  -- Event creation timestamp (Unix timestamp)\n\
    kind INTEGER NOT NULL,  -- Event kind (0-65535)\n\
    event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),\n\
    content TEXT NOT NULL,  -- Event content (text content only)\n\
    sig TEXT NOT NULL,  -- Event signature (hex string)\n\
    tags JSON NOT NULL DEFAULT '[]',  -- Event tags as JSON array\n\
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))  -- When relay received event\n\
);\n\
\n\
-- Core performance indexes\n\
CREATE INDEX idx_events_pubkey ON events(pubkey);\n\
CREATE INDEX idx_events_kind ON events(kind);\n\
CREATE INDEX idx_events_created_at ON events(created_at DESC);\n\
CREATE INDEX idx_events_event_type ON events(event_type);\n\
\n\
-- Composite indexes for common query patterns\n\
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);\n\
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);\n\
CREATE INDEX idx_events_pubkey_kind ON events(pubkey, kind);\n\
\n\
-- Schema information table\n\
CREATE TABLE schema_info (\n\
    key TEXT PRIMARY KEY,\n\
    value TEXT NOT NULL,\n\
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))\n\
);\n\
\n\
-- Insert schema metadata\n\
INSERT INTO schema_info (key, value) VALUES\n\
    ('version', '3'),\n\
    ('description', 'Hybrid single-table Nostr relay schema with JSON tags and configuration management'),\n\
    ('created_at', strftime('%s', 'now'));\n\
\n\
-- Helper views for common queries\n\
CREATE VIEW recent_events AS\n\
SELECT id, pubkey, created_at, kind, event_type, content\n\
FROM events\n\
WHERE event_type != 'ephemeral'\n\
ORDER BY created_at DESC\n\
LIMIT 1000;\n\
\n\
CREATE VIEW event_stats AS\n\
SELECT\n\
    event_type,\n\
    COUNT(*) as count,\n\
    AVG(length(content)) as avg_content_length,\n\
    MIN(created_at) as earliest,\n\
    MAX(created_at) as latest\n\
FROM events\n\
GROUP BY event_type;\n\
\n\
-- Optimization: Trigger for automatic cleanup of ephemeral events older than 1 hour\n\
CREATE TRIGGER cleanup_ephemeral_events\n\
AFTER INSERT ON events\n\
WHEN NEW.event_type = 'ephemeral'\n\
BEGIN\n\
    DELETE FROM events\n\
    WHERE event_type = 'ephemeral'\n\
    AND first_seen < (strftime('%s', 'now') - 3600);\n\
END;\n\
\n\
-- Replaceable event handling trigger\n\
CREATE TRIGGER handle_replaceable_events\n\
AFTER INSERT ON events\n\
WHEN NEW.event_type = 'replaceable'\n\
BEGIN\n\
    DELETE FROM events\n\
    WHERE pubkey = NEW.pubkey\n\
    AND kind = NEW.kind\n\
    AND event_type = 'replaceable'\n\
    AND id != NEW.id;\n\
END;\n\
\n\
-- Persistent Subscriptions Logging Tables (Phase 2)\n\
-- Optional database logging for subscription analytics and debugging\n\
\n\
-- Subscription events log\n\
CREATE TABLE subscription_events (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    subscription_id TEXT NOT NULL,  -- Subscription ID from client\n\
    client_ip TEXT NOT NULL,  -- Client IP address\n\
    event_type TEXT NOT NULL CHECK (event_type IN ('created', 'closed', 'expired', 'disconnected')),\n\
    filter_json TEXT,  -- JSON representation of filters (for created events)\n\
    events_sent INTEGER DEFAULT 0,  -- Number of events sent to this subscription\n\
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    ended_at INTEGER,  -- When subscription ended (for closed/expired/disconnected)\n\
    duration INTEGER  -- Computed: ended_at - created_at\n\
);\n\
\n\
-- Subscription metrics summary\n\
CREATE TABLE subscription_metrics (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    date TEXT NOT NULL,  -- Date (YYYY-MM-DD)\n\
    total_created INTEGER DEFAULT 0,  -- Total subscriptions created\n\
    total_closed INTEGER DEFAULT 0,  -- Total subscriptions closed\n\
    total_events_broadcast INTEGER DEFAULT 0,  -- Total events broadcast\n\
    avg_duration REAL DEFAULT 0,  -- Average subscription duration\n\
    peak_concurrent INTEGER DEFAULT 0,  -- Peak concurrent subscriptions\n\
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    UNIQUE(date)\n\
);\n\
\n\
-- Event broadcasting log (optional, for detailed analytics)\n\
CREATE TABLE event_broadcasts (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    event_id TEXT NOT NULL,  -- Event ID that was broadcast\n\
    subscription_id TEXT NOT NULL,  -- Subscription that received it\n\
    client_ip TEXT NOT NULL,  -- Client IP\n\
    broadcast_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    FOREIGN KEY (event_id) REFERENCES events(id)\n\
);\n\
\n\
-- Indexes for subscription logging performance\n\
CREATE INDEX idx_subscription_events_id ON subscription_events(subscription_id);\n\
CREATE INDEX idx_subscription_events_type ON subscription_events(event_type);\n\
CREATE INDEX idx_subscription_events_created ON subscription_events(created_at DESC);\n\
CREATE INDEX idx_subscription_events_client ON subscription_events(client_ip);\n\
\n\
CREATE INDEX idx_subscription_metrics_date ON subscription_metrics(date DESC);\n\
\n\
CREATE INDEX idx_event_broadcasts_event ON event_broadcasts(event_id);\n\
CREATE INDEX idx_event_broadcasts_sub ON event_broadcasts(subscription_id);\n\
CREATE INDEX idx_event_broadcasts_time ON event_broadcasts(broadcast_at DESC);\n\
\n\
-- Trigger to update subscription duration when ended\n\
CREATE TRIGGER update_subscription_duration\n\
AFTER UPDATE OF ended_at ON subscription_events\n\
WHEN NEW.ended_at IS NOT NULL AND OLD.ended_at IS NULL\n\
BEGIN\n\
    UPDATE subscription_events\n\
    SET duration = NEW.ended_at - NEW.created_at\n\
    WHERE id = NEW.id;\n\
END;\n\
\n\
-- View for subscription analytics\n\
CREATE VIEW subscription_analytics AS\n\
SELECT\n\
    date(created_at, 'unixepoch') as date,\n\
    COUNT(*) as subscriptions_created,\n\
    COUNT(CASE WHEN ended_at IS NOT NULL THEN 1 END) as subscriptions_ended,\n\
    AVG(CASE WHEN duration IS NOT NULL THEN duration END) as avg_duration_seconds,\n\
    MAX(events_sent) as max_events_sent,\n\
    AVG(events_sent) as avg_events_sent,\n\
    COUNT(DISTINCT client_ip) as unique_clients\n\
FROM subscription_events\n\
GROUP BY date(created_at, 'unixepoch')\n\
ORDER BY date DESC;\n\
\n\
-- View for current active subscriptions (from log perspective)\n\
CREATE VIEW active_subscriptions_log AS\n\
SELECT\n\
    subscription_id,\n\
    client_ip,\n\
    filter_json,\n\
    events_sent,\n\
    created_at,\n\
    (strftime('%s', 'now') - created_at) as duration_seconds\n\
FROM subscription_events\n\
WHERE event_type = 'created'\n\
AND subscription_id NOT IN (\n\
    SELECT subscription_id FROM subscription_events\n\
    WHERE event_type IN ('closed', 'expired', 'disconnected')\n\
);\n\
\n\
-- ================================\n\
-- CONFIGURATION MANAGEMENT TABLES\n\
-- ================================\n\
\n\
-- Core server configuration table\n\
CREATE TABLE server_config (\n\
    key TEXT PRIMARY KEY,  -- Configuration key (unique identifier)\n\
    value TEXT NOT NULL,  -- Configuration value (stored as string)\n\
    description TEXT,  -- Human-readable description\n\
    config_type TEXT DEFAULT 'user' CHECK (config_type IN ('system', 'user', 'runtime')),\n\
    data_type TEXT DEFAULT 'string' CHECK (data_type IN ('string', 'integer', 'boolean', 'json')),\n\
    validation_rules TEXT,  -- JSON validation rules (optional)\n\
    is_sensitive INTEGER DEFAULT 0,  -- 1 if value should be masked in logs\n\
    requires_restart INTEGER DEFAULT 0,  -- 1 if change requires server restart\n\
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))\n\
);\n\
\n\
-- Configuration change history table\n\
CREATE TABLE config_history (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    config_key TEXT NOT NULL,  -- Key that was changed\n\
    old_value TEXT,  -- Previous value (NULL for new keys)\n\
    new_value TEXT NOT NULL,  -- New value\n\
    changed_by TEXT DEFAULT 'system',  -- Who made the change (system/admin/user)\n\
    change_reason TEXT,  -- Optional reason for change\n\
    changed_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    FOREIGN KEY (config_key) REFERENCES server_config(key)\n\
);\n\
\n\
-- Configuration validation errors log\n\
CREATE TABLE config_validation_log (\n\
    id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
    config_key TEXT NOT NULL,\n\
    attempted_value TEXT,\n\
    validation_error TEXT NOT NULL,\n\
    error_source TEXT DEFAULT 'validation',  -- validation/parsing/constraint\n\
    attempted_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))\n\
);\n\
\n\
-- Cache for file-based configuration events\n\
CREATE TABLE config_file_cache (\n\
    file_path TEXT PRIMARY KEY,  -- Full path to config file\n\
    file_hash TEXT NOT NULL,  -- SHA256 hash of file content\n\
    event_id TEXT,  -- Nostr event ID from file\n\
    event_pubkey TEXT,  -- Admin pubkey that signed event\n\
    loaded_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
    validation_status TEXT CHECK (validation_status IN ('valid', 'invalid', 'unverified')),\n\
    validation_error TEXT  -- Error details if invalid\n\
);\n\
\n\
-- Performance indexes for configuration tables\n\
CREATE INDEX idx_server_config_type ON server_config(config_type);\n\
CREATE INDEX idx_server_config_updated ON server_config(updated_at DESC);\n\
CREATE INDEX idx_config_history_key ON config_history(config_key);\n\
CREATE INDEX idx_config_history_time ON config_history(changed_at DESC);\n\
CREATE INDEX idx_config_validation_key ON config_validation_log(config_key);\n\
CREATE INDEX idx_config_validation_time ON config_validation_log(attempted_at DESC);\n\
\n\
-- Trigger to update timestamp on configuration changes\n\
CREATE TRIGGER update_config_timestamp\n\
AFTER UPDATE ON server_config\n\
BEGIN\n\
    UPDATE server_config SET updated_at = strftime('%s', 'now') WHERE key = NEW.key;\n\
END;\n\
\n\
-- Trigger to log configuration changes to history\n\
CREATE TRIGGER log_config_changes\n\
AFTER UPDATE ON server_config\n\
WHEN OLD.value != NEW.value\n\
BEGIN\n\
    INSERT INTO config_history (config_key, old_value, new_value, changed_by, change_reason)\n\
    VALUES (NEW.key, OLD.value, NEW.value, 'system', 'configuration update');\n\
END;\n\
\n\
-- Active Configuration View\n\
CREATE VIEW active_config AS\n\
SELECT\n\
    key,\n\
    value,\n\
    description,\n\
    config_type,\n\
    data_type,\n\
    requires_restart,\n\
    updated_at\n\
FROM server_config\n\
WHERE config_type IN ('system', 'user')\n\
ORDER BY config_type, key;\n\
\n\
-- Runtime Statistics View\n\
CREATE VIEW runtime_stats AS\n\
SELECT\n\
    key,\n\
    value,\n\
    description,\n\
    updated_at\n\
FROM server_config\n\
WHERE config_type = 'runtime'\n\
ORDER BY key;\n\
\n\
-- Configuration Change Summary\n\
CREATE VIEW recent_config_changes AS\n\
SELECT\n\
    ch.config_key,\n\
    sc.description,\n\
    ch.old_value,\n\
    ch.new_value,\n\
    ch.changed_by,\n\
    ch.change_reason,\n\
    ch.changed_at\n\
FROM config_history ch\n\
JOIN server_config sc ON ch.config_key = sc.key\n\
ORDER BY ch.changed_at DESC\n\
LIMIT 50;\n\
\n\
-- Runtime Statistics (initialized by server on startup)\n\
-- These will be populated when configuration system initializes";

#endif /* SQL_SCHEMA_H */
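The schema embedded above classifies every stored event into one of four event_type buckets and layers summary views (event_stats, active_config, recent_config_changes) over the raw tables. A quick way to sanity-check a populated database is to query those views with the sqlite3 CLI; this is only an illustrative sketch, and the database filename relay.db is an assumption, not something fixed by this change:

    # Assumed database path; adjust to wherever the relay writes its SQLite file.
    sqlite3 relay.db "SELECT * FROM event_stats;"            # counts and sizes per event_type
    sqlite3 relay.db "SELECT * FROM active_config;"          # effective system/user configuration keys
    sqlite3 relay.db "SELECT * FROM recent_config_changes;"  # last 50 configuration edits

Because the cleanup_ephemeral_events and handle_replaceable_events triggers prune rows on insert, event_stats reflects only what the relay currently retains, not everything it has ever received.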
432
tests/11_nip_information.sh
Executable file
432
tests/11_nip_information.sh
Executable file
@@ -0,0 +1,432 @@
#!/bin/bash

# NIP-11 Relay Information Document Test
# Tests HTTP endpoint for relay information according to NIP-11 specification

set -e  # Exit on any error

# Color constants
RED='\033[31m'
GREEN='\033[32m'
YELLOW='\033[33m'
BLUE='\033[34m'
BOLD='\033[1m'
RESET='\033[0m'

# Test configuration
RELAY_URL="http://127.0.0.1:8888"
RELAY_WS_URL="ws://127.0.0.1:8888"

# Print functions
print_header() {
    echo -e "${BLUE}${BOLD}=== $1 ===${RESET}"
}

print_step() {
    echo -e "${YELLOW}[STEP]${RESET} $1"
}

print_success() {
    echo -e "${GREEN}✓${RESET} $1"
}

print_error() {
    echo -e "${RED}✗${RESET} $1"
}

print_info() {
    echo -e "${BLUE}[INFO]${RESET} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${RESET} $1"
}

# Test functions
test_http_with_correct_header() {
    print_step "Testing HTTP request with correct Accept header"

    local response=""
    local http_code=""

    if command -v curl &> /dev/null; then
        # Use curl to test with proper Accept header
        response=$(curl -s -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
        http_code=$(curl -s -o /dev/null -w "%{http_code}" -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "000")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ "$http_code" == "200" ]]; then
        print_success "HTTP 200 OK received with correct Accept header"

        # Validate JSON response
        if echo "$response" | jq . >/dev/null 2>&1; then
            print_success "Response is valid JSON"
            return 0
        else
            print_error "Response is not valid JSON"
            return 1
        fi
    else
        print_error "Expected HTTP 200, got HTTP $http_code"
        return 1
    fi
}

test_http_without_header() {
    print_step "Testing HTTP request without Accept header (should return 406)"

    local http_code=""

    if command -v curl &> /dev/null; then
        http_code=$(curl -s -o /dev/null -w "%{http_code}" "$RELAY_URL/" 2>/dev/null || echo "000")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ "$http_code" == "406" ]]; then
        print_success "HTTP 406 Not Acceptable received without proper Accept header"
        return 0
    else
        print_error "Expected HTTP 406, got HTTP $http_code"
        return 1
    fi
}

test_http_with_wrong_header() {
    print_step "Testing HTTP request with wrong Accept header (should return 406)"

    local http_code=""

    if command -v curl &> /dev/null; then
        http_code=$(curl -s -o /dev/null -w "%{http_code}" -H "Accept: application/json" "$RELAY_URL/" 2>/dev/null || echo "000")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ "$http_code" == "406" ]]; then
        print_success "HTTP 406 Not Acceptable received with wrong Accept header"
        return 0
    else
        print_error "Expected HTTP 406, got HTTP $http_code"
        return 1
    fi
}

test_cors_headers() {
    print_step "Testing CORS headers presence"

    local headers=""

    if command -v curl &> /dev/null; then
        headers=$(curl -s -I -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    local cors_origin_found=false
    local cors_headers_found=false
    local cors_methods_found=false

    if echo "$headers" | grep -qi "access-control-allow-origin"; then
        cors_origin_found=true
        print_success "Access-Control-Allow-Origin header found"
    fi

    if echo "$headers" | grep -qi "access-control-allow-headers"; then
        cors_headers_found=true
        print_success "Access-Control-Allow-Headers header found"
    fi

    if echo "$headers" | grep -qi "access-control-allow-methods"; then
        cors_methods_found=true
        print_success "Access-Control-Allow-Methods header found"
    fi

    if [[ "$cors_origin_found" == true && "$cors_headers_found" == true && "$cors_methods_found" == true ]]; then
        print_success "All required CORS headers present"
        return 0
    else
        print_error "Missing CORS headers"
        return 1
    fi
}

test_json_structure() {
    print_step "Testing NIP-11 JSON structure and required fields"

    local response=""

    if command -v curl &> /dev/null; then
        response=$(curl -s -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ -z "$response" ]]; then
        print_error "Empty response received"
        return 1
    fi

    # Validate JSON structure using jq
    if ! echo "$response" | jq . >/dev/null 2>&1; then
        print_error "Response is not valid JSON"
        return 1
    fi

    print_success "Valid JSON structure confirmed"

    # Check for required fields
    local required_checks=0
    local total_checks=0

    # Test name field
    ((total_checks++))
    if echo "$response" | jq -e '.name' >/dev/null 2>&1; then
        local name=$(echo "$response" | jq -r '.name')
        print_success "Name field present: $name"
        ((required_checks++))
    else
        print_warning "Name field missing (optional)"
    fi

    # Test supported_nips field (required)
    ((total_checks++))
    if echo "$response" | jq -e '.supported_nips' >/dev/null 2>&1; then
        local nips=$(echo "$response" | jq -r '.supported_nips | @json')
        print_success "Supported NIPs field present: $nips"
        ((required_checks++))

        # Verify NIP-11 is in the supported list
        if echo "$response" | jq -e '.supported_nips | contains([11])' >/dev/null 2>&1; then
            print_success "NIP-11 correctly listed in supported NIPs"
        else
            print_warning "NIP-11 not found in supported NIPs list"
        fi
    else
        print_error "Supported NIPs field missing (should be present)"
    fi

    # Test software field
    ((total_checks++))
    if echo "$response" | jq -e '.software' >/dev/null 2>&1; then
        local software=$(echo "$response" | jq -r '.software')
        print_success "Software field present: $software"
        ((required_checks++))
    else
        print_warning "Software field missing (optional)"
    fi

    # Test version field
    ((total_checks++))
    if echo "$response" | jq -e '.version' >/dev/null 2>&1; then
        local version=$(echo "$response" | jq -r '.version')
        print_success "Version field present: $version"
        ((required_checks++))
    else
        print_warning "Version field missing (optional)"
    fi

    # Test limitation object
    ((total_checks++))
    if echo "$response" | jq -e '.limitation' >/dev/null 2>&1; then
        print_success "Limitation object present"
        ((required_checks++))

        # Check some common limitation fields
        if echo "$response" | jq -e '.limitation.max_message_length' >/dev/null 2>&1; then
            local max_msg=$(echo "$response" | jq -r '.limitation.max_message_length')
            print_info " max_message_length: $max_msg"
        fi

        if echo "$response" | jq -e '.limitation.max_subscriptions' >/dev/null 2>&1; then
            local max_subs=$(echo "$response" | jq -r '.limitation.max_subscriptions')
            print_info " max_subscriptions: $max_subs"
        fi
    else
        print_warning "Limitation object missing (recommended)"
    fi

    # Test description field
    if echo "$response" | jq -e '.description' >/dev/null 2>&1; then
        local description=$(echo "$response" | jq -r '.description')
        print_success "Description field present: ${description:0:50}..."
    else
        print_warning "Description field missing (optional)"
    fi

    print_info "JSON structure validation: $required_checks/$total_checks core fields present"
    return 0
}

test_content_type_header() {
    print_step "Testing Content-Type header"

    local headers=""

    if command -v curl &> /dev/null; then
        headers=$(curl -s -I -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if echo "$headers" | grep -qi "content-type.*application/nostr+json"; then
        print_success "Correct Content-Type header: application/nostr+json"
        return 0
    else
        print_warning "Content-Type header not exactly 'application/nostr+json'"
        echo "$headers" | grep -i "content-type" | head -1
        return 1
    fi
}

test_non_root_path() {
    print_step "Testing non-root path (should return 404)"

    local http_code=""

    if command -v curl &> /dev/null; then
        http_code=$(curl -s -o /dev/null -w "%{http_code}" -H "Accept: application/nostr+json" "$RELAY_URL/nonexistent" 2>/dev/null || echo "000")
    else
        print_error "curl command not found - required for NIP-11 testing"
        return 1
    fi

    if [[ "$http_code" == "404" ]]; then
        print_success "HTTP 404 Not Found received for non-root path"
        return 0
    else
        print_error "Expected HTTP 404 for non-root path, got HTTP $http_code"
        return 1
    fi
}

test_websocket_still_works() {
    print_step "Testing that WebSocket functionality still works on same port"

    if ! command -v websocat &> /dev/null; then
        print_warning "websocat not available - skipping WebSocket test"
        return 0
    fi

    # Try to connect to WebSocket and send a simple REQ
    local response=""
    response=$(echo '["REQ","test_ws_nip11",{}]' | timeout 3s websocat "$RELAY_WS_URL" 2>/dev/null || echo "Connection failed")

    if [[ "$response" == *"Connection failed"* ]]; then
        print_error "WebSocket connection failed"
        return 1
    elif [[ "$response" == *"EOSE"* ]]; then
        print_success "WebSocket still functional - received EOSE response"
        return 0
    else
        print_warning "WebSocket response unclear, but connection succeeded"
        return 0
    fi
}

# Main test function
run_nip11_tests() {
    print_header "NIP-11 Relay Information Document Tests"

    # Check dependencies
    print_step "Checking dependencies..."
    if ! command -v curl &> /dev/null; then
        print_error "curl command not found - required for NIP-11 HTTP testing"
        return 1
    fi
    if ! command -v jq &> /dev/null; then
        print_error "jq command not found - required for JSON validation"
        return 1
    fi
    print_success "All dependencies found"

    print_header "PHASE 1: Basic HTTP Functionality"

    # Test 1: Correct Accept header
    if ! test_http_with_correct_header; then
        return 1
    fi

    # Test 2: Missing Accept header
    if ! test_http_without_header; then
        return 1
    fi

    # Test 3: Wrong Accept header
    if ! test_http_with_wrong_header; then
        return 1
    fi

    print_header "PHASE 2: HTTP Headers Validation"

    # Test 4: CORS headers
    if ! test_cors_headers; then
        return 1
    fi

    # Test 5: Content-Type header
    if ! test_content_type_header; then
        return 1
    fi

    print_header "PHASE 3: JSON Structure Validation"

    # Test 6: JSON structure and required fields
    if ! test_json_structure; then
        return 1
    fi

    print_header "PHASE 4: Additional Endpoint Behavior"

    # Test 7: Non-root paths
    if ! test_non_root_path; then
        return 1
    fi

    # Test 8: WebSocket compatibility
    if ! test_websocket_still_works; then
        return 1
    fi

    print_header "PHASE 5: NIP-11 Compliance Summary"

    # Final validation - get the actual response and display it
    print_step "Displaying complete NIP-11 response..."
    local response=""
    if command -v curl &> /dev/null; then
        response=$(curl -s -H "Accept: application/nostr+json" "$RELAY_URL/" 2>/dev/null || echo "")
        if [[ -n "$response" ]] && echo "$response" | jq . >/dev/null 2>&1; then
            echo "$response" | jq .
        else
            print_error "Failed to retrieve or parse final response"
        fi
    fi

    print_success "All NIP-11 tests passed!"
    return 0
}

# Main execution
print_header "Starting NIP-11 Relay Information Document Test Suite"
echo

if run_nip11_tests; then
    echo
    print_success "All NIP-11 tests completed successfully!"
    print_info "The C-Relay NIP-11 implementation is fully compliant"
    print_info "✅ HTTP endpoint, Accept header validation, CORS, and JSON structure all working"
    echo
    exit 0
else
    echo
    print_error "Some NIP-11 tests failed"
    exit 1
fi
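The suite above automates the NIP-11 checks, but the same endpoint can be spot-checked by hand. A minimal sketch, assuming the relay is listening on 127.0.0.1:8888 as configured in the test:

    # Request the relay information document and pretty-print it.
    curl -s -H "Accept: application/nostr+json" http://127.0.0.1:8888/ | jq .

    # Confirm the 406 behaviour for a missing Accept header.
    curl -s -o /dev/null -w "%{http_code}\n" http://127.0.0.1:8888/

The second command should print 406 if the Accept-header validation exercised by the tests is working.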
384
tests/13_nip_test.sh
Executable file
384
tests/13_nip_test.sh
Executable file
@@ -0,0 +1,384 @@
#!/bin/bash

# NIP-13 Proof of Work Validation Test Suite for C Nostr Relay
# Tests PoW validation in the relay's event processing pipeline
# Based on nostr_core_lib/tests/nip13_test.c

set -e  # Exit on error

# Color constants
RED='\033[31m'
GREEN='\033[32m'
YELLOW='\033[33m'
BLUE='\033[34m'
BOLD='\033[1m'
RESET='\033[0m'

# Test configuration
RELAY_URL="ws://127.0.0.1:8888"
HTTP_URL="http://127.0.0.1:8888"
TEST_COUNT=0
PASSED_COUNT=0
FAILED_COUNT=0

# Test results tracking
declare -a TEST_RESULTS=()

print_info() {
    echo -e "${BLUE}[INFO]${RESET} $1"
}

print_success() {
    echo -e "${GREEN}${BOLD}[SUCCESS]${RESET} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${RESET} $1"
}

print_error() {
    echo -e "${RED}${BOLD}[ERROR]${RESET} $1"
}

print_test_header() {
    TEST_COUNT=$((TEST_COUNT + 1))
    echo ""
    echo -e "${BOLD}=== TEST $TEST_COUNT: $1 ===${RESET}"
}

record_test_result() {
    local test_name="$1"
    local result="$2"
    local details="$3"

    TEST_RESULTS+=("$test_name|$result|$details")

    if [ "$result" = "PASS" ]; then
        PASSED_COUNT=$((PASSED_COUNT + 1))
        print_success "PASS: $test_name"
    else
        FAILED_COUNT=$((FAILED_COUNT + 1))
        print_error "FAIL: $test_name"
        if [ -n "$details" ]; then
            echo " Details: $details"
        fi
    fi
}

# Check if relay is running
check_relay_running() {
    print_info "Checking if relay is running..."

    if ! curl -s -H "Accept: application/nostr+json" "$HTTP_URL/" >/dev/null 2>&1; then
        print_error "Relay is not running or not accessible at $HTTP_URL"
        print_info "Please start the relay with: ./make_and_restart_relay.sh"
        exit 1
    fi

    print_success "Relay is running and accessible"
}

# Test NIP-11 relay information includes NIP-13
test_nip11_pow_support() {
    print_test_header "NIP-11 PoW Support Advertisement"

    print_info "Fetching relay information..."
    RELAY_INFO=$(curl -s -H "Accept: application/nostr+json" "$HTTP_URL/")

    echo "Relay Info Response:"
    echo "$RELAY_INFO" | jq '.'
    echo ""

    # Check if NIP-13 is in supported_nips
    if echo "$RELAY_INFO" | jq -e '.supported_nips | index(13)' >/dev/null 2>&1; then
        print_success "✓ NIP-13 found in supported_nips array"
        NIP13_SUPPORTED=true
    else
        print_error "✗ NIP-13 not found in supported_nips array"
        NIP13_SUPPORTED=false
    fi

    # Check if min_pow_difficulty is present
    MIN_POW_DIFF=$(echo "$RELAY_INFO" | jq -r '.limitation.min_pow_difficulty // "missing"')
    if [ "$MIN_POW_DIFF" != "missing" ]; then
        print_success "✓ min_pow_difficulty found: $MIN_POW_DIFF"
        MIN_POW_PRESENT=true
    else
        print_error "✗ min_pow_difficulty not found in limitations"
        MIN_POW_PRESENT=false
    fi

    if [ "$NIP13_SUPPORTED" = true ] && [ "$MIN_POW_PRESENT" = true ]; then
        record_test_result "NIP-11 PoW Support Advertisement" "PASS" "NIP-13 supported, min_pow_difficulty=$MIN_POW_DIFF"
        return 0
    else
        record_test_result "NIP-11 PoW Support Advertisement" "FAIL" "Missing NIP-13 support or min_pow_difficulty"
        return 1
    fi
}

# Test event submission without PoW (should be accepted when min_difficulty=0)
test_event_without_pow() {
    print_test_header "Event Submission Without PoW (min_difficulty=0)"

    # Create a simple event without PoW
    print_info "Generating test event without PoW..."

    # Use nak to generate a simple event
    if ! command -v nak &> /dev/null; then
        print_warning "nak command not found - skipping PoW generation tests"
        record_test_result "Event Submission Without PoW" "SKIP" "nak not available"
        return 0
    fi

    # Generate event without PoW using direct private key
    PRIVATE_KEY="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"
    EVENT_JSON=$(nak event --sec "$PRIVATE_KEY" -c "Test event without PoW" --ts $(date +%s))

    print_info "Generated event:"
    echo "$EVENT_JSON" | jq '.'
    echo ""

    # Send event to relay via WebSocket using websocat
    print_info "Sending event to relay..."

    # Create EVENT message in Nostr format
    EVENT_MESSAGE="[\"EVENT\",$EVENT_JSON]"

    # Send to relay and capture response
    if command -v websocat &> /dev/null; then
        RESPONSE=$(echo "$EVENT_MESSAGE" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")

        print_info "Relay response: $RESPONSE"

        if [[ "$RESPONSE" == *"Connection failed"* ]]; then
            print_error "✗ Failed to connect to relay"
            record_test_result "Event Submission Without PoW" "FAIL" "Connection failed"
            return 1
        elif [[ "$RESPONSE" == *"true"* ]]; then
            print_success "✓ Event without PoW accepted (expected when min_difficulty=0)"
            record_test_result "Event Submission Without PoW" "PASS" "Event accepted as expected"
            return 0
        else
            print_error "✗ Event without PoW rejected (unexpected when min_difficulty=0)"
            record_test_result "Event Submission Without PoW" "FAIL" "Event rejected: $RESPONSE"
            return 1
        fi
    else
        print_error "websocat not found - required for testing"
        record_test_result "Event Submission Without PoW" "SKIP" "websocat not available"
        return 0
    fi
}

# Test event with valid PoW
test_event_with_pow() {
    print_test_header "Event Submission With Valid PoW"

    if ! command -v nak &> /dev/null; then
        print_warning "nak command not found - skipping PoW validation tests"
        record_test_result "Event Submission With Valid PoW" "SKIP" "nak not available"
        return 0
    fi

    print_info "Generating event with PoW difficulty 8..."

    # Generate event with PoW (difficulty 8 for reasonable test time) using direct private key
    PRIVATE_KEY="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"
    POW_EVENT_JSON=$(nak event --sec "$PRIVATE_KEY" -c "Test event with PoW difficulty 8" --pow 8 --ts $(date +%s))

    if [ -z "$POW_EVENT_JSON" ]; then
        print_error "Failed to generate PoW event"
        record_test_result "Event Submission With Valid PoW" "FAIL" "PoW event generation failed"
        return 1
    fi

    print_info "Generated PoW event:"
    echo "$POW_EVENT_JSON" | jq '.'
    echo ""

    # Extract nonce info for verification
    NONCE_TAG=$(echo "$POW_EVENT_JSON" | jq -r '.tags[] | select(.[0] == "nonce") | .[1]' 2>/dev/null || echo "")
    TARGET_DIFF=$(echo "$POW_EVENT_JSON" | jq -r '.tags[] | select(.[0] == "nonce") | .[2]' 2>/dev/null || echo "")

    if [ -n "$NONCE_TAG" ] && [ -n "$TARGET_DIFF" ]; then
        print_info "PoW details: nonce=$NONCE_TAG, target_difficulty=$TARGET_DIFF"
    fi

    # Send event to relay via WebSocket using websocat
    print_info "Sending PoW event to relay..."

    # Create EVENT message in Nostr format
    POW_EVENT_MESSAGE="[\"EVENT\",$POW_EVENT_JSON]"

    # Send to relay and capture response
    if command -v websocat &> /dev/null; then
        RESPONSE=$(echo "$POW_EVENT_MESSAGE" | timeout 10s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")

        print_info "Relay response: $RESPONSE"

        if [[ "$RESPONSE" == *"Connection failed"* ]]; then
            print_error "✗ Failed to connect to relay"
            record_test_result "Event Submission With Valid PoW" "FAIL" "Connection failed"
            return 1
        elif [[ "$RESPONSE" == *"true"* ]]; then
            print_success "✓ Event with valid PoW accepted"
            record_test_result "Event Submission With Valid PoW" "PASS" "PoW event accepted"
            return 0
        else
            print_error "✗ Event with valid PoW rejected"
            record_test_result "Event Submission With Valid PoW" "FAIL" "PoW event rejected: $RESPONSE"
            return 1
        fi
    else
        print_error "websocat not found - required for testing"
        record_test_result "Event Submission With Valid PoW" "SKIP" "websocat not available"
        return 0
    fi
}

# Test relay configuration with environment variables
test_pow_configuration() {
    print_test_header "PoW Configuration Via Environment Variables"

    print_info "Testing different PoW configurations requires relay restart"
    print_info "Current configuration from logs:"

    if [ -f "relay.log" ]; then
        grep "PoW Configuration:" relay.log | tail -1
    else
        print_warning "No relay.log found"
    fi

    # Test current configuration values
    RELAY_INFO=$(curl -s -H "Accept: application/nostr+json" "$HTTP_URL/")
    MIN_POW_DIFF=$(echo "$RELAY_INFO" | jq -r '.limitation.min_pow_difficulty')

    print_info "Current min_pow_difficulty from NIP-11: $MIN_POW_DIFF"

    # For now, just verify the configuration is readable
    if [ "$MIN_POW_DIFF" != "null" ] && [ "$MIN_POW_DIFF" != "missing" ]; then
        print_success "✓ PoW configuration is accessible via NIP-11"
        record_test_result "PoW Configuration Via Environment Variables" "PASS" "min_pow_difficulty=$MIN_POW_DIFF"
        return 0
    else
        print_error "✗ PoW configuration not accessible"
        record_test_result "PoW Configuration Via Environment Variables" "FAIL" "Cannot read min_pow_difficulty"
        return 1
    fi
}

# Test NIP-13 reference event validation
test_nip13_reference_event() {
    print_test_header "NIP-13 Reference Event Validation"

    # This is the official NIP-13 reference event
    NIP13_REF_EVENT='{"id":"000006d8c378af1779d2feebc7603a125d99eca0ccf1085959b307f64e5dd358","pubkey":"a48380f4cfcc1ad5378294fcac36439770f9c878dd880ffa94bb74ea54a6f243","created_at":1651794653,"kind":1,"tags":[["nonce","776797","20"]],"content":"It'\''s just me mining my own business","sig":"284622fc0a3f4f1303455d5175f7ba962a3300d136085b9566801bc2e0699de0c7e31e44c81fb40ad9049173742e904713c3594a1da0fc5d2382a25c11aba977"}'

    print_info "Testing NIP-13 reference event from specification:"
    echo "$NIP13_REF_EVENT" | jq '.'
    echo ""

    # Send reference event to relay via WebSocket using websocat
    print_info "Sending NIP-13 reference event to relay..."

    # Create EVENT message in Nostr format
    REF_EVENT_MESSAGE="[\"EVENT\",$NIP13_REF_EVENT]"

    # Send to relay and capture response
    if command -v websocat &> /dev/null; then
        RESPONSE=$(echo "$REF_EVENT_MESSAGE" | timeout 10s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")

        print_info "Relay response: $RESPONSE"

        if [[ "$RESPONSE" == *"Connection failed"* ]] || [[ -z "$RESPONSE" ]]; then
            print_error "✗ Failed to connect to relay or no response"
            record_test_result "NIP-13 Reference Event Validation" "FAIL" "Connection failed or timeout"
            return 1
        elif [[ "$RESPONSE" == *"true"* ]]; then
            print_success "✓ NIP-13 reference event accepted"
            record_test_result "NIP-13 Reference Event Validation" "PASS" "Reference event accepted"
            return 0
        else
            print_error "✗ NIP-13 reference event rejected"
            record_test_result "NIP-13 Reference Event Validation" "FAIL" "Reference event rejected: $RESPONSE"
            return 1
        fi
    else
        print_error "websocat not found - required for testing"
        record_test_result "NIP-13 Reference Event Validation" "SKIP" "websocat not available"
        return 0
    fi
}

# Print test summary
print_test_summary() {
    echo ""
    echo -e "${BOLD}=== TEST SUMMARY ===${RESET}"
    echo "Total tests run: $TEST_COUNT"
    echo -e "${GREEN}Passed: $PASSED_COUNT${RESET}"
    echo -e "${RED}Failed: $FAILED_COUNT${RESET}"

    if [ $FAILED_COUNT -gt 0 ]; then
        echo ""
        echo -e "${RED}${BOLD}Failed tests:${RESET}"
        for result in "${TEST_RESULTS[@]}"; do
            IFS='|' read -r name status details <<< "$result"
            if [ "$status" = "FAIL" ]; then
                echo -e " ${RED}✗ $name${RESET}"
                if [ -n "$details" ]; then
                    echo " $details"
                fi
            fi
        done
    fi

    echo ""
    if [ $FAILED_COUNT -eq 0 ]; then
        echo -e "${GREEN}${BOLD}🎉 ALL TESTS PASSED!${RESET}"
        echo -e "${GREEN}✅ NIP-13 PoW validation is working correctly in the relay${RESET}"
        return 0
    else
        echo -e "${RED}${BOLD}❌ SOME TESTS FAILED${RESET}"
        echo "Please review the output above and check relay logs for more details."
        return 1
    fi
}

# Main test execution
main() {
    echo -e "${BOLD}=== NIP-13 Proof of Work Relay Test Suite ===${RESET}"
    echo "Testing NIP-13 PoW validation in the C Nostr Relay"
    echo "Relay URL: $RELAY_URL"
    echo ""

    # Check prerequisites
    if ! command -v curl &> /dev/null; then
        print_error "curl is required but not installed"
        exit 1
    fi

    if ! command -v jq &> /dev/null; then
        print_error "jq is required but not installed"
        exit 1
    fi

    if ! command -v websocat &> /dev/null; then
        print_warning "websocat not found - WebSocket tests will be skipped"
    fi

    # Run tests
    check_relay_running
    test_nip11_pow_support
    test_event_without_pow
    test_event_with_pow
    test_pow_configuration
    test_nip13_reference_event

    # Print summary
    print_test_summary
    exit $?
}

# Run main function
main "$@"
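As a manual counterpart to the PoW tests above, a single event can be mined with nak and pushed to the relay over WebSocket, using the same flags and endpoint the suite relies on. A rough sketch; the throwaway secret key below is hypothetical and only for illustration:

    # Mine an event with 8 leading zero bits of PoW and wrap it in an EVENT message.
    EVENT=$(nak event --sec 0000000000000000000000000000000000000000000000000000000000000001 \
        -c "manual PoW check" --pow 8)
    echo "[\"EVENT\",$EVENT]" | timeout 5s websocat ws://127.0.0.1:8888

The relay's OK response should contain true when the mined difficulty meets the min_pow_difficulty value advertised in the NIP-11 document.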
539
tests/40_nip_test.sh
Executable file
539
tests/40_nip_test.sh
Executable file
@@ -0,0 +1,539 @@
#!/bin/bash

# NIP-40 Expiration Timestamp Test Suite for C Nostr Relay
# Tests expiration timestamp handling in the relay's event processing pipeline

set -e  # Exit on error

# Color constants
RED='\033[31m'
GREEN='\033[32m'
YELLOW='\033[33m'
BLUE='\033[34m'
BOLD='\033[1m'
RESET='\033[0m'

# Test configuration
RELAY_URL="ws://127.0.0.1:8888"
HTTP_URL="http://127.0.0.1:8888"
TEST_COUNT=0
PASSED_COUNT=0
FAILED_COUNT=0

# Test results tracking
declare -a TEST_RESULTS=()

print_info() {
    echo -e "${BLUE}[INFO]${RESET} $1"
}

print_success() {
    echo -e "${GREEN}${BOLD}[SUCCESS]${RESET} $1"
}

print_warning() {
    echo -e "${YELLOW}[WARNING]${RESET} $1"
}

print_error() {
    echo -e "${RED}${BOLD}[ERROR]${RESET} $1"
}

print_test_header() {
    TEST_COUNT=$((TEST_COUNT + 1))
    echo ""
    echo -e "${BOLD}=== TEST $TEST_COUNT: $1 ===${RESET}"
}

record_test_result() {
    local test_name="$1"
    local result="$2"
    local details="$3"

    TEST_RESULTS+=("$test_name|$result|$details")

    if [ "$result" = "PASS" ]; then
        PASSED_COUNT=$((PASSED_COUNT + 1))
        print_success "PASS: $test_name"
    else
        FAILED_COUNT=$((FAILED_COUNT + 1))
        print_error "FAIL: $test_name"
        if [ -n "$details" ]; then
            echo " Details: $details"
        fi
    fi
}

# Check if relay is running
check_relay_running() {
    print_info "Checking if relay is running..."

    if ! curl -s -H "Accept: application/nostr+json" "$HTTP_URL/" >/dev/null 2>&1; then
        print_error "Relay is not running or not accessible at $HTTP_URL"
        print_info "Please start the relay with: ./make_and_restart_relay.sh"
        exit 1
    fi

    print_success "Relay is running and accessible"
}

# Test NIP-11 relay information includes NIP-40
test_nip11_expiration_support() {
    print_test_header "NIP-11 Expiration Support Advertisement"

    print_info "Fetching relay information..."
    RELAY_INFO=$(curl -s -H "Accept: application/nostr+json" "$HTTP_URL/")

    echo "Relay Info Response:"
    echo "$RELAY_INFO" | jq '.'
    echo ""

    # Check if NIP-40 is in supported_nips
    if echo "$RELAY_INFO" | jq -e '.supported_nips | index(40)' >/dev/null 2>&1; then
        print_success "✓ NIP-40 found in supported_nips array"
        NIP40_SUPPORTED=true
    else
        print_error "✗ NIP-40 not found in supported_nips array"
        NIP40_SUPPORTED=false
    fi

    if [ "$NIP40_SUPPORTED" = true ]; then
        record_test_result "NIP-11 Expiration Support Advertisement" "PASS" "NIP-40 advertised in relay info"
        return 0
    else
        record_test_result "NIP-11 Expiration Support Advertisement" "FAIL" "NIP-40 not advertised"
        return 1
    fi
}

# Helper function to create event with expiration tag
create_event_with_expiration() {
    local content="$1"
    local expiration_timestamp="$2"
    local private_key="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"

    if ! command -v nak &> /dev/null; then
        echo ""
        return 1
    fi

    # Create event with expiration tag
    nak event --sec "$private_key" -c "$content" -t "expiration=$expiration_timestamp" --ts $(date +%s)
}

# Helper function to send event and check response
send_event_and_check() {
    local event_json="$1"
    local expected_result="$2"  # "accept" or "reject"
    local description="$3"

    if [ -z "$event_json" ]; then
        return 1
    fi

    # Create EVENT message
    local event_message="[\"EVENT\",$event_json]"

    # Send to relay
    if command -v websocat &> /dev/null; then
        local response=$(echo "$event_message" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")

        print_info "Relay response: $response"

        if [[ "$response" == *"Connection failed"* ]]; then
            print_error "✗ Failed to connect to relay"
            return 1
        elif [[ "$expected_result" == "accept" && "$response" == *"true"* ]]; then
            print_success "✓ $description accepted as expected"
            return 0
        elif [[ "$expected_result" == "reject" && "$response" == *"false"* ]]; then
            print_success "✓ $description rejected as expected"
            return 0
        elif [[ "$expected_result" == "accept" && "$response" == *"false"* ]]; then
            print_error "✗ $description unexpectedly rejected: $response"
            return 1
        elif [[ "$expected_result" == "reject" && "$response" == *"true"* ]]; then
            print_error "✗ $description unexpectedly accepted: $response"
            return 1
        else
            print_warning "? Unclear response for $description: $response"
            return 1
        fi
    else
        print_error "websocat not found - required for testing"
        return 1
    fi
}

# Test event without expiration tag
test_event_without_expiration() {
    print_test_header "Event Submission Without Expiration Tag"

    if ! command -v nak &> /dev/null; then
        print_warning "nak command not found - skipping expiration tests"
        record_test_result "Event Submission Without Expiration Tag" "SKIP" "nak not available"
        return 0
    fi

    print_info "Creating event without expiration tag..."

    local private_key="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"
    local event_json=$(nak event --sec "$private_key" -c "Test event without expiration" --ts $(date +%s))

    print_info "Generated event:"
    echo "$event_json" | jq '.'
    echo ""

    if send_event_and_check "$event_json" "accept" "Event without expiration tag"; then
        record_test_result "Event Submission Without Expiration Tag" "PASS" "Non-expiring event accepted"
        return 0
    else
        record_test_result "Event Submission Without Expiration Tag" "FAIL" "Non-expiring event handling failed"
        return 1
    fi
}

# Test event with future expiration (should be accepted)
test_event_with_future_expiration() {
    print_test_header "Event Submission With Future Expiration"

    if ! command -v nak &> /dev/null; then
        record_test_result "Event Submission With Future Expiration" "SKIP" "nak not available"
        return 0
    fi

    print_info "Creating event with future expiration (1 hour from now)..."

    local future_timestamp=$(($(date +%s) + 3600))  # 1 hour from now
    local event_json=$(create_event_with_expiration "Test event expiring in 1 hour" "$future_timestamp")

    if [ -z "$event_json" ]; then
        record_test_result "Event Submission With Future Expiration" "FAIL" "Failed to create event"
        return 1
    fi

    print_info "Generated event (expires at $future_timestamp):"
    echo "$event_json" | jq '.'
    echo ""

    if send_event_and_check "$event_json" "accept" "Event with future expiration"; then
        record_test_result "Event Submission With Future Expiration" "PASS" "Future-expiring event accepted"
        return 0
    else
        record_test_result "Event Submission With Future Expiration" "FAIL" "Future-expiring event rejected"
        return 1
    fi
}

# Test event with past expiration (should be rejected in strict mode)
test_event_with_past_expiration() {
    print_test_header "Event Submission With Past Expiration"

    if ! command -v nak &> /dev/null; then
        record_test_result "Event Submission With Past Expiration" "SKIP" "nak not available"
        return 0
    fi

    print_info "Creating event with past expiration (1 hour ago)..."

    local past_timestamp=$(($(date +%s) - 3600))  # 1 hour ago
    local event_json=$(create_event_with_expiration "Test event expired 1 hour ago" "$past_timestamp")

    if [ -z "$event_json" ]; then
        record_test_result "Event Submission With Past Expiration" "FAIL" "Failed to create event"
        return 1
    fi

    print_info "Generated event (expired at $past_timestamp):"
    echo "$event_json" | jq '.'
    echo ""

    # In strict mode (default), this should be rejected
    if send_event_and_check "$event_json" "reject" "Event with past expiration"; then
        record_test_result "Event Submission With Past Expiration" "PASS" "Expired event correctly rejected in strict mode"
        return 0
    else
        record_test_result "Event Submission With Past Expiration" "FAIL" "Expired event handling failed"
        return 1
    fi
}

# Test event with expiration within grace period
test_event_within_grace_period() {
    print_test_header "Event Submission Within Grace Period"

    if ! command -v nak &> /dev/null; then
        record_test_result "Event Submission Within Grace Period" "SKIP" "nak not available"
        return 0
    fi

    print_info "Creating event with expiration within grace period (2 minutes ago, grace period is 5 minutes)..."

    local grace_timestamp=$(($(date +%s) - 120))  # 2 minutes ago (within 5 minute grace period)
    local event_json=$(create_event_with_expiration "Test event within grace period" "$grace_timestamp")

    if [ -z "$event_json" ]; then
        record_test_result "Event Submission Within Grace Period" "FAIL" "Failed to create event"
        return 1
    fi

    print_info "Generated event (expired at $grace_timestamp, within grace period):"
    echo "$event_json" | jq '.'
    echo ""

    # Should be accepted due to grace period
    if send_event_and_check "$event_json" "accept" "Event within grace period"; then
        record_test_result "Event Submission Within Grace Period" "PASS" "Event within grace period accepted"
        return 0
    else
        record_test_result "Event Submission Within Grace Period" "FAIL" "Grace period handling failed"
        return 1
    fi
}

# Test event filtering in subscriptions
test_expiration_filtering_in_subscriptions() {
    print_test_header "Expiration Filtering in Subscriptions"

    if ! command -v nak &> /dev/null || ! command -v websocat &> /dev/null; then
        record_test_result "Expiration Filtering in Subscriptions" "SKIP" "Required tools not available"
        return 0
    fi

    print_info "Setting up test events for subscription filtering..."

    # First, create a few events with different expiration times
    local private_key="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"

    # Event 1: No expiration (should be returned)
    local event1=$(nak event --sec "$private_key" -c "Event without expiration for filtering test" --ts $(date +%s))

    # Event 2: Future expiration (should be returned)
    local future_timestamp=$(($(date +%s) + 1800))  # 30 minutes from now
    local event2=$(create_event_with_expiration "Event with future expiration for filtering test" "$future_timestamp")

    # Event 3: Past expiration (should NOT be returned if filtering is enabled)
    local past_timestamp=$(($(date +%s) - 3600))  # 1 hour ago
    local event3=$(create_event_with_expiration "Event with past expiration for filtering test" "$past_timestamp")

    print_info "Publishing test events..."

    # Note: We expect event3 to be rejected on submission in strict mode,
    # so we'll create it with a slightly more recent expiration that might get through
    local recent_past=$(($(date +%s) - 600))  # 10 minutes ago (outside grace period)
    local event3_recent=$(create_event_with_expiration "Recently expired event for filtering test" "$recent_past")

    # Try to submit all events (some may be rejected)
    echo "[\"EVENT\",$event1]" | timeout 3s websocat "$RELAY_URL" >/dev/null 2>&1 || true
    echo "[\"EVENT\",$event2]" | timeout 3s websocat "$RELAY_URL" >/dev/null 2>&1 || true
    echo "[\"EVENT\",$event3_recent]" | timeout 3s websocat "$RELAY_URL" >/dev/null 2>&1 || true

    sleep 2  # Let events settle

    print_info "Testing subscription filtering..."

    # Create subscription for recent events
    local req_message='["REQ","filter_test",{"kinds":[1],"limit":10}]'
    local response=$(echo -e "$req_message\n[\"CLOSE\",\"filter_test\"]" | timeout 5s websocat "$RELAY_URL" 2>/dev/null || echo "")

    print_info "Subscription response:"
    echo "$response"
    echo ""

    # Count events that contain our test content
    local no_exp_count=0
    local future_exp_count=0
    local past_exp_count=0

    if echo "$response" | grep -q "Event without expiration for filtering test"; then
        no_exp_count=1
        print_success "✓ Event without expiration found in subscription results"
    fi

    if echo "$response" | grep -q "Event with future expiration for filtering test"; then
        future_exp_count=1
        print_success "✓ Event with future expiration found in subscription results"
    fi

    if echo "$response" | grep -q "Recently expired event for filtering test"; then
        past_exp_count=1
        print_warning "✗ Recently expired event found in subscription results (should be filtered)"
    else
        print_success "✓ Recently expired event properly filtered from subscription results"
    fi

    # Evaluate results
    local expected_events=$((no_exp_count + future_exp_count))
    if [ $expected_events -ge 1 ] && [ $past_exp_count -eq 0 ]; then
        record_test_result "Expiration Filtering in Subscriptions" "PASS" "Expired events properly filtered from subscriptions"
        return 0
    else
        record_test_result "Expiration Filtering in Subscriptions" "FAIL" "Expiration filtering not working properly in subscriptions"
        return 1
    fi
}

# Test malformed expiration tags
test_malformed_expiration_tags() {
    print_test_header "Handling of Malformed Expiration Tags"

    if ! command -v nak &> /dev/null; then
        record_test_result "Handling of Malformed Expiration Tags" "SKIP" "nak not available"
        return 0
    fi

    print_info "Testing events with malformed expiration tags..."

    local private_key="91ba716fa9e7ea2fcbad360cf4f8e0d312f73984da63d90f524ad61a6a1e7dbe"

    # Test 1: Non-numeric expiration value
    local event1=$(nak event --sec "$private_key" -c "Event with non-numeric expiration" -t "expiration=not_a_number" --ts $(date +%s))

    # Test 2: Empty expiration value
    local event2=$(nak event --sec "$private_key" -c "Event with empty expiration" -t "expiration=" --ts $(date +%s))

    print_info "Testing non-numeric expiration value..."
    if send_event_and_check "$event1" "accept" "Event with non-numeric expiration (should be treated as no expiration)"; then
        print_success "✓ Non-numeric expiration handled gracefully"
        malformed_test1=true
    else
        malformed_test1=false
    fi

    print_info "Testing empty expiration value..."
    if send_event_and_check "$event2" "accept" "Event with empty expiration (should be treated as no expiration)"; then
        print_success "✓ Empty expiration handled gracefully"
        malformed_test2=true
    else
        malformed_test2=false
    fi

    if [ "$malformed_test1" = true ] && [ "$malformed_test2" = true ]; then
        record_test_result "Handling of Malformed Expiration Tags" "PASS" "Malformed expiration tags handled gracefully"
        return 0
    else
        record_test_result "Handling of Malformed Expiration Tags" "FAIL" "Malformed expiration tag handling failed"
        return 1
    fi
}

# Test configuration via environment variables
test_expiration_configuration() {
    print_test_header "Expiration Configuration Via Environment Variables"

    print_info "Testing expiration configuration from relay logs..."

    if [ -f "relay.log" ]; then
        print_info "Current configuration from logs:"
        grep "Expiration Configuration:" relay.log | tail -1 || print_warning "No expiration configuration found in logs"
    else
        print_warning "No relay.log found"
    fi

    # The relay should be running with default configuration
    print_info "Default configuration should be:"
    print_info " enabled=true"
    print_info " strict_mode=true (rejects expired events on submission)"
    print_info " filter_responses=true (filters expired events from responses)"
    print_info " grace_period=300 seconds (5 minutes)"

    # Test current behavior matches expected default configuration
    print_info "Configuration test based on observed behavior:"

    # Check if NIP-40 is advertised (indicates enabled=true)
    if curl -s -H "Accept: application/nostr+json" "$HTTP_URL/" | jq -e '.supported_nips | index(40)' >/dev/null 2>&1; then
        print_success "✓ NIP-40 support advertised (enabled=true)"
        config_test=true
    else
        print_error "✗ NIP-40 not advertised (may be disabled)"
        config_test=false
    fi

    if [ "$config_test" = true ]; then
        record_test_result "Expiration Configuration Via Environment Variables" "PASS" "Expiration configuration is accessible and working"
        return 0
    else
        record_test_result "Expiration Configuration Via Environment Variables" "FAIL" "Expiration configuration issues detected"
        return 1
    fi
}

# Print test summary
print_test_summary() {
    echo ""
    echo -e "${BOLD}=== TEST SUMMARY ===${RESET}"
    echo "Total tests run: $TEST_COUNT"
    echo -e "${GREEN}Passed: $PASSED_COUNT${RESET}"
    echo -e "${RED}Failed: $FAILED_COUNT${RESET}"

    if [ $FAILED_COUNT -gt 0 ]; then
        echo ""
        echo -e "${RED}${BOLD}Failed tests:${RESET}"
        for result in "${TEST_RESULTS[@]}"; do
            IFS='|' read -r name status details <<< "$result"
            if [ "$status" = "FAIL" ]; then
                echo -e " ${RED}✗ $name${RESET}"
                if [ -n "$details" ]; then
                    echo " $details"
                fi
            fi
        done
    fi

    echo ""
    if [ $FAILED_COUNT -eq 0 ]; then
        echo -e "${GREEN}${BOLD}🎉 ALL TESTS PASSED!${RESET}"
        echo -e "${GREEN}✅ NIP-40 Expiration Timestamp support is working correctly in the relay${RESET}"
        return 0
    else
        echo -e "${RED}${BOLD}❌ SOME TESTS FAILED${RESET}"
        echo "Please review the output above and check relay logs for more details."
        return 1
    fi
}

# Main test execution
main() {
    echo -e "${BOLD}=== NIP-40 Expiration Timestamp Relay Test Suite ===${RESET}"
    echo "Testing NIP-40 Expiration Timestamp support in the C Nostr Relay"
    echo "Relay URL: $RELAY_URL"
    echo ""

    # Check prerequisites
    if ! command -v curl &> /dev/null; then
        print_error "curl is required but not installed"
        exit 1
    fi

    if ! command -v jq &> /dev/null; then
        print_error "jq is required but not installed"
        exit 1
    fi

    if ! command -v websocat &> /dev/null; then
        print_warning "websocat not found - WebSocket tests will be skipped"
    fi

    if ! command -v nak &> /dev/null; then
        print_warning "nak not found - Event generation tests will be skipped"
        print_info "Install with: go install github.com/fiatjaf/nak@latest"
    fi

    # Run tests
    check_relay_running
    test_nip11_expiration_support
    test_event_without_expiration
    test_event_with_future_expiration
    test_event_with_past_expiration
    test_event_within_grace_period
    test_expiration_filtering_in_subscriptions
    test_malformed_expiration_tags
    test_expiration_configuration

    # Print summary
    print_test_summary
    exit $?
}

# Run main function
main "$@"
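The expiration checks above can likewise be reproduced by hand, reusing the tag syntax from the create_event_with_expiration helper. A short sketch, assuming nak and websocat are installed; the throwaway secret key is hypothetical:

    # Publish an event that expires in 60 seconds.
    EXP=$(($(date +%s) + 60))
    EVENT=$(nak event --sec 0000000000000000000000000000000000000000000000000000000000000001 \
        -c "short-lived note" -t "expiration=$EXP")
    echo "[\"EVENT\",$EVENT]" | timeout 5s websocat ws://127.0.0.1:8888

Once the timestamp passes (plus any configured grace period), the event should no longer appear in REQ results if filter_responses is enabled.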