Compare commits

...

45 Commits

Author SHA1 Message Date
Your Name
f272264960 v1.0.8 - Remove limit from subscription count 2025-12-07 07:40:17 -04:00
Your Name
cb3171b390 v1.0.7 - Added a clean startup to erase subscriptions. 2025-12-06 18:26:22 -04:00
Your Name
03f036d60d v1.0.6 - Working on cleaning up subscriptions which were piling up. Set a startup cleanup, and a connection age limit. 2025-12-05 07:37:57 -04:00
Your Name
9b35f463ae . 2025-11-21 11:52:56 -04:00
Your Name
7b74486519 v1.0.5 - Just catching up 2025-11-11 17:01:39 -04:00
Your Name
b276b44ded v1.0.4 - Fixed web socket limitation with the number of npubs in a subscription 2025-11-07 19:59:34 -05:00
laan tungir
3792649ed9 v1.0.3 - From remote 2025-11-07 14:07:46 -05:00
laan tungir
5f08956605 removed some files 2025-11-03 07:33:12 -05:00
Your Name
643d89ed7b v1.0.0 - First major release 2025-11-01 07:04:56 -04:00
Your Name
8ca459593c Reset version back to v0.8.6 2025-11-01 07:03:39 -04:00
Your Name
ee4208cc19 v7.0.0 - Version 1.0.0 2025-11-01 06:56:02 -04:00
Your Name
f6330e4bb8 v7.0.0 - Version 1.0.0 2025-10-31 11:17:34 -04:00
Your Name
4f3cf10a5c v6.0.0 - Test fixed output redirection 2025-10-31 11:17:19 -04:00
Your Name
aa1954e81e v6.0.0 - Version 1.0.0 2025-10-31 11:15:32 -04:00
Your Name
482597bd0e v5.0.0 - Test fixed release ID extraction 2025-10-31 11:15:16 -04:00
Your Name
d4b90e681c v5.0.0 - Version 1.0.0 2025-10-31 11:13:41 -04:00
Your Name
fcf9e43c4c v4.0.0 - Debug upload with existing binary 2025-10-31 11:13:21 -04:00
Your Name
b8d8cd19d3 v4.0.0 - Debug upload issue 2025-10-31 11:12:58 -04:00
Your Name
536c2d966c v4.0.0 - Version 1.0.0 2025-10-31 11:11:31 -04:00
Your Name
f49cb0a5ac v3.0.0 - Test release upload fix 2025-10-31 11:10:53 -04:00
Your Name
cef6bb2340 v3.0.0 - Version 1.0.0 2025-10-31 11:08:18 -04:00
Your Name
4c03253b30 v2.0.0 - Version 1.0.0 2025-10-31 11:06:27 -04:00
Your Name
ed09bb7370 v1.0.0 - Version 1.0.0) 2025-10-31 10:39:06 -04:00
Your Name
5c46a25173 v0.8.5 - cleanup 2025-10-30 07:06:48 -04:00
Your Name
d1538f00df v0.8.4 - Updated increment_and_push.sh 2025-10-30 07:03:22 -04:00
Your Name
afa4acbbfb v0.8.3 - --dry-run 2025-10-30 06:52:05 -04:00
Your Name
d9a530485f v0.8.2 - markdown intro 2025-10-29 07:53:56 -04:00
Your Name
b2ad70b028 v0.8.1 - added screenshots 2025-10-29 07:39:08 -04:00
Your Name
f49aae8ab0 v0.7.44 - Release v0.8.0 with NIP-59 timestamp randomization and status command fixes 2025-10-27 13:21:47 -04:00
Your Name
f6debcf799 v0.7.44 - Release v0.8.0 with NIP-59 timestamp randomization and status command fixes 2025-10-27 13:19:58 -04:00
Your Name
edbc4f1359 v0.7.43 - Add plain text 'status' command handler for NIP-17 DMs 2025-10-27 13:19:10 -04:00
Your Name
5242f066e7 Update nostr_core_lib with timestamp randomization feature 2025-10-27 12:59:19 -04:00
Your Name
af186800fa v0.7.42 - Fix ephemeral event storage and document monitoring system 2025-10-26 15:02:00 -04:00
Your Name
2bff4a5f44 v0.7.41 - Fix SQL query routing in admin API - add missing sql_query case to handle_kind_23456_unified 2025-10-26 13:34:16 -04:00
Your Name
edb73d50cf v0.7.40 - Removed event_broadcasts table and related code to fix FOREIGN KEY constraint failures preventing event insertion 2025-10-25 15:26:31 -04:00
Your Name
3dc09d55fd v0.7.39 - Set dm's back 2 days to adjust for timestamp ramdomization of giftwraps. 2025-10-23 18:43:45 -03:00
Your Name
079fb1b0f5 v0.7.38 - Fixed error upon startup with existing db 2025-10-23 11:17:16 -04:00
Your Name
17b2aa8111 v0.7.37 - Enhanced admin interface with sliding sidebar navigation, moved dark mode and logout to sidebar footer, improved button styling consistency 2025-10-22 12:43:09 -04:00
Your Name
78d484cfe0 v0.7.36 - Implement sliding side navigation menu with page switching for admin sections 2025-10-22 11:01:30 -04:00
Your Name
182e12817d v0.7.35 - Implement event-driven monitoring system with dual triggers for events and subscriptions 2025-10-22 10:48:57 -04:00
Your Name
9179d57cc9 v0.7.34 - We seemed to maybe finally fixed the monitoring error? 2025-10-22 10:19:43 -04:00
Your Name
9cb9b746d8 v0.7.33 - Refactor monitoring system to use subscription-based activation with ephemeral events - fixes recursive crash bug 2025-10-19 10:26:09 -04:00
Your Name
57a0089664 v0.7.32 - Implement ephemeral event bypass (NIP-01) - events with kinds 20000-29999 are now broadcast to subscriptions but never stored in database, preventing recursive monitoring event loops 2025-10-19 09:38:02 -04:00
Your Name
53f7608872 v0.7.31 - Fixed production crash by replacing in-memory subscription iteration with database queries in monitoring system 2025-10-18 18:09:13 -04:00
Your Name
838ce5b45a v0.7.30 - Update increment and push script 2025-10-18 15:04:45 -04:00
70 changed files with 8893 additions and 1703 deletions

1
.gitignore vendored
View File

@@ -11,4 +11,3 @@ copy_executable_local.sh
nostr_login_lite/
style_guide/
nostr-tools

View File

@@ -121,8 +121,8 @@ fuser -k 8888/tcp
- Event filtering done at C level, not SQL level for NIP-40 expiration
### Configuration Override Behavior
- CLI port override only affects first-time startup
- After database creation, all config comes from events
- CLI port override applies during first-time startup and existing relay restarts
- After database creation, all config comes from events (but CLI overrides can still be applied)
- Database path cannot be changed after initialization
## Non-Obvious Pitfalls

View File

@@ -5,6 +5,9 @@ ARG DEBUG_BUILD=false
FROM alpine:3.19 AS builder
# Re-declare build argument in this stage
ARG DEBUG_BUILD=false
# Install build dependencies
RUN apk add --no-cache \
build-base \

65
NOSTR_RELEASE.md Normal file
View File

@@ -0,0 +1,65 @@
# Relay
I am releasing the code for the nostr relay that I wrote for my own use. The code is free for anyone to use in any way that they wish.
Some of the features of this relay are conventional, and some are unconventional.
## The conventional
This relay is written in C99 with a sqlite database.
It implements the following NIPs.
- [x] NIP-01: Basic protocol flow implementation
- [x] NIP-09: Event deletion
- [x] NIP-11: Relay information document
- [x] NIP-13: Proof of Work
- [x] NIP-15: End of Stored Events Notice
- [x] NIP-20: Command Results
- [x] NIP-33: Parameterized Replaceable Events
- [x] NIP-40: Expiration Timestamp
- [x] NIP-42: Authentication of clients to relays
- [x] NIP-45: Counting results
- [x] NIP-50: Keywords filter
- [x] NIP-70: Protected Events
## The unconventional
### The binaries are fully self contained.
It should just run on Linux without you having to worry about what is already on your system. Just download and run. No Docker. No dependency hell.
I'm not bothering with other operating systems.
### The relay is a full nostr citizen with its own public and private keys.
For example, you can see my relay's profile (wss://relay.laantungir.net) running here:
[Primal link](https://primal.net/p/nprofile1qqswn2jsmm8lq8evas0v9vhqkdpn9nuujt90mtz60nqgsxndy66es4qjjnhr7)
What this means in practice is that when you start the relay, it generates keys for itself and for its administrator (you can specify these if you wish).
Now the program and the administrator can have verified communication between the two, simply by using nostr events. For example, the administrator can send DMs to the relay, asking for its status and changing its configuration through any client that can handle NIP-17 DMs. The relay can also send notifications to the administrator about its current status, or it can publish its status on a regular schedule directly to NOSTR as kind-1 notes.
Also included is a more standard administrative web front end. This front end communicates with the relay using an extensive API, which again consists of nostr events signed by the administrator. You can completely control the relay through signed nostr events.
## Screenshots
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/main.png)
Main page with real time updates.
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/config.png)
Set your configuration preferences.
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/subscriptions.png)
View current subscriptions
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/white-blacklists.png)
Add npubs to white or black lists.
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/sqlQuery.png)
Run sql queries on the database.
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/main-light.png)
Light mode.

View File

@@ -195,6 +195,9 @@ All commands are sent as NIP-44 encrypted JSON arrays in the event content. The
- `pow_min_difficulty`: Minimum proof-of-work difficulty
- `nip40_expiration_enabled`: Enable event expiration (`true`/`false`)
**Monitoring Settings:**
- `kind_24567_reporting_throttle_sec`: Minimum seconds between monitoring events (default: 5)
### Dynamic Configuration Updates
C-Relay supports **dynamic configuration updates** without requiring a restart for most settings. Configuration parameters are categorized as either **dynamic** (can be updated immediately) or **restart-required** (require relay restart to take effect).
@@ -391,6 +394,68 @@ SELECT
## Real-time Monitoring System
C-Relay includes a subscription-based monitoring system that broadcasts real-time relay statistics using ephemeral events (kind 24567).
### Activation
The monitoring system activates automatically when clients subscribe to kind 24567 events:
```json
["REQ", "monitoring-sub", {"kinds": [24567]}]
```
For specific monitoring types, use d-tag filters:
```json
["REQ", "event-kinds-sub", {"kinds": [24567], "#d": ["event_kinds"]}]
["REQ", "time-stats-sub", {"kinds": [24567], "#d": ["time_stats"]}]
["REQ", "top-pubkeys-sub", {"kinds": [24567], "#d": ["top_pubkeys"]}]
```
When no subscriptions exist, monitoring is dormant to conserve resources.
### Monitoring Event Types
| Type | d Tag | Description |
|------|-------|-------------|
| Event Distribution | `event_kinds` | Event count by kind with percentages |
| Time Statistics | `time_stats` | Events in last 24h, 7d, 30d |
| Top Publishers | `top_pubkeys` | Top 10 pubkeys by event count |
| Active Subscriptions | `active_subscriptions` | Current subscription details (admin only) |
| Subscription Details | `subscription_details` | Detailed subscription info (admin only) |
| CPU Metrics | `cpu_metrics` | Process CPU and memory usage |
### Event Structure
```json
{
"kind": 24567,
"pubkey": "<relay_pubkey>",
"created_at": <timestamp>,
"content": "{\"data_type\":\"event_kinds\",\"timestamp\":1234567890,...}",
"tags": [
["d", "event_kinds"]
]
}
```
### Configuration
- `kind_24567_reporting_throttle_sec`: Minimum seconds between monitoring events (default: 5)
### Web Dashboard Integration
The built-in web dashboard (`/api/`) automatically subscribes to monitoring events and displays real-time statistics.
### Performance Considerations
- Monitoring events are ephemeral (not stored in database)
- Throttling prevents excessive event generation
- Automatic activation/deactivation based on subscriptions
- Minimal overhead when no clients are monitoring
## Direct Messaging Admin System
In addition to the above admin API, c-relay allows the administrator to direct message the relay to get information or control some settings. As long as the administrator is signed in with any nostr client that allows sending nip-17 direct messages (DMs), they can control the relay.

612
STATIC_MUSL_GUIDE.md Normal file
View File

@@ -0,0 +1,612 @@
# Static MUSL Build Guide for C Programs
## Overview
This guide explains how to build truly portable static binaries using Alpine Linux and MUSL libc. These binaries have **zero runtime dependencies** and work on any Linux distribution without modification.
This guide is specifically tailored for C programs that use:
- **nostr_core_lib** - Nostr protocol implementation
- **nostr_login_lite** - Nostr authentication library
- Common dependencies: libwebsockets, OpenSSL, SQLite, curl, secp256k1
## Why MUSL Static Binaries?
### Advantages Over glibc
| Feature | MUSL Static | glibc Static | glibc Dynamic |
|---------|-------------|--------------|---------------|
| **Portability** | ✓ Any Linux | ⚠ glibc only | ✗ Requires matching libs |
| **Binary Size** | ~7-10 MB | ~12-15 MB | ~2-3 MB |
| **Dependencies** | None | NSS libs | Many system libs |
| **Deployment** | Single file | Single file + NSS | Binary + libraries |
| **Compatibility** | Universal | glibc version issues | Library version hell |
### Key Benefits
1. **True Portability**: Works on Alpine, Ubuntu, Debian, CentOS, Arch, etc.
2. **No Library Hell**: No `GLIBC_2.XX not found` errors
3. **Simple Deployment**: Just copy one file
4. **Reproducible Builds**: Same Docker image = same binary
5. **Security**: No dependency on system libraries with vulnerabilities
## Quick Start
### Prerequisites
- Docker installed and running
- Your C project with source code
- Internet connection for downloading dependencies
### Basic Build Process
```bash
# 1. Copy the Dockerfile template (see below)
cp /path/to/c-relay/Dockerfile.alpine-musl ./Dockerfile.static
# 2. Customize for your project (see Customization section)
vim Dockerfile.static
# 3. Build the static binary
docker build --platform linux/amd64 -f Dockerfile.static -t my-app-builder .
# 4. Extract the binary
docker create --name temp-container my-app-builder
docker cp temp-container:/build/my_app_static ./my_app_static
docker rm temp-container
# 5. Verify it's static
ldd ./my_app_static # Should show "not a dynamic executable"
```
## Dockerfile Template
Here's a complete Dockerfile template you can customize for your project:
```dockerfile
# Alpine-based MUSL static binary builder
# Produces truly portable binaries with zero runtime dependencies
FROM alpine:3.19 AS builder
# Install build dependencies
RUN apk add --no-cache \
build-base \
musl-dev \
git \
cmake \
pkgconfig \
autoconf \
automake \
libtool \
openssl-dev \
openssl-libs-static \
zlib-dev \
zlib-static \
curl-dev \
curl-static \
sqlite-dev \
sqlite-static \
linux-headers \
wget \
bash
WORKDIR /build
# Build libsecp256k1 static (required for Nostr)
RUN cd /tmp && \
git clone https://github.com/bitcoin-core/secp256k1.git && \
cd secp256k1 && \
./autogen.sh && \
./configure --enable-static --disable-shared --prefix=/usr \
CFLAGS="-fPIC" && \
make -j$(nproc) && \
make install && \
rm -rf /tmp/secp256k1
# Build libwebsockets static (if needed for WebSocket support)
RUN cd /tmp && \
git clone --depth 1 --branch v4.3.3 https://github.com/warmcat/libwebsockets.git && \
cd libwebsockets && \
mkdir build && cd build && \
cmake .. \
-DLWS_WITH_STATIC=ON \
-DLWS_WITH_SHARED=OFF \
-DLWS_WITH_SSL=ON \
-DLWS_WITHOUT_TESTAPPS=ON \
-DLWS_WITHOUT_TEST_SERVER=ON \
-DLWS_WITHOUT_TEST_CLIENT=ON \
-DLWS_WITHOUT_TEST_PING=ON \
-DLWS_WITH_HTTP2=OFF \
-DLWS_WITH_LIBUV=OFF \
-DLWS_WITH_LIBEVENT=OFF \
-DLWS_IPV6=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_C_FLAGS="-fPIC" && \
make -j$(nproc) && \
make install && \
rm -rf /tmp/libwebsockets
# Copy git configuration for submodules
COPY .gitmodules /build/.gitmodules
COPY .git /build/.git
# Initialize submodules
RUN git submodule update --init --recursive
# Copy and build nostr_core_lib
COPY nostr_core_lib /build/nostr_core_lib/
RUN cd nostr_core_lib && \
chmod +x build.sh && \
sed -i 's/CFLAGS="-Wall -Wextra -std=c99 -fPIC -O2"/CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"/' build.sh && \
rm -f *.o *.a 2>/dev/null || true && \
./build.sh --nips=1,6,13,17,19,44,59
# Copy and build nostr_login_lite (if used)
# COPY nostr_login_lite /build/nostr_login_lite/
# RUN cd nostr_login_lite && make static
# Copy your application source
COPY src/ /build/src/
COPY Makefile /build/Makefile
# Build your application with full static linking
RUN gcc -static -O2 -Wall -Wextra -std=c99 \
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
-I. -Inostr_core_lib -Inostr_core_lib/nostr_core \
-Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket \
src/*.c \
-o /build/my_app_static \
nostr_core_lib/libnostr_core_x64.a \
-lwebsockets -lssl -lcrypto -lsqlite3 -lsecp256k1 \
-lcurl -lz -lpthread -lm -ldl && \
strip /build/my_app_static
# Verify it's truly static
RUN echo "=== Binary Information ===" && \
file /build/my_app_static && \
ls -lh /build/my_app_static && \
echo "=== Checking for dynamic dependencies ===" && \
(ldd /build/my_app_static 2>&1 || echo "Binary is static")
# Output stage - just the binary
FROM scratch AS output
COPY --from=builder /build/my_app_static /my_app_static
```
## Customization Guide
### 1. Adjust Dependencies
**Add dependencies** by modifying the `apk add` section:
```dockerfile
RUN apk add --no-cache \
build-base \
musl-dev \
# Add your dependencies here:
libpng-dev \
libpng-static \
libjpeg-turbo-dev \
libjpeg-turbo-static
```
**Remove unused dependencies** to speed up builds:
- Remove `libwebsockets` section if you don't need WebSocket support
- Remove `sqlite` if you don't use databases
- Remove `curl` if you don't make HTTP requests
### 2. Configure nostr_core_lib NIPs
Specify which NIPs your application needs:
```bash
./build.sh --nips=1,6,19 # Minimal: Basic protocol, keys, bech32
./build.sh --nips=1,6,13,17,19,44,59 # Full: All common NIPs
./build.sh --nips=all # Everything available
```
**Common NIP combinations:**
- **Basic client**: `1,6,19` (events, keys, bech32)
- **With encryption**: `1,6,19,44` (add modern encryption)
- **With DMs**: `1,6,17,19,44,59` (add private messages)
- **Relay/server**: `1,6,13,17,19,42,44,59` (add PoW, auth)
### 3. Modify Compilation Flags
**For your application:**
```dockerfile
RUN gcc -static -O2 -Wall -Wextra -std=c99 \
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \ # REQUIRED for MUSL
-I. -Inostr_core_lib \ # Include paths
src/*.c \ # Your source files
-o /build/my_app_static \ # Output binary
nostr_core_lib/libnostr_core_x64.a \ # Nostr library
-lwebsockets -lssl -lcrypto \ # Link libraries
-lsqlite3 -lsecp256k1 -lcurl \
-lz -lpthread -lm -ldl
```
**Debug build** (with symbols, no optimization):
```dockerfile
RUN gcc -static -g -O0 -DDEBUG \
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
# ... rest of flags
```
### 4. Multi-Architecture Support
Build for different architectures:
```bash
# x86_64 (Intel/AMD)
docker build --platform linux/amd64 -f Dockerfile.static -t my-app-x86 .
# ARM64 (Apple Silicon, Raspberry Pi 4+)
docker build --platform linux/arm64 -f Dockerfile.static -t my-app-arm64 .
```
## Build Script Template
Create a `build_static.sh` script for convenience:
```bash
#!/bin/bash
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BUILD_DIR="$SCRIPT_DIR/build"
DOCKERFILE="$SCRIPT_DIR/Dockerfile.static"
# Detect architecture
ARCH=$(uname -m)
case "$ARCH" in
x86_64)
PLATFORM="linux/amd64"
OUTPUT_NAME="my_app_static_x86_64"
;;
aarch64|arm64)
PLATFORM="linux/arm64"
OUTPUT_NAME="my_app_static_arm64"
;;
*)
echo "Unknown architecture: $ARCH"
exit 1
;;
esac
echo "Building for platform: $PLATFORM"
mkdir -p "$BUILD_DIR"
# Build Docker image
docker build \
--platform "$PLATFORM" \
-f "$DOCKERFILE" \
-t my-app-builder:latest \
--progress=plain \
.
# Extract binary
CONTAINER_ID=$(docker create my-app-builder:latest)
docker cp "$CONTAINER_ID:/build/my_app_static" "$BUILD_DIR/$OUTPUT_NAME"
docker rm "$CONTAINER_ID"
chmod +x "$BUILD_DIR/$OUTPUT_NAME"
echo "✓ Build complete: $BUILD_DIR/$OUTPUT_NAME"
echo "✓ Size: $(du -h "$BUILD_DIR/$OUTPUT_NAME" | cut -f1)"
# Verify
if ldd "$BUILD_DIR/$OUTPUT_NAME" 2>&1 | grep -q "not a dynamic executable"; then
echo "✓ Binary is fully static"
else
echo "⚠ Warning: Binary may have dynamic dependencies"
fi
```
Make it executable:
```bash
chmod +x build_static.sh
./build_static.sh
```
## Common Issues and Solutions
### Issue 1: Fortification Errors
**Error:**
```
undefined reference to '__snprintf_chk'
undefined reference to '__fprintf_chk'
```
**Cause**: GCC's `-O2` enables fortification by default, which uses glibc-specific functions.
**Solution**: Add these flags to **all** compilation commands:
```bash
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0
```
This must be applied to:
1. nostr_core_lib build.sh
2. Your application compilation
3. Any other libraries you build
### Issue 2: Missing Symbols from nostr_core_lib
**Error:**
```
undefined reference to 'nostr_create_event'
undefined reference to 'nostr_sign_event'
```
**Cause**: Required NIPs not included in nostr_core_lib build.
**Solution**: Add missing NIPs:
```bash
./build.sh --nips=1,6,19 # Add the NIPs you need
```
### Issue 3: Docker Permission Denied
**Error:**
```
permission denied while trying to connect to the Docker daemon socket
```
**Solution**:
```bash
sudo usermod -aG docker $USER
newgrp docker # Or logout and login
```
### Issue 4: Binary Won't Run on Target System
**Checks**:
```bash
# 1. Verify it's static
ldd my_app_static # Should show "not a dynamic executable"
# 2. Check architecture
file my_app_static # Should match target system
# 3. Test on different distributions
docker run --rm -v $(pwd):/app alpine:latest /app/my_app_static --version
docker run --rm -v $(pwd):/app ubuntu:latest /app/my_app_static --version
```
## Project Structure Example
Organize your project for easy static builds:
```
my-nostr-app/
├── src/
│ ├── main.c
│ ├── handlers.c
│ └── utils.c
├── nostr_core_lib/ # Git submodule
├── nostr_login_lite/ # Git submodule (if used)
├── Dockerfile.static # Static build Dockerfile
├── build_static.sh # Build script
├── Makefile # Regular build
└── README.md
```
### Makefile Integration
Add static build targets to your Makefile:
```makefile
# Regular dynamic build
all: my_app
my_app: src/*.c
gcc -O2 src/*.c -o my_app \
nostr_core_lib/libnostr_core_x64.a \
-lssl -lcrypto -lsecp256k1 -lz -lpthread -lm
# Static MUSL build via Docker
static:
./build_static.sh
# Clean
clean:
rm -f my_app build/my_app_static_*
.PHONY: all static clean
```
## Deployment
### Single Binary Deployment
```bash
# Copy to server
scp build/my_app_static_x86_64 user@server:/opt/my-app/
# Run (no dependencies needed!)
ssh user@server
/opt/my-app/my_app_static_x86_64
```
### SystemD Service
```ini
[Unit]
Description=My Nostr Application
After=network.target
[Service]
Type=simple
User=myapp
WorkingDirectory=/opt/my-app
ExecStart=/opt/my-app/my_app_static_x86_64
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
```
### Docker Container (Minimal)
```dockerfile
FROM scratch
COPY my_app_static_x86_64 /app
ENTRYPOINT ["/app"]
```
Build and run:
```bash
docker build -t my-app:latest .
docker run --rm my-app:latest --help
```
## Reusing c-relay Files
You can directly copy these files from c-relay:
### 1. Dockerfile.alpine-musl
```bash
cp /path/to/c-relay/Dockerfile.alpine-musl ./Dockerfile.static
```
Then customize:
- Change binary name (line 125)
- Adjust source files (line 122-124)
- Modify include paths (line 120-121)
### 2. build_static.sh
```bash
cp /path/to/c-relay/build_static.sh ./
```
Then customize:
- Change `OUTPUT_NAME` variable (lines 66, 70)
- Update Docker image name (line 98)
- Modify verification commands (lines 180-184)
### 3. .dockerignore (Optional)
```bash
cp /path/to/c-relay/.dockerignore ./
```
Helps speed up Docker builds by excluding unnecessary files.
## Best Practices
1. **Version Control**: Commit your Dockerfile and build script
2. **Tag Builds**: Include git commit hash in binary version
3. **Test Thoroughly**: Verify on multiple distributions
4. **Document Dependencies**: List required NIPs and libraries
5. **Automate**: Use CI/CD to build on every commit
6. **Archive Binaries**: Keep old versions for rollback
## Performance Comparison
| Metric | MUSL Static | glibc Dynamic |
|--------|-------------|---------------|
| Binary Size | 7-10 MB | 2-3 MB + libs |
| Startup Time | ~50ms | ~40ms |
| Memory Usage | Similar | Similar |
| Portability | ✓ Universal | ✗ System-dependent |
| Deployment | Single file | Binary + libraries |
## References
- [MUSL libc](https://musl.libc.org/)
- [Alpine Linux](https://alpinelinux.org/)
- [nostr_core_lib](https://github.com/chebizarro/nostr_core_lib)
- [Static Linking Best Practices](https://www.musl-libc.org/faq.html)
- [c-relay Implementation](./docs/musl_static_build.md)
## Example: Minimal Nostr Client
Here's a complete example of building a minimal Nostr client:
```c
// minimal_client.c
#include "nostr_core/nostr_core.h"
#include <stdio.h>
int main() {
// Generate keypair
char nsec[64], npub[64];
nostr_generate_keypair(nsec, npub);
printf("Generated keypair:\n");
printf("Private key (nsec): %s\n", nsec);
printf("Public key (npub): %s\n", npub);
// Create event
cJSON *event = nostr_create_event(1, "Hello, Nostr!", NULL);
nostr_sign_event(event, nsec);
char *json = cJSON_Print(event);
printf("\nSigned event:\n%s\n", json);
free(json);
cJSON_Delete(event);
return 0;
}
```
**Dockerfile.static:**
```dockerfile
FROM alpine:3.19 AS builder
RUN apk add --no-cache build-base musl-dev git autoconf automake libtool \
openssl-dev openssl-libs-static zlib-dev zlib-static
WORKDIR /build
# Build secp256k1
RUN cd /tmp && git clone https://github.com/bitcoin-core/secp256k1.git && \
cd secp256k1 && ./autogen.sh && \
./configure --enable-static --disable-shared --prefix=/usr CFLAGS="-fPIC" && \
make -j$(nproc) && make install
# Copy and build nostr_core_lib
COPY nostr_core_lib /build/nostr_core_lib/
RUN cd nostr_core_lib && \
sed -i 's/CFLAGS="-Wall/CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall/' build.sh && \
./build.sh --nips=1,6,19
# Build application
COPY minimal_client.c /build/
RUN gcc -static -O2 -Wall -std=c99 \
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
-Inostr_core_lib -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson \
minimal_client.c -o /build/minimal_client_static \
nostr_core_lib/libnostr_core_x64.a \
-lssl -lcrypto -lsecp256k1 -lz -lpthread -lm -ldl && \
strip /build/minimal_client_static
FROM scratch
COPY --from=builder /build/minimal_client_static /minimal_client_static
```
**Build and run:**
```bash
docker build -f Dockerfile.static -t minimal-client .
docker create --name temp minimal-client
docker cp temp:/minimal_client_static ./
docker rm temp
./minimal_client_static
```
## Conclusion
Static MUSL binaries provide the best portability for C applications. While they're slightly larger than dynamic binaries, the benefits of zero dependencies and universal compatibility make them ideal for:
- Server deployments across different Linux distributions
- Embedded systems and IoT devices
- Docker containers (FROM scratch)
- Distribution to users without dependency management
- Long-term archival and reproducibility
Follow this guide to create portable, self-contained binaries for your Nostr applications!

View File

@@ -285,7 +285,7 @@ h1 {
border-bottom: var(--border-width) solid var(--border-color);
padding-bottom: 10px;
margin-bottom: 30px;
font-weight: normal;
font-weight: bold;
font-size: 24px;
font-family: var(--font-family);
color: var(--primary-color);
@@ -293,32 +293,57 @@ h1 {
h2 {
font-weight: normal;
padding-left: 10px;
text-align: center;
font-size: 16px;
font-family: var(--font-family);
color: var(--primary-color);
}
h3 {
font-weight: normal;
font-size: 12px;
font-family: var(--font-family);
color: var(--primary-color);
padding-bottom: 10px;
}
label {
display: block;
margin-bottom: 5px;
font-weight: lighter;
font-size: 10px;
font-family: var(--font-family);
color: var(--primary-color);
}
.section {
background: var(--secondary-color);
border: var(--border-width) solid var(--border-color);
border-radius: var(--border-radius);
padding: 20px;
margin-bottom: 20px;
margin-left: 5px;
margin-right:5px;
}
.section-header {
display: flex;
justify-content: center;
align-items: center;
padding-bottom: 15px;
}
.input-group {
margin-bottom: 15px;
}
label {
display: block;
margin-bottom: 5px;
font-weight: bold;
font-size: 14px;
font-family: var(--font-family);
color: var(--primary-color);
}
input,
textarea,
@@ -491,6 +516,24 @@ button:disabled {
border-radius: 0;
}
/* Relay Events Styles */
.status-message {
margin-top: 10px;
padding: 8px;
border-radius: var(--border-radius);
font-size: 14px;
font-family: var(--font-family);
text-align: center;
}
.relay-entry {
border: var(--border-width) solid var(--border-color);
border-radius: var(--border-radius);
padding: 10px;
margin-bottom: 10px;
background: var(--secondary-color);
}
.config-value-input:focus {
border: 1px solid var(--accent-color);
background: var(--secondary-color);
@@ -660,14 +703,7 @@ button:disabled {
display: none;
}
.section-header {
display: flex;
justify-content: space-between;
align-items: center;
/* margin-bottom: 15px; */
/* border-bottom: var(--border-width) solid var(--border-color); */
/* padding-bottom: 10px; */
}
.countdown-btn {
width: auto;
@@ -948,10 +984,8 @@ button:disabled {
padding: 6px 8px;
text-align: left;
font-family: var(--font-family);
max-width: 200px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
min-width: 100px;
}
.sql-results-table th {
@@ -1107,3 +1141,170 @@ body.dark-mode .sql-results-table tbody tr:nth-child(even) {
border-radius: var(--border-radius);
box-sizing: border-box;
}
/* ================================
SIDE NAVIGATION MENU
================================ */
.side-nav {
position: fixed;
top: 0;
left: -300px;
width: 280px;
height: 100vh;
background: var(--secondary-color);
border-right: var(--border-width) solid var(--border-color);
z-index: 1000;
transition: left 0.3s ease;
overflow-y: auto;
padding-top: 80px;
}
.side-nav.open {
left: 0;
}
.side-nav-overlay {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background: rgba(0, 0, 0, 0.5);
z-index: 999;
display: none;
}
.side-nav-overlay.show {
display: block;
}
.nav-menu {
list-style: none;
padding: 0;
margin: 0;
}
.nav-menu li {
border-bottom: var(--border-width) solid var(--muted-color);
}
.nav-menu li:last-child {
border-bottom: none;
}
.nav-item {
display: block;
padding: 15px 20px;
color: var(--primary-color);
text-decoration: none;
font-family: var(--font-family);
font-size: 16px;
font-weight: bold;
transition: all 0.2s ease;
cursor: pointer;
border: 2px solid var(--secondary-color);
background: none;
width: 100%;
text-align: left;
}
.nav-item:hover {
border: 2px solid var(--secondary-color);
background:var(--muted-color);
color: var(--accent-color);
}
.nav-item.active {
text-decoration: underline;
padding-left: 16px;
}
.nav-footer {
position: absolute;
bottom: 20px;
left: 0;
right: 0;
padding: 0 20px;
}
.nav-footer-btn {
display: block;
width: 100%;
padding: 12px 20px;
margin-bottom: 8px;
color: var(--primary-color);
border: 1px solid var(--border-color);
border-radius: 4px;
font-family: var(--font-family);
font-size: 14px;
font-weight: bold;
cursor: pointer;
transition: all 0.2s ease;
}
.nav-footer-btn:hover {
background:var(--muted-color);
border-color: var(--accent-color);
}
.nav-footer-btn:last-child {
margin-bottom: 0;
}
.header-title.clickable {
cursor: pointer;
transition: all 0.2s ease;
}
.header-title.clickable:hover {
opacity: 0.8;
}
/* ================================
SUBSCRIPTION TABLE COLLAPSIBLE GROUPS
================================ */
/* Subscription group header styles */
.subscription-group-header {
font-weight: 500;
cursor: pointer;
user-select: none;
}
.subscription-group-header:hover {
background-color: var(--secondary-color);
}
.expand-icon {
display: inline-block;
width: 20px;
transition: transform 0.2s ease;
font-size: 12px;
}
/* Detail row styles */
.subscription-detail-row {
/* background-color: var(--secondary-color); */
}
.subscription-detail-row:hover {
background-color: var(--muted-color);
}
/* Detail row cell styles */
.subscription-detail-prefix {
padding-left: 30px;
font-family: 'Courier New', monospace;
font-size: 11px;
color: var(--muted-color);
}
.subscription-detail-id {
font-family: 'Courier New', monospace;
font-size: 12px;
}

View File

@@ -9,11 +9,31 @@
</head>
<body>
<!-- Side Navigation Menu -->
<nav class="side-nav" id="side-nav">
<ul class="nav-menu">
<li><button class="nav-item" data-page="statistics">Statistics</button></li>
<li><button class="nav-item" data-page="subscriptions">Subscriptions</button></li>
<li><button class="nav-item" data-page="configuration">Configuration</button></li>
<li><button class="nav-item" data-page="authorization">Authorization</button></li>
<li><button class="nav-item" data-page="relay-events">Relay Events</button></li>
<li><button class="nav-item" data-page="dm">DM</button></li>
<li><button class="nav-item" data-page="database">Database Query</button></li>
</ul>
<div class="nav-footer">
<button class="nav-footer-btn" id="nav-dark-mode-btn">DARK MODE</button>
<button class="nav-footer-btn" id="nav-logout-btn">LOGOUT</button>
</div>
</nav>
<!-- Side Navigation Overlay -->
<div class="side-nav-overlay" id="side-nav-overlay"></div>
<!-- Header with title and profile display -->
<div class="section">
<div class="header-content">
<div class="header-title">
<div class="header-title clickable" id="header-title">
<span class="relay-letter" data-letter="R">R</span>
<span class="relay-letter" data-letter="E">E</span>
<span class="relay-letter" data-letter="L">L</span>
@@ -34,10 +54,7 @@
<span id="header-user-name" class="header-user-name">Loading...</span>
</div>
<!-- Logout dropdown -->
<div class="logout-dropdown" id="logout-dropdown" style="display: none;">
<button type="button" id="dark-mode-btn" class="logout-btn">🌙 DARK MODE</button>
<button type="button" id="logout-btn" class="logout-btn">LOGOUT</button>
</div>
<!-- Dropdown menu removed - buttons moved to sidebar -->
</div>
</div>
@@ -51,12 +68,10 @@
</div>
<!-- DATABASE STATISTICS Section -->
<!-- Subscribe to kind 24567 events to receive real-time monitoring data -->
<div class="section flex-section" id="databaseStatisticsSection" style="display: none;">
<div class="section-header">
<h2>DATABASE STATISTICS</h2>
<!-- Monitoring toggle button will be inserted here by JavaScript -->
<!-- Temporarily disable auto-refresh button for real-time monitoring -->
<!-- <button type="button" id="refresh-stats-btn" class="countdown-btn"></button> -->
DATABASE STATISTICS
</div>
<!-- Event Rate Graph Container -->
@@ -81,10 +96,26 @@
<td>Total Events</td>
<td id="total-events">-</td>
</tr>
<tr>
<td>Process ID</td>
<td id="process-id">-</td>
</tr>
<tr>
<td>Active Subscriptions</td>
<td id="active-subscriptions">-</td>
</tr>
<tr>
<td>Memory Usage</td>
<td id="memory-usage">-</td>
</tr>
<tr>
<td>CPU Core</td>
<td id="cpu-core">-</td>
</tr>
<tr>
<td>CPU Usage</td>
<td id="cpu-usage">-</td>
</tr>
<tr>
<td>Oldest Event</td>
<td id="oldest-event">-</td>
@@ -175,25 +206,15 @@
<!-- SUBSCRIPTION DETAILS Section (Admin Only) -->
<div class="section flex-section" id="subscriptionDetailsSection" style="display: none;">
<div class="section-header">
<h2>ACTIVE SUBSCRIPTION DETAILS</h2>
ACTIVE SUBSCRIPTION DETAILS
</div>
<div class="input-group">
<div class="config-table-container">
<table class="config-table" id="subscription-details-table">
<thead>
<tr>
<th>Subscription ID</th>
<th>Client IP</th>
<th>Duration</th>
<th>Events Sent</th>
<th>Status</th>
<th>Filters</th>
</tr>
</thead>
<tbody id="subscription-details-table-body">
<tr>
<td colspan="6" style="text-align: center; font-style: italic;">No subscriptions active</td>
<td colspan="4" style="text-align: center; font-style: italic;">No subscriptions active</td>
</tr>
</tbody>
</table>
@@ -203,7 +224,9 @@
<!-- Testing Section -->
<div id="div_config" class="section flex-section" style="display: none;">
<h2>RELAY CONFIGURATION</h2>
<div class="section-header">
RELAY CONFIGURATION
</div>
<div id="config-display" class="hidden">
<div class="config-table-container">
<table class="config-table" id="config-table">
@@ -230,7 +253,7 @@
<!-- Auth Rules Management - Moved after configuration -->
<div class="section flex-section" id="authRulesSection" style="display: none;">
<div class="section-header">
<h2>AUTH RULES MANAGEMENT</h2>
AUTH RULES MANAGEMENT
</div>
<!-- Auth Rules Table -->
@@ -292,7 +315,7 @@
</div>
<!-- Outbox -->
<div class="input-group">
<div>
<label for="dm-outbox">Send Message to Relay:</label>
<textarea id="dm-outbox" rows="4" placeholder="Enter your message to send to the relay..."></textarea>
</div>
@@ -311,6 +334,72 @@
</div>
</div>
<!-- RELAY EVENTS Section -->
<div class="section" id="relayEventsSection" style="display: none;">
<div class="section-header">
RELAY EVENTS MANAGEMENT
</div>
<!-- Kind 0: User Metadata -->
<div class="input-group">
<h3>Kind 0: User Metadata</h3>
<div class="form-group">
<label for="kind0-name">Name:</label>
<input type="text" id="kind0-name" placeholder="Relay Name">
</div>
<div class="form-group">
<label for="kind0-about">About:</label>
<textarea id="kind0-about" rows="3" placeholder="Relay Description"></textarea>
</div>
<div class="form-group">
<label for="kind0-picture">Picture URL:</label>
<input type="url" id="kind0-picture" placeholder="https://example.com/logo.png">
</div>
<div class="form-group">
<label for="kind0-banner">Banner URL:</label>
<input type="url" id="kind0-banner" placeholder="https://example.com/banner.png">
</div>
<div class="form-group">
<label for="kind0-nip05">NIP-05:</label>
<input type="text" id="kind0-nip05" placeholder="relay@example.com">
</div>
<div class="form-group">
<label for="kind0-website">Website:</label>
<input type="url" id="kind0-website" placeholder="https://example.com">
</div>
<div class="inline-buttons">
<button type="button" id="submit-kind0-btn">UPDATE METADATA</button>
</div>
<div id="kind0-status" class="status-message"></div>
</div>
<!-- Kind 10050: DM Relay List -->
<div class="input-group">
<h3>Kind 10050: DM Relay List</h3>
<div class="form-group">
<label for="kind10050-relays">Relay URLs (one per line):</label>
<textarea id="kind10050-relays" rows="4" placeholder="wss://relay1.com&#10;wss://relay2.com"></textarea>
</div>
<div class="inline-buttons">
<button type="button" id="submit-kind10050-btn">UPDATE DM RELAYS</button>
</div>
<div id="kind10050-status" class="status-message"></div>
</div>
<!-- Kind 10002: Relay List -->
<div class="input-group">
<h3>Kind 10002: Relay List</h3>
<div id="kind10002-relay-entries">
<!-- Dynamic relay entries will be added here -->
</div>
<div class="inline-buttons">
<button type="button" id="add-relay-entry-btn">ADD RELAY</button>
<button type="button" id="submit-kind10002-btn">UPDATE RELAYS</button>
</div>
<div id="kind10002-status" class="status-message"></div>
</div>
</div>
<!-- SQL QUERY Section -->
<div class="section" id="sqlQuerySection" style="display: none;">
<div class="section-header">

File diff suppressed because it is too large Load Diff

View File

@@ -18,6 +18,7 @@ class ASCIIBarChart {
* @param {boolean} [options.useBinMode=false] - Enable time bin mode for data aggregation
* @param {number} [options.binDuration=10000] - Duration of each time bin in milliseconds (10 seconds default)
* @param {string} [options.xAxisLabelFormat='elapsed'] - X-axis label format: 'elapsed', 'bins', 'timestamps', 'ranges'
* @param {boolean} [options.debug=false] - Enable debug logging
*/
constructor(containerId, options = {}) {
this.container = document.getElementById(containerId);
@@ -29,6 +30,7 @@ class ASCIIBarChart {
this.xAxisLabel = options.xAxisLabel || '';
this.yAxisLabel = options.yAxisLabel || '';
this.autoFitWidth = options.autoFitWidth !== false; // Default to true
this.debug = options.debug || false; // Debug logging option
// Time bin configuration
this.useBinMode = options.useBinMode !== false; // Default to true
@@ -61,21 +63,10 @@ class ASCIIBarChart {
* @param {number} value - The numeric value to add
*/
addValue(value) {
if (this.useBinMode) {
// Time bin mode: increment count in current active bin
// Time bin mode: add value to current active bin count
this.checkBinRotation(); // Ensure we have an active bin
this.bins[this.currentBinIndex].count++;
this.bins[this.currentBinIndex].count += value; // Changed from ++ to += value
this.totalDataPoints++;
} else {
// Legacy mode: add individual values
this.data.push(value);
this.totalDataPoints++;
// Keep only the most recent data points
if (this.data.length > this.maxDataPoints) {
this.data.shift();
}
}
this.render();
this.updateInfo();
@@ -119,7 +110,7 @@ class ASCIIBarChart {
const totalWidth = yAxisPadding + yAxisNumbers + separator + dataWidth + padding;
// Only log when width changes
if (this.lastChartWidth !== totalWidth) {
if (this.debug && this.lastChartWidth !== totalWidth) {
console.log('getChartWidth changed:', { dataLength, totalWidth, previous: this.lastChartWidth });
this.lastChartWidth = totalWidth;
}
@@ -142,7 +133,7 @@ class ASCIIBarChart {
// Calculate optimal font size
// For monospace fonts, character width is approximately 0.6 * font size
// Use a slightly smaller ratio to fit more content
const charWidthRatio = 0.6;
const charWidthRatio = 0.7;
const padding = 30; // Reduce padding to fit more content
const availableWidth = containerWidth - padding;
const optimalFontSize = Math.floor((availableWidth / chartWidth) / charWidthRatio);
@@ -151,7 +142,7 @@ class ASCIIBarChart {
const fontSize = Math.max(4, Math.min(20, optimalFontSize));
// Only log when font size changes
if (this.lastFontSize !== fontSize) {
if (this.debug && this.lastFontSize !== fontSize) {
console.log('fontSize changed:', { containerWidth, chartWidth, fontSize, previous: this.lastFontSize });
this.lastFontSize = fontSize;
}
@@ -190,7 +181,9 @@ class ASCIIBarChart {
}
});
if (this.debug) {
console.log('render() dataToRender:', dataToRender, 'bins length:', this.bins.length);
}
maxValue = Math.max(...dataToRender);
minValue = Math.min(...dataToRender);
valueRange = maxValue - minValue;
@@ -243,8 +236,8 @@ class ASCIIBarChart {
}
}
// Calculate the actual count value this row represents (0 at bottom, increasing upward)
const rowCount = (row - 1) * scaleFactor;
// Calculate the actual count value this row represents (1 at bottom, increasing upward)
const rowCount = (row - 1) * scaleFactor + 1;
// Add Y-axis label (show actual count values)
line += String(rowCount).padStart(3, ' ') + ' |';

View File

@@ -1,3 +1,19 @@
#!/bin/bash
# Deploy the c-relay binary and its systemd unit, then restart the service.
#
# Fail fast: without this, a failed copy (e.g. missing build artifact) would
# still fall through to restarting the service against a stale binary.
set -euo pipefail

# Copy the binary to the deployment location
cp build/c_relay_x86 ~/Storage/c_relay/crelay
# Copy the service file to systemd (use the main service file)
sudo cp systemd/c-relay.service /etc/systemd/system/c-relay-local.service
# Reload systemd daemon to pick up the new service
sudo systemctl daemon-reload
# Enable the service (if not already enabled)
sudo systemctl enable c-relay-local.service
# Restart the service
sudo systemctl restart c-relay-local.service
# Show service status (--no-pager so the script never blocks on a pager)
sudo systemctl status c-relay-local.service --no-pager -l

View File

@@ -175,6 +175,18 @@ Configuration events follow the standard Nostr event format with kind 33334:
- **Impact**: Allows some flexibility in expiration timing
- **Example**: `"600"` (10 minute grace period)
### NIP-59 Gift Wrap Timestamp Configuration
#### `nip59_timestamp_max_delay_sec`
- **Description**: Controls timestamp randomization for NIP-59 gift wraps
- **Default**: `"0"` (no randomization)
- **Range**: `0` to `604800` (7 days)
- **Impact**: Affects compatibility with other Nostr clients for direct messaging
- **Values**:
- `"0"`: No randomization (maximum compatibility)
- `"1-604800"`: Random timestamp between now and N seconds ago
- **Example**: `"172800"` (2 days randomization for privacy)
## Configuration Examples
### Basic Relay Setup

View File

@@ -0,0 +1,298 @@
# Libwebsockets Proper Pattern - Message Queue Design
## Problem Analysis
### Current Violation
We're calling `lws_write()` directly from multiple code paths:
1. **Event broadcast** (subscriptions.c:667) - when events arrive
2. **OK responses** (websockets.c:855) - when processing EVENT messages
3. **EOSE responses** (websockets.c:976) - when processing REQ messages
4. **COUNT responses** (websockets.c:1922) - when processing COUNT messages
This violates libwebsockets' design pattern which requires:
- **`lws_write()` ONLY called from `LWS_CALLBACK_SERVER_WRITEABLE`**
- Application queues messages and requests writeable callback
- Libwebsockets handles write timing and socket buffer management
### Consequences of Violation
1. Partial writes when socket buffer is full
2. Multiple concurrent write attempts before callback fires
3. "write already pending" errors with single buffer
4. Frame corruption from interleaved partial writes
5. "Invalid frame header" errors on client side
## Correct Architecture
### Message Queue Pattern
```
┌─────────────────────────────────────────────────────────────┐
│ Application Layer │
├─────────────────────────────────────────────────────────────┤
│ │
│ Event Arrives → Queue Message → Request Writeable Callback │
│ REQ Received → Queue EOSE → Request Writeable Callback │
│ EVENT Received→ Queue OK → Request Writeable Callback │
│ COUNT Received→ Queue COUNT → Request Writeable Callback │
│ │
└─────────────────────────────────────────────────────────────┘
lws_callback_on_writable(wsi)
┌─────────────────────────────────────────────────────────────┐
│ LWS_CALLBACK_SERVER_WRITEABLE │
├─────────────────────────────────────────────────────────────┤
│ │
│ 1. Dequeue next message from queue │
│ 2. Call lws_write() with message data │
│ 3. If queue not empty, request another callback │
│ │
└─────────────────────────────────────────────────────────────┘
libwebsockets handles:
- Socket buffer management
- Partial write handling
- Frame atomicity
```
## Data Structures
### Message Queue Node
```c
typedef struct message_queue_node {
unsigned char* data; // Message data (with LWS_PRE space)
size_t length; // Message length (without LWS_PRE)
enum lws_write_protocol type; // LWS_WRITE_TEXT, etc.
struct message_queue_node* next;
} message_queue_node_t;
```
### Per-Session Data Updates
```c
struct per_session_data {
// ... existing fields ...
// Message queue (replaces single buffer)
message_queue_node_t* message_queue_head;
message_queue_node_t* message_queue_tail;
int message_queue_count;
int writeable_requested; // Flag to prevent duplicate requests
};
```
## Implementation Functions
### 1. Queue Message (Application Layer)
```c
int queue_message(struct lws* wsi, struct per_session_data* pss,
const char* message, size_t length,
enum lws_write_protocol type)
{
// Allocate node
message_queue_node_t* node = malloc(sizeof(message_queue_node_t));
// Allocate buffer with LWS_PRE space
node->data = malloc(LWS_PRE + length);
memcpy(node->data + LWS_PRE, message, length);
node->length = length;
node->type = type;
node->next = NULL;
// Add to queue (FIFO)
pthread_mutex_lock(&pss->session_lock);
if (!pss->message_queue_head) {
pss->message_queue_head = node;
pss->message_queue_tail = node;
} else {
pss->message_queue_tail->next = node;
pss->message_queue_tail = node;
}
pss->message_queue_count++;
pthread_mutex_unlock(&pss->session_lock);
// Request writeable callback (only if not already requested)
if (!pss->writeable_requested) {
pss->writeable_requested = 1;
lws_callback_on_writable(wsi);
}
return 0;
}
```
### 2. Process Queue (Writeable Callback)
```c
int process_message_queue(struct lws* wsi, struct per_session_data* pss)
{
pthread_mutex_lock(&pss->session_lock);
// Get next message from queue
message_queue_node_t* node = pss->message_queue_head;
if (!node) {
pss->writeable_requested = 0;
pthread_mutex_unlock(&pss->session_lock);
return 0; // Queue empty
}
// Remove from queue
pss->message_queue_head = node->next;
if (!pss->message_queue_head) {
pss->message_queue_tail = NULL;
}
pss->message_queue_count--;
pthread_mutex_unlock(&pss->session_lock);
// Write message (libwebsockets handles partial writes)
int result = lws_write(wsi, node->data + LWS_PRE, node->length, node->type);
// Free node
free(node->data);
free(node);
// If queue not empty, request another callback
pthread_mutex_lock(&pss->session_lock);
if (pss->message_queue_head) {
lws_callback_on_writable(wsi);
} else {
pss->writeable_requested = 0;
}
pthread_mutex_unlock(&pss->session_lock);
return (result < 0) ? -1 : 0;
}
```
## Refactoring Changes
### Before (WRONG - Direct Write)
```c
// websockets.c:855 - OK response
int write_result = lws_write(wsi, buf + LWS_PRE, response_len, LWS_WRITE_TEXT);
if (write_result < 0) {
DEBUG_ERROR("Write failed");
} else if ((size_t)write_result != response_len) {
// Partial write - queue remaining data
queue_websocket_write(wsi, pss, ...);
}
```
### After (CORRECT - Queue Message)
```c
// websockets.c:855 - OK response
queue_message(wsi, pss, response_str, response_len, LWS_WRITE_TEXT);
// That's it! Writeable callback will handle the actual write
```
### Before (WRONG - Direct Write in Broadcast)
```c
// subscriptions.c:667 - EVENT broadcast
int write_result = lws_write(current_temp->wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
if (write_result < 0) {
DEBUG_ERROR("Write failed");
} else if ((size_t)write_result != msg_len) {
queue_websocket_write(...);
}
```
### After (CORRECT - Queue Message)
```c
// subscriptions.c:667 - EVENT broadcast
struct per_session_data* pss = lws_wsi_user(current_temp->wsi);
queue_message(current_temp->wsi, pss, msg_str, msg_len, LWS_WRITE_TEXT);
// Writeable callback will handle the actual write
```
## Benefits of Correct Pattern
1. **No Partial Write Handling Needed**
- Libwebsockets handles partial writes internally
- We just queue complete messages
2. **No "Write Already Pending" Errors**
- Queue can hold unlimited messages
- Each processed sequentially from callback
3. **Thread Safety**
- Queue operations protected by session lock
- Write only from single callback thread
4. **Frame Atomicity**
- Libwebsockets ensures complete frame transmission
- No interleaved partial writes
5. **Simpler Code**
- No complex partial write state machine
- Just queue and forget
6. **Better Performance**
- Libwebsockets optimizes write timing
- Batches writes when socket ready
## Migration Steps
1. ✅ Identify all `lws_write()` call sites
2. ✅ Confirm violation of libwebsockets pattern
3. ⏳ Design message queue structure
4. ⏳ Implement `queue_message()` function
5. ⏳ Implement `process_message_queue()` function
6. ⏳ Update `per_session_data` structure
7. ⏳ Refactor OK response to use queue
8. ⏳ Refactor EOSE response to use queue
9. ⏳ Refactor COUNT response to use queue
10. ⏳ Refactor EVENT broadcast to use queue
11. ⏳ Update `LWS_CALLBACK_SERVER_WRITEABLE` handler
12. ⏳ Add queue cleanup in `LWS_CALLBACK_CLOSED`
13. ⏳ Remove old partial write code
14. ⏳ Test with rapid multiple events
15. ⏳ Test with large events (>4KB)
16. ⏳ Test under load
17. ⏳ Verify no frame errors
## Testing Strategy
### Test 1: Multiple Rapid Events
```bash
# Send 10 events rapidly to same client
for i in {1..10}; do
echo '["EVENT",{"kind":1,"content":"test'$i'","created_at":'$(date +%s)',...}]' | \
websocat ws://localhost:8888 &
done
```
**Expected**: All events queued and sent sequentially, no errors
### Test 2: Large Events
```bash
# Send event >4KB (forces multiple socket writes)
nak event --content "$(head -c 5000 /dev/urandom | base64)" | \
websocat ws://localhost:8888
```
**Expected**: Event queued, libwebsockets handles partial writes internally
### Test 3: Concurrent Connections
```bash
# 100 concurrent connections, each sending events
for i in {1..100}; do
(echo '["REQ","sub'$i'",{}]'; sleep 1) | websocat ws://localhost:8888 &
done
```
**Expected**: All subscriptions work, events broadcast correctly
## Success Criteria
- ✅ No `lws_write()` calls outside `LWS_CALLBACK_SERVER_WRITEABLE`
- ✅ No "write already pending" errors in logs
- ✅ No "Invalid frame header" errors on client side
- ✅ All messages delivered in correct order
- ✅ Large events (>4KB) handled correctly
- ✅ Multiple rapid events to same client work
- ✅ Concurrent connections stable under load
## References
- [libwebsockets documentation](https://libwebsockets.org/lws-api-doc-main/html/index.html)
- [LWS_CALLBACK_SERVER_WRITEABLE](https://libwebsockets.org/lws-api-doc-main/html/group__callback-when-writeable.html)
- [lws_callback_on_writable()](https://libwebsockets.org/lws-api-doc-main/html/group__callback-when-writeable.html)

View File

@@ -0,0 +1,517 @@
# NIP-59 Timestamp Configuration Implementation Plan
## Overview
Add configurable timestamp randomization for NIP-59 gift wraps to improve compatibility with Nostr apps that don't implement timestamp randomization.
## Problem Statement
The NIP-59 protocol specifies that timestamps on gift wraps should have randomness to prevent time-analysis attacks. However, some Nostr platforms don't implement this, causing compatibility issues with direct messaging (NIP-17).
## Solution
Add a configuration parameter `nip59_timestamp_max_delay_sec` that controls the maximum random delay applied to timestamps:
- **Value = 0**: Use current timestamp (no randomization) for maximum compatibility
- **Value > 0**: Use random timestamp between now and N seconds ago
- **Default = 0**: Maximum compatibility mode (no randomization)
## Implementation Approach: Option B (Direct Parameter Addition)
We chose Option B because:
1. Explicit and stateless - value flows through call chain
2. Thread-safe by design
3. No global state needed in nostr_core_lib
4. DMs are sent rarely, so database query per call is acceptable
---
## Detailed Implementation Steps
### Phase 1: Configuration Setup in c-relay
#### 1.1 Add Configuration Parameter
**File:** `src/default_config_event.h`
**Location:** Line 82 (after `trust_proxy_headers`)
```c
// NIP-59 Gift Wrap Timestamp Configuration
{"nip59_timestamp_max_delay_sec", "0"} // Default: 0 (no randomization for compatibility)
```
**Rationale:**
- Default of 0 seconds (no randomization) for maximum compatibility
- Placed after proxy settings, before closing brace
- Follows existing naming convention
#### 1.2 Add Configuration Validation
**File:** `src/config.c`
**Function:** `validate_config_field()` (around line 923)
Add validation case:
```c
else if (strcmp(key, "nip59_timestamp_max_delay_sec") == 0) {
long value = strtol(value_str, NULL, 10);
if (value < 0 || value > 604800) { // Max 7 days
snprintf(error_msg, error_size,
"nip59_timestamp_max_delay_sec must be between 0 and 604800 (7 days)");
return -1;
}
}
```
**Rationale:**
- 0 = no randomization (compatibility mode)
- 604800 = 7 days maximum (reasonable upper bound)
- Prevents negative values or excessive delays
---
### Phase 2: Modify nostr_core_lib Functions
#### 2.1 Update random_past_timestamp() Function
**File:** `nostr_core_lib/nostr_core/nip059.c`
**Current Location:** Lines 31-36
**Current Code:**
```c
static time_t random_past_timestamp(void) {
time_t now = time(NULL);
// Random time up to 2 days (172800 seconds) in the past
long random_offset = (long)(rand() % 172800);
return now - random_offset;
}
```
**New Code:**
```c
static time_t random_past_timestamp(long max_delay_sec) {
time_t now = time(NULL);
// If max_delay_sec is 0, return current timestamp (no randomization)
if (max_delay_sec == 0) {
return now;
}
// Random time up to max_delay_sec in the past
long random_offset = (long)(rand() % max_delay_sec);
return now - random_offset;
}
```
**Changes:**
- Add `long max_delay_sec` parameter
- Handle special case: `max_delay_sec == 0` returns current time
- Use `max_delay_sec` instead of hardcoded 172800
#### 2.2 Update nostr_nip59_create_seal() Function
**File:** `nostr_core_lib/nostr_core/nip059.c`
**Current Location:** Lines 144-215
**Function Signature Change:**
```c
// OLD:
cJSON* nostr_nip59_create_seal(cJSON* rumor,
const unsigned char* sender_private_key,
const unsigned char* recipient_public_key);
// NEW:
cJSON* nostr_nip59_create_seal(cJSON* rumor,
const unsigned char* sender_private_key,
const unsigned char* recipient_public_key,
long max_delay_sec);
```
**Code Change at Line 181:**
```c
// OLD:
time_t seal_time = random_past_timestamp();
// NEW:
time_t seal_time = random_past_timestamp(max_delay_sec);
```
#### 2.3 Update nostr_nip59_create_gift_wrap() Function
**File:** `nostr_core_lib/nostr_core/nip059.c`
**Current Location:** Lines 220-323
**Function Signature Change:**
```c
// OLD:
cJSON* nostr_nip59_create_gift_wrap(cJSON* seal,
const char* recipient_public_key_hex);
// NEW:
cJSON* nostr_nip59_create_gift_wrap(cJSON* seal,
const char* recipient_public_key_hex,
long max_delay_sec);
```
**Code Change at Line 275:**
```c
// OLD:
time_t wrap_time = random_past_timestamp();
// NEW:
time_t wrap_time = random_past_timestamp(max_delay_sec);
```
#### 2.4 Update nip059.h Header
**File:** `nostr_core_lib/nostr_core/nip059.h`
**Locations:** Lines 38-39 and 48
**Update Function Declarations:**
```c
// Line 38-39: Update nostr_nip59_create_seal
cJSON* nostr_nip59_create_seal(cJSON* rumor,
const unsigned char* sender_private_key,
const unsigned char* recipient_public_key,
long max_delay_sec);
// Line 48: Update nostr_nip59_create_gift_wrap
cJSON* nostr_nip59_create_gift_wrap(cJSON* seal,
const char* recipient_public_key_hex,
long max_delay_sec);
```
**Update Documentation Comments:**
```c
/**
* NIP-59: Create a seal (kind 13) wrapping a rumor
*
* @param rumor The rumor event to seal (cJSON object)
* @param sender_private_key 32-byte sender private key
* @param recipient_public_key 32-byte recipient public key (x-only)
* @param max_delay_sec Maximum random delay in seconds (0 = no randomization)
* @return cJSON object representing the seal event, or NULL on error
*/
/**
* NIP-59: Create a gift wrap (kind 1059) wrapping a seal
*
* @param seal The seal event to wrap (cJSON object)
* @param recipient_public_key_hex Recipient's public key in hex format
* @param max_delay_sec Maximum random delay in seconds (0 = no randomization)
* @return cJSON object representing the gift wrap event, or NULL on error
*/
```
---
### Phase 3: Update NIP-17 Integration
#### 3.1 Update nostr_nip17_send_dm() Function
**File:** `nostr_core_lib/nostr_core/nip017.c`
**Current Location:** Lines 260-320
**Function Signature Change:**
```c
// OLD:
int nostr_nip17_send_dm(cJSON* dm_event,
const char** recipient_pubkeys,
int num_recipients,
const unsigned char* sender_private_key,
cJSON** gift_wraps_out,
int max_gift_wraps);
// NEW:
int nostr_nip17_send_dm(cJSON* dm_event,
const char** recipient_pubkeys,
int num_recipients,
const unsigned char* sender_private_key,
cJSON** gift_wraps_out,
int max_gift_wraps,
long max_delay_sec);
```
**Code Changes:**
At line 281 (seal creation):
```c
// OLD:
cJSON* seal = nostr_nip59_create_seal(dm_event, sender_private_key, recipient_public_key);
// NEW:
cJSON* seal = nostr_nip59_create_seal(dm_event, sender_private_key, recipient_public_key, max_delay_sec);
```
At line 287 (gift wrap creation):
```c
// OLD:
cJSON* gift_wrap = nostr_nip59_create_gift_wrap(seal, recipient_pubkeys[i]);
// NEW:
cJSON* gift_wrap = nostr_nip59_create_gift_wrap(seal, recipient_pubkeys[i], max_delay_sec);
```
At line 306 (sender seal creation):
```c
// OLD:
cJSON* sender_seal = nostr_nip59_create_seal(dm_event, sender_private_key, sender_public_key);
// NEW:
cJSON* sender_seal = nostr_nip59_create_seal(dm_event, sender_private_key, sender_public_key, max_delay_sec);
```
At line 309 (sender gift wrap creation):
```c
// OLD:
cJSON* sender_gift_wrap = nostr_nip59_create_gift_wrap(sender_seal, sender_pubkey_hex);
// NEW:
cJSON* sender_gift_wrap = nostr_nip59_create_gift_wrap(sender_seal, sender_pubkey_hex, max_delay_sec);
```
#### 3.2 Update nip017.h Header
**File:** `nostr_core_lib/nostr_core/nip017.h`
**Location:** Lines 102-107
**Update Function Declaration:**
```c
int nostr_nip17_send_dm(cJSON* dm_event,
const char** recipient_pubkeys,
int num_recipients,
const unsigned char* sender_private_key,
cJSON** gift_wraps_out,
int max_gift_wraps,
long max_delay_sec);
```
**Update Documentation Comment (lines 88-100):**
```c
/**
* NIP-17: Send a direct message to recipients
*
* This function creates the appropriate rumor, seals it, gift wraps it,
* and returns the final gift wrap events ready for publishing.
*
* @param dm_event The unsigned DM event (kind 14 or 15)
* @param recipient_pubkeys Array of recipient public keys (hex strings)
* @param num_recipients Number of recipients
* @param sender_private_key 32-byte sender private key
* @param gift_wraps_out Array to store resulting gift wrap events (caller must free)
* @param max_gift_wraps Maximum number of gift wraps to create
* @param max_delay_sec Maximum random timestamp delay in seconds (0 = no randomization)
* @return Number of gift wrap events created, or -1 on error
*/
```
---
### Phase 4: Update c-relay Call Sites
#### 4.1 Update src/api.c
**Location:** Line 1319
**Current Code:**
```c
int send_result = nostr_nip17_send_dm(
dm_response, // dm_event
recipient_pubkeys, // recipient_pubkeys
1, // num_recipients
relay_privkey, // sender_private_key
gift_wraps, // gift_wraps_out
1 // max_gift_wraps
);
```
**New Code:**
```c
// Get timestamp delay configuration
long max_delay_sec = get_config_int("nip59_timestamp_max_delay_sec", 0);
int send_result = nostr_nip17_send_dm(
dm_response, // dm_event
recipient_pubkeys, // recipient_pubkeys
1, // num_recipients
relay_privkey, // sender_private_key
gift_wraps, // gift_wraps_out
1, // max_gift_wraps
max_delay_sec // max_delay_sec
);
```
#### 4.2 Update src/dm_admin.c
**Location:** Line 371
**Current Code:**
```c
int send_result = nostr_nip17_send_dm(
success_dm, // dm_event
sender_pubkey_array, // recipient_pubkeys
1, // num_recipients
relay_privkey, // sender_private_key
success_gift_wraps, // gift_wraps_out
1 // max_gift_wraps
);
```
**New Code:**
```c
// Get timestamp delay configuration
long max_delay_sec = get_config_int("nip59_timestamp_max_delay_sec", 0);
int send_result = nostr_nip17_send_dm(
success_dm, // dm_event
sender_pubkey_array, // recipient_pubkeys
1, // num_recipients
relay_privkey, // sender_private_key
success_gift_wraps, // gift_wraps_out
1, // max_gift_wraps
max_delay_sec // max_delay_sec
);
```
**Note:** Both files already include `config.h`, so `get_config_int()` is available.
---
## Testing Plan
### Test Case 1: No Randomization (Compatibility Mode)
**Configuration:** `nip59_timestamp_max_delay_sec = 0`
**Expected Behavior:**
- Gift wrap timestamps should equal current time
- Seal timestamps should equal current time
- No random delay applied
**Test Command:**
```bash
# Set config via admin API
# Send test DM
# Verify timestamps are current (within 1 second of send time)
```
### Test Case 2: Custom Delay
**Configuration:** `nip59_timestamp_max_delay_sec = 1000`
**Expected Behavior:**
- Gift wrap timestamps should be between now and 1000 seconds ago
- Seal timestamps should be between now and 1000 seconds ago
- Random delay applied within specified range
**Test Command:**
```bash
# Set config via admin API
# Send test DM
# Verify timestamps are in past but within 1000 seconds
```
### Test Case 3: Default Behavior
**Configuration:** `nip59_timestamp_max_delay_sec = 0` (default)
**Expected Behavior:**
- Gift wrap timestamps should equal current time
- Seal timestamps should equal current time
- No randomization (maximum compatibility)
**Test Command:**
```bash
# Use default config
# Send test DM
# Verify timestamps are current (within 1 second of send time)
```
### Test Case 4: Configuration Validation
**Test Invalid Values:**
- Negative value: Should be rejected
- Value > 604800: Should be rejected
- Valid boundary values (0, 604800): Should be accepted
### Test Case 5: Interoperability
**Test with Other Nostr Clients:**
- Send DM with `max_delay_sec = 0` to clients that don't randomize
- Send DM with `max_delay_sec = 172800` to clients that do randomize
- Verify both scenarios work correctly
---
## Documentation Updates
### Update docs/configuration_guide.md
Add new section:
```markdown
### NIP-59 Gift Wrap Timestamp Configuration
#### nip59_timestamp_max_delay_sec
- **Type:** Integer
- **Default:** 0 (no randomization)
- **Range:** 0 to 604800 (7 days)
- **Description:** Controls timestamp randomization for NIP-59 gift wraps
The NIP-59 protocol recommends randomizing timestamps on gift wraps to prevent
time-analysis attacks. However, some Nostr platforms don't implement this,
causing compatibility issues.
**Values:**
- `0` (default): No randomization - uses current timestamp (maximum compatibility)
- `1-604800`: Random timestamp between now and N seconds ago
**Use Cases:**
- Keep default `0` for maximum compatibility with clients that don't randomize
- Set to `172800` for privacy per NIP-59 specification (2 days randomization)
- Set to custom value (e.g., `3600`) for 1-hour randomization window
**Example:**
```json
["nip59_timestamp_max_delay_sec", "0"] // Default: compatibility mode
["nip59_timestamp_max_delay_sec", "3600"] // 1 hour randomization
["nip59_timestamp_max_delay_sec", "172800"] // 2 days randomization
```
```
---
## Implementation Checklist
### nostr_core_lib Changes
- [ ] Modify `random_past_timestamp()` to accept `max_delay_sec` parameter
- [ ] Update `nostr_nip59_create_seal()` signature and implementation
- [ ] Update `nostr_nip59_create_gift_wrap()` signature and implementation
- [ ] Update `nip059.h` function declarations and documentation
- [ ] Update `nostr_nip17_send_dm()` signature and implementation
- [ ] Update `nip017.h` function declaration and documentation
### c-relay Changes
- [ ] Add `nip59_timestamp_max_delay_sec` to `default_config_event.h`
- [ ] Add validation in `config.c` for new parameter
- [ ] Update `src/api.c` call site to pass `max_delay_sec`
- [ ] Update `src/dm_admin.c` call site to pass `max_delay_sec`
### Testing
- [ ] Test with `max_delay_sec = 0` (no randomization)
- [ ] Test with `max_delay_sec = 1000` (custom delay)
- [ ] Test with `max_delay_sec = 172800` (NIP-59 recommended 2-day randomization)
- [ ] Test configuration validation (invalid values)
- [ ] Test interoperability with other Nostr clients
### Documentation
- [ ] Update `docs/configuration_guide.md`
- [ ] Add this implementation plan to docs
- [ ] Update README if needed
---
## Rollback Plan
If issues arise:
1. Revert nostr_core_lib changes (git revert in submodule)
2. Revert c-relay changes
3. Configuration parameter will be ignored if not used
4. Default behavior (0) provides maximum compatibility
---
## Notes
- The configuration is read on each DM send, allowing runtime changes
- No restart required when changing `nip59_timestamp_max_delay_sec`
- Thread-safe by design (no global state)
- Default value of 0 provides maximum compatibility with other Nostr clients
- Can be changed to 172800 or other values for NIP-59 privacy features
---
## References
- [NIP-59: Gift Wrap](https://github.com/nostr-protocol/nips/blob/master/59.md)
- [NIP-17: Private Direct Messages](https://github.com/nostr-protocol/nips/blob/master/17.md)
- [NIP-44: Versioned Encryption](https://github.com/nostr-protocol/nips/blob/master/44.md)

View File

@@ -0,0 +1,532 @@
# Subscription Cleanup - Simplified Design
## Problem Summary
The c-relay Nostr relay experienced severe performance degradation (90-100% CPU) due to subscription accumulation in the database. Investigation revealed **323,644 orphaned subscriptions** that were never properly closed when WebSocket connections dropped.
## Solution: Two-Component Approach
This simplified design focuses on two pragmatic solutions that align with Nostr's stateless design:
1. **Startup Cleanup**: Close all subscriptions on relay restart
2. **Connection Age Limit**: Disconnect clients after a configurable time period
Both solutions force clients to reconnect and re-establish subscriptions, which is standard Nostr behavior.
---
## Component 1: Startup Cleanup
### Purpose
Ensure clean state on every relay restart by closing all subscriptions in the database.
### Implementation
**File:** [`src/subscriptions.c`](src/subscriptions.c)
**New Function:**
```c
void cleanup_all_subscriptions_on_startup(void) {
if (!g_db) {
DEBUG_ERROR("Database not initialized for startup cleanup");
return;
}
DEBUG_LOG("Startup cleanup: Marking all active subscriptions as disconnected");
// Mark all 'created' subscriptions as disconnected
const char* update_sql =
"UPDATE subscriptions "
"SET ended_at = strftime('%s', 'now') "
"WHERE event_type = 'created' AND ended_at IS NULL";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, update_sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
DEBUG_ERROR("Failed to prepare startup cleanup query: %s", sqlite3_errmsg(g_db));
return;
}
rc = sqlite3_step(stmt);
int updated_count = sqlite3_changes(g_db);
sqlite3_finalize(stmt);
if (updated_count > 0) {
// Log a single 'disconnected' event for the startup cleanup
const char* insert_sql =
"INSERT INTO subscriptions (subscription_id, wsi_pointer, client_ip, event_type) "
"VALUES ('startup_cleanup', '', 'system', 'disconnected')";
rc = sqlite3_prepare_v2(g_db, insert_sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
DEBUG_LOG("Startup cleanup: Marked %d subscriptions as disconnected", updated_count);
} else {
DEBUG_LOG("Startup cleanup: No active subscriptions found");
}
}
```
**Integration Point:** [`src/main.c:1810`](src/main.c:1810)
```c
// Initialize subscription manager mutexes
if (pthread_mutex_init(&g_subscription_manager.subscriptions_lock, NULL) != 0) {
DEBUG_ERROR("Failed to initialize subscriptions mutex");
sqlite3_close(g_db);
return 1;
}
if (pthread_mutex_init(&g_subscription_manager.ip_tracking_lock, NULL) != 0) {
DEBUG_ERROR("Failed to initialize IP tracking mutex");
pthread_mutex_destroy(&g_subscription_manager.subscriptions_lock);
sqlite3_close(g_db);
return 1;
}
// **NEW: Startup cleanup - close all subscriptions**
cleanup_all_subscriptions_on_startup();
// Start WebSocket relay server
DEBUG_LOG("Starting WebSocket relay server...");
if (start_websocket_relay(port_override, strict_port) != 0) {
DEBUG_ERROR("Failed to start WebSocket relay");
// ... cleanup code
}
```
### Benefits
- **Immediate relief**: Restart relay to fix subscription issues
- **Clean slate**: Every restart starts with zero active subscriptions
- **Simple**: Single SQL UPDATE statement
- **Nostr-aligned**: Clients are designed to reconnect after relay restart
- **No configuration needed**: Always runs on startup
---
## Component 2: Connection Age Limit
### Purpose
Automatically disconnect clients after a configurable time period, forcing them to reconnect and re-establish subscriptions.
### Why Disconnect Instead of Just Closing Subscriptions?
**Option 1: Send CLOSED message (keep connection)**
- ❌ Not all clients handle `CLOSED` messages properly
- ❌ Silent failure - client thinks it's subscribed but isn't
- ❌ Partial cleanup - connection still consumes resources
- ❌ More complex to implement
**Option 2: Disconnect client entirely (force reconnection)**
- ✅ Universal compatibility - all clients handle WebSocket reconnection
- ✅ Complete resource cleanup (memory, file descriptors, etc.)
- ✅ Simple implementation - single operation
- ✅ Well-tested code path (same as network interruptions)
- ✅ Forces re-authentication if needed
### Implementation
**File:** [`src/websockets.c`](src/websockets.c)
**New Function:**
```c
/**
* Check connection age and disconnect clients that have been connected too long.
* This forces clients to reconnect and re-establish subscriptions.
*
* Uses libwebsockets' lws_vhost_foreach_wsi() to iterate through all active
* connections and checks their connection_established timestamp from per_session_data.
*/
void check_connection_age(int max_connection_seconds) {
if (max_connection_seconds <= 0 || !ws_context) {
return;
}
time_t now = time(NULL);
time_t cutoff = now - max_connection_seconds;
// Get the default vhost
struct lws_vhost *vhost = lws_get_vhost_by_name(ws_context, "default");
if (!vhost) {
DEBUG_ERROR("Failed to get vhost for connection age check");
return;
}
// Iterate through all active WebSocket connections
// Note: lws_vhost_foreach_wsi() calls our callback for each connection
struct lws *wsi = NULL;
while ((wsi = lws_vhost_foreach_wsi(vhost, wsi)) != NULL) {
// Get per-session data which contains connection_established timestamp
struct per_session_data *pss = (struct per_session_data *)lws_wsi_user(wsi);
if (pss && pss->connection_established > 0) {
// Check if connection is older than cutoff
if (pss->connection_established < cutoff) {
// Connection is too old - close it
long age_seconds = now - pss->connection_established;
DEBUG_LOG("Closing connection from %s (age: %lds, limit: %ds)",
pss->client_ip, age_seconds, max_connection_seconds);
// Close with normal status and reason message
lws_close_reason(wsi, LWS_CLOSE_STATUS_NORMAL,
                                 (unsigned char *)"connection age limit", 20);
}
}
}
}
```
**Key Implementation Details:**
1. **No database needed**: Active connections are tracked by libwebsockets itself
2. **Uses existing timestamp**: `pss->connection_established` is already set on line 456 of websockets.c
3. **Built-in iterator**: `lws_vhost_foreach_wsi()` safely iterates through all active connections
4. **Per-session data**: Each connection's `per_session_data` is accessible via `lws_wsi_user()`
5. **Safe closure**: `lws_close_reason()` properly closes the WebSocket with a status code and message
**Integration Point:** [`src/websockets.c:2176`](src/websockets.c:2176) - in existing event loop
```c
// Main event loop with proper signal handling
while (g_server_running && !g_shutdown_flag) {
int result = lws_service(ws_context, 1000);
if (result < 0) {
DEBUG_ERROR("libwebsockets service error");
break;
}
// Check if it's time to post status update
time_t current_time = time(NULL);
int status_post_hours = get_config_int("kind_1_status_posts_hours", 0);
if (status_post_hours > 0) {
int seconds_interval = status_post_hours * 3600;
if (current_time - last_status_post_time >= seconds_interval) {
last_status_post_time = current_time;
generate_and_post_status_event();
}
}
// **NEW: Check for connection age limit**
int max_connection_seconds = get_config_int("max_connection_seconds", 86400); // Default 24 hours
if (max_connection_seconds > 0) {
check_connection_age(max_connection_seconds);
}
}
```
### Configuration
**Parameter:** `max_connection_seconds`
- **Default:** `86400` (24 hours)
- **Range:** `0` = disabled, `>0` = disconnect after X seconds
- **Units:** Seconds (for consistency with other time-based configs)
**Example configurations:**
```json
{
"max_connection_seconds": 86400 // 86400 seconds = 24 hours (default)
}
```
```json
{
"max_connection_seconds": 43200 // 43200 seconds = 12 hours
}
```
```json
{
"max_connection_seconds": 3600 // 3600 seconds = 1 hour
}
```
```json
{
"max_connection_seconds": 0 // Disabled
}
```
### Client Behavior
When disconnected due to age limit, clients will:
1. Detect WebSocket closure
2. Wait briefly (exponential backoff)
3. Reconnect to relay
4. Re-authenticate if needed (NIP-42)
5. Re-establish all subscriptions
6. Resume normal operation
This is **exactly what happens** during network interruptions, so it's a well-tested code path in all Nostr clients.
### Benefits
- **No new threads**: Uses existing event loop
- **Minimal overhead**: Check runs once per second (same as `lws_service`)
- **Simple implementation**: Iterate through active connections
- **Consistent pattern**: Matches existing status post checking
- **Universal compatibility**: All clients handle reconnection
- **Complete cleanup**: Frees all resources associated with connection
- **Configurable**: Can be adjusted per relay needs or disabled entirely
---
## Implementation Plan
### Phase 1: Startup Cleanup (1-2 hours)
1. **Add `cleanup_all_subscriptions_on_startup()` function**
- File: [`src/subscriptions.c`](src/subscriptions.c)
- SQL UPDATE to mark all active subscriptions as disconnected
- Add logging for cleanup count
2. **Integrate in main()**
- File: [`src/main.c:1810`](src/main.c:1810)
- Call after mutex initialization, before WebSocket server start
3. **Test**
- Create subscriptions in database
- Restart relay
- Verify all subscriptions marked as disconnected
- Verify `active_subscriptions_log` shows 0 subscriptions
**Estimated Time:** 1-2 hours
### Phase 2: Connection Age Limit (2-3 hours)
1. **Add `check_connection_age()` function**
- File: [`src/websockets.c`](src/websockets.c)
- Iterate through active connections
- Close connections older than limit
2. **Integrate in event loop**
- File: [`src/websockets.c:2176`](src/websockets.c:2176)
- Add check after status post check
- Use same pattern as status posts
3. **Add configuration parameter**
- Add `max_connection_seconds` to default config
- Default: 86400 (24 hours)
4. **Test**
- Connect client
- Wait for timeout (or reduce timeout for testing)
- Verify client disconnected
- Verify client reconnects automatically
- Verify subscriptions re-established
**Estimated Time:** 2-3 hours
---
## Testing Strategy
### Startup Cleanup Tests
```bash
# Test 1: Clean startup with existing subscriptions
- Create 100 active subscriptions in database
- Restart relay
- Verify all subscriptions marked as disconnected
- Verify active_subscriptions_log shows 0 subscriptions
# Test 2: Clean startup with no subscriptions
- Start relay with empty database
- Verify no errors
- Verify startup cleanup logs "No active subscriptions found"
# Test 3: Clients reconnect after restart
- Create subscriptions before restart
- Restart relay
- Connect clients and create new subscriptions
- Verify new subscriptions tracked correctly
```
### Connection Age Limit Tests
```bash
# Test 1: Connection disconnected after timeout
- Set max_connection_seconds to 60 (for testing)
- Connect client
- Wait 61 seconds
- Verify client disconnected
- Verify client reconnects automatically
# Test 2: Subscriptions re-established after reconnection
- Connect client with subscriptions
- Wait for timeout
- Verify client reconnects
- Verify subscriptions re-established
- Verify events still delivered
# Test 3: Disabled when set to 0
- Set max_connection_seconds to 0
- Connect client
- Wait extended period
- Verify client NOT disconnected
```
### Integration Tests
```bash
# Test 1: Combined behavior
- Start relay (startup cleanup runs)
- Connect multiple clients
- Create subscriptions
- Wait for connection timeout
- Verify clients reconnect
- Restart relay
- Verify clean state
# Test 2: Load test
- Connect 100 clients
- Each creates 5 subscriptions
- Wait for connection timeout
- Verify all clients reconnect
- Verify all subscriptions re-established
- Monitor CPU usage (should remain low)
```
---
## Success Criteria
### Component 1: Startup Cleanup
- ✅ Relay starts with zero active subscriptions
- ✅ All previous subscriptions marked as disconnected on startup
- ✅ Clients successfully reconnect and re-establish subscriptions
- ✅ Relay restart can be used as emergency fix for subscription issues
- ✅ No errors during startup cleanup process
### Component 2: Connection Age Limit
- ✅ Clients disconnected after configured time period
- ✅ Clients automatically reconnect
- ✅ Subscriptions re-established after reconnection
- ✅ No impact on relay performance
- ✅ Configuration parameter works correctly (including disabled state)
### Overall Success
- ✅ CPU usage remains low (<10%)
- ✅ No orphaned subscriptions accumulate
- ✅ Database size remains stable
- ✅ No manual intervention required
---
## Configuration Reference
**New Configuration Parameters:**
```json
{
"max_connection_seconds": 86400
}
```
**Recommended Settings:**
- **Production:**
- `max_connection_seconds: 86400` (24 hours)
- **Development:**
- `max_connection_seconds: 3600` (1 hour for faster testing)
- **High-traffic:**
- `max_connection_seconds: 43200` (12 hours)
- **Disabled:**
- `max_connection_seconds: 0`
---
## Rollback Plan
If issues arise after deployment:
1. **Disable connection age limit:**
- Set `max_connection_seconds: 0` in config
- Restart relay
- Monitor for stability
2. **Revert code changes:**
- Remove `check_connection_age()` call from event loop
- Remove `cleanup_all_subscriptions_on_startup()` call from main
- Restart relay
3. **Database cleanup (if needed):**
- Manually clean up orphaned subscriptions using SQL:
```sql
UPDATE subscriptions
SET ended_at = strftime('%s', 'now')
WHERE event_type = 'created' AND ended_at IS NULL;
```
---
## Comparison with Original Design
### Original Design (5-6 Components)
1. Startup cleanup
2. Fix WebSocket disconnection logging
3. Enhance subscription removal with reason parameter
4. Periodic cleanup task (background thread)
5. Optimize database VIEW
6. Subscription expiration (optional)
### Simplified Design (2 Components)
1. Startup cleanup
2. Connection age limit
### Why Simplified is Better
**Advantages:**
- **Simpler**: 2 components vs 5-6 components
- **Faster to implement**: 3-5 hours vs 11-17 hours
- **Easier to maintain**: Less code, fewer moving parts
- **More reliable**: Fewer potential failure points
- **Nostr-aligned**: Leverages client reconnection behavior
- **No new threads**: Uses existing event loop
- **Universal compatibility**: All clients handle reconnection
**What We're Not Losing:**
- Startup cleanup is identical in both designs
- Connection age limit achieves the same goal as periodic cleanup + expiration
- Disconnection forces complete cleanup (better than just logging)
- Database VIEW optimization not needed if subscriptions don't accumulate
**Trade-offs:**
- Less granular logging (but simpler)
- No historical subscription analytics (but cleaner database)
- Clients must reconnect periodically (but this is standard Nostr behavior)
---
## Conclusion
This simplified design solves the subscription accumulation problem with two pragmatic solutions:
1. **Startup cleanup** ensures every relay restart starts with a clean slate
2. **Connection age limit** prevents long-term accumulation by forcing periodic reconnection
Both solutions align with Nostr's stateless design where clients are expected to handle reconnection. The implementation is simple, maintainable, and leverages existing code patterns.
**Key Benefits:**
- Solves the root problem (subscription accumulation)
- Simple to implement (3-5 hours total)
- Easy to maintain (minimal code)
- Universal compatibility (all clients handle reconnection)
- No new threads or background tasks
- Configurable and can be disabled if needed
- Relay restart as emergency fix
**Next Steps:**
1. Implement Component 1 (Startup Cleanup)
2. Test thoroughly
3. Implement Component 2 (Connection Age Limit)
4. Test thoroughly
5. Deploy to production
6. Monitor CPU usage and subscription counts

View File

@@ -0,0 +1,209 @@
# Subscription Matching Debug Plan
## Problem
The relay is not matching kind 1059 (NIP-17 gift wrap) events to subscriptions, even though a subscription exists with `kinds:[1059]` filter. The log shows:
```
Event broadcast complete: 0 subscriptions matched
```
But we have this subscription:
```
sub:3 146.70.187.119 0x78edc9b43210 8m 27s kinds:[1059], since:10/23/2025, 4:27:59 PM, limit:50
```
## Investigation Strategy
### 1. Add Debug Output to `event_matches_filter()` (lines 386-564)
Add debug logging at each filter check to trace the matching logic:
- **Entry point**: Log the event kind and filter being tested
- **Kinds filter check** (lines 392-415): Log whether kinds filter exists, the event kind value, and each filter kind being compared
- **Authors filter check** (lines 417-442): Log if authors filter exists and matching results
- **IDs filter check** (lines 444-469): Log if IDs filter exists and matching results
- **Since filter check** (lines 471-482): Log the event timestamp vs filter since value
- **Until filter check** (lines 484-495): Log the event timestamp vs filter until value
- **Tag filters check** (lines 497-561): Log tag filter matching details
- **Exit point**: Log whether the overall filter matched
### 2. Add Debug Output to `event_matches_subscription()` (lines 567-581)
Add logging to show:
- How many filters are in the subscription
- Which filter (if any) matched
- Overall subscription match result
### 3. Add Debug Output to `broadcast_event_to_subscriptions()` (lines 584-726)
Add logging to show:
- The event being broadcast (kind, id, created_at)
- Total number of active subscriptions being checked
- How many subscriptions matched after the first pass
### 4. Key Areas to Focus On
Based on the code analysis, the most likely issues are:
1. **Kind matching logic** (lines 392-415): The event kind might not be extracted correctly, or the comparison might be failing
2. **Since timestamp** (lines 471-482): The subscription has a `since` filter - if the event timestamp is before this, it won't match
3. **Event structure**: The event JSON might not have the expected structure
### 5. Specific Debug Additions
#### In `event_matches_filter()` at line 386:
```c
// Add at start of function
cJSON* event_kind_obj = cJSON_GetObjectItem(event, "kind");
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
cJSON* event_created_at_obj = cJSON_GetObjectItem(event, "created_at");
DEBUG_TRACE("FILTER_MATCH: Testing event kind=%d id=%.8s created_at=%ld",
event_kind_obj ? (int)cJSON_GetNumberValue(event_kind_obj) : -1,
event_id_obj && cJSON_IsString(event_id_obj) ? cJSON_GetStringValue(event_id_obj) : "null",
event_created_at_obj ? (long)cJSON_GetNumberValue(event_created_at_obj) : 0);
```
#### In kinds filter check (after line 392):
```c
if (filter->kinds && cJSON_IsArray(filter->kinds)) {
DEBUG_TRACE("FILTER_MATCH: Checking kinds filter with %d kinds", cJSON_GetArraySize(filter->kinds));
cJSON* event_kind = cJSON_GetObjectItem(event, "kind");
if (!event_kind || !cJSON_IsNumber(event_kind)) {
DEBUG_WARN("FILTER_MATCH: Event has no valid kind field");
return 0;
}
int event_kind_val = (int)cJSON_GetNumberValue(event_kind);
DEBUG_TRACE("FILTER_MATCH: Event kind=%d", event_kind_val);
int kind_match = 0;
cJSON* kind_item = NULL;
cJSON_ArrayForEach(kind_item, filter->kinds) {
if (cJSON_IsNumber(kind_item)) {
int filter_kind = (int)cJSON_GetNumberValue(kind_item);
DEBUG_TRACE("FILTER_MATCH: Comparing event kind %d with filter kind %d", event_kind_val, filter_kind);
if (filter_kind == event_kind_val) {
kind_match = 1;
DEBUG_TRACE("FILTER_MATCH: Kind matched!");
break;
}
}
}
if (!kind_match) {
DEBUG_TRACE("FILTER_MATCH: No kind match, filter rejected");
return 0;
}
DEBUG_TRACE("FILTER_MATCH: Kinds filter passed");
}
```
#### In since filter check (after line 472):
```c
if (filter->since > 0) {
cJSON* event_created_at = cJSON_GetObjectItem(event, "created_at");
if (!event_created_at || !cJSON_IsNumber(event_created_at)) {
DEBUG_WARN("FILTER_MATCH: Event has no valid created_at field");
return 0;
}
long event_timestamp = (long)cJSON_GetNumberValue(event_created_at);
DEBUG_TRACE("FILTER_MATCH: Checking since filter: event_ts=%ld filter_since=%ld",
event_timestamp, filter->since);
if (event_timestamp < filter->since) {
DEBUG_TRACE("FILTER_MATCH: Event too old (before since), filter rejected");
return 0;
}
DEBUG_TRACE("FILTER_MATCH: Since filter passed");
}
```
#### At end of `event_matches_filter()` (before line 563):
```c
DEBUG_TRACE("FILTER_MATCH: All filters passed, event matches!");
return 1; // All filters passed
```
#### In `event_matches_subscription()` at line 567:
```c
int event_matches_subscription(cJSON* event, subscription_t* subscription) {
if (!event || !subscription || !subscription->filters) {
return 0;
}
DEBUG_TRACE("SUB_MATCH: Testing subscription '%s'", subscription->id);
int filter_num = 0;
subscription_filter_t* filter = subscription->filters;
while (filter) {
filter_num++;
DEBUG_TRACE("SUB_MATCH: Testing filter #%d", filter_num);
if (event_matches_filter(event, filter)) {
DEBUG_TRACE("SUB_MATCH: Filter #%d matched! Subscription '%s' matches",
filter_num, subscription->id);
return 1; // Match found (OR logic)
}
filter = filter->next;
}
DEBUG_TRACE("SUB_MATCH: No filters matched for subscription '%s'", subscription->id);
return 0; // No filters matched
}
```
#### In `broadcast_event_to_subscriptions()` at line 584:
```c
int broadcast_event_to_subscriptions(cJSON* event) {
if (!event) {
return 0;
}
// Log event details
cJSON* event_kind = cJSON_GetObjectItem(event, "kind");
cJSON* event_id = cJSON_GetObjectItem(event, "id");
cJSON* event_created_at = cJSON_GetObjectItem(event, "created_at");
DEBUG_TRACE("BROADCAST: Event kind=%d id=%.8s created_at=%ld",
event_kind ? (int)cJSON_GetNumberValue(event_kind) : -1,
event_id && cJSON_IsString(event_id) ? cJSON_GetStringValue(event_id) : "null",
event_created_at ? (long)cJSON_GetNumberValue(event_created_at) : 0);
// ... existing expiration check code ...
// After line 611 (before pthread_mutex_lock):
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
int total_subs = 0;
subscription_t* count_sub = g_subscription_manager.active_subscriptions;
while (count_sub) {
total_subs++;
count_sub = count_sub->next;
}
DEBUG_TRACE("BROADCAST: Checking %d active subscriptions", total_subs);
subscription_t* sub = g_subscription_manager.active_subscriptions;
// ... rest of matching logic ...
```
## Expected Outcome
With these debug additions, we should see output like:
```
BROADCAST: Event kind=1059 id=abc12345 created_at=1729712279
BROADCAST: Checking 1 active subscriptions
SUB_MATCH: Testing subscription 'sub:3'
SUB_MATCH: Testing filter #1
FILTER_MATCH: Testing event kind=1059 id=abc12345 created_at=1729712279
FILTER_MATCH: Checking kinds filter with 1 kinds
FILTER_MATCH: Event kind=1059
FILTER_MATCH: Comparing event kind 1059 with filter kind 1059
FILTER_MATCH: Kind matched!
FILTER_MATCH: Kinds filter passed
FILTER_MATCH: Checking since filter: event_ts=1729712279 filter_since=1729708079
FILTER_MATCH: Since filter passed
FILTER_MATCH: All filters passed, event matches!
SUB_MATCH: Filter #1 matched! Subscription 'sub:3' matches
Event broadcast complete: 1 subscriptions matched
```
This will help us identify exactly where the matching is failing.

View File

@@ -0,0 +1,358 @@
# Subscription Table Collapsible Groups Implementation Plan
## Objective
Add collapsible/expandable functionality to subscription groups, where:
1. Each WSI Pointer group starts collapsed, showing only a summary row
2. Clicking the summary row expands to show all subscription details for that WSI Pointer
3. Clicking again collapses the group back to the summary row
## Current Behavior
- All subscriptions are displayed in a flat list
- WSI Pointer value shown only on first row of each group
- No interaction or collapse functionality
## Desired Behavior
- **Collapsed state**: One row per WSI Pointer showing summary information
- **Expanded state**: Header row + detail rows for each subscription in that group
- **Toggle**: Click on header row to expand/collapse
- **Visual indicator**: Arrow or icon showing expand/collapse state
## Design Approach
### Option 1: Summary Row + Detail Rows (Recommended)
```
[▶] WSI Pointer: 0x12345678 | Subscriptions: 3 | Total Duration: 15m
(detail rows hidden)
When clicked:
[▼] WSI Pointer: 0x12345678 | Subscriptions: 3 | Total Duration: 15m
| sub-001 | 5m 30s | kinds:[1]
| sub-002 | 3m 15s | kinds:[3]
| sub-003 | 6m 15s | kinds:[1,3]
```
### Option 2: First Row as Header (Alternative)
```
[▶] 0x12345678 | sub-001 | 5m 30s | kinds:[1] (+ 2 more)
When clicked:
[▼] 0x12345678 | sub-001 | 5m 30s | kinds:[1]
| sub-002 | 3m 15s | kinds:[3]
| sub-003 | 6m 15s | kinds:[1,3]
```
**Recommendation**: Option 1 provides clearer UX and better summary information.
## Implementation Details
### Files to Modify
1. `api/index.js` - Function `populateSubscriptionDetailsTable()` (lines 4277-4384)
2. `api/index.css` - Add styles for collapsible rows
### Data Structure Changes
Need to group subscriptions by WSI Pointer first:
```javascript
// Group subscriptions by wsi_pointer
const groupedSubscriptions = {};
subscriptionsData.forEach(sub => {
const wsiKey = sub.wsi_pointer || 'N/A';
if (!groupedSubscriptions[wsiKey]) {
groupedSubscriptions[wsiKey] = [];
}
groupedSubscriptions[wsiKey].push(sub);
});
```
### HTML Structure
#### Summary Row (Header)
```html
<tr class="subscription-group-header" data-wsi-pointer="0x12345678" data-expanded="false">
<td colspan="4" style="cursor: pointer; user-select: none;">
<span class="expand-icon"></span>
<strong>WSI Pointer:</strong> 0x12345678
<span class="group-summary">
| Subscriptions: 3 | Oldest: 15m 30s
</span>
</td>
</tr>
```
#### Detail Rows (Initially Hidden)
```html
<tr class="subscription-detail-row" data-wsi-group="0x12345678" style="display: none;">
<td style="padding-left: 30px;"><!-- Empty for WSI Pointer --></td>
<td>sub-001</td>
<td>5m 30s</td>
<td>kinds:[1]</td>
</tr>
```
### JavaScript Implementation
#### Modified populateSubscriptionDetailsTable Function
```javascript
function populateSubscriptionDetailsTable(subscriptionsData) {
const tableBody = document.getElementById('subscription-details-table-body');
if (!tableBody || !subscriptionsData || !Array.isArray(subscriptionsData)) return;
tableBody.innerHTML = '';
if (subscriptionsData.length === 0) {
const row = document.createElement('tr');
row.innerHTML = '<td colspan="4" style="text-align: center; font-style: italic;">No active subscriptions</td>';
tableBody.appendChild(row);
return;
}
// Sort subscriptions by wsi_pointer to group them together
subscriptionsData.sort((a, b) => {
const wsiA = a.wsi_pointer || '';
const wsiB = b.wsi_pointer || '';
return wsiA.localeCompare(wsiB);
});
// Group subscriptions by wsi_pointer
const groupedSubscriptions = {};
subscriptionsData.forEach(sub => {
const wsiKey = sub.wsi_pointer || 'N/A';
if (!groupedSubscriptions[wsiKey]) {
groupedSubscriptions[wsiKey] = [];
}
groupedSubscriptions[wsiKey].push(sub);
});
// Create rows for each group
Object.entries(groupedSubscriptions).forEach(([wsiPointer, subscriptions]) => {
// Calculate group summary
const subCount = subscriptions.length;
const now = Math.floor(Date.now() / 1000);
const oldestDuration = Math.max(...subscriptions.map(s => now - s.created_at));
const oldestDurationStr = formatDuration(oldestDuration);
// Create header row (summary)
const headerRow = document.createElement('tr');
headerRow.className = 'subscription-group-header';
headerRow.setAttribute('data-wsi-pointer', wsiPointer);
headerRow.setAttribute('data-expanded', 'false');
headerRow.style.cursor = 'pointer';
headerRow.style.userSelect = 'none';
headerRow.style.backgroundColor = 'var(--hover-color, #f5f5f5)';
headerRow.innerHTML = `
<td colspan="4" style="padding: 8px;">
<span class="expand-icon" style="display: inline-block; width: 20px; transition: transform 0.2s;">▶</span>
<strong style="font-family: 'Courier New', monospace; font-size: 12px;">WSI: ${wsiPointer}</strong>
<span style="color: #666; margin-left: 15px;">
Subscriptions: ${subCount} | Oldest: ${oldestDurationStr}
</span>
</td>
`;
// Add click handler to toggle expansion
headerRow.addEventListener('click', () => toggleSubscriptionGroup(wsiPointer));
tableBody.appendChild(headerRow);
// Create detail rows (initially hidden)
subscriptions.forEach((subscription, index) => {
const detailRow = document.createElement('tr');
detailRow.className = 'subscription-detail-row';
detailRow.setAttribute('data-wsi-group', wsiPointer);
detailRow.style.display = 'none';
// Calculate duration
const duration = now - subscription.created_at;
const durationStr = formatDuration(duration);
// Format filters
let filtersDisplay = 'None';
if (subscription.filters && subscription.filters.length > 0) {
const filterDetails = [];
subscription.filters.forEach((filter) => {
const parts = [];
if (filter.kinds && Array.isArray(filter.kinds) && filter.kinds.length > 0) {
parts.push(`kinds:[${filter.kinds.join(',')}]`);
}
if (filter.authors && Array.isArray(filter.authors) && filter.authors.length > 0) {
const authorCount = filter.authors.length;
if (authorCount === 1) {
const shortPubkey = filter.authors[0].substring(0, 8) + '...';
parts.push(`authors:[${shortPubkey}]`);
} else {
parts.push(`authors:[${authorCount} pubkeys]`);
}
}
if (filter.ids && Array.isArray(filter.ids) && filter.ids.length > 0) {
const idCount = filter.ids.length;
parts.push(`ids:[${idCount} event${idCount > 1 ? 's' : ''}]`);
}
const timeParts = [];
if (filter.since && filter.since > 0) {
const sinceDate = new Date(filter.since * 1000).toLocaleString();
timeParts.push(`since:${sinceDate}`);
}
if (filter.until && filter.until > 0) {
const untilDate = new Date(filter.until * 1000).toLocaleString();
timeParts.push(`until:${untilDate}`);
}
if (timeParts.length > 0) {
parts.push(timeParts.join(', '));
}
if (filter.limit && filter.limit > 0) {
parts.push(`limit:${filter.limit}`);
}
if (filter.tag_filters && Array.isArray(filter.tag_filters) && filter.tag_filters.length > 0) {
parts.push(`tags:[${filter.tag_filters.length} filter${filter.tag_filters.length > 1 ? 's' : ''}]`);
}
if (parts.length > 0) {
filterDetails.push(parts.join(', '));
} else {
filterDetails.push('empty filter');
}
});
filtersDisplay = filterDetails.join(' | ');
}
detailRow.innerHTML = `
<td style="padding-left: 30px; font-family: 'Courier New', monospace; font-size: 11px; color: #999;">└─</td>
<td style="font-family: 'Courier New', monospace; font-size: 12px;">${subscription.id || 'N/A'}</td>
<td>${durationStr}</td>
<td>${filtersDisplay}</td>
`;
tableBody.appendChild(detailRow);
});
});
}
/**
 * Toggle the expanded/collapsed state of one subscription group.
 *
 * Looks up the group header row and its detail rows by the WSI pointer
 * stored in data attributes, then flips row visibility, the arrow icon,
 * and the header's data-expanded flag.
 *
 * Fix: the original dereferenced headerRow/expandIcon without null checks,
 * so a click handled after the table was re-rendered (stale group) threw a
 * TypeError. Missing elements are now tolerated silently.
 *
 * @param {string} wsiPointer - WSI pointer value identifying the group.
 *   NOTE(review): interpolated directly into an attribute selector; if a
 *   pointer could ever contain quotes, wrap it with CSS.escape() — confirm
 *   the pointer format against the server side.
 */
function toggleSubscriptionGroup(wsiPointer) {
    const headerRow = document.querySelector(`.subscription-group-header[data-wsi-pointer="${wsiPointer}"]`);
    if (!headerRow) {
        return; // Group header no longer in the DOM; nothing to toggle.
    }
    const detailRows = document.querySelectorAll(`.subscription-detail-row[data-wsi-group="${wsiPointer}"]`);
    const expandIcon = headerRow.querySelector('.expand-icon');
    const isExpanded = headerRow.getAttribute('data-expanded') === 'true';
    if (isExpanded) {
        // Collapse: hide detail rows and reset the arrow to "closed".
        detailRows.forEach((row) => { row.style.display = 'none'; });
        if (expandIcon) {
            expandIcon.textContent = '▶';
            expandIcon.style.transform = 'rotate(0deg)';
        }
        headerRow.setAttribute('data-expanded', 'false');
    } else {
        // Expand: show detail rows and point the arrow "open".
        detailRows.forEach((row) => { row.style.display = 'table-row'; });
        if (expandIcon) {
            expandIcon.textContent = '▼';
            expandIcon.style.transform = 'rotate(90deg)';
        }
        headerRow.setAttribute('data-expanded', 'true');
    }
}
```
### CSS Additions (api/index.css)
```css
/* Subscription group header styles */
/* Header rows summarize one WSI pointer group and are clickable to expand/collapse. */
.subscription-group-header {
background-color: var(--hover-color, #f5f5f5);
font-weight: 500;
transition: background-color 0.2s;
}
/* Hover feedback signals that the header row is clickable. */
.subscription-group-header:hover {
background-color: var(--primary-color-light, #e8e8e8);
}
/* Arrow indicator (▶/▼) inside the header; fixed width prevents layout shift,
   transition animates the rotation applied by toggleSubscriptionGroup(). */
.expand-icon {
display: inline-block;
width: 20px;
transition: transform 0.2s ease;
font-size: 12px;
}
/* Detail row styles */
/* Per-subscription rows shown/hidden when their group is toggled. */
.subscription-detail-row {
background-color: var(--background-color, #ffffff);
}
.subscription-detail-row:hover {
background-color: var(--hover-color-light, #fafafa);
}
/* Dark mode support */
/* NOTE(review): fallback colors apply only if the custom properties are
   undefined — confirm these variables exist in the app's theme definitions. */
.dark-mode .subscription-group-header {
background-color: var(--hover-color-dark, #2a2a2a);
}
.dark-mode .subscription-group-header:hover {
background-color: var(--primary-color-dark, #333333);
}
.dark-mode .subscription-detail-row {
background-color: var(--background-color-dark, #1a1a1a);
}
.dark-mode .subscription-detail-row:hover {
background-color: var(--hover-color-dark, #252525);
}
```
## Features Included
1. **Collapsible groups**: Each WSI Pointer group can be collapsed/expanded
2. **Visual indicator**: Arrow icon (▶/▼) shows current state
3. **Summary information**: Shows subscription count and oldest duration
4. **Smooth transitions**: Icon rotation animation
5. **Hover effects**: Visual feedback on header rows
6. **Tree structure**: Detail rows indented with └─ character
7. **Dark mode support**: Proper styling for both themes
8. **Keyboard accessible**: Can be enhanced with keyboard navigation
## User Experience Flow
1. **Initial load**: All groups collapsed, showing summary rows only
2. **Click header**: Group expands, showing all subscription details
3. **Click again**: Group collapses back to summary
4. **Multiple groups**: Each group can be independently expanded/collapsed
5. **Visual feedback**: Hover effects and smooth animations
## Testing Checklist
1. ✅ Verify groups start collapsed by default
2. ✅ Verify clicking header expands group
3. ✅ Verify clicking again collapses group
4. ✅ Verify multiple groups can be expanded simultaneously
5. ✅ Verify summary information is accurate
6. ✅ Verify detail rows display correctly when expanded
7. ✅ Verify styling works in both light and dark modes
8. ✅ Verify no console errors
9. ✅ Verify performance with many subscriptions
## Optional Enhancements
1. **Expand/Collapse All**: Add buttons to expand or collapse all groups at once
2. **Remember State**: Store expansion state in localStorage
3. **Keyboard Navigation**: Add keyboard shortcuts (Space/Enter to toggle)
4. **Animation**: Add slide-down/up animation for detail rows
5. **Search/Filter**: Add ability to search within subscriptions
6. **Export**: Add ability to export subscription data
## Next Steps
1. Review this plan
2. Switch to Code mode
3. Implement the changes in `api/index.js`
4. Add CSS styles in `api/index.css`
5. Test the collapsible functionality
6. Verify all edge cases work correctly

View File

@@ -0,0 +1,155 @@
# Subscription Table WSI Pointer Grouping Implementation Plan
## Objective
Modify the subscription details table to show the WSI Pointer value only once per group - on the first row of each WSI Pointer group, leaving it blank for subsequent rows with the same WSI Pointer.
## Current Behavior
- All rows show the WSI Pointer value
- Rows are sorted by WSI Pointer (grouping is working)
- Visual grouping is not clear
## Desired Behavior
- First row of each WSI Pointer group shows the full WSI Pointer value
- Subsequent rows in the same group have an empty cell for WSI Pointer
- This creates a clear visual grouping effect
## Implementation Details
### File to Modify
`api/index.js` - Function `populateSubscriptionDetailsTable()` (lines 4277-4384)
### Code Changes Required
#### Current Code (lines 4291-4383):
```javascript
// Sort subscriptions by wsi_pointer to group them together
subscriptionsData.sort((a, b) => {
const wsiA = a.wsi_pointer || '';
const wsiB = b.wsi_pointer || '';
return wsiA.localeCompare(wsiB);
});
subscriptionsData.forEach((subscription, index) => {
const row = document.createElement('tr');
// Calculate duration
const now = Math.floor(Date.now() / 1000);
const duration = now - subscription.created_at;
const durationStr = formatDuration(duration);
// Format client IP (show full IP for admin view)
const clientIP = subscription.client_ip || 'unknown';
// Format wsi_pointer (show full pointer)
const wsiPointer = subscription.wsi_pointer || 'N/A';
// Format filters (show actual filter details)
let filtersDisplay = 'None';
// ... filter formatting code ...
row.innerHTML = `
<td style="font-family: 'Courier New', monospace; font-size: 12px;">${wsiPointer}</td>
<td style="font-family: 'Courier New', monospace; font-size: 12px;">${subscription.id || 'N/A'}</td>
<!-- <td style="font-family: 'Courier New', monospace; font-size: 12px;">${clientIP}</td> -->
<td>${durationStr}</td>
<td>${filtersDisplay}</td>
`;
tableBody.appendChild(row);
});
```
#### Modified Code:
```javascript
// Sort subscriptions by wsi_pointer to group them together
subscriptionsData.sort((a, b) => {
const wsiA = a.wsi_pointer || '';
const wsiB = b.wsi_pointer || '';
return wsiA.localeCompare(wsiB);
});
// Track previous WSI Pointer to detect group changes
let previousWsiPointer = null;
subscriptionsData.forEach((subscription, index) => {
const row = document.createElement('tr');
// Calculate duration
const now = Math.floor(Date.now() / 1000);
const duration = now - subscription.created_at;
const durationStr = formatDuration(duration);
// Format client IP (show full IP for admin view)
const clientIP = subscription.client_ip || 'unknown';
// Format wsi_pointer - only show if it's different from previous row
const currentWsiPointer = subscription.wsi_pointer || 'N/A';
let wsiPointerDisplay = '';
if (currentWsiPointer !== previousWsiPointer) {
// This is the first row of a new group - show the WSI Pointer
wsiPointerDisplay = currentWsiPointer;
previousWsiPointer = currentWsiPointer;
} else {
// This is a continuation of the same group - leave blank
wsiPointerDisplay = '';
}
// Format filters (show actual filter details)
let filtersDisplay = 'None';
// ... filter formatting code remains the same ...
row.innerHTML = `
<td style="font-family: 'Courier New', monospace; font-size: 12px;">${wsiPointerDisplay}</td>
<td style="font-family: 'Courier New', monospace; font-size: 12px;">${subscription.id || 'N/A'}</td>
<!-- <td style="font-family: 'Courier New', monospace; font-size: 12px;">${clientIP}</td> -->
<td>${durationStr}</td>
<td>${filtersDisplay}</td>
`;
tableBody.appendChild(row);
});
```
### Key Changes Explained
1. **Add tracking variable**: `let previousWsiPointer = null;` before the forEach loop
2. **Store current WSI Pointer**: `const currentWsiPointer = subscription.wsi_pointer || 'N/A';`
3. **Compare with previous**: Check if `currentWsiPointer !== previousWsiPointer`
4. **Conditional display**:
- If different: Show the WSI Pointer value and update `previousWsiPointer`
- If same: Show empty string (blank cell)
5. **Use display variable**: Replace `${wsiPointer}` with `${wsiPointerDisplay}` in the row HTML
### Visual Result
**Before:**
```
WSI Pointer | Subscription ID | Duration | Filters
0x12345678 | sub-001 | 5m 30s | kinds:[1]
0x12345678 | sub-002 | 3m 15s | kinds:[3]
0x87654321 | sub-003 | 1m 45s | kinds:[1,3]
```
**After:**
```
WSI Pointer | Subscription ID | Duration | Filters
0x12345678 | sub-001 | 5m 30s | kinds:[1]
| sub-002 | 3m 15s | kinds:[3]
0x87654321 | sub-003 | 1m 45s | kinds:[1,3]
```
## Testing Checklist
1. ✅ Verify first row of each group shows WSI Pointer
2. ✅ Verify subsequent rows in same group are blank
3. ✅ Verify grouping works with multiple subscriptions per WSI Pointer
4. ✅ Verify single subscription per WSI Pointer still shows the value
5. ✅ Verify empty/null WSI Pointers are handled correctly
6. ✅ Verify table still displays correctly when no subscriptions exist
## Next Steps
1. Review this plan
2. Switch to Code mode
3. Implement the changes in `api/index.js`
4. Test the implementation
5. Verify the visual grouping effect

View File

@@ -0,0 +1,200 @@
# WebSocket Write Queue Design
## Problem Statement
The current partial write handling implementation uses a single buffer per session, which fails when multiple events need to be sent to the same client in rapid succession. This causes:
1. First event gets partial write → queued successfully
2. Second event tries to write → **FAILS** with "write already pending"
3. Subsequent events fail similarly, causing data loss
### Server Log Evidence
```
[WARN] WS_FRAME_PARTIAL: EVENT partial write, sub=1 sent=3210 expected=5333
[TRACE] Queued partial write: len=2123
[WARN] WS_FRAME_PARTIAL: EVENT partial write, sub=1 sent=3210 expected=5333
[WARN] queue_websocket_write: write already pending, cannot queue new write
[ERROR] Failed to queue partial EVENT write for sub=1
```
## Root Cause
WebSocket frames must be sent **atomically** - you cannot interleave multiple frames. The current single-buffer approach correctly enforces this, but it rejects new writes instead of queuing them.
## Solution: Write Queue Architecture
### Design Principles
1. **Frame Atomicity**: Complete one WebSocket frame before starting the next
2. **Sequential Processing**: Process queued writes in FIFO order
3. **Memory Safety**: Proper cleanup on connection close or errors
4. **Thread Safety**: Protect queue operations with existing session lock
### Data Structures
#### Write Queue Node
```c
struct write_queue_node {
unsigned char* buffer; // Buffer with LWS_PRE space
size_t total_len; // Total length of data to write
size_t offset; // How much has been written so far
int write_type; // LWS_WRITE_TEXT, etc.
struct write_queue_node* next; // Next node in queue
};
```
#### Per-Session Write Queue
```c
struct per_session_data {
// ... existing fields ...
// Write queue for handling multiple pending writes
struct write_queue_node* write_queue_head; // First item to write
struct write_queue_node* write_queue_tail; // Last item in queue
int write_queue_length; // Number of items in queue
int write_in_progress; // Flag: 1 if currently writing
};
```
### Algorithm Flow
#### 1. Enqueue Write (`queue_websocket_write`)
```
IF write_queue is empty AND no write in progress:
- Attempt immediate write with lws_write()
- IF complete:
- Return success
- ELSE (partial write):
- Create queue node with remaining data
- Add to queue
- Set write_in_progress flag
- Request LWS_CALLBACK_SERVER_WRITEABLE
ELSE:
- Create queue node with full data
- Append to queue tail
- IF no write in progress:
- Request LWS_CALLBACK_SERVER_WRITEABLE
```
#### 2. Process Queue (`process_pending_write`)
```
WHILE write_queue is not empty:
- Get head node
- Calculate remaining data (total_len - offset)
- Attempt write with lws_write()
IF write fails (< 0):
- Log error
- Remove and free head node
- Continue to next node
ELSE IF partial write (< remaining):
- Update offset
- Request LWS_CALLBACK_SERVER_WRITEABLE
- Break (wait for next callback)
ELSE (complete write):
- Remove and free head node
- Continue to next node
IF queue is empty:
- Clear write_in_progress flag
```
#### 3. Cleanup (`LWS_CALLBACK_CLOSED`)
```
WHILE write_queue is not empty:
- Get head node
- Free buffer
- Free node
- Move to next
Clear queue pointers
```
### Memory Management
1. **Allocation**: Each queue node allocates buffer with `LWS_PRE + data_len`
2. **Ownership**: Queue owns all buffers until write completes or connection closes
3. **Deallocation**: Free buffer and node when:
- Write completes successfully
- Write fails with error
- Connection closes
### Thread Safety
- Use existing `pss->session_lock` to protect queue operations
- Lock during:
- Enqueue operations
- Dequeue operations
- Queue traversal for cleanup
### Performance Considerations
1. **Queue Length Limit**: Implement max queue length (e.g., 100 items) to prevent memory exhaustion
2. **Memory Pressure**: Monitor total queued bytes per session
3. **Backpressure**: If queue exceeds limit, close connection with NOTICE
### Error Handling
1. **Allocation Failure**: Return error, log, send NOTICE to client
2. **Write Failure**: Remove failed frame, continue with next
3. **Queue Overflow**: Close connection with appropriate NOTICE
## Implementation Plan
### Phase 1: Data Structure Changes
1. Add `write_queue_node` structure to `websockets.h`
2. Update `per_session_data` with queue fields
3. Remove old single-buffer fields
### Phase 2: Queue Operations
1. Implement `enqueue_write()` helper
2. Implement `dequeue_write()` helper
3. Update `queue_websocket_write()` to use queue
4. Update `process_pending_write()` to process queue
### Phase 3: Integration
1. Update all `lws_write()` call sites
2. Update `LWS_CALLBACK_CLOSED` cleanup
3. Add queue length monitoring
### Phase 4: Testing
1. Test with rapid multiple events to same client
2. Test with large events (>4KB)
3. Test under load with concurrent connections
4. Verify no "Invalid frame header" errors
## Expected Outcomes
1. **No More Rejections**: All writes queued successfully
2. **Frame Integrity**: Complete frames sent atomically
3. **Memory Safety**: Proper cleanup on all paths
4. **Performance**: Minimal overhead for queue management
## Metrics to Monitor
1. Average queue length per session
2. Maximum queue length observed
3. Queue overflow events (if limit implemented)
4. Write completion rate
5. Partial write frequency
## Alternative Approaches Considered
### 1. Larger Single Buffer
**Rejected**: Doesn't solve the fundamental problem of multiple concurrent writes
### 2. Immediate Write Retry
**Rejected**: Could cause busy-waiting and CPU waste
### 3. Drop Frames on Conflict
**Rejected**: Violates reliability requirements
## References
- libwebsockets documentation on `lws_write()` and `LWS_CALLBACK_SERVER_WRITEABLE`
- WebSocket RFC 6455 on frame structure
- Nostr NIP-01 on relay-to-client communication

View File

@@ -8,36 +8,59 @@ YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
print_status() { echo -e "${BLUE}[INFO]${NC} $1"; }
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
print_status() { echo -e "${BLUE}[INFO]${NC} $1" >&2; }
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1" >&2; }
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1" >&2; }
print_error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
# Global variables
COMMIT_MESSAGE=""
RELEASE_MODE=false
VERSION_INCREMENT_TYPE="patch" # "patch", "minor", or "major"
show_usage() {
echo "C-Relay Increment and Push Script"
echo ""
echo "Usage:"
echo " $0 \"commit message\" - Default: increment patch, commit & push"
echo " $0 -r \"commit message\" - Release: increment minor, create release"
echo "USAGE:"
echo " $0 [OPTIONS] \"commit message\""
echo ""
echo "Examples:"
echo "COMMANDS:"
echo " $0 \"commit message\" Default: increment patch, commit & push"
echo " $0 -p \"commit message\" Increment patch version"
echo " $0 -m \"commit message\" Increment minor version"
echo " $0 -M \"commit message\" Increment major version"
echo " $0 -r \"commit message\" Create release with assets (no version increment)"
echo " $0 -r -m \"commit message\" Create release with minor version increment"
echo " $0 -h Show this help message"
echo ""
echo "OPTIONS:"
echo " -p, --patch Increment patch version (default)"
echo " -m, --minor Increment minor version"
echo " -M, --major Increment major version"
echo " -r, --release Create release with assets"
echo " -h, --help Show this help message"
echo ""
echo "EXAMPLES:"
echo " $0 \"Fixed event validation bug\""
echo " $0 --release \"Major release with new features\""
echo " $0 -m \"Added new features\""
echo " $0 -M \"Breaking API changes\""
echo " $0 -r \"Release current version\""
echo " $0 -r -m \"Release with minor increment\""
echo ""
echo "Default Mode (patch increment):"
echo " - Increment patch version (v1.2.3 → v1.2.4)"
echo " - Git add, commit with message, and push"
echo "VERSION INCREMENT MODES:"
echo " -p, --patch (default): Increment patch version (v1.2.3 → v1.2.4)"
echo " -m, --minor: Increment minor version, zero patch (v1.2.3 → v1.3.0)"
echo " -M, --major: Increment major version, zero minor+patch (v1.2.3 → v2.0.0)"
echo ""
echo "Release Mode (-r flag):"
echo " - Increment minor version, zero patch (v1.2.3 → v1.3.0)"
echo " - Git add, commit, push, and create Gitea release"
echo "RELEASE MODE (-r flag):"
echo " - Build static binary using build_static.sh"
echo " - Create source tarball"
echo " - Git add, commit, push, and create Gitea release with assets"
echo " - Can be combined with version increment flags"
echo ""
echo "Requirements for Release Mode:"
echo "REQUIREMENTS FOR RELEASE MODE:"
echo " - Gitea token in ~/.gitea_token for release uploads"
echo " - Docker installed for static binary builds"
}
# Parse command line arguments
@@ -47,6 +70,18 @@ while [[ $# -gt 0 ]]; do
RELEASE_MODE=true
shift
;;
-p|--patch)
VERSION_INCREMENT_TYPE="patch"
shift
;;
-m|--minor)
VERSION_INCREMENT_TYPE="minor"
shift
;;
-M|--major)
VERSION_INCREMENT_TYPE="major"
shift
;;
-h|--help)
show_usage
exit 0
@@ -79,7 +114,7 @@ check_git_repo() {
# Function to get current version and increment appropriately
increment_version() {
local increment_type="$1" # "patch" or "minor"
local increment_type="$1" # "patch", "minor", or "major"
print_status "Getting current version..."
@@ -105,24 +140,34 @@ increment_version() {
fi
# Increment version based on type
if [[ "$increment_type" == "minor" ]]; then
if [[ "$increment_type" == "major" ]]; then
# Major release: increment major, zero minor and patch
NEW_MAJOR=$((MAJOR + 1))
NEW_MINOR=0
NEW_PATCH=0
NEW_VERSION="v${NEW_MAJOR}.${NEW_MINOR}.${NEW_PATCH}"
print_status "Major version increment: incrementing major version"
elif [[ "$increment_type" == "minor" ]]; then
# Minor release: increment minor, zero patch
NEW_MAJOR=$MAJOR
NEW_MINOR=$((MINOR + 1))
NEW_PATCH=0
NEW_VERSION="v${MAJOR}.${NEW_MINOR}.${NEW_PATCH}"
print_status "Release mode: incrementing minor version"
NEW_VERSION="v${NEW_MAJOR}.${NEW_MINOR}.${NEW_PATCH}"
print_status "Minor version increment: incrementing minor version"
else
# Default: increment patch
NEW_MAJOR=$MAJOR
NEW_MINOR=$MINOR
NEW_PATCH=$((PATCH + 1))
NEW_VERSION="v${MAJOR}.${MINOR}.${NEW_PATCH}"
print_status "Default mode: incrementing patch version"
NEW_VERSION="v${NEW_MAJOR}.${NEW_MINOR}.${NEW_PATCH}"
print_status "Patch version increment: incrementing patch version"
fi
print_status "Current version: $LATEST_TAG"
print_status "New version: $NEW_VERSION"
# Update version in src/main.h
update_version_in_header "$NEW_VERSION" "$MAJOR" "$NEW_MINOR" "$NEW_PATCH"
update_version_in_header "$NEW_VERSION" "$NEW_MAJOR" "$NEW_MINOR" "$NEW_PATCH"
# Export for use in other functions
export NEW_VERSION
@@ -261,6 +306,115 @@ git_commit_and_push_no_tag() {
fi
}
# Function to build release binary
build_release_binary() {
print_status "Building release binary..."
# Check if build_static.sh exists
if [[ ! -f "build_static.sh" ]]; then
print_error "build_static.sh not found"
return 1
fi
# Run the static build script
if ./build_static.sh > /dev/null 2>&1; then
print_success "Built static binary successfully"
return 0
else
print_error "Failed to build static binary"
return 1
fi
}
# Function to create source tarball
create_source_tarball() {
print_status "Creating source tarball..."
local tarball_name="c-relay-${NEW_VERSION#v}.tar.gz"
# Create tarball excluding build artifacts and git files
if tar -czf "$tarball_name" \
--exclude='build/*' \
--exclude='.git*' \
--exclude='*.db' \
--exclude='*.db-*' \
--exclude='*.log' \
--exclude='*.tar.gz' \
. > /dev/null 2>&1; then
print_success "Created source tarball: $tarball_name"
echo "$tarball_name"
return 0
else
print_error "Failed to create source tarball"
return 1
fi
}
# Function to upload release assets to Gitea
upload_release_assets() {
local release_id="$1"
local binary_path="$2"
local tarball_path="$3"
print_status "Uploading release assets..."
# Check for Gitea token
if [[ ! -f "$HOME/.gitea_token" ]]; then
print_warning "No ~/.gitea_token found. Skipping asset uploads."
return 0
fi
local token=$(cat "$HOME/.gitea_token" | tr -d '\n\r')
local api_url="https://git.laantungir.net/api/v1/repos/laantungir/c-relay"
local assets_url="$api_url/releases/$release_id/assets"
print_status "Assets URL: $assets_url"
# Upload binary
if [[ -f "$binary_path" ]]; then
print_status "Uploading binary: $(basename "$binary_path")"
# Retry loop for eventual consistency
local max_attempts=3
local attempt=1
while [[ $attempt -le $max_attempts ]]; do
print_status "Upload attempt $attempt/$max_attempts"
local binary_response=$(curl -fS -X POST "$assets_url" \
-H "Authorization: token $token" \
-F "attachment=@$binary_path;filename=$(basename "$binary_path")" \
-F "name=$(basename "$binary_path")")
if echo "$binary_response" | grep -q '"id"'; then
print_success "Uploaded binary successfully"
break
else
print_warning "Upload attempt $attempt failed"
if [[ $attempt -lt $max_attempts ]]; then
print_status "Retrying in 2 seconds..."
sleep 2
else
print_error "Failed to upload binary after $max_attempts attempts"
print_error "Response: $binary_response"
fi
fi
((attempt++))
done
fi
# Upload source tarball
if [[ -f "$tarball_path" ]]; then
print_status "Uploading source tarball: $(basename "$tarball_path")"
local tarball_response=$(curl -s -X POST "$api_url/releases/$release_id/assets" \
-H "Authorization: token $token" \
-F "attachment=@$tarball_path;filename=$(basename "$tarball_path")")
if echo "$tarball_response" | grep -q '"id"'; then
print_success "Uploaded source tarball successfully"
else
print_warning "Failed to upload source tarball: $tarball_response"
fi
fi
}
# Function to create Gitea release
create_gitea_release() {
print_status "Creating Gitea release..."
@@ -284,10 +438,21 @@ create_gitea_release() {
if echo "$response" | grep -q '"id"'; then
print_success "Created release $NEW_VERSION"
return 0
# Extract release ID for asset uploads
local release_id=$(echo "$response" | grep -o '"id":[0-9]*' | head -1 | cut -d':' -f2)
echo $release_id
elif echo "$response" | grep -q "already exists"; then
print_warning "Release $NEW_VERSION already exists"
return 0
# Try to get existing release ID
local check_response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
if echo "$check_response" | grep -q '"id"'; then
local release_id=$(echo "$check_response" | grep -o '"id":[0-9]*' | head -1 | cut -d':' -f2)
print_status "Using existing release ID: $release_id"
echo $release_id
else
print_error "Could not find existing release ID"
return 1
fi
else
print_error "Failed to create release $NEW_VERSION"
print_error "Response: $response"
@@ -297,7 +462,8 @@ create_gitea_release() {
local check_response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
if echo "$check_response" | grep -q '"id"'; then
print_warning "Release exists but creation response was unexpected"
return 0
local release_id=$(echo "$check_response" | grep -o '"id":[0-9]*' | head -1 | cut -d':' -f2)
echo $release_id
else
print_error "Release does not exist and creation failed"
return 1
@@ -315,8 +481,15 @@ main() {
if [[ "$RELEASE_MODE" == true ]]; then
print_status "=== RELEASE MODE ==="
# Increment minor version for releases
increment_version "minor"
# Only increment version if explicitly requested (not just because of -r flag)
if [[ "$VERSION_INCREMENT_TYPE" != "patch" ]]; then
increment_version "$VERSION_INCREMENT_TYPE"
else
# In release mode without version increment, get current version
LATEST_TAG=$(git tag -l 'v*.*.*' | sort -V | tail -n 1 || echo "v0.0.0")
NEW_VERSION="$LATEST_TAG"
export NEW_VERSION
fi
# Create new git tag BEFORE compilation so version.h picks it up
if git tag "$NEW_VERSION" > /dev/null 2>&1; then
@@ -330,9 +503,42 @@ main() {
# Commit and push (but skip tag creation since we already did it)
git_commit_and_push_no_tag
# Build release binary
if build_release_binary; then
local binary_path="build/c_relay_static_x86_64"
else
print_warning "Binary build failed, continuing with release creation"
# Check if binary exists from previous build
if [[ -f "build/c_relay_static_x86_64" ]]; then
print_status "Using existing binary from previous build"
binary_path="build/c_relay_static_x86_64"
else
binary_path=""
fi
fi
# Create source tarball
local tarball_path=""
if tarball_path=$(create_source_tarball); then
: # tarball_path is set by the function
else
print_warning "Source tarball creation failed, continuing with release creation"
fi
# Create Gitea release
if create_gitea_release; then
local release_id=""
if release_id=$(create_gitea_release); then
# Validate release_id is numeric
if [[ "$release_id" =~ ^[0-9]+$ ]]; then
# Upload assets if we have a release ID and assets
if [[ -n "$release_id" && (-n "$binary_path" || -n "$tarball_path") ]]; then
upload_release_assets "$release_id" "$binary_path" "$tarball_path"
fi
print_success "Release $NEW_VERSION completed successfully!"
else
print_error "Invalid release_id: $release_id"
exit 1
fi
else
print_error "Release creation failed"
fi
@@ -340,8 +546,8 @@ main() {
else
print_status "=== DEFAULT MODE ==="
# Increment patch version for regular commits
increment_version "patch"
# Increment version based on type (default to patch)
increment_version "$VERSION_INCREMENT_TYPE"
# Create new git tag BEFORE compilation so version.h picks it up
if git tag "$NEW_VERSION" > /dev/null 2>&1; then

View File

@@ -133,6 +133,11 @@ if [ -n "$PORT_OVERRIDE" ]; then
fi
fi
# Validate strict port flag (only makes sense with port override)
if [ "$USE_TEST_KEYS" = true ] && [ -z "$PORT_OVERRIDE" ]; then
echo "WARNING: --strict-port is always used with test keys. Consider specifying a custom port with -p."
fi
# Validate debug level if provided
if [ -n "$DEBUG_LEVEL" ]; then
if ! [[ "$DEBUG_LEVEL" =~ ^[0-5]$ ]]; then
@@ -163,6 +168,8 @@ if [ "$HELP" = true ]; then
echo " $0 # Fresh start with random keys"
echo " $0 -a <admin-hex> -r <relay-hex> # Use custom keys"
echo " $0 -a <admin-hex> -p 9000 # Custom admin key on port 9000"
echo " $0 -p 7777 --strict-port # Fail if port 7777 unavailable (no fallback)"
echo " $0 -p 8080 --strict-port -d=3 # Custom port with strict binding and debug"
echo " $0 --debug-level=3 # Start with debug level 3 (info)"
echo " $0 -d=5 # Start with debug level 5 (trace)"
echo " $0 --preserve-database # Preserve existing database and keys"

1
nips

Submodule nips deleted from 8c45ff5d96

View File

@@ -39,6 +39,11 @@ Even simpler: Use this one-liner
cd /usr/local/bin/c_relay
sudo -u c-relay ./c_relay --debug-level=5 & sleep 2 && sudo gdb -p $(pgrep c_relay)
Inside gdb, after attaching:
(gdb) continue
Or shorter:
(gdb) c
How to View the Logs
@@ -76,3 +81,12 @@ sudo systemctl status rsyslog
sudo -u c-relay ./c_relay --debug-level=5 -r 85d0b37e2ae822966dcadd06b2dc9368cde73865f90ea4d44f8b57d47ef0820a -a 1ec454734dcbf6fe54901ce25c0c7c6bca5edd89443416761fadc321d38df139
./c_relay_static_x86_64 -p 7889 --debug-level=5 -r 85d0b37e2ae822966dcadd06b2dc9368cde73865f90ea4d44f8b57d47ef0820a -a 1ec454734dcbf6fe54901ce25c0c7c6bca5edd89443416761fadc321d38df139
sudo ufw allow 8888/tcp
sudo ufw delete allow 8888/tcp
lsof -i :7777
kill $(lsof -t -i :7777)
kill -9 $(lsof -t -i :7777)

View File

@@ -1 +1 @@
3159561
1508392

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.5 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.6 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.5 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 738 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.2 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.3 MiB

BIN
screenshots/DM.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 79 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

BIN
screenshots/config.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 86 KiB

BIN
screenshots/light-mode.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 79 KiB

BIN
screenshots/main-light.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

BIN
screenshots/main.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

BIN
screenshots/raffles.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 296 KiB

BIN
screenshots/sqlQuery.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 154 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 157 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 45 KiB

967
src/api.c

File diff suppressed because it is too large Load Diff

View File

@@ -60,11 +60,11 @@ char* execute_sql_query(const char* query, const char* request_id, char* error_m
int handle_sql_query_unified(cJSON* event, const char* query, char* error_message, size_t error_size, struct lws* wsi);
// Monitoring system functions
int init_monitoring_system(void);
void cleanup_monitoring_system(void);
void monitoring_on_event_stored(void);
int set_monitoring_enabled(int enabled);
int is_monitoring_enabled(void);
void monitoring_on_subscription_change(void);
int get_monitoring_throttle_seconds(void);
// Kind 1 status posts
int generate_and_post_status_event(void);
#endif // API_H

View File

@@ -3,6 +3,22 @@
#include "debug.h"
#include "default_config_event.h"
#include "dm_admin.h"
// Undefine VERSION macros before including nostr_core.h to avoid redefinition warnings
// This must come AFTER default_config_event.h so that RELAY_VERSION macro expansion works correctly
#ifdef VERSION
#undef VERSION
#endif
#ifdef VERSION_MAJOR
#undef VERSION_MAJOR
#endif
#ifdef VERSION_MINOR
#undef VERSION_MINOR
#endif
#ifdef VERSION_PATCH
#undef VERSION_PATCH
#endif
#include "../nostr_core_lib/nostr_core/nostr_core.h"
#include <stdio.h>
#include <stdlib.h>
@@ -72,6 +88,7 @@ int migrate_config_from_events_to_table(void);
int populate_config_table_from_event(const cJSON* event);
int handle_config_query_unified(cJSON* event, const char* query_type, char* error_message, size_t error_size, struct lws* wsi);
int handle_config_set_unified(cJSON* event, const char* config_key, const char* config_value, char* error_message, size_t error_size, struct lws* wsi);
int handle_create_relay_event_unified(cJSON* event, const char* kind_str, const char* event_data_json, char* error_message, size_t error_size, struct lws* wsi);
// Forward declarations for tag parsing utilities
const char* get_first_tag_name(cJSON* event);
@@ -79,6 +96,7 @@ const char* get_tag_value(cJSON* event, const char* tag_name, int value_index);
int parse_auth_query_parameters(cJSON* event, char** query_type, char** pattern_value);
int handle_config_update_unified(cJSON* event, char* error_message, size_t error_size, struct lws* wsi);
int handle_stats_query_unified(cJSON* event, char* error_message, size_t error_size, struct lws* wsi);
int handle_sql_query_unified(cJSON* event, const char* query, char* error_message, size_t error_size, struct lws* wsi);
// Current configuration cache
@@ -801,7 +819,7 @@ int first_time_startup_sequence(const cli_options_t* cli_options, char* admin_pu
return 0;
}
int startup_existing_relay(const char* relay_pubkey, const cli_options_t* cli_options) {
int startup_existing_relay(const char* relay_pubkey, const cli_options_t* cli_options __attribute__((unused))) {
if (!relay_pubkey) {
DEBUG_ERROR("Invalid relay pubkey for existing relay startup");
return -1;
@@ -824,26 +842,7 @@ int startup_existing_relay(const char* relay_pubkey, const cli_options_t* cli_op
// NOTE: Database is already initialized in main.c before calling this function
// Config table should already exist with complete configuration
// Check if CLI overrides need to be applied
int has_overrides = 0;
if (cli_options) {
if (cli_options->port_override > 0) has_overrides = 1;
if (cli_options->admin_pubkey_override[0] != '\0') has_overrides = 1;
if (cli_options->relay_privkey_override[0] != '\0') has_overrides = 1;
}
if (has_overrides) {
// Apply CLI overrides to existing database
DEBUG_INFO("Applying CLI overrides to existing database");
if (apply_cli_overrides_atomic(cli_options) != 0) {
DEBUG_ERROR("Failed to apply CLI overrides to existing database");
return -1;
}
} else {
// No CLI overrides - config table is already available
DEBUG_INFO("No CLI overrides - config table is already available");
}
// CLI overrides will be applied after this function returns in main.c
return 0;
}
@@ -1149,6 +1148,20 @@ static int validate_config_field(const char* key, const char* value, char* error
return 0;
}
// NIP-59 Gift Wrap Timestamp Configuration
if (strcmp(key, "nip59_timestamp_max_delay_sec") == 0) {
if (!is_valid_non_negative_integer(value)) {
snprintf(error_msg, error_size, "invalid nip59_timestamp_max_delay_sec '%s' (must be non-negative integer)", value);
return -1;
}
long val = strtol(value, NULL, 10);
if (val > 604800) { // Max 7 days
snprintf(error_msg, error_size, "nip59_timestamp_max_delay_sec '%s' too large (max 604800 seconds = 7 days)", value);
return -1;
}
return 0;
}
if (strcmp(key, "nip42_auth_required_kinds") == 0) {
// Validate comma-separated list of kind numbers
if (!value || strlen(value) == 0) {
@@ -2542,7 +2555,7 @@ int handle_kind_23456_unified(cJSON* event, char* error_message, size_t error_si
}
// Perform NIP-44 decryption (relay as recipient, admin as sender)
char decrypted_text[4096]; // Buffer for decrypted content
char decrypted_text[16384]; // Buffer for decrypted content (16KB)
int decrypt_result = nostr_nip44_decrypt(relay_privkey_bytes, sender_pubkey_bytes, content, decrypted_text, sizeof(decrypted_text));
// Clean up private key immediately after use
@@ -2555,51 +2568,17 @@ int handle_kind_23456_unified(cJSON* event, char* error_message, size_t error_si
return -1;
}
// Check if decrypted content is a direct command array (DM control system)
cJSON* potential_command_array = cJSON_Parse(decrypted_text);
if (potential_command_array && cJSON_IsArray(potential_command_array)) {
// Route to DM admin system
int dm_result = process_dm_admin_command(potential_command_array, event, error_message, error_size, wsi);
cJSON_Delete(potential_command_array);
memset(decrypted_text, 0, sizeof(decrypted_text)); // Clear sensitive data
return dm_result;
}
// If not a direct command array, try parsing as inner event JSON (NIP-17)
cJSON* inner_event = potential_command_array; // Reuse the parsed JSON
if (!inner_event || !cJSON_IsObject(inner_event)) {
DEBUG_ERROR("error: decrypted content is not valid inner event JSON");
cJSON_Delete(inner_event);
snprintf(error_message, error_size, "error: decrypted content is not valid inner event JSON");
return -1;
}
// Extract content from inner event
cJSON* inner_content_obj = cJSON_GetObjectItem(inner_event, "content");
if (!inner_content_obj || !cJSON_IsString(inner_content_obj)) {
DEBUG_ERROR("error: inner event missing content field");
cJSON_Delete(inner_event);
snprintf(error_message, error_size, "error: inner event missing content field");
return -1;
}
const char* inner_content = cJSON_GetStringValue(inner_content_obj);
// Parse inner content as JSON array (the command array)
decrypted_content = cJSON_Parse(inner_content);
// Parse decrypted content as command array directly (NOT as NIP-17 inner event)
// Kind 23456 events contain direct command arrays: ["command_name", arg1, arg2, ...]
decrypted_content = cJSON_Parse(decrypted_text);
if (!decrypted_content || !cJSON_IsArray(decrypted_content)) {
DEBUG_ERROR("error: inner content is not valid JSON array");
cJSON_Delete(inner_event);
snprintf(error_message, error_size, "error: inner content is not valid JSON array");
DEBUG_ERROR("error: decrypted content is not valid command array");
cJSON_Delete(decrypted_content);
snprintf(error_message, error_size, "error: decrypted content is not valid command array");
return -1;
}
// Clean up inner event
cJSON_Delete(inner_event);
// Replace event content with decrypted command array for processing
cJSON_DeleteItemFromObject(event, "content");
cJSON_AddStringToObject(event, "content", "decrypted");
@@ -2616,10 +2595,26 @@ int handle_kind_23456_unified(cJSON* event, char* error_message, size_t error_si
cJSON_AddItemToArray(command_tag, cJSON_Duplicate(first_item, 1));
// Add remaining items as tag values
// Convert non-string items (objects, arrays, numbers) to JSON strings
for (int i = 1; i < cJSON_GetArraySize(decrypted_content); i++) {
cJSON* item = cJSON_GetArrayItem(decrypted_content, i);
if (item) {
if (cJSON_IsString(item)) {
// Keep strings as-is
cJSON_AddItemToArray(command_tag, cJSON_Duplicate(item, 1));
} else if (cJSON_IsNumber(item)) {
// Convert numbers to strings
char num_str[32];
snprintf(num_str, sizeof(num_str), "%.0f", cJSON_GetNumberValue(item));
cJSON_AddItemToArray(command_tag, cJSON_CreateString(num_str));
} else if (cJSON_IsObject(item) || cJSON_IsArray(item)) {
// Convert objects/arrays to JSON strings
char* json_str = cJSON_PrintUnformatted(item);
if (json_str) {
cJSON_AddItemToArray(command_tag, cJSON_CreateString(json_str));
free(json_str);
}
}
}
}
@@ -2696,6 +2691,25 @@ int handle_kind_23456_unified(cJSON* event, char* error_message, size_t error_si
else if (strcmp(action_type, "stats_query") == 0) {
return handle_stats_query_unified(event, error_message, error_size, wsi);
}
else if (strcmp(action_type, "create_relay_event") == 0) {
const char* kind_str = get_tag_value(event, action_type, 1);
const char* event_data_json = get_tag_value(event, action_type, 2);
if (!kind_str || !event_data_json) {
DEBUG_ERROR("invalid: missing kind or event data");
snprintf(error_message, error_size, "invalid: missing kind or event data");
return -1;
}
return handle_create_relay_event_unified(event, kind_str, event_data_json, error_message, error_size, wsi);
}
else if (strcmp(action_type, "sql_query") == 0) {
const char* query = get_tag_value(event, action_type, 1);
if (!query) {
DEBUG_ERROR("invalid: missing sql_query parameter");
snprintf(error_message, error_size, "invalid: missing sql_query parameter");
return -1;
}
return handle_sql_query_unified(event, query, error_message, error_size, wsi);
}
else if (strcmp(action_type, "whitelist") == 0 || strcmp(action_type, "blacklist") == 0) {
// Handle auth rule modifications (existing logic from process_admin_auth_event)
return handle_auth_rule_modification_unified(event, error_message, error_size, wsi);
@@ -3495,6 +3509,41 @@ int handle_stats_query_unified(cJSON* event, char* error_message, size_t error_s
return -1;
}
// Unified create relay event handler for the Kind 23456 admin command system.
//
// Parses the "create_relay_event" command arguments (event kind as a decimal
// string, event payload as a JSON string) and delegates the actual event
// creation to handle_create_relay_event_command() in api.c.
//
// Parameters:
//   event           - the decrypted admin command event (must be non-NULL)
//   kind_str        - decimal string for the event kind to create
//   event_data_json - JSON text describing the event to create
//   error_message   - buffer for a human-readable error on failure
//   error_size      - size of error_message
//   wsi             - websocket connection, forwarded to the command handler
//
// Returns 0 on success, -1 on validation/parse failure or handler failure.
int handle_create_relay_event_unified(cJSON* event, const char* kind_str, const char* event_data_json, char* error_message, size_t error_size, struct lws* wsi) {
    // Validate required parameters before doing any parsing work
    if (!event || !kind_str || !event_data_json) {
        snprintf(error_message, error_size, "invalid: missing parameters for create_relay_event");
        return -1;
    }

    // Parse kind string to integer; reject empty strings and trailing garbage
    // (endptr == kind_str means no digits consumed; *endptr != '\0' means
    // something followed the number, e.g. "42x")
    char* endptr;
    int kind = (int)strtol(kind_str, &endptr, 10);
    if (endptr == kind_str || *endptr != '\0') {
        snprintf(error_message, error_size, "invalid: kind must be a valid integer");
        return -1;
    }

    // Parse event data JSON; ownership of event_data stays with this function
    cJSON* event_data = cJSON_Parse(event_data_json);
    if (!event_data) {
        snprintf(error_message, error_size, "invalid: event_data must be valid JSON");
        return -1;
    }

    // Delegate to the existing implementation from api.c.
    // NOTE: wsi is genuinely used here (forwarded to the handler), so no
    // unused-parameter suppression is needed.
    extern int handle_create_relay_event_command(cJSON* event, int kind, cJSON* event_data, char* error_message, size_t error_size, struct lws* wsi);
    int result = handle_create_relay_event_command(event, kind, event_data, error_message, error_size, wsi);

    // Clean up parsed payload regardless of handler outcome
    cJSON_Delete(event_data);

    return result;
}
// Unified config update handler - handles multiple config objects in single atomic command
int handle_config_update_unified(cJSON* event, char* error_message, size_t error_size, struct lws* wsi) {
// Suppress unused parameter warning
@@ -4099,32 +4148,18 @@ int populate_all_config_values_atomic(const char* admin_pubkey, const char* rela
return -1;
}
// Insert monitoring system config entries
// Insert monitoring system config entry (ephemeral kind 24567)
// Note: Monitoring is automatically activated when clients subscribe to kind 24567
sqlite3_reset(stmt);
sqlite3_bind_text(stmt, 1, "kind_34567_reporting_enabled", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, "false", -1, SQLITE_STATIC); // boolean, default false
sqlite3_bind_text(stmt, 3, "boolean", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 4, "Enable real-time monitoring event generation", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 5, "monitoring", -1, SQLITE_STATIC);
sqlite3_bind_int(stmt, 6, 0); // does not require restart
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
DEBUG_ERROR("Failed to insert kind_34567_reporting_enabled: %s", sqlite3_errmsg(g_db));
sqlite3_finalize(stmt);
sqlite3_exec(g_db, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
sqlite3_reset(stmt);
sqlite3_bind_text(stmt, 1, "kind_34567_reporting_throttling_sec", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 1, "kind_24567_reporting_throttle_sec", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, "5", -1, SQLITE_STATIC); // integer, default 5 seconds
sqlite3_bind_text(stmt, 3, "integer", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 4, "Minimum seconds between monitoring event reports", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 4, "Minimum seconds between monitoring event reports (ephemeral kind 24567)", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 5, "monitoring", -1, SQLITE_STATIC);
sqlite3_bind_int(stmt, 6, 0); // does not require restart
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
DEBUG_ERROR("Failed to insert kind_34567_reporting_throttling_sec: %s", sqlite3_errmsg(g_db));
DEBUG_ERROR("Failed to insert kind_24567_reporting_throttle_sec: %s", sqlite3_errmsg(g_db));
sqlite3_finalize(stmt);
sqlite3_exec(g_db, "ROLLBACK;", NULL, NULL, NULL);
return -1;

View File

@@ -65,6 +65,9 @@ static const struct {
{"max_total_subscriptions", "5000"},
{"max_filters_per_subscription", "10"},
// Connection Management
{"max_connection_seconds", "86400"}, // 24 hours (0 = disabled)
// Event Processing Limits
{"max_event_tags", "100"},
{"max_content_length", "8196"},
@@ -72,7 +75,19 @@ static const struct {
// Performance Settings
{"default_limit", "500"},
{"max_limit", "5000"}
{"max_limit", "5000"},
// Proxy Settings
// Trust proxy headers (X-Forwarded-For, X-Real-IP) for accurate client IP detection
// Safe for informational/debugging use. Only becomes a security concern if you implement
// IP-based rate limiting or access control (which would require firewall protection anyway)
{"trust_proxy_headers", "true"},
// NIP-59 Gift Wrap Timestamp Configuration
{"nip59_timestamp_max_delay_sec", "0"},
// Kind 1 Status Posts
{"kind_1_status_posts_hours", "1"}
};
// Number of default configuration values

Binary file not shown.

View File

@@ -80,6 +80,7 @@ extern int handle_sql_query_unified(cJSON* event, const char* query, char* error
// Process direct command arrays (DM control system)
// This handles commands sent as direct JSON arrays, not wrapped in inner events
// Note: create_relay_event is NOT supported via DMs - use Kind 23456 events only
int process_dm_admin_command(cJSON* command_array, cJSON* event, char* error_message, size_t error_size, struct lws* wsi) {
if (!command_array || !cJSON_IsArray(command_array) || !event) {
DEBUG_ERROR("DM Admin: Invalid command array or event");
@@ -231,19 +232,27 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
return NULL;
}
DEBUG_INFO("DM_ADMIN: Received potential NIP-17 gift wrap event for processing");
// Step 1: Validate it's addressed to us
if (!is_nip17_gift_wrap_for_relay(gift_wrap_event)) {
DEBUG_INFO("DM_ADMIN: Event is not a valid gift wrap for this relay - rejecting");
strncpy(error_message, "NIP-17: Event is not a valid gift wrap for this relay", error_size - 1);
return NULL;
}
DEBUG_INFO("DM_ADMIN: Valid NIP-17 gift wrap confirmed for this relay");
// Step 2: Get relay private key for decryption
char* relay_privkey_hex = get_relay_private_key();
if (!relay_privkey_hex) {
DEBUG_INFO("DM_ADMIN: Could not get relay private key for decryption");
strncpy(error_message, "NIP-17: Could not get relay private key for decryption", error_size - 1);
return NULL;
}
DEBUG_INFO("DM_ADMIN: Retrieved relay private key for decryption");
// Convert hex private key to bytes
unsigned char relay_privkey[32];
if (nostr_hex_to_bytes(relay_privkey_hex, relay_privkey, sizeof(relay_privkey)) != 0) {
@@ -254,10 +263,13 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
}
free(relay_privkey_hex);
DEBUG_INFO("DM_ADMIN: Converted relay private key to bytes successfully");
// Step 3: Decrypt and parse inner event using library function
DEBUG_INFO("DM_ADMIN: Attempting to decrypt NIP-17 gift wrap using nostr_nip17_receive_dm");
cJSON* inner_dm = nostr_nip17_receive_dm(gift_wrap_event, relay_privkey);
if (!inner_dm) {
DEBUG_ERROR("NIP-17: nostr_nip17_receive_dm returned NULL");
DEBUG_INFO("DM_ADMIN: nostr_nip17_receive_dm returned NULL - decryption failed");
// Debug: Print the gift wrap event
char* gift_wrap_debug = cJSON_Print(gift_wrap_event);
if (gift_wrap_debug) {
@@ -273,12 +285,17 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
}
privkey_hex[64] = '\0';
DEBUG_INFO("DM_ADMIN: NIP-17 decryption failed - returning error");
strncpy(error_message, "NIP-17: Failed to decrypt and parse inner DM event", error_size - 1);
return NULL;
}
DEBUG_INFO("DM_ADMIN: Successfully decrypted NIP-17 gift wrap, processing inner DM");
// Step 4: Process admin command
DEBUG_INFO("DM_ADMIN: Processing decrypted admin command");
int result = process_nip17_admin_command(inner_dm, error_message, error_size, wsi);
DEBUG_INFO("DM_ADMIN: Admin command processing completed with result: %d", result);
// Step 5: For plain text commands (stats/config), the response is already handled
// Only create a generic response for other command types that don't handle their own responses
@@ -351,13 +368,17 @@ cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message,
if (success_dm) {
cJSON* success_gift_wraps[1];
// Get timestamp delay configuration
long max_delay_sec = get_config_int("nip59_timestamp_max_delay_sec", 0);
int send_result = nostr_nip17_send_dm(
success_dm, // dm_event
(const char**)&sender_pubkey, // recipient_pubkeys
1, // num_recipients
relay_privkey, // sender_private_key
success_gift_wraps, // gift_wraps_out
1 // max_gift_wraps
1, // max_gift_wraps
max_delay_sec // max_delay_sec
);
cJSON_Delete(success_dm);
@@ -457,18 +478,23 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
return -1;
}
DEBUG_INFO("DM_ADMIN: Processing NIP-17 admin command from decrypted DM");
// Extract content from DM
cJSON* content_obj = cJSON_GetObjectItem(dm_event, "content");
if (!content_obj || !cJSON_IsString(content_obj)) {
DEBUG_INFO("DM_ADMIN: DM missing content field");
strncpy(error_message, "NIP-17: DM missing content", error_size - 1);
return -1;
}
const char* dm_content = cJSON_GetStringValue(content_obj);
DEBUG_INFO("DM_ADMIN: Extracted DM content: %.100s%s", dm_content, strlen(dm_content) > 100 ? "..." : "");
// Check if sender is admin before processing any commands
cJSON* sender_pubkey_obj = cJSON_GetObjectItem(dm_event, "pubkey");
if (!sender_pubkey_obj || !cJSON_IsString(sender_pubkey_obj)) {
DEBUG_INFO("DM_ADMIN: DM missing sender pubkey - treating as user DM");
return 0; // Not an error, just treat as user DM
}
const char* sender_pubkey = cJSON_GetStringValue(sender_pubkey_obj);
@@ -477,11 +503,16 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
const char* admin_pubkey = get_config_value("admin_pubkey");
int is_admin = admin_pubkey && strlen(admin_pubkey) > 0 && strcmp(sender_pubkey, admin_pubkey) == 0;
DEBUG_INFO("DM_ADMIN: Sender pubkey: %.16s... (admin: %s)", sender_pubkey, is_admin ? "YES" : "NO");
// Parse DM content as JSON array of commands
DEBUG_INFO("DM_ADMIN: Attempting to parse DM content as JSON command array");
cJSON* command_array = cJSON_Parse(dm_content);
if (!command_array || !cJSON_IsArray(command_array)) {
DEBUG_INFO("DM_ADMIN: Content is not a JSON array, checking for plain text commands");
// If content is not a JSON array, check for plain text commands
if (is_admin) {
DEBUG_INFO("DM_ADMIN: Processing plain text admin command");
// Convert content to lowercase for case-insensitive matching
char content_lower[256];
size_t content_len = strlen(dm_content);
@@ -498,8 +529,10 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
// Check for stats commands
if (strstr(content_lower, "stats") != NULL || strstr(content_lower, "statistics") != NULL) {
DEBUG_INFO("DM_ADMIN: Processing stats command");
char* stats_text = generate_stats_text();
if (!stats_text) {
DEBUG_INFO("DM_ADMIN: Failed to generate stats text");
return -1;
}
@@ -512,12 +545,15 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
return -1;
}
DEBUG_INFO("DM_ADMIN: Stats command processed successfully");
return 0;
}
// Check for config commands
else if (strstr(content_lower, "config") != NULL || strstr(content_lower, "configuration") != NULL) {
DEBUG_INFO("DM_ADMIN: Processing config command");
char* config_text = generate_config_text();
if (!config_text) {
DEBUG_INFO("DM_ADMIN: Failed to generate config text");
return -1;
}
@@ -530,15 +566,47 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
return -1;
}
DEBUG_INFO("DM_ADMIN: Config command processed successfully");
return 0;
}
// Check for status commands
else if (strstr(content_lower, "status") != NULL) {
DEBUG_INFO("DM_ADMIN: Processing status command");
// Create synthetic event for system_command handler
cJSON* synthetic_event = cJSON_CreateObject();
cJSON_AddNumberToObject(synthetic_event, "kind", 23456);
cJSON_AddStringToObject(synthetic_event, "pubkey", sender_pubkey);
// Create tags array with system_command
cJSON* tags = cJSON_CreateArray();
cJSON* cmd_tag = cJSON_CreateArray();
cJSON_AddItemToArray(cmd_tag, cJSON_CreateString("system_command"));
cJSON_AddItemToArray(cmd_tag, cJSON_CreateString("system_status"));
cJSON_AddItemToArray(tags, cmd_tag);
cJSON_AddItemToObject(synthetic_event, "tags", tags);
char error_msg[256];
int result = handle_system_command_unified(synthetic_event, "system_status", error_msg, sizeof(error_msg), wsi);
cJSON_Delete(synthetic_event);
if (result != 0) {
DEBUG_ERROR(error_msg);
return -1;
}
DEBUG_INFO("DM_ADMIN: Status command processed successfully");
return 0;
}
else {
DEBUG_INFO("DM_ADMIN: Checking for confirmation or config change requests");
// Check if it's a confirmation response (yes/no)
int confirmation_result = handle_config_confirmation(sender_pubkey, dm_content);
if (confirmation_result != 0) {
if (confirmation_result > 0) {
// Configuration confirmation processed successfully
DEBUG_INFO("DM_ADMIN: Configuration confirmation processed successfully");
} else if (confirmation_result == -2) {
DEBUG_INFO("DM_ADMIN: No pending changes to confirm");
// No pending changes
char no_pending_msg[256];
snprintf(no_pending_msg, sizeof(no_pending_msg),
@@ -558,6 +626,7 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
int config_result = process_config_change_request(sender_pubkey, dm_content);
if (config_result != 0) {
if (config_result > 0) {
DEBUG_INFO("DM_ADMIN: Configuration change request processed successfully");
return 1; // Return positive value to indicate response was handled
} else {
DEBUG_ERROR("NIP-17: Configuration change request failed");
@@ -565,22 +634,28 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
}
}
DEBUG_INFO("DM_ADMIN: Unrecognized plain text admin command");
return 0; // Admin sent unrecognized plain text, treat as user DM
}
} else {
DEBUG_INFO("DM_ADMIN: Non-admin user sent plain text - treating as user DM");
// Not admin, treat as user DM
return 0;
}
}
DEBUG_INFO("DM_ADMIN: Successfully parsed JSON command array");
// Check if this is a "stats" command
if (cJSON_GetArraySize(command_array) > 0) {
cJSON* first_item = cJSON_GetArrayItem(command_array, 0);
if (cJSON_IsString(first_item) && strcmp(cJSON_GetStringValue(first_item), "stats") == 0) {
DEBUG_INFO("DM_ADMIN: Processing JSON stats command");
// Get sender pubkey for response
cJSON* sender_pubkey_obj = cJSON_GetObjectItem(dm_event, "pubkey");
if (!sender_pubkey_obj || !cJSON_IsString(sender_pubkey_obj)) {
cJSON_Delete(command_array);
DEBUG_INFO("DM_ADMIN: DM missing sender pubkey for stats command");
strncpy(error_message, "NIP-17: DM missing sender pubkey", error_size - 1);
return -1;
}
@@ -590,6 +665,7 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
char* stats_json = generate_stats_json();
if (!stats_json) {
cJSON_Delete(command_array);
DEBUG_INFO("DM_ADMIN: Failed to generate stats JSON");
strncpy(error_message, "NIP-17: Failed to generate stats", error_size - 1);
return -1;
}
@@ -605,10 +681,12 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
return -1;
}
DEBUG_INFO("DM_ADMIN: JSON stats command processed successfully");
return 0;
}
}
DEBUG_INFO("DM_ADMIN: Delegating to unified admin processing for command array");
// For other commands, delegate to existing admin processing
// Create a synthetic kind 23456 event with the DM content
cJSON* synthetic_event = cJSON_CreateObject();
@@ -628,10 +706,12 @@ int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t err
}
// Process as regular admin event
DEBUG_INFO("DM_ADMIN: Processing synthetic admin event");
int result = process_admin_event_in_config(synthetic_event, error_message, error_size, wsi);
cJSON_Delete(synthetic_event);
cJSON_Delete(command_array);
DEBUG_INFO("DM_ADMIN: Unified admin processing completed with result: %d", result);
return result;
}

File diff suppressed because one or more lines are too long

View File

@@ -95,7 +95,6 @@ void update_subscription_manager_config(void);
void log_subscription_created(const subscription_t* sub);
void log_subscription_closed(const char* sub_id, const char* client_ip, const char* reason);
void log_subscription_disconnected(const char* client_ip);
void log_event_broadcast(const char* event_id, const char* sub_id, const char* client_ip);
void update_subscription_events_sent(const char* sub_id, int events_sent);
// Forward declarations for NIP-01 event handling
@@ -148,10 +147,9 @@ int mark_event_as_deleted(const char* event_id, const char* deletion_event_id, c
// Forward declaration for database functions
int store_event(cJSON* event);
cJSON* retrieve_event(const char* event_id);
// Forward declarations for monitoring system
void init_monitoring_system(void);
void cleanup_monitoring_system(void);
// Forward declaration for monitoring system
void monitoring_on_event_stored(void);
// Forward declarations for NIP-11 relay information handling
@@ -219,11 +217,9 @@ void send_notice_message(struct lws* wsi, const char* message) {
char* msg_str = cJSON_Print(notice_msg);
if (msg_str) {
size_t msg_len = strlen(msg_str);
unsigned char* buf = malloc(LWS_PRE + msg_len);
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
lws_write(wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
free(buf);
// Use proper message queue system instead of direct lws_write
if (queue_message(wsi, NULL, msg_str, msg_len, LWS_WRITE_TEXT) != 0) {
DEBUG_ERROR("Failed to queue NOTICE message");
}
free(msg_str);
}
@@ -317,15 +313,36 @@ int init_database(const char* database_path_override) {
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
// Check config table row count immediately after database open
sqlite3_stmt* stmt;
if (sqlite3_prepare_v2(g_db, "SELECT COUNT(*) FROM config", -1, &stmt, NULL) == SQLITE_OK) {
int rc = sqlite3_prepare_v2(g_db, "SELECT COUNT(*) FROM config", -1, &stmt, NULL);
if (rc == SQLITE_OK) {
if (sqlite3_step(stmt) == SQLITE_ROW) {
int row_count = sqlite3_column_int(stmt, 0);
DEBUG_LOG("Config table row count immediately after sqlite3_open(): %d", row_count);
}
sqlite3_finalize(stmt);
} else {
// Capture and log the actual SQLite error instead of assuming table doesn't exist
const char* err_msg = sqlite3_errmsg(g_db);
DEBUG_LOG("Failed to prepare config table query: %s (error code: %d)", err_msg, rc);
// Check if it's actually a missing table vs other error
if (rc == SQLITE_ERROR) {
// Try to check if config table exists
sqlite3_stmt* check_stmt;
int check_rc = sqlite3_prepare_v2(g_db, "SELECT name FROM sqlite_master WHERE type='table' AND name='config'", -1, &check_stmt, NULL);
if (check_rc == SQLITE_OK) {
int has_table = (sqlite3_step(check_stmt) == SQLITE_ROW);
sqlite3_finalize(check_stmt);
if (has_table) {
DEBUG_LOG("Config table EXISTS but query failed - possible database corruption or locking issue");
} else {
DEBUG_LOG("Config table does not exist yet (first-time startup)");
}
} else {
DEBUG_LOG("Failed to check table existence: %s (error code: %d)", sqlite3_errmsg(g_db), check_rc);
}
}
}
}
// DEBUG_GUARD_END
@@ -571,93 +588,6 @@ const char* extract_d_tag_value(cJSON* tags) {
return NULL;
}
// NIP-01 replaceable event handling: only the newest event for a given
// (kind, pubkey) pair survives in storage.
//
// Returns 0 when the incoming event may be stored (including on any database
// error, where we deliberately fail open), and -1 when an equally-new or
// newer version already exists and the incoming event must be rejected.
int check_and_handle_replaceable_event(int kind, const char* pubkey, long created_at) {
    if (!g_db || !pubkey) return 0;

    const char* find_newest_sql =
        "SELECT created_at FROM events WHERE kind = ? AND pubkey = ? ORDER BY created_at DESC LIMIT 1";

    sqlite3_stmt* select_stmt = NULL;
    // On any database error we allow storage rather than dropping events.
    if (sqlite3_prepare_v2(g_db, find_newest_sql, -1, &select_stmt, NULL) != SQLITE_OK) {
        return 0;
    }

    sqlite3_bind_int(select_stmt, 1, kind);
    sqlite3_bind_text(select_stmt, 2, pubkey, -1, SQLITE_STATIC);

    int verdict = 0;
    if (sqlite3_step(select_stmt) == SQLITE_ROW) {
        long newest_stored = sqlite3_column_int64(select_stmt, 0);
        if (created_at <= newest_stored) {
            // Incoming event is not strictly newer than what we hold: reject.
            verdict = -1;
        } else {
            // Incoming event wins: purge every older version for this author/kind.
            const char* purge_sql =
                "DELETE FROM events WHERE kind = ? AND pubkey = ? AND created_at < ?";
            sqlite3_stmt* purge_stmt = NULL;
            if (sqlite3_prepare_v2(g_db, purge_sql, -1, &purge_stmt, NULL) == SQLITE_OK) {
                sqlite3_bind_int(purge_stmt, 1, kind);
                sqlite3_bind_text(purge_stmt, 2, pubkey, -1, SQLITE_STATIC);
                sqlite3_bind_int64(purge_stmt, 3, created_at);
                sqlite3_step(purge_stmt); // best-effort: failures leave stale rows, storage still proceeds
                sqlite3_finalize(purge_stmt);
            }
        }
    }

    sqlite3_finalize(select_stmt);
    return verdict;
}
// Check and handle addressable events according to NIP-01.
//
// Addressable (parameterized replaceable) events are keyed by
// (kind, pubkey, d-tag value): only the newest event for a given key survives.
//
// Returns 0 when the incoming event may be stored (including on any database
// error, where we fail open), and -1 when a same-or-newer version already
// exists and the incoming event must be rejected.
//
// BUG FIX: the previous query used json_extract(tags, '$[*][1]'), but SQLite's
// json_extract() does not support '$[*]' wildcard paths, so the d-tag match
// never worked as intended. The d-tag comparison now iterates the tags array
// with json_each() and matches tags of the form ["d", <value>, ...].
int check_and_handle_addressable_event(int kind, const char* pubkey, const char* d_tag_value, long created_at) {
    if (!g_db || !pubkey) return 0;

    // If no d tag, treat as regular replaceable
    if (!d_tag_value) {
        return check_and_handle_replaceable_event(kind, pubkey, created_at);
    }

    // Find the newest stored event with the same kind, pubkey, and d tag.
    const char* sql =
        "SELECT created_at FROM events WHERE kind = ? AND pubkey = ? "
        "AND EXISTS (SELECT 1 FROM json_each(events.tags) "
        "WHERE json_extract(json_each.value, '$[0]') = 'd' "
        "AND json_extract(json_each.value, '$[1]') = ?) "
        "ORDER BY created_at DESC LIMIT 1";

    sqlite3_stmt* stmt;
    int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
    if (rc != SQLITE_OK) {
        return 0; // Allow storage on DB error
    }

    sqlite3_bind_int(stmt, 1, kind);
    sqlite3_bind_text(stmt, 2, pubkey, -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 3, d_tag_value, -1, SQLITE_STATIC);

    int result = 0;
    if (sqlite3_step(stmt) == SQLITE_ROW) {
        long existing_created_at = sqlite3_column_int64(stmt, 0);
        if (created_at <= existing_created_at) {
            result = -1; // Older or same timestamp, reject
        } else {
            // Delete older versions with same kind, pubkey, and d tag
            const char* delete_sql =
                "DELETE FROM events WHERE kind = ? AND pubkey = ? AND created_at < ? "
                "AND EXISTS (SELECT 1 FROM json_each(events.tags) "
                "WHERE json_extract(json_each.value, '$[0]') = 'd' "
                "AND json_extract(json_each.value, '$[1]') = ?)";
            sqlite3_stmt* delete_stmt;
            if (sqlite3_prepare_v2(g_db, delete_sql, -1, &delete_stmt, NULL) == SQLITE_OK) {
                sqlite3_bind_int(delete_stmt, 1, kind);
                sqlite3_bind_text(delete_stmt, 2, pubkey, -1, SQLITE_STATIC);
                sqlite3_bind_int64(delete_stmt, 3, created_at);
                sqlite3_bind_text(delete_stmt, 4, d_tag_value, -1, SQLITE_STATIC);
                sqlite3_step(delete_stmt); // best-effort cleanup
                sqlite3_finalize(delete_stmt);
            }
        }
    }

    sqlite3_finalize(stmt);
    return result;
}
// Store event in database
int store_event(cJSON* event) {
@@ -682,6 +612,13 @@ int store_event(cJSON* event) {
// Classify event type
event_type_t type = classify_event_kind((int)cJSON_GetNumberValue(kind));
// EPHEMERAL EVENTS (kinds 20000-29999) should NOT be stored
if (type == EVENT_TYPE_EPHEMERAL) {
DEBUG_LOG("Ephemeral event (kind %d) - broadcasting only, not storing",
(int)cJSON_GetNumberValue(kind));
return 0; // Success - event was handled but not stored
}
// Serialize tags to JSON (use empty array if no tags)
char* tags_json = NULL;
if (tags && cJSON_IsArray(tags)) {
@@ -720,11 +657,36 @@ int store_event(cJSON* event) {
// Execute statement
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
const char* err_msg = sqlite3_errmsg(g_db);
int extended_errcode = sqlite3_extended_errcode(g_db);
DEBUG_ERROR("INSERT failed: rc=%d, extended_errcode=%d, msg=%s", rc, extended_errcode, err_msg);
}
sqlite3_finalize(stmt);
if (rc != SQLITE_DONE) {
if (rc == SQLITE_CONSTRAINT) {
DEBUG_WARN("Event already exists in database");
// Add TRACE level debug to show both events
if (g_debug_level >= DEBUG_LEVEL_TRACE) {
// Get the existing event from database
cJSON* existing_event = retrieve_event(cJSON_GetStringValue(id));
if (existing_event) {
char* existing_json = cJSON_Print(existing_event);
DEBUG_TRACE("EXISTING EVENT: %s", existing_json ? existing_json : "NULL");
free(existing_json);
cJSON_Delete(existing_event);
} else {
DEBUG_TRACE("EXISTING EVENT: Could not retrieve existing event");
}
// Show the event we're trying to insert
char* new_json = cJSON_Print(event);
DEBUG_TRACE("NEW EVENT: %s", new_json ? new_json : "NULL");
free(new_json);
}
free(tags_json);
return 0; // Not an error, just duplicate
}
@@ -916,12 +878,11 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
char* msg_str = cJSON_Print(event_msg);
if (msg_str) {
size_t msg_len = strlen(msg_str);
unsigned char* buf = malloc(LWS_PRE + msg_len);
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
lws_write(wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
// Use proper message queue system instead of direct lws_write
if (queue_message(wsi, NULL, msg_str, msg_len, LWS_WRITE_TEXT) != 0) {
DEBUG_ERROR("Failed to queue config EVENT message");
} else {
config_events_sent++;
free(buf);
}
free(msg_str);
}
@@ -959,11 +920,9 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
char* closed_str = cJSON_Print(closed_msg);
if (closed_str) {
size_t closed_len = strlen(closed_str);
unsigned char* buf = malloc(LWS_PRE + closed_len);
if (buf) {
memcpy(buf + LWS_PRE, closed_str, closed_len);
lws_write(wsi, buf + LWS_PRE, closed_len, LWS_WRITE_TEXT);
free(buf);
// Use proper message queue system instead of direct lws_write
if (queue_message(wsi, pss, closed_str, closed_len, LWS_WRITE_TEXT) != 0) {
DEBUG_ERROR("Failed to queue CLOSED message");
}
free(closed_str);
}
@@ -1293,11 +1252,9 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
char* msg_str = cJSON_Print(event_msg);
if (msg_str) {
size_t msg_len = strlen(msg_str);
unsigned char* buf = malloc(LWS_PRE + msg_len);
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
lws_write(wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
free(buf);
// Use proper message queue system instead of direct lws_write
if (queue_message(wsi, pss, msg_str, msg_len, LWS_WRITE_TEXT) != 0) {
DEBUG_ERROR("Failed to queue EVENT message for sub=%s", sub_id);
}
free(msg_str);
}
@@ -1437,7 +1394,7 @@ void print_usage(const char* program_name) {
printf("Options:\n");
printf(" -h, --help Show this help message\n");
printf(" -v, --version Show version information\n");
printf(" -p, --port PORT Override relay port (first-time startup only)\n");
printf(" -p, --port PORT Override relay port (first-time startup and existing relay restarts)\n");
printf(" --strict-port Fail if exact port is unavailable (no port increment)\n");
printf(" -a, --admin-pubkey KEY Override admin public key (64-char hex or npub)\n");
printf(" -r, --relay-privkey KEY Override relay private key (64-char hex or nsec)\n");
@@ -1447,13 +1404,14 @@ void print_usage(const char* program_name) {
printf("Configuration:\n");
printf(" This relay uses event-based configuration stored in the database.\n");
printf(" On first startup, keys are automatically generated and printed once.\n");
printf(" Command line options like --port only apply during first-time setup.\n");
printf(" Command line options like --port apply during first-time setup and existing relay restarts.\n");
printf(" After initial setup, all configuration is managed via database events.\n");
printf(" Database file: <relay_pubkey>.db (created automatically)\n");
printf("\n");
printf("Port Binding:\n");
printf(" Default: Try up to 10 consecutive ports if requested port is busy\n");
printf(" --strict-port: Fail immediately if exact requested port is unavailable\n");
printf(" --strict-port works with any custom port specified via -p or --port\n");
printf("\n");
printf("Examples:\n");
printf(" %s # Start relay (auto-configure on first run)\n", program_name);
@@ -1702,70 +1660,7 @@ int main(int argc, char* argv[]) {
return 1;
}
// COMMENTED OUT: Old incremental config building code replaced by unified startup sequence
// The new first_time_startup_sequence() function handles all config creation atomically
/*
// Handle configuration setup after database is initialized
// Always populate defaults directly in config table (abandoning legacy event signing)
// Populate default config values in table
if (populate_default_config_values() != 0) {
DEBUG_ERROR("Failed to populate default config values");
cleanup_configuration_system();
nostr_cleanup();
close_database();
return 1;
}
// DEBUG_GUARD_START
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
sqlite3_stmt* stmt;
if (sqlite3_prepare_v2(g_db, "SELECT COUNT(*) FROM config", -1, &stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(stmt) == SQLITE_ROW) {
int row_count = sqlite3_column_int(stmt, 0);
DEBUG_LOG("Config table row count after populate_default_config_values(): %d", row_count);
}
sqlite3_finalize(stmt);
}
}
// DEBUG_GUARD_END
// Apply CLI overrides now that database is available
if (cli_options.port_override > 0) {
char port_str[16];
snprintf(port_str, sizeof(port_str), "%d", cli_options.port_override);
if (update_config_in_table("relay_port", port_str) != 0) {
DEBUG_ERROR("Failed to update relay port override in config table");
cleanup_configuration_system();
nostr_cleanup();
close_database();
return 1;
}
printf(" Port: %d (overriding default)\n", cli_options.port_override);
}
// Add pubkeys to config table (single authoritative call)
if (add_pubkeys_to_config_table() != 0) {
DEBUG_ERROR("Failed to add pubkeys to config table");
cleanup_configuration_system();
nostr_cleanup();
close_database();
return 1;
}
// DEBUG_GUARD_START
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
sqlite3_stmt* stmt;
if (sqlite3_prepare_v2(g_db, "SELECT COUNT(*) FROM config", -1, &stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(stmt) == SQLITE_ROW) {
int row_count = sqlite3_column_int(stmt, 0);
DEBUG_LOG("Config table row count after add_pubkeys_to_config_table() (first-time): %d", row_count);
}
sqlite3_finalize(stmt);
}
}
// DEBUG_GUARD_END
*/
} else {
// Find existing database file
char** existing_files = find_existing_db_files();
@@ -1800,7 +1695,7 @@ int main(int argc, char* argv[]) {
return 1;
}
// Setup existing relay (sets database path and loads config)
// Setup existing relay FIRST (sets database path)
if (startup_existing_relay(relay_pubkey, &cli_options) != 0) {
DEBUG_ERROR("Failed to setup existing relay");
cleanup_configuration_system();
@@ -1813,23 +1708,7 @@ int main(int argc, char* argv[]) {
return 1;
}
// Check config table row count before database initialization
{
sqlite3* temp_db = NULL;
if (sqlite3_open(g_database_path, &temp_db) == SQLITE_OK) {
sqlite3_stmt* stmt;
if (sqlite3_prepare_v2(temp_db, "SELECT COUNT(*) FROM config", -1, &stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(stmt) == SQLITE_ROW) {
int row_count = sqlite3_column_int(stmt, 0);
printf(" Config table row count before database initialization: %d\n", row_count);
}
sqlite3_finalize(stmt);
}
sqlite3_close(temp_db);
}
}
// Initialize database with existing database path
// Initialize database with the database path set by startup_existing_relay()
DEBUG_TRACE("Initializing existing database");
if (init_database(g_database_path) != 0) {
DEBUG_ERROR("Failed to initialize existing database");
@@ -1844,6 +1723,20 @@ int main(int argc, char* argv[]) {
}
DEBUG_LOG("Existing database initialized");
// Apply CLI overrides atomically (now that database is initialized)
if (apply_cli_overrides_atomic(&cli_options) != 0) {
DEBUG_ERROR("Failed to apply CLI overrides for existing relay");
cleanup_configuration_system();
free(relay_pubkey);
for (int i = 0; existing_files[i]; i++) {
free(existing_files[i]);
}
free(existing_files);
nostr_cleanup();
close_database();
return 1;
}
// DEBUG_GUARD_START
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
sqlite3_stmt* stmt;
@@ -1855,102 +1748,6 @@ int main(int argc, char* argv[]) {
sqlite3_finalize(stmt);
}
}
// DEBUG_GUARD_END
// COMMENTED OUT: Old incremental config building code replaced by unified startup sequence
// The new startup_existing_relay() function handles all config loading atomically
/*
// Ensure default configuration values are populated (for any missing keys)
// This must be done AFTER database initialization
// COMMENTED OUT: Don't modify existing database config on restart
// if (populate_default_config_values() != 0) {
// DEBUG_WARN("Failed to populate default config values for existing relay - continuing");
// }
// Load configuration from database
cJSON* config_event = load_config_event_from_database(relay_pubkey);
if (config_event) {
if (apply_configuration_from_event(config_event) != 0) {
DEBUG_WARN("Failed to apply configuration from database");
}
cJSON_Delete(config_event);
} else {
// This is expected for relays using table-based configuration
// No longer a warning - just informational
}
// DEBUG_GUARD_START
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
sqlite3_stmt* stmt;
if (sqlite3_prepare_v2(g_db, "SELECT COUNT(*) FROM config", -1, &stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(stmt) == SQLITE_ROW) {
int row_count = sqlite3_column_int(stmt, 0);
DEBUG_LOG("Config table row count before checking pubkeys: %d", row_count);
}
sqlite3_finalize(stmt);
}
}
// DEBUG_GUARD_END
// Ensure pubkeys are in config table for existing relay
// This handles migration from old event-based config to table-based config
const char* admin_pubkey_from_table = get_config_value_from_table("admin_pubkey");
const char* relay_pubkey_from_table = get_config_value_from_table("relay_pubkey");
int need_to_add_pubkeys = 0;
// Check if admin_pubkey is missing or invalid
if (!admin_pubkey_from_table || strlen(admin_pubkey_from_table) != 64) {
DEBUG_WARN("Admin pubkey missing or invalid in config table - will regenerate from cache");
need_to_add_pubkeys = 1;
}
if (admin_pubkey_from_table) free((char*)admin_pubkey_from_table);
// Check if relay_pubkey is missing or invalid
if (!relay_pubkey_from_table || strlen(relay_pubkey_from_table) != 64) {
DEBUG_WARN("Relay pubkey missing or invalid in config table - will regenerate from cache");
need_to_add_pubkeys = 1;
}
if (relay_pubkey_from_table) free((char*)relay_pubkey_from_table);
// If either pubkey is missing, call add_pubkeys_to_config_table to populate both
if (need_to_add_pubkeys) {
if (add_pubkeys_to_config_table() != 0) {
DEBUG_ERROR("Failed to add pubkeys to config table for existing relay");
cleanup_configuration_system();
nostr_cleanup();
close_database();
return 1;
}
// DEBUG_GUARD_START
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
sqlite3_stmt* stmt;
if (sqlite3_prepare_v2(g_db, "SELECT COUNT(*) FROM config", -1, &stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(stmt) == SQLITE_ROW) {
int row_count = sqlite3_column_int(stmt, 0);
DEBUG_LOG("Config table row count after add_pubkeys_to_config_table(): %d", row_count);
}
sqlite3_finalize(stmt);
}
}
// DEBUG_GUARD_END
}
// Apply CLI overrides for existing relay (port override should work even for existing relays)
if (cli_options.port_override > 0) {
char port_str[16];
snprintf(port_str, sizeof(port_str), "%d", cli_options.port_override);
if (update_config_in_table("relay_port", port_str) != 0) {
DEBUG_ERROR("Failed to update relay port override in config table for existing relay");
cleanup_configuration_system();
nostr_cleanup();
close_database();
return 1;
}
printf(" Port: %d (overriding configured port)\n", cli_options.port_override);
}
*/
// Free memory
free(relay_pubkey);
@@ -1989,9 +1786,6 @@ int main(int argc, char* argv[]) {
// Initialize NIP-40 expiration configuration
init_expiration_config();
// Initialize monitoring system
init_monitoring_system();
// Update subscription manager configuration
update_subscription_manager_config();
@@ -2013,19 +1807,17 @@ int main(int argc, char* argv[]) {
return 1;
}
// Cleanup orphaned subscriptions from previous runs
cleanup_all_subscriptions_on_startup();
// Start WebSocket Nostr relay server (port from configuration)
int result = start_websocket_relay(-1, cli_options.strict_port); // Let config system determine port, pass strict_port flag
// Start WebSocket Nostr relay server (port from CLI override or configuration)
int result = start_websocket_relay(cli_options.port_override, cli_options.strict_port); // Use CLI port override if specified, otherwise config
// Cleanup
cleanup_relay_info();
ginxsom_request_validator_cleanup();
cleanup_configuration_system();
// Cleanup monitoring system
cleanup_monitoring_system();
// Cleanup subscription manager mutexes
pthread_mutex_destroy(&g_subscription_manager.subscriptions_lock);
pthread_mutex_destroy(&g_subscription_manager.ip_tracking_lock);

View File

@@ -10,10 +10,14 @@
#define MAIN_H
// Version information (auto-updated by build system)
#define VERSION "v0.7.29"
#define VERSION_MAJOR 0
#define VERSION_MINOR
#define VERSION_PATCH 29
#define VERSION_MAJOR 1
#define VERSION_MINOR 0
#define VERSION_PATCH 8
#define VERSION "v1.0.8"
// Avoid VERSION_MAJOR redefinition warning from nostr_core_lib
#undef VERSION_MAJOR
#define VERSION_MAJOR 1
// Relay metadata (authoritative source for NIP-11 information)
#define RELAY_NAME "C-Relay"

View File

@@ -12,6 +12,7 @@
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include "websockets.h"
// Forward declaration for notice message function
@@ -22,23 +23,7 @@ int nostr_nip42_generate_challenge(char *challenge_buffer, size_t buffer_size);
int nostr_nip42_verify_auth_event(cJSON *event, const char *challenge_id,
const char *relay_url, int time_tolerance_seconds);
// Forward declaration for per_session_data struct (defined in main.c)
struct per_session_data {
int authenticated;
void* subscriptions; // Head of this session's subscription list
pthread_mutex_t session_lock; // Per-session thread safety
char client_ip[41]; // Client IP for logging
int subscription_count; // Number of subscriptions for this session
// NIP-42 Authentication State
char authenticated_pubkey[65]; // Authenticated public key (64 hex + null)
char active_challenge[65]; // Current challenge for this session (64 hex + null)
time_t challenge_created; // When challenge was created
time_t challenge_expires; // Challenge expiration time
int nip42_auth_required_events; // Whether NIP-42 auth is required for EVENT submission
int nip42_auth_required_subscriptions; // Whether NIP-42 auth is required for REQ operations
int auth_challenge_sent; // Whether challenge has been sent (0/1)
};
// Forward declaration for per_session_data struct (defined in websockets.h)
// Send NIP-42 authentication challenge to client
@@ -70,11 +55,9 @@ void send_nip42_auth_challenge(struct lws* wsi, struct per_session_data* pss) {
char* msg_str = cJSON_Print(auth_msg);
if (msg_str) {
size_t msg_len = strlen(msg_str);
unsigned char* buf = malloc(LWS_PRE + msg_len);
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
lws_write(wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
free(buf);
// Use proper message queue system instead of direct lws_write
if (queue_message(wsi, pss, msg_str, msg_len, LWS_WRITE_TEXT) != 0) {
DEBUG_ERROR("Failed to queue AUTH challenge message");
}
free(msg_str);
}

View File

@@ -1,12 +1,11 @@
/* Embedded SQL Schema for C Nostr Relay
* Generated from db/schema.sql - Do not edit manually
* Schema Version: 7
* Schema Version: 8
*/
#ifndef SQL_SCHEMA_H
#define SQL_SCHEMA_H
/* Schema version constant */
#define EMBEDDED_SCHEMA_VERSION "7"
#define EMBEDDED_SCHEMA_VERSION "8"
/* Embedded SQL schema as C string literal */
static const char* const EMBEDDED_SCHEMA_SQL =
@@ -15,7 +14,7 @@ static const char* const EMBEDDED_SCHEMA_SQL =
-- Configuration system using config table\n\
\n\
-- Schema version tracking\n\
PRAGMA user_version = 7;\n\
PRAGMA user_version = 8;\n\
\n\
-- Enable foreign key support\n\
PRAGMA foreign_keys = ON;\n\
@@ -58,8 +57,8 @@ CREATE TABLE schema_info (\n\
\n\
-- Insert schema metadata\n\
INSERT INTO schema_info (key, value) VALUES\n\
('version', '7'),\n\
('description', 'Hybrid Nostr relay schema with event-based and table-based configuration'),\n\
('version', '8'),\n\
('description', 'Hybrid Nostr relay schema with subscription deduplication support'),\n\
('created_at', strftime('%s', 'now'));\n\
\n\
-- Helper views for common queries\n\
@@ -93,16 +92,6 @@ FROM events\n\
WHERE kind = 33334\n\
ORDER BY created_at DESC;\n\
\n\
-- Optimization: Trigger for automatic cleanup of ephemeral events older than 1 hour\n\
CREATE TRIGGER cleanup_ephemeral_events\n\
AFTER INSERT ON events\n\
WHEN NEW.event_type = 'ephemeral'\n\
BEGIN\n\
DELETE FROM events \n\
WHERE event_type = 'ephemeral' \n\
AND first_seen < (strftime('%s', 'now') - 3600);\n\
END;\n\
\n\
-- Replaceable event handling trigger\n\
CREATE TRIGGER handle_replaceable_events\n\
AFTER INSERT ON events\n\
@@ -181,17 +170,19 @@ END;\n\
-- Persistent Subscriptions Logging Tables (Phase 2)\n\
-- Optional database logging for subscription analytics and debugging\n\
\n\
-- Subscription events log\n\
CREATE TABLE subscription_events (\n\
-- Subscriptions log (renamed from subscription_events for clarity)\n\
CREATE TABLE subscriptions (\n\
id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
subscription_id TEXT NOT NULL, -- Subscription ID from client\n\
wsi_pointer TEXT NOT NULL, -- WebSocket pointer address (hex string)\n\
client_ip TEXT NOT NULL, -- Client IP address\n\
event_type TEXT NOT NULL CHECK (event_type IN ('created', 'closed', 'expired', 'disconnected')),\n\
filter_json TEXT, -- JSON representation of filters (for created events)\n\
events_sent INTEGER DEFAULT 0, -- Number of events sent to this subscription\n\
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
ended_at INTEGER, -- When subscription ended (for closed/expired/disconnected)\n\
duration INTEGER -- Computed: ended_at - created_at\n\
duration INTEGER, -- Computed: ended_at - created_at\n\
UNIQUE(subscription_id, wsi_pointer) -- Prevent duplicate subscriptions per connection\n\
);\n\
\n\
-- Subscription metrics summary\n\
@@ -207,34 +198,23 @@ CREATE TABLE subscription_metrics (\n\
UNIQUE(date)\n\
);\n\
\n\
-- Event broadcasting log (optional, for detailed analytics)\n\
CREATE TABLE event_broadcasts (\n\
id INTEGER PRIMARY KEY AUTOINCREMENT,\n\
event_id TEXT NOT NULL, -- Event ID that was broadcast\n\
subscription_id TEXT NOT NULL, -- Subscription that received it\n\
client_ip TEXT NOT NULL, -- Client IP\n\
broadcast_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
FOREIGN KEY (event_id) REFERENCES events(id)\n\
);\n\
\n\
-- Indexes for subscription logging performance\n\
CREATE INDEX idx_subscription_events_id ON subscription_events(subscription_id);\n\
CREATE INDEX idx_subscription_events_type ON subscription_events(event_type);\n\
CREATE INDEX idx_subscription_events_created ON subscription_events(created_at DESC);\n\
CREATE INDEX idx_subscription_events_client ON subscription_events(client_ip);\n\
CREATE INDEX idx_subscriptions_id ON subscriptions(subscription_id);\n\
CREATE INDEX idx_subscriptions_type ON subscriptions(event_type);\n\
CREATE INDEX idx_subscriptions_created ON subscriptions(created_at DESC);\n\
CREATE INDEX idx_subscriptions_client ON subscriptions(client_ip);\n\
CREATE INDEX idx_subscriptions_wsi ON subscriptions(wsi_pointer);\n\
\n\
CREATE INDEX idx_subscription_metrics_date ON subscription_metrics(date DESC);\n\
\n\
CREATE INDEX idx_event_broadcasts_event ON event_broadcasts(event_id);\n\
CREATE INDEX idx_event_broadcasts_sub ON event_broadcasts(subscription_id);\n\
CREATE INDEX idx_event_broadcasts_time ON event_broadcasts(broadcast_at DESC);\n\
\n\
-- Trigger to update subscription duration when ended\n\
CREATE TRIGGER update_subscription_duration\n\
AFTER UPDATE OF ended_at ON subscription_events\n\
AFTER UPDATE OF ended_at ON subscriptions\n\
WHEN NEW.ended_at IS NOT NULL AND OLD.ended_at IS NULL\n\
BEGIN\n\
UPDATE subscription_events\n\
UPDATE subscriptions\n\
SET duration = NEW.ended_at - NEW.created_at\n\
WHERE id = NEW.id;\n\
END;\n\
@@ -249,24 +229,27 @@ SELECT\n\
MAX(events_sent) as max_events_sent,\n\
AVG(events_sent) as avg_events_sent,\n\
COUNT(DISTINCT client_ip) as unique_clients\n\
FROM subscription_events\n\
FROM subscriptions\n\
GROUP BY date(created_at, 'unixepoch')\n\
ORDER BY date DESC;\n\
\n\
-- View for current active subscriptions (from log perspective)\n\
CREATE VIEW active_subscriptions_log AS\n\
SELECT\n\
subscription_id,\n\
client_ip,\n\
filter_json,\n\
events_sent,\n\
created_at,\n\
(strftime('%s', 'now') - created_at) as duration_seconds\n\
FROM subscription_events\n\
WHERE event_type = 'created'\n\
AND subscription_id NOT IN (\n\
SELECT subscription_id FROM subscription_events\n\
WHERE event_type IN ('closed', 'expired', 'disconnected')\n\
s.subscription_id,\n\
s.client_ip,\n\
s.filter_json,\n\
s.events_sent,\n\
s.created_at,\n\
(strftime('%s', 'now') - s.created_at) as duration_seconds,\n\
s.wsi_pointer\n\
FROM subscriptions s\n\
WHERE s.event_type = 'created'\n\
AND NOT EXISTS (\n\
SELECT 1 FROM subscriptions s2\n\
WHERE s2.subscription_id = s.subscription_id\n\
AND s2.wsi_pointer = s.wsi_pointer\n\
AND s2.event_type IN ('closed', 'expired', 'disconnected')\n\
);\n\
\n\
-- Database Statistics Views for Admin API\n\

View File

@@ -25,6 +25,9 @@ int validate_timestamp_range(long since, long until, char* error_message, size_t
int validate_numeric_limits(int limit, char* error_message, size_t error_size);
int validate_search_term(const char* search_term, char* error_message, size_t error_size);
// Forward declaration for monitoring function
void monitoring_on_subscription_change(void);
// Global database variable
extern sqlite3* g_db;
@@ -241,8 +244,31 @@ int add_subscription_to_manager(subscription_t* sub) {
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
// Check global limits
if (g_subscription_manager.total_subscriptions >= g_subscription_manager.max_total_subscriptions) {
// Check for existing subscription with same ID and WebSocket connection
// Remove it first to prevent duplicates (implements subscription replacement per NIP-01)
subscription_t** current = &g_subscription_manager.active_subscriptions;
int found_duplicate = 0;
subscription_t* duplicate_old = NULL;
while (*current) {
subscription_t* existing = *current;
// Match by subscription ID and WebSocket pointer
if (strcmp(existing->id, sub->id) == 0 && existing->wsi == sub->wsi) {
// Found duplicate: mark inactive and unlink from global list under lock
existing->active = 0;
*current = existing->next;
g_subscription_manager.total_subscriptions--;
found_duplicate = 1;
duplicate_old = existing; // defer free until after per-session unlink
break;
}
current = &(existing->next);
}
// Check global limits (only if not replacing an existing subscription)
if (!found_duplicate && g_subscription_manager.total_subscriptions >= g_subscription_manager.max_total_subscriptions) {
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
DEBUG_ERROR("Maximum total subscriptions reached");
return -1;
@@ -252,13 +278,44 @@ int add_subscription_to_manager(subscription_t* sub) {
sub->next = g_subscription_manager.active_subscriptions;
g_subscription_manager.active_subscriptions = sub;
g_subscription_manager.total_subscriptions++;
// Only increment total_created if this is a new subscription (not a replacement)
if (!found_duplicate) {
g_subscription_manager.total_created++;
}
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
// Log subscription creation to database
// If we replaced an existing subscription, unlink it from the per-session list before freeing
if (duplicate_old) {
// Obtain per-session data for this wsi
struct per_session_data* pss = (struct per_session_data*) lws_wsi_user(duplicate_old->wsi);
if (pss) {
pthread_mutex_lock(&pss->session_lock);
struct subscription** scur = &pss->subscriptions;
while (*scur) {
if (*scur == duplicate_old) {
// Unlink by pointer identity to avoid removing the newly-added one
*scur = duplicate_old->session_next;
if (pss->subscription_count > 0) {
pss->subscription_count--;
}
break;
}
scur = &((*scur)->session_next);
}
pthread_mutex_unlock(&pss->session_lock);
}
// Now safe to free the old subscription
free_subscription(duplicate_old);
}
// Log subscription creation to database (INSERT OR REPLACE handles duplicates)
log_subscription_created(sub);
// Trigger monitoring update for subscription changes
monitoring_on_subscription_change();
return 0;
}
@@ -306,6 +363,9 @@ int remove_subscription_from_manager(const char* sub_id, struct lws* wsi) {
// Update events sent counter before freeing
update_subscription_events_sent(sub_id_copy, events_sent_copy);
// Trigger monitoring update for subscription changes
monitoring_on_subscription_change();
free_subscription(sub);
return 0;
}
@@ -324,37 +384,52 @@ int remove_subscription_from_manager(const char* sub_id, struct lws* wsi) {
// Check if an event matches a subscription filter
int event_matches_filter(cJSON* event, subscription_filter_t* filter) {
DEBUG_TRACE("Checking event against subscription filter");
if (!event || !filter) {
DEBUG_TRACE("Exiting event_matches_filter - null parameters");
return 0;
}
// Debug: Log event details being tested
cJSON* event_kind_obj = cJSON_GetObjectItem(event, "kind");
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
cJSON* event_created_at_obj = cJSON_GetObjectItem(event, "created_at");
DEBUG_TRACE("FILTER_MATCH: Testing event kind=%d id=%.8s created_at=%ld",
event_kind_obj ? (int)cJSON_GetNumberValue(event_kind_obj) : -1,
event_id_obj && cJSON_IsString(event_id_obj) ? cJSON_GetStringValue(event_id_obj) : "null",
event_created_at_obj ? (long)cJSON_GetNumberValue(event_created_at_obj) : 0);
// Check kinds filter
if (filter->kinds && cJSON_IsArray(filter->kinds)) {
DEBUG_TRACE("FILTER_MATCH: Checking kinds filter with %d kinds", cJSON_GetArraySize(filter->kinds));
cJSON* event_kind = cJSON_GetObjectItem(event, "kind");
if (!event_kind || !cJSON_IsNumber(event_kind)) {
DEBUG_WARN("FILTER_MATCH: Event has no valid kind field");
return 0;
}
int event_kind_val = (int)cJSON_GetNumberValue(event_kind);
int kind_match = 0;
DEBUG_TRACE("FILTER_MATCH: Event kind=%d", event_kind_val);
int kind_match = 0;
cJSON* kind_item = NULL;
cJSON_ArrayForEach(kind_item, filter->kinds) {
if (cJSON_IsNumber(kind_item)) {
int filter_kind = (int)cJSON_GetNumberValue(kind_item);
DEBUG_TRACE("FILTER_MATCH: Comparing event kind %d with filter kind %d", event_kind_val, filter_kind);
if (filter_kind == event_kind_val) {
kind_match = 1;
DEBUG_TRACE("FILTER_MATCH: Kind matched!");
break;
}
}
}
if (!kind_match) {
DEBUG_TRACE("FILTER_MATCH: No kind match, filter rejected");
return 0;
}
DEBUG_TRACE("FILTER_MATCH: Kinds filter passed");
}
// Check authors filter
@@ -415,13 +490,19 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter) {
if (filter->since > 0) {
cJSON* event_created_at = cJSON_GetObjectItem(event, "created_at");
if (!event_created_at || !cJSON_IsNumber(event_created_at)) {
DEBUG_WARN("FILTER_MATCH: Event has no valid created_at field");
return 0;
}
long event_timestamp = (long)cJSON_GetNumberValue(event_created_at);
DEBUG_TRACE("FILTER_MATCH: Checking since filter: event_ts=%ld filter_since=%ld",
event_timestamp, filter->since);
if (event_timestamp < filter->since) {
DEBUG_TRACE("FILTER_MATCH: Event too old (before since), filter rejected");
return 0;
}
DEBUG_TRACE("FILTER_MATCH: Since filter passed");
}
// Check until filter
@@ -503,7 +584,7 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter) {
}
}
DEBUG_TRACE("Exiting event_matches_filter - match found");
DEBUG_TRACE("FILTER_MATCH: All filters passed, event matches!");
return 1; // All filters passed
}
@@ -513,23 +594,29 @@ int event_matches_subscription(cJSON* event, subscription_t* subscription) {
return 0;
}
DEBUG_TRACE("SUB_MATCH: Testing subscription '%s'", subscription->id);
int filter_num = 0;
subscription_filter_t* filter = subscription->filters;
while (filter) {
filter_num++;
DEBUG_TRACE("SUB_MATCH: Testing filter #%d", filter_num);
if (event_matches_filter(event, filter)) {
DEBUG_TRACE("SUB_MATCH: Filter #%d matched! Subscription '%s' matches",
filter_num, subscription->id);
return 1; // Match found (OR logic)
}
filter = filter->next;
}
DEBUG_TRACE("SUB_MATCH: No filters matched for subscription '%s'", subscription->id);
return 0; // No filters matched
}
// Broadcast event to all matching subscriptions (thread-safe)
int broadcast_event_to_subscriptions(cJSON* event) {
DEBUG_TRACE("Broadcasting event to subscriptions");
if (!event) {
DEBUG_TRACE("Exiting broadcast_event_to_subscriptions - null event");
return 0;
}
@@ -546,6 +633,16 @@ int broadcast_event_to_subscriptions(cJSON* event) {
int broadcasts = 0;
// Log event details
cJSON* event_kind = cJSON_GetObjectItem(event, "kind");
cJSON* event_id = cJSON_GetObjectItem(event, "id");
cJSON* event_created_at = cJSON_GetObjectItem(event, "created_at");
DEBUG_TRACE("BROADCAST: Event kind=%d id=%.8s created_at=%ld",
event_kind ? (int)cJSON_GetNumberValue(event_kind) : -1,
event_id && cJSON_IsString(event_id) ? cJSON_GetStringValue(event_id) : "null",
event_created_at ? (long)cJSON_GetNumberValue(event_created_at) : 0);
// Create a temporary list of matching subscriptions to avoid holding lock during I/O
typedef struct temp_sub {
struct lws* wsi;
@@ -560,6 +657,14 @@ int broadcast_event_to_subscriptions(cJSON* event) {
// First pass: collect matching subscriptions while holding lock
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
int total_subs = 0;
subscription_t* count_sub = g_subscription_manager.active_subscriptions;
while (count_sub) {
total_subs++;
count_sub = count_sub->next;
}
DEBUG_TRACE("BROADCAST: Checking %d active subscriptions", total_subs);
subscription_t* sub = g_subscription_manager.active_subscriptions;
while (sub) {
if (sub->active && sub->wsi && event_matches_subscription(event, sub)) {
@@ -611,10 +716,17 @@ int broadcast_event_to_subscriptions(cJSON* event) {
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
// Send to WebSocket connection with error checking
// Note: lws_write can fail if connection is closed, but won't crash
int write_result = lws_write(current_temp->wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
if (write_result >= 0) {
// DEBUG: Log WebSocket frame details before sending
DEBUG_TRACE("WS_FRAME_SEND: type=EVENT sub=%s len=%zu data=%.100s%s",
current_temp->id,
msg_len,
msg_str,
msg_len > 100 ? "..." : "");
// Queue message for proper libwebsockets pattern
struct per_session_data* pss = (struct per_session_data*)lws_wsi_user(current_temp->wsi);
if (queue_message(current_temp->wsi, pss, msg_str, msg_len, LWS_WRITE_TEXT) == 0) {
// Message queued successfully
broadcasts++;
// Update events sent counter for this subscription
@@ -632,10 +744,13 @@ int broadcast_event_to_subscriptions(cJSON* event) {
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
// Log event broadcast to database (optional - can be disabled for performance)
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
if (event_id_obj && cJSON_IsString(event_id_obj)) {
log_event_broadcast(cJSON_GetStringValue(event_id_obj), current_temp->id, current_temp->client_ip);
}
// NOTE: event_broadcasts table removed due to FOREIGN KEY constraint issues
// cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
// if (event_id_obj && cJSON_IsString(event_id_obj)) {
// log_event_broadcast(cJSON_GetStringValue(event_id_obj), current_temp->id, current_temp->client_ip);
// }
} else {
DEBUG_ERROR("Failed to queue EVENT message for sub=%s", current_temp->id);
}
free(buf);
@@ -660,10 +775,41 @@ int broadcast_event_to_subscriptions(cJSON* event) {
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
DEBUG_LOG("Event broadcast complete: %d subscriptions matched", broadcasts);
DEBUG_TRACE("Exiting broadcast_event_to_subscriptions");
return broadcasts;
}
// Check if any active subscription exists for a specific event kind (thread-safe)
int has_subscriptions_for_kind(int event_kind) {
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
subscription_t* sub = g_subscription_manager.active_subscriptions;
while (sub) {
if (sub->active && sub->filters) {
subscription_filter_t* filter = sub->filters;
while (filter) {
// Check if this filter includes our event kind
if (filter->kinds && cJSON_IsArray(filter->kinds)) {
cJSON* kind_item = NULL;
cJSON_ArrayForEach(kind_item, filter->kinds) {
if (cJSON_IsNumber(kind_item)) {
int filter_kind = (int)cJSON_GetNumberValue(kind_item);
if (filter_kind == event_kind) {
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
return 1; // Found matching subscription
}
}
}
}
filter = filter->next;
}
}
sub = sub->next;
}
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
return 0; // No matching subscriptions
}
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
@@ -675,6 +821,10 @@ int broadcast_event_to_subscriptions(cJSON* event) {
void log_subscription_created(const subscription_t* sub) {
if (!g_db || !sub) return;
// Convert wsi pointer to string
char wsi_str[32];
snprintf(wsi_str, sizeof(wsi_str), "%p", (void*)sub->wsi);
// Create filter JSON for logging
char* filter_json = NULL;
if (sub->filters) {
@@ -721,16 +871,18 @@ void log_subscription_created(const subscription_t* sub) {
cJSON_Delete(filters_array);
}
// Use INSERT OR REPLACE to handle duplicates automatically
const char* sql =
"INSERT INTO subscription_events (subscription_id, client_ip, event_type, filter_json) "
"VALUES (?, ?, 'created', ?)";
"INSERT OR REPLACE INTO subscriptions (subscription_id, wsi_pointer, client_ip, event_type, filter_json) "
"VALUES (?, ?, ?, 'created', ?)";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, sub->id, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, sub->client_ip, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 3, filter_json ? filter_json : "[]", -1, SQLITE_TRANSIENT);
sqlite3_bind_text(stmt, 2, wsi_str, -1, SQLITE_TRANSIENT);
sqlite3_bind_text(stmt, 3, sub->client_ip, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 4, filter_json ? filter_json : "[]", -1, SQLITE_TRANSIENT);
sqlite3_step(stmt);
sqlite3_finalize(stmt);
@@ -745,8 +897,8 @@ void log_subscription_closed(const char* sub_id, const char* client_ip, const ch
if (!g_db || !sub_id) return;
const char* sql =
"INSERT INTO subscription_events (subscription_id, client_ip, event_type) "
"VALUES (?, ?, 'closed')";
"INSERT INTO subscriptions (subscription_id, wsi_pointer, client_ip, event_type) "
"VALUES (?, '', ?, 'closed')";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
@@ -760,7 +912,7 @@ void log_subscription_closed(const char* sub_id, const char* client_ip, const ch
// Update the corresponding 'created' entry with end time and events sent
const char* update_sql =
"UPDATE subscription_events "
"UPDATE subscriptions "
"SET ended_at = strftime('%s', 'now') "
"WHERE subscription_id = ? AND event_type = 'created' AND ended_at IS NULL";
@@ -778,7 +930,7 @@ void log_subscription_disconnected(const char* client_ip) {
// Mark all active subscriptions for this client as disconnected
const char* sql =
"UPDATE subscription_events "
"UPDATE subscriptions "
"SET ended_at = strftime('%s', 'now') "
"WHERE client_ip = ? AND event_type = 'created' AND ended_at IS NULL";
@@ -793,8 +945,8 @@ void log_subscription_disconnected(const char* client_ip) {
if (changes > 0) {
// Log a disconnection event
const char* insert_sql =
"INSERT INTO subscription_events (subscription_id, client_ip, event_type) "
"VALUES ('disconnect', ?, 'disconnected')";
"INSERT INTO subscriptions (subscription_id, wsi_pointer, client_ip, event_type) "
"VALUES ('disconnect', '', ?, 'disconnected')";
rc = sqlite3_prepare_v2(g_db, insert_sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
@@ -807,31 +959,32 @@ void log_subscription_disconnected(const char* client_ip) {
}
// Log event broadcast to database (optional, can be resource intensive)
/*
 * Record that event `event_id` was delivered to subscription `sub_id`
 * held by client `client_ip`, by inserting a row into event_broadcasts.
 *
 * Best-effort logging: silently does nothing when the DB handle is
 * missing, any argument is NULL, or the INSERT cannot be prepared.
 */
void log_event_broadcast(const char* event_id, const char* sub_id, const char* client_ip) {
if (!g_db || !event_id || !sub_id || !client_ip) return;
const char* sql =
"INSERT INTO event_broadcasts (event_id, subscription_id, client_ip) "
"VALUES (?, ?, ?)";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
// SQLITE_STATIC: the bound strings must stay valid until sqlite3_step
// NOTE(review): assumes the caller keeps them alive for this call — confirm
sqlite3_bind_text(stmt, 1, event_id, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, sub_id, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 3, client_ip, -1, SQLITE_STATIC);
// Step result deliberately ignored; broadcast logging is non-critical
sqlite3_step(stmt);
sqlite3_finalize(stmt);
}
}
// REMOVED: event_broadcasts table removed due to FOREIGN KEY constraint issues
// void log_event_broadcast(const char* event_id, const char* sub_id, const char* client_ip) {
// if (!g_db || !event_id || !sub_id || !client_ip) return;
//
// const char* sql =
// "INSERT INTO event_broadcasts (event_id, subscription_id, client_ip) "
// "VALUES (?, ?, ?)";
//
// sqlite3_stmt* stmt;
// int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
// if (rc == SQLITE_OK) {
// sqlite3_bind_text(stmt, 1, event_id, -1, SQLITE_STATIC);
// sqlite3_bind_text(stmt, 2, sub_id, -1, SQLITE_STATIC);
// sqlite3_bind_text(stmt, 3, client_ip, -1, SQLITE_STATIC);
//
// sqlite3_step(stmt);
// sqlite3_finalize(stmt);
// }
// }
// Update events sent counter for a subscription
void update_subscription_events_sent(const char* sub_id, int events_sent) {
if (!g_db || !sub_id) return;
const char* sql =
"UPDATE subscription_events "
"UPDATE subscriptions "
"SET events_sent = ? "
"WHERE subscription_id = ? AND event_type = 'created'";
@@ -846,6 +999,44 @@ void update_subscription_events_sent(const char* sub_id, int events_sent) {
}
}
// Cleanup all subscriptions on startup
void cleanup_all_subscriptions_on_startup(void) {
    if (!g_db) {
        DEBUG_ERROR("Database not available for startup cleanup");
        return;
    }

    DEBUG_LOG("Performing startup subscription cleanup");

    // Any 'created' row without an end timestamp was left behind by a
    // previous process instance; stamp it with the current time.
    const char* update_sql =
        "UPDATE subscriptions "
        "SET ended_at = strftime('%s', 'now') "
        "WHERE event_type = 'created' AND ended_at IS NULL";

    sqlite3_stmt* update_stmt = NULL;
    if (sqlite3_prepare_v2(g_db, update_sql, -1, &update_stmt, NULL) != SQLITE_OK) {
        DEBUG_ERROR("Failed to prepare startup cleanup query");
        return;
    }

    int step_status = sqlite3_step(update_stmt);
    // Capture the affected-row count before finalizing the statement.
    int rows_updated = sqlite3_changes(g_db);
    sqlite3_finalize(update_stmt);

    if (step_status != SQLITE_DONE) {
        DEBUG_ERROR("Failed to execute startup cleanup");
        return;
    }

    if (rows_updated > 0) {
        DEBUG_LOG("Startup cleanup: marked %d orphaned subscriptions as disconnected", rows_updated);
    } else {
        DEBUG_LOG("Startup cleanup: no orphaned subscriptions found");
    }
}
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////

View File

@@ -115,7 +115,12 @@ int get_active_connections_for_ip(const char* client_ip);
void log_subscription_created(const subscription_t* sub);
void log_subscription_closed(const char* sub_id, const char* client_ip, const char* reason);
void log_subscription_disconnected(const char* client_ip);
void log_event_broadcast(const char* event_id, const char* sub_id, const char* client_ip);
void update_subscription_events_sent(const char* sub_id, int events_sent);
// Subscription query functions
int has_subscriptions_for_kind(int event_kind);
// Startup cleanup function
void cleanup_all_subscriptions_on_startup(void);
#endif // SUBSCRIPTIONS_H

File diff suppressed because it is too large Load Diff

View File

@@ -21,16 +21,24 @@
// Filter validation constants
#define MAX_FILTERS_PER_REQUEST 10
#define MAX_AUTHORS_PER_FILTER 100
#define MAX_IDS_PER_FILTER 100
#define MAX_KINDS_PER_FILTER 50
#define MAX_TAG_VALUES_PER_FILTER 100
#define MAX_AUTHORS_PER_FILTER 1000
#define MAX_IDS_PER_FILTER 1000
#define MAX_KINDS_PER_FILTER 500
#define MAX_TAG_VALUES_PER_FILTER 1000
#define MAX_KIND_VALUE 65535
#define MAX_TIMESTAMP_VALUE 2147483647 // Max 32-bit signed int
#define MAX_LIMIT_VALUE 5000
#define MAX_SEARCH_LENGTH 256
#define MAX_TAG_VALUE_LENGTH 1024
// Message queue node for proper libwebsockets pattern
struct message_queue_node {
unsigned char* data; // Message data (with LWS_PRE space)
size_t length; // Message length (without LWS_PRE)
enum lws_write_protocol type; // LWS_WRITE_TEXT, etc.
struct message_queue_node* next; // Next node in queue
};
// Enhanced per-session data with subscription management, NIP-42 authentication, and rate limiting
struct per_session_data {
int authenticated;
@@ -59,6 +67,18 @@ struct per_session_data {
int malformed_request_count; // Count of malformed requests in current hour
time_t malformed_request_window_start; // Start of current hour window
time_t malformed_request_blocked_until; // Time until blocked for malformed requests
// Message queue for proper libwebsockets pattern (replaces single buffer)
struct message_queue_node* message_queue_head; // Head of message queue
struct message_queue_node* message_queue_tail; // Tail of message queue
int message_queue_count; // Number of messages in queue
int writeable_requested; // Flag: 1 if writeable callback requested
// Message reassembly for handling fragmented WebSocket messages
char* reassembly_buffer; // Buffer for accumulating message fragments (NULL when not reassembling)
size_t reassembly_size; // Current size of accumulated data
size_t reassembly_capacity; // Allocated capacity of reassembly buffer
int reassembly_active; // Flag: 1 if currently reassembling a message
};
// NIP-11 HTTP session data structure for managing buffer lifetime
@@ -73,6 +93,10 @@ struct nip11_session_data {
// Function declarations
int start_websocket_relay(int port_override, int strict_port);
// Message queue functions for proper libwebsockets pattern
int queue_message(struct lws* wsi, struct per_session_data* pss, const char* message, size_t length, enum lws_write_protocol type);
int process_message_queue(struct lws* wsi, struct per_session_data* pss);
// Auth rules checking function from request_validator.c
int check_database_auth_rules(const char *pubkey, const char *operation, const char *resource_hash);

5
tests/.test_keys.txt Normal file
View File

@@ -0,0 +1,5 @@
# Test key configuration (from make_and_restart_relay.sh -t)
ADMIN_PRIVATE_KEY="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
ADMIN_PUBLIC_KEY="6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3"
RELAY_PUBLIC_KEY="4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"
RELAY_URL="ws://localhost:8888"

12
tests/debug.log Normal file
View File

@@ -0,0 +1,12 @@
=== NOSTR WebSocket Debug Log Started ===
[14:13:42.079] SEND localhost:8888: ["EVENT", {
"pubkey": "e74e808f64b82fe4671b92cdf83f6dd5f5f44dbcb67fbd0e044f34a6193e0994",
"created_at": 1761499244,
"kind": 1059,
"tags": [["p", "4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"]],
"content": "ApTb8y2oD3/TtVCV73Szhgfh5ODlluGd5zjsH44g5BBwaGB1NshOJ/5kF/XN0TfYJKQBe07UTpnOYMZ4l2ppU6SrR8Tor+ZEiAF/kpCpa/x6LDDIvf4mueQicDKjOf8Y6oEbsxYjtFrpuSC0LLMgLaVhcZjAgVD0YQTo+8nHOzHZD5RBr305vdnrxIe4ubEficAHCpnKq9L3A46AIyb+aHjjTbSYmB061cf6hzLSnmdh5xeACExjhxwsX9ivSvqGYcDNsH1JCM8EYQyRX9xAPDBYM1yuS8PpadqMluOcqOd/FFYyjYNpFrardblPsjUzZTz/TDSLyrYFDUKNa7pWIhW1asc1ZaY0ry0AoWnbl/QyMxqBjDFXd3mJfWccYsOI/Yrx3sxbZdL+ayRlQeQuDk/M9rQkH8GN/5+GE1aN5I6eVl0F37Axc/lLuIt/AIpoTwZYAEi9j/BYGLP6sYkjUp0foz91QximOTgu8evynu+nfAv330HVkipTIGOjEZea7QNSK0Fylxs8fanHlmiqWGyfyBeoWpxGslHZVu6K9k7GC8ABEIdNRa8vlqlphPfWPCS70Lnq3LgeKOj1C3sNF9ST8g7pth/0FEZgXruzhpx/EyjsasNbdLZg3iX1QwRS0P4L341Flrztovt8npyP9ytTiukkYIQzXCX8XuWjiaUuzXiLkVazjh0Nl03ikKKu2+7nuaBB92geBjbGT76zZ6HeXBgcmC7dWn7pHhzqu+QTonZK0oCl427Fs0eXiYsILjxFFQkmk7OHXgdZF9jquNXloz5lgwY9S3xj4JyRwLN/9xfh16awxLZNEFvX10X97bXsmNMRUDrJJPkKMTSxZpvuTbd+Lx2iB++4NyGZibNa6nOWOJG9d2LwEzIcIHS0uQpEIPl7Ccz6+rmkVh9kLbB2rda2fYp9GCOcn6XbfaXZZXJM+HAQwPJgrtDiuQex0tEIcQcB9CYCN4ze9HCt1kb23TUgEDAipz/RqYP4dOCYmRZ7vaYk/irJ+iRDfnvPK0Id1TrSeo5kaVc7py2zWZRVdndpTM8RvW0SLwdldXDIv+ym/mS0L7bchoaYjoNeuTNKQ6AOoc0E7f4ySr65FUKYd2FTvIsP2Avsa3S+D0za30ensxr733l80AQlVmUPrhsgOzzjEuOW1hGlGus38X+CDDEuMSJnq3hvz/CxVtAk71Zkbyr5lc1BPi758Y4rlZFQnhaKYKv5nSFJc7GtDykv+1cwxNGC6AxGKprnYMDVxuAIFYBztFitdO5BsjWvvKzAbleszewtGfjE2NgltIJk+gQlTpWvLNxd3gvb+qHarfEv7BPnPfsKktDpEfuNMKXdJPANyACq5gXj854o/X8iO2iLm7JSdMhEQgIIyHNyLCCQdLDnqDWIfcdyIzAfRilSCwImt3CVJBGD7HoXRbwGRR3vgEBcoVPmsYzaU9vr62I=",
"id": "75c178ee47aac3ab9e984ddb85bdf9d8c68ade0d97e9cd86bb39e3110218a589",
"sig": "aba8382cc8d6ba6bba467109d2ddc19718732fe803d71e73fd2db62c1cbbb1b4527447240906e01755139067a71c75d8c03271826ca5d0226c818cb7fb495fe2"
}]
[14:13:42.083] RECV localhost:8888: ["OK", "75c178ee47aac3ab9e984ddb85bdf9d8c68ade0d97e9cd86bb39e3110218a589", true, ""]

35
tests/ephemeral_test.sh Executable file
View File

@@ -0,0 +1,35 @@
#!/bin/bash
# Simplified Ephemeral Event Test
# Tests that ephemeral events are broadcast to active subscriptions
#
# Flow: sign a kind-20000 event with nak, open ONE websocat connection,
# subscribe to kind 20000, then publish the event on the same connection
# and print whatever the relay sends back.
# Requires: nak and websocat on PATH, relay at ws://127.0.0.1:8888.
echo "=== Generating Ephemeral Event (kind 20000) ==="
# nak signs the event (kind 20000 is in the ephemeral range)
event=$(nak event --kind 20000 --content "test ephemeral event")
echo "$event"
echo ""
echo "=== Testing Ephemeral Event Broadcast ==="
subscription='["REQ","test_sub",{"kinds":[20000],"limit":10}]'
echo "Subscription Filter:"
echo "$subscription"
echo ""
# Wrap the signed event JSON in a Nostr EVENT frame
event_msg='["EVENT",'"$event"']'
echo "Event Message:"
echo "$event_msg"
echo ""
echo "=== Relay Responses ==="
# Subshell feeds both frames over a single WebSocket connection so the
# broadcast can be observed on the subscribing connection itself.
(
# Send subscription
printf "%s\n" "$subscription"
# Wait for subscription to establish
sleep 1
# Send ephemeral event on same connection
printf "%s\n" "$event_msg"
# Wait for responses
sleep 2
) | timeout 5 websocat ws://127.0.0.1:8888
echo ""
echo "Test complete!"

63
tests/large_event_test.sh Executable file
View File

@@ -0,0 +1,63 @@
#!/bin/bash
# Test script for posting large events (>4KB) to test partial write handling
# Uses nak to properly sign events with large content
#
# Refactor: the four copy-pasted 5KB/10KB/20KB/50KB stanzas are now one
# data-driven loop, preserving the original output and pacing.

RELAY_URL="ws://localhost:8888"

# Check if nak is installed
if ! command -v nak &> /dev/null; then
    echo "Error: nak is not installed. Install with: go install github.com/fiatjaf/nak@latest"
    exit 1
fi

# Generate a test private key if not set
if [ -z "$NOSTR_PRIVATE_KEY" ]; then
    echo "Generating temporary test key..."
    export NOSTR_PRIVATE_KEY=$(nak key generate)
fi

echo "=== Large Event Test ==="
echo "Testing partial write handling with events >4KB"
echo "Relay: $RELAY_URL"
echo ""

# Size tiers: "<test-number> <label> <fill-char> <byte-count>"
TESTS=(
    "1 5KB A 5000"
    "2 10KB B 10000"
    "3 20KB C 20000"
    "4 50KB D 50000"
)

for spec in "${TESTS[@]}"; do
    read -r num label fill_char byte_count <<< "$spec"
    # Blank line between tests (the first test had none in the original)
    if [ "$num" -gt 1 ]; then
        echo ""
    fi
    echo "Test ${num}: Posting ${label} event..."
    content=$(python3 -c "print('${fill_char}' * ${byte_count})")
    echo "$content" | nak event -k 1 --content - $RELAY_URL
    # Original slept 1s between posts but not after the last one
    if [ "$num" -lt 4 ]; then
        sleep 1
    fi
done

echo ""
echo "=== Test Complete ==="
echo ""
echo "Check relay.log for:"
echo "  - 'Queued partial write' messages (indicates buffering is working)"
echo "  - 'write completed' messages (indicates retry succeeded)"
echo "  - No 'Invalid frame header' errors"
echo ""
echo "To view logs in real-time:"
echo "  tail -f relay.log | grep -E '(partial|write completed|Invalid frame)'"
echo ""
echo "To check if events were stored:"
echo "  sqlite3 build/*.db 'SELECT id, length(content) as content_size FROM events ORDER BY created_at DESC LIMIT 4;'"

View File

@@ -3,6 +3,19 @@
# Test script to post kind 1 events to the relay every second
# Cycles through three different secret keys
# Content includes current timestamp
#
# Usage: ./post_events.sh <relay_url>
# Example: ./post_events.sh ws://localhost:8888
# Example: ./post_events.sh wss://relay.laantungir.net
# Check if relay URL is provided
if [ -z "$1" ]; then
echo "Error: Relay URL is required"
echo "Usage: $0 <relay_url>"
echo "Example: $0 ws://localhost:8888"
echo "Example: $0 wss://relay.laantungir.net"
exit 1
fi
# Array of secret keys to cycle through
SECRET_KEYS=(
@@ -11,7 +24,7 @@ SECRET_KEYS=(
"1618aaa21f5bd45c5ffede0d9a60556db67d4a046900e5f66b0bae5c01c801fb"
)
RELAY_URL="ws://localhost:8888"
RELAY_URL="$1"
KEY_INDEX=0
echo "Starting event posting test to $RELAY_URL"
@@ -36,5 +49,5 @@ while true; do
KEY_INDEX=$(( (KEY_INDEX + 1) % ${#SECRET_KEYS[@]} ))
# Wait 1 second
sleep 1
sleep .2
done

View File

@@ -1,203 +0,0 @@
#!/bin/bash
# Rate Limiting Test Suite for C-Relay
# Tests rate limiting and abuse prevention mechanisms
set -e
# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
TEST_TIMEOUT=15
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test counters
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
# Function to test rate limiting
# Sends `burst_count` copies of `message` and checks whether the relay's
# rate limiting kicked in, comparing against `expected_limited`.
# Updates TOTAL_TESTS / PASSED_TESTS / FAILED_TESTS; returns 0 on pass, 1 on fail.
test_rate_limiting() {
    local description="$1"
    local message="$2"
    local burst_count="${3:-10}"
    local expected_limited="${4:-false}"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    echo -n "Testing $description... "

    local rate_limited=false
    local success_count=0
    local error_count=0

    # Send burst of messages
    for i in $(seq 1 "$burst_count"); do
        local response
        response=$(echo "$message" | timeout 2 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'TIMEOUT')

        if [[ "$response" == *"rate limit"* ]] || [[ "$response" == *"too many"* ]] || [[ "$response" == *"TOO_MANY"* ]]; then
            rate_limited=true
        elif [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]] || [[ "$response" == *"OK"* ]]; then
            # BUGFIX: ((var++)) exits with status 1 when var is 0, which
            # kills the whole script under 'set -e'; use plain arithmetic
            # assignment, which always succeeds.
            success_count=$((success_count + 1))
        else
            error_count=$((error_count + 1))
        fi

        # Small delay between requests
        sleep 0.05
    done

    if [[ "$expected_limited" == "true" ]]; then
        if [[ "$rate_limited" == "true" ]]; then
            echo -e "${GREEN}PASSED${NC} - Rate limiting triggered as expected"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        else
            echo -e "${RED}FAILED${NC} - Rate limiting not triggered (expected)"
            FAILED_TESTS=$((FAILED_TESTS + 1))
            return 1
        fi
    else
        if [[ "$rate_limited" == "false" ]]; then
            echo -e "${GREEN}PASSED${NC} - No rate limiting for normal traffic"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            return 0
        else
            echo -e "${YELLOW}UNCERTAIN${NC} - Unexpected rate limiting"
            PASSED_TESTS=$((PASSED_TESTS + 1)) # Count as passed since it's conservative
            return 0
        fi
    fi
}
# Function to test sustained load
# Repeatedly sends `message` for `duration` seconds and reports whether
# rate limiting activated, along with the observed success rate.
# Always counts the test as passed (absence of limiting may be a policy choice).
test_sustained_load() {
    local description="$1"
    local message="$2"
    local duration="${3:-10}"

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    echo -n "Testing $description... "

    local start_time
    start_time=$(date +%s)
    local rate_limited=false
    local total_requests=0
    local successful_requests=0

    while [[ $(($(date +%s) - start_time)) -lt duration ]]; do
        # BUGFIX: ((var++)) returns status 1 when var is 0, aborting the
        # script under 'set -e'; plain arithmetic assignment is safe.
        total_requests=$((total_requests + 1))

        local response
        response=$(echo "$message" | timeout 1 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'TIMEOUT')

        if [[ "$response" == *"rate limit"* ]] || [[ "$response" == *"too many"* ]] || [[ "$response" == *"TOO_MANY"* ]]; then
            rate_limited=true
        elif [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]] || [[ "$response" == *"OK"* ]]; then
            successful_requests=$((successful_requests + 1))
        fi

        # Small delay to avoid overwhelming
        sleep 0.1
    done

    local success_rate=0
    if [[ $total_requests -gt 0 ]]; then
        success_rate=$((successful_requests * 100 / total_requests))
    fi

    if [[ "$rate_limited" == "true" ]]; then
        echo -e "${GREEN}PASSED${NC} - Rate limiting activated under sustained load (${success_rate}% success rate)"
        PASSED_TESTS=$((PASSED_TESTS + 1))
        return 0
    else
        echo -e "${YELLOW}UNCERTAIN${NC} - No rate limiting detected (${success_rate}% success rate)"
        # This might be acceptable if rate limiting is very permissive
        PASSED_TESTS=$((PASSED_TESTS + 1))
        return 0
    fi
}
echo "=========================================="
echo "C-Relay Rate Limiting Test Suite"
echo "=========================================="
echo "Testing rate limiting against relay at ws://$RELAY_HOST:$RELAY_PORT"
echo ""

# Test basic connectivity first
# NOTE: each test call is followed by '|| true' so a single failing test
# records its FAILED result instead of aborting the suite via 'set -e'
# (the summary below would otherwise never be reached).
echo "=== Basic Connectivity Test ==="
test_rate_limiting "Basic connectivity" '["REQ","rate_test",{}]' 1 false || true

echo ""
echo "=== Burst Request Testing ==="
# Test rapid succession of requests
test_rate_limiting "Rapid REQ messages" '["REQ","burst_req_'$(date +%s%N)'",{}]' 20 true || true
test_rate_limiting "Rapid COUNT messages" '["COUNT","burst_count_'$(date +%s%N)'",{}]' 20 true || true
test_rate_limiting "Rapid CLOSE messages" '["CLOSE","burst_close"]' 20 true || true

echo ""
echo "=== Malformed Message Rate Limiting ==="
# Test if malformed messages trigger rate limiting faster
test_rate_limiting "Malformed JSON burst" '["REQ","malformed"' 15 true || true
test_rate_limiting "Invalid message type burst" '["INVALID","test",{}]' 15 true || true
test_rate_limiting "Empty message burst" '[]' 15 true || true

echo ""
echo "=== Sustained Load Testing ==="
# Test sustained moderate load
test_sustained_load "Sustained REQ load" '["REQ","sustained_'$(date +%s%N)'",{}]' 10 || true
test_sustained_load "Sustained COUNT load" '["COUNT","sustained_count_'$(date +%s%N)'",{}]' 10 || true

echo ""
echo "=== Filter Complexity Testing ==="
# Test if complex filters trigger rate limiting
test_rate_limiting "Complex filter burst" '["REQ","complex_'$(date +%s%N)'",{"authors":["a","b","c"],"kinds":[1,2,3],"#e":["x","y","z"],"#p":["m","n","o"],"since":1000000000,"until":2000000000,"limit":100}]' 10 true || true

echo ""
echo "=== Subscription Management Testing ==="
# Test subscription creation/deletion rate limiting
echo -n "Testing subscription churn... "
# BUGFIX: removed stray 'local churn_test_passed=true' — 'local' is only
# valid inside a function, so it failed here and (with 'set -e') aborted
# the script; the variable was never read anyway.
for i in $(seq 1 25); do
    # Create subscription
    echo "[\"REQ\",\"churn_${i}_$(date +%s%N)\",{}]" | timeout 1 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 || true
    # Close subscription
    echo "[\"CLOSE\",\"churn_${i}_*\"]" | timeout 1 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 || true
    sleep 0.05
done

# Check if relay is still responsive
if echo 'ping' | timeout 2 websocat -n1 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1; then
    echo -e "${GREEN}PASSED${NC} - Subscription churn handled"
    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    PASSED_TESTS=$((PASSED_TESTS + 1))
else
    echo -e "${RED}FAILED${NC} - Relay unresponsive after subscription churn"
    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    FAILED_TESTS=$((FAILED_TESTS + 1))
fi

echo ""
echo "=== Test Results ==="
echo "Total tests: $TOTAL_TESTS"
echo -e "Passed: ${GREEN}$PASSED_TESTS${NC}"
echo -e "Failed: ${RED}$FAILED_TESTS${NC}"

if [[ $FAILED_TESTS -eq 0 ]]; then
    echo -e "${GREEN}✓ All rate limiting tests passed!${NC}"
    echo "Rate limiting appears to be working correctly."
    exit 0
else
    echo -e "${RED}✗ Some rate limiting tests failed!${NC}"
    echo "Rate limiting may not be properly configured."
    exit 1
fi

BIN
tests/sendDM Executable file

Binary file not shown.

296
tests/sendDM.c Normal file
View File

@@ -0,0 +1,296 @@
/*
* NIP-17 Private Direct Messages - Command Line Application
*
* This example demonstrates how to send NIP-17 private direct messages
* using the Nostr Core Library.
*
* Usage:
* ./send_nip17_dm -r <recipient> -s <sender> [-R <relay>]... <message>
*
* Options:
* -r <recipient>: The recipient's public key (npub or hex)
* -s <sender>: The sender's private key (nsec or hex)
* -R <relay>: Relay URL to send to (can be specified multiple times)
* <message>: The message to send (must be the last argument)
*
* If no relays are specified, uses default relay.
* If no sender key is provided, uses a default test key.
*
* Examples:
* ./send_nip17_dm -r npub1example... -s nsec1test... -R wss://relay1.com "Hello from NIP-17!"
* ./send_nip17_dm -r 4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa -s aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -R ws://localhost:8888 "config"
*/
#define _GNU_SOURCE
#define _POSIX_C_SOURCE 200809L
#include "../nostr_core_lib/nostr_core/nostr_core.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <getopt.h>
// Default test private key (for demonstration - DO NOT USE IN PRODUCTION)
#define DEFAULT_SENDER_NSEC "nsec12kgt0dv2k2safv6s32w8f89z9uw27e68hjaa0d66c5xvk70ezpwqncd045"
// Default relay for sending DMs
#define DEFAULT_RELAY "wss://relay.laantungir.net"
// Progress callback for publishing
// Prints per-relay progress lines while publishing, and a final summary
// line when called with relay_url == NULL.
void publish_progress_callback(const char* relay_url, const char* status,
                               const char* message, int success_count,
                               int total_relays, int completed_relays, void* user_data) {
    (void)user_data;

    if (!relay_url) {
        // Final summary callback: no relay-specific information.
        printf("📡 PUBLISH COMPLETE: %d/%d successful\n", success_count, total_relays);
        return;
    }

    printf("📡 [%s]: %s", relay_url, status);
    if (message) {
        printf(" - %s", message);
    }
    printf(" (%d/%d completed, %d successful)\n", completed_relays, total_relays, success_count);
}
/**
 * Convert npub or hex pubkey to hex format.
 *
 * Accepts either a 64-character string (taken to be hex as-is) or a
 * bech32 "npub1..." key, and writes the hex form into output_hex.
 * Returns 0 on success, -1 on unrecognized input (error on stderr).
 */
int convert_pubkey_to_hex(const char* input_pubkey, char* output_hex) {
    // Length check comes first, matching the original precedence:
    // any 64-character input is treated as hex without further validation.
    if (strlen(input_pubkey) == 64) {
        strcpy(output_hex, input_pubkey);
        return 0;
    }

    // Otherwise accept a bech32-encoded npub.
    if (strncmp(input_pubkey, "npub1", 5) == 0) {
        unsigned char decoded[32];
        if (nostr_decode_npub(input_pubkey, decoded) != 0) {
            fprintf(stderr, "Error: Invalid npub format\n");
            return -1;
        }
        nostr_bytes_to_hex(decoded, 32, output_hex);
        return 0;
    }

    fprintf(stderr, "Error: Public key must be 64-character hex or valid npub\n");
    return -1;
}
/**
 * Convert nsec to private key bytes if needed
 *
 * Accepts either a 64-character hex string or a bech32 "nsec1..." key
 * and writes the decoded 32-byte private key into private_key.
 *
 * @param input_nsec  Private key as 64-char hex or "nsec1..." string
 * @param private_key Output buffer; must hold at least 32 bytes
 * @return 0 on success, -1 on invalid/unrecognized input (error on stderr)
 */
int convert_nsec_to_private_key(const char* input_nsec, unsigned char* private_key) {
    // Check if it's already hex (64 characters)
    // NOTE(review): length alone selects this branch; non-hex 64-char input
    // is rejected by nostr_hex_to_bytes below.
    if (strlen(input_nsec) == 64) {
        // Convert hex to bytes
        if (nostr_hex_to_bytes(input_nsec, private_key, 32) != 0) {
            fprintf(stderr, "Error: Invalid hex private key\n");
            return -1;
        }
        return 0;
    }
    // Check if it's an nsec (starts with "nsec1")
    if (strncmp(input_nsec, "nsec1", 5) == 0) {
        // Convert nsec directly to private key bytes
        if (nostr_decode_nsec(input_nsec, private_key) != 0) {
            fprintf(stderr, "Error: Invalid nsec format\n");
            return -1;
        }
        return 0;
    }
    // Neither 64-char hex nor nsec1-prefixed: reject.
    fprintf(stderr, "Error: Private key must be 64-character hex or valid nsec\n");
    return -1;
}
/**
 * Main function
 *
 * Parses command-line options, builds a NIP-17 chat event (kind 14),
 * wraps it in gift wraps, and publishes the first wrap to the configured
 * relays. Returns 0 on success, 1 on any failure.
 *
 * Fixes over the original: the `relays` array was leaked on several
 * early-error paths, and realloc() was assigned straight back to the
 * pointer (leaking the old block and crashing on allocation failure).
 */
int main(int argc, char* argv[]) {
    char* recipient_key = NULL;
    char* sender_key = NULL;
    char** relays = NULL;
    int relay_count = 0;
    char* message = NULL;

    // Parse command line options
    int opt;
    while ((opt = getopt(argc, argv, "r:s:R:")) != -1) {
        switch (opt) {
            case 'r':
                recipient_key = optarg;
                break;
            case 's':
                sender_key = optarg;
                break;
            case 'R': {
                // FIX: check realloc's result via a temporary instead of
                // overwriting `relays` (NULL on failure would both leak
                // the old array and crash on the next line).
                char** grown = realloc(relays, (relay_count + 1) * sizeof(char*));
                if (!grown) {
                    fprintf(stderr, "Error: Out of memory\n");
                    free(relays);
                    return 1;
                }
                relays = grown;
                relays[relay_count] = optarg;
                relay_count++;
                break;
            }
            default:
                fprintf(stderr, "Usage: %s -r <recipient> -s <sender> [-R <relay>]... <message>\n", argv[0]);
                fprintf(stderr, "Options:\n");
                fprintf(stderr, "  -r <recipient>: The recipient's public key (npub or hex)\n");
                fprintf(stderr, "  -s <sender>: The sender's private key (nsec or hex)\n");
                fprintf(stderr, "  -R <relay>: Relay URL to send to (can be specified multiple times)\n");
                fprintf(stderr, "  <message>: The message to send (must be the last argument)\n");
                free(relays); // FIX: was leaked on usage error
                return 1;
        }
    }

    // Check for required arguments
    if (!recipient_key) {
        fprintf(stderr, "Error: Recipient key (-r) is required\n");
        free(relays); // FIX: was leaked when -R preceded the error
        return 1;
    }

    // Get message from remaining arguments
    if (optind >= argc) {
        fprintf(stderr, "Error: Message is required\n");
        free(relays); // FIX: was leaked
        return 1;
    }
    message = argv[optind];

    // Use default values if not provided
    if (!sender_key) {
        sender_key = DEFAULT_SENDER_NSEC;
    }
    if (relay_count == 0) {
        relays = malloc(sizeof(char*));
        if (!relays) {
            fprintf(stderr, "Error: Out of memory\n");
            return 1;
        }
        relays[0] = DEFAULT_RELAY;
        relay_count = 1;
    }

    printf("🧪 NIP-17 Private Direct Message Sender\n");
    printf("======================================\n\n");

    // Initialize crypto
    if (nostr_init() != NOSTR_SUCCESS) {
        fprintf(stderr, "Failed to initialize crypto\n");
        free(relays);
        return 1;
    }

    // Convert recipient pubkey
    char recipient_pubkey_hex[65];
    if (convert_pubkey_to_hex(recipient_key, recipient_pubkey_hex) != 0) {
        free(relays);
        return 1;
    }

    // Convert sender private key
    unsigned char sender_privkey[32];
    if (convert_nsec_to_private_key(sender_key, sender_privkey) != 0) {
        free(relays);
        return 1;
    }

    // Derive sender public key for display
    unsigned char sender_pubkey_bytes[32];
    char sender_pubkey_hex[65];
    if (nostr_ec_public_key_from_private_key(sender_privkey, sender_pubkey_bytes) != 0) {
        fprintf(stderr, "Failed to derive sender public key\n");
        free(relays); // FIX: was leaked on this path
        return 1;
    }
    nostr_bytes_to_hex(sender_pubkey_bytes, 32, sender_pubkey_hex);

    printf("📤 Sender: %s\n", sender_pubkey_hex);
    printf("📥 Recipient: %s\n", recipient_pubkey_hex);
    printf("💬 Message: %s\n", message);
    printf("🌐 Relays: ");
    for (int i = 0; i < relay_count; i++) {
        printf("%s", relays[i]);
        if (i < relay_count - 1) printf(", ");
    }
    printf("\n\n");

    // Create DM event
    printf("💬 Creating DM event...\n");
    const char* recipient_pubkeys[] = {recipient_pubkey_hex};
    cJSON* dm_event = nostr_nip17_create_chat_event(
        message,
        recipient_pubkeys,
        1,
        "NIP-17 CLI",  // subject
        NULL,          // no reply
        relays[0],     // relay hint (use first relay)
        sender_pubkey_hex
    );
    if (!dm_event) {
        fprintf(stderr, "Failed to create DM event\n");
        free(relays); // FIX: was leaked on this path
        return 1;
    }
    printf("✅ Created DM event (kind 14)\n");

    // Send DM (create gift wraps)
    printf("🎁 Creating gift wraps...\n");
    cJSON* gift_wraps[10]; // Max 10 gift wraps
    int gift_wrap_count = nostr_nip17_send_dm(
        dm_event,
        recipient_pubkeys,
        1,
        sender_privkey,
        gift_wraps,
        10
    );
    cJSON_Delete(dm_event); // Original DM event no longer needed
    if (gift_wrap_count <= 0) {
        fprintf(stderr, "Failed to create gift wraps\n");
        free(relays); // FIX: was leaked on this path
        return 1;
    }
    printf("✅ Created %d gift wrap(s)\n", gift_wrap_count);

    // Publish the gift wrap to relays
    printf("\n📤 Publishing gift wrap to %d relay(s)...\n", relay_count);
    int success_count = 0;
    publish_result_t* publish_results = synchronous_publish_event_with_progress(
        (const char**)relays,
        relay_count,
        gift_wraps[0], // Send the first gift wrap
        &success_count,
        10, // 10 second timeout
        publish_progress_callback,
        NULL, // no user data
        0,    // NIP-42 disabled
        NULL  // no private key for auth
    );

    if (!publish_results || success_count == 0) {
        fprintf(stderr, "\n❌ Failed to publish gift wrap to any relay (success_count: %d/%d)\n", success_count, relay_count);
        // Clean up gift wraps
        for (int i = 0; i < gift_wrap_count; i++) {
            cJSON_Delete(gift_wraps[i]);
        }
        if (publish_results) free(publish_results);
        free(relays);
        return 1;
    }

    printf("\n✅ Successfully published NIP-17 DM to %d/%d relay(s)!\n", success_count, relay_count);

    // Clean up
    free(publish_results);
    for (int i = 0; i < gift_wrap_count; i++) {
        cJSON_Delete(gift_wraps[i]);
    }
    free(relays);
    nostr_cleanup();

    printf("\n🎉 DM sent successfully! The recipient can now decrypt it using their private key.\n");
    return 0;
}

View File

@@ -0,0 +1,295 @@
#!/bin/bash
# Subscription Cleanup Testing Suite for C-Relay
# Tests startup cleanup and connection age limit features
# Abort on the first unguarded failure; individual probes below guard their
# own errors with `|| true`, `if`, or command substitution.
set -e
# Load test keys
source "$(dirname "$0")/.test_keys.txt"
# Configuration
# NOTE(review): assumes the relay under test listens on localhost:8888 — confirm before running.
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
RELAY_URL="ws://${RELAY_HOST}:${RELAY_PORT}"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test counters — mutated only by print_result
TOTAL_TESTS=0
PASSED_TESTS=0
FAILED_TESTS=0
# Banner
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Subscription Cleanup Test Suite${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""
# Print the banner line for one test section.
# $1 - test number, $2 - test title.
print_test_header() {
    local test_num="$1"
    local test_title="$2"
    echo -e "${BLUE}=== Test ${test_num}: ${test_title} ===${NC}"
}
# Record one test outcome and print a colored tag.
# $1 - status: PASS, FAIL, or anything else (rendered as WARN and, like the
#      original suite, counted as a pass so warnings never fail the run).
# $2 - human-readable message.
# Side effects: increments the global TOTAL_TESTS / PASSED_TESTS / FAILED_TESTS counters.
print_result() {
    local status="$1"
    local message="$2"
    TOTAL_TESTS=$((TOTAL_TESTS + 1))
    case "$status" in
        PASS)
            echo -e "${GREEN}[PASS]${NC} $message"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            ;;
        FAIL)
            echo -e "${RED}[FAIL]${NC} $message"
            FAILED_TESTS=$((FAILED_TESTS + 1))
            ;;
        *)
            echo -e "${YELLOW}[WARN]${NC} $message"
            PASSED_TESTS=$((PASSED_TESTS + 1))
            ;;
    esac
}
# Probe the relay: send a minimal REQ and succeed (exit 0) only when the
# reply contains an EOSE or EVENT frame within the 2-second timeout.
check_relay_running() {
    local reply
    reply=$(echo '["REQ","ping",{}]' | timeout 2 websocat -n1 "$RELAY_URL" 2>/dev/null)
    # grep -q's exit status is the function's return value
    echo "$reply" | grep -q "EOSE\|EVENT"
}
# Open a subscription and print the relay's first reply.
# $1 - subscription id, $2 - optional filter JSON (defaults to "{}").
# Prints "TIMEOUT" when no reply arrives within 5 seconds.
create_subscription() {
    local sub_id="$1"
    local filter=${2:-"{}"}
    printf '["REQ","%s",%s]\n' "$sub_id" "$filter" \
        | timeout 5 websocat -n1 "$RELAY_URL" 2>/dev/null || echo "TIMEOUT"
}
# Send a CLOSE frame for subscription $1 and print the relay's reply,
# or "TIMEOUT" when nothing arrives within 5 seconds.
close_subscription() {
    local sub_id="$1"
    printf '["CLOSE","%s"]\n' "$sub_id" \
        | timeout 5 websocat -n1 "$RELAY_URL" 2>/dev/null || echo "TIMEOUT"
}
# Print the number of still-open subscription rows recorded in the relay's
# SQLite database, or "0" when no *.db file is found or the query fails.
# NOTE(review): uses the first *.db under the current directory — verify this
# matches the relay's actual database when several exist.
get_subscription_count() {
    local db_file
    db_file=$(find . -name "*.db" -type f 2>/dev/null | head -1)
    if [ -n "$db_file" ]; then
        sqlite3 "$db_file" "SELECT COUNT(*) FROM subscriptions WHERE event_type='created' AND ended_at IS NULL;" 2>/dev/null || echo "0"
    else
        echo "0"
    fi
}
# Test 1: Basic Connectivity
# Sanity gate — the rest of the suite is meaningless without a reachable
# relay, so a failure here aborts the whole run.
print_test_header "1" "Basic Connectivity"
if check_relay_running; then
print_result "PASS" "Relay is running and accepting connections"
else
print_result "FAIL" "Cannot connect to relay at $RELAY_URL"
echo ""
echo -e "${RED}ERROR: Relay must be running for tests to proceed${NC}"
exit 1
fi
echo ""
# Test 2: Create Multiple Subscriptions
# Opens five REQ subscriptions, then checks the relay's bookkeeping table
# to confirm they were logged as active.
print_test_header "2" "Create Multiple Subscriptions"
echo "[INFO] Creating 5 test subscriptions..."
for i in {1..5}; do
response=$(create_subscription "cleanup_test_$i")
if echo "$response" | grep -q "EOSE"; then
echo "[INFO] Subscription cleanup_test_$i created successfully"
else
print_result "WARN" "Subscription cleanup_test_$i may not have been created: $response"
fi
done
# Give subscriptions time to be logged
sleep 2
# Check subscription count in database
# active_subs is reused as the baseline for Test 3 below.
active_subs=$(get_subscription_count)
echo "[INFO] Active subscriptions in database: $active_subs"
if [ "$active_subs" -ge 5 ]; then
print_result "PASS" "Multiple subscriptions created and logged ($active_subs active)"
else
print_result "WARN" "Expected at least 5 subscriptions, found $active_subs"
fi
echo ""
# Test 3: Simulate Orphaned Subscriptions (disconnect without CLOSE)
# Opens subscriptions in background subshells and SIGKILLs them so the relay
# never sees a CLOSE frame; the rows should linger as "active" until a
# cleanup pass runs.
print_test_header "3" "Simulate Orphaned Subscriptions"
echo "[INFO] Creating subscriptions and disconnecting abruptly..."
# Create subscriptions in background and kill the connection
for i in {6..10}; do
(echo "[\"REQ\",\"orphan_test_$i\",{}]" | timeout 2 websocat "$RELAY_URL" &>/dev/null) &
pid=$!
sleep 0.5
kill -9 $pid 2>/dev/null || true
done
sleep 2
# orphaned_subs is compared against again after the restart in Test 4.
orphaned_subs=$(get_subscription_count)
echo "[INFO] Subscriptions after abrupt disconnects: $orphaned_subs"
if [ "$orphaned_subs" -gt "$active_subs" ]; then
print_result "PASS" "Orphaned subscriptions detected ($orphaned_subs total, was $active_subs)"
else
print_result "WARN" "No increase in orphaned subscriptions detected"
fi
echo ""
# Test 4: Startup Cleanup (requires relay restart)
# Interactive step: the operator restarts the relay, then we verify the
# orphaned rows from Test 3 were purged by the startup cleanup.
print_test_header "4" "Startup Cleanup Feature"
echo "[INFO] This test requires relay restart to verify startup cleanup"
echo "[INFO] Current orphaned subscriptions: $orphaned_subs"
echo ""
echo -e "${YELLOW}[ACTION REQUIRED]${NC} Please restart the relay now with:"
echo " ./make_and_restart_relay.sh"
echo ""
echo -n "Press Enter after relay has restarted to continue..."
# NOTE(review): under `set -e`, EOF on stdin makes this `read` abort the
# whole script — the suite must be run interactively.
read
# Wait for relay to be ready
echo "[INFO] Waiting for relay to be ready..."
sleep 3
if ! check_relay_running; then
print_result "FAIL" "Relay not responding after restart"
exit 1
fi
# Check if orphaned subscriptions were cleaned up
cleaned_subs=$(get_subscription_count)
echo "[INFO] Active subscriptions after restart: $cleaned_subs"
if [ "$cleaned_subs" -eq 0 ]; then
print_result "PASS" "Startup cleanup removed all orphaned subscriptions"
elif [ "$cleaned_subs" -lt "$orphaned_subs" ]; then
print_result "PASS" "Startup cleanup reduced orphaned subscriptions (from $orphaned_subs to $cleaned_subs)"
else
print_result "FAIL" "Startup cleanup did not reduce orphaned subscriptions"
fi
echo ""
# Test 5: Connection Age Limit (requires configuration)
# Informational only: prints the manual procedure for exercising the age
# limit, then merely verifies that a subscription can still be created.
print_test_header "5" "Connection Age Limit Feature"
echo "[INFO] Testing connection age limit feature..."
echo "[INFO] Default max_connection_seconds is 86400 (24 hours)"
echo ""
echo -e "${YELLOW}[INFO]${NC} To test connection age limit with shorter timeout:"
echo " 1. Set max_connection_seconds to 60 (1 minute) via admin event"
echo " 2. Create a subscription and wait 61 seconds"
echo " 3. Connection should be automatically closed"
echo ""
echo "[INFO] For this test, we'll verify the feature is enabled in config"
# Create a test subscription to verify connection works
response=$(create_subscription "age_test_1")
if echo "$response" | grep -q "EOSE"; then
print_result "PASS" "Connection age limit feature is operational (subscription created)"
else
print_result "WARN" "Could not verify connection age limit feature"
fi
echo ""
# Test 6: Verify Client Reconnection
# Confirms a client can open a subscription, then open another one on a
# fresh connection — i.e. cleanup does not block new connections.
print_test_header "6" "Client Reconnection After Cleanup"
echo "[INFO] Testing that clients can reconnect after cleanup..."
# Create a subscription
response=$(create_subscription "reconnect_test_1")
if echo "$response" | grep -q "EOSE"; then
echo "[INFO] First connection successful"
# Close and reconnect
sleep 1
response=$(create_subscription "reconnect_test_2")
if echo "$response" | grep -q "EOSE"; then
print_result "PASS" "Client can reconnect and create new subscriptions"
else
print_result "FAIL" "Client reconnection failed"
fi
else
print_result "FAIL" "Initial connection failed"
fi
echo ""
# Test 7: Verify Disabled State (max_connection_seconds = 0)
# Informational: does not actually toggle the setting; just verifies that a
# subscription works under the current configuration.
print_test_header "7" "Verify Feature Can Be Disabled"
echo "[INFO] Connection age limit can be disabled by setting max_connection_seconds=0"
echo "[INFO] When disabled, connections remain open indefinitely"
echo "[INFO] This is the recommended setting for most relays"
# Create a long-lived subscription
response=$(create_subscription "disabled_test_1")
if echo "$response" | grep -q "EOSE"; then
print_result "PASS" "Subscriptions work normally when feature is disabled/default"
else
print_result "WARN" "Could not verify disabled state"
fi
echo ""
# Test 8: Database Integrity Check
# Runs PRAGMA integrity_check and a simple SELECT against the subscriptions
# table on the first *.db file found under the current directory.
print_test_header "8" "Database Integrity After Cleanup"
echo "[INFO] Checking database integrity..."
db_file=$(find . -name "*.db" -type f 2>/dev/null | head -1)
if [ -n "$db_file" ]; then
# Check if database is accessible
if sqlite3 "$db_file" "PRAGMA integrity_check;" 2>/dev/null | grep -q "ok"; then
print_result "PASS" "Database integrity check passed"
else
print_result "FAIL" "Database integrity check failed"
fi
# Check subscription table structure
if sqlite3 "$db_file" "SELECT COUNT(*) FROM subscriptions;" &>/dev/null; then
print_result "PASS" "Subscription table is accessible"
else
print_result "FAIL" "Subscription table is not accessible"
fi
else
print_result "WARN" "No database file found"
fi
echo ""
# Final Summary
echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Test Summary${NC}"
echo -e "${BLUE}========================================${NC}"
echo "Total Tests: $TOTAL_TESTS"
echo -e "${GREEN}Passed: $PASSED_TESTS${NC}"
echo -e "${RED}Failed: $FAILED_TESTS${NC}"
echo ""
# Exit status reflects only FAILs; WARN results were counted as passes by
# print_result, so CI gating on this script treats warnings as success.
if [ $FAILED_TESTS -eq 0 ]; then
echo -e "${GREEN}All tests passed!${NC}"
exit 0
else
echo -e "${RED}Some tests failed. Please review the output above.${NC}"
exit 1
fi

747
tests/test_requests.mjs Normal file
View File

@@ -0,0 +1,747 @@
/**
* Nostr Relay Pubkey Filter Test
* Tests how many pubkeys different relays can handle in a single filter request
*/
import { WebSocket } from 'ws';
// Configuration
// Relay endpoints to probe; the commented entry shows how to target a
// remote relay instead of the local instance.
const RELAYS = [
// "wss://relay.laantungir.net"
"ws://127.0.0.1:8888"
];
// Test parameters
const STEP_SIZE = 25; // Increment pubkey count by 25 each test
const MAX_PUBKEYS = 500; // Maximum pubkeys to test
const EVENT_KIND = 1; // Kind 1 = text notes
const EVENT_LIMIT = 2; // Only request 2 events per test
// Generate test pubkey arrays of increasing sizes
/**
 * Build test pubkey arrays of increasing size from a pool.
 *
 * Sizes grow by `step` up to `maxCount`, capped at the pool length. The
 * original implementation let `Array.prototype.slice` clamp silently, so a
 * pool smaller than MAX_PUBKEYS produced many duplicate full-pool arrays;
 * here each returned array has a distinct length, with at most one final
 * array covering the whole pool. Called with no arguments it behaves
 * exactly like before whenever the pool holds at least MAX_PUBKEYS keys.
 *
 * @param {string[]} [pool=PUBKEYS] - hex pubkeys to draw from
 * @param {number} [step=STEP_SIZE] - size increment between test arrays
 * @param {number} [maxCount=MAX_PUBKEYS] - largest array size to attempt
 * @returns {string[][]} prefixes of `pool` with strictly increasing lengths
 */
function generateTestPubkeyArrays(pool = PUBKEYS, step = STEP_SIZE, maxCount = MAX_PUBKEYS) {
  const testArrays = [];
  const limit = Math.min(maxCount, pool.length);
  for (let count = step; count <= limit; count += step) {
    testArrays.push(pool.slice(0, count));
  }
  // When the pool is the binding constraint and its size is not a multiple
  // of `step`, add one final array covering the entire pool so the largest
  // case is still exercised (instead of the old duplicate-array behavior).
  if (limit < maxCount && limit % step !== 0) {
    testArrays.push(pool.slice(0, limit));
  }
  return testArrays;
}
const PUBKEYS = [
"85080d3bad70ccdcd7f74c29a44f55bb85cbcd3dd0cbb957da1d215bdb931204",
"82341f882b6eabcd2ba7f1ef90aad961cf074af15b9ef44a09f9d2a8fbfbe6a2",
"916b7aca250f43b9f842faccc831db4d155088632a8c27c0d140f2043331ba57",
"2645caf5706a31767c921532975a079f85950e1006bd5065f5dd0213e6848a96",
"8fe3f243e91121818107875d51bca4f3fcf543437aa9715150ec8036358939c5",
"83e818dfbeccea56b0f551576b3fd39a7a50e1d8159343500368fa085ccd964b",
"a341f45ff9758f570a21b000c17d4e53a3a497c8397f26c0e6d61e5acffc7a98",
"e88a691e98d9987c964521dff60025f60700378a4879180dcbbb4a5027850411",
"2cde0e02bda47eaeeed65e341619cc5f2afce990164669da4e1e5989180a96b9",
"edcd20558f17d99327d841e4582f9b006331ac4010806efa020ef0d40078e6da",
"34d2f5274f1958fcd2cb2463dabeaddf8a21f84ace4241da888023bf05cc8095",
"c48b5cced5ada74db078df6b00fa53fc1139d73bf0ed16de325d52220211dbd5",
"04c915daefee38317fa734444acee390a8269fe5810b2241e5e6dd343dfbecc9",
"e33fe65f1fde44c6dc17eeb38fdad0fceaf1cae8722084332ed1e32496291d42",
"1306edd66f1da374adc417cf884bbcff57c6399656236c1f872ee10403c01b2d",
"eaf27aa104833bcd16f671488b01d65f6da30163b5848aea99677cc947dd00aa",
"472f440f29ef996e92a186b8d320ff180c855903882e59d50de1b8bd5669301e",
"be1d89794bf92de5dd64c1e60f6a2c70c140abac9932418fee30c5c637fe9479",
"c49d52a573366792b9a6e4851587c28042fb24fa5625c6d67b8c95c8751aca15",
"c037a6897df86bfd4df5496ca7e2318992b4766897fb18fbd1d347a4f4459f5e",
"e41e883f1ef62485a074c1a1fa1d0a092a5d678ad49bedc2f955ab5e305ba94e",
"020f2d21ae09bf35fcdfb65decf1478b846f5f728ab30c5eaabcd6d081a81c3e",
"e2f28c1ac6dff5a7b755635af4c8436d2fec89b888a9d9548a51b2c63f779555",
"29fbc05acee671fb579182ca33b0e41b455bb1f9564b90a3d8f2f39dee3f2779",
"090254801a7e8e5085b02e711622f0dfa1a85503493af246aa42af08f5e4d2df",
"3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d",
"6b0d4c8d9dc59e110d380b0429a02891f1341a0fa2ba1b1cf83a3db4d47e3964",
"b0b8fbd9578ac23e782d97a32b7b3a72cda0760761359bd65661d42752b4090a",
"b7996c183e036df27802945b80bbdc8b0bf5971b6621a86bf3569c332117f07d",
"1833ee04459feb2ca4ae690d5f31269ad488c69e5fe903a42b532c677c4a8170",
"4adb4ff2dc72bbf1f6da19fc109008a25013c837cf712016972fad015b19513f",
"c4eabae1be3cf657bc1855ee05e69de9f059cb7a059227168b80b89761cbc4e0",
"368f4e0027fd223fdb69b6ec6e1c06d1f027a611b1ed38eeb32493eb2878bb35",
"703e26b4f8bc0fa57f99d815dbb75b086012acc24fc557befa310f5aa08d1898",
"50d94fc2d8580c682b071a542f8b1e31a200b0508bab95a33bef0855df281d63",
"6e1534f56fc9e937e06237c8ba4b5662bcacc4e1a3cfab9c16d89390bec4fca3",
"a5e93aef8e820cbc7ab7b6205f854b87aed4b48c5f6b30fbbeba5c99e40dcf3f",
"1989034e56b8f606c724f45a12ce84a11841621aaf7182a1f6564380b9c4276b",
"19fefd7f39c96d2ff76f87f7627ae79145bc971d8ab23205005939a5a913bc2f",
"6e468422dfb74a5738702a8823b9b28168abab8655faacb6853cd0ee15deee93",
"a3b0ce5d70d0db22885706b2b1f144c6864a7e4828acff3f8f01ca6b3f54ad15",
"aef0d6b212827f3ba1de6189613e6d4824f181f567b1205273c16895fdaf0b23",
"826e9f895b81ab41a4522268b249e68d02ca81608def562a493cee35ffc5c759",
"460c25e682fda7832b52d1f22d3d22b3176d972f60dcdc3212ed8c92ef85065c",
"e1055729d51e037b3c14e8c56e2c79c22183385d94aadb32e5dc88092cd0fef4",
"27f211f4542fd89d673cfad15b6d838cc5d525615aae8695ed1dcebc39b2dadb",
"eab0e756d32b80bcd464f3d844b8040303075a13eabc3599a762c9ac7ab91f4f",
"32e1827635450ebb3c5a7d12c1f8e7b2b514439ac10a67eef3d9fd9c5c68e245",
"63fe6318dc58583cfe16810f86dd09e18bfd76aabc24a0081ce2856f330504ed",
"00000000827ffaa94bfea288c3dfce4422c794fbb96625b6b31e9049f729d700",
"7fa56f5d6962ab1e3cd424e758c3002b8665f7b0d8dcee9fe9e288d7751ac194",
"22aa81510ee63fe2b16cae16e0921f78e9ba9882e2868e7e63ad6d08ae9b5954",
"175f568d77fb0cb7400f0ddd8aed1738cd797532b314ef053a1669d4dba7433a",
"6c535d95a8659b234d5a0805034f5f0a67e3c0ceffcc459f61f680fe944424bf",
"9579444852221038dcba34512257b66a1c6e5bdb4339b6794826d4024b3e4ce9",
"58c741aa630c2da35a56a77c1d05381908bd10504fdd2d8b43f725efa6d23196",
"b8e6bf46e109314616fe24e6c7e265791a5f2f4ec95ae8aa15d7107ad250dc63",
"84dee6e676e5bb67b4ad4e042cf70cbd8681155db535942fcc6a0533858a7240",
"387519cafd325668ecffe59577f37238638da4cf2d985b82f932fc81d33da1e8",
"b9e76546ba06456ed301d9e52bc49fa48e70a6bf2282be7a1ae72947612023dc",
"4d62dd5e6ac55ae2405940f59f6f030a994ec2b3ecc5556c8dc542cce20e46dd",
"9c612f8b770f0e3fd35cdac2bc57fcee8561e560504ea25c8b9eff8e03512b3e",
"3eeb3de14ec5c48c6c4c9ff80908c4186170eabb74b2a6705a7db9f9922cd61e",
"51d7f1b736d1958fa56f113e82a27987a3aca4f0e6d237fa8fc369cc1608c5c0",
"c2622c916d9b90e10a81b2ba67b19bdfc5d6be26c25756d1f990d3785ce1361b",
"b111d517452f9ef015e16d60ae623a6b66af63024eec941b0653bfee0dd667d4",
"d897efcd971f8e5eae08c86b7da66f89b30e761a4a86ac41d907425d15b630fe",
"9dea27855974a08fceb48c40fab8432c1a8e3a53a1da22a1ad568595d6010649",
"47b630bbcdfa88b1c85f84aa3b68fe6c0102b651ba5d9a23cbd2d07b4f6eecc1",
"eb0dc09a61fdfc0df5db1f20c7fc7d83f00c690580fea2e5bac8f99c13f65065",
"5c0775b1ae0a5140da9599aa9cd1c5beea55c2d55a5d681808525eb5fce37b32",
"b474e6999980aa9e8c9dd6e3720fb03136bfa05aba5fab1634dc0bd8767d412f",
"759f7abf05ca710bf2c8da7ad7a9a7df6d0c85db7b2217da524e94e3627b2fbd",
"060e7c6ed0dbeb9c8cdc61445ee38b9b08d899d6b617e28064b0916e243ddddb",
"f728d9e6e7048358e70930f5ca64b097770d989ccd86854fe618eda9c8a38106",
"bf2376e17ba4ec269d10fcc996a4746b451152be9031fa48e74553dde5526bce",
"b99dbca0184a32ce55904cb267b22e434823c97f418f36daf5d2dff0dd7b5c27",
"c7dccba4fe4426a7b1ea239a5637ba40fab9862c8c86b3330fe65e9f667435f6",
"ad46db12ee250a108756ab4f0f3007b04d7e699f45eac3ab696077296219d207",
"59fbee7369df7713dbbfa9bbdb0892c62eba929232615c6ff2787da384cb770f",
"d7f0e3917c466f1e2233e9624fbd6d4bd1392dbcfcaf3574f457569d496cb731",
"e9e4276490374a0daf7759fd5f475deff6ffb9b0fc5fa98c902b5f4b2fe3bba2",
"6f35047caf7432fc0ab54a28fed6c82e7b58230bf98302bf18350ff71e10430a",
"fdd5e8f6ae0db817be0b71da20498c1806968d8a6459559c249f322fa73464a7",
"883fea4c071fda4406d2b66be21cb1edaf45a3e058050d6201ecf1d3596bbc39",
"a1808558470389142e297d4729e081ab8bdff1ab50d0ebe22ffa78958f7a6ab7",
"330fb1431ff9d8c250706bbcdc016d5495a3f744e047a408173e92ae7ee42dac",
"a4cb51f4618cfcd16b2d3171c466179bed8e197c43b8598823b04de266cef110",
"9c163c7351f8832b08b56cbb2e095960d1c5060dd6b0e461e813f0f07459119e",
"0a722ca20e1ccff0adfdc8c2abb097957f0e0bf32db18c4281f031756d50eb8d",
"5cad82c898ee66013711945d687f7d9549f645a0118467dae2f5e274c598d6ff",
"03b593ef3d95102b54bdff77728131a7c3bdfe9007b0b92cd7c4ad4a0021de25",
"d0debf9fb12def81f43d7c69429bb784812ac1e4d2d53a202db6aac7ea4b466c",
"60d53675f07dee9e7d77910efa44682d87cb532313ba66b8f4449d649172296b",
"d3ab33199eb48c6f785072b4a66a8e57814e35d31375cca8c3ceeecc171f30ba",
"772bd267dffbff318d1a89f257c3371410111a8b89571dbbefa77af6bfa179f3",
"11b9a89404dbf3034e7e1886ba9dc4c6d376f239a118271bd2ec567a889850ce",
"0497384b57b43c107a778870462901bf68e0e8583b32e2816563543c059784a4",
"5d9ba2c5ee0e86e2c4477b145eb301f2df06063a19f4c4ab9042bd347188ec8e",
"5683ffc7ff8a732565135aad56cdff94ebacd9a616d1313aea8ad48a446bfe99",
"3004d45a0ab6352c61a62586a57c50f11591416c29db1143367a4f0623b491ca",
"b24e32ee9a1c18f2771b53345ed8dbc55b59cbe958e5a165dc01704c3aaa6196",
"0a2df905acd5b5be3214a84cb2d4f61b0efb4d9bf05739d51112252504959688",
"95361a2b42a26c22bac3b6b6ba4c5cac4d36906eb0cfb98268681c45a301c518",
"b07d216f2f0422ec0252dd81a6513b8d0b0c7ef85291fbf5a85ef23f8df78fa7",
"064de2497ce621aee2a5b4b926a08b1ca01bce9da85b0c714e883e119375140c",
"5a8e581f16a012e24d2a640152ad562058cb065e1df28e907c1bfa82c150c8ba",
"a36bdc7952e973b31cb32d4ce3ce21447db66c3149c1b7a3d2450f77f9c7e8f9",
"e03cfe011d81424bb60a12e9eb0cb0c9c688c34712c3794c0752e0718b369ef2",
"2edbcea694d164629854a52583458fd6d965b161e3c48b57d3aff01940558884",
"b9003833fabff271d0782e030be61b7ec38ce7d45a1b9a869fbdb34b9e2d2000",
"4379e76bfa76a80b8db9ea759211d90bb3e67b2202f8880cc4f5ffe2065061ad",
"76c71aae3a491f1d9eec47cba17e229cda4113a0bbb6e6ae1776d7643e29cafa",
"d307643547703537dfdef811c3dea96f1f9e84c8249e200353425924a9908cf8",
"da0cc82154bdf4ce8bf417eaa2d2fa99aa65c96c77867d6656fccdbf8e781b18",
"3511ad63cd9ad760780044b7c815ee55e8e00722b5de271c47ff29367653456c",
"f9acb0b034c4c1177e985f14639f317ef0fedee7657c060b146ee790024317ec",
"0c371f5ed95076613443e8331c4b60828ed67bcdefaa1698fb5ce9d7b3285ffb",
"ee11a5dff40c19a555f41fe42b48f00e618c91225622ae37b6c2bb67b76c4e49",
"053935081a69624466034446eda3374d905652ddbf8217c88708182687a33066",
"a305cc8926861bdde5c71bbb6fd394bb4cea6ef5f5f86402b249fc5ceb0ce220",
"03a6e50be223dbb49e282764388f6f2ca8826eae8c5a427aa82bb1b61e51d5e6",
"a197639863cf175adc96348382a73b4a4a361c6b2e6fc1de61a14244a2f926a1",
"3ca7ca157b5975ace02225caf99fdce43f11207c072cb4899c80a414a9c7539d",
"02d9f5676fffc339ffe94dfab38bebe21ce117c6f1509d9922a82d454f420da2",
"08634a74c9d14479b462389b307695815f9a189e8fb6e058b92e18bd3f537405",
"ec7de4aa8758ba9e09a8c89d2757a1fa0e2cc61c20b757af52ae058931c1a33f",
"2250f69694c2a43929e77e5de0f6a61ae5e37a1ee6d6a3baef1706ed9901248b",
"a9434ee165ed01b286becfc2771ef1705d3537d051b387288898cc00d5c885be",
"bd625f1b8c49a79f075f3ebd2d111ff625504cf2ad12442fd70d191dd2f4a562",
"25e5c82273a271cb1a840d0060391a0bf4965cafeb029d5ab55350b418953fbb",
"42224859763652914db53052103f0b744df79dfc4efef7e950fc0802fc3df3c5",
"11d0b66747887ba9a6d34b23eb31287374b45b1a1b161eac54cb183c53e00ef7",
"2544cfcb89d7c2f8d3a31ea2ed386ac5189a18f484672436580eec215f9b039c",
"d4338b7c3306491cfdf54914d1a52b80a965685f7361311eae5f3eaff1d23a5b",
"c43e382ee4835010b9fad18e0a6f50f1ae143b98e089b8bb974232fce4d1f295",
"92de68b21302fa2137b1cbba7259b8ba967b535a05c6d2b0847d9f35ff3cf56a",
"55f04590674f3648f4cdc9dc8ce32da2a282074cd0b020596ee033d12d385185",
"2af01e0d6bd1b9fbb9e3d43157d64590fb27dcfbcabe28784a5832e17befb87b",
"35b23cd02d2d75e55cee38fdee26bc82f1d15d3c9580800b04b0da2edb7517ea",
"7e0c255fd3d0f9b48789a944baf19bf42c205a9c55199805eb13573b32137488",
"ee0e01eb17fc6cb4cd2d9300d2f8945b51056514f156c6bc6d491b74496d161a",
"cbc5ef6b01cbd1ffa2cb95a954f04c385a936c1a86e1bb9ccdf2cf0f4ebeaccb",
"ec6e36d5c9eb874f1db4253ef02377f7cc70697cda40fbfb24ded6b5d14cce4c",
"2779f3d9f42c7dee17f0e6bcdcf89a8f9d592d19e3b1bbd27ef1cffd1a7f98d1",
"976713246c36db1a4364f917b98633cbe0805d46af880f6b50a505d4eb32ed47",
"8766a54ef9a170b3860bc66fd655abb24b5fda75d7d7ff362f44442fbdeb47b9",
"ea2e3c814d08a378f8a5b8faecb2884d05855975c5ca4b5c25e2d6f936286f14",
"e2ccf7cf20403f3f2a4a55b328f0de3be38558a7d5f33632fdaaefc726c1c8eb",
"07eced8b63b883cedbd8520bdb3303bf9c2b37c2c7921ca5c59f64e0f79ad2a6",
"532d830dffe09c13e75e8b145c825718fc12b0003f61d61e9077721c7fff93cb",
"1afe0c74e3d7784eba93a5e3fa554a6eeb01928d12739ae8ba4832786808e36d",
"c708943ea349519dcf56b2a5c138fd9ed064ad65ddecae6394eabd87a62f1770",
"ccaa58e37c99c85bc5e754028a718bd46485e5d3cb3345691ecab83c755d48cc",
"5b0e8da6fdfba663038690b37d216d8345a623cc33e111afd0f738ed7792bc54",
"f2c96c97f6419a538f84cf3fa72e2194605e1848096e6e5170cce5b76799d400",
"aa55a479ad6934d0fd78f3dbd88515cd1ca0d7a110812e711380d59df7598935",
"bd9eb657c25b4f6cda68871ce26259d1f9bc62420487e3224905b674a710a45a",
"69a80567e79b6b9bc7282ad595512df0b804784616bedb623c122fad420a2635",
"fa984bd7dbb282f07e16e7ae87b26a2a7b9b90b7246a44771f0cf5ae58018f52",
"df173277182f3155d37b330211ba1de4a81500c02d195e964f91be774ec96708",
"675b84fe75e216ab947c7438ee519ca7775376ddf05dadfba6278bd012e1d728",
"91c9a5e1a9744114c6fe2d61ae4de82629eaaa0fb52f48288093c7e7e036f832",
"24ebb6b58d0b984a965b76f82ce9eff8795cc95085a4d09dedc56949ed596ada",
"bb1cf5250435ff475cd8b32acb23e3ee7bbe8fc38f6951704b4798513947672c",
"922945779f93fd0b3759f1157e3d9fa20f3fd24c4b8f2bcf520cacf649af776d",
"3d842afecd5e293f28b6627933704a3fb8ce153aa91d790ab11f6a752d44a42d",
"e8d67c435a4a59304e1414280e952efe17be4254fca27916bf63f9f73e54aba4",
"c1fc7771f5fa418fd3ac49221a18f19b42ccb7a663da8f04cbbf6c08c80d20b1",
"8eee8f5a002e533e9f9ffef14c713da449c23f56f4415e7995552075a02d1d37",
"c998a5739f04f7fff202c54962aa5782b34ecb10d6f915bdfdd7582963bf9171",
"a536ab1f7f3c0133baadbdf472b1ac7ad4b774ed432c1989284193572788bca0",
"c9b19ffcd43e6a5f23b3d27106ce19e4ad2df89ba1031dd4617f1b591e108965",
"e6ee5b449c220defea6373b8a7e147cabd67c2bdb5016886bf6096a3c7435a61",
"a619cf1a888a73211bbf32e0c438319f23e91444d45d7bc88816ed5fcb7e8fa3",
"56a6b75373c8f7b93c53bcae86d8ffbaba9f2a1b38122054fcdb7f3bf645b727",
"89bfe407c647eb1888871f756516bb1906254fba3132d516ce9099614e37d10c",
"b7ed68b062de6b4a12e51fd5285c1e1e0ed0e5128cda93ab11b4150b55ed32fc",
"4657dfe8965be8980a93072bcfb5e59a65124406db0f819215ee78ba47934b3e",
"d61f3bc5b3eb4400efdae6169a5c17cabf3246b514361de939ce4a1a0da6ef4a",
"58ead82fa15b550094f7f5fe4804e0fe75b779dbef2e9b20511eccd69e6d08f9",
"fcf6fee0e959c7195dadc5f36fe5a873003b389e7033293b06057c821fcbc9c5",
"6681268ace4748d41a4cfcc1e64006fb935bbc359782b3d9611f64d51c6752d9",
"e76450df94f84c1c0b71677a45d75b7918f0b786113c2d038e6ab8841b99f276",
"a44dbc9aaa357176a7d4f5c3106846ea096b66de0b50ee39aff54baab6c4bf4b",
"281e109d2a2899bb0555cf0c3a69b24b3debd61885ca29ef39b95b632be25fe7",
"5be6446aa8a31c11b3b453bf8dafc9b346ff328d1fa11a0fa02a1e6461f6a9b1",
"e1ff3bfdd4e40315959b08b4fcc8245eaa514637e1d4ec2ae166b743341be1af",
"0d6c8388dcb049b8dd4fc8d3d8c3bb93de3da90ba828e4f09c8ad0f346488a33",
"9be0be0e64d38a29a9cec9a5c8ef5d873c2bfa5362a4b558da5ff69bc3cbb81e",
"c48e29f04b482cc01ca1f9ef8c86ef8318c059e0e9353235162f080f26e14c11",
"4c7f826edf647462f744b3f16d485f53c797eabdb21cc8a7bb0713283b88e629",
"d1621db4d91b23180707b8d4eb9b599fa8ec1dfc2453793a1f83878bd4bbc9d8",
"4b74667f89358cd582ad82b16a2d24d5bfcb89ac4b1347ee80e5674a13ba78b2",
"83d999a148625c3d2bb819af3064c0f6a12d7da88f68b2c69221f3a746171d19",
"b6494a74d18a2dfa3f80ced9fadae35807716fce1071e4de19e2d746b6d87606",
"b9c411db4036219e3dfcbe28d60e550b46cce86260fcf2c65d281258e437556f",
"2590201e2919a8aa6568c88900192aa54ef00e6c0974a5b0432f52614a841ec8",
"c15a5a65986e7ab4134dee3ab85254da5c5d4b04e78b4f16c82837192d355185",
"dab6c6065c439b9bafb0b0f1ff5a0c68273bce5c1959a4158ad6a70851f507b6",
"baf27a4cc4da49913e7fdecc951fd3b971c9279959af62b02b761a043c33384c",
"6c237d8b3b120251c38c230c06d9e48f0d3017657c5b65c8c36112eb15c52aeb",
"f173040998481bcb2534a53433eafb8d6ea4c7b0e1fc64572830471fe43fc77d",
"36732cc35fe56185af1b11160a393d6c73a1fe41ddf1184c10394c28ca5d627b",
"126103bfddc8df256b6e0abfd7f3797c80dcc4ea88f7c2f87dd4104220b4d65f",
"457e17b7ea97a845a0d1fa8feda9976596678e3a8af46dc6671d40e050ce857d",
"1739d937dc8c0c7370aa27585938c119e25c41f6c441a5d34c6d38503e3136ef",
"b676ded7c768d66a757aa3967b1243d90bf57afb09d1044d3219d8d424e4aea0",
"33bd77e5394520747faae1394a4af5fa47f404389676375b6dc7be865ed81452",
"fe7f6bc6f7338b76bbf80db402ade65953e20b2f23e66e898204b63cc42539a3",
"4f83ef69228b3e09b0abc11ded9e6b85319c0b7fef1a044b8ee9970e38441817",
"4523be58d395b1b196a9b8c82b038b6895cb02b683d0c253a955068dba1facd0",
"d8f38b894b42f7008305cebf17b48925654f22b180c5861b81141f80ccf72848",
"9c557e253213c127a86e333ff01c9f12f63091957efafd878d220a0e2cb1193e",
"9eab64e92219ccedb15ea9b75ababaa4ae831451019394e0e3336390c3a742d8",
"43dedbafef3748c3f9146b961c9b87a3f5cdb1ccb50b4f5890e408702a27a506",
"17717ad4d20e2a425cda0a2195624a0a4a73c4f6975f16b1593fc87fa46f2d58",
"ee6ea13ab9fe5c4a68eaf9b1a34fe014a66b40117c50ee2a614f4cda959b6e74",
"4d023ce9dfd75a7f3075b8e8e084008be17a1f750c63b5de721e6ef883adc765",
"d91191e30e00444b942c0e82cad470b32af171764c2275bee0bd99377efd4075",
"4eb88310d6b4ed95c6d66a395b3d3cf559b85faec8f7691dafd405a92e055d6d",
"0aeb0814c99a13df50643ca27b831a92aaae6366f54e9c276166347aa037d63a",
"16f1a0100d4cfffbcc4230e8e0e4290cc5849c1adc64d6653fda07c031b1074b",
"148d1366a5e4672b1321adf00321778f86a2371a4bdbe99133f28df0b3d32fa1",
"7076f6592de184f9e912c617c46e5e83bad91d3b7f88b7b54cc73bf8ca493321",
"dea51494fec5947d27ca659b73dd281ff5bdba3f89f5da1977a731ad0c22e725",
"aa97e3d392b97c21327081cdb3cb674dfa8c9c663493db43799a4079555ad1b1",
"b7dfbebf760efb2c756b3cae22baafdbbdf55abb782f85e93b4db804d5cba7e3",
"ca696d7edb4a86be7c7d9872bd2f9a44440cf8e2de7853536cbb3a60ae89641f",
"ff27d01cb1e56fb58580306c7ba76bb037bf211c5b573c56e4e70ca858755af0",
"58dece9ff68d021afe76d549b9e48e6cb7b06a5c14cdf45332c8ed7321d6f211",
"78688c1f371e7b923d95368c9298cca06c1ec0a89ea897aa181bd60091121fea",
"9e8dd91d21e867dec464868a8d1f4a27c0e113c53e32f2bec0a7c6e25ad2e9d5",
"6d028aa49aa1f584b3d35aee9fcee8e3c0d81108114289aa046a7969b21eb5f5",
"2c470abbac95a49cd0ed5b3b9e628ffda1dbb03c14caba1a225a9b8bf1dc9d5f",
"9d7af6946b320b3ba6b4d386de2b2cf3f8ac52fdcb63f3343d1a8362693a3ce5",
"d7a4345c3ead1ea7a34bd6aae43c63cbd81941d9ba019fe972843e5ce78e3187",
"00f471f6312ce408f7eb56592a2b6c6b5f54ac2967c77f4c1498903b598e1b16",
"41e9e2c8398583b90204f8e35c2a4c036aeebac6d05dbdc3e7fb44a1d6bd65a2",
"0d978064b9054c023111926050d983573dac2aff16bb8a7497fde8ad725357c0",
"507f5bf8367c1883f115ddf9ee95f79ea693c720eb5a5a8718443a99fa308954",
"9a090f86adf9fbdc37de2a14745e73d6e1fd096d3da0670b6795ce5ad3cfeea3",
"f2b7c5787424c8f9cf6c4480eb99f4a3770cc06337a4f0d1b109ba849b464193",
"6ffe93bb72d4ac788fd8be2dadf5bb4a2f14a330d530b0e68bd733c9744c6619",
"3ebc74907d1f928f209ef210e872cac033eaf3ff89e6853286d45d91e351ef9e",
"da56c54b5e6749d65ad038c196478794af94e4fa5a4efdc20b49981e4ec566c3",
"266815e0c9210dfa324c6cba3573b14bee49da4209a9456f9484e5106cd408a5",
"5b705e6cb602425c019202dd070a0c009b040ac19960eeef2d8a8fab25c1efe5",
"9ec7a778167afb1d30c4833de9322da0c08ba71a69e1911d5578d3144bb56437",
"e771af0b05c8e95fcdf6feb3500544d2fb1ccd384788e9f490bb3ee28e8ed66f",
"40b9c85fffeafc1cadf8c30a4e5c88660ff6e4971a0dc723d5ab674b5e61b451",
"efe5d120df0cc290fa748727fb45ac487caad346d4f2293ab069e8f01fc51981",
"408f636bd26fcc5f29889033b447cb2411f60ab1b8a5fc8cb3842dab758fdeb5",
"d8a2c33f2e2ff3a9d4ff2a5593f3d5a59e9167fa5ded063d0e49891776611e0c",
"02a11d1545114ab63c29958093c91b9f88618e56fee037b9d2fabcff32f62ea9",
"1bbd7fdf68eaf5c19446c3aaf63b39dd4a8e33548bc96f6bd239a4124d8f229e",
"726a1e261cc6474674e8285e3951b3bb139be9a773d1acf49dc868db861a1c11",
"167e7fe01a76b6bec9d2a9b196b18c72e150e985fbeb46ee651869e7b4032785",
"c88f94f0a391b9aaa1ffefd645253b1a968b0a422a876ea48920a95d45c33f47",
"cd169bd8fbd5179e2a8d498ffc31d3ae0e40825ff2b8a85ea359c4455a107ca8",
"fd38f135ef675eac5e93d5b2a738c41777c250188031caf1dcf07b1687a1fe49",
"6b4c612991132cf4c6c390dceaae75041b9954ba4f9c465aca70beb350815a57",
"8fec426247845bdd26f36ae4f737508c15dbec07d43ce18f8c136ab9e35ac212",
"af551accea482000bdccb34bd3c521558e1f353773a3caed83a147921c369ea1",
"a664a4973cd23e9f3b35a62429f7671aba2c2ae68c03313913b5c2d77269d906",
"18f54af1e10c5bb7a35468b0f62b295d12347903c9f95738d065c84bef1402ef",
"be39043cc12efbddfee564d95da751a71df6c139e2def45c431cadeb4a573ca3",
"01ddee289b1a2e90874ca3428a7a414764a6cad1abfaa985c201e7aada16d38c",
"da25cf7b457bddb6b7bc8e1b0146c0fa85373807d6efdac955199fd01fd53c1f",
"ec380784d96b93d404166a6bf1a778227a94a02bdf499f94b5f48f61f5b1350f",
"6538925ebfb661f418d8c7d074bee2e8afd778701dd89070c2da936d571e55c3",
"9edd72eb23222c969379d90d60ec82891b7c827188bb28510a863f59cb697b0a",
"09222857afceb23c66c99fc93d8e5ebda6d7aad901eb38af73c508f117685012",
"744ecc9a119a92da88b1f448b4030cdbc2fec5c37ea06ebdd026e742b002af7f",
"f531a8672baa2415b271e866dbe11fb08640f6c0e5d98f918bd0308e7169b5b7",
"44dc1c2db9c3fbd7bee9257eceb52be3cf8c40baf7b63f46e56b58a131c74f0b",
"89e14be49ed0073da83b678279cd29ba5ad86cf000b6a3d1a4c3dc4aa4fdd02c",
"8fb140b4e8ddef97ce4b821d247278a1a4353362623f64021484b372f948000c",
"72f9755501e1a4464f7277d86120f67e7f7ec3a84ef6813cc7606bf5e0870ff3",
"3d99feac152027ede63326aa4f43d4ca88e4cd27296b96fe18c55d496a8f6340",
"2540d50aeb9be889c3bd050c9cc849b57b156a2759b48084a83db59aa9056eb4",
"b66be78da89991544a05c3a2b63da1d15eefe8e9a1bb6a4369f8616865bd6b7c",
"2f5de0003db84ecd5449128350c66c7fb63e9d02b250d84af84f463e2f9bcef1",
"2045369fc115b138d1438f98d3c29916986c9fde6b8203f7ff8699f0faee1c93",
"ae1008d23930b776c18092f6eab41e4b09fcf3f03f3641b1b4e6ee3aa166d760",
"1ec454734dcbf6fe54901ce25c0c7c6bca5edd89443416761fadc321d38df139",
"b2d670de53b27691c0c3400225b65c35a26d06093bcc41f48ffc71e0907f9d4a",
"ac3f6afe17593f61810513dac9a1e544e87b9ce91b27d37b88ec58fbaa9014aa",
"d1f3c71639ae3bba17ffc6c8deb1fdb3a56506b3492213d033528cc291523704",
"6b4a29bbd43d1d0eeead384f512dbb591ce9407d27dba48ad54b00d9d2e1972b",
"84de08882b6e36705cf6592ee58e632dd6e092dd61c13192fc80cbbc0cbc82cc",
"d3d74124ddfb5bdc61b8f18d17c3335bbb4f8c71182a35ee27314a49a4eb7b1d",
"a008def15796fba9a0d6fab04e8fd57089285d9fd505da5a83fe8aad57a3564d",
"eb7246eb8e26b0c48dd4f9c2a822a0f4d5c84138937195090932b61a2d756051",
"683211bd155c7b764e4b99ba263a151d81209be7a566a2bb1971dc1bbd3b715e",
"5468bceeb74ce35cb4173dcc9974bddac9e894a74bf3d44f9ca8b7554605c9ed",
"78ce6faa72264387284e647ba6938995735ec8c7d5c5a65737e55130f026307d",
"2754fc862d6bc0b7c3971046612d942563d181c187a391e180ed6b00f80e7e5b",
"f1725586a402c06aec818d1478a45aaa0dc16c7a9c4869d97c350336d16f8e43",
"6a359852238dc902aed19fbbf6a055f9abf21c1ca8915d1c4e27f50df2f290d9",
"9e4954853fca260cecf983f098e5204c68b2bdfebde91f1f7b25c10b566d50f8",
"3356de61b39647931ce8b2140b2bab837e0810c0ef515bbe92de0248040b8bdd",
"3e294d2fd339bb16a5403a86e3664947dd408c4d87a0066524f8a573ae53ca8e",
"21335073401a310cc9179fe3a77e9666710cfdf630dfd840f972c183a244b1ad",
"987096ef8a2853fea1a31b0ed5276503da291536f167bbf7f3f991c9f05d6d7f",
"7a78fbfec68c2b3ab6084f1f808321ba3b5ea47502c41115902013e648e76288",
"c12a2bcb002fd74b4d342f9b942c24c44cc46d5ed39201245a8b6f4162e7efce",
"8867bed93e89c93d0d8ac98b2443c5554799edb9190346946b12e03f13664450",
"9b61cd02adac4b18fbcc06237e7469b07e276faf6ec4ecb34b030c2e385892a0",
"0463223adf38df9a22a7fb07999a638fdd42d8437573e0bf19c43e013b14d673",
"9989500413fb756d8437912cc32be0730dbe1bfc6b5d2eef759e1456c239f905",
"17e2889fba01021d048a13fd0ba108ad31c38326295460c21e69c43fa8fbe515",
"6c6c253fe26a5b2abf440124e35dcaa39e891cd28274431ba49da5c11d89747d",
"9d065f84c0cba7b0ef86f5d2d155e6ce01178a8a33e194f9999b7497b1b2201b",
"5ffb8e1b6b629c0e34a013f9298ebb0759b98a3d24029916321d5eb4255b6735",
"3fc5f8553abd753ac47967c4c468cfd08e8cb9dee71b79e12d5adab205bc04d3",
"ff82c8b53aa53a9705200690b91c572e2e4918f1a88de5d923ac06fa4560fa19",
"4d4ab737e2fbb5af0fd590b4b7e8c6fe76d3a02a9791ef7fdacf601f9e50fad8",
"5eca50a04afaefe55659fb74810b42654e2268c1acca6e53801b9862db74a83a",
"d700fc10d457eeae4f02eb04d715a054837e68a2e2d010971382c5e1016dc99e",
"af321973db865bb33fbc50a4de67fc0e6808d812c6e4dfd9cbc2fd50275b1dfd",
"bbf923aa9246065f88c40c7d9bf61cccc0ff3fcff065a8cb2ff4cfbb62088f1e",
"268b948b5aab4bab0e5430ee49e3cff11776cf183df93b32159f9670ed541495",
"3d2e51508699f98f0f2bdbe7a45b673c687fe6420f466dc296d90b908d51d594",
"4d4fb5ff0afb8c04e6c6e03f51281b664576f985e5bc34a3a7ee310a1e821f47",
"9b12847f3d28bf8850ebc03f8d495a1ae8f9a2c86dbda295c90556619a3ee831",
"733c5427f55ceba01a0f6607ab0fd11832bbb27d7db17b570e7eb7b68a081d9a",
"4bc7982c4ee4078b2ada5340ae673f18d3b6a664b1f97e8d6799e6074cb5c39d",
"afa0f26dbf3e674630d1cd6499e86c14f316cd4f78c6ab73bb85b00aa9c50a57",
"c301f13372c8f0d9bc8186d874fa45fa33aede13e66f4187a3bd22ee41c95b2a",
"548a29f145187fc97689f8ae67944627723c315c163b0dbb88842e50c681d7ca",
"d0a1ffb8761b974cec4a3be8cbcb2e96a7090dcf465ffeac839aa4ca20c9a59e",
"faaf47af27e3de06e83f346fc6ccea0aabfc7520d82ffe90c48dfcd740c69caa",
"3eacaa768326d7dce80f6ee17ada199bebe7eb3c1a60b39b14e0a58bbac66fe4",
"7f5237e9f77a22c4a89624c7ac31cae797d8ac4144b02493890d54fee7399bcd",
"d84517802a434757c56ae8642bffb4d26e5ade0712053750215680f5896e579b",
"bdb96ad31ac6af123c7683c55775ee2138da0f8f011e3994d56a27270e692575",
"aab1b0caf13b9bd26a62cf8b3b20f9bfaa0e56f3ec42196a00fedf432e07d739",
"c230edd34ca5c8318bf4592ac056cde37519d395c0904c37ea1c650b8ad4a712",
"ce41c1698a8c042218bc586f0b9ec8d5bffa3dcbcea09bd59db9d0d92c3fc0b4",
"b9a537523bba2fcdae857d90d8a760de4f2139c9f90d986f747ce7d0ec0d173d",
"1a6e0aeff1dba7ba121fbeb33bf3162901495df3fcb4e4a40423e1c10edf0dca",
"21b419102da8fc0ba90484aec934bf55b7abcf75eedb39124e8d75e491f41a5e",
"2183e94758481d0f124fbd93c56ccaa45e7e545ceeb8d52848f98253f497b975",
"e3f98bfb9cbeb7563a139983602e50f616cb7ebb06c3295b8ee377328f051206",
"b5b9b84d1723994d06013606227fb5b91f9de8820b04cf848d1dccc23d054f39",
"07adfda9c5adc80881bb2a5220f6e3181e0c043b90fa115c4f183464022968e6",
"facdaf1ce758bdf04cdf1a1fa32a3564a608d4abc2481a286ffc178f86953ef0",
"efc83f01c8fb309df2c8866b8c7924cc8b6f0580afdde1d6e16e2b6107c2862c",
"52b4a076bcbbbdc3a1aefa3735816cf74993b1b8db202b01c883c58be7fad8bd",
"c6f7077f1699d50cf92a9652bfebffac05fc6842b9ee391089d959b8ad5d48fd",
"e7424ad457e512fdf4764a56bf6d428a06a13a1006af1fb8e0fe32f6d03265c7",
"27797bd4e5ee52db0a197668c92b9a3e7e237e1f9fa73a10c38d731c294cfc9a",
"7bdef7bdebb8721f77927d0e77c66059360fa62371fdf15f3add93923a613229",
"3335d373e6c1b5bc669b4b1220c08728ea8ce622e5a7cfeeb4c0001d91ded1de",
"645681b9d067b1a362c4bee8ddff987d2466d49905c26cb8fec5e6fb73af5c84",
"06b7819d7f1c7f5472118266ed7bca8785dceae09e36ea3a4af665c6d1d8327c",
"7a6b8c7de171955c214ded7e35cc782cd6dddfd141abb1929c632f69348e6f49",
"eb882b0bb659bf72235020a0b884c4a7d817e0af3903715736b146188b1d0868",
"2ae6c71323a225ecfa8cf655600ebbe12b1019ff36bf02726d82d095aab29729",
"c2f85a06279a7bfa7f2477a3cee907990231a13d17b54524738504bd12e0c86c",
"f1b911af1c7a56073e3b83ba7eaa681467040e0fbbdd265445aa80e65c274c22",
"a54c2ae6ec6ac06b4d7b45c483eab86ac226b8ecfa99163ef7cc000da9b40895",
"bbc73cae41502ddad7a4112586dcaf4422810d60aa4b57c637ccd1a746b07844",
"218238431393959d6c8617a3bd899303a96609b44a644e973891038a7de8622d",
"59cacbd83ad5c54ad91dacf51a49c06e0bef730ac0e7c235a6f6fa29b9230f02",
"ba80990666ef0b6f4ba5059347beb13242921e54669e680064ca755256a1e3a6",
"031db544f3158556508b321db59afd62c5bb1592021e5dfd9ff87dca0ad27d8c",
"ee85604f8ec6e4e24f8eaf2a624d042ebd431dae448fe11779adcfb6bb78575e",
"266ee74062e8dae0aeddfcd0f72725107598efaa80c1a7176d6ee6dd302bce4c",
"b83a28b7e4e5d20bd960c5faeb6625f95529166b8bdb045d42634a2f35919450",
"dbe0605a9c73172bad7523a327b236d55ea4b634e80e78a9013db791f8fd5b2c",
"1e53e900c3bbc5ead295215efe27b2c8d5fbd15fb3dd810da3063674cb7213b2",
"832a2b3cef4b1754c4a7572964a44db64d19edf627ec45179b519d0a5eae8199",
"4c800257a588a82849d049817c2bdaad984b25a45ad9f6dad66e47d3b47e3b2f",
"3743244390be53473a7e3b3b8d04dce83f6c9514b81a997fb3b123c072ef9f78",
"f96c3d76497074c4c83a7b3823380e77dc73d5a9494fd2e053e4a1453e17824b",
"d04ecf33a303a59852fdb681ed8b412201ba85d8d2199aec73cb62681d62aa90",
"0cca6201658d5d98239c1511ef402562ff7d72446fb201a8d1857c39e369c9fa",
"61066504617ee79387021e18c89fb79d1ddbc3e7bff19cf2298f40466f8715e9",
"7adb520c3ac7cb6dc8253508df0ce1d975da49fefda9b5c956744a049d230ace",
"7579076d9aff0a4cfdefa7e2045f2486c7e5d8bc63bfc6b45397233e1bbfcb19",
"9ec078ef9ca31e1bdbb97175dde1cb00bf9f7225e6f622ccc8d367302e220497",
"93e174736c4719f80627854aca8b67efd0b59558c8ece267a8eccbbd2e7c5535",
"e62f419a0e16607b96ff10ecb00249af7d4b69c7d121e4b04130c61cc998c32e",
"171ddd43dab1af0d1fb14029287152a4c89296890e0607cf5e7ba73c73fdf1a5",
"604e96e099936a104883958b040b47672e0f048c98ac793f37ffe4c720279eb2",
"7726c437ccf791f6ded97dbac1846e62019e5fbd24f42e9db2f640f231c3c09a",
"1bf9f239dca1636149bc2f3fc334077ae959ea9607cacf945ef8f8bb227dc5e1",
"fcd818454002a6c47a980393f0549ac6e629d28d5688114bb60d831b5c1832a7",
"56172b53f730750b40e63c501b16068dd96a245e7f0551675c0fec9817ee96e0",
"260d3a820b7f8de20f4972725999b1af88b0cc5554ca38f9681c8d657e043cc3",
"9ba8c688f091ca48de2b0f9bc998e3bc36a0092149f9201767da592849777f1c",
"61594d714aa94fe551f604123578c4a6592145f4228ad8601224b1b89ce401b0",
"416ca193aa5448b8cca1f09642807765cc0ee299609f972df0614cfb8ea2f2b1",
"9b6d95b76a01191a4c778185681ed7f3bced2fffa8e41516ec78240b213285f5",
"ee0304bae0d4679bb34347ce3b1b80482262b9812bd0c0d5e19a5e2445043b75",
"8027a1877f39e603dafc63279e004b4ed9df861d18ce81d9c43c7d7135da8f65",
"42b409ff9b261a985227b1ab92707e706777ac14de24654d7e05f0501b37e003",
"99097983b74c70800b182abc6f64046ab70407e9cabcd6cf570a38ada9ef75d5",
"de8ca7a6b3f7314e91921d4dc5e915fb7bc2bd32129ea6966322effa48050c4c",
"dcb302978215f54f33c3d2d7157ef69fd5058cf488fc43dd75c32b5dcaf47e7a",
"7c765d407d3a9d5ea117cb8b8699628560787fc084a0c76afaa449bfbd121d84",
"06639a386c9c1014217622ccbcf40908c4f1a0c33e23f8d6d68f4abf655f8f71",
"59f1b5faf29904fe94a6a042e2d82d80d68fc16ad7651eba90a8de39f63f8fe8",
"174398550d1468a41b98a09f496c38d3694feadef0f0073fd557610384bafb10",
"00d52016bd7e4aae8bf8eaa23f42276b649fe557483b5d7684702633dd0fd944",
"9a39bf837c868d61ed8cce6a4c7a0eb96f5e5bcc082ad6afdd5496cb614a23fb",
"74ffc51cc30150cf79b6cb316d3a15cf332ab29a38fec9eb484ab1551d6d1856",
"97c70a44366a6535c145b333f973ea86dfdc2d7a99da618c40c64705ad98e322",
"6c6e3e05e1c9d2aae0ed2431544aea411771dd9d81017539af0fd818b2389f28",
"23a2cf63ec81e65561acafc655898d2fd0ef190084653fa452413f75e5a3d5bc",
"f1f9b0996d4ff1bf75e79e4cc8577c89eb633e68415c7faf74cf17a07bf80bd8",
"e3aefda887252a72cee3578d33b2dcd90e9fe53b8bed6347ef5e26f74211adbb",
"6b4ec98f02e647e01440b473bbd92a7fae21e01b6aa6c65e32db94a36092272e",
"623ed218de81311783656783d6ce690b521a89c4dc09f28962e5bfd4fa549249",
"9ce71f1506ccf4b99f234af49bd6202be883a80f95a155c6e9a1c36fd7e780c7",
"139fcc6bb304b2879974c59cda61d86d7816ad4ac0f38ee7a724df488060e65d",
"0e8c41eb946657188ea6f2aac36c25e393fff4d4149a83679220d66595ff0faa",
"59ffbe1fc829decf90655438bd2df3a7b746ef4a04634d4ee9e280bb6ce5f14e",
"e4f695f05bb05b231255ccce3d471b8d79c64a65bccc014662d27f0f7e921092",
"39cc53c9e3f7d4980b21bea5ebc8a5b9cdf7fa6539430b5a826e8ad527168656",
"685fb49563864326e78df461468795b7e47849a27e713281cd8bb75c0547936d",
"05e4649832dfb8d1bfa81ea7cbf1c92c4f1cd5052bfc8d5465ba744aa6fa5eb8",
"e5cece49ae3fc2c81f50c8e7a93a5fb1e1585380c467e4822234b64a94add617",
"dda028cd1b806b4d494cc7f2789b6c2bd7e3c28ff6a267d03acc5ac6e69a05e0",
"046c436b2a525059867b40c81e469b6d83001442fc65312c87a7ce7abeb022ff",
"676ffea2ec31426a906d7795d7ebae2ba5e61f0b9fa815995b4a299dd085d510",
"15b5cf6cdf4fd1c02f28bcce0f197cafae4c8c7c66a3e2e23af9fe610875315e",
"c0fb5367cfcb803c5383f98e26524bed9176e6211588f53ec63fe6079cbfd3df",
"7ab00f596b0286b77f78af567ee1be2536feee41daee67bd872f1480b7aa65b9",
"e8d66519e43b1214ac68f9f2bdbc4386d41ac66b20c5a260b9b04102784074e9",
"e6618db6961dc7b91478e0fa78c4c1b6699009981526693bd5e273972550860c",
"b1e1185884a6d14bbfce3899cb53e8183adde642f264d0ff4f1745371e06134c",
"cae5b7ea348afefc4c102bb7b125c4928f114739a27b877c6bcfbe5a79280384",
"ecbe372132a9323b813eeb48f8dfcedaeca00e2887af181b063c6cfa13ed8ea1",
"52387c6b99cc42aac51916b08b7b51d2baddfc19f2ba08d82a48432849dbdfb2",
"3c39a7b53dec9ac85acf08b267637a9841e6df7b7b0f5e2ac56a8cf107de37da",
"f8e6c64342f1e052480630e27e1016dce35fc3a614e60434fef4aa2503328ca9",
"fd0266485777bd73e97c7c37f520c83c82e362fe4c25a6be93f3380083d4646b",
"433e80c14ff7b8e16e179ccec35f55833df7dd5a5a063d23117b4b01b6f97170",
"b7c6f6915cfa9a62fff6a1f02604de88c23c6c6c6d1b8f62c7cc10749f307e81",
"ddf03aca85ade039e6742d5bef3df352df199d0d31e22b9858e7eda85cb3bbbe",
"d36e8083fa7b36daee646cb8b3f99feaa3d89e5a396508741f003e21ac0b6bec",
"ec79b568bdea63ca6091f5b84b0c639c10a0919e175fa09a4de3154f82906f25",
"8cd2d0f8310f7009e94f50231870756cb39ba68f37506044910e2f71482b1788",
"0eef96197f5c6be3859b6817e6a5736685856c416e29a2925bd5a15b2a57c8b1",
"04918dfc36c93e7db6cc0d60f37e1522f1c36b64d3f4b424c532d7c595febbc5",
"c8383d81dd24406745b68409be40d6721c301029464067fcc50a25ddf9139549",
"3b3a42d34cf0a1402d18d536c9d2ac2eb1c6019a9153be57084c8165d192e325",
"da18e9860040f3bf493876fc16b1a912ae5a6f6fa8d5159c3de2b8233a0d9851",
"e3fc673fc5f99cc554d0ff47756795647d25cb6e6658f912d114ae6429d35d35",
"3aa5817273c3b2f94f491840e0472f049d0f10009e23de63006166bca9b36ea3",
"bbb5dda0e15567979f0543407bdc2033d6f0bbb30f72512a981cfdb2f09e2747",
"1096f6be0a4d7f0ecc2df4ed2c8683f143efc81eeba3ece6daadd2fca74c7ecc",
"d76726da1b64e8679d8b6e66facf551ba96f2612de5a171fac818ee85ce3e5fe",
"27487c9600b16b24a1bfb0519cfe4a5d1ad84959e3cce5d6d7a99d48660a1f78",
"5d3ab876c206a37ad3b094e20bfc3941df3fa21a15ac8ea76d6918473789669a",
"6b1b8dac34ffc61d464dfeef00e4a84a604e172ef6391fb629293d6f1666148c",
"6fb266012c3008303e54ae55140b46957e9978098401dda34f4d921a275bf8bb",
"53a91e3a64d1f658e983ac1e4f9e0c697f8f33e01d8debe439f4c1a92113f592",
"5082984480f3b27891840a2037512739149678efc2ac981ca8cd016d02304efd",
"7b849efa5604b58d50c419637b9873847dbf957081d526136c3a49b7357cd617",
"f53b9d91a8cd177fb4a1cf081a1b6d58759a381ef120a7c5a18c0e70cae80983",
"cfd7df62799a22e384a4ab5da8c4026c875b119d0f47c2716b20cdac9cc1f1a6",
"d83b5ef189df7e884627294b752969547814c3cfe38995cf207c040e03bbe7a4",
"96f652249b0946e1575d78a8bc7450123c8e64f1c56f6b2f93bc23fb249ed85a",
"d60bdad03468f5f8c85b1b10db977e310a5aafab33750dfadb37488b02bfc8d7",
"9839f160d893daae661c84168e07f46f0e1e9746feb8439a6d76738b4ad32eaa",
"453a656903a031395d450f318211a6ec54cd79049a851f92cd6702c65ff5f5bd",
"1634b87b5fcfd4a6c4ff2f2de17450ccce46f9abe0b02a71876c596ec165bfed",
"24480686b56234a240fd9827209b584847f3d4f9657f0d9a97aec5320a264acb",
"f4d1866e8599563c52ceeedf11c28b8567e465c6e9a91df92add535d57f02ab0",
"805b34f708837dfb3e7f05815ac5760564628b58d5a0ce839ccbb6ef3620fac3",
"659a74f6cfbc7c252c58d93452b9d9575e36c464aa6544c6375227c9166a6ed9",
"75d737c3472471029c44876b330d2284288a42779b591a2ed4daa1c6c07efaf7",
"dac1d8c5a9fe94f224e095b52577c33c2cc2b8f3a2d6ad9cbd46845af8c987f0",
"be7358c4fe50148cccafc02ea205d80145e253889aa3958daafa8637047c840e",
"30e8cbf1427c137fa60674a639431c19a9d6f4c07fd2959df83158e674fccbaa",
"7f573f55d875ce8edc528edf822949fd2ab9f9c65d914a40225663b0a697be07",
"781a1527055f74c1f70230f10384609b34548f8ab6a0a6caa74025827f9fdae5",
"d82a91e1013170b10ca7fa0ec800fd0dc6e9335b70c303dadba603fc36802b5f",
"a4237e420cdb0b3231d171fe879bcae37a2db7abf2f12a337b975337618c3ac2",
"7ff4d89f90845ac4d7a50a259163798e0f446e61d4c943cc89637beff567ad02",
"48dbb5e717a6221d64fd13ba12794bc28e5067ac1d7632ee9437d533772750df",
"efba340bd479176486e5a2281c97ac4a90fdcf86ec9c13a78c3182ab877cd19b",
"1021c8921548fa89abb4cc7e8668a3a8dcebae0a4c323ffeaf570438832d6993",
"c67cd3e1a83daa56cff16f635db2fdb9ed9619300298d4701a58e68e84098345",
"4ad6fa2d16e2a9b576c863b4cf7404a70d4dc320c0c447d10ad6ff58993eacc8",
"e568a76a4f8836be296d405eb41034260d55e2361e4b2ef88350a4003bbd5f9b",
"ebdee92945ef05283be0ac3de25787c81a6a58a10f568f9c6b61d9dd513adbad",
"6e8f3edfa28bfc8057d735794f76b697bcf18fb894a5a37a132693ebda31a464",
"576d23dc3db2056d208849462fee358cf9f0f3310a2c63cb6c267a4b9f5848f9",
"18905d0a5d623ab81a98ba98c582bd5f57f2506c6b808905fc599d5a0b229b08",
"a9046cc9175dc5a45fb93a2c890f9a8b18c707fa6d695771aab9300081d3e21a",
"7a69e5f62fcc20e81beea7461a945e6531f8c7944200d0b3cb4cc63556c44106",
"fd0266485777bd73e97c7c37f520c83c82e362fe4c25a6be93f3380083d4646b",
"4b29db7a76f3b4fbc0a4fffc092e41c14f1a1a975a462d87e82827af03719cb2",
"df1a6cb6c95a5bdd2a69e4fa921061da950fc0bb0b3529d04ca75e0c11f871df",
"08bfc00b7f72e015f45c326f486bec16e4d5236b70e44543f1c5e86a8e21c76a",
"1e908fbc1d131c17a87f32069f53f64f45c75f91a2f6d43f8aa6410974da5562",
"b3a737d014a7e75f44b0f5afbd752f9bcc2abe54f60dbbebc3681b6e16611967",
"d3052ca3e3d523b1ec80671eb1bba0517a2f522e195778dc83dd03a8d84a170e",
"b98ded4ceaea20790dbcb3c31400692009d34c7f9927c286835a99b7481a5c22",
"9e1e498420bc7c35f3e3a78d20045b4c8343986dae48739759bccb2a27e88c53",
"141d2053cb29535ad45aa9e865cdec492524f0ec0066496b98b7099daab5d658",
"8722c3843c85ddd6162a5cb506e1cb4d6ab0cafb966034f426e55a2ef89a345e",
"52dfd21724329920c5c95f5361464e468584136d30030eb29247a7fe6c2c6e36",
"d82a91e1013170b10ca7fa0ec800fd0dc6e9335b70c303dadba603fc36802b5f",
"6bbb7d71eaa2544215a877e136cd7f490f4625eb56459a0da856cc8296d5df30",
"1ebb28301aa1a48248d3723a0ea434bb7d4612ec920fa749583e3f41ce25849f",
"00000001505e7e48927046e9bbaa728b1f3b511227e2200c578d6e6bb0c77eb9",
"a01b5ba26421374250442e0d23f96e6a4bce429e0175cd0769ad8c585dd5a892",
"26d6a946675e603f8de4bf6f9cef442037b70c7eee170ff06ed7673fc34c98f1",
"1dd7992ea0ecbda7480ceed35748c3655691133c8c78af947fd24299db8f481f",
"cdee943cbb19c51ab847a66d5d774373aa9f63d287246bb59b0827fa5e637400",
"3d03c53608415b1d718c7786ee10bdb4e67bced32207e32880ee9e44301a19ec",
"170dc4045d6c06275b40bd39f68ca31dbb962094e9763ee460f8341bd40bebca",
"db1abbff170320730e5ace672ad7217161b8935afc1a896ae2fecf903c159932",
"b0ac2c26eabdb0e0a9b0d10fd1458ca73c575b19d65e13f0e7484cbee84038b3",
"c1e7fc21b4f9c199e6086e095639f0f16a4e4884544547ce8a653ed7b5b6c4a7",
"813c2662366a12f6337b951c048552fd3c4894e403cab701634dcd803786dc09",
"fd0bcf8cd1aee83fe75e6c0fdfc543845e5bc3f50d26d2d5e5c6d3fa521f98c0",
"45fd1955f590da87c1fd2edb99d17accf235ec3ebf0afe1d3306ade42693c6e9",
"27938497226683c701e2843c6db580e2f0e25f5a198f4c3397e3a0a27764215d",
"2321edfd415f9558605b4d7a7083c52624e8922ae86bb2ae359fbf829724111a",
"0461fcbecc4c3374439932d6b8f11269ccdb7cc973ad7a50ae362db135a474dd",
"1f8e182bf72d61cb78dcd6b96dd3be8b874b8881da6630757b6508970f67230c",
"c6603b0f1ccfec625d9c08b753e4f774eaf7d1cf2769223125b5fd4da728019e",
"296842eaaed9be5ae0668da09fe48aac0521c4af859ad547d93145e5ac34c17e",
"88f8707a45e825a13ed383332abe6e2f104ab44d877918be22550083a2b59e60",
"27a20b41a66b35d442302a50ca1baad72c2ed844c8d1224c9f6d50a12752084e",
"280e847ef0c82a2a7c4e877c91cd7567474c1431b815d27bbc1017e147d9d77c",
"ad9d42203fd2480ea2e5c4c64593a027708aebe2b02aa60bd7b1d666daa5b08d",
"bb0174ae21a6cac1a0a9c8b4ac6ebfda56ce51605c315b1824970bc275f7239a",
"edb470271297ac5a61f277f3cd14de54c67eb5ccd20ef0d9df29be18685bb004",
"9609b093450dd7e0afb389619acdaf2e6a0d6817c93552f3911e05b50ae73e3d",
"3e33fd7124f174fc535151937f8718634dd9d856143d4cefb5a10ddaf2f615c0",
"463555bb4b0f80fd1376fae628fabfaf7e5e31cd2741d80aa4d225c926bc287e",
"916cb5ff07d3b51cef7f6b6b7f5479b1001b401c0e82558ee1a22504c7d507c9",
"cc5f259f036683798e4a52071dbb97238702ffb6f0c85af6d273c8ddbe5c0afb",
"2ad91f1dca2dcd5fc89e7208d1e5059f0bac0870d63fc3bac21c7a9388fa18fd",
"8bf629b3d519a0f8a8390137a445c0eb2f5f2b4a8ed71151de898051e8006f13",
"94215f42a96335c87fcb9e881a0bbb62b9a795519e109cf5f9d2ef617681f622",
"bcbf9644d3f475d00eb9c6e467385ce16d4546c1a24222ccfa542bf776eaba95",
"ba5115c37b0f911e530ed6c487ccbd9b737da33fd4b88a9f590860378c06af62",
"609f186ca023d658c0fe019570472f59565c8be1dc163b1541fac9d90aa4e8af",
"4e5622b575cdbb4d5ded093e48c68cd3f724fad547142f0c1b0aec2f2b2a0b2e",
"4df7b43b3a4db4b99e3dbad6bd0f84226726efd63ae7e027f91acbd91b4dba48",
"bcbf9644d3f475d00eb9c6e467385ce16d4546c1a24222ccfa542bf776eaba95",
"3a06add309fd8419ea4d4e475e9c0dff5909c635d9769bf0728232f3a0683a84",
"d2384c4ac623eb9f15ae4cb32ee7a9b03e0202802d52f2395f2ee8f6433151aa",
"1d4cc828b657da8c7a101e8657365459b9dc74139bed5d35bd8295b00be2a1ae",
"76fcec0e0638351f1d0e0dc4ebaf6dd3d67404126d664547674070f3175273d9",
"6707c39e6c53ef945c5df29af78667dc941ed80094994bd264fd806a6e0a3230",
"80482e60178c2ce996da6d67577f56a2b2c47ccb1c84c81f2b7960637cb71b78",
"147784df719c09fad62bff0493a60b4f5dbbe8579e73f00d207350e8ffdfd65f",
"afc0295d2c6e0a1820c214c07312070a4070d52083163d7fe410fa02bf85d9d2",
"6a5e3cc17279cbdf051c06d96e3f843cdb296f351d8ca35a6a190c0ab90dbf9a",
"3b7fc823611f1aeaea63ee3bf69b25b8aa16ec6e81d1afc39026808fe194354f",
"d96fe9c5478d1bb36e9ec40cc678b0bf7ff67e017922a151f925640a8884f291",
"1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59",
"06dde95f0268ce40128bf73ca6e85567b8567688ea52f24dcd5734e77c50f2d9",
"f683e87035f7ad4f44e0b98cfbd9537e16455a92cd38cefc4cb31db7557f5ef2",
"036533caa872376946d4e4fdea4c1a0441eda38ca2d9d9417bb36006cbaabf58",
"7cc328a08ddb2afdf9f9be77beff4c83489ff979721827d628a542f32a247c0e",
"f240be2b684f85cc81566f2081386af81d7427ea86250c8bde6b7a8500c761ba",
"fb61b93d864e4f0eba766bb8556f2dc0262e8e985012e29ba28508dd52067d98",
"0e52122d1eb95cdd8ba5f65815f7d1c9125a8c14d82989eae52ab369eea6c7e4",
"04dcaf2552801937d1c20b69adf89646f21b53c17906271d22c7be9bcadb96c0",
"0ee827a36e8bb0cfc483cf1872781182c4a16c58acba3ae2d7b155e0370e93b8",
"adc14fa3ad590856dd8b80815d367f7c1e6735ad00fd98a86d002fbe9fb535e1",
"2bc9be7569515701581d5d765422a17ee3500d8e1f4e7aa53f6be86ae6ba274d",
"9a21569255d0a3a9e75f1de2e4c883c9be2e5615887f22b2ecf6b1813bcd587d",
"3f3ff7adb39159c42c0aa16d53c0483bfbfad57df22a9e9e9364a306741eb2cf",
"e9aa50decff01f2cec1ec2b2e0b34332cf9c92cafdac5a7cc0881a6d26b59854",
"c7eda660a6bc8270530e82b4a7712acdea2e31dc0a56f8dc955ac009efd97c86",
"787338757fc25d65cd929394d5e7713cf43638e8d259e8dcf5c73b834eb851f2",
"7e8ffe414a53ce18a85536eda74d3cbda0da6a98d4fe6c514948002472178c95",
"6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3",
"0c371f5ed95076613443e8331c4b60828ed67bcdefaa1698fb5ce9d7b3285ffb",
"daa41bedb68591363bf4407f687cb9789cc543ed024bb77c22d2c84d88f54153",
"12ee03d11684a125dd87be879c28190415be3f3b1eca6b4ed743bd74ffd880e6",
"00dfb20815a71e24572394efdfbf772e56a507921b8287201ab8937496ee8e6d",
"94a6a78a5aebbba40bd1aaa2234810132c2d8004bb9177616c413d3c0ddf320e"
];
// Batches of test pubkeys used as the test schedule below — presumably arrays of
// increasing size stepped by STEP_SIZE up to MAX_PUBKEYS (see generateTestPubkeyArrays,
// defined elsewhere in this file — TODO confirm).
const PUBKEY_ARRAYS = generateTestPubkeyArrays();
// Main testing function: walks every pubkey batch, probes each relay with it,
// and reports the largest batch each relay answered successfully.
async function testRelayPubkeyLimits() {
  console.log('Starting Nostr Relay Pubkey Filter Test');
  console.log(`Testing ${RELAYS.length} relays with up to ${MAX_PUBKEYS} pubkeys`);
  console.log(`Incrementing by ${STEP_SIZE} pubkeys per test, requesting ${EVENT_LIMIT} events per test\n`);

  // One stats record per relay; a relay is skipped once it has failed twice in a row.
  const results = Object.fromEntries(
    RELAYS.map((url) => [url, { maxPubkeys: 0, failures: 0, lastSuccess: 0 }])
  );

  // Try every batch size against every relay still in the running.
  for (const batch of PUBKEY_ARRAYS) {
    const batchSize = batch.length;
    console.log(`\nTesting with ${batchSize} pubkeys...`);

    for (const relayUrl of RELAYS) {
      const stats = results[relayUrl];
      if (stats.failures >= 2) {
        console.log(`  ${relayUrl}: Skipping (already failed 2 times)`);
        continue;
      }
      try {
        const succeeded = await testRelayWithPubkeys(relayUrl, batch);
        if (succeeded) {
          stats.maxPubkeys = batchSize;
          stats.lastSuccess = batchSize;
          stats.failures = 0; // a success clears the failure streak
          console.log(`  ${relayUrl}: ✓ Success`);
        } else {
          stats.failures += 1;
          console.log(`  ${relayUrl}: ✗ Failed (${stats.failures}/2)`);
        }
      } catch (error) {
        stats.failures += 1;
        console.log(`  ${relayUrl}: ✗ Error (${stats.failures}/2): ${error.message}`);
      }
    }
  }

  // Summary of the best result per relay.
  console.log('\n=== FINAL RESULTS ===');
  for (const relayUrl of RELAYS) {
    const { maxPubkeys, lastSuccess } = results[relayUrl];
    console.log(`${relayUrl}: ${maxPubkeys} pubkeys (last success: ${lastSuccess})`);
  }
}
// Test a single relay with a specific pubkey array.
// Opens a WebSocket, sends a REQ with an authors filter, and resolves true only
// if the relay answers with EOSE for our subscription. Resolves false on error,
// timeout, or premature close; never rejects.
// @param {string} relayUrl - WebSocket URL of the relay under test.
// @param {string[]} pubkeys - Hex pubkeys to place in the filter's `authors`.
// @returns {Promise<boolean>} true iff EOSE was received in time.
async function testRelayWithPubkeys(relayUrl, pubkeys) {
  return new Promise((resolve) => {
    const ws = new WebSocket(relayUrl);
    // Random per-request subscription id (slice replaces the deprecated substr).
    const subscriptionId = 'test_' + Math.random().toString(36).slice(2, 11);
    let receivedEOSE = false;
    let settled = false;
    let eoseTimeoutId;

    // Settle exactly once: clear BOTH timers, close the socket, resolve.
    // The original never cleared the overall 15s timer, which kept the event
    // loop alive after every test and could fire resolve/close a second time.
    const finish = (result) => {
      if (settled) return;
      settled = true;
      clearTimeout(eoseTimeoutId);
      clearTimeout(overallTimeoutId);
      ws.close();
      resolve(result);
    };

    // Overall connection timeout (covers connect + request + response).
    const overallTimeoutId = setTimeout(() => {
      if (!receivedEOSE) {
        finish(false);
      }
    }, 15000); // 15 second total timeout

    ws.on('open', () => {
      console.log(`Connected to ${relayUrl}`);
      // Send subscription request
      const filter = {
        kinds: [EVENT_KIND],
        authors: pubkeys,
        limit: EVENT_LIMIT
      };
      const messageString = JSON.stringify(['REQ', subscriptionId, filter]);
      const messageSize = Buffer.byteLength(messageString, 'utf8');
      console.log(`Sending request with ${pubkeys.length} pubkeys (${messageSize} bytes)`);
      ws.send(messageString);
      // Set timeout for EOSE response
      eoseTimeoutId = setTimeout(() => {
        if (!receivedEOSE) {
          console.log('Timeout waiting for EOSE');
          finish(false);
        }
      }, 10000); // 10 second timeout
    });

    ws.on('message', (data) => {
      try {
        const message = JSON.parse(data.toString());
        if (message[0] === 'EVENT' && message[1] === subscriptionId) {
          // Skip event content, just log that we received an event
          console.log(`Received EVENT for subscription ${subscriptionId}`);
        } else if (message[0] === 'EOSE' && message[1] === subscriptionId) {
          console.log(`Received EOSE: ${JSON.stringify(message)}`);
          receivedEOSE = true;
          finish(true);
        } else {
          console.log(`Received other message: ${JSON.stringify(message)}`);
        }
      } catch (e) {
        console.log(`Received non-JSON: ${data.toString()}`);
      }
    });

    ws.on('error', (error) => {
      console.log(`WebSocket error: ${error.message}`);
      finish(false);
    });

    // If the relay drops the connection before EOSE, fail fast instead of
    // waiting for the overall timeout. Harmless after finish(): settled guard.
    ws.on('close', () => finish(false));
  });
}
// Kick off the test run; surface any unexpected rejection on stderr.
testRelayPubkeyLimits().catch((err) => console.error(err));