Compare commits
8 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a5f92e4da3 | ||
|
|
64b9f28444 | ||
|
|
fe27b5e41a | ||
|
|
d0bf851e86 | ||
|
|
3da7b62a95 | ||
|
|
4f1fbee52c | ||
|
|
6592c37c6e | ||
|
|
deec021933 |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -3,4 +3,4 @@ logs/
|
|||||||
nostr_core_lib/
|
nostr_core_lib/
|
||||||
blobs/
|
blobs/
|
||||||
c-relay/
|
c-relay/
|
||||||
|
text_graph/
|
||||||
|
|||||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -1,6 +1,3 @@
|
|||||||
[submodule "blossom"]
|
[submodule "blossom"]
|
||||||
path = blossom
|
path = blossom
|
||||||
url = ssh://git@git.laantungir.net:222/laantungir/blossom.git
|
url = ssh://git@git.laantungir.net:222/laantungir/blossom.git
|
||||||
[submodule "nostr_core_lib"]
|
|
||||||
path = nostr_core_lib
|
|
||||||
url = ssh://git@git.laantungir.net:222/laantungir/nostr_core_lib.git
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
ADMIN_PRIVKEY='22cc83aa57928a2800234c939240c9a6f0f44a33ea3838a860ed38930b195afd'
|
ADMIN_PRIVKEY='22cc83aa57928a2800234c939240c9a6f0f44a33ea3838a860ed38930b195afd'
|
||||||
ADMIN_PUBKEY='8ff74724ed641b3c28e5a86d7c5cbc49c37638ace8c6c38935860e7a5eedde0e'
|
ADMIN_PUBKEY='8ff74724ed641b3c28e5a86d7c5cbc49c37638ace8c6c38935860e7a5eedde0e'
|
||||||
SERVER_PRIVKEY='c4e0d2ed7d36277d6698650f68a6e9199f91f3abb476a67f07303e81309c48f1'
|
SERVER_PRIVKEY='c4e0d2ed7d36277d6698650f68a6e9199f91f3abb476a67f07303e81309c48f1'
|
||||||
SERVER_PUBKEY='ebe82fbff0ff79b2973892eb285cafc767863e434f894838a548580266b70254'
|
SERVER_PUBKEY='52e366edfa4e9cc6a6d4653828e51ccf828a2f5a05227d7a768f33b5a198681a'
|
||||||
|
|||||||
131
Dockerfile.alpine-musl
Normal file
131
Dockerfile.alpine-musl
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
# Alpine-based MUSL static binary builder for Ginxsom
|
||||||
|
# Produces truly portable binaries with zero runtime dependencies
|
||||||
|
|
||||||
|
ARG DEBUG_BUILD=false
|
||||||
|
|
||||||
|
FROM alpine:3.19 AS builder
|
||||||
|
|
||||||
|
# Re-declare build argument in this stage
|
||||||
|
ARG DEBUG_BUILD=false
|
||||||
|
|
||||||
|
# Install build dependencies
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
build-base \
|
||||||
|
musl-dev \
|
||||||
|
git \
|
||||||
|
cmake \
|
||||||
|
pkgconfig \
|
||||||
|
autoconf \
|
||||||
|
automake \
|
||||||
|
libtool \
|
||||||
|
openssl-dev \
|
||||||
|
openssl-libs-static \
|
||||||
|
zlib-dev \
|
||||||
|
zlib-static \
|
||||||
|
curl-dev \
|
||||||
|
curl-static \
|
||||||
|
sqlite-dev \
|
||||||
|
sqlite-static \
|
||||||
|
fcgi-dev \
|
||||||
|
fcgi \
|
||||||
|
linux-headers \
|
||||||
|
wget \
|
||||||
|
bash \
|
||||||
|
nghttp2-dev \
|
||||||
|
nghttp2-static \
|
||||||
|
c-ares-dev \
|
||||||
|
c-ares-static \
|
||||||
|
libidn2-dev \
|
||||||
|
libidn2-static \
|
||||||
|
libunistring-dev \
|
||||||
|
libunistring-static \
|
||||||
|
libpsl-dev \
|
||||||
|
libpsl-static \
|
||||||
|
brotli-dev \
|
||||||
|
brotli-static
|
||||||
|
|
||||||
|
# Set working directory
|
||||||
|
WORKDIR /build
|
||||||
|
|
||||||
|
# Build libsecp256k1 static (cached layer - only rebuilds if Alpine version changes)
|
||||||
|
RUN cd /tmp && \
|
||||||
|
git clone https://github.com/bitcoin-core/secp256k1.git && \
|
||||||
|
cd secp256k1 && \
|
||||||
|
./autogen.sh && \
|
||||||
|
./configure --enable-static --disable-shared --prefix=/usr \
|
||||||
|
CFLAGS="-fPIC" && \
|
||||||
|
make -j$(nproc) && \
|
||||||
|
make install && \
|
||||||
|
rm -rf /tmp/secp256k1
|
||||||
|
|
||||||
|
# Copy only submodule configuration and git directory
|
||||||
|
COPY .gitmodules /build/.gitmodules
|
||||||
|
COPY .git /build/.git
|
||||||
|
|
||||||
|
# Initialize submodules (cached unless .gitmodules changes)
|
||||||
|
RUN git submodule update --init --recursive
|
||||||
|
|
||||||
|
# Copy nostr_core_lib source files (cached unless nostr_core_lib changes)
|
||||||
|
COPY nostr_core_lib /build/nostr_core_lib/
|
||||||
|
|
||||||
|
# Build nostr_core_lib with required NIPs (cached unless nostr_core_lib changes)
|
||||||
|
# Disable fortification in build.sh to prevent __*_chk symbol issues
|
||||||
|
# NIPs: 001(Basic), 006(Keys), 013(PoW), 017(DMs), 019(Bech32), 042(Auth), 044(Encryption), 059(Gift Wrap)
|
||||||
|
RUN cd nostr_core_lib && \
|
||||||
|
chmod +x build.sh && \
|
||||||
|
sed -i 's/CFLAGS="-Wall -Wextra -std=c99 -fPIC -O2"/CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"/' build.sh && \
|
||||||
|
rm -f *.o *.a 2>/dev/null || true && \
|
||||||
|
./build.sh --nips=1,6,13,17,19,42,44,59
|
||||||
|
|
||||||
|
# Copy web interface files for embedding
|
||||||
|
COPY api/ /build/api/
|
||||||
|
COPY scripts/embed_web_files.sh /build/scripts/
|
||||||
|
|
||||||
|
# Create src directory and embed web files into C headers
|
||||||
|
RUN mkdir -p src && \
|
||||||
|
chmod +x scripts/embed_web_files.sh && \
|
||||||
|
./scripts/embed_web_files.sh
|
||||||
|
|
||||||
|
# Copy Ginxsom source files LAST (only this layer rebuilds on source changes)
|
||||||
|
COPY src/ /build/src/
|
||||||
|
COPY include/ /build/include/
|
||||||
|
|
||||||
|
# Build Ginxsom with full static linking (only rebuilds when src/ changes)
|
||||||
|
# Disable fortification to avoid __*_chk symbols that don't exist in MUSL
|
||||||
|
# Use conditional compilation flags based on DEBUG_BUILD argument
|
||||||
|
RUN if [ "$DEBUG_BUILD" = "true" ]; then \
|
||||||
|
CFLAGS="-g -O0 -DDEBUG"; \
|
||||||
|
STRIP_CMD=""; \
|
||||||
|
echo "Building with DEBUG symbols enabled"; \
|
||||||
|
else \
|
||||||
|
CFLAGS="-O2"; \
|
||||||
|
STRIP_CMD="strip /build/ginxsom-fcgi_static"; \
|
||||||
|
echo "Building optimized production binary"; \
|
||||||
|
fi && \
|
||||||
|
gcc -static $CFLAGS -Wall -Wextra -std=gnu99 \
|
||||||
|
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
|
||||||
|
-I. -Iinclude -Inostr_core_lib -Inostr_core_lib/nostr_core \
|
||||||
|
-Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket \
|
||||||
|
src/main.c src/admin_api.c src/admin_auth.c src/admin_event.c \
|
||||||
|
src/admin_handlers.c src/admin_interface.c src/admin_commands.c \
|
||||||
|
src/bud04.c src/bud06.c src/bud08.c src/bud09.c \
|
||||||
|
src/request_validator.c src/relay_client.c \
|
||||||
|
nostr_core_lib/nostr_core/core_relay_pool.c \
|
||||||
|
-o /build/ginxsom-fcgi_static \
|
||||||
|
nostr_core_lib/libnostr_core_x64.a \
|
||||||
|
-lfcgi -lsqlite3 -lsecp256k1 -lssl -lcrypto -lcurl \
|
||||||
|
-lnghttp2 -lcares -lidn2 -lunistring -lpsl -lbrotlidec -lbrotlicommon \
|
||||||
|
-lz -lpthread -lm -ldl && \
|
||||||
|
eval "$STRIP_CMD"
|
||||||
|
|
||||||
|
# Verify it's truly static
|
||||||
|
RUN echo "=== Binary Information ===" && \
|
||||||
|
file /build/ginxsom-fcgi_static && \
|
||||||
|
ls -lh /build/ginxsom-fcgi_static && \
|
||||||
|
echo "=== Checking for dynamic dependencies ===" && \
|
||||||
|
(ldd /build/ginxsom-fcgi_static 2>&1 || echo "Binary is static") && \
|
||||||
|
echo "=== Build complete ==="
|
||||||
|
|
||||||
|
# Output stage - just the binary
|
||||||
|
FROM scratch AS output
|
||||||
|
COPY --from=builder /build/ginxsom-fcgi_static /ginxsom-fcgi_static
|
||||||
54
Makefile
54
Makefile
@@ -1,18 +1,31 @@
|
|||||||
# Ginxsom Blossom Server Makefile
|
# Ginxsom Blossom Server Makefile
|
||||||
|
|
||||||
CC = gcc
|
CC = gcc
|
||||||
CFLAGS = -Wall -Wextra -std=c99 -O2 -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson
|
CFLAGS = -Wall -Wextra -std=gnu99 -O2 -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson
|
||||||
LIBS = -lfcgi -lsqlite3 nostr_core_lib/libnostr_core_x64.a -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k1 -lssl -lcrypto -lcurl
|
LIBS = -lfcgi -lsqlite3 nostr_core_lib/libnostr_core_x64.a -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k1 -lssl -lcrypto -lcurl
|
||||||
SRCDIR = src
|
SRCDIR = src
|
||||||
BUILDDIR = build
|
BUILDDIR = build
|
||||||
TARGET = $(BUILDDIR)/ginxsom-fcgi
|
TARGET = $(BUILDDIR)/ginxsom-fcgi
|
||||||
|
|
||||||
# Source files
|
# Source files
|
||||||
SOURCES = $(SRCDIR)/main.c $(SRCDIR)/admin_api.c $(SRCDIR)/admin_auth.c $(SRCDIR)/admin_event.c $(SRCDIR)/admin_websocket.c $(SRCDIR)/admin_handlers.c $(SRCDIR)/bud04.c $(SRCDIR)/bud06.c $(SRCDIR)/bud08.c $(SRCDIR)/bud09.c $(SRCDIR)/request_validator.c
|
SOURCES = $(SRCDIR)/main.c $(SRCDIR)/admin_api.c $(SRCDIR)/admin_auth.c $(SRCDIR)/admin_event.c $(SRCDIR)/admin_handlers.c $(SRCDIR)/admin_interface.c $(SRCDIR)/bud04.c $(SRCDIR)/bud06.c $(SRCDIR)/bud08.c $(SRCDIR)/bud09.c $(SRCDIR)/request_validator.c $(SRCDIR)/relay_client.c $(SRCDIR)/admin_commands.c
|
||||||
OBJECTS = $(SOURCES:$(SRCDIR)/%.c=$(BUILDDIR)/%.o)
|
OBJECTS = $(SOURCES:$(SRCDIR)/%.c=$(BUILDDIR)/%.o)
|
||||||
|
|
||||||
|
# Embedded web interface files
|
||||||
|
EMBEDDED_HEADER = $(SRCDIR)/admin_interface_embedded.h
|
||||||
|
EMBED_SCRIPT = scripts/embed_web_files.sh
|
||||||
|
|
||||||
|
# Add core_relay_pool.c from nostr_core_lib
|
||||||
|
POOL_SRC = nostr_core_lib/nostr_core/core_relay_pool.c
|
||||||
|
POOL_OBJ = $(BUILDDIR)/core_relay_pool.o
|
||||||
|
|
||||||
# Default target
|
# Default target
|
||||||
all: $(TARGET)
|
all: $(EMBEDDED_HEADER) $(TARGET)
|
||||||
|
|
||||||
|
# Generate embedded web interface files
|
||||||
|
$(EMBEDDED_HEADER): $(EMBED_SCRIPT) api/*.html api/*.css api/*.js
|
||||||
|
@echo "Embedding web interface files..."
|
||||||
|
@$(EMBED_SCRIPT)
|
||||||
|
|
||||||
# Create build directory
|
# Create build directory
|
||||||
$(BUILDDIR):
|
$(BUILDDIR):
|
||||||
@@ -22,13 +35,26 @@ $(BUILDDIR):
|
|||||||
$(BUILDDIR)/%.o: $(SRCDIR)/%.c | $(BUILDDIR)
|
$(BUILDDIR)/%.o: $(SRCDIR)/%.c | $(BUILDDIR)
|
||||||
$(CC) $(CFLAGS) -c $< -o $@
|
$(CC) $(CFLAGS) -c $< -o $@
|
||||||
|
|
||||||
# Link final executable
|
# Compile core_relay_pool.o (needs src/ for request_validator.h)
|
||||||
$(TARGET): $(OBJECTS)
|
$(POOL_OBJ): $(POOL_SRC) | $(BUILDDIR)
|
||||||
$(CC) $(OBJECTS) $(LIBS) -o $@
|
$(CC) $(CFLAGS) -I$(SRCDIR) -c $< -o $@
|
||||||
|
|
||||||
# Clean build files
|
# Link final executable
|
||||||
|
$(TARGET): $(OBJECTS) $(POOL_OBJ)
|
||||||
|
$(CC) $(OBJECTS) $(POOL_OBJ) $(LIBS) -o $@
|
||||||
|
|
||||||
|
# Clean build files (preserves static binaries)
|
||||||
clean:
|
clean:
|
||||||
rm -rf $(BUILDDIR)
|
rm -rf $(BUILDDIR)
|
||||||
|
rm -f $(EMBEDDED_HEADER)
|
||||||
|
@echo "Note: Static binaries (ginxsom-fcgi_static_*) are preserved."
|
||||||
|
@echo "To remove everything: make clean-all"
|
||||||
|
|
||||||
|
# Clean everything including static binaries
|
||||||
|
clean-all:
|
||||||
|
rm -rf $(BUILDDIR)
|
||||||
|
rm -f $(EMBEDDED_HEADER)
|
||||||
|
@echo "✓ All build artifacts removed"
|
||||||
|
|
||||||
# Install (copy to system location)
|
# Install (copy to system location)
|
||||||
install: $(TARGET)
|
install: $(TARGET)
|
||||||
@@ -47,4 +73,16 @@ run: $(TARGET)
|
|||||||
debug: CFLAGS += -g -DDEBUG
|
debug: CFLAGS += -g -DDEBUG
|
||||||
debug: $(TARGET)
|
debug: $(TARGET)
|
||||||
|
|
||||||
.PHONY: all clean install uninstall run debug
|
# Rebuild embedded files
|
||||||
|
embed:
|
||||||
|
@$(EMBED_SCRIPT)
|
||||||
|
|
||||||
|
# Static MUSL build via Docker
|
||||||
|
static:
|
||||||
|
./build_static.sh
|
||||||
|
|
||||||
|
# Static MUSL build with debug symbols
|
||||||
|
static-debug:
|
||||||
|
./build_static.sh --debug
|
||||||
|
|
||||||
|
.PHONY: all clean clean-all install uninstall run debug embed static static-debug
|
||||||
|
|||||||
126
README.md
126
README.md
@@ -369,6 +369,132 @@ Error responses include specific error codes:
|
|||||||
- `no_blob_hashes`: Missing valid SHA-256 hashes
|
- `no_blob_hashes`: Missing valid SHA-256 hashes
|
||||||
- `unsupported_media_type`: Non-JSON Content-Type
|
- `unsupported_media_type`: Non-JSON Content-Type
|
||||||
|
|
||||||
|
## Administrator API
|
||||||
|
|
||||||
|
Ginxsom uses an **event-based administration system** where all configuration and management commands are sent as signed Nostr events using the admin private key. All admin commands use **NIP-44 encrypted command arrays** for security.
|
||||||
|
|
||||||
|
### Authentication
|
||||||
|
|
||||||
|
All admin commands require signing with the admin private key configured in the server. The admin public key is stored in the database and checked against incoming Kind 23458 events.
|
||||||
|
|
||||||
|
### Event Structure
|
||||||
|
|
||||||
|
**Admin Command Event (Kind 23458):**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "event_id",
|
||||||
|
"pubkey": "admin_public_key",
|
||||||
|
"created_at": 1234587890,
|
||||||
|
"kind": 23458,
|
||||||
|
"content": "NIP44_ENCRYPTED_COMMAND_ARRAY",
|
||||||
|
"tags": [
|
||||||
|
["p", "blossom_server_pubkey"]
|
||||||
|
],
|
||||||
|
"sig": "event_signature"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The `content` field contains a NIP-44 encrypted JSON array representing the command.
|
||||||
|
|
||||||
|
**Admin Response Event (Kind 23459):**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "response_event_id",
|
||||||
|
"pubkey": "blossom_server_pubkey",
|
||||||
|
"created_at": 1234587890,
|
||||||
|
"kind": 23459,
|
||||||
|
"content": "NIP44_ENCRYPTED_RESPONSE_OBJECT",
|
||||||
|
"tags": [
|
||||||
|
["p", "admin_public_key"],
|
||||||
|
["e", "request_event_id"]
|
||||||
|
],
|
||||||
|
"sig": "response_event_signature"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The `content` field contains a NIP-44 encrypted JSON response object.
|
||||||
|
|
||||||
|
### Admin Commands
|
||||||
|
|
||||||
|
All commands are sent as NIP-44 encrypted JSON arrays in the event content:
|
||||||
|
|
||||||
|
| Command Type | Command Format | Description |
|
||||||
|
|--------------|----------------|-------------|
|
||||||
|
| **Configuration Management** |
|
||||||
|
| `config_query` | `["config_query", "all"]` | Query all configuration parameters |
|
||||||
|
| `config_update` | `["config_update", [{"key": "max_file_size", "value": "209715200", ...}]]` | Update configuration parameters |
|
||||||
|
| **Statistics & Monitoring** |
|
||||||
|
| `stats_query` | `["stats_query"]` | Get comprehensive database and storage statistics |
|
||||||
|
| `system_status` | `["system_command", "system_status"]` | Get system status and health metrics |
|
||||||
|
| **Blossom Operations** |
|
||||||
|
| `blob_list` | `["blob_list", "all"]` or `["blob_list", "pubkey", "abc123..."]` | List blobs with filtering |
|
||||||
|
| `storage_stats` | `["storage_stats"]` | Get detailed storage statistics |
|
||||||
|
| `mirror_status` | `["mirror_status"]` | Get status of mirroring operations |
|
||||||
|
| `report_query` | `["report_query", "all"]` | Query content reports (BUD-09) |
|
||||||
|
| **Database Queries** |
|
||||||
|
| `sql_query` | `["sql_query", "SELECT * FROM blobs LIMIT 10"]` | Execute read-only SQL query |
|
||||||
|
|
||||||
|
### Configuration Categories
|
||||||
|
|
||||||
|
**Blossom Settings:**
|
||||||
|
- `max_file_size`: Maximum upload size in bytes
|
||||||
|
- `storage_path`: Blob storage directory path
|
||||||
|
- `cdn_origin`: CDN URL for blob descriptors
|
||||||
|
- `enable_nip94`: Include NIP-94 tags in responses
|
||||||
|
|
||||||
|
**Relay Client Settings:**
|
||||||
|
- `enable_relay_connect`: Enable relay client functionality
|
||||||
|
- `kind_0_content`: Profile metadata JSON
|
||||||
|
- `kind_10002_tags`: Relay list JSON array
|
||||||
|
|
||||||
|
**Authentication Settings:**
|
||||||
|
- `auth_enabled`: Enable auth rules system
|
||||||
|
- `require_auth_upload`: Require authentication for uploads
|
||||||
|
- `require_auth_delete`: Require authentication for deletes
|
||||||
|
|
||||||
|
**Limits:**
|
||||||
|
- `max_blobs_per_user`: Per-user blob limit
|
||||||
|
- `rate_limit_uploads`: Uploads per minute
|
||||||
|
- `max_total_storage`: Total storage limit in bytes
|
||||||
|
|
||||||
|
### Response Format
|
||||||
|
|
||||||
|
All admin commands return signed EVENT responses via the relay connection. Responses use NIP-44 encrypted JSON content with structured data.
|
||||||
|
|
||||||
|
**Success Response Example:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "stats_query",
|
||||||
|
"timestamp": 1234587890,
|
||||||
|
"database_size_bytes": 1048576,
|
||||||
|
"storage_size_bytes": 10737418240,
|
||||||
|
"total_blobs": 1543,
|
||||||
|
"blob_types": [
|
||||||
|
{"type": "image/jpeg", "count": 856, "size_bytes": 5368709120}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Error Response Example:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "config_update",
|
||||||
|
"status": "error",
|
||||||
|
"error": "invalid configuration value",
|
||||||
|
"timestamp": 1234587890
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Security Features
|
||||||
|
|
||||||
|
- **Cryptographic Authentication**: Only admin pubkey can send commands
|
||||||
|
- **NIP-44 Encryption**: All commands and responses are encrypted
|
||||||
|
- **Command Logging**: All admin actions logged to database
|
||||||
|
- **SQL Safety**: Only SELECT statements allowed with timeout and row limits
|
||||||
|
- **Rate Limiting**: Prevents admin command flooding
|
||||||
|
|
||||||
|
For detailed command specifications and examples, see [`docs/ADMIN_COMMANDS_PLAN.md`](docs/ADMIN_COMMANDS_PLAN.md).
|
||||||
|
|
||||||
## File Storage
|
## File Storage
|
||||||
|
|
||||||
### Current (Flat) Structure
|
### Current (Flat) Structure
|
||||||
|
|||||||
@@ -41,7 +41,10 @@ INSERT OR IGNORE INTO config (key, value, description) VALUES
|
|||||||
('admin_enabled', 'true', 'Whether admin API is enabled'),
|
('admin_enabled', 'true', 'Whether admin API is enabled'),
|
||||||
('nip42_require_auth', 'false', 'Enable NIP-42 challenge/response authentication'),
|
('nip42_require_auth', 'false', 'Enable NIP-42 challenge/response authentication'),
|
||||||
('nip42_challenge_timeout', '600', 'NIP-42 challenge timeout in seconds'),
|
('nip42_challenge_timeout', '600', 'NIP-42 challenge timeout in seconds'),
|
||||||
('nip42_time_tolerance', '300', 'NIP-42 timestamp tolerance in seconds');
|
('nip42_time_tolerance', '300', 'NIP-42 timestamp tolerance in seconds'),
|
||||||
|
('enable_relay_connect', 'true', 'Enable Nostr relay client connections'),
|
||||||
|
('kind_0_content', '{"name":"Ginxsom Blossom Server","about":"A Blossom media server for storing and serving files on Nostr","picture":"","nip05":""}', 'Kind 0 profile metadata content (JSON)'),
|
||||||
|
('kind_10002_tags', '["wss://relay.laantungir.net"]', 'Kind 10002 relay list - JSON array of relay URLs');
|
||||||
|
|
||||||
-- Authentication rules table for whitelist/blacklist functionality
|
-- Authentication rules table for whitelist/blacklist functionality
|
||||||
CREATE TABLE IF NOT EXISTS auth_rules (
|
CREATE TABLE IF NOT EXISTS auth_rules (
|
||||||
|
|||||||
58
api/embedded.html
Normal file
58
api/embedded.html
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
|
<title>Embedded NOSTR_LOGIN_LITE</title>
|
||||||
|
<style>
|
||||||
|
body {
|
||||||
|
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
||||||
|
margin: 0;
|
||||||
|
padding: 40px;
|
||||||
|
background: white;
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
align-items: center;
|
||||||
|
min-height: 100vh;
|
||||||
|
}
|
||||||
|
|
||||||
|
.container {
|
||||||
|
max-width: 400px;
|
||||||
|
width: 100%;
|
||||||
|
}
|
||||||
|
|
||||||
|
#login-container {
|
||||||
|
/* No styling - let embedded modal blend seamlessly */
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div class="container">
|
||||||
|
<div id="login-container"></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<script src="../lite/nostr.bundle.js"></script>
|
||||||
|
<script src="../lite/nostr-lite.js"></script>
|
||||||
|
|
||||||
|
<script>
|
||||||
|
document.addEventListener('DOMContentLoaded', async () => {
|
||||||
|
await window.NOSTR_LOGIN_LITE.init({
|
||||||
|
theme:'default',
|
||||||
|
methods: {
|
||||||
|
extension: true,
|
||||||
|
local: true,
|
||||||
|
seedphrase: true,
|
||||||
|
readonly: true,
|
||||||
|
connect: true,
|
||||||
|
remote: true,
|
||||||
|
otp: true
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
window.NOSTR_LOGIN_LITE.embed('#login-container', {
|
||||||
|
seamless: true
|
||||||
|
});
|
||||||
|
});
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
1310
api/index.css
Normal file
1310
api/index.css
Normal file
File diff suppressed because it is too large
Load Diff
425
api/index.html
Normal file
425
api/index.html
Normal file
@@ -0,0 +1,425 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
|
<title>Blossom Admin</title>
|
||||||
|
<link rel="stylesheet" href="/api/index.css">
|
||||||
|
</head>
|
||||||
|
|
||||||
|
<body>
|
||||||
|
<!-- Side Navigation Menu -->
|
||||||
|
<nav class="side-nav" id="side-nav">
|
||||||
|
<ul class="nav-menu">
|
||||||
|
<li><button class="nav-item" data-page="statistics">Statistics</button></li>
|
||||||
|
<li><button class="nav-item" data-page="configuration">Configuration</button></li>
|
||||||
|
<li><button class="nav-item" data-page="authorization">Authorization</button></li>
|
||||||
|
<li><button class="nav-item" data-page="relay-events">Blossom Events</button></li>
|
||||||
|
<li><button class="nav-item" data-page="database">Database Query</button></li>
|
||||||
|
</ul>
|
||||||
|
<div class="nav-footer">
|
||||||
|
<button class="nav-footer-btn" id="nav-dark-mode-btn">DARK MODE</button>
|
||||||
|
<button class="nav-footer-btn" id="nav-logout-btn">LOGOUT</button>
|
||||||
|
</div>
|
||||||
|
</nav>
|
||||||
|
|
||||||
|
<!-- Side Navigation Overlay -->
|
||||||
|
<div class="side-nav-overlay" id="side-nav-overlay"></div>
|
||||||
|
|
||||||
|
<!-- Header with title and profile display -->
|
||||||
|
<div class="section">
|
||||||
|
|
||||||
|
<div class="header-content">
|
||||||
|
<div class="header-title clickable" id="header-title">
|
||||||
|
<span class="relay-letter" data-letter="B">B</span>
|
||||||
|
<span class="relay-letter" data-letter="L">L</span>
|
||||||
|
<span class="relay-letter" data-letter="O">O</span>
|
||||||
|
<span class="relay-letter" data-letter="S">S</span>
|
||||||
|
<span class="relay-letter" data-letter="S">S</span>
|
||||||
|
<span class="relay-letter" data-letter="O">O</span>
|
||||||
|
<span class="relay-letter" data-letter="M">M</span>
|
||||||
|
</div>
|
||||||
|
<div class="relay-info">
|
||||||
|
<div id="relay-name" class="relay-name">Blossom</div>
|
||||||
|
<div id="relay-description" class="relay-description">Loading...</div>
|
||||||
|
<div id="relay-pubkey-container" class="relay-pubkey-container">
|
||||||
|
<div id="relay-pubkey" class="relay-pubkey">Loading...</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="profile-area" id="profile-area" style="display: none;">
|
||||||
|
<div class="admin-label">admin</div>
|
||||||
|
<div class="profile-container">
|
||||||
|
<img id="header-user-image" class="header-user-image" alt="Profile" style="display: none;">
|
||||||
|
<span id="header-user-name" class="header-user-name">Loading...</span>
|
||||||
|
</div>
|
||||||
|
<!-- Logout dropdown -->
|
||||||
|
<!-- Dropdown menu removed - buttons moved to sidebar -->
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Login Modal Overlay -->
|
||||||
|
<div id="login-modal" class="login-modal-overlay" style="display: none;">
|
||||||
|
<div class="login-modal-content">
|
||||||
|
<div id="login-modal-container"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- DATABASE STATISTICS Section -->
|
||||||
|
<!-- Subscribe to kind 24567 events to receive real-time monitoring data -->
|
||||||
|
<div class="section flex-section" id="databaseStatisticsSection" style="display: none;">
|
||||||
|
<div class="section-header">
|
||||||
|
DATABASE STATISTICS
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Blob Rate Graph Container -->
|
||||||
|
<div id="event-rate-chart"></div>
|
||||||
|
|
||||||
|
<!-- Database Overview Table -->
|
||||||
|
<div class="input-group">
|
||||||
|
<div class="config-table-container">
|
||||||
|
<table class="config-table" id="stats-overview-table">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Metric</th>
|
||||||
|
<th>Value</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="stats-overview-table-body">
|
||||||
|
<tr>
|
||||||
|
<td>Database Size</td>
|
||||||
|
<td id="db-size">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>Total Blobs</td>
|
||||||
|
<td id="total-events">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>Total Size</td>
|
||||||
|
<td id="total-size">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>Process ID</td>
|
||||||
|
<td id="process-id">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>Memory Usage</td>
|
||||||
|
<td id="memory-usage">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>CPU Core</td>
|
||||||
|
<td id="cpu-core">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>CPU Usage</td>
|
||||||
|
<td id="cpu-usage">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>Oldest Blob</td>
|
||||||
|
<td id="oldest-event">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>Newest Blob</td>
|
||||||
|
<td id="newest-event">-</td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Blob Type Distribution Table -->
|
||||||
|
<div class="input-group">
|
||||||
|
<label>Blob Type Distribution:</label>
|
||||||
|
<div class="config-table-container">
|
||||||
|
<table class="config-table" id="stats-kinds-table">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Blob Type</th>
|
||||||
|
<th>Count</th>
|
||||||
|
<th>Percentage</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="stats-kinds-table-body">
|
||||||
|
<tr>
|
||||||
|
<td colspan="3" style="text-align: center; font-style: italic;">No data loaded</td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Time-based Statistics Table -->
|
||||||
|
<div class="input-group">
|
||||||
|
<label>Time-based Statistics:</label>
|
||||||
|
<div class="config-table-container">
|
||||||
|
<table class="config-table" id="stats-time-table">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Period</th>
|
||||||
|
<th>Blobs</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="stats-time-table-body">
|
||||||
|
<tr>
|
||||||
|
<td>Last 24 Hours</td>
|
||||||
|
<td id="events-24h">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>Last 7 Days</td>
|
||||||
|
<td id="events-7d">-</td>
|
||||||
|
</tr>
|
||||||
|
<tr>
|
||||||
|
<td>Last 30 Days</td>
|
||||||
|
<td id="events-30d">-</td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Top Pubkeys Table -->
|
||||||
|
<div class="input-group">
|
||||||
|
<label>Top Pubkeys by Event Count:</label>
|
||||||
|
<div class="config-table-container">
|
||||||
|
<table class="config-table" id="stats-pubkeys-table">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Rank</th>
|
||||||
|
<th>Pubkey</th>
|
||||||
|
<th>Blob Count</th>
|
||||||
|
<th>Total Size</th>
|
||||||
|
<th>Percentage</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="stats-pubkeys-table-body">
|
||||||
|
<tr>
|
||||||
|
<td colspan="4" style="text-align: center; font-style: italic;">No data loaded</td>
|
||||||
|
</tr>
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
<!-- Testing Section -->
|
||||||
|
<div id="div_config" class="section flex-section" style="display: none;">
|
||||||
|
<div class="section-header">
|
||||||
|
BLOSSOM CONFIGURATION
|
||||||
|
</div>
|
||||||
|
<div id="config-display" class="hidden">
|
||||||
|
<div class="config-table-container">
|
||||||
|
<table class="config-table" id="config-table">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Parameter</th>
|
||||||
|
<th>Value</th>
|
||||||
|
<th>Actions</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="config-table-body">
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="inline-buttons">
|
||||||
|
<button type="button" id="fetch-config-btn">REFRESH</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Auth Rules Management - Moved after configuration -->
|
||||||
|
<div class="section flex-section" id="authRulesSection" style="display: none;">
|
||||||
|
<div class="section-header">
|
||||||
|
AUTH RULES MANAGEMENT
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Auth Rules Table -->
|
||||||
|
<div id="authRulesTableContainer" style="display: none;">
|
||||||
|
<table class="config-table" id="authRulesTable">
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Rule Type</th>
|
||||||
|
<th>Pattern Type</th>
|
||||||
|
<th>Pattern Value</th>
|
||||||
|
<th>Status</th>
|
||||||
|
<th>Actions</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody id="authRulesTableBody">
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Simplified Auth Rule Input Section -->
|
||||||
|
<div id="authRuleInputSections" style="display: block;">
|
||||||
|
|
||||||
|
<!-- Combined Pubkey Auth Rule Section -->
|
||||||
|
|
||||||
|
|
||||||
|
<div class="input-group">
|
||||||
|
<label for="authRulePubkey">Pubkey (nsec or hex):</label>
|
||||||
|
<input type="text" id="authRulePubkey" placeholder="nsec1... or 64-character hex pubkey">
|
||||||
|
|
||||||
|
</div>
|
||||||
|
<div id="whitelistWarning" class="warning-box" style="display: none;">
|
||||||
|
<strong>⚠️ WARNING:</strong> Adding whitelist rules changes relay behavior to whitelist-only
|
||||||
|
mode.
|
||||||
|
Only whitelisted users will be able to interact with the relay.
|
||||||
|
</div>
|
||||||
|
<div class="inline-buttons">
|
||||||
|
<button type="button" id="addWhitelistBtn" onclick="addWhitelistRule()">ADD TO
|
||||||
|
WHITELIST</button>
|
||||||
|
<button type="button" id="addBlacklistBtn" onclick="addBlacklistRule()">ADD TO
|
||||||
|
BLACKLIST</button>
|
||||||
|
<button type="button" id="refreshAuthRulesBtn">REFRESH</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<!-- BLOSSOM EVENTS Section -->
|
||||||
|
<div class="section" id="relayEventsSection" style="display: none;">
|
||||||
|
<div class="section-header">
|
||||||
|
BLOSSOM EVENTS MANAGEMENT
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Kind 0: User Metadata -->
|
||||||
|
<div class="input-group">
|
||||||
|
<h3>Kind 0: User Metadata</h3>
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="kind0-name">Name:</label>
|
||||||
|
<input type="text" id="kind0-name" placeholder="Blossom Server Name">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="kind0-about">About:</label>
|
||||||
|
<textarea id="kind0-about" rows="3" placeholder="Blossom Server Description"></textarea>
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="kind0-picture">Picture URL:</label>
|
||||||
|
<input type="url" id="kind0-picture" placeholder="https://example.com/logo.png">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="kind0-banner">Banner URL:</label>
|
||||||
|
<input type="url" id="kind0-banner" placeholder="https://example.com/banner.png">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="kind0-nip05">NIP-05:</label>
|
||||||
|
<input type="text" id="kind0-nip05" placeholder="blossom@example.com">
|
||||||
|
</div>
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="kind0-website">Website:</label>
|
||||||
|
<input type="url" id="kind0-website" placeholder="https://example.com">
|
||||||
|
</div>
|
||||||
|
<div class="inline-buttons">
|
||||||
|
<button type="button" id="submit-kind0-btn">UPDATE METADATA</button>
|
||||||
|
</div>
|
||||||
|
<div id="kind0-status" class="status-message"></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Kind 10050: DM Blossom List -->
|
||||||
|
<div class="input-group">
|
||||||
|
<h3>Kind 10050: DM Blossom List</h3>
|
||||||
|
<div class="form-group">
|
||||||
|
<label for="kind10050-relays">Blossom URLs (one per line):</label>
|
||||||
|
<textarea id="kind10050-relays" rows="4" placeholder="https://blossom1.com https://blossom2.com"></textarea>
|
||||||
|
</div>
|
||||||
|
<div class="inline-buttons">
|
||||||
|
<button type="button" id="submit-kind10050-btn">UPDATE DM BLOSSOM SERVERS</button>
|
||||||
|
</div>
|
||||||
|
<div id="kind10050-status" class="status-message"></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Kind 10002: Blossom List -->
|
||||||
|
<div class="input-group">
|
||||||
|
<h3>Kind 10002: Blossom Server List</h3>
|
||||||
|
<div id="kind10002-relay-entries">
|
||||||
|
<!-- Dynamic blossom server entries will be added here -->
|
||||||
|
</div>
|
||||||
|
<div class="inline-buttons">
|
||||||
|
<button type="button" id="add-relay-entry-btn">ADD SERVER</button>
|
||||||
|
<button type="button" id="submit-kind10002-btn">UPDATE SERVERS</button>
|
||||||
|
</div>
|
||||||
|
<div id="kind10002-status" class="status-message"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- SQL QUERY Section -->
|
||||||
|
<div class="section" id="sqlQuerySection" style="display: none;">
|
||||||
|
<div class="section-header">
|
||||||
|
<h2>SQL QUERY CONSOLE</h2>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Query Selector -->
|
||||||
|
<div class="input-group">
|
||||||
|
<label for="query-dropdown">Quick Queries & History:</label>
|
||||||
|
<select id="query-dropdown" onchange="loadSelectedQuery()">
|
||||||
|
<option value="">-- Select a query --</option>
|
||||||
|
<optgroup label="Common Queries">
|
||||||
|
<option value="recent_events">Recent Events</option>
|
||||||
|
<option value="event_stats">Event Statistics</option>
|
||||||
|
<option value="subscriptions">Active Subscriptions</option>
|
||||||
|
<option value="top_pubkeys">Top Pubkeys</option>
|
||||||
|
<option value="event_kinds">Event Kinds Distribution</option>
|
||||||
|
<option value="time_stats">Time-based Statistics</option>
|
||||||
|
</optgroup>
|
||||||
|
<optgroup label="Query History" id="history-group">
|
||||||
|
<!-- Dynamically populated from localStorage -->
|
||||||
|
</optgroup>
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Query Editor -->
|
||||||
|
<div class="input-group">
|
||||||
|
<label for="sql-input">SQL Query:</label>
|
||||||
|
<textarea id="sql-input" rows="5" placeholder="SELECT * FROM events LIMIT 10"></textarea>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Query Actions -->
|
||||||
|
<div class="input-group">
|
||||||
|
<div class="inline-buttons">
|
||||||
|
<button type="button" id="execute-sql-btn">EXECUTE QUERY</button>
|
||||||
|
<button type="button" id="clear-sql-btn">CLEAR</button>
|
||||||
|
<button type="button" id="clear-history-btn">CLEAR HISTORY</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Query Results -->
|
||||||
|
<div class="input-group">
|
||||||
|
<label>Query Results:</label>
|
||||||
|
<div id="query-info" class="info-box"></div>
|
||||||
|
<div id="query-table" class="config-table-container"></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<!-- Load the official nostr-tools bundle first -->
|
||||||
|
<!-- <script src="https://laantungir.net/nostr-login-lite/nostr.bundle.js"></script> -->
|
||||||
|
<script src="/api/nostr.bundle.js"></script>
|
||||||
|
|
||||||
|
<!-- Load NOSTR_LOGIN_LITE main library -->
|
||||||
|
<!-- <script src="https://laantungir.net/nostr-login-lite/nostr-lite.js"></script> -->
|
||||||
|
<script src="/api/nostr-lite.js"></script>
|
||||||
|
<!-- Load text_graph library -->
|
||||||
|
<script src="/api/text_graph.js"></script>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
<script src="/api/index.js"></script>
|
||||||
|
</body>
|
||||||
|
|
||||||
|
</html>
|
||||||
5832
api/index.js
Normal file
5832
api/index.js
Normal file
File diff suppressed because it is too large
Load Diff
4282
api/nostr-lite.js
Normal file
4282
api/nostr-lite.js
Normal file
File diff suppressed because it is too large
Load Diff
11534
api/nostr.bundle.js
Normal file
11534
api/nostr.bundle.js
Normal file
File diff suppressed because it is too large
Load Diff
463
api/text_graph.js
Normal file
463
api/text_graph.js
Normal file
@@ -0,0 +1,463 @@
|
|||||||
|
/**
|
||||||
|
* ASCIIBarChart - A dynamic ASCII-based vertical bar chart renderer
|
||||||
|
*
|
||||||
|
* Creates real-time animated bar charts using monospaced characters (X)
|
||||||
|
* with automatic scaling, labels, and responsive font sizing.
|
||||||
|
*/
|
||||||
|
class ASCIIBarChart {
|
||||||
|
/**
|
||||||
|
* Create a new ASCII bar chart
|
||||||
|
* @param {string} containerId - The ID of the HTML element to render the chart in
|
||||||
|
* @param {Object} options - Configuration options
|
||||||
|
* @param {number} [options.maxHeight=20] - Maximum height of the chart in rows
|
||||||
|
* @param {number} [options.maxDataPoints=30] - Maximum number of data columns before scrolling
|
||||||
|
* @param {string} [options.title=''] - Chart title (displayed centered at top)
|
||||||
|
* @param {string} [options.xAxisLabel=''] - X-axis label (displayed centered at bottom)
|
||||||
|
* @param {string} [options.yAxisLabel=''] - Y-axis label (displayed vertically on left)
|
||||||
|
* @param {boolean} [options.autoFitWidth=true] - Automatically adjust font size to fit container width
|
||||||
|
* @param {boolean} [options.useBinMode=false] - Enable time bin mode for data aggregation
|
||||||
|
* @param {number} [options.binDuration=10000] - Duration of each time bin in milliseconds (10 seconds default)
|
||||||
|
* @param {string} [options.xAxisLabelFormat='elapsed'] - X-axis label format: 'elapsed', 'bins', 'timestamps', 'ranges'
|
||||||
|
* @param {boolean} [options.debug=false] - Enable debug logging
|
||||||
|
*/
|
||||||
|
constructor(containerId, options = {}) {
|
||||||
|
this.container = document.getElementById(containerId);
|
||||||
|
this.data = [];
|
||||||
|
this.maxHeight = options.maxHeight || 20;
|
||||||
|
this.maxDataPoints = options.maxDataPoints || 30;
|
||||||
|
this.totalDataPoints = 0; // Track total number of data points added
|
||||||
|
this.title = options.title || '';
|
||||||
|
this.xAxisLabel = options.xAxisLabel || '';
|
||||||
|
this.yAxisLabel = options.yAxisLabel || '';
|
||||||
|
this.autoFitWidth = options.autoFitWidth !== false; // Default to true
|
||||||
|
this.debug = options.debug || false; // Debug logging option
|
||||||
|
|
||||||
|
// Time bin configuration
|
||||||
|
this.useBinMode = options.useBinMode !== false; // Default to true
|
||||||
|
this.binDuration = options.binDuration || 4000; // 4 seconds default
|
||||||
|
this.xAxisLabelFormat = options.xAxisLabelFormat || 'elapsed';
|
||||||
|
|
||||||
|
// Time bin data structures
|
||||||
|
this.bins = [];
|
||||||
|
this.currentBinIndex = -1;
|
||||||
|
this.binStartTime = null;
|
||||||
|
this.binCheckInterval = null;
|
||||||
|
this.chartStartTime = Date.now();
|
||||||
|
|
||||||
|
// Set up resize observer if auto-fit is enabled
|
||||||
|
if (this.autoFitWidth) {
|
||||||
|
this.resizeObserver = new ResizeObserver(() => {
|
||||||
|
this.adjustFontSize();
|
||||||
|
});
|
||||||
|
this.resizeObserver.observe(this.container);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize first bin if bin mode is enabled
|
||||||
|
if (this.useBinMode) {
|
||||||
|
this.initializeBins();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add a new data point to the chart
|
||||||
|
* @param {number} value - The numeric value to add
|
||||||
|
*/
|
||||||
|
addValue(value) {
|
||||||
|
// Time bin mode: add value to current active bin count
|
||||||
|
this.checkBinRotation(); // Ensure we have an active bin
|
||||||
|
this.bins[this.currentBinIndex].count += value; // Changed from ++ to += value
|
||||||
|
this.totalDataPoints++;
|
||||||
|
|
||||||
|
this.render();
|
||||||
|
this.updateInfo();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Clear all data from the chart
|
||||||
|
*/
|
||||||
|
clear() {
|
||||||
|
this.data = [];
|
||||||
|
this.totalDataPoints = 0;
|
||||||
|
|
||||||
|
if (this.useBinMode) {
|
||||||
|
this.bins = [];
|
||||||
|
this.currentBinIndex = -1;
|
||||||
|
this.binStartTime = null;
|
||||||
|
this.initializeBins();
|
||||||
|
}
|
||||||
|
|
||||||
|
this.render();
|
||||||
|
this.updateInfo();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculate the width of the chart in characters
|
||||||
|
* @returns {number} The chart width in characters
|
||||||
|
* @private
|
||||||
|
*/
|
||||||
|
getChartWidth() {
|
||||||
|
let dataLength = this.maxDataPoints; // Always use maxDataPoints for consistent width
|
||||||
|
|
||||||
|
if (dataLength === 0) return 50; // Default width for empty chart
|
||||||
|
|
||||||
|
const yAxisPadding = this.yAxisLabel ? 2 : 0;
|
||||||
|
const yAxisNumbers = 3; // Width of Y-axis numbers
|
||||||
|
const separator = 1; // The '|' character
|
||||||
|
// const dataWidth = dataLength * 2; // Each column is 2 characters wide // TEMP: commented for no-space test
|
||||||
|
const dataWidth = dataLength; // Each column is 1 character wide // TEMP: adjusted for no-space columns
|
||||||
|
const padding = 1; // Extra padding
|
||||||
|
|
||||||
|
const totalWidth = yAxisPadding + yAxisNumbers + separator + dataWidth + padding;
|
||||||
|
|
||||||
|
// Only log when width changes
|
||||||
|
if (this.debug && this.lastChartWidth !== totalWidth) {
|
||||||
|
console.log('getChartWidth changed:', { dataLength, totalWidth, previous: this.lastChartWidth });
|
||||||
|
this.lastChartWidth = totalWidth;
|
||||||
|
}
|
||||||
|
|
||||||
|
return totalWidth;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Adjust font size to fit container width
|
||||||
|
* @private
|
||||||
|
*/
|
||||||
|
adjustFontSize() {
|
||||||
|
if (!this.autoFitWidth) return;
|
||||||
|
|
||||||
|
const containerWidth = this.container.clientWidth;
|
||||||
|
const chartWidth = this.getChartWidth();
|
||||||
|
|
||||||
|
if (chartWidth === 0) return;
|
||||||
|
|
||||||
|
// Calculate optimal font size
|
||||||
|
// For monospace fonts, character width is approximately 0.6 * font size
|
||||||
|
// Use a slightly smaller ratio to fit more content
|
||||||
|
const charWidthRatio = 0.7;
|
||||||
|
const padding = 30; // Reduce padding to fit more content
|
||||||
|
const availableWidth = containerWidth - padding;
|
||||||
|
const optimalFontSize = Math.floor((availableWidth / chartWidth) / charWidthRatio);
|
||||||
|
|
||||||
|
// Set reasonable bounds (min 4px, max 20px)
|
||||||
|
const fontSize = Math.max(4, Math.min(20, optimalFontSize));
|
||||||
|
|
||||||
|
// Only log when font size changes
|
||||||
|
if (this.debug && this.lastFontSize !== fontSize) {
|
||||||
|
console.log('fontSize changed:', { containerWidth, chartWidth, fontSize, previous: this.lastFontSize });
|
||||||
|
this.lastFontSize = fontSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.container.style.fontSize = fontSize + 'px';
|
||||||
|
this.container.style.lineHeight = '1.0';
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Render the chart to the container
|
||||||
|
* @private
|
||||||
|
*/
|
||||||
|
render() {
|
||||||
|
let dataToRender = [];
|
||||||
|
let maxValue = 0;
|
||||||
|
let minValue = 0;
|
||||||
|
let valueRange = 0;
|
||||||
|
|
||||||
|
if (this.useBinMode) {
|
||||||
|
// Bin mode: render bin counts
|
||||||
|
if (this.bins.length === 0) {
|
||||||
|
this.container.textContent = 'No data yet. Click Start to begin.';
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// Always create a fixed-length array filled with 0s, then overlay actual bin data
|
||||||
|
dataToRender = new Array(this.maxDataPoints).fill(0);
|
||||||
|
|
||||||
|
// Overlay actual bin data (most recent bins, reversed for left-to-right display)
|
||||||
|
const startIndex = Math.max(0, this.bins.length - this.maxDataPoints);
|
||||||
|
const recentBins = this.bins.slice(startIndex);
|
||||||
|
|
||||||
|
// Reverse the bins so most recent is on the left, and overlay onto the fixed array
|
||||||
|
recentBins.reverse().forEach((bin, index) => {
|
||||||
|
if (index < this.maxDataPoints) {
|
||||||
|
dataToRender[index] = bin.count;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
if (this.debug) {
|
||||||
|
console.log('render() dataToRender:', dataToRender, 'bins length:', this.bins.length);
|
||||||
|
}
|
||||||
|
maxValue = Math.max(...dataToRender);
|
||||||
|
minValue = Math.min(...dataToRender);
|
||||||
|
valueRange = maxValue - minValue;
|
||||||
|
} else {
|
||||||
|
// Legacy mode: render individual values
|
||||||
|
if (this.data.length === 0) {
|
||||||
|
this.container.textContent = 'No data yet. Click Start to begin.';
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
dataToRender = this.data;
|
||||||
|
maxValue = Math.max(...this.data);
|
||||||
|
minValue = Math.min(...this.data);
|
||||||
|
valueRange = maxValue - minValue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let output = '';
|
||||||
|
const scale = this.maxHeight;
|
||||||
|
|
||||||
|
// Calculate scaling factor: each X represents at least 1 count
|
||||||
|
const maxCount = Math.max(...dataToRender);
|
||||||
|
const scaleFactor = Math.max(1, Math.ceil(maxCount / scale)); // 1 X = scaleFactor counts
|
||||||
|
const scaledMax = Math.ceil(maxCount / scaleFactor) * scaleFactor;
|
||||||
|
|
||||||
|
// Calculate Y-axis label width (for vertical text)
|
||||||
|
const yLabelWidth = this.yAxisLabel ? 2 : 0;
|
||||||
|
const yAxisPadding = this.yAxisLabel ? ' ' : '';
|
||||||
|
|
||||||
|
// Add title if provided (centered)
|
||||||
|
if (this.title) {
|
||||||
|
// const chartWidth = 4 + this.maxDataPoints * 2; // Y-axis numbers + data columns // TEMP: commented for no-space test
|
||||||
|
const chartWidth = 4 + this.maxDataPoints; // Y-axis numbers + data columns // TEMP: adjusted for no-space columns
|
||||||
|
const titlePadding = Math.floor((chartWidth - this.title.length) / 2);
|
||||||
|
output += yAxisPadding + ' '.repeat(Math.max(0, titlePadding)) + this.title + '\n\n';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Draw from top to bottom
|
||||||
|
for (let row = scale; row > 0; row--) {
|
||||||
|
let line = '';
|
||||||
|
|
||||||
|
// Add vertical Y-axis label character
|
||||||
|
if (this.yAxisLabel) {
|
||||||
|
const L = this.yAxisLabel.length;
|
||||||
|
const startRow = Math.floor((scale - L) / 2) + 1;
|
||||||
|
const relativeRow = scale - row + 1; // 1 at top, scale at bottom
|
||||||
|
if (relativeRow >= startRow && relativeRow < startRow + L) {
|
||||||
|
const labelIndex = relativeRow - startRow;
|
||||||
|
line += this.yAxisLabel[labelIndex] + ' ';
|
||||||
|
} else {
|
||||||
|
line += ' ';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate the actual count value this row represents (1 at bottom, increasing upward)
|
||||||
|
const rowCount = (row - 1) * scaleFactor + 1;
|
||||||
|
|
||||||
|
// Add Y-axis label (show actual count values)
|
||||||
|
line += String(rowCount).padStart(3, ' ') + ' |';
|
||||||
|
|
||||||
|
// Draw each column
|
||||||
|
for (let i = 0; i < dataToRender.length; i++) {
|
||||||
|
const count = dataToRender[i];
|
||||||
|
const scaledHeight = Math.ceil(count / scaleFactor);
|
||||||
|
|
||||||
|
if (scaledHeight >= row) {
|
||||||
|
// line += ' X'; // TEMP: commented out space between columns
|
||||||
|
line += 'X'; // TEMP: no space between columns
|
||||||
|
} else {
|
||||||
|
// line += ' '; // TEMP: commented out space between columns
|
||||||
|
line += ' '; // TEMP: single space for empty columns
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
output += line + '\n';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Draw X-axis
|
||||||
|
// output += yAxisPadding + ' +' + '-'.repeat(this.maxDataPoints * 2) + '\n'; // TEMP: commented out for no-space test
|
||||||
|
output += yAxisPadding + ' +' + '-'.repeat(this.maxDataPoints) + '\n'; // TEMP: back to original length
|
||||||
|
|
||||||
|
// Draw X-axis labels based on mode and format
|
||||||
|
let xAxisLabels = yAxisPadding + ' '; // Initial padding to align with X-axis
|
||||||
|
|
||||||
|
// Determine label interval (every 5 columns)
|
||||||
|
const labelInterval = 5;
|
||||||
|
|
||||||
|
// Generate all labels first and store in array
|
||||||
|
let labels = [];
|
||||||
|
for (let i = 0; i < this.maxDataPoints; i++) {
|
||||||
|
if (i % labelInterval === 0) {
|
||||||
|
let label = '';
|
||||||
|
if (this.useBinMode) {
|
||||||
|
// For bin mode, show labels for all possible positions
|
||||||
|
// i=0 is leftmost (most recent), i=maxDataPoints-1 is rightmost (oldest)
|
||||||
|
const elapsedSec = (i * this.binDuration) / 1000;
|
||||||
|
// Format with appropriate precision for sub-second bins
|
||||||
|
if (this.binDuration < 1000) {
|
||||||
|
// Show decimal seconds for sub-second bins
|
||||||
|
label = elapsedSec.toFixed(1) + 's';
|
||||||
|
} else {
|
||||||
|
// Show whole seconds for 1+ second bins
|
||||||
|
label = String(Math.round(elapsedSec)) + 's';
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// For legacy mode, show data point numbers
|
||||||
|
const startIndex = Math.max(1, this.totalDataPoints - this.maxDataPoints + 1);
|
||||||
|
label = String(startIndex + i);
|
||||||
|
}
|
||||||
|
labels.push(label);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the label string with calculated spacing
|
||||||
|
for (let i = 0; i < labels.length; i++) {
|
||||||
|
const label = labels[i];
|
||||||
|
xAxisLabels += label;
|
||||||
|
|
||||||
|
// Add spacing: labelInterval - label.length (except for last label)
|
||||||
|
if (i < labels.length - 1) {
|
||||||
|
const spacing = labelInterval - label.length;
|
||||||
|
xAxisLabels += ' '.repeat(spacing);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the label line extends to match the X-axis dash line length
|
||||||
|
// The dash line is this.maxDataPoints characters long, starting after " +"
|
||||||
|
const dashLineLength = this.maxDataPoints;
|
||||||
|
const minLabelLineLength = yAxisPadding.length + 4 + dashLineLength; // 4 for " "
|
||||||
|
if (xAxisLabels.length < minLabelLineLength) {
|
||||||
|
xAxisLabels += ' '.repeat(minLabelLineLength - xAxisLabels.length);
|
||||||
|
}
|
||||||
|
output += xAxisLabels + '\n';
|
||||||
|
|
||||||
|
// Add X-axis label if provided
|
||||||
|
if (this.xAxisLabel) {
|
||||||
|
// const labelPadding = Math.floor((this.maxDataPoints * 2 - this.xAxisLabel.length) / 2); // TEMP: commented for no-space test
|
||||||
|
const labelPadding = Math.floor((this.maxDataPoints - this.xAxisLabel.length) / 2); // TEMP: adjusted for no-space columns
|
||||||
|
output += '\n' + yAxisPadding + ' ' + ' '.repeat(Math.max(0, labelPadding)) + this.xAxisLabel + '\n';
|
||||||
|
}
|
||||||
|
|
||||||
|
this.container.textContent = output;
|
||||||
|
|
||||||
|
// Adjust font size to fit width (only once at initialization)
|
||||||
|
if (this.autoFitWidth) {
|
||||||
|
this.adjustFontSize();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update the external info display
|
||||||
|
if (this.useBinMode) {
|
||||||
|
const binCounts = this.bins.map(bin => bin.count);
|
||||||
|
const scaleFactor = Math.max(1, Math.ceil(maxValue / scale));
|
||||||
|
document.getElementById('values').textContent = `[${dataToRender.join(', ')}]`;
|
||||||
|
document.getElementById('max-value').textContent = maxValue;
|
||||||
|
document.getElementById('scale').textContent = `Min: ${minValue}, Max: ${maxValue}, 1X=${scaleFactor} counts`;
|
||||||
|
} else {
|
||||||
|
document.getElementById('values').textContent = `[${this.data.join(', ')}]`;
|
||||||
|
document.getElementById('max-value').textContent = maxValue;
|
||||||
|
document.getElementById('scale').textContent = `Min: ${minValue}, Max: ${maxValue}, Height: ${scale}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update the info display
|
||||||
|
* @private
|
||||||
|
*/
|
||||||
|
updateInfo() {
|
||||||
|
if (this.useBinMode) {
|
||||||
|
const totalCount = this.bins.reduce((sum, bin) => sum + bin.count, 0);
|
||||||
|
document.getElementById('count').textContent = totalCount;
|
||||||
|
} else {
|
||||||
|
document.getElementById('count').textContent = this.data.length;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Initialize the bin system
|
||||||
|
* @private
|
||||||
|
*/
|
||||||
|
initializeBins() {
|
||||||
|
this.bins = [];
|
||||||
|
this.currentBinIndex = -1;
|
||||||
|
this.binStartTime = null;
|
||||||
|
this.chartStartTime = Date.now();
|
||||||
|
|
||||||
|
// Create first bin
|
||||||
|
this.rotateBin();
|
||||||
|
|
||||||
|
// Set up automatic bin rotation check
|
||||||
|
this.binCheckInterval = setInterval(() => {
|
||||||
|
this.checkBinRotation();
|
||||||
|
}, 100); // Check every 100ms for responsiveness
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if current bin should rotate and create new bin if needed
|
||||||
|
* @private
|
||||||
|
*/
|
||||||
|
checkBinRotation() {
|
||||||
|
if (!this.useBinMode || !this.binStartTime) return;
|
||||||
|
|
||||||
|
const now = Date.now();
|
||||||
|
if ((now - this.binStartTime) >= this.binDuration) {
|
||||||
|
this.rotateBin();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Rotate to a new bin, finalizing the current one
|
||||||
|
*/
|
||||||
|
rotateBin() {
|
||||||
|
// Finalize current bin if it exists
|
||||||
|
if (this.currentBinIndex >= 0) {
|
||||||
|
this.bins[this.currentBinIndex].isActive = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create new bin
|
||||||
|
const newBin = {
|
||||||
|
startTime: Date.now(),
|
||||||
|
count: 0,
|
||||||
|
isActive: true
|
||||||
|
};
|
||||||
|
|
||||||
|
this.bins.push(newBin);
|
||||||
|
this.currentBinIndex = this.bins.length - 1;
|
||||||
|
this.binStartTime = newBin.startTime;
|
||||||
|
|
||||||
|
// Keep only the most recent bins
|
||||||
|
if (this.bins.length > this.maxDataPoints) {
|
||||||
|
this.bins.shift();
|
||||||
|
this.currentBinIndex--;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure currentBinIndex points to the last bin (the active one)
|
||||||
|
this.currentBinIndex = this.bins.length - 1;
|
||||||
|
|
||||||
|
// Force a render to update the display immediately
|
||||||
|
this.render();
|
||||||
|
this.updateInfo();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Format X-axis label for a bin based on the configured format
|
||||||
|
* @param {number} binIndex - Index of the bin
|
||||||
|
* @returns {string} Formatted label
|
||||||
|
* @private
|
||||||
|
*/
|
||||||
|
formatBinLabel(binIndex) {
|
||||||
|
const bin = this.bins[binIndex];
|
||||||
|
if (!bin) return ' ';
|
||||||
|
|
||||||
|
switch (this.xAxisLabelFormat) {
|
||||||
|
case 'bins':
|
||||||
|
return String(binIndex + 1).padStart(2, ' ');
|
||||||
|
|
||||||
|
case 'timestamps':
|
||||||
|
const time = new Date(bin.startTime);
|
||||||
|
return time.toLocaleTimeString('en-US', {
|
||||||
|
hour12: false,
|
||||||
|
hour: '2-digit',
|
||||||
|
minute: '2-digit',
|
||||||
|
second: '2-digit'
|
||||||
|
}).replace(/:/g, '');
|
||||||
|
|
||||||
|
case 'ranges':
|
||||||
|
const startSec = Math.floor((bin.startTime - this.chartStartTime) / 1000);
|
||||||
|
const endSec = startSec + Math.floor(this.binDuration / 1000);
|
||||||
|
return `${startSec}-${endSec}`;
|
||||||
|
|
||||||
|
case 'elapsed':
|
||||||
|
default:
|
||||||
|
// For elapsed time, always show time relative to the first bin (index 0)
|
||||||
|
// This keeps the leftmost label as 0s and increases to the right
|
||||||
|
const firstBinTime = this.bins[0] ? this.bins[0].startTime : this.chartStartTime;
|
||||||
|
const elapsedSec = Math.floor((bin.startTime - firstBinTime) / 1000);
|
||||||
|
return String(elapsedSec).padStart(2, ' ') + 's';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
BIN
build/admin_commands.o
Normal file
BIN
build/admin_commands.o
Normal file
Binary file not shown.
Binary file not shown.
BIN
build/admin_interface.o
Normal file
BIN
build/admin_interface.o
Normal file
Binary file not shown.
Binary file not shown.
BIN
build/core_relay_pool.o
Normal file
BIN
build/core_relay_pool.o
Normal file
Binary file not shown.
Binary file not shown.
BIN
build/main.o
BIN
build/main.o
Binary file not shown.
BIN
build/relay_client.o
Normal file
BIN
build/relay_client.o
Normal file
Binary file not shown.
Binary file not shown.
223
build_static.sh
Executable file
223
build_static.sh
Executable file
@@ -0,0 +1,223 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Build fully static MUSL binaries for Ginxsom using Alpine Docker
|
||||||
|
# Produces truly portable binaries with zero runtime dependencies
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
|
BUILD_DIR="$SCRIPT_DIR/build"
|
||||||
|
DOCKERFILE="$SCRIPT_DIR/Dockerfile.alpine-musl"
|
||||||
|
|
||||||
|
# Parse command line arguments
|
||||||
|
DEBUG_BUILD=false
|
||||||
|
if [[ "$1" == "--debug" ]]; then
|
||||||
|
DEBUG_BUILD=true
|
||||||
|
echo "=========================================="
|
||||||
|
echo "Ginxsom MUSL Static Binary Builder (DEBUG MODE)"
|
||||||
|
echo "=========================================="
|
||||||
|
else
|
||||||
|
echo "=========================================="
|
||||||
|
echo "Ginxsom MUSL Static Binary Builder (PRODUCTION MODE)"
|
||||||
|
echo "=========================================="
|
||||||
|
fi
|
||||||
|
echo "Project directory: $SCRIPT_DIR"
|
||||||
|
echo "Build directory: $BUILD_DIR"
|
||||||
|
echo "Debug build: $DEBUG_BUILD"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Create build directory
|
||||||
|
mkdir -p "$BUILD_DIR"
|
||||||
|
|
||||||
|
# Check if Docker is available
|
||||||
|
if ! command -v docker &> /dev/null; then
|
||||||
|
echo "ERROR: Docker is not installed or not in PATH"
|
||||||
|
echo ""
|
||||||
|
echo "Docker is required to build MUSL static binaries."
|
||||||
|
echo "Please install Docker:"
|
||||||
|
echo " - Ubuntu/Debian: sudo apt install docker.io"
|
||||||
|
echo " - Or visit: https://docs.docker.com/engine/install/"
|
||||||
|
echo ""
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if Docker daemon is running
|
||||||
|
if ! docker info &> /dev/null; then
|
||||||
|
echo "ERROR: Docker daemon is not running or user not in docker group"
|
||||||
|
echo ""
|
||||||
|
echo "Please start Docker and ensure you're in the docker group:"
|
||||||
|
echo " - sudo systemctl start docker"
|
||||||
|
echo " - sudo usermod -aG docker $USER && newgrp docker"
|
||||||
|
echo " - Or start Docker Desktop"
|
||||||
|
echo ""
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
DOCKER_CMD="docker"
|
||||||
|
|
||||||
|
echo "✓ Docker is available and running"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Detect architecture
|
||||||
|
ARCH=$(uname -m)
|
||||||
|
case "$ARCH" in
|
||||||
|
x86_64)
|
||||||
|
PLATFORM="linux/amd64"
|
||||||
|
OUTPUT_NAME="ginxsom-fcgi_static_x86_64"
|
||||||
|
;;
|
||||||
|
aarch64|arm64)
|
||||||
|
PLATFORM="linux/arm64"
|
||||||
|
OUTPUT_NAME="ginxsom-fcgi_static_arm64"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "WARNING: Unknown architecture: $ARCH"
|
||||||
|
echo "Defaulting to linux/amd64"
|
||||||
|
PLATFORM="linux/amd64"
|
||||||
|
OUTPUT_NAME="ginxsom-fcgi_static_${ARCH}"
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
echo "Building for platform: $PLATFORM"
|
||||||
|
echo "Output binary: $OUTPUT_NAME"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Build the Docker image
|
||||||
|
echo "=========================================="
|
||||||
|
echo "Step 1: Building Alpine Docker image"
|
||||||
|
echo "=========================================="
|
||||||
|
echo "This will:"
|
||||||
|
echo " - Use Alpine Linux (native MUSL)"
|
||||||
|
echo " - Build all dependencies statically"
|
||||||
|
echo " - Compile Ginxsom with full static linking"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
$DOCKER_CMD build \
|
||||||
|
--platform "$PLATFORM" \
|
||||||
|
--build-arg DEBUG_BUILD=$DEBUG_BUILD \
|
||||||
|
-f "$DOCKERFILE" \
|
||||||
|
-t ginxsom-musl-builder:latest \
|
||||||
|
--progress=plain \
|
||||||
|
. || {
|
||||||
|
echo ""
|
||||||
|
echo "ERROR: Docker build failed"
|
||||||
|
echo "Check the output above for details"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "✓ Docker image built successfully"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Extract the binary from the container
|
||||||
|
echo "=========================================="
|
||||||
|
echo "Step 2: Extracting static binary"
|
||||||
|
echo "=========================================="
|
||||||
|
|
||||||
|
# Build the builder stage to extract the binary
|
||||||
|
$DOCKER_CMD build \
|
||||||
|
--platform "$PLATFORM" \
|
||||||
|
--build-arg DEBUG_BUILD=$DEBUG_BUILD \
|
||||||
|
--target builder \
|
||||||
|
-f "$DOCKERFILE" \
|
||||||
|
-t ginxsom-static-builder-stage:latest \
|
||||||
|
. > /dev/null 2>&1
|
||||||
|
|
||||||
|
# Create a temporary container to copy the binary
|
||||||
|
CONTAINER_ID=$($DOCKER_CMD create ginxsom-static-builder-stage:latest)
|
||||||
|
|
||||||
|
# Copy binary from container
|
||||||
|
$DOCKER_CMD cp "$CONTAINER_ID:/build/ginxsom-fcgi_static" "$BUILD_DIR/$OUTPUT_NAME" || {
|
||||||
|
echo "ERROR: Failed to extract binary from container"
|
||||||
|
$DOCKER_CMD rm "$CONTAINER_ID" 2>/dev/null
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Clean up container
|
||||||
|
$DOCKER_CMD rm "$CONTAINER_ID" > /dev/null
|
||||||
|
|
||||||
|
echo "✓ Binary extracted to: $BUILD_DIR/$OUTPUT_NAME"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Make binary executable
|
||||||
|
chmod +x "$BUILD_DIR/$OUTPUT_NAME"
|
||||||
|
|
||||||
|
# Verify the binary
|
||||||
|
echo "=========================================="
|
||||||
|
echo "Step 3: Verifying static binary"
|
||||||
|
echo "=========================================="
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
echo "Checking for dynamic dependencies:"
|
||||||
|
if LDD_OUTPUT=$(timeout 5 ldd "$BUILD_DIR/$OUTPUT_NAME" 2>&1); then
|
||||||
|
if echo "$LDD_OUTPUT" | grep -q "not a dynamic executable"; then
|
||||||
|
echo "✓ Binary is fully static (no dynamic dependencies)"
|
||||||
|
TRULY_STATIC=true
|
||||||
|
elif echo "$LDD_OUTPUT" | grep -q "statically linked"; then
|
||||||
|
echo "✓ Binary is statically linked"
|
||||||
|
TRULY_STATIC=true
|
||||||
|
else
|
||||||
|
echo "⚠ WARNING: Binary may have dynamic dependencies:"
|
||||||
|
echo "$LDD_OUTPUT"
|
||||||
|
TRULY_STATIC=false
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# ldd failed or timed out - check with file command instead
|
||||||
|
if file "$BUILD_DIR/$OUTPUT_NAME" | grep -q "statically linked"; then
|
||||||
|
echo "✓ Binary is statically linked (verified with file command)"
|
||||||
|
TRULY_STATIC=true
|
||||||
|
else
|
||||||
|
echo "⚠ Could not verify static linking (ldd check failed)"
|
||||||
|
TRULY_STATIC=false
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
echo "File size: $(ls -lh "$BUILD_DIR/$OUTPUT_NAME" | awk '{print $5}')"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
echo "=========================================="
|
||||||
|
echo "Build Summary"
|
||||||
|
echo "=========================================="
|
||||||
|
echo "Binary: $BUILD_DIR/$OUTPUT_NAME"
|
||||||
|
echo "Size: $(du -h "$BUILD_DIR/$OUTPUT_NAME" | cut -f1)"
|
||||||
|
echo "Platform: $PLATFORM"
|
||||||
|
if [ "$DEBUG_BUILD" = true ]; then
|
||||||
|
echo "Build Type: DEBUG (with symbols, no optimization)"
|
||||||
|
else
|
||||||
|
echo "Build Type: PRODUCTION (optimized, stripped)"
|
||||||
|
fi
|
||||||
|
if [ "$TRULY_STATIC" = true ]; then
|
||||||
|
echo "Linkage: Fully static binary (Alpine MUSL-based)"
|
||||||
|
echo "Portability: Works on ANY Linux distribution"
|
||||||
|
else
|
||||||
|
echo "Linkage: Static binary (may have minimal dependencies)"
|
||||||
|
fi
|
||||||
|
echo ""
|
||||||
|
echo "✓ Build complete!"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Clean up old dynamic build artifacts
|
||||||
|
echo "=========================================="
|
||||||
|
echo "Cleaning up old build artifacts"
|
||||||
|
echo "=========================================="
|
||||||
|
echo ""
|
||||||
|
if ls build/*.o 2>/dev/null | grep -q .; then
|
||||||
|
echo "Removing old .o files from dynamic builds..."
|
||||||
|
rm -f build/*.o
|
||||||
|
echo "✓ Cleanup complete"
|
||||||
|
else
|
||||||
|
echo "No .o files to clean"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Also remove old dynamic binary if it exists
|
||||||
|
if [ -f "build/ginxsom-fcgi" ]; then
|
||||||
|
echo "Removing old dynamic binary..."
|
||||||
|
rm -f build/ginxsom-fcgi
|
||||||
|
echo "✓ Old binary removed"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "Deployment:"
|
||||||
|
echo " scp $BUILD_DIR/$OUTPUT_NAME user@server:/path/to/ginxsom/"
|
||||||
|
echo ""
|
||||||
@@ -2,7 +2,8 @@
|
|||||||
# Comprehensive Blossom Protocol Implementation
|
# Comprehensive Blossom Protocol Implementation
|
||||||
|
|
||||||
# Main context - specify error log here to override system default
|
# Main context - specify error log here to override system default
|
||||||
error_log logs/nginx/error.log info;
|
# Set to warn level to capture FastCGI stderr messages
|
||||||
|
error_log logs/nginx/error.log warn;
|
||||||
pid logs/nginx/nginx.pid;
|
pid logs/nginx/nginx.pid;
|
||||||
|
|
||||||
events {
|
events {
|
||||||
@@ -219,6 +220,35 @@ http {
|
|||||||
fastcgi_param HTTP_AUTHORIZATION $http_authorization;
|
fastcgi_param HTTP_AUTHORIZATION $http_authorization;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Admin web interface (/admin)
|
||||||
|
location /admin {
|
||||||
|
if ($request_method !~ ^(GET)$) {
|
||||||
|
return 405;
|
||||||
|
}
|
||||||
|
fastcgi_pass fastcgi_backend;
|
||||||
|
fastcgi_param QUERY_STRING $query_string;
|
||||||
|
fastcgi_param REQUEST_METHOD $request_method;
|
||||||
|
fastcgi_param CONTENT_TYPE $content_type;
|
||||||
|
fastcgi_param CONTENT_LENGTH $content_length;
|
||||||
|
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
|
||||||
|
fastcgi_param REQUEST_URI $request_uri;
|
||||||
|
fastcgi_param DOCUMENT_URI $document_uri;
|
||||||
|
fastcgi_param DOCUMENT_ROOT $document_root;
|
||||||
|
fastcgi_param SERVER_PROTOCOL $server_protocol;
|
||||||
|
fastcgi_param REQUEST_SCHEME $scheme;
|
||||||
|
fastcgi_param HTTPS $https if_not_empty;
|
||||||
|
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
|
||||||
|
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
|
||||||
|
fastcgi_param REMOTE_ADDR $remote_addr;
|
||||||
|
fastcgi_param REMOTE_PORT $remote_port;
|
||||||
|
fastcgi_param SERVER_ADDR $server_addr;
|
||||||
|
fastcgi_param SERVER_PORT $server_port;
|
||||||
|
fastcgi_param SERVER_NAME $server_name;
|
||||||
|
fastcgi_param REDIRECT_STATUS 200;
|
||||||
|
fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
|
||||||
|
fastcgi_param HTTP_AUTHORIZATION $http_authorization;
|
||||||
|
}
|
||||||
|
|
||||||
# Admin API endpoints (/api/*)
|
# Admin API endpoints (/api/*)
|
||||||
location /api/ {
|
location /api/ {
|
||||||
if ($request_method !~ ^(GET|PUT|POST)$) {
|
if ($request_method !~ ^(GET|PUT|POST)$) {
|
||||||
@@ -570,6 +600,35 @@ http {
|
|||||||
fastcgi_param HTTP_AUTHORIZATION $http_authorization;
|
fastcgi_param HTTP_AUTHORIZATION $http_authorization;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Admin web interface (/admin)
|
||||||
|
location /admin {
|
||||||
|
if ($request_method !~ ^(GET)$) {
|
||||||
|
return 405;
|
||||||
|
}
|
||||||
|
fastcgi_pass fastcgi_backend;
|
||||||
|
fastcgi_param QUERY_STRING $query_string;
|
||||||
|
fastcgi_param REQUEST_METHOD $request_method;
|
||||||
|
fastcgi_param CONTENT_TYPE $content_type;
|
||||||
|
fastcgi_param CONTENT_LENGTH $content_length;
|
||||||
|
fastcgi_param SCRIPT_NAME $fastcgi_script_name;
|
||||||
|
fastcgi_param REQUEST_URI $request_uri;
|
||||||
|
fastcgi_param DOCUMENT_URI $document_uri;
|
||||||
|
fastcgi_param DOCUMENT_ROOT $document_root;
|
||||||
|
fastcgi_param SERVER_PROTOCOL $server_protocol;
|
||||||
|
fastcgi_param REQUEST_SCHEME $scheme;
|
||||||
|
fastcgi_param HTTPS $https if_not_empty;
|
||||||
|
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
|
||||||
|
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
|
||||||
|
fastcgi_param REMOTE_ADDR $remote_addr;
|
||||||
|
fastcgi_param REMOTE_PORT $remote_port;
|
||||||
|
fastcgi_param SERVER_ADDR $server_addr;
|
||||||
|
fastcgi_param SERVER_PORT $server_port;
|
||||||
|
fastcgi_param SERVER_NAME $server_name;
|
||||||
|
fastcgi_param REDIRECT_STATUS 200;
|
||||||
|
fastcgi_param SCRIPT_FILENAME $document_root/ginxsom.fcgi;
|
||||||
|
fastcgi_param HTTP_AUTHORIZATION $http_authorization;
|
||||||
|
}
|
||||||
|
|
||||||
# Admin API endpoints (/api/*)
|
# Admin API endpoints (/api/*)
|
||||||
location /api/ {
|
location /api/ {
|
||||||
if ($request_method !~ ^(GET|PUT|POST)$) {
|
if ($request_method !~ ^(GET|PUT|POST)$) {
|
||||||
|
|||||||
Binary file not shown.
49
deploy_lt.sh
49
deploy_lt.sh
@@ -73,8 +73,55 @@ print_success "Remote environment configured"
|
|||||||
print_status "Copying files to remote server..."
|
print_status "Copying files to remote server..."
|
||||||
|
|
||||||
# Copy entire project directory (excluding unnecessary files)
|
# Copy entire project directory (excluding unnecessary files)
|
||||||
|
# Note: We include .git and .gitmodules to allow submodule initialization on remote
|
||||||
print_status "Copying entire ginxsom project..."
|
print_status "Copying entire ginxsom project..."
|
||||||
rsync -avz --exclude='.git' --exclude='build' --exclude='logs' --exclude='Trash' --exclude='blobs' --exclude='db' --no-g --no-o --no-perms --omit-dir-times . $REMOTE_USER@$REMOTE_HOST:$REMOTE_DIR/
|
rsync -avz --exclude='build' --exclude='logs' --exclude='Trash' --exclude='blobs' --exclude='db' --no-g --no-o --no-perms --omit-dir-times . $REMOTE_USER@$REMOTE_HOST:$REMOTE_DIR/
|
||||||
|
|
||||||
|
# Initialize git submodules on remote server
|
||||||
|
print_status "Initializing git submodules on remote server..."
|
||||||
|
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
|
||||||
|
cd /home/ubuntu/ginxsom
|
||||||
|
|
||||||
|
# Check if .git exists
|
||||||
|
if [ ! -d .git ]; then
|
||||||
|
echo "ERROR: .git directory not found - git repository not copied"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check if .gitmodules exists
|
||||||
|
if [ ! -f .gitmodules ]; then
|
||||||
|
echo "ERROR: .gitmodules file not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Initializing git submodules..."
|
||||||
|
git submodule update --init --recursive
|
||||||
|
|
||||||
|
# Verify submodule was initialized
|
||||||
|
if [ ! -f nostr_core_lib/cjson/cJSON.h ]; then
|
||||||
|
echo "ERROR: Submodule initialization failed - cJSON.h not found"
|
||||||
|
echo "Checking nostr_core_lib directory:"
|
||||||
|
ls -la nostr_core_lib/ || echo "nostr_core_lib directory not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Submodules initialized successfully"
|
||||||
|
|
||||||
|
# Build nostr_core_lib
|
||||||
|
echo "Building nostr_core_lib..."
|
||||||
|
cd nostr_core_lib
|
||||||
|
./build.sh
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
echo "ERROR: Failed to build nostr_core_lib"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo "nostr_core_lib built successfully"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
if [ $? -ne 0 ]; then
|
||||||
|
print_error "Failed to initialize git submodules or build nostr_core_lib"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Build on remote server to ensure compatibility
|
# Build on remote server to ensure compatibility
|
||||||
print_status "Building ginxsom on remote server..."
|
print_status "Building ginxsom on remote server..."
|
||||||
|
|||||||
162
deploy_static.sh
Executable file
162
deploy_static.sh
Executable file
@@ -0,0 +1,162 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Colors for output
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m'
|
||||||
|
|
||||||
|
print_status() { echo -e "${BLUE}[INFO]${NC} $1"; }
|
||||||
|
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
|
||||||
|
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
|
||||||
|
print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
REMOTE_HOST="laantungir.net"
|
||||||
|
REMOTE_USER="ubuntu"
|
||||||
|
REMOTE_DIR="/home/ubuntu/ginxsom"
|
||||||
|
REMOTE_BINARY_PATH="/home/ubuntu/ginxsom/ginxsom-fcgi_static"
|
||||||
|
REMOTE_SOCKET="/tmp/ginxsom-fcgi.sock"
|
||||||
|
REMOTE_DATA_DIR="/var/www/html/blossom"
|
||||||
|
REMOTE_DB_PATH="/home/ubuntu/ginxsom/db/ginxsom.db"
|
||||||
|
|
||||||
|
# Detect architecture
|
||||||
|
ARCH=$(uname -m)
|
||||||
|
case "$ARCH" in
|
||||||
|
x86_64)
|
||||||
|
BINARY_NAME="ginxsom-fcgi_static_x86_64"
|
||||||
|
;;
|
||||||
|
aarch64|arm64)
|
||||||
|
BINARY_NAME="ginxsom-fcgi_static_arm64"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
print_error "Unsupported architecture: $ARCH"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
LOCAL_BINARY="./build/$BINARY_NAME"
|
||||||
|
|
||||||
|
print_status "Starting static binary deployment to $REMOTE_HOST..."
|
||||||
|
|
||||||
|
# Check if static binary exists
|
||||||
|
if [ ! -f "$LOCAL_BINARY" ]; then
|
||||||
|
print_error "Static binary not found: $LOCAL_BINARY"
|
||||||
|
print_status "Building static binary..."
|
||||||
|
./build_static.sh
|
||||||
|
|
||||||
|
if [ ! -f "$LOCAL_BINARY" ]; then
|
||||||
|
print_error "Build failed - binary still not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_success "Static binary found: $LOCAL_BINARY"
|
||||||
|
print_status "Binary size: $(du -h "$LOCAL_BINARY" | cut -f1)"
|
||||||
|
|
||||||
|
# Verify binary is static
|
||||||
|
if ldd "$LOCAL_BINARY" 2>&1 | grep -q "not a dynamic executable"; then
|
||||||
|
print_success "Binary is fully static"
|
||||||
|
elif ldd "$LOCAL_BINARY" 2>&1 | grep -q "statically linked"; then
|
||||||
|
print_success "Binary is statically linked"
|
||||||
|
else
|
||||||
|
print_warning "Binary may have dynamic dependencies"
|
||||||
|
ldd "$LOCAL_BINARY" 2>&1 || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Setup remote environment
|
||||||
|
print_status "Setting up remote environment..."
|
||||||
|
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Create directories
|
||||||
|
mkdir -p /home/ubuntu/ginxsom/db
|
||||||
|
sudo mkdir -p /var/www/html/blossom
|
||||||
|
sudo chown www-data:www-data /var/www/html/blossom
|
||||||
|
sudo chmod 755 /var/www/html/blossom
|
||||||
|
|
||||||
|
# Stop existing processes
|
||||||
|
echo "Stopping existing ginxsom processes..."
|
||||||
|
sudo pkill -f ginxsom-fcgi || true
|
||||||
|
sudo rm -f /tmp/ginxsom-fcgi.sock || true
|
||||||
|
|
||||||
|
echo "Remote environment ready"
|
||||||
|
EOF
|
||||||
|
|
||||||
|
print_success "Remote environment configured"
|
||||||
|
|
||||||
|
# Copy static binary
|
||||||
|
print_status "Copying static binary to remote server..."
|
||||||
|
scp "$LOCAL_BINARY" $REMOTE_USER@$REMOTE_HOST:$REMOTE_BINARY_PATH
|
||||||
|
|
||||||
|
print_success "Binary copied successfully"
|
||||||
|
|
||||||
|
# Set permissions and start service
|
||||||
|
print_status "Starting ginxsom FastCGI process..."
|
||||||
|
ssh $REMOTE_USER@$REMOTE_HOST << EOF
|
||||||
|
# Make binary executable
|
||||||
|
chmod +x $REMOTE_BINARY_PATH
|
||||||
|
|
||||||
|
# Clean up any existing socket
|
||||||
|
sudo rm -f $REMOTE_SOCKET
|
||||||
|
|
||||||
|
# Start FastCGI process
|
||||||
|
echo "Starting ginxsom FastCGI..."
|
||||||
|
sudo spawn-fcgi -M 666 -u www-data -g www-data -s $REMOTE_SOCKET -U www-data -G www-data -d $REMOTE_DIR -- $REMOTE_BINARY_PATH --db-path "$REMOTE_DB_PATH" --storage-dir "$REMOTE_DATA_DIR"
|
||||||
|
|
||||||
|
# Give it a moment to start
|
||||||
|
sleep 2
|
||||||
|
|
||||||
|
# Verify process is running
|
||||||
|
if pgrep -f "ginxsom-fcgi" > /dev/null; then
|
||||||
|
echo "FastCGI process started successfully"
|
||||||
|
echo "PID: \$(pgrep -f ginxsom-fcgi)"
|
||||||
|
else
|
||||||
|
echo "Process verification: socket exists"
|
||||||
|
ls -la $REMOTE_SOCKET
|
||||||
|
fi
|
||||||
|
EOF
|
||||||
|
|
||||||
|
if [ $? -eq 0 ]; then
|
||||||
|
print_success "FastCGI process started"
|
||||||
|
else
|
||||||
|
print_error "Failed to start FastCGI process"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Reload nginx
|
||||||
|
print_status "Reloading nginx..."
|
||||||
|
ssh $REMOTE_USER@$REMOTE_HOST << 'EOF'
|
||||||
|
if sudo nginx -t; then
|
||||||
|
sudo nginx -s reload
|
||||||
|
echo "Nginx reloaded successfully"
|
||||||
|
else
|
||||||
|
echo "Nginx configuration test failed"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
EOF
|
||||||
|
|
||||||
|
print_success "Nginx reloaded"
|
||||||
|
|
||||||
|
# Test deployment
|
||||||
|
print_status "Testing deployment..."
|
||||||
|
|
||||||
|
echo "Testing health endpoint..."
|
||||||
|
if curl -k -s --max-time 10 "https://blossom.laantungir.net/health" | grep -q "OK"; then
|
||||||
|
print_success "Health check passed"
|
||||||
|
else
|
||||||
|
print_warning "Health check failed - checking response..."
|
||||||
|
curl -k -v --max-time 10 "https://blossom.laantungir.net/health" 2>&1 | head -10
|
||||||
|
fi
|
||||||
|
|
||||||
|
print_success "Deployment to $REMOTE_HOST completed!"
|
||||||
|
print_status "Ginxsom should now be available at: https://blossom.laantungir.net"
|
||||||
|
print_status ""
|
||||||
|
print_status "Deployment Summary:"
|
||||||
|
echo " Binary: $BINARY_NAME"
|
||||||
|
echo " Size: $(du -h "$LOCAL_BINARY" | cut -f1)"
|
||||||
|
echo " Type: Fully static MUSL binary"
|
||||||
|
echo " Portability: Works on any Linux distribution"
|
||||||
|
echo " Deployment time: ~10 seconds (vs ~5 minutes for dynamic build)"
|
||||||
535
docs/ADMIN_COMMANDS_PLAN.md
Normal file
535
docs/ADMIN_COMMANDS_PLAN.md
Normal file
@@ -0,0 +1,535 @@
|
|||||||
|
# Ginxsom Admin Commands Implementation Plan
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This document outlines the implementation plan for Ginxsom's admin command system, adapted from c-relay's event-based administration system. Commands are sent as NIP-44 encrypted Kind 23456 events and responses are returned as Kind 23457 events.
|
||||||
|
|
||||||
|
## Command Analysis: c-relay vs Ginxsom
|
||||||
|
|
||||||
|
### Commands to Implement (Blossom-Relevant)
|
||||||
|
|
||||||
|
| c-relay Command | Ginxsom Equivalent | Rationale |
|
||||||
|
|-----------------|-------------------|-----------|
|
||||||
|
| `config_query` | `config_query` | Query Blossom server configuration |
|
||||||
|
| `config_update` | `config_update` | Update server settings dynamically |
|
||||||
|
| `stats_query` | `stats_query` | Database statistics (blobs, storage, etc.) |
|
||||||
|
| `system_status` | `system_status` | Server health and status |
|
||||||
|
| `sql_query` | `sql_query` | Direct database queries for debugging |
|
||||||
|
| N/A | `blob_list` | List blobs by pubkey or criteria |
|
||||||
|
| N/A | `storage_stats` | Storage usage and capacity info |
|
||||||
|
| N/A | `mirror_status` | Status of mirroring operations |
|
||||||
|
| N/A | `report_query` | Query content reports (BUD-09) |
|
||||||
|
|
||||||
|
### Commands to Exclude (Not Blossom-Relevant)
|
||||||
|
|
||||||
|
| c-relay Command | Reason for Exclusion |
|
||||||
|
|-----------------|---------------------|
|
||||||
|
| `auth_add_blacklist` | Blossom uses different auth model (per-blob, not per-pubkey) |
|
||||||
|
| `auth_add_whitelist` | Same as above |
|
||||||
|
| `auth_delete_rule` | Same as above |
|
||||||
|
| `auth_query_all` | Same as above |
|
||||||
|
| `system_clear_auth` | Same as above |
|
||||||
|
|
||||||
|
**Note**: Blossom's authentication is event-based per operation (upload/delete), not relay-level whitelist/blacklist. Auth rules in Ginxsom are configured via the `auth_rules` table but managed differently than c-relay.
|
||||||
|
|
||||||
|
## Event Structure
|
||||||
|
|
||||||
|
### Admin Command Event (Kind 23456)
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "event_id",
|
||||||
|
"pubkey": "admin_public_key",
|
||||||
|
"created_at": 1234567890,
|
||||||
|
"kind": 23456,
|
||||||
|
"content": "NIP44_ENCRYPTED_COMMAND_ARRAY",
|
||||||
|
"tags": [
|
||||||
|
["p", "blossom_server_pubkey"]
|
||||||
|
],
|
||||||
|
"sig": "event_signature"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Admin Response Event (Kind 23457)
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "response_event_id",
|
||||||
|
"pubkey": "blossom_server_pubkey",
|
||||||
|
"created_at": 1234567890,
|
||||||
|
"kind": 23457,
|
||||||
|
"content": "NIP44_ENCRYPTED_RESPONSE_OBJECT",
|
||||||
|
"tags": [
|
||||||
|
["p", "admin_public_key"],
|
||||||
|
["e", "request_event_id"]
|
||||||
|
],
|
||||||
|
"sig": "response_event_signature"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Command Specifications
|
||||||
|
|
||||||
|
### 1. Configuration Management
|
||||||
|
|
||||||
|
#### `config_query`
|
||||||
|
|
||||||
|
Query server configuration parameters.
|
||||||
|
|
||||||
|
**Command Format:**
|
||||||
|
```json
|
||||||
|
["config_query", "all"]
|
||||||
|
["config_query", "category", "blossom"]
|
||||||
|
["config_query", "key", "max_file_size"]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "config_all",
|
||||||
|
"total_results": 15,
|
||||||
|
"timestamp": 1234567890,
|
||||||
|
"data": [
|
||||||
|
{
|
||||||
|
"key": "max_file_size",
|
||||||
|
"value": "104857600",
|
||||||
|
"data_type": "integer",
|
||||||
|
"category": "blossom",
|
||||||
|
"description": "Maximum file size in bytes"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "enable_relay_connect",
|
||||||
|
"value": "true",
|
||||||
|
"data_type": "boolean",
|
||||||
|
"category": "relay",
|
||||||
|
"description": "Enable relay client functionality"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Configuration Categories:**
|
||||||
|
- `blossom`: Blossom protocol settings (max_file_size, storage_path, etc.)
|
||||||
|
- `relay`: Relay client settings (enable_relay_connect, kind_0_content, etc.)
|
||||||
|
- `auth`: Authentication settings (auth_enabled, nip42_required, etc.)
|
||||||
|
- `limits`: Rate limits and quotas
|
||||||
|
- `system`: System-level settings
|
||||||
|
|
||||||
|
#### `config_update`
|
||||||
|
|
||||||
|
Update configuration parameters dynamically.
|
||||||
|
|
||||||
|
**Command Format:**
|
||||||
|
```json
|
||||||
|
["config_update", [
|
||||||
|
{
|
||||||
|
"key": "max_file_size",
|
||||||
|
"value": "209715200",
|
||||||
|
"data_type": "integer",
|
||||||
|
"category": "blossom"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "enable_relay_connect",
|
||||||
|
"value": "true",
|
||||||
|
"data_type": "boolean",
|
||||||
|
"category": "relay"
|
||||||
|
}
|
||||||
|
]]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "config_update",
|
||||||
|
"status": "success",
|
||||||
|
"total_results": 2,
|
||||||
|
"timestamp": 1234567890,
|
||||||
|
"data": [
|
||||||
|
{
|
||||||
|
"key": "max_file_size",
|
||||||
|
"value": "209715200",
|
||||||
|
"status": "updated",
|
||||||
|
"restart_required": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"key": "enable_relay_connect",
|
||||||
|
"value": "true",
|
||||||
|
"status": "updated",
|
||||||
|
"restart_required": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Statistics and Monitoring
|
||||||
|
|
||||||
|
#### `stats_query`
|
||||||
|
|
||||||
|
Get comprehensive database and storage statistics.
|
||||||
|
|
||||||
|
**Command Format:**
|
||||||
|
```json
|
||||||
|
["stats_query"]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "stats_query",
|
||||||
|
"timestamp": 1234567890,
|
||||||
|
"database_size_bytes": 1048576,
|
||||||
|
"storage_size_bytes": 10737418240,
|
||||||
|
"total_blobs": 1543,
|
||||||
|
"unique_uploaders": 234,
|
||||||
|
"blob_types": [
|
||||||
|
{"type": "image/jpeg", "count": 856, "size_bytes": 5368709120, "percentage": 55.4},
|
||||||
|
{"type": "image/png", "count": 432, "size_bytes": 3221225472, "percentage": 28.0},
|
||||||
|
{"type": "video/mp4", "count": 123, "size_bytes": 2147483648, "percentage": 8.0}
|
||||||
|
],
|
||||||
|
"time_stats": {
|
||||||
|
"total": 1543,
|
||||||
|
"last_24h": 45,
|
||||||
|
"last_7d": 234,
|
||||||
|
"last_30d": 876
|
||||||
|
},
|
||||||
|
"top_uploaders": [
|
||||||
|
{"pubkey": "abc123...", "blob_count": 234, "total_bytes": 1073741824, "percentage": 15.2},
|
||||||
|
{"pubkey": "def456...", "blob_count": 187, "total_bytes": 858993459, "percentage": 12.1}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `system_status`
|
||||||
|
|
||||||
|
Get current system status and health metrics.
|
||||||
|
|
||||||
|
**Command Format:**
|
||||||
|
```json
|
||||||
|
["system_command", "system_status"]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "system_status",
|
||||||
|
"timestamp": 1234567890,
|
||||||
|
"uptime_seconds": 86400,
|
||||||
|
"version": "0.1.0",
|
||||||
|
"relay_client": {
|
||||||
|
"enabled": true,
|
||||||
|
"connected_relays": 1,
|
||||||
|
"relay_status": [
|
||||||
|
{
|
||||||
|
"url": "wss://relay.laantungir.net",
|
||||||
|
"state": "connected",
|
||||||
|
"events_received": 12,
|
||||||
|
"events_published": 3
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"storage": {
|
||||||
|
"path": "/home/teknari/lt_gitea/ginxsom/blobs",
|
||||||
|
"total_bytes": 10737418240,
|
||||||
|
"available_bytes": 53687091200,
|
||||||
|
"usage_percentage": 16.7
|
||||||
|
},
|
||||||
|
"database": {
|
||||||
|
"path": "db/52e366edfa4e9cc6a6d4653828e51ccf828a2f5a05227d7a768f33b5a198681a.db",
|
||||||
|
"size_bytes": 1048576,
|
||||||
|
"total_blobs": 1543
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Blossom-Specific Commands
|
||||||
|
|
||||||
|
#### `blob_list`
|
||||||
|
|
||||||
|
List blobs with filtering options.
|
||||||
|
|
||||||
|
**Command Format:**
|
||||||
|
```json
|
||||||
|
["blob_list", "all"]
|
||||||
|
["blob_list", "pubkey", "abc123..."]
|
||||||
|
["blob_list", "type", "image/jpeg"]
|
||||||
|
["blob_list", "recent", 50]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "blob_list",
|
||||||
|
"total_results": 50,
|
||||||
|
"timestamp": 1234567890,
|
||||||
|
"data": [
|
||||||
|
{
|
||||||
|
"sha256": "b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553",
|
||||||
|
"size": 184292,
|
||||||
|
"type": "application/pdf",
|
||||||
|
"uploaded_at": 1725105921,
|
||||||
|
"uploader_pubkey": "abc123...",
|
||||||
|
"url": "https://cdn.example.com/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `storage_stats`
|
||||||
|
|
||||||
|
Get detailed storage statistics.
|
||||||
|
|
||||||
|
**Command Format:**
|
||||||
|
```json
|
||||||
|
["storage_stats"]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "storage_stats",
|
||||||
|
"timestamp": 1234567890,
|
||||||
|
"storage_path": "/home/teknari/lt_gitea/ginxsom/blobs",
|
||||||
|
"total_bytes": 10737418240,
|
||||||
|
"available_bytes": 53687091200,
|
||||||
|
"used_bytes": 10737418240,
|
||||||
|
"usage_percentage": 16.7,
|
||||||
|
"blob_count": 1543,
|
||||||
|
"average_blob_size": 6958592,
|
||||||
|
"largest_blob": {
|
||||||
|
"sha256": "abc123...",
|
||||||
|
"size": 104857600,
|
||||||
|
"type": "video/mp4"
|
||||||
|
},
|
||||||
|
"by_type": [
|
||||||
|
{"type": "image/jpeg", "count": 856, "total_bytes": 5368709120},
|
||||||
|
{"type": "image/png", "count": 432, "total_bytes": 3221225472}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `mirror_status`
|
||||||
|
|
||||||
|
Get status of blob mirroring operations (BUD-04).
|
||||||
|
|
||||||
|
**Command Format:**
|
||||||
|
```json
|
||||||
|
["mirror_status"]
|
||||||
|
["mirror_status", "sha256", "abc123..."]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "mirror_status",
|
||||||
|
"timestamp": 1234567890,
|
||||||
|
"total_mirrors": 23,
|
||||||
|
"data": [
|
||||||
|
{
|
||||||
|
"sha256": "abc123...",
|
||||||
|
"source_url": "https://cdn.example.com/abc123.jpg",
|
||||||
|
"status": "completed",
|
||||||
|
"mirrored_at": 1725105921,
|
||||||
|
"size": 1048576
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `report_query`
|
||||||
|
|
||||||
|
Query content reports (BUD-09).
|
||||||
|
|
||||||
|
**Command Format:**
|
||||||
|
```json
|
||||||
|
["report_query", "all"]
|
||||||
|
["report_query", "blob", "abc123..."]
|
||||||
|
["report_query", "type", "nudity"]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "report_query",
|
||||||
|
"total_results": 12,
|
||||||
|
"timestamp": 1234567890,
|
||||||
|
"data": [
|
||||||
|
{
|
||||||
|
"report_id": 1,
|
||||||
|
"blob_sha256": "abc123...",
|
||||||
|
"report_type": "nudity",
|
||||||
|
"reporter_pubkey": "def456...",
|
||||||
|
"content": "Inappropriate content",
|
||||||
|
"reported_at": 1725105921
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Database Queries
|
||||||
|
|
||||||
|
#### `sql_query`
|
||||||
|
|
||||||
|
Execute read-only SQL queries for debugging.
|
||||||
|
|
||||||
|
**Command Format:**
|
||||||
|
```json
|
||||||
|
["sql_query", "SELECT * FROM blobs LIMIT 10"]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Response:**
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"query_type": "sql_query",
|
||||||
|
"request_id": "request_event_id",
|
||||||
|
"timestamp": 1234567890,
|
||||||
|
"query": "SELECT * FROM blobs LIMIT 10",
|
||||||
|
"execution_time_ms": 12,
|
||||||
|
"row_count": 10,
|
||||||
|
"columns": ["sha256", "size", "type", "uploaded_at", "uploader_pubkey"],
|
||||||
|
"rows": [
|
||||||
|
["b1674191...", 184292, "application/pdf", 1725105921, "abc123..."]
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
**Security:**
|
||||||
|
- Only SELECT statements allowed
|
||||||
|
- Query timeout: 5 seconds
|
||||||
|
- Result row limit: 1000 rows
|
||||||
|
- All queries logged
|
||||||
|
|
||||||
|
## Implementation Architecture
|
||||||
|
|
||||||
|
### 1. Command Processing Flow
|
||||||
|
|
||||||
|
```
|
||||||
|
1. Relay client receives Kind 23456 event
|
||||||
|
2. Verify sender is admin_pubkey
|
||||||
|
3. Decrypt content using NIP-44
|
||||||
|
4. Parse command array
|
||||||
|
5. Validate command structure
|
||||||
|
6. Execute command handler
|
||||||
|
7. Generate response object
|
||||||
|
8. Encrypt response using NIP-44
|
||||||
|
9. Create Kind 23457 event
|
||||||
|
10. Publish to relays
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Code Structure
|
||||||
|
|
||||||
|
**New Files:**
|
||||||
|
- `src/admin_commands.c` - Command handlers
|
||||||
|
- `src/admin_commands.h` - Command interface
|
||||||
|
- `src/nip44.c` - NIP-44 encryption wrapper (uses nostr_core_lib)
|
||||||
|
- `src/nip44.h` - NIP-44 interface
|
||||||
|
|
||||||
|
**Modified Files:**
|
||||||
|
- `src/relay_client.c` - Add command processing to `on_admin_command_event()`
|
||||||
|
- `src/main.c` - Initialize admin command system
|
||||||
|
|
||||||
|
### 3. Database Schema Additions
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Admin command log
|
||||||
|
CREATE TABLE IF NOT EXISTS admin_commands (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
event_id TEXT NOT NULL,
|
||||||
|
command_type TEXT NOT NULL,
|
||||||
|
admin_pubkey TEXT NOT NULL,
|
||||||
|
executed_at INTEGER NOT NULL,
|
||||||
|
execution_time_ms INTEGER,
|
||||||
|
status TEXT NOT NULL,
|
||||||
|
error TEXT
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Create index for command history queries
|
||||||
|
CREATE INDEX IF NOT EXISTS idx_admin_commands_executed
|
||||||
|
ON admin_commands(executed_at DESC);
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Configuration Keys
|
||||||
|
|
||||||
|
**Blossom Category:**
|
||||||
|
- `max_file_size` - Maximum upload size in bytes
|
||||||
|
- `storage_path` - Blob storage directory
|
||||||
|
- `cdn_origin` - CDN URL for blob descriptors
|
||||||
|
- `enable_nip94` - Include NIP-94 tags in responses
|
||||||
|
|
||||||
|
**Relay Category:**
|
||||||
|
- `enable_relay_connect` - Enable relay client
|
||||||
|
- `kind_0_content` - Profile metadata JSON
|
||||||
|
- `kind_10002_tags` - Relay list JSON array
|
||||||
|
|
||||||
|
**Auth Category:**
|
||||||
|
- `auth_enabled` - Enable auth rules system
|
||||||
|
- `require_auth_upload` - Require auth for uploads
|
||||||
|
- `require_auth_delete` - Require auth for deletes
|
||||||
|
|
||||||
|
**Limits Category:**
|
||||||
|
- `max_blobs_per_user` - Per-user blob limit
|
||||||
|
- `rate_limit_uploads` - Uploads per minute
|
||||||
|
- `max_total_storage` - Total storage limit in bytes
|
||||||
|
|
||||||
|
## Implementation Phases
|
||||||
|
|
||||||
|
### Phase 1: NIP-44 Encryption Support
|
||||||
|
- Integrate nostr_core_lib NIP-44 functions
|
||||||
|
- Create encryption/decryption wrappers
|
||||||
|
- Test with sample data
|
||||||
|
|
||||||
|
### Phase 2: Command Infrastructure
|
||||||
|
- Create admin_commands.c/h
|
||||||
|
- Implement command parser
|
||||||
|
- Add command logging to database
|
||||||
|
- Implement response builder
|
||||||
|
|
||||||
|
### Phase 3: Core Commands
|
||||||
|
- Implement `config_query`
|
||||||
|
- Implement `config_update`
|
||||||
|
- Implement `stats_query`
|
||||||
|
- Implement `system_status`
|
||||||
|
|
||||||
|
### Phase 4: Blossom Commands
|
||||||
|
- Implement `blob_list`
|
||||||
|
- Implement `storage_stats`
|
||||||
|
- Implement `mirror_status`
|
||||||
|
- Implement `report_query`
|
||||||
|
|
||||||
|
### Phase 5: Advanced Features
|
||||||
|
- Implement `sql_query` with security
|
||||||
|
- Add command history tracking
|
||||||
|
- Implement rate limiting for admin commands
|
||||||
|
|
||||||
|
### Phase 6: Testing & Documentation
|
||||||
|
- Create test suite for each command
|
||||||
|
- Update README.md with admin API section
|
||||||
|
- Create example scripts using nak tool
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
1. **Authentication**: Only admin_pubkey can send commands
|
||||||
|
2. **Encryption**: All commands/responses use NIP-44
|
||||||
|
3. **Logging**: All admin actions logged to database
|
||||||
|
4. **Rate Limiting**: Prevent admin command flooding
|
||||||
|
5. **SQL Safety**: Only SELECT allowed, with timeout and row limits
|
||||||
|
6. **Input Validation**: Strict validation of all command parameters
|
||||||
|
|
||||||
|
## Testing Strategy
|
||||||
|
|
||||||
|
1. **Unit Tests**: Test each command handler independently
|
||||||
|
2. **Integration Tests**: Test full command flow with encryption
|
||||||
|
3. **Security Tests**: Verify auth checks and SQL injection prevention
|
||||||
|
4. **Performance Tests**: Ensure commands don't block relay operations
|
||||||
|
5. **Manual Tests**: Use nak tool to send real encrypted commands
|
||||||
|
|
||||||
|
## Documentation Updates
|
||||||
|
|
||||||
|
Add new section to README.md after "Content Reporting (BUD-09)":
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
## Administrator API
|
||||||
|
|
||||||
|
Ginxsom uses an event-based administration system where commands are sent as
|
||||||
|
NIP-44 encrypted Kind 23456 events and responses are returned as Kind 23457
|
||||||
|
events. This provides secure, cryptographically authenticated remote management.
|
||||||
|
|
||||||
|
[Full admin API documentation here]
|
||||||
296
docs/STATIC_BUILD.md
Normal file
296
docs/STATIC_BUILD.md
Normal file
@@ -0,0 +1,296 @@
|
|||||||
|
# Ginxsom Static MUSL Build Guide
|
||||||
|
|
||||||
|
This guide explains how to build and deploy Ginxsom as a fully static MUSL binary with zero runtime dependencies.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Ginxsom now supports building as a static MUSL binary using Alpine Linux and Docker. This produces a truly portable binary that works on **any Linux distribution** without requiring any system libraries.
|
||||||
|
|
||||||
|
## Benefits
|
||||||
|
|
||||||
|
| Feature | Static MUSL | Dynamic glibc |
|
||||||
|
|---------|-------------|---------------|
|
||||||
|
| **Portability** | ✓ Any Linux | ✗ Requires matching libs |
|
||||||
|
| **Dependencies** | None | libfcgi, libsqlite3, etc. |
|
||||||
|
| **Deployment** | Copy one file | Build on target |
|
||||||
|
| **Binary Size** | ~7-10 MB | ~2-3 MB + libraries |
|
||||||
|
| **Deployment Time** | ~10 seconds | ~5-10 minutes |
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Docker installed and running
|
||||||
|
- Internet connection (for first build only)
|
||||||
|
- ~2GB disk space for Docker images
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
### 1. Build Static Binary
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build production binary (optimized, stripped)
|
||||||
|
make static
|
||||||
|
|
||||||
|
# Or build debug binary (with symbols)
|
||||||
|
make static-debug
|
||||||
|
|
||||||
|
# Or use the script directly
|
||||||
|
./build_static.sh
|
||||||
|
./build_static.sh --debug
|
||||||
|
```
|
||||||
|
|
||||||
|
The binary will be created in `build/ginxsom-fcgi_static_x86_64` (or `_arm64` for ARM systems).
|
||||||
|
|
||||||
|
### 2. Verify Binary
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check if truly static
|
||||||
|
ldd build/ginxsom-fcgi_static_x86_64
|
||||||
|
# Should output: "not a dynamic executable"
|
||||||
|
|
||||||
|
# Check file info
|
||||||
|
file build/ginxsom-fcgi_static_x86_64
|
||||||
|
# Should show: "statically linked"
|
||||||
|
|
||||||
|
# Check size
|
||||||
|
ls -lh build/ginxsom-fcgi_static_x86_64
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Deploy to Server
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use the simplified deployment script
|
||||||
|
./deploy_static.sh
|
||||||
|
|
||||||
|
# Or manually copy and start
|
||||||
|
scp build/ginxsom-fcgi_static_x86_64 user@server:/path/to/ginxsom/
|
||||||
|
ssh user@server
|
||||||
|
chmod +x /path/to/ginxsom/ginxsom-fcgi_static_x86_64
|
||||||
|
sudo spawn-fcgi -M 666 -u www-data -g www-data \
|
||||||
|
-s /tmp/ginxsom-fcgi.sock \
|
||||||
|
-- /path/to/ginxsom/ginxsom-fcgi_static_x86_64 \
|
||||||
|
--db-path /path/to/db/ginxsom.db \
|
||||||
|
--storage-dir /var/www/html/blossom
|
||||||
|
```
|
||||||
|
|
||||||
|
## Build Process Details
|
||||||
|
|
||||||
|
### What Happens During Build
|
||||||
|
|
||||||
|
1. **Docker Image Creation** (5-10 minutes first time, cached after):
|
||||||
|
- Uses Alpine Linux 3.19 (native MUSL)
|
||||||
|
- Builds secp256k1 statically
|
||||||
|
- Builds nostr_core_lib with required NIPs
|
||||||
|
- Embeds web interface files
|
||||||
|
- Compiles Ginxsom with full static linking
|
||||||
|
|
||||||
|
2. **Binary Extraction**:
|
||||||
|
- Extracts binary from Docker container
|
||||||
|
- Verifies static linking
|
||||||
|
- Makes executable
|
||||||
|
|
||||||
|
3. **Verification**:
|
||||||
|
- Checks for dynamic dependencies
|
||||||
|
- Reports file size
|
||||||
|
- Tests execution
|
||||||
|
|
||||||
|
### Docker Layers (Cached)
|
||||||
|
|
||||||
|
The Dockerfile uses multi-stage builds with caching:
|
||||||
|
|
||||||
|
```
|
||||||
|
Layer 1: Alpine base + dependencies (cached)
|
||||||
|
Layer 2: Build secp256k1 (cached)
|
||||||
|
Layer 3: Initialize git submodules (cached unless .gitmodules changes)
|
||||||
|
Layer 4: Build nostr_core_lib (cached unless nostr_core_lib changes)
|
||||||
|
Layer 5: Embed web files (cached unless api/ changes)
|
||||||
|
Layer 6: Build Ginxsom (rebuilds when src/ changes)
|
||||||
|
```
|
||||||
|
|
||||||
|
This means subsequent builds are **much faster** (~1-2 minutes) since only changed layers rebuild.
|
||||||
|
|
||||||
|
## Deployment Comparison
|
||||||
|
|
||||||
|
### Old Dynamic Build Deployment
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Sync entire project (30 seconds)
|
||||||
|
rsync -avz . user@server:/path/
|
||||||
|
|
||||||
|
# 2. Build on remote server (5-10 minutes)
|
||||||
|
ssh user@server "cd /path && make clean && make"
|
||||||
|
|
||||||
|
# 3. Restart service (10 seconds)
|
||||||
|
ssh user@server "sudo systemctl restart ginxsom"
|
||||||
|
|
||||||
|
# Total: ~6-11 minutes
|
||||||
|
```
|
||||||
|
|
||||||
|
### New Static Build Deployment
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Build locally once (5-10 minutes first time, cached after)
|
||||||
|
make static
|
||||||
|
|
||||||
|
# 2. Copy binary (10 seconds)
|
||||||
|
scp build/ginxsom-fcgi_static_x86_64 user@server:/path/
|
||||||
|
|
||||||
|
# 3. Restart service (10 seconds)
|
||||||
|
ssh user@server "sudo systemctl restart ginxsom"
|
||||||
|
|
||||||
|
# Total: ~20 seconds (after first build)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Cleanup
|
||||||
|
|
||||||
|
### Automatic Cleanup
|
||||||
|
|
||||||
|
The static build script automatically cleans up old dynamic build artifacts (`.o` files and `ginxsom-fcgi` binary) after successfully building the static binary. This keeps your `build/` directory clean.
|
||||||
|
|
||||||
|
### Manual Cleanup
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clean dynamic build artifacts (preserves static binaries)
|
||||||
|
make clean
|
||||||
|
|
||||||
|
# Clean everything including static binaries
|
||||||
|
make clean-all
|
||||||
|
|
||||||
|
# Or manually remove specific files
|
||||||
|
rm -f build/*.o
|
||||||
|
rm -f build/ginxsom-fcgi
|
||||||
|
rm -f build/ginxsom-fcgi_static_*
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Docker Not Found
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install Docker
|
||||||
|
sudo apt install docker.io
|
||||||
|
|
||||||
|
# Add user to docker group
|
||||||
|
sudo usermod -aG docker $USER
|
||||||
|
newgrp docker
|
||||||
|
```
|
||||||
|
|
||||||
|
### Build Fails
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clean Docker cache and rebuild
|
||||||
|
docker system prune -a
|
||||||
|
make static
|
||||||
|
```
|
||||||
|
|
||||||
|
### Binary Won't Run on Target
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Verify it's static
|
||||||
|
ldd build/ginxsom-fcgi_static_x86_64
|
||||||
|
|
||||||
|
# Check architecture matches
|
||||||
|
file build/ginxsom-fcgi_static_x86_64
|
||||||
|
uname -m # On target system
|
||||||
|
```
|
||||||
|
|
||||||
|
### Alpine Package Not Found
|
||||||
|
|
||||||
|
If you get errors about missing Alpine packages, the package name may have changed. Check Alpine's package database:
|
||||||
|
- https://pkgs.alpinelinux.org/packages
|
||||||
|
|
||||||
|
## Advanced Usage
|
||||||
|
|
||||||
|
### Cross-Compilation
|
||||||
|
|
||||||
|
Build for different architectures:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build for ARM64 on x86_64 machine
|
||||||
|
docker build --platform linux/arm64 -f Dockerfile.alpine-musl -t ginxsom-arm64 .
|
||||||
|
```
|
||||||
|
|
||||||
|
### Custom NIPs
|
||||||
|
|
||||||
|
Edit `Dockerfile.alpine-musl` line 66 to change which NIPs are included:
|
||||||
|
|
||||||
|
```dockerfile
|
||||||
|
./build.sh --nips=1,6,19 # Minimal
|
||||||
|
./build.sh --nips=1,6,13,17,19,44,59 # Full (default)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Debug Build
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build with debug symbols (no optimization)
|
||||||
|
make static-debug
|
||||||
|
|
||||||
|
# Binary will be larger but include debugging info
|
||||||
|
gdb build/ginxsom-fcgi_static_x86_64
|
||||||
|
```
|
||||||
|
|
||||||
|
## File Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
ginxsom/
|
||||||
|
├── Dockerfile.alpine-musl # Alpine Docker build definition
|
||||||
|
├── build_static.sh # Build script wrapper
|
||||||
|
├── deploy_static.sh # Simplified deployment script
|
||||||
|
├── Makefile # Updated with 'static' target
|
||||||
|
└── build/
|
||||||
|
└── ginxsom-fcgi_static_x86_64 # Output binary
|
||||||
|
```
|
||||||
|
|
||||||
|
## CI/CD Integration
|
||||||
|
|
||||||
|
### GitHub Actions Example
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
name: Build Static Binary
|
||||||
|
|
||||||
|
on: [push]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Build static binary
|
||||||
|
run: make static
|
||||||
|
|
||||||
|
- name: Upload artifact
|
||||||
|
uses: actions/upload-artifact@v2
|
||||||
|
with:
|
||||||
|
name: ginxsom-static
|
||||||
|
path: build/ginxsom-fcgi_static_x86_64
|
||||||
|
```
|
||||||
|
|
||||||
|
## Performance
|
||||||
|
|
||||||
|
Static MUSL binaries have minimal performance impact:
|
||||||
|
|
||||||
|
| Metric | Static MUSL | Dynamic glibc |
|
||||||
|
|--------|-------------|---------------|
|
||||||
|
| Startup Time | ~50ms | ~40ms |
|
||||||
|
| Memory Usage | Similar | Similar |
|
||||||
|
| Request Latency | Identical | Identical |
|
||||||
|
| Binary Size | 7-10 MB | 2-3 MB + libs |
|
||||||
|
|
||||||
|
The slight startup delay is negligible for a long-running FastCGI process.
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- [MUSL libc](https://musl.libc.org/)
|
||||||
|
- [Alpine Linux](https://alpinelinux.org/)
|
||||||
|
- [Static Linking Best Practices](https://www.musl-libc.org/faq.html)
|
||||||
|
- [c-relay Static Build](../c-relay/STATIC_BUILD.md)
|
||||||
|
|
||||||
|
## Support
|
||||||
|
|
||||||
|
For issues with static builds:
|
||||||
|
1. Check Docker is running: `docker info`
|
||||||
|
2. Verify submodules: `git submodule status`
|
||||||
|
3. Clean and rebuild: `docker system prune -a && make static`
|
||||||
|
4. Check logs in Docker build output
|
||||||
867
docs/WEBSOCKET_IMPLEMENTATION.md
Normal file
867
docs/WEBSOCKET_IMPLEMENTATION.md
Normal file
@@ -0,0 +1,867 @@
|
|||||||
|
# Ginxsom Websocket Implementation - Revised Plan
|
||||||
|
|
||||||
|
## Original Request
|
||||||
|
|
||||||
|
We have mostly implemented being able to handle websocket interactions in ginxsome, but I want to rethink why we are doing this, and then change the code.
|
||||||
|
|
||||||
|
Why do I want Ginxsom to handle websockets?
|
||||||
|
|
||||||
|
Ginxsome should have a npub, and you should be able to be able to interact with it as if it is a person on nostr, i.e. a regular nostr user. So it should subscribe to relays, and it should be able to read events that are sent to it - such as commands sent from the administrator, and it should be able to post events, such as it's current status, kind 0, etc.
|
||||||
|
|
||||||
|
So for this use, we don't need ginxsome to be a websocket server, but only a websocket client.
|
||||||
|
|
||||||
|
So possibly, implementing libwebsockets was too much. For this use, we probably can just use nostr_core_lib and the websocket client implementation that is already in nostr_core_lib.
|
||||||
|
|
||||||
|
So review what we currently have implemented, and give suggestions.
|
||||||
|
|
||||||
|
I want ginxsome to have within it's config db table, the values "enable_relay_connect", kind_0_content, kind_10002_tags, .
|
||||||
|
|
||||||
|
Upon startup, if enable_relay_connect is true, ginxsome should establish and maintain a connection to those relays listed in "kind_10002_tags".
|
||||||
|
|
||||||
|
Upon startup after connecting to the relays, it should publish a signed kind 0 to the relays is subscribes to, using the content from the database value kind_0_content, and it should publish a kind 10002 also using the data that is in the database.
|
||||||
|
|
||||||
|
It should also subscribe to any kind 23456 events published by the administrator and addressed to the blossom_pubkey.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## AGENT IMPLEMENTATION - REVISED PLAN
|
||||||
|
|
||||||
|
### Implementation Plan for Ginxsom as Nostr Client
|
||||||
|
|
||||||
|
#### Overview
|
||||||
|
|
||||||
|
This plan implements Ginxsom as a Nostr client that can:
|
||||||
|
- Connect to relays as a regular Nostr user
|
||||||
|
- Publish its profile (Kind 0) and relay list (Kind 10002)
|
||||||
|
- Subscribe to admin commands (Kind 23458)
|
||||||
|
- Maintain persistent relay connections
|
||||||
|
|
||||||
|
#### Architecture Analysis
|
||||||
|
|
||||||
|
**Existing Infrastructure:**
|
||||||
|
- [`src/relay_client.c`](../src/relay_client.c:1) - Already implements relay connection management
|
||||||
|
- [`src/admin_commands.c`](../src/admin_commands.c:1) - Command processing system
|
||||||
|
- Uses `nostr_core_lib` for websocket client, event signing, NIP-44 encryption
|
||||||
|
|
||||||
|
**Key Insight:** Most infrastructure already exists! We just need to:
|
||||||
|
1. Add database config fields
|
||||||
|
2. Implement Kind 0 and Kind 10002 publishing
|
||||||
|
3. Ensure relay connections persist on startup
|
||||||
|
|
||||||
|
#### Phase 1: Database Schema Updates (1 hour)
|
||||||
|
|
||||||
|
**Goal:** Add configuration fields for relay client behavior
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
|
||||||
|
1. Add new columns to `config` table:
|
||||||
|
```sql
|
||||||
|
ALTER TABLE config ADD COLUMN enable_relay_connect INTEGER DEFAULT 0;
|
||||||
|
ALTER TABLE config ADD COLUMN kind_0_content TEXT DEFAULT '{}';
|
||||||
|
ALTER TABLE config ADD COLUMN kind_10002_tags TEXT DEFAULT '[]';
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Update [`db/init.sh`](../db/init.sh) to include these fields in initial schema
|
||||||
|
|
||||||
|
3. Create migration script for existing databases
|
||||||
|
|
||||||
|
**Database Values:**
|
||||||
|
- `enable_relay_connect`: 0 or 1 (boolean)
|
||||||
|
- `kind_0_content`: JSON string with profile metadata
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"name": "Ginxsom Blossom Server",
|
||||||
|
"about": "Blossom blob storage server",
|
||||||
|
"picture": "https://example.com/logo.png",
|
||||||
|
"nip05": "ginxsom@example.com"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
- `kind_10002_tags`: JSON array of relay URLs
|
||||||
|
```json
|
||||||
|
[
|
||||||
|
["r", "wss://relay.damus.io"],
|
||||||
|
["r", "wss://relay.nostr.band"],
|
||||||
|
["r", "wss://nos.lol"]
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Phase 2: Configuration Loading (1-2 hours)
|
||||||
|
|
||||||
|
**Goal:** Load relay client config from database on startup
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
|
||||||
|
1. Update [`relay_client_init()`](../src/relay_client.c:64) to load new config fields:
|
||||||
|
```c
|
||||||
|
// Load enable_relay_connect flag
|
||||||
|
int enable_relay_connect = 0;
|
||||||
|
sqlite3_stmt* stmt;
|
||||||
|
sqlite3_prepare_v2(db, "SELECT enable_relay_connect FROM config LIMIT 1", -1, &stmt, NULL);
|
||||||
|
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||||
|
enable_relay_connect = sqlite3_column_int(stmt, 0);
|
||||||
|
}
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
|
||||||
|
if (!enable_relay_connect) {
|
||||||
|
log_message(LOG_INFO, "Relay client disabled in config");
|
||||||
|
return 0; // Don't start relay client
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Load `kind_0_content` and `kind_10002_tags` into global variables
|
||||||
|
|
||||||
|
3. Parse `kind_10002_tags` JSON to extract relay URLs for connection
|
||||||
|
|
||||||
|
**Integration Point:** This modifies existing [`relay_client_init()`](../src/relay_client.c:64) function
|
||||||
|
|
||||||
|
#### Phase 3: Kind 0 Profile Publishing (2-3 hours)
|
||||||
|
|
||||||
|
**Goal:** Publish server profile to relays on startup
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
|
||||||
|
1. Create new function `publish_kind_0_profile()` in [`src/relay_client.c`](../src/relay_client.c:1):
|
||||||
|
```c
|
||||||
|
static int publish_kind_0_profile(nostr_pool_t* pool, const char* kind_0_content) {
|
||||||
|
// Create Kind 0 event
|
||||||
|
nostr_event_t* event = nostr_create_event(
|
||||||
|
0, // kind
|
||||||
|
kind_0_content, // content from database
|
||||||
|
NULL, // no tags
|
||||||
|
0 // tag count
|
||||||
|
);
|
||||||
|
|
||||||
|
// Sign event with server's private key
|
||||||
|
if (nostr_sign_event(event, server_privkey) != 0) {
|
||||||
|
log_message(LOG_ERROR, "Failed to sign Kind 0 event");
|
||||||
|
nostr_free_event(event);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish to all connected relays
|
||||||
|
for (int i = 0; i < pool->relay_count; i++) {
|
||||||
|
nostr_relay_t* relay = pool->relays[i];
|
||||||
|
if (relay->connected) {
|
||||||
|
nostr_send_event(relay, event);
|
||||||
|
log_message(LOG_INFO, "Published Kind 0 to %s", relay->url);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nostr_free_event(event);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Call from [`relay_client_start()`](../src/relay_client.c:258) after relay connections established:
|
||||||
|
```c
|
||||||
|
// Wait for relay connections (with timeout)
|
||||||
|
sleep(2);
|
||||||
|
|
||||||
|
// Publish Kind 0 profile
|
||||||
|
if (kind_0_content && strlen(kind_0_content) > 0) {
|
||||||
|
publish_kind_0_profile(pool, kind_0_content);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Add periodic re-publishing (every 24 hours) to keep profile fresh
|
||||||
|
|
||||||
|
**Note:** Uses existing `nostr_core_lib` functions for event creation and signing
|
||||||
|
|
||||||
|
#### Phase 4: Kind 10002 Relay List Publishing (2-3 hours)
|
||||||
|
|
||||||
|
**Goal:** Publish relay list to inform other clients where to find this server
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
|
||||||
|
1. Create new function `publish_kind_10002_relay_list()` in [`src/relay_client.c`](../src/relay_client.c:1):
|
||||||
|
```c
|
||||||
|
static int publish_kind_10002_relay_list(nostr_pool_t* pool, const char* kind_10002_tags_json) {
|
||||||
|
// Parse JSON array of relay tags
|
||||||
|
cJSON* tags_array = cJSON_Parse(kind_10002_tags_json);
|
||||||
|
if (!tags_array) {
|
||||||
|
log_message(LOG_ERROR, "Failed to parse kind_10002_tags JSON");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert cJSON array to nostr_tag_t array
|
||||||
|
int tag_count = cJSON_GetArraySize(tags_array);
|
||||||
|
nostr_tag_t* tags = malloc(sizeof(nostr_tag_t) * tag_count);
|
||||||
|
|
||||||
|
for (int i = 0; i < tag_count; i++) {
|
||||||
|
cJSON* tag = cJSON_GetArrayItem(tags_array, i);
|
||||||
|
// Parse ["r", "wss://relay.url"] format
|
||||||
|
tags[i].key = strdup(cJSON_GetArrayItem(tag, 0)->valuestring);
|
||||||
|
tags[i].value = strdup(cJSON_GetArrayItem(tag, 1)->valuestring);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create Kind 10002 event
|
||||||
|
nostr_event_t* event = nostr_create_event(
|
||||||
|
10002, // kind
|
||||||
|
"", // empty content
|
||||||
|
tags, // relay tags
|
||||||
|
tag_count // tag count
|
||||||
|
);
|
||||||
|
|
||||||
|
// Sign and publish
|
||||||
|
if (nostr_sign_event(event, server_privkey) != 0) {
|
||||||
|
log_message(LOG_ERROR, "Failed to sign Kind 10002 event");
|
||||||
|
// cleanup...
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish to all connected relays
|
||||||
|
for (int i = 0; i < pool->relay_count; i++) {
|
||||||
|
nostr_relay_t* relay = pool->relays[i];
|
||||||
|
if (relay->connected) {
|
||||||
|
nostr_send_event(relay, event);
|
||||||
|
log_message(LOG_INFO, "Published Kind 10002 to %s", relay->url);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cleanup
|
||||||
|
cJSON_Delete(tags_array);
|
||||||
|
for (int i = 0; i < tag_count; i++) {
|
||||||
|
free(tags[i].key);
|
||||||
|
free(tags[i].value);
|
||||||
|
}
|
||||||
|
free(tags);
|
||||||
|
nostr_free_event(event);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Call from [`relay_client_start()`](../src/relay_client.c:258) after Kind 0 publishing:
|
||||||
|
```c
|
||||||
|
// Publish Kind 10002 relay list
|
||||||
|
if (kind_10002_tags && strlen(kind_10002_tags) > 0) {
|
||||||
|
publish_kind_10002_relay_list(pool, kind_10002_tags);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Add periodic re-publishing (every 24 hours)
|
||||||
|
|
||||||
|
**Note:** Kind 10002 uses "r" tags to list relays where the server can be reached
|
||||||
|
|
||||||
|
#### Phase 5: Admin Command Subscription (1 hour)
|
||||||
|
|
||||||
|
**Goal:** Ensure subscription to Kind 23458 admin commands is active
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
|
||||||
|
1. Verify [`on_admin_command_event()`](../src/relay_client.c:615) is registered for Kind 23458
|
||||||
|
|
||||||
|
2. Ensure subscription filter includes server's pubkey:
|
||||||
|
```c
|
||||||
|
// Subscribe to Kind 23458 events addressed to this server
|
||||||
|
nostr_filter_t filter = {
|
||||||
|
.kinds = {23458},
|
||||||
|
.kind_count = 1,
|
||||||
|
.p_tags = {server_pubkey},
|
||||||
|
.p_tag_count = 1
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Verify subscription is maintained across reconnections
|
||||||
|
|
||||||
|
**Note:** This is already implemented in [`relay_client.c`](../src/relay_client.c:615), just needs verification
|
||||||
|
|
||||||
|
#### Phase 6: Connection Persistence (2 hours)
|
||||||
|
|
||||||
|
**Goal:** Maintain relay connections and auto-reconnect on failure
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
|
||||||
|
1. Verify [`relay_management_thread()`](../src/relay_client.c:258) handles reconnections
|
||||||
|
|
||||||
|
2. Add connection health monitoring:
|
||||||
|
```c
|
||||||
|
// Check relay connections every 60 seconds
|
||||||
|
for (int i = 0; i < pool->relay_count; i++) {
|
||||||
|
nostr_relay_t* relay = pool->relays[i];
|
||||||
|
if (!relay->connected) {
|
||||||
|
log_message(LOG_WARN, "Relay %s disconnected, reconnecting...", relay->url);
|
||||||
|
nostr_relay_connect(relay);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Add exponential backoff for failed connections
|
||||||
|
|
||||||
|
4. Log connection status changes
|
||||||
|
|
||||||
|
**Note:** `nostr_core_lib` likely handles most of this, just need to verify and add logging
|
||||||
|
|
||||||
|
#### Phase 7: Configuration Management (2 hours)
|
||||||
|
|
||||||
|
**Goal:** Allow runtime configuration updates via admin API
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
|
||||||
|
1. Add new admin commands to [`src/admin_commands.c`](../src/admin_commands.c:1):
|
||||||
|
- `relay_config_query` - Get current relay client config
|
||||||
|
- `relay_config_update` - Update relay client config
|
||||||
|
- `relay_reconnect` - Force reconnection to relays
|
||||||
|
- `relay_publish_profile` - Re-publish Kind 0 and Kind 10002
|
||||||
|
|
||||||
|
2. Implement handlers:
|
||||||
|
```c
|
||||||
|
static cJSON* handle_relay_config_update(cJSON* params) {
|
||||||
|
// Update database config
|
||||||
|
// Reload relay client if needed
|
||||||
|
// Return success/failure
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Add to command routing in [`admin_commands_process()`](../src/admin_commands.c:101)
|
||||||
|
|
||||||
|
**Integration:** Extends existing admin command system
|
||||||
|
|
||||||
|
#### Phase 8: Testing & Documentation (2-3 hours)
|
||||||
|
|
||||||
|
**Goal:** Comprehensive testing and documentation
|
||||||
|
|
||||||
|
**Tasks:**
|
||||||
|
|
||||||
|
1. Create [`tests/relay_client_test.sh`](../tests/relay_client_test.sh):
|
||||||
|
- Test database config loading
|
||||||
|
- Test Kind 0 publishing
|
||||||
|
- Test Kind 10002 publishing
|
||||||
|
- Test admin command subscription
|
||||||
|
- Test reconnection logic
|
||||||
|
- Test config updates via admin API
|
||||||
|
|
||||||
|
2. Create [`docs/RELAY_CLIENT.md`](../docs/RELAY_CLIENT.md):
|
||||||
|
- Document configuration options
|
||||||
|
- Document Kind 0 content format
|
||||||
|
- Document Kind 10002 tags format
|
||||||
|
- Document admin commands
|
||||||
|
- Document troubleshooting
|
||||||
|
|
||||||
|
3. Update [`README.md`](../README.md) with relay client section
|
||||||
|
|
||||||
|
4. Add logging for all relay client operations
|
||||||
|
|
||||||
|
#### Implementation Summary
|
||||||
|
|
||||||
|
**Total Estimated Time:** 13-17 hours
|
||||||
|
|
||||||
|
**Phase Breakdown:**
|
||||||
|
1. Database Schema (1 hour)
|
||||||
|
2. Config Loading (1-2 hours)
|
||||||
|
3. Kind 0 Publishing (2-3 hours)
|
||||||
|
4. Kind 10002 Publishing (2-3 hours)
|
||||||
|
5. Admin Subscription (1 hour) - mostly verification
|
||||||
|
6. Connection Persistence (2 hours)
|
||||||
|
7. Config Management (2 hours)
|
||||||
|
8. Testing & Docs (2-3 hours)
|
||||||
|
|
||||||
|
**Key Benefits:**
|
||||||
|
- ✅ Leverages existing `relay_client.c` infrastructure
|
||||||
|
- ✅ Uses `nostr_core_lib` for all Nostr operations
|
||||||
|
- ✅ Integrates with existing admin command system
|
||||||
|
- ✅ No new dependencies required
|
||||||
|
- ✅ Minimal code changes needed
|
||||||
|
|
||||||
|
**Dependencies:**
|
||||||
|
- `nostr_core_lib` - websocket client, event signing, NIP-44
|
||||||
|
- `cJSON` - JSON parsing for config values
|
||||||
|
- SQLite3 - database storage
|
||||||
|
|
||||||
|
**Configuration Example:**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Enable relay client
|
||||||
|
UPDATE config SET enable_relay_connect = 1;
|
||||||
|
|
||||||
|
-- Set profile
|
||||||
|
UPDATE config SET kind_0_content = '{
|
||||||
|
"name": "My Ginxsom Server",
|
||||||
|
"about": "Blossom blob storage for my community",
|
||||||
|
"picture": "https://example.com/logo.png"
|
||||||
|
}';
|
||||||
|
|
||||||
|
-- Set relay list
|
||||||
|
UPDATE config SET kind_10002_tags = '[
|
||||||
|
["r", "wss://relay.damus.io"],
|
||||||
|
["r", "wss://relay.nostr.band"],
|
||||||
|
["r", "wss://nos.lol"]
|
||||||
|
]';
|
||||||
|
```
|
||||||
|
|
||||||
|
**Startup Sequence:**
|
||||||
|
|
||||||
|
1. Load config from database
|
||||||
|
2. Check `enable_relay_connect` flag
|
||||||
|
3. If enabled:
|
||||||
|
- Parse `kind_10002_tags` for relay URLs
|
||||||
|
- Connect to relays
|
||||||
|
- Wait for connections (2 second timeout)
|
||||||
|
- Publish Kind 0 profile
|
||||||
|
- Publish Kind 10002 relay list
|
||||||
|
- Subscribe to Kind 23458 admin commands
|
||||||
|
- Start connection monitoring thread
|
||||||
|
|
||||||
|
**Runtime Behavior:**
|
||||||
|
|
||||||
|
- Maintains persistent connections to configured relays
|
||||||
|
- Auto-reconnects on connection failure (exponential backoff)
|
||||||
|
- Re-publishes Kind 0 and Kind 10002 every 24 hours
|
||||||
|
- Processes Kind 23458 admin commands from authorized pubkeys
|
||||||
|
- Logs all relay operations for debugging
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## AGENT IMPLEMENTATION - REVISED PLAN
|
||||||
|
|
||||||
|
### Executive Summary
|
||||||
|
|
||||||
|
After analyzing the existing codebase, I discovered that **most of the infrastructure is already in place**! The key insight is that we should leverage existing code rather than duplicate it.
|
||||||
|
|
||||||
|
### Analysis of Existing Code
|
||||||
|
|
||||||
|
#### What We Already Have
|
||||||
|
|
||||||
|
1. **[`src/admin_commands.c`](../src/admin_commands.c:1)** - Complete command processing system
|
||||||
|
- [`admin_commands_process()`](../src/admin_commands.c:101) - Routes commands to handlers
|
||||||
|
- [`admin_decrypt_command()`](../src/admin_commands.c:67) - NIP-44 decryption wrapper
|
||||||
|
- [`admin_encrypt_response()`](../src/admin_commands.c:43) - NIP-44 encryption wrapper
|
||||||
|
- Individual handlers: config_query, config_update, stats_query, system_status, blob_list, storage_stats, sql_query
|
||||||
|
|
||||||
|
2. **[`src/admin_event.c`](../src/admin_event.c:1)** - HTTP endpoint handler (currently Kind 23456/23457)
|
||||||
|
- [`handle_admin_event_request()`](../src/admin_event.c:37) - Processes POST requests
|
||||||
|
- Lines 189-205: NIP-44 decryption
|
||||||
|
- Lines 391-408: NIP-44 encryption
|
||||||
|
- Lines 355-471: Response event creation
|
||||||
|
|
||||||
|
3. **[`src/relay_client.c`](../src/relay_client.c:1)** - Relay connection manager (already uses Kind 23458/23459!)
|
||||||
|
- [`relay_client_init()`](../src/relay_client.c:64) - Loads config, creates pool
|
||||||
|
- [`relay_client_start()`](../src/relay_client.c:258) - Starts management thread
|
||||||
|
- [`on_admin_command_event()`](../src/relay_client.c:615) - Processes Kind 23458 from relays
|
||||||
|
- Lines 664-683: Decrypts command using `admin_decrypt_command()`
|
||||||
|
- Line 708: Processes command using `admin_commands_process()`
|
||||||
|
- Lines 728-740: Encrypts and sends response
|
||||||
|
|
||||||
|
#### Key Architectural Insight
|
||||||
|
|
||||||
|
**The architecture is already unified!**
|
||||||
|
- **[`admin_commands.c`](../src/admin_commands.c:1)** provides singular command processing functions
|
||||||
|
- **[`admin_event.c`](../src/admin_event.c:1)** handles HTTP delivery (POST body)
|
||||||
|
- **[`relay_client.c`](../src/relay_client.c:615)** handles relay delivery (websocket)
|
||||||
|
- **Both use the same** `admin_decrypt_command()`, `admin_commands_process()`, and `admin_encrypt_response()`
|
||||||
|
|
||||||
|
**No code duplication needed!** We just need to:
|
||||||
|
1. Update kind numbers from 23456→23458 and 23457→23459
|
||||||
|
2. Add HTTP Authorization header support (currently only POST body)
|
||||||
|
3. Embed web interface
|
||||||
|
4. Adapt c-relay UI to work with Blossom data
|
||||||
|
|
||||||
|
### Revised Implementation Plan
|
||||||
|
|
||||||
|
#### Phase 1: Update to Kind 23458/23459 (2-3 hours)
|
||||||
|
|
||||||
|
**Goal**: Change from Kind 23456/23457 to Kind 23458/23459 throughout codebase
|
||||||
|
|
||||||
|
**Tasks**:
|
||||||
|
1. Update [`src/admin_event.c`](../src/admin_event.c:1)
|
||||||
|
- Line 1: Update comment from "Kind 23456/23457" to "Kind 23458/23459"
|
||||||
|
- Line 86-87: Change kind check from 23456 to 23458
|
||||||
|
- Line 414: Change response kind from 23457 to 23459
|
||||||
|
- Line 436: Update `nostr_create_and_sign_event()` call to use 23459
|
||||||
|
|
||||||
|
2. Update [`src/admin_commands.h`](../src/admin_commands.h:1)
|
||||||
|
- Line 4: Update comment from "Kind 23456" to "Kind 23458"
|
||||||
|
- Line 5: Update comment from "Kind 23457" to "Kind 23459"
|
||||||
|
|
||||||
|
3. Test both delivery methods work with new kind numbers
|
||||||
|
|
||||||
|
**Note**: [`relay_client.c`](../src/relay_client.c:1) already uses 23458/23459! Only admin_event.c needs updating.
|
||||||
|
|
||||||
|
#### Phase 2: Add Authorization Header Support (3-4 hours)
|
||||||
|
|
||||||
|
**Goal**: Support Kind 23458 events in HTTP Authorization header (in addition to POST body)
|
||||||
|
|
||||||
|
**Current State**: [`admin_event.c`](../src/admin_event.c:37) only reads from POST body
|
||||||
|
|
||||||
|
**Tasks**:
|
||||||
|
1. Create new function `parse_authorization_header()` in [`src/admin_event.c`](../src/admin_event.c:1)
|
||||||
|
```c
|
||||||
|
// Parse Authorization header for Kind 23458 event
|
||||||
|
// Returns: cJSON event object or NULL
|
||||||
|
static cJSON* parse_authorization_header(void) {
|
||||||
|
const char* auth_header = getenv("HTTP_AUTHORIZATION");
|
||||||
|
if (!auth_header || strncmp(auth_header, "Nostr ", 6) != 0) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse base64-encoded event after "Nostr "
|
||||||
|
const char* b64_event = auth_header + 6;
|
||||||
|
// Decode and parse JSON
|
||||||
|
// Return cJSON object
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Modify [`handle_admin_event_request()`](../src/admin_event.c:37) to check both sources:
|
||||||
|
```c
|
||||||
|
// Try Authorization header first
|
||||||
|
cJSON* event = parse_authorization_header();
|
||||||
|
|
||||||
|
// Fall back to POST body if no Authorization header
|
||||||
|
if (!event) {
|
||||||
|
// Existing POST body parsing code (lines 38-82)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Extract common processing logic into `process_admin_event()`:
|
||||||
|
```c
|
||||||
|
static int process_admin_event(cJSON* event) {
|
||||||
|
// Lines 84-256 (existing validation and processing)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Test both delivery methods:
|
||||||
|
- POST body with JSON event
|
||||||
|
- Authorization header with base64-encoded event
|
||||||
|
|
||||||
|
#### Phase 3: Embed Web Interface (4-5 hours)
|
||||||
|
|
||||||
|
**Goal**: Embed c-relay admin UI files into binary
|
||||||
|
|
||||||
|
**Tasks**:
|
||||||
|
1. Create [`scripts/embed_web_files.sh`](../scripts/embed_web_files.sh)
|
||||||
|
```bash
|
||||||
|
#!/bin/bash
|
||||||
|
# Convert web files to C byte arrays
|
||||||
|
|
||||||
|
for file in api/*.html api/*.css api/*.js; do
|
||||||
|
filename=$(basename "$file")
|
||||||
|
varname=$(echo "$filename" | tr '.-' '__')
|
||||||
|
|
||||||
|
echo "// Embedded: $filename" > "src/embedded_${varname}.h"
|
||||||
|
echo "static const unsigned char embedded_${varname}[] = {" >> "src/embedded_${varname}.h"
|
||||||
|
hexdump -v -e '16/1 "0x%02x, " "\n"' "$file" >> "src/embedded_${varname}.h"
|
||||||
|
echo "};" >> "src/embedded_${varname}.h"
|
||||||
|
echo "static const size_t embedded_${varname}_size = sizeof(embedded_${varname});" >> "src/embedded_${varname}.h"
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Create [`src/admin_interface.c`](../src/admin_interface.c)
|
||||||
|
```c
|
||||||
|
#include "embedded_index_html.h"
|
||||||
|
#include "embedded_index_js.h"
|
||||||
|
#include "embedded_index_css.h"
|
||||||
|
|
||||||
|
void handle_admin_interface_request(const char* path) {
|
||||||
|
if (strcmp(path, "/admin") == 0 || strcmp(path, "/admin/") == 0) {
|
||||||
|
printf("Content-Type: text/html\r\n\r\n");
|
||||||
|
fwrite(embedded_index_html, 1, embedded_index_html_size, stdout);
|
||||||
|
}
|
||||||
|
else if (strcmp(path, "/admin/index.js") == 0) {
|
||||||
|
printf("Content-Type: application/javascript\r\n\r\n");
|
||||||
|
fwrite(embedded_index_js, 1, embedded_index_js_size, stdout);
|
||||||
|
}
|
||||||
|
else if (strcmp(path, "/admin/index.css") == 0) {
|
||||||
|
printf("Content-Type: text/css\r\n\r\n");
|
||||||
|
fwrite(embedded_index_css, 1, embedded_index_css_size, stdout);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Update [`Makefile`](../Makefile) to run embedding script before compilation
|
||||||
|
|
||||||
|
4. Add nginx routing for `/admin` and `/api/admin` paths
|
||||||
|
|
||||||
|
5. Test embedded files are served correctly
|
||||||
|
|
||||||
|
#### Phase 4: Adapt Web Interface (5-6 hours)
|
||||||
|
|
||||||
|
**Goal**: Modify c-relay UI to work with Ginxsom/Blossom
|
||||||
|
|
||||||
|
**Tasks**:
|
||||||
|
1. Remove DM section from [`api/index.html`](../api/index.html)
|
||||||
|
- Delete lines 311-335 (DM section content)
|
||||||
|
- Delete line 20 (DM navigation button)
|
||||||
|
|
||||||
|
2. Add Kind 23458/23459 wrapper to [`api/index.js`](../api/index.js)
|
||||||
|
```javascript
|
||||||
|
// Create Kind 23458 admin command event
|
||||||
|
async function createAdminEvent(commandArray) {
|
||||||
|
const content = JSON.stringify(commandArray);
|
||||||
|
// Encrypt using NIP-44 (use nostr-tools or similar)
|
||||||
|
const encrypted = await nip44.encrypt(serverPubkey, content);
|
||||||
|
|
||||||
|
const event = {
|
||||||
|
kind: 23458,
|
||||||
|
created_at: Math.floor(Date.now() / 1000),
|
||||||
|
tags: [['p', serverPubkey]],
|
||||||
|
content: encrypted
|
||||||
|
};
|
||||||
|
|
||||||
|
// Sign event
|
||||||
|
return await signEvent(event);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send admin command via Authorization header
|
||||||
|
async function sendAdminCommand(commandArray) {
|
||||||
|
const event = await createAdminEvent(commandArray);
|
||||||
|
const b64Event = btoa(JSON.stringify(event));
|
||||||
|
|
||||||
|
const response = await fetch('/api/admin', {
|
||||||
|
method: 'POST',
|
||||||
|
headers: {
|
||||||
|
'Authorization': `Nostr ${b64Event}`
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
const responseEvent = await response.json();
|
||||||
|
// Decrypt Kind 23459 response
|
||||||
|
const decrypted = await nip44.decrypt(responseEvent.content);
|
||||||
|
return JSON.parse(decrypted);
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Replace all `fetch()` calls with `sendAdminCommand()`:
|
||||||
|
- Database stats: `sendAdminCommand(['stats_query'])`
|
||||||
|
- Config query: `sendAdminCommand(['config_query'])`
|
||||||
|
- Config update: `sendAdminCommand(['config_update', {key: value}])`
|
||||||
|
- Blob list: `sendAdminCommand(['blob_list', {limit: 100}])`
|
||||||
|
- SQL query: `sendAdminCommand(['sql_query', 'SELECT ...'])`
|
||||||
|
|
||||||
|
4. Add data mapping functions:
|
||||||
|
```javascript
|
||||||
|
// Map Blossom data to c-relay UI expectations
|
||||||
|
function mapBlossomToRelay(data) {
|
||||||
|
if (data.blobs) {
|
||||||
|
// Map blobs to events
|
||||||
|
return {
|
||||||
|
events: data.blobs.map(blob => ({
|
||||||
|
id: blob.sha256,
|
||||||
|
kind: mimeToKind(blob.type),
|
||||||
|
pubkey: blob.uploader_pubkey,
|
||||||
|
created_at: blob.uploaded_at,
|
||||||
|
content: blob.filename || ''
|
||||||
|
}))
|
||||||
|
};
|
||||||
|
}
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
function mimeToKind(mimeType) {
|
||||||
|
// Map MIME types to pseudo-kinds for UI display
|
||||||
|
if (mimeType.startsWith('image/')) return 1;
|
||||||
|
if (mimeType.startsWith('video/')) return 2;
|
||||||
|
if (mimeType.startsWith('audio/')) return 3;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Test all UI sections work with Blossom data
|
||||||
|
|
||||||
|
#### Phase 5: Testing & Documentation (2-3 hours)
|
||||||
|
|
||||||
|
**Goal**: Comprehensive testing and documentation
|
||||||
|
|
||||||
|
**Tasks**:
|
||||||
|
1. Create [`tests/admin_unified_test.sh`](../tests/admin_unified_test.sh)
|
||||||
|
- Test HTTP POST body delivery
|
||||||
|
- Test HTTP Authorization header delivery
|
||||||
|
- Test relay delivery (if enabled)
|
||||||
|
- Test all command types
|
||||||
|
- Test encryption/decryption
|
||||||
|
- Test error handling
|
||||||
|
|
||||||
|
2. Create [`docs/ADMIN_INTERFACE.md`](../docs/ADMIN_INTERFACE.md)
|
||||||
|
- Document dual delivery architecture
|
||||||
|
- Document command format
|
||||||
|
- Document response format
|
||||||
|
- Document web interface usage
|
||||||
|
- Document relay configuration
|
||||||
|
|
||||||
|
3. Update [`README.md`](../README.md) with admin interface section
|
||||||
|
|
||||||
|
4. Update [`docs/IMPLEMENTATION.md`](../docs/IMPLEMENTATION.md) with admin system details
|
||||||
|
|
||||||
|
### Summary of Changes
|
||||||
|
|
||||||
|
#### What We're Keeping (No Duplication!)
|
||||||
|
- ✅ [`admin_commands.c`](../src/admin_commands.c:1) - All command handlers
|
||||||
|
- ✅ [`admin_decrypt_command()`](../src/admin_commands.c:67) - Decryption
|
||||||
|
- ✅ [`admin_encrypt_response()`](../src/admin_commands.c:43) - Encryption
|
||||||
|
- ✅ [`admin_commands_process()`](../src/admin_commands.c:101) - Command routing
|
||||||
|
- ✅ [`relay_client.c`](../src/relay_client.c:1) - Relay delivery (already uses 23458/23459!)
|
||||||
|
|
||||||
|
#### What We're Changing
|
||||||
|
- 🔄 [`admin_event.c`](../src/admin_event.c:1) - Update to Kind 23458/23459, add Authorization header support
|
||||||
|
- 🔄 [`admin_commands.h`](../src/admin_commands.h:1) - Update comments to reflect 23458/23459
|
||||||
|
|
||||||
|
#### What We're Adding
|
||||||
|
- ➕ [`scripts/embed_web_files.sh`](../scripts/embed_web_files.sh) - File embedding script
|
||||||
|
- ➕ [`src/admin_interface.c`](../src/admin_interface.c) - Embedded file serving
|
||||||
|
- ➕ [`api/index.js`](../api/index.js) modifications - Kind 23458/23459 wrappers
|
||||||
|
- ➕ [`api/index.html`](../api/index.html) modifications - Remove DM section
|
||||||
|
- ➕ Documentation and tests
|
||||||
|
|
||||||
|
### Estimated Timeline
|
||||||
|
|
||||||
|
- Phase 1 (Kind number updates): 2-3 hours
|
||||||
|
- Phase 2 (Authorization header): 3-4 hours
|
||||||
|
- Phase 3 (Embed web files): 4-5 hours
|
||||||
|
- Phase 4 (Adapt UI): 5-6 hours
|
||||||
|
- Phase 5 (Testing & docs): 2-3 hours
|
||||||
|
|
||||||
|
**Total: 16-21 hours**
|
||||||
|
|
||||||
|
This is significantly less than the original 19-27 hour estimate because we're leveraging existing infrastructure rather than duplicating it.
|
||||||
|
|
||||||
|
### Key Benefits
|
||||||
|
|
||||||
|
1. **No Code Duplication**: Reuse existing `admin_commands.c` functions
|
||||||
|
2. **Unified Processing**: Same code path for HTTP and relay delivery
|
||||||
|
3. **Already Implemented**: Relay client already uses correct kind numbers!
|
||||||
|
4. **Minimal Changes**: Only need to update `admin_event.c` and add UI embedding
|
||||||
|
5. **Consistent Architecture**: Both delivery methods use same encryption/decryption
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## IMPLEMENTATION STATUS
|
||||||
|
|
||||||
|
### Phase 1: Update to Kind 23458/23459 ✅ COMPLETE
|
||||||
|
**Completed:** December 12, 2025
|
||||||
|
**Duration:** ~15 minutes
|
||||||
|
|
||||||
|
**Changes Made:**
|
||||||
|
1. Updated [`src/admin_event.c`](../src/admin_event.c:1) - 7 locations
|
||||||
|
- Line 1: Comment updated to Kind 23458/23459
|
||||||
|
- Line 34: Function comment updated
|
||||||
|
- Lines 84-92: Kind verification changed from 23456 to 23458
|
||||||
|
- Line 248: Comment updated for Kind 23459 response
|
||||||
|
- Line 353: Function comment updated
|
||||||
|
- Line 414: Response kind changed from 23457 to 23459
|
||||||
|
- Line 436: Event signing updated to use kind 23459
|
||||||
|
|
||||||
|
2. Updated [`src/admin_commands.h`](../src/admin_commands.h:1)
|
||||||
|
- Lines 4-5: Comments updated to reflect Kind 23458/23459
|
||||||
|
|
||||||
|
3. Updated [`tests/admin_event_test.sh`](../tests/admin_event_test.sh) - 6 locations
|
||||||
|
- Line 4: Header comment updated
|
||||||
|
- Line 75: Function comment updated
|
||||||
|
- Line 80: Log message updated
|
||||||
|
- Line 92: nak event creation updated to kind 23458
|
||||||
|
- Line 107: Comment updated
|
||||||
|
- Lines 136-138: Response parsing updated to check for kind 23459
|
||||||
|
- Line 178: Test suite description updated
|
||||||
|
|
||||||
|
**Verification:**
|
||||||
|
- ✅ Build succeeds without errors
|
||||||
|
- ✅ Server starts and accepts requests
|
||||||
|
- ✅ `/api/admin` endpoint responds (test shows expected behavior - rejects plaintext content)
|
||||||
|
|
||||||
|
### Phase 2: Add Authorization Header Support ✅ COMPLETE
|
||||||
|
**Completed:** December 12, 2025
|
||||||
|
**Duration:** ~30 minutes
|
||||||
|
|
||||||
|
**Changes Made:**
|
||||||
|
1. Added [`parse_authorization_header()`](../src/admin_event.c:259) function
|
||||||
|
- Parses "Authorization: Nostr <event-json>" header format
|
||||||
|
- Returns cJSON event object or NULL if not present
|
||||||
|
- Supports both base64-encoded and direct JSON formats
|
||||||
|
|
||||||
|
2. Added [`process_admin_event()`](../src/admin_event.c:289) function
|
||||||
|
- Extracted all event processing logic from `handle_admin_event_request()`
|
||||||
|
- Handles validation, admin authentication, NIP-44 decryption
|
||||||
|
- Executes commands and generates Kind 23459 responses
|
||||||
|
- Single unified code path for both delivery methods
|
||||||
|
|
||||||
|
3. Refactored [`handle_admin_event_request()`](../src/admin_event.c:37)
|
||||||
|
- Now checks Authorization header first
|
||||||
|
- Falls back to POST body if header not present
|
||||||
|
- Delegates all processing to `process_admin_event()`
|
||||||
|
- Cleaner, more maintainable code structure
|
||||||
|
|
||||||
|
**Architecture:**
|
||||||
|
```
|
||||||
|
HTTP Request
|
||||||
|
↓
|
||||||
|
handle_admin_event_request()
|
||||||
|
↓
|
||||||
|
├─→ parse_authorization_header() → event (if present)
|
||||||
|
└─→ Parse POST body → event (if header not present)
|
||||||
|
↓
|
||||||
|
process_admin_event(event)
|
||||||
|
↓
|
||||||
|
├─→ Validate Kind 23458
|
||||||
|
├─→ Verify admin pubkey
|
||||||
|
├─→ Decrypt NIP-44 content
|
||||||
|
├─→ Parse command array
|
||||||
|
├─→ Execute command (config_query, etc.)
|
||||||
|
└─→ send_admin_response_event() → Kind 23459
|
||||||
|
```
|
||||||
|
|
||||||
|
**Verification:**
|
||||||
|
- ✅ Build succeeds without errors
|
||||||
|
- ✅ Server starts and accepts requests
|
||||||
|
- ✅ Supports both POST body and Authorization header delivery
|
||||||
|
- ✅ Unified processing for both methods
|
||||||
|
|
||||||
|
**Note:** Test script currently sends plaintext content instead of NIP-44 encrypted content, so tests fail with "Invalid JSON" error. This is expected and correct behavior - the server properly rejects non-encrypted content.
|
||||||
|
|
||||||
|
### Phase 3: Embed Web Interface ⏳ PENDING
|
||||||
|
**Status:** Not Started
|
||||||
|
**Estimated Duration:** 4-5 hours
|
||||||
|
|
||||||
|
**Planned Tasks:**
|
||||||
|
1. Create `scripts/embed_web_files.sh` script
|
||||||
|
2. Test embedding with sample files
|
||||||
|
3. Create `src/admin_interface.c` for serving embedded files
|
||||||
|
4. Add `handle_admin_interface_request()` function
|
||||||
|
5. Update Makefile with embedding targets
|
||||||
|
6. Add nginx routing for `/admin` and `/api/`
|
||||||
|
7. Test embedded file serving
|
||||||
|
|
||||||
|
### Phase 4: Adapt Web Interface ⏳ PENDING
|
||||||
|
**Status:** Not Started
|
||||||
|
**Estimated Duration:** 5-6 hours
|
||||||
|
|
||||||
|
**Planned Tasks:**
|
||||||
|
1. Remove DM section from `api/index.html`
|
||||||
|
2. Add `createAdminEvent()` function to `api/index.js`
|
||||||
|
3. Add `sendAdminCommand()` function to `api/index.js`
|
||||||
|
4. Replace `fetch()` calls with `sendAdminCommand()` throughout
|
||||||
|
5. Add `mapBlossomToRelay()` data mapping function
|
||||||
|
6. Add `mimeToKind()` helper function
|
||||||
|
7. Test UI displays correctly with Blossom data
|
||||||
|
8. Verify all sections work (Statistics, Config, Auth, Database)
|
||||||
|
|
||||||
|
### Phase 5: Testing & Documentation ⏳ PENDING
|
||||||
|
**Status:** Not Started
|
||||||
|
**Estimated Duration:** 2-3 hours
|
||||||
|
|
||||||
|
**Planned Tasks:**
|
||||||
|
1. Create `tests/admin_unified_test.sh`
|
||||||
|
2. Test HTTP POST body delivery with NIP-44 encryption
|
||||||
|
3. Test HTTP Authorization header delivery with NIP-44 encryption
|
||||||
|
4. Test relay delivery (if enabled)
|
||||||
|
5. Test all command types (stats_query, config_query, etc.)
|
||||||
|
6. Test encryption/decryption
|
||||||
|
7. Test error handling
|
||||||
|
8. Create `docs/ADMIN_INTERFACE.md`
|
||||||
|
9. Update `README.md` with admin interface section
|
||||||
|
10. Update `docs/IMPLEMENTATION.md` with admin system details
|
||||||
|
11. Create troubleshooting guide
|
||||||
|
|
||||||
|
### Summary
|
||||||
|
|
||||||
|
**Completed:** Phases 1-2 (45 minutes total)
|
||||||
|
**Remaining:** Phases 3-5 (11-14 hours estimated)
|
||||||
|
|
||||||
|
**Key Achievements:**
|
||||||
|
- ✅ Updated all kind numbers from 23456/23457 to 23458/23459
|
||||||
|
- ✅ Added dual delivery support (POST body + Authorization header)
|
||||||
|
- ✅ Unified processing architecture (no code duplication)
|
||||||
|
- ✅ Server builds and runs successfully
|
||||||
|
|
||||||
|
**Next Steps:**
|
||||||
|
- Embed c-relay web interface into binary
|
||||||
|
- Adapt UI to work with Blossom data structures
|
||||||
|
- Add comprehensive testing with NIP-44 encryption
|
||||||
|
- Complete documentation
|
||||||
8
ginxsom.code-workspace
Normal file
8
ginxsom.code-workspace
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
{
|
||||||
|
"folders": [
|
||||||
|
{
|
||||||
|
"path": "."
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"settings": {}
|
||||||
|
}
|
||||||
Submodule nostr_core_lib deleted from 7d7c3eafe8
@@ -6,7 +6,7 @@
|
|||||||
# Configuration
|
# Configuration
|
||||||
|
|
||||||
# Parse command line arguments
|
# Parse command line arguments
|
||||||
TEST_MODE=0
|
TEST_MODE=1 # Default to test mode
|
||||||
FOLLOW_LOGS=0
|
FOLLOW_LOGS=0
|
||||||
|
|
||||||
while [[ $# -gt 0 ]]; do
|
while [[ $# -gt 0 ]]; do
|
||||||
@@ -15,14 +15,19 @@ while [[ $# -gt 0 ]]; do
|
|||||||
TEST_MODE=1
|
TEST_MODE=1
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
|
-p|--production)
|
||||||
|
TEST_MODE=0
|
||||||
|
shift
|
||||||
|
;;
|
||||||
--follow)
|
--follow)
|
||||||
FOLLOW_LOGS=1
|
FOLLOW_LOGS=1
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo "Unknown option: $1"
|
echo "Unknown option: $1"
|
||||||
echo "Usage: $0 [-t|--test-keys] [--follow]"
|
echo "Usage: $0 [-t|--test-keys] [-p|--production] [--follow]"
|
||||||
echo " -t, --test-keys Use test mode with keys from .test_keys"
|
echo " -t, --test-keys Use test mode with keys from .test_keys (DEFAULT)"
|
||||||
|
echo " -p, --production Use production mode (generate new keys)"
|
||||||
echo " --follow Follow logs in real-time"
|
echo " --follow Follow logs in real-time"
|
||||||
exit 1
|
exit 1
|
||||||
;;
|
;;
|
||||||
@@ -44,7 +49,22 @@ if [[ $FOLLOW_LOGS -eq 1 ]]; then
|
|||||||
wait
|
wait
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
FCGI_BINARY="./build/ginxsom-fcgi"
|
# Detect architecture for static binary name
|
||||||
|
ARCH=$(uname -m)
|
||||||
|
case "$ARCH" in
|
||||||
|
x86_64) STATIC_BINARY="./build/ginxsom-fcgi_static_x86_64" ;;
|
||||||
|
aarch64|arm64) STATIC_BINARY="./build/ginxsom-fcgi_static_arm64" ;;
|
||||||
|
*) STATIC_BINARY="./build/ginxsom-fcgi_static_${ARCH}" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# Use static binary if available, fallback to dynamic
|
||||||
|
if [ -f "$STATIC_BINARY" ]; then
|
||||||
|
FCGI_BINARY="$STATIC_BINARY"
|
||||||
|
echo "Using static binary: $FCGI_BINARY"
|
||||||
|
else
|
||||||
|
FCGI_BINARY="./build/ginxsom-fcgi"
|
||||||
|
echo "Static binary not found, using dynamic binary: $FCGI_BINARY"
|
||||||
|
fi
|
||||||
SOCKET_PATH="/tmp/ginxsom-fcgi.sock"
|
SOCKET_PATH="/tmp/ginxsom-fcgi.sock"
|
||||||
PID_FILE="/tmp/ginxsom-fcgi.pid"
|
PID_FILE="/tmp/ginxsom-fcgi.pid"
|
||||||
NGINX_CONFIG="config/local-nginx.conf"
|
NGINX_CONFIG="config/local-nginx.conf"
|
||||||
@@ -168,15 +188,24 @@ fi
|
|||||||
|
|
||||||
echo -e "${GREEN}FastCGI cleanup complete${NC}"
|
echo -e "${GREEN}FastCGI cleanup complete${NC}"
|
||||||
|
|
||||||
# Step 3: Always rebuild FastCGI binary with clean build
|
# Step 3: Always rebuild FastCGI binary with static build
|
||||||
echo -e "\n${YELLOW}3. Rebuilding FastCGI binary (clean build)...${NC}"
|
echo -e "\n${YELLOW}3. Rebuilding FastCGI binary (static build)...${NC}"
|
||||||
echo "Performing clean rebuild to ensure all changes are compiled..."
|
echo "Building static binary with Docker..."
|
||||||
make clean && make
|
make static
|
||||||
if [ $? -ne 0 ]; then
|
if [ $? -ne 0 ]; then
|
||||||
echo -e "${RED}Build failed! Cannot continue.${NC}"
|
echo -e "${RED}Static build failed! Cannot continue.${NC}"
|
||||||
|
echo -e "${RED}Docker must be available and running for static builds.${NC}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
echo -e "${GREEN}Clean rebuild complete${NC}"
|
|
||||||
|
# Update FCGI_BINARY to use the newly built static binary
|
||||||
|
ARCH=$(uname -m)
|
||||||
|
case "$ARCH" in
|
||||||
|
x86_64) FCGI_BINARY="./build/ginxsom-fcgi_static_x86_64" ;;
|
||||||
|
aarch64|arm64) FCGI_BINARY="./build/ginxsom-fcgi_static_arm64" ;;
|
||||||
|
*) FCGI_BINARY="./build/ginxsom-fcgi_static_${ARCH}" ;;
|
||||||
|
esac
|
||||||
|
echo -e "${GREEN}Static build complete: $FCGI_BINARY${NC}"
|
||||||
|
|
||||||
# Step 3.5: Clean database directory for fresh testing
|
# Step 3.5: Clean database directory for fresh testing
|
||||||
echo -e "\n${YELLOW}3.5. Cleaning database directory...${NC}"
|
echo -e "\n${YELLOW}3.5. Cleaning database directory...${NC}"
|
||||||
@@ -246,24 +275,37 @@ else
|
|||||||
echo -e "${YELLOW}Starting FastCGI in production mode - will generate new keys and create database${NC}"
|
echo -e "${YELLOW}Starting FastCGI in production mode - will generate new keys and create database${NC}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Start FastCGI application with proper logging (daemonized but with redirected streams)
|
# Start FastCGI application with proper logging
|
||||||
echo "FastCGI starting at $(date)" >> logs/app/stderr.log
|
echo "FastCGI starting at $(date)" >> logs/app/stderr.log
|
||||||
spawn-fcgi -s "$SOCKET_PATH" -M 666 -u "$USER" -g "$USER" -P "$PID_FILE" -- "$FCGI_BINARY" $FCGI_ARGS 1>>logs/app/stdout.log 2>>logs/app/stderr.log
|
|
||||||
|
|
||||||
if [ $? -eq 0 ] && [ -f "$PID_FILE" ]; then
|
# Use nohup with spawn-fcgi -n to keep process running with redirected output
|
||||||
PID=$(cat "$PID_FILE")
|
# The key is: nohup prevents HUP signal, -n prevents daemonization (keeps stderr connected)
|
||||||
|
nohup spawn-fcgi -n -s "$SOCKET_PATH" -M 666 -u "$USER" -g "$USER" -- "$FCGI_BINARY" $FCGI_ARGS >>logs/app/stdout.log 2>>logs/app/stderr.log </dev/null &
|
||||||
|
SPAWN_PID=$!
|
||||||
|
|
||||||
|
# Wait for spawn-fcgi to spawn the child
|
||||||
|
sleep 1
|
||||||
|
|
||||||
|
# Get the actual FastCGI process PID (child of spawn-fcgi)
|
||||||
|
FCGI_PID=$(pgrep -f "ginxsom-fcgi.*--storage-dir" | head -1)
|
||||||
|
if [ -z "$FCGI_PID" ]; then
|
||||||
|
echo -e "${RED}Warning: Could not find FastCGI process${NC}"
|
||||||
|
FCGI_PID=$SPAWN_PID
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Save PID
|
||||||
|
echo $FCGI_PID > "$PID_FILE"
|
||||||
|
|
||||||
|
# Give it a moment to start
|
||||||
|
sleep 1
|
||||||
|
|
||||||
|
if check_process "$FCGI_PID"; then
|
||||||
echo -e "${GREEN}FastCGI application started successfully${NC}"
|
echo -e "${GREEN}FastCGI application started successfully${NC}"
|
||||||
echo "PID: $PID"
|
echo "PID: $FCGI_PID"
|
||||||
|
echo -e "${GREEN}Process confirmed running${NC}"
|
||||||
# Verify it's actually running
|
|
||||||
if check_process "$PID"; then
|
|
||||||
echo -e "${GREEN}Process confirmed running${NC}"
|
|
||||||
else
|
|
||||||
echo -e "${RED}Warning: Process may have crashed immediately${NC}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
else
|
else
|
||||||
echo -e "${RED}Failed to start FastCGI application${NC}"
|
echo -e "${RED}Failed to start FastCGI application${NC}"
|
||||||
|
echo -e "${RED}Process may have crashed immediately${NC}"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -335,3 +377,7 @@ echo -e "${YELLOW}To monitor logs, check: logs/nginx/error.log, logs/nginx/acces
|
|||||||
echo -e "\n${YELLOW}Server is available at:${NC}"
|
echo -e "\n${YELLOW}Server is available at:${NC}"
|
||||||
echo -e " ${GREEN}HTTP:${NC} http://localhost:9001"
|
echo -e " ${GREEN}HTTP:${NC} http://localhost:9001"
|
||||||
echo -e " ${GREEN}HTTPS:${NC} https://localhost:9443"
|
echo -e " ${GREEN}HTTPS:${NC} https://localhost:9443"
|
||||||
|
echo -e "\n${YELLOW}Admin WebSocket endpoint:${NC}"
|
||||||
|
echo -e " ${GREEN}WSS:${NC} wss://localhost:9443/admin (via nginx proxy)"
|
||||||
|
echo -e " ${GREEN}WS:${NC} ws://localhost:9001/admin (via nginx proxy)"
|
||||||
|
echo -e " ${GREEN}Direct:${NC} ws://localhost:9442 (direct connection)"
|
||||||
82
scripts/embed_web_files.sh
Executable file
82
scripts/embed_web_files.sh
Executable file
@@ -0,0 +1,82 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# Embed web interface files into C source code
|
||||||
|
# This script converts HTML, CSS, and JS files into C byte arrays
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
API_DIR="api"
|
||||||
|
OUTPUT_DIR="src"
|
||||||
|
OUTPUT_FILE="${OUTPUT_DIR}/admin_interface_embedded.h"
|
||||||
|
|
||||||
|
# Files to embed
|
||||||
|
FILES=(
|
||||||
|
"index.html"
|
||||||
|
"index.css"
|
||||||
|
"index.js"
|
||||||
|
"nostr-lite.js"
|
||||||
|
"nostr.bundle.js"
|
||||||
|
"text_graph.js"
|
||||||
|
)
|
||||||
|
|
||||||
|
echo "=== Embedding Web Interface Files ==="
|
||||||
|
echo "Source directory: ${API_DIR}"
|
||||||
|
echo "Output file: ${OUTPUT_FILE}"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Start output file
|
||||||
|
cat > "${OUTPUT_FILE}" << 'EOF'
|
||||||
|
/*
|
||||||
|
* Embedded Web Interface Files
|
||||||
|
* Auto-generated by scripts/embed_web_files.sh
|
||||||
|
* DO NOT EDIT MANUALLY
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ADMIN_INTERFACE_EMBEDDED_H
|
||||||
|
#define ADMIN_INTERFACE_EMBEDDED_H
|
||||||
|
|
||||||
|
#include <stddef.h>
|
||||||
|
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Process each file
|
||||||
|
for file in "${FILES[@]}"; do
|
||||||
|
filepath="${API_DIR}/${file}"
|
||||||
|
|
||||||
|
if [[ ! -f "${filepath}" ]]; then
|
||||||
|
echo "WARNING: File not found: ${filepath}"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create variable name from filename (replace . and - with _)
|
||||||
|
varname=$(echo "${file}" | tr '.-' '__')
|
||||||
|
|
||||||
|
echo "Embedding: ${file} -> embedded_${varname}"
|
||||||
|
|
||||||
|
# Get file size
|
||||||
|
filesize=$(stat -f%z "${filepath}" 2>/dev/null || stat -c%s "${filepath}" 2>/dev/null)
|
||||||
|
|
||||||
|
# Add comment
|
||||||
|
echo "" >> "${OUTPUT_FILE}"
|
||||||
|
echo "// Embedded file: ${file} (${filesize} bytes)" >> "${OUTPUT_FILE}"
|
||||||
|
|
||||||
|
# Convert file to C byte array
|
||||||
|
echo "static const unsigned char embedded_${varname}[] = {" >> "${OUTPUT_FILE}"
|
||||||
|
|
||||||
|
# Use xxd to convert to hex, then format as C array
|
||||||
|
xxd -i < "${filepath}" >> "${OUTPUT_FILE}"
|
||||||
|
|
||||||
|
echo "};" >> "${OUTPUT_FILE}"
|
||||||
|
echo "static const size_t embedded_${varname}_size = sizeof(embedded_${varname});" >> "${OUTPUT_FILE}"
|
||||||
|
done
|
||||||
|
|
||||||
|
# Close header guard
|
||||||
|
cat >> "${OUTPUT_FILE}" << 'EOF'
|
||||||
|
|
||||||
|
#endif /* ADMIN_INTERFACE_EMBEDDED_H */
|
||||||
|
EOF
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=== Embedding Complete ==="
|
||||||
|
echo "Generated: ${OUTPUT_FILE}"
|
||||||
|
echo "Total files embedded: ${#FILES[@]}"
|
||||||
743
src/admin_commands.c
Normal file
743
src/admin_commands.c
Normal file
@@ -0,0 +1,743 @@
|
|||||||
|
/*
|
||||||
|
* Ginxsom Admin Commands Implementation
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "admin_commands.h"
|
||||||
|
#include "../nostr_core_lib/nostr_core/nostr_core.h"
|
||||||
|
#include <sqlite3.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <time.h>
|
||||||
|
|
||||||
|
// Forward declare app_log
|
||||||
|
typedef enum {
|
||||||
|
LOG_DEBUG = 0,
|
||||||
|
LOG_INFO = 1,
|
||||||
|
LOG_WARN = 2,
|
||||||
|
LOG_ERROR = 3
|
||||||
|
} log_level_t;
|
||||||
|
|
||||||
|
void app_log(log_level_t level, const char* format, ...);
|
||||||
|
|
||||||
|
// Global state
|
||||||
|
static struct {
|
||||||
|
int initialized;
|
||||||
|
char db_path[512];
|
||||||
|
} g_admin_state = {0};
|
||||||
|
|
||||||
|
// Initialize admin command system
|
||||||
|
int admin_commands_init(const char *db_path) {
|
||||||
|
if (g_admin_state.initialized) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
strncpy(g_admin_state.db_path, db_path, sizeof(g_admin_state.db_path) - 1);
|
||||||
|
g_admin_state.initialized = 1;
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Admin command system initialized");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// NIP-44 encryption helper
|
||||||
|
int admin_encrypt_response(
|
||||||
|
const unsigned char* server_privkey,
|
||||||
|
const unsigned char* admin_pubkey,
|
||||||
|
const char* plaintext_json,
|
||||||
|
char* output,
|
||||||
|
size_t output_size
|
||||||
|
) {
|
||||||
|
int result = nostr_nip44_encrypt(
|
||||||
|
server_privkey,
|
||||||
|
admin_pubkey,
|
||||||
|
plaintext_json,
|
||||||
|
output,
|
||||||
|
output_size
|
||||||
|
);
|
||||||
|
|
||||||
|
if (result != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to encrypt admin response: %d", result);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// NIP-44 decryption helper
|
||||||
|
int admin_decrypt_command(
|
||||||
|
const unsigned char* server_privkey,
|
||||||
|
const unsigned char* admin_pubkey,
|
||||||
|
const char* encrypted_data,
|
||||||
|
char* output,
|
||||||
|
size_t output_size
|
||||||
|
) {
|
||||||
|
int result = nostr_nip44_decrypt(
|
||||||
|
server_privkey,
|
||||||
|
admin_pubkey,
|
||||||
|
encrypted_data,
|
||||||
|
output,
|
||||||
|
output_size
|
||||||
|
);
|
||||||
|
|
||||||
|
if (result != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to decrypt admin command: %d", result);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create error response
|
||||||
|
static cJSON* create_error_response(const char* query_type, const char* error_msg) {
|
||||||
|
cJSON* response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(response, "query_type", query_type);
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", error_msg);
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process admin command array and generate response
|
||||||
|
cJSON* admin_commands_process(cJSON* command_array, const char* request_event_id) {
|
||||||
|
(void)request_event_id; // Reserved for future use (e.g., logging, tracking)
|
||||||
|
|
||||||
|
if (!cJSON_IsArray(command_array) || cJSON_GetArraySize(command_array) < 1) {
|
||||||
|
return create_error_response("unknown", "Invalid command format");
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* cmd_type = cJSON_GetArrayItem(command_array, 0);
|
||||||
|
if (!cJSON_IsString(cmd_type)) {
|
||||||
|
return create_error_response("unknown", "Command type must be string");
|
||||||
|
}
|
||||||
|
|
||||||
|
const char* command = cmd_type->valuestring;
|
||||||
|
app_log(LOG_INFO, "Processing admin command: %s", command);
|
||||||
|
|
||||||
|
// Route to appropriate handler
|
||||||
|
if (strcmp(command, "config_query") == 0) {
|
||||||
|
return admin_cmd_config_query(command_array);
|
||||||
|
}
|
||||||
|
else if (strcmp(command, "config_update") == 0) {
|
||||||
|
return admin_cmd_config_update(command_array);
|
||||||
|
}
|
||||||
|
else if (strcmp(command, "stats_query") == 0) {
|
||||||
|
return admin_cmd_stats_query(command_array);
|
||||||
|
}
|
||||||
|
else if (strcmp(command, "system_command") == 0) {
|
||||||
|
// Check second parameter for system_status
|
||||||
|
if (cJSON_GetArraySize(command_array) >= 2) {
|
||||||
|
cJSON* subcmd = cJSON_GetArrayItem(command_array, 1);
|
||||||
|
if (cJSON_IsString(subcmd) && strcmp(subcmd->valuestring, "system_status") == 0) {
|
||||||
|
return admin_cmd_system_status(command_array);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return create_error_response("system_command", "Unknown system command");
|
||||||
|
}
|
||||||
|
else if (strcmp(command, "blob_list") == 0) {
|
||||||
|
return admin_cmd_blob_list(command_array);
|
||||||
|
}
|
||||||
|
else if (strcmp(command, "storage_stats") == 0) {
|
||||||
|
return admin_cmd_storage_stats(command_array);
|
||||||
|
}
|
||||||
|
else if (strcmp(command, "sql_query") == 0) {
|
||||||
|
return admin_cmd_sql_query(command_array);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
char error_msg[256];
|
||||||
|
snprintf(error_msg, sizeof(error_msg), "Unknown command: %s", command);
|
||||||
|
return create_error_response("unknown", error_msg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ============================================================================
|
||||||
|
// COMMAND HANDLERS (Stub implementations - to be completed)
|
||||||
|
// ============================================================================
|
||||||
|
|
||||||
|
cJSON* admin_cmd_config_query(cJSON* args) {
|
||||||
|
cJSON* response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(response, "query_type", "config_query");
|
||||||
|
|
||||||
|
// Open database
|
||||||
|
sqlite3* db;
|
||||||
|
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to open database");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if specific keys were requested (args[1] should be array of keys or null for all)
|
||||||
|
cJSON* keys_array = NULL;
|
||||||
|
if (cJSON_GetArraySize(args) >= 2) {
|
||||||
|
keys_array = cJSON_GetArrayItem(args, 1);
|
||||||
|
if (!cJSON_IsArray(keys_array) && !cJSON_IsNull(keys_array)) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Keys parameter must be array or null");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_stmt* stmt;
|
||||||
|
const char* sql;
|
||||||
|
|
||||||
|
if (keys_array && cJSON_IsArray(keys_array) && cJSON_GetArraySize(keys_array) > 0) {
|
||||||
|
// Query specific keys
|
||||||
|
int key_count = cJSON_GetArraySize(keys_array);
|
||||||
|
|
||||||
|
// Build SQL with placeholders
|
||||||
|
char sql_buffer[1024] = "SELECT key, value, description FROM config WHERE key IN (?";
|
||||||
|
for (int i = 1; i < key_count && i < 50; i++) { // Limit to 50 keys
|
||||||
|
strncat(sql_buffer, ",?", sizeof(sql_buffer) - strlen(sql_buffer) - 1);
|
||||||
|
}
|
||||||
|
strncat(sql_buffer, ")", sizeof(sql_buffer) - strlen(sql_buffer) - 1);
|
||||||
|
|
||||||
|
rc = sqlite3_prepare_v2(db, sql_buffer, -1, &stmt, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to prepare query");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bind keys
|
||||||
|
for (int i = 0; i < key_count && i < 50; i++) {
|
||||||
|
cJSON* key_item = cJSON_GetArrayItem(keys_array, i);
|
||||||
|
if (cJSON_IsString(key_item)) {
|
||||||
|
sqlite3_bind_text(stmt, i + 1, key_item->valuestring, -1, SQLITE_STATIC);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Query all config values
|
||||||
|
sql = "SELECT key, value, description FROM config ORDER BY key";
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to prepare query");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute query and build result
|
||||||
|
cJSON* config_obj = cJSON_CreateObject();
|
||||||
|
int count = 0;
|
||||||
|
|
||||||
|
while ((rc = sqlite3_step(stmt)) == SQLITE_ROW) {
|
||||||
|
const char* key = (const char*)sqlite3_column_text(stmt, 0);
|
||||||
|
const char* value = (const char*)sqlite3_column_text(stmt, 1);
|
||||||
|
const char* description = (const char*)sqlite3_column_text(stmt, 2);
|
||||||
|
|
||||||
|
cJSON* entry = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(entry, "value", value ? value : "");
|
||||||
|
if (description && strlen(description) > 0) {
|
||||||
|
cJSON_AddStringToObject(entry, "description", description);
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON_AddItemToObject(config_obj, key, entry);
|
||||||
|
count++;
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
sqlite3_close(db);
|
||||||
|
|
||||||
|
cJSON_AddStringToObject(response, "status", "success");
|
||||||
|
cJSON_AddNumberToObject(response, "count", count);
|
||||||
|
cJSON_AddItemToObject(response, "config", config_obj);
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Config query returned %d entries", count);
|
||||||
|
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* admin_cmd_config_update(cJSON* args) {
|
||||||
|
cJSON* response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(response, "query_type", "config_update");
|
||||||
|
|
||||||
|
// Expected format: ["config_update", {"key1": "value1", "key2": "value2"}]
|
||||||
|
if (cJSON_GetArraySize(args) < 2) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Missing config updates object");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* updates = cJSON_GetArrayItem(args, 1);
|
||||||
|
if (!cJSON_IsObject(updates)) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Updates must be an object");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open database for writing
|
||||||
|
sqlite3* db;
|
||||||
|
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READWRITE, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to open database");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepare update statement
|
||||||
|
const char* sql = "UPDATE config SET value = ?, updated_at = strftime('%s', 'now') WHERE key = ?";
|
||||||
|
sqlite3_stmt* stmt;
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to prepare update statement");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process each update
|
||||||
|
cJSON* updated_keys = cJSON_CreateArray();
|
||||||
|
cJSON* failed_keys = cJSON_CreateArray();
|
||||||
|
int success_count = 0;
|
||||||
|
int fail_count = 0;
|
||||||
|
|
||||||
|
cJSON* item = NULL;
|
||||||
|
cJSON_ArrayForEach(item, updates) {
|
||||||
|
const char* key = item->string;
|
||||||
|
const char* value = cJSON_GetStringValue(item);
|
||||||
|
|
||||||
|
if (!value) {
|
||||||
|
cJSON_AddItemToArray(failed_keys, cJSON_CreateString(key));
|
||||||
|
fail_count++;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_reset(stmt);
|
||||||
|
sqlite3_bind_text(stmt, 1, value, -1, SQLITE_TRANSIENT);
|
||||||
|
sqlite3_bind_text(stmt, 2, key, -1, SQLITE_TRANSIENT);
|
||||||
|
|
||||||
|
rc = sqlite3_step(stmt);
|
||||||
|
if (rc == SQLITE_DONE && sqlite3_changes(db) > 0) {
|
||||||
|
cJSON_AddItemToArray(updated_keys, cJSON_CreateString(key));
|
||||||
|
success_count++;
|
||||||
|
app_log(LOG_INFO, "Updated config key: %s", key);
|
||||||
|
} else {
|
||||||
|
cJSON_AddItemToArray(failed_keys, cJSON_CreateString(key));
|
||||||
|
fail_count++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
sqlite3_close(db);
|
||||||
|
|
||||||
|
cJSON_AddStringToObject(response, "status", "success");
|
||||||
|
cJSON_AddNumberToObject(response, "updated_count", success_count);
|
||||||
|
cJSON_AddNumberToObject(response, "failed_count", fail_count);
|
||||||
|
cJSON_AddItemToObject(response, "updated_keys", updated_keys);
|
||||||
|
if (fail_count > 0) {
|
||||||
|
cJSON_AddItemToObject(response, "failed_keys", failed_keys);
|
||||||
|
} else {
|
||||||
|
cJSON_Delete(failed_keys);
|
||||||
|
}
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* admin_cmd_stats_query(cJSON* args) {
|
||||||
|
(void)args;
|
||||||
|
|
||||||
|
cJSON* response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(response, "query_type", "stats_query");
|
||||||
|
|
||||||
|
// Open database
|
||||||
|
sqlite3* db;
|
||||||
|
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to open database");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query storage stats view
|
||||||
|
const char* sql = "SELECT * FROM storage_stats";
|
||||||
|
sqlite3_stmt* stmt;
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to query stats");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* stats = cJSON_CreateObject();
|
||||||
|
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||||
|
cJSON_AddNumberToObject(stats, "total_blobs", sqlite3_column_int64(stmt, 0));
|
||||||
|
cJSON_AddNumberToObject(stats, "total_bytes", sqlite3_column_int64(stmt, 1));
|
||||||
|
cJSON_AddNumberToObject(stats, "avg_blob_size", sqlite3_column_double(stmt, 2));
|
||||||
|
cJSON_AddNumberToObject(stats, "first_upload", sqlite3_column_int64(stmt, 3));
|
||||||
|
cJSON_AddNumberToObject(stats, "last_upload", sqlite3_column_int64(stmt, 4));
|
||||||
|
cJSON_AddNumberToObject(stats, "unique_uploaders", sqlite3_column_int64(stmt, 5));
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
|
||||||
|
// Get auth rules count
|
||||||
|
sql = "SELECT COUNT(*) FROM auth_rules WHERE enabled = 1";
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc == SQLITE_OK && sqlite3_step(stmt) == SQLITE_ROW) {
|
||||||
|
cJSON_AddNumberToObject(stats, "active_auth_rules", sqlite3_column_int(stmt, 0));
|
||||||
|
}
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
|
||||||
|
sqlite3_close(db);
|
||||||
|
|
||||||
|
cJSON_AddStringToObject(response, "status", "success");
|
||||||
|
cJSON_AddItemToObject(response, "stats", stats);
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* admin_cmd_system_status(cJSON* args) {
|
||||||
|
(void)args;
|
||||||
|
|
||||||
|
cJSON* response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(response, "query_type", "system_status");
|
||||||
|
|
||||||
|
cJSON* status = cJSON_CreateObject();
|
||||||
|
|
||||||
|
// Server uptime (would need to track start time - placeholder for now)
|
||||||
|
cJSON_AddStringToObject(status, "server_status", "running");
|
||||||
|
cJSON_AddNumberToObject(status, "current_time", (double)time(NULL));
|
||||||
|
|
||||||
|
// Database status
|
||||||
|
sqlite3* db;
|
||||||
|
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
|
if (rc == SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(status, "database_status", "connected");
|
||||||
|
|
||||||
|
// Get database size
|
||||||
|
sqlite3_stmt* stmt;
|
||||||
|
const char* sql = "SELECT page_count * page_size as size FROM pragma_page_count(), pragma_page_size()";
|
||||||
|
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) == SQLITE_OK) {
|
||||||
|
if (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||||
|
cJSON_AddNumberToObject(status, "database_size_bytes", sqlite3_column_int64(stmt, 0));
|
||||||
|
}
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_close(db);
|
||||||
|
} else {
|
||||||
|
cJSON_AddStringToObject(status, "database_status", "error");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Memory info (basic - would need more system calls for detailed info)
|
||||||
|
cJSON_AddStringToObject(status, "memory_status", "ok");
|
||||||
|
|
||||||
|
cJSON_AddStringToObject(response, "status", "success");
|
||||||
|
cJSON_AddItemToObject(response, "system", status);
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* admin_cmd_blob_list(cJSON* args) {
|
||||||
|
cJSON* response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(response, "query_type", "blob_list");
|
||||||
|
|
||||||
|
// Parse optional parameters: limit, offset, uploader_pubkey
|
||||||
|
int limit = 100; // Default limit
|
||||||
|
int offset = 0;
|
||||||
|
const char* uploader_filter = NULL;
|
||||||
|
|
||||||
|
if (cJSON_GetArraySize(args) >= 2) {
|
||||||
|
cJSON* params = cJSON_GetArrayItem(args, 1);
|
||||||
|
if (cJSON_IsObject(params)) {
|
||||||
|
cJSON* limit_item = cJSON_GetObjectItem(params, "limit");
|
||||||
|
if (cJSON_IsNumber(limit_item)) {
|
||||||
|
limit = limit_item->valueint;
|
||||||
|
if (limit > 1000) limit = 1000; // Max 1000
|
||||||
|
if (limit < 1) limit = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* offset_item = cJSON_GetObjectItem(params, "offset");
|
||||||
|
if (cJSON_IsNumber(offset_item)) {
|
||||||
|
offset = offset_item->valueint;
|
||||||
|
if (offset < 0) offset = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* uploader_item = cJSON_GetObjectItem(params, "uploader");
|
||||||
|
if (cJSON_IsString(uploader_item)) {
|
||||||
|
uploader_filter = uploader_item->valuestring;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open database
|
||||||
|
sqlite3* db;
|
||||||
|
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to open database");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build query
|
||||||
|
char sql[512];
|
||||||
|
if (uploader_filter) {
|
||||||
|
snprintf(sql, sizeof(sql),
|
||||||
|
"SELECT sha256, size, type, uploaded_at, uploader_pubkey, filename "
|
||||||
|
"FROM blobs WHERE uploader_pubkey = ? "
|
||||||
|
"ORDER BY uploaded_at DESC LIMIT ? OFFSET ?");
|
||||||
|
} else {
|
||||||
|
snprintf(sql, sizeof(sql),
|
||||||
|
"SELECT sha256, size, type, uploaded_at, uploader_pubkey, filename "
|
||||||
|
"FROM blobs ORDER BY uploaded_at DESC LIMIT ? OFFSET ?");
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_stmt* stmt;
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to prepare query");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bind parameters
|
||||||
|
int param_idx = 1;
|
||||||
|
if (uploader_filter) {
|
||||||
|
sqlite3_bind_text(stmt, param_idx++, uploader_filter, -1, SQLITE_STATIC);
|
||||||
|
}
|
||||||
|
sqlite3_bind_int(stmt, param_idx++, limit);
|
||||||
|
sqlite3_bind_int(stmt, param_idx++, offset);
|
||||||
|
|
||||||
|
// Execute and build results
|
||||||
|
cJSON* blobs = cJSON_CreateArray();
|
||||||
|
int count = 0;
|
||||||
|
|
||||||
|
while (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||||
|
cJSON* blob = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(blob, "sha256", (const char*)sqlite3_column_text(stmt, 0));
|
||||||
|
cJSON_AddNumberToObject(blob, "size", sqlite3_column_int64(stmt, 1));
|
||||||
|
cJSON_AddStringToObject(blob, "type", (const char*)sqlite3_column_text(stmt, 2));
|
||||||
|
cJSON_AddNumberToObject(blob, "uploaded_at", sqlite3_column_int64(stmt, 3));
|
||||||
|
|
||||||
|
const char* uploader = (const char*)sqlite3_column_text(stmt, 4);
|
||||||
|
if (uploader) {
|
||||||
|
cJSON_AddStringToObject(blob, "uploader_pubkey", uploader);
|
||||||
|
}
|
||||||
|
|
||||||
|
const char* filename = (const char*)sqlite3_column_text(stmt, 5);
|
||||||
|
if (filename) {
|
||||||
|
cJSON_AddStringToObject(blob, "filename", filename);
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON_AddItemToArray(blobs, blob);
|
||||||
|
count++;
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
sqlite3_close(db);
|
||||||
|
|
||||||
|
cJSON_AddStringToObject(response, "status", "success");
|
||||||
|
cJSON_AddNumberToObject(response, "count", count);
|
||||||
|
cJSON_AddNumberToObject(response, "limit", limit);
|
||||||
|
cJSON_AddNumberToObject(response, "offset", offset);
|
||||||
|
cJSON_AddItemToObject(response, "blobs", blobs);
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* admin_cmd_storage_stats(cJSON* args) {
|
||||||
|
(void)args;
|
||||||
|
|
||||||
|
cJSON* response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(response, "query_type", "storage_stats");
|
||||||
|
|
||||||
|
// Open database
|
||||||
|
sqlite3* db;
|
||||||
|
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to open database");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* storage = cJSON_CreateObject();
|
||||||
|
|
||||||
|
// Get overall stats from view
|
||||||
|
const char* sql = "SELECT * FROM storage_stats";
|
||||||
|
sqlite3_stmt* stmt;
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc == SQLITE_OK && sqlite3_step(stmt) == SQLITE_ROW) {
|
||||||
|
cJSON_AddNumberToObject(storage, "total_blobs", sqlite3_column_int64(stmt, 0));
|
||||||
|
cJSON_AddNumberToObject(storage, "total_bytes", sqlite3_column_int64(stmt, 1));
|
||||||
|
cJSON_AddNumberToObject(storage, "avg_blob_size", sqlite3_column_double(stmt, 2));
|
||||||
|
cJSON_AddNumberToObject(storage, "first_upload", sqlite3_column_int64(stmt, 3));
|
||||||
|
cJSON_AddNumberToObject(storage, "last_upload", sqlite3_column_int64(stmt, 4));
|
||||||
|
cJSON_AddNumberToObject(storage, "unique_uploaders", sqlite3_column_int64(stmt, 5));
|
||||||
|
}
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
|
||||||
|
// Get stats by MIME type
|
||||||
|
sql = "SELECT type, COUNT(*) as count, SUM(size) as total_size "
|
||||||
|
"FROM blobs GROUP BY type ORDER BY count DESC LIMIT 10";
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc == SQLITE_OK) {
|
||||||
|
cJSON* by_type = cJSON_CreateArray();
|
||||||
|
while (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||||
|
cJSON* type_stat = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(type_stat, "mime_type", (const char*)sqlite3_column_text(stmt, 0));
|
||||||
|
cJSON_AddNumberToObject(type_stat, "count", sqlite3_column_int64(stmt, 1));
|
||||||
|
cJSON_AddNumberToObject(type_stat, "total_bytes", sqlite3_column_int64(stmt, 2));
|
||||||
|
cJSON_AddItemToArray(by_type, type_stat);
|
||||||
|
}
|
||||||
|
cJSON_AddItemToObject(storage, "by_mime_type", by_type);
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get top uploaders
|
||||||
|
sql = "SELECT uploader_pubkey, COUNT(*) as count, SUM(size) as total_size "
|
||||||
|
"FROM blobs WHERE uploader_pubkey IS NOT NULL "
|
||||||
|
"GROUP BY uploader_pubkey ORDER BY count DESC LIMIT 10";
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc == SQLITE_OK) {
|
||||||
|
cJSON* top_uploaders = cJSON_CreateArray();
|
||||||
|
while (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||||
|
cJSON* uploader_stat = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(uploader_stat, "pubkey", (const char*)sqlite3_column_text(stmt, 0));
|
||||||
|
cJSON_AddNumberToObject(uploader_stat, "blob_count", sqlite3_column_int64(stmt, 1));
|
||||||
|
cJSON_AddNumberToObject(uploader_stat, "total_bytes", sqlite3_column_int64(stmt, 2));
|
||||||
|
cJSON_AddItemToArray(top_uploaders, uploader_stat);
|
||||||
|
}
|
||||||
|
cJSON_AddItemToObject(storage, "top_uploaders", top_uploaders);
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_close(db);
|
||||||
|
|
||||||
|
cJSON_AddStringToObject(response, "status", "success");
|
||||||
|
cJSON_AddItemToObject(response, "storage", storage);
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* admin_cmd_sql_query(cJSON* args) {
|
||||||
|
cJSON* response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(response, "query_type", "sql_query");
|
||||||
|
|
||||||
|
// Expected format: ["sql_query", "SELECT ..."]
|
||||||
|
if (cJSON_GetArraySize(args) < 2) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Missing SQL query");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON* query_item = cJSON_GetArrayItem(args, 1);
|
||||||
|
if (!cJSON_IsString(query_item)) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Query must be a string");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char* sql = query_item->valuestring;
|
||||||
|
|
||||||
|
// Security: Only allow SELECT queries
|
||||||
|
const char* sql_upper = sql;
|
||||||
|
while (*sql_upper == ' ' || *sql_upper == '\t' || *sql_upper == '\n') sql_upper++;
|
||||||
|
if (strncasecmp(sql_upper, "SELECT", 6) != 0) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Only SELECT queries are allowed");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open database (read-only for safety)
|
||||||
|
sqlite3* db;
|
||||||
|
int rc = sqlite3_open_v2(g_admin_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response, "error", "Failed to open database");
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepare and execute query
|
||||||
|
sqlite3_stmt* stmt;
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
cJSON_AddStringToObject(response, "status", "error");
|
||||||
|
char error_msg[256];
|
||||||
|
snprintf(error_msg, sizeof(error_msg), "SQL error: %s", sqlite3_errmsg(db));
|
||||||
|
cJSON_AddStringToObject(response, "error", error_msg);
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return response;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get column names
|
||||||
|
int col_count = sqlite3_column_count(stmt);
|
||||||
|
cJSON* columns = cJSON_CreateArray();
|
||||||
|
for (int i = 0; i < col_count; i++) {
|
||||||
|
cJSON_AddItemToArray(columns, cJSON_CreateString(sqlite3_column_name(stmt, i)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute and collect rows (limit to 1000 rows for safety)
|
||||||
|
cJSON* rows = cJSON_CreateArray();
|
||||||
|
int row_count = 0;
|
||||||
|
const int MAX_ROWS = 1000;
|
||||||
|
|
||||||
|
while (row_count < MAX_ROWS && (rc = sqlite3_step(stmt)) == SQLITE_ROW) {
|
||||||
|
cJSON* row = cJSON_CreateArray();
|
||||||
|
for (int i = 0; i < col_count; i++) {
|
||||||
|
int col_type = sqlite3_column_type(stmt, i);
|
||||||
|
switch (col_type) {
|
||||||
|
case SQLITE_INTEGER:
|
||||||
|
cJSON_AddItemToArray(row, cJSON_CreateNumber(sqlite3_column_int64(stmt, i)));
|
||||||
|
break;
|
||||||
|
case SQLITE_FLOAT:
|
||||||
|
cJSON_AddItemToArray(row, cJSON_CreateNumber(sqlite3_column_double(stmt, i)));
|
||||||
|
break;
|
||||||
|
case SQLITE_TEXT:
|
||||||
|
cJSON_AddItemToArray(row, cJSON_CreateString((const char*)sqlite3_column_text(stmt, i)));
|
||||||
|
break;
|
||||||
|
case SQLITE_NULL:
|
||||||
|
cJSON_AddItemToArray(row, cJSON_CreateNull());
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
cJSON_AddItemToArray(row, cJSON_CreateString(""));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cJSON_AddItemToArray(rows, row);
|
||||||
|
row_count++;
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
sqlite3_close(db);
|
||||||
|
|
||||||
|
cJSON_AddStringToObject(response, "status", "success");
|
||||||
|
cJSON_AddItemToObject(response, "columns", columns);
|
||||||
|
cJSON_AddItemToObject(response, "rows", rows);
|
||||||
|
cJSON_AddNumberToObject(response, "row_count", row_count);
|
||||||
|
if (row_count >= MAX_ROWS) {
|
||||||
|
cJSON_AddBoolToObject(response, "truncated", 1);
|
||||||
|
}
|
||||||
|
cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "SQL query executed: %d rows returned", row_count);
|
||||||
|
|
||||||
|
return response;
|
||||||
|
}
|
||||||
56
src/admin_commands.h
Normal file
56
src/admin_commands.h
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
/*
|
||||||
|
* Ginxsom Admin Commands Interface
|
||||||
|
*
|
||||||
|
* Handles encrypted admin commands sent via Kind 23458 events
|
||||||
|
* and generates encrypted responses as Kind 23459 events.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef ADMIN_COMMANDS_H
|
||||||
|
#define ADMIN_COMMANDS_H
|
||||||
|
|
||||||
|
#include <cjson/cJSON.h>
|
||||||
|
|
||||||
|
// Command handler result codes
|
||||||
|
typedef enum {
|
||||||
|
ADMIN_CMD_SUCCESS = 0,
|
||||||
|
ADMIN_CMD_ERROR_PARSE = -1,
|
||||||
|
ADMIN_CMD_ERROR_UNKNOWN = -2,
|
||||||
|
ADMIN_CMD_ERROR_INVALID = -3,
|
||||||
|
ADMIN_CMD_ERROR_DATABASE = -4,
|
||||||
|
ADMIN_CMD_ERROR_PERMISSION = -5
|
||||||
|
} admin_cmd_result_t;
|
||||||
|
|
||||||
|
// Initialize admin command system
|
||||||
|
int admin_commands_init(const char *db_path);
|
||||||
|
|
||||||
|
// Process an admin command and generate response
|
||||||
|
// Returns cJSON response object (caller must free with cJSON_Delete)
|
||||||
|
cJSON* admin_commands_process(cJSON* command_array, const char* request_event_id);
|
||||||
|
|
||||||
|
// Individual command handlers
|
||||||
|
cJSON* admin_cmd_config_query(cJSON* args);
|
||||||
|
cJSON* admin_cmd_config_update(cJSON* args);
|
||||||
|
cJSON* admin_cmd_stats_query(cJSON* args);
|
||||||
|
cJSON* admin_cmd_system_status(cJSON* args);
|
||||||
|
cJSON* admin_cmd_blob_list(cJSON* args);
|
||||||
|
cJSON* admin_cmd_storage_stats(cJSON* args);
|
||||||
|
cJSON* admin_cmd_sql_query(cJSON* args);
|
||||||
|
|
||||||
|
// NIP-44 encryption/decryption helpers
|
||||||
|
int admin_encrypt_response(
|
||||||
|
const unsigned char* server_privkey,
|
||||||
|
const unsigned char* admin_pubkey,
|
||||||
|
const char* plaintext_json,
|
||||||
|
char* output,
|
||||||
|
size_t output_size
|
||||||
|
);
|
||||||
|
|
||||||
|
int admin_decrypt_command(
|
||||||
|
const unsigned char* server_privkey,
|
||||||
|
const unsigned char* admin_pubkey,
|
||||||
|
const char* encrypted_data,
|
||||||
|
char* output,
|
||||||
|
size_t output_size
|
||||||
|
);
|
||||||
|
|
||||||
|
#endif /* ADMIN_COMMANDS_H */
|
||||||
@@ -1,8 +1,10 @@
|
|||||||
// Admin event handler for Kind 23456/23457 admin commands
|
// Admin event handler for Kind 23458/23459 admin commands
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
#include <string.h>
|
#include <string.h>
|
||||||
#include <time.h>
|
#include <time.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <sys/types.h>
|
||||||
#include "ginxsom.h"
|
#include "ginxsom.h"
|
||||||
|
|
||||||
// Forward declarations for nostr_core_lib functions
|
// Forward declarations for nostr_core_lib functions
|
||||||
@@ -27,90 +29,162 @@ extern char g_db_path[];
|
|||||||
static int get_server_privkey(unsigned char* privkey_bytes);
|
static int get_server_privkey(unsigned char* privkey_bytes);
|
||||||
static int get_server_pubkey(char* pubkey_hex, size_t size);
|
static int get_server_pubkey(char* pubkey_hex, size_t size);
|
||||||
static int handle_config_query_command(cJSON* response_data);
|
static int handle_config_query_command(cJSON* response_data);
|
||||||
|
static int handle_query_view_command(cJSON* command_array, cJSON* response_data);
|
||||||
static int send_admin_response_event(const char* admin_pubkey, const char* request_id,
|
static int send_admin_response_event(const char* admin_pubkey, const char* request_id,
|
||||||
cJSON* response_data);
|
cJSON* response_data);
|
||||||
|
static cJSON* parse_authorization_header(void);
|
||||||
|
static int process_admin_event(cJSON* event);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Handle Kind 23456 admin command event
|
* Handle Kind 23458 admin command event
|
||||||
* Expects POST to /api/admin with JSON body containing the event
|
* Supports two delivery methods:
|
||||||
|
* 1. POST body with JSON event
|
||||||
|
* 2. Authorization header with Nostr event
|
||||||
*/
|
*/
|
||||||
void handle_admin_event_request(void) {
|
void handle_admin_event_request(void) {
|
||||||
// Read request body
|
cJSON* event = NULL;
|
||||||
const char* content_length_str = getenv("CONTENT_LENGTH");
|
int should_free_event = 1;
|
||||||
if (!content_length_str) {
|
|
||||||
printf("Status: 411 Length Required\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\"error\":\"Content-Length header required\"}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
long content_length = atol(content_length_str);
|
// First, try to get event from Authorization header
|
||||||
if (content_length <= 0 || content_length > 65536) {
|
event = parse_authorization_header();
|
||||||
printf("Status: 400 Bad Request\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\"error\":\"Invalid content length\"}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
char* json_body = malloc(content_length + 1);
|
// If not in header, try POST body
|
||||||
if (!json_body) {
|
if (!event) {
|
||||||
printf("Status: 500 Internal Server Error\r\n");
|
const char* content_length_str = getenv("CONTENT_LENGTH");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
if (!content_length_str) {
|
||||||
printf("{\"error\":\"Memory allocation failed\"}\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
return;
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
}
|
printf("{\"error\":\"Event required in POST body or Authorization header\"}\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
long content_length = atol(content_length_str);
|
||||||
|
if (content_length <= 0 || content_length > 65536) {
|
||||||
|
printf("Status: 400 Bad Request\r\n");
|
||||||
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
|
printf("{\"error\":\"Invalid content length\"}\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
char* json_body = malloc(content_length + 1);
|
||||||
|
if (!json_body) {
|
||||||
|
printf("Status: 500 Internal Server Error\r\n");
|
||||||
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
|
printf("{\"error\":\"Memory allocation failed\"}\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t bytes_read = fread(json_body, 1, content_length, stdin);
|
||||||
|
if (bytes_read != (size_t)content_length) {
|
||||||
|
free(json_body);
|
||||||
|
printf("Status: 400 Bad Request\r\n");
|
||||||
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
|
printf("{\"error\":\"Failed to read complete request body\"}\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
json_body[content_length] = '\0';
|
||||||
|
|
||||||
|
// Parse event JSON
|
||||||
|
event = cJSON_Parse(json_body);
|
||||||
|
|
||||||
|
// Debug: Log the received JSON
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Received POST body: %s", json_body);
|
||||||
|
|
||||||
size_t bytes_read = fread(json_body, 1, content_length, stdin);
|
|
||||||
if (bytes_read != (size_t)content_length) {
|
|
||||||
free(json_body);
|
free(json_body);
|
||||||
printf("Status: 400 Bad Request\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
if (!event) {
|
||||||
printf("{\"error\":\"Failed to read complete request body\"}\n");
|
app_log(LOG_ERROR, "ADMIN_EVENT: Failed to parse JSON");
|
||||||
return;
|
printf("Status: 400 Bad Request\r\n");
|
||||||
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
|
printf("{\"error\":\"Invalid JSON\"}\n");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debug: Log parsed event
|
||||||
|
char* event_str = cJSON_Print(event);
|
||||||
|
if (event_str) {
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Parsed event: %s", event_str);
|
||||||
|
free(event_str);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
json_body[content_length] = '\0';
|
|
||||||
|
|
||||||
// Parse event JSON
|
// Process the event (handles validation, decryption, command execution, response)
|
||||||
cJSON* event = cJSON_Parse(json_body);
|
int result = process_admin_event(event);
|
||||||
free(json_body);
|
|
||||||
|
|
||||||
|
// Clean up
|
||||||
|
if (should_free_event && event) {
|
||||||
|
cJSON_Delete(event);
|
||||||
|
}
|
||||||
|
|
||||||
|
(void)result; // Result already handled by process_admin_event
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse Kind 23458 event from Authorization header
|
||||||
|
* Format: Authorization: Nostr <base64-encoded-event-json>
|
||||||
|
* Returns: cJSON event object or NULL if not present/invalid
|
||||||
|
*/
|
||||||
|
static cJSON* parse_authorization_header(void) {
|
||||||
|
const char* auth_header = getenv("HTTP_AUTHORIZATION");
|
||||||
|
if (!auth_header) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for "Nostr " prefix (case-insensitive)
|
||||||
|
if (strncasecmp(auth_header, "Nostr ", 6) != 0) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip "Nostr " prefix
|
||||||
|
const char* base64_event = auth_header + 6;
|
||||||
|
|
||||||
|
// Decode base64 (simple implementation - in production use proper base64 decoder)
|
||||||
|
// For now, assume the event is JSON directly (not base64 encoded)
|
||||||
|
// This matches the pattern from c-relay's admin interface
|
||||||
|
cJSON* event = cJSON_Parse(base64_event);
|
||||||
|
|
||||||
|
return event;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Process a Kind 23458 admin event (from POST body or Authorization header)
|
||||||
|
* Returns: 0 on success, -1 on error (error response already sent)
|
||||||
|
*/
|
||||||
|
static int process_admin_event(cJSON* event) {
|
||||||
if (!event) {
|
if (!event) {
|
||||||
printf("Status: 400 Bad Request\r\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Invalid JSON\"}\n");
|
printf("{\"error\":\"Invalid event\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify it's Kind 23456
|
// Verify it's Kind 23458
|
||||||
cJSON* kind_obj = cJSON_GetObjectItem(event, "kind");
|
cJSON* kind_obj = cJSON_GetObjectItem(event, "kind");
|
||||||
if (!kind_obj || !cJSON_IsNumber(kind_obj) ||
|
if (!kind_obj || !cJSON_IsNumber(kind_obj) ||
|
||||||
(int)cJSON_GetNumberValue(kind_obj) != 23456) {
|
(int)cJSON_GetNumberValue(kind_obj) != 23458) {
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Event must be Kind 23456\"}\n");
|
printf("{\"error\":\"Event must be Kind 23458\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get event ID for response correlation
|
// Get event ID for response correlation
|
||||||
cJSON* id_obj = cJSON_GetObjectItem(event, "id");
|
cJSON* id_obj = cJSON_GetObjectItem(event, "id");
|
||||||
if (!id_obj || !cJSON_IsString(id_obj)) {
|
if (!id_obj || !cJSON_IsString(id_obj)) {
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Event missing id\"}\n");
|
printf("{\"error\":\"Event missing id\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
const char* request_id = cJSON_GetStringValue(id_obj);
|
const char* request_id = cJSON_GetStringValue(id_obj);
|
||||||
|
|
||||||
// Get admin pubkey from event
|
// Get admin pubkey from event
|
||||||
cJSON* pubkey_obj = cJSON_GetObjectItem(event, "pubkey");
|
cJSON* pubkey_obj = cJSON_GetObjectItem(event, "pubkey");
|
||||||
if (!pubkey_obj || !cJSON_IsString(pubkey_obj)) {
|
if (!pubkey_obj || !cJSON_IsString(pubkey_obj)) {
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Event missing pubkey\"}\n");
|
printf("{\"error\":\"Event missing pubkey\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
const char* admin_pubkey = cJSON_GetStringValue(pubkey_obj);
|
const char* admin_pubkey = cJSON_GetStringValue(pubkey_obj);
|
||||||
|
|
||||||
@@ -118,11 +192,10 @@ void handle_admin_event_request(void) {
|
|||||||
sqlite3* db;
|
sqlite3* db;
|
||||||
int rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
int rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
if (rc != SQLITE_OK) {
|
if (rc != SQLITE_OK) {
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 500 Internal Server Error\r\n");
|
printf("Status: 500 Internal Server Error\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Database error\"}\n");
|
printf("{\"error\":\"Database error\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
sqlite3_stmt* stmt;
|
sqlite3_stmt* stmt;
|
||||||
@@ -141,42 +214,38 @@ void handle_admin_event_request(void) {
|
|||||||
sqlite3_close(db);
|
sqlite3_close(db);
|
||||||
|
|
||||||
if (!is_admin) {
|
if (!is_admin) {
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 403 Forbidden\r\n");
|
printf("Status: 403 Forbidden\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Not authorized as admin\"}\n");
|
printf("{\"error\":\"Not authorized as admin\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get encrypted content
|
// Get encrypted content
|
||||||
cJSON* content_obj = cJSON_GetObjectItem(event, "content");
|
cJSON* content_obj = cJSON_GetObjectItem(event, "content");
|
||||||
if (!content_obj || !cJSON_IsString(content_obj)) {
|
if (!content_obj || !cJSON_IsString(content_obj)) {
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Event missing content\"}\n");
|
printf("{\"error\":\"Event missing content\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
const char* encrypted_content = cJSON_GetStringValue(content_obj);
|
const char* encrypted_content = cJSON_GetStringValue(content_obj);
|
||||||
|
|
||||||
// Get server private key for decryption
|
// Get server private key for decryption
|
||||||
unsigned char server_privkey[32];
|
unsigned char server_privkey[32];
|
||||||
if (get_server_privkey(server_privkey) != 0) {
|
if (get_server_privkey(server_privkey) != 0) {
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 500 Internal Server Error\r\n");
|
printf("Status: 500 Internal Server Error\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Failed to get server private key\"}\n");
|
printf("{\"error\":\"Failed to get server private key\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert admin pubkey to bytes
|
// Convert admin pubkey to bytes
|
||||||
unsigned char admin_pubkey_bytes[32];
|
unsigned char admin_pubkey_bytes[32];
|
||||||
if (nostr_hex_to_bytes(admin_pubkey, admin_pubkey_bytes, 32) != 0) {
|
if (nostr_hex_to_bytes(admin_pubkey, admin_pubkey_bytes, 32) != 0) {
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Invalid admin pubkey format\"}\n");
|
printf("{\"error\":\"Invalid admin pubkey format\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decrypt content using NIP-44 (or use plaintext for testing)
|
// Decrypt content using NIP-44 (or use plaintext for testing)
|
||||||
@@ -195,34 +264,37 @@ void handle_admin_event_request(void) {
|
|||||||
);
|
);
|
||||||
|
|
||||||
if (decrypt_result != 0) {
|
if (decrypt_result != 0) {
|
||||||
cJSON_Delete(event);
|
app_log(LOG_ERROR, "ADMIN_EVENT: Decryption failed with result: %d", decrypt_result);
|
||||||
|
app_log(LOG_ERROR, "ADMIN_EVENT: Encrypted content: %s", encrypted_content);
|
||||||
printf("Status: 400 Bad Request\r\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Failed to decrypt content\"}\n");
|
printf("{\"error\":\"Failed to decrypt content\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
content_to_parse = decrypted_content;
|
content_to_parse = decrypted_content;
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Decrypted content: %s", decrypted_content);
|
||||||
|
} else {
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Using plaintext content (starts with '['): %s", encrypted_content);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse command array (either decrypted or plaintext)
|
// Parse command array (either decrypted or plaintext)
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Parsing command array from: %s", content_to_parse);
|
||||||
cJSON* command_array = cJSON_Parse(content_to_parse);
|
cJSON* command_array = cJSON_Parse(content_to_parse);
|
||||||
if (!command_array || !cJSON_IsArray(command_array)) {
|
if (!command_array || !cJSON_IsArray(command_array)) {
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Decrypted content is not a valid command array\"}\n");
|
printf("{\"error\":\"Decrypted content is not a valid command array\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get command type
|
// Get command type
|
||||||
cJSON* command_type = cJSON_GetArrayItem(command_array, 0);
|
cJSON* command_type = cJSON_GetArrayItem(command_array, 0);
|
||||||
if (!command_type || !cJSON_IsString(command_type)) {
|
if (!command_type || !cJSON_IsString(command_type)) {
|
||||||
cJSON_Delete(command_array);
|
cJSON_Delete(command_array);
|
||||||
cJSON_Delete(event);
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
printf("Status: 400 Bad Request\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Invalid command format\"}\n");
|
printf("{\"error\":\"Invalid command format\"}\n");
|
||||||
return;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
const char* cmd = cJSON_GetStringValue(command_type);
|
const char* cmd = cJSON_GetStringValue(command_type);
|
||||||
@@ -235,23 +307,35 @@ void handle_admin_event_request(void) {
|
|||||||
// Handle command
|
// Handle command
|
||||||
int result = -1;
|
int result = -1;
|
||||||
if (strcmp(cmd, "config_query") == 0) {
|
if (strcmp(cmd, "config_query") == 0) {
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Handling config_query command");
|
||||||
result = handle_config_query_command(response_data);
|
result = handle_config_query_command(response_data);
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: config_query result: %d", result);
|
||||||
|
} else if (strcmp(cmd, "query_view") == 0) {
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Handling query_view command");
|
||||||
|
result = handle_query_view_command(command_array, response_data);
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: query_view result: %d", result);
|
||||||
} else {
|
} else {
|
||||||
|
app_log(LOG_WARN, "ADMIN_EVENT: Unknown command: %s", cmd);
|
||||||
cJSON_AddStringToObject(response_data, "status", "error");
|
cJSON_AddStringToObject(response_data, "status", "error");
|
||||||
cJSON_AddStringToObject(response_data, "error", "Unknown command");
|
cJSON_AddStringToObject(response_data, "error", "Unknown command");
|
||||||
|
result = -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
cJSON_Delete(command_array);
|
cJSON_Delete(command_array);
|
||||||
cJSON_Delete(event);
|
|
||||||
|
|
||||||
if (result == 0) {
|
if (result == 0) {
|
||||||
// Send Kind 23457 response
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Sending Kind 23459 response");
|
||||||
send_admin_response_event(admin_pubkey, request_id, response_data);
|
// Send Kind 23459 response
|
||||||
|
int send_result = send_admin_response_event(admin_pubkey, request_id, response_data);
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Response sent with result: %d", send_result);
|
||||||
|
return send_result;
|
||||||
} else {
|
} else {
|
||||||
|
app_log(LOG_ERROR, "ADMIN_EVENT: Command processing failed");
|
||||||
cJSON_Delete(response_data);
|
cJSON_Delete(response_data);
|
||||||
printf("Status: 500 Internal Server Error\r\n");
|
printf("Status: 500 Internal Server Error\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/json\r\n\r\n");
|
||||||
printf("{\"error\":\"Command processing failed\"}\n");
|
printf("{\"error\":\"Command processing failed\"}\n");
|
||||||
|
return -1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -350,7 +434,126 @@ static int handle_config_query_command(cJSON* response_data) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Send Kind 23457 admin response event
|
* Handle query_view command - returns data from a specified database view
|
||||||
|
* Command format: ["query_view", "view_name"]
|
||||||
|
*/
|
||||||
|
static int handle_query_view_command(cJSON* command_array, cJSON* response_data) {
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: handle_query_view_command called");
|
||||||
|
|
||||||
|
// Get view name from command array
|
||||||
|
cJSON* view_name_obj = cJSON_GetArrayItem(command_array, 1);
|
||||||
|
if (!view_name_obj || !cJSON_IsString(view_name_obj)) {
|
||||||
|
app_log(LOG_ERROR, "ADMIN_EVENT: View name missing or not a string");
|
||||||
|
cJSON_AddStringToObject(response_data, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response_data, "error", "View name required");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char* view_name = cJSON_GetStringValue(view_name_obj);
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Querying view: %s", view_name);
|
||||||
|
|
||||||
|
// Validate view name (whitelist approach for security)
|
||||||
|
const char* allowed_views[] = {
|
||||||
|
"blob_overview",
|
||||||
|
"blob_type_distribution",
|
||||||
|
"blob_time_stats",
|
||||||
|
"top_uploaders",
|
||||||
|
NULL
|
||||||
|
};
|
||||||
|
|
||||||
|
int view_allowed = 0;
|
||||||
|
for (int i = 0; allowed_views[i] != NULL; i++) {
|
||||||
|
if (strcmp(view_name, allowed_views[i]) == 0) {
|
||||||
|
view_allowed = 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!view_allowed) {
|
||||||
|
cJSON_AddStringToObject(response_data, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response_data, "error", "Invalid view name");
|
||||||
|
app_log(LOG_WARN, "ADMIN_EVENT: Attempted to query invalid view: %s", view_name);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: View '%s' is allowed, opening database: %s", view_name, g_db_path);
|
||||||
|
|
||||||
|
// Open database
|
||||||
|
sqlite3* db;
|
||||||
|
int rc = sqlite3_open_v2(g_db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
app_log(LOG_ERROR, "ADMIN_EVENT: Failed to open database: %s (error: %s)", g_db_path, sqlite3_errmsg(db));
|
||||||
|
cJSON_AddStringToObject(response_data, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response_data, "error", "Database error");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build SQL query
|
||||||
|
char sql[256];
|
||||||
|
snprintf(sql, sizeof(sql), "SELECT * FROM %s", view_name);
|
||||||
|
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Executing SQL: %s", sql);
|
||||||
|
|
||||||
|
sqlite3_stmt* stmt;
|
||||||
|
if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
|
||||||
|
app_log(LOG_ERROR, "ADMIN_EVENT: Failed to prepare query: %s (error: %s)", sql, sqlite3_errmsg(db));
|
||||||
|
sqlite3_close(db);
|
||||||
|
cJSON_AddStringToObject(response_data, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response_data, "error", "Failed to prepare query");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get column count and names
|
||||||
|
int col_count = sqlite3_column_count(stmt);
|
||||||
|
|
||||||
|
// Create results array
|
||||||
|
cJSON* results = cJSON_CreateArray();
|
||||||
|
|
||||||
|
// Fetch all rows
|
||||||
|
while (sqlite3_step(stmt) == SQLITE_ROW) {
|
||||||
|
cJSON* row = cJSON_CreateObject();
|
||||||
|
|
||||||
|
for (int i = 0; i < col_count; i++) {
|
||||||
|
const char* col_name = sqlite3_column_name(stmt, i);
|
||||||
|
int col_type = sqlite3_column_type(stmt, i);
|
||||||
|
|
||||||
|
switch (col_type) {
|
||||||
|
case SQLITE_INTEGER:
|
||||||
|
cJSON_AddNumberToObject(row, col_name, (double)sqlite3_column_int64(stmt, i));
|
||||||
|
break;
|
||||||
|
case SQLITE_FLOAT:
|
||||||
|
cJSON_AddNumberToObject(row, col_name, sqlite3_column_double(stmt, i));
|
||||||
|
break;
|
||||||
|
case SQLITE_TEXT:
|
||||||
|
cJSON_AddStringToObject(row, col_name, (const char*)sqlite3_column_text(stmt, i));
|
||||||
|
break;
|
||||||
|
case SQLITE_NULL:
|
||||||
|
cJSON_AddNullToObject(row, col_name);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
// For BLOB or unknown types, skip
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON_AddItemToArray(results, row);
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
sqlite3_close(db);
|
||||||
|
|
||||||
|
// Build response
|
||||||
|
cJSON_AddStringToObject(response_data, "status", "success");
|
||||||
|
cJSON_AddStringToObject(response_data, "view_name", view_name);
|
||||||
|
cJSON_AddItemToObject(response_data, "data", results);
|
||||||
|
|
||||||
|
app_log(LOG_DEBUG, "ADMIN_EVENT: Query view '%s' returned %d rows", view_name, cJSON_GetArraySize(results));
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Send Kind 23459 admin response event
|
||||||
*/
|
*/
|
||||||
static int send_admin_response_event(const char* admin_pubkey, const char* request_id,
|
static int send_admin_response_event(const char* admin_pubkey, const char* request_id,
|
||||||
cJSON* response_data) {
|
cJSON* response_data) {
|
||||||
@@ -407,11 +610,11 @@ static int send_admin_response_event(const char* admin_pubkey, const char* reque
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create Kind 23457 response event
|
// Create Kind 23459 response event
|
||||||
cJSON* response_event = cJSON_CreateObject();
|
cJSON* response_event = cJSON_CreateObject();
|
||||||
cJSON_AddStringToObject(response_event, "pubkey", server_pubkey);
|
cJSON_AddStringToObject(response_event, "pubkey", server_pubkey);
|
||||||
cJSON_AddNumberToObject(response_event, "created_at", (double)time(NULL));
|
cJSON_AddNumberToObject(response_event, "created_at", (double)time(NULL));
|
||||||
cJSON_AddNumberToObject(response_event, "kind", 23457);
|
cJSON_AddNumberToObject(response_event, "kind", 23459);
|
||||||
cJSON_AddStringToObject(response_event, "content", encrypted_response);
|
cJSON_AddStringToObject(response_event, "content", encrypted_response);
|
||||||
|
|
||||||
// Add tags
|
// Add tags
|
||||||
@@ -433,7 +636,7 @@ static int send_admin_response_event(const char* admin_pubkey, const char* reque
|
|||||||
|
|
||||||
// Sign the event
|
// Sign the event
|
||||||
cJSON* signed_event = nostr_create_and_sign_event(
|
cJSON* signed_event = nostr_create_and_sign_event(
|
||||||
23457,
|
23459,
|
||||||
encrypted_response,
|
encrypted_response,
|
||||||
tags,
|
tags,
|
||||||
server_privkey,
|
server_privkey,
|
||||||
|
|||||||
62
src/admin_interface.c
Normal file
62
src/admin_interface.c
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
// Admin interface handler - serves embedded web UI files
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include "ginxsom.h"
|
||||||
|
#include "admin_interface_embedded.h"
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Serve embedded file with appropriate content type
|
||||||
|
*/
|
||||||
|
static void serve_embedded_file(const unsigned char* data, size_t size, const char* content_type) {
|
||||||
|
printf("Status: 200 OK\r\n");
|
||||||
|
printf("Content-Type: %s\r\n", content_type);
|
||||||
|
printf("Content-Length: %zu\r\n", size);
|
||||||
|
printf("Cache-Control: public, max-age=3600\r\n");
|
||||||
|
printf("\r\n");
|
||||||
|
fwrite((void*)data, 1, size, stdout);
|
||||||
|
fflush(stdout);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Handle admin interface requests
|
||||||
|
* Serves embedded web UI files from /api path (consistent with c-relay)
|
||||||
|
*/
|
||||||
|
void handle_admin_interface_request(const char* path) {
|
||||||
|
// Normalize path - remove trailing slash
|
||||||
|
char normalized_path[256];
|
||||||
|
strncpy(normalized_path, path, sizeof(normalized_path) - 1);
|
||||||
|
normalized_path[sizeof(normalized_path) - 1] = '\0';
|
||||||
|
|
||||||
|
size_t len = strlen(normalized_path);
|
||||||
|
if (len > 1 && normalized_path[len - 1] == '/') {
|
||||||
|
normalized_path[len - 1] = '\0';
|
||||||
|
}
|
||||||
|
|
||||||
|
// Route to appropriate embedded file
|
||||||
|
// All paths use /api/ prefix for consistency with c-relay
|
||||||
|
if (strcmp(normalized_path, "/api") == 0 || strcmp(normalized_path, "/api/index.html") == 0) {
|
||||||
|
serve_embedded_file(embedded_index_html, embedded_index_html_size, "text/html; charset=utf-8");
|
||||||
|
}
|
||||||
|
else if (strcmp(normalized_path, "/api/index.css") == 0) {
|
||||||
|
serve_embedded_file(embedded_index_css, embedded_index_css_size, "text/css; charset=utf-8");
|
||||||
|
}
|
||||||
|
else if (strcmp(normalized_path, "/api/index.js") == 0) {
|
||||||
|
serve_embedded_file(embedded_index_js, embedded_index_js_size, "application/javascript; charset=utf-8");
|
||||||
|
}
|
||||||
|
else if (strcmp(normalized_path, "/api/nostr-lite.js") == 0) {
|
||||||
|
serve_embedded_file(embedded_nostr_lite_js, embedded_nostr_lite_js_size, "application/javascript; charset=utf-8");
|
||||||
|
}
|
||||||
|
else if (strcmp(normalized_path, "/api/nostr.bundle.js") == 0) {
|
||||||
|
serve_embedded_file(embedded_nostr_bundle_js, embedded_nostr_bundle_js_size, "application/javascript; charset=utf-8");
|
||||||
|
}
|
||||||
|
else if (strcmp(normalized_path, "/api/text_graph.js") == 0) {
|
||||||
|
serve_embedded_file(embedded_text_graph_js, embedded_text_graph_js_size, "application/javascript; charset=utf-8");
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
// 404 Not Found
|
||||||
|
printf("Status: 404 Not Found\r\n");
|
||||||
|
printf("Content-Type: text/html; charset=utf-8\r\n");
|
||||||
|
printf("\r\n");
|
||||||
|
printf("<html><body><h1>404 Not Found</h1><p>File not found: %s</p></body></html>\n", normalized_path);
|
||||||
|
}
|
||||||
|
}
|
||||||
63364
src/admin_interface_embedded.h
Normal file
63364
src/admin_interface_embedded.h
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,163 +0,0 @@
|
|||||||
/*
|
|
||||||
* Ginxsom Admin WebSocket Module
|
|
||||||
* Handles WebSocket connections for Kind 23456/23457 admin commands
|
|
||||||
* Based on c-relay's WebSocket implementation
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "ginxsom.h"
|
|
||||||
#include <stdio.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <cjson/cJSON.h>
|
|
||||||
#include <sqlite3.h>
|
|
||||||
|
|
||||||
// Forward declarations from admin_auth.c
|
|
||||||
int process_admin_command(cJSON *event, char ***command_array_out, int *command_count_out, char **admin_pubkey_out);
|
|
||||||
void free_command_array(char **command_array, int command_count);
|
|
||||||
int create_admin_response(const char *response_json, const char *admin_pubkey, const char *original_event_id, cJSON **response_event_out);
|
|
||||||
|
|
||||||
// Forward declarations from admin_handlers.c (to be created)
|
|
||||||
int execute_admin_command(char **command_array, int command_count, const char *admin_pubkey, char **response_json_out);
|
|
||||||
|
|
||||||
// Handle WebSocket admin command endpoint (/api/admin)
|
|
||||||
void handle_admin_websocket_request(void) {
|
|
||||||
// For now, this is a placeholder for WebSocket implementation
|
|
||||||
// In a full implementation, this would:
|
|
||||||
// 1. Upgrade HTTP connection to WebSocket
|
|
||||||
// 2. Handle WebSocket frames
|
|
||||||
// 3. Process Kind 23456 events
|
|
||||||
// 4. Send Kind 23457 responses
|
|
||||||
|
|
||||||
printf("Status: 501 Not Implemented\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\n");
|
|
||||||
printf(" \"error\": \"websocket_not_implemented\",\n");
|
|
||||||
printf(" \"message\": \"WebSocket admin endpoint not yet implemented\",\n");
|
|
||||||
printf(" \"note\": \"Use HTTP POST to /api/admin for now\"\n");
|
|
||||||
printf("}\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle HTTP POST admin command endpoint (/api/admin)
|
|
||||||
void handle_admin_command_post_request(void) {
|
|
||||||
// Read the request body (should contain Kind 23456 event JSON)
|
|
||||||
const char *content_length_str = getenv("CONTENT_LENGTH");
|
|
||||||
if (!content_length_str) {
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\n");
|
|
||||||
printf(" \"error\": \"missing_content_length\",\n");
|
|
||||||
printf(" \"message\": \"Content-Length header required\"\n");
|
|
||||||
printf("}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
long content_length = atol(content_length_str);
|
|
||||||
if (content_length <= 0 || content_length > 1024 * 1024) { // 1MB limit
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\n");
|
|
||||||
printf(" \"error\": \"invalid_content_length\",\n");
|
|
||||||
printf(" \"message\": \"Content-Length must be between 1 and 1MB\"\n");
|
|
||||||
printf("}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the request body
|
|
||||||
char *request_body = malloc(content_length + 1);
|
|
||||||
if (!request_body) {
|
|
||||||
printf("Status: 500 Internal Server Error\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\n");
|
|
||||||
printf(" \"error\": \"memory_allocation_failed\",\n");
|
|
||||||
printf(" \"message\": \"Failed to allocate memory for request body\"\n");
|
|
||||||
printf("}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t bytes_read = fread(request_body, 1, content_length, stdin);
|
|
||||||
if (bytes_read != (size_t)content_length) {
|
|
||||||
free(request_body);
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\n");
|
|
||||||
printf(" \"error\": \"incomplete_request_body\",\n");
|
|
||||||
printf(" \"message\": \"Failed to read complete request body\"\n");
|
|
||||||
printf("}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
request_body[content_length] = '\0';
|
|
||||||
|
|
||||||
// Parse the JSON event
|
|
||||||
cJSON *event = cJSON_Parse(request_body);
|
|
||||||
free(request_body);
|
|
||||||
|
|
||||||
if (!event) {
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\n");
|
|
||||||
printf(" \"error\": \"invalid_json\",\n");
|
|
||||||
printf(" \"message\": \"Request body is not valid JSON\"\n");
|
|
||||||
printf("}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process the admin command
|
|
||||||
char **command_array = NULL;
|
|
||||||
int command_count = 0;
|
|
||||||
char *admin_pubkey = NULL;
|
|
||||||
|
|
||||||
int result = process_admin_command(event, &command_array, &command_count, &admin_pubkey);
|
|
||||||
cJSON_Delete(event);
|
|
||||||
|
|
||||||
if (result != 0) {
|
|
||||||
printf("Status: 400 Bad Request\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\n");
|
|
||||||
printf(" \"error\": \"invalid_admin_command\",\n");
|
|
||||||
printf(" \"message\": \"Failed to process admin command\"\n");
|
|
||||||
printf("}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Execute the command
|
|
||||||
char *response_json = NULL;
|
|
||||||
int exec_result = execute_admin_command(command_array, command_count, admin_pubkey, &response_json);
|
|
||||||
free_command_array(command_array, command_count);
|
|
||||||
free(admin_pubkey);
|
|
||||||
|
|
||||||
if (exec_result != 0) {
|
|
||||||
printf("Status: 500 Internal Server Error\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\n");
|
|
||||||
printf(" \"error\": \"command_execution_failed\",\n");
|
|
||||||
printf(" \"message\": \"Failed to execute admin command\"\n");
|
|
||||||
printf("}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the response event (Kind 23457)
|
|
||||||
cJSON *response_event = NULL;
|
|
||||||
int create_result = create_admin_response(response_json, admin_pubkey, NULL, &response_event);
|
|
||||||
free(response_json);
|
|
||||||
|
|
||||||
if (create_result != 0) {
|
|
||||||
printf("Status: 500 Internal Server Error\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("{\n");
|
|
||||||
printf(" \"error\": \"response_creation_failed\",\n");
|
|
||||||
printf(" \"message\": \"Failed to create admin response\"\n");
|
|
||||||
printf("}\n");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return the response event as JSON
|
|
||||||
char *response_json_str = cJSON_Print(response_event);
|
|
||||||
cJSON_Delete(response_event);
|
|
||||||
|
|
||||||
printf("Status: 200 OK\r\n");
|
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
|
||||||
printf("%s\n", response_json_str);
|
|
||||||
|
|
||||||
free(response_json_str);
|
|
||||||
}
|
|
||||||
@@ -10,8 +10,8 @@
|
|||||||
// Version information (auto-updated by build system)
|
// Version information (auto-updated by build system)
|
||||||
#define VERSION_MAJOR 0
|
#define VERSION_MAJOR 0
|
||||||
#define VERSION_MINOR 1
|
#define VERSION_MINOR 1
|
||||||
#define VERSION_PATCH 10
|
#define VERSION_PATCH 18
|
||||||
#define VERSION "v0.1.10"
|
#define VERSION "v0.1.18"
|
||||||
|
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
@@ -250,6 +250,16 @@ void send_json_response(int status_code, const char* json_content);
|
|||||||
// Logging utilities
|
// Logging utilities
|
||||||
void log_request(const char* method, const char* uri, const char* auth_status, int status_code);
|
void log_request(const char* method, const char* uri, const char* auth_status, int status_code);
|
||||||
|
|
||||||
|
// Centralized application logging (writes to logs/app/app.log)
|
||||||
|
typedef enum {
|
||||||
|
LOG_DEBUG = 0,
|
||||||
|
LOG_INFO = 1,
|
||||||
|
LOG_WARN = 2,
|
||||||
|
LOG_ERROR = 3
|
||||||
|
} log_level_t;
|
||||||
|
|
||||||
|
void app_log(log_level_t level, const char* format, ...);
|
||||||
|
|
||||||
// SHA-256 validation helper (used by multiple BUDs)
|
// SHA-256 validation helper (used by multiple BUDs)
|
||||||
int validate_sha256_format(const char* sha256);
|
int validate_sha256_format(const char* sha256);
|
||||||
|
|
||||||
@@ -262,9 +272,12 @@ int validate_sha256_format(const char* sha256);
|
|||||||
// Admin API request handler
|
// Admin API request handler
|
||||||
void handle_admin_api_request(const char* method, const char* uri, const char* validated_pubkey, int is_authenticated);
|
void handle_admin_api_request(const char* method, const char* uri, const char* validated_pubkey, int is_authenticated);
|
||||||
|
|
||||||
// Admin event handler (Kind 23456/23457)
|
// Admin event handler (Kind 23458/23459)
|
||||||
void handle_admin_event_request(void);
|
void handle_admin_event_request(void);
|
||||||
|
|
||||||
|
// Admin interface handler (serves embedded web UI)
|
||||||
|
void handle_admin_interface_request(const char* path);
|
||||||
|
|
||||||
// Individual endpoint handlers
|
// Individual endpoint handlers
|
||||||
void handle_stats_api(void);
|
void handle_stats_api(void);
|
||||||
void handle_config_get_api(void);
|
void handle_config_get_api(void);
|
||||||
|
|||||||
306
src/main.c
306
src/main.c
@@ -5,11 +5,14 @@
|
|||||||
|
|
||||||
#define _GNU_SOURCE
|
#define _GNU_SOURCE
|
||||||
#include "ginxsom.h"
|
#include "ginxsom.h"
|
||||||
|
#include "relay_client.h"
|
||||||
|
#include "admin_commands.h"
|
||||||
#include "../nostr_core_lib/nostr_core/nostr_common.h"
|
#include "../nostr_core_lib/nostr_core/nostr_common.h"
|
||||||
#include "../nostr_core_lib/nostr_core/utils.h"
|
#include "../nostr_core_lib/nostr_core/utils.h"
|
||||||
#include <getopt.h>
|
#include <getopt.h>
|
||||||
#include <curl/curl.h>
|
#include <curl/curl.h>
|
||||||
#include <sqlite3.h>
|
#include <sqlite3.h>
|
||||||
|
#include <stdarg.h>
|
||||||
#include <stdint.h>
|
#include <stdint.h>
|
||||||
#include <stdio.h>
|
#include <stdio.h>
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
@@ -19,7 +22,43 @@
|
|||||||
#include <time.h>
|
#include <time.h>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
|
|
||||||
// Debug macros removed
|
// Centralized logging system (declaration in ginxsom.h)
|
||||||
|
void app_log(log_level_t level, const char *format, ...) {
|
||||||
|
FILE *log_file = fopen("logs/app/app.log", "a");
|
||||||
|
if (!log_file) {
|
||||||
|
return; // Silently fail if we can't open log file
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get timestamp
|
||||||
|
time_t now = time(NULL);
|
||||||
|
struct tm *tm_info = localtime(&now);
|
||||||
|
char timestamp[64];
|
||||||
|
strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);
|
||||||
|
|
||||||
|
// Get log level string
|
||||||
|
const char *level_str;
|
||||||
|
switch (level) {
|
||||||
|
case LOG_DEBUG: level_str = "DEBUG"; break;
|
||||||
|
case LOG_INFO: level_str = "INFO"; break;
|
||||||
|
case LOG_WARN: level_str = "WARN"; break;
|
||||||
|
case LOG_ERROR: level_str = "ERROR"; break;
|
||||||
|
default: level_str = "UNKNOWN"; break;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write log prefix with timestamp, PID, and level
|
||||||
|
fprintf(log_file, "[%s] [PID:%d] [%s] ", timestamp, getpid(), level_str);
|
||||||
|
|
||||||
|
// Write formatted message
|
||||||
|
va_list args;
|
||||||
|
va_start(args, format);
|
||||||
|
vfprintf(log_file, format, args);
|
||||||
|
va_end(args);
|
||||||
|
|
||||||
|
// Ensure newline
|
||||||
|
fprintf(log_file, "\n");
|
||||||
|
|
||||||
|
fclose(log_file);
|
||||||
|
}
|
||||||
|
|
||||||
#define MAX_SHA256_LEN 65
|
#define MAX_SHA256_LEN 65
|
||||||
#define MAX_PATH_LEN 4096
|
#define MAX_PATH_LEN 4096
|
||||||
@@ -196,7 +235,10 @@ int initialize_database(const char *db_path) {
|
|||||||
" ('admin_enabled', 'true', 'Whether admin API is enabled'),"
|
" ('admin_enabled', 'true', 'Whether admin API is enabled'),"
|
||||||
" ('nip42_require_auth', 'false', 'Enable NIP-42 challenge/response authentication'),"
|
" ('nip42_require_auth', 'false', 'Enable NIP-42 challenge/response authentication'),"
|
||||||
" ('nip42_challenge_timeout', '600', 'NIP-42 challenge timeout in seconds'),"
|
" ('nip42_challenge_timeout', '600', 'NIP-42 challenge timeout in seconds'),"
|
||||||
" ('nip42_time_tolerance', '300', 'NIP-42 timestamp tolerance in seconds');";
|
" ('nip42_time_tolerance', '300', 'NIP-42 timestamp tolerance in seconds'),"
|
||||||
|
" ('enable_relay_connect', 'true', 'Enable connection to Nostr relays'),"
|
||||||
|
" ('kind_0_content', '{\"name\":\"Ginxsom Blossom Server\",\"about\":\"A Nostr-enabled Blossom media server\",\"picture\":\"\"}', 'JSON content for Kind 0 profile event'),"
|
||||||
|
" ('kind_10002_tags', '[\"wss://relay.laantungir.net\"]', 'JSON array of relay URLs for Kind 10002');";
|
||||||
|
|
||||||
rc = sqlite3_exec(db, insert_config, NULL, NULL, &err_msg);
|
rc = sqlite3_exec(db, insert_config, NULL, NULL, &err_msg);
|
||||||
if (rc != SQLITE_OK) {
|
if (rc != SQLITE_OK) {
|
||||||
@@ -206,7 +248,7 @@ int initialize_database(const char *db_path) {
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create storage_stats view
|
// Create storage_stats view (legacy - kept for backward compatibility)
|
||||||
const char *create_view =
|
const char *create_view =
|
||||||
"CREATE VIEW IF NOT EXISTS storage_stats AS "
|
"CREATE VIEW IF NOT EXISTS storage_stats AS "
|
||||||
"SELECT "
|
"SELECT "
|
||||||
@@ -226,6 +268,85 @@ int initialize_database(const char *db_path) {
|
|||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Create blob_overview view for admin dashboard
|
||||||
|
const char *create_overview_view =
|
||||||
|
"CREATE VIEW IF NOT EXISTS blob_overview AS "
|
||||||
|
"SELECT "
|
||||||
|
" COUNT(*) as total_blobs, "
|
||||||
|
" COALESCE(SUM(size), 0) as total_bytes, "
|
||||||
|
" MIN(uploaded_at) as first_upload, "
|
||||||
|
" MAX(uploaded_at) as last_upload "
|
||||||
|
"FROM blobs;";
|
||||||
|
|
||||||
|
rc = sqlite3_exec(db, create_overview_view, NULL, NULL, &err_msg);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
fprintf(stderr, "Failed to create blob_overview view: %s\n", err_msg);
|
||||||
|
sqlite3_free(err_msg);
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create blob_type_distribution view for MIME type statistics
|
||||||
|
const char *create_type_view =
|
||||||
|
"CREATE VIEW IF NOT EXISTS blob_type_distribution AS "
|
||||||
|
"SELECT "
|
||||||
|
" type as mime_type, "
|
||||||
|
" COUNT(*) as blob_count, "
|
||||||
|
" SUM(size) as total_bytes, "
|
||||||
|
" ROUND(COUNT(*) * 100.0 / (SELECT COUNT(*) FROM blobs), 2) as percentage "
|
||||||
|
"FROM blobs "
|
||||||
|
"GROUP BY type "
|
||||||
|
"ORDER BY blob_count DESC;";
|
||||||
|
|
||||||
|
rc = sqlite3_exec(db, create_type_view, NULL, NULL, &err_msg);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
fprintf(stderr, "Failed to create blob_type_distribution view: %s\n", err_msg);
|
||||||
|
sqlite3_free(err_msg);
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create blob_time_stats view for time-based statistics
|
||||||
|
const char *create_time_view =
|
||||||
|
"CREATE VIEW IF NOT EXISTS blob_time_stats AS "
|
||||||
|
"SELECT "
|
||||||
|
" COUNT(CASE WHEN uploaded_at >= strftime('%s', 'now', '-1 day') THEN 1 END) as blobs_24h, "
|
||||||
|
" COUNT(CASE WHEN uploaded_at >= strftime('%s', 'now', '-7 days') THEN 1 END) as blobs_7d, "
|
||||||
|
" COUNT(CASE WHEN uploaded_at >= strftime('%s', 'now', '-30 days') THEN 1 END) as blobs_30d "
|
||||||
|
"FROM blobs;";
|
||||||
|
|
||||||
|
rc = sqlite3_exec(db, create_time_view, NULL, NULL, &err_msg);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
fprintf(stderr, "Failed to create blob_time_stats view: %s\n", err_msg);
|
||||||
|
sqlite3_free(err_msg);
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create top_uploaders view for pubkey statistics
|
||||||
|
const char *create_uploaders_view =
|
||||||
|
"CREATE VIEW IF NOT EXISTS top_uploaders AS "
|
||||||
|
"SELECT "
|
||||||
|
" uploader_pubkey, "
|
||||||
|
" COUNT(*) as blob_count, "
|
||||||
|
" SUM(size) as total_bytes, "
|
||||||
|
" ROUND(COUNT(*) * 100.0 / (SELECT COUNT(*) FROM blobs), 2) as percentage, "
|
||||||
|
" MIN(uploaded_at) as first_upload, "
|
||||||
|
" MAX(uploaded_at) as last_upload "
|
||||||
|
"FROM blobs "
|
||||||
|
"WHERE uploader_pubkey IS NOT NULL "
|
||||||
|
"GROUP BY uploader_pubkey "
|
||||||
|
"ORDER BY blob_count DESC "
|
||||||
|
"LIMIT 20;";
|
||||||
|
|
||||||
|
rc = sqlite3_exec(db, create_uploaders_view, NULL, NULL, &err_msg);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
fprintf(stderr, "Failed to create top_uploaders view: %s\n", err_msg);
|
||||||
|
sqlite3_free(err_msg);
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
fprintf(stderr, "Database schema initialized successfully\n");
|
fprintf(stderr, "Database schema initialized successfully\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1830,8 +1951,9 @@ void handle_auth_challenge_request(void) {
|
|||||||
/////////////////////////////////////////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
int main(int argc, char *argv[]) {
|
int main(int argc, char *argv[]) {
|
||||||
fprintf(stderr, "DEBUG: main() started\n");
|
// Initialize application logging
|
||||||
fflush(stderr);
|
app_log(LOG_INFO, "=== Ginxsom FastCGI Application Starting ===");
|
||||||
|
app_log(LOG_INFO, "Process ID: %d", getpid());
|
||||||
|
|
||||||
// Parse command line arguments
|
// Parse command line arguments
|
||||||
int use_test_keys = 0;
|
int use_test_keys = 0;
|
||||||
@@ -1891,17 +2013,16 @@ int main(int argc, char *argv[]) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fprintf(stderr, "STARTUP: Using storage directory: %s\n", g_storage_dir);
|
app_log(LOG_INFO, "Storage directory: %s", g_storage_dir);
|
||||||
|
|
||||||
// CRITICAL: Initialize nostr crypto system BEFORE key operations
|
// CRITICAL: Initialize nostr crypto system BEFORE key operations
|
||||||
fprintf(stderr, "STARTUP: Initializing nostr crypto system...\r\n");
|
app_log(LOG_INFO, "Initializing nostr crypto system...");
|
||||||
int crypto_init_result = nostr_crypto_init();
|
int crypto_init_result = nostr_crypto_init();
|
||||||
fprintf(stderr, "CRYPTO INIT RESULT: %d\r\n", crypto_init_result);
|
|
||||||
if (crypto_init_result != 0) {
|
if (crypto_init_result != 0) {
|
||||||
fprintf(stderr, "FATAL ERROR: Failed to initialize nostr crypto system\r\n");
|
app_log(LOG_ERROR, "Failed to initialize nostr crypto system (result: %d)", crypto_init_result);
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
fprintf(stderr, "STARTUP: nostr crypto system initialized successfully\r\n");
|
app_log(LOG_INFO, "Nostr crypto system initialized successfully");
|
||||||
|
|
||||||
// ========================================================================
|
// ========================================================================
|
||||||
// DATABASE AND KEY INITIALIZATION - 5 SCENARIOS
|
// DATABASE AND KEY INITIALIZATION - 5 SCENARIOS
|
||||||
@@ -1909,12 +2030,12 @@ int main(int argc, char *argv[]) {
|
|||||||
|
|
||||||
// Scenario 4: Test Mode (--test-keys)
|
// Scenario 4: Test Mode (--test-keys)
|
||||||
if (use_test_keys) {
|
if (use_test_keys) {
|
||||||
fprintf(stderr, "\n=== SCENARIO 4: TEST MODE ===\n");
|
app_log(LOG_INFO, "=== SCENARIO 4: TEST MODE ===");
|
||||||
|
|
||||||
// Load test keys from .test_keys file
|
// Load test keys from .test_keys file
|
||||||
FILE *keys_file = fopen(".test_keys", "r");
|
FILE *keys_file = fopen(".test_keys", "r");
|
||||||
if (!keys_file) {
|
if (!keys_file) {
|
||||||
fprintf(stderr, "ERROR: Cannot open .test_keys file\n");
|
app_log(LOG_ERROR, "Cannot open .test_keys file");
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1934,35 +2055,38 @@ int main(int argc, char *argv[]) {
|
|||||||
if (end && (end - start) == 64) {
|
if (end && (end - start) == 64) {
|
||||||
strncpy(test_server_privkey, start, 64);
|
strncpy(test_server_privkey, start, 64);
|
||||||
test_server_privkey[64] = '\0';
|
test_server_privkey[64] = '\0';
|
||||||
|
app_log(LOG_DEBUG, "Parsed SERVER_PRIVKEY from .test_keys");
|
||||||
|
} else {
|
||||||
|
app_log(LOG_ERROR, "Failed to parse SERVER_PRIVKEY (length: %ld)", end ? (long)(end - start) : -1L);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fclose(keys_file);
|
fclose(keys_file);
|
||||||
|
|
||||||
fprintf(stderr, "TEST MODE: Loaded keys from .test_keys\n");
|
app_log(LOG_INFO, "Loaded keys from .test_keys");
|
||||||
fprintf(stderr, "TEST MODE: Admin pubkey: %s\n", g_admin_pubkey);
|
app_log(LOG_INFO, "Admin pubkey: %s", g_admin_pubkey);
|
||||||
|
|
||||||
// Derive pubkey from test privkey
|
// Derive pubkey from test privkey
|
||||||
if (derive_pubkey_from_privkey(test_server_privkey, g_blossom_pubkey) != 0) {
|
if (derive_pubkey_from_privkey(test_server_privkey, g_blossom_pubkey) != 0) {
|
||||||
fprintf(stderr, "ERROR: Failed to derive pubkey from test privkey\n");
|
app_log(LOG_ERROR, "Failed to derive pubkey from test privkey");
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
fprintf(stderr, "TEST MODE: Server pubkey: %s\n", g_blossom_pubkey);
|
app_log(LOG_INFO, "Server pubkey: %s", g_blossom_pubkey);
|
||||||
|
|
||||||
// Set database path based on test pubkey
|
// Set database path based on test pubkey
|
||||||
if (set_db_path_from_pubkey(g_blossom_pubkey) != 0) {
|
if (set_db_path_from_pubkey(g_blossom_pubkey) != 0) {
|
||||||
fprintf(stderr, "ERROR: Failed to set database path\n");
|
app_log(LOG_ERROR, "Failed to set database path");
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test mode ALWAYS overwrites database for clean testing
|
// Test mode ALWAYS overwrites database for clean testing
|
||||||
fprintf(stderr, "TEST MODE: Creating/overwriting database: %s\n", g_db_path);
|
app_log(LOG_INFO, "Creating/overwriting test database: %s", g_db_path);
|
||||||
unlink(g_db_path); // Remove if exists
|
unlink(g_db_path); // Remove if exists
|
||||||
|
|
||||||
// Initialize new database
|
// Initialize new database
|
||||||
if (initialize_database(g_db_path) != 0) {
|
if (initialize_database(g_db_path) != 0) {
|
||||||
fprintf(stderr, "ERROR: Failed to initialize test database\n");
|
app_log(LOG_ERROR, "Failed to initialize test database");
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1971,7 +2095,7 @@ int main(int argc, char *argv[]) {
|
|||||||
g_blossom_seckey[64] = '\0';
|
g_blossom_seckey[64] = '\0';
|
||||||
|
|
||||||
if (store_blossom_private_key(test_server_privkey) != 0) {
|
if (store_blossom_private_key(test_server_privkey) != 0) {
|
||||||
fprintf(stderr, "ERROR: Failed to store test private key\n");
|
app_log(LOG_ERROR, "Failed to store test private key");
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2003,12 +2127,12 @@ int main(int argc, char *argv[]) {
|
|||||||
sqlite3_close(db);
|
sqlite3_close(db);
|
||||||
}
|
}
|
||||||
|
|
||||||
fprintf(stderr, "TEST MODE: Database initialized successfully\n");
|
app_log(LOG_INFO, "Test database initialized successfully");
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scenario 3: Keys Specified (--server-privkey)
|
// Scenario 3: Keys Specified (--server-privkey)
|
||||||
else if (test_server_privkey[0] != '\0') {
|
else if (test_server_privkey[0] != '\0') {
|
||||||
fprintf(stderr, "\n=== SCENARIO 3: KEYS SPECIFIED ===\n");
|
app_log(LOG_INFO, "=== SCENARIO 3: KEYS SPECIFIED ===");
|
||||||
|
|
||||||
// Derive pubkey from provided privkey
|
// Derive pubkey from provided privkey
|
||||||
if (derive_pubkey_from_privkey(test_server_privkey, g_blossom_pubkey) != 0) {
|
if (derive_pubkey_from_privkey(test_server_privkey, g_blossom_pubkey) != 0) {
|
||||||
@@ -2022,6 +2146,7 @@ int main(int argc, char *argv[]) {
|
|||||||
if (db_path_specified) {
|
if (db_path_specified) {
|
||||||
fprintf(stderr, "\n=== SCENARIO 5: DATABASE + KEYS (VALIDATION) ===\n");
|
fprintf(stderr, "\n=== SCENARIO 5: DATABASE + KEYS (VALIDATION) ===\n");
|
||||||
strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
|
strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
|
||||||
|
g_db_path[sizeof(g_db_path) - 1] = '\0';
|
||||||
|
|
||||||
// Check if database exists
|
// Check if database exists
|
||||||
struct stat st;
|
struct stat st;
|
||||||
@@ -2117,6 +2242,7 @@ int main(int argc, char *argv[]) {
|
|||||||
else if (db_path_specified) {
|
else if (db_path_specified) {
|
||||||
fprintf(stderr, "\n=== SCENARIO 2: DATABASE SPECIFIED ===\n");
|
fprintf(stderr, "\n=== SCENARIO 2: DATABASE SPECIFIED ===\n");
|
||||||
strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
|
strncpy(g_db_path, specified_db_path, sizeof(g_db_path) - 1);
|
||||||
|
g_db_path[sizeof(g_db_path) - 1] = '\0';
|
||||||
|
|
||||||
// Check if database exists
|
// Check if database exists
|
||||||
struct stat st;
|
struct stat st;
|
||||||
@@ -2156,21 +2282,18 @@ int main(int argc, char *argv[]) {
|
|||||||
// END DATABASE AND KEY INITIALIZATION
|
// END DATABASE AND KEY INITIALIZATION
|
||||||
// ========================================================================
|
// ========================================================================
|
||||||
|
|
||||||
fprintf(stderr, "\n=== FINAL CONFIGURATION ===\n");
|
app_log(LOG_INFO, "=== FINAL CONFIGURATION ===");
|
||||||
fprintf(stderr, "Database path: %s\n", g_db_path);
|
app_log(LOG_INFO, "Database path: %s", g_db_path);
|
||||||
fprintf(stderr, "Storage directory: %s\n", g_storage_dir);
|
app_log(LOG_INFO, "Storage directory: %s", g_storage_dir);
|
||||||
fprintf(stderr, "Server pubkey: %s\n", g_blossom_pubkey);
|
app_log(LOG_INFO, "Server pubkey: %s", g_blossom_pubkey);
|
||||||
if (strlen(g_admin_pubkey) > 0) {
|
if (strlen(g_admin_pubkey) > 0) {
|
||||||
fprintf(stderr, "Admin pubkey: %s\n", g_admin_pubkey);
|
app_log(LOG_INFO, "Admin pubkey: %s", g_admin_pubkey);
|
||||||
}
|
}
|
||||||
fprintf(stderr, "===========================\n\n");
|
app_log(LOG_INFO, "===========================");
|
||||||
|
|
||||||
fflush(stderr);
|
|
||||||
|
|
||||||
// If --generate-keys was specified, exit after key generation
|
// If --generate-keys was specified, exit after key generation
|
||||||
if (g_generate_keys) {
|
if (g_generate_keys) {
|
||||||
fprintf(stderr, "Key generation completed, exiting.\n");
|
app_log(LOG_INFO, "Key generation completed, exiting");
|
||||||
fflush(stderr);
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2191,25 +2314,58 @@ if (!config_loaded /* && !initialize_server_config() */) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Initialize request validator system
|
// Initialize request validator system
|
||||||
fprintf(stderr, "STARTUP: Initializing request validator system...\r\n");
|
app_log(LOG_INFO, "Initializing request validator system...");
|
||||||
int validator_init_result =
|
int validator_init_result =
|
||||||
ginxsom_request_validator_init(g_db_path, "ginxsom");
|
ginxsom_request_validator_init(g_db_path, "ginxsom");
|
||||||
fprintf(stderr, "MAIN: validator init return code: %d\r\n",
|
|
||||||
validator_init_result);
|
|
||||||
if (validator_init_result != NOSTR_SUCCESS) {
|
if (validator_init_result != NOSTR_SUCCESS) {
|
||||||
fprintf(stderr,
|
app_log(LOG_ERROR, "Failed to initialize request validator system (result: %d)", validator_init_result);
|
||||||
"FATAL ERROR: Failed to initialize request validator system\r\n");
|
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
fprintf(stderr,
|
app_log(LOG_INFO, "Request validator system initialized successfully");
|
||||||
"STARTUP: Request validator system initialized successfully\r\n");
|
|
||||||
fflush(stderr);
|
|
||||||
|
|
||||||
|
// Initialize relay client system
|
||||||
|
app_log(LOG_INFO, "Initializing relay client system...");
|
||||||
|
int relay_init_result = relay_client_init(g_db_path);
|
||||||
|
if (relay_init_result != 0) {
|
||||||
|
app_log(LOG_WARN, "Failed to initialize relay client system (result: %d)", relay_init_result);
|
||||||
|
app_log(LOG_WARN, "Continuing without relay client functionality");
|
||||||
|
} else {
|
||||||
|
app_log(LOG_INFO, "Relay client system initialized successfully");
|
||||||
|
|
||||||
|
// Start relay connections (this will check enable_relay_connect config)
|
||||||
|
app_log(LOG_INFO, "Starting relay client connections...");
|
||||||
|
int relay_start_result = relay_client_start();
|
||||||
|
if (relay_start_result != 0) {
|
||||||
|
app_log(LOG_WARN, "Failed to start relay client (result: %d)", relay_start_result);
|
||||||
|
app_log(LOG_WARN, "Relay client disabled - check configuration");
|
||||||
|
} else {
|
||||||
|
app_log(LOG_INFO, "Relay client started successfully");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize admin commands system
|
||||||
|
app_log(LOG_INFO, "Initializing admin commands system...");
|
||||||
|
int admin_cmd_result = admin_commands_init(g_db_path);
|
||||||
|
if (admin_cmd_result != 0) {
|
||||||
|
app_log(LOG_WARN, "Failed to initialize admin commands system (result: %d)", admin_cmd_result);
|
||||||
|
app_log(LOG_WARN, "Continuing without admin commands functionality");
|
||||||
|
} else {
|
||||||
|
app_log(LOG_INFO, "Admin commands system initialized successfully");
|
||||||
|
}
|
||||||
|
|
||||||
/////////////////////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////////////////////
|
||||||
// THIS IS WHERE THE REQUESTS ENTER THE FastCGI
|
// THIS IS WHERE THE REQUESTS ENTER THE FastCGI
|
||||||
/////////////////////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////////////////////
|
||||||
|
app_log(LOG_INFO, "FastCGI request loop starting - ready to accept requests");
|
||||||
|
|
||||||
|
int first_request = 1;
|
||||||
while (FCGI_Accept() >= 0) {
|
while (FCGI_Accept() >= 0) {
|
||||||
|
// Test stderr capture on first request
|
||||||
|
if (first_request) {
|
||||||
|
fprintf(stderr, "FCGI: First request received - testing nginx stderr capture\n");
|
||||||
|
fflush(stderr);
|
||||||
|
first_request = 0;
|
||||||
|
}
|
||||||
const char *request_method = getenv("REQUEST_METHOD");
|
const char *request_method = getenv("REQUEST_METHOD");
|
||||||
const char *request_uri = getenv("REQUEST_URI");
|
const char *request_uri = getenv("REQUEST_URI");
|
||||||
const char *auth_header = getenv("HTTP_AUTHORIZATION");
|
const char *auth_header = getenv("HTTP_AUTHORIZATION");
|
||||||
@@ -2237,13 +2393,14 @@ if (!config_loaded /* && !initialize_server_config() */) {
|
|||||||
|
|
||||||
// Special case: Root endpoint is public and doesn't require authentication
|
// Special case: Root endpoint is public and doesn't require authentication
|
||||||
if (strcmp(request_method, "GET") == 0 && strcmp(request_uri, "/") == 0) {
|
if (strcmp(request_method, "GET") == 0 && strcmp(request_uri, "/") == 0) {
|
||||||
// Handle GET / requests - Server info endpoint
|
// Handle GET / requests - Server info endpoint (NIP-11)
|
||||||
printf("Status: 200 OK\r\n");
|
printf("Status: 200 OK\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/nostr+json\r\n\r\n");
|
||||||
printf("{\n");
|
printf("{\n");
|
||||||
printf(" \"server\": \"ginxsom\",\n");
|
printf(" \"server\": \"ginxsom\",\n");
|
||||||
printf(" \"version\": \"%s\",\n", VERSION);
|
printf(" \"version\": \"%s\",\n", VERSION);
|
||||||
printf(" \"description\": \"Ginxsom Blossom Server\",\n");
|
printf(" \"description\": \"Ginxsom Blossom Server\",\n");
|
||||||
|
printf(" \"pubkey\": \"%s\",\n", g_blossom_pubkey);
|
||||||
printf(" \"endpoints\": {\n");
|
printf(" \"endpoints\": {\n");
|
||||||
printf(" \"blob_get\": \"GET /<sha256>\",\n");
|
printf(" \"blob_get\": \"GET /<sha256>\",\n");
|
||||||
printf(" \"blob_head\": \"HEAD /<sha256>\",\n");
|
printf(" \"blob_head\": \"HEAD /<sha256>\",\n");
|
||||||
@@ -2305,12 +2462,28 @@ if (!config_loaded /* && !initialize_server_config() */) {
|
|||||||
operation = "mirror";
|
operation = "mirror";
|
||||||
} else if (strcmp(request_method, "PUT") == 0 && strcmp(request_uri, "/report") == 0) {
|
} else if (strcmp(request_method, "PUT") == 0 && strcmp(request_uri, "/report") == 0) {
|
||||||
operation = "report";
|
operation = "report";
|
||||||
|
} else if (strncmp(request_uri, "/admin", 6) == 0) {
|
||||||
|
operation = "admin_interface"; // Public static files - no auth required
|
||||||
} else if (strncmp(request_uri, "/api/", 5) == 0) {
|
} else if (strncmp(request_uri, "/api/", 5) == 0) {
|
||||||
operation = "admin";
|
// Check if this is a static file request or API request
|
||||||
// Special case: POST /api/admin uses Kind 23456 events for authentication
|
const char *path = request_uri + 5; // Skip "/api/"
|
||||||
// Skip centralized validation for these requests
|
int is_static_file = 0;
|
||||||
if (strcmp(request_method, "POST") == 0 && strcmp(request_uri, "/api/admin") == 0) {
|
|
||||||
operation = "admin_event"; // Mark as special case
|
// Check for static file extensions or root /api path
|
||||||
|
if (strstr(path, ".html") || strstr(path, ".css") || strstr(path, ".js") ||
|
||||||
|
strlen(path) == 0 || strcmp(path, "/") == 0) {
|
||||||
|
is_static_file = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (is_static_file) {
|
||||||
|
operation = "admin_interface"; // Public static files - no auth required
|
||||||
|
} else {
|
||||||
|
operation = "admin";
|
||||||
|
// Special case: POST /api/admin uses Kind 23458 events for authentication
|
||||||
|
// Skip centralized validation for these requests
|
||||||
|
if (strcmp(request_method, "POST") == 0 && strcmp(request_uri, "/api/admin") == 0) {
|
||||||
|
operation = "admin_event"; // Mark as special case
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else if (strcmp(request_method, "GET") == 0 && strncmp(request_uri, "/list/", 6) == 0) {
|
} else if (strcmp(request_method, "GET") == 0 && strncmp(request_uri, "/list/", 6) == 0) {
|
||||||
operation = "list";
|
operation = "list";
|
||||||
@@ -2347,6 +2520,8 @@ if (!config_loaded /* && !initialize_server_config() */) {
|
|||||||
// Special case: challenge generation failure should be handled by the endpoint
|
// Special case: challenge generation failure should be handled by the endpoint
|
||||||
if (strcmp(operation, "challenge") == 0) {
|
if (strcmp(operation, "challenge") == 0) {
|
||||||
// Let the /auth endpoint handle this - it will generate its own error response
|
// Let the /auth endpoint handle this - it will generate its own error response
|
||||||
|
} else if (strcmp(operation, "admin_interface") == 0) {
|
||||||
|
// Admin interface serves public static files - no auth required
|
||||||
} else if (strcmp(operation, "head") == 0 || strcmp(operation, "head_upload") == 0) {
|
} else if (strcmp(operation, "head") == 0 || strcmp(operation, "head_upload") == 0) {
|
||||||
// HEAD requests might not require auth depending on config - let handler decide
|
// HEAD requests might not require auth depending on config - let handler decide
|
||||||
} else if (strcmp(operation, "list") == 0) {
|
} else if (strcmp(operation, "list") == 0) {
|
||||||
@@ -2354,7 +2529,7 @@ if (!config_loaded /* && !initialize_server_config() */) {
|
|||||||
} else if (strcmp(operation, "admin") == 0 && strcmp(request_uri, "/api/health") == 0) {
|
} else if (strcmp(operation, "admin") == 0 && strcmp(request_uri, "/api/health") == 0) {
|
||||||
// Health endpoint is public and doesn't require authentication - let handler decide
|
// Health endpoint is public and doesn't require authentication - let handler decide
|
||||||
} else if (strcmp(operation, "admin_event") == 0) {
|
} else if (strcmp(operation, "admin_event") == 0) {
|
||||||
// POST /api/admin uses Kind 23456 events - authentication handled by admin_event.c
|
// POST /api/admin uses Kind 23458 events - authentication handled by admin_event.c
|
||||||
// Skip centralized validation and let the handler validate the event
|
// Skip centralized validation and let the handler validate the event
|
||||||
} else {
|
} else {
|
||||||
// For other operations, validation failure means auth failure
|
// For other operations, validation failure means auth failure
|
||||||
@@ -2451,10 +2626,34 @@ if (!config_loaded /* && !initialize_server_config() */) {
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
} else if (strcmp(request_method, "POST") == 0 &&
|
||||||
|
strcmp(request_uri, "/api/admin") == 0) {
|
||||||
|
// Handle POST /api/admin requests (Kind 23458 admin events)
|
||||||
|
handle_admin_event_request();
|
||||||
|
|
||||||
|
} else if (strncmp(request_uri, "/admin", 6) == 0) {
|
||||||
|
// Handle admin web interface requests (embedded files)
|
||||||
|
handle_admin_interface_request(request_uri);
|
||||||
|
|
||||||
} else if (strncmp(request_uri, "/api/", 5) == 0) {
|
} else if (strncmp(request_uri, "/api/", 5) == 0) {
|
||||||
// Handle admin API requests with pre-validated auth
|
// Check if this is a static file request (no auth required) or API request (auth required)
|
||||||
const char *validated_pubkey = (result.valid && strlen(result.pubkey) == 64) ? result.pubkey : NULL;
|
const char *path = request_uri + 5; // Skip "/api/"
|
||||||
handle_admin_api_request(request_method, request_uri, validated_pubkey, result.valid);
|
int is_static_file = 0;
|
||||||
|
|
||||||
|
// Check for static file extensions
|
||||||
|
if (strstr(path, ".html") || strstr(path, ".css") || strstr(path, ".js") ||
|
||||||
|
strcmp(request_uri, "/api") == 0 || strcmp(request_uri, "/api/") == 0) {
|
||||||
|
is_static_file = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (is_static_file) {
|
||||||
|
// Serve static files without authentication
|
||||||
|
handle_admin_interface_request(request_uri);
|
||||||
|
} else {
|
||||||
|
// Handle admin API requests with pre-validated auth
|
||||||
|
const char *validated_pubkey = (result.valid && strlen(result.pubkey) == 64) ? result.pubkey : NULL;
|
||||||
|
handle_admin_api_request(request_method, request_uri, validated_pubkey, result.valid);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
} else if (strcmp(request_method, "GET") == 0 &&
|
} else if (strcmp(request_method, "GET") == 0 &&
|
||||||
@@ -2485,13 +2684,14 @@ if (!config_loaded /* && !initialize_server_config() */) {
|
|||||||
}
|
}
|
||||||
} else if (strcmp(request_method, "GET") == 0 &&
|
} else if (strcmp(request_method, "GET") == 0 &&
|
||||||
strcmp(request_uri, "/") == 0) {
|
strcmp(request_uri, "/") == 0) {
|
||||||
// Handle GET / requests - Server info endpoint
|
// Handle GET / requests - Server info endpoint (NIP-11)
|
||||||
printf("Status: 200 OK\r\n");
|
printf("Status: 200 OK\r\n");
|
||||||
printf("Content-Type: application/json\r\n\r\n");
|
printf("Content-Type: application/nostr+json\r\n\r\n");
|
||||||
printf("{\n");
|
printf("{\n");
|
||||||
printf(" \"server\": \"ginxsom\",\n");
|
printf(" \"server\": \"ginxsom\",\n");
|
||||||
printf(" \"version\": \"%s\",\n", VERSION);
|
printf(" \"version\": \"%s\",\n", VERSION);
|
||||||
printf(" \"description\": \"Ginxsom Blossom Server\",\n");
|
printf(" \"description\": \"Ginxsom Blossom Server\",\n");
|
||||||
|
printf(" \"pubkey\": \"%s\",\n", g_blossom_pubkey);
|
||||||
printf(" \"endpoints\": {\n");
|
printf(" \"endpoints\": {\n");
|
||||||
printf(" \"blob_get\": \"GET /<sha256>\",\n");
|
printf(" \"blob_get\": \"GET /<sha256>\",\n");
|
||||||
printf(" \"blob_head\": \"HEAD /<sha256>\",\n");
|
printf(" \"blob_head\": \"HEAD /<sha256>\",\n");
|
||||||
|
|||||||
871
src/relay_client.c
Normal file
871
src/relay_client.c
Normal file
@@ -0,0 +1,871 @@
|
|||||||
|
/*
|
||||||
|
* Ginxsom Relay Client Implementation
|
||||||
|
*
|
||||||
|
* Manages connections to Nostr relays, publishes events, and subscribes to admin commands.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "relay_client.h"
|
||||||
|
#include "admin_commands.h"
|
||||||
|
#include "../nostr_core_lib/nostr_core/nostr_core.h"
|
||||||
|
#include <sqlite3.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <pthread.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <time.h>
|
||||||
|
|
||||||
|
// Forward declare app_log to avoid including ginxsom.h (which has typedef conflicts)
|
||||||
|
typedef enum {
|
||||||
|
LOG_DEBUG = 0,
|
||||||
|
LOG_INFO = 1,
|
||||||
|
LOG_WARN = 2,
|
||||||
|
LOG_ERROR = 3
|
||||||
|
} log_level_t;
|
||||||
|
|
||||||
|
void app_log(log_level_t level, const char* format, ...);
|
||||||
|
|
||||||
|
// Maximum number of relays to connect to
|
||||||
|
#define MAX_RELAYS 10
|
||||||
|
|
||||||
|
// Reconnection settings
|
||||||
|
#define RECONNECT_DELAY_SECONDS 30
|
||||||
|
#define MAX_RECONNECT_ATTEMPTS 5
|
||||||
|
|
||||||
|
// Global state
|
||||||
|
static struct {
|
||||||
|
int enabled;
|
||||||
|
int initialized;
|
||||||
|
int running;
|
||||||
|
char db_path[512];
|
||||||
|
nostr_relay_pool_t* pool;
|
||||||
|
char** relay_urls;
|
||||||
|
int relay_count;
|
||||||
|
nostr_pool_subscription_t* admin_subscription;
|
||||||
|
pthread_t management_thread;
|
||||||
|
pthread_mutex_t state_mutex;
|
||||||
|
} g_relay_state = {0};
|
||||||
|
|
||||||
|
// External globals from main.c
|
||||||
|
extern char g_blossom_seckey[65];
|
||||||
|
extern char g_blossom_pubkey[65];
|
||||||
|
extern char g_admin_pubkey[65];
|
||||||
|
|
||||||
|
// Forward declarations
|
||||||
|
static void *relay_management_thread(void *arg);
|
||||||
|
static int load_config_from_db(void);
|
||||||
|
static int parse_relay_urls(const char *json_array);
|
||||||
|
static int subscribe_to_admin_commands(void);
|
||||||
|
static void on_publish_response(const char* relay_url, const char* event_id, int success, const char* message, void* user_data);
|
||||||
|
static void on_admin_command_event(cJSON* event, const char* relay_url, void* user_data);
|
||||||
|
static void on_admin_subscription_eose(cJSON** events, int event_count, void* user_data);
|
||||||
|
|
||||||
|
// Initialize relay client system
|
||||||
|
int relay_client_init(const char *db_path) {
|
||||||
|
if (g_relay_state.initialized) {
|
||||||
|
app_log(LOG_WARN, "Relay client already initialized");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Initializing relay client system...");
|
||||||
|
|
||||||
|
// Store database path
|
||||||
|
strncpy(g_relay_state.db_path, db_path, sizeof(g_relay_state.db_path) - 1);
|
||||||
|
|
||||||
|
// Initialize mutex
|
||||||
|
if (pthread_mutex_init(&g_relay_state.state_mutex, NULL) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to initialize relay state mutex");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load configuration from database
|
||||||
|
if (load_config_from_db() != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to load relay configuration from database");
|
||||||
|
pthread_mutex_destroy(&g_relay_state.state_mutex);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create relay pool if enabled
|
||||||
|
if (g_relay_state.enabled) {
|
||||||
|
// Use default reconnection config (don't free - it's a static structure)
|
||||||
|
nostr_pool_reconnect_config_t* config = nostr_pool_reconnect_config_default();
|
||||||
|
g_relay_state.pool = nostr_relay_pool_create(config);
|
||||||
|
if (!g_relay_state.pool) {
|
||||||
|
app_log(LOG_ERROR, "Failed to create relay pool");
|
||||||
|
pthread_mutex_destroy(&g_relay_state.state_mutex);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add all relays to pool
|
||||||
|
for (int i = 0; i < g_relay_state.relay_count; i++) {
|
||||||
|
if (nostr_relay_pool_add_relay(g_relay_state.pool, g_relay_state.relay_urls[i]) != NOSTR_SUCCESS) {
|
||||||
|
app_log(LOG_WARN, "Failed to add relay to pool: %s", g_relay_state.relay_urls[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trigger initial connection attempts by creating a dummy subscription
|
||||||
|
// This forces ensure_relay_connection() to be called for each relay
|
||||||
|
app_log(LOG_INFO, "Initiating relay connections...");
|
||||||
|
cJSON* dummy_filter = cJSON_CreateObject();
|
||||||
|
cJSON* kinds = cJSON_CreateArray();
|
||||||
|
cJSON_AddItemToArray(kinds, cJSON_CreateNumber(0)); // Kind 0 (will match nothing)
|
||||||
|
cJSON_AddItemToObject(dummy_filter, "kinds", kinds);
|
||||||
|
cJSON_AddNumberToObject(dummy_filter, "limit", 0); // Limit 0 = no results
|
||||||
|
|
||||||
|
nostr_pool_subscription_t* dummy_sub = nostr_relay_pool_subscribe(
|
||||||
|
g_relay_state.pool,
|
||||||
|
(const char**)g_relay_state.relay_urls,
|
||||||
|
g_relay_state.relay_count,
|
||||||
|
dummy_filter,
|
||||||
|
NULL, // No event callback
|
||||||
|
NULL, // No EOSE callback
|
||||||
|
NULL, // No user data
|
||||||
|
1, // close_on_eose
|
||||||
|
1, // enable_deduplication
|
||||||
|
NOSTR_POOL_EOSE_FIRST, // result_mode
|
||||||
|
30, // relay_timeout_seconds
|
||||||
|
30 // eose_timeout_seconds
|
||||||
|
);
|
||||||
|
|
||||||
|
cJSON_Delete(dummy_filter);
|
||||||
|
|
||||||
|
// Immediately close the dummy subscription
|
||||||
|
if (dummy_sub) {
|
||||||
|
nostr_pool_subscription_close(dummy_sub);
|
||||||
|
app_log(LOG_INFO, "Connection attempts initiated for %d relays", g_relay_state.relay_count);
|
||||||
|
} else {
|
||||||
|
app_log(LOG_WARN, "Failed to initiate connection attempts");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
g_relay_state.initialized = 1;
|
||||||
|
app_log(LOG_INFO, "Relay client initialized (enabled: %d, relays: %d)",
|
||||||
|
g_relay_state.enabled, g_relay_state.relay_count);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load configuration from database
|
||||||
|
static int load_config_from_db(void) {
|
||||||
|
sqlite3 *db;
|
||||||
|
sqlite3_stmt *stmt;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
rc = sqlite3_open_v2(g_relay_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
app_log(LOG_ERROR, "Cannot open database: %s", sqlite3_errmsg(db));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load enable_relay_connect
|
||||||
|
const char *sql = "SELECT value FROM config WHERE key = ?";
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
app_log(LOG_ERROR, "Failed to prepare statement: %s", sqlite3_errmsg(db));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_bind_text(stmt, 1, "enable_relay_connect", -1, SQLITE_STATIC);
|
||||||
|
rc = sqlite3_step(stmt);
|
||||||
|
if (rc == SQLITE_ROW) {
|
||||||
|
const char *value = (const char *)sqlite3_column_text(stmt, 0);
|
||||||
|
g_relay_state.enabled = (strcmp(value, "true") == 0 || strcmp(value, "1") == 0);
|
||||||
|
} else {
|
||||||
|
g_relay_state.enabled = 0;
|
||||||
|
}
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
|
||||||
|
// If not enabled, skip loading relay URLs
|
||||||
|
if (!g_relay_state.enabled) {
|
||||||
|
sqlite3_close(db);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load kind_10002_tags (relay URLs)
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
app_log(LOG_ERROR, "Failed to prepare statement: %s", sqlite3_errmsg(db));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
sqlite3_bind_text(stmt, 1, "kind_10002_tags", -1, SQLITE_STATIC);
|
||||||
|
rc = sqlite3_step(stmt);
|
||||||
|
if (rc == SQLITE_ROW) {
|
||||||
|
const char *json_array = (const char *)sqlite3_column_text(stmt, 0);
|
||||||
|
if (parse_relay_urls(json_array) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to parse relay URLs from config");
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
app_log(LOG_WARN, "No relay URLs configured in kind_10002_tags");
|
||||||
|
}
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
|
||||||
|
sqlite3_close(db);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse relay URLs from JSON array
|
||||||
|
static int parse_relay_urls(const char *json_array) {
|
||||||
|
cJSON *root = cJSON_Parse(json_array);
|
||||||
|
if (!root || !cJSON_IsArray(root)) {
|
||||||
|
app_log(LOG_ERROR, "Invalid JSON array for relay URLs");
|
||||||
|
if (root) cJSON_Delete(root);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
int count = cJSON_GetArraySize(root);
|
||||||
|
if (count > MAX_RELAYS) {
|
||||||
|
app_log(LOG_WARN, "Too many relays configured (%d), limiting to %d", count, MAX_RELAYS);
|
||||||
|
count = MAX_RELAYS;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate relay URLs array
|
||||||
|
g_relay_state.relay_urls = malloc(count * sizeof(char*));
|
||||||
|
if (!g_relay_state.relay_urls) {
|
||||||
|
cJSON_Delete(root);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
g_relay_state.relay_count = 0;
|
||||||
|
for (int i = 0; i < count; i++) {
|
||||||
|
cJSON *item = cJSON_GetArrayItem(root, i);
|
||||||
|
if (cJSON_IsString(item) && item->valuestring) {
|
||||||
|
g_relay_state.relay_urls[g_relay_state.relay_count] = strdup(item->valuestring);
|
||||||
|
if (!g_relay_state.relay_urls[g_relay_state.relay_count]) {
|
||||||
|
// Cleanup on failure
|
||||||
|
for (int j = 0; j < g_relay_state.relay_count; j++) {
|
||||||
|
free(g_relay_state.relay_urls[j]);
|
||||||
|
}
|
||||||
|
free(g_relay_state.relay_urls);
|
||||||
|
cJSON_Delete(root);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
g_relay_state.relay_count++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON_Delete(root);
|
||||||
|
app_log(LOG_INFO, "Parsed %d relay URLs from configuration", g_relay_state.relay_count);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start relay connections
|
||||||
|
int relay_client_start(void) {
|
||||||
|
if (!g_relay_state.initialized) {
|
||||||
|
app_log(LOG_ERROR, "Relay client not initialized");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!g_relay_state.enabled) {
|
||||||
|
app_log(LOG_INFO, "Relay client disabled in configuration");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (g_relay_state.running) {
|
||||||
|
app_log(LOG_WARN, "Relay client already running");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Starting relay client...");
|
||||||
|
|
||||||
|
// Start management thread
|
||||||
|
g_relay_state.running = 1;
|
||||||
|
if (pthread_create(&g_relay_state.management_thread, NULL, relay_management_thread, NULL) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to create relay management thread");
|
||||||
|
g_relay_state.running = 0;
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Relay client started successfully");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Relay management thread
|
||||||
|
static void *relay_management_thread(void *arg) {
|
||||||
|
(void)arg;
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Relay management thread started");
|
||||||
|
|
||||||
|
// Wait for at least one relay to connect (max 30 seconds)
|
||||||
|
int connected = 0;
|
||||||
|
for (int i = 0; i < 30 && !connected; i++) {
|
||||||
|
sleep(1);
|
||||||
|
|
||||||
|
// Poll to process connection attempts
|
||||||
|
nostr_relay_pool_poll(g_relay_state.pool, 100);
|
||||||
|
|
||||||
|
// Check if any relay is connected
|
||||||
|
for (int j = 0; j < g_relay_state.relay_count; j++) {
|
||||||
|
nostr_pool_relay_status_t status = nostr_relay_pool_get_relay_status(
|
||||||
|
g_relay_state.pool,
|
||||||
|
g_relay_state.relay_urls[j]
|
||||||
|
);
|
||||||
|
if (status == NOSTR_POOL_RELAY_CONNECTED) {
|
||||||
|
connected = 1;
|
||||||
|
app_log(LOG_INFO, "Relay connected: %s", g_relay_state.relay_urls[j]);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!connected) {
|
||||||
|
app_log(LOG_WARN, "No relays connected after 30 seconds, continuing anyway");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish initial events
|
||||||
|
relay_client_publish_kind0();
|
||||||
|
relay_client_publish_kind10002();
|
||||||
|
|
||||||
|
// Subscribe to admin commands
|
||||||
|
subscribe_to_admin_commands();
|
||||||
|
|
||||||
|
// Main loop: poll the relay pool for incoming messages
|
||||||
|
while (g_relay_state.running) {
|
||||||
|
// Poll with 1000ms timeout
|
||||||
|
int events_processed = nostr_relay_pool_poll(g_relay_state.pool, 1000);
|
||||||
|
|
||||||
|
if (events_processed < 0) {
|
||||||
|
app_log(LOG_ERROR, "Error polling relay pool");
|
||||||
|
sleep(1);
|
||||||
|
}
|
||||||
|
// Pool handles all connection management, reconnection, and message processing
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Relay management thread stopping");
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop relay connections
|
||||||
|
void relay_client_stop(void) {
|
||||||
|
if (!g_relay_state.running) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Stopping relay client...");
|
||||||
|
|
||||||
|
g_relay_state.running = 0;
|
||||||
|
|
||||||
|
// Wait for management thread to finish
|
||||||
|
pthread_join(g_relay_state.management_thread, NULL);
|
||||||
|
|
||||||
|
// Close admin subscription
|
||||||
|
if (g_relay_state.admin_subscription) {
|
||||||
|
nostr_pool_subscription_close(g_relay_state.admin_subscription);
|
||||||
|
g_relay_state.admin_subscription = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy relay pool (automatically disconnects all relays)
|
||||||
|
if (g_relay_state.pool) {
|
||||||
|
nostr_relay_pool_destroy(g_relay_state.pool);
|
||||||
|
g_relay_state.pool = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Free relay URLs
|
||||||
|
if (g_relay_state.relay_urls) {
|
||||||
|
for (int i = 0; i < g_relay_state.relay_count; i++) {
|
||||||
|
free(g_relay_state.relay_urls[i]);
|
||||||
|
}
|
||||||
|
free(g_relay_state.relay_urls);
|
||||||
|
g_relay_state.relay_urls = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
pthread_mutex_destroy(&g_relay_state.state_mutex);
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Relay client stopped");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if relay client is enabled
|
||||||
|
int relay_client_is_enabled(void) {
|
||||||
|
return g_relay_state.enabled;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish Kind 0 profile event
|
||||||
|
int relay_client_publish_kind0(void) {
|
||||||
|
if (!g_relay_state.enabled || !g_relay_state.running || !g_relay_state.pool) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Publishing Kind 0 profile event...");
|
||||||
|
|
||||||
|
// Load kind_0_content from database
|
||||||
|
sqlite3 *db;
|
||||||
|
sqlite3_stmt *stmt;
|
||||||
|
int rc;
|
||||||
|
|
||||||
|
rc = sqlite3_open_v2(g_relay_state.db_path, &db, SQLITE_OPEN_READONLY, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
app_log(LOG_ERROR, "Cannot open database: %s", sqlite3_errmsg(db));
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char *sql = "SELECT value FROM config WHERE key = 'kind_0_content'";
|
||||||
|
rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
|
||||||
|
if (rc != SQLITE_OK) {
|
||||||
|
app_log(LOG_ERROR, "Failed to prepare statement: %s", sqlite3_errmsg(db));
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
rc = sqlite3_step(stmt);
|
||||||
|
if (rc != SQLITE_ROW) {
|
||||||
|
app_log(LOG_WARN, "No kind_0_content found in config");
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
const char *content = (const char *)sqlite3_column_text(stmt, 0);
|
||||||
|
|
||||||
|
// Convert private key from hex to bytes
|
||||||
|
unsigned char privkey_bytes[32];
|
||||||
|
if (nostr_hex_to_bytes(g_blossom_seckey, privkey_bytes, 32) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to convert private key from hex");
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
sqlite3_close(db);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create and sign Kind 0 event using nostr_core_lib
|
||||||
|
cJSON* event = nostr_create_and_sign_event(
|
||||||
|
0, // kind
|
||||||
|
content, // content
|
||||||
|
NULL, // tags (empty for Kind 0)
|
||||||
|
privkey_bytes, // private key
|
||||||
|
time(NULL) // created_at
|
||||||
|
);
|
||||||
|
|
||||||
|
sqlite3_finalize(stmt);
|
||||||
|
sqlite3_close(db);
|
||||||
|
|
||||||
|
if (!event) {
|
||||||
|
app_log(LOG_ERROR, "Failed to create Kind 0 event");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish to all relays using async pool API
|
||||||
|
int result = nostr_relay_pool_publish_async(
|
||||||
|
g_relay_state.pool,
|
||||||
|
(const char**)g_relay_state.relay_urls,
|
||||||
|
g_relay_state.relay_count,
|
||||||
|
event,
|
||||||
|
on_publish_response,
|
||||||
|
(void*)"Kind 0" // user_data to identify event type
|
||||||
|
);
|
||||||
|
|
||||||
|
cJSON_Delete(event);
|
||||||
|
|
||||||
|
if (result == 0) {
|
||||||
|
app_log(LOG_INFO, "Kind 0 profile event publish initiated");
|
||||||
|
return 0;
|
||||||
|
} else {
|
||||||
|
app_log(LOG_ERROR, "Failed to initiate Kind 0 profile event publish");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish Kind 10002 relay list event
|
||||||
|
int relay_client_publish_kind10002(void) {
|
||||||
|
if (!g_relay_state.enabled || !g_relay_state.running || !g_relay_state.pool) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Publishing Kind 10002 relay list event...");
|
||||||
|
|
||||||
|
// Build tags array from configured relays
|
||||||
|
cJSON* tags = cJSON_CreateArray();
|
||||||
|
for (int i = 0; i < g_relay_state.relay_count; i++) {
|
||||||
|
cJSON* tag = cJSON_CreateArray();
|
||||||
|
cJSON_AddItemToArray(tag, cJSON_CreateString("r"));
|
||||||
|
cJSON_AddItemToArray(tag, cJSON_CreateString(g_relay_state.relay_urls[i]));
|
||||||
|
cJSON_AddItemToArray(tags, tag);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert private key from hex to bytes
|
||||||
|
unsigned char privkey_bytes[32];
|
||||||
|
if (nostr_hex_to_bytes(g_blossom_seckey, privkey_bytes, 32) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to convert private key from hex");
|
||||||
|
cJSON_Delete(tags);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create and sign Kind 10002 event
|
||||||
|
cJSON* event = nostr_create_and_sign_event(
|
||||||
|
10002, // kind
|
||||||
|
"", // content (empty for Kind 10002)
|
||||||
|
tags, // tags
|
||||||
|
privkey_bytes, // private key
|
||||||
|
time(NULL) // created_at
|
||||||
|
);
|
||||||
|
|
||||||
|
cJSON_Delete(tags);
|
||||||
|
|
||||||
|
if (!event) {
|
||||||
|
app_log(LOG_ERROR, "Failed to create Kind 10002 event");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish to all relays using async pool API
|
||||||
|
int result = nostr_relay_pool_publish_async(
|
||||||
|
g_relay_state.pool,
|
||||||
|
(const char**)g_relay_state.relay_urls,
|
||||||
|
g_relay_state.relay_count,
|
||||||
|
event,
|
||||||
|
on_publish_response,
|
||||||
|
(void*)"Kind 10002" // user_data to identify event type
|
||||||
|
);
|
||||||
|
|
||||||
|
cJSON_Delete(event);
|
||||||
|
|
||||||
|
if (result == 0) {
|
||||||
|
app_log(LOG_INFO, "Kind 10002 relay list event publish initiated");
|
||||||
|
return 0;
|
||||||
|
} else {
|
||||||
|
app_log(LOG_ERROR, "Failed to initiate Kind 10002 relay list event publish");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send Kind 23459 admin response event
|
||||||
|
int relay_client_send_admin_response(const char *recipient_pubkey, const char *response_content) {
|
||||||
|
if (!g_relay_state.enabled || !g_relay_state.running || !g_relay_state.pool) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!recipient_pubkey || !response_content) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Sending Kind 23459 admin response to %s", recipient_pubkey);
|
||||||
|
|
||||||
|
// TODO: Encrypt response_content using NIP-44
|
||||||
|
// For now, use plaintext (stub implementation)
|
||||||
|
const char *encrypted_content = response_content;
|
||||||
|
|
||||||
|
// Build tags array
|
||||||
|
cJSON* tags = cJSON_CreateArray();
|
||||||
|
cJSON* p_tag = cJSON_CreateArray();
|
||||||
|
cJSON_AddItemToArray(p_tag, cJSON_CreateString("p"));
|
||||||
|
cJSON_AddItemToArray(p_tag, cJSON_CreateString(recipient_pubkey));
|
||||||
|
cJSON_AddItemToArray(tags, p_tag);
|
||||||
|
|
||||||
|
// Convert private key from hex to bytes
|
||||||
|
unsigned char privkey_bytes[32];
|
||||||
|
if (nostr_hex_to_bytes(g_blossom_seckey, privkey_bytes, 32) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to convert private key from hex");
|
||||||
|
cJSON_Delete(tags);
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create and sign Kind 23459 event
|
||||||
|
cJSON* event = nostr_create_and_sign_event(
|
||||||
|
23459, // kind
|
||||||
|
encrypted_content, // content
|
||||||
|
tags, // tags
|
||||||
|
privkey_bytes, // private key
|
||||||
|
time(NULL) // created_at
|
||||||
|
);
|
||||||
|
|
||||||
|
cJSON_Delete(tags);
|
||||||
|
|
||||||
|
if (!event) {
|
||||||
|
app_log(LOG_ERROR, "Failed to create Kind 23459 event");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Publish to all relays using async pool API
|
||||||
|
int result = nostr_relay_pool_publish_async(
|
||||||
|
g_relay_state.pool,
|
||||||
|
(const char**)g_relay_state.relay_urls,
|
||||||
|
g_relay_state.relay_count,
|
||||||
|
event,
|
||||||
|
on_publish_response,
|
||||||
|
(void*)"Kind 23459" // user_data to identify event type
|
||||||
|
);
|
||||||
|
|
||||||
|
cJSON_Delete(event);
|
||||||
|
|
||||||
|
if (result == 0) {
|
||||||
|
app_log(LOG_INFO, "Kind 23459 admin response publish initiated");
|
||||||
|
return 0;
|
||||||
|
} else {
|
||||||
|
app_log(LOG_ERROR, "Failed to initiate Kind 23459 admin response publish");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Callback for publish responses
|
||||||
|
static void on_publish_response(const char* relay_url, const char* event_id, int success, const char* message, void* user_data) {
|
||||||
|
const char* event_type = (const char*)user_data;
|
||||||
|
|
||||||
|
if (success) {
|
||||||
|
app_log(LOG_INFO, "%s event published successfully to %s (ID: %s)",
|
||||||
|
event_type, relay_url, event_id);
|
||||||
|
} else {
|
||||||
|
app_log(LOG_WARN, "%s event rejected by %s: %s",
|
||||||
|
event_type, relay_url, message ? message : "unknown error");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Callback for received Kind 23458 admin command events
|
||||||
|
static void on_admin_command_event(cJSON* event, const char* relay_url, void* user_data) {
|
||||||
|
(void)user_data;
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Received Kind 23458 admin command from relay: %s", relay_url);
|
||||||
|
|
||||||
|
// Extract event fields
|
||||||
|
cJSON* kind_json = cJSON_GetObjectItem(event, "kind");
|
||||||
|
cJSON* pubkey_json = cJSON_GetObjectItem(event, "pubkey");
|
||||||
|
cJSON* content_json = cJSON_GetObjectItem(event, "content");
|
||||||
|
cJSON* id_json = cJSON_GetObjectItem(event, "id");
|
||||||
|
|
||||||
|
if (!kind_json || !pubkey_json || !content_json || !id_json) {
|
||||||
|
app_log(LOG_ERROR, "Invalid event structure");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
int kind = cJSON_GetNumberValue(kind_json);
|
||||||
|
const char* sender_pubkey = cJSON_GetStringValue(pubkey_json);
|
||||||
|
const char* encrypted_content = cJSON_GetStringValue(content_json);
|
||||||
|
const char* event_id = cJSON_GetStringValue(id_json);
|
||||||
|
|
||||||
|
if (kind != 23458) {
|
||||||
|
app_log(LOG_WARN, "Unexpected event kind: %d", kind);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify sender is admin
|
||||||
|
if (strcmp(sender_pubkey, g_admin_pubkey) != 0) {
|
||||||
|
app_log(LOG_WARN, "Ignoring command from non-admin pubkey: %s", sender_pubkey);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Processing admin command (event ID: %s)", event_id);
|
||||||
|
|
||||||
|
// Convert keys from hex to bytes
|
||||||
|
unsigned char server_privkey[32];
|
||||||
|
unsigned char admin_pubkey_bytes[32];
|
||||||
|
|
||||||
|
if (nostr_hex_to_bytes(g_blossom_seckey, server_privkey, 32) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to convert server private key from hex");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (nostr_hex_to_bytes(sender_pubkey, admin_pubkey_bytes, 32) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to convert admin public key from hex");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decrypt command content using NIP-44
|
||||||
|
char decrypted_command[4096];
|
||||||
|
if (admin_decrypt_command(server_privkey, admin_pubkey_bytes, encrypted_content,
|
||||||
|
decrypted_command, sizeof(decrypted_command)) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to decrypt admin command");
|
||||||
|
|
||||||
|
// Send error response
|
||||||
|
cJSON* error_response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(error_response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(error_response, "message", "Failed to decrypt command");
|
||||||
|
char* error_json = cJSON_PrintUnformatted(error_response);
|
||||||
|
cJSON_Delete(error_response);
|
||||||
|
|
||||||
|
char encrypted_response[4096];
|
||||||
|
if (admin_encrypt_response(server_privkey, admin_pubkey_bytes, error_json,
|
||||||
|
encrypted_response, sizeof(encrypted_response)) == 0) {
|
||||||
|
relay_client_send_admin_response(sender_pubkey, encrypted_response);
|
||||||
|
}
|
||||||
|
free(error_json);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_DEBUG, "Decrypted command: %s", decrypted_command);
|
||||||
|
|
||||||
|
// Parse command JSON
|
||||||
|
cJSON* command_json = cJSON_Parse(decrypted_command);
|
||||||
|
if (!command_json) {
|
||||||
|
app_log(LOG_ERROR, "Failed to parse command JSON");
|
||||||
|
|
||||||
|
cJSON* error_response = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(error_response, "status", "error");
|
||||||
|
cJSON_AddStringToObject(error_response, "message", "Invalid JSON format");
|
||||||
|
char* error_json = cJSON_PrintUnformatted(error_response);
|
||||||
|
cJSON_Delete(error_response);
|
||||||
|
|
||||||
|
char encrypted_response[4096];
|
||||||
|
if (admin_encrypt_response(server_privkey, admin_pubkey_bytes, error_json,
|
||||||
|
encrypted_response, sizeof(encrypted_response)) == 0) {
|
||||||
|
relay_client_send_admin_response(sender_pubkey, encrypted_response);
|
||||||
|
}
|
||||||
|
free(error_json);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Process command and get response
|
||||||
|
cJSON* response_json = admin_commands_process(command_json, event_id);
|
||||||
|
cJSON_Delete(command_json);
|
||||||
|
|
||||||
|
if (!response_json) {
|
||||||
|
app_log(LOG_ERROR, "Failed to process admin command");
|
||||||
|
response_json = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(response_json, "status", "error");
|
||||||
|
cJSON_AddStringToObject(response_json, "message", "Failed to process command");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert response to JSON string
|
||||||
|
char* response_str = cJSON_PrintUnformatted(response_json);
|
||||||
|
cJSON_Delete(response_json);
|
||||||
|
|
||||||
|
if (!response_str) {
|
||||||
|
app_log(LOG_ERROR, "Failed to serialize response JSON");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encrypt and send response
|
||||||
|
char encrypted_response[4096];
|
||||||
|
if (admin_encrypt_response(server_privkey, admin_pubkey_bytes, response_str,
|
||||||
|
encrypted_response, sizeof(encrypted_response)) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to encrypt admin response");
|
||||||
|
free(response_str);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
free(response_str);
|
||||||
|
|
||||||
|
if (relay_client_send_admin_response(sender_pubkey, encrypted_response) != 0) {
|
||||||
|
app_log(LOG_ERROR, "Failed to send admin response");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Callback for EOSE (End Of Stored Events) - new signature
|
||||||
|
static void on_admin_subscription_eose(cJSON** events, int event_count, void* user_data) {
|
||||||
|
(void)events;
|
||||||
|
(void)event_count;
|
||||||
|
(void)user_data;
|
||||||
|
app_log(LOG_INFO, "Received EOSE for admin command subscription");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe to admin commands (Kind 23458)
|
||||||
|
static int subscribe_to_admin_commands(void) {
|
||||||
|
if (!g_relay_state.pool) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Subscribing to Kind 23458 admin commands...");
|
||||||
|
|
||||||
|
// Create subscription filter for Kind 23458 events addressed to us
|
||||||
|
cJSON* filter = cJSON_CreateObject();
|
||||||
|
cJSON* kinds = cJSON_CreateArray();
|
||||||
|
cJSON_AddItemToArray(kinds, cJSON_CreateNumber(23458));
|
||||||
|
cJSON_AddItemToObject(filter, "kinds", kinds);
|
||||||
|
|
||||||
|
cJSON* p_tags = cJSON_CreateArray();
|
||||||
|
cJSON_AddItemToArray(p_tags, cJSON_CreateString(g_blossom_pubkey));
|
||||||
|
cJSON_AddItemToObject(filter, "#p", p_tags);
|
||||||
|
|
||||||
|
cJSON_AddNumberToObject(filter, "since", (double)time(NULL));
|
||||||
|
|
||||||
|
// Subscribe using pool with new API signature
|
||||||
|
g_relay_state.admin_subscription = nostr_relay_pool_subscribe(
|
||||||
|
g_relay_state.pool,
|
||||||
|
(const char**)g_relay_state.relay_urls,
|
||||||
|
g_relay_state.relay_count,
|
||||||
|
filter,
|
||||||
|
on_admin_command_event,
|
||||||
|
on_admin_subscription_eose,
|
||||||
|
NULL, // user_data
|
||||||
|
0, // close_on_eose (keep subscription open)
|
||||||
|
1, // enable_deduplication
|
||||||
|
NOSTR_POOL_EOSE_FULL_SET, // result_mode
|
||||||
|
30, // relay_timeout_seconds
|
||||||
|
30 // eose_timeout_seconds
|
||||||
|
);
|
||||||
|
|
||||||
|
cJSON_Delete(filter);
|
||||||
|
|
||||||
|
if (!g_relay_state.admin_subscription) {
|
||||||
|
app_log(LOG_ERROR, "Failed to create admin command subscription");
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Successfully subscribed to admin commands");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get current relay connection status
|
||||||
|
char *relay_client_get_status(void) {
|
||||||
|
if (!g_relay_state.pool) {
|
||||||
|
return strdup("[]");
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON *root = cJSON_CreateArray();
|
||||||
|
|
||||||
|
pthread_mutex_lock(&g_relay_state.state_mutex);
|
||||||
|
for (int i = 0; i < g_relay_state.relay_count; i++) {
|
||||||
|
cJSON *relay_obj = cJSON_CreateObject();
|
||||||
|
cJSON_AddStringToObject(relay_obj, "url", g_relay_state.relay_urls[i]);
|
||||||
|
|
||||||
|
// Get status from pool
|
||||||
|
nostr_pool_relay_status_t status = nostr_relay_pool_get_relay_status(
|
||||||
|
g_relay_state.pool,
|
||||||
|
g_relay_state.relay_urls[i]
|
||||||
|
);
|
||||||
|
|
||||||
|
const char *state_str;
|
||||||
|
switch (status) {
|
||||||
|
case NOSTR_POOL_RELAY_CONNECTED: state_str = "connected"; break;
|
||||||
|
case NOSTR_POOL_RELAY_CONNECTING: state_str = "connecting"; break;
|
||||||
|
case NOSTR_POOL_RELAY_ERROR: state_str = "error"; break;
|
||||||
|
default: state_str = "disconnected"; break;
|
||||||
|
}
|
||||||
|
cJSON_AddStringToObject(relay_obj, "state", state_str);
|
||||||
|
|
||||||
|
// Get statistics from pool
|
||||||
|
const nostr_relay_stats_t* stats = nostr_relay_pool_get_relay_stats(
|
||||||
|
g_relay_state.pool,
|
||||||
|
g_relay_state.relay_urls[i]
|
||||||
|
);
|
||||||
|
|
||||||
|
if (stats) {
|
||||||
|
cJSON_AddNumberToObject(relay_obj, "events_received", stats->events_received);
|
||||||
|
cJSON_AddNumberToObject(relay_obj, "events_published", stats->events_published);
|
||||||
|
cJSON_AddNumberToObject(relay_obj, "connection_attempts", stats->connection_attempts);
|
||||||
|
cJSON_AddNumberToObject(relay_obj, "connection_failures", stats->connection_failures);
|
||||||
|
|
||||||
|
if (stats->query_latency_avg > 0) {
|
||||||
|
cJSON_AddNumberToObject(relay_obj, "query_latency_ms", stats->query_latency_avg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cJSON_AddItemToArray(root, relay_obj);
|
||||||
|
}
|
||||||
|
pthread_mutex_unlock(&g_relay_state.state_mutex);
|
||||||
|
|
||||||
|
char *json_str = cJSON_PrintUnformatted(root);
|
||||||
|
cJSON_Delete(root);
|
||||||
|
|
||||||
|
return json_str;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Force reconnection to all relays
|
||||||
|
int relay_client_reconnect(void) {
|
||||||
|
if (!g_relay_state.enabled || !g_relay_state.running || !g_relay_state.pool) {
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Forcing reconnection to all relays...");
|
||||||
|
|
||||||
|
// Remove and re-add all relays to force reconnection
|
||||||
|
pthread_mutex_lock(&g_relay_state.state_mutex);
|
||||||
|
for (int i = 0; i < g_relay_state.relay_count; i++) {
|
||||||
|
nostr_relay_pool_remove_relay(g_relay_state.pool, g_relay_state.relay_urls[i]);
|
||||||
|
nostr_relay_pool_add_relay(g_relay_state.pool, g_relay_state.relay_urls[i]);
|
||||||
|
}
|
||||||
|
pthread_mutex_unlock(&g_relay_state.state_mutex);
|
||||||
|
|
||||||
|
app_log(LOG_INFO, "Reconnection initiated for all relays");
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
78
src/relay_client.h
Normal file
78
src/relay_client.h
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
/*
|
||||||
|
* Ginxsom Relay Client - Nostr Relay Connection Manager
|
||||||
|
*
|
||||||
|
* This module enables Ginxsom to act as a Nostr client, connecting to relays
|
||||||
|
* to publish events (Kind 0, Kind 10002) and subscribe to admin commands (Kind 23456).
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef RELAY_CLIENT_H
|
||||||
|
#define RELAY_CLIENT_H
|
||||||
|
|
||||||
|
#include <stddef.h>
|
||||||
|
#include <time.h>
|
||||||
|
|
||||||
|
// Connection states for relay tracking
|
||||||
|
typedef enum {
|
||||||
|
RELAY_STATE_DISCONNECTED = 0,
|
||||||
|
RELAY_STATE_CONNECTING = 1,
|
||||||
|
RELAY_STATE_CONNECTED = 2,
|
||||||
|
RELAY_STATE_ERROR = 3
|
||||||
|
} relay_state_t;
|
||||||
|
|
||||||
|
// Relay connection info (in-memory only)
|
||||||
|
typedef struct {
|
||||||
|
char url[256];
|
||||||
|
relay_state_t state;
|
||||||
|
int reconnect_attempts;
|
||||||
|
time_t last_connect_attempt;
|
||||||
|
time_t connected_since;
|
||||||
|
} relay_info_t;
|
||||||
|
|
||||||
|
// Initialize relay client system
|
||||||
|
// Loads configuration from database and prepares for connections
|
||||||
|
// Returns: 0 on success, -1 on error
|
||||||
|
int relay_client_init(const char *db_path);
|
||||||
|
|
||||||
|
// Start relay connections
|
||||||
|
// Connects to all relays specified in kind_10002_tags config
|
||||||
|
// Publishes Kind 0 and Kind 10002 events after successful connection
|
||||||
|
// Returns: 0 on success, -1 on error
|
||||||
|
int relay_client_start(void);
|
||||||
|
|
||||||
|
// Stop relay connections and cleanup
|
||||||
|
// Gracefully disconnects from all relays and stops background thread
|
||||||
|
void relay_client_stop(void);
|
||||||
|
|
||||||
|
// Check if relay client is enabled
|
||||||
|
// Returns: 1 if enabled, 0 if disabled
|
||||||
|
int relay_client_is_enabled(void);
|
||||||
|
|
||||||
|
// Publish Kind 0 profile event to all connected relays
|
||||||
|
// Uses kind_0_content from config database
|
||||||
|
// Returns: 0 on success, -1 on error
|
||||||
|
int relay_client_publish_kind0(void);
|
||||||
|
|
||||||
|
// Publish Kind 10002 relay list event to all connected relays
|
||||||
|
// Uses kind_10002_tags from config database
|
||||||
|
// Returns: 0 on success, -1 on error
|
||||||
|
int relay_client_publish_kind10002(void);
|
||||||
|
|
||||||
|
// Send Kind 23457 admin response event
|
||||||
|
// Encrypts content using NIP-44 and publishes to all connected relays
|
||||||
|
// Parameters:
|
||||||
|
// - recipient_pubkey: Admin's public key (recipient)
|
||||||
|
// - response_content: JSON response content to encrypt
|
||||||
|
// Returns: 0 on success, -1 on error
|
||||||
|
int relay_client_send_admin_response(const char *recipient_pubkey, const char *response_content);
|
||||||
|
|
||||||
|
// Get current relay connection status
|
||||||
|
// Returns JSON string with relay status (caller must free)
|
||||||
|
// Format: [{"url": "wss://...", "state": "connected", "connected_since": 1234567890}, ...]
|
||||||
|
char *relay_client_get_status(void);
|
||||||
|
|
||||||
|
// Force reconnection to all relays
|
||||||
|
// Disconnects and reconnects to all configured relays
|
||||||
|
// Returns: 0 on success, -1 on error
|
||||||
|
int relay_client_reconnect(void);
|
||||||
|
|
||||||
|
#endif // RELAY_CLIENT_H
|
||||||
@@ -529,7 +529,7 @@ int nostr_validate_unified_request(const nostr_unified_request_t *request,
|
|||||||
"VALIDATOR_DEBUG: STEP 10 FAILED - NIP-42 requires request_url and "
|
"VALIDATOR_DEBUG: STEP 10 FAILED - NIP-42 requires request_url and "
|
||||||
"challenge (from event tags)\n");
|
"challenge (from event tags)\n");
|
||||||
result->valid = 0;
|
result->valid = 0;
|
||||||
result->error_code = NOSTR_ERROR_NIP42_NOT_CONFIGURED;
|
result->error_code = NOSTR_ERROR_NIP42_INVALID_CHALLENGE;
|
||||||
strcpy(result->reason, "NIP-42 authentication requires request_url and challenge in event tags");
|
strcpy(result->reason, "NIP-42 authentication requires request_url and challenge in event tags");
|
||||||
cJSON_Delete(event);
|
cJSON_Delete(event);
|
||||||
return NOSTR_SUCCESS;
|
return NOSTR_SUCCESS;
|
||||||
@@ -549,15 +549,12 @@ int nostr_validate_unified_request(const nostr_unified_request_t *request,
|
|||||||
|
|
||||||
// Map specific NIP-42 error codes to detailed error messages
|
// Map specific NIP-42 error codes to detailed error messages
|
||||||
switch (nip42_result) {
|
switch (nip42_result) {
|
||||||
case NOSTR_ERROR_NIP42_CHALLENGE_NOT_FOUND:
|
case NOSTR_ERROR_NIP42_INVALID_CHALLENGE:
|
||||||
strcpy(result->reason, "Challenge not found or has been used. Request a new challenge from /auth endpoint.");
|
strcpy(result->reason, "Challenge not found or invalid. Request a new challenge from /auth endpoint.");
|
||||||
break;
|
break;
|
||||||
case NOSTR_ERROR_NIP42_CHALLENGE_EXPIRED:
|
case NOSTR_ERROR_NIP42_CHALLENGE_EXPIRED:
|
||||||
strcpy(result->reason, "Challenge has expired. Request a new challenge from /auth endpoint.");
|
strcpy(result->reason, "Challenge has expired. Request a new challenge from /auth endpoint.");
|
||||||
break;
|
break;
|
||||||
case NOSTR_ERROR_NIP42_INVALID_CHALLENGE:
|
|
||||||
strcpy(result->reason, "Invalid challenge format. Challenge must be a valid hex string.");
|
|
||||||
break;
|
|
||||||
case NOSTR_ERROR_NIP42_URL_MISMATCH:
|
case NOSTR_ERROR_NIP42_URL_MISMATCH:
|
||||||
strcpy(result->reason, "Relay URL in auth event does not match server. Use 'ginxsom' as relay value.");
|
strcpy(result->reason, "Relay URL in auth event does not match server. Use 'ginxsom' as relay value.");
|
||||||
break;
|
break;
|
||||||
@@ -576,12 +573,6 @@ int nostr_validate_unified_request(const nostr_unified_request_t *request,
|
|||||||
case NOSTR_ERROR_EVENT_INVALID_TAGS:
|
case NOSTR_ERROR_EVENT_INVALID_TAGS:
|
||||||
strcpy(result->reason, "Required tags missing. Auth event must include 'relay' and 'expiration' tags.");
|
strcpy(result->reason, "Required tags missing. Auth event must include 'relay' and 'expiration' tags.");
|
||||||
break;
|
break;
|
||||||
case NOSTR_ERROR_NIP42_INVALID_RELAY_URL:
|
|
||||||
strcpy(result->reason, "Invalid relay URL in tags. Use 'ginxsom' as the relay identifier.");
|
|
||||||
break;
|
|
||||||
case NOSTR_ERROR_NIP42_NOT_CONFIGURED:
|
|
||||||
strcpy(result->reason, "NIP-42 authentication not properly configured on server.");
|
|
||||||
break;
|
|
||||||
default:
|
default:
|
||||||
snprintf(result->reason, sizeof(result->reason),
|
snprintf(result->reason, sizeof(result->reason),
|
||||||
"NIP-42 authentication failed (error code: %d). Check event structure and signature.",
|
"NIP-42 authentication failed (error code: %d). Check event structure and signature.",
|
||||||
@@ -1907,7 +1898,7 @@ static int validate_challenge(const char *challenge_id) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
validator_debug_log("NIP-42: Challenge not found\n");
|
validator_debug_log("NIP-42: Challenge not found\n");
|
||||||
return NOSTR_ERROR_NIP42_CHALLENGE_NOT_FOUND;
|
return NOSTR_ERROR_NIP42_INVALID_CHALLENGE;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
199
tests/23458_test.sh
Executable file
199
tests/23458_test.sh
Executable file
@@ -0,0 +1,199 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Simple test for Kind 23458 relay-based admin commands
|
||||||
|
# Tests config_query command via Nostr relay subscription
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Configuration
|
||||||
|
TEST_KEYS_FILE=".test_keys"
|
||||||
|
RELAY_URL="wss://relay.laantungir.net"
|
||||||
|
|
||||||
|
# Colors
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
NC='\033[0m'
|
||||||
|
|
||||||
|
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
|
||||||
|
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
|
||||||
|
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
|
||||||
|
|
||||||
|
# Load test keys
|
||||||
|
if [[ ! -f "$TEST_KEYS_FILE" ]]; then
|
||||||
|
log_error "$TEST_KEYS_FILE not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
source "$TEST_KEYS_FILE"
|
||||||
|
|
||||||
|
# Check dependencies
|
||||||
|
for cmd in nak jq websocat; do
|
||||||
|
if ! command -v $cmd &> /dev/null; then
|
||||||
|
log_error "$cmd is not installed"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "=== Kind 23458 Admin Command Test ==="
|
||||||
|
echo ""
|
||||||
|
log_info "Configuration:"
|
||||||
|
log_info " Admin Privkey: ${ADMIN_PRIVKEY:0:16}..."
|
||||||
|
log_info " Server Pubkey: $SERVER_PUBKEY"
|
||||||
|
log_info " Relay URL: $RELAY_URL"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Test 1: Send config_query command
|
||||||
|
log_info "Test: Sending config_query command"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Encrypt command with NIP-44
|
||||||
|
# Command format: ["config_query"]
|
||||||
|
PLAINTEXT_COMMAND='["config_query"]'
|
||||||
|
|
||||||
|
log_info "Encrypting command with NIP-44..."
|
||||||
|
ENCRYPTED_COMMAND=$(nak encrypt --sec "$ADMIN_PRIVKEY" -p "$SERVER_PUBKEY" "$PLAINTEXT_COMMAND")
|
||||||
|
|
||||||
|
if [[ -z "$ENCRYPTED_COMMAND" ]]; then
|
||||||
|
log_error "Failed to encrypt command"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_success "Command encrypted"
|
||||||
|
log_info "Encrypted content: ${ENCRYPTED_COMMAND:0:50}..."
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
log_info "Creating Kind 23458 event..."
|
||||||
|
EVENT=$(nak event -k 23458 \
|
||||||
|
-c "$ENCRYPTED_COMMAND" \
|
||||||
|
--tag p="$SERVER_PUBKEY" \
|
||||||
|
--sec "$ADMIN_PRIVKEY")
|
||||||
|
|
||||||
|
if [[ -z "$EVENT" ]]; then
|
||||||
|
log_error "Failed to create event"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_success "Event created"
|
||||||
|
echo "$EVENT" | jq .
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Step 1: Create pipes for bidirectional communication
|
||||||
|
log_info "Step 1: Setting up websocat connection..."
|
||||||
|
SINCE=$(date +%s)
|
||||||
|
|
||||||
|
# Create named pipes for input and output
|
||||||
|
INPUT_PIPE=$(mktemp -u)
|
||||||
|
OUTPUT_PIPE=$(mktemp -u)
|
||||||
|
mkfifo "$INPUT_PIPE"
|
||||||
|
mkfifo "$OUTPUT_PIPE"
|
||||||
|
|
||||||
|
# Start websocat in background with bidirectional communication
|
||||||
|
(websocat "$RELAY_URL" < "$INPUT_PIPE" > "$OUTPUT_PIPE" 2>/dev/null) &
|
||||||
|
WEBSOCAT_PID=$!
|
||||||
|
|
||||||
|
# Open pipes for writing and reading
|
||||||
|
exec 3>"$INPUT_PIPE" # File descriptor 3 for writing
|
||||||
|
exec 4<"$OUTPUT_PIPE" # File descriptor 4 for reading
|
||||||
|
|
||||||
|
# Give connection time to establish
|
||||||
|
sleep 1
|
||||||
|
log_success "WebSocket connection established"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Step 2: Subscribe to Kind 23459 responses
|
||||||
|
log_info "Step 2: Subscribing to Kind 23459 responses..."
|
||||||
|
|
||||||
|
# Create subscription filter
|
||||||
|
SUBSCRIPTION_FILTER='["REQ","admin-response",{"kinds":[23459],"authors":["'$SERVER_PUBKEY'"],"#p":["'$ADMIN_PUBKEY'"],"since":'$SINCE'}]'
|
||||||
|
|
||||||
|
# Send subscription
|
||||||
|
echo "$SUBSCRIPTION_FILTER" >&3
|
||||||
|
sleep 1
|
||||||
|
log_success "Subscription sent"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Step 3: Publish the command event
|
||||||
|
log_info "Step 3: Publishing Kind 23458 command event..."
|
||||||
|
|
||||||
|
# Create EVENT message
|
||||||
|
EVENT_MSG='["EVENT",'$EVENT']'
|
||||||
|
|
||||||
|
# Send event
|
||||||
|
echo "$EVENT_MSG" >&3
|
||||||
|
sleep 1
|
||||||
|
log_success "Event published"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Step 4: Wait for response
|
||||||
|
log_info "Step 4: Waiting for Kind 23459 response (timeout: 15s)..."
|
||||||
|
|
||||||
|
RESPONSE_RECEIVED=0
|
||||||
|
TIMEOUT=15
|
||||||
|
START_TIME=$(date +%s)
|
||||||
|
|
||||||
|
while [[ $(($(date +%s) - START_TIME)) -lt $TIMEOUT ]]; do
|
||||||
|
if read -t 1 -r line <&4; then
|
||||||
|
if [[ -n "$line" ]]; then
|
||||||
|
# Parse the relay message
|
||||||
|
MSG_TYPE=$(echo "$line" | jq -r '.[0] // empty' 2>/dev/null)
|
||||||
|
|
||||||
|
if [[ "$MSG_TYPE" == "EVENT" ]]; then
|
||||||
|
# Extract the event (third element in array)
|
||||||
|
EVENT_DATA=$(echo "$line" | jq '.[2]' 2>/dev/null)
|
||||||
|
|
||||||
|
if [[ -n "$EVENT_DATA" ]]; then
|
||||||
|
log_success "Received Kind 23459 response!"
|
||||||
|
echo "$EVENT_DATA" | jq .
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Extract and decrypt content
|
||||||
|
ENCRYPTED_CONTENT=$(echo "$EVENT_DATA" | jq -r '.content // empty')
|
||||||
|
SENDER_PUBKEY=$(echo "$EVENT_DATA" | jq -r '.pubkey // empty')
|
||||||
|
|
||||||
|
if [[ -n "$ENCRYPTED_CONTENT" ]] && [[ -n "$SENDER_PUBKEY" ]]; then
|
||||||
|
log_info "Encrypted response: ${ENCRYPTED_CONTENT:0:50}..."
|
||||||
|
log_info "Sender pubkey: $SENDER_PUBKEY"
|
||||||
|
log_info "Decrypting response..."
|
||||||
|
|
||||||
|
# Try decryption with error output and timeout
|
||||||
|
DECRYPT_OUTPUT=$(timeout 5s nak decrypt --sec "$ADMIN_PRIVKEY" -p "$SENDER_PUBKEY" "$ENCRYPTED_CONTENT" 2>&1)
|
||||||
|
DECRYPT_EXIT=$?
|
||||||
|
|
||||||
|
if [[ $DECRYPT_EXIT -eq 0 ]] && [[ -n "$DECRYPT_OUTPUT" ]]; then
|
||||||
|
log_success "Response decrypted successfully:"
|
||||||
|
echo "$DECRYPT_OUTPUT" | jq . 2>/dev/null || echo "$DECRYPT_OUTPUT"
|
||||||
|
RESPONSE_RECEIVED=1
|
||||||
|
else
|
||||||
|
log_error "Failed to decrypt response (exit code: $DECRYPT_EXIT)"
|
||||||
|
if [[ -n "$DECRYPT_OUTPUT" ]]; then
|
||||||
|
log_error "Decryption error: $DECRYPT_OUTPUT"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Cleanup
|
||||||
|
exec 3>&- # Close write pipe
|
||||||
|
exec 4<&- # Close read pipe
|
||||||
|
kill $WEBSOCAT_PID 2>/dev/null
|
||||||
|
rm -f "$INPUT_PIPE" "$OUTPUT_PIPE"
|
||||||
|
|
||||||
|
if [[ $RESPONSE_RECEIVED -eq 0 ]]; then
|
||||||
|
log_error "No response received within timeout period"
|
||||||
|
log_info "This could mean:"
|
||||||
|
log_info " 1. The server didn't receive the command"
|
||||||
|
log_info " 2. The server received but didn't process the command"
|
||||||
|
log_info " 3. The response was sent but not received by subscription"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
log_success "Test complete!"
|
||||||
|
echo ""
|
||||||
|
log_info "This test uses full NIP-44 encryption for both commands and responses."
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
# Ginxsom Admin Event Test Script
|
# Ginxsom Admin Event Test Script
|
||||||
# Tests Kind 23456/23457 admin command system with NIP-44 encryption
|
# Tests Kind 23458/23459 admin command system with NIP-44 encryption
|
||||||
#
|
#
|
||||||
# Prerequisites:
|
# Prerequisites:
|
||||||
# - nak: https://github.com/fiatjaf/nak
|
# - nak: https://github.com/fiatjaf/nak
|
||||||
@@ -72,12 +72,12 @@ check_dependencies() {
|
|||||||
log_success "All dependencies found"
|
log_success "All dependencies found"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Create NIP-44 encrypted admin command event (Kind 23456)
|
# Create NIP-44 encrypted admin command event (Kind 23458)
|
||||||
create_admin_command_event() {
|
create_admin_command_event() {
|
||||||
local command="$1"
|
local command="$1"
|
||||||
local expiration=$(($(date +%s) + 3600)) # 1 hour from now
|
local expiration=$(($(date +%s) + 3600)) # 1 hour from now
|
||||||
|
|
||||||
log_info "Creating Kind 23456 admin command event..."
|
log_info "Creating Kind 23458 admin command event..."
|
||||||
log_info "Command: $command"
|
log_info "Command: $command"
|
||||||
|
|
||||||
# For now, we'll create the event structure manually since nak may not support NIP-44 encryption yet
|
# For now, we'll create the event structure manually since nak may not support NIP-44 encryption yet
|
||||||
@@ -87,9 +87,9 @@ create_admin_command_event() {
|
|||||||
local content="[\"$command\"]"
|
local content="[\"$command\"]"
|
||||||
|
|
||||||
# Create event with nak
|
# Create event with nak
|
||||||
# Kind 23456 = admin command
|
# Kind 23458 = admin command
|
||||||
# Tags: p = server pubkey, expiration
|
# Tags: p = server pubkey, expiration
|
||||||
local event=$(nak event -k 23456 \
|
local event=$(nak event -k 23458 \
|
||||||
-c "$content" \
|
-c "$content" \
|
||||||
--tag p="$SERVER_PUBKEY" \
|
--tag p="$SERVER_PUBKEY" \
|
||||||
--tag expiration="$expiration" \
|
--tag expiration="$expiration" \
|
||||||
@@ -104,7 +104,7 @@ send_admin_command() {
|
|||||||
|
|
||||||
log_info "=== Testing Admin Command: $command ==="
|
log_info "=== Testing Admin Command: $command ==="
|
||||||
|
|
||||||
# Create Kind 23456 event
|
# Create Kind 23458 event
|
||||||
local event=$(create_admin_command_event "$command")
|
local event=$(create_admin_command_event "$command")
|
||||||
|
|
||||||
if [[ -z "$event" ]]; then
|
if [[ -z "$event" ]]; then
|
||||||
@@ -132,10 +132,10 @@ send_admin_command() {
|
|||||||
log_success "HTTP $http_code - Response received"
|
log_success "HTTP $http_code - Response received"
|
||||||
echo "$body" | jq . 2>/dev/null || echo "$body"
|
echo "$body" | jq . 2>/dev/null || echo "$body"
|
||||||
|
|
||||||
# Try to parse as Kind 23457 event
|
# Try to parse as Kind 23459 event
|
||||||
local kind=$(echo "$body" | jq -r '.kind // empty' 2>/dev/null)
|
local kind=$(echo "$body" | jq -r '.kind // empty' 2>/dev/null)
|
||||||
if [[ "$kind" == "23457" ]]; then
|
if [[ "$kind" == "23459" ]]; then
|
||||||
log_success "Received Kind 23457 response event"
|
log_success "Received Kind 23459 response event"
|
||||||
local response_content=$(echo "$body" | jq -r '.content // empty' 2>/dev/null)
|
local response_content=$(echo "$body" | jq -r '.content // empty' 2>/dev/null)
|
||||||
log_info "Response content (encrypted): $response_content"
|
log_info "Response content (encrypted): $response_content"
|
||||||
# TODO: Decrypt NIP-44 content to see actual response
|
# TODO: Decrypt NIP-44 content to see actual response
|
||||||
@@ -174,7 +174,7 @@ test_server_health() {
|
|||||||
|
|
||||||
main() {
|
main() {
|
||||||
echo "=== Ginxsom Admin Event Test Suite ==="
|
echo "=== Ginxsom Admin Event Test Suite ==="
|
||||||
echo "Testing Kind 23456/23457 admin command system"
|
echo "Testing Kind 23458/23459 admin command system"
|
||||||
echo ""
|
echo ""
|
||||||
|
|
||||||
log_info "Test Configuration:"
|
log_info "Test Configuration:"
|
||||||
|
|||||||
Reference in New Issue
Block a user