Compare commits

...

130 Commits

Author SHA1 Message Date
Your Name
b276b44ded v1.0.4 - Fixed web socket limitation with the number of npubs in a subscription 2025-11-07 19:59:34 -05:00
laan tungir
3792649ed9 v1.0.3 - From remote 2025-11-07 14:07:46 -05:00
laan tungir
5f08956605 removed some files 2025-11-03 07:33:12 -05:00
Your Name
643d89ed7b v1.0.0 - First major release 2025-11-01 07:04:56 -04:00
Your Name
8ca459593c Reset version back to v0.8.6 2025-11-01 07:03:39 -04:00
Your Name
ee4208cc19 v7.0.0 - Version 1.0.0 2025-11-01 06:56:02 -04:00
Your Name
f6330e4bb8 v7.0.0 - Version 1.0.0 2025-10-31 11:17:34 -04:00
Your Name
4f3cf10a5c v6.0.0 - Test fixed output redirection 2025-10-31 11:17:19 -04:00
Your Name
aa1954e81e v6.0.0 - Version 1.0.0 2025-10-31 11:15:32 -04:00
Your Name
482597bd0e v5.0.0 - Test fixed release ID extraction 2025-10-31 11:15:16 -04:00
Your Name
d4b90e681c v5.0.0 - Version 1.0.0 2025-10-31 11:13:41 -04:00
Your Name
fcf9e43c4c v4.0.0 - Debug upload with existing binary 2025-10-31 11:13:21 -04:00
Your Name
b8d8cd19d3 v4.0.0 - Debug upload issue 2025-10-31 11:12:58 -04:00
Your Name
536c2d966c v4.0.0 - Version 1.0.0 2025-10-31 11:11:31 -04:00
Your Name
f49cb0a5ac v3.0.0 - Test release upload fix 2025-10-31 11:10:53 -04:00
Your Name
cef6bb2340 v3.0.0 - Version 1.0.0 2025-10-31 11:08:18 -04:00
Your Name
4c03253b30 v2.0.0 - Version 1.0.0 2025-10-31 11:06:27 -04:00
Your Name
ed09bb7370 v1.0.0 - Version 1.0.0 2025-10-31 10:39:06 -04:00
Your Name
5c46a25173 v0.8.5 - cleanup 2025-10-30 07:06:48 -04:00
Your Name
d1538f00df v0.8.4 - Updated increment_and_push.sh 2025-10-30 07:03:22 -04:00
Your Name
afa4acbbfb v0.8.3 - --dry-run 2025-10-30 06:52:05 -04:00
Your Name
d9a530485f v0.8.2 - markdown intro 2025-10-29 07:53:56 -04:00
Your Name
b2ad70b028 v0.8.1 - added screenshots 2025-10-29 07:39:08 -04:00
Your Name
f49aae8ab0 v0.7.44 - Release v0.8.0 with NIP-59 timestamp randomization and status command fixes 2025-10-27 13:21:47 -04:00
Your Name
f6debcf799 v0.7.44 - Release v0.8.0 with NIP-59 timestamp randomization and status command fixes 2025-10-27 13:19:58 -04:00
Your Name
edbc4f1359 v0.7.43 - Add plain text 'status' command handler for NIP-17 DMs 2025-10-27 13:19:10 -04:00
Your Name
5242f066e7 Update nostr_core_lib with timestamp randomization feature 2025-10-27 12:59:19 -04:00
Your Name
af186800fa v0.7.42 - Fix ephemeral event storage and document monitoring system 2025-10-26 15:02:00 -04:00
Your Name
2bff4a5f44 v0.7.41 - Fix SQL query routing in admin API - add missing sql_query case to handle_kind_23456_unified 2025-10-26 13:34:16 -04:00
Your Name
edb73d50cf v0.7.40 - Removed event_broadcasts table and related code to fix FOREIGN KEY constraint failures preventing event insertion 2025-10-25 15:26:31 -04:00
Your Name
3dc09d55fd v0.7.39 - Set DMs back 2 days to adjust for timestamp randomization of giftwraps. 2025-10-23 18:43:45 -03:00
Your Name
079fb1b0f5 v0.7.38 - Fixed error upon startup with existing db 2025-10-23 11:17:16 -04:00
Your Name
17b2aa8111 v0.7.37 - Enhanced admin interface with sliding sidebar navigation, moved dark mode and logout to sidebar footer, improved button styling consistency 2025-10-22 12:43:09 -04:00
Your Name
78d484cfe0 v0.7.36 - Implement sliding side navigation menu with page switching for admin sections 2025-10-22 11:01:30 -04:00
Your Name
182e12817d v0.7.35 - Implement event-driven monitoring system with dual triggers for events and subscriptions 2025-10-22 10:48:57 -04:00
Your Name
9179d57cc9 v0.7.34 - We may have finally fixed the monitoring error? 2025-10-22 10:19:43 -04:00
Your Name
9cb9b746d8 v0.7.33 - Refactor monitoring system to use subscription-based activation with ephemeral events - fixes recursive crash bug 2025-10-19 10:26:09 -04:00
Your Name
57a0089664 v0.7.32 - Implement ephemeral event bypass (NIP-01) - events with kinds 20000-29999 are now broadcast to subscriptions but never stored in database, preventing recursive monitoring event loops 2025-10-19 09:38:02 -04:00
Your Name
53f7608872 v0.7.31 - Fixed production crash by replacing in-memory subscription iteration with database queries in monitoring system 2025-10-18 18:09:13 -04:00
Your Name
838ce5b45a v0.7.30 - Update increment and push script 2025-10-18 15:04:45 -04:00
Your Name
e878b9557e v0.7.29 - Update increment and push script 2025-10-18 14:57:34 -04:00
Your Name
6638d37d6f v0.7.28 - Update increment and push script 2025-10-18 14:55:51 -04:00
Your Name
4c29e15329 v0.7.27 - Update increment and push script 2025-10-18 14:53:37 -04:00
Your Name
48890a2121 v0.7.26 - Tidy up api 2025-10-18 14:48:16 -04:00
Your Name
e312d7e18c v0.7.25 - Implement SQL Query Admin API
- Move non-NIP-17 admin functions from dm_admin.c to api.c for better architecture
- Add NIP-44 encryption to send_admin_response() for secure admin responses
- Implement SQL query validation and execution with safety limits
- Add unified SQL query handler for admin API
- Fix buffer size for encrypted content to handle larger responses
- Update function declarations and includes across files
- Successfully test frontend query execution through web interface
2025-10-16 15:41:21 -04:00
Your Name
6c38aaebf3 v0.7.24 - Fix admin API subscription issues: NIP-17 historical events and relay pubkey timing 2025-10-16 06:27:01 -04:00
Your Name
18b0ac44bf v0.7.23 - Remove sticky positioning from main header to prevent floating behavior 2025-10-15 19:22:42 -04:00
Your Name
b6749eff2f v0.7.22 - Fix compiler warnings in c_utils_lib version.c by adding proper includes for popen/pclose functions 2025-10-15 19:20:14 -04:00
Your Name
c73a103280 v0.7.21 - Remove manual relay connection UI and implement automatic background connection with seamless data loading 2025-10-15 16:50:22 -04:00
Your Name
a5d194f730 v0.7.20 - Fix automatic relay connection for restored authentication state 2025-10-15 15:47:02 -04:00
Your Name
6320436b88 v0.7.19 - Implement automatic relay connection after login with authentication error handling 2025-10-15 15:41:18 -04:00
Your Name
87325927ed v0.7.18 - Fixed duplicate login modal bug and improved header layout 2025-10-15 15:31:44 -04:00
Your Name
4435cdf5b6 Add c_utils_lib as submodule with debug and version utilities 2025-10-15 10:29:35 -04:00
Your Name
b041654611 v0.7.17 - Fixed critical race condition in CLOSE message handler causing segfault during subscription storms 2025-10-15 09:10:18 -04:00
Your Name
e833dcefd4 v0.7.16 - Fixed blacklist authentication system - removed redundant action/parameters columns, added active=1 filtering, added comprehensive debug tracing, and identified that auth must be enabled for blacklist to work 2025-10-14 13:07:19 -04:00
Your Name
29680f0ee8 v0.7.15 - Fixed race condition in subscription management causing intermittent core dumps and format truncation warning 2025-10-14 11:34:55 -04:00
Your Name
670329700c v0.7.14 - Remove unified config cache system and fix first-time startup - All config values now queried directly from database, eliminating cache inconsistency bugs. Fixed startup sequence to use output parameters for pubkey passing. 2025-10-13 19:06:27 -04:00
Your Name
62e17af311 v0.7.13 - -t 2025-10-13 16:35:26 -04:00
Your Name
e3938a2c85 v0.7.12 - Implemented comprehensive debug system with 6 levels (0-5), file:line tracking at TRACE level, deployment script integration, and default level 5 for development 2025-10-13 12:44:18 -04:00
Your Name
49ffc3d99e v0.7.11 - Got api back working after switching to static build 2025-10-12 14:54:02 -04:00
Your Name
34bb1c34a2 v0.7.10 - Fixed api errors in accepting : in subscriptions 2025-10-12 10:31:03 -04:00
Your Name
b27a56a296 v0.7.9 - Optimize Docker build caching and enforce static binary usage
- Restructure Dockerfile.alpine-musl for better layer caching
  * Build dependencies (secp256k1, libwebsockets) in separate cached layers
  * Copy submodules before source files to maximize cache hits
  * Reduce rebuild time from ~2-3 minutes to ~10-15 seconds for source changes
- Remove 'musl' from binary names (c_relay_static_x86_64 instead of c_relay_static_musl_x86_64)
- Enforce static binary usage in make_and_restart_relay.sh
  * Remove all fallbacks to regular make builds
  * Exit with clear error if static binary not found
  * Ensures JSON1 extension is always available
- Fix build_static.sh hanging on ldd check with timeout
- Remove sudo usage from build_static.sh (assumes docker group membership)

These changes ensure consistent builds with JSON1 support and dramatically improve
development iteration speed through intelligent Docker layer caching.
2025-10-11 11:08:01 -04:00
Your Name
ecd7095123 v0.7.8 - Fully static builds implemented with musl-gcc 2025-10-11 10:51:03 -04:00
Your Name
d449513861 Add MUSL static binary build system using Alpine Docker
- Create Dockerfile.alpine-musl for truly portable static binaries
- Update build_static.sh to use Docker with sudo fallback
- Fix source code portability issues for MUSL:
  * Add missing headers in config.c, dm_admin.c
  * Remove glibc-specific headers in nip009.c, subscriptions.c
- Update nostr_core_lib submodule with fortification fix
- Add comprehensive documentation in docs/musl_static_build.md

Binary characteristics:
- Size: 7.6MB (vs 12MB+ for glibc static)
- Dependencies: Zero (truly portable)
- Compatibility: Any Linux distribution
- Build time: ~2 minutes with Docker caching

Resolves fortification symbol issues (__snprintf_chk, __fprintf_chk)
that prevented MUSL static linking.
2025-10-11 10:17:20 -04:00
Your Name
6709e229b3 v0.7.7 - Prevent SQL injection and add rate limiting on subscriptions 2025-10-10 15:44:10 -04:00
Your Name
00a8f16262 v0.7.6 - Delete more old debugging prints 2025-10-10 13:38:18 -04:00
Your Name
00d16f8615 v0.7.5 - Complete debug logging cleanup - remove all remaining DEBUG messages from websockets.c, config.c, and dm_admin.c 2025-10-10 10:52:14 -04:00
Your Name
c90676d2b2 v0.7.4 - Remove excessive debug logging from entire codebase - preserve user-facing error logging 2025-10-10 10:21:30 -04:00
Your Name
b89c011ad5 v0.7.2 - -m 2025-10-10 06:53:30 -04:00
Your Name
c3de31aa88 v0.7.1 - Implemented static binary build system for cross-distribution compatibility 2025-10-09 14:36:32 -04:00
Your Name
b6df0be865 v0.6.0 - Fixed binary upload in release script - now shows upload errors and handles failures properly 2025-10-09 12:59:23 -04:00
Your Name
a89f84f76e v0.5.0 - New release 2025-10-09 12:51:53 -04:00
Your Name
5a916cc221 Reupload 2025-10-09 10:43:42 -04:00
Your Name
dcf421ff93 v0.4.13 - DM system appears fully functional 2025-10-08 07:11:22 -04:00
Your Name
d655258311 v0.4.12 - Refactor NIP-17 admin commands: eliminate ~400 lines of duplicated code with unified helper functions, fix SQL query bugs, and remove unused parameters 2025-10-06 18:49:25 -04:00
Your Name
f6d13d4318 v0.4.11 - Fixed nasty DM bug 2025-10-06 10:06:24 -04:00
Your Name
d5350d7c30 v0.4.10 - api 2025-10-05 14:35:09 -04:00
Your Name
c63fd04c92 v0.4.9 - Working on dm admin 2025-10-04 19:04:12 -04:00
Your Name
64b418a551 v0.4.8 - Implement web server functionality for embedded admin interface - serve HTML/CSS/JS from /api/ endpoint with proper MIME types, CORS headers, and performance optimizations 2025-10-04 12:45:35 -04:00
Your Name
36c9c84047 v0.4.7 - Implement NIP-70 Protected Events - Add protected event support with authentication checks, comprehensive testing, and relay metadata protection 2025-10-03 06:44:27 -04:00
Your Name
88b4aaa301 v0.4.6 - Implement NIP-50 search functionality with LIKE-based content and tag searching 2025-10-03 05:43:49 -04:00
Your Name
eac4c227c9 v0.4.5 - Fix NIP-45 COUNT test to account for existing relay events and handle replaceable events correctly 2025-10-03 05:19:39 -04:00
Your Name
d5eb7d4a55 v0.4.4 - Just waking up 2025-10-03 04:52:40 -04:00
Your Name
80b15e16e2 v0.4.3 - feat: Implement dynamic configuration updates without restart
- Add cache refresh mechanism for config updates
- Implement selective re-initialization for NIP-11 relay info changes
- Categorize configs as dynamic vs restart-required using requires_restart field
- Enhance admin API responses with restart requirement information
- Add comprehensive test for dynamic config updates
- Update documentation for dynamic configuration capabilities

Most relay settings can now be updated via admin API without requiring restart, improving operational flexibility while maintaining stability for critical changes.
2025-10-02 15:53:26 -04:00
Your Name
cfacedbb1a v0.4.2 - Implement NIP-11 Relay Information Document with event-based configuration - make relay info dynamically configurable via admin API 2025-10-02 11:38:28 -04:00
Your Name
c3bab033ed v0.4.1 - Fixed startup bug 2025-10-01 17:23:50 -04:00
Your Name
524f9bd84f Last push before major bug fixes 2025-10-01 14:53:20 -04:00
Your Name
4658ede9d6 feat: Implement auth rules enforcement and fix subscription filtering issues
- **Auth Rules Implementation**: Added blacklist/whitelist enforcement in websockets.c
  - Events are now checked against auth_rules table before acceptance
  - Blacklist blocks specific pubkeys, whitelist enables allow-only mode
  - Made check_database_auth_rules() public for cross-module access

- **Subscription Filtering Fixes**:
  - Added missing 'ids' filter support in SQL query building
  - Fixed test expectations to not require exact event counts for kind filters
  - Improved filter validation and error handling

- **Ephemeral Events Compliance**:
  - Modified SQL queries to exclude kinds 20000-29999 from historical queries
  - Maintains broadcasting to active subscribers while preventing storage/retrieval
  - Ensures NIP-01 compliance for ephemeral event handling

- **Comprehensive Testing**:
  - Created white_black_test.sh with full blacklist/whitelist functionality testing
  - Tests verify blocked posting for blacklisted users
  - Tests verify whitelist-only mode when whitelist rules exist
  - Includes proper auth rule clearing between test phases

- **Code Quality**:
  - Added proper function declarations to websockets.h
  - Improved error handling and logging throughout
  - Enhanced test script with clear pass/fail reporting
2025-09-30 15:17:59 -04:00
Your Name
f7b463aca1 Fixing whitelist and blacklist functionality 2025-09-30 15:02:49 -04:00
Your Name
c1a6e92b1d v0.3.19 - last save before major refactoring 2025-09-30 10:47:11 -04:00
Your Name
eefb0e427e v0.3.18 - index.html improvements 2025-09-30 07:51:23 -04:00
Your Name
c23d81b740 v0.3.17 - Embedded login button 2025-09-30 06:47:09 -04:00
Your Name
6dac231040 v0.3.16 - Admin system getting better 2025-09-30 05:32:23 -04:00
Your Name
6fd3e531c3 v0.3.15 - How can administration take so long 2025-09-27 15:50:42 -04:00
Your Name
c1c05991cf v0.3.14 - I think the admin api is finally working 2025-09-27 14:08:45 -04:00
Your Name
ab378e14d1 v0.3.13 - Working on admin system 2025-09-27 13:32:21 -04:00
Your Name
c0f9bf9ef5 v0.3.12 - Working through auth still 2025-09-25 17:33:38 -04:00
Your Name
bc6a7b3f20 Working on API 2025-09-25 16:35:16 -04:00
Your Name
036b0823b9 v0.3.11 - Working on admin api 2025-09-25 11:25:50 -04:00
Your Name
be99595bde v0.3.10 - . 2025-09-24 10:49:48 -04:00
Your Name
01836a4b4c v0.3.9 - API work 2025-09-21 15:53:03 -04:00
Your Name
9f3b3dd773 v0.3.8 - safety push 2025-09-18 10:18:15 -04:00
Your Name
3210b9e752 v0.3.7 - working on config api 2025-09-16 15:52:27 -04:00
Your Name
2d66b8bf1d . 2025-09-15 20:34:00 -04:00
Your Name
f3d6afead1 v0.3.5 - nip42 implemented 2025-09-13 08:49:09 -04:00
Your Name
1690b58c67 v0.3.4 - Implement secure relay private key storage
- Add relay_seckey table for secure private key storage
- Implement store_relay_private_key() and get_relay_private_key() functions
- Remove relay private key from public configuration events (kind 33334)
- Update first-time startup sequence to store keys securely after DB init
- Add proper validation and error handling for private key operations
- Fix timing issue where private key storage was attempted before DB initialization
- Security improvement: relay private keys no longer exposed in public events
2025-09-07 07:35:51 -04:00
Your Name
2e8eda5c67 v0.3.3 - Fix function naming consistency: rename find_existing_nrdb_files to find_existing_db_files
- Update function declaration in config.h
- Update function definition in config.c
- Update function calls in config.c and main.c
- Maintain consistency with .db file extension naming convention

This resolves the inconsistency between database file extension (.db) and function names (nrdb)
2025-09-07 06:58:50 -04:00
Your Name
74a4dc2533 v0.3.2 - Implement -p/--port CLI option for first-time startup port override
- Add cli_options_t structure for extensible command line options
- Implement port override in create_default_config_event()
- Update main() with robust CLI parsing and validation
- Add comprehensive help text documenting first-time only behavior
- Ensure CLI options only affect initial configuration event creation
- Maintain event-based configuration architecture for ongoing operation
- Include comprehensive error handling and input validation
- Add documentation in CLI_PORT_OVERRIDE_IMPLEMENTATION.md

Tested: First-time startup uses CLI port, subsequent startups use database config
2025-09-07 06:54:56 -04:00
Your Name
be7ae2b580 v0.3.1 - Implement database location and extension changes
- Change database extension from .nrdb to .db for standard SQLite convention
- Modify make_and_restart_relay.sh to run executable from build/ directory
- Database files now created in build/ directory alongside executable
- Enhanced --preserve-database flag with backup/restore functionality
- Updated source code references in config.c and main.c
- Port auto-increment functionality remains fully functional
2025-09-07 06:15:49 -04:00
Your Name
c1de1bb480 v0.3.0 - Complete deployment documentation and examples - Added comprehensive deployment guide, automated deployment scripts, nginx SSL proxy setup, backup automation, and monitoring tools. Includes VPS deployment, cloud platform guides, and practical examples for production deployment of event-based configuration system. 2025-09-06 20:19:12 -04:00
Your Name
a02c1204ce v0.2.18 - Clean up configuration system: remove active_config VIEW, database_location field, fix double slash in paths, and ensure database_path reflects actual path used 2025-09-06 11:01:50 -04:00
Your Name
258779e234 v0.2.17 - Add --database-path parameter with metadata storage
- Add --database-path/-D command line parameter for database override
- Store actual database and config file paths as metadata in config tables
- Fix circular dependency by making command line overrides runtime-only
- Support multiple relay instances with separate databases and configurations
- Clean path normalization removes redundant ./ prefixes
- New fields: database_location and config_location for tracking actual usage
2025-09-06 10:39:11 -04:00
Your Name
342defca6b v0.2.16 - fixed config bugs 2025-09-06 10:24:42 -04:00
Your Name
580aec7d57 v0.2.15 - Add --database-path command line parameter for database location override
- Added -D/--database-path parameter to specify custom database file location
- Fixed port override timing to apply after configuration system initialization
- Updated help message with examples showing database path usage
- Supports both absolute and relative paths for database location
- Enables running multiple relay instances with separate databases
- Resolves database path issues when running from /usr/local/bin
2025-09-06 10:07:28 -04:00
Your Name
54b91af76c v0.2.14 - database path 2025-09-06 09:59:14 -04:00
Your Name
6d9b4efb7e v0.2.12 - Command line variables added 2025-09-06 07:41:43 -04:00
Your Name
6f51f445b7 v0.2.11 - Picky shit 2025-09-06 07:12:47 -04:00
Your Name
6de9518de7 v0.2.10 - Clean versioning 2025-09-06 06:25:27 -04:00
Your Name
517cc020c7 v0.2.9 - Embedded sql schema into app 2025-09-06 06:21:02 -04:00
Your Name
2c699652b0 v0.2.8 - Almost Final test of fixed versioning system 2025-09-06 05:14:03 -04:00
Your Name
2e4ffc0e79 Add force push for updated tags 2025-09-06 05:11:11 -04:00
Your Name
70c91ec858 Fix version.h generation timing in build script 2025-09-06 05:09:35 -04:00
Your Name
b7c4609c2d Fix tag push failure in build script 2025-09-06 05:06:03 -04:00
Your Name
7f69367666 v0.2.5 - Fixed versioning 2025-09-06 05:01:26 -04:00
Your Name
fa17aa1f78 v0.2.4 - Clean up config issues 2025-09-06 04:40:59 -04:00
Your Name
7e560b4247 Remove relay.log from tracking (already in .gitignore) 2025-09-05 19:51:47 -04:00
Your Name
9a29ea51e3 v0.2.2 - Working on config setup 2025-09-05 19:48:49 -04:00
Your Name
6c10713e18 v0.2.1 - Nip-40 implemented 2025-09-05 14:45:32 -04:00
Your Name
b810982a17 v0.2.0 - Nip13 implemented 2025-09-05 14:14:15 -04:00
Your Name
23c95fd2ea v0.1.1 - -release 2025-09-05 13:00:42 -04:00
944 changed files with 184800 additions and 2898 deletions

11
.gitignore vendored

@@ -1,3 +1,14 @@
nostr_core_lib/
nips/
build/
relay.log
relay.pid
Trash/
src/version.h
dev-config/
db/
copy_executable_local.sh
nostr_login_lite/
style_guide/
nostr-tools

6
.gitmodules vendored

@@ -1,3 +1,9 @@
[submodule "nostr_core_lib"]
path = nostr_core_lib
url = https://git.laantungir.net/laantungir/nostr_core_lib.git
[submodule "c_utils_lib"]
path = c_utils_lib
url = ssh://git@git.laantungir.net:2222/laantungir/c_utils_lib.git
[submodule "text_graph"]
path = text_graph
url = ssh://git@git.laantungir.net:2222/laantungir/text_graph.git

298
.roo/architect/AGENTS.md Normal file

@@ -0,0 +1,298 @@
# AGENTS.md - AI Agent Integration Guide for Architect Mode
**Project-Specific Information for AI Agents Working with C-Relay in Architect Mode**
## Critical Architecture Understanding
### System Architecture Overview
C-Relay implements a **unique event-based configuration architecture** that fundamentally differs from traditional Nostr relays:
```
┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
│ WebSocket │ │ Configuration │ │ Database │
│ + HTTP │◄──►│ Event System │◄──►│ (SQLite) │
│ (Port 8888) │ │ (Kind 33334) │ │ Schema v4 │
└─────────────────┘ └──────────────────┘ └─────────────────┘
│ │ │
▼ ▼ ▼
┌─────────────────┐ ┌──────────────────┐ ┌─────────────────┐
│ nostr_core_lib │ │ Admin Key │ │ Event Storage │
│ (Crypto/Sigs) │ │ Management │ │ + Subscriptions │
└─────────────────┘ └──────────────────┘ └─────────────────┘
```
### Core Architectural Principles
#### 1. Event-Driven Configuration
**Design Philosophy**: Configuration as cryptographically signed events rather than files
- **Benefits**: Auditability, remote management, tamper-evidence
- **Trade-offs**: Complexity in configuration changes, admin key management burden
- **Implementation**: Kind 33334 events stored in same database as relay events
#### 2. Identity-Based Database Naming
**Design Philosophy**: Database file named by relay's generated public key
- **Benefits**: Prevents database conflicts, enables multi-relay deployments
- **Trade-offs**: Cannot predict database filename, complicates backup strategies
- **Implementation**: `<relay_pubkey>.db` created in build/ directory
#### 3. Single-Binary Deployment
**Design Philosophy**: All functionality embedded in one executable
- **Benefits**: Simple deployment, no external dependencies to manage
- **Trade-offs**: Larger binary size, harder to modularize
- **Implementation**: SQL schema embedded as header file, nostr_core_lib as submodule
#### 4. Dual-Protocol Support
**Design Philosophy**: WebSocket (Nostr) and HTTP (NIP-11) on same port
- **Benefits**: Simplified port management, reduced infrastructure complexity
- **Trade-offs**: Protocol detection overhead, libwebsockets dependency
- **Implementation**: Request routing based on HTTP headers and upgrade requests
## Architectural Decision Analysis
### Configuration System Design
**Traditional Approach vs C-Relay:**
```
Traditional: C-Relay:
config.json → kind 33334 events
ENV variables → cryptographically signed tags
File watching → database polling/restart
```
**Implications for Extensions:**
- Configuration changes require event signing capabilities
- No hot-reloading without architectural changes
- Admin key loss = complete database reset required
### Database Architecture Decisions
**Schema Design Philosophy:**
- **Event Tags as JSON**: Separate table with JSON column instead of normalized relations
- **Application-Level Filtering**: NIP-40 expiration handled in C, not SQL
- **Embedded Schema**: Version 4 schema compiled into binary
**Scaling Considerations:**
- SQLite suitable for small-to-medium relays (< 10k concurrent connections)
- Single-writer limitation of SQLite affects write-heavy workloads
- JSON tag storage optimizes for read performance over write normalization (see the sketch below)
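As a hedged illustration of the read path this favors, a Python sketch against SQLite's JSON1 extension (the table and column names here are assumptions for illustration, not the relay's actual schema):
```python
import sqlite3

# Hypothetical layout: each event row carries its tag array in a JSON column.
conn = sqlite3.connect("relay.db")  # real files are named <relay_pubkey>.db
rows = conn.execute(
    "SELECT id FROM events, json_each(events.tags) "
    "WHERE json_extract(json_each.value, '$[0]') = 'p' "
    "  AND json_extract(json_each.value, '$[1]') = ?",
    ("<hex_pubkey>",),
).fetchall()
```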
### Memory Management Architecture
**Thread Safety Model:**
- Global subscription manager with mutex protection
- Per-client subscription limits enforced in memory
- WebSocket connection state managed by libwebsockets
**Resource Management:**
- JSON objects use reference counting (jansson library)
- String duplication pattern for configuration values
- Automatic cleanup on client disconnect
## Architectural Extension Points
### Adding New Configuration Options
**Required Changes:**
1. Update [`default_config_event.h`](src/default_config_event.h) template
2. Add parsing logic in [`config.c`](src/config.c) `load_config_from_database()`
3. Add global config struct field in [`config.h`](src/config.h)
4. Update documentation in [`docs/configuration_guide.md`](docs/configuration_guide.md)
### Adding New NIP Support
**Integration Pattern:**
1. Event validation in [`request_validator.c`](src/request_validator.c)
2. Protocol handling in [`main.c`](src/main.c) WebSocket callback
3. Database storage considerations in schema
4. Add test in `tests/` directory
### Scaling Architecture
**Current Limitations:**
- Single process, no horizontal scaling
- SQLite single-writer bottleneck
- Memory-based subscription management
**Potential Extensions:**
- Redis for subscription state sharing
- PostgreSQL for better concurrent write performance
- Load balancer for read scaling with multiple instances
## Deployment Architecture Patterns
### Development Deployment
```
Developer Machine:
├── ./make_and_restart_relay.sh
├── build/c_relay_x86
├── build/<relay_pubkey>.db
└── relay.log
```
### Production SystemD Deployment
```
/opt/c-relay/:
├── c_relay_x86
├── <relay_pubkey>.db
├── systemd service (c-relay.service)
└── c-relay user isolation
```
### Container Deployment Architecture
```
Container:
├── Multi-stage build (deps + binary)
├── Volume mount for database persistence
├── Health checks via NIP-11 endpoint
└── Signal handling for graceful shutdown
```
### Reverse Proxy Architecture
```
Internet → Nginx/HAProxy → C-Relay
├── WebSocket upgrade handling
├── SSL termination
└── Rate limiting
```
## Security Architecture Considerations
### Key Management Design
**Admin Key Security Model:**
- Generated once, displayed once, never stored
- Required for all configuration changes
- Loss requires complete database reset
**Relay Identity Model:**
- Separate keypair for relay identity
- Public key used for database naming
- Private key never exposed to clients
### Event Validation Pipeline
```
WebSocket Input → JSON Parse → Schema Validate → Signature Verify → Store
↓ ↓ ↓
reject reject reject success
```
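The ordering is the point: each stage rejects before the next one runs. A Python sketch of the same flow (`verify_signature` and `store` are hypothetical stand-ins for the nostr_core_lib and SQLite calls):
```python
import json

def ingest(raw: str) -> str:
    try:
        event = json.loads(raw)            # stage 1: JSON parse
    except json.JSONDecodeError:
        return "reject: malformed JSON"
    required = {"id", "pubkey", "created_at", "kind", "tags", "content", "sig"}
    if not required.issubset(event):       # stage 2: schema validation
        return "reject: missing fields"
    if not verify_signature(event):        # stage 3: signature verification
        return "reject: bad signature"
    store(event)                           # stage 4: persist
    return "success"
```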
### Attack Surface Analysis
**Network Attack Vectors:**
- WebSocket connection flooding (mitigated by libwebsockets limits)
- JSON parsing attacks (handled by jansson library bounds checking)
- SQLite injection (prevented by prepared statements)
**Configuration Attack Vectors:**
- Admin key compromise (complete relay control)
- Event signature forgery (prevented by nostr_core_lib validation)
- Replay attacks (event timestamp validation required)
## Non-Obvious Architectural Considerations
### Database Evolution Strategy
**Current Limitations:**
- Schema changes require database recreation
- No migration system for configuration events
- Version 4 schema embedded in binary
**Future Architecture Needs:**
- Schema versioning and migration system
- Backward compatibility for configuration events
- Database backup/restore procedures
### Configuration Event Lifecycle
**Event Flow:**
```
Admin Signs Event → WebSocket Submit → Validate → Store → Restart Required
↓ ↓ ↓
Signature Check Database Config Reload
```
**Architectural Implications:**
- No hot configuration reloading
- Configuration changes require planned downtime
- Event ordering matters for multiple simultaneous changes
### Cross-Architecture Deployment
**Build System Architecture:**
- Auto-detection of host architecture
- Cross-compilation support for ARM64
- Architecture-specific binary outputs
**Deployment Implications:**
- Binary must match target architecture
- Dependencies must be available for target architecture
- Debug tooling architecture-specific
### Performance Architecture Characteristics
**Bottlenecks:**
1. **SQLite Write Performance**: Single writer limitation
2. **JSON Parsing**: Per-event parsing overhead
3. **Signature Validation**: Cryptographic operations per event
4. **Memory Management**: JSON object lifecycle management
**Optimization Points:**
- Prepared statement reuse
- Connection pooling for concurrent reads
- Event batching for bulk operations
- Subscription indexing strategies
### Integration Architecture Patterns
**Monitoring Integration:**
- NIP-11 endpoint for health checks
- Log file monitoring for operational metrics
- Database query monitoring for performance
- Process monitoring for resource usage
**Backup Architecture:**
- Database file backup (SQLite file copy)
- Configuration event export/import
- Admin key secure storage (external to relay)
### Future Extension Architectures
**Multi-Relay Coordination:**
- Database sharding by event kind
- Cross-relay event synchronization
- Distributed configuration management
**Plugin Architecture Possibilities:**
- Event processing pipeline hooks
- Custom validation plugins
- External authentication providers
**Scaling Architecture Options:**
- Read replicas with PostgreSQL migration
- Event stream processing with message queues
- Microservice decomposition (auth, storage, validation)
## Architectural Anti-Patterns to Avoid
1. **Configuration File Addition**: Breaks event-based config paradigm
2. **Direct Database Modification**: Bypasses signature validation
3. **Hard-Coded Ports**: Conflicts with auto-fallback system
4. **Schema Modifications**: Requires database recreation
5. **Admin Key Storage**: Violates security model
6. **Blocking Operations**: Interferes with WebSocket event loop
7. **Memory Leaks**: JSON objects must be properly reference counted
8. **Thread Unsafe Operations**: Global state requires proper synchronization
## Architecture Decision Records (Implicit)
### Decision: Event-Based Configuration
**Context**: Traditional config files vs. cryptographic auditability
**Decision**: Store configuration as signed Nostr events
**Consequences**: Complex configuration changes, enhanced security, remote management capability
### Decision: SQLite Database
**Context**: Database choice for relay storage
**Decision**: Embedded SQLite with JSON tag storage
**Consequences**: Simple deployment, single-writer limitation, application-level filtering
### Decision: Single Binary Deployment
**Context**: Dependency management vs. deployment simplicity
**Decision**: Embed all dependencies and schema in binary
**Consequences**: Larger binary, simple deployment, version coupling
### Decision: Dual Protocol Support
**Context**: WebSocket for Nostr, HTTP for NIP-11
**Decision**: Same port serves both protocols
**Consequences**: Simplified deployment, protocol detection overhead, libwebsockets dependency
These architectural decisions form the foundation of C-Relay's unique approach to Nostr relay implementation and should be carefully considered when planning extensions or modifications.

7
.roo/commands/push.md Normal file

@@ -0,0 +1,7 @@
---
description: "Brief description of what this command does"
---
Run increment_and_push.sh, and supply a good git commit message. For example:
./increment_and_push.sh "Fixed the bug with nip05 implementation"

1
.roo/rules-code/rules.md Normal file

@@ -0,0 +1 @@
Use ./make_and_restart_relay.sh instead of make to build the project.

1
.rooignore Normal file

@@ -0,0 +1 @@
src/embedded_web_content.c

152
AGENTS.md Normal file
View File

@@ -0,0 +1,152 @@
# AGENTS.md - AI Agent Integration Guide
**Project-Specific Information for AI Agents Working with C-Relay**
## Critical Build Commands
### Primary Build Command
```bash
./make_and_restart_relay.sh
```
**Never use `make` directly.** The project requires the custom restart script which:
- Handles database preservation/cleanup based on flags
- Manages architecture-specific binary detection (x86/ARM64)
- Performs automatic process cleanup and port management
- Starts relay in background with proper logging
### Architecture-Specific Binary Outputs
- **x86_64**: `./build/c_relay_x86`
- **ARM64**: `./build/c_relay_arm64`
- **Other**: `./build/c_relay_$(ARCH)`
### Database File Naming Convention
- **Format**: `<relay_pubkey>.db` (NOT `.nrdb` as shown in docs)
- **Location**: Created in `build/` directory during execution
- **Cleanup**: Use `--preserve-database` flag to retain between builds
## Critical Integration Issues
### Event-Based Configuration System
- **No traditional config files** - all configuration stored in config table
- Admin private key shown **only once** on first startup
- Configuration changes require cryptographically signed events
- Database path determined by generated relay pubkey
### First-Time Startup Sequence
1. Relay generates admin keypair and relay keypair
2. Creates database file with relay pubkey as filename
3. Stores default configuration in config table
4. **CRITICAL**: Admin private key displayed once and never stored on disk
### Port Management
- Default port 8888 with automatic fallback (8889, 8890, etc.; see the sketch after this list)
- Script performs port availability checking before libwebsockets binding
- Process cleanup includes force-killing processes on port 8888
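A minimal Python model of that pre-check and fallback (illustrative only; the real logic lives in the restart script and the libwebsockets bind):
```python
import socket

def find_open_port(start: int = 8888, attempts: int = 10) -> int:
    """Try 8888 first, then 8889, 8890, ... like the relay's fallback."""
    for port in range(start, start + attempts):
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.bind(("0.0.0.0", port))
            return port  # bind succeeded, so the port was free just now
        except OSError:
            continue     # port in use; try the next one
    raise RuntimeError("no free port in range")
```
Note the window between this check and the relay's own bind; that is the timing race listed under Non-Obvious Pitfalls below.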
### Database Schema Dependencies
- Uses embedded SQL schema (`sql_schema.h`)
- Schema version 4 with JSON tag storage
- **Critical**: Event expiration filtering done at application level, not SQL level (sketched below)
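A sketch of that application-level check (Python for illustration; the relay does this in C). Per NIP-40, expiration is carried as an `["expiration", "<unix_ts>"]` tag:
```python
import time

def is_expired(event: dict, now: int | None = None) -> bool:
    """NIP-40: expired if the event carries an 'expiration' tag in the past."""
    now = now if now is not None else int(time.time())
    for tag in event.get("tags", []):
        if len(tag) >= 2 and tag[0] == "expiration":
            try:
                return int(tag[1]) <= now
            except ValueError:
                return False   # malformed timestamp: treat as not expired
    return False               # no expiration tag: never expires
```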
### Admin API Event Structure
```json
{
"kind": 23456,
"content": "base64_nip44_encrypted_command_array",
"tags": [
["p", "<relay_pubkey>"]
]
}
```
**Configuration Commands** (encrypted in content):
- `["relay_description", "My Relay"]`
- `["max_subscriptions_per_client", "25"]`
- `["pow_min_difficulty", "16"]`
**Auth Rule Commands** (encrypted in content):
- `["blacklist", "pubkey", "hex_pubkey_value"]`
- `["whitelist", "pubkey", "hex_pubkey_value"]`
**Query Commands** (encrypted in content):
- `["auth_query", "all"]`
- `["system_command", "system_status"]`
### Process Management
```bash
# Kill existing relay processes
pkill -f "c_relay_"
# Check running processes
ps aux | grep c_relay_
# Force kill port binding
fuser -k 8888/tcp
```
### Cross-Compilation Specifics
- ARM64 requires explicit dependency installation: `make install-arm64-deps`
- Uses `aarch64-linux-gnu-gcc` with specific library paths
- PKG_CONFIG_PATH must be set for ARM64: `/usr/lib/aarch64-linux-gnu/pkgconfig`
### Testing Integration
- Tests expect relay running on default port
- Use `tests/quick_error_tests.sh` for validation
- Event configuration tests: `tests/event_config_tests.sh`
### SystemD Integration Considerations
- Service runs as `c-relay` user in `/opt/c-relay`
- Database files created in WorkingDirectory automatically
- No environment variables needed (event-based config)
- Resource limits: 65536 file descriptors, 4096 processes
### Development vs Production Differences
- Development: `make_and_restart_relay.sh` (default database cleanup)
- Production: `make_and_restart_relay.sh --preserve-database`
- Debug build requires manual gdb attachment to architecture-specific binary
### Critical File Dependencies
- `nostr_core_lib/` submodule must be initialized and built first
- Version header auto-generated from git tags: `src/version.h`
- Schema embedded in binary from `src/sql_schema.h`
### WebSocket Protocol Specifics
- Supports both WebSocket (Nostr protocol) and HTTP (NIP-11)
- NIP-11 requires `Accept: application/nostr+json` header (see the check below)
- CORS headers automatically added for NIP-11 compliance
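A quick NIP-11 check from Python (standard library only; assumes the default localhost:8888):
```python
import json
import urllib.request

req = urllib.request.Request(
    "http://localhost:8888",
    headers={"Accept": "application/nostr+json"},  # required for NIP-11
)
with urllib.request.urlopen(req) as resp:
    info = json.load(resp)
print(info.get("name"), info.get("supported_nips"))
```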
### Memory Management Notes
- Persistent subscription system with thread-safe global manager
- Per-session subscription limits enforced
- Event filtering done at C level, not SQL level for NIP-40 expiration
### Configuration Override Behavior
- CLI port override applies during first-time startup and existing relay restarts
- After database creation, all config comes from events (but CLI overrides can still be applied)
- Database path cannot be changed after initialization
## Non-Obvious Pitfalls
1. **Database Lock Issues**: Script handles SQLite locking by killing existing processes first
2. **Port Race Conditions**: Pre-check + libwebsockets binding can still fail due to timing
3. **Key Loss**: Admin private key loss requires complete database deletion and restart
4. **Architecture Detection**: Build system auto-detects but cross-compilation requires manual setup
5. **Event Storage**: Ephemeral events (kind 20000-29999) accepted but not stored
6. **Signature Validation**: All events validated with `nostr_verify_event_signature()` from nostr_core_lib
## Quick Debugging Commands
```bash
# Check relay status
ps aux | grep c_relay_ && netstat -tln | grep 8888
# View logs
tail -f relay.log
# Test WebSocket connection
wscat -c ws://localhost:8888
# Test NIP-11 endpoint
curl -H "Accept: application/nostr+json" http://localhost:8888
# Find database files
find . -name "*.db" -type f
```

142
Dockerfile.alpine-musl Normal file

@@ -0,0 +1,142 @@
# Alpine-based MUSL static binary builder for C-Relay
# Produces truly portable binaries with zero runtime dependencies
ARG DEBUG_BUILD=false
FROM alpine:3.19 AS builder
# Re-declare build argument in this stage
ARG DEBUG_BUILD=false
# Install build dependencies
RUN apk add --no-cache \
build-base \
musl-dev \
git \
cmake \
pkgconfig \
autoconf \
automake \
libtool \
openssl-dev \
openssl-libs-static \
zlib-dev \
zlib-static \
curl-dev \
curl-static \
sqlite-dev \
sqlite-static \
linux-headers \
wget \
bash
# Set working directory
WORKDIR /build
# Build libsecp256k1 static (cached layer - only rebuilds if Alpine version changes)
RUN cd /tmp && \
git clone https://github.com/bitcoin-core/secp256k1.git && \
cd secp256k1 && \
./autogen.sh && \
./configure --enable-static --disable-shared --prefix=/usr \
CFLAGS="-fPIC" && \
make -j$(nproc) && \
make install && \
rm -rf /tmp/secp256k1
# Build libwebsockets static with minimal features (cached layer)
RUN cd /tmp && \
git clone --depth 1 --branch v4.3.3 https://github.com/warmcat/libwebsockets.git && \
cd libwebsockets && \
mkdir build && cd build && \
cmake .. \
-DLWS_WITH_STATIC=ON \
-DLWS_WITH_SHARED=OFF \
-DLWS_WITH_SSL=ON \
-DLWS_WITHOUT_TESTAPPS=ON \
-DLWS_WITHOUT_TEST_SERVER=ON \
-DLWS_WITHOUT_TEST_CLIENT=ON \
-DLWS_WITHOUT_TEST_PING=ON \
-DLWS_WITH_HTTP2=OFF \
-DLWS_WITH_LIBUV=OFF \
-DLWS_WITH_LIBEVENT=OFF \
-DLWS_IPV6=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_C_FLAGS="-fPIC" && \
make -j$(nproc) && \
make install && \
rm -rf /tmp/libwebsockets
# Copy only submodule configuration and git directory
COPY .gitmodules /build/.gitmodules
COPY .git /build/.git
# Clean up any stale submodule references (nips directory is not a submodule)
RUN git rm --cached nips 2>/dev/null || true
# Initialize submodules (cached unless .gitmodules changes)
RUN git submodule update --init --recursive
# Copy nostr_core_lib source files (cached unless nostr_core_lib changes)
COPY nostr_core_lib /build/nostr_core_lib/
# Copy c_utils_lib source files (cached unless c_utils_lib changes)
COPY c_utils_lib /build/c_utils_lib/
# Build c_utils_lib with MUSL-compatible flags (cached unless c_utils_lib changes)
RUN cd c_utils_lib && \
sed -i 's/CFLAGS = -Wall -Wextra -std=c99 -O2 -g/CFLAGS = -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -O2 -g/' Makefile && \
make clean && \
make
# Build nostr_core_lib with required NIPs (cached unless nostr_core_lib changes)
# Disable fortification in build.sh to prevent __*_chk symbol issues
# NIPs: 001(Basic), 006(Keys), 013(PoW), 017(DMs), 019(Bech32), 044(Encryption), 059(Gift Wrap - required by NIP-17)
RUN cd nostr_core_lib && \
chmod +x build.sh && \
sed -i 's/CFLAGS="-Wall -Wextra -std=c99 -fPIC -O2"/CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"/' build.sh && \
rm -f *.o *.a 2>/dev/null || true && \
./build.sh --nips=1,6,13,17,19,44,59
# Copy c-relay source files LAST (only this layer rebuilds on source changes)
COPY src/ /build/src/
COPY Makefile /build/Makefile
# Build c-relay with full static linking (only rebuilds when src/ changes)
# Disable fortification to avoid __*_chk symbols that don't exist in MUSL
# Use conditional compilation flags based on DEBUG_BUILD argument
RUN if [ "$DEBUG_BUILD" = "true" ]; then \
CFLAGS="-g -O0 -DDEBUG"; \
STRIP_CMD=""; \
echo "Building with DEBUG symbols enabled"; \
else \
CFLAGS="-O2"; \
STRIP_CMD="strip /build/c_relay_static"; \
echo "Building optimized production binary"; \
fi && \
gcc -static $CFLAGS -Wall -Wextra -std=c99 \
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
-I. -Ic_utils_lib/src -Inostr_core_lib -Inostr_core_lib/nostr_core \
-Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket \
src/main.c src/config.c src/dm_admin.c src/request_validator.c \
src/nip009.c src/nip011.c src/nip013.c src/nip040.c src/nip042.c \
src/websockets.c src/subscriptions.c src/api.c src/embedded_web_content.c \
-o /build/c_relay_static \
c_utils_lib/libc_utils.a \
nostr_core_lib/libnostr_core_x64.a \
-lwebsockets -lssl -lcrypto -lsqlite3 -lsecp256k1 \
-lcurl -lz -lpthread -lm -ldl && \
eval "$STRIP_CMD"
# Verify it's truly static
RUN echo "=== Binary Information ===" && \
file /build/c_relay_static && \
ls -lh /build/c_relay_static && \
echo "=== Checking for dynamic dependencies ===" && \
(ldd /build/c_relay_static 2>&1 || echo "Binary is static") && \
echo "=== Build complete ==="
# Output stage - just the binary
FROM scratch AS output
COPY --from=builder /build/c_relay_static /c_relay_static

Makefile

@@ -2,15 +2,16 @@
CC = gcc
CFLAGS = -Wall -Wextra -std=c99 -g -O2
INCLUDES = -I. -Inostr_core_lib -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket
LIBS = -lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k1 -lssl -lcrypto -L/usr/local/lib -lcurl
INCLUDES = -I. -Ic_utils_lib/src -Inostr_core_lib -Inostr_core_lib/nostr_core -Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket
LIBS = -lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -L/usr/local/lib -lsecp256k1 -lssl -lcrypto -L/usr/local/lib -lcurl -Lc_utils_lib -lc_utils
# Build directory
BUILD_DIR = build
# Source files
MAIN_SRC = src/main.c
MAIN_SRC = src/main.c src/config.c src/dm_admin.c src/request_validator.c src/nip009.c src/nip011.c src/nip013.c src/nip040.c src/nip042.c src/websockets.c src/subscriptions.c src/api.c src/embedded_web_content.c
NOSTR_CORE_LIB = nostr_core_lib/libnostr_core_x64.a
C_UTILS_LIB = c_utils_lib/libc_utils.a
# Architecture detection
ARCH = $(shell uname -m)
@@ -32,23 +33,66 @@ $(BUILD_DIR):
mkdir -p $(BUILD_DIR)
# Check if nostr_core_lib is built
# Explicitly specify NIPs to ensure NIP-44 (encryption) is included
# NIPs: 1 (basic), 6 (keys), 13 (PoW), 17 (DMs), 19 (bech32), 44 (encryption), 59 (gift wrap)
$(NOSTR_CORE_LIB):
@echo "Building nostr_core_lib..."
cd nostr_core_lib && ./build.sh
@echo "Building nostr_core_lib with required NIPs (including NIP-44 for encryption)..."
cd nostr_core_lib && ./build.sh --nips=1,6,13,17,19,44,59
# Check if c_utils_lib is built
$(C_UTILS_LIB):
@echo "Building c_utils_lib..."
cd c_utils_lib && ./build.sh lib
# Update main.h version information (requires main.h to exist)
src/main.h:
@if [ ! -f src/main.h ]; then \
echo "ERROR: src/main.h not found!"; \
echo "Please ensure src/main.h exists with relay metadata."; \
echo "Copy from a backup or create manually with proper relay configuration."; \
exit 1; \
fi; \
if [ -d .git ]; then \
echo "Updating main.h version information from git tags..."; \
RAW_VERSION=$$(git describe --tags --always 2>/dev/null || echo "unknown"); \
if echo "$$RAW_VERSION" | grep -q "^v[0-9]"; then \
CLEAN_VERSION=$$(echo "$$RAW_VERSION" | sed 's/^v//' | cut -d- -f1); \
VERSION="v$$CLEAN_VERSION"; \
MAJOR=$$(echo "$$CLEAN_VERSION" | cut -d. -f1); \
MINOR=$$(echo "$$CLEAN_VERSION" | cut -d. -f2); \
PATCH=$$(echo "$$CLEAN_VERSION" | cut -d. -f3); \
else \
VERSION="v0.0.0"; \
MAJOR=0; MINOR=0; PATCH=0; \
fi; \
echo "Updating version information in existing main.h..."; \
sed -i "s/#define VERSION \".*\"/#define VERSION \"$$VERSION\"/g" src/main.h; \
sed -i "s/#define VERSION_MAJOR [0-9]*/#define VERSION_MAJOR $$MAJOR/g" src/main.h; \
sed -i "s/#define VERSION_MINOR [0-9]*/#define VERSION_MINOR $$MINOR/g" src/main.h; \
sed -i "s/#define VERSION_PATCH [0-9]*/#define VERSION_PATCH $$PATCH/g" src/main.h; \
echo "Updated main.h version to: $$VERSION"; \
else \
echo "Git not available, preserving existing main.h version information"; \
fi
# Update main.h version information (requires existing main.h)
force-version:
@echo "Force updating main.h version information..."
@$(MAKE) src/main.h
# Build the relay
$(TARGET): $(BUILD_DIR) $(MAIN_SRC) $(NOSTR_CORE_LIB)
$(TARGET): $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB) $(C_UTILS_LIB)
@echo "Compiling C-Relay for architecture: $(ARCH)"
$(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(TARGET) $(NOSTR_CORE_LIB) $(LIBS)
$(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(TARGET) $(NOSTR_CORE_LIB) $(C_UTILS_LIB) $(LIBS)
@echo "Build complete: $(TARGET)"
# Build for specific architectures
x86: $(BUILD_DIR) $(MAIN_SRC) $(NOSTR_CORE_LIB)
x86: $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB) $(C_UTILS_LIB)
@echo "Building C-Relay for x86_64..."
$(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_x86 $(NOSTR_CORE_LIB) $(LIBS)
$(CC) $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_x86 $(NOSTR_CORE_LIB) $(C_UTILS_LIB) $(LIBS)
@echo "Build complete: $(BUILD_DIR)/c_relay_x86"
arm64: $(BUILD_DIR) $(MAIN_SRC) $(NOSTR_CORE_LIB)
arm64: $(BUILD_DIR) src/main.h src/sql_schema.h $(MAIN_SRC) $(NOSTR_CORE_LIB) $(C_UTILS_LIB)
@echo "Cross-compiling C-Relay for ARM64..."
@if ! command -v aarch64-linux-gnu-gcc >/dev/null 2>&1; then \
echo "ERROR: ARM64 cross-compiler not found."; \
@@ -72,7 +116,7 @@ arm64: $(BUILD_DIR) $(MAIN_SRC) $(NOSTR_CORE_LIB)
fi
@echo "Using aarch64-linux-gnu-gcc with ARM64 libraries..."
PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig:/usr/share/pkgconfig \
aarch64-linux-gnu-gcc $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_arm64 $(NOSTR_CORE_LIB) \
aarch64-linux-gnu-gcc $(CFLAGS) $(INCLUDES) $(MAIN_SRC) -o $(BUILD_DIR)/c_relay_arm64 $(NOSTR_CORE_LIB) $(C_UTILS_LIB) \
-L/usr/lib/aarch64-linux-gnu $(LIBS)
@echo "Build complete: $(BUILD_DIR)/c_relay_arm64"
@@ -112,19 +156,21 @@ test: $(TARGET)
@echo "Running tests..."
./tests/1_nip_test.sh
# Initialize database
# Initialize database (now handled automatically when server starts)
init-db:
@echo "Initializing database..."
./db/init.sh --force
@echo "Database initialization is now handled automatically when the server starts."
@echo "The schema is embedded in the binary - no external files needed."
@echo "To manually recreate database: rm -f db/c_nostr_relay.db && ./build/c_relay_x86"
# Clean build artifacts
clean:
rm -rf $(BUILD_DIR)
@echo "Clean complete"
# Clean everything including nostr_core_lib
# Clean everything including nostr_core_lib and c_utils_lib
clean-all: clean
cd nostr_core_lib && make clean 2>/dev/null || true
cd c_utils_lib && make clean 2>/dev/null || true
# Install dependencies (Ubuntu/Debian)
install-deps:
@@ -158,5 +204,23 @@ help:
@echo " make check-toolchain # Check what compilers are available"
@echo " make test # Run tests"
@echo " make init-db # Set up database"
@echo " make force-version # Force regenerate main.h from git"
.PHONY: all x86 arm64 test init-db clean clean-all install-deps install-cross-tools install-arm64-deps check-toolchain help
# Build fully static MUSL binaries using Docker
static-musl-x86_64:
@echo "Building fully static MUSL binary for x86_64..."
docker buildx build --platform linux/amd64 -f examples/deployment/static-builder.Dockerfile -t c-relay-static-builder-x86_64 --load .
docker run --rm -v $(PWD)/build:/output c-relay-static-builder-x86_64 sh -c "cp /c_relay_static_musl_x86_64 /output/"
@echo "Static binary created: build/c_relay_static_musl_x86_64"
static-musl-arm64:
@echo "Building fully static MUSL binary for ARM64..."
docker buildx build --platform linux/arm64 -f examples/deployment/static-builder.Dockerfile -t c-relay-static-builder-arm64 --load .
docker run --rm -v $(PWD)/build:/output c-relay-static-builder-arm64 sh -c "cp /c_relay_static_musl_x86_64 /output/c_relay_static_musl_arm64"
@echo "Static binary created: build/c_relay_static_musl_arm64"
static-musl: static-musl-x86_64 static-musl-arm64
@echo "Built static MUSL binaries for both architectures"
.PHONY: static-musl-x86_64 static-musl-arm64 static-musl
.PHONY: all x86 arm64 test init-db clean clean-all install-deps install-cross-tools install-arm64-deps check-toolchain help force-version

65
NOSTR_RELEASE.md Normal file

@@ -0,0 +1,65 @@
# Relay
I am releasing the code for the nostr relay that I wrote for my own use. The code is free for anyone to use in any way that they wish.
Some of the features of this relay are conventional, and some are unconventional.
## The conventional
This relay is written in C99 with a SQLite database.
It implements the following NIPs.
- [x] NIP-01: Basic protocol flow implementation
- [x] NIP-09: Event deletion
- [x] NIP-11: Relay information document
- [x] NIP-13: Proof of Work
- [x] NIP-15: End of Stored Events Notice
- [x] NIP-20: Command Results
- [x] NIP-33: Parameterized Replaceable Events
- [x] NIP-40: Expiration Timestamp
- [x] NIP-42: Authentication of clients to relays
- [x] NIP-45: Counting results
- [x] NIP-50: Keywords filter
- [x] NIP-70: Protected Events
## The unconventional
### The binaries are fully self contained.
It should just run on Linux without having to worry about what you have on your system. I want download-and-run. No Docker. No dependency hell.
I'm not bothering with other operating systems.
### The relay is a full nostr citizen with its own public and private keys.
For example, you can see my relay's profile (wss://relay.laantungir.net) running here:
[Primal link](https://primal.net/p/nprofile1qqswn2jsmm8lq8evas0v9vhqkdpn9nuujt90mtz60nqgsxndy66es4qjjnhr7)
What this means in practice is that when you start the relay, it generates keys for itself and for its administrator (you can specify these if you wish).
Now the program and the administrator can have verified communication between the two, simply by using nostr events. For example, the administrator can send DMs to the relay, asking its status and changing its configuration, through any client that can handle NIP-17 DMs. The relay can also send notifications to the administrator about its current status, or it can publish its status on a regular schedule directly to Nostr as kind-1 notes.
Also included is a more standard administrative web front end. This front end communicates with the relay through an extensive API, which again consists of nostr events signed by the administrator. You can completely control the relay through signed nostr events.
## Screenshots
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/main.png)
Main page with real time updates.
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/config.png)
Set your configuration preferences.
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/subscriptions.png)
View current subscriptions
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/white-blacklists.png)
Add npubs to white or black lists.
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/sqlQuery.png)
Run SQL queries on the database.
![](https://git.laantungir.net/laantungir/c-relay/raw/branch/master/screenshots/main-light.png)
Light mode.

481
README.md

@@ -1,29 +1,474 @@
A nostr relay in C with sqlite on the back end.
# C-Nostr Relay
A high-performance Nostr relay implemented in C with SQLite backend, featuring nostr event-based management.
## Supported NIPs
<!--
NOTE FOR ASSISTANTS: When updating the NIPs checklist below, ONLY change [ ] to [x] to mark as complete.
Do NOT modify the formatting, add emojis, or change the text. Keep the simple format consistent.
-->
### [NIPs](https://github.com/nostr-protocol/nips)
- [x] NIP-01: Basic protocol flow implementation
- [x] NIP-09: Event deletion
- [ ] NIP-11: Relay information document
- [ ] NIP-12: Generic tag queries
- [ ] NIP-13: Proof of Work
- [x] NIP-11: Relay information document
- [x] NIP-13: Proof of Work
- [x] NIP-15: End of Stored Events Notice
- [ ] NIP-16: Event Treatment
- [x] NIP-20: Command Results
- [ ] NIP-22: Event `created_at` Limits
- [ ] NIP-25: Reactions
- [ ] NIP-26: Delegated Event Signing
- [ ] NIP-28: Public Chat
- [ ] NIP-33: Parameterized Replaceable Events
- [ ] NIP-40: Expiration Timestamp
- [ ] NIP-42: Authentication of clients to relays
- [ ] NIP-45: Counting results. [experimental](#count)
- [ ] NIP-50: Keywords filter. [experimental](#search)
- [ ] NIP-70: Protected Events
- [x] NIP-33: Parameterized Replaceable Events
- [x] NIP-40: Expiration Timestamp
- [x] NIP-42: Authentication of clients to relays
- [x] NIP-45: Counting results
- [x] NIP-50: Keywords filter
- [x] NIP-70: Protected Events
## Quick Start
Get your C-Relay up and running in minutes with a static binary (no dependencies required):
### 1. Download Static Binary
Download the latest static release from the [releases page](https://git.laantungir.net/laantungir/c-relay/releases):
```bash
# Static binary - works on all Linux distributions (no dependencies)
wget https://git.laantungir.net/laantungir/c-relay/releases/download/v0.6.0/c-relay-v0.6.0-linux-x86_64-static
chmod +x c-relay-v0.6.0-linux-x86_64-static
mv c-relay-v0.6.0-linux-x86_64-static c-relay
```
### 2. Start the Relay
Simply run the binary - no configuration files needed:
```bash
./c-relay
```
On first startup, you'll see:
- **Admin Private Key**: Save this securely! You'll need it for administration
- **Relay Public Key**: Your relay's identity on the Nostr network
- **Port Information**: Default is 8888, or the next available port
### 3. Access the Web Interface
Open your browser and navigate to:
```
http://localhost:8888/api/
```
The web interface provides:
- Real-time configuration management
- Database statistics dashboard
- Auth rules management
- Secure admin authentication with your Nostr identity
### 4. Test Your Relay
Test basic connectivity:
```bash
# Test the HTTP (NIP-11) endpoint
curl -H "Accept: application/nostr+json" http://localhost:8888
# Test with a Nostr client
# Add ws://localhost:8888 to your client's relay list
```
### 5. Configure Your Relay (Optional)
Use the web interface or send admin commands to customize:
- Relay name and description
- Authentication rules (whitelist/blacklist)
- Connection limits
- Proof-of-work requirements
**That's it!** Your relay is now running with zero configuration required. The event-based configuration system means you can adjust all settings through the web interface or admin API without editing config files.
## Web Admin Interface
C-Relay includes a **built-in web-based administration interface** accessible at `http://localhost:8888/api/`. The interface provides:
- **Real-time Configuration Management**: View and edit all relay settings through a web UI
- **Database Statistics Dashboard**: Monitor event counts, storage usage, and performance metrics
- **Auth Rules Management**: Configure whitelist/blacklist rules for pubkeys
- **NIP-42 Authentication**: Secure access using your Nostr identity
- **Event-Based Updates**: All changes are applied as cryptographically signed Nostr events
The web interface serves embedded static files with no external dependencies and includes proper CORS headers for browser compatibility.
## Administrator API
C-Relay uses an innovative **event-based administration system** where all configuration and management commands are sent as signed Nostr events using the admin private key generated during first startup. All admin commands use **NIP-44 encrypted command arrays** for security and compatibility.
### Authentication
All admin commands require signing with the admin private key displayed during first-time startup. **Save this key securely** - it cannot be recovered and is needed for all administrative operations.
### Event Structure
All admin commands use the same unified event structure with NIP-44 encrypted content:
**Admin Command Event:**
```json
{
"id": "event_id",
"pubkey": "admin_public_key",
"created_at": 1234567890,
"kind": 23456,
"content": "AqHBUgcM7dXFYLQuDVzGwMST1G8jtWYyVvYxXhVGEu4nAb4LVw...",
"tags": [
["p", "relay_public_key"]
],
"sig": "event_signature"
}
```
The `content` field contains a NIP-44 encrypted JSON array representing the command.
**Admin Response Event:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "BpKCVhfN8eYtRmPqSvWxZnMkL2gHjUiOp3rTyEwQaS5dFg...",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
The `content` field contains a NIP-44 encrypted JSON response object.
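As an illustration, here is a minimal sketch of the full admin round trip using the nostr-tools v2 APIs (the library bundled with the web interface). The relay URL, the `loadAdminSecretKey()` helper, and the exact `nip44` import shape are assumptions and may need adjusting for your setup:
```javascript
// Sketch: send an encrypted admin command and decrypt the relay's response.
// Assumes nostr-tools v2. loadAdminSecretKey() is a hypothetical helper that
// returns the admin private key (printed at first startup) as a Uint8Array.
import { finalizeEvent, getPublicKey } from 'nostr-tools/pure'
import { Relay } from 'nostr-tools/relay'
import { v2 as nip44 } from 'nostr-tools/nip44'

const adminSecretKey = loadAdminSecretKey()
const relayPubkey = '<relay_public_key_hex>'

// Derive the NIP-44 conversation key and encrypt the command array.
const convKey = nip44.utils.getConversationKey(adminSecretKey, relayPubkey)
const content = nip44.encrypt(JSON.stringify(['config_query', 'all']), convKey)

// Build and sign the kind 23456 admin command event.
const command = finalizeEvent({
  kind: 23456,
  created_at: Math.floor(Date.now() / 1000),
  tags: [['p', relayPubkey]],
  content,
}, adminSecretKey)

const relay = await Relay.connect('ws://localhost:8888')

// Subscribe for the kind 23457 response addressed to the admin pubkey
// before publishing, so the ephemeral response is not missed.
relay.subscribe([{ kinds: [23457], '#p': [getPublicKey(adminSecretKey)] }], {
  onevent(ev) {
    const response = JSON.parse(nip44.decrypt(ev.content, convKey))
    console.log('admin response:', response)
  },
})
await relay.publish(command)
```
The same pattern works for every command in the table below; only the encrypted JSON array changes.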
### Admin Commands
All commands are sent as NIP-44 encrypted JSON arrays in the event content. The following table lists all available commands:
| Command Type | Command Format | Description |
|--------------|----------------|-------------|
| **Configuration Management** |
| `config_update` | `["config_update", [{"key": "auth_enabled", "value": "true", "data_type": "boolean", "category": "auth"}, {"key": "relay_description", "value": "My Relay", "data_type": "string", "category": "relay"}, ...]]` | Update relay configuration parameters (supports multiple updates) |
| `config_query` | `["config_query", "all"]` | Query all configuration parameters |
| **Auth Rules Management** |
| `auth_add_blacklist` | `["blacklist", "pubkey", "abc123..."]` | Add pubkey to blacklist |
| `auth_add_whitelist` | `["whitelist", "pubkey", "def456..."]` | Add pubkey to whitelist |
| `auth_delete_rule` | `["delete_auth_rule", "blacklist", "pubkey", "abc123..."]` | Delete specific auth rule |
| `auth_query_all` | `["auth_query", "all"]` | Query all auth rules |
| `auth_query_type` | `["auth_query", "whitelist"]` | Query specific rule type |
| `auth_query_pattern` | `["auth_query", "pattern", "abc123..."]` | Query specific pattern |
| **System Commands** |
| `system_clear_auth` | `["system_command", "clear_all_auth_rules"]` | Clear all auth rules |
| `system_status` | `["system_command", "system_status"]` | Get system status |
| `stats_query` | `["stats_query"]` | Get comprehensive database statistics |
| **Database Queries** |
| `sql_query` | `["sql_query", "SELECT * FROM events LIMIT 10"]` | Execute read-only SQL query against relay database |
### Available Configuration Keys
**Basic Relay Settings:**
- `relay_name`: Relay name (displayed in NIP-11)
- `relay_description`: Relay description text
- `relay_contact`: Contact information
- `relay_software`: Software URL
- `relay_version`: Software version
- `supported_nips`: Comma-separated list of supported NIP numbers (e.g., "1,2,4,9,11,12,13,15,16,20,22,33,40,42")
- `language_tags`: Comma-separated list of supported language tags (e.g., "en,es,fr" or "*" for all)
- `relay_countries`: Comma-separated list of supported country codes (e.g., "US,CA,MX" or "*" for all)
- `posting_policy`: Posting policy URL or text
- `payments_url`: Payment URL for premium features
- `max_connections`: Maximum concurrent connections
- `max_subscriptions_per_client`: Max subscriptions per client
- `max_event_tags`: Maximum tags per event
- `max_content_length`: Maximum event content length
**Authentication & Access Control:**
- `auth_enabled`: Enable whitelist/blacklist auth rules (`true`/`false`)
- `nip42_auth_required`: Enable NIP-42 cryptographic authentication (`true`/`false`)
- `nip42_auth_required_kinds`: Event kinds requiring NIP-42 auth (comma-separated)
- `nip42_challenge_timeout`: NIP-42 challenge expiration seconds
**Proof of Work & Validation:**
- `pow_min_difficulty`: Minimum proof-of-work difficulty
- `nip40_expiration_enabled`: Enable event expiration (`true`/`false`)
**Monitoring Settings:**
- `kind_24567_reporting_throttle_sec`: Minimum seconds between monitoring events (default: 5)
### Dynamic Configuration Updates
C-Relay supports **dynamic configuration updates** without requiring a restart for most settings. Configuration parameters are categorized as either **dynamic** (can be updated immediately) or **restart-required** (require relay restart to take effect).
**Dynamic Configuration Parameters (No Restart Required):**
- All relay information (NIP-11) settings: `relay_name`, `relay_description`, `relay_contact`, `relay_software`, `relay_version`, `supported_nips`, `language_tags`, `relay_countries`, `posting_policy`, `payments_url`
- Authentication settings: `auth_enabled`, `nip42_auth_required`, `nip42_auth_required_kinds`, `nip42_challenge_timeout`
- Subscription limits: `max_subscriptions_per_client`, `max_total_subscriptions`
- Event validation limits: `max_event_tags`, `max_content_length`, `max_message_length`
- Proof of Work settings: `pow_min_difficulty`, `pow_mode`
- Event expiration settings: `nip40_expiration_enabled`, `nip40_expiration_strict`, `nip40_expiration_filter`, `nip40_expiration_grace_period`
**Restart-Required Configuration Parameters:**
- Connection settings: `max_connections`, `relay_port`
- Database and core system settings
When updating configuration, the admin API response will indicate whether a restart is required for each parameter. Dynamic updates take effect immediately and are reflected in NIP-11 relay information documents without restart.
### Response Format
All admin commands return **signed EVENT responses** via WebSocket following standard Nostr protocol. Responses use JSON content with structured data.
#### Response Examples
**Success Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"config_update\", \"status\": \"success\", \"message\": \"Operation completed successfully\", \"timestamp\": 1234567890}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**Error Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"config_update\", \"status\": \"error\", \"error\": \"invalid configuration value\", \"timestamp\": 1234567890}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**Auth Rules Query Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"auth_rules_all\", \"total_results\": 2, \"timestamp\": 1234567890, \"data\": [{\"rule_type\": \"blacklist\", \"pattern_type\": \"pubkey\", \"pattern_value\": \"abc123...\", \"action\": \"allow\"}]}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**Configuration Query Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"config_all\", \"total_results\": 27, \"timestamp\": 1234567890, \"data\": [{\"key\": \"auth_enabled\", \"value\": \"false\", \"data_type\": \"boolean\", \"category\": \"auth\", \"description\": \"Enable NIP-42 authentication\"}, {\"key\": \"relay_description\", \"value\": \"My Relay\", \"data_type\": \"string\", \"category\": \"relay\", \"description\": \"Relay description text\"}]}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**Configuration Update Success Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"config_update\", \"total_results\": 2, \"timestamp\": 1234567890, \"status\": \"success\", \"data\": [{\"key\": \"auth_enabled\", \"value\": \"true\", \"status\": \"updated\"}, {\"key\": \"relay_description\", \"value\": \"My Updated Relay\", \"status\": \"updated\"}]}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**Configuration Update Error Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"config_update\", \"status\": \"error\", \"error\": \"field validation failed: invalid port number '99999' (must be 1-65535)\", \"timestamp\": 1234567890}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**Database Statistics Query Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"stats_query\", \"timestamp\": 1234567890, \"database_size_bytes\": 1048576, \"total_events\": 15432, \"database_created_at\": 1234567800, \"latest_event_at\": 1234567890, \"event_kinds\": [{\"kind\": 1, \"count\": 12000, \"percentage\": 77.8}, {\"kind\": 0, \"count\": 2500, \"percentage\": 16.2}], \"time_stats\": {\"total\": 15432, \"last_24h\": 234, \"last_7d\": 1456, \"last_30d\": 5432}, \"top_pubkeys\": [{\"pubkey\": \"abc123...\", \"event_count\": 1234, \"percentage\": 8.0}, {\"pubkey\": \"def456...\", \"event_count\": 987, \"percentage\": 6.4}]}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**SQL Query Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"sql_query\", \"request_id\": \"request_event_id\", \"timestamp\": 1234567890, \"query\": \"SELECT * FROM events LIMIT 10\", \"execution_time_ms\": 45, \"row_count\": 10, \"columns\": [\"id\", \"pubkey\", \"created_at\", \"kind\", \"content\"], \"rows\": [[\"abc123...\", \"def456...\", 1234567890, 1, \"Hello world\"], ...]}",
"tags": [
["p", "admin_public_key"],
["e", "request_event_id"]
],
"sig": "response_event_signature"
}]
```
### SQL Query Command
The `sql_query` command allows administrators to execute read-only SQL queries against the relay database. This provides powerful analytics and debugging capabilities through the admin API.
**Request/Response Correlation:**
- Each response includes the request event ID in both the `tags` array (`["e", "request_event_id"]`) and the decrypted content (`"request_id": "request_event_id"`)
- This allows proper correlation when multiple queries are submitted concurrently
- The frontend can track pending queries and match responses to requests, as in the sketch below
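A minimal sketch of that correlation pattern (the function names are illustrative, not part of the relay or its bundled libraries):
```javascript
// Track pending sql_query requests by event id and resolve each one when the
// matching kind 23457 response arrives (matched via the "e" tag or request_id).
const pending = new Map() // request event id -> resolve callback

function sendSqlQuery(relay, signedRequestEvent) {
  return new Promise((resolve) => {
    pending.set(signedRequestEvent.id, resolve)
    relay.publish(signedRequestEvent)
  })
}

// Called from the kind 23457 subscription handler with the decrypted content.
function onSqlResponse(responseEvent, decrypted) {
  const eTag = responseEvent.tags.find((t) => t[0] === 'e')
  const requestId = eTag ? eTag[1] : decrypted.request_id
  const resolve = pending.get(requestId)
  if (resolve) {
    pending.delete(requestId)
    resolve(decrypted) // hands back { query, columns, rows, ... }
  }
}
```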
**Security Features:**
- Only SELECT statements allowed (INSERT, UPDATE, DELETE, DROP, etc. are blocked)
- Query timeout: 5 seconds (configurable)
- Result row limit: 1000 rows (configurable)
- All queries logged with execution time
**Available Tables and Views:**
- `events` - All Nostr events
- `config` - Configuration parameters
- `auth_rules` - Authentication rules
- `subscription_events` - Subscription lifecycle log
- `event_broadcasts` - Event broadcast log
- `recent_events` - Last 1000 events (view)
- `event_stats` - Event statistics by type (view)
- `subscription_analytics` - Subscription metrics (view)
- `active_subscriptions_log` - Currently active subscriptions (view)
- `event_kinds_view` - Event distribution by kind (view)
- `top_pubkeys_view` - Top 10 pubkeys by event count (view)
- `time_stats_view` - Time-based statistics (view)
**Example Queries:**
```sql
-- Recent events
SELECT id, pubkey, created_at, kind FROM events ORDER BY created_at DESC LIMIT 20
-- Event distribution by kind
SELECT * FROM event_kinds_view ORDER BY count DESC
-- Active subscriptions
SELECT * FROM active_subscriptions_log ORDER BY created_at DESC
-- Database statistics
SELECT
(SELECT COUNT(*) FROM events) as total_events,
(SELECT COUNT(*) FROM subscription_events) as total_subscriptions
```
## Real-time Monitoring System
C-Relay includes a subscription-based monitoring system that broadcasts real-time relay statistics using ephemeral events (kind 24567).
### Activation
The monitoring system activates automatically when clients subscribe to kind 24567 events:
```json
["REQ", "monitoring-sub", {"kinds": [24567]}]
```
For specific monitoring types, use d-tag filters:
```json
["REQ", "event-kinds-sub", {"kinds": [24567], "#d": ["event_kinds"]}]
["REQ", "time-stats-sub", {"kinds": [24567], "#d": ["time_stats"]}]
["REQ", "top-pubkeys-sub", {"kinds": [24567], "#d": ["top_pubkeys"]}]
```
When no subscriptions exist, monitoring is dormant to conserve resources.
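For example, a minimal browser-side subscriber using the standard WebSocket API (host and port follow the Quick Start defaults):
```javascript
// Subscribe to the event-kind distribution stream and log each update.
const ws = new WebSocket('ws://localhost:8888')

ws.onopen = () => {
  // Filtering on the d tag selects a single monitoring stream.
  ws.send(JSON.stringify(['REQ', 'event-kinds-sub', { kinds: [24567], '#d': ['event_kinds'] }]))
}

ws.onmessage = (msg) => {
  const [type, , event] = JSON.parse(msg.data)
  if (type === 'EVENT' && event.kind === 24567) {
    const stats = JSON.parse(event.content) // e.g. {"data_type":"event_kinds",...}
    console.log(stats.data_type, stats)
  }
}
```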
### Monitoring Event Types
| Type | d Tag | Description |
|------|-------|-------------|
| Event Distribution | `event_kinds` | Event count by kind with percentages |
| Time Statistics | `time_stats` | Events in last 24h, 7d, 30d |
| Top Publishers | `top_pubkeys` | Top 10 pubkeys by event count |
| Active Subscriptions | `active_subscriptions` | Current subscription details (admin only) |
| Subscription Details | `subscription_details` | Detailed subscription info (admin only) |
| CPU Metrics | `cpu_metrics` | Process CPU and memory usage |
### Event Structure
```json
{
"kind": 24567,
"pubkey": "<relay_pubkey>",
"created_at": <timestamp>,
"content": "{\"data_type\":\"event_kinds\",\"timestamp\":1234567890,...}",
"tags": [
["d", "event_kinds"]
]
}
```
### Configuration
- `kind_24567_reporting_throttle_sec`: Minimum seconds between monitoring events (default: 5)
### Web Dashboard Integration
The built-in web dashboard (`/api/`) automatically subscribes to monitoring events and displays real-time statistics.
### Performance Considerations
- Monitoring events are ephemeral (not stored in database)
- Throttling prevents excessive event generation
- Automatic activation/deactivation based on subscriptions
- Minimal overhead when no clients are monitoring
## Direct Messaging Admin System
In addition to the admin API above, C-Relay lets the administrator control the relay by direct messaging it. As long as the administrator is signed in with any Nostr client that can send NIP-17 direct messages (DMs), they can query information or adjust some settings.
This is possible because the relay is a full Nostr citizen with its own private and public keys, and it knows the administrator's public key.
**Available DM commands**
Command formatting is intentionally lenient: for example, if the relay receives any DM from the administrator containing the word "stats" or "statistics", it replies with a DM containing the current relay statistics.
- `stats`|`statistics`: Relay statistics
- `config`|`configuration`: Relay configuration

58
api/embedded.html Normal file

@@ -0,0 +1,58 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Embedded NOSTR_LOGIN_LITE</title>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
margin: 0;
padding: 40px;
background: white;
display: flex;
justify-content: center;
align-items: center;
min-height: 100vh;
}
.container {
max-width: 400px;
width: 100%;
}
#login-container {
/* No styling - let embedded modal blend seamlessly */
}
</style>
</head>
<body>
<div class="container">
<div id="login-container"></div>
</div>
<script src="../lite/nostr.bundle.js"></script>
<script src="../lite/nostr-lite.js"></script>
<script>
document.addEventListener('DOMContentLoaded', async () => {
await window.NOSTR_LOGIN_LITE.init({
theme:'default',
methods: {
extension: true,
local: true,
seedphrase: true,
readonly: true,
connect: true,
remote: true,
otp: true
}
});
window.NOSTR_LOGIN_LITE.embed('#login-container', {
seamless: true
});
});
</script>
</body>
</html>

1310
api/index.css Normal file

File diff suppressed because it is too large

468
api/index.html Normal file

@@ -0,0 +1,468 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>C-Relay Admin</title>
<link rel="stylesheet" href="/api/index.css">
</head>
<body>
<!-- Side Navigation Menu -->
<nav class="side-nav" id="side-nav">
<ul class="nav-menu">
<li><button class="nav-item" data-page="statistics">Statistics</button></li>
<li><button class="nav-item" data-page="subscriptions">Subscriptions</button></li>
<li><button class="nav-item" data-page="configuration">Configuration</button></li>
<li><button class="nav-item" data-page="authorization">Authorization</button></li>
<li><button class="nav-item" data-page="relay-events">Relay Events</button></li>
<li><button class="nav-item" data-page="dm">DM</button></li>
<li><button class="nav-item" data-page="database">Database Query</button></li>
</ul>
<div class="nav-footer">
<button class="nav-footer-btn" id="nav-dark-mode-btn">DARK MODE</button>
<button class="nav-footer-btn" id="nav-logout-btn">LOGOUT</button>
</div>
</nav>
<!-- Side Navigation Overlay -->
<div class="side-nav-overlay" id="side-nav-overlay"></div>
<!-- Header with title and profile display -->
<div class="section">
<div class="header-content">
<div class="header-title clickable" id="header-title">
<span class="relay-letter" data-letter="R">R</span>
<span class="relay-letter" data-letter="E">E</span>
<span class="relay-letter" data-letter="L">L</span>
<span class="relay-letter" data-letter="A">A</span>
<span class="relay-letter" data-letter="Y">Y</span>
</div>
<div class="relay-info">
<div id="relay-name" class="relay-name">C-Relay</div>
<div id="relay-description" class="relay-description">Loading...</div>
<div id="relay-pubkey-container" class="relay-pubkey-container">
<div id="relay-pubkey" class="relay-pubkey">Loading...</div>
</div>
</div>
<div class="profile-area" id="profile-area" style="display: none;">
<div class="admin-label">admin</div>
<div class="profile-container">
<img id="header-user-image" class="header-user-image" alt="Profile" style="display: none;">
<span id="header-user-name" class="header-user-name">Loading...</span>
</div>
<!-- Logout dropdown -->
<!-- Dropdown menu removed - buttons moved to sidebar -->
</div>
</div>
</div>
<!-- Login Modal Overlay -->
<div id="login-modal" class="login-modal-overlay" style="display: none;">
<div class="login-modal-content">
<div id="login-modal-container"></div>
</div>
</div>
<!-- DATABASE STATISTICS Section -->
<!-- Subscribe to kind 24567 events to receive real-time monitoring data -->
<div class="section flex-section" id="databaseStatisticsSection" style="display: none;">
<div class="section-header">
DATABASE STATISTICS
</div>
<!-- Event Rate Graph Container -->
<div id="event-rate-chart"></div>
<!-- Database Overview Table -->
<div class="input-group">
<div class="config-table-container">
<table class="config-table" id="stats-overview-table">
<thead>
<tr>
<th>Metric</th>
<th>Value</th>
</tr>
</thead>
<tbody id="stats-overview-table-body">
<tr>
<td>Database Size</td>
<td id="db-size">-</td>
</tr>
<tr>
<td>Total Events</td>
<td id="total-events">-</td>
</tr>
<tr>
<td>Process ID</td>
<td id="process-id">-</td>
</tr>
<tr>
<td>Active Subscriptions</td>
<td id="active-subscriptions">-</td>
</tr>
<tr>
<td>Memory Usage</td>
<td id="memory-usage">-</td>
</tr>
<tr>
<td>CPU Core</td>
<td id="cpu-core">-</td>
</tr>
<tr>
<td>CPU Usage</td>
<td id="cpu-usage">-</td>
</tr>
<tr>
<td>Oldest Event</td>
<td id="oldest-event">-</td>
</tr>
<tr>
<td>Newest Event</td>
<td id="newest-event">-</td>
</tr>
</tbody>
</table>
</div>
</div>
<!-- Event Kind Distribution Table -->
<div class="input-group">
<label>Event Kind Distribution:</label>
<div class="config-table-container">
<table class="config-table" id="stats-kinds-table">
<thead>
<tr>
<th>Event Kind</th>
<th>Count</th>
<th>Percentage</th>
</tr>
</thead>
<tbody id="stats-kinds-table-body">
<tr>
<td colspan="3" style="text-align: center; font-style: italic;">No data loaded</td>
</tr>
</tbody>
</table>
</div>
</div>
<!-- Time-based Statistics Table -->
<div class="input-group">
<label>Time-based Statistics:</label>
<div class="config-table-container">
<table class="config-table" id="stats-time-table">
<thead>
<tr>
<th>Period</th>
<th>Events</th>
</tr>
</thead>
<tbody id="stats-time-table-body">
<tr>
<td>Last 24 Hours</td>
<td id="events-24h">-</td>
</tr>
<tr>
<td>Last 7 Days</td>
<td id="events-7d">-</td>
</tr>
<tr>
<td>Last 30 Days</td>
<td id="events-30d">-</td>
</tr>
</tbody>
</table>
</div>
</div>
<!-- Top Pubkeys Table -->
<div class="input-group">
<label>Top Pubkeys by Event Count:</label>
<div class="config-table-container">
<table class="config-table" id="stats-pubkeys-table">
<thead>
<tr>
<th>Rank</th>
<th>Pubkey</th>
<th>Event Count</th>
<th>Percentage</th>
</tr>
</thead>
<tbody id="stats-pubkeys-table-body">
<tr>
<td colspan="4" style="text-align: center; font-style: italic;">No data loaded</td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
<!-- SUBSCRIPTION DETAILS Section (Admin Only) -->
<div class="section flex-section" id="subscriptionDetailsSection" style="display: none;">
<div class="section-header">
ACTIVE SUBSCRIPTION DETAILS
</div>
<div class="input-group">
<div class="config-table-container">
<table class="config-table" id="subscription-details-table">
<tbody id="subscription-details-table-body">
<tr>
<td colspan="4" style="text-align: center; font-style: italic;">No subscriptions active</td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
<!-- RELAY CONFIGURATION Section -->
<div id="div_config" class="section flex-section" style="display: none;">
<div class="section-header">
RELAY CONFIGURATION
</div>
<div id="config-display" class="hidden">
<div class="config-table-container">
<table class="config-table" id="config-table">
<thead>
<tr>
<th>Parameter</th>
<th>Value</th>
<th>Actions</th>
</tr>
</thead>
<tbody id="config-table-body">
</tbody>
</table>
</div>
<div class="inline-buttons">
<button type="button" id="fetch-config-btn">REFRESH</button>
</div>
</div>
</div>
<!-- Auth Rules Management - Moved after configuration -->
<div class="section flex-section" id="authRulesSection" style="display: none;">
<div class="section-header">
AUTH RULES MANAGEMENT
</div>
<!-- Auth Rules Table -->
<div id="authRulesTableContainer" style="display: none;">
<table class="config-table" id="authRulesTable">
<thead>
<tr>
<th>Rule Type</th>
<th>Pattern Type</th>
<th>Pattern Value</th>
<th>Status</th>
<th>Actions</th>
</tr>
</thead>
<tbody id="authRulesTableBody">
</tbody>
</table>
</div>
<!-- Simplified Auth Rule Input Section -->
<div id="authRuleInputSections" style="display: block;">
<!-- Combined Pubkey Auth Rule Section -->
<div class="input-group">
<label for="authRulePubkey">Pubkey (nsec or hex):</label>
<input type="text" id="authRulePubkey" placeholder="nsec1... or 64-character hex pubkey">
</div>
<div id="whitelistWarning" class="warning-box" style="display: none;">
<strong>⚠️ WARNING:</strong> Adding whitelist rules changes relay behavior to whitelist-only
mode.
Only whitelisted users will be able to interact with the relay.
</div>
<div class="inline-buttons">
<button type="button" id="addWhitelistBtn" onclick="addWhitelistRule()">ADD TO
WHITELIST</button>
<button type="button" id="addBlacklistBtn" onclick="addBlacklistRule()">ADD TO
BLACKLIST</button>
<button type="button" id="refreshAuthRulesBtn">REFRESH</button>
</div>
</div>
</div>
<!-- NIP-17 DIRECT MESSAGES Section -->
<div class="section" id="nip17DMSection" style="display: none;">
<div class="section-header">
<h2>NIP-17 DIRECT MESSAGES</h2>
</div>
<!-- Outbox -->
<div>
<label for="dm-outbox">Send Message to Relay:</label>
<textarea id="dm-outbox" rows="4" placeholder="Enter your message to send to the relay..."></textarea>
</div>
<!-- Send Button -->
<div class="input-group">
<button type="button" id="send-dm-btn">SEND MESSAGE</button>
</div>
<!-- Inbox -->
<div class="input-group">
<label>Received Messages from Relay:</label>
<div id="dm-inbox" class="log-panel" style="height: 200px;">
<div class="log-entry">No messages received yet.</div>
</div>
</div>
</div>
<!-- RELAY EVENTS Section -->
<div class="section" id="relayEventsSection" style="display: none;">
<div class="section-header">
RELAY EVENTS MANAGEMENT
</div>
<!-- Kind 0: User Metadata -->
<div class="input-group">
<h3>Kind 0: User Metadata</h3>
<div class="form-group">
<label for="kind0-name">Name:</label>
<input type="text" id="kind0-name" placeholder="Relay Name">
</div>
<div class="form-group">
<label for="kind0-about">About:</label>
<textarea id="kind0-about" rows="3" placeholder="Relay Description"></textarea>
</div>
<div class="form-group">
<label for="kind0-picture">Picture URL:</label>
<input type="url" id="kind0-picture" placeholder="https://example.com/logo.png">
</div>
<div class="form-group">
<label for="kind0-banner">Banner URL:</label>
<input type="url" id="kind0-banner" placeholder="https://example.com/banner.png">
</div>
<div class="form-group">
<label for="kind0-nip05">NIP-05:</label>
<input type="text" id="kind0-nip05" placeholder="relay@example.com">
</div>
<div class="form-group">
<label for="kind0-website">Website:</label>
<input type="url" id="kind0-website" placeholder="https://example.com">
</div>
<div class="inline-buttons">
<button type="button" id="submit-kind0-btn">UPDATE METADATA</button>
</div>
<div id="kind0-status" class="status-message"></div>
</div>
<!-- Kind 10050: DM Relay List -->
<div class="input-group">
<h3>Kind 10050: DM Relay List</h3>
<div class="form-group">
<label for="kind10050-relays">Relay URLs (one per line):</label>
<textarea id="kind10050-relays" rows="4" placeholder="wss://relay1.com&#10;wss://relay2.com"></textarea>
</div>
<div class="inline-buttons">
<button type="button" id="submit-kind10050-btn">UPDATE DM RELAYS</button>
</div>
<div id="kind10050-status" class="status-message"></div>
</div>
<!-- Kind 10002: Relay List -->
<div class="input-group">
<h3>Kind 10002: Relay List</h3>
<div id="kind10002-relay-entries">
<!-- Dynamic relay entries will be added here -->
</div>
<div class="inline-buttons">
<button type="button" id="add-relay-entry-btn">ADD RELAY</button>
<button type="button" id="submit-kind10002-btn">UPDATE RELAYS</button>
</div>
<div id="kind10002-status" class="status-message"></div>
</div>
</div>
<!-- SQL QUERY Section -->
<div class="section" id="sqlQuerySection" style="display: none;">
<div class="section-header">
<h2>SQL QUERY CONSOLE</h2>
</div>
<!-- Query Selector -->
<div class="input-group">
<label for="query-dropdown">Quick Queries & History:</label>
<select id="query-dropdown" onchange="loadSelectedQuery()">
<option value="">-- Select a query --</option>
<optgroup label="Common Queries">
<option value="recent_events">Recent Events</option>
<option value="event_stats">Event Statistics</option>
<option value="subscriptions">Active Subscriptions</option>
<option value="top_pubkeys">Top Pubkeys</option>
<option value="event_kinds">Event Kinds Distribution</option>
<option value="time_stats">Time-based Statistics</option>
</optgroup>
<optgroup label="Query History" id="history-group">
<!-- Dynamically populated from localStorage -->
</optgroup>
</select>
</div>
<!-- Query Editor -->
<div class="input-group">
<label for="sql-input">SQL Query:</label>
<textarea id="sql-input" rows="5" placeholder="SELECT * FROM events LIMIT 10"></textarea>
</div>
<!-- Query Actions -->
<div class="input-group">
<div class="inline-buttons">
<button type="button" id="execute-sql-btn">EXECUTE QUERY</button>
<button type="button" id="clear-sql-btn">CLEAR</button>
<button type="button" id="clear-history-btn">CLEAR HISTORY</button>
</div>
</div>
<!-- Query Results -->
<div class="input-group">
<label>Query Results:</label>
<div id="query-info" class="info-box"></div>
<div id="query-table" class="config-table-container"></div>
</div>
</div>
<!-- Load the official nostr-tools bundle first -->
<!-- <script src="https://laantungir.net/nostr-login-lite/nostr.bundle.js"></script> -->
<script src="/api/nostr.bundle.js"></script>
<!-- Load NOSTR_LOGIN_LITE main library -->
<!-- <script src="https://laantungir.net/nostr-login-lite/nostr-lite.js"></script> -->
<script src="/api/nostr-lite.js"></script>
<!-- Load text_graph library -->
<script src="/api/text_graph.js"></script>
<script src="/api/index.js"></script>
</body>
</html>

5758
api/index.js Normal file

File diff suppressed because it is too large

4282
api/nostr-lite.js Normal file

File diff suppressed because it is too large

11534
api/nostr.bundle.js Normal file

File diff suppressed because it is too large

463
api/text_graph.js Normal file

@@ -0,0 +1,463 @@
/**
* ASCIIBarChart - A dynamic ASCII-based vertical bar chart renderer
*
* Creates real-time animated bar charts using monospaced characters (X)
* with automatic scaling, labels, and responsive font sizing.
*/
class ASCIIBarChart {
/**
* Create a new ASCII bar chart
* @param {string} containerId - The ID of the HTML element to render the chart in
* @param {Object} options - Configuration options
* @param {number} [options.maxHeight=20] - Maximum height of the chart in rows
* @param {number} [options.maxDataPoints=30] - Maximum number of data columns before scrolling
* @param {string} [options.title=''] - Chart title (displayed centered at top)
* @param {string} [options.xAxisLabel=''] - X-axis label (displayed centered at bottom)
* @param {string} [options.yAxisLabel=''] - Y-axis label (displayed vertically on left)
* @param {boolean} [options.autoFitWidth=true] - Automatically adjust font size to fit container width
* @param {boolean} [options.useBinMode=true] - Enable time bin mode for data aggregation
* @param {number} [options.binDuration=4000] - Duration of each time bin in milliseconds (4 seconds default)
* @param {string} [options.xAxisLabelFormat='elapsed'] - X-axis label format: 'elapsed', 'bins', 'timestamps', 'ranges'
* @param {boolean} [options.debug=false] - Enable debug logging
*/
constructor(containerId, options = {}) {
this.container = document.getElementById(containerId);
this.data = [];
this.maxHeight = options.maxHeight || 20;
this.maxDataPoints = options.maxDataPoints || 30;
this.totalDataPoints = 0; // Track total number of data points added
this.title = options.title || '';
this.xAxisLabel = options.xAxisLabel || '';
this.yAxisLabel = options.yAxisLabel || '';
this.autoFitWidth = options.autoFitWidth !== false; // Default to true
this.debug = options.debug || false; // Debug logging option
// Time bin configuration
this.useBinMode = options.useBinMode !== false; // Default to true
this.binDuration = options.binDuration || 4000; // 4 seconds default
this.xAxisLabelFormat = options.xAxisLabelFormat || 'elapsed';
// Time bin data structures
this.bins = [];
this.currentBinIndex = -1;
this.binStartTime = null;
this.binCheckInterval = null;
this.chartStartTime = Date.now();
// Set up resize observer if auto-fit is enabled
if (this.autoFitWidth) {
this.resizeObserver = new ResizeObserver(() => {
this.adjustFontSize();
});
this.resizeObserver.observe(this.container);
}
// Initialize first bin if bin mode is enabled
if (this.useBinMode) {
this.initializeBins();
}
}
/**
* Add a new data point to the chart
* @param {number} value - The numeric value to add
*/
addValue(value) {
// Time bin mode: add value to current active bin count
this.checkBinRotation(); // Ensure we have an active bin
this.bins[this.currentBinIndex].count += value; // Accumulate the value into the active bin
this.totalDataPoints++;
this.render();
this.updateInfo();
}
/**
* Clear all data from the chart
*/
clear() {
this.data = [];
this.totalDataPoints = 0;
if (this.useBinMode) {
this.bins = [];
this.currentBinIndex = -1;
this.binStartTime = null;
this.initializeBins();
}
this.render();
this.updateInfo();
}
/**
* Calculate the width of the chart in characters
* @returns {number} The chart width in characters
* @private
*/
getChartWidth() {
let dataLength = this.maxDataPoints; // Always use maxDataPoints for consistent width
if (dataLength === 0) return 50; // Default width for empty chart
const yAxisPadding = this.yAxisLabel ? 2 : 0;
const yAxisNumbers = 3; // Width of Y-axis numbers
const separator = 1; // The '|' character
const dataWidth = dataLength; // Each column is 1 character wide
const padding = 1; // Extra padding
const totalWidth = yAxisPadding + yAxisNumbers + separator + dataWidth + padding;
// Only log when width changes
if (this.debug && this.lastChartWidth !== totalWidth) {
console.log('getChartWidth changed:', { dataLength, totalWidth, previous: this.lastChartWidth });
this.lastChartWidth = totalWidth;
}
return totalWidth;
}
/**
* Adjust font size to fit container width
* @private
*/
adjustFontSize() {
if (!this.autoFitWidth) return;
const containerWidth = this.container.clientWidth;
const chartWidth = this.getChartWidth();
if (chartWidth === 0) return;
// Calculate optimal font size
// For monospace fonts, character width is approximately 0.6 * font size
// Use a slightly smaller ratio to fit more content
const charWidthRatio = 0.7;
const padding = 30; // Reduce padding to fit more content
const availableWidth = containerWidth - padding;
const optimalFontSize = Math.floor((availableWidth / chartWidth) / charWidthRatio);
// Set reasonable bounds (min 4px, max 20px)
const fontSize = Math.max(4, Math.min(20, optimalFontSize));
// Only log when font size changes
if (this.debug && this.lastFontSize !== fontSize) {
console.log('fontSize changed:', { containerWidth, chartWidth, fontSize, previous: this.lastFontSize });
this.lastFontSize = fontSize;
}
this.container.style.fontSize = fontSize + 'px';
this.container.style.lineHeight = '1.0';
}
/**
* Render the chart to the container
* @private
*/
render() {
let dataToRender = [];
let maxValue = 0;
let minValue = 0;
let valueRange = 0;
if (this.useBinMode) {
// Bin mode: render bin counts
if (this.bins.length === 0) {
this.container.textContent = 'No data yet. Click Start to begin.';
return;
}
// Always create a fixed-length array filled with 0s, then overlay actual bin data
dataToRender = new Array(this.maxDataPoints).fill(0);
// Overlay actual bin data (most recent bins, reversed for left-to-right display)
const startIndex = Math.max(0, this.bins.length - this.maxDataPoints);
const recentBins = this.bins.slice(startIndex);
// Reverse the bins so most recent is on the left, and overlay onto the fixed array
recentBins.reverse().forEach((bin, index) => {
if (index < this.maxDataPoints) {
dataToRender[index] = bin.count;
}
});
if (this.debug) {
console.log('render() dataToRender:', dataToRender, 'bins length:', this.bins.length);
}
maxValue = Math.max(...dataToRender);
minValue = Math.min(...dataToRender);
valueRange = maxValue - minValue;
} else {
// Legacy mode: render individual values
if (this.data.length === 0) {
this.container.textContent = 'No data yet. Click Start to begin.';
return;
}
dataToRender = this.data;
maxValue = Math.max(...this.data);
minValue = Math.min(...this.data);
valueRange = maxValue - minValue;
}
let output = '';
const scale = this.maxHeight;
// Calculate scaling factor: each X represents at least 1 count
const maxCount = Math.max(...dataToRender);
const scaleFactor = Math.max(1, Math.ceil(maxCount / scale)); // 1 X = scaleFactor counts
const scaledMax = Math.ceil(maxCount / scaleFactor) * scaleFactor;
// Calculate Y-axis label width (for vertical text)
const yLabelWidth = this.yAxisLabel ? 2 : 0;
const yAxisPadding = this.yAxisLabel ? ' ' : '';
// Add title if provided (centered)
if (this.title) {
const chartWidth = 4 + this.maxDataPoints; // Y-axis numbers + data columns
const titlePadding = Math.floor((chartWidth - this.title.length) / 2);
output += yAxisPadding + ' '.repeat(Math.max(0, titlePadding)) + this.title + '\n\n';
}
// Draw from top to bottom
for (let row = scale; row > 0; row--) {
let line = '';
// Add vertical Y-axis label character
if (this.yAxisLabel) {
const L = this.yAxisLabel.length;
const startRow = Math.floor((scale - L) / 2) + 1;
const relativeRow = scale - row + 1; // 1 at top, scale at bottom
if (relativeRow >= startRow && relativeRow < startRow + L) {
const labelIndex = relativeRow - startRow;
line += this.yAxisLabel[labelIndex] + ' ';
} else {
line += ' ';
}
}
// Calculate the actual count value this row represents (1 at bottom, increasing upward)
const rowCount = (row - 1) * scaleFactor + 1;
// Add Y-axis label (show actual count values)
line += String(rowCount).padStart(3, ' ') + ' |';
// Draw each column
for (let i = 0; i < dataToRender.length; i++) {
const count = dataToRender[i];
const scaledHeight = Math.ceil(count / scaleFactor);
if (scaledHeight >= row) {
line += 'X'; // Filled cell (columns are 1 character wide)
} else {
line += ' '; // Empty cell
}
}
output += line + '\n';
}
// Draw X-axis
output += yAxisPadding + ' +' + '-'.repeat(this.maxDataPoints) + '\n'; // X-axis baseline
// Draw X-axis labels based on mode and format
let xAxisLabels = yAxisPadding + ' '; // Initial padding to align with X-axis
// Determine label interval (every 5 columns)
const labelInterval = 5;
// Generate all labels first and store in array
let labels = [];
for (let i = 0; i < this.maxDataPoints; i++) {
if (i % labelInterval === 0) {
let label = '';
if (this.useBinMode) {
// For bin mode, show labels for all possible positions
// i=0 is leftmost (most recent), i=maxDataPoints-1 is rightmost (oldest)
const elapsedSec = (i * this.binDuration) / 1000;
// Format with appropriate precision for sub-second bins
if (this.binDuration < 1000) {
// Show decimal seconds for sub-second bins
label = elapsedSec.toFixed(1) + 's';
} else {
// Show whole seconds for 1+ second bins
label = String(Math.round(elapsedSec)) + 's';
}
} else {
// For legacy mode, show data point numbers
const startIndex = Math.max(1, this.totalDataPoints - this.maxDataPoints + 1);
label = String(startIndex + i);
}
labels.push(label);
}
}
// Build the label string with calculated spacing
for (let i = 0; i < labels.length; i++) {
const label = labels[i];
xAxisLabels += label;
// Add spacing: labelInterval - label.length (except for last label)
if (i < labels.length - 1) {
const spacing = labelInterval - label.length;
xAxisLabels += ' '.repeat(spacing);
}
}
// Ensure the label line extends to match the X-axis dash line length
// The dash line is this.maxDataPoints characters long, starting after " +"
const dashLineLength = this.maxDataPoints;
const minLabelLineLength = yAxisPadding.length + 4 + dashLineLength; // 4 for " "
if (xAxisLabels.length < minLabelLineLength) {
xAxisLabels += ' '.repeat(minLabelLineLength - xAxisLabels.length);
}
output += xAxisLabels + '\n';
// Add X-axis label if provided
if (this.xAxisLabel) {
const labelPadding = Math.floor((this.maxDataPoints - this.xAxisLabel.length) / 2); // Center the label under the columns
output += '\n' + yAxisPadding + ' ' + ' '.repeat(Math.max(0, labelPadding)) + this.xAxisLabel + '\n';
}
this.container.textContent = output;
// Adjust font size to fit width (only once at initialization)
if (this.autoFitWidth) {
this.adjustFontSize();
}
// Update the external info display
if (this.useBinMode) {
const binCounts = this.bins.map(bin => bin.count);
const scaleFactor = Math.max(1, Math.ceil(maxValue / scale));
document.getElementById('values').textContent = `[${dataToRender.join(', ')}]`;
document.getElementById('max-value').textContent = maxValue;
document.getElementById('scale').textContent = `Min: ${minValue}, Max: ${maxValue}, 1X=${scaleFactor} counts`;
} else {
document.getElementById('values').textContent = `[${this.data.join(', ')}]`;
document.getElementById('max-value').textContent = maxValue;
document.getElementById('scale').textContent = `Min: ${minValue}, Max: ${maxValue}, Height: ${scale}`;
}
}
/**
* Update the info display
* @private
*/
updateInfo() {
if (this.useBinMode) {
const totalCount = this.bins.reduce((sum, bin) => sum + bin.count, 0);
document.getElementById('count').textContent = totalCount;
} else {
document.getElementById('count').textContent = this.data.length;
}
}
/**
* Initialize the bin system
* @private
*/
initializeBins() {
this.bins = [];
this.currentBinIndex = -1;
this.binStartTime = null;
this.chartStartTime = Date.now();
// Create first bin
this.rotateBin();
// Set up automatic bin rotation check
this.binCheckInterval = setInterval(() => {
this.checkBinRotation();
}, 100); // Check every 100ms for responsiveness
}
/**
* Check if current bin should rotate and create new bin if needed
* @private
*/
checkBinRotation() {
if (!this.useBinMode || !this.binStartTime) return;
const now = Date.now();
if ((now - this.binStartTime) >= this.binDuration) {
this.rotateBin();
}
}
/**
* Rotate to a new bin, finalizing the current one
*/
rotateBin() {
// Finalize current bin if it exists
if (this.currentBinIndex >= 0) {
this.bins[this.currentBinIndex].isActive = false;
}
// Create new bin
const newBin = {
startTime: Date.now(),
count: 0,
isActive: true
};
this.bins.push(newBin);
this.currentBinIndex = this.bins.length - 1;
this.binStartTime = newBin.startTime;
// Keep only the most recent bins
if (this.bins.length > this.maxDataPoints) {
this.bins.shift();
this.currentBinIndex--;
}
// Ensure currentBinIndex points to the last bin (the active one)
this.currentBinIndex = this.bins.length - 1;
// Force a render to update the display immediately
this.render();
this.updateInfo();
}
/**
* Format X-axis label for a bin based on the configured format
* @param {number} binIndex - Index of the bin
* @returns {string} Formatted label
* @private
*/
formatBinLabel(binIndex) {
const bin = this.bins[binIndex];
if (!bin) return ' ';
switch (this.xAxisLabelFormat) {
case 'bins':
return String(binIndex + 1).padStart(2, ' ');
case 'timestamps':
const time = new Date(bin.startTime);
return time.toLocaleTimeString('en-US', {
hour12: false,
hour: '2-digit',
minute: '2-digit',
second: '2-digit'
}).replace(/:/g, '');
case 'ranges':
const startSec = Math.floor((bin.startTime - this.chartStartTime) / 1000);
const endSec = startSec + Math.floor(this.binDuration / 1000);
return `${startSec}-${endSec}`;
case 'elapsed':
default:
// For elapsed time, always show time relative to the first bin (index 0)
// This keeps the leftmost label as 0s and increases to the right
const firstBinTime = this.bins[0] ? this.bins[0].startTime : this.chartStartTime;
const elapsedSec = Math.floor((bin.startTime - firstBinTime) / 1000);
return String(elapsedSec).padStart(2, ' ') + 's';
}
}
}


@@ -1,391 +0,0 @@
#!/bin/bash
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
print_status() { echo -e "${BLUE}[INFO]${NC} $1"; }
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1"; }
print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Global variables
COMMIT_MESSAGE=""
RELEASE_MODE=false
show_usage() {
echo "C-Relay Build and Push Script"
echo ""
echo "Usage:"
echo " $0 \"commit message\" - Default: compile, increment patch, commit & push"
echo " $0 -r \"commit message\" - Release: compile x86+arm64, increment minor, create release"
echo ""
echo "Examples:"
echo " $0 \"Fixed event validation bug\""
echo " $0 --release \"Major release with new features\""
echo ""
echo "Default Mode (patch increment):"
echo " - Compile C-Relay"
echo " - Increment patch version (v1.2.3 → v1.2.4)"
echo " - Git add, commit with message, and push"
echo ""
echo "Release Mode (-r flag):"
echo " - Compile C-Relay for x86_64 and arm64"
echo " - Increment minor version, zero patch (v1.2.3 → v1.3.0)"
echo " - Git add, commit, push, and create Gitea release"
echo ""
echo "Requirements for Release Mode:"
echo " - For ARM64 builds: make install-arm64-deps (optional - will build x86_64 only if missing)"
echo " - Gitea token in ~/.gitea_token for release uploads"
}
# Parse command line arguments (show_usage is defined above so -h can call it)
while [[ $# -gt 0 ]]; do
case $1 in
-r|--release)
RELEASE_MODE=true
shift
;;
-h|--help)
show_usage
exit 0
;;
*)
# First non-flag argument is the commit message
if [[ -z "$COMMIT_MESSAGE" ]]; then
COMMIT_MESSAGE="$1"
fi
shift
;;
esac
done
# Validate inputs
if [[ -z "$COMMIT_MESSAGE" ]]; then
print_error "Commit message is required"
echo ""
show_usage
exit 1
fi
# Check if we're in a git repository
check_git_repo() {
if ! git rev-parse --git-dir > /dev/null 2>&1; then
print_error "Not in a git repository"
exit 1
fi
}
# Function to get current version and increment appropriately
increment_version() {
local increment_type="$1" # "patch" or "minor"
print_status "Getting current version..."
# Get the highest version tag (not chronologically latest)
LATEST_TAG=$(git tag -l 'v*.*.*' | sort -V | tail -n 1 || echo "")
if [[ -z "$LATEST_TAG" ]]; then
LATEST_TAG="v0.0.0"
print_warning "No version tags found, starting from $LATEST_TAG"
fi
# Extract version components (remove 'v' prefix)
VERSION=${LATEST_TAG#v}
# Parse major.minor.patch using regex
if [[ $VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
MAJOR=${BASH_REMATCH[1]}
MINOR=${BASH_REMATCH[2]}
PATCH=${BASH_REMATCH[3]}
else
print_error "Invalid version format in tag: $LATEST_TAG"
print_error "Expected format: v0.1.0"
exit 1
fi
# Increment version based on type
if [[ "$increment_type" == "minor" ]]; then
# Minor release: increment minor, zero patch
NEW_MINOR=$((MINOR + 1))
NEW_PATCH=0
NEW_VERSION="v${MAJOR}.${NEW_MINOR}.${NEW_PATCH}"
print_status "Release mode: incrementing minor version"
else
# Default: increment patch
NEW_PATCH=$((PATCH + 1))
NEW_VERSION="v${MAJOR}.${MINOR}.${NEW_PATCH}"
print_status "Default mode: incrementing patch version"
fi
print_status "Current version: $LATEST_TAG"
print_status "New version: $NEW_VERSION"
# Export for use in other functions
export NEW_VERSION
}
# Function to compile the C-Relay project
compile_project() {
print_status "Compiling C-Relay..."
# Clean previous build
if make clean > /dev/null 2>&1; then
print_success "Cleaned previous build"
else
print_warning "Clean failed or no Makefile found"
fi
# Compile the project
if make > /dev/null 2>&1; then
print_success "C-Relay compiled successfully"
else
print_error "Compilation failed"
exit 1
fi
}
# Function to build release binaries
build_release_binaries() {
print_status "Building release binaries..."
# Build x86_64 version
print_status "Building x86_64 version..."
make clean > /dev/null 2>&1
if make x86 > /dev/null 2>&1; then
if [[ -f "build/c_relay_x86" ]]; then
cp build/c_relay_x86 c-relay-x86_64
print_success "x86_64 binary created: c-relay-x86_64"
else
print_error "x86_64 binary not found after compilation"
exit 1
fi
else
print_error "x86_64 build failed"
exit 1
fi
# Try to build ARM64 version
print_status "Attempting ARM64 build..."
make clean > /dev/null 2>&1
if make arm64 > /dev/null 2>&1; then
if [[ -f "build/c_relay_arm64" ]]; then
cp build/c_relay_arm64 c-relay-arm64
print_success "ARM64 binary created: c-relay-arm64"
else
print_warning "ARM64 binary not found after compilation"
fi
else
print_warning "ARM64 build failed - ARM64 cross-compilation not properly set up"
print_status "Only x86_64 binary will be included in release"
fi
# Restore normal build
make clean > /dev/null 2>&1
make > /dev/null 2>&1
}
# Function to commit and push changes
git_commit_and_push() {
print_status "Preparing git commit..."
# Stage all changes
if git add . > /dev/null 2>&1; then
print_success "Staged all changes"
else
print_error "Failed to stage changes"
exit 1
fi
# Check if there are changes to commit
if git diff --staged --quiet; then
print_warning "No changes to commit"
else
# Commit changes
if git commit -m "$NEW_VERSION - $COMMIT_MESSAGE" > /dev/null 2>&1; then
print_success "Committed changes"
else
print_error "Failed to commit changes"
exit 1
fi
fi
# Create new git tag
if git tag "$NEW_VERSION" > /dev/null 2>&1; then
print_success "Created tag: $NEW_VERSION"
else
print_warning "Tag $NEW_VERSION already exists"
fi
# Push changes and tags
print_status "Pushing to remote repository..."
if git push > /dev/null 2>&1; then
print_success "Pushed changes"
else
print_error "Failed to push changes"
exit 1
fi
if git push --tags > /dev/null 2>&1; then
print_success "Pushed tags"
else
print_warning "Failed to push tags"
fi
}
# Function to create Gitea release
create_gitea_release() {
print_status "Creating Gitea release..."
# Check for Gitea token
if [[ ! -f "$HOME/.gitea_token" ]]; then
print_warning "No ~/.gitea_token found. Skipping release creation."
print_warning "Create ~/.gitea_token with your Gitea access token to enable releases."
return 0
fi
local token=$(cat "$HOME/.gitea_token" | tr -d '\n\r')
local api_url="https://git.laantungir.net/api/v1/repos/laantungir/c-relay"
# Create release
print_status "Creating release $NEW_VERSION..."
local response=$(curl -s -X POST "$api_url/releases" \
-H "Authorization: token $token" \
-H "Content-Type: application/json" \
-d "{\"tag_name\": \"$NEW_VERSION\", \"name\": \"$NEW_VERSION\", \"body\": \"$COMMIT_MESSAGE\"}")
if echo "$response" | grep -q '"id"'; then
print_success "Created release $NEW_VERSION"
upload_release_binaries "$api_url" "$token"
elif echo "$response" | grep -q "already exists"; then
print_warning "Release $NEW_VERSION already exists"
upload_release_binaries "$api_url" "$token"
else
print_error "Failed to create release $NEW_VERSION"
print_error "Response: $response"
# Try to check if the release exists anyway
print_status "Checking if release exists..."
local check_response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
if echo "$check_response" | grep -q '"id"'; then
print_warning "Release exists but creation response was unexpected"
upload_release_binaries "$api_url" "$token"
else
print_error "Release does not exist and creation failed"
return 1
fi
fi
}
# Function to upload release binaries
upload_release_binaries() {
local api_url="$1"
local token="$2"
# Get release ID with more robust parsing
print_status "Getting release ID for $NEW_VERSION..."
local response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
local release_id=$(echo "$response" | grep -o '"id":[0-9]*' | head -n1 | cut -d: -f2)
if [[ -z "$release_id" ]]; then
print_error "Could not get release ID for $NEW_VERSION"
print_error "API Response: $response"
# Try to list all releases to debug
print_status "Available releases:"
curl -s -H "Authorization: token $token" "$api_url/releases" | grep -o '"tag_name":"[^"]*"' | head -5
return 1
fi
print_success "Found release ID: $release_id"
# Upload x86_64 binary
if [[ -f "c-relay-x86_64" ]]; then
print_status "Uploading x86_64 binary..."
if curl -s -X POST "$api_url/releases/$release_id/assets" \
-H "Authorization: token $token" \
-F "attachment=@c-relay-x86_64;filename=c-relay-${NEW_VERSION}-linux-x86_64" > /dev/null; then
print_success "Uploaded x86_64 binary"
else
print_warning "Failed to upload x86_64 binary"
fi
fi
# Upload ARM64 binary
if [[ -f "c-relay-arm64" ]]; then
print_status "Uploading ARM64 binary..."
if curl -s -X POST "$api_url/releases/$release_id/assets" \
-H "Authorization: token $token" \
-F "attachment=@c-relay-arm64;filename=c-relay-${NEW_VERSION}-linux-arm64" > /dev/null; then
print_success "Uploaded ARM64 binary"
else
print_warning "Failed to upload ARM64 binary"
fi
fi
}
# Function to clean up release binaries
cleanup_release_binaries() {
if [[ -f "c-relay-x86_64" ]]; then
rm -f c-relay-x86_64
print_status "Cleaned up x86_64 binary"
fi
if [[ -f "c-relay-arm64" ]]; then
rm -f c-relay-arm64
print_status "Cleaned up ARM64 binary"
fi
}
# Main execution
main() {
print_status "C-Relay Build and Push Script"
# Check prerequisites
check_git_repo
if [[ "$RELEASE_MODE" == true ]]; then
print_status "=== RELEASE MODE ==="
# Increment minor version for releases
increment_version "minor"
# Compile project first
compile_project
# Build release binaries
build_release_binaries
# Commit and push
git_commit_and_push
# Create Gitea release with binaries
create_gitea_release
# Cleanup
cleanup_release_binaries
print_success "Release $NEW_VERSION completed successfully!"
print_status "Binaries uploaded to Gitea release"
else
print_status "=== DEFAULT MODE ==="
# Increment patch version for regular commits
increment_version "patch"
# Compile project
compile_project
# Commit and push
git_commit_and_push
print_success "Build and push completed successfully!"
print_status "Version $NEW_VERSION pushed to repository"
fi
}
# Execute main function
main

207
build_static.sh Executable file

@@ -0,0 +1,207 @@
#!/bin/bash
# Build fully static MUSL binaries for C-Relay using Alpine Docker
# Produces truly portable binaries with zero runtime dependencies
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BUILD_DIR="$SCRIPT_DIR/build"
DOCKERFILE="$SCRIPT_DIR/Dockerfile.alpine-musl"
# Parse command line arguments
DEBUG_BUILD=false
if [[ "$1" == "--debug" ]]; then
DEBUG_BUILD=true
echo "=========================================="
echo "C-Relay MUSL Static Binary Builder (DEBUG MODE)"
echo "=========================================="
else
echo "=========================================="
echo "C-Relay MUSL Static Binary Builder (PRODUCTION MODE)"
echo "=========================================="
fi
echo "Project directory: $SCRIPT_DIR"
echo "Build directory: $BUILD_DIR"
echo "Debug build: $DEBUG_BUILD"
echo ""
# Create build directory
mkdir -p "$BUILD_DIR"
# Check if Docker is available
if ! command -v docker &> /dev/null; then
echo "ERROR: Docker is not installed or not in PATH"
echo ""
echo "Docker is required to build MUSL static binaries."
echo "Please install Docker:"
echo " - Ubuntu/Debian: sudo apt install docker.io"
echo " - Or visit: https://docs.docker.com/engine/install/"
echo ""
exit 1
fi
# Check if Docker daemon is running
if ! docker info &> /dev/null; then
echo "ERROR: Docker daemon is not running or user not in docker group"
echo ""
echo "Please start Docker and ensure you're in the docker group:"
echo " - sudo systemctl start docker"
echo " - sudo usermod -aG docker $USER && newgrp docker"
echo " - Or start Docker Desktop"
echo ""
exit 1
fi
DOCKER_CMD="docker"
echo "✓ Docker is available and running"
echo ""
# Detect architecture
ARCH=$(uname -m)
case "$ARCH" in
x86_64)
PLATFORM="linux/amd64"
OUTPUT_NAME="c_relay_static_x86_64"
;;
aarch64|arm64)
PLATFORM="linux/arm64"
OUTPUT_NAME="c_relay_static_arm64"
;;
*)
echo "WARNING: Unknown architecture: $ARCH"
echo "Defaulting to linux/amd64"
PLATFORM="linux/amd64"
OUTPUT_NAME="c_relay_static_${ARCH}"
;;
esac
echo "Building for platform: $PLATFORM"
echo "Output binary: $OUTPUT_NAME"
echo ""
# Build the Docker image
echo "=========================================="
echo "Step 1: Building Alpine Docker image"
echo "=========================================="
echo "This will:"
echo " - Use Alpine Linux (native MUSL)"
echo " - Build all dependencies statically"
echo " - Compile c-relay with full static linking"
echo ""
$DOCKER_CMD build \
--platform "$PLATFORM" \
--build-arg DEBUG_BUILD=$DEBUG_BUILD \
-f "$DOCKERFILE" \
-t c-relay-musl-builder:latest \
--progress=plain \
. || {
echo ""
echo "ERROR: Docker build failed"
echo "Check the output above for details"
exit 1
}
echo ""
echo "✓ Docker image built successfully"
echo ""
# Extract the binary from the container
echo "=========================================="
echo "Step 2: Extracting static binary"
echo "=========================================="
# Build the builder stage to extract the binary
$DOCKER_CMD build \
--platform "$PLATFORM" \
--build-arg DEBUG_BUILD=$DEBUG_BUILD \
--target builder \
-f "$DOCKERFILE" \
-t c-relay-static-builder-stage:latest \
. > /dev/null 2>&1
# Create a temporary container to copy the binary
CONTAINER_ID=$($DOCKER_CMD create c-relay-static-builder-stage:latest)
# Copy binary from container
$DOCKER_CMD cp "$CONTAINER_ID:/build/c_relay_static" "$BUILD_DIR/$OUTPUT_NAME" || {
echo "ERROR: Failed to extract binary from container"
$DOCKER_CMD rm "$CONTAINER_ID" 2>/dev/null
exit 1
}
# Clean up container
$DOCKER_CMD rm "$CONTAINER_ID" > /dev/null
echo "✓ Binary extracted to: $BUILD_DIR/$OUTPUT_NAME"
echo ""
# Make binary executable
chmod +x "$BUILD_DIR/$OUTPUT_NAME"
# Verify the binary
echo "=========================================="
echo "Step 3: Verifying static binary"
echo "=========================================="
echo ""
echo "Checking for dynamic dependencies:"
if LDD_OUTPUT=$(timeout 5 ldd "$BUILD_DIR/$OUTPUT_NAME" 2>&1); then
if echo "$LDD_OUTPUT" | grep -q "not a dynamic executable"; then
echo "✓ Binary is fully static (no dynamic dependencies)"
TRULY_STATIC=true
elif echo "$LDD_OUTPUT" | grep -q "statically linked"; then
echo "✓ Binary is statically linked"
TRULY_STATIC=true
else
echo "⚠ WARNING: Binary may have dynamic dependencies:"
echo "$LDD_OUTPUT"
TRULY_STATIC=false
fi
else
# ldd failed or timed out - check with file command instead
if file "$BUILD_DIR/$OUTPUT_NAME" | grep -q "statically linked"; then
echo "✓ Binary is statically linked (verified with file command)"
TRULY_STATIC=true
else
echo "⚠ Could not verify static linking (ldd check failed)"
TRULY_STATIC=false
fi
fi
echo ""
echo "File size: $(ls -lh "$BUILD_DIR/$OUTPUT_NAME" | awk '{print $5}')"
echo ""
# Test if binary runs (capture output so the if tests the binary's exit status, not head's)
echo "Testing binary execution:"
if VERSION_OUTPUT=$("$BUILD_DIR/$OUTPUT_NAME" --version 2>&1); then
echo "$VERSION_OUTPUT" | head -5
echo "✓ Binary executes successfully"
else
echo "$VERSION_OUTPUT" | head -5
echo "⚠ Binary execution test failed (this may be normal if --version is not supported)"
fi
echo ""
# Summary
echo "=========================================="
echo "Build Summary"
echo "=========================================="
echo "Binary: $BUILD_DIR/$OUTPUT_NAME"
echo "Size: $(du -h "$BUILD_DIR/$OUTPUT_NAME" | cut -f1)"
echo "Platform: $PLATFORM"
if [ "$DEBUG_BUILD" = true ]; then
echo "Build Type: DEBUG (with symbols, no optimization)"
else
echo "Build Type: PRODUCTION (optimized, stripped)"
fi
if [ "$TRULY_STATIC" = true ]; then
echo "Linkage: Fully static binary (Alpine MUSL-based)"
echo "Portability: Works on ANY Linux distribution"
else
echo "Linkage: Static binary (may have minimal dependencies)"
fi
echo ""
echo "✓ Build complete!"
echo ""

Binary file not shown.

c-relay.code-workspace Normal file (+8 lines)

@@ -0,0 +1,8 @@
{
"folders": [
{
"path": "."
}
],
"settings": {}
}

c_utils_lib Submodule added at 442facd7e3

db/README.md Deleted file

@@ -1,228 +0,0 @@
# C Nostr Relay Database
This directory contains the SQLite database schema and initialization scripts for the C Nostr Relay implementation.
## Files
- **`schema.sql`** - Complete database schema based on nostr-rs-relay v18
- **`init.sh`** - Database initialization script
- **`c_nostr_relay.db`** - SQLite database file (created after running init.sh)
## Quick Start
1. **Initialize the database:**
```bash
cd db
./init.sh
```
2. **Force reinitialize (removes existing database):**
```bash
./init.sh --force
```
3. **Initialize with optimization and info:**
```bash
./init.sh --info --optimize
```
## Database Schema
The schema is fully compatible with the Nostr protocol and includes:
### Core Tables
- **`event`** - Main event storage with all Nostr event data
- **`tag`** - Denormalized tag index for efficient queries
- **`user_verification`** - NIP-05 verification tracking
- **`account`** - User account management (optional)
- **`invoice`** - Lightning payment tracking (optional)
### Key Features
- ✅ **NIP-01 compliant** - Full basic protocol support
- ✅ **Replaceable events** - Supports kinds 0, 3, 10000-19999
- ✅ **Parameterized replaceable** - Supports kinds 30000-39999 with `d` tags
- ✅ **Event deletion** - NIP-09 soft deletion with `hidden` column
- ✅ **Event expiration** - NIP-40 automatic cleanup
- ✅ **Authentication** - NIP-42 client authentication
- ✅ **NIP-05 verification** - Domain-based identity verification
- ✅ **Performance optimized** - Comprehensive indexing strategy
### Schema Version
Current version: **v18** (compatible with nostr-rs-relay v18)
## Database Structure
### Event Storage
```sql
CREATE TABLE event (
id INTEGER PRIMARY KEY,
event_hash BLOB NOT NULL, -- 32-byte SHA256 hash
first_seen INTEGER NOT NULL, -- relay receive timestamp
created_at INTEGER NOT NULL, -- event creation timestamp
expires_at INTEGER, -- NIP-40 expiration
author BLOB NOT NULL, -- 32-byte pubkey
delegated_by BLOB, -- NIP-26 delegator
kind INTEGER NOT NULL, -- event kind
hidden INTEGER DEFAULT FALSE, -- soft deletion flag
content TEXT NOT NULL -- complete JSON event
);
```
### Tag Indexing
```sql
CREATE TABLE tag (
id INTEGER PRIMARY KEY,
event_id INTEGER NOT NULL,
name TEXT, -- tag name ("e", "p", etc.)
value TEXT, -- tag value
created_at INTEGER NOT NULL, -- denormalized for performance
kind INTEGER NOT NULL -- denormalized for performance
);
```
## Performance Features
### Optimized Indexes
- **Hash-based lookups** - `event_hash_index` for fast event retrieval by hash
- **Author queries** - `author_index`, `author_created_at_index`
- **Kind filtering** - `kind_index`, `kind_created_at_index`
- **Tag searching** - `tag_covering_index` for efficient tag queries
- **Composite queries** - Multi-column indexes for complex filters
### Query Optimization
- **Denormalized tags** - Includes `kind` and `created_at` in tag table
- **Binary storage** - BLOBs for hex data (pubkeys, hashes)
- **WAL mode** - Write-Ahead Logging for concurrent access
- **Automatic cleanup** - Triggers for data integrity
## Usage Examples
### Basic Operations
1. **Insert an event:**
```sql
INSERT INTO event (event_hash, first_seen, created_at, author, kind, content)
VALUES (?, ?, ?, ?, ?, ?);
```
2. **Query by author:**
```sql
SELECT content FROM event
WHERE author = ? AND hidden != TRUE
ORDER BY created_at DESC;
```
3. **Filter by tags:**
```sql
SELECT e.content FROM event e
JOIN tag t ON e.id = t.event_id
WHERE t.name = 'p' AND t.value = ? AND e.hidden != TRUE;
```
### Advanced Queries
1. **Get replaceable event (latest only):**
```sql
SELECT content FROM event
WHERE author = ? AND kind = ? AND hidden != TRUE
ORDER BY created_at DESC LIMIT 1;
```
2. **Tag-based filtering (NIP-01 filters):**
```sql
SELECT e.content FROM event e
WHERE e.id IN (
SELECT t.event_id FROM tag t
WHERE t.name = ? AND t.value IN (?, ?, ?)
) AND e.hidden != TRUE;
```
## Maintenance
### Regular Operations
1. **Check database integrity:**
```bash
sqlite3 c_nostr_relay.db "PRAGMA integrity_check;"
```
2. **Optimize database:**
```bash
sqlite3 c_nostr_relay.db "PRAGMA optimize; VACUUM; ANALYZE;"
```
3. **Clean expired events:**
```sql
DELETE FROM event WHERE expires_at <= strftime('%s', 'now');
```
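These steps can be scheduled; a hypothetical crontab entry combining cleanup and optimization (adjust the database path, and note that `%` must be escaped in crontab lines):
```bash
# Daily at 03:00: purge expired events, then optimize
0 3 * * * sqlite3 /path/to/c_nostr_relay.db "DELETE FROM event WHERE expires_at <= strftime('\%s','now'); PRAGMA optimize;"
```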
### Monitoring
1. **Database size:**
```bash
ls -lh c_nostr_relay.db
```
2. **Table statistics:**
```sql
SELECT name, COUNT(*) as count FROM (
SELECT 'events' as name FROM event UNION ALL
SELECT 'tags' as name FROM tag UNION ALL
SELECT 'verifications' as name FROM user_verification
) GROUP BY name;
```
## Migration Support
The schema includes a migration system for future updates:
```sql
CREATE TABLE schema_info (
version INTEGER PRIMARY KEY,
applied_at INTEGER NOT NULL,
description TEXT
);
```
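When a migration is applied, a row can be recorded in this table; a sketch with a hypothetical version number:
```bash
sqlite3 c_nostr_relay.db "INSERT INTO schema_info (version, applied_at, description) VALUES (19, strftime('%s','now'), 'add example index');"
```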
## Security Considerations
1. **Input validation** - Always validate event JSON and signatures
2. **Rate limiting** - Implement at application level
3. **Access control** - Use `account` table for permissions
4. **Backup strategy** - Regular database backups recommended
## Compatibility
- **SQLite version** - Requires SQLite 3.8.0+
- **nostr-rs-relay** - Schema compatible with v18
- **NIPs supported** - 01, 02, 05, 09, 10, 11, 26, 40, 42
- **C libraries** - Compatible with sqlite3 C API
## Troubleshooting
### Common Issues
1. **Database locked error:**
- Ensure proper connection closing in your C code
- Check for long-running transactions
2. **Performance issues:**
- Run `PRAGMA optimize;` regularly
- Consider `VACUUM` if database grew significantly
3. **Schema errors:**
- Verify SQLite version compatibility
- Check foreign key constraints
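The checks above map to one-liners (a sketch):
```bash
sqlite3 c_nostr_relay.db "PRAGMA foreign_key_check;"   # find orphaned references
sqlite3 c_nostr_relay.db "PRAGMA optimize; VACUUM;"    # refresh stats, reclaim space
sqlite3 --version                                      # confirm SQLite 3.8.0+
```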
### Getting Help
- Check the main project README for C implementation details
- Review nostr-rs-relay documentation for reference implementation
- Consult Nostr NIPs for protocol specifications
## License
This database schema is part of the C Nostr Relay project and follows the same license terms.

Binary file not shown.

Binary file not shown.

Binary file not shown.

db/init.sh Deleted file

@@ -1,234 +0,0 @@
#!/bin/bash
# C Nostr Relay Database Initialization Script
# Creates and initializes the SQLite database with proper schema
set -e # Exit on any error
# Configuration
DB_DIR="$(dirname "$0")"
DB_NAME="c_nostr_relay.db"
DB_PATH="${DB_DIR}/${DB_NAME}"
SCHEMA_FILE="${DB_DIR}/schema.sql"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging functions
log_info() {
echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Check if SQLite3 is installed
check_sqlite() {
if ! command -v sqlite3 &> /dev/null; then
log_error "sqlite3 is not installed. Please install it first:"
echo " Ubuntu/Debian: sudo apt-get install sqlite3"
echo " CentOS/RHEL: sudo yum install sqlite"
echo " macOS: brew install sqlite3"
exit 1
fi
local version=$(sqlite3 --version | cut -d' ' -f1)
log_info "Using SQLite version: $version"
}
# Create database directory if it doesn't exist
create_db_directory() {
if [ ! -d "$DB_DIR" ]; then
log_info "Creating database directory: $DB_DIR"
mkdir -p "$DB_DIR"
fi
}
# Backup existing database if it exists
backup_existing_db() {
if [ -f "$DB_PATH" ]; then
local backup_path="${DB_PATH}.backup.$(date +%Y%m%d_%H%M%S)"
log_warning "Existing database found. Creating backup: $backup_path"
cp "$DB_PATH" "$backup_path"
fi
}
# Initialize the database with schema
init_database() {
log_info "Initializing database: $DB_PATH"
if [ ! -f "$SCHEMA_FILE" ]; then
log_error "Schema file not found: $SCHEMA_FILE"
exit 1
fi
# Remove existing database if --force flag is used
if [ "$1" = "--force" ] && [ -f "$DB_PATH" ]; then
log_warning "Force flag detected. Removing existing database."
rm -f "$DB_PATH"
fi
# Create the database and apply schema
log_info "Applying schema from: $SCHEMA_FILE"
if sqlite3 "$DB_PATH" < "$SCHEMA_FILE"; then
log_success "Database schema applied successfully"
else
log_error "Failed to apply database schema"
exit 1
fi
}
# Verify database integrity
verify_database() {
log_info "Verifying database integrity..."
# Check if database file exists and is not empty
if [ ! -s "$DB_PATH" ]; then
log_error "Database file is empty or doesn't exist"
exit 1
fi
# Run SQLite integrity check
local integrity_result=$(sqlite3 "$DB_PATH" "PRAGMA integrity_check;")
if [ "$integrity_result" = "ok" ]; then
log_success "Database integrity check passed"
else
log_error "Database integrity check failed: $integrity_result"
exit 1
fi
# Verify schema version
local schema_version=$(sqlite3 "$DB_PATH" "PRAGMA user_version;")
log_info "Database schema version: $schema_version"
# Check that main tables exist
local table_count=$(sqlite3 "$DB_PATH" "SELECT count(*) FROM sqlite_master WHERE type='table' AND name IN ('events', 'schema_info');")
if [ "$table_count" -eq 2 ]; then
log_success "Core tables created successfully"
else
log_error "Missing core tables (expected 2, found $table_count)"
exit 1
fi
}
# Display database information
show_db_info() {
log_info "Database Information:"
echo " Location: $DB_PATH"
echo " Size: $(du -h "$DB_PATH" | cut -f1)"
log_info "Database Tables:"
sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;" | sed 's/^/ - /'
log_info "Database Indexes:"
sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='index' AND name NOT LIKE 'sqlite_%' ORDER BY name;" | sed 's/^/ - /'
log_info "Database Views:"
sqlite3 "$DB_PATH" "SELECT name FROM sqlite_master WHERE type='view' ORDER BY name;" | sed 's/^/ - /'
}
# Run database optimization
optimize_database() {
log_info "Running database optimization..."
sqlite3 "$DB_PATH" "PRAGMA optimize; VACUUM; ANALYZE;"
log_success "Database optimization completed"
}
# Print usage information
print_usage() {
echo "Usage: $0 [OPTIONS]"
echo ""
echo "Initialize SQLite database for C Nostr Relay"
echo ""
echo "Options:"
echo " --force Remove existing database before initialization"
echo " --info Show database information after initialization"
echo " --optimize Run database optimization after initialization"
echo " --help Show this help message"
echo ""
echo "Examples:"
echo " $0 # Initialize database (with backup if exists)"
echo " $0 --force # Force reinitialize database"
echo " $0 --info --optimize # Initialize with info and optimization"
}
# Main execution
main() {
local force_flag=false
local show_info=false
local optimize=false
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
--force)
force_flag=true
shift
;;
--info)
show_info=true
shift
;;
--optimize)
optimize=true
shift
;;
--help)
print_usage
exit 0
;;
*)
log_error "Unknown option: $1"
print_usage
exit 1
;;
esac
done
log_info "Starting C Nostr Relay database initialization..."
# Execute initialization steps
check_sqlite
create_db_directory
if [ "$force_flag" = false ]; then
backup_existing_db
fi
if [ "$force_flag" = true ]; then
init_database --force
else
init_database
fi
verify_database
if [ "$optimize" = true ]; then
optimize_database
fi
if [ "$show_info" = true ]; then
show_db_info
fi
log_success "Database initialization completed successfully!"
echo ""
echo "Database ready at: $DB_PATH"
echo "You can now start your C Nostr Relay application."
}
# Execute main function with all arguments
main "$@"

db/schema.sql Deleted file

@@ -1,181 +0,0 @@
-- C Nostr Relay Database Schema
-- SQLite schema for storing Nostr events with JSON tags support
-- Schema version tracking
PRAGMA user_version = 2;
-- Enable foreign key support
PRAGMA foreign_keys = ON;
-- Optimize for performance
PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
PRAGMA cache_size = 10000;
-- Core events table with hybrid single-table design
CREATE TABLE events (
id TEXT PRIMARY KEY, -- Nostr event ID (hex string)
pubkey TEXT NOT NULL, -- Public key of event author (hex string)
created_at INTEGER NOT NULL, -- Event creation timestamp (Unix timestamp)
kind INTEGER NOT NULL, -- Event kind (0-65535)
event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
content TEXT NOT NULL, -- Event content (text content only)
sig TEXT NOT NULL, -- Event signature (hex string)
tags JSON NOT NULL DEFAULT '[]', -- Event tags as JSON array
first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now')) -- When relay received event
);
-- Core performance indexes
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);
-- Composite indexes for common query patterns
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
CREATE INDEX idx_events_pubkey_kind ON events(pubkey, kind);
-- Schema information table
CREATE TABLE schema_info (
key TEXT PRIMARY KEY,
value TEXT NOT NULL,
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);
-- Insert schema metadata
INSERT INTO schema_info (key, value) VALUES
('version', '2'),
('description', 'Hybrid single-table Nostr relay schema with JSON tags'),
('created_at', strftime('%s', 'now'));
-- Helper views for common queries
CREATE VIEW recent_events AS
SELECT id, pubkey, created_at, kind, event_type, content
FROM events
WHERE event_type != 'ephemeral'
ORDER BY created_at DESC
LIMIT 1000;
CREATE VIEW event_stats AS
SELECT
event_type,
COUNT(*) as count,
AVG(length(content)) as avg_content_length,
MIN(created_at) as earliest,
MAX(created_at) as latest
FROM events
GROUP BY event_type;
-- Optimization: Trigger for automatic cleanup of ephemeral events older than 1 hour
CREATE TRIGGER cleanup_ephemeral_events
AFTER INSERT ON events
WHEN NEW.event_type = 'ephemeral'
BEGIN
DELETE FROM events
WHERE event_type = 'ephemeral'
AND first_seen < (strftime('%s', 'now') - 3600);
END;
-- Replaceable event handling trigger
CREATE TRIGGER handle_replaceable_events
AFTER INSERT ON events
WHEN NEW.event_type = 'replaceable'
BEGIN
DELETE FROM events
WHERE pubkey = NEW.pubkey
AND kind = NEW.kind
AND event_type = 'replaceable'
AND id != NEW.id;
END;
-- Persistent Subscriptions Logging Tables (Phase 2)
-- Optional database logging for subscription analytics and debugging
-- Subscription events log
CREATE TABLE subscription_events (
id INTEGER PRIMARY KEY AUTOINCREMENT,
subscription_id TEXT NOT NULL, -- Subscription ID from client
client_ip TEXT NOT NULL, -- Client IP address
event_type TEXT NOT NULL CHECK (event_type IN ('created', 'closed', 'expired', 'disconnected')),
filter_json TEXT, -- JSON representation of filters (for created events)
events_sent INTEGER DEFAULT 0, -- Number of events sent to this subscription
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
ended_at INTEGER, -- When subscription ended (for closed/expired/disconnected)
duration INTEGER -- Computed: ended_at - created_at
);
-- Subscription metrics summary
CREATE TABLE subscription_metrics (
id INTEGER PRIMARY KEY AUTOINCREMENT,
date TEXT NOT NULL, -- Date (YYYY-MM-DD)
total_created INTEGER DEFAULT 0, -- Total subscriptions created
total_closed INTEGER DEFAULT 0, -- Total subscriptions closed
total_events_broadcast INTEGER DEFAULT 0, -- Total events broadcast
avg_duration REAL DEFAULT 0, -- Average subscription duration
peak_concurrent INTEGER DEFAULT 0, -- Peak concurrent subscriptions
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
UNIQUE(date)
);
-- Event broadcasting log (optional, for detailed analytics)
CREATE TABLE event_broadcasts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
event_id TEXT NOT NULL, -- Event ID that was broadcast
subscription_id TEXT NOT NULL, -- Subscription that received it
client_ip TEXT NOT NULL, -- Client IP
broadcast_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
FOREIGN KEY (event_id) REFERENCES events(id)
);
-- Indexes for subscription logging performance
CREATE INDEX idx_subscription_events_id ON subscription_events(subscription_id);
CREATE INDEX idx_subscription_events_type ON subscription_events(event_type);
CREATE INDEX idx_subscription_events_created ON subscription_events(created_at DESC);
CREATE INDEX idx_subscription_events_client ON subscription_events(client_ip);
CREATE INDEX idx_subscription_metrics_date ON subscription_metrics(date DESC);
CREATE INDEX idx_event_broadcasts_event ON event_broadcasts(event_id);
CREATE INDEX idx_event_broadcasts_sub ON event_broadcasts(subscription_id);
CREATE INDEX idx_event_broadcasts_time ON event_broadcasts(broadcast_at DESC);
-- Trigger to update subscription duration when ended
CREATE TRIGGER update_subscription_duration
AFTER UPDATE OF ended_at ON subscription_events
WHEN NEW.ended_at IS NOT NULL AND OLD.ended_at IS NULL
BEGIN
UPDATE subscription_events
SET duration = NEW.ended_at - NEW.created_at
WHERE id = NEW.id;
END;
-- View for subscription analytics
CREATE VIEW subscription_analytics AS
SELECT
date(created_at, 'unixepoch') as date,
COUNT(*) as subscriptions_created,
COUNT(CASE WHEN ended_at IS NOT NULL THEN 1 END) as subscriptions_ended,
AVG(CASE WHEN duration IS NOT NULL THEN duration END) as avg_duration_seconds,
MAX(events_sent) as max_events_sent,
AVG(events_sent) as avg_events_sent,
COUNT(DISTINCT client_ip) as unique_clients
FROM subscription_events
GROUP BY date(created_at, 'unixepoch')
ORDER BY date DESC;
-- View for current active subscriptions (from log perspective)
CREATE VIEW active_subscriptions_log AS
SELECT
subscription_id,
client_ip,
filter_json,
events_sent,
created_at,
(strftime('%s', 'now') - created_at) as duration_seconds
FROM subscription_events
WHERE event_type = 'created'
AND subscription_id NOT IN (
SELECT subscription_id FROM subscription_events
WHERE event_type IN ('closed', 'expired', 'disconnected')
);
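To see the `handle_replaceable_events` trigger in action, the schema can be loaded into a scratch database (a sketch; ids, pubkeys, and signatures are placeholder values, and the file above is assumed saved as schema.sql):
```bash
sqlite3 /tmp/relay_demo.db < schema.sql
sqlite3 /tmp/relay_demo.db "INSERT INTO events (id,pubkey,created_at,kind,event_type,content,sig) VALUES ('e1','pk1',100,0,'replaceable','old profile','s1');"
sqlite3 /tmp/relay_demo.db "INSERT INTO events (id,pubkey,created_at,kind,event_type,content,sig) VALUES ('e2','pk1',200,0,'replaceable','new profile','s2');"
sqlite3 /tmp/relay_demo.db "SELECT id, content FROM events;"   # only e2|new profile remains
```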

deploy_local.sh Executable file (+19 lines)

@@ -0,0 +1,19 @@
#!/bin/bash
# Copy the binary to the deployment location
cp build/c_relay_x86 ~/Storage/c_relay/crelay
# Copy the local service file to systemd
# sudo cp systemd/c-relay-local.service /etc/systemd/system/
# Reload systemd daemon to pick up the new service
# sudo systemctl daemon-reload
# Enable the service (if not already enabled)
# sudo systemctl enable c-relay-local.service
# Restart the service
# sudo systemctl restart c-relay-local.service
# Show service status
# sudo systemctl status c-relay-local.service --no-pager -l

deploy_lt.sh Executable file (+28 lines)

@@ -0,0 +1,28 @@
#!/bin/bash
# C-Relay Static Binary Deployment Script
# Deploys build/c_relay_static_x86_64 to server via ssh
set -e
# Configuration
LOCAL_BINARY="build/c_relay_static_x86_64"
REMOTE_BINARY_PATH="/usr/local/bin/c_relay/c_relay"
SERVICE_NAME="c-relay"
# Create backup
ssh ubuntu@laantungir.com "sudo cp '$REMOTE_BINARY_PATH' '${REMOTE_BINARY_PATH}.backup.$(date +%Y%m%d_%H%M%S)'" 2>/dev/null || true
# Upload binary to temp location
scp "$LOCAL_BINARY" "ubuntu@laantungir.com:/tmp/c_relay.tmp"
# Install binary
ssh ubuntu@laantungir.com "sudo mv '/tmp/c_relay.tmp' '$REMOTE_BINARY_PATH'"
ssh ubuntu@laantungir.com "sudo chown c-relay:c-relay '$REMOTE_BINARY_PATH'"
ssh ubuntu@laantungir.com "sudo chmod +x '$REMOTE_BINARY_PATH'"
# Reload systemd and restart service
ssh ubuntu@laantungir.com "sudo systemctl daemon-reload"
ssh ubuntu@laantungir.com "sudo systemctl restart '$SERVICE_NAME'"
echo "Deployment complete!"


@@ -0,0 +1,295 @@
# NIP-42 Authentication Implementation
## Overview
This relay implements NIP-42 (Authentication of clients to relays), providing granular authentication controls for event submission and subscription operations. The implementation supports both challenge-response authentication and per-connection state management.
## Architecture
### Core Components
1. **Per-Session Authentication State** (`struct per_session_data`)
- `authenticated`: Boolean flag indicating authentication status
- `authenticated_pubkey[65]`: Hex-encoded public key of authenticated user
- `active_challenge[65]`: Current authentication challenge
- `challenge_created`: Timestamp when challenge was generated
- `challenge_expires`: Challenge expiration timestamp
- `nip42_auth_required_events`: Whether auth is required for EVENT submission
- `nip42_auth_required_subscriptions`: Whether auth is required for REQ operations
- `auth_challenge_sent`: Flag indicating if challenge has been sent
2. **Challenge Management** (via `request_validator.c`)
- `nostr_nip42_generate_challenge()`: Generates cryptographically secure challenges
- `nostr_nip42_verify_auth_event()`: Validates signed authentication events
- Challenge storage and cleanup with expiration handling
3. **WebSocket Protocol Integration**
- AUTH message handling in `nostr_relay_callback()`
- Challenge generation and transmission
- Authentication verification and session state updates
## Configuration Options
### Event-Based Configuration
NIP-42 authentication is configured using kind 33334 configuration events with the following tags:
| Tag | Description | Default | Values |
|-----|-------------|---------|--------|
| `nip42_auth_required_events` | Require auth for EVENT submission | `false` | `true`/`false` |
| `nip42_auth_required_subscriptions` | Require auth for REQ operations | `false` | `true`/`false` |
### Example Configuration Event
```json
{
"kind": 33334,
"content": "C Nostr Relay Configuration",
"tags": [
["d", "<relay_pubkey>"],
["nip42_auth_required_events", "true"],
["nip42_auth_required_subscriptions", "false"],
["relay_description", "Authenticated Nostr Relay"]
],
"created_at": 1640995200,
"pubkey": "<admin_pubkey>",
"id": "<event_id>",
"sig": "<signature>"
}
```
## Authentication Flow
### 1. Challenge Generation
When authentication is required and client is not authenticated:
```
Client -> Relay: ["EVENT", <event>] (unauthenticated)
Relay -> Client: ["AUTH", <challenge>]
```
The challenge is a 64-character hex string generated using cryptographically secure random numbers.
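For illustration, a challenge of the same shape can be produced with OpenSSL (not the relay's actual code path):
```bash
openssl rand -hex 32   # 32 random bytes -> 64 hex characters
```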
### 2. Authentication Response
Client creates and signs an authentication event (kind 22242):
```json
{
"kind": 22242,
"content": "",
"tags": [
["relay", "ws://relay.example.com"],
["challenge", "<challenge_from_relay>"]
],
"created_at": <current_timestamp>,
"pubkey": "<client_pubkey>",
"id": "<event_id>",
"sig": "<signature>"
}
```
Client sends this event back to relay:
```
Client -> Relay: ["AUTH", <signed_auth_event>]
```
### 3. Verification and Session Update
The relay:
1. Validates the authentication event signature
2. Verifies the challenge matches the one sent
3. Checks challenge expiration (default: 10 minutes)
4. Updates session state with authenticated public key
5. Sends confirmation notice
```
Relay -> Client: ["NOTICE", "NIP-42 authentication successful"]
```
## Granular Authentication Controls
### Separate Controls for Events vs Subscriptions
The implementation provides separate authentication requirements:
- **Event Submission**: Control whether clients must authenticate to publish events
- **Subscription Access**: Control whether clients must authenticate to create subscriptions
This allows flexible relay policies:
- **Public Read, Authenticated Write**: `events=true, subscriptions=false`
- **Fully Authenticated**: `events=true, subscriptions=true`
- **Public Access**: `events=false, subscriptions=false` (default)
- **Authenticated Read Only**: `events=false, subscriptions=true`
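Switching between these policies is a single configuration event; for example, "Authenticated Read Only" (a sketch mirroring the kind 33334 format above):
```bash
nak event -k 33334 \
--content "C Nostr Relay Configuration" \
--tag "d=$RELAY_PUBKEY" \
--tag "nip42_auth_required_events=false" \
--tag "nip42_auth_required_subscriptions=true" \
--sec $ADMIN_PRIVKEY \
--relay ws://localhost:8888
```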
### Per-Connection State
Each WebSocket connection maintains its own authentication state:
- Authentication persists for the lifetime of the connection
- Challenges expire after 10 minutes
- Session cleanup on connection close
## Security Features
### Challenge Security
- 64-character hexadecimal challenges (256 bits of entropy)
- Cryptographically secure random generation
- Challenge expiration to prevent replay attacks
- One-time use challenges
### Event Validation
- Complete signature verification using secp256k1
- Event ID validation
- Challenge-response binding verification
- Timestamp validation with configurable tolerance
### Session Management
- Thread-safe per-session state management
- Automatic cleanup on disconnection
- Challenge expiration handling
## Client Integration
### Using nak Client
```bash
# Generate keypair
PRIVKEY=$(nak key --gen)
PUBKEY=$(nak key --pub $PRIVKEY)
# Connect and authenticate automatically
nak event -k 1 --content "Authenticated message" --sec $PRIVKEY --relay ws://localhost:8888
# nak handles NIP-42 authentication automatically when required
```
### Manual WebSocket Integration
```javascript
const ws = new WebSocket('ws://localhost:8888');
ws.onmessage = (event) => {
const message = JSON.parse(event.data);
if (message[0] === 'AUTH') {
const challenge = message[1];
// Create auth event (kind 22242)
const authEvent = {
kind: 22242,
content: "",
tags: [
["relay", "ws://localhost:8888"],
["challenge", challenge]
],
created_at: Math.floor(Date.now() / 1000),
pubkey: clientPubkey,
// ... calculate id and signature
};
// Send auth response
ws.send(JSON.stringify(["AUTH", authEvent]));
}
};
// Send event (may trigger AUTH challenge)
ws.send(JSON.stringify(["EVENT", myEvent]));
```
## Administration
### Enabling Authentication
1. **Get Admin Private Key**: Extract from relay startup logs (shown once)
2. **Create Configuration Event**: Use nak or custom tooling
3. **Publish Configuration**: Send to relay with admin signature
```bash
# Enable auth for events only
nak event -k 33334 \
--content "C Nostr Relay Configuration" \
--tag "d=$RELAY_PUBKEY" \
--tag "nip42_auth_required_events=true" \
--tag "nip42_auth_required_subscriptions=false" \
--sec $ADMIN_PRIVKEY \
--relay ws://localhost:8888
```
### Monitoring Authentication
- Check relay logs for authentication events
- Monitor `NOTICE` messages for auth status
- Use `get_settings.sh` script to view current configuration
```bash
./get_settings.sh
```
## Troubleshooting
### Common Issues
1. **Challenge Expiration**
- Default: 10 minutes
- Client must respond within expiration window
- Generate new challenge for expired attempts
2. **Signature Verification Failures**
- Verify event structure matches NIP-42 specification
- Check challenge value matches exactly
- Ensure proper secp256k1 signature generation
3. **Configuration Not Applied**
- Verify admin private key is correct
- Check configuration event signature
- Ensure relay pubkey in 'd' tag matches relay
### Debug Commands
```bash
# Check supported NIPs
curl -H "Accept: application/nostr+json" http://localhost:8888 | jq .supported_nips
# View current configuration
nak req -k 33334 ws://localhost:8888 | jq .
# Test authentication flow
./tests/42_nip_test.sh
```
## Performance Considerations
- Challenge generation: ~1ms overhead per unauthenticated connection
- Authentication verification: ~2-5ms per auth event
- Memory overhead: ~200 bytes per connection for auth state
- Database impact: Configuration events cached, minimal query overhead
## Integration with Other NIPs
### NIP-01 (Basic Protocol)
- AUTH messages integrated into standard WebSocket flow
- Compatible with existing EVENT/REQ/CLOSE message handling
### NIP-11 (Relay Information)
- NIP-42 advertised in `supported_nips` array
- Authentication requirements reflected in relay metadata
### NIP-20 (Command Results)
- OK responses include authentication-related error messages
- NOTICE messages provide authentication status updates
## Future Extensions
### Potential Enhancements
- Role-based authentication (admin, user, read-only)
- Time-based access controls
- Rate limiting based on authentication status
- Integration with external authentication providers
### Configuration Extensions
- Per-kind authentication requirements
- Whitelist/blacklist integration
- Custom challenge expiration times
- Authentication logging and metrics

docs/admin_api_plan.md Normal file (+460 lines)

@@ -0,0 +1,460 @@
# C-Relay Administrator API Implementation Plan
## Problem Analysis
### Current Issues Identified:
1. **Schema Mismatch**: Storage system (config.c) vs Validation system (request_validator.c) use different column names and values
2. **Missing API Endpoint**: No way to clear auth_rules table for testing
3. **Configuration Gap**: Auth rules enforcement may not be properly enabled
4. **Documentation Gap**: Admin API commands not documented
### Root Cause: Auth Rules Schema Inconsistency
**Current Schema (sql_schema.h lines 140-150):**
```sql
CREATE TABLE auth_rules (
rule_type TEXT CHECK (rule_type IN ('whitelist', 'blacklist')),
pattern_type TEXT CHECK (pattern_type IN ('pubkey', 'hash')),
pattern_value TEXT,
action TEXT CHECK (action IN ('allow', 'deny')),
active INTEGER DEFAULT 1
);
```
**Storage Implementation (config.c):**
- Stores: `rule_type='blacklist'`, `pattern_type='pubkey'`, `pattern_value='hex'`, `action='allow'`
**Validation Implementation (request_validator.c):**
- Queries: `rule_type='pubkey_blacklist'`, `rule_target='hex'`, `operation='event'`, `enabled=1`
**MISMATCH**: Validator looks for non-existent columns and wrong rule_type values!
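To confirm which columns actually exist, the live table can be inspected directly (a sketch; the database filename is assumed):
```bash
sqlite3 c_relay.db ".schema auth_rules"
sqlite3 c_relay.db "SELECT rule_type, pattern_type, pattern_value, action, active FROM auth_rules;"
```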
## Proposed Solution Architecture
### Phase 1: API Documentation & Standardization
#### Admin API Commands (via WebSocket with admin private key)
**Kind 23456: Unified Admin API (Ephemeral)**
- Configuration management: Update relay settings, limits, authentication policies
- Auth rules: Add/remove/query whitelist/blacklist rules
- System commands: clear rules, status, cache management
- **Unified Format**: All commands use NIP-44 encrypted content with `["p", "relay_pubkey"]` tags
- **Command Types**:
- Configuration: `["config_key", "config_value"]`
- Auth rules: `["rule_type", "pattern_type", "pattern_value"]`
- Queries: `["auth_query", "filter"]` or `["system_command", "command_name"]`
- **Security**: All admin commands use NIP-44 encryption for privacy and security
#### Configuration Commands (using Kind 23456)
1. **Update Configuration**:
```json
{
"kind": 23456,
"content": "base64_nip44_encrypted_command_array",
"tags": [["p", "relay_pubkey"]]
}
```
*Encrypted content contains:* `["relay_description", "My Relay"]`
2. **Query System Status**:
```json
{
"kind": 23456,
"content": "base64_nip44_encrypted_command_array",
"tags": [["p", "relay_pubkey"]]
}
```
*Encrypted content contains:* `["system_command", "system_status"]`
#### Auth Rules and System Commands (using Kind 23456)
1. **Clear All Auth Rules**:
```json
{
"kind": 23456,
"content": "base64_nip44_encrypted_command_array",
"tags": [["p", "relay_pubkey"]]
}
```
*Encrypted content contains:* `["system_command", "clear_all_auth_rules"]`
2. **Query All Auth Rules**:
```json
{
"kind": 23456,
"content": "base64_nip44_encrypted_command_array",
"tags": [["p", "relay_pubkey"]]
}
```
*Encrypted content contains:* `["auth_query", "all"]`
3. **Add Blacklist Rule**:
```json
{
"kind": 23456,
"content": "base64_nip44_encrypted_command_array",
"tags": [["p", "relay_pubkey"]]
}
```
*Encrypted content contains:* `["blacklist", "pubkey", "deadbeef1234abcd..."]`
### Phase 2: Auth Rules Schema Alignment
#### Option A: Fix Validator to Match Schema (RECOMMENDED)
**Update request_validator.c:**
```sql
-- OLD (broken):
WHERE rule_type = 'pubkey_blacklist' AND rule_target = ? AND operation = ? AND enabled = 1
-- NEW (correct):
WHERE rule_type = 'blacklist' AND pattern_type = 'pubkey' AND pattern_value = ? AND active = 1
```
**Benefits:**
- Matches actual database schema
- Simpler rule_type values ('blacklist' vs 'pubkey_blacklist')
- Uses existing columns (pattern_value vs rule_target)
- Consistent with storage implementation
#### Option B: Update Schema to Match Validator (NOT RECOMMENDED)
Would require changing schema, migration scripts, and storage logic.
### Phase 3: Implementation Priority
#### High Priority (Critical for blacklist functionality):
1. Fix request_validator.c schema mismatch
2. Ensure auth_required configuration is enabled
3. Update tests to use unified ephemeral event kind (23456)
4. Test blacklist enforcement
#### Medium Priority (Enhanced Admin Features):
1. **Implement NIP-44 Encryption Support**:
- Detect NIP-44 encrypted content for Kind 23456 events
- Parse `encrypted_tags` field from content JSON
- Decrypt using admin privkey and relay pubkey
- Process decrypted tags as normal commands
2. Add clear_all_auth_rules system command
3. Add auth rule query functionality (both standard and encrypted modes)
4. Add configuration discovery (list available config keys)
5. Enhanced error reporting in admin API
6. Conflict resolution (same pubkey in whitelist + blacklist)
#### Security Priority (NIP-44 Implementation):
1. **Encryption Detection Logic**: Check for empty tags + encrypted_tags field
2. **Key Pair Management**: Use admin private key + relay public key for NIP-44
3. **Backward Compatibility**: Support both standard and encrypted modes
4. **Error Handling**: Graceful fallback if decryption fails
5. **Performance**: Cache decrypted results to avoid repeated decryption
#### Low Priority (Documentation & Polish):
1. Complete README.md API documentation
2. Example usage scripts
3. Admin client tools
### Phase 4: Expected API Structure
#### README.md Documentation Format:
```markdown
# C-Relay Administrator API
## Authentication
All admin commands require signing with the admin private key generated during first startup.
## Unified Admin API (Kind 23456 - Ephemeral)
Update relay configuration parameters or query available settings.
**Configuration Update Event:**
```json
{
"kind": 23456,
"content": "base64_nip44_encrypted_command_array",
"tags": [["p", "relay_pubkey"]]
}
```
*Encrypted content contains:* `["relay_description", "My Relay Description"]`
**Auth Rules Management:**
**Add Rule Event:**
```json
{
"kind": 23456,
"content": "{\"action\":\"add\",\"description\":\"Block malicious user\"}",
"tags": [
["blacklist", "pubkey", "deadbeef1234..."]
]
}
```
**Remove Rule Event:**
```json
{
"kind": 23456,
"content": "{\"action\":\"remove\",\"description\":\"Unblock user\"}",
"tags": [
["blacklist", "pubkey", "deadbeef1234..."]
]
}
```
**Query All Auth Rules:**
```json
{
"kind": 23456,
"content": "{\"query\":\"list_auth_rules\",\"description\":\"Get all rules\"}",
"tags": [
["auth_query", "all"]
]
}
```
**Query Whitelist Rules Only:**
```json
{
"kind": 23456,
"content": "{\"query\":\"list_auth_rules\",\"description\":\"Get whitelist\"}",
"tags": [
["auth_query", "whitelist"]
]
}
```
**Check Specific Pattern:**
```json
{
"kind": 23456,
"content": "{\"query\":\"check_pattern\",\"description\":\"Check if pattern exists\"}",
"tags": [
["auth_query", "pattern", "deadbeef1234..."]
]
}
```
## System Management (Kind 23456 - Ephemeral)
System administration commands using the same kind as auth rules.
**Clear All Auth Rules:**
```json
{
"kind": 23456,
"content": "{\"action\":\"clear_all\",\"description\":\"Clear all auth rules\"}",
"tags": [
["system_command", "clear_all_auth_rules"]
]
}
```
**System Status:**
```json
{
"kind": 23456,
"content": "{\"action\":\"system_status\",\"description\":\"Get system status\"}",
"tags": [
["system_command", "system_status"]
]
}
```
## Response Format
All admin commands return JSON responses via WebSocket:
**Success Response:**
```json
["OK", "event_id", true, "success_message"]
```
**Error Response:**
```json
["OK", "event_id", false, "error_message"]
```
## Configuration Keys
- `relay_description`: Relay description text
- `relay_contact`: Contact information
- `auth_enabled`: Enable authentication system
- `max_connections`: Maximum concurrent connections
- `pow_min_difficulty`: Minimum proof-of-work difficulty
- ... (full list of config keys)
## Examples
### Enable Authentication & Add Blacklist
```bash
# 1. Enable auth system
nak event -k 23456 --content "base64_nip44_encrypted_command" \
-t "auth_enabled=true" \
--sec $ADMIN_PRIVKEY | nak event ws://localhost:8888
# 2. Add user to blacklist
nak event -k 23456 --content '{"action":"add","description":"Spam user"}' \
-t "blacklist=pubkey;$SPAM_USER_PUBKEY" \
--sec $ADMIN_PRIVKEY | nak event ws://localhost:8888
# 3. Query all auth rules
nak event -k 23456 --content '{"query":"list_auth_rules","description":"Get all rules"}' \
-t "auth_query=all" \
--sec $ADMIN_PRIVKEY | nak event ws://localhost:8888
# 4. Clear all rules for testing
nak event -k 23456 --content '{"action":"clear_all","description":"Clear all rules"}' \
-t "system_command=clear_all_auth_rules" \
--sec $ADMIN_PRIVKEY | nak event ws://localhost:8888
```
## Expected Response Formats
### Configuration Query Response
```json
["EVENT", "subscription_id", {
"kind": 23457,
"content": "base64_nip44_encrypted_response",
"tags": [["p", "admin_pubkey"]]
}]
```
### Current Config Response
```json
["EVENT", "subscription_id", {
"kind": 23457,
"content": "base64_nip44_encrypted_response",
"tags": [["p", "admin_pubkey"]]
}]
```
### Auth Rules Query Response
```json
["EVENT", "subscription_id", {
"kind": 23456,
"content": "{\"auth_rules\": [{\"rule_type\": \"blacklist\", \"pattern_type\": \"pubkey\", \"pattern_value\": \"deadbeef...\"}, {\"rule_type\": \"whitelist\", \"pattern_type\": \"pubkey\", \"pattern_value\": \"cafebabe...\"}]}",
"tags": [["response_type", "auth_rules_list"], ["query_type", "all"]]
}]
```
### Pattern Check Response
```json
["EVENT", "subscription_id", {
"kind": 23456,
"content": "{\"pattern_exists\": true, \"rule_type\": \"blacklist\", \"pattern_value\": \"deadbeef...\"}",
"tags": [["response_type", "pattern_check"], ["pattern", "deadbeef..."]]
}]
```
## Implementation Steps
1. **Document API** (this file) ✅
2. **Update to ephemeral event kinds** ✅
3. **Fix request_validator.c** schema mismatch
4. **Update tests** to use unified Kind 23456
5. **Add auth rule query functionality**
6. **Add configuration discovery feature**
7. **Test blacklist functionality**
8. **Add remaining system commands**
## Testing Plan
1. Fix schema mismatch and test basic blacklist
2. Add clear_auth_rules and test table cleanup
3. Test whitelist/blacklist conflict scenarios
4. Test all admin API commands end-to-end
5. Update integration tests
This plan addresses the immediate blacklist issue while establishing a comprehensive admin API framework for future expansion.
## NIP-44 Encryption Implementation Details
### Server-Side Detection Logic
```c
// In admin event processing function
bool is_encrypted_command(struct nostr_event *event) {
// Check if Kind 23456 with NIP-44 encrypted content
if (event->kind == 23456 &&
event->tags_count == 0) {
return true;
}
return false;
}
cJSON *decrypt_admin_tags(struct nostr_event *event) {
cJSON *content_json = cJSON_Parse(event->content);
if (!content_json) return NULL;
cJSON *encrypted_tags = cJSON_GetObjectItem(content_json, "encrypted_tags");
if (!encrypted_tags) {
cJSON_Delete(content_json);
return NULL;
}
// Decrypt using NIP-44 with admin pubkey and relay privkey
char *decrypted = nip44_decrypt(
cJSON_GetStringValue(encrypted_tags),
admin_pubkey, // Shared secret with admin
relay_private_key // Our private key
);
cJSON_Delete(content_json);
if (!decrypted) return NULL; // Decryption failed - caller falls back gracefully
cJSON *decrypted_tags = cJSON_Parse(decrypted);
free(decrypted);
return decrypted_tags; // Returns tag array: [["key1", "val1"], ["key2", "val2"]]
}
```
### Admin Event Processing Flow
1. **Receive Event**: Kind 23456 with admin signature
2. **Check Mode**: Empty tags = encrypted, populated tags = standard
3. **Decrypt if Needed**: Extract and decrypt `encrypted_tags` from content
4. **Process Commands**: Use decrypted/standard tags for command processing
5. **Execute**: Same logic for both modes after tag extraction
6. **Respond**: Standard response format (optionally encrypt response)
### Security Benefits
- **Command Privacy**: Admin operations invisible in event tags
- **Replay Protection**: NIP-44 includes timestamp/randomness
- **Key Management**: Uses existing admin/relay key pair
- **Backward Compatible**: Standard mode still works
- **Performance**: Only decrypt when needed (empty tags detection)
### NIP-44 Library Integration
The relay will need to integrate a NIP-44 encryption/decryption library:
```c
// Required NIP-44 functions
char* nip44_encrypt(const char* plaintext, const char* sender_privkey, const char* recipient_pubkey);
char* nip44_decrypt(const char* ciphertext, const char* recipient_privkey, const char* sender_pubkey);
```
### Implementation Priority (Updated)
#### Phase 1: Core Infrastructure (Complete)
- [x] Event-based admin authentication system
- [x] Kind 23456 (Unified Admin API) processing
- [x] Basic configuration parameter updates
- [x] Auth rule add/remove/clear functionality
- [x] Updated to ephemeral event kinds
- [x] Designed NIP-44 encryption support
#### Phase 2: NIP-44 Encryption Support (Next Priority)
- [ ] **Add NIP-44 library dependency** to project
- [ ] **Implement encryption detection logic** (`is_encrypted_command()`)
- [ ] **Add decrypt_admin_tags() function** with NIP-44 support
- [ ] **Update admin command processing** to handle both modes
- [ ] **Test encrypted admin commands** end-to-end
#### Phase 3: Enhanced Features
- [ ] **Auth rule query functionality** (both standard and encrypted modes)
- [ ] **Configuration discovery API** (list available config keys)
- [ ] **Enhanced error messages** with encryption status
- [ ] **Performance optimization** (caching, async decrypt)
#### Phase 4: Schema Fixes (Critical)
- [ ] **Fix request_validator.c** schema mismatch
- [ ] **Enable blacklist enforcement** with encrypted commands
- [ ] **Update tests** to use both standard and encrypted modes
This enhanced admin API provides enterprise-grade security while maintaining ease of use for basic operations.


@@ -0,0 +1,457 @@
# c_utils_lib Architecture Plan
## Overview
`c_utils_lib` is a standalone C utility library designed to provide reusable, general-purpose functions for C projects. It serves as a learning repository and a practical toolkit for common C programming tasks.
## Design Philosophy
1. **Zero External Dependencies**: Only standard C library dependencies
2. **Modular Design**: Each utility is independent and can be used separately
3. **Learning-Oriented**: Well-documented code suitable for learning C
4. **Production-Ready**: Battle-tested utilities from real projects
5. **Cross-Platform**: Works on Linux, macOS, and other POSIX systems
## Repository Structure
```
c_utils_lib/
├── README.md # Main documentation
├── LICENSE # MIT License
├── VERSION # Current version (e.g., v0.1.0)
├── build.sh # Build script
├── Makefile # Build system
├── .gitignore # Git ignore rules
├── include/ # Public headers
│ ├── c_utils.h # Main header (includes all utilities)
│ ├── debug.h # Debug/logging system
│ ├── version.h # Version utilities
│ ├── string_utils.h # String utilities (future)
│ └── memory_utils.h # Memory utilities (future)
├── src/ # Implementation files
│ ├── debug.c # Debug system implementation
│ ├── version.c # Version utilities implementation
│ ├── string_utils.c # String utilities (future)
│ └── memory_utils.c # Memory utilities (future)
├── examples/ # Usage examples
│ ├── debug_example.c # Debug system example
│ ├── version_example.c # Version utilities example
│ └── Makefile # Examples build system
├── tests/ # Unit tests
│ ├── test_debug.c # Debug system tests
│ ├── test_version.c # Version utilities tests
│ ├── run_tests.sh # Test runner
│ └── Makefile # Tests build system
└── docs/ # Additional documentation
├── API.md # Complete API reference
├── INTEGRATION.md # How to integrate into projects
├── VERSIONING.md # Versioning system guide
└── CONTRIBUTING.md # Contribution guidelines
```
## Initial Utilities (v0.1.0)
### 1. Debug System (`debug.h`, `debug.c`)
**Purpose**: Unified logging and debugging system with configurable verbosity levels.
**Features**:
- Five debug levels (ERROR, WARN, INFO, DEBUG, TRACE) plus NONE to silence output
- Timestamp formatting
- File/line information at TRACE level
- Macro-based API for zero-cost when disabled
- Thread-safe (future enhancement)
**API**:
```c
// Initialization
void debug_init(int level);
// Logging macros
DEBUG_ERROR(format, ...);
DEBUG_WARN(format, ...);
DEBUG_INFO(format, ...);
DEBUG_LOG(format, ...);
DEBUG_TRACE(format, ...);
// Global debug level
extern debug_level_t g_debug_level;
```
**Usage Example**:
```c
#include <c_utils/debug.h>
int main() {
debug_init(DEBUG_LEVEL_INFO);
DEBUG_INFO("Application started");
DEBUG_ERROR("Critical error: %s", error_msg);
return 0;
}
```
### 2. Version Utilities (`version.h`, `version.c`)
**Purpose**: Reusable versioning system for C projects using git tags.
**Features**:
- Automatic version extraction from git tags
- Semantic versioning support (MAJOR.MINOR.PATCH)
- Version comparison functions
- Header file generation for embedding version info
- Build number tracking
**API**:
```c
// Version structure
typedef struct {
int major;
int minor;
int patch;
char* git_hash;
char* build_date;
} version_info_t;
// Get version from git
int version_get_from_git(version_info_t* version);
// Generate version header file
int version_generate_header(const char* output_path, const char* prefix);
// Compare versions
int version_compare(version_info_t* v1, version_info_t* v2);
// Format version string
char* version_to_string(version_info_t* version);
```
**Usage Example**:
```c
#include <c_utils/version.h>
// In your build system:
version_generate_header("src/version.h", "MY_APP");
// In your code:
#include "version.h"
printf("Version: %s\n", MY_APP_VERSION);
```
**Integration with Projects**:
```bash
# In project Makefile
version.h:
c_utils_lib/bin/generate_version src/version.h MY_PROJECT
```
## Build System
### Static Library Output
```
libc_utils.a # Static library for linking
```
### Build Targets
```bash
make # Build static library
make examples # Build examples
make test # Run tests
make install # Install to system (optional)
make clean # Clean build artifacts
```
### Build Script (`build.sh`)
```bash
#!/bin/bash
# Simplified build script similar to nostr_core_lib
case "$1" in
lib|"")
make
;;
examples)
make examples
;;
test)
make test
;;
clean)
make clean
;;
install)
make install
;;
*)
echo "Usage: ./build.sh [lib|examples|test|clean|install]"
exit 1
;;
esac
```
## Versioning System Design
### How It Works
1. **Git Tags as Source of Truth**
- Version tags: `v0.1.0`, `v0.2.0`, etc.
- Follows semantic versioning
2. **Automatic Header Generation**
- Script reads git tags
- Generates header with version macros
- Includes build date and git hash
3. **Reusable Across Projects**
- Each project calls `version_generate_header()`
- Customizable prefix (e.g., `C_RELAY_VERSION`, `NOSTR_CORE_VERSION`)
- No hardcoded version numbers in source
### Example Generated Header
```c
// Auto-generated by c_utils_lib version system
#ifndef MY_PROJECT_VERSION_H
#define MY_PROJECT_VERSION_H
#define MY_PROJECT_VERSION "v0.1.0"
#define MY_PROJECT_VERSION_MAJOR 0
#define MY_PROJECT_VERSION_MINOR 1
#define MY_PROJECT_VERSION_PATCH 0
#define MY_PROJECT_GIT_HASH "a1b2c3d"
#define MY_PROJECT_BUILD_DATE "2025-10-15"
#endif
```
### Integration Pattern
```makefile
# In consuming project's Makefile
VERSION_SCRIPT = c_utils_lib/bin/generate_version
src/version.h: .git/refs/tags/*
$(VERSION_SCRIPT) src/version.h MY_PROJECT
my_app: src/version.h src/main.c
$(CC) src/main.c -o my_app -Ic_utils_lib/include -Lc_utils_lib -lc_utils
```
## Future Utilities (Roadmap)
### String Utilities (`string_utils.h`)
- Safe string operations (bounds checking)
- String trimming, splitting, joining
- Case conversion
- Pattern matching helpers
### Memory Utilities (`memory_utils.h`)
- Safe allocation wrappers
- Memory pool management
- Leak detection helpers (debug builds)
- Arena allocators
### Configuration Utilities (`config_utils.h`)
- INI file parsing
- JSON configuration (using cJSON)
- Environment variable helpers
- Command-line argument parsing
### File Utilities (`file_utils.h`)
- Safe file operations
- Directory traversal
- Path manipulation
- File watching (inotify wrapper)
### Time Utilities (`time_utils.h`)
- Timestamp formatting
- Duration calculations
- Timer utilities
- Rate limiting helpers
## Integration Guide
### As Git Submodule
```bash
# In your project
git submodule add https://github.com/yourusername/c_utils_lib.git
git submodule update --init --recursive
# Build the library
cd c_utils_lib && ./build.sh lib && cd ..
# Update your Makefile
INCLUDES += -Ic_utils_lib/include
LIBS += -Lc_utils_lib -lc_utils
```
### In Your Makefile
```makefile
# Check if c_utils_lib is built
c_utils_lib/libc_utils.a:
cd c_utils_lib && ./build.sh lib
# Link against it
my_app: c_utils_lib/libc_utils.a src/main.c
$(CC) src/main.c -o my_app \
-Ic_utils_lib/include \
-Lc_utils_lib -lc_utils
```
### In Your Code
```c
// Option 1: Include everything
#include <c_utils/c_utils.h>
// Option 2: Include specific utilities
#include <c_utils/debug.h>
#include <c_utils/version.h>
int main() {
debug_init(DEBUG_LEVEL_INFO);
DEBUG_INFO("Starting application version %s", MY_APP_VERSION);
return 0;
}
```
## Migration Plan for c-relay
### Phase 1: Extract Debug System
1. Create `c_utils_lib` repository
2. Move [`debug.c`](../src/debug.c) and [`debug.h`](../src/debug.h)
3. Create build system
4. Add basic tests
### Phase 2: Add Versioning System
1. Extract version generation logic from c-relay
2. Create reusable version utilities
3. Update c-relay to use new system
4. Update nostr_core_lib to use new system
### Phase 3: Add as Submodule
1. Add `c_utils_lib` as submodule to c-relay
2. Update c-relay Makefile
3. Update includes in c-relay source files
4. Remove old debug files from c-relay
### Phase 4: Documentation & Examples
1. Create comprehensive README
2. Add usage examples
3. Write integration guide
4. Document API
## Benefits
### For c-relay
- Cleaner separation of concerns
- Reusable utilities across projects
- Easier to maintain and test
- Consistent logging across codebase
### For Learning C
- Real-world utility implementations
- Best practices examples
- Modular design patterns
- Build system examples
### For Future Projects
- Drop-in utility library
- Proven, tested code
- Consistent patterns
- Time savings
## Testing Strategy
### Unit Tests
- Test each utility independently
- Mock external dependencies
- Edge case coverage
- Memory leak detection (valgrind)
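A representative leak check (hypothetical test binary name):
```bash
# Fail the run if valgrind detects leaks or memory errors
valgrind --leak-check=full --error-exitcode=1 ./build/test_debug
```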
### Integration Tests
- Test with real projects (c-relay, nostr_core_lib)
- Cross-platform testing
- Performance benchmarks
### Continuous Integration
- GitHub Actions for automated testing
- Multiple compiler versions (gcc, clang)
- Multiple platforms (Linux, macOS)
- Static analysis (cppcheck, clang-tidy)
## Documentation Standards
### Code Documentation
- Doxygen-style comments
- Function purpose and parameters
- Return value descriptions
- Usage examples in comments
### API Documentation
- Complete API reference in `docs/API.md`
- Usage examples for each function
- Common patterns and best practices
- Migration guides
### Learning Resources
- Detailed explanations of implementations
- Links to relevant C standards
- Common pitfalls and how to avoid them
- Performance considerations
## License
MIT License - permissive and suitable for learning and commercial use.
## Version History
- **v0.1.0** (Planned)
- Initial release
- Debug system
- Version utilities
- Basic documentation
- **v0.2.0** (Future)
- String utilities
- Memory utilities
- Enhanced documentation
- **v0.3.0** (Future)
- Configuration utilities
- File utilities
- Time utilities
## Success Criteria
1. ✅ Successfully integrated into c-relay
2. ✅ Successfully integrated into nostr_core_lib
3. ✅ All tests passing
4. ✅ Documentation complete
5. ✅ Examples working
6. ✅ Zero external dependencies (except standard library)
7. ✅ Cross-platform compatibility verified
## Next Steps
1. Create repository structure
2. Implement debug system
3. Implement version utilities
4. Create build system
5. Write tests
6. Create documentation
7. Integrate into c-relay
8. Publish to GitHub
---
**Note**: This is a living document. Update as the library evolves and new utilities are added.


@@ -0,0 +1,621 @@
# c_utils_lib Implementation Plan
## Overview
This document provides a step-by-step implementation plan for creating the `c_utils_lib` library and integrating it into the c-relay project.
## Phase 1: Repository Setup & Structure
### Step 1.1: Create Repository Structure
**Location**: Create outside c-relay project (sibling directory)
```bash
# Create directory structure
mkdir -p c_utils_lib/{include,src,examples,tests,docs,bin}
cd c_utils_lib
# Create subdirectories
mkdir -p include/c_utils
mkdir -p tests/results
```
### Step 1.2: Initialize Git Repository
```bash
cd c_utils_lib
git init
git branch -M main
```
### Step 1.3: Create Core Files
**Files to create**:
1. `README.md` - Main documentation
2. `LICENSE` - MIT License
3. `VERSION` - Version file (v0.1.0)
4. `.gitignore` - Git ignore rules
5. `Makefile` - Build system
6. `build.sh` - Build script
## Phase 2: Debug System Implementation
### Step 2.1: Move Debug Files
**Source files** (from c-relay):
- `src/debug.c` → `c_utils_lib/src/debug.c`
- `src/debug.h` → `c_utils_lib/include/c_utils/debug.h`
**Modifications needed**:
1. Update header guard in `debug.h`:
```c
#ifndef C_UTILS_DEBUG_H
#define C_UTILS_DEBUG_H
```
2. No namespace changes needed (keep simple API)
3. Add header documentation:
```c
/**
* @file debug.h
* @brief Debug and logging system with configurable verbosity levels
*
* Provides a simple, efficient logging system with 5 levels:
* - ERROR: Critical errors
* - WARN: Warnings
* - INFO: Informational messages
* - DEBUG: Debug messages
* - TRACE: Detailed trace with file:line info
*/
```
### Step 2.2: Create Main Header
**File**: `include/c_utils/c_utils.h`
```c
#ifndef C_UTILS_H
#define C_UTILS_H
/**
* @file c_utils.h
* @brief Main header for c_utils_lib - includes all utilities
*
* Include this header to access all c_utils_lib functionality.
* Alternatively, include specific headers for modular usage.
*/
// Version information
#define C_UTILS_VERSION "v0.1.0"
#define C_UTILS_VERSION_MAJOR 0
#define C_UTILS_VERSION_MINOR 1
#define C_UTILS_VERSION_PATCH 0
// Include all utilities
#include "debug.h"
#include "version.h"
#endif /* C_UTILS_H */
```
## Phase 3: Version Utilities Implementation
### Step 3.1: Design Version API
**File**: `include/c_utils/version.h`
```c
#ifndef C_UTILS_VERSION_H
#define C_UTILS_VERSION_H
#include <time.h>
/**
* @brief Version information structure
*/
typedef struct {
    int major;
    int minor;
    int patch;
    char git_hash[41];        // SHA-1 hash (40 chars + null)
    char build_date[32];      // ISO 8601 format
    char version_string[64];  // "vX.Y.Z" format
} version_info_t;
/**
* @brief Extract version from git tags
* @param version Output version structure
* @return 0 on success, -1 on error
*/
int version_get_from_git(version_info_t* version);
/**
* @brief Generate version header file for a project
* @param output_path Path to output header file
* @param prefix Prefix for macros (e.g., "MY_APP")
* @return 0 on success, -1 on error
*/
int version_generate_header(const char* output_path, const char* prefix);
/**
* @brief Compare two versions
* @return -1 if v1 < v2, 0 if equal, 1 if v1 > v2
*/
int version_compare(const version_info_t* v1, const version_info_t* v2);
/**
* @brief Format version as string
* @param version Version structure
* @param buffer Output buffer
* @param buffer_size Size of output buffer
* @return Number of characters written
*/
int version_to_string(const version_info_t* version, char* buffer, size_t buffer_size);
#endif /* C_UTILS_VERSION_H */
```
### Step 3.2: Implement Version Utilities
**File**: `src/version.c`
Key functions to implement:
1. `version_get_from_git()` - Execute `git describe --tags` and parse
2. `version_generate_header()` - Generate header file with macros
3. `version_compare()` - Semantic version comparison
4. `version_to_string()` - Format version string
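Of these, `version_compare()` is the only one with non-obvious semantics. A minimal sketch, assuming plain semantic comparison of major, then minor, then patch (git hash and build date intentionally do not participate in ordering):

```c
#include <c_utils/version.h>

int version_compare(const version_info_t* v1, const version_info_t* v2) {
    // Compare components in order of significance.
    if (v1->major != v2->major) return (v1->major < v2->major) ? -1 : 1;
    if (v1->minor != v2->minor) return (v1->minor < v2->minor) ? -1 : 1;
    if (v1->patch != v2->patch) return (v1->patch < v2->patch) ? -1 : 1;
    return 0;  // equal; build metadata is ignored
}
```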
### Step 3.3: Create Version Generation Script
**File**: `bin/generate_version`
```bash
#!/bin/bash
# Generate version header for a project
OUTPUT_FILE="$1"
PREFIX="$2"
if [ -z "$OUTPUT_FILE" ] || [ -z "$PREFIX" ]; then
    echo "Usage: $0 <output_file> <prefix>"
    exit 1
fi
# Get version from git
if [ -d .git ]; then
    VERSION=$(git describe --tags --always 2>/dev/null || echo "v0.0.0")
    GIT_HASH=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
else
    VERSION="v0.0.0"
    GIT_HASH="unknown"
fi
# Parse version
CLEAN_VERSION=$(echo "$VERSION" | sed 's/^v//' | cut -d- -f1)
MAJOR=$(echo "$CLEAN_VERSION" | cut -d. -f1)
MINOR=$(echo "$CLEAN_VERSION" | cut -d. -f2)
PATCH=$(echo "$CLEAN_VERSION" | cut -d. -f3)
BUILD_DATE=$(date -u +"%Y-%m-%d %H:%M:%S UTC")
# Generate header
cat > "$OUTPUT_FILE" << EOF
/* Auto-generated by c_utils_lib version system */
/* DO NOT EDIT - This file is automatically generated */
#ifndef ${PREFIX}_VERSION_H
#define ${PREFIX}_VERSION_H
#define ${PREFIX}_VERSION "v${CLEAN_VERSION}"
#define ${PREFIX}_VERSION_MAJOR ${MAJOR}
#define ${PREFIX}_VERSION_MINOR ${MINOR}
#define ${PREFIX}_VERSION_PATCH ${PATCH}
#define ${PREFIX}_GIT_HASH "${GIT_HASH}"
#define ${PREFIX}_BUILD_DATE "${BUILD_DATE}"
#endif /* ${PREFIX}_VERSION_H */
EOF
echo "Generated $OUTPUT_FILE with version v${CLEAN_VERSION}"
```
## Phase 4: Build System
### Step 4.1: Create Makefile
**File**: `Makefile`
```makefile
# c_utils_lib Makefile
CC = gcc
AR = ar
CFLAGS = -Wall -Wextra -std=c99 -O2 -g
INCLUDES = -Iinclude

# Directories
SRC_DIR = src
INCLUDE_DIR = include
BUILD_DIR = build
EXAMPLES_DIR = examples
TESTS_DIR = tests

# Source files
SOURCES = $(wildcard $(SRC_DIR)/*.c)
OBJECTS = $(SOURCES:$(SRC_DIR)/%.c=$(BUILD_DIR)/%.o)

# Output library
LIBRARY = libc_utils.a

# Default target
all: $(LIBRARY)

# Create build directory
$(BUILD_DIR):
	mkdir -p $(BUILD_DIR)

# Compile source files
$(BUILD_DIR)/%.o: $(SRC_DIR)/%.c | $(BUILD_DIR)
	$(CC) $(CFLAGS) $(INCLUDES) -c $< -o $@

# Create static library
$(LIBRARY): $(OBJECTS)
	$(AR) rcs $@ $^
	@echo "Built $(LIBRARY)"

# Build examples
examples: $(LIBRARY)
	$(MAKE) -C $(EXAMPLES_DIR)

# Run tests
test: $(LIBRARY)
	$(MAKE) -C $(TESTS_DIR)
	$(TESTS_DIR)/run_tests.sh

# Install to system (optional)
install: $(LIBRARY)
	install -d /usr/local/lib
	install -m 644 $(LIBRARY) /usr/local/lib/
	install -d /usr/local/include/c_utils
	install -m 644 $(INCLUDE_DIR)/c_utils/*.h /usr/local/include/c_utils/
	@echo "Installed to /usr/local"

# Uninstall from system
uninstall:
	rm -f /usr/local/lib/$(LIBRARY)
	rm -rf /usr/local/include/c_utils
	@echo "Uninstalled from /usr/local"

# Clean build artifacts
clean:
	rm -rf $(BUILD_DIR) $(LIBRARY)
	$(MAKE) -C $(EXAMPLES_DIR) clean 2>/dev/null || true
	$(MAKE) -C $(TESTS_DIR) clean 2>/dev/null || true

# Help
help:
	@echo "c_utils_lib Build System"
	@echo ""
	@echo "Targets:"
	@echo "  all        Build static library (default)"
	@echo "  examples   Build examples"
	@echo "  test       Run tests"
	@echo "  install    Install to /usr/local"
	@echo "  uninstall  Remove from /usr/local"
	@echo "  clean      Clean build artifacts"
	@echo "  help       Show this help"

.PHONY: all examples test install uninstall clean help
```
### Step 4.2: Create Build Script
**File**: `build.sh`
```bash
#!/bin/bash
# c_utils_lib build script
set -e

case "$1" in
    lib|"")
        echo "Building c_utils_lib..."
        make
        ;;
    examples)
        echo "Building examples..."
        make examples
        ;;
    test)
        echo "Running tests..."
        make test
        ;;
    clean)
        echo "Cleaning..."
        make clean
        ;;
    install)
        echo "Installing..."
        make install
        ;;
    *)
        echo "Usage: ./build.sh [lib|examples|test|clean|install]"
        exit 1
        ;;
esac

echo "Done!"
```
## Phase 5: Examples & Tests
### Step 5.1: Create Debug Example
**File**: `examples/debug_example.c`
```c
#include <c_utils/debug.h>
int main() {
    // Initialize with INFO level
    debug_init(DEBUG_LEVEL_INFO);
    DEBUG_INFO("Application started");
    DEBUG_WARN("This is a warning");
    DEBUG_ERROR("This is an error");
    // This won't print (level too high)
    DEBUG_LOG("This debug message won't show");
    // Change level to DEBUG
    g_debug_level = DEBUG_LEVEL_DEBUG;
    DEBUG_LOG("Now debug messages show");
    // Change to TRACE to see file:line info
    g_debug_level = DEBUG_LEVEL_TRACE;
    DEBUG_TRACE("Trace with file:line information");
    return 0;
}
```
### Step 5.2: Create Version Example
**File**: `examples/version_example.c`
```c
#include <c_utils/version.h>
#include <stdio.h>
int main() {
    version_info_t version;
    // Get version from git
    if (version_get_from_git(&version) == 0) {
        char version_str[64];
        version_to_string(&version, version_str, sizeof(version_str));
        printf("Version: %s\n", version_str);
        printf("Git Hash: %s\n", version.git_hash);
        printf("Build Date: %s\n", version.build_date);
    }
    return 0;
}
```
### Step 5.3: Create Test Suite
**File**: `tests/test_debug.c`
```c
#include <c_utils/debug.h>
#include <stdio.h>
#include <string.h>
int test_debug_init() {
    debug_init(DEBUG_LEVEL_INFO);
    return (g_debug_level == DEBUG_LEVEL_INFO) ? 0 : -1;
}

int test_debug_levels() {
    // Test that higher levels don't print at lower settings
    debug_init(DEBUG_LEVEL_ERROR);
    // Would need to capture stdout to verify
    return 0;
}

int main() {
    int failed = 0;
    printf("Running debug tests...\n");
    if (test_debug_init() != 0) {
        printf("FAIL: test_debug_init\n");
        failed++;
    } else {
        printf("PASS: test_debug_init\n");
    }
    if (test_debug_levels() != 0) {
        printf("FAIL: test_debug_levels\n");
        failed++;
    } else {
        printf("PASS: test_debug_levels\n");
    }
    return failed;
}
```
## Phase 6: Documentation
### Step 6.1: Create README.md
Key sections:
1. Overview and purpose
2. Quick start guide
3. Installation instructions
4. Usage examples
5. API reference (brief)
6. Integration guide
7. Contributing guidelines
8. License
### Step 6.2: Create API Documentation
**File**: `docs/API.md`
Complete API reference with:
- Function signatures
- Parameter descriptions
- Return values
- Usage examples
- Common patterns
### Step 6.3: Create Integration Guide
**File**: `docs/INTEGRATION.md`
How to integrate into projects:
1. As git submodule
2. Makefile integration
3. Code examples
4. Migration from standalone utilities
## Phase 7: Integration with c-relay
### Step 7.1: Add as Submodule
```bash
cd /path/to/c-relay
git submodule add <repo-url> c_utils_lib
git submodule update --init --recursive
```
### Step 7.2: Update c-relay Makefile
```makefile
# Add to c-relay Makefile
C_UTILS_LIB = c_utils_lib/libc_utils.a
# Update includes
INCLUDES += -Ic_utils_lib/include
# Update libs
LIBS += -Lc_utils_lib -lc_utils
# Add dependency
$(C_UTILS_LIB):
cd c_utils_lib && ./build.sh lib
# Update main target
$(TARGET): $(C_UTILS_LIB) ...
```
### Step 7.3: Update c-relay Source Files
**Changes needed**:
1. Update includes:
```c
// Old
#include "debug.h"
// New
#include <c_utils/debug.h>
```
2. Remove old debug files:
```bash
git rm src/debug.c src/debug.h
```
3. Update all files that use debug system:
- `src/main.c`
- `src/config.c`
- `src/dm_admin.c`
- `src/websockets.c`
- `src/subscriptions.c`
- Any other files using DEBUG_* macros
### Step 7.4: Test Integration
```bash
cd c-relay
make clean
make
./make_and_restart_relay.sh
```
Verify:
- Compilation succeeds
- Debug output works correctly
- No functionality regressions
## Phase 8: Version System Integration
### Step 8.1: Update c-relay Makefile for Versioning
```makefile
# Add version generation
src/version.h: .git/refs/tags/*
c_utils_lib/bin/generate_version src/version.h C_RELAY
# Add dependency
$(TARGET): src/version.h ...
```
### Step 8.2: Update c-relay to Use Generated Version
Replace hardcoded version in `src/main.h` with:
```c
#include "version.h"
// Use C_RELAY_VERSION instead of hardcoded VERSION
```
## Timeline Estimate
- **Phase 1**: Repository Setup - 1 hour
- **Phase 2**: Debug System - 2 hours
- **Phase 3**: Version Utilities - 4 hours
- **Phase 4**: Build System - 2 hours
- **Phase 5**: Examples & Tests - 3 hours
- **Phase 6**: Documentation - 3 hours
- **Phase 7**: c-relay Integration - 2 hours
- **Phase 8**: Version Integration - 2 hours
**Total**: ~19 hours
## Success Criteria
- [ ] c_utils_lib builds successfully
- [ ] All tests pass
- [ ] Examples compile and run
- [ ] c-relay integrates successfully
- [ ] Debug output works in c-relay
- [ ] Version generation works
- [ ] Documentation complete
- [ ] No regressions in c-relay functionality
## Next Steps
1. Review this plan with stakeholders
2. Create repository structure
3. Implement debug system
4. Implement version utilities
5. Create build system
6. Write tests and examples
7. Create documentation
8. Integrate into c-relay
9. Test thoroughly
10. Publish to GitHub
## Notes
- Keep the API simple and intuitive
- Focus on zero external dependencies
- Prioritize learning value in code comments
- Make integration as easy as possible
- Document everything thoroughly

docs/configuration_guide.md Normal file

@@ -0,0 +1,433 @@
# Configuration Management Guide
Comprehensive guide for managing the C Nostr Relay's event-based configuration system.
## Table of Contents
- [Overview](#overview)
- [Configuration Events](#configuration-events)
- [Parameter Reference](#parameter-reference)
- [Configuration Examples](#configuration-examples)
- [Security Considerations](#security-considerations)
- [Troubleshooting](#troubleshooting)
## Overview
The C Nostr Relay uses a revolutionary **event-based configuration system** where all settings are stored as kind 33334 Nostr events in the database. This provides several advantages:
### Benefits
- **Real-time updates**: Configuration changes applied instantly without restart
- **Cryptographic security**: All changes must be cryptographically signed by admin
- **Audit trail**: Complete history of all configuration changes
- **Version control**: Each configuration change is timestamped and signed
- **Zero files**: No configuration files to manage, back up, or version-control
### How It Works
1. **Admin keypair**: Generated on first startup, used to sign configuration events
2. **Configuration events**: Kind 33334 Nostr events with relay settings in tags
3. **Real-time processing**: New configuration events processed via WebSocket
4. **Immediate application**: Changes applied to running system without restart
## Configuration Events
### Event Structure
Configuration events follow the standard Nostr event format with kind 33334:
```json
{
  "id": "event_id_computed_from_content",
  "kind": 33334,
  "pubkey": "admin_public_key_hex",
  "created_at": 1699123456,
  "content": "C Nostr Relay Configuration",
  "tags": [
    ["d", "relay_public_key_hex"],
    ["relay_description", "My Nostr Relay"],
    ["max_subscriptions_per_client", "25"],
    ["pow_min_difficulty", "16"]
  ],
  "sig": "signature_computed_with_admin_private_key"
}
```
### Required Tags
- **`d` tag**: Must contain the relay's public key (identifies which relay this config is for)
### Event Properties
- **Kind**: Must be exactly `33334`
- **Content**: Should be descriptive (e.g., "C Nostr Relay Configuration")
- **Pubkey**: Must be the admin public key generated at first startup
- **Signature**: Must be valid signature from admin private key
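A sketch of these acceptance checks in C (assumptions: cJSON is used for event parsing, as elsewhere in this codebase; signature verification is a separate step handled by the nostr library):

```c
#include <string.h>
#include "cJSON.h"

/* Returns 1 if the event passes the basic kind-33334 acceptance checks
 * described above; signature verification happens separately. */
static int config_event_acceptable(const cJSON* event,
                                   const char* admin_pubkey_hex,
                                   const char* relay_pubkey_hex) {
    const cJSON* kind = cJSON_GetObjectItem(event, "kind");
    if (!cJSON_IsNumber(kind) || kind->valueint != 33334) return 0;

    const cJSON* pubkey = cJSON_GetObjectItem(event, "pubkey");
    if (!cJSON_IsString(pubkey) ||
        strcmp(pubkey->valuestring, admin_pubkey_hex) != 0) return 0;

    /* The 'd' tag must name this relay's public key. */
    const cJSON* tags = cJSON_GetObjectItem(event, "tags");
    const cJSON* tag;
    cJSON_ArrayForEach(tag, tags) {
        const cJSON* name  = cJSON_GetArrayItem(tag, 0);
        const cJSON* value = cJSON_GetArrayItem(tag, 1);
        if (cJSON_IsString(name) && strcmp(name->valuestring, "d") == 0 &&
            cJSON_IsString(value) &&
            strcmp(value->valuestring, relay_pubkey_hex) == 0) {
            return 1;
        }
    }
    return 0;
}
```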
## Parameter Reference
### Basic Relay Information
#### `relay_description`
- **Description**: Human-readable relay description (shown in NIP-11)
- **Default**: `"C Nostr Relay"`
- **Format**: String, max 512 characters
- **Example**: `"My awesome Nostr relay for the community"`
#### `relay_contact`
- **Description**: Admin contact information (email, npub, etc.)
- **Default**: `""` (empty)
- **Format**: String, max 256 characters
- **Example**: `"admin@example.com"` or `"npub1..."`
#### `relay_software`
- **Description**: Software identifier for NIP-11
- **Default**: `"c-relay"`
- **Format**: String, max 64 characters
- **Example**: `"c-relay v1.0.0"`
#### `relay_version`
- **Description**: Software version string
- **Default**: Auto-detected from build
- **Format**: Semantic version string
- **Example**: `"1.0.0"`
### Client Connection Limits
#### `max_subscriptions_per_client`
- **Description**: Maximum subscriptions allowed per WebSocket connection
- **Default**: `"25"`
- **Range**: `1` to `100`
- **Impact**: Prevents individual clients from overwhelming the relay
- **Example**: `"50"` (allows up to 50 subscriptions per client)
#### `max_total_subscriptions`
- **Description**: Maximum total subscriptions across all clients
- **Default**: `"5000"`
- **Range**: `100` to `50000`
- **Impact**: Global limit to protect server resources
- **Example**: `"10000"` (allows up to 10,000 total subscriptions)
### Message and Event Limits
#### `max_message_length`
- **Description**: Maximum WebSocket message size in bytes
- **Default**: `"65536"` (64KB)
- **Range**: `1024` to `1048576` (1MB)
- **Impact**: Prevents large messages from consuming resources
- **Example**: `"131072"` (128KB)
#### `max_event_tags`
- **Description**: Maximum number of tags allowed per event
- **Default**: `"2000"`
- **Range**: `10` to `10000`
- **Impact**: Prevents events with excessive tags
- **Example**: `"5000"`
#### `max_content_length`
- **Description**: Maximum event content length in bytes
- **Default**: `"65536"` (64KB)
- **Range**: `1` to `1048576` (1MB)
- **Impact**: Limits event content size
- **Example**: `"131072"` (128KB for longer content)
### Proof of Work (NIP-13)
#### `pow_min_difficulty`
- **Description**: Minimum proof-of-work difficulty required for events
- **Default**: `"0"` (no PoW required)
- **Range**: `0` to `40`
- **Impact**: Higher values require more computational work from clients
- **Example**: `"20"` (requires significant PoW)
#### `pow_mode`
- **Description**: How proof-of-work is handled
- **Default**: `"optional"`
- **Values**:
- `"disabled"`: PoW completely ignored
- `"optional"`: PoW verified if present but not required
- `"required"`: All events must meet minimum difficulty
- **Example**: `"required"` (enforce PoW for all events)
### Event Expiration (NIP-40)
#### `nip40_expiration_enabled`
- **Description**: Enable NIP-40 expiration timestamp support
- **Default**: `"true"`
- **Values**: `"true"` or `"false"`
- **Impact**: When enabled, processes expiration tags and removes expired events
- **Example**: `"false"` (disable expiration processing)
#### `nip40_expiration_strict`
- **Description**: Strict mode for expiration handling
- **Default**: `"false"`
- **Values**: `"true"` or `"false"`
- **Impact**: In strict mode, expired events are immediately rejected
- **Example**: `"true"` (reject expired events immediately)
#### `nip40_expiration_filter`
- **Description**: Filter expired events from query results
- **Default**: `"true"`
- **Values**: `"true"` or `"false"`
- **Impact**: When enabled, expired events are filtered from responses
- **Example**: `"false"` (include expired events in results)
#### `nip40_expiration_grace_period`
- **Description**: Grace period in seconds before expiration takes effect
- **Default**: `"300"` (5 minutes)
- **Range**: `0` to `86400` (24 hours)
- **Impact**: Allows some flexibility in expiration timing
- **Example**: `"600"` (10 minute grace period)
### NIP-59 Gift Wrap Timestamp Configuration
#### `nip59_timestamp_max_delay_sec`
- **Description**: Controls timestamp randomization for NIP-59 gift wraps
- **Default**: `"0"` (no randomization)
- **Range**: `0` to `604800` (7 days)
- **Impact**: Affects compatibility with other Nostr clients for direct messaging
- **Values**:
- `"0"`: No randomization (maximum compatibility)
- `"1-604800"`: Random timestamp between now and N seconds ago
- **Example**: `"172800"` (2 days randomization for privacy)
## Configuration Examples
### Basic Relay Setup
```json
{
  "kind": 33334,
  "content": "Basic Relay Configuration",
  "tags": [
    ["d", "relay_pubkey_here"],
    ["relay_description", "Community Nostr Relay"],
    ["relay_contact", "admin@community-relay.com"],
    ["max_subscriptions_per_client", "30"],
    ["max_total_subscriptions", "8000"]
  ]
}
```
### High-Security Relay
```json
{
  "kind": 33334,
  "content": "High Security Configuration",
  "tags": [
    ["d", "relay_pubkey_here"],
    ["relay_description", "High-Security Nostr Relay"],
    ["pow_min_difficulty", "24"],
    ["pow_mode", "required"],
    ["max_subscriptions_per_client", "10"],
    ["max_total_subscriptions", "1000"],
    ["max_message_length", "32768"],
    ["nip40_expiration_strict", "true"]
  ]
}
```
### Public Community Relay
```json
{
  "kind": 33334,
  "content": "Public Community Relay Configuration",
  "tags": [
    ["d", "relay_pubkey_here"],
    ["relay_description", "Open Community Relay - Welcome Everyone!"],
    ["relay_contact", "community@relay.example"],
    ["max_subscriptions_per_client", "50"],
    ["max_total_subscriptions", "25000"],
    ["max_content_length", "131072"],
    ["pow_mode", "optional"],
    ["pow_min_difficulty", "8"],
    ["nip40_expiration_enabled", "true"],
    ["nip40_expiration_grace_period", "900"]
  ]
}
```
### Private/Corporate Relay
```json
{
  "kind": 33334,
  "content": "Corporate Internal Relay",
  "tags": [
    ["d", "relay_pubkey_here"],
    ["relay_description", "Corporate Internal Communications"],
    ["relay_contact", "it-admin@company.com"],
    ["max_subscriptions_per_client", "20"],
    ["max_total_subscriptions", "2000"],
    ["max_message_length", "262144"],
    ["nip40_expiration_enabled", "false"],
    ["pow_mode", "disabled"]
  ]
}
```
## Security Considerations
### Admin Key Management
#### Secure Storage
```bash
# Store admin private key securely
echo "ADMIN_PRIVKEY=your_admin_private_key_here" > .env
chmod 600 .env
# Or use a password manager
# Never store in version control
echo ".env" >> .gitignore
```
#### Key Rotation
Currently, admin key rotation requires:
1. Stopping the relay
2. Removing the database (loses all events)
3. Restarting (generates new keys)
Future versions will support admin key rotation while preserving events.
### Event Validation
The relay performs comprehensive validation on configuration events:
#### Cryptographic Validation
- **Signature verification**: Uses `nostr_verify_event_signature()`
- **Event structure**: Validates JSON structure with `nostr_validate_event_structure()`
- **Admin authorization**: Ensures events are signed by the authorized admin pubkey
#### Content Validation
- **Parameter bounds checking**: Validates numeric ranges
- **String length limits**: Enforces maximum lengths
- **Enum validation**: Validates allowed values for mode parameters
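A sketch of these content checks (ranges taken from the parameter reference above; the helper names are illustrative, not the relay's actual symbols):

```c
#include <stdlib.h>
#include <string.h>

/* True if value is a well-formed integer within [min, max]. */
static int param_in_range(const char* value, long min, long max) {
    char* end;
    long v = strtol(value, &end, 10);
    return *value != '\0' && *end == '\0' && v >= min && v <= max;
}

/* True if value is one of the documented pow_mode settings. */
static int param_is_pow_mode(const char* value) {
    return strcmp(value, "disabled") == 0 ||
           strcmp(value, "optional") == 0 ||
           strcmp(value, "required") == 0;
}

/* Example: max_subscriptions_per_client must lie in [1, 100]. */
// param_in_range(tag_value, 1, 100);
```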
### Network Security
#### Access Control
```bash
# Limit access with firewall
sudo ufw allow from 192.168.1.0/24 to any port 8888
# Or use specific IPs
sudo ufw allow from 203.0.113.10 to any port 8888
```
#### TLS/SSL Termination
```nginx
# nginx configuration for HTTPS termination
server {
    listen 443 ssl;
    server_name relay.example.com;
    ssl_certificate /path/to/cert.pem;
    ssl_certificate_key /path/to/key.pem;

    location / {
        proxy_pass http://127.0.0.1:8888;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
    }
}
```
## Troubleshooting
### Configuration Not Applied
#### Check Event Signature
```javascript
// Verify event signature with nostrtool or similar
const event = { /* your configuration event */ };
const isValid = nostrTools.verifySignature(event);
```
#### Verify Admin Pubkey
```bash
# Check current admin pubkey in database
sqlite3 relay.nrdb "SELECT DISTINCT pubkey FROM events WHERE kind = 33334 ORDER BY created_at DESC LIMIT 1;"
# Compare with expected admin pubkey from first startup
grep "Admin Public Key" relay.log
```
#### Check Event Structure
```bash
# View the exact event stored in database
sqlite3 relay.nrdb "SELECT json_pretty(json_object(
'kind', kind,
'pubkey', pubkey,
'created_at', created_at,
'content', content,
'tags', json(tags),
'sig', sig
)) FROM events WHERE kind = 33334 ORDER BY created_at DESC LIMIT 1;"
```
### Configuration Validation Errors
#### Invalid Parameter Values
```bash
# Check relay logs for validation errors
journalctl -u c-relay | grep "Configuration.*invalid\|Invalid.*configuration"
# Common issues:
# - Numeric values outside valid ranges
# - Invalid enum values (e.g., pow_mode)
# - String values exceeding length limits
```
#### Missing Required Tags
```bash
# Ensure 'd' tag is present with relay pubkey
sqlite3 relay.nrdb "SELECT tags FROM events WHERE kind = 33334 ORDER BY created_at DESC LIMIT 1;" | grep '"d"'
```
### Performance Impact
#### Monitor Configuration Changes
```bash
# Track configuration update frequency
sqlite3 relay.nrdb "SELECT datetime(created_at, 'unixepoch') as date,
COUNT(*) as config_updates
FROM events WHERE kind = 33334
GROUP BY date(created_at, 'unixepoch')
ORDER BY date DESC;"
```
#### Resource Usage After Changes
```bash
# Monitor system resources after configuration updates
top -p $(pgrep c_relay)
# Check for memory leaks
ps aux | grep c_relay | awk '{print $6}' # RSS memory
```
### Emergency Recovery
#### Reset to Default Configuration
If configuration becomes corrupted or causes issues:
```bash
# Create emergency configuration event
nostrtool event \
--kind 33334 \
--content "Emergency Reset Configuration" \
--tag d YOUR_RELAY_PUBKEY \
--tag max_subscriptions_per_client 25 \
--tag max_total_subscriptions 5000 \
--tag pow_mode optional \
--tag pow_min_difficulty 0 \
--private-key YOUR_ADMIN_PRIVKEY \
| nostrtool send ws://localhost:8888
```
#### Database Recovery
```bash
# If database is corrupted, backup and recreate
cp relay.nrdb relay.nrdb.backup
rm relay.nrdb*
./build/c_relay_x86 # Creates fresh database with new keys
```
---
This configuration guide covers all aspects of managing the C Nostr Relay's event-based configuration system. The system provides unprecedented flexibility and security for Nostr relay administration while maintaining simplicity and real-time responsiveness.

docs/debug_system.md Normal file

@@ -0,0 +1,562 @@
# Simple Debug System Proposal
## Overview
A minimal debug system with 6 levels (0-5) controlled by a single `--debug-level` flag. TRACE level (5) automatically includes file:line information for ALL messages. Uses compile-time macros to ensure **zero performance impact and zero size increase** in production builds.
## Debug Levels
```c
typedef enum {
    DEBUG_LEVEL_NONE  = 0,  // Production: no debug output
    DEBUG_LEVEL_ERROR = 1,  // Errors only
    DEBUG_LEVEL_WARN  = 2,  // Errors + Warnings
    DEBUG_LEVEL_INFO  = 3,  // Errors + Warnings + Info
    DEBUG_LEVEL_DEBUG = 4,  // All above + Debug messages
    DEBUG_LEVEL_TRACE = 5   // All above + Trace (very verbose)
} debug_level_t;
```
## Usage
```bash
# Production (default - no debug output)
./c_relay_x86
# Show errors only
./c_relay_x86 --debug-level=1
# Show errors and warnings
./c_relay_x86 --debug-level=2
# Show errors, warnings, and info (recommended for development)
./c_relay_x86 --debug-level=3
# Show all debug messages
./c_relay_x86 --debug-level=4
# Show everything including trace with file:line (very verbose)
./c_relay_x86 --debug-level=5
```
## Implementation
### 1. Header File (`src/debug.h`)
```c
#ifndef DEBUG_H
#define DEBUG_H
#include <stdio.h>
#include <time.h>
// Debug levels
typedef enum {
    DEBUG_LEVEL_NONE  = 0,
    DEBUG_LEVEL_ERROR = 1,
    DEBUG_LEVEL_WARN  = 2,
    DEBUG_LEVEL_INFO  = 3,
    DEBUG_LEVEL_DEBUG = 4,
    DEBUG_LEVEL_TRACE = 5
} debug_level_t;
// Global debug level (set at runtime via CLI)
extern debug_level_t g_debug_level;
// Initialize debug system
void debug_init(int level);
// Core logging function
void debug_log(debug_level_t level, const char* file, int line, const char* format, ...);
// Convenience macros that check level before calling
// Note: when the debug level is TRACE (5), file:line information is printed for ALL messages
#define DEBUG_ERROR(...) \
do { if (g_debug_level >= DEBUG_LEVEL_ERROR) debug_log(DEBUG_LEVEL_ERROR, __FILE__, __LINE__, __VA_ARGS__); } while(0)
#define DEBUG_WARN(...) \
do { if (g_debug_level >= DEBUG_LEVEL_WARN) debug_log(DEBUG_LEVEL_WARN, __FILE__, __LINE__, __VA_ARGS__); } while(0)
#define DEBUG_INFO(...) \
do { if (g_debug_level >= DEBUG_LEVEL_INFO) debug_log(DEBUG_LEVEL_INFO, __FILE__, __LINE__, __VA_ARGS__); } while(0)
#define DEBUG_LOG(...) \
do { if (g_debug_level >= DEBUG_LEVEL_DEBUG) debug_log(DEBUG_LEVEL_DEBUG, __FILE__, __LINE__, __VA_ARGS__); } while(0)
#define DEBUG_TRACE(...) \
do { if (g_debug_level >= DEBUG_LEVEL_TRACE) debug_log(DEBUG_LEVEL_TRACE, __FILE__, __LINE__, __VA_ARGS__); } while(0)
#endif /* DEBUG_H */
```
### 2. Implementation File (`src/debug.c`)
```c
#include "debug.h"
#include <stdarg.h>
#include <string.h>
// Global debug level (default: no debug output)
debug_level_t g_debug_level = DEBUG_LEVEL_NONE;
void debug_init(int level) {
    if (level < 0) level = 0;
    if (level > 5) level = 5;
    g_debug_level = (debug_level_t)level;
}

void debug_log(debug_level_t level, const char* file, int line, const char* format, ...) {
    // Get timestamp
    time_t now = time(NULL);
    struct tm* tm_info = localtime(&now);
    char timestamp[32];
    strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);
    // Get level string
    const char* level_str = "UNKNOWN";
    switch (level) {
        case DEBUG_LEVEL_ERROR: level_str = "ERROR"; break;
        case DEBUG_LEVEL_WARN:  level_str = "WARN "; break;
        case DEBUG_LEVEL_INFO:  level_str = "INFO "; break;
        case DEBUG_LEVEL_DEBUG: level_str = "DEBUG"; break;
        case DEBUG_LEVEL_TRACE: level_str = "TRACE"; break;
        default: break;
    }
    // Print prefix with timestamp and level
    printf("[%s] [%s] ", timestamp, level_str);
    // Print source location when debug level is TRACE (5) or higher
    if (file && g_debug_level >= DEBUG_LEVEL_TRACE) {
        // Extract just the filename (not full path)
        const char* filename = strrchr(file, '/');
        filename = filename ? filename + 1 : file;
        printf("[%s:%d] ", filename, line);
    }
    // Print message
    va_list args;
    va_start(args, format);
    vprintf(format, args);
    va_end(args);
    printf("\n");
    fflush(stdout);
}
```
### 3. CLI Argument Parsing (add to `src/main.c`)
```c
// In main() function, add to argument parsing:
int debug_level = 0;  // Default: no debug output
for (int i = 1; i < argc; i++) {
    if (strncmp(argv[i], "--debug-level=", 14) == 0) {
        debug_level = atoi(argv[i] + 14);
        if (debug_level < 0) debug_level = 0;
        if (debug_level > 5) debug_level = 5;
    }
    // ... other arguments ...
}
// Initialize debug system
debug_init(debug_level);
```
### 4. Update Makefile
```makefile
# Add debug.c to source files
MAIN_SRC = src/main.c src/config.c src/debug.c src/dm_admin.c src/request_validator.c ...
```
## Migration Strategy
### Keep Existing Functions
The existing `log_*` functions can remain as wrappers:
```c
// src/main.c - Update existing functions
// Note: These don't include file:line since they're wrappers
void log_info(const char* message) {
    if (g_debug_level >= DEBUG_LEVEL_INFO) {
        debug_log(DEBUG_LEVEL_INFO, NULL, 0, "%s", message);
    }
}

void log_error(const char* message) {
    if (g_debug_level >= DEBUG_LEVEL_ERROR) {
        debug_log(DEBUG_LEVEL_ERROR, NULL, 0, "%s", message);
    }
}

void log_warning(const char* message) {
    if (g_debug_level >= DEBUG_LEVEL_WARN) {
        debug_log(DEBUG_LEVEL_WARN, NULL, 0, "%s", message);
    }
}

void log_success(const char* message) {
    if (g_debug_level >= DEBUG_LEVEL_INFO) {
        debug_log(DEBUG_LEVEL_INFO, NULL, 0, "✓ %s", message);
    }
}
```
### Gradual Migration
Gradually replace log calls with debug macros:
```c
// Before:
log_info("Starting WebSocket relay server");
// After:
DEBUG_INFO("Starting WebSocket relay server");
// Before:
log_error("Failed to initialize database");
// After:
DEBUG_ERROR("Failed to initialize database");
```
### Add New Debug Levels
Add debug and trace messages where needed:
```c
// Detailed debugging
DEBUG_LOG("Processing subscription: %s", sub_id);
DEBUG_LOG("Filter count: %d", filter_count);
// Very verbose tracing
DEBUG_TRACE("Entering handle_req_message()");
DEBUG_TRACE("Subscription ID validated: %s", sub_id);
DEBUG_TRACE("Exiting handle_req_message()");
```
## Manual Guards for Expensive Operations
### The Problem
Debug macros use **runtime checks**, which means function arguments are always evaluated:
```c
// ❌ BAD: Database query executes even when debug level is 0
DEBUG_LOG("Count: %d", expensive_database_query());
```
The `expensive_database_query()` will **always execute** because function arguments are evaluated before the `if` check inside the macro.
### The Solution: Manual Guards
For expensive operations (database queries, file I/O, complex calculations), use manual guards:
```c
// ✅ GOOD: Query only executes when debugging is enabled
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
    int count = expensive_database_query();
    DEBUG_LOG("Count: %d", count);
}
```
### Standardized Comment Format
To make temporary debug guards easy to find and remove, use this standardized format:
```c
// DEBUG_GUARD_START
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
    // Expensive operation here
    sqlite3_stmt* stmt;
    const char* sql = "SELECT COUNT(*) FROM events";
    int count = 0;
    if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) == SQLITE_OK) {
        if (sqlite3_step(stmt) == SQLITE_ROW) {
            count = sqlite3_column_int(stmt, 0);
        }
        sqlite3_finalize(stmt);
    }
    DEBUG_LOG("Event count: %d", count);
}
// DEBUG_GUARD_END
```
### Easy Removal
When you're done debugging, find and remove all temporary guards:
```bash
# Find all debug guards
grep -n "DEBUG_GUARD_START" src/*.c
# Remove guards with sed (between START and END markers)
sed -i '/DEBUG_GUARD_START/,/DEBUG_GUARD_END/d' src/config.c
```
Or use a simple script:
```bash
#!/bin/bash
# remove_debug_guards.sh
for file in src/*.c; do
    sed -i '/DEBUG_GUARD_START/,/DEBUG_GUARD_END/d' "$file"
    echo "Removed debug guards from $file"
done
```
### When to Use Manual Guards
Use manual guards for:
- ✅ Database queries
- ✅ File I/O operations
- ✅ Network requests
- ✅ Complex calculations
- ✅ Memory allocations for debug data
- ✅ String formatting with multiple operations
Don't need guards for:
- ❌ Simple variable access
- ❌ Basic arithmetic
- ❌ String literals
- ❌ Function calls that are already cheap
### Example: Database Query Guard
```c
// DEBUG_GUARD_START
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
    sqlite3_stmt* count_stmt;
    const char* count_sql = "SELECT COUNT(*) FROM config";
    int config_count = 0;
    if (sqlite3_prepare_v2(g_db, count_sql, -1, &count_stmt, NULL) == SQLITE_OK) {
        if (sqlite3_step(count_stmt) == SQLITE_ROW) {
            config_count = sqlite3_column_int(count_stmt, 0);
        }
        sqlite3_finalize(count_stmt);
    }
    DEBUG_LOG("Config table has %d rows", config_count);
}
// DEBUG_GUARD_END
```
### Example: Complex String Formatting Guard
```c
// DEBUG_GUARD_START
if (g_debug_level >= DEBUG_LEVEL_TRACE) {
    char filter_str[1024] = {0};
    int offset = 0;
    for (int i = 0; i < filter_count && offset < (int)sizeof(filter_str) - 1; i++) {
        offset += snprintf(filter_str + offset, sizeof(filter_str) - offset,
                           "Filter %d: kind=%d, author=%s; ",
                           i, filters[i].kind, filters[i].author);
    }
    DEBUG_TRACE("Processing filters: %s", filter_str);
}
// DEBUG_GUARD_END
```
### Alternative: Compile-Time Guards
For permanent debug code that should be completely removed in production builds, use compile-time guards:
```c
#ifdef ENABLE_DEBUG_CODE
// This code is completely removed when ENABLE_DEBUG_CODE is not defined
int count = expensive_database_query();
DEBUG_LOG("Count: %d", count);
#endif
```
Build with debug code:
```bash
make CFLAGS="-DENABLE_DEBUG_CODE"
```
Build without debug code (production):
```bash
make # No debug code compiled in
```
### Best Practices
1. **Always use standardized markers** (`DEBUG_GUARD_START`/`DEBUG_GUARD_END`) for temporary guards
2. **Add a comment** explaining what you're debugging
3. **Remove guards** when debugging is complete
4. **Use compile-time guards** for permanent debug infrastructure
5. **Keep guards simple** - one guard per logical debug operation
## Performance Impact
### Runtime Check
The macros include a runtime check:
```c
#define DEBUG_INFO(...) \
do { if (g_debug_level >= DEBUG_LEVEL_INFO) debug_log(DEBUG_LEVEL_INFO, __FILE__, __LINE__, __VA_ARGS__); } while(0)
```
**Cost**: One integer comparison per debug statement (~1 CPU cycle)
**Impact**: Negligible - the comparison is faster than a function call
**Note**: Every macro passes `__FILE__` and `__LINE__`, which are compile-time constants with no runtime overhead; the source location they carry is only printed when the debug level is TRACE.
### When Debug Level is 0 (Production)
```c
// With g_debug_level = 0:
DEBUG_INFO("Starting server");
// Becomes:
if (0 >= 3) debug_log(...); // Never executes
// Compiler optimizes to:
// (nothing - branch is eliminated)
```
**Result**: The branch is never taken, so the per-statement cost is a single well-predicted comparison. When the compiler can prove `g_debug_level` is 0 at compile time (constant propagation or LTO), it eliminates the dead branch entirely.
### Size Impact
**Test Case**: 100 debug statements in code
**Without optimization** (`-O0`):
- Binary size increase: ~2KB (branch instructions)
- Runtime cost: 100 comparisons per execution
**With optimization** (`-O2` or `-O3`):
- Binary size increase: **0 bytes** (dead code eliminated when the compiler can prove `g_debug_level` is 0)
- Runtime cost: **0 cycles** (branches removed by the compiler under the same condition)
### Verification
You can verify the optimization with:
```bash
# Compile with optimization
gcc -O2 -c debug_test.c -o debug_test.o
# Disassemble and check
objdump -d debug_test.o | grep -A 10 "debug_log"
```
When `g_debug_level = 0` (constant), you'll see the compiler has removed all debug calls.
## Example Output
### Level 0 (Production)
```
(no output)
```
### Level 1 (Errors Only)
```
[2025-01-12 14:30:15] [ERROR] Failed to open database: permission denied
[2025-01-12 14:30:20] [ERROR] WebSocket connection failed: port in use
```
### Level 2 (Errors + Warnings)
```
[2025-01-12 14:30:15] [ERROR] Failed to open database: permission denied
[2025-01-12 14:30:16] [WARN ] Port 8888 unavailable, trying 8889
[2025-01-12 14:30:17] [WARN ] Configuration key 'relay_name' not found, using default
```
### Level 3 (Errors + Warnings + Info)
```
[2025-01-12 14:30:15] [INFO ] Initializing C-Relay v0.4.6
[2025-01-12 14:30:15] [INFO ] Loading configuration from database
[2025-01-12 14:30:15] [ERROR] Failed to open database: permission denied
[2025-01-12 14:30:16] [WARN ] Port 8888 unavailable, trying 8889
[2025-01-12 14:30:17] [INFO ] WebSocket relay started on ws://127.0.0.1:8889
```
### Level 4 (All Debug Messages)
```
[2025-01-12 14:30:15] [INFO ] Initializing C-Relay v0.4.6
[2025-01-12 14:30:15] [DEBUG] Opening database: build/abc123...def.db
[2025-01-12 14:30:15] [DEBUG] Executing schema initialization
[2025-01-12 14:30:15] [INFO ] SQLite WAL mode enabled
[2025-01-12 14:30:16] [DEBUG] Attempting to bind to port 8888
[2025-01-12 14:30:16] [WARN ] Port 8888 unavailable, trying 8889
[2025-01-12 14:30:17] [DEBUG] Successfully bound to port 8889
[2025-01-12 14:30:17] [INFO ] WebSocket relay started on ws://127.0.0.1:8889
```
### Level 5 (Everything Including file:line for ALL messages)
```
[2025-01-12 14:30:15] [INFO ] [main.c:1607] Initializing C-Relay v0.4.6
[2025-01-12 14:30:15] [DEBUG] [main.c:348] Opening database: build/abc123...def.db
[2025-01-12 14:30:15] [TRACE] [main.c:330] Entering init_database()
[2025-01-12 14:30:15] [ERROR] [config.c:125] Database locked
```
## Implementation Steps
### Step 1: Create Files (5 minutes)
1. Create `src/debug.h` with the header code above
2. Create `src/debug.c` with the implementation code above
3. Update `Makefile` to include `src/debug.c` in `MAIN_SRC`
### Step 2: Add CLI Parsing (5 minutes)
Add `--debug-level` argument parsing to `main()` in `src/main.c`
### Step 3: Update Existing Functions (5 minutes)
Update the existing `log_*` functions to use the new debug macros
### Step 4: Test (5 minutes)
```bash
# Build
make clean && make
# Test different levels
./build/c_relay_x86 # No output
./build/c_relay_x86 --debug-level=1 # Errors only
./build/c_relay_x86 --debug-level=3 # Info + warnings + errors
./build/c_relay_x86 --debug-level=4 # All debug messages
./build/c_relay_x86 --debug-level=5 # Everything with file:line on TRACE
```
### Step 5: Gradual Migration (Ongoing)
As you work on different parts of the code, replace `log_*` calls with `DEBUG_*` macros and add new debug/trace statements where helpful.
## Benefits
- **Simple**: Single flag, 6 levels, easy to understand
- **Zero Overhead**: Compiler optimizes away unused debug code
- **Zero Size Impact**: No binary size increase in production
- **Backward Compatible**: Existing `log_*` functions still work
- **Easy Migration**: Gradual replacement of log calls
- **Flexible**: Can add detailed debugging without affecting production
## Total Implementation Time
- **~20 minutes** for basic implementation
- **Ongoing** for gradual migration of existing log calls
## Recommendation
This is the simplest possible debug system that provides:
- Multiple debug levels for different verbosity
- Zero performance impact in production
- Zero binary size increase
- Easy to use and understand
- Backward compatible with existing code
Start with the basic implementation, test it, then gradually migrate existing log calls and add new debug statements as needed.


@@ -0,0 +1,94 @@
# Default Configuration Event Template
This document contains the template for the `src/default_config_event.h` file that will be created during implementation.
## File: `src/default_config_event.h`
```c
#ifndef DEFAULT_CONFIG_EVENT_H
#define DEFAULT_CONFIG_EVENT_H
#include "cJSON.h"  /* for the cJSON* return type declared below */
/*
* Default Configuration Event Template
*
* This header contains the default configuration values for the C Nostr Relay.
* These values are used to create the initial kind 33334 configuration event
* during first-time startup.
*
* IMPORTANT: These values should never be accessed directly by other parts
* of the program. They are only used during initial configuration event creation.
*/
// Default configuration key-value pairs
static const struct {
    const char* key;
    const char* value;
} DEFAULT_CONFIG_VALUES[] = {
    // Authentication
    {"auth_enabled", "false"},
    // Server Core Settings
    {"relay_port", "8888"},
    {"max_connections", "100"},
    // NIP-11 Relay Information (relay keys will be populated at runtime)
    {"relay_description", "High-performance C Nostr relay with SQLite storage"},
    {"relay_contact", ""},
    {"relay_software", "https://git.laantungir.net/laantungir/c-relay.git"},
    {"relay_version", "v1.0.0"},
    // NIP-13 Proof of Work (pow_min_difficulty = 0 means PoW disabled)
    {"pow_min_difficulty", "0"},
    {"pow_mode", "basic"},
    // NIP-40 Expiration Timestamp
    {"nip40_expiration_enabled", "true"},
    {"nip40_expiration_strict", "true"},
    {"nip40_expiration_filter", "true"},
    {"nip40_expiration_grace_period", "300"},
    // Subscription Limits
    {"max_subscriptions_per_client", "25"},
    {"max_total_subscriptions", "5000"},
    {"max_filters_per_subscription", "10"},
    // Event Processing Limits
    {"max_event_tags", "100"},
    {"max_content_length", "8196"},
    {"max_message_length", "16384"},
    // Performance Settings
    {"default_limit", "500"},
    {"max_limit", "5000"}
};
// Number of default configuration values
#define DEFAULT_CONFIG_COUNT (sizeof(DEFAULT_CONFIG_VALUES) / sizeof(DEFAULT_CONFIG_VALUES[0]))
// Function to create default configuration event
cJSON* create_default_config_event(const unsigned char* admin_privkey_bytes,
                                   const char* relay_privkey_hex,
                                   const char* relay_pubkey_hex);
#endif /* DEFAULT_CONFIG_EVENT_H */
```
## Usage Notes
1. **Isolation**: These default values are completely isolated from the rest of the program
2. **Single Access Point**: Only accessed during `create_default_config_event()`
3. **Runtime Keys**: Relay keys are added at runtime, not stored as defaults
4. **No Direct Access**: Other parts of the program should never include this header directly
5. **Clean Separation**: Keeps default configuration separate from configuration logic
## Function Implementation
The `create_default_config_event()` function will:
1. Create a new cJSON event object with kind 33334
2. Add all default configuration values as tags
3. Add runtime-generated relay keys as tags
4. Use `nostr_core_lib` to sign the event with admin private key
5. Return the complete signed event ready for database storage
This approach ensures clean separation between default values and the configuration system logic.
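A sketch of those five steps (assumptions: cJSON for event construction, as in the header above; the final signing call is a placeholder for whatever `nostr_core_lib` actually exposes):

```c
#include "cJSON.h"
#include "default_config_event.h"

cJSON* create_default_config_event(const unsigned char* admin_privkey_bytes,
                                   const char* relay_privkey_hex,
                                   const char* relay_pubkey_hex) {
    (void)relay_privkey_hex;  /* reserved for relay-key tags added at runtime */

    cJSON* event = cJSON_CreateObject();
    cJSON_AddNumberToObject(event, "kind", 33334);
    cJSON_AddStringToObject(event, "content", "C Nostr Relay Configuration");

    cJSON* tags = cJSON_AddArrayToObject(event, "tags");

    /* 'd' tag identifies which relay this configuration belongs to. */
    cJSON* d_tag = cJSON_CreateArray();
    cJSON_AddItemToArray(d_tag, cJSON_CreateString("d"));
    cJSON_AddItemToArray(d_tag, cJSON_CreateString(relay_pubkey_hex));
    cJSON_AddItemToArray(tags, d_tag);

    /* One tag per default key/value pair. */
    for (size_t i = 0; i < DEFAULT_CONFIG_COUNT; i++) {
        cJSON* tag = cJSON_CreateArray();
        cJSON_AddItemToArray(tag, cJSON_CreateString(DEFAULT_CONFIG_VALUES[i].key));
        cJSON_AddItemToArray(tag, cJSON_CreateString(DEFAULT_CONFIG_VALUES[i].value));
        cJSON_AddItemToArray(tags, tag);
    }

    /* Placeholder: sign with the admin key via nostr_core_lib, which
     * fills in pubkey, created_at, id, and sig. */
    // nostr_sign_event(event, admin_privkey_bytes);  /* hypothetical name */
    (void)admin_privkey_bytes;
    return event;
}
```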

docs/deployment_guide.md Normal file

@@ -0,0 +1,600 @@
# Deployment Guide - C Nostr Relay
Complete deployment guide for the C Nostr Relay with event-based configuration system across different environments and platforms.
## Table of Contents
- [Deployment Overview](#deployment-overview)
- [Production Deployment](#production-deployment)
- [Cloud Deployments](#cloud-deployments)
- [Container Deployment](#container-deployment)
- [Reverse Proxy Setup](#reverse-proxy-setup)
- [Monitoring Setup](#monitoring-setup)
- [Security Hardening](#security-hardening)
- [Backup and Recovery](#backup-and-recovery)
## Deployment Overview
The C Nostr Relay's event-based configuration system simplifies deployment:
### Key Deployment Benefits
- **Zero Configuration**: No config files to manage or transfer
- **Self-Contained**: Single binary + auto-generated database
- **Portable**: Database contains all relay state and configuration
- **Secure**: Admin keys generated locally, never transmitted
- **Scalable**: Efficient SQLite backend with WAL mode
### Deployment Requirements
- **CPU**: 1 vCPU minimum, 2+ recommended
- **RAM**: 512MB minimum, 2GB+ recommended
- **Storage**: 100MB for binary + database growth (varies by usage)
- **Network**: Port 8888 (configurable via events)
- **OS**: Linux (recommended), macOS, Windows (WSL)
## Production Deployment
### Server Preparation
#### System Updates
```bash
# Ubuntu/Debian
sudo apt update && sudo apt upgrade -y
# CentOS/RHEL
sudo yum update -y
# Install required packages
sudo apt install -y build-essential git sqlite3 libsqlite3-dev \
libwebsockets-dev libssl-dev libsecp256k1-dev libcurl4-openssl-dev \
zlib1g-dev systemd
```
#### User and Directory Setup
```bash
# Create dedicated system user
sudo useradd --system --home-dir /opt/c-relay --shell /bin/false c-relay
# Create application directory
sudo mkdir -p /opt/c-relay
sudo chown c-relay:c-relay /opt/c-relay
```
### Build and Installation
#### Automated Installation (Recommended)
```bash
# Clone repository
git clone https://github.com/your-org/c-relay.git
cd c-relay
git submodule update --init --recursive
# Build
make clean && make
# Install as systemd service
sudo systemd/install-service.sh
```
#### Manual Installation
```bash
# Build relay
make clean && make
# Install binary
sudo cp build/c_relay_x86 /opt/c-relay/
sudo chown c-relay:c-relay /opt/c-relay/c_relay_x86
sudo chmod +x /opt/c-relay/c_relay_x86
# Install systemd service
sudo cp systemd/c-relay.service /etc/systemd/system/
sudo systemctl daemon-reload
```
### Service Management
#### Start and Enable Service
```bash
# Start the service
sudo systemctl start c-relay
# Enable auto-start on boot
sudo systemctl enable c-relay
# Check status
sudo systemctl status c-relay
```
#### Capture Admin Keys (CRITICAL)
```bash
# View startup logs to get admin keys
sudo journalctl -u c-relay --since "5 minutes ago" | grep -A 10 "IMPORTANT: SAVE THIS ADMIN PRIVATE KEY"
# Or check the full log
sudo journalctl -u c-relay --no-pager | grep "Admin Private Key"
```
⚠️ **CRITICAL**: Save the admin private key immediately - it's only shown once and is needed for all configuration updates!
### Firewall Configuration
#### UFW (Ubuntu)
```bash
# Allow relay port
sudo ufw allow 8888/tcp
# Allow SSH (ensure you don't lock yourself out)
sudo ufw allow 22/tcp
# Enable firewall
sudo ufw enable
```
#### iptables
```bash
# Allow relay port
sudo iptables -A INPUT -p tcp --dport 8888 -j ACCEPT
# Save rules (Ubuntu/Debian)
sudo iptables-save > /etc/iptables/rules.v4
```
## Cloud Deployments
### AWS EC2
#### Instance Setup
```bash
# Launch Ubuntu 22.04 LTS instance (t3.micro or larger)
# Security Group: Allow port 8888 from 0.0.0.0/0 (or restricted IPs)
# Connect via SSH
ssh -i your-key.pem ubuntu@your-instance-ip
# Use the simple deployment script
git clone https://github.com/your-org/c-relay.git
cd c-relay
sudo examples/deployment/simple-vps/deploy.sh
```
#### Elastic IP (Recommended)
```bash
# Associate Elastic IP to ensure consistent public IP
# Configure DNS A record to point to Elastic IP
```
#### EBS Volume for Data
```bash
# Attach EBS volume for persistent storage
sudo mkfs.ext4 /dev/xvdf
sudo mkdir /data
sudo mount /dev/xvdf /data
sudo chown c-relay:c-relay /data
# Update systemd service to use /data
sudo sed -i 's/WorkingDirectory=\/opt\/c-relay/WorkingDirectory=\/data/' /etc/systemd/system/c-relay.service
sudo systemctl daemon-reload
```
### Google Cloud Platform
#### Compute Engine Setup
```bash
# Create VM instance (e2-micro or larger)
gcloud compute instances create c-relay-instance \
--image-family=ubuntu-2204-lts \
--image-project=ubuntu-os-cloud \
--machine-type=e2-micro \
--tags=nostr-relay
# Configure firewall
gcloud compute firewall-rules create allow-nostr-relay \
--allow tcp:8888 \
--source-ranges 0.0.0.0/0 \
--target-tags nostr-relay
# SSH and deploy
gcloud compute ssh c-relay-instance
git clone https://github.com/your-org/c-relay.git
cd c-relay
sudo examples/deployment/simple-vps/deploy.sh
```
#### Persistent Disk
```bash
# Create and attach persistent disk
gcloud compute disks create relay-data --size=50GB
gcloud compute instances attach-disk c-relay-instance --disk=relay-data
# Format and mount
sudo mkfs.ext4 /dev/sdb
sudo mkdir /data
sudo mount /dev/sdb /data
sudo chown c-relay:c-relay /data
```
### DigitalOcean
#### Droplet Creation
```bash
# Create Ubuntu 22.04 droplet (Basic plan, $6/month minimum)
# Enable monitoring and backups
# SSH into droplet
ssh root@your-droplet-ip
# Deploy relay
git clone https://github.com/your-org/c-relay.git
cd c-relay
examples/deployment/simple-vps/deploy.sh
```
#### Block Storage
```bash
# Attach block storage volume
# Format and mount as /data
sudo mkfs.ext4 /dev/sda
sudo mkdir /data
sudo mount /dev/sda /data
echo '/dev/sda /data ext4 defaults,nofail,discard 0 2' >> /etc/fstab
```
## Automated Deployment Examples
The `examples/deployment/` directory contains ready-to-use scripts:
### Simple VPS Deployment
```bash
# Clone repository and run automated deployment
git clone https://github.com/your-org/c-relay.git
cd c-relay
sudo examples/deployment/simple-vps/deploy.sh
```
### SSL Proxy Setup
```bash
# Set up nginx reverse proxy with SSL
sudo examples/deployment/nginx-proxy/setup-ssl-proxy.sh \
-d relay.example.com -e admin@example.com
```
### Monitoring Setup
```bash
# Set up continuous monitoring
sudo examples/deployment/monitoring/monitor-relay.sh \
-c -i 60 -e admin@example.com
```
### Backup Setup
```bash
# Set up automated backups
sudo examples/deployment/backup/backup-relay.sh \
-s my-backup-bucket -e admin@example.com
```
## Reverse Proxy Setup
### Nginx Configuration
#### Basic WebSocket Proxy
```nginx
# /etc/nginx/sites-available/nostr-relay
server {
    listen 80;
    server_name relay.yourdomain.com;

    location / {
        proxy_pass http://127.0.0.1:8888;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # WebSocket timeouts
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
    }
}
```
#### HTTPS with Let's Encrypt
```bash
# Install certbot
sudo apt install -y certbot python3-certbot-nginx
# Obtain certificate
sudo certbot --nginx -d relay.yourdomain.com
# Auto-renewal (crontab)
echo "0 12 * * * /usr/bin/certbot renew --quiet" | sudo crontab -
```
#### Enhanced HTTPS Configuration
```nginx
server {
    listen 443 ssl http2;
    server_name relay.yourdomain.com;

    # SSL configuration
    ssl_certificate /etc/letsencrypt/live/relay.yourdomain.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/relay.yourdomain.com/privkey.pem;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384;
    ssl_prefer_server_ciphers off;

    # Security headers
    add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload";
    add_header X-Content-Type-Options nosniff;
    add_header X-Frame-Options DENY;
    add_header X-XSS-Protection "1; mode=block";

    # Rate limiting (optional; note that the matching limit_req_zone
    # directive must be declared in the http {} block, not here)
    limit_req zone=relay burst=20 nodelay;

    location / {
        proxy_pass http://127.0.0.1:8888;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # WebSocket timeouts
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
        # Buffer settings
        proxy_buffering off;
    }
}

# Redirect HTTP to HTTPS
server {
    listen 80;
    server_name relay.yourdomain.com;
    return 301 https://$server_name$request_uri;
}
```
### Apache Configuration
#### WebSocket Proxy with mod_proxy_wstunnel
```apache
# Enable required modules
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_wstunnel
sudo a2enmod ssl
# /etc/apache2/sites-available/nostr-relay.conf
<VirtualHost *:443>
ServerName relay.yourdomain.com
# SSL configuration
SSLEngine on
SSLCertificateFile /etc/letsencrypt/live/relay.yourdomain.com/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/relay.yourdomain.com/privkey.pem
# WebSocket proxy
ProxyPreserveHost On
ProxyRequests Off
ProxyPass / ws://127.0.0.1:8888/
ProxyPassReverse / ws://127.0.0.1:8888/
# Fallback for HTTP requests
RewriteEngine on
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule ^/?(.*) "ws://127.0.0.1:8888/$1" [P,L]
# Security headers
Header always set Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"
Header always set X-Content-Type-Options nosniff
Header always set X-Frame-Options DENY
</VirtualHost>
<VirtualHost *:80>
ServerName relay.yourdomain.com
Redirect permanent / https://relay.yourdomain.com/
</VirtualHost>
```
## Monitoring Setup
### System Monitoring
#### Basic Monitoring Script
```bash
#!/bin/bash
# /usr/local/bin/relay-monitor.sh
LOG_FILE="/var/log/relay-monitor.log"
DATE=$(date '+%Y-%m-%d %H:%M:%S')
# Check if relay is running
if ! pgrep -f "c_relay_x86" > /dev/null; then
echo "[$DATE] ERROR: Relay process not running" >> $LOG_FILE
systemctl restart c-relay
fi
# Check port availability
if ! netstat -tln | grep -q ":8888"; then
echo "[$DATE] ERROR: Port 8888 not listening" >> $LOG_FILE
fi
# Check database file
RELAY_DB=$(find /opt/c-relay -name "*.nrdb" | head -1)
if [[ -n "$RELAY_DB" ]]; then
DB_SIZE=$(du -h "$RELAY_DB" | cut -f1)
echo "[$DATE] INFO: Database size: $DB_SIZE" >> $LOG_FILE
fi
# Check memory usage
MEM_USAGE=$(ps aux | grep c_relay_x86 | grep -v grep | awk '{print $6}')
if [[ -n "$MEM_USAGE" ]]; then
echo "[$DATE] INFO: Memory usage: ${MEM_USAGE}KB" >> $LOG_FILE
fi
```
#### Cron Job Setup
```bash
# Add to crontab
echo "*/5 * * * * /usr/local/bin/relay-monitor.sh" | sudo crontab -
# Make script executable
sudo chmod +x /usr/local/bin/relay-monitor.sh
```
### Log Aggregation
#### Centralized Logging with rsyslog
```bash
# /etc/rsyslog.d/50-c-relay.conf
if $programname == 'c-relay' then /var/log/c-relay.log
& stop
```
### External Monitoring
#### Prometheus Integration
```yaml
# /etc/prometheus/prometheus.yml
scrape_configs:
- job_name: 'c-relay'
static_configs:
- targets: ['localhost:8888']
metrics_path: '/metrics' # If implemented
scrape_interval: 30s
```
## Security Hardening
### System Hardening
#### Service User Restrictions
```bash
# Restrict service user
sudo usermod -s /bin/false c-relay
sudo usermod -d /opt/c-relay c-relay
# Set proper permissions
sudo chmod 700 /opt/c-relay
sudo chown -R c-relay:c-relay /opt/c-relay
```
#### File System Restrictions
```bash
# Mount data directory with appropriate options
echo "/dev/sdb /opt/c-relay ext4 defaults,noexec,nosuid,nodev 0 2" >> /etc/fstab
```
### Network Security
#### Fail2Ban Configuration
```ini
# /etc/fail2ban/jail.d/c-relay.conf
[c-relay-dos]
enabled = true
port = 8888
filter = c-relay-dos
logpath = /var/log/c-relay.log
maxretry = 10
findtime = 60
bantime = 300
```
#### DDoS Protection
```bash
# iptables rate limiting
sudo iptables -A INPUT -p tcp --dport 8888 -m limit --limit 25/minute --limit-burst 100 -j ACCEPT
sudo iptables -A INPUT -p tcp --dport 8888 -j DROP
```
### Database Security
#### Encryption at Rest
```bash
# Use encrypted filesystem
sudo cryptsetup luksFormat /dev/sdb
sudo cryptsetup luksOpen /dev/sdb relay-data
sudo mkfs.ext4 /dev/mapper/relay-data
```
## Backup and Recovery
### Automated Backup
#### Database Backup Script
```bash
#!/bin/bash
# /usr/local/bin/backup-relay.sh
BACKUP_DIR="/backup/c-relay"
DATE=$(date +%Y%m%d_%H%M%S)
RELAY_DB=$(find /opt/c-relay -name "*.nrdb" | head -1)
mkdir -p "$BACKUP_DIR"
if [[ -n "$RELAY_DB" ]]; then
# SQLite backup
sqlite3 "$RELAY_DB" ".backup $BACKUP_DIR/relay_backup_$DATE.nrdb"
# Compress backup
gzip "$BACKUP_DIR/relay_backup_$DATE.nrdb"
# Cleanup old backups (keep 30 days)
find "$BACKUP_DIR" -name "relay_backup_*.nrdb.gz" -mtime +30 -delete
echo "Backup completed: relay_backup_$DATE.nrdb.gz"
else
echo "No relay database found!"
exit 1
fi
```
#### Cron Schedule
```bash
# Daily backup at 2 AM
echo "0 2 * * * /usr/local/bin/backup-relay.sh" | sudo crontab -
```
### Cloud Backup
#### AWS S3 Sync
```bash
# Install AWS CLI
sudo apt install -y awscli
# Configure AWS credentials
aws configure
# Sync backups to S3
aws s3 sync /backup/c-relay/ s3://your-backup-bucket/c-relay/ --delete
```
### Disaster Recovery
#### Recovery Procedures
```bash
# 1. Restore from backup
gunzip backup/relay_backup_20231201_020000.nrdb.gz
cp backup/relay_backup_20231201_020000.nrdb /opt/c-relay/
# 2. Fix permissions
sudo chown c-relay:c-relay /opt/c-relay/*.nrdb
# 3. Restart service
sudo systemctl restart c-relay
# 4. Verify recovery
sudo journalctl -u c-relay --since "1 minute ago"
```
---
This deployment guide provides comprehensive coverage for deploying the C Nostr Relay across various environments while taking full advantage of the event-based configuration system's simplicity and security features.


@@ -0,0 +1,298 @@
# Libwebsockets Proper Pattern - Message Queue Design
## Problem Analysis
### Current Violation
We're calling `lws_write()` directly from multiple code paths:
1. **Event broadcast** (subscriptions.c:667) - when events arrive
2. **OK responses** (websockets.c:855) - when processing EVENT messages
3. **EOSE responses** (websockets.c:976) - when processing REQ messages
4. **COUNT responses** (websockets.c:1922) - when processing COUNT messages
This violates libwebsockets' design pattern which requires:
- **`lws_write()` ONLY called from `LWS_CALLBACK_SERVER_WRITEABLE`**
- Application queues messages and requests writeable callback
- Libwebsockets handles write timing and socket buffer management
### Consequences of Violation
1. Partial writes when socket buffer is full
2. Multiple concurrent write attempts before callback fires
3. "write already pending" errors with single buffer
4. Frame corruption from interleaved partial writes
5. "Invalid frame header" errors on client side
## Correct Architecture
### Message Queue Pattern
```
┌─────────────────────────────────────────────────────────────┐
│ Application Layer │
├─────────────────────────────────────────────────────────────┤
│ │
│ Event Arrives → Queue Message → Request Writeable Callback │
│ REQ Received → Queue EOSE → Request Writeable Callback │
│ EVENT Received→ Queue OK → Request Writeable Callback │
│ COUNT Received→ Queue COUNT → Request Writeable Callback │
│ │
└─────────────────────────────────────────────────────────────┘
lws_callback_on_writable(wsi)
┌─────────────────────────────────────────────────────────────┐
│ LWS_CALLBACK_SERVER_WRITEABLE │
├─────────────────────────────────────────────────────────────┤
│ │
│ 1. Dequeue next message from queue │
│ 2. Call lws_write() with message data │
│ 3. If queue not empty, request another callback │
│ │
└─────────────────────────────────────────────────────────────┘
libwebsockets handles:
- Socket buffer management
- Partial write handling
- Frame atomicity
```
## Data Structures
### Message Queue Node
```c
typedef struct message_queue_node {
    unsigned char* data;               // Message data (with LWS_PRE space)
    size_t length;                     // Message length (without LWS_PRE)
    enum lws_write_protocol type;      // LWS_WRITE_TEXT, etc.
    struct message_queue_node* next;
} message_queue_node_t;
```
### Per-Session Data Updates
```c
struct per_session_data {
    // ... existing fields (including pthread_mutex_t session_lock) ...
    // Message queue (replaces single buffer)
    message_queue_node_t* message_queue_head;
    message_queue_node_t* message_queue_tail;
    int message_queue_count;
    int writeable_requested;           // Flag to prevent duplicate requests
};
```
## Implementation Functions
### 1. Queue Message (Application Layer)
```c
int queue_message(struct lws* wsi, struct per_session_data* pss,
                  const char* message, size_t length,
                  enum lws_write_protocol type)
{
    // Allocate node
    message_queue_node_t* node = malloc(sizeof(message_queue_node_t));
    if (!node) {
        return -1;
    }
    // Allocate buffer with LWS_PRE space
    node->data = malloc(LWS_PRE + length);
    if (!node->data) {
        free(node);
        return -1;
    }
    memcpy(node->data + LWS_PRE, message, length);
    node->length = length;
    node->type = type;
    node->next = NULL;
    // Add to queue (FIFO); check the writeable flag under the same lock
    // so two threads cannot both decide to request the callback
    int need_callback = 0;
    pthread_mutex_lock(&pss->session_lock);
    if (!pss->message_queue_head) {
        pss->message_queue_head = node;
        pss->message_queue_tail = node;
    } else {
        pss->message_queue_tail->next = node;
        pss->message_queue_tail = node;
    }
    pss->message_queue_count++;
    if (!pss->writeable_requested) {
        pss->writeable_requested = 1;
        need_callback = 1;
    }
    pthread_mutex_unlock(&pss->session_lock);
    // Request writeable callback (only if not already requested)
    if (need_callback) {
        lws_callback_on_writable(wsi);
    }
    return 0;
}
```
### 2. Process Queue (Writeable Callback)
```c
int process_message_queue(struct lws* wsi, struct per_session_data* pss)
{
    pthread_mutex_lock(&pss->session_lock);
    // Get next message from queue
    message_queue_node_t* node = pss->message_queue_head;
    if (!node) {
        pss->writeable_requested = 0;
        pthread_mutex_unlock(&pss->session_lock);
        return 0; // Queue empty
    }
    // Remove from queue
    pss->message_queue_head = node->next;
    if (!pss->message_queue_head) {
        pss->message_queue_tail = NULL;
    }
    pss->message_queue_count--;
    pthread_mutex_unlock(&pss->session_lock);
    // Write message (libwebsockets handles partial writes)
    int result = lws_write(wsi, node->data + LWS_PRE, node->length, node->type);
    // Free node
    free(node->data);
    free(node);
    // If queue not empty, request another callback
    pthread_mutex_lock(&pss->session_lock);
    if (pss->message_queue_head) {
        lws_callback_on_writable(wsi);
    } else {
        pss->writeable_requested = 0;
    }
    pthread_mutex_unlock(&pss->session_lock);
    return (result < 0) ? -1 : 0;
}
```
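For completeness, a minimal sketch of the writeable-callback wiring (the surrounding callback switch in websockets.c is assumed; names follow the functions above):
```c
case LWS_CALLBACK_SERVER_WRITEABLE: {
    struct per_session_data* pss = (struct per_session_data*)user;
    // Drain one message per callback; process_message_queue() re-arms
    // the callback itself while messages remain queued
    if (process_message_queue(wsi, pss) < 0) {
        return -1; // Write failed - close the connection
    }
    break;
}
```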
## Refactoring Changes
### Before (WRONG - Direct Write)
```c
// websockets.c:855 - OK response
int write_result = lws_write(wsi, buf + LWS_PRE, response_len, LWS_WRITE_TEXT);
if (write_result < 0) {
    DEBUG_ERROR("Write failed");
} else if ((size_t)write_result != response_len) {
    // Partial write - queue remaining data
    queue_websocket_write(wsi, pss, ...);
}
```
### After (CORRECT - Queue Message)
```c
// websockets.c:855 - OK response
queue_message(wsi, pss, response_str, response_len, LWS_WRITE_TEXT);
// That's it! Writeable callback will handle the actual write
```
### Before (WRONG - Direct Write in Broadcast)
```c
// subscriptions.c:667 - EVENT broadcast
int write_result = lws_write(current_temp->wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
if (write_result < 0) {
    DEBUG_ERROR("Write failed");
} else if ((size_t)write_result != msg_len) {
    queue_websocket_write(...);
}
```
### After (CORRECT - Queue Message)
```c
// subscriptions.c:667 - EVENT broadcast
struct per_session_data* pss = lws_wsi_user(current_temp->wsi);
queue_message(current_temp->wsi, pss, msg_str, msg_len, LWS_WRITE_TEXT);
// Writeable callback will handle the actual write
```
## Benefits of Correct Pattern
1. **No Partial Write Handling Needed**
- Libwebsockets handles partial writes internally
- We just queue complete messages
2. **No "Write Already Pending" Errors**
- Queue can hold unlimited messages
- Each processed sequentially from callback
3. **Thread Safety**
- Queue operations protected by session lock
- Write only from single callback thread
- Caveat: if a message is queued from outside the lws service thread, wake the event loop with `lws_cancel_service()` so the writeable callback runs promptly
4. **Frame Atomicity**
- Libwebsockets ensures complete frame transmission
- No interleaved partial writes
5. **Simpler Code**
- No complex partial write state machine
- Just queue and forget
6. **Better Performance**
- Libwebsockets optimizes write timing
- Batches writes when socket ready
## Migration Steps
1. ✅ Identify all `lws_write()` call sites
2. ✅ Confirm violation of libwebsockets pattern
3. ⏳ Design message queue structure
4. ⏳ Implement `queue_message()` function
5. ⏳ Implement `process_message_queue()` function
6. ⏳ Update `per_session_data` structure
7. ⏳ Refactor OK response to use queue
8. ⏳ Refactor EOSE response to use queue
9. ⏳ Refactor COUNT response to use queue
10. ⏳ Refactor EVENT broadcast to use queue
11. ⏳ Update `LWS_CALLBACK_SERVER_WRITEABLE` handler
12. ⏳ Add queue cleanup in `LWS_CALLBACK_CLOSED` (see the sketch after this list)
13. ⏳ Remove old partial write code
14. ⏳ Test with rapid multiple events
15. ⏳ Test with large events (>4KB)
16. ⏳ Test under load
17. ⏳ Verify no frame errors
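Step 12 will need a helper that drains any unsent messages when a connection closes. A minimal sketch, assuming the structures defined above:
```c
// Free all queued messages for a closing session.
// Intended to be called from LWS_CALLBACK_CLOSED before session teardown.
static void free_message_queue(struct per_session_data* pss)
{
    pthread_mutex_lock(&pss->session_lock);
    message_queue_node_t* node = pss->message_queue_head;
    while (node) {
        message_queue_node_t* next = node->next;
        free(node->data);
        free(node);
        node = next;
    }
    pss->message_queue_head = NULL;
    pss->message_queue_tail = NULL;
    pss->message_queue_count = 0;
    pss->writeable_requested = 0;
    pthread_mutex_unlock(&pss->session_lock);
}
```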
## Testing Strategy
### Test 1: Multiple Rapid Events
```bash
# Send 10 events rapidly to same client
for i in {1..10}; do
    echo '["EVENT",{"kind":1,"content":"test'$i'","created_at":'$(date +%s)',...}]' | \
        websocat ws://localhost:8888 &
done
```
**Expected**: All events queued and sent sequentially, no errors
### Test 2: Large Events
```bash
# Send event >4KB (forces multiple socket writes)
nak event --content "$(head -c 5000 /dev/urandom | base64)" | \
websocat ws://localhost:8888
```
**Expected**: Event queued, libwebsockets handles partial writes internally
### Test 3: Concurrent Connections
```bash
# 100 concurrent connections, each sending events
for i in {1..100}; do
    (echo '["REQ","sub'$i'",{}]'; sleep 1) | websocat ws://localhost:8888 &
done
```
**Expected**: All subscriptions work, events broadcast correctly
## Success Criteria
- ✅ No `lws_write()` calls outside `LWS_CALLBACK_SERVER_WRITEABLE`
- ✅ No "write already pending" errors in logs
- ✅ No "Invalid frame header" errors on client side
- ✅ All messages delivered in correct order
- ✅ Large events (>4KB) handled correctly
- ✅ Multiple rapid events to same client work
- ✅ Concurrent connections stable under load
## References
- [libwebsockets documentation](https://libwebsockets.org/lws-api-doc-main/html/index.html)
- [LWS_CALLBACK_SERVER_WRITEABLE](https://libwebsockets.org/lws-api-doc-main/html/group__callback-when-writeable.html)
- [lws_callback_on_writable()](https://libwebsockets.org/lws-api-doc-main/html/group__callback-when-writeable.html#ga96f3ad8e1e2c3e0c8e0b0e5e5e5e5e5e)


@@ -0,0 +1,601 @@
# Simplified Monitoring Implementation Plan
## Kind 34567 Event Kind Distribution Reporting
**Date:** 2025-10-16
**Status:** Implementation Ready
---
## Overview
Simplified real-time monitoring system that:
- Reports event kind distribution (which includes total event count)
- Uses kind 34567 addressable events with `d=event_kinds`
- Controlled by two config variables
- Enabled on-demand when admin logs in
- Uses simple throttling to prevent performance impact
---
## Configuration Variables
### Database Config Table
Add two new configuration keys:
```sql
INSERT INTO config (key, value, data_type, description, category) VALUES
('kind_34567_reporting_enabled', 'false', 'boolean',
'Enable/disable kind 34567 event kind distribution reporting', 'monitoring'),
('kind_34567_reporting_throttling_sec', '5', 'integer',
'Minimum seconds between kind 34567 reports (throttling)', 'monitoring');
```
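Since both keys live in the `config` table, they can also be changed at runtime with plain SQL, assuming `get_config_bool()`/`get_config_int()` read the current table values rather than a startup-time cache:
```sql
-- Enable reporting and tighten the throttle to 2 seconds
UPDATE config SET value = 'true' WHERE key = 'kind_34567_reporting_enabled';
UPDATE config SET value = '2' WHERE key = 'kind_34567_reporting_throttling_sec';
```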
### Configuration Access
```c
// In src/monitoring.c or src/api.c
int is_monitoring_enabled(void) {
    return get_config_bool("kind_34567_reporting_enabled", 0);
}

int get_monitoring_throttle_seconds(void) {
    return get_config_int("kind_34567_reporting_throttling_sec", 5);
}
```
---
## Event Structure
### Kind 34567 Event Format
```json
{
  "id": "<event_id>",
  "pubkey": "<relay_pubkey>",
  "created_at": 1697123456,
  "kind": 34567,
  "content": "{\"data_type\":\"event_kinds\",\"timestamp\":1697123456,\"data\":{\"total_events\":125000,\"distribution\":[{\"kind\":1,\"count\":45000,\"percentage\":36.0},{\"kind\":3,\"count\":12500,\"percentage\":10.0}]}}",
  "tags": [
    ["d", "event_kinds"],
    ["relay", "<relay_pubkey>"]
  ],
  "sig": "<signature>"
}
```
### Content JSON Structure
```json
{
  "data_type": "event_kinds",
  "timestamp": 1697123456,
  "data": {
    "total_events": 125000,
    "distribution": [
      {
        "kind": 1,
        "count": 45000,
        "percentage": 36.0
      },
      {
        "kind": 3,
        "count": 12500,
        "percentage": 10.0
      }
    ]
  },
  "metadata": {
    "query_time_ms": 18
  }
}
```
---
## Implementation
### File Structure
```
src/
  monitoring.h   # New file - monitoring system header
  monitoring.c   # New file - monitoring implementation
  main.c         # Modified - add trigger hook
  config.c       # Modified - add config keys (or use migration)
```
### 1. Header File: `src/monitoring.h`
```c
#ifndef MONITORING_H
#define MONITORING_H
#include <time.h>
#include <cjson/cJSON.h>
// Initialize monitoring system
int init_monitoring_system(void);
// Cleanup monitoring system
void cleanup_monitoring_system(void);
// Called when an event is stored (from main.c)
void monitoring_on_event_stored(void);
// Enable/disable monitoring (called from admin API)
int set_monitoring_enabled(int enabled);
// Get monitoring status
int is_monitoring_enabled(void);
// Get throttle interval
int get_monitoring_throttle_seconds(void);
#endif /* MONITORING_H */
```
### 2. Implementation: `src/monitoring.c`
```c
#include "monitoring.h"
#include "config.h"
#include "debug.h"
#include "../nostr_core_lib/nostr_core/nostr_core.h"
#include <sqlite3.h>
#include <string.h>
#include <time.h>
// External references
extern sqlite3* g_db;
extern int broadcast_event_to_subscriptions(cJSON* event);
extern int store_event(cJSON* event);
extern const char* get_config_value(const char* key);
extern int get_config_bool(const char* key, int default_value);
extern int get_config_int(const char* key, int default_value);
extern char* get_relay_private_key(void);
// Throttling state
static time_t last_report_time = 0;
// Initialize monitoring system
int init_monitoring_system(void) {
    DEBUG_LOG("Monitoring system initialized");
    last_report_time = 0;
    return 0;
}

// Cleanup monitoring system
void cleanup_monitoring_system(void) {
    DEBUG_LOG("Monitoring system cleaned up");
}

// Check if monitoring is enabled
int is_monitoring_enabled(void) {
    return get_config_bool("kind_34567_reporting_enabled", 0);
}

// Get throttle interval
int get_monitoring_throttle_seconds(void) {
    return get_config_int("kind_34567_reporting_throttling_sec", 5);
}

// Enable/disable monitoring
int set_monitoring_enabled(int enabled) {
    // Update config table
    const char* value = enabled ? "true" : "false";
    // This would call update_config_in_table() or similar
    // For now, assume we have a function to update config
    extern int update_config_in_table(const char* key, const char* value);
    return update_config_in_table("kind_34567_reporting_enabled", value);
}
// Query event kind distribution from database
static char* query_event_kind_distribution(void) {
    if (!g_db) {
        DEBUG_ERROR("Database not available for monitoring query");
        return NULL;
    }
    struct timespec start_time;
    clock_gettime(CLOCK_MONOTONIC, &start_time);

    // Query total events
    sqlite3_stmt* stmt;
    int total_events = 0;
    if (sqlite3_prepare_v2(g_db, "SELECT COUNT(*) FROM events", -1, &stmt, NULL) == SQLITE_OK) {
        if (sqlite3_step(stmt) == SQLITE_ROW) {
            total_events = sqlite3_column_int(stmt, 0);
        }
        sqlite3_finalize(stmt);
    }

    // Build response and query kind distribution
    cJSON* response = cJSON_CreateObject();
    cJSON_AddStringToObject(response, "data_type", "event_kinds");
    cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));
    cJSON* data = cJSON_CreateObject();
    cJSON_AddNumberToObject(data, "total_events", total_events);
    cJSON* distribution = cJSON_CreateArray();
    const char* sql =
        "SELECT kind, COUNT(*) as count, "
        "ROUND(COUNT(*) * 100.0 / (SELECT COUNT(*) FROM events), 2) as percentage "
        "FROM events GROUP BY kind ORDER BY count DESC";
    if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) == SQLITE_OK) {
        while (sqlite3_step(stmt) == SQLITE_ROW) {
            cJSON* kind_obj = cJSON_CreateObject();
            cJSON_AddNumberToObject(kind_obj, "kind", sqlite3_column_int(stmt, 0));
            cJSON_AddNumberToObject(kind_obj, "count", sqlite3_column_int64(stmt, 1));
            cJSON_AddNumberToObject(kind_obj, "percentage", sqlite3_column_double(stmt, 2));
            cJSON_AddItemToArray(distribution, kind_obj);
        }
        sqlite3_finalize(stmt);
    }
    cJSON_AddItemToObject(data, "distribution", distribution);
    cJSON_AddItemToObject(response, "data", data);

    // Calculate query time
    struct timespec end_time;
    clock_gettime(CLOCK_MONOTONIC, &end_time);
    double query_time_ms = (end_time.tv_sec - start_time.tv_sec) * 1000.0 +
                           (end_time.tv_nsec - start_time.tv_nsec) / 1000000.0;
    cJSON* metadata = cJSON_CreateObject();
    cJSON_AddNumberToObject(metadata, "query_time_ms", query_time_ms);
    cJSON_AddItemToObject(response, "metadata", metadata);

    char* json_string = cJSON_Print(response);
    cJSON_Delete(response);
    return json_string;
}
// Generate and broadcast kind 34567 event
static int generate_monitoring_event(const char* json_content) {
    if (!json_content) return -1;

    // Get relay keys
    const char* relay_pubkey = get_config_value("relay_pubkey");
    char* relay_privkey_hex = get_relay_private_key();
    if (!relay_pubkey || !relay_privkey_hex) {
        if (relay_privkey_hex) free(relay_privkey_hex);
        DEBUG_ERROR("Could not get relay keys for monitoring event");
        return -1;
    }

    // Convert relay private key to bytes
    unsigned char relay_privkey[32];
    if (nostr_hex_to_bytes(relay_privkey_hex, relay_privkey, sizeof(relay_privkey)) != 0) {
        free(relay_privkey_hex);
        DEBUG_ERROR("Failed to convert relay private key");
        return -1;
    }
    free(relay_privkey_hex);

    // Create tags array
    cJSON* tags = cJSON_CreateArray();
    // d tag for addressable event
    cJSON* d_tag = cJSON_CreateArray();
    cJSON_AddItemToArray(d_tag, cJSON_CreateString("d"));
    cJSON_AddItemToArray(d_tag, cJSON_CreateString("event_kinds"));
    cJSON_AddItemToArray(tags, d_tag);
    // relay tag
    cJSON* relay_tag = cJSON_CreateArray();
    cJSON_AddItemToArray(relay_tag, cJSON_CreateString("relay"));
    cJSON_AddItemToArray(relay_tag, cJSON_CreateString(relay_pubkey));
    cJSON_AddItemToArray(tags, relay_tag);

    // Create and sign event
    cJSON* event = nostr_create_and_sign_event(
        34567,          // kind
        json_content,   // content
        tags,           // tags
        relay_privkey,  // private key
        time(NULL)      // timestamp
    );
    if (!event) {
        DEBUG_ERROR("Failed to create and sign monitoring event");
        return -1;
    }

    // Broadcast to subscriptions
    broadcast_event_to_subscriptions(event);
    // Store in database
    int result = store_event(event);
    cJSON_Delete(event);
    return result;
}
// Called when an event is stored
void monitoring_on_event_stored(void) {
    // Check if monitoring is enabled
    if (!is_monitoring_enabled()) {
        return;
    }
    // Check throttling
    time_t now = time(NULL);
    int throttle_seconds = get_monitoring_throttle_seconds();
    if (now - last_report_time < throttle_seconds) {
        return; // Too soon, skip this update
    }
    // Query event kind distribution
    char* json_content = query_event_kind_distribution();
    if (!json_content) {
        DEBUG_ERROR("Failed to query event kind distribution");
        return;
    }
    // Generate and broadcast monitoring event
    int result = generate_monitoring_event(json_content);
    free(json_content);
    if (result == 0) {
        last_report_time = now;
        DEBUG_LOG("Generated kind 34567 monitoring event");
    } else {
        DEBUG_ERROR("Failed to generate monitoring event");
    }
}
```
### 3. Integration: Modify `src/main.c`
Add monitoring hook to event storage:
```c
// At top of file
#include "monitoring.h"

// In main() function, after init_database()
if (init_monitoring_system() != 0) {
    DEBUG_WARN("Failed to initialize monitoring system");
    // Continue anyway - monitoring is optional
}

// In store_event() function, after successful storage
int store_event(cJSON* event) {
    // ... existing code ...
    if (rc != SQLITE_DONE) {
        // ... error handling ...
    }
    free(tags_json);
    // Trigger monitoring update
    monitoring_on_event_stored();
    return 0;
}

// In cleanup section of main()
cleanup_monitoring_system();
```
### 4. Admin API: Enable/Disable Monitoring
Add admin command to enable monitoring (in `src/dm_admin.c` or `src/api.c`):
```c
// Handle admin command to enable monitoring
if (strcmp(command, "enable_monitoring") == 0) {
    set_monitoring_enabled(1);
    send_nip17_response(sender_pubkey,
                        "✅ Kind 34567 monitoring enabled",
                        error_msg, sizeof(error_msg));
    return 0;
}
// Handle admin command to disable monitoring
if (strcmp(command, "disable_monitoring") == 0) {
    set_monitoring_enabled(0);
    send_nip17_response(sender_pubkey,
                        "🔴 Kind 34567 monitoring disabled",
                        error_msg, sizeof(error_msg));
    return 0;
}
// Handle admin command to set throttle interval
if (strncmp(command, "set_monitoring_throttle ", 24) == 0) {
    int seconds = atoi(command + 24);
    if (seconds >= 1 && seconds <= 3600) {
        char value[16];
        snprintf(value, sizeof(value), "%d", seconds);
        update_config_in_table("kind_34567_reporting_throttling_sec", value);
        char response[128];
        snprintf(response, sizeof(response),
                 "✅ Monitoring throttle set to %d seconds", seconds);
        send_nip17_response(sender_pubkey, response, error_msg, sizeof(error_msg));
    } else {
        send_nip17_response(sender_pubkey,
                            "❌ Throttle must be between 1 and 3600 seconds",
                            error_msg, sizeof(error_msg));
    }
    return 0;
}
```
---
## Frontend Integration
### Admin Dashboard Subscription
```javascript
// When admin logs in to dashboard
async function enableMonitoring() {
    // Send admin command to enable monitoring
    await sendAdminCommand(['enable_monitoring']);
    // Subscribe to kind 34567 events
    const subscription = {
        kinds: [34567],
        authors: [relayPubkey],
        "#d": ["event_kinds"]
    };
    relay.subscribe([subscription], {
        onevent: (event) => {
            handleMonitoringEvent(event);
        }
    });
}

// Handle incoming monitoring events
function handleMonitoringEvent(event) {
    const content = JSON.parse(event.content);
    if (content.data_type === 'event_kinds') {
        updateEventKindsChart(content.data);
        updateTotalEventsDisplay(content.data.total_events);
    }
}

// When admin logs out or closes dashboard
async function disableMonitoring() {
    await sendAdminCommand(['disable_monitoring']);
}
```
### Display Event Kind Distribution
```javascript
function updateEventKindsChart(data) {
    const { total_events, distribution } = data;
    // Update total events display
    document.getElementById('total-events').textContent =
        total_events.toLocaleString();
    // Update chart/table with distribution
    const tableBody = document.getElementById('kind-distribution-table');
    tableBody.innerHTML = '';
    distribution.forEach(item => {
        const row = document.createElement('tr');
        row.innerHTML = `
            <td>Kind ${item.kind}</td>
            <td>${item.count.toLocaleString()}</td>
            <td>${item.percentage}%</td>
        `;
        tableBody.appendChild(row);
    });
}
```
---
## Configuration Migration
### Add to Schema or Migration Script
```sql
-- Add monitoring configuration
INSERT INTO config (key, value, data_type, description, category) VALUES
('kind_34567_reporting_enabled', 'false', 'boolean',
'Enable/disable kind 34567 event kind distribution reporting', 'monitoring'),
('kind_34567_reporting_throttling_sec', '5', 'integer',
'Minimum seconds between kind 34567 reports (throttling)', 'monitoring');
```
Or add to existing config initialization in `src/config.c`.
---
## Testing
### 1. Enable Monitoring
```bash
# Via admin command (NIP-17 DM)
echo '["enable_monitoring"]' | nak event --kind 14 --content - ws://localhost:8888
```
### 2. Subscribe to Monitoring Events
```bash
# Subscribe to kind 34567 events
nak req --kinds 34567 --authors <relay_pubkey> ws://localhost:8888
```
### 3. Generate Events
```bash
# Send some test events to trigger monitoring
for i in {1..10}; do
    nak event -c "Test event $i" ws://localhost:8888
    sleep 1
done
```
### 4. Verify Monitoring Events
You should see kind 34567 events every 5 seconds (or configured throttle interval) with event kind distribution.
---
## Performance Impact
### With 3 events/second (relay.damus.io scale)
**Query execution**:
- Frequency: Every 5 seconds (throttled)
- Query time: ~700ms (for 1M events)
- Overhead: 700ms / 5000ms = 14% (acceptable)
**Per-event overhead**:
- Check if enabled: < 0.01ms
- Check throttle: < 0.01ms
- Total: < 0.02ms per event (negligible)
**Overall impact**: < 1% on event processing, 14% on query thread (separate from event processing)
---
## Future Enhancements
Once this is working, it is easy to add:
1. **More data types**: Add `d=connections`, `d=subscriptions`, etc.
2. **Materialized counters**: Optimize queries for very large databases
3. **Historical data**: Store monitoring events for trending
4. **Alerts**: Trigger on thresholds (e.g., > 90% capacity)
---
## Summary
This simplified plan provides:
- **Single data type**: Event kind distribution (includes total events)
- **Two config variables**: Enable/disable and throttle control
- **On-demand activation**: Enabled when admin logs in
- **Simple throttling**: Prevents performance impact
- **Clean implementation**: ~200 lines of code
- **Easy to extend**: Add more data types later
**Estimated implementation time**: 4-6 hours
**Files to create/modify**:
- Create: `src/monitoring.h` (~30 lines)
- Create: `src/monitoring.c` (~200 lines)
- Modify: `src/main.c` (~10 lines)
- Modify: `src/config.c` or migration (~5 lines)
- Modify: `src/dm_admin.c` or `src/api.c` (~30 lines)
- Create: `api/monitoring.js` (frontend, ~100 lines)
**Total new code**: ~375 lines

docs/musl_static_build.md

@@ -0,0 +1,275 @@
# MUSL Static Binary Build Guide
## Overview
This guide explains how to build truly portable MUSL-based static binaries of c-relay using Alpine Linux Docker containers. These binaries have **zero runtime dependencies** and work on any Linux distribution.
## Why MUSL?
### MUSL vs glibc Static Binaries
**MUSL Advantages:**
- **Truly Static**: No hidden dependencies on system libraries
- **Smaller Size**: ~7.6MB vs ~12MB+ for glibc static builds
- **Better Portability**: Works on ANY Linux distribution without modification
- **Cleaner Linking**: No glibc-specific extensions or fortified functions
- **Simpler Deployment**: Single binary, no library compatibility issues
**glibc Limitations:**
- Static builds still require dynamic loading for NSS (Name Service Switch)
- Fortified functions (`__*_chk`) don't exist in MUSL
- Larger binary size due to glibc's complexity
- May have compatibility issues across different glibc versions
## Build Process
### Prerequisites
- Docker installed and running
- Sufficient disk space (~2GB for Docker layers)
- Internet connection (for downloading dependencies)
### Quick Start
```bash
# Build MUSL static binary
./build_static.sh
# The binary will be created at:
# build/c_relay_static_musl_x86_64 (on x86_64)
# build/c_relay_static_musl_arm64 (on ARM64)
```
### What Happens During Build
1. **Alpine Linux Base**: Uses Alpine 3.19 with native MUSL support
2. **Static Dependencies**: Builds all dependencies with static linking:
- libsecp256k1 (Bitcoin cryptography)
- libwebsockets (WebSocket server)
- OpenSSL (TLS/crypto)
- SQLite (database)
- curl (HTTP client)
- zlib (compression)
3. **nostr_core_lib**: Builds with MUSL-compatible flags:
- Disables glibc fortification (`-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0`)
- Includes required NIPs: 001, 006, 013, 017, 019, 044, 059
- Produces static library (~316KB)
4. **c-relay Compilation**: Links everything statically:
- All source files compiled with `-static` flag
- Fortification disabled to avoid `__*_chk` symbols
- Results in ~7.6MB stripped binary
5. **Verification**: Confirms binary is truly static:
- `ldd` shows "not a dynamic executable"
- `file` shows "statically linked"
- Binary executes successfully
## Technical Details
### Dockerfile Structure
The build uses a multi-stage Dockerfile (`Dockerfile.alpine-musl`):
```dockerfile
# Stage 1: Builder (Alpine Linux)
FROM alpine:3.19 AS builder
#  - Install build tools and static libraries
#  - Build dependencies from source
#  - Compile nostr_core_lib with MUSL flags
#  - Compile c-relay with full static linking
#  - Strip binary to reduce size

# Stage 2: Output (scratch)
FROM scratch AS output
#  - Contains only the final binary
```
### Key Compilation Flags
**For nostr_core_lib:**
```bash
CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"
```
**For c-relay:**
```bash
gcc -static -O2 -Wall -Wextra -std=c99 \
    -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
    [source files] \
    -lwebsockets -lssl -lcrypto -lsqlite3 -lsecp256k1 \
    -lcurl -lz -lpthread -lm -ldl
```
### Fortification Issue
**Problem**: GCC's `-O2` optimization enables fortification by default, replacing standard functions with `__*_chk` variants (e.g., `__snprintf_chk`, `__fprintf_chk`). These are glibc-specific and don't exist in MUSL.
**Solution**: Explicitly disable fortification with:
- `-U_FORTIFY_SOURCE` (undefine any existing definition)
- `-D_FORTIFY_SOURCE=0` (set to 0)
This must be applied to **both** nostr_core_lib and c-relay compilation.
### NIP Dependencies
The build includes these NIPs in nostr_core_lib:
- **NIP-001**: Basic protocol (event creation, signing)
- **NIP-006**: Key derivation from mnemonic
- **NIP-013**: Proof of Work validation
- **NIP-017**: Private Direct Messages
- **NIP-019**: Bech32 encoding (nsec/npub)
- **NIP-044**: Modern encryption
- **NIP-059**: Gift Wrap (required by NIP-017)
## Verification
### Check Binary Type
```bash
# Should show "statically linked"
file build/c_relay_static_musl_x86_64
# Should show "not a dynamic executable"
ldd build/c_relay_static_musl_x86_64
# Check size (should be ~7.6MB)
ls -lh build/c_relay_static_musl_x86_64
```
### Test Execution
```bash
# Show help
./build/c_relay_static_musl_x86_64 --help
# Show version
./build/c_relay_static_musl_x86_64 --version
# Run relay
./build/c_relay_static_musl_x86_64 --port 8888
```
### Cross-Distribution Testing
Test the binary on different distributions to verify portability:
```bash
# Alpine Linux
docker run --rm -v $(pwd)/build:/app alpine:latest /app/c_relay_static_musl_x86_64 --version
# Ubuntu
docker run --rm -v $(pwd)/build:/app ubuntu:latest /app/c_relay_static_musl_x86_64 --version
# Debian
docker run --rm -v $(pwd)/build:/app debian:latest /app/c_relay_static_musl_x86_64 --version
# CentOS
docker run --rm -v $(pwd)/build:/app centos:latest /app/c_relay_static_musl_x86_64 --version
```
## Troubleshooting
### Docker Permission Denied
**Problem**: `permission denied while trying to connect to the Docker daemon socket`
**Solution**: Add user to docker group:
```bash
sudo usermod -aG docker $USER
newgrp docker # Or logout and login again
```
### Build Fails with Fortification Errors
**Problem**: `undefined reference to '__snprintf_chk'` or `'__fprintf_chk'`
**Solution**: Ensure fortification is disabled in both:
1. nostr_core_lib build.sh (line 534)
2. c-relay compilation flags in Dockerfile
### Binary Won't Execute
**Problem**: Binary fails to run on target system
**Checks**:
1. Verify it's truly static: `ldd binary` should show "not a dynamic executable"
2. Check architecture matches: `file binary` should show correct arch
3. Ensure execute permissions: `chmod +x binary`
### Missing NIP Functions
**Problem**: `undefined reference to 'nostr_nip*'` during linking
**Solution**: Add missing NIPs to the build command:
```bash
./build.sh --nips=1,6,13,17,19,44,59
```
## Deployment
### Single Binary Deployment
```bash
# Copy binary to server
scp build/c_relay_static_musl_x86_64 user@server:/opt/c-relay/
# Run on server (no dependencies needed!)
ssh user@server
cd /opt/c-relay
./c_relay_static_musl_x86_64 --port 8888
```
### SystemD Service
```ini
[Unit]
Description=C-Relay Nostr Relay (MUSL Static)
After=network.target
[Service]
Type=simple
User=c-relay
WorkingDirectory=/opt/c-relay
ExecStart=/opt/c-relay/c_relay_static_musl_x86_64
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
```
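Assuming the unit is saved as `/etc/systemd/system/c-relay.service`, enabling it follows the usual pattern:
```bash
sudo systemctl daemon-reload
sudo systemctl enable --now c-relay
sudo systemctl status c-relay
```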
## Performance Comparison
| Metric | MUSL Static | glibc Static | glibc Dynamic |
|--------|-------------|--------------|---------------|
| Binary Size | 7.6 MB | 12+ MB | 2-3 MB |
| Startup Time | ~50ms | ~60ms | ~40ms |
| Memory Usage | Similar | Similar | Similar |
| Portability | ✓ Any Linux | ⚠ glibc only | ✗ Requires libs |
| Dependencies | None | NSS libs | Many libs |
## Best Practices
1. **Always verify** the binary is truly static before deployment
2. **Test on multiple distributions** to ensure portability
3. **Keep Docker images updated** for security patches
4. **Document the build date** and commit hash for reproducibility
5. **Store binaries** with architecture in filename (e.g., `_x86_64`, `_arm64`)
## References
- [MUSL libc](https://musl.libc.org/)
- [Alpine Linux](https://alpinelinux.org/)
- [Static Linking Best Practices](https://www.musl-libc.org/faq.html)
- [GCC Fortification](https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html)
## Changelog
### 2025-10-11
- Initial MUSL build system implementation
- Alpine Docker-based build process
- Fortification fix for nostr_core_lib
- Complete NIP dependency resolution
- Documentation created


@@ -0,0 +1,517 @@
# NIP-59 Timestamp Configuration Implementation Plan
## Overview
Add configurable timestamp randomization for NIP-59 gift wraps to improve compatibility with Nostr apps that don't implement timestamp randomization.
## Problem Statement
The NIP-59 protocol specifies that timestamps on gift wraps should have randomness to prevent time-analysis attacks. However, some Nostr platforms don't implement this, causing compatibility issues with direct messaging (NIP-17).
## Solution
Add a configuration parameter `nip59_timestamp_max_delay_sec` that controls the maximum random delay applied to timestamps:
- **Value = 0**: Use current timestamp (no randomization) for maximum compatibility
- **Value > 0**: Use random timestamp between now and N seconds ago
- **Default = 0**: Maximum compatibility mode (no randomization)
## Implementation Approach: Option B (Direct Parameter Addition)
We chose Option B because:
1. Explicit and stateless - value flows through call chain
2. Thread-safe by design
3. No global state needed in nostr_core_lib
4. DMs are sent rarely, so database query per call is acceptable
---
## Detailed Implementation Steps
### Phase 1: Configuration Setup in c-relay
#### 1.1 Add Configuration Parameter
**File:** `src/default_config_event.h`
**Location:** Line 82 (after `trust_proxy_headers`)
```c
// NIP-59 Gift Wrap Timestamp Configuration
{"nip59_timestamp_max_delay_sec", "0"} // Default: 0 (no randomization for compatibility)
```
**Rationale:**
- Default of 0 seconds (no randomization) for maximum compatibility
- Placed after proxy settings, before closing brace
- Follows existing naming convention
#### 1.2 Add Configuration Validation
**File:** `src/config.c`
**Function:** `validate_config_field()` (around line 923)
Add validation case:
```c
else if (strcmp(key, "nip59_timestamp_max_delay_sec") == 0) {
    long value = strtol(value_str, NULL, 10);
    if (value < 0 || value > 604800) { // Max 7 days
        snprintf(error_msg, error_size,
                 "nip59_timestamp_max_delay_sec must be between 0 and 604800 (7 days)");
        return -1;
    }
}
```
**Rationale:**
- 0 = no randomization (compatibility mode)
- 604800 = 7 days maximum (reasonable upper bound)
- Prevents negative values or excessive delays
---
### Phase 2: Modify nostr_core_lib Functions
#### 2.1 Update random_past_timestamp() Function
**File:** `nostr_core_lib/nostr_core/nip059.c`
**Current Location:** Lines 31-36
**Current Code:**
```c
static time_t random_past_timestamp(void) {
    time_t now = time(NULL);
    // Random time up to 2 days (172800 seconds) in the past
    long random_offset = (long)(rand() % 172800);
    return now - random_offset;
}
```
**New Code:**
```c
static time_t random_past_timestamp(long max_delay_sec) {
    time_t now = time(NULL);
    // If max_delay_sec is 0, return current timestamp (no randomization)
    if (max_delay_sec == 0) {
        return now;
    }
    // Random time up to max_delay_sec in the past
    long random_offset = (long)(rand() % max_delay_sec);
    return now - random_offset;
}
```
**Changes:**
- Add `long max_delay_sec` parameter
- Handle special case: `max_delay_sec == 0` returns current time
- Use `max_delay_sec` instead of hardcoded 172800
#### 2.2 Update nostr_nip59_create_seal() Function
**File:** `nostr_core_lib/nostr_core/nip059.c`
**Current Location:** Lines 144-215
**Function Signature Change:**
```c
// OLD:
cJSON* nostr_nip59_create_seal(cJSON* rumor,
const unsigned char* sender_private_key,
const unsigned char* recipient_public_key);
// NEW:
cJSON* nostr_nip59_create_seal(cJSON* rumor,
const unsigned char* sender_private_key,
const unsigned char* recipient_public_key,
long max_delay_sec);
```
**Code Change at Line 181:**
```c
// OLD:
time_t seal_time = random_past_timestamp();
// NEW:
time_t seal_time = random_past_timestamp(max_delay_sec);
```
#### 2.3 Update nostr_nip59_create_gift_wrap() Function
**File:** `nostr_core_lib/nostr_core/nip059.c`
**Current Location:** Lines 220-323
**Function Signature Change:**
```c
// OLD:
cJSON* nostr_nip59_create_gift_wrap(cJSON* seal,
const char* recipient_public_key_hex);
// NEW:
cJSON* nostr_nip59_create_gift_wrap(cJSON* seal,
const char* recipient_public_key_hex,
long max_delay_sec);
```
**Code Change at Line 275:**
```c
// OLD:
time_t wrap_time = random_past_timestamp();
// NEW:
time_t wrap_time = random_past_timestamp(max_delay_sec);
```
#### 2.4 Update nip059.h Header
**File:** `nostr_core_lib/nostr_core/nip059.h`
**Locations:** Lines 38-39 and 48
**Update Function Declarations:**
```c
// Line 38-39: Update nostr_nip59_create_seal
cJSON* nostr_nip59_create_seal(cJSON* rumor,
const unsigned char* sender_private_key,
const unsigned char* recipient_public_key,
long max_delay_sec);
// Line 48: Update nostr_nip59_create_gift_wrap
cJSON* nostr_nip59_create_gift_wrap(cJSON* seal,
const char* recipient_public_key_hex,
long max_delay_sec);
```
**Update Documentation Comments:**
```c
/**
* NIP-59: Create a seal (kind 13) wrapping a rumor
*
* @param rumor The rumor event to seal (cJSON object)
* @param sender_private_key 32-byte sender private key
* @param recipient_public_key 32-byte recipient public key (x-only)
* @param max_delay_sec Maximum random delay in seconds (0 = no randomization)
* @return cJSON object representing the seal event, or NULL on error
*/
/**
* NIP-59: Create a gift wrap (kind 1059) wrapping a seal
*
* @param seal The seal event to wrap (cJSON object)
* @param recipient_public_key_hex Recipient's public key in hex format
* @param max_delay_sec Maximum random delay in seconds (0 = no randomization)
* @return cJSON object representing the gift wrap event, or NULL on error
*/
```
---
### Phase 3: Update NIP-17 Integration
#### 3.1 Update nostr_nip17_send_dm() Function
**File:** `nostr_core_lib/nostr_core/nip017.c`
**Current Location:** Lines 260-320
**Function Signature Change:**
```c
// OLD:
int nostr_nip17_send_dm(cJSON* dm_event,
const char** recipient_pubkeys,
int num_recipients,
const unsigned char* sender_private_key,
cJSON** gift_wraps_out,
int max_gift_wraps);
// NEW:
int nostr_nip17_send_dm(cJSON* dm_event,
const char** recipient_pubkeys,
int num_recipients,
const unsigned char* sender_private_key,
cJSON** gift_wraps_out,
int max_gift_wraps,
long max_delay_sec);
```
**Code Changes:**
At line 281 (seal creation):
```c
// OLD:
cJSON* seal = nostr_nip59_create_seal(dm_event, sender_private_key, recipient_public_key);
// NEW:
cJSON* seal = nostr_nip59_create_seal(dm_event, sender_private_key, recipient_public_key, max_delay_sec);
```
At line 287 (gift wrap creation):
```c
// OLD:
cJSON* gift_wrap = nostr_nip59_create_gift_wrap(seal, recipient_pubkeys[i]);
// NEW:
cJSON* gift_wrap = nostr_nip59_create_gift_wrap(seal, recipient_pubkeys[i], max_delay_sec);
```
At line 306 (sender seal creation):
```c
// OLD:
cJSON* sender_seal = nostr_nip59_create_seal(dm_event, sender_private_key, sender_public_key);
// NEW:
cJSON* sender_seal = nostr_nip59_create_seal(dm_event, sender_private_key, sender_public_key, max_delay_sec);
```
At line 309 (sender gift wrap creation):
```c
// OLD:
cJSON* sender_gift_wrap = nostr_nip59_create_gift_wrap(sender_seal, sender_pubkey_hex);
// NEW:
cJSON* sender_gift_wrap = nostr_nip59_create_gift_wrap(sender_seal, sender_pubkey_hex, max_delay_sec);
```
#### 3.2 Update nip017.h Header
**File:** `nostr_core_lib/nostr_core/nip017.h`
**Location:** Lines 102-107
**Update Function Declaration:**
```c
int nostr_nip17_send_dm(cJSON* dm_event,
const char** recipient_pubkeys,
int num_recipients,
const unsigned char* sender_private_key,
cJSON** gift_wraps_out,
int max_gift_wraps,
long max_delay_sec);
```
**Update Documentation Comment (lines 88-100):**
```c
/**
* NIP-17: Send a direct message to recipients
*
* This function creates the appropriate rumor, seals it, gift wraps it,
* and returns the final gift wrap events ready for publishing.
*
* @param dm_event The unsigned DM event (kind 14 or 15)
* @param recipient_pubkeys Array of recipient public keys (hex strings)
* @param num_recipients Number of recipients
* @param sender_private_key 32-byte sender private key
* @param gift_wraps_out Array to store resulting gift wrap events (caller must free)
* @param max_gift_wraps Maximum number of gift wraps to create
* @param max_delay_sec Maximum random timestamp delay in seconds (0 = no randomization)
* @return Number of gift wrap events created, or -1 on error
*/
```
---
### Phase 4: Update c-relay Call Sites
#### 4.1 Update src/api.c
**Location:** Line 1319
**Current Code:**
```c
int send_result = nostr_nip17_send_dm(
dm_response, // dm_event
recipient_pubkeys, // recipient_pubkeys
1, // num_recipients
relay_privkey, // sender_private_key
gift_wraps, // gift_wraps_out
1 // max_gift_wraps
);
```
**New Code:**
```c
// Get timestamp delay configuration
long max_delay_sec = get_config_int("nip59_timestamp_max_delay_sec", 0);
int send_result = nostr_nip17_send_dm(
dm_response, // dm_event
recipient_pubkeys, // recipient_pubkeys
1, // num_recipients
relay_privkey, // sender_private_key
gift_wraps, // gift_wraps_out
1, // max_gift_wraps
max_delay_sec // max_delay_sec
);
```
#### 4.2 Update src/dm_admin.c
**Location:** Line 371
**Current Code:**
```c
int send_result = nostr_nip17_send_dm(
success_dm, // dm_event
sender_pubkey_array, // recipient_pubkeys
1, // num_recipients
relay_privkey, // sender_private_key
success_gift_wraps, // gift_wraps_out
1 // max_gift_wraps
);
```
**New Code:**
```c
// Get timestamp delay configuration
long max_delay_sec = get_config_int("nip59_timestamp_max_delay_sec", 0);
int send_result = nostr_nip17_send_dm(
success_dm, // dm_event
sender_pubkey_array, // recipient_pubkeys
1, // num_recipients
relay_privkey, // sender_private_key
success_gift_wraps, // gift_wraps_out
1, // max_gift_wraps
max_delay_sec // max_delay_sec
);
```
**Note:** Both files already include `config.h`, so `get_config_int()` is available.
---
## Testing Plan
### Test Case 1: No Randomization (Compatibility Mode)
**Configuration:** `nip59_timestamp_max_delay_sec = 0`
**Expected Behavior:**
- Gift wrap timestamps should equal current time
- Seal timestamps should equal current time
- No random delay applied
**Test Command:**
```bash
# Set config via admin API
# Send test DM
# Verify timestamps are current (within 1 second of send time)
```
### Test Case 2: Custom Delay
**Configuration:** `nip59_timestamp_max_delay_sec = 1000`
**Expected Behavior:**
- Gift wrap timestamps should be between now and 1000 seconds ago
- Seal timestamps should be between now and 1000 seconds ago
- Random delay applied within specified range
**Test Command:**
```bash
# Set config via admin API
# Send test DM
# Verify timestamps are in past but within 1000 seconds
```
### Test Case 3: Default Behavior
**Configuration:** `nip59_timestamp_max_delay_sec = 0` (default)
**Expected Behavior:**
- Gift wrap timestamps should equal current time
- Seal timestamps should equal current time
- No randomization (maximum compatibility)
**Test Command:**
```bash
# Use default config
# Send test DM
# Verify timestamps are current (within 1 second of send time)
```
### Test Case 4: Configuration Validation
**Test Invalid Values:**
- Negative value: Should be rejected
- Value > 604800: Should be rejected
- Valid boundary values (0, 604800): Should be accepted
### Test Case 5: Interoperability
**Test with Other Nostr Clients:**
- Send DM with `max_delay_sec = 0` to clients that don't randomize
- Send DM with `max_delay_sec = 172800` to clients that do randomize
- Verify both scenarios work correctly
---
## Documentation Updates
### Update docs/configuration_guide.md
Add new section:
```markdown
### NIP-59 Gift Wrap Timestamp Configuration
#### nip59_timestamp_max_delay_sec
- **Type:** Integer
- **Default:** 0 (no randomization)
- **Range:** 0 to 604800 (7 days)
- **Description:** Controls timestamp randomization for NIP-59 gift wraps
The NIP-59 protocol recommends randomizing timestamps on gift wraps to prevent
time-analysis attacks. However, some Nostr platforms don't implement this,
causing compatibility issues.
**Values:**
- `0` (default): No randomization - uses current timestamp (maximum compatibility)
- `1-604800`: Random timestamp between now and N seconds ago
**Use Cases:**
- Keep default `0` for maximum compatibility with clients that don't randomize
- Set to `172800` for privacy per NIP-59 specification (2 days randomization)
- Set to custom value (e.g., `3600`) for 1-hour randomization window
**Example:**
```json
["nip59_timestamp_max_delay_sec", "0"] // Default: compatibility mode
["nip59_timestamp_max_delay_sec", "3600"] // 1 hour randomization
["nip59_timestamp_max_delay_sec", "172800"] // 2 days randomization
```
```
---
## Implementation Checklist
### nostr_core_lib Changes
- [ ] Modify `random_past_timestamp()` to accept `max_delay_sec` parameter
- [ ] Update `nostr_nip59_create_seal()` signature and implementation
- [ ] Update `nostr_nip59_create_gift_wrap()` signature and implementation
- [ ] Update `nip059.h` function declarations and documentation
- [ ] Update `nostr_nip17_send_dm()` signature and implementation
- [ ] Update `nip017.h` function declaration and documentation
### c-relay Changes
- [ ] Add `nip59_timestamp_max_delay_sec` to `default_config_event.h`
- [ ] Add validation in `config.c` for new parameter
- [ ] Update `src/api.c` call site to pass `max_delay_sec`
- [ ] Update `src/dm_admin.c` call site to pass `max_delay_sec`
### Testing
- [ ] Test with `max_delay_sec = 0` (no randomization)
- [ ] Test with `max_delay_sec = 1000` (custom delay)
- [ ] Test with `max_delay_sec = 172800` (NIP-59 recommended value)
- [ ] Test configuration validation (invalid values)
- [ ] Test interoperability with other Nostr clients
### Documentation
- [ ] Update `docs/configuration_guide.md`
- [ ] Add this implementation plan to docs
- [ ] Update README if needed
---
## Rollback Plan
If issues arise:
1. Revert nostr_core_lib changes (git revert in submodule)
2. Revert c-relay changes
3. Configuration parameter will be ignored if not used
4. Default behavior (0) provides maximum compatibility
---
## Notes
- The configuration is read on each DM send, allowing runtime changes
- No restart required when changing `nip59_timestamp_max_delay_sec`
- Thread-safe by design (no global state)
- Default value of 0 provides maximum compatibility with other Nostr clients
- Can be changed to 172800 or other values for NIP-59 privacy features
---
## References
- [NIP-59: Gift Wrap](https://github.com/nostr-protocol/nips/blob/master/59.md)
- [NIP-17: Private Direct Messages](https://github.com/nostr-protocol/nips/blob/master/17.md)
- [NIP-44: Versioned Encryption](https://github.com/nostr-protocol/nips/blob/master/44.md)

File diff suppressed because it is too large

@@ -0,0 +1,325 @@
# Relay Traffic Measurement Guide
## Measuring Real-World Relay Traffic
To validate our performance assumptions, here are commands to measure actual event rates from live relays.
---
## Command: Count Events Over 1 Minute
### Basic Command
```bash
# Count events from relay.damus.io over 60 seconds
timeout 60 nak req -s $(date +%s) --stream wss://relay.damus.io | wc -l
```
This will:
1. Subscribe to all new events (`-s $(date +%s)` = since now)
2. Stream for 60 seconds (`timeout 60`)
3. Count the lines (each line = 1 event)
### With Event Rate Display
```bash
# Show events per second in real-time
timeout 60 nak req -s $(date +%s) --stream wss://relay.damus.io | \
pv -l -i 1 -r > /dev/null
```
This displays:
- Total events received
- Current rate (events/second)
- Average rate
### With Detailed Statistics
```bash
# Count events and calculate statistics
echo "Measuring relay traffic for 60 seconds..."
START=$(date +%s)
COUNT=$(timeout 60 nak req -s $START --stream wss://relay.damus.io | wc -l)
END=$(date +%s)
DURATION=$((END - START))
echo "Results:"
echo " Total events: $COUNT"
echo " Duration: ${DURATION}s"
echo " Events/second: $(echo "scale=2; $COUNT / $DURATION" | bc)"
echo " Events/minute: $COUNT"
```
### With Event Kind Distribution
```bash
# Count events by kind over 60 seconds
timeout 60 nak req -s $(date +%s) --stream wss://relay.damus.io | \
jq -r '.kind' | \
sort | uniq -c | sort -rn
```
Output example:
```
45 1 # 45 text notes
12 3 # 12 contact lists
8 7 # 8 reactions
3 6 # 3 reposts
```
### With Timestamp Analysis
```bash
# Show event timestamps and calculate intervals
timeout 60 nak req -s $(date +%s) --stream wss://relay.damus.io | \
    jq -r '.created_at' | \
    awk 'NR>1 {print $1-prev} {prev=$1}' | \
    awk '{sum+=$1; count++} END {
        print "Average interval:", sum/count, "seconds"
        print "Events per second:", count/sum
    }'
```
---
## Testing Multiple Relays
### Compare Traffic Across Relays
```bash
#!/bin/bash
# test_relay_traffic.sh
RELAYS=(
    "wss://relay.damus.io"
    "wss://nos.lol"
    "wss://relay.nostr.band"
    "wss://nostr.wine"
)
DURATION=60

echo "Measuring relay traffic for ${DURATION} seconds..."
echo ""
for relay in "${RELAYS[@]}"; do
    echo "Testing: $relay"
    count=$(timeout $DURATION nak req -s $(date +%s) --stream "$relay" 2>/dev/null | wc -l)
    rate=$(echo "scale=2; $count / $DURATION" | bc)
    echo " Events: $count"
    echo " Rate: ${rate}/sec"
    echo ""
done
```
---
## Expected Results (Based on Real Measurements)
### relay.damus.io (Large Public Relay)
- **Expected rate**: 0.5-2 events/second
- **60-second count**: 30-120 events
- **Peak times**: Higher during US daytime hours
### nos.lol (Medium Public Relay)
- **Expected rate**: 0.2-0.8 events/second
- **60-second count**: 12-48 events
### Personal/Small Relays
- **Expected rate**: 0.01-0.1 events/second
- **60-second count**: 1-6 events
---
## Using Results to Validate Performance Assumptions
After measuring your relay's traffic:
1. **Calculate average events/second**:
```
events_per_second = total_events / 60
```
2. **Estimate query overhead** (a runnable shell version follows this list):
```
# For 100k event database:
query_time = 70ms
overhead_percentage = (query_time * events_per_second) / 1000 * 100
# Example: 0.5 events/sec
overhead = (70 * 0.5) / 1000 * 100 = 3.5%
```
3. **Determine if optimization needed**:
- < 5% overhead: No optimization needed
- 5-20% overhead: Consider 1-second throttling
- > 20% overhead: Use materialized counters
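The arithmetic from step 2 in shell form (the 70 ms query time is the 100k-event figure above; substitute your own measurements):
```bash
# Estimate monitoring overhead (%) from a measured event rate
RATE=0.5      # events/second from the 60-second measurement
QUERY_MS=70   # distribution query time for a ~100k event database
echo "scale=2; $QUERY_MS * $RATE / 10" | bc   # (ms * rate / 1000) * 100 -> prints 3.50
```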
---
## Real-Time Monitoring During Development
### Monitor Your Own Relay
```bash
# Watch events in real-time with count
nak req -s $(date +%s) --stream ws://localhost:8888 | \
awk '{count++; print count, $0}'
```
### Monitor with Event Details
```bash
# Show event kind and pubkey for each event
nak req -s $(date +%s) --stream ws://localhost:8888 | \
jq -r '"[\(.kind)] \(.pubkey[0:8])... \(.content[0:50])"'
```
### Continuous Traffic Monitoring
```bash
# Monitor traffic in 10-second windows
while true; do
    echo "=== $(date) ==="
    count=$(timeout 10 nak req -s $(date +%s) --stream ws://localhost:8888 | wc -l)
    rate=$(echo "scale=2; $count / 10" | bc)
    echo "Events: $count (${rate}/sec)"
    sleep 1
done
```
---
## Performance Testing Commands
### Simulate Load
```bash
# Send test events to measure query performance
for i in {1..100}; do
    nak event -c "Test event $i" ws://localhost:8888
    sleep 0.1 # 10 events/second
done
```
### Measure Query Response Time
```bash
# Time how long queries take with current database
time sqlite3 your_relay.db "SELECT COUNT(*) FROM events"
time sqlite3 your_relay.db "SELECT kind, COUNT(*) FROM events GROUP BY kind"
```
---
## Automated Traffic Analysis Script
Save this as `analyze_relay_traffic.sh`:
```bash
#!/bin/bash
# Comprehensive relay traffic analysis
RELAY="${1:-ws://localhost:8888}"
DURATION="${2:-60}"
echo "Analyzing relay: $RELAY"
echo "Duration: ${DURATION} seconds"
echo ""
# Collect events
TMPFILE=$(mktemp)
timeout $DURATION nak req -s $(date +%s) --stream "$RELAY" > "$TMPFILE" 2>/dev/null
# Calculate statistics
TOTAL=$(wc -l < "$TMPFILE")
RATE=$(echo "scale=2; $TOTAL / $DURATION" | bc)
echo "=== Traffic Statistics ==="
echo "Total events: $TOTAL"
echo "Events/second: $RATE"
echo "Events/minute: $(echo "$TOTAL * 60 / $DURATION" | bc)"
echo ""
echo "=== Event Kind Distribution ==="
jq -r '.kind' "$TMPFILE" | sort | uniq -c | sort -rn | head -10
echo ""
echo "=== Top Publishers ==="
jq -r '.pubkey[0:16]' "$TMPFILE" | sort | uniq -c | sort -rn | head -5
echo ""
echo "=== Performance Estimate ==="
echo "For 100k event database:"
echo " Query time: ~70ms"
echo " Overhead: $(echo "scale=2; 70 * $RATE / 10" | bc)%"
echo ""
# Cleanup
rm "$TMPFILE"
```
Usage:
```bash
chmod +x analyze_relay_traffic.sh
./analyze_relay_traffic.sh wss://relay.damus.io 60
```
---
## Interpreting Results
### Low Traffic (< 0.1 events/sec)
- **Typical for**: Personal relays, small communities
- **Recommendation**: Trigger on every event, no optimization
- **Expected overhead**: < 1%
### Medium Traffic (0.1-0.5 events/sec)
- **Typical for**: Medium public relays
- **Recommendation**: Trigger on every event, consider throttling if database > 100k
- **Expected overhead**: 1-5%
### High Traffic (0.5-2 events/sec)
- **Typical for**: Large public relays
- **Recommendation**: Use 1-second throttling
- **Expected overhead**: 5-20% without throttling, < 1% with throttling
### Very High Traffic (> 2 events/sec)
- **Typical for**: Major public relays (rare)
- **Recommendation**: Use materialized counters
- **Expected overhead**: > 20% without optimization
---
## Continuous Monitoring in Production
### Add to Relay Startup
```bash
# In your relay startup script
echo "Starting traffic monitoring..."
nohup bash -c 'while true; do
    count=$(timeout 60 nak req -s $(date +%s) --stream ws://localhost:8888 2>/dev/null | wc -l)
    echo "$(date +%Y-%m-%d\ %H:%M:%S) - Events/min: $count" >> traffic.log
done' &
```
### Analyze Historical Traffic
```bash
# View traffic trends
cat traffic.log | awk '{print $5}' | \
    awk '{sum+=$1; count++} END {print "Average:", sum/count, "events/min"}'
```
---
## Conclusion
Use these commands to:
1. ✅ Measure real-world traffic on your relay
2. ✅ Validate performance assumptions
3. ✅ Determine if optimization is needed
4. ✅ Monitor traffic trends over time
**Remember**: Most relays will measure < 1 event/second, making the simple "trigger on every event" approach perfectly viable.

docs/sql_query_admin_api.md

@@ -0,0 +1,630 @@
# SQL Query Admin API Design
## Overview
This document describes the design for a general-purpose SQL query interface for the C-Relay admin API. This allows administrators to execute read-only SQL queries against the relay database through cryptographically signed kind 23456 events with NIP-44 encrypted command arrays.
## Security Model
### Authentication
- All queries must be sent as kind 23456 events with NIP-44 encrypted content
- Events must be signed by the admin's private key
- Admin pubkey verified against `config.admin_pubkey`
- Follows the same authentication pattern as existing admin commands
### Query Restrictions
While authentication is cryptographically secure, we implement defensive safeguards:
1. **Read-Only Enforcement**
- Only SELECT statements allowed
- Block: INSERT, UPDATE, DELETE, DROP, CREATE, ALTER, PRAGMA (write operations)
- Allow: SELECT, WITH (for CTEs)
2. **Resource Limits**
- Query timeout: 5 seconds (configurable)
- Result row limit: 1000 rows (configurable)
- Result size limit: 1MB (configurable)
3. **Query Logging**
- All queries logged with timestamp, admin pubkey, execution time
- Failed queries logged with error message
## Command Format
### Admin Event Structure (Kind 23456)
```json
{
  "id": "event_id",
  "pubkey": "admin_public_key",
  "created_at": 1234567890,
  "kind": 23456,
  "content": "AqHBUgcM7dXFYLQuDVzGwMST1G8jtWYyVvYxXhVGEu4nAb4LVw...",
  "tags": [
    ["p", "relay_public_key"]
  ],
  "sig": "event_signature"
}
```
The `content` field contains a NIP-44 encrypted JSON array:
```json
["sql_query", "SELECT * FROM events LIMIT 10"]
```
### Response Format (Kind 23457)
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44_encrypted_content",
"tags": [
["p", "admin_public_key"],
["e", "request_event_id"]
],
"sig": "response_event_signature"
}]
```
The `content` field contains NIP-44 encrypted JSON:
```json
{
  "query_type": "sql_query",
  "request_id": "request_event_id",
  "timestamp": 1234567890,
  "query": "SELECT * FROM events LIMIT 10",
  "execution_time_ms": 45,
  "row_count": 10,
  "columns": ["id", "pubkey", "created_at", "kind", "content"],
  "rows": [
    ["abc123...", "def456...", 1234567890, 1, "Hello world"],
    ...
  ]
}
```
**Note:** The response includes the request event ID in two places:
1. **In tags**: `["e", "request_event_id"]` - Standard Nostr convention for event references
2. **In content**: `"request_id": "request_event_id"` - For easy access after decryption
### Error Response Format (Kind 23457)
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44_encrypted_content",
"tags": [
["p", "admin_public_key"],
["e", "request_event_id"]
],
"sig": "response_event_signature"
}]
```
The `content` field contains NIP-44 encrypted JSON:
```json
{
  "query_type": "sql_query",
  "request_id": "request_event_id",
  "timestamp": 1234567890,
  "query": "DELETE FROM events",
  "status": "error",
  "error": "Query blocked: DELETE statements not allowed",
  "error_type": "blocked_statement"
}
```
## Available Database Tables and Views
### Core Tables
- **events** - All Nostr events (id, pubkey, created_at, kind, content, tags, sig)
- **config** - Configuration key-value pairs
- **auth_rules** - Authentication and authorization rules
- **subscription_events** - Subscription lifecycle events
- **event_broadcasts** - Event broadcast log
### Useful Views
- **recent_events** - Last 1000 events
- **event_stats** - Event statistics by type
- **configuration_events** - Kind 33334 configuration events
- **subscription_analytics** - Subscription metrics by date
- **active_subscriptions_log** - Currently active subscriptions
- **event_kinds_view** - Event distribution by kind
- **top_pubkeys_view** - Top 10 pubkeys by event count
- **time_stats_view** - Time-based statistics (24h, 7d, 30d)
## Implementation Plan
### Backend (dm_admin.c)
#### 1. Query Validation Function
```c
int validate_sql_query(const char* query, char* error_msg, size_t error_size);
```
- Check for blocked keywords (case-insensitive)
- Validate query syntax (basic checks)
- Return 0 on success, -1 on failure
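A minimal sketch of the keyword screen described above (illustrative only; a production version would also need to strip comments and string literals, which can hide keywords or trigger false positives):
```c
#define _GNU_SOURCE   // for strcasestr() on glibc
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

// Case-insensitive whole-word search, so "CREATE" does not match "created_at"
static int contains_word(const char* haystack, const char* word) {
    size_t wlen = strlen(word);
    for (const char* p = haystack; (p = strcasestr(p, word)) != NULL; p += wlen) {
        int ok_before = (p == haystack) ||
                        (!isalnum((unsigned char)p[-1]) && p[-1] != '_');
        int ok_after = !isalnum((unsigned char)p[wlen]) && p[wlen] != '_';
        if (ok_before && ok_after) return 1;
    }
    return 0;
}

int validate_sql_query(const char* query, char* error_msg, size_t error_size) {
    static const char* blocked[] = {
        "INSERT", "UPDATE", "DELETE", "DROP", "CREATE", "ALTER",
        "PRAGMA", "ATTACH", "DETACH", "VACUUM", NULL
    };
    while (isspace((unsigned char)*query)) query++;
    // Only SELECT and WITH (for CTEs) may begin a statement
    if (strncasecmp(query, "SELECT", 6) != 0 && strncasecmp(query, "WITH", 4) != 0) {
        snprintf(error_msg, error_size, "Only SELECT statements are allowed");
        return -1;
    }
    for (int i = 0; blocked[i]; i++) {
        if (contains_word(query, blocked[i])) {
            snprintf(error_msg, error_size,
                     "Query blocked: %s statements not allowed", blocked[i]);
            return -1;
        }
    }
    return 0;
}
```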
#### 2. Query Execution Function
```c
char* execute_sql_query(const char* query, const char* request_id,
                        char* error_msg, size_t error_size);
```
- Enforce the query timeout (e.g., via sqlite3_progress_handler(); sqlite3_busy_timeout() only covers lock waits)
- Execute query with row/size limits
- Build JSON response with results
- Log query execution
- Return JSON string or NULL on error
#### 3. Command Handler Integration
Add to `process_dm_admin_command()` in [`dm_admin.c`](src/dm_admin.c:131):
```c
else if (strcmp(command_type, "sql_query") == 0) {
    const char* query = get_tag_value(event, "sql_query", 1);
    if (!query) {
        DEBUG_ERROR("DM Admin: Missing sql_query parameter");
        snprintf(error_message, error_size, "invalid: missing SQL query");
    } else {
        result = handle_sql_query_unified(event, query, error_message, error_size, wsi);
    }
}
```
Add unified handler function:
```c
int handle_sql_query_unified(cJSON* event, const char* query,
                             char* error_message, size_t error_size,
                             struct lws* wsi) {
    // Get request event ID for response correlation
    cJSON* request_id_obj = cJSON_GetObjectItem(event, "id");
    if (!request_id_obj || !cJSON_IsString(request_id_obj)) {
        snprintf(error_message, error_size, "Missing request event ID");
        return -1;
    }
    const char* request_id = cJSON_GetStringValue(request_id_obj);

    // Validate query (validate_sql_query() returns 0 on success, -1 on failure)
    if (validate_sql_query(query, error_message, error_size) != 0) {
        return -1;
    }

    // Execute query and include request_id in result
    char* result_json = execute_sql_query(query, request_id, error_message, error_size);
    if (!result_json) {
        return -1;
    }

    // Send response as kind 23457 event with request ID in tags
    cJSON* sender_pubkey_obj = cJSON_GetObjectItem(event, "pubkey");
    if (!sender_pubkey_obj || !cJSON_IsString(sender_pubkey_obj)) {
        free(result_json);
        snprintf(error_message, error_size, "Missing sender pubkey");
        return -1;
    }
    const char* sender_pubkey = cJSON_GetStringValue(sender_pubkey_obj);

    int send_result = send_admin_response(sender_pubkey, result_json, request_id,
                                          error_message, error_size, wsi);
    free(result_json);
    return send_result;
}
```
### Frontend (api/index.html)
#### SQL Query Section UI
Add to [`api/index.html`](api/index.html:1):
```html
<section id="sql-query-section" class="admin-section">
<h2>SQL Query Console</h2>
<div class="query-selector">
<label for="query-dropdown">Quick Queries & History:</label>
<select id="query-dropdown" onchange="loadSelectedQuery()">
<option value="">-- Select a query --</option>
<optgroup label="Common Queries">
<option value="recent_events">Recent Events</option>
<option value="event_stats">Event Statistics</option>
<option value="subscriptions">Active Subscriptions</option>
<option value="top_pubkeys">Top Pubkeys</option>
<option value="event_kinds">Event Kinds Distribution</option>
<option value="time_stats">Time-based Statistics</option>
</optgroup>
<optgroup label="Query History" id="history-group">
<!-- Dynamically populated from localStorage -->
</optgroup>
</select>
</div>
<div class="query-editor">
<label for="sql-input">SQL Query:</label>
<textarea id="sql-input" rows="5" placeholder="SELECT * FROM events LIMIT 10"></textarea>
<div class="query-actions">
<button onclick="executeSqlQuery()" class="primary-button">Execute Query</button>
<button onclick="clearSqlQuery()">Clear</button>
<button onclick="clearQueryHistory()" class="danger-button">Clear History</button>
</div>
</div>
<div class="query-results">
<h3>Results</h3>
<div id="query-info" class="info-box"></div>
<div id="query-table" class="table-container"></div>
</div>
</section>
```
#### JavaScript Functions (api/index.js)
Add to [`api/index.js`](api/index.js:1):
```javascript
// Predefined query templates
const SQL_QUERY_TEMPLATES = {
  recent_events: "SELECT id, pubkey, created_at, kind, substr(content, 1, 50) as content FROM events ORDER BY created_at DESC LIMIT 20",
  event_stats: "SELECT * FROM event_stats",
  subscriptions: "SELECT * FROM active_subscriptions_log ORDER BY created_at DESC",
  top_pubkeys: "SELECT * FROM top_pubkeys_view",
  event_kinds: "SELECT * FROM event_kinds_view ORDER BY count DESC",
  time_stats: "SELECT * FROM time_stats_view"
};

// Query history management (localStorage)
const QUERY_HISTORY_KEY = 'c_relay_sql_history';
const MAX_HISTORY_ITEMS = 20;

// Load query history from localStorage
function loadQueryHistory() {
  try {
    const history = localStorage.getItem(QUERY_HISTORY_KEY);
    return history ? JSON.parse(history) : [];
  } catch (e) {
    console.error('Failed to load query history:', e);
    return [];
  }
}

// Save query to history
function saveQueryToHistory(query) {
  if (!query || query.trim().length === 0) return;
  try {
    let history = loadQueryHistory();
    // Remove duplicate if exists
    history = history.filter(q => q !== query);
    // Add to beginning
    history.unshift(query);
    // Limit size
    if (history.length > MAX_HISTORY_ITEMS) {
      history = history.slice(0, MAX_HISTORY_ITEMS);
    }
    localStorage.setItem(QUERY_HISTORY_KEY, JSON.stringify(history));
    updateQueryDropdown();
  } catch (e) {
    console.error('Failed to save query history:', e);
  }
}

// Clear query history
function clearQueryHistory() {
  if (confirm('Clear all query history?')) {
    localStorage.removeItem(QUERY_HISTORY_KEY);
    updateQueryDropdown();
  }
}

// Update dropdown with history
function updateQueryDropdown() {
  const historyGroup = document.getElementById('history-group');
  if (!historyGroup) return;
  // Clear existing history options
  historyGroup.innerHTML = '';
  const history = loadQueryHistory();
  if (history.length === 0) {
    const option = document.createElement('option');
    option.value = '';
    option.textContent = '(no history)';
    option.disabled = true;
    historyGroup.appendChild(option);
    return;
  }
  history.forEach((query, index) => {
    const option = document.createElement('option');
    option.value = `history_${index}`;
    // Truncate long queries for display
    const displayQuery = query.length > 60 ? query.substring(0, 60) + '...' : query;
    option.textContent = displayQuery;
    option.dataset.query = query;
    historyGroup.appendChild(option);
  });
}

// Load selected query from dropdown
function loadSelectedQuery() {
  const dropdown = document.getElementById('query-dropdown');
  const selectedValue = dropdown.value;
  if (!selectedValue) return;
  let query = '';
  // Check if it's a template
  if (SQL_QUERY_TEMPLATES[selectedValue]) {
    query = SQL_QUERY_TEMPLATES[selectedValue];
  }
  // Check if it's from history
  else if (selectedValue.startsWith('history_')) {
    const selectedOption = dropdown.options[dropdown.selectedIndex];
    query = selectedOption.dataset.query;
  }
  if (query) {
    document.getElementById('sql-input').value = query;
  }
  // Reset dropdown to placeholder
  dropdown.value = '';
}

// Initialize query history on page load
document.addEventListener('DOMContentLoaded', function() {
  updateQueryDropdown();
});

// Clear the SQL query input
function clearSqlQuery() {
  document.getElementById('sql-input').value = '';
  document.getElementById('query-info').innerHTML = '';
  document.getElementById('query-table').innerHTML = '';
}

// Track pending SQL queries by request ID
const pendingSqlQueries = new Map();

// Execute SQL query via admin API
async function executeSqlQuery() {
  const query = document.getElementById('sql-input').value;
  if (!query.trim()) {
    showError('Please enter a SQL query');
    return;
  }
  try {
    // Show loading state
    document.getElementById('query-info').innerHTML = '<div class="loading">Executing query...</div>';
    document.getElementById('query-table').innerHTML = '';
    // Save to history (before execution, so it's saved even if query fails)
    saveQueryToHistory(query.trim());
    // Send query as kind 23456 admin command
    const command = ["sql_query", query];
    const requestEvent = await sendAdminCommand(command);
    // Store query info for when response arrives
    if (requestEvent && requestEvent.id) {
      pendingSqlQueries.set(requestEvent.id, {
        query: query,
        timestamp: Date.now()
      });
    }
    // Note: Response will be handled by the event listener
    // which will call displaySqlQueryResults() when response arrives
  } catch (error) {
    showError('Failed to execute query: ' + error.message);
  }
}

// Handle SQL query response (called by event listener)
function handleSqlQueryResponse(response) {
  // Check if this is a response to one of our queries
  if (response.request_id && pendingSqlQueries.has(response.request_id)) {
    const queryInfo = pendingSqlQueries.get(response.request_id);
    pendingSqlQueries.delete(response.request_id);
    // Display results
    displaySqlQueryResults(response);
  }
}

// Display SQL query results
function displaySqlQueryResults(response) {
  const infoDiv = document.getElementById('query-info');
  const tableDiv = document.getElementById('query-table');
  if (response.status === 'error' || response.error) {
    infoDiv.innerHTML = `<div class="error-message">❌ ${response.error || 'Query failed'}</div>`;
    tableDiv.innerHTML = '';
    return;
  }
  // Show query info with request ID for debugging
  const rowCount = response.row_count || 0;
  const execTime = response.execution_time_ms || 0;
  const requestId = response.request_id ? response.request_id.substring(0, 8) + '...' : 'unknown';
  infoDiv.innerHTML = `
    <div class="query-info-success">
      <span>✅ Query executed successfully</span>
      <span>Rows: ${rowCount}</span>
      <span>Execution Time: ${execTime}ms</span>
      <span class="request-id" title="${response.request_id || ''}">Request: ${requestId}</span>
    </div>
  `;
  // Build results table
  if (response.rows && response.rows.length > 0) {
    let html = '<table class="sql-results-table"><thead><tr>';
    response.columns.forEach(col => {
      html += `<th>${escapeHtml(col)}</th>`;
    });
    html += '</tr></thead><tbody>';
    response.rows.forEach(row => {
      html += '<tr>';
      row.forEach(cell => {
        const cellValue = cell === null ? '<em>NULL</em>' : escapeHtml(String(cell));
        html += `<td>${cellValue}</td>`;
      });
      html += '</tr>';
    });
    html += '</tbody></table>';
    tableDiv.innerHTML = html;
  } else {
    tableDiv.innerHTML = '<p class="no-results">No results returned</p>';
  }
}

// Helper function to escape HTML
function escapeHtml(text) {
  const div = document.createElement('div');
  div.textContent = text;
  return div.innerHTML;
}
```
## Example Queries
### Subscription Statistics
```sql
SELECT
    date,
    subscriptions_created,
    subscriptions_ended,
    avg_duration_seconds,
    unique_clients
FROM subscription_analytics
ORDER BY date DESC
LIMIT 7;
```
### Event Distribution by Kind
```sql
SELECT kind, count, percentage
FROM event_kinds_view
ORDER BY count DESC;
```
### Recent Events by Specific Pubkey
```sql
SELECT id, created_at, kind, content
FROM events
WHERE pubkey = 'abc123...'
ORDER BY created_at DESC
LIMIT 20;
```
### Active Subscriptions with Details
```sql
SELECT
    subscription_id,
    client_ip,
    events_sent,
    duration_seconds,
    filter_json
FROM active_subscriptions_log
ORDER BY created_at DESC;
```
### Database Size and Event Count
```sql
SELECT
    (SELECT COUNT(*) FROM events) as total_events,
    (SELECT COUNT(*) FROM subscription_events) as total_subscriptions,
    (SELECT COUNT(*) FROM auth_rules WHERE active = 1) as active_rules;
```
## Configuration Options
Add to config table:
```sql
INSERT INTO config (key, value, data_type, description, category) VALUES
    ('sql_query_enabled', 'true', 'boolean', 'Enable SQL query admin API', 'admin'),
    ('sql_query_timeout', '5', 'integer', 'Query timeout in seconds', 'admin'),
    ('sql_query_row_limit', '1000', 'integer', 'Maximum rows per query', 'admin'),
    ('sql_query_size_limit', '1048576', 'integer', 'Maximum result size in bytes', 'admin'),
    ('sql_query_log_enabled', 'true', 'boolean', 'Log all SQL queries', 'admin');
```
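As an illustration of how the relay might consume these settings, here is a small sketch of reading a boolean config value, assuming the config table layout above and a global `g_db` handle (`config_get_bool` is a hypothetical helper):

```c
#include <sqlite3.h>
#include <string.h>

extern sqlite3* g_db;  // assumed global database handle

// Hypothetical helper: returns 1 if the stored value is "true", else 0;
// falls back to default_value when the key is missing or the query fails.
int config_get_bool(const char* key, int default_value) {
    sqlite3_stmt* stmt = NULL;
    int value = default_value;
    if (sqlite3_prepare_v2(g_db, "SELECT value FROM config WHERE key = ?",
                           -1, &stmt, NULL) != SQLITE_OK) {
        return default_value;
    }
    sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
    if (sqlite3_step(stmt) == SQLITE_ROW) {
        const unsigned char* text = sqlite3_column_text(stmt, 0);
        value = (text && strcmp((const char*)text, "true") == 0);
    }
    sqlite3_finalize(stmt);
    return value;
}
```

With a helper like this, the command handler could bail out early when `config_get_bool("sql_query_enabled", 1)` returns 0.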
## Security Considerations
### What This Protects Against
1. **Unauthorized Access** - Only admin can execute queries (cryptographic verification)
2. **Data Modification** - Read-only enforcement prevents accidental/malicious changes
3. **Resource Exhaustion** - Timeouts and limits prevent DoS
4. **Audit Trail** - All queries logged for security review
### What This Does NOT Protect Against
1. **Admin Compromise** - If admin private key is stolen, attacker has full read access
2. **Information Disclosure** - Admin can read all data (by design)
3. **Complex Attacks** - Sophisticated SQL injection might bypass simple keyword blocking
### Recommendations
1. **Secure Admin Key** - Store admin private key securely, never commit to git
2. **Monitor Query Logs** - Review query logs regularly for suspicious activity
3. **Backup Database** - Regular backups in case of issues
4. **Test Queries** - Test complex queries on development relay first
## Testing Plan
### Unit Tests
1. Query validation (blocked keywords, syntax)
2. Result formatting (JSON structure)
3. Error handling (timeouts, limits)
### Integration Tests
1. Execute queries through NIP-17 DM
2. Verify authentication (admin vs non-admin)
3. Test resource limits (timeout, row limit)
4. Test error responses
### Security Tests
1. Attempt blocked statements (INSERT, DELETE, etc.)
2. Attempt SQL injection patterns
3. Test query timeout with slow queries
4. Test row limit with large result sets
## Future Enhancements
1. **Query History** - Store recent queries for quick re-execution
2. **Query Favorites** - Save frequently used queries
3. **Export Results** - Download results as CSV/JSON
4. **Query Builder** - Visual query builder for common operations
5. **Real-time Updates** - WebSocket updates for live data
6. **Query Sharing** - Share queries with other admins (if multi-admin support added)
## Migration Path
### Phase 1: Backend Implementation
1. Add query validation function
2. Add query execution function
3. Integrate with NIP-17 command handler
4. Add configuration options
5. Add query logging
### Phase 2: Frontend Implementation
1. Add SQL query section to index.html
2. Add query execution JavaScript
3. Add predefined query templates
4. Add results display formatting
### Phase 3: Testing and Documentation
1. Write unit tests
2. Write integration tests
3. Update user documentation
4. Create query examples guide
### Phase 4: Enhancement
1. Add query history
2. Add export functionality
3. Optimize performance
4. Add more predefined templates

docs/sql_test_design.md

@@ -0,0 +1,258 @@
# SQL Query Test Script Design
## Overview
Test script for validating the SQL query admin API functionality. Tests query validation, execution, error handling, and security features.
## Script: tests/sql_test.sh
### Test Categories
#### 1. Query Validation Tests
- ✅ Valid SELECT queries accepted
- ❌ INSERT statements blocked
- ❌ UPDATE statements blocked
- ❌ DELETE statements blocked
- ❌ DROP statements blocked
- ❌ CREATE statements blocked
- ❌ ALTER statements blocked
- ❌ PRAGMA write operations blocked
#### 2. Query Execution Tests
- ✅ Simple SELECT query
- ✅ SELECT with WHERE clause
- ✅ SELECT with JOIN
- ✅ SELECT with ORDER BY and LIMIT
- ✅ Query against views
- ✅ Query with aggregate functions (COUNT, SUM, AVG)
#### 3. Response Format Tests
- ✅ Response includes request_id
- ✅ Response includes query_type
- ✅ Response includes columns array
- ✅ Response includes rows array
- ✅ Response includes row_count
- ✅ Response includes execution_time_ms
#### 4. Error Handling Tests
- ❌ Invalid SQL syntax
- ❌ Non-existent table
- ❌ Non-existent column
- ❌ Query timeout (if configurable)
#### 5. Security Tests
- ❌ SQL injection attempts blocked
- ❌ Nested query attacks blocked
- ❌ Comment-based attacks blocked
#### 6. Concurrent Query Tests
- ✅ Multiple queries in parallel
- ✅ Responses correctly correlated to requests
## Script Structure
```bash
#!/bin/bash
# SQL Query Admin API Test Script
# Tests the sql_query command functionality

set -e

RELAY_URL="${RELAY_URL:-ws://localhost:8888}"
ADMIN_PRIVKEY="${ADMIN_PRIVKEY:-}"
RELAY_PUBKEY="${RELAY_PUBKEY:-}"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Test counters
TESTS_RUN=0
TESTS_PASSED=0
TESTS_FAILED=0

# Helper functions
print_test() {
    echo -e "${YELLOW}TEST: $1${NC}"
    TESTS_RUN=$((TESTS_RUN + 1))
}

print_pass() {
    echo -e "${GREEN}✓ PASS: $1${NC}"
    TESTS_PASSED=$((TESTS_PASSED + 1))
}

print_fail() {
    echo -e "${RED}✗ FAIL: $1${NC}"
    TESTS_FAILED=$((TESTS_FAILED + 1))
}

# Send SQL query command
send_sql_query() {
    local query="$1"
    # Implementation using nostr CLI tools or curl
    # Returns response JSON
    :  # no-op placeholder so this stub is valid bash
}

# Test functions
test_valid_select() {
    print_test "Valid SELECT query"
    local response=$(send_sql_query "SELECT * FROM events LIMIT 1")
    if echo "$response" | grep -q '"query_type":"sql_query"'; then
        print_pass "Valid SELECT accepted"
    else
        print_fail "Valid SELECT rejected"
    fi
}

test_blocked_insert() {
    print_test "INSERT statement blocked"
    local response=$(send_sql_query "INSERT INTO events VALUES (...)")
    if echo "$response" | grep -q '"error"'; then
        print_pass "INSERT correctly blocked"
    else
        print_fail "INSERT not blocked"
    fi
}

# ... more test functions ...

# Main test execution
main() {
    echo "================================"
    echo "SQL Query Admin API Tests"
    echo "================================"
    echo ""

    # Check prerequisites
    if [ -z "$ADMIN_PRIVKEY" ]; then
        echo "Error: ADMIN_PRIVKEY not set"
        exit 1
    fi

    # Run test suites
    echo "1. Query Validation Tests"
    test_valid_select
    test_blocked_insert
    test_blocked_update
    test_blocked_delete
    test_blocked_drop
    echo ""
    echo "2. Query Execution Tests"
    test_simple_select
    test_select_with_where
    test_select_with_join
    test_select_views
    echo ""
    echo "3. Response Format Tests"
    test_response_format
    test_request_id_correlation
    echo ""
    echo "4. Error Handling Tests"
    test_invalid_syntax
    test_nonexistent_table
    echo ""
    echo "5. Security Tests"
    test_sql_injection
    echo ""
    echo "6. Concurrent Query Tests"
    test_concurrent_queries

    # Print summary
    echo ""
    echo "================================"
    echo "Test Summary"
    echo "================================"
    echo "Tests Run: $TESTS_RUN"
    echo "Tests Passed: $TESTS_PASSED"
    echo "Tests Failed: $TESTS_FAILED"
    if [ $TESTS_FAILED -eq 0 ]; then
        echo -e "${GREEN}All tests passed!${NC}"
        exit 0
    else
        echo -e "${RED}Some tests failed${NC}"
        exit 1
    fi
}

main "$@"
```
## Test Data Setup
The script should work with the existing relay database without requiring special test data, using:
- Existing events table
- Existing views (event_stats, recent_events, etc.)
- Existing config table
## Usage
```bash
# Set environment variables
export ADMIN_PRIVKEY="your_admin_private_key_hex"
export RELAY_PUBKEY="relay_public_key_hex"
export RELAY_URL="ws://localhost:8888"
# Run tests
./tests/sql_test.sh
# Run specific test category
./tests/sql_test.sh validation
./tests/sql_test.sh security
```
## Integration with CI/CD
The script should:
- Return exit code 0 on success, 1 on failure
- Output TAP (Test Anything Protocol) format for CI integration
- Be runnable in automated test pipelines
- Not require manual intervention
## Dependencies
- `bash` (version 4+)
- `curl` or `websocat` for WebSocket communication
- `jq` for JSON parsing
- Nostr CLI tools (optional, for event signing)
- Running c-relay instance
## Example Output
```
================================
SQL Query Admin API Tests
================================
1. Query Validation Tests
TEST: Valid SELECT query
✓ PASS: Valid SELECT accepted
TEST: INSERT statement blocked
✓ PASS: INSERT correctly blocked
TEST: UPDATE statement blocked
✓ PASS: UPDATE correctly blocked
2. Query Execution Tests
TEST: Simple SELECT query
✓ PASS: Query executed successfully
TEST: SELECT with WHERE clause
✓ PASS: WHERE clause works correctly
...
================================
Test Summary
================================
Tests Run: 24
Tests Passed: 24
Tests Failed: 0
All tests passed!


@@ -0,0 +1,22 @@
# Startup and configuration for c_nostr_relay
No command-line variables. Quick start.
## First time startup
When the program first starts, it generates new private/public key pairs for the program and for the admin, and prints the admin private key on the command line. It creates a database in the same directory as the application, named after the database's pubkey: `<pubkey>.nrdb` (short for "nostr relay db").
Internally, it creates a valid nostr event using the generated admin private key and saves it to the events table in the db. That configuration event is a kind 33334 event with a `d` tag equal to the database public key (d=`<db pubkey>`).
The event is populated from internal default values. The configuration setup is then run by reading the event back from the events table in the database.
Important: the configuration values are ALWAYS read and set from the 33334 event in the events table; they are NEVER read from the stored default values. This is important for consistency.
The config section of the program keeps track of the admin file, and if it ever changes, it does whatever is needed to implement the change.
## Later startups
The program looks for a database named c_nostr_relay.db in the same directory as the program. If it doesn't find one, it assumes a first-time startup. If it does find one, it loads the database, and the config section reads the config event and proceeds from there.
## Changing database location?
The location of the database can be changed by creating a symlink to the new location.

File diff suppressed because it is too large


@@ -0,0 +1,147 @@
# Static Build Improvements
## Overview
The `build_static.sh` script has been updated to properly support MUSL static compilation and includes several optimizations.
## Changes Made
### 1. True MUSL Static Binary Support
The script now attempts to build with `musl-gcc` for truly portable static binaries:
- **MUSL binaries** have zero runtime dependencies and work across all Linux distributions
- **Automatic fallback** to glibc static linking if MUSL compilation fails (e.g., missing MUSL-compiled libraries)
- Clear messaging about which type of binary was created
### 2. SQLite Build Caching
SQLite is now built once and cached for future builds:
- **Cache location**: `~/.cache/c-relay-sqlite/`
- **Version-specific**: Each SQLite version gets its own cache directory
- **Significant speedup**: Subsequent builds skip the SQLite compilation step
- **Manual cleanup**: `rm -rf ~/.cache/c-relay-sqlite` to clear cache
### 3. Smart Package Installation
The script now checks for required packages before installing:
- Only installs missing packages
- Reduces unnecessary `apt` operations
- Faster builds when dependencies are already present
### 4. Bug Fixes
- Fixed format warning in `src/subscriptions.c` line 1067 (changed `%zu` to `%d` with cast for `MAX_SEARCH_TERM_LENGTH`)
## Usage
```bash
./build_static.sh
```
The script will:
1. Check for and install `musl-gcc` if needed
2. Build or use cached SQLite with JSON1 support
3. Attempt MUSL static compilation
4. Fall back to glibc static compilation if MUSL fails
5. Verify the resulting binary
## Binary Types
### MUSL Static Binary (Ideal - Currently Not Achievable)
- **Filename**: `build/c_relay_static_musl_x86_64`
- **Dependencies**: None (truly static)
- **Portability**: Works on any Linux distribution
- **Status**: Requires MUSL-compiled libwebsockets and other dependencies (not available by default)
### Glibc Static Binary (Current Output)
- **Filename**: `build/c_relay_static_x86_64` or `build/c_relay_static_glibc_x86_64`
- **Dependencies**: None - fully statically linked with glibc
- **Portability**: Works on most Linux distributions (glibc is statically included)
- **Note**: Despite using glibc, this is a **fully static binary** with no runtime dependencies
## Verification
The script automatically verifies binaries using `ldd` and `file`:
```bash
# For MUSL binary
ldd build/c_relay_static_musl_x86_64
# Output: "not a dynamic executable" (good!)
# For glibc binary
ldd build/c_relay_static_glibc_x86_64
# Output: "not a dynamic executable" (glibc is statically linked in)
```
## Known Limitations
### MUSL Compilation Currently Fails Because:
1. **libwebsockets not available as MUSL static library**
- System libwebsockets is compiled with glibc, not MUSL
- MUSL cannot link against glibc-compiled libraries
- Solution: Build libwebsockets from source with musl-gcc (future enhancement)
2. **Other dependencies not MUSL-compatible**
- libssl, libcrypto, libsecp256k1, libcurl must be available as MUSL static libraries
- Most systems only provide glibc versions
- Solution: Build entire dependency chain with musl-gcc (complex, future enhancement)
### Current Behavior
The script attempts MUSL compilation but falls back to glibc:
1. Tries to compile with `musl-gcc -static` (fails due to missing MUSL libraries)
2. Logs the error to `/tmp/musl_build.log`
3. Displays a clear warning message
4. Automatically falls back to `gcc -static` with glibc
5. Produces a **fully static binary** with glibc statically linked (no runtime dependencies)
**Important**: The glibc static binary is still fully portable across most Linux distributions because glibc is statically included in the binary. It's not as universally portable as MUSL would be, but it works on virtually all modern Linux systems.
## Future Enhancements
1. **Full MUSL dependency chain**: Build all dependencies (libwebsockets, OpenSSL, etc.) with musl-gcc
2. **Multi-architecture support**: Add ARM64 MUSL builds
3. **Docker-based builds**: Use Alpine Linux containers for guaranteed MUSL environment
4. **Dependency vendoring**: Include pre-built MUSL libraries in the repository
## Troubleshooting
### Clear SQLite Cache
```bash
rm -rf ~/.cache/c-relay-sqlite
```
### Force Package Reinstall
```bash
sudo apt install --reinstall musl-dev musl-tools libssl-dev libcurl4-openssl-dev libsecp256k1-dev
```
### Check Build Logs
```bash
cat /tmp/musl_build.log
```
### Verify Binary Type
```bash
file build/c_relay_static_*
ldd build/c_relay_static_* 2>&1
```
## Performance Impact
- **First build**: ~2-3 minutes (includes SQLite compilation)
- **Subsequent builds**: ~30-60 seconds (uses cached SQLite)
- **Cache size**: ~10-15 MB per SQLite version
## Compatibility
The updated script is compatible with:
- Ubuntu 20.04+
- Debian 10+
- Other Debian-based distributions with `apt` package manager
For other distributions, adjust package installation commands accordingly.


@@ -0,0 +1,209 @@
# Subscription Matching Debug Plan
## Problem
The relay is not matching kind 1059 (NIP-17 gift wrap) events to subscriptions, even though a subscription exists with `kinds:[1059]` filter. The log shows:
```
Event broadcast complete: 0 subscriptions matched
```
But we have this subscription:
```
sub:3 146.70.187.119 0x78edc9b43210 8m 27s kinds:[1059], since:10/23/2025, 4:27:59 PM, limit:50
```
## Investigation Strategy
### 1. Add Debug Output to `event_matches_filter()` (lines 386-564)
Add debug logging at each filter check to trace the matching logic:
- **Entry point**: Log the event kind and filter being tested
- **Kinds filter check** (lines 392-415): Log whether kinds filter exists, the event kind value, and each filter kind being compared
- **Authors filter check** (lines 417-442): Log if authors filter exists and matching results
- **IDs filter check** (lines 444-469): Log if IDs filter exists and matching results
- **Since filter check** (lines 471-482): Log the event timestamp vs filter since value
- **Until filter check** (lines 484-495): Log the event timestamp vs filter until value
- **Tag filters check** (lines 497-561): Log tag filter matching details
- **Exit point**: Log whether the overall filter matched
### 2. Add Debug Output to `event_matches_subscription()` (lines 567-581)
Add logging to show:
- How many filters are in the subscription
- Which filter (if any) matched
- Overall subscription match result
### 3. Add Debug Output to `broadcast_event_to_subscriptions()` (lines 584-726)
Add logging to show:
- The event being broadcast (kind, id, created_at)
- Total number of active subscriptions being checked
- How many subscriptions matched after the first pass
### 4. Key Areas to Focus On
Based on the code analysis, the most likely issues are:
1. **Kind matching logic** (lines 392-415): The event kind might not be extracted correctly, or the comparison might be failing
2. **Since timestamp** (lines 471-482): The subscription has a `since` filter - if the event timestamp is before this, it won't match
3. **Event structure**: The event JSON might not have the expected structure
### 5. Specific Debug Additions
#### In `event_matches_filter()` at line 386:
```c
// Add at start of function
cJSON* event_kind_obj = cJSON_GetObjectItem(event, "kind");
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
cJSON* event_created_at_obj = cJSON_GetObjectItem(event, "created_at");
DEBUG_TRACE("FILTER_MATCH: Testing event kind=%d id=%.8s created_at=%ld",
event_kind_obj ? (int)cJSON_GetNumberValue(event_kind_obj) : -1,
event_id_obj && cJSON_IsString(event_id_obj) ? cJSON_GetStringValue(event_id_obj) : "null",
event_created_at_obj ? (long)cJSON_GetNumberValue(event_created_at_obj) : 0);
```
#### In kinds filter check (after line 392):
```c
if (filter->kinds && cJSON_IsArray(filter->kinds)) {
    DEBUG_TRACE("FILTER_MATCH: Checking kinds filter with %d kinds", cJSON_GetArraySize(filter->kinds));
    cJSON* event_kind = cJSON_GetObjectItem(event, "kind");
    if (!event_kind || !cJSON_IsNumber(event_kind)) {
        DEBUG_WARN("FILTER_MATCH: Event has no valid kind field");
        return 0;
    }
    int event_kind_val = (int)cJSON_GetNumberValue(event_kind);
    DEBUG_TRACE("FILTER_MATCH: Event kind=%d", event_kind_val);
    int kind_match = 0;
    cJSON* kind_item = NULL;
    cJSON_ArrayForEach(kind_item, filter->kinds) {
        if (cJSON_IsNumber(kind_item)) {
            int filter_kind = (int)cJSON_GetNumberValue(kind_item);
            DEBUG_TRACE("FILTER_MATCH: Comparing event kind %d with filter kind %d", event_kind_val, filter_kind);
            if (filter_kind == event_kind_val) {
                kind_match = 1;
                DEBUG_TRACE("FILTER_MATCH: Kind matched!");
                break;
            }
        }
    }
    if (!kind_match) {
        DEBUG_TRACE("FILTER_MATCH: No kind match, filter rejected");
        return 0;
    }
    DEBUG_TRACE("FILTER_MATCH: Kinds filter passed");
}
```
#### In since filter check (after line 472):
```c
if (filter->since > 0) {
    cJSON* event_created_at = cJSON_GetObjectItem(event, "created_at");
    if (!event_created_at || !cJSON_IsNumber(event_created_at)) {
        DEBUG_WARN("FILTER_MATCH: Event has no valid created_at field");
        return 0;
    }
    long event_timestamp = (long)cJSON_GetNumberValue(event_created_at);
    DEBUG_TRACE("FILTER_MATCH: Checking since filter: event_ts=%ld filter_since=%ld",
                event_timestamp, filter->since);
    if (event_timestamp < filter->since) {
        DEBUG_TRACE("FILTER_MATCH: Event too old (before since), filter rejected");
        return 0;
    }
    DEBUG_TRACE("FILTER_MATCH: Since filter passed");
}
```
#### At end of `event_matches_filter()` (before line 563):
```c
DEBUG_TRACE("FILTER_MATCH: All filters passed, event matches!");
return 1; // All filters passed
```
#### In `event_matches_subscription()` at line 567:
```c
int event_matches_subscription(cJSON* event, subscription_t* subscription) {
    if (!event || !subscription || !subscription->filters) {
        return 0;
    }
    DEBUG_TRACE("SUB_MATCH: Testing subscription '%s'", subscription->id);
    int filter_num = 0;
    subscription_filter_t* filter = subscription->filters;
    while (filter) {
        filter_num++;
        DEBUG_TRACE("SUB_MATCH: Testing filter #%d", filter_num);
        if (event_matches_filter(event, filter)) {
            DEBUG_TRACE("SUB_MATCH: Filter #%d matched! Subscription '%s' matches",
                        filter_num, subscription->id);
            return 1; // Match found (OR logic)
        }
        filter = filter->next;
    }
    DEBUG_TRACE("SUB_MATCH: No filters matched for subscription '%s'", subscription->id);
    return 0; // No filters matched
}
```
#### In `broadcast_event_to_subscriptions()` at line 584:
```c
int broadcast_event_to_subscriptions(cJSON* event) {
    if (!event) {
        return 0;
    }

    // Log event details
    cJSON* event_kind = cJSON_GetObjectItem(event, "kind");
    cJSON* event_id = cJSON_GetObjectItem(event, "id");
    cJSON* event_created_at = cJSON_GetObjectItem(event, "created_at");
    DEBUG_TRACE("BROADCAST: Event kind=%d id=%.8s created_at=%ld",
                event_kind ? (int)cJSON_GetNumberValue(event_kind) : -1,
                event_id && cJSON_IsString(event_id) ? cJSON_GetStringValue(event_id) : "null",
                event_created_at ? (long)cJSON_GetNumberValue(event_created_at) : 0);

    // ... existing expiration check code ...

    // After line 611 (before pthread_mutex_lock):
    pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
    int total_subs = 0;
    subscription_t* count_sub = g_subscription_manager.active_subscriptions;
    while (count_sub) {
        total_subs++;
        count_sub = count_sub->next;
    }
    DEBUG_TRACE("BROADCAST: Checking %d active subscriptions", total_subs);
    subscription_t* sub = g_subscription_manager.active_subscriptions;
    // ... rest of matching logic ...
```
## Expected Outcome
With these debug additions, we should see output like:
```
BROADCAST: Event kind=1059 id=abc12345 created_at=1729712279
BROADCAST: Checking 1 active subscriptions
SUB_MATCH: Testing subscription 'sub:3'
SUB_MATCH: Testing filter #1
FILTER_MATCH: Testing event kind=1059 id=abc12345 created_at=1729712279
FILTER_MATCH: Checking kinds filter with 1 kinds
FILTER_MATCH: Event kind=1059
FILTER_MATCH: Comparing event kind 1059 with filter kind 1059
FILTER_MATCH: Kind matched!
FILTER_MATCH: Kinds filter passed
FILTER_MATCH: Checking since filter: event_ts=1729712279 filter_since=1729708079
FILTER_MATCH: Since filter passed
FILTER_MATCH: All filters passed, event matches!
SUB_MATCH: Filter #1 matched! Subscription 'sub:3' matches
Event broadcast complete: 1 subscriptions matched
```
This will help us identify exactly where the matching is failing.


@@ -0,0 +1,358 @@
# Subscription Table Collapsible Groups Implementation Plan
## Objective
Add collapsible/expandable functionality to subscription groups, where:
1. Each WSI Pointer group starts collapsed, showing only a summary row
2. Clicking the summary row expands to show all subscription details for that WSI Pointer
3. Clicking again collapses the group back to the summary row
## Current Behavior
- All subscriptions are displayed in a flat list
- WSI Pointer value shown only on first row of each group
- No interaction or collapse functionality
## Desired Behavior
- **Collapsed state**: One row per WSI Pointer showing summary information
- **Expanded state**: Header row + detail rows for each subscription in that group
- **Toggle**: Click on header row to expand/collapse
- **Visual indicator**: Arrow or icon showing expand/collapse state
## Design Approach
### Option 1: Summary Row + Detail Rows (Recommended)
```
[▶] WSI Pointer: 0x12345678 | Subscriptions: 3 | Total Duration: 15m
(detail rows hidden)
When clicked:
[▼] WSI Pointer: 0x12345678 | Subscriptions: 3 | Total Duration: 15m
| sub-001 | 5m 30s | kinds:[1]
| sub-002 | 3m 15s | kinds:[3]
| sub-003 | 6m 15s | kinds:[1,3]
```
### Option 2: First Row as Header (Alternative)
```
[▶] 0x12345678 | sub-001 | 5m 30s | kinds:[1] (+ 2 more)
When clicked:
[▼] 0x12345678 | sub-001 | 5m 30s | kinds:[1]
| sub-002 | 3m 15s | kinds:[3]
| sub-003 | 6m 15s | kinds:[1,3]
```
**Recommendation**: Option 1 provides clearer UX and better summary information.
## Implementation Details
### Files to Modify
1. `api/index.js` - Function `populateSubscriptionDetailsTable()` (lines 4277-4384)
2. `api/index.css` - Add styles for collapsible rows
### Data Structure Changes
Need to group subscriptions by WSI Pointer first:
```javascript
// Group subscriptions by wsi_pointer
const groupedSubscriptions = {};
subscriptionsData.forEach(sub => {
  const wsiKey = sub.wsi_pointer || 'N/A';
  if (!groupedSubscriptions[wsiKey]) {
    groupedSubscriptions[wsiKey] = [];
  }
  groupedSubscriptions[wsiKey].push(sub);
});
```
### HTML Structure
#### Summary Row (Header)
```html
<tr class="subscription-group-header" data-wsi-pointer="0x12345678" data-expanded="false">
<td colspan="4" style="cursor: pointer; user-select: none;">
<span class="expand-icon"></span>
<strong>WSI Pointer:</strong> 0x12345678
<span class="group-summary">
| Subscriptions: 3 | Oldest: 15m 30s
</span>
</td>
</tr>
```
#### Detail Rows (Initially Hidden)
```html
<tr class="subscription-detail-row" data-wsi-group="0x12345678" style="display: none;">
<td style="padding-left: 30px;"><!-- Empty for WSI Pointer --></td>
<td>sub-001</td>
<td>5m 30s</td>
<td>kinds:[1]</td>
</tr>
```
### JavaScript Implementation
#### Modified populateSubscriptionDetailsTable Function
```javascript
function populateSubscriptionDetailsTable(subscriptionsData) {
  const tableBody = document.getElementById('subscription-details-table-body');
  if (!tableBody || !subscriptionsData || !Array.isArray(subscriptionsData)) return;
  tableBody.innerHTML = '';

  if (subscriptionsData.length === 0) {
    const row = document.createElement('tr');
    row.innerHTML = '<td colspan="4" style="text-align: center; font-style: italic;">No active subscriptions</td>';
    tableBody.appendChild(row);
    return;
  }

  // Sort subscriptions by wsi_pointer to group them together
  subscriptionsData.sort((a, b) => {
    const wsiA = a.wsi_pointer || '';
    const wsiB = b.wsi_pointer || '';
    return wsiA.localeCompare(wsiB);
  });

  // Group subscriptions by wsi_pointer
  const groupedSubscriptions = {};
  subscriptionsData.forEach(sub => {
    const wsiKey = sub.wsi_pointer || 'N/A';
    if (!groupedSubscriptions[wsiKey]) {
      groupedSubscriptions[wsiKey] = [];
    }
    groupedSubscriptions[wsiKey].push(sub);
  });

  // Create rows for each group
  Object.entries(groupedSubscriptions).forEach(([wsiPointer, subscriptions]) => {
    // Calculate group summary
    const subCount = subscriptions.length;
    const now = Math.floor(Date.now() / 1000);
    const oldestDuration = Math.max(...subscriptions.map(s => now - s.created_at));
    const oldestDurationStr = formatDuration(oldestDuration);

    // Create header row (summary)
    const headerRow = document.createElement('tr');
    headerRow.className = 'subscription-group-header';
    headerRow.setAttribute('data-wsi-pointer', wsiPointer);
    headerRow.setAttribute('data-expanded', 'false');
    headerRow.style.cursor = 'pointer';
    headerRow.style.userSelect = 'none';
    headerRow.style.backgroundColor = 'var(--hover-color, #f5f5f5)';
    headerRow.innerHTML = `
      <td colspan="4" style="padding: 8px;">
        <span class="expand-icon" style="display: inline-block; width: 20px; transition: transform 0.2s;">▶</span>
        <strong style="font-family: 'Courier New', monospace; font-size: 12px;">WSI: ${wsiPointer}</strong>
        <span style="color: #666; margin-left: 15px;">
          Subscriptions: ${subCount} | Oldest: ${oldestDurationStr}
        </span>
      </td>
    `;
    // Add click handler to toggle expansion
    headerRow.addEventListener('click', () => toggleSubscriptionGroup(wsiPointer));
    tableBody.appendChild(headerRow);

    // Create detail rows (initially hidden)
    subscriptions.forEach((subscription, index) => {
      const detailRow = document.createElement('tr');
      detailRow.className = 'subscription-detail-row';
      detailRow.setAttribute('data-wsi-group', wsiPointer);
      detailRow.style.display = 'none';

      // Calculate duration
      const duration = now - subscription.created_at;
      const durationStr = formatDuration(duration);

      // Format filters
      let filtersDisplay = 'None';
      if (subscription.filters && subscription.filters.length > 0) {
        const filterDetails = [];
        subscription.filters.forEach((filter) => {
          const parts = [];
          if (filter.kinds && Array.isArray(filter.kinds) && filter.kinds.length > 0) {
            parts.push(`kinds:[${filter.kinds.join(',')}]`);
          }
          if (filter.authors && Array.isArray(filter.authors) && filter.authors.length > 0) {
            const authorCount = filter.authors.length;
            if (authorCount === 1) {
              const shortPubkey = filter.authors[0].substring(0, 8) + '...';
              parts.push(`authors:[${shortPubkey}]`);
            } else {
              parts.push(`authors:[${authorCount} pubkeys]`);
            }
          }
          if (filter.ids && Array.isArray(filter.ids) && filter.ids.length > 0) {
            const idCount = filter.ids.length;
            parts.push(`ids:[${idCount} event${idCount > 1 ? 's' : ''}]`);
          }
          const timeParts = [];
          if (filter.since && filter.since > 0) {
            const sinceDate = new Date(filter.since * 1000).toLocaleString();
            timeParts.push(`since:${sinceDate}`);
          }
          if (filter.until && filter.until > 0) {
            const untilDate = new Date(filter.until * 1000).toLocaleString();
            timeParts.push(`until:${untilDate}`);
          }
          if (timeParts.length > 0) {
            parts.push(timeParts.join(', '));
          }
          if (filter.limit && filter.limit > 0) {
            parts.push(`limit:${filter.limit}`);
          }
          if (filter.tag_filters && Array.isArray(filter.tag_filters) && filter.tag_filters.length > 0) {
            parts.push(`tags:[${filter.tag_filters.length} filter${filter.tag_filters.length > 1 ? 's' : ''}]`);
          }
          if (parts.length > 0) {
            filterDetails.push(parts.join(', '));
          } else {
            filterDetails.push('empty filter');
          }
        });
        filtersDisplay = filterDetails.join(' | ');
      }

      detailRow.innerHTML = `
        <td style="padding-left: 30px; font-family: 'Courier New', monospace; font-size: 11px; color: #999;">└─</td>
        <td style="font-family: 'Courier New', monospace; font-size: 12px;">${subscription.id || 'N/A'}</td>
        <td>${durationStr}</td>
        <td>${filtersDisplay}</td>
      `;
      tableBody.appendChild(detailRow);
    });
  });
}

// Toggle function for expanding/collapsing groups
function toggleSubscriptionGroup(wsiPointer) {
  const headerRow = document.querySelector(`.subscription-group-header[data-wsi-pointer="${wsiPointer}"]`);
  const detailRows = document.querySelectorAll(`.subscription-detail-row[data-wsi-group="${wsiPointer}"]`);
  const expandIcon = headerRow.querySelector('.expand-icon');
  const isExpanded = headerRow.getAttribute('data-expanded') === 'true';

  if (isExpanded) {
    // Collapse
    detailRows.forEach(row => row.style.display = 'none');
    expandIcon.textContent = '▶';
    expandIcon.style.transform = 'rotate(0deg)';
    headerRow.setAttribute('data-expanded', 'false');
  } else {
    // Expand
    detailRows.forEach(row => row.style.display = 'table-row');
    expandIcon.textContent = '▼';
    expandIcon.style.transform = 'rotate(90deg)';
    headerRow.setAttribute('data-expanded', 'true');
  }
}
```
### CSS Additions (api/index.css)
```css
/* Subscription group header styles */
.subscription-group-header {
  background-color: var(--hover-color, #f5f5f5);
  font-weight: 500;
  transition: background-color 0.2s;
}

.subscription-group-header:hover {
  background-color: var(--primary-color-light, #e8e8e8);
}

.expand-icon {
  display: inline-block;
  width: 20px;
  transition: transform 0.2s ease;
  font-size: 12px;
}

/* Detail row styles */
.subscription-detail-row {
  background-color: var(--background-color, #ffffff);
}

.subscription-detail-row:hover {
  background-color: var(--hover-color-light, #fafafa);
}

/* Dark mode support */
.dark-mode .subscription-group-header {
  background-color: var(--hover-color-dark, #2a2a2a);
}

.dark-mode .subscription-group-header:hover {
  background-color: var(--primary-color-dark, #333333);
}

.dark-mode .subscription-detail-row {
  background-color: var(--background-color-dark, #1a1a1a);
}

.dark-mode .subscription-detail-row:hover {
  background-color: var(--hover-color-dark, #252525);
}
```
## Features Included
1. **Collapsible groups**: Each WSI Pointer group can be collapsed/expanded
2. **Visual indicator**: Arrow icon (▶/▼) shows current state
3. **Summary information**: Shows subscription count and oldest duration
4. **Smooth transitions**: Icon rotation animation
5. **Hover effects**: Visual feedback on header rows
6. **Tree structure**: Detail rows indented with └─ character
7. **Dark mode support**: Proper styling for both themes
8. **Keyboard accessible**: Can be enhanced with keyboard navigation
## User Experience Flow
1. **Initial load**: All groups collapsed, showing summary rows only
2. **Click header**: Group expands, showing all subscription details
3. **Click again**: Group collapses back to summary
4. **Multiple groups**: Each group can be independently expanded/collapsed
5. **Visual feedback**: Hover effects and smooth animations
## Testing Checklist
1. ✅ Verify groups start collapsed by default
2. ✅ Verify clicking header expands group
3. ✅ Verify clicking again collapses group
4. ✅ Verify multiple groups can be expanded simultaneously
5. ✅ Verify summary information is accurate
6. ✅ Verify detail rows display correctly when expanded
7. ✅ Verify styling works in both light and dark modes
8. ✅ Verify no console errors
9. ✅ Verify performance with many subscriptions
## Optional Enhancements
1. **Expand/Collapse All**: Add buttons to expand or collapse all groups at once
2. **Remember State**: Store expansion state in localStorage
3. **Keyboard Navigation**: Add keyboard shortcuts (Space/Enter to toggle)
4. **Animation**: Add slide-down/up animation for detail rows
5. **Search/Filter**: Add ability to search within subscriptions
6. **Export**: Add ability to export subscription data
## Next Steps
1. Review this plan
2. Switch to Code mode
3. Implement the changes in `api/index.js`
4. Add CSS styles in `api/index.css`
5. Test the collapsible functionality
6. Verify all edge cases work correctly


@@ -0,0 +1,155 @@
# Subscription Table WSI Pointer Grouping Implementation Plan
## Objective
Modify the subscription details table to show the WSI Pointer value only once per group - on the first row of each WSI Pointer group, leaving it blank for subsequent rows with the same WSI Pointer.
## Current Behavior
- All rows show the WSI Pointer value
- Rows are sorted by WSI Pointer (grouping is working)
- Visual grouping is not clear
## Desired Behavior
- First row of each WSI Pointer group shows the full WSI Pointer value
- Subsequent rows in the same group have an empty cell for WSI Pointer
- This creates a clear visual grouping effect
## Implementation Details
### File to Modify
`api/index.js` - Function `populateSubscriptionDetailsTable()` (lines 4277-4384)
### Code Changes Required
#### Current Code (lines 4291-4383):
```javascript
// Sort subscriptions by wsi_pointer to group them together
subscriptionsData.sort((a, b) => {
  const wsiA = a.wsi_pointer || '';
  const wsiB = b.wsi_pointer || '';
  return wsiA.localeCompare(wsiB);
});

subscriptionsData.forEach((subscription, index) => {
  const row = document.createElement('tr');

  // Calculate duration
  const now = Math.floor(Date.now() / 1000);
  const duration = now - subscription.created_at;
  const durationStr = formatDuration(duration);

  // Format client IP (show full IP for admin view)
  const clientIP = subscription.client_ip || 'unknown';

  // Format wsi_pointer (show full pointer)
  const wsiPointer = subscription.wsi_pointer || 'N/A';

  // Format filters (show actual filter details)
  let filtersDisplay = 'None';
  // ... filter formatting code ...

  row.innerHTML = `
    <td style="font-family: 'Courier New', monospace; font-size: 12px;">${wsiPointer}</td>
    <td style="font-family: 'Courier New', monospace; font-size: 12px;">${subscription.id || 'N/A'}</td>
    <!-- <td style="font-family: 'Courier New', monospace; font-size: 12px;">${clientIP}</td> -->
    <td>${durationStr}</td>
    <td>${filtersDisplay}</td>
  `;
  tableBody.appendChild(row);
});
```
#### Modified Code:
```javascript
// Sort subscriptions by wsi_pointer to group them together
subscriptionsData.sort((a, b) => {
  const wsiA = a.wsi_pointer || '';
  const wsiB = b.wsi_pointer || '';
  return wsiA.localeCompare(wsiB);
});

// Track previous WSI Pointer to detect group changes
let previousWsiPointer = null;

subscriptionsData.forEach((subscription, index) => {
  const row = document.createElement('tr');

  // Calculate duration
  const now = Math.floor(Date.now() / 1000);
  const duration = now - subscription.created_at;
  const durationStr = formatDuration(duration);

  // Format client IP (show full IP for admin view)
  const clientIP = subscription.client_ip || 'unknown';

  // Format wsi_pointer - only show if it's different from previous row
  const currentWsiPointer = subscription.wsi_pointer || 'N/A';
  let wsiPointerDisplay = '';
  if (currentWsiPointer !== previousWsiPointer) {
    // This is the first row of a new group - show the WSI Pointer
    wsiPointerDisplay = currentWsiPointer;
    previousWsiPointer = currentWsiPointer;
  } else {
    // This is a continuation of the same group - leave blank
    wsiPointerDisplay = '';
  }

  // Format filters (show actual filter details)
  let filtersDisplay = 'None';
  // ... filter formatting code remains the same ...

  row.innerHTML = `
    <td style="font-family: 'Courier New', monospace; font-size: 12px;">${wsiPointerDisplay}</td>
    <td style="font-family: 'Courier New', monospace; font-size: 12px;">${subscription.id || 'N/A'}</td>
    <!-- <td style="font-family: 'Courier New', monospace; font-size: 12px;">${clientIP}</td> -->
    <td>${durationStr}</td>
    <td>${filtersDisplay}</td>
  `;
  tableBody.appendChild(row);
});
```
### Key Changes Explained
1. **Add tracking variable**: `let previousWsiPointer = null;` before the forEach loop
2. **Store current WSI Pointer**: `const currentWsiPointer = subscription.wsi_pointer || 'N/A';`
3. **Compare with previous**: Check if `currentWsiPointer !== previousWsiPointer`
4. **Conditional display**:
- If different: Show the WSI Pointer value and update `previousWsiPointer`
- If same: Show empty string (blank cell)
5. **Use display variable**: Replace `${wsiPointer}` with `${wsiPointerDisplay}` in the row HTML
### Visual Result
**Before:**
```
WSI Pointer | Subscription ID | Duration | Filters
0x12345678 | sub-001 | 5m 30s | kinds:[1]
0x12345678 | sub-002 | 3m 15s | kinds:[3]
0x87654321 | sub-003 | 1m 45s | kinds:[1,3]
```
**After:**
```
WSI Pointer | Subscription ID | Duration | Filters
0x12345678 | sub-001 | 5m 30s | kinds:[1]
| sub-002 | 3m 15s | kinds:[3]
0x87654321 | sub-003 | 1m 45s | kinds:[1,3]
```
## Testing Checklist
1. ✅ Verify first row of each group shows WSI Pointer
2. ✅ Verify subsequent rows in same group are blank
3. ✅ Verify grouping works with multiple subscriptions per WSI Pointer
4. ✅ Verify single subscription per WSI Pointer still shows the value
5. ✅ Verify empty/null WSI Pointers are handled correctly
6. ✅ Verify table still displays correctly when no subscriptions exist
## Next Steps
1. Review this plan
2. Switch to Code mode
3. Implement the changes in `api/index.js`
4. Test the implementation
5. Verify the visual grouping effect

View File

@@ -0,0 +1,427 @@
# Unified Startup Sequence Design
## Overview
This document describes the new unified startup sequence where all config values are created first, then CLI overrides are applied as a separate atomic operation. This eliminates the current 3-step incremental building process.
## Current Problems
1. **Incremental Config Building**: Config is built in 3 steps:
- Step 1: `populate_default_config_values()` - adds defaults
- Step 2: CLI overrides applied via `update_config_in_table()`
- Step 3: `add_pubkeys_to_config_table()` - adds generated keys
2. **Race Conditions**: Cache can be refreshed between steps, causing incomplete config reads
3. **Complexity**: Multiple code paths for first-time vs restart scenarios
## New Design Principles
1. **Atomic Config Creation**: All config values created in single transaction
2. **Separate Override Phase**: CLI overrides applied after complete config exists
3. **Unified Code Path**: Same logic for first-time and restart scenarios
4. **Cache Safety**: Cache only loaded after config is complete
---
## Scenario 1: First-Time Startup (No Database)
### Sequence
```
1. Key Generation Phase
├─ generate_random_private_key_bytes() → admin_privkey_bytes
├─ nostr_bytes_to_hex() → admin_privkey (hex)
├─ nostr_ec_public_key_from_private_key() → admin_pubkey_bytes
├─ nostr_bytes_to_hex() → admin_pubkey (hex)
├─ generate_random_private_key_bytes() → relay_privkey_bytes
├─ nostr_bytes_to_hex() → relay_privkey (hex)
├─ nostr_ec_public_key_from_private_key() → relay_pubkey_bytes
└─ nostr_bytes_to_hex() → relay_pubkey (hex)
2. Database Creation Phase
├─ create_database_with_relay_pubkey(relay_pubkey)
│ └─ Sets g_database_path = "<relay_pubkey>.db"
└─ init_database(g_database_path)
└─ Creates database with embedded schema (includes config table)
3. Complete Config Population Phase (ATOMIC)
├─ BEGIN TRANSACTION
├─ populate_all_config_values_atomic()
│ ├─ Insert ALL default config values from DEFAULT_CONFIG_VALUES[]
│ ├─ Insert admin_pubkey
│ └─ Insert relay_pubkey
└─ COMMIT TRANSACTION
4. CLI Override Phase (ATOMIC)
├─ BEGIN TRANSACTION
├─ apply_cli_overrides()
│ ├─ IF cli_options.port_override > 0:
│ │ └─ UPDATE config SET value = ? WHERE key = 'relay_port'
│ ├─ IF cli_options.admin_pubkey_override[0]:
│ │ └─ UPDATE config SET value = ? WHERE key = 'admin_pubkey'
│ └─ IF cli_options.relay_privkey_override[0]:
│ └─ UPDATE config SET value = ? WHERE key = 'relay_privkey'
└─ COMMIT TRANSACTION
5. Secure Key Storage Phase
└─ store_relay_private_key(relay_privkey)
└─ INSERT INTO relay_seckey (private_key_hex) VALUES (?)
6. Cache Initialization Phase
└─ refresh_unified_cache_from_table()
└─ Loads complete config into g_unified_cache
```
### Function Call Sequence
```c
// In main.c - first_time_startup branch
if (is_first_time_startup()) {
    // 1. Key Generation
    first_time_startup_sequence(&cli_options);
    // → Generates keys, stores in g_unified_cache
    // → Sets g_database_path
    // → Does NOT populate config yet

    // 2. Database Creation
    init_database(g_database_path);
    // → Creates database with schema

    // 3. Complete Config Population (NEW FUNCTION)
    populate_all_config_values_atomic(&cli_options);
    // → Inserts ALL defaults + pubkeys in single transaction
    // → Does NOT apply CLI overrides yet

    // 4. CLI Override Phase (NEW FUNCTION)
    apply_cli_overrides_atomic(&cli_options);
    // → Updates config table with CLI overrides
    // → Separate transaction after complete config exists

    // 5. Secure Key Storage
    store_relay_private_key(relay_privkey);

    // 6. Cache Initialization
    refresh_unified_cache_from_table();
}
```
### New Functions Needed
```c
// In config.c
int populate_all_config_values_atomic(const cli_options_t* cli_options) {
    // BEGIN TRANSACTION
    // Insert ALL defaults from DEFAULT_CONFIG_VALUES[]
    // Insert admin_pubkey from g_unified_cache
    // Insert relay_pubkey from g_unified_cache
    // COMMIT TRANSACTION
    return 0;
}

int apply_cli_overrides_atomic(const cli_options_t* cli_options) {
    // BEGIN TRANSACTION
    // IF port_override: UPDATE config SET value = ? WHERE key = 'relay_port'
    // IF admin_pubkey_override: UPDATE config SET value = ? WHERE key = 'admin_pubkey'
    // IF relay_privkey_override: UPDATE config SET value = ? WHERE key = 'relay_privkey'
    // COMMIT TRANSACTION
    // invalidate_config_cache()
    return 0;
}
```
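As a concrete sketch of the override stub, `apply_cli_overrides_atomic()` might look roughly like this, assuming a global `g_db` handle and a `cli_options_t` shaped as used throughout this document (the struct definition here is a minimal stand-in, and `invalidate_config_cache()` is assumed to exist):

```c
#include <sqlite3.h>
#include <stdio.h>

typedef struct {            // minimal stand-in for the document's cli_options_t
    int  port_override;
    char admin_pubkey_override[65];
    char relay_privkey_override[65];
} cli_options_t;

extern sqlite3* g_db;                       // assumed global database handle
extern void invalidate_config_cache(void);  // assumed existing helper

static int update_config_value(const char* key, const char* value) {
    sqlite3_stmt* stmt = NULL;
    if (sqlite3_prepare_v2(g_db, "UPDATE config SET value = ? WHERE key = ?",
                           -1, &stmt, NULL) != SQLITE_OK) {
        return -1;
    }
    sqlite3_bind_text(stmt, 1, value, -1, SQLITE_STATIC);
    sqlite3_bind_text(stmt, 2, key, -1, SQLITE_STATIC);
    int rc = (sqlite3_step(stmt) == SQLITE_DONE) ? 0 : -1;
    sqlite3_finalize(stmt);
    return rc;
}

int apply_cli_overrides_atomic(const cli_options_t* cli_options) {
    sqlite3_exec(g_db, "BEGIN TRANSACTION", NULL, NULL, NULL);
    if (cli_options->port_override > 0) {
        char port_str[16];
        snprintf(port_str, sizeof(port_str), "%d", cli_options->port_override);
        update_config_value("relay_port", port_str);
    }
    if (cli_options->admin_pubkey_override[0]) {
        update_config_value("admin_pubkey", cli_options->admin_pubkey_override);
    }
    if (cli_options->relay_privkey_override[0]) {
        update_config_value("relay_privkey", cli_options->relay_privkey_override);
    }
    sqlite3_exec(g_db, "COMMIT", NULL, NULL, NULL);
    invalidate_config_cache();  // cache reloads only after the complete config exists
    return 0;
}
```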
---
## Scenario 2: Restart with Existing Database + CLI Options
### Sequence
```
1. Database Discovery Phase
├─ find_existing_db_files() → ["<relay_pubkey>.db"]
├─ extract_pubkey_from_filename() → relay_pubkey
└─ Sets g_database_path = "<relay_pubkey>.db"
2. Database Initialization Phase
└─ init_database(g_database_path)
└─ Opens existing database
3. Config Validation Phase
└─ validate_config_table_completeness()
├─ Check if all required keys exist
└─ IF missing keys: populate_missing_config_values()
4. CLI Override Phase (ATOMIC)
├─ BEGIN TRANSACTION
├─ apply_cli_overrides()
│ └─ UPDATE config SET value = ? WHERE key = ?
└─ COMMIT TRANSACTION
5. Cache Initialization Phase
└─ refresh_unified_cache_from_table()
└─ Loads complete config into g_unified_cache
```
### Function Call Sequence
```c
// In main.c - existing relay branch
else {
    // 1. Database Discovery
    char** existing_files = find_existing_db_files();
    char* relay_pubkey = extract_pubkey_from_filename(existing_files[0]);
    startup_existing_relay(relay_pubkey);
    // → Sets g_database_path

    // 2. Database Initialization
    init_database(g_database_path);

    // 3. Config Validation (NEW FUNCTION)
    validate_config_table_completeness();
    // → Checks for missing keys
    // → Populates any missing defaults

    // 4. CLI Override Phase (REUSE FUNCTION)
    if (has_cli_overrides(&cli_options)) {
        apply_cli_overrides_atomic(&cli_options);
    }

    // 5. Cache Initialization
    refresh_unified_cache_from_table();
}
```
### New Functions Needed
```c
// In config.c
int validate_config_table_completeness(void) {
    // Check if all DEFAULT_CONFIG_VALUES keys exist
    // IF missing: populate_missing_config_values()
    return 0;
}

int populate_missing_config_values(void) {
    // BEGIN TRANSACTION
    // For each key in DEFAULT_CONFIG_VALUES:
    //   IF NOT EXISTS: INSERT INTO config
    // COMMIT TRANSACTION
    return 0;
}

int has_cli_overrides(const cli_options_t* cli_options) {
    return (cli_options->port_override > 0 ||
            cli_options->admin_pubkey_override[0] != '\0' ||
            cli_options->relay_privkey_override[0] != '\0');
}
```
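A possible body for `populate_missing_config_values()` is sketched below; it assumes a global `g_db` handle, a `DEFAULT_CONFIG_VALUES[]` array terminated by a NULL key, that `key` is the table's unique/primary key (so `INSERT OR IGNORE` works), and that inserting only `(key, value)` is acceptable even though the real schema has more columns:

```c
#include <sqlite3.h>

typedef struct { const char* key; const char* value; } config_default_t;

extern sqlite3* g_db;                                  // assumed global handle
extern const config_default_t DEFAULT_CONFIG_VALUES[]; // assumed NULL-key terminated

int populate_missing_config_values(void) {
    sqlite3_exec(g_db, "BEGIN TRANSACTION", NULL, NULL, NULL);
    sqlite3_stmt* stmt = NULL;
    // INSERT OR IGNORE only adds rows whose key is not already present,
    // so existing (possibly overridden) values are left untouched.
    if (sqlite3_prepare_v2(g_db,
            "INSERT OR IGNORE INTO config (key, value) VALUES (?, ?)",
            -1, &stmt, NULL) != SQLITE_OK) {
        sqlite3_exec(g_db, "ROLLBACK", NULL, NULL, NULL);
        return -1;
    }
    for (int i = 0; DEFAULT_CONFIG_VALUES[i].key; i++) {
        sqlite3_bind_text(stmt, 1, DEFAULT_CONFIG_VALUES[i].key, -1, SQLITE_STATIC);
        sqlite3_bind_text(stmt, 2, DEFAULT_CONFIG_VALUES[i].value, -1, SQLITE_STATIC);
        sqlite3_step(stmt);
        sqlite3_reset(stmt);
        sqlite3_clear_bindings(stmt);
    }
    sqlite3_finalize(stmt);
    sqlite3_exec(g_db, "COMMIT", NULL, NULL, NULL);
    return 0;
}
```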
---
## Scenario 3: Restart with Existing Database + No CLI Options
### Sequence
```
1. Database Discovery Phase
├─ find_existing_db_files() → ["<relay_pubkey>.db"]
├─ extract_pubkey_from_filename() → relay_pubkey
└─ Sets g_database_path = "<relay_pubkey>.db"
2. Database Initialization Phase
└─ init_database(g_database_path)
└─ Opens existing database
3. Config Validation Phase
└─ validate_config_table_completeness()
├─ Check if all required keys exist
└─ IF missing keys: populate_missing_config_values()
4. Cache Initialization Phase (IMMEDIATE)
└─ refresh_unified_cache_from_table()
└─ Loads complete config into g_unified_cache
```
### Function Call Sequence
```c
// In main.c - existing relay branch (no CLI overrides)
else {
    // 1. Database Discovery
    char** existing_files = find_existing_db_files();
    char* relay_pubkey = extract_pubkey_from_filename(existing_files[0]);
    startup_existing_relay(relay_pubkey);

    // 2. Database Initialization
    init_database(g_database_path);

    // 3. Config Validation
    validate_config_table_completeness();

    // 4. Cache Initialization (IMMEDIATE - no overrides to apply)
    refresh_unified_cache_from_table();
}
```
---
## Key Improvements
### 1. Atomic Config Creation
**Before:**
```c
populate_default_config_values(); // Step 1
update_config_in_table("relay_port", port_str); // Step 2
add_pubkeys_to_config_table(); // Step 3
```
**After:**
```c
populate_all_config_values_atomic(&cli_options); // Single transaction
apply_cli_overrides_atomic(&cli_options); // Separate transaction
```
### 2. Elimination of Race Conditions
**Before:**
- Cache could refresh between steps 1-3
- Incomplete config could be read
**After:**
- Config created atomically
- Cache only refreshed after complete config exists
### 3. Unified Code Path
**Before:**
- Different logic for first-time vs restart
- `populate_default_config_values()` vs `add_pubkeys_to_config_table()`
**After:**
- Same validation logic for both scenarios
- `validate_config_table_completeness()` handles both cases
### 4. Clear Separation of Concerns
**Before:**
- CLI overrides mixed with default population
- Unclear when overrides are applied
**After:**
- Phase 1: Complete config creation
- Phase 2: CLI overrides (if any)
- Phase 3: Cache initialization
---
## Implementation Changes Required
### 1. New Functions in config.c
```c
// Atomic config population for first-time startup
int populate_all_config_values_atomic(const cli_options_t* cli_options);
// Atomic CLI override application
int apply_cli_overrides_atomic(const cli_options_t* cli_options);
// Config validation for existing databases
int validate_config_table_completeness(void);
int populate_missing_config_values(void);
// Helper function
int has_cli_overrides(const cli_options_t* cli_options);
```
### 2. Modified Functions in config.c
```c
// Simplify to only generate keys and set database path
int first_time_startup_sequence(const cli_options_t* cli_options);
// Remove config population logic
int add_pubkeys_to_config_table(void); // DEPRECATED - logic moved to populate_all_config_values_atomic()
```
### 3. Modified Startup Flow in main.c
```c
// First-time startup
if (is_first_time_startup()) {
first_time_startup_sequence(&cli_options);
init_database(g_database_path);
populate_all_config_values_atomic(&cli_options); // NEW
apply_cli_overrides_atomic(&cli_options); // NEW
store_relay_private_key(relay_privkey);
refresh_unified_cache_from_table();
}
// Existing relay
else {
startup_existing_relay(relay_pubkey);
init_database(g_database_path);
validate_config_table_completeness(); // NEW
if (has_cli_overrides(&cli_options)) {
apply_cli_overrides_atomic(&cli_options); // NEW
}
refresh_unified_cache_from_table();
}
```
---
## Benefits
1. **Atomicity**: Config creation is atomic - no partial states
2. **Simplicity**: Clear phases with single responsibility
3. **Safety**: Cache only loaded after complete config exists
4. **Consistency**: Same validation logic for all scenarios
5. **Maintainability**: Easier to understand and modify
6. **Testability**: Each phase can be tested independently
---
## Migration Path
1. Implement new functions in config.c
2. Update main.c startup flow
3. Test first-time startup scenario
4. Test restart with CLI overrides
5. Test restart without CLI overrides
6. Remove deprecated functions
7. Update documentation
---
## Testing Strategy
### Test Cases
1. **First-time startup with defaults**
- Verify all config values created atomically
- Verify cache loads complete config
2. **First-time startup with port override**
- Verify defaults created first
- Verify port override applied second
- Verify cache reflects override
3. **Restart with complete config**
- Verify no config changes
- Verify cache loads immediately
4. **Restart with missing config keys**
- Verify missing keys populated
- Verify existing keys unchanged
5. **Restart with CLI overrides**
- Verify overrides applied atomically
- Verify cache invalidated and refreshed
### Validation Points
- Config table row count after each phase
- Cache validity state after each phase
- Transaction boundaries (BEGIN/COMMIT)
- Error handling for failed transactions
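A quick way to spot-check these points from the shell (a sketch; the log strings assume the `DEBUG_INFO` messages proposed in the implementation plan):
```bash
# Config table row count after each phase
sqlite3 <relay_pubkey>.db "SELECT COUNT(*) FROM config;"
# Transaction completion and override application, from the relay log
grep -E "populated all config values atomically|Applied CLI override" relay.log
```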


@@ -0,0 +1,746 @@
# Unified Startup Implementation Plan
## Overview
This document provides a detailed implementation plan for refactoring the startup sequence to use atomic config creation followed by CLI overrides. This plan breaks down the work into discrete, testable steps.
---
## Phase 1: Create New Functions in config.c
### Step 1.1: Implement `populate_all_config_values_atomic()`
**Location**: `src/config.c`
**Purpose**: Create complete config table in single transaction for first-time startup
**Function Signature**:
```c
int populate_all_config_values_atomic(const cli_options_t* cli_options);
```
**Implementation Details**:
```c
int populate_all_config_values_atomic(const cli_options_t* cli_options) {
if (!g_database) {
DEBUG_ERROR("Database not initialized");
return -1;
}
// Begin transaction
char* err_msg = NULL;
int rc = sqlite3_exec(g_database, "BEGIN TRANSACTION;", NULL, NULL, &err_msg);
if (rc != SQLITE_OK) {
DEBUG_ERROR("Failed to begin transaction: %s", err_msg);
sqlite3_free(err_msg);
return -1;
}
// Prepare INSERT statement
sqlite3_stmt* stmt = NULL;
const char* sql = "INSERT INTO config (key, value) VALUES (?, ?)";
rc = sqlite3_prepare_v2(g_database, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
DEBUG_ERROR("Failed to prepare statement: %s", sqlite3_errmsg(g_database));
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
// Insert all default config values
for (size_t i = 0; i < sizeof(DEFAULT_CONFIG_VALUES) / sizeof(DEFAULT_CONFIG_VALUES[0]); i++) {
sqlite3_reset(stmt);
sqlite3_bind_text(stmt, 1, DEFAULT_CONFIG_VALUES[i].key, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, DEFAULT_CONFIG_VALUES[i].value, -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
DEBUG_ERROR("Failed to insert config key '%s': %s",
DEFAULT_CONFIG_VALUES[i].key, sqlite3_errmsg(g_database));
sqlite3_finalize(stmt);
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
}
// Insert admin_pubkey from cache
sqlite3_reset(stmt);
sqlite3_bind_text(stmt, 1, "admin_pubkey", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, g_unified_cache.admin_pubkey, -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
DEBUG_ERROR("Failed to insert admin_pubkey: %s", sqlite3_errmsg(g_database));
sqlite3_finalize(stmt);
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
// Insert relay_pubkey from cache
sqlite3_reset(stmt);
sqlite3_bind_text(stmt, 1, "relay_pubkey", -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, g_unified_cache.relay_pubkey, -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
DEBUG_ERROR("Failed to insert relay_pubkey: %s", sqlite3_errmsg(g_database));
sqlite3_finalize(stmt);
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
sqlite3_finalize(stmt);
// Commit transaction
rc = sqlite3_exec(g_database, "COMMIT;", NULL, NULL, &err_msg);
if (rc != SQLITE_OK) {
DEBUG_ERROR("Failed to commit transaction: %s", err_msg);
sqlite3_free(err_msg);
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
DEBUG_INFO("Successfully populated all config values atomically");
return 0;
}
```
**Testing**:
- Verify transaction atomicity (all or nothing)
- Verify all DEFAULT_CONFIG_VALUES inserted
- Verify admin_pubkey and relay_pubkey inserted
- Verify error handling on failure
---
### Step 1.2: Implement `apply_cli_overrides_atomic()`
**Location**: `src/config.c`
**Purpose**: Apply CLI overrides to existing config table in single transaction
**Function Signature**:
```c
int apply_cli_overrides_atomic(const cli_options_t* cli_options);
```
**Implementation Details**:
```c
int apply_cli_overrides_atomic(const cli_options_t* cli_options) {
if (!g_database) {
DEBUG_ERROR("Database not initialized");
return -1;
}
if (!cli_options) {
DEBUG_ERROR("CLI options is NULL");
return -1;
}
// Check if any overrides exist
bool has_overrides = false;
if (cli_options->port_override > 0) has_overrides = true;
if (cli_options->admin_pubkey_override[0] != '\0') has_overrides = true;
if (cli_options->relay_privkey_override[0] != '\0') has_overrides = true;
if (!has_overrides) {
DEBUG_INFO("No CLI overrides to apply");
return 0;
}
// Begin transaction
char* err_msg = NULL;
int rc = sqlite3_exec(g_database, "BEGIN TRANSACTION;", NULL, NULL, &err_msg);
if (rc != SQLITE_OK) {
DEBUG_ERROR("Failed to begin transaction: %s", err_msg);
sqlite3_free(err_msg);
return -1;
}
// Prepare UPDATE statement
sqlite3_stmt* stmt = NULL;
const char* sql = "UPDATE config SET value = ? WHERE key = ?";
rc = sqlite3_prepare_v2(g_database, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
DEBUG_ERROR("Failed to prepare statement: %s", sqlite3_errmsg(g_database));
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
// Apply port override
if (cli_options->port_override > 0) {
char port_str[16];
snprintf(port_str, sizeof(port_str), "%d", cli_options->port_override);
sqlite3_reset(stmt);
sqlite3_bind_text(stmt, 1, port_str, -1, SQLITE_TRANSIENT);
sqlite3_bind_text(stmt, 2, "relay_port", -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
DEBUG_ERROR("Failed to update relay_port: %s", sqlite3_errmsg(g_database));
sqlite3_finalize(stmt);
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
DEBUG_INFO("Applied CLI override: relay_port = %s", port_str);
}
// Apply admin_pubkey override
if (cli_options->admin_pubkey_override[0] != '\0') {
sqlite3_reset(stmt);
sqlite3_bind_text(stmt, 1, cli_options->admin_pubkey_override, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, "admin_pubkey", -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
DEBUG_ERROR("Failed to update admin_pubkey: %s", sqlite3_errmsg(g_database));
sqlite3_finalize(stmt);
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
DEBUG_INFO("Applied CLI override: admin_pubkey");
}
// Apply relay_privkey override
if (cli_options->relay_privkey_override[0] != '\0') {
sqlite3_reset(stmt);
sqlite3_bind_text(stmt, 1, cli_options->relay_privkey_override, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, "relay_privkey", -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
if (rc != SQLITE_DONE) {
DEBUG_ERROR("Failed to update relay_privkey: %s", sqlite3_errmsg(g_database));
sqlite3_finalize(stmt);
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
DEBUG_INFO("Applied CLI override: relay_privkey");
}
sqlite3_finalize(stmt);
// Commit transaction
rc = sqlite3_exec(g_database, "COMMIT;", NULL, NULL, &err_msg);
if (rc != SQLITE_OK) {
DEBUG_ERROR("Failed to commit transaction: %s", err_msg);
sqlite3_free(err_msg);
sqlite3_exec(g_database, "ROLLBACK;", NULL, NULL, NULL);
return -1;
}
// Invalidate cache to force refresh
invalidate_config_cache();
DEBUG_INFO("Successfully applied CLI overrides atomically");
return 0;
}
```
**Testing**:
- Verify transaction atomicity
- Verify each override type (port, admin_pubkey, relay_privkey)
- Verify cache invalidation after overrides
- Verify no-op when no overrides present
---
### Step 1.3: Implement `validate_config_table_completeness()`
**Location**: `src/config.c`
**Purpose**: Validate config table has all required keys, populate missing ones
**Function Signature**:
```c
int validate_config_table_completeness(void);
```
**Implementation Details**:
```c
int validate_config_table_completeness(void) {
if (!g_database) {
DEBUG_ERROR("Database not initialized");
return -1;
}
DEBUG_INFO("Validating config table completeness");
// Check each default config key
for (size_t i = 0; i < sizeof(DEFAULT_CONFIG_VALUES) / sizeof(DEFAULT_CONFIG_VALUES[0]); i++) {
const char* key = DEFAULT_CONFIG_VALUES[i].key;
// Check if key exists
sqlite3_stmt* stmt = NULL;
const char* sql = "SELECT COUNT(*) FROM config WHERE key = ?";
int rc = sqlite3_prepare_v2(g_database, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
DEBUG_ERROR("Failed to prepare statement: %s", sqlite3_errmsg(g_database));
return -1;
}
sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
int count = 0;
if (rc == SQLITE_ROW) {
count = sqlite3_column_int(stmt, 0);
}
sqlite3_finalize(stmt);
// If key missing, populate it
if (count == 0) {
DEBUG_WARN("Config key '%s' missing, populating with default", key);
rc = populate_missing_config_key(key, DEFAULT_CONFIG_VALUES[i].value);
if (rc != 0) {
DEBUG_ERROR("Failed to populate missing key '%s'", key);
return -1;
}
}
}
DEBUG_INFO("Config table validation complete");
return 0;
}
```
**Helper Function**:
```c
static int populate_missing_config_key(const char* key, const char* value) {
sqlite3_stmt* stmt = NULL;
const char* sql = "INSERT INTO config (key, value) VALUES (?, ?)";
int rc = sqlite3_prepare_v2(g_database, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
DEBUG_ERROR("Failed to prepare statement: %s", sqlite3_errmsg(g_database));
return -1;
}
sqlite3_bind_text(stmt, 1, key, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, value, -1, SQLITE_STATIC);
rc = sqlite3_step(stmt);
sqlite3_finalize(stmt);
if (rc != SQLITE_DONE) {
DEBUG_ERROR("Failed to insert config key '%s': %s", key, sqlite3_errmsg(g_database));
return -1;
}
return 0;
}
```
**Testing**:
- Verify detection of missing keys
- Verify population of missing keys with defaults
- Verify no changes when all keys present
- Verify error handling
---
### Step 1.4: Implement `has_cli_overrides()`
**Location**: `src/config.c`
**Purpose**: Check if any CLI overrides are present
**Function Signature**:
```c
bool has_cli_overrides(const cli_options_t* cli_options);
```
**Implementation Details**:
```c
bool has_cli_overrides(const cli_options_t* cli_options) {
if (!cli_options) {
return false;
}
return (cli_options->port_override > 0 ||
cli_options->admin_pubkey_override[0] != '\0' ||
cli_options->relay_privkey_override[0] != '\0');
}
```
**Testing**:
- Verify returns true when any override present
- Verify returns false when no overrides
- Verify NULL safety
---
## Phase 2: Update Function Declarations in config.h
### Step 2.1: Add New Function Declarations
**Location**: `src/config.h`
**Changes**:
```c
// Add after existing function declarations
// Atomic config population for first-time startup
int populate_all_config_values_atomic(const cli_options_t* cli_options);
// Atomic CLI override application
int apply_cli_overrides_atomic(const cli_options_t* cli_options);
// Config validation for existing databases
int validate_config_table_completeness(void);
// Helper function to check for CLI overrides
bool has_cli_overrides(const cli_options_t* cli_options);
```
---
## Phase 3: Refactor Startup Flow in main.c
### Step 3.1: Update First-Time Startup Branch
**Location**: `src/main.c` (around lines 1624-1740)
**Current Code**:
```c
if (is_first_time_startup()) {
first_time_startup_sequence(&cli_options);
init_database(g_database_path);
// Current incremental approach
populate_default_config_values();
if (cli_options.port_override > 0) {
char port_str[16];
snprintf(port_str, sizeof(port_str), "%d", cli_options.port_override);
update_config_in_table("relay_port", port_str);
}
add_pubkeys_to_config_table();
store_relay_private_key(relay_privkey);
refresh_unified_cache_from_table();
}
```
**New Code**:
```c
if (is_first_time_startup()) {
// 1. Generate keys and set database path
first_time_startup_sequence(&cli_options);
// 2. Create database with schema
init_database(g_database_path);
// 3. Populate ALL config values atomically (defaults + pubkeys)
if (populate_all_config_values_atomic(&cli_options) != 0) {
DEBUG_ERROR("Failed to populate config values");
return EXIT_FAILURE;
}
// 4. Apply CLI overrides atomically (separate transaction)
if (apply_cli_overrides_atomic(&cli_options) != 0) {
DEBUG_ERROR("Failed to apply CLI overrides");
return EXIT_FAILURE;
}
// 5. Store relay private key securely
store_relay_private_key(relay_privkey);
// 6. Load complete config into cache
refresh_unified_cache_from_table();
}
```
**Testing**:
- Verify first-time startup creates complete config
- Verify CLI overrides applied correctly
- Verify cache loads complete config
- Verify error handling at each step
---
### Step 3.2: Update Existing Relay Startup Branch
**Location**: `src/main.c` (around lines 1741-1928)
**Current Code**:
```c
else {
char** existing_files = find_existing_db_files();
char* relay_pubkey = extract_pubkey_from_filename(existing_files[0]);
startup_existing_relay(relay_pubkey);
init_database(g_database_path);
// Current approach - unclear when overrides applied
populate_default_config_values();
if (cli_options.port_override > 0) {
// ... override logic ...
}
refresh_unified_cache_from_table();
}
```
**New Code**:
```c
else {
// 1. Discover existing database
char** existing_files = find_existing_db_files();
if (!existing_files || !existing_files[0]) {
DEBUG_ERROR("No existing database files found");
return EXIT_FAILURE;
}
char* relay_pubkey = extract_pubkey_from_filename(existing_files[0]);
startup_existing_relay(relay_pubkey);
// 2. Open existing database
init_database(g_database_path);
// 3. Validate config table completeness (populate missing keys)
if (validate_config_table_completeness() != 0) {
DEBUG_ERROR("Failed to validate config table");
return EXIT_FAILURE;
}
// 4. Apply CLI overrides if present (separate transaction)
if (has_cli_overrides(&cli_options)) {
if (apply_cli_overrides_atomic(&cli_options) != 0) {
DEBUG_ERROR("Failed to apply CLI overrides");
return EXIT_FAILURE;
}
}
// 5. Load complete config into cache
refresh_unified_cache_from_table();
}
```
**Testing**:
- Verify existing relay startup with complete config
- Verify missing keys populated
- Verify CLI overrides applied when present
- Verify no changes when no overrides
- Verify cache loads correctly
---
## Phase 4: Deprecate Old Functions
### Step 4.1: Mark Functions as Deprecated
**Location**: `src/config.c`
**Functions to Deprecate**:
1. `populate_default_config_values()` - replaced by `populate_all_config_values_atomic()`
2. `add_pubkeys_to_config_table()` - logic moved to `populate_all_config_values_atomic()`
**Changes**:
```c
// Mark as deprecated in comments
// DEPRECATED: Use populate_all_config_values_atomic() instead
// This function will be removed in a future version
int populate_default_config_values(void) {
// ... existing implementation ...
}
// DEPRECATED: Use populate_all_config_values_atomic() instead
// This function will be removed in a future version
int add_pubkeys_to_config_table(void) {
// ... existing implementation ...
}
```
---
## Phase 5: Testing Strategy
### Unit Tests
1. **Test `populate_all_config_values_atomic()`**
- Test with valid cli_options
- Test transaction rollback on error (see the sketch after this list)
- Test all config keys inserted
- Test pubkeys inserted correctly
2. **Test `apply_cli_overrides_atomic()`**
- Test port override
- Test admin_pubkey override
- Test relay_privkey override
- Test multiple overrides
- Test no overrides
- Test transaction rollback on error
3. **Test `validate_config_table_completeness()`**
- Test with complete config
- Test with missing keys
- Test population of missing keys
4. **Test `has_cli_overrides()`**
- Test with each override type
- Test with no overrides
- Test with NULL cli_options
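The rollback test referenced in item 1 can force a failure mid-transaction by pre-inserting one default key so the plain `INSERT` conflicts. A sketch, assuming `config.key` is UNIQUE/PRIMARY KEY, that `relay_port` is among `DEFAULT_CONFIG_VALUES`, and an `<assert.h>`-based harness:
```c
// Sketch: force populate_all_config_values_atomic() to fail mid-transaction
// and verify the rollback left the table untouched.
static void test_populate_rolls_back_on_conflict(void) {
    sqlite3_exec(g_database,
                 "INSERT INTO config (key, value) VALUES ('relay_port', '8888');",
                 NULL, NULL, NULL);
    cli_options_t opts = {0};
    assert(populate_all_config_values_atomic(&opts) == -1);
    // After rollback, only the pre-inserted row should remain
    sqlite3_stmt* stmt = NULL;
    sqlite3_prepare_v2(g_database, "SELECT COUNT(*) FROM config;", -1, &stmt, NULL);
    sqlite3_step(stmt);
    assert(sqlite3_column_int(stmt, 0) == 1);
    sqlite3_finalize(stmt);
}
```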
### Integration Tests
1. **First-Time Startup**
```bash
# Clean environment
rm -f *.db
# Start relay with defaults
./build/c_relay_x86
# Verify config table complete
sqlite3 <relay_pubkey>.db "SELECT COUNT(*) FROM config;"
# Expected: 20+ rows (all defaults + pubkeys)
# Verify cache loaded
# Check relay.log for cache refresh message
```
2. **First-Time Startup with CLI Overrides**
```bash
# Clean environment
rm -f *.db
# Start relay with port override
./build/c_relay_x86 --port 9999
# Verify port override applied
sqlite3 <relay_pubkey>.db "SELECT value FROM config WHERE key='relay_port';"
# Expected: 9999
```
3. **Restart with Existing Database**
```bash
# Start relay (creates database)
./build/c_relay_x86
# Stop relay
pkill -f c_relay_
# Restart relay
./build/c_relay_x86
# Verify config unchanged
# Check relay.log for validation message
```
4. **Restart with CLI Overrides**
```bash
# Start relay (creates database)
./build/c_relay_x86
# Stop relay
pkill -f c_relay_
# Restart with port override
./build/c_relay_x86 --port 9999
# Verify port override applied
sqlite3 <relay_pubkey>.db "SELECT value FROM config WHERE key='relay_port';"
# Expected: 9999
```
### Regression Tests
Run existing test suite to ensure no breakage:
```bash
./tests/run_all_tests.sh
```
---
## Phase 6: Documentation Updates
### Files to Update
1. **docs/configuration_guide.md**
- Update startup sequence description
- Document new atomic config creation
- Document CLI override behavior
2. **docs/startup_flows_complete.md**
- Update with new flow diagrams
- Document new function calls
3. **README.md**
- Update CLI options documentation
- Document override behavior
---
## Implementation Timeline
### Week 1: Core Functions
- Day 1-2: Implement `populate_all_config_values_atomic()`
- Day 3-4: Implement `apply_cli_overrides_atomic()`
- Day 5: Implement `validate_config_table_completeness()` and `has_cli_overrides()`
### Week 2: Integration
- Day 1-2: Update main.c startup flow
- Day 3-4: Testing and bug fixes
- Day 5: Documentation updates
### Week 3: Cleanup
- Day 1-2: Deprecate old functions
- Day 3-4: Final testing and validation
- Day 5: Code review and merge
---
## Risk Mitigation
### Potential Issues
1. **Database Lock Contention**
- Risk: Multiple transactions could cause locks
- Mitigation: Use BEGIN IMMEDIATE for write transactions (see the sketch after this list)
2. **Cache Invalidation Timing**
- Risk: Cache could be read before overrides applied
- Mitigation: Invalidate cache immediately after overrides
3. **Backward Compatibility**
- Risk: Existing databases might have incomplete config
- Mitigation: `validate_config_table_completeness()` handles this
4. **Transaction Rollback**
- Risk: Partial config on error
- Mitigation: All operations in transactions with proper rollback
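A minimal sketch of the BEGIN IMMEDIATE mitigation from item 1, using the same `g_database` handle as the rest of this plan (the 5-second busy timeout is illustrative):
```c
// Take the write lock up front so a concurrent reader cannot block the
// later COMMIT; pair with a busy timeout so contention retries briefly.
sqlite3_busy_timeout(g_database, 5000);
int rc = sqlite3_exec(g_database, "BEGIN IMMEDIATE;", NULL, NULL, NULL);
if (rc == SQLITE_BUSY) {
    DEBUG_ERROR("Another writer holds the database lock");
    return -1;
}
```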
---
## Success Criteria
1. ✅ All config values created atomically in first-time startup
2. ✅ CLI overrides applied in separate atomic transaction
3. ✅ Existing databases validated and missing keys populated
4. ✅ Cache only loaded after complete config exists
5. ✅ All existing tests pass
6. ✅ No race conditions in config creation
7. ✅ Clear separation between config creation and override phases
---
## Rollback Plan
If issues arise during implementation:
1. **Revert main.c changes** - restore original startup flow
2. **Keep new functions** - they can coexist with old code
3. **Add feature flag** - allow toggling between old and new behavior
4. **Gradual migration** - enable new behavior per scenario
```c
// Feature flag approach
#define USE_ATOMIC_CONFIG_CREATION 1
#if USE_ATOMIC_CONFIG_CREATION
// New atomic approach
populate_all_config_values_atomic(&cli_options);
apply_cli_overrides_atomic(&cli_options);
#else
// Old incremental approach
populate_default_config_values();
// ... existing code ...
#endif
```

docs/user_guide.md Normal file

@@ -0,0 +1,541 @@
# C Nostr Relay - User Guide
Complete guide for deploying, configuring, and managing the C Nostr Relay and its event-based configuration system.
## Table of Contents
- [Quick Start](#quick-start)
- [Installation](#installation)
- [Configuration Management](#configuration-management)
- [Web Admin Interface](#web-admin-interface)
- [Administration](#administration)
- [Monitoring](#monitoring)
- [Troubleshooting](#troubleshooting)
- [Advanced Usage](#advanced-usage)
## Quick Start
### 1. Build and Start
```bash
# Clone and build
git clone <repository-url>
cd c-relay
git submodule update --init --recursive
make
# Start relay (zero configuration needed)
./build/c_relay_x86
```
### 2. First Startup - Save Keys
The relay will display admin keys on first startup:
```
=================================================================
IMPORTANT: SAVE THIS ADMIN PRIVATE KEY SECURELY!
=================================================================
Admin Private Key: a018ecc259ff296ef7aaca6cdccbc52cf28104ac7a1f14c27b0b8232e5025ddc
Admin Public Key: 68394d08ab87f936a42ff2deb15a84fbdfbe0996ee0eb20cda064aae673285d1
=================================================================
```
⚠️ **CRITICAL**: Save the admin private key - it's needed for configuration updates and only shown once!
### 3. Connect Clients
Your relay is now available at:
- **WebSocket**: `ws://localhost:8888`
- **NIP-11 Info**: `http://localhost:8888` (with `Accept: application/nostr+json` header)
- **Web Admin Interface**: `http://localhost:8888/api/` (serves embedded admin interface)
## Installation
### System Requirements
- **Operating System**: Linux, macOS, or Windows (WSL)
- **RAM**: Minimum 512MB, recommended 2GB+
- **Disk**: 100MB for binary + database storage (grows with events)
- **Network**: Port 8888 (configurable via events)
### Dependencies
Install required libraries:
**Ubuntu/Debian:**
```bash
sudo apt update
sudo apt install build-essential git sqlite3 libsqlite3-dev libwebsockets-dev libssl-dev libsecp256k1-dev libcurl4-openssl-dev zlib1g-dev
```
**CentOS/RHEL:**
```bash
sudo yum install gcc git sqlite-devel libwebsockets-devel openssl-devel libsecp256k1-devel libcurl-devel zlib-devel
```
**macOS (Homebrew):**
```bash
brew install git sqlite libwebsockets openssl libsecp256k1 curl zlib
```
### Building from Source
```bash
# Clone repository
git clone <repository-url>
cd c-relay
# Initialize submodules
git submodule update --init --recursive
# Build
make clean && make
# Verify build
ls -la build/c_relay_x86
```
### Production Deployment
#### SystemD Service (Recommended)
```bash
# Install as system service
sudo systemd/install-service.sh
# Start service
sudo systemctl start c-relay
# Enable auto-start
sudo systemctl enable c-relay
# Check status
sudo systemctl status c-relay
```
#### Manual Deployment
```bash
# Create dedicated user
sudo useradd --system --home-dir /opt/c-relay --shell /bin/false c-relay
# Install binary
sudo mkdir -p /opt/c-relay
sudo cp build/c_relay_x86 /opt/c-relay/
sudo chown -R c-relay:c-relay /opt/c-relay
# Run as service user
sudo -u c-relay /opt/c-relay/c_relay_x86
```
## Configuration Management
### Event-Based Configuration System
Unlike traditional relays that use config files, this relay stores all configuration as **kind 33334 Nostr events** in the database. This provides:
- **Real-time updates**: Changes applied instantly without restart
- **Cryptographic security**: All config changes must be signed by admin
- **Audit trail**: Complete history of configuration changes
- **No file management**: No config files to manage or version control
### First-Time Configuration
On first startup, the relay:
1. **Generates keypairs**: Creates cryptographically secure admin and relay keys
2. **Creates database**: `<relay_pubkey>.nrdb` file with optimized schema
3. **Stores default config**: Creates initial kind 33334 event with sensible defaults
4. **Displays admin key**: Shows admin private key once for you to save
### Updating Configuration
To change relay configuration, create and send a signed kind 33334 event:
#### Using nostrtool (recommended)
```bash
# Install nostrtool
npm install -g nostrtool
# Update relay description
nostrtool event \
--kind 33334 \
--content "C Nostr Relay Configuration" \
--tag d <relay_pubkey> \
--tag relay_description "My Production Relay" \
--tag max_subscriptions_per_client 50 \
--private-key <admin_private_key> \
| nostrtool send ws://localhost:8888
```
#### Manual Event Creation
```json
{
"kind": 33334,
"content": "C Nostr Relay Configuration",
"tags": [
["d", "<relay_pubkey>"],
["relay_description", "My Production Relay"],
["max_subscriptions_per_client", "50"],
["pow_min_difficulty", "20"]
],
"created_at": 1699123456,
"pubkey": "<admin_pubkey>",
"id": "<computed_event_id>",
"sig": "<signature>"
}
```
Send this to your relay via WebSocket, and changes are applied immediately.
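For example, wrap the signed event in the NIP-01 `EVENT` envelope and pipe it through websocat (websocat is one option, not a project requirement; any WebSocket client works):
```bash
# <signed_config_event_json> is the fully signed event shown above
echo '["EVENT", <signed_config_event_json>]' | websocat ws://localhost:8888
```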
### Configuration Parameters
#### Basic Settings
| Parameter | Description | Default | Example |
|-----------|-------------|---------|---------|
| `relay_description` | Relay description for NIP-11 | "C Nostr Relay" | "My awesome relay" |
| `relay_contact` | Admin contact information | "" | "admin@example.com" |
| `relay_software` | Software identifier | "c-relay" | "c-relay v1.0" |
#### Client Limits
| Parameter | Description | Default | Range |
|-----------|-------------|---------|-------|
| `max_subscriptions_per_client` | Max subscriptions per client | "25" | 1-100 |
| `max_total_subscriptions` | Total relay subscription limit | "5000" | 100-50000 |
| `max_message_length` | Maximum message size (bytes) | "65536" | 1024-1048576 |
| `max_event_tags` | Maximum tags per event | "2000" | 10-10000 |
| `max_content_length` | Maximum event content length | "65536" | 1-1048576 |
#### Proof of Work (NIP-13)
| Parameter | Description | Default | Options |
|-----------|-------------|---------|---------|
| `pow_min_difficulty` | Minimum PoW difficulty | "0" | 0-40 |
| `pow_mode` | PoW validation mode | "optional" | "disabled", "optional", "required" |
#### Event Expiration (NIP-40)
| Parameter | Description | Default | Options |
|-----------|-------------|---------|---------|
| `nip40_expiration_enabled` | Enable expiration handling | "true" | "true", "false" |
| `nip40_expiration_strict` | Strict expiration mode | "false" | "true", "false" |
| `nip40_expiration_filter` | Filter expired events | "true" | "true", "false" |
| `nip40_expiration_grace_period` | Grace period (seconds) | "300" | 0-86400 |
## Web Admin Interface
The relay includes a built-in web-based administration interface that provides a user-friendly way to manage your relay without command-line tools.
### Accessing the Interface
1. **Open your browser** and navigate to: `http://localhost:8888/api/`
2. **Authenticate** using your Nostr identity (the admin interface uses NIP-42 authentication)
3. **Manage configuration** through the web interface
### Features
- **Real-time Configuration**: View and edit all relay settings
- **Database Statistics**: Monitor event counts, storage usage, and performance metrics
- **Auth Rules Management**: Configure whitelist/blacklist rules for pubkeys
- **Relay Connection Testing**: Verify WebSocket connectivity and NIP-11 information
- **Event-Based Updates**: All changes are applied as signed Nostr events
### Security Notes
- The web interface requires NIP-42 authentication with your admin pubkey
- All configuration changes are cryptographically signed
- The interface serves embedded static files (no external dependencies)
- CORS headers are included for proper browser operation
### Browser Compatibility
The admin interface works with modern browsers that support:
- WebSocket connections
- ES6 JavaScript features
- Modern CSS Grid and Flexbox layouts
## Administration
### Viewing Current Configuration
```bash
# Find your database
ls -la *.nrdb
# View configuration events
sqlite3 <relay_pubkey>.nrdb "SELECT created_at, tags FROM events WHERE kind = 33334 ORDER BY created_at DESC LIMIT 1;"
# View all configuration history
sqlite3 <relay_pubkey>.nrdb "SELECT datetime(created_at, 'unixepoch') as date, tags FROM events WHERE kind = 33334 ORDER BY created_at DESC;"
```
### Admin Key Management
#### Backup Admin Keys
```bash
# Create secure backup
echo "Admin Private Key: <your_admin_key>" > admin_keys_backup_$(date +%Y%m%d).txt
chmod 600 admin_keys_backup_*.txt
# Store in secure location (password manager, encrypted drive, etc.)
```
#### Key Recovery
If you lose your admin private key:
1. **Stop the relay**: `pkill c_relay` or `sudo systemctl stop c-relay`
2. **Backup events**: `cp <relay_pubkey>.nrdb backup_$(date +%Y%m%d).nrdb`
3. **Remove database**: `rm <relay_pubkey>.nrdb*`
4. **Restart relay**: This creates new database with new keys
5. **⚠️ Note**: All stored events and configuration history will be lost
### Security Best Practices
#### Admin Key Security
- **Never share** the admin private key
- **Store securely** in password manager or encrypted storage
- **Backup safely** to multiple secure locations
- **Monitor** configuration changes in logs
#### Network Security
```bash
# Restrict access with firewall
sudo ufw allow 8888/tcp
# Use reverse proxy for HTTPS (recommended)
# Configure nginx/apache to proxy to ws://localhost:8888 (see the sketch below)
```
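A minimal nginx sketch for that reverse proxy (the domain and certificate paths are examples; the cert paths assume certbot defaults):
```nginx
server {
    listen 443 ssl;
    server_name relay.example.com;
    ssl_certificate     /etc/letsencrypt/live/relay.example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/relay.example.com/privkey.pem;
    location / {
        proxy_pass http://127.0.0.1:8888;
        proxy_http_version 1.1;
        # Forward the WebSocket upgrade handshake
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
    }
}
```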
#### Database Security
```bash
# Secure database file permissions
chmod 600 <relay_pubkey>.nrdb
chown c-relay:c-relay <relay_pubkey>.nrdb
# Regular backups
cp <relay_pubkey>.nrdb backup/relay_backup_$(date +%Y%m%d_%H%M%S).nrdb
```
## Monitoring
### Service Status
```bash
# Check if relay is running
ps aux | grep c_relay
# SystemD status
sudo systemctl status c-relay
# Network connections
netstat -tln | grep 8888
sudo ss -tlpn | grep 8888
```
### Log Monitoring
```bash
# Real-time logs (systemd)
sudo journalctl -u c-relay -f
# Recent logs
sudo journalctl -u c-relay --since "1 hour ago"
# Error logs only
sudo journalctl -u c-relay -p err
# Configuration changes
sudo journalctl -u c-relay | grep "Configuration updated via kind 33334"
```
### Database Analytics
```bash
# Connect to database
sqlite3 <relay_pubkey>.nrdb
# Event statistics
SELECT kind, COUNT(*) as count FROM events GROUP BY kind;
# Recent activity
SELECT datetime(created_at, 'unixepoch') as date, kind, LENGTH(content) as content_size
FROM events
ORDER BY created_at DESC
LIMIT 10;
# Subscription analytics (if logging enabled)
SELECT * FROM subscription_analytics ORDER BY date DESC LIMIT 7;
# Configuration changes
SELECT datetime(created_at, 'unixepoch') as date, tags
FROM configuration_events
ORDER BY created_at DESC;
```
### Performance Monitoring
```bash
# Database size
du -sh <relay_pubkey>.nrdb*
# Memory usage
ps aux | grep c_relay | awk '{print $6}' # RSS memory in KB
# Connection count (approximate)
netstat -an | grep :8888 | grep ESTABLISHED | wc -l
# System resources
top -p $(pgrep c_relay)
```
## Troubleshooting
### Common Issues
#### Relay Won't Start
```bash
# Check port availability
netstat -tln | grep 8888
# If port in use, find process: sudo lsof -i :8888
# Check binary permissions
ls -la build/c_relay_x86
chmod +x build/c_relay_x86
# Check dependencies
ldd build/c_relay_x86
```
#### Configuration Not Updating
1. **Verify signature**: Ensure event is properly signed with admin private key
2. **Check admin pubkey**: Must match the pubkey from first startup
3. **Validate event structure**: Use `nostrtool validate` or similar
4. **Check logs**: Look for validation errors in relay logs
5. **Test WebSocket**: Ensure WebSocket connection is active
```bash
# Test WebSocket connection
wscat -c ws://localhost:8888
# Send a test subscription (NIP-01 REQ format)
["REQ","test",{}]
```
#### Database Issues
```bash
# Check database integrity
sqlite3 <relay_pubkey>.nrdb "PRAGMA integrity_check;"
# Check schema version
sqlite3 <relay_pubkey>.nrdb "SELECT * FROM schema_info WHERE key = 'version';"
# View database size and stats
sqlite3 <relay_pubkey>.nrdb "PRAGMA page_size; PRAGMA page_count;"
```
#### Performance Issues
```bash
# Analyze slow queries (if any)
sqlite3 <relay_pubkey>.nrdb "PRAGMA compile_options;"
# Check database optimization
sqlite3 <relay_pubkey>.nrdb "PRAGMA optimize;"
# Monitor system resources
iostat 1 5 # I/O statistics
free -h # Memory usage
```
### Recovery Procedures
#### Corrupted Database Recovery
```bash
# Attempt repair
sqlite3 <relay_pubkey>.nrdb ".recover" > recovered.sql
sqlite3 recovered.nrdb < recovered.sql
# If repair fails, start fresh (loses all events)
mv <relay_pubkey>.nrdb <relay_pubkey>.nrdb.corrupted
./build/c_relay_x86 # Creates new database
```
#### Lost Configuration Recovery
If configuration is lost but database is intact:
1. **Find old config**: `sqlite3 <relay_pubkey>.nrdb "SELECT * FROM configuration_events;"`
2. **Create new config event**: Use last known good configuration
3. **Sign and send**: Update with current timestamp and new signature
#### Emergency Restart
```bash
# Quick restart with clean state
sudo systemctl stop c-relay
mv <relay_pubkey>.nrdb <relay_pubkey>.nrdb.backup
sudo systemctl start c-relay
# Check logs for new admin keys
sudo journalctl -u c-relay --since "5 minutes ago" | grep "Admin Private Key"
```
## Advanced Usage
### Custom Event Handlers
The relay supports custom handling for different event types. Configuration changes trigger:
- **Subscription Manager Updates**: When client limits change
- **PoW System Reinitialization**: When PoW settings change
- **Expiration System Updates**: When NIP-40 settings change
- **Relay Info Updates**: When NIP-11 information changes
### API Integration
```javascript
// Connect and send configuration update (Node.js, using the 'ws' package)
const WebSocket = require('ws');
const ws = new WebSocket('ws://localhost:8888');
ws.on('open', function() {
const configEvent = {
kind: 33334,
content: "Updated configuration",
tags: [
["d", relayPubkey],
["relay_description", "Updated via API"]
],
created_at: Math.floor(Date.now() / 1000),
pubkey: adminPubkey,
// ... add id and sig
};
ws.send(JSON.stringify(["EVENT", configEvent]));
});
```
### Backup Strategies
#### Automated Backup
```bash
#!/bin/bash
# backup-relay.sh
DATE=$(date +%Y%m%d_%H%M%S)
DB_FILE=$(ls *.nrdb | head -1)
BACKUP_DIR="/backup/c-relay"
mkdir -p "$BACKUP_DIR"
cp "$DB_FILE" "$BACKUP_DIR/relay_backup_$DATE.nrdb"
gzip "$BACKUP_DIR/relay_backup_$DATE.nrdb"
# Cleanup old backups (keep 30 days)
find "$BACKUP_DIR" -name "relay_backup_*.nrdb.gz" -mtime +30 -delete
```
#### Configuration Export
```bash
# Export configuration events
sqlite3 <relay_pubkey>.nrdb "SELECT json_object(
'kind', kind,
'content', content,
'tags', json(tags),
'created_at', created_at,
'pubkey', pubkey,
'sig', sig
) FROM events WHERE kind = 33334 ORDER BY created_at;" > config_backup.json
```
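To sanity-check the export (assumes jq is installed; the export is one JSON object per line, so `-s` slurps them into an array):
```bash
jq -s 'length' config_backup.json   # number of exported config events
jq -s '.[-1]' config_backup.json    # newest configuration event
```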
### Migration Between Servers
```bash
# Source server
tar czf relay_migration.tar.gz *.nrdb* relay.log
# Target server
tar xzf relay_migration.tar.gz
./build/c_relay_x86 # Will detect existing database and continue
```
---
This user guide provides comprehensive coverage of the C Nostr Relay's event-based configuration system. For additional technical details, see the developer documentation in the `docs/` directory.


@@ -0,0 +1,200 @@
# WebSocket Write Queue Design
## Problem Statement
The current partial write handling implementation uses a single buffer per session, which fails when multiple events need to be sent to the same client in rapid succession. This causes:
1. First event gets partial write → queued successfully
2. Second event tries to write → **FAILS** with "write already pending"
3. Subsequent events fail similarly, causing data loss
### Server Log Evidence
```
[WARN] WS_FRAME_PARTIAL: EVENT partial write, sub=1 sent=3210 expected=5333
[TRACE] Queued partial write: len=2123
[WARN] WS_FRAME_PARTIAL: EVENT partial write, sub=1 sent=3210 expected=5333
[WARN] queue_websocket_write: write already pending, cannot queue new write
[ERROR] Failed to queue partial EVENT write for sub=1
```
## Root Cause
WebSocket frames must be sent **atomically** - you cannot interleave multiple frames. The current single-buffer approach correctly enforces this, but it rejects new writes instead of queuing them.
## Solution: Write Queue Architecture
### Design Principles
1. **Frame Atomicity**: Complete one WebSocket frame before starting the next
2. **Sequential Processing**: Process queued writes in FIFO order
3. **Memory Safety**: Proper cleanup on connection close or errors
4. **Thread Safety**: Protect queue operations with existing session lock
### Data Structures
#### Write Queue Node
```c
struct write_queue_node {
unsigned char* buffer; // Buffer with LWS_PRE space
size_t total_len; // Total length of data to write
size_t offset; // How much has been written so far
int write_type; // LWS_WRITE_TEXT, etc.
struct write_queue_node* next; // Next node in queue
};
```
#### Per-Session Write Queue
```c
struct per_session_data {
// ... existing fields ...
// Write queue for handling multiple pending writes
struct write_queue_node* write_queue_head; // First item to write
struct write_queue_node* write_queue_tail; // Last item in queue
int write_queue_length; // Number of items in queue
int write_in_progress; // Flag: 1 if currently writing
};
```
### Algorithm Flow
#### 1. Enqueue Write (`queue_websocket_write`)
```
IF write_queue is empty AND no write in progress:
- Attempt immediate write with lws_write()
- IF complete:
- Return success
- ELSE (partial write):
- Create queue node with remaining data
- Add to queue
- Set write_in_progress flag
- Request LWS_CALLBACK_SERVER_WRITEABLE
ELSE:
- Create queue node with full data
- Append to queue tail
- IF no write in progress:
- Request LWS_CALLBACK_SERVER_WRITEABLE
```
#### 2. Process Queue (`process_pending_write`)
```
WHILE write_queue is not empty:
- Get head node
- Calculate remaining data (total_len - offset)
- Attempt write with lws_write()
IF write fails (< 0):
- Log error
- Remove and free head node
- Continue to next node
ELSE IF partial write (< remaining):
- Update offset
- Request LWS_CALLBACK_SERVER_WRITEABLE
- Break (wait for next callback)
ELSE (complete write):
- Remove and free head node
- Continue to next node
IF queue is empty:
- Clear write_in_progress flag
```
#### 3. Cleanup (`LWS_CALLBACK_CLOSED`)
```
WHILE write_queue is not empty:
- Get head node
- Free buffer
- Free node
- Move to next
Clear queue pointers
```
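The enqueue path in C might look like the following. This is a minimal sketch using the structures and `session_lock` proposed above; `MAX_WRITE_QUEUE` and the lock type (`pthread_mutex_t`) are assumptions, and for clarity it always queues and defers to the writable callback rather than taking the immediate-write fast path from the pseudocode.
```c
#define MAX_WRITE_QUEUE 100  // assumed backpressure limit

// Sketch only: copies the payload into a node with LWS_PRE headroom,
// appends it FIFO, and asks libwebsockets for a writable callback.
static int enqueue_write(struct lws* wsi, struct per_session_data* pss,
                         const unsigned char* data, size_t len, int type) {
    pthread_mutex_lock(&pss->session_lock);
    if (pss->write_queue_length >= MAX_WRITE_QUEUE) {
        pthread_mutex_unlock(&pss->session_lock);
        return -1;  // caller sends a NOTICE and closes the connection
    }
    struct write_queue_node* node = calloc(1, sizeof(*node));
    if (!node) {
        pthread_mutex_unlock(&pss->session_lock);
        return -1;
    }
    node->buffer = malloc(LWS_PRE + len);
    if (!node->buffer) {
        free(node);
        pthread_mutex_unlock(&pss->session_lock);
        return -1;
    }
    memcpy(node->buffer + LWS_PRE, data, len);
    node->total_len = len;
    node->write_type = type;
    // Append at the tail to preserve FIFO order
    if (pss->write_queue_tail) {
        pss->write_queue_tail->next = node;
    } else {
        pss->write_queue_head = node;
    }
    pss->write_queue_tail = node;
    pss->write_queue_length++;
    pthread_mutex_unlock(&pss->session_lock);
    lws_callback_on_writable(wsi);  // queue is drained in the writable callback
    return 0;
}
```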
### Memory Management
1. **Allocation**: Each queue node allocates buffer with `LWS_PRE + data_len`
2. **Ownership**: Queue owns all buffers until write completes or connection closes
3. **Deallocation**: Free buffer and node when:
- Write completes successfully
- Write fails with error
- Connection closes
### Thread Safety
- Use existing `pss->session_lock` to protect queue operations
- Lock during:
- Enqueue operations
- Dequeue operations
- Queue traversal for cleanup
### Performance Considerations
1. **Queue Length Limit**: Implement max queue length (e.g., 100 items) to prevent memory exhaustion
2. **Memory Pressure**: Monitor total queued bytes per session
3. **Backpressure**: If queue exceeds limit, close connection with NOTICE
### Error Handling
1. **Allocation Failure**: Return error, log, send NOTICE to client
2. **Write Failure**: Remove failed frame, continue with next
3. **Queue Overflow**: Close connection with appropriate NOTICE
## Implementation Plan
### Phase 1: Data Structure Changes
1. Add `write_queue_node` structure to `websockets.h`
2. Update `per_session_data` with queue fields
3. Remove old single-buffer fields
### Phase 2: Queue Operations
1. Implement `enqueue_write()` helper
2. Implement `dequeue_write()` helper
3. Update `queue_websocket_write()` to use queue
4. Update `process_pending_write()` to process queue
### Phase 3: Integration
1. Update all `lws_write()` call sites
2. Update `LWS_CALLBACK_CLOSED` cleanup
3. Add queue length monitoring
### Phase 4: Testing
1. Test with rapid multiple events to same client
2. Test with large events (>4KB)
3. Test under load with concurrent connections
4. Verify no "Invalid frame header" errors
## Expected Outcomes
1. **No More Rejections**: All writes queued successfully
2. **Frame Integrity**: Complete frames sent atomically
3. **Memory Safety**: Proper cleanup on all paths
4. **Performance**: Minimal overhead for queue management
## Metrics to Monitor
1. Average queue length per session
2. Maximum queue length observed
3. Queue overflow events (if limit implemented)
4. Write completion rate
5. Partial write frequency
## Alternative Approaches Considered
### 1. Larger Single Buffer
**Rejected**: Doesn't solve the fundamental problem of multiple concurrent writes
### 2. Immediate Write Retry
**Rejected**: Could cause busy-waiting and CPU waste
### 3. Drop Frames on Conflict
**Rejected**: Violates reliability requirements
## References
- libwebsockets documentation on `lws_write()` and `LWS_CALLBACK_SERVER_WRITEABLE`
- WebSocket RFC 6455 on frame structure
- Nostr NIP-01 on relay-to-client communication

embed_web_files.sh Executable file

@@ -0,0 +1,128 @@
#!/bin/bash
# Script to embed web files into C headers for the C-Relay admin interface
# Converts HTML, CSS, and JS files from api/ directory into C byte arrays
set -e
echo "Embedding web files into C headers..."
# Output directory for generated headers
OUTPUT_DIR="src"
mkdir -p "$OUTPUT_DIR"
# Function to convert a file to C byte array
file_to_c_array() {
local input_file="$1"
local array_name="$2"
local output_file="$3"
# Get file size
local file_size=$(stat -c%s "$input_file" 2>/dev/null || stat -f%z "$input_file" 2>/dev/null || echo "0")
echo "// Auto-generated from $input_file" >> "$output_file"
echo "static const unsigned char ${array_name}_data[] = {" >> "$output_file"
# Convert file to hex bytes
hexdump -v -e '1/1 "0x%02x,"' "$input_file" >> "$output_file"
echo "};" >> "$output_file"
echo "static const size_t ${array_name}_size = $file_size;" >> "$output_file"
echo "" >> "$output_file"
}
# Generate the header file
HEADER_FILE="$OUTPUT_DIR/embedded_web_content.h"
echo "// Auto-generated embedded web content header" > "$HEADER_FILE"
echo "// Do not edit manually - generated by embed_web_files.sh" >> "$HEADER_FILE"
echo "" >> "$HEADER_FILE"
echo "#ifndef EMBEDDED_WEB_CONTENT_H" >> "$HEADER_FILE"
echo "#define EMBEDDED_WEB_CONTENT_H" >> "$HEADER_FILE"
echo "" >> "$HEADER_FILE"
echo "#include <stddef.h>" >> "$HEADER_FILE"
echo "" >> "$HEADER_FILE"
# Generate the C file
SOURCE_FILE="$OUTPUT_DIR/embedded_web_content.c"
echo "// Auto-generated embedded web content" > "$SOURCE_FILE"
echo "// Do not edit manually - generated by embed_web_files.sh" >> "$SOURCE_FILE"
echo "" >> "$SOURCE_FILE"
echo "#include \"embedded_web_content.h\"" >> "$SOURCE_FILE"
echo "#include <string.h>" >> "$SOURCE_FILE"
echo "" >> "$SOURCE_FILE"
# Process each web file
declare -A file_map
# Find all web files
for file in api/*.html api/*.css api/*.js; do
if [ -f "$file" ]; then
# Get filename without path
basename=$(basename "$file")
# Create C identifier from filename
c_name=$(echo "$basename" | sed 's/[^a-zA-Z0-9_]/_/g' | sed 's/^_//')
# Determine content type
case "$file" in
*.html) content_type="text/html" ;;
*.css) content_type="text/css" ;;
*.js) content_type="application/javascript" ;;
*) content_type="text/plain" ;;
esac
echo "Processing $file -> ${c_name}"
# No extern declarations needed - data is accessed through get_embedded_file()
# Add to source
file_to_c_array "$file" "$c_name" "$SOURCE_FILE"
# Store mapping for lookup function
file_map["/$basename"]="$c_name:$content_type"
if [ "$basename" = "index.html" ]; then
file_map["/"]="$c_name:$content_type"
fi
fi
done
# Generate lookup function
echo "// Embedded file lookup function" >> "$HEADER_FILE"
echo "typedef struct {" >> "$HEADER_FILE"
echo " const char *path;" >> "$HEADER_FILE"
echo " const unsigned char *data;" >> "$HEADER_FILE"
echo " size_t size;" >> "$HEADER_FILE"
echo " const char *content_type;" >> "$HEADER_FILE"
echo "} embedded_file_t;" >> "$HEADER_FILE"
echo "" >> "$HEADER_FILE"
echo "embedded_file_t *get_embedded_file(const char *path);" >> "$HEADER_FILE"
echo "" >> "$HEADER_FILE"
echo "#endif // EMBEDDED_WEB_CONTENT_H" >> "$HEADER_FILE"
# Generate lookup function implementation
echo "// File mapping" >> "$SOURCE_FILE"
echo "static embedded_file_t embedded_files[] = {" >> "$SOURCE_FILE"
for path in "${!file_map[@]}"; do
entry="${file_map[$path]}"
c_name="${entry%:*}"
content_type="${entry#*:}"
echo " {\"$path\", ${c_name}_data, ${c_name}_size, \"$content_type\"}," >> "$SOURCE_FILE"
done
echo " {NULL, NULL, 0, NULL} // Sentinel" >> "$SOURCE_FILE"
echo "};" >> "$SOURCE_FILE"
echo "" >> "$SOURCE_FILE"
echo "embedded_file_t *get_embedded_file(const char *path) {" >> "$SOURCE_FILE"
echo " for (int i = 0; embedded_files[i].path != NULL; i++) {" >> "$SOURCE_FILE"
echo " if (strcmp(path, embedded_files[i].path) == 0) {" >> "$SOURCE_FILE"
echo " return &embedded_files[i];" >> "$SOURCE_FILE"
echo " }" >> "$SOURCE_FILE"
echo " }" >> "$SOURCE_FILE"
echo " return NULL;" >> "$SOURCE_FILE"
echo "}" >> "$SOURCE_FILE"
echo "Web file embedding complete. Generated:" >&2
echo " $HEADER_FILE" >&2
echo " $SOURCE_FILE" >&2


@@ -0,0 +1,70 @@
# Deployment Examples
This directory contains practical deployment examples and scripts for the C Nostr Relay with event-based configuration.
## Directory Structure
```
examples/deployment/
├── README.md # This file
├── simple-vps/ # Basic VPS deployment
├── nginx-proxy/ # Nginx reverse proxy configurations
├── monitoring/ # Monitoring and alerting examples
└── backup/ # Backup and recovery scripts
```
## Quick Start Examples
### 1. Simple VPS Deployment
For a basic Ubuntu VPS deployment:
```bash
cd examples/deployment/simple-vps
chmod +x deploy.sh
sudo ./deploy.sh
```
### 2. SSL Proxy Setup
For nginx reverse proxy with SSL:
```bash
cd examples/deployment/nginx-proxy
chmod +x setup-ssl-proxy.sh
sudo ./setup-ssl-proxy.sh -d relay.example.com -e admin@example.com
```
### 3. Monitoring Setup
For continuous monitoring:
```bash
cd examples/deployment/monitoring
chmod +x monitor-relay.sh
sudo ./monitor-relay.sh -c -e admin@example.com
```
### 4. Backup Setup
For automated backups:
```bash
cd examples/deployment/backup
chmod +x backup-relay.sh
sudo ./backup-relay.sh -s my-backup-bucket -e admin@example.com
```
## Configuration Examples
All examples assume the event-based configuration system where:
- No config files are needed
- Configuration is stored as kind 33334 events in the database
- Admin keys are generated on first startup
- Database naming uses relay pubkey (`<relay_pubkey>.nrdb`)
## Security Notes
- **Save Admin Keys**: All deployment examples emphasize capturing the admin private key on first startup
- **Firewall Configuration**: Examples include proper firewall rules
- **SSL/TLS**: Production examples include HTTPS configuration
- **User Isolation**: Service runs as dedicated `c-relay` system user
## Support
For detailed documentation, see:
- [`docs/deployment_guide.md`](../../docs/deployment_guide.md) - Comprehensive deployment guide
- [`docs/user_guide.md`](../../docs/user_guide.md) - User guide
- [`docs/configuration_guide.md`](../../docs/configuration_guide.md) - Configuration reference


@@ -0,0 +1,367 @@
#!/bin/bash
# C Nostr Relay - Backup Script
# Automated backup solution for event-based configuration relay
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Default configuration
RELAY_DIR="/opt/c-relay"
BACKUP_DIR="/backup/c-relay"
RETENTION_DAYS="30"
COMPRESS="true"
REMOTE_BACKUP=""
S3_BUCKET=""
NOTIFICATION_EMAIL=""
LOG_FILE="/var/log/relay-backup.log"
# Functions
print_step() {
echo -e "${BLUE}[STEP]${NC} $1"
echo "$(date '+%Y-%m-%d %H:%M:%S') [STEP] $1" >> "$LOG_FILE"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
echo "$(date '+%Y-%m-%d %H:%M:%S') [SUCCESS] $1" >> "$LOG_FILE"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
echo "$(date '+%Y-%m-%d %H:%M:%S') [WARNING] $1" >> "$LOG_FILE"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
echo "$(date '+%Y-%m-%d %H:%M:%S') [ERROR] $1" >> "$LOG_FILE"
}
show_help() {
echo "Usage: $0 [OPTIONS]"
echo
echo "Options:"
echo " -d, --relay-dir DIR Relay directory (default: /opt/c-relay)"
echo " -b, --backup-dir DIR Backup directory (default: /backup/c-relay)"
echo " -r, --retention DAYS Retention period in days (default: 30)"
echo " -n, --no-compress Don't compress backups"
echo " -s, --s3-bucket BUCKET Upload to S3 bucket"
echo " -e, --email EMAIL Send notification email"
echo " -v, --verify Verify backup integrity"
echo " -h, --help Show this help message"
echo
echo "Examples:"
echo " $0 # Basic backup"
echo " $0 -s my-backup-bucket -e admin@example.com"
echo " $0 -r 7 -n # 7-day retention, no compression"
}
parse_args() {
while [[ $# -gt 0 ]]; do
case $1 in
-d|--relay-dir)
RELAY_DIR="$2"
shift 2
;;
-b|--backup-dir)
BACKUP_DIR="$2"
shift 2
;;
-r|--retention)
RETENTION_DAYS="$2"
shift 2
;;
-n|--no-compress)
COMPRESS="false"
shift
;;
-s|--s3-bucket)
S3_BUCKET="$2"
shift 2
;;
-e|--email)
NOTIFICATION_EMAIL="$2"
shift 2
;;
-v|--verify)
VERIFY="true"
shift
;;
-h|--help)
show_help
exit 0
;;
*)
print_error "Unknown option: $1"
show_help
exit 1
;;
esac
done
}
check_dependencies() {
print_step "Checking dependencies..."
# Check sqlite3
if ! command -v sqlite3 &> /dev/null; then
print_error "sqlite3 not found. Install with: apt install sqlite3"
exit 1
fi
# Check compression tools
if [[ "$COMPRESS" == "true" ]]; then
if ! command -v gzip &> /dev/null; then
print_error "gzip not found for compression"
exit 1
fi
fi
# Check S3 tools if needed
if [[ -n "$S3_BUCKET" ]]; then
if ! command -v aws &> /dev/null; then
print_error "AWS CLI not found. Install with: apt install awscli"
exit 1
fi
fi
print_success "Dependencies verified"
}
find_database() {
print_step "Finding relay database..."
# Look for .nrdb files in relay directory
# mapfile keeps paths with spaces intact (array word-splitting does not)
mapfile -t DB_FILES < <(find "$RELAY_DIR" -name "*.nrdb" 2>/dev/null)
if [[ ${#DB_FILES[@]} -eq 0 ]]; then
print_error "No relay database files found in $RELAY_DIR"
exit 1
elif [[ ${#DB_FILES[@]} -gt 1 ]]; then
print_warning "Multiple database files found:"
printf '%s\n' "${DB_FILES[@]}"
print_warning "Using the first one: ${DB_FILES[0]}"
fi
DB_FILE="${DB_FILES[0]}"
DB_NAME=$(basename "$DB_FILE")
print_success "Found database: $DB_FILE"
}
create_backup_directory() {
print_step "Creating backup directory..."
if [[ ! -d "$BACKUP_DIR" ]]; then
mkdir -p "$BACKUP_DIR"
chmod 700 "$BACKUP_DIR"
print_success "Created backup directory: $BACKUP_DIR"
else
print_success "Using existing backup directory: $BACKUP_DIR"
fi
}
perform_backup() {
local timestamp=$(date +%Y%m%d_%H%M%S)
local backup_name="relay_backup_${timestamp}"
local backup_file="$BACKUP_DIR/${backup_name}.nrdb"
print_step "Creating database backup..."
# Check if database is accessible
if [[ ! -r "$DB_FILE" ]]; then
print_error "Cannot read database file: $DB_FILE"
exit 1
fi
# Get database size
local db_size=$(du -h "$DB_FILE" | cut -f1)
print_step "Database size: $db_size"
# Create SQLite backup using .backup command (hot backup)
if sqlite3 "$DB_FILE" ".backup $backup_file" 2>/dev/null; then
print_success "Database backup created: $backup_file"
else
# Fallback to file copy if .backup fails
print_warning "SQLite backup failed, using file copy method"
cp "$DB_FILE" "$backup_file"
print_success "File copy backup created: $backup_file"
fi
# Verify backup file
if [[ ! -f "$backup_file" ]]; then
print_error "Backup file was not created"
exit 1
fi
# Check backup integrity
if [[ "$VERIFY" == "true" ]]; then
print_step "Verifying backup integrity..."
if sqlite3 "$backup_file" "PRAGMA integrity_check;" | grep -q "ok"; then
print_success "Backup integrity verified"
else
print_error "Backup integrity check failed"
exit 1
fi
fi
# Compress backup
if [[ "$COMPRESS" == "true" ]]; then
print_step "Compressing backup..."
gzip "$backup_file"
backup_file="${backup_file}.gz"
print_success "Backup compressed: $backup_file"
fi
# Set backup file as global variable for other functions
BACKUP_FILE="$backup_file"
BACKUP_NAME="$backup_name"
}
upload_to_s3() {
if [[ -z "$S3_BUCKET" ]]; then
return 0
fi
print_step "Uploading backup to S3..."
local s3_path="s3://$S3_BUCKET/c-relay/$(date +%Y)/$(date +%m)/"
if aws s3 cp "$BACKUP_FILE" "$s3_path" --storage-class STANDARD_IA; then
print_success "Backup uploaded to S3: $s3_path"
else
print_error "Failed to upload backup to S3"
return 1
fi
}
cleanup_old_backups() {
print_step "Cleaning up old backups..."
local deleted_count=0
# Clean local backups
while IFS= read -r -d '' file; do
rm "$file"
deleted_count=$((deleted_count + 1))  # ((var++)) returns non-zero when var is 0, which would trip set -e
done < <(find "$BACKUP_DIR" -name "relay_backup_*.nrdb*" -mtime "+$RETENTION_DAYS" -print0 2>/dev/null)
if [[ $deleted_count -gt 0 ]]; then
print_success "Deleted $deleted_count old local backups"
else
print_success "No old local backups to delete"
fi
# Clean S3 backups if configured
if [[ -n "$S3_BUCKET" ]]; then
local cutoff_date=$(date -d "$RETENTION_DAYS days ago" +%Y-%m-%d)
print_step "Cleaning S3 backups older than $cutoff_date..."
# Note: This is a simplified approach. In production, use S3 lifecycle policies
aws s3 ls "s3://$S3_BUCKET/c-relay/" --recursive | \
awk '$1 < "'$cutoff_date'" {print $4}' | \
while read -r key; do
aws s3 rm "s3://$S3_BUCKET/$key"
print_step "Deleted S3 backup: $key"
done
fi
}
send_notification() {
if [[ -z "$NOTIFICATION_EMAIL" ]]; then
return 0
fi
print_step "Sending notification email..."
local subject="C Nostr Relay Backup - $(date +%Y-%m-%d)"
local backup_size=$(du -h "$BACKUP_FILE" | cut -f1)
local message="Backup completed successfully.
Details:
- Date: $(date)
- Database: $DB_FILE
- Backup File: $BACKUP_FILE
- Backup Size: $backup_size
- Retention: $RETENTION_DAYS days
"
if [[ -n "$S3_BUCKET" ]]; then
message+="\n- S3 Bucket: $S3_BUCKET"
fi
# Try to send email using mail command
if command -v mail &> /dev/null; then
echo -e "$message" | mail -s "$subject" "$NOTIFICATION_EMAIL"
print_success "Notification sent to $NOTIFICATION_EMAIL"
else
print_warning "Mail command not available, skipping notification"
fi
}
show_backup_summary() {
local backup_size=$(du -h "$BACKUP_FILE" | cut -f1)
local backup_count=$(find "$BACKUP_DIR" -name "relay_backup_*.nrdb*" | wc -l)
echo
echo "🎉 Backup Completed Successfully!"
echo
echo "Backup Details:"
echo " Source DB: $DB_FILE"
echo " Backup File: $BACKUP_FILE"
echo " Backup Size: $backup_size"
echo " Compressed: $COMPRESS"
echo " Verified: ${VERIFY:-false}"
echo
echo "Storage:"
echo " Local Backups: $backup_count files in $BACKUP_DIR"
echo " Retention: $RETENTION_DAYS days"
if [[ -n "$S3_BUCKET" ]]; then
echo " S3 Bucket: $S3_BUCKET"
fi
echo
echo "Management Commands:"
echo " List backups: find $BACKUP_DIR -name 'relay_backup_*'"
echo " Restore: See examples/deployment/backup/restore-relay.sh"
echo
}
# Main execution
main() {
echo
echo "==============================================="
echo "💾 C Nostr Relay - Database Backup"
echo "==============================================="
echo
# Initialize log file
mkdir -p "$(dirname "$LOG_FILE")"
touch "$LOG_FILE"
parse_args "$@"
check_dependencies
find_database
create_backup_directory
perform_backup
upload_to_s3
cleanup_old_backups
send_notification
show_backup_summary
print_success "Backup process completed successfully!"
}
# Handle errors
trap 'print_error "Backup failed at line $LINENO"' ERR
# Run main function
main "$@"


@@ -0,0 +1,460 @@
#!/bin/bash
# C Nostr Relay - Monitoring Script
# Comprehensive monitoring for event-based configuration relay
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
RELAY_DIR="/opt/c-relay"
SERVICE_NAME="c-relay"
RELAY_PORT="8888"
LOG_FILE="/var/log/relay-monitor.log"
ALERT_EMAIL=""
WEBHOOK_URL=""
CHECK_INTERVAL="60"
MAX_MEMORY_MB="1024"
MAX_DB_SIZE_MB="10240"
MIN_DISK_SPACE_MB="1024"
# Counters for statistics
TOTAL_CHECKS=0
FAILED_CHECKS=0
ALERTS_SENT=0
# Functions
print_step() {
echo -e "${BLUE}[INFO]${NC} $1"
log_message "INFO" "$1"
}
print_success() {
echo -e "${GREEN}[OK]${NC} $1"
log_message "OK" "$1"
}
print_warning() {
echo -e "${YELLOW}[WARN]${NC} $1"
log_message "WARN" "$1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
log_message "ERROR" "$1"
}
log_message() {
local level="$1"
local message="$2"
echo "$(date '+%Y-%m-%d %H:%M:%S') [$level] $message" >> "$LOG_FILE"
}
show_help() {
echo "Usage: $0 [OPTIONS]"
echo
echo "Options:"
echo " -d, --relay-dir DIR Relay directory (default: /opt/c-relay)"
echo " -p, --port PORT Relay port (default: 8888)"
echo " -i, --interval SECONDS Check interval (default: 60)"
echo " -e, --email EMAIL Alert email address"
echo " -w, --webhook URL Webhook URL for alerts"
echo " -m, --max-memory MB Max memory usage alert (default: 1024MB)"
echo " -s, --max-db-size MB Max database size alert (default: 10240MB)"
echo " -f, --min-free-space MB Min disk space alert (default: 1024MB)"
echo " -c, --continuous Run continuously (daemon mode)"
echo " -h, --help Show this help message"
echo
echo "Examples:"
echo " $0 # Single check"
echo " $0 -c -i 30 -e admin@example.com # Continuous monitoring"
echo " $0 -w https://hooks.slack.com/... # Webhook notifications"
}
parse_args() {
CONTINUOUS="false"
while [[ $# -gt 0 ]]; do
case $1 in
-d|--relay-dir)
RELAY_DIR="$2"
shift 2
;;
-p|--port)
RELAY_PORT="$2"
shift 2
;;
-i|--interval)
CHECK_INTERVAL="$2"
shift 2
;;
-e|--email)
ALERT_EMAIL="$2"
shift 2
;;
-w|--webhook)
WEBHOOK_URL="$2"
shift 2
;;
-m|--max-memory)
MAX_MEMORY_MB="$2"
shift 2
;;
-s|--max-db-size)
MAX_DB_SIZE_MB="$2"
shift 2
;;
-f|--min-free-space)
MIN_DISK_SPACE_MB="$2"
shift 2
;;
-c|--continuous)
CONTINUOUS="true"
shift
;;
-h|--help)
show_help
exit 0
;;
*)
print_error "Unknown option: $1"
show_help
exit 1
;;
esac
done
}
check_process_running() {
print_step "Checking if relay process is running..."
if pgrep -f "c_relay_x86" > /dev/null; then
print_success "Relay process is running"
return 0
else
print_error "Relay process is not running"
return 1
fi
}
check_port_listening() {
print_step "Checking if port $RELAY_PORT is listening..."
if netstat -tln 2>/dev/null | grep -q ":$RELAY_PORT " || \
ss -tln 2>/dev/null | grep -q ":$RELAY_PORT "; then
print_success "Port $RELAY_PORT is listening"
return 0
else
print_error "Port $RELAY_PORT is not listening"
return 1
fi
}
check_service_status() {
print_step "Checking systemd service status..."
if systemctl is-active --quiet "$SERVICE_NAME"; then
print_success "Service $SERVICE_NAME is active"
return 0
else
local status=$(systemctl is-active "$SERVICE_NAME" 2>/dev/null || echo "unknown")
print_error "Service $SERVICE_NAME status: $status"
return 1
fi
}
check_memory_usage() {
print_step "Checking memory usage..."
local memory_kb=$(ps aux | grep "c_relay_x86" | grep -v grep | awk '{sum+=$6} END {print sum}')
if [[ -z "$memory_kb" ]]; then
print_warning "Could not determine memory usage"
return 1
fi
local memory_mb=$((memory_kb / 1024))
if [[ $memory_mb -gt $MAX_MEMORY_MB ]]; then
print_error "High memory usage: ${memory_mb}MB (limit: ${MAX_MEMORY_MB}MB)"
return 1
else
print_success "Memory usage: ${memory_mb}MB"
return 0
fi
}
check_database_size() {
print_step "Checking database size..."
local db_files=($(find "$RELAY_DIR" -name "*.nrdb" 2>/dev/null))
if [[ ${#db_files[@]} -eq 0 ]]; then
print_warning "No database files found"
return 1
fi
local total_size=0
for db_file in "${db_files[@]}"; do
if [[ -r "$db_file" ]]; then
local size_kb=$(du -k "$db_file" | cut -f1)
total_size=$((total_size + size_kb))
fi
done
local total_size_mb=$((total_size / 1024))
if [[ $total_size_mb -gt $MAX_DB_SIZE_MB ]]; then
print_error "Large database size: ${total_size_mb}MB (limit: ${MAX_DB_SIZE_MB}MB)"
return 1
else
print_success "Database size: ${total_size_mb}MB"
return 0
fi
}
check_disk_space() {
print_step "Checking disk space..."
local free_space_kb=$(df "$RELAY_DIR" | awk 'NR==2 {print $4}')
local free_space_mb=$((free_space_kb / 1024))
if [[ $free_space_mb -lt $MIN_DISK_SPACE_MB ]]; then
print_error "Low disk space: ${free_space_mb}MB (minimum: ${MIN_DISK_SPACE_MB}MB)"
return 1
else
print_success "Free disk space: ${free_space_mb}MB"
return 0
fi
}
check_database_integrity() {
print_step "Checking database integrity..."
local db_files=($(find "$RELAY_DIR" -name "*.nrdb" 2>/dev/null))
if [[ ${#db_files[@]} -eq 0 ]]; then
print_warning "No database files to check"
return 1
fi
local integrity_ok=true
for db_file in "${db_files[@]}"; do
if [[ -r "$db_file" ]]; then
if timeout 30 sqlite3 "$db_file" "PRAGMA integrity_check;" | grep -q "ok"; then
print_success "Database integrity OK: $(basename "$db_file")"
else
print_error "Database integrity failed: $(basename "$db_file")"
integrity_ok=false
fi
fi
done
if $integrity_ok; then
return 0
else
return 1
fi
}
check_websocket_connection() {
print_step "Checking WebSocket connection..."
# Simple connection test using curl
if timeout 10 curl -s -N -H "Connection: Upgrade" \
-H "Upgrade: websocket" -H "Sec-WebSocket-Key: test" \
-H "Sec-WebSocket-Version: 13" \
"http://localhost:$RELAY_PORT/" >/dev/null 2>&1; then
print_success "WebSocket connection test passed"
return 0
else
print_warning "WebSocket connection test failed (may be normal)"
return 1
fi
}
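# Stricter alternative (assumption: websocat is installed): complete the
# WebSocket handshake and issue a minimal Nostr REQ instead of relying on
# curl's upgrade behavior:
#   echo '["REQ","healthcheck",{"limit":1}]' | \
#     timeout 5 websocat "ws://localhost:$RELAY_PORT/"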
check_configuration_events() {
print_step "Checking configuration events..."
local db_files=($(find "$RELAY_DIR" -name "*.nrdb" 2>/dev/null))
if [[ ${#db_files[@]} -eq 0 ]]; then
print_warning "No database files found"
return 1
fi
local config_count=0
for db_file in "${db_files[@]}"; do
if [[ -r "$db_file" ]]; then
local count=$(sqlite3 "$db_file" "SELECT COUNT(*) FROM events WHERE kind = 33334;" 2>/dev/null || echo "0")
config_count=$((config_count + count))
fi
done
if [[ $config_count -gt 0 ]]; then
print_success "Configuration events found: $config_count"
return 0
else
print_warning "No configuration events found"
return 1
fi
}
send_alert() {
local subject="$1"
local message="$2"
local severity="$3"
ALERTS_SENT=$((ALERTS_SENT + 1))
# Email alert
if [[ -n "$ALERT_EMAIL" ]] && command -v mail >/dev/null 2>&1; then
echo -e "$message" | mail -s "$subject" "$ALERT_EMAIL"
print_step "Alert sent to $ALERT_EMAIL"
fi
# Webhook alert
if [[ -n "$WEBHOOK_URL" ]] && command -v curl >/dev/null 2>&1; then
local webhook_data="{\"text\":\"$subject\",\"attachments\":[{\"color\":\"$severity\",\"text\":\"$message\"}]}"
curl -X POST -H 'Content-type: application/json' \
--data "$webhook_data" "$WEBHOOK_URL" >/dev/null 2>&1
print_step "Alert sent to webhook"
fi
}
restart_service() {
print_step "Attempting to restart service..."
if systemctl restart "$SERVICE_NAME"; then
print_success "Service restarted successfully"
sleep 5 # Wait for service to stabilize
return 0
else
print_error "Failed to restart service"
return 1
fi
}
run_checks() {
local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
local failed_checks=0
local total_checks=8
echo
echo "🔍 Relay Health Check - $timestamp"
echo "=================================="
# Core functionality checks
# Use plain assignment instead of ((x++)): under set -e, ((x++)) aborts the
# script on the first failed check because the expression evaluates to zero
check_process_running || failed_checks=$((failed_checks + 1))
check_service_status || failed_checks=$((failed_checks + 1))
check_port_listening || failed_checks=$((failed_checks + 1))
# Resource checks
check_memory_usage || failed_checks=$((failed_checks + 1))
check_disk_space || failed_checks=$((failed_checks + 1))
check_database_size || failed_checks=$((failed_checks + 1))
# Database checks
check_database_integrity || failed_checks=$((failed_checks + 1))
check_configuration_events || failed_checks=$((failed_checks + 1))
# Optional checks
check_websocket_connection || true # Don't count this as critical; must not trip set -e
TOTAL_CHECKS=$((TOTAL_CHECKS + total_checks))
FAILED_CHECKS=$((FAILED_CHECKS + failed_checks))
# Summary
echo
if [[ $failed_checks -eq 0 ]]; then
print_success "All checks passed ($total_checks/$total_checks)"
return 0
else
print_error "Failed checks: $failed_checks/$total_checks"
# Send alert if configured
if [[ -n "$ALERT_EMAIL" || -n "$WEBHOOK_URL" ]]; then
local alert_subject="C Nostr Relay Health Alert"
local alert_message="Relay health check failed.
Failed checks: $failed_checks/$total_checks
Time: $timestamp
Host: $(hostname)
Service: $SERVICE_NAME
Port: $RELAY_PORT
Please check the relay logs:
sudo journalctl -u $SERVICE_NAME --since '10 minutes ago'
"
send_alert "$alert_subject" "$alert_message" "danger"
fi
# Auto-restart if service is down
if ! check_process_running >/dev/null 2>&1; then
print_step "Process is down, attempting restart..."
restart_service
fi
return 1
fi
}
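# Scheduling sketch: single-shot mode pairs naturally with cron (paths below
# are illustrative), while -c/--continuous suits a systemd service:
#   */5 * * * * /opt/c-relay/monitor-relay.sh -e admin@example.com >> /var/log/relay-monitor-cron.log 2>&1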
show_statistics() {
if [[ $TOTAL_CHECKS -gt 0 ]]; then
local success_rate=$(( (TOTAL_CHECKS - FAILED_CHECKS) * 100 / TOTAL_CHECKS ))
echo
echo "📊 Monitoring Statistics"
echo "======================="
echo "Total Checks: $TOTAL_CHECKS"
echo "Failed Checks: $FAILED_CHECKS"
echo "Success Rate: ${success_rate}%"
echo "Alerts Sent: $ALERTS_SENT"
fi
}
cleanup() {
echo
print_step "Monitoring stopped"
show_statistics
exit 0
}
# Main execution
main() {
echo
echo "📡 C Nostr Relay - Health Monitor"
echo "================================="
echo
# Initialize log file
mkdir -p "$(dirname "$LOG_FILE")"
touch "$LOG_FILE"
parse_args "$@"
# Trap signals for cleanup
trap cleanup SIGINT SIGTERM
if [[ "$CONTINUOUS" == "true" ]]; then
print_step "Starting continuous monitoring (interval: ${CHECK_INTERVAL}s)"
print_step "Press Ctrl+C to stop"
while true; do
run_checks || true # a failed round must not kill the loop under set -e
sleep "$CHECK_INTERVAL"
done
else
run_checks || { show_statistics; exit 1; } # still report statistics on failure
fi
show_statistics
}
# Run main function
main "$@"


@@ -0,0 +1,168 @@
# Nginx Configuration for C Nostr Relay
# Complete nginx.conf for reverse proxy setup with SSL
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
use epoll;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Logging format
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
# Basic settings
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
server_tokens off;
# Gzip compression
gzip on;
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_types
text/plain
text/css
text/xml
text/javascript
application/json
application/javascript
application/xml+rss
application/atom+xml;
# Rate limiting
limit_req_zone $remote_addr zone=relay:10m rate=10r/s;
# Map WebSocket upgrade
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
# Upstream for the relay
upstream c_relay_backend {
server 127.0.0.1:8888;
keepalive 32;
}
# HTTP Server (redirect to HTTPS)
server {
listen 80;
server_name relay.yourdomain.com;
# Redirect all HTTP to HTTPS
return 301 https://$server_name$request_uri;
}
# HTTPS Server
server {
listen 443 ssl http2;
server_name relay.yourdomain.com;
# SSL Configuration
ssl_certificate /etc/letsencrypt/live/relay.yourdomain.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/relay.yourdomain.com/privkey.pem;
# SSL Security Settings
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305;
ssl_prefer_server_ciphers off;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
ssl_session_tickets off;
# OCSP Stapling
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/letsencrypt/live/relay.yourdomain.com/chain.pem;
resolver 8.8.8.8 8.8.4.4 valid=300s;
resolver_timeout 5s;
# Security Headers
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Frame-Options "DENY" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "no-referrer-when-downgrade" always;
add_header Content-Security-Policy "default-src 'self'; connect-src 'self' wss://relay.yourdomain.com; script-src 'self'; style-src 'self' 'unsafe-inline';" always;
# Rate limiting
limit_req zone=relay burst=20 nodelay;
# Main proxy location for WebSocket and HTTP
location / {
# Proxy settings
proxy_pass http://c_relay_backend;
proxy_http_version 1.1;
proxy_cache_bypass $http_upgrade;
# Headers
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $server_name;
# WebSocket support
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
# Timeouts for WebSocket connections
proxy_read_timeout 86400s;
proxy_send_timeout 86400s;
proxy_connect_timeout 60s;
# Buffer settings
proxy_buffering off;
proxy_request_buffering off;
# Error handling
proxy_intercept_errors on;
error_page 502 503 504 /50x.html;
}
# Error pages
location = /50x.html {
root /usr/share/nginx/html;
}
# Health check endpoint (if implemented)
location /health {
proxy_pass http://c_relay_backend/health;
access_log off;
}
# Deny access to hidden files
location ~ /\. {
deny all;
access_log off;
log_not_found off;
}
# Optional: Metrics endpoint (if implemented)
location /metrics {
proxy_pass http://c_relay_backend/metrics;
# Restrict access to monitoring systems
allow 10.0.0.0/8;
allow 172.16.0.0/12;
allow 192.168.0.0/16;
deny all;
}
}
}
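# Validation sketch: after editing this file, check syntax and apply without
# dropping connections:
#   nginx -t && systemctl reload nginx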


@@ -0,0 +1,346 @@
#!/bin/bash
# C Nostr Relay - Nginx SSL Proxy Setup Script
# Sets up nginx as a reverse proxy with Let's Encrypt SSL
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
DOMAIN=""
EMAIL=""
RELAY_PORT="8888"
NGINX_CONF_DIR="/etc/nginx"
SITES_AVAILABLE="/etc/nginx/sites-available"
SITES_ENABLED="/etc/nginx/sites-enabled"
# Functions
print_step() {
echo -e "${BLUE}[STEP]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
show_help() {
echo "Usage: $0 -d DOMAIN -e EMAIL [OPTIONS]"
echo
echo "Required options:"
echo " -d, --domain DOMAIN Domain name for the relay (e.g., relay.example.com)"
echo " -e, --email EMAIL Email address for Let's Encrypt"
echo
echo "Optional options:"
echo " -p, --port PORT Relay port (default: 8888)"
echo " -h, --help Show this help message"
echo
echo "Example:"
echo " $0 -d relay.example.com -e admin@example.com"
}
parse_args() {
while [[ $# -gt 0 ]]; do
case $1 in
-d|--domain)
DOMAIN="$2"
shift 2
;;
-e|--email)
EMAIL="$2"
shift 2
;;
-p|--port)
RELAY_PORT="$2"
shift 2
;;
-h|--help)
show_help
exit 0
;;
*)
print_error "Unknown option: $1"
show_help
exit 1
;;
esac
done
if [[ -z "$DOMAIN" || -z "$EMAIL" ]]; then
print_error "Domain and email are required"
show_help
exit 1
fi
}
check_root() {
if [[ $EUID -ne 0 ]]; then
print_error "This script must be run as root (use sudo)"
exit 1
fi
}
check_relay_running() {
print_step "Checking if C Nostr Relay is running..."
if ! pgrep -f "c_relay_x86" > /dev/null; then
print_error "C Nostr Relay is not running"
print_error "Please start the relay first with: sudo systemctl start c-relay"
exit 1
fi
if ! netstat -tln 2>/dev/null | grep -q ":$RELAY_PORT" && \
! ss -tln 2>/dev/null | grep -q ":$RELAY_PORT"; then
print_error "Relay is not listening on port $RELAY_PORT"
exit 1
fi
print_success "Relay is running on port $RELAY_PORT"
}
install_nginx() {
print_step "Installing nginx..."
if command -v nginx &> /dev/null; then
print_warning "Nginx is already installed"
else
apt update
apt install -y nginx
systemctl enable nginx
print_success "Nginx installed"
fi
}
install_certbot() {
print_step "Installing certbot for Let's Encrypt..."
if command -v certbot &> /dev/null; then
print_warning "Certbot is already installed"
else
apt install -y certbot python3-certbot-nginx
print_success "Certbot installed"
fi
}
create_nginx_config() {
print_step "Creating nginx configuration..."
# Backup existing default config
if [[ -f "$SITES_ENABLED/default" ]]; then
mv "$SITES_ENABLED/default" "$SITES_ENABLED/default.backup"
print_warning "Backed up default nginx config"
fi
# Create site configuration
cat > "$SITES_AVAILABLE/$DOMAIN" << EOF
# http-level directives: limit_req_zone and map are only valid in the http
# context; placing them at the top of this file works because files in
# sites-enabled are included inside nginx's http block
limit_req_zone \$remote_addr zone=relay:10m rate=10r/s;
map \$http_upgrade \$connection_upgrade {
default upgrade;
'' close;
}
# HTTP Server (will be modified by certbot for HTTPS)
server {
listen 80;
server_name $DOMAIN;
# Rate limiting
limit_req zone=relay burst=20 nodelay;
location / {
# Proxy settings
proxy_pass http://127.0.0.1:$RELAY_PORT;
proxy_http_version 1.1;
proxy_cache_bypass \$http_upgrade;
# Headers
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
# WebSocket support
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection \$connection_upgrade;
# Timeouts for WebSocket connections
proxy_read_timeout 86400s;
proxy_send_timeout 86400s;
# Buffer settings
proxy_buffering off;
}
# Health check
location /health {
proxy_pass http://127.0.0.1:$RELAY_PORT/health;
access_log off;
}
}
EOF
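# Note: nginx variables are escaped as \$host, \$http_upgrade, etc. in the
# unquoted heredoc above so that only the shell variables ($DOMAIN,
# $RELAY_PORT) expand when the file is written.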
# Enable the site
ln -sf "$SITES_AVAILABLE/$DOMAIN" "$SITES_ENABLED/"
print_success "Nginx configuration created for $DOMAIN"
}
test_nginx_config() {
print_step "Testing nginx configuration..."
if nginx -t; then
print_success "Nginx configuration is valid"
else
print_error "Nginx configuration is invalid"
exit 1
fi
}
restart_nginx() {
print_step "Restarting nginx..."
systemctl restart nginx
systemctl enable nginx
if systemctl is-active --quiet nginx; then
print_success "Nginx restarted successfully"
else
print_error "Failed to restart nginx"
exit 1
fi
}
setup_ssl() {
print_step "Setting up SSL certificate with Let's Encrypt..."
# Obtain certificate
if certbot --nginx -d "$DOMAIN" --email "$EMAIL" --agree-tos --non-interactive; then
print_success "SSL certificate obtained and configured"
else
print_error "Failed to obtain SSL certificate"
exit 1
fi
}
setup_auto_renewal() {
print_step "Setting up SSL certificate auto-renewal..."
# Create renewal cron job
cat > /etc/cron.d/certbot-renew << EOF
# Renew Let's Encrypt certificates
0 12 * * * root /usr/bin/certbot renew --quiet && /usr/bin/systemctl reload nginx
EOF
print_success "Auto-renewal configured"
}
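# Note: certbot packages on recent Debian/Ubuntu typically also ship a systemd
# timer (certbot.timer), so the cron entry above is a redundant safety net.
# Renewal can be exercised without touching live certificates via:
#   certbot renew --dry-run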
configure_firewall() {
print_step "Configuring firewall..."
if command -v ufw &> /dev/null; then
ufw allow 'Nginx Full'
ufw delete allow 'Nginx HTTP' 2>/dev/null || true
print_success "UFW configured for nginx"
elif command -v firewall-cmd &> /dev/null; then
firewall-cmd --permanent --add-service=http
firewall-cmd --permanent --add-service=https
firewall-cmd --reload
print_success "Firewalld configured"
else
print_warning "No recognized firewall found"
print_warning "Please ensure ports 80 and 443 are open"
fi
}
test_setup() {
print_step "Testing the setup..."
sleep 5
# Test HTTP redirect
if curl -s -o /dev/null -w "%{http_code}" "http://$DOMAIN" | grep -q "301\|302"; then
print_success "HTTP to HTTPS redirect working"
else
print_warning "HTTP redirect test failed"
fi
# Test HTTPS
if curl -s -o /dev/null -w "%{http_code}" "https://$DOMAIN" | grep -q "200"; then
print_success "HTTPS connection working"
else
print_warning "HTTPS test failed"
fi
# Test WebSocket (if relay supports it)
if command -v wscat &> /dev/null; then
print_step "Testing WebSocket connection..."
timeout 5 wscat -c "wss://$DOMAIN" --execute "exit" &>/dev/null && \
print_success "WebSocket connection working" || \
print_warning "WebSocket test inconclusive (install wscat for better testing)"
fi
}
show_final_status() {
echo
echo "🎉 SSL Proxy Setup Complete!"
echo
echo "Configuration Summary:"
echo " Domain: $DOMAIN"
echo " SSL: Let's Encrypt"
echo " Backend: 127.0.0.1:$RELAY_PORT"
echo " Config: $SITES_AVAILABLE/$DOMAIN"
echo
echo "Your Nostr relay is now accessible at:"
echo " HTTPS URL: https://$DOMAIN"
echo " WebSocket: wss://$DOMAIN"
echo
echo "Management Commands:"
echo " Test config: sudo nginx -t"
echo " Reload nginx: sudo systemctl reload nginx"
echo " Check SSL: sudo certbot certificates"
echo " Renew SSL: sudo certbot renew"
echo
echo "SSL certificate will auto-renew via cron."
echo
}
# Main execution
main() {
echo
echo "============================================"
echo "🔒 C Nostr Relay - SSL Proxy Setup"
echo "============================================"
echo
parse_args "$@"
check_root
check_relay_running
install_nginx
install_certbot
create_nginx_config
test_nginx_config
restart_nginx
setup_ssl
setup_auto_renewal
configure_firewall
test_setup
show_final_status
print_success "SSL proxy setup completed successfully!"
}
# Run main function
main "$@"


@@ -0,0 +1,282 @@
#!/bin/bash
# C Nostr Relay - Simple VPS Deployment Script
# Deploys the relay with event-based configuration on Ubuntu/Debian VPS
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
RELAY_USER="c-relay"
INSTALL_DIR="/opt/c-relay"
SERVICE_NAME="c-relay"
RELAY_PORT="8888"
# Functions
print_step() {
echo -e "${BLUE}[STEP]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
check_root() {
if [[ $EUID -ne 0 ]]; then
print_error "This script must be run as root (use sudo)"
exit 1
fi
}
detect_os() {
if [[ -f /etc/debian_version ]]; then
OS="debian"
print_success "Detected Debian/Ubuntu system"
elif [[ -f /etc/redhat-release ]]; then
OS="redhat"
print_success "Detected RedHat/CentOS system"
else
print_error "Unsupported operating system"
exit 1
fi
}
install_dependencies() {
print_step "Installing system dependencies..."
if [[ $OS == "debian" ]]; then
apt update
apt install -y build-essential git sqlite3 libsqlite3-dev \
libwebsockets-dev libssl-dev libsecp256k1-dev \
libcurl4-openssl-dev zlib1g-dev systemd curl wget
elif [[ $OS == "redhat" ]]; then
yum groupinstall -y "Development Tools"
yum install -y git sqlite-devel libwebsockets-devel \
openssl-devel libsecp256k1-devel libcurl-devel \
zlib-devel systemd curl wget
fi
print_success "Dependencies installed"
}
create_user() {
print_step "Creating system user for relay..."
if id "$RELAY_USER" &>/dev/null; then
print_warning "User $RELAY_USER already exists"
else
useradd --system --home-dir "$INSTALL_DIR" --shell /bin/false "$RELAY_USER"
print_success "Created user: $RELAY_USER"
fi
}
setup_directories() {
print_step "Setting up directories..."
mkdir -p "$INSTALL_DIR"
chown "$RELAY_USER:$RELAY_USER" "$INSTALL_DIR"
chmod 755 "$INSTALL_DIR"
print_success "Directories configured"
}
build_relay() {
print_step "Building C Nostr Relay..."
# Check if we're in the source directory
if [[ ! -f "Makefile" ]]; then
print_error "Makefile not found. Please run this script from the c-relay source directory."
exit 1
fi
# Clean and build
make clean
make
if [[ ! -f "build/c_relay_x86" ]]; then
print_error "Build failed - binary not found"
exit 1
fi
print_success "Relay built successfully"
}
install_binary() {
print_step "Installing relay binary..."
cp build/c_relay_x86 "$INSTALL_DIR/"
chown "$RELAY_USER:$RELAY_USER" "$INSTALL_DIR/c_relay_x86"
chmod +x "$INSTALL_DIR/c_relay_x86"
print_success "Binary installed to $INSTALL_DIR"
}
install_service() {
print_step "Installing systemd service..."
# Use the existing systemd service file
if [[ -f "systemd/c-relay.service" ]]; then
cp systemd/c-relay.service /etc/systemd/system/
systemctl daemon-reload
print_success "Systemd service installed"
else
print_warning "Systemd service file not found, creating basic one..."
cat > /etc/systemd/system/c-relay.service << EOF
[Unit]
Description=C Nostr Relay
After=network.target
[Service]
Type=simple
User=$RELAY_USER
Group=$RELAY_USER
WorkingDirectory=$INSTALL_DIR
ExecStart=$INSTALL_DIR/c_relay_x86
Restart=always
RestartSec=5
# Security hardening
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=$INSTALL_DIR
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
print_success "Basic systemd service created"
fi
}
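# Verification sketch: the installed unit can be inspected and lint-checked
# before it is ever started:
#   systemctl cat c-relay
#   systemd-analyze verify /etc/systemd/system/c-relay.service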
configure_firewall() {
print_step "Configuring firewall..."
if command -v ufw &> /dev/null; then
# UFW (Ubuntu)
ufw allow "$RELAY_PORT/tcp" comment "Nostr Relay"
print_success "UFW rule added for port $RELAY_PORT"
elif command -v firewall-cmd &> /dev/null; then
# Firewalld (CentOS/RHEL)
firewall-cmd --permanent --add-port="$RELAY_PORT/tcp"
firewall-cmd --reload
print_success "Firewalld rule added for port $RELAY_PORT"
else
print_warning "No recognized firewall found. Please manually open port $RELAY_PORT"
fi
}
start_service() {
print_step "Starting relay service..."
systemctl enable "$SERVICE_NAME"
systemctl start "$SERVICE_NAME"
sleep 3
if systemctl is-active --quiet "$SERVICE_NAME"; then
print_success "Relay service started and enabled"
else
print_error "Failed to start relay service"
print_error "Check logs with: journalctl -u $SERVICE_NAME --no-pager"
exit 1
fi
}
capture_admin_keys() {
print_step "Capturing admin keys..."
echo
echo "=================================="
echo "🔑 CRITICAL: ADMIN PRIVATE KEY 🔑"
echo "=================================="
echo
print_warning "The admin private key will be shown in the service logs."
print_warning "This key is generated ONCE and is needed for all configuration updates!"
echo
echo "To view the admin key, run:"
echo " sudo journalctl -u $SERVICE_NAME --no-pager | grep -A 5 'Admin Private Key'"
echo
echo "Or check recent logs:"
echo " sudo journalctl -u $SERVICE_NAME --since '5 minutes ago'"
echo
print_error "IMPORTANT: Save this key in a secure location immediately!"
echo
}
show_status() {
print_step "Deployment Status"
echo
echo "🎉 Deployment Complete!"
echo
echo "Service Status:"
systemctl status "$SERVICE_NAME" --no-pager -l
echo
echo "Quick Commands:"
echo " Check status: sudo systemctl status $SERVICE_NAME"
echo " View logs: sudo journalctl -u $SERVICE_NAME -f"
echo " Restart: sudo systemctl restart $SERVICE_NAME"
echo " Stop: sudo systemctl stop $SERVICE_NAME"
echo
echo "Relay Information:"
echo " Port: $RELAY_PORT"
echo " Directory: $INSTALL_DIR"
echo " User: $RELAY_USER"
echo " Database: Auto-generated in $INSTALL_DIR"
echo
echo "Next Steps:"
echo "1. Get your admin private key from the logs (see above)"
echo "2. Configure your relay using the event-based system"
echo "3. Set up SSL/TLS with a reverse proxy (nginx/apache)"
echo "4. Configure monitoring and backups"
echo
echo "Documentation:"
echo " User Guide: docs/user_guide.md"
echo " Config Guide: docs/configuration_guide.md"
echo " Deployment: docs/deployment_guide.md"
echo
}
# Main deployment flow
main() {
echo
echo "=========================================="
echo "🚀 C Nostr Relay - Simple VPS Deployment"
echo "=========================================="
echo
check_root
detect_os
install_dependencies
create_user
setup_directories
build_relay
install_binary
install_service
configure_firewall
start_service
capture_admin_keys
show_status
print_success "Deployment completed successfully!"
}
# Run main function
main "$@"


@@ -0,0 +1,150 @@
# MUSL-based fully static C-Relay builder
# Produces portable binaries with zero runtime dependencies
FROM alpine:latest AS builder
# Add alternative mirrors and install build dependencies with retry
RUN echo "http://dl-cdn.alpinelinux.org/alpine/v3.22/main" > /etc/apk/repositories && \
echo "http://dl-cdn.alpinelinux.org/alpine/v3.22/community" >> /etc/apk/repositories && \
echo "http://mirror.leaseweb.com/alpine/v3.22/main" >> /etc/apk/repositories && \
echo "http://mirror.leaseweb.com/alpine/v3.22/community" >> /etc/apk/repositories && \
apk update --no-cache || (sleep 5 && apk update --no-cache) || (sleep 10 && apk update --no-cache)
# Install build dependencies with retry logic
RUN apk add --no-cache \
build-base \
musl-dev \
git \
cmake \
pkgconfig \
autoconf \
automake \
libtool \
openssl-dev \
openssl-libs-static \
zlib-dev \
zlib-static \
curl-dev \
curl-static \
sqlite-dev \
sqlite-static \
linux-headers || \
(sleep 10 && apk add --no-cache \
build-base \
musl-dev \
git \
cmake \
pkgconfig \
autoconf \
automake \
libtool \
openssl-dev \
openssl-libs-static \
zlib-dev \
zlib-static \
curl-dev \
curl-static \
sqlite-dev \
sqlite-static \
linux-headers)
# Set working directory
WORKDIR /build
# Build zlib static (if needed)
RUN if [ ! -f /usr/lib/libz.a ]; then \
cd /tmp && \
wget https://zlib.net/zlib-1.3.1.tar.gz && \
tar xzf zlib-1.3.1.tar.gz && \
cd zlib-1.3.1 && \
./configure --static --prefix=/usr && \
make && make install; \
fi
# Build OpenSSL static
RUN cd /tmp && \
wget https://www.openssl.org/source/openssl-3.0.13.tar.gz && \
tar xzf openssl-3.0.13.tar.gz && \
cd openssl-3.0.13 && \
./Configure linux-x86_64 no-shared --prefix=/usr && \
make && make install_sw
# Build SQLite with FTS5 enabled (JSON functions have been built into SQLite since 3.38; --enable-json1 is retained only for older source trees)
RUN cd /tmp && \
wget https://www.sqlite.org/2024/sqlite-autoconf-3460000.tar.gz && \
tar xzf sqlite-autoconf-3460000.tar.gz && \
cd sqlite-autoconf-3460000 && \
./configure \
--enable-static \
--disable-shared \
--enable-json1 \
--enable-fts5 \
--prefix=/usr \
CFLAGS="-DSQLITE_ENABLE_JSON1=1 -DSQLITE_ENABLE_FTS5=1" && \
make && make install
# Build libsecp256k1 static
RUN cd /tmp && \
git clone https://github.com/bitcoin-core/secp256k1.git && \
cd secp256k1 && \
./autogen.sh && \
./configure --enable-static --disable-shared --prefix=/usr && \
make && make install
# Build libwebsockets static with OpenSSL
RUN cd /tmp && \
git clone https://github.com/warmcat/libwebsockets.git && \
cd libwebsockets && \
mkdir build && cd build && \
cmake .. \
-DLWS_WITH_STATIC=ON \
-DLWS_WITH_SHARED=OFF \
-DLWS_WITH_SSL=ON \
-DLWS_OPENSSL_LIBRARIES="/usr/lib/libssl.a;/usr/lib/libcrypto.a" \
-DLWS_OPENSSL_INCLUDE_DIRS="/usr/include" \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr && \
make && make install
# Build curl static (minimal features)
RUN cd /tmp && \
wget https://curl.se/download/curl-8.6.0.tar.gz && \
tar xzf curl-8.6.0.tar.gz && \
cd curl-8.6.0 && \
./configure \
--disable-shared \
--enable-static \
--disable-ldap \
--without-libidn2 \
--without-brotli \
--without-zstd \
--without-rtmp \
--without-libpsl \
--without-krb5 \
--with-openssl \
--prefix=/usr && \
make && make install
# Copy c-relay source
COPY . /build/
# Initialize submodules
RUN git submodule update --init --recursive
# Build nostr_core_lib
RUN cd nostr_core_lib && ./build.sh
# Build c-relay static
RUN make clean && \
CC="musl-gcc -static" \
CFLAGS="-O2 -Wall -Wextra -std=c99 -g" \
LDFLAGS="-static -Wl,--whole-archive -lpthread -Wl,--no-whole-archive" \
LIBS="-lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -lsecp256k1 -lssl -lcrypto -lcurl" \
make
# Strip binary for size
RUN strip build/c_relay_x86
# Multi-stage build to produce minimal output
FROM scratch AS output
COPY --from=builder /build/build/c_relay_x86 /c_relay_static_musl_x86_64
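# Usage sketch (assumes BuildKit): export the scratch stage's filesystem, which
# contains only the static binary, straight into ./build:
#   DOCKER_BUILDKIT=1 docker build --target output --output type=local,dest=build .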

increment_and_push.sh (Executable file, 570 lines)

@@ -0,0 +1,570 @@
#!/bin/bash
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
print_status() { echo -e "${BLUE}[INFO]${NC} $1" >&2; }
print_success() { echo -e "${GREEN}[SUCCESS]${NC} $1" >&2; }
print_warning() { echo -e "${YELLOW}[WARNING]${NC} $1" >&2; }
print_error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
# Global variables
COMMIT_MESSAGE=""
RELEASE_MODE=false
VERSION_INCREMENT_TYPE="patch" # "patch", "minor", or "major"
show_usage() {
echo "C-Relay Increment and Push Script"
echo ""
echo "USAGE:"
echo " $0 [OPTIONS] \"commit message\""
echo ""
echo "COMMANDS:"
echo " $0 \"commit message\" Default: increment patch, commit & push"
echo " $0 -p \"commit message\" Increment patch version"
echo " $0 -m \"commit message\" Increment minor version"
echo " $0 -M \"commit message\" Increment major version"
echo " $0 -r \"commit message\" Create release with assets (no version increment)"
echo " $0 -r -m \"commit message\" Create release with minor version increment"
echo " $0 -h Show this help message"
echo ""
echo "OPTIONS:"
echo " -p, --patch Increment patch version (default)"
echo " -m, --minor Increment minor version"
echo " -M, --major Increment major version"
echo " -r, --release Create release with assets"
echo " -h, --help Show this help message"
echo ""
echo "EXAMPLES:"
echo " $0 \"Fixed event validation bug\""
echo " $0 -m \"Added new features\""
echo " $0 -M \"Breaking API changes\""
echo " $0 -r \"Release current version\""
echo " $0 -r -m \"Release with minor increment\""
echo ""
echo "VERSION INCREMENT MODES:"
echo " -p, --patch (default): Increment patch version (v1.2.3 → v1.2.4)"
echo " -m, --minor: Increment minor version, zero patch (v1.2.3 → v1.3.0)"
echo " -M, --major: Increment major version, zero minor+patch (v1.2.3 → v2.0.0)"
echo ""
echo "RELEASE MODE (-r flag):"
echo " - Build static binary using build_static.sh"
echo " - Create source tarball"
echo " - Git add, commit, push, and create Gitea release with assets"
echo " - Can be combined with version increment flags"
echo ""
echo "REQUIREMENTS FOR RELEASE MODE:"
echo " - Gitea token in ~/.gitea_token for release uploads"
echo " - Docker installed for static binary builds"
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
-r|--release)
RELEASE_MODE=true
shift
;;
-p|--patch)
VERSION_INCREMENT_TYPE="patch"
shift
;;
-m|--minor)
VERSION_INCREMENT_TYPE="minor"
shift
;;
-M|--major)
VERSION_INCREMENT_TYPE="major"
shift
;;
-h|--help)
show_usage
exit 0
;;
*)
# First non-flag argument is the commit message
if [[ -z "$COMMIT_MESSAGE" ]]; then
COMMIT_MESSAGE="$1"
fi
shift
;;
esac
done
# Validate inputs
if [[ -z "$COMMIT_MESSAGE" ]]; then
print_error "Commit message is required"
echo ""
show_usage
exit 1
fi
# Check if we're in a git repository
check_git_repo() {
if ! git rev-parse --git-dir > /dev/null 2>&1; then
print_error "Not in a git repository"
exit 1
fi
}
# Function to get current version and increment appropriately
increment_version() {
local increment_type="$1" # "patch", "minor", or "major"
print_status "Getting current version..."
# Get the highest version tag (not chronologically latest)
LATEST_TAG=$(git tag -l 'v*.*.*' | sort -V | tail -n 1 || echo "")
if [[ -z "$LATEST_TAG" ]]; then
LATEST_TAG="v0.0.0"
print_warning "No version tags found, starting from $LATEST_TAG"
fi
# Extract version components (remove 'v' prefix)
VERSION=${LATEST_TAG#v}
# Parse major.minor.patch using regex
if [[ $VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
MAJOR=${BASH_REMATCH[1]}
MINOR=${BASH_REMATCH[2]}
PATCH=${BASH_REMATCH[3]}
else
print_error "Invalid version format in tag: $LATEST_TAG"
print_error "Expected format: v0.1.0"
exit 1
fi
# Increment version based on type
if [[ "$increment_type" == "major" ]]; then
# Major release: increment major, zero minor and patch
NEW_MAJOR=$((MAJOR + 1))
NEW_MINOR=0
NEW_PATCH=0
NEW_VERSION="v${NEW_MAJOR}.${NEW_MINOR}.${NEW_PATCH}"
print_status "Major version increment: incrementing major version"
elif [[ "$increment_type" == "minor" ]]; then
# Minor release: increment minor, zero patch
NEW_MAJOR=$MAJOR
NEW_MINOR=$((MINOR + 1))
NEW_PATCH=0
NEW_VERSION="v${NEW_MAJOR}.${NEW_MINOR}.${NEW_PATCH}"
print_status "Minor version increment: incrementing minor version"
else
# Default: increment patch
NEW_MAJOR=$MAJOR
NEW_MINOR=$MINOR
NEW_PATCH=$((PATCH + 1))
NEW_VERSION="v${NEW_MAJOR}.${NEW_MINOR}.${NEW_PATCH}"
print_status "Patch version increment: incrementing patch version"
fi
print_status "Current version: $LATEST_TAG"
print_status "New version: $NEW_VERSION"
# Update version in src/main.h
update_version_in_header "$NEW_VERSION" "$NEW_MAJOR" "$NEW_MINOR" "$NEW_PATCH"
# Export for use in other functions
export NEW_VERSION
}
# Function to update version macros in src/main.h
update_version_in_header() {
local new_version="$1"
local major="$2"
local minor="$3"
local patch="$4"
print_status "Updating version in src/main.h..."
# Check if src/main.h exists
if [[ ! -f "src/main.h" ]]; then
print_error "src/main.h not found"
exit 1
fi
# Update VERSION macro
sed -i "s/#define VERSION \".*\"/#define VERSION \"$new_version\"/" src/main.h
# Update VERSION_MAJOR macro
sed -i "s/#define VERSION_MAJOR [0-9]\+/#define VERSION_MAJOR $major/" src/main.h
# Update VERSION_MINOR macro
sed -i "s/#define VERSION_MINOR .*/#define VERSION_MINOR $minor/" src/main.h
# Update VERSION_PATCH macro
sed -i "s/#define VERSION_PATCH [0-9]\+/#define VERSION_PATCH $patch/" src/main.h
print_success "Updated version in src/main.h to $new_version"
}
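# Assumed shape of the macros the sed expressions above rewrite (illustrative):
#   #define VERSION "v1.2.3"
#   #define VERSION_MAJOR 1
#   #define VERSION_MINOR 2
#   #define VERSION_PATCH 3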
# Function to commit and push changes
git_commit_and_push() {
print_status "Preparing git commit..."
# Stage all changes
if git add . > /dev/null 2>&1; then
print_success "Staged all changes"
else
print_error "Failed to stage changes"
exit 1
fi
# Check if there are changes to commit
if git diff --staged --quiet; then
print_warning "No changes to commit"
else
# Commit changes
if git commit -m "$NEW_VERSION - $COMMIT_MESSAGE" > /dev/null 2>&1; then
print_success "Committed changes"
else
print_error "Failed to commit changes"
exit 1
fi
fi
# Create new git tag
if git tag "$NEW_VERSION" > /dev/null 2>&1; then
print_success "Created tag: $NEW_VERSION"
else
print_warning "Tag $NEW_VERSION already exists"
fi
# Push changes and tags
print_status "Pushing to remote repository..."
if git push > /dev/null 2>&1; then
print_success "Pushed changes"
else
print_error "Failed to push changes"
exit 1
fi
# Push only the new tag to avoid conflicts with existing tags
if git push origin "$NEW_VERSION" > /dev/null 2>&1; then
print_success "Pushed tag: $NEW_VERSION"
else
print_warning "Tag push failed, trying force push..."
if git push --force origin "$NEW_VERSION" > /dev/null 2>&1; then
print_success "Force-pushed updated tag: $NEW_VERSION"
else
print_error "Failed to push tag: $NEW_VERSION"
exit 1
fi
fi
}
# Function to commit and push changes without creating a tag (tag already created)
git_commit_and_push_no_tag() {
print_status "Preparing git commit..."
# Stage all changes
if git add . > /dev/null 2>&1; then
print_success "Staged all changes"
else
print_error "Failed to stage changes"
exit 1
fi
# Check if there are changes to commit
if git diff --staged --quiet; then
print_warning "No changes to commit"
else
# Commit changes
if git commit -m "$NEW_VERSION - $COMMIT_MESSAGE" > /dev/null 2>&1; then
print_success "Committed changes"
else
print_error "Failed to commit changes"
exit 1
fi
fi
# Push changes and tags
print_status "Pushing to remote repository..."
if git push > /dev/null 2>&1; then
print_success "Pushed changes"
else
print_error "Failed to push changes"
exit 1
fi
# Push only the new tag to avoid conflicts with existing tags
if git push origin "$NEW_VERSION" > /dev/null 2>&1; then
print_success "Pushed tag: $NEW_VERSION"
else
print_warning "Tag push failed, trying force push..."
if git push --force origin "$NEW_VERSION" > /dev/null 2>&1; then
print_success "Force-pushed updated tag: $NEW_VERSION"
else
print_error "Failed to push tag: $NEW_VERSION"
exit 1
fi
fi
}
# Function to build release binary
build_release_binary() {
print_status "Building release binary..."
# Check if build_static.sh exists
if [[ ! -f "build_static.sh" ]]; then
print_error "build_static.sh not found"
return 1
fi
# Run the static build script
if ./build_static.sh > /dev/null 2>&1; then
print_success "Built static binary successfully"
return 0
else
print_error "Failed to build static binary"
return 1
fi
}
# Function to create source tarball
create_source_tarball() {
print_status "Creating source tarball..."
local tarball_name="c-relay-${NEW_VERSION#v}.tar.gz"
# Create tarball excluding build artifacts and git files
if tar -czf "$tarball_name" \
--exclude='build/*' \
--exclude='.git*' \
--exclude='*.db' \
--exclude='*.db-*' \
--exclude='*.log' \
--exclude='*.tar.gz' \
. > /dev/null 2>&1; then
print_success "Created source tarball: $tarball_name"
echo "$tarball_name"
return 0
else
print_error "Failed to create source tarball"
return 1
fi
}
# Function to upload release assets to Gitea
upload_release_assets() {
local release_id="$1"
local binary_path="$2"
local tarball_path="$3"
print_status "Uploading release assets..."
# Check for Gitea token
if [[ ! -f "$HOME/.gitea_token" ]]; then
print_warning "No ~/.gitea_token found. Skipping asset uploads."
return 0
fi
local token=$(cat "$HOME/.gitea_token" | tr -d '\n\r')
local api_url="https://git.laantungir.net/api/v1/repos/laantungir/c-relay"
local assets_url="$api_url/releases/$release_id/assets"
print_status "Assets URL: $assets_url"
# Upload binary
if [[ -f "$binary_path" ]]; then
print_status "Uploading binary: $(basename "$binary_path")"
# Retry loop for eventual consistency
local max_attempts=3
local attempt=1
while [[ $attempt -le $max_attempts ]]; do
print_status "Upload attempt $attempt/$max_attempts"
local binary_response=$(curl -fS -X POST "$assets_url" \
-H "Authorization: token $token" \
-F "attachment=@$binary_path;filename=$(basename "$binary_path")" \
-F "name=$(basename "$binary_path")")
if echo "$binary_response" | grep -q '"id"'; then
print_success "Uploaded binary successfully"
break
else
print_warning "Upload attempt $attempt failed"
if [[ $attempt -lt $max_attempts ]]; then
print_status "Retrying in 2 seconds..."
sleep 2
else
print_error "Failed to upload binary after $max_attempts attempts"
print_error "Response: $binary_response"
fi
fi
((attempt++))
done
fi
# Upload source tarball
if [[ -f "$tarball_path" ]]; then
print_status "Uploading source tarball: $(basename "$tarball_path")"
local tarball_response=$(curl -s -X POST "$api_url/releases/$release_id/assets" \
-H "Authorization: token $token" \
-F "attachment=@$tarball_path;filename=$(basename "$tarball_path")")
if echo "$tarball_response" | grep -q '"id"'; then
print_success "Uploaded source tarball successfully"
else
print_warning "Failed to upload source tarball: $tarball_response"
fi
fi
}
# Function to create Gitea release
create_gitea_release() {
print_status "Creating Gitea release..."
# Check for Gitea token
if [[ ! -f "$HOME/.gitea_token" ]]; then
print_warning "No ~/.gitea_token found. Skipping release creation."
print_warning "Create ~/.gitea_token with your Gitea access token to enable releases."
return 0
fi
local token=$(cat "$HOME/.gitea_token" | tr -d '\n\r')
local api_url="https://git.laantungir.net/api/v1/repos/laantungir/c-relay"
# Create release
print_status "Creating release $NEW_VERSION..."
local response=$(curl -s -X POST "$api_url/releases" \
-H "Authorization: token $token" \
-H "Content-Type: application/json" \
-d "{\"tag_name\": \"$NEW_VERSION\", \"name\": \"$NEW_VERSION\", \"body\": \"$COMMIT_MESSAGE\"}")
if echo "$response" | grep -q '"id"'; then
print_success "Created release $NEW_VERSION"
# Extract release ID for asset uploads
local release_id=$(echo "$response" | grep -o '"id":[0-9]*' | head -1 | cut -d':' -f2)
echo $release_id
elif echo "$response" | grep -q "already exists"; then
print_warning "Release $NEW_VERSION already exists"
# Try to get existing release ID
local check_response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
if echo "$check_response" | grep -q '"id"'; then
local release_id=$(echo "$check_response" | grep -o '"id":[0-9]*' | head -1 | cut -d':' -f2)
print_status "Using existing release ID: $release_id"
echo $release_id
else
print_error "Could not find existing release ID"
return 1
fi
else
print_error "Failed to create release $NEW_VERSION"
print_error "Response: $response"
# Try to check if the release exists anyway
print_status "Checking if release exists..."
local check_response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
if echo "$check_response" | grep -q '"id"'; then
print_warning "Release exists but creation response was unexpected"
local release_id=$(echo "$check_response" | grep -o '"id":[0-9]*' | head -1 | cut -d':' -f2)
echo $release_id
else
print_error "Release does not exist and creation failed"
return 1
fi
fi
}
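# Design note: the print_* helpers write to stderr, so the only stdout output
# of this function is the numeric release ID; callers can therefore capture it:
#   release_id=$(create_gitea_release)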
# Main execution
main() {
print_status "C-Relay Increment and Push Script"
# Check prerequisites
check_git_repo
if [[ "$RELEASE_MODE" == true ]]; then
print_status "=== RELEASE MODE ==="
# Increment only for a non-default type; because "patch" is the default, "-r" alone (and "-r -p") reuses the current tag instead of incrementing
if [[ "$VERSION_INCREMENT_TYPE" != "patch" ]]; then
increment_version "$VERSION_INCREMENT_TYPE"
else
# In release mode without version increment, get current version
LATEST_TAG=$(git tag -l 'v*.*.*' | sort -V | tail -n 1 || echo "v0.0.0")
NEW_VERSION="$LATEST_TAG"
export NEW_VERSION
fi
# Create new git tag BEFORE compilation so version.h picks it up
if git tag "$NEW_VERSION" > /dev/null 2>&1; then
print_success "Created tag: $NEW_VERSION"
else
print_warning "Tag $NEW_VERSION already exists, removing and recreating..."
git tag -d "$NEW_VERSION" > /dev/null 2>&1
git tag "$NEW_VERSION" > /dev/null 2>&1
fi
# Commit and push (but skip tag creation since we already did it)
git_commit_and_push_no_tag
# Build release binary
if build_release_binary; then
local binary_path="build/c_relay_static_x86_64"
else
print_warning "Binary build failed, continuing with release creation"
# Check if binary exists from previous build
if [[ -f "build/c_relay_static_x86_64" ]]; then
print_status "Using existing binary from previous build"
binary_path="build/c_relay_static_x86_64"
else
binary_path=""
fi
fi
# Create source tarball
local tarball_path=""
if tarball_path=$(create_source_tarball); then
: # tarball_path is set by the function
else
print_warning "Source tarball creation failed, continuing with release creation"
fi
# Create Gitea release
local release_id=""
if release_id=$(create_gitea_release); then
# Validate release_id is numeric
if [[ "$release_id" =~ ^[0-9]+$ ]]; then
# Upload assets if we have a release ID and assets
if [[ -n "$release_id" && (-n "$binary_path" || -n "$tarball_path") ]]; then
upload_release_assets "$release_id" "$binary_path" "$tarball_path"
fi
print_success "Release $NEW_VERSION completed successfully!"
else
print_error "Invalid release_id: $release_id"
exit 1
fi
else
print_error "Release creation failed"
fi
else
print_status "=== DEFAULT MODE ==="
# Increment version based on type (default to patch)
increment_version "$VERSION_INCREMENT_TYPE"
# Create new git tag BEFORE compilation so version.h picks it up
if git tag "$NEW_VERSION" > /dev/null 2>&1; then
print_success "Created tag: $NEW_VERSION"
else
print_warning "Tag $NEW_VERSION already exists, removing and recreating..."
git tag -d "$NEW_VERSION" > /dev/null 2>&1
git tag "$NEW_VERSION" > /dev/null 2>&1
fi
# Commit and push (but skip tag creation since we already did it)
git_commit_and_push_no_tag
print_success "Increment and push completed successfully!"
print_status "Version $NEW_VERSION pushed to repository"
fi
}
# Execute main function
main


@@ -5,9 +5,214 @@
echo "=== C Nostr Relay Build and Restart Script ==="
# Parse command line arguments
PRESERVE_DATABASE=false
HELP=false
USE_TEST_KEYS=false
ADMIN_KEY=""
RELAY_KEY=""
PORT_OVERRIDE=""
DEBUG_LEVEL="5"
# Key validation function
validate_hex_key() {
local key="$1"
local key_type="$2"
if [ ${#key} -ne 64 ]; then
echo "ERROR: $key_type key must be exactly 64 characters"
return 1
fi
if ! [[ "$key" =~ ^[0-9a-fA-F]{64}$ ]]; then
echo "ERROR: $key_type key must contain only hex characters (0-9, a-f, A-F)"
return 1
fi
return 0
}
while [[ $# -gt 0 ]]; do
case $1 in
-a|--admin-key)
if [ -z "$2" ]; then
echo "ERROR: Admin key option requires a value"
HELP=true
shift
else
ADMIN_KEY="$2"
shift 2
fi
;;
-r|--relay-key)
if [ -z "$2" ]; then
echo "ERROR: Relay key option requires a value"
HELP=true
shift
else
RELAY_KEY="$2"
shift 2
fi
;;
-p|--port)
if [ -z "$2" ]; then
echo "ERROR: Port option requires a value"
HELP=true
shift
else
PORT_OVERRIDE="$2"
shift 2
fi
;;
--preserve-database) # bare -d is reserved for --debug-level (handled below)
PRESERVE_DATABASE=true
shift
;;
--test-keys|-t)
USE_TEST_KEYS=true
shift
;;
--debug-level=*)
DEBUG_LEVEL="${1#*=}"
shift
;;
-d=*)
DEBUG_LEVEL="${1#*=}"
shift
;;
--debug-level)
if [ -z "$2" ]; then
echo "ERROR: Debug level option requires a value"
HELP=true
shift
else
DEBUG_LEVEL="$2"
shift 2
fi
;;
-d)
if [ -z "$2" ]; then
echo "ERROR: Debug level option requires a value"
HELP=true
shift
else
DEBUG_LEVEL="$2"
shift 2
fi
;;
--help|-h)
HELP=true
shift
;;
*)
echo "Unknown option: $1"
HELP=true
shift
;;
esac
done
# Validate custom keys if provided
if [ -n "$ADMIN_KEY" ]; then
if ! validate_hex_key "$ADMIN_KEY" "Admin"; then
exit 1
fi
fi
if [ -n "$RELAY_KEY" ]; then
if ! validate_hex_key "$RELAY_KEY" "Relay"; then
exit 1
fi
fi
# Validate port if provided
if [ -n "$PORT_OVERRIDE" ]; then
if ! [[ "$PORT_OVERRIDE" =~ ^[0-9]+$ ]] || [ "$PORT_OVERRIDE" -lt 1 ] || [ "$PORT_OVERRIDE" -gt 65535 ]; then
echo "ERROR: Port must be a number between 1 and 65535"
exit 1
fi
fi
# The relay is always started with --strict-port below; warn test-key users on the default port
if [ "$USE_TEST_KEYS" = true ] && [ -z "$PORT_OVERRIDE" ]; then
echo "WARNING: the relay binds strictly, so startup fails if port 8888 is busy. Consider specifying a custom port with -p."
fi
# Validate debug level if provided
if [ -n "$DEBUG_LEVEL" ]; then
if ! [[ "$DEBUG_LEVEL" =~ ^[0-5]$ ]]; then
echo "ERROR: Debug level must be 0-5, got: $DEBUG_LEVEL"
exit 1
fi
fi
# Show help
if [ "$HELP" = true ]; then
echo "Usage: $0 [OPTIONS]"
echo ""
echo "Options:"
echo " -a, --admin-key <hex> 64-character hex admin private key"
echo " -r, --relay-key <hex> 64-character hex relay private key"
echo " -p, --port <port> Custom port override (default: 8888)"
echo " -d, --debug-level <0-5> Set debug level: 0=none, 1=errors, 2=warnings, 3=info, 4=debug, 5=trace"
echo " --preserve-database Keep existing database files (don't delete for fresh start)"
echo " --test-keys, -t Use deterministic test keys for development (admin: all 'a's, relay: all '1's)"
echo " --help, -h Show this help message"
echo ""
echo "Event-Based Configuration:"
echo " This relay now uses event-based configuration stored directly in the database."
echo " On first startup, keys are automatically generated and printed once."
echo " Database file: <relay_pubkey>.db (created automatically)"
echo ""
echo "Examples:"
echo " $0 # Fresh start with random keys"
echo " $0 -a <admin-hex> -r <relay-hex> # Use custom keys"
echo " $0 -a <admin-hex> -p 9000 # Custom admin key on port 9000"
echo " $0 -p 7777 --strict-port # Fail if port 7777 unavailable (no fallback)"
echo " $0 -p 8080 --strict-port -d=3 # Custom port with strict binding and debug"
echo " $0 --debug-level=3 # Start with debug level 3 (info)"
echo " $0 -d=5 # Start with debug level 5 (trace)"
echo " $0 --preserve-database # Preserve existing database and keys"
echo " $0 --test-keys # Use test keys for consistent development"
echo " $0 -t --preserve-database # Use test keys and preserve database"
echo ""
echo "Key Format: Keys must be exactly 64 hexadecimal characters (0-9, a-f, A-F)"
echo "Default behavior: Deletes existing database files to start fresh with new keys"
echo " for development purposes"
exit 0
fi
# Handle database file cleanup for fresh start
if [ "$PRESERVE_DATABASE" = false ]; then
if ls *.db* >/dev/null 2>&1 || ls build/*.db* >/dev/null 2>&1; then
echo "Removing existing database files (including WAL/SHM) to trigger fresh key generation..."
rm -f *.db* build/*.db*
echo "✓ Database files removed - will generate new keys and database"
else
echo "No existing database found - will generate fresh setup"
fi
else
echo "Preserving existing database files (build process does not touch database files)"
fi
# Clean up legacy files that are no longer used
rm -rf dev-config/ 2>/dev/null
rm -f db/c_nostr_relay.db* 2>/dev/null
# Embed web files into C headers before building
echo "Embedding web files..."
./embed_web_files.sh
# Build the project - ONLY static build
echo "Building project (static binary with SQLite JSON1 extension)..."
./build_static.sh
# Exit if static build fails - no fallback
if [ $? -ne 0 ]; then
echo "ERROR: Static build failed. Cannot proceed without static binary."
echo "Please fix the build errors and try again."
exit 1
fi
# Check if build was successful
if [ $? -ne 0 ]; then
@@ -15,64 +220,135 @@ if [ $? -ne 0 ]; then
exit 1
fi
# Check if static relay binary exists after build - ONLY use static binary
ARCH=$(uname -m)
case "$ARCH" in
x86_64)
BINARY_PATH="./build/c_relay_x86"
BINARY_PATH="./build/c_relay_static_x86_64"
;;
aarch64|arm64)
BINARY_PATH="./build/c_relay_arm64"
BINARY_PATH="./build/c_relay_static_arm64"
;;
*)
BINARY_PATH="./build/c_relay_$ARCH"
BINARY_PATH="./build/c_relay_static_$ARCH"
;;
esac
# Verify static binary exists - no fallbacks
if [ ! -f "$BINARY_PATH" ]; then
echo "ERROR: Relay binary not found at $BINARY_PATH after build. Build may have failed."
echo "ERROR: Static relay binary not found: $BINARY_PATH"
echo ""
echo "The relay requires the static binary with JSON1 support."
echo "Please run: ./build_static.sh"
echo ""
exit 1
fi
echo "Using static binary: $BINARY_PATH"
echo "Build successful. Proceeding with relay restart..."
# Kill existing relay if running - start aggressive immediately
echo "Stopping any existing relay servers..."
pkill -f "c_relay_" 2>/dev/null
sleep 2 # Give time for shutdown
# Check if port is still bound
if lsof -i :8888 >/dev/null 2>&1; then
echo "Port 8888 still in use, force killing..."
fuser -k 8888/tcp 2>/dev/null || echo "No process on port 8888"
# Get all relay processes and kill them immediately with -9
RELAY_PIDS=$(pgrep -f "c_relay_" || echo "")
if [ -n "$RELAY_PIDS" ]; then
echo "Force killing relay processes immediately: $RELAY_PIDS"
kill -9 $RELAY_PIDS 2>/dev/null
else
echo "No existing relay processes found"
fi
# Ensure port 8888 is completely free with retry loop
echo "Ensuring port 8888 is available..."
for attempt in {1..15}; do
if ! lsof -i :8888 >/dev/null 2>&1; then
echo "Port 8888 is now free"
break
fi
echo "Attempt $attempt: Port 8888 still in use, force killing..."
# Kill anything using port 8888
fuser -k 8888/tcp 2>/dev/null || true
# Double-check for any remaining relay processes
REMAINING_PIDS=$(pgrep -f "c_relay_" || echo "")
if [ -n "$REMAINING_PIDS" ]; then
echo "Killing remaining relay processes: $REMAINING_PIDS"
kill -9 $REMAINING_PIDS 2>/dev/null || true
fi
sleep 2
if [ $attempt -eq 15 ]; then
echo "ERROR: Could not free port 8888 after 15 attempts"
echo "Current processes using port:"
lsof -i :8888 2>/dev/null || echo "No process details available"
echo "You may need to manually kill processes or reboot"
exit 1
fi
done
# Final safety check - ensure no relay processes remain
FINAL_PIDS=$(pgrep -f "c_relay_" || echo "")
if [ -n "$FINAL_PIDS" ]; then
echo "Final cleanup: killing processes $FINAL_PIDS"
kill -9 $FINAL_PIDS 2>/dev/null || true
sleep 1
else
echo "No existing relay found"
fi
# Clean up PID file
rm -f relay.pid
# Initialize database if needed
if [ ! -f "./db/c_nostr_relay.db" ]; then
echo "Initializing database..."
./db/init.sh --force >/dev/null 2>&1
fi
# Database initialization is now handled automatically by the relay
# with event-based configuration system
echo "Database will be initialized automatically on startup if needed"
# Start relay in background with output redirection
echo "Starting relay server..."
echo "Debug: Current processes: $(ps aux | grep 'c_relay_' | grep -v grep || echo 'None')"
# Build command line arguments for relay binary
RELAY_ARGS=""
if [ -n "$ADMIN_KEY" ]; then
RELAY_ARGS="$RELAY_ARGS -a $ADMIN_KEY"
echo "Using custom admin key: ${ADMIN_KEY:0:16}..."
fi
if [ -n "$RELAY_KEY" ]; then
RELAY_ARGS="$RELAY_ARGS -r $RELAY_KEY"
echo "Using custom relay key: ${RELAY_KEY:0:16}..."
fi
if [ -n "$PORT_OVERRIDE" ]; then
RELAY_ARGS="$RELAY_ARGS -p $PORT_OVERRIDE"
echo "Using custom port: $PORT_OVERRIDE"
fi
if [ -n "$DEBUG_LEVEL" ]; then
RELAY_ARGS="$RELAY_ARGS --debug-level=$DEBUG_LEVEL"
echo "Using debug level: $DEBUG_LEVEL"
fi
# Change to build directory before starting relay so database files are created there
cd build
if [ "$USE_TEST_KEYS" = true ]; then
echo "Using deterministic test keys for development..."
./$(basename $BINARY_PATH) -a 6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3 -r 1111111111111111111111111111111111111111111111111111111111111111 --debug-level=$DEBUG_LEVEL --strict-port > ../relay.log 2>&1 &
elif [ -n "$RELAY_ARGS" ]; then
echo "Starting relay with custom configuration..."
./$(basename $BINARY_PATH) $RELAY_ARGS --debug-level=$DEBUG_LEVEL --strict-port > ../relay.log 2>&1 &
else
# No command line arguments needed for random key generation
echo "Starting relay with random key generation..."
./$(basename $BINARY_PATH) --debug-level=$DEBUG_LEVEL --strict-port > ../relay.log 2>&1 &
fi
RELAY_PID=$!
# Change back to original directory
cd ..
echo "Started with PID: $RELAY_PID"
@@ -83,18 +359,61 @@ sleep 3
if ps -p "$RELAY_PID" >/dev/null 2>&1; then
echo "Relay started successfully!"
echo "PID: $RELAY_PID"
echo "WebSocket endpoint: ws://127.0.0.1:8888"
# Wait for relay to fully initialize and detect the actual port it's using
sleep 2
# Extract actual port from relay logs
ACTUAL_PORT=""
if [ -f relay.log ]; then
# Look for the success message with actual port
ACTUAL_PORT=$(grep "WebSocket relay started on ws://127.0.0.1:" relay.log 2>/dev/null | tail -1 | sed -n 's/.*ws:\/\/127\.0\.0\.1:\([0-9]*\).*/\1/p')
# If we couldn't find the port in logs, try to detect from netstat
if [ -z "$ACTUAL_PORT" ]; then
ACTUAL_PORT=$(netstat -tln 2>/dev/null | grep -E ":888[0-9]" | head -1 | sed -n 's/.*:\([0-9]*\).*/\1/p')
fi
fi
# Display the actual endpoint
if [ -n "$ACTUAL_PORT" ]; then
if [ "$ACTUAL_PORT" = "8888" ]; then
echo "WebSocket endpoint: ws://127.0.0.1:$ACTUAL_PORT"
else
echo "WebSocket endpoint: ws://127.0.0.1:$ACTUAL_PORT (fell back from port 8888)"
fi
else
echo "WebSocket endpoint: ws://127.0.0.1:8888 (port detection failed - check logs)"
fi
echo "HTTP endpoint: http://127.0.0.1:${ACTUAL_PORT:-8888}"
echo "Log file: relay.log"
echo ""
# Save PID for debugging
echo $RELAY_PID > relay.pid
echo "=== Relay server running in background ==="
# Check if new keys were generated and display them
sleep 1 # Give relay time to write initial logs
if grep -q "IMPORTANT: SAVE THIS ADMIN PRIVATE KEY SECURELY!" relay.log 2>/dev/null; then
echo "=== IMPORTANT: NEW ADMIN PRIVATE KEY GENERATED ==="
echo ""
# Extract and display the admin private key section from the log
grep -A 15 -B 2 "IMPORTANT: SAVE THIS ADMIN PRIVATE KEY SECURELY!" relay.log | head -n 20
echo ""
echo "⚠️ SAVE THIS ADMIN PRIVATE KEY SECURELY - IT CONTROLS YOUR RELAY CONFIGURATION!"
echo "⚠️ This key is needed to update configuration and is only displayed once"
echo "⚠️ The relay and database information is also logged in relay.log for reference"
echo ""
fi
echo "=== Event-Based Relay Server Running ==="
echo "Configuration: Event-based (kind 33334 Nostr events)"
echo "Database: Automatically created with relay pubkey naming"
echo "To kill relay: pkill -f 'c_relay_'"
echo "To check status: ps aux | grep c_relay_"
echo "To view logs: tail -f relay.log"
echo "Binary: $BINARY_PATH"
echo "Binary: $BINARY_PATH (zero configuration needed)"
echo "Ready for Nostr client connections!"
else
echo "ERROR: Relay failed to start"

nip_11_curl.sh Executable file

@@ -0,0 +1,3 @@
#!/bin/bash
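# Fetch this relay's NIP-11 relay information document (relays return their
# metadata as JSON when a client asks with Accept: application/nostr+json)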
curl -H "Accept: application/nostr+json" http://localhost:8888/

nips

Submodule nips deleted from 8c45ff5d96

node_modules/.package-lock.json generated vendored Normal file

@@ -0,0 +1,153 @@
{
"name": "c-relay",
"lockfileVersion": 3,
"requires": true,
"packages": {
"node_modules/@noble/ciphers": {
"version": "0.5.3",
"resolved": "https://registry.npmjs.org/@noble/ciphers/-/ciphers-0.5.3.tgz",
"integrity": "sha512-B0+6IIHiqEs3BPMT0hcRmHvEj2QHOLu+uwt+tqDDeVd0oyVzh7BPrDcPjRnV1PV/5LaknXJJQvOuRGR0zQJz+w==",
"license": "MIT",
"funding": {
"url": "https://paulmillr.com/funding/"
}
},
"node_modules/@noble/curves": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.2.0.tgz",
"integrity": "sha512-oYclrNgRaM9SsBUBVbb8M6DTV7ZHRTKugureoYEncY5c65HOmRzvSiTE3y5CYaPYJA/GVkrhXEoF0M3Ya9PMnw==",
"license": "MIT",
"dependencies": {
"@noble/hashes": "1.3.2"
},
"funding": {
"url": "https://paulmillr.com/funding/"
}
},
"node_modules/@noble/curves/node_modules/@noble/hashes": {
"version": "1.3.2",
"resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.3.2.tgz",
"integrity": "sha512-MVC8EAQp7MvEcm30KWENFjgR+Mkmf+D189XJTkFIlwohU5hcBbn1ZkKq7KVTi2Hme3PMGF390DaL52beVrIihQ==",
"license": "MIT",
"engines": {
"node": ">= 16"
},
"funding": {
"url": "https://paulmillr.com/funding/"
}
},
"node_modules/@noble/hashes": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.3.1.tgz",
"integrity": "sha512-EbqwksQwz9xDRGfDST86whPBgM65E0OH/pCgqW0GBVzO22bNE+NuIbeTb714+IfSjU3aRk47EUvXIb5bTsenKA==",
"license": "MIT",
"engines": {
"node": ">= 16"
},
"funding": {
"url": "https://paulmillr.com/funding/"
}
},
"node_modules/@scure/base": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.1.tgz",
"integrity": "sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA==",
"funding": [
{
"type": "individual",
"url": "https://paulmillr.com/funding/"
}
],
"license": "MIT"
},
"node_modules/@scure/bip32": {
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/@scure/bip32/-/bip32-1.3.1.tgz",
"integrity": "sha512-osvveYtyzdEVbt3OfwwXFr4P2iVBL5u1Q3q4ONBfDY/UpOuXmOlbgwc1xECEboY8wIays8Yt6onaWMUdUbfl0A==",
"license": "MIT",
"dependencies": {
"@noble/curves": "~1.1.0",
"@noble/hashes": "~1.3.1",
"@scure/base": "~1.1.0"
},
"funding": {
"url": "https://paulmillr.com/funding/"
}
},
"node_modules/@scure/bip32/node_modules/@noble/curves": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.1.0.tgz",
"integrity": "sha512-091oBExgENk/kGj3AZmtBDMpxQPDtxQABR2B9lb1JbVTs6ytdzZNwvhxQ4MWasRNEzlbEH8jCWFCwhF/Obj5AA==",
"license": "MIT",
"dependencies": {
"@noble/hashes": "1.3.1"
},
"funding": {
"url": "https://paulmillr.com/funding/"
}
},
"node_modules/@scure/bip39": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/@scure/bip39/-/bip39-1.2.1.tgz",
"integrity": "sha512-Z3/Fsz1yr904dduJD0NpiyRHhRYHdcnyh73FZWiV+/qhWi83wNJ3NWolYqCEN+ZWsUz2TWwajJggcRE9r1zUYg==",
"license": "MIT",
"dependencies": {
"@noble/hashes": "~1.3.0",
"@scure/base": "~1.1.0"
},
"funding": {
"url": "https://paulmillr.com/funding/"
}
},
"node_modules/nostr-tools": {
"version": "2.17.0",
"resolved": "https://registry.npmjs.org/nostr-tools/-/nostr-tools-2.17.0.tgz",
"integrity": "sha512-lrvHM7cSaGhz7F0YuBvgHMoU2s8/KuThihDoOYk8w5gpVHTy0DeUCAgCN8uLGeuSl5MAWekJr9Dkfo5HClqO9w==",
"license": "Unlicense",
"dependencies": {
"@noble/ciphers": "^0.5.1",
"@noble/curves": "1.2.0",
"@noble/hashes": "1.3.1",
"@scure/base": "1.1.1",
"@scure/bip32": "1.3.1",
"@scure/bip39": "1.2.1",
"nostr-wasm": "0.1.0"
},
"peerDependencies": {
"typescript": ">=5.0.0"
},
"peerDependenciesMeta": {
"typescript": {
"optional": true
}
}
},
"node_modules/nostr-wasm": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/nostr-wasm/-/nostr-wasm-0.1.0.tgz",
"integrity": "sha512-78BTryCLcLYv96ONU8Ws3Q1JzjlAt+43pWQhIl86xZmWeegYCNLPml7yQ+gG3vR6V5h4XGj+TxO+SS5dsThQIA==",
"license": "MIT"
},
"node_modules/ws": {
"version": "8.18.3",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz",
"integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==",
"license": "MIT",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": ">=5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
}
}
}

node_modules/@noble/ciphers/LICENSE generated vendored Normal file

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2022 Paul Miller (https://paulmillr.com)
Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

node_modules/@noble/ciphers/README.md generated vendored Normal file

@@ -0,0 +1,655 @@
# noble-ciphers
Auditable & minimal JS implementation of Salsa20, ChaCha and AES.
- 🔒 Auditable
- 🔻 Tree-shaking-friendly: use only what's necessary, other code won't be included
- 🏎 [Ultra-fast](#speed), hand-optimized for caveats of JS engines
- 🔍 Unique tests ensure correctness: property-based, cross-library and Wycheproof vectors
- 💼 AES: ECB, CBC, CTR, CFB, GCM, SIV (nonce misuse-resistant)
- 💃 Salsa20, ChaCha, XSalsa20, XChaCha, Poly1305, ChaCha8, ChaCha12
- 🥈 Two AES implementations: choose between friendly webcrypto wrapper and pure JS one
- 🪶 45KB (8KB gzipped) for everything, 10KB (3KB gzipped) for ChaCha build
For discussions, questions and support, visit
[GitHub Discussions](https://github.com/paulmillr/noble-ciphers/discussions)
section of the repository.
### This library belongs to _noble_ cryptography
> **noble cryptography** — high-security, easily auditable set of contained cryptographic libraries and tools.
- Zero or minimal dependencies
- Highly readable TypeScript / JS code
- PGP-signed releases and transparent NPM builds
- All libraries:
[ciphers](https://github.com/paulmillr/noble-ciphers),
[curves](https://github.com/paulmillr/noble-curves),
[hashes](https://github.com/paulmillr/noble-hashes),
[post-quantum](https://github.com/paulmillr/noble-post-quantum),
4kb [secp256k1](https://github.com/paulmillr/noble-secp256k1) /
[ed25519](https://github.com/paulmillr/noble-ed25519)
- [Check out homepage](https://paulmillr.com/noble/)
for reading resources, documentation and apps built with noble
## Usage
> npm install @noble/ciphers
We support all major platforms and runtimes.
For [Deno](https://deno.land), make sure to use
[npm specifier](https://deno.land/manual@v1.28.0/node/npm_specifiers).
For React Native, you may need a
[polyfill for getRandomValues](https://github.com/LinusU/react-native-get-random-values).
A standalone file
[noble-ciphers.js](https://github.com/paulmillr/noble-ciphers/releases) is also available.
```js
// import * from '@noble/ciphers'; // Error: use sub-imports, to ensure small app size
import { xchacha20poly1305 } from '@noble/ciphers/chacha';
// import { xchacha20poly1305 } from 'npm:@noble/ciphers@0.5.0/chacha'; // Deno
```
- [Examples](#examples)
- [Encrypt with XChaCha20-Poly1305](#encrypt-with-xchacha20-poly1305)
- [Encrypt with AES-256-GCM](#encrypt-with-aes-256-gcm)
- [Use existing key instead of a new one](#use-existing-key-instead-of-a-new-one)
- [Encrypt without nonce](#encrypt-without-nonce)
- [Use same array for input and output](#use-same-array-for-input-and-output)
- [All imports](#all-imports)
- [Implementations](#implementations)
- [Salsa20](#salsa)
- [ChaCha](#chacha)
- [AES](#aes)
- [Webcrypto AES](#webcrypto-aes)
- [Poly1305, GHash, Polyval](#poly1305-ghash-polyval)
- [FF1 format-preserving encryption](#ff1)
- [Managed nonces](#managed-nonces)
- [Guidance](#guidance)
- [Which cipher should I pick?](#which-cipher-should-i-pick)
- [How to encrypt properly](#how-to-encrypt-properly)
- [Nonces](#nonces)
- [Encryption limits](#encryption-limits)
- [AES internals and block modes](#aes-internals-and-block-modes)
- [Security](#security)
- [Speed](#speed)
- [Upgrading](#upgrading)
- [Contributing & testing](#contributing--testing)
- [Resources](#resources)
## Examples
#### Encrypt with XChaCha20-Poly1305
```js
import { xchacha20poly1305 } from '@noble/ciphers/chacha';
import { utf8ToBytes } from '@noble/ciphers/utils';
import { randomBytes } from '@noble/ciphers/webcrypto';
const key = randomBytes(32);
const nonce = randomBytes(24);
const chacha = xchacha20poly1305(key, nonce);
const data = utf8ToBytes('hello, noble');
const ciphertext = chacha.encrypt(data);
const data_ = chacha.decrypt(ciphertext); // utils.bytesToUtf8(data_) === data
```
#### Encrypt with AES-256-GCM
```js
import { gcm } from '@noble/ciphers/aes';
import { utf8ToBytes } from '@noble/ciphers/utils';
import { randomBytes } from '@noble/ciphers/webcrypto';
const key = randomBytes(32);
const nonce = randomBytes(12); // GCM's standard nonce is 96-bit
const aes = gcm(key, nonce);
const data = utf8ToBytes('hello, noble');
const ciphertext = aes.encrypt(data);
const data_ = aes.decrypt(ciphertext); // utils.bytesToUtf8(data_) === data
```
#### Use existing key instead of a new one
```js
const key = new Uint8Array([
169, 88, 160, 139, 168, 29, 147, 196, 14, 88, 237, 76, 243, 177, 109, 140, 195, 140, 80, 10, 216,
134, 215, 71, 191, 48, 20, 104, 189, 37, 38, 55,
]);
const nonce = new Uint8Array([
180, 90, 27, 63, 160, 191, 150, 33, 67, 212, 86, 71, 144, 6, 200, 102, 218, 32, 23, 147, 8, 41,
147, 11,
]);
// or, hex:
import { hexToBytes } from '@noble/ciphers/utils';
const key2 = hexToBytes('4b7f89bac90a1086fef73f5da2cbe93b2fae9dfbf7678ae1f3e75fd118ddf999');
const nonce2 = hexToBytes('9610467513de0bbd7c4cc2c3c64069f1802086fbd3232b13');
```
#### Encrypt without nonce
```js
import { xchacha20poly1305 } from '@noble/ciphers/chacha';
import { managedNonce } from '@noble/ciphers/webcrypto';
import { hexToBytes, utf8ToBytes } from '@noble/ciphers/utils';
const key = hexToBytes('fa686bfdffd3758f6377abbc23bf3d9bdc1a0dda4a6e7f8dbdd579fa1ff6d7e1');
const chacha = managedNonce(xchacha20poly1305)(key); // manages nonces for you
const data = utf8ToBytes('hello, noble');
const ciphertext = chacha.encrypt(data);
const data_ = chacha.decrypt(ciphertext);
```
#### Use same array for input and output
```js
import { chacha20poly1305 } from '@noble/ciphers/chacha';
import { utf8ToBytes } from '@noble/ciphers/utils';
import { randomBytes } from '@noble/ciphers/webcrypto';
const key = randomBytes(32);
const nonce = randomBytes(12);
const buf = new Uint8Array(12 + 16);
const _data = utf8ToBytes('hello, noble');
buf.set(_data, 0); // first 12 bytes
const _12b = buf.subarray(0, 12);
const chacha = chacha20poly1305(key, nonce);
chacha.encrypt(_12b, buf);
chacha.decrypt(buf, _12b); // _12b now same as _data
```
#### All imports
```js
import { gcm, siv } from '@noble/ciphers/aes';
import { xsalsa20poly1305 } from '@noble/ciphers/salsa';
import { chacha20poly1305, xchacha20poly1305 } from '@noble/ciphers/chacha';
// Unauthenticated encryption: make sure to use HMAC or similar
import { ctr, cfb, cbc, ecb } from '@noble/ciphers/aes';
import { salsa20, xsalsa20 } from '@noble/ciphers/salsa';
import { chacha20, xchacha20, chacha8, chacha12 } from '@noble/ciphers/chacha';
// Utilities
import { bytesToHex, hexToBytes, bytesToUtf8, utf8ToBytes } from '@noble/ciphers/utils';
import { managedNonce, randomBytes } from '@noble/ciphers/webcrypto';
```
## Implementations
### Salsa
```js
import { xsalsa20poly1305 } from '@noble/ciphers/salsa';
import { secretbox } from '@noble/ciphers/salsa'; // == xsalsa20poly1305
import { salsa20, xsalsa20 } from '@noble/ciphers/salsa';
```
[Salsa20](https://cr.yp.to/snuffle.html) stream cipher was released in 2005.
Salsa's goal was to implement an AES replacement that does not rely on S-boxes,
which are hard to implement in a constant-time manner.
Salsa20 is usually faster than AES, a big deal on slow, budget mobile phones.
[XSalsa20](https://cr.yp.to/snuffle/xsalsa-20110204.pdf), extended-nonce
variant was released in 2008. It switched nonces from 96-bit to 192-bit,
and became safe to be picked at random.
Nacl / Libsodium popularized the term "secretbox", a simple black-box
authenticated encryption. Secretbox is just xsalsa20-poly1305. We provide the
alias and corresponding seal / open methods. We don't provide "box" or "sealedbox".
Check out [PDF](https://cr.yp.to/snuffle/salsafamily-20071225.pdf) and
[wiki](https://en.wikipedia.org/wiki/Salsa20).
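A minimal sketch of the alias in use (the `{ seal, open }` shape is confirmed
by the `_micro.d.ts` typings later in this diff):
```js
import { secretbox } from '@noble/ciphers/salsa';
import { utf8ToBytes, bytesToUtf8 } from '@noble/ciphers/utils';
import { randomBytes } from '@noble/ciphers/webcrypto';
const key = randomBytes(32);
const nonce = randomBytes(24); // xsalsa20: 192-bit nonce, safe to pick at random
const box = secretbox(key, nonce); // { seal, open } == { encrypt, decrypt }
const sealed = box.seal(utf8ToBytes('hello, noble'));
const opened = box.open(sealed); // bytesToUtf8(opened) === 'hello, noble'
```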
### ChaCha
```js
import { chacha20poly1305, xchacha20poly1305 } from '@noble/ciphers/chacha';
import { chacha20, xchacha20, chacha8, chacha12 } from '@noble/ciphers/chacha';
```
[ChaCha20](https://cr.yp.to/chacha.html) stream cipher was released
in 2008. ChaCha aims to increase the diffusion per round, but has received
slightly less cryptanalysis. It was standardized in
[RFC 8439](https://datatracker.ietf.org/doc/html/rfc8439) and is now used in TLS 1.3.
[XChaCha20](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-xchacha)
extended-nonce variant is also provided. Similar to XSalsa, it's safe to use with
randomly-generated nonces.
Check out [PDF](http://cr.yp.to/chacha/chacha-20080128.pdf) and [wiki](https://en.wikipedia.org/wiki/Salsa20).
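A hedged sketch of the sequential-counter nonce pattern that the Guidance
section below recommends for chacha; `nonceFromCounter` and its 12-byte
layout are illustrative assumptions, not a standard wire format:
```js
import { chacha20poly1305 } from '@noble/ciphers/chacha';
import { utf8ToBytes } from '@noble/ciphers/utils';
import { randomBytes } from '@noble/ciphers/webcrypto';
const key = randomBytes(32);
// Pack a never-repeating message counter into the 12-byte RFC 8439 nonce.
function nonceFromCounter(i) {
  const nonce = new Uint8Array(12);
  new DataView(nonce.buffer).setBigUint64(4, BigInt(i), true);
  return nonce;
}
const msg0 = chacha20poly1305(key, nonceFromCounter(0)).encrypt(utf8ToBytes('first'));
const msg1 = chacha20poly1305(key, nonceFromCounter(1)).encrypt(utf8ToBytes('second'));
```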
### AES
```js
import { gcm, siv, ctr, cfb, cbc, ecb } from '@noble/ciphers/aes';
import { randomBytes } from '@noble/ciphers/webcrypto';
const plaintext = new Uint8Array(32).fill(16);
const key = randomBytes(32); // 24 for AES-192, 16 for AES-128
for (const cipher of [gcm, siv]) {
const stream = cipher(key, randomBytes(12));
const ciphertext_ = stream.encrypt(plaintext);
const plaintext_ = stream.decrypt(ciphertext_);
}
for (const cipher of [ctr, cbc]) {
const stream = cipher(key, randomBytes(16));
const ciphertext_ = stream.encrypt(plaintext);
const plaintext_ = stream.decrypt(ciphertext_);
}
for (const cipher of [ecb]) {
const stream = cipher(key);
const ciphertext_ = stream.encrypt(plaintext);
const plaintext_ = stream.decrypt(ciphertext_);
}
```
[AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard)
is a variant of Rijndael block cipher, standardized by NIST in 2001.
We provide the fastest available pure JS implementation.
We support AES-128, AES-192 and AES-256: the mode is selected dynamically,
based on key length (16, 24, 32).
[AES-GCM-SIV](https://en.wikipedia.org/wiki/AES-GCM-SIV)
nonce-misuse-resistant mode is also provided. It's recommended to use it,
to prevent catastrophic consequences of nonce reuse. Our implementation of SIV
has the same speed as GCM: there is no performance hit.
Check out [AES internals and block modes](#aes-internals-and-block-modes).
### Webcrypto AES
```js
import { gcm, ctr, cbc, randomBytes } from '@noble/ciphers/webcrypto';
const plaintext = new Uint8Array(32).fill(16);
const key = randomBytes(32);
for (const cipher of [gcm]) {
const stream = cipher(key, randomBytes(12));
const ciphertext_ = await stream.encrypt(plaintext);
const plaintext_ = await stream.decrypt(ciphertext_);
}
for (const cipher of [ctr, cbc]) {
const stream = cipher(key, randomBytes(16));
const ciphertext_ = await stream.encrypt(plaintext);
const plaintext_ = await stream.decrypt(ciphertext_);
}
```
We also have a separate wrapper over the WebCrypto built-in.
It's the same as using `crypto.subtle`, but with a massively simplified API.
Unlike the pure JS version, it's asynchronous.
### Poly1305, GHash, Polyval
```js
import { poly1305 } from '@noble/ciphers/_poly1305';
import { ghash, polyval } from '@noble/ciphers/_polyval';
```
We expose polynomial-evaluation MACs: [Poly1305](https://cr.yp.to/mac.html),
AES-GCM's [GHash](https://en.wikipedia.org/wiki/Galois/Counter_Mode) and
AES-SIV's [Polyval](https://en.wikipedia.org/wiki/AES-GCM-SIV).
Poly1305 ([PDF](https://cr.yp.to/mac/poly1305-20050329.pdf),
[wiki](https://en.wikipedia.org/wiki/Poly1305))
is a fast and parallel secret-key message-authentication code suitable for
a wide variety of applications. It was standardized in
[RFC 8439](https://datatracker.ietf.org/doc/html/rfc8439) and is now used in TLS 1.3.
Polynomial MACs are not perfect for every situation:
they lack Random Key Robustness, meaning the MAC can be forged, and they can't
be used in PAKE schemes. See
[invisible salamanders attack](https://keymaterial.net/2020/09/07/invisible-salamanders-in-aes-gcm-siv/).
To combat invisible salamanders, `hash(key)` can be included in the ciphertext;
however, this would violate ciphertext indistinguishability:
an attacker would know which key was used - so `HKDF(key, i)`
could be used instead.
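The exported MAC can also be called directly; a minimal sketch, keeping in
mind that Poly1305 keys are strictly one-time:
```js
import { poly1305 } from '@noble/ciphers/_poly1305';
import { utf8ToBytes } from '@noble/ciphers/utils';
import { randomBytes } from '@noble/ciphers/webcrypto';
const otk = randomBytes(32); // one-time key: never reuse for a second message
const tag = poly1305(utf8ToBytes('msg'), otk); // 16-byte authenticator
```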
### FF1
Format-preserving encryption algorithm (FPE-FF1) specified in NIST Special Publication 800-38G.
[See more info](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-38G.pdf).
### Managed nonces
```js
import { managedNonce } from '@noble/ciphers/webcrypto';
import { gcm, siv, ctr, cbc, ecb } from '@noble/ciphers/aes';
import { xsalsa20poly1305 } from '@noble/ciphers/salsa';
import { chacha20poly1305, xchacha20poly1305 } from '@noble/ciphers/chacha';
const wgcm = managedNonce(gcm);
const wsiv = managedNonce(siv);
const wcbc = managedNonce(cbc);
const wctr = managedNonce(ctr);
const wsalsapoly = managedNonce(xsalsa20poly1305);
const wchacha = managedNonce(chacha20poly1305);
const wxchacha = managedNonce(xchacha20poly1305);
// Now:
const encrypted = wgcm(key).encrypt(data); // no nonces
```
We provide an API that manages nonces internally instead of exposing them to the library's user.
For `encrypt`, a `nonceBytes`-length buffer is fetched from CSPRNG and prepended to the encrypted ciphertext.
For `decrypt`, the first `nonceBytes` of ciphertext are treated as the nonce.
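A minimal sketch of the same wire format done by hand, assuming
xchacha20poly1305 (`concatBytes` is exported from utils, as the `_micro.js`
source later in this diff shows):
```js
import { xchacha20poly1305 } from '@noble/ciphers/chacha';
import { concatBytes } from '@noble/ciphers/utils';
import { randomBytes } from '@noble/ciphers/webcrypto';
const NONCE_LEN = 24; // xchacha20poly1305 nonceLength
function seal(key, plaintext) {
  const nonce = randomBytes(NONCE_LEN); // fresh CSPRNG nonce per message
  return concatBytes(nonce, xchacha20poly1305(key, nonce).encrypt(plaintext)); // nonce || ct
}
function open(key, payload) {
  const nonce = payload.subarray(0, NONCE_LEN); // first nonceBytes are the nonce
  return xchacha20poly1305(key, nonce).decrypt(payload.subarray(NONCE_LEN));
}
```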
## Guidance
### Which cipher should I pick?
XChaCha20-Poly1305 is the safest bet these days.
AES-GCM-SIV is the second safest.
AES-GCM is the third.
### How to encrypt properly
- Use unpredictable key with enough entropy
- Random key must be using cryptographically secure random number generator (CSPRNG), not `Math.random` etc.
- Non-random key generated from KDF is fine
- Re-using key is fine, but be aware of rules for cryptographic key wear-out and [encryption limits](#encryption-limits)
- Use new nonce every time and [don't repeat it](#nonces)
- chacha and salsa20 are fine for sequential counters that _never_ repeat: `01, 02...`
- xchacha and xsalsa20 should be used for random nonces instead
- Prefer authenticated encryption (AEAD)
- HMAC+ChaCha / HMAC+AES / chacha20poly1305 / aes-gcm is good
- chacha20 without poly1305 or hmac / aes-ctr / aes-cbc is bad
- Flipping bits or ciphertext substitution won't be detected in unauthenticated ciphers
- Don't re-use keys between different protocols
- For example, using secp256k1 key in AES is bad
- Use hkdf or, at least, a hash function to create sub-key instead
### Nonces
Most ciphers need a key and a nonce (aka initialization vector / IV) to encrypt data:
ciphertext = encrypt(plaintext, key, nonce)
Repeating (key, nonce) pair with different plaintexts would allow an attacker to decrypt it:
ciphertext_a = encrypt(plaintext_a, key, nonce)
ciphertext_b = encrypt(plaintext_b, key, nonce)
stream_diff = xor(ciphertext_a, ciphertext_b) # Break encryption
So, you can't repeat nonces. One way of ensuring that is using counters:
for i in 0..:
ciphertext[i] = encrypt(plaintexts[i], key, i)
Another is generating random nonce every time:
for i in 0..:
rand_nonces[i] = random()
ciphertext[i] = encrypt(plaintexts[i], key, rand_nonces[i])
Counters are OK, but it's not always possible to store the current counter value:
e.g. in decentralized, unsyncable systems.
Randomness is OK, but there's a catch:
ChaCha20 and AES-GCM use 96-bit / 12-byte nonces, which implies
higher chance of collision. In the example above,
`random()` can collide and produce repeating nonce.
To safely use random nonces, utilize XSalsa20 or XChaCha:
they increased nonce length to 192-bit, minimizing the chance of collision.
AES-SIV is also fine. In situations where you can't use eXtended-nonce
algorithms, key rotation is advised. hkdf would work great for this case.
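One hedged sketch of such rotation, assuming the sibling `@noble/hashes`
package (not part of `@noble/ciphers`) for hkdf:
```js
import { hkdf } from '@noble/hashes/hkdf';
import { sha256 } from '@noble/hashes/sha256';
import { chacha20poly1305 } from '@noble/ciphers/chacha';
import { randomBytes } from '@noble/ciphers/webcrypto';
const master = randomBytes(32);
// Derive a fresh subkey per epoch: a colliding 12-byte random nonce is then
// only dangerous within one epoch, not across the key's whole lifetime.
const subkey = (epoch) => hkdf(sha256, master, undefined, `epoch:${epoch}`, 32);
const cipher = chacha20poly1305(subkey(42), randomBytes(12));
```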
### Encryption limits
A "protected message" would mean a probability of `2**-50` that a passive attacker
successfully distinguishes the ciphertext outputs of the AEAD scheme from the outputs
of a random function. See [draft-irtf-cfrg-aead-limits](https://datatracker.ietf.org/doc/draft-irtf-cfrg-aead-limits/) for details.
- Max message size:
- AES-GCM: ~68GB, `2**36-256`
- Salsa, ChaCha, XSalsa, XChaCha: ~256GB, `2**38-64`
- Max amount of protected messages, under same key:
- AES-GCM: `2**32.5`
- Salsa, ChaCha: `2**46`, but only integrity is affected, not confidentiality
- XSalsa, XChaCha: `2**72`
- Max amount of protected messages, across all keys:
- AES-GCM: `2**69/B` where B is max blocks encrypted by a key. Meaning
`2**59` for 1KB, `2**49` for 1MB, `2**39` for 1GB
- Salsa, ChaCha, XSalsa, XChaCha: `2**100`
##### AES internals and block modes
`cipher = encrypt(block, key)`. Data is split into 128-bit blocks. Encrypted in 10/12/14 rounds (for 128/192/256-bit keys). Every round does:
1. **S-box**, table substitution
2. **Shift rows**, cyclic shift left of all rows of data array
3. **Mix columns**, multiplying every column by fixed polynomial
4. **Add round key**, round_key xor i-th column of array
For non-deterministic (not ECB) schemes, initialization vector (IV) is mixed to block/key;
and each new round either depends on previous block's key, or on some counter.
- ECB — simple deterministic replacement. Dangerous: always map x to y. See [AES Penguin](https://words.filippo.io/the-ecb-penguin/)
- CBC — each plaintext block is XORed with the previous ciphertext block before encryption. Hard to use: needs proper padding, also needs a MAC
- CTR — counter mode, allows creating a streaming cipher. Requires a good IV. Parallelizable. OK, but no MAC
- GCM — modern CTR, parallel, with MAC
- SIV — synthetic initialization vector, nonce-misuse-resistant. Guarantees that, when a nonce is repeated,
the only security loss is that identical plaintexts will produce identical ciphertexts.
- XTS — used in hard drives. Similar to ECB (deterministic), but has `[i][j]`
tweak arguments corresponding to sector i and 16-byte block (part of sector) j. Not authenticated!
GCM / SIV are not ideal:
- Conservative key wear-out is `2**32` (4B) msgs
- MAC can be forged: see Poly1305 section above. Same for SIV
## Security
The library has not been independently audited yet.
It is tested against property-based, cross-library and Wycheproof vectors,
and has fuzzing by [Guido Vranken's cryptofuzz](https://github.com/guidovranken/cryptofuzz).
If you see anything unusual: investigate and report.
### Constant-timeness
_JIT-compiler_ and _Garbage Collector_ make it extremely hard to achieve
[timing attack](https://en.wikipedia.org/wiki/Timing_attack) resistance
in a scripting language, which means _no JS library can truly be
constant-time_. Even statically typed Rust, a language without GC,
[makes it harder to achieve constant-time](https://www.chosenplaintext.ca/open-source/rust-timing-shield/security)
for some cases. If your goal is absolute security, don't use any JS lib — including bindings to native ones.
Use low-level libraries & languages. Nonetheless, we're targeting algorithmic constant time.
AES uses T-tables, which means it can't be implemented in constant time in JS.
### Supply chain security
- **Commits** are signed with PGP keys, to prevent forgery. Make sure to verify commit signatures.
- **Releases** are transparent and built on GitHub CI. Make sure to verify [provenance](https://docs.npmjs.com/generating-provenance-statements) logs
- **Rare releasing** is followed to reduce the re-audit burden for end-users
- **Dependencies** are minimized and locked-down:
- If your app has 500 dependencies, any dep could get hacked and you'll be downloading
malware with every install. We make sure to use as few dependencies as possible
- We prevent automatic dependency updates by locking-down version ranges. Every update is checked with `npm-diff`
- **Dev Dependencies** are only used if you want to contribute to the repo. They are disabled for end-users:
- scure-base, micro-bmark and micro-should are developed by the same author and follow identical security practices
- prettier (linter), fast-check (property-based testing) and typescript are used for code quality, vector generation and ts compilation. The packages are big, which makes it hard to audit their source code thoroughly and fully
### Randomness
We're deferring to built-in
[crypto.getRandomValues](https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues)
which is considered cryptographically secure (CSPRNG).
In the past, browsers had bugs that made it weak: it may happen again.
Implementing a userspace CSPRNG to become resilient to such weaknesses
is even worse: there is no reliable userspace source of quality entropy.
## Speed
To summarize, noble is the fastest JS implementation of Salsa, ChaCha and AES.
You can gain additional speed-up and
avoid memory allocations by passing `output`
uint8array into encrypt / decrypt methods.
Benchmark results on Apple M2 with node v20:
```
encrypt (64B)
├─xsalsa20poly1305 x 485,672 ops/sec @ 2μs/op
├─chacha20poly1305 x 466,200 ops/sec @ 2μs/op
├─xchacha20poly1305 x 312,500 ops/sec @ 3μs/op
├─aes-256-gcm x 151,057 ops/sec @ 6μs/op
└─aes-256-gcm-siv x 124,984 ops/sec @ 8μs/op
encrypt (1KB)
├─xsalsa20poly1305 x 146,477 ops/sec @ 6μs/op
├─chacha20poly1305 x 145,518 ops/sec @ 6μs/op
├─xchacha20poly1305 x 126,119 ops/sec @ 7μs/op
├─aes-256-gcm x 43,207 ops/sec @ 23μs/op
└─aes-256-gcm-siv x 39,363 ops/sec @ 25μs/op
encrypt (8KB)
├─xsalsa20poly1305 x 23,773 ops/sec @ 42μs/op
├─chacha20poly1305 x 24,134 ops/sec @ 41μs/op
├─xchacha20poly1305 x 23,520 ops/sec @ 42μs/op
├─aes-256-gcm x 8,420 ops/sec @ 118μs/op
└─aes-256-gcm-siv x 8,126 ops/sec @ 123μs/op
encrypt (1MB)
├─xsalsa20poly1305 x 195 ops/sec @ 5ms/op
├─chacha20poly1305 x 199 ops/sec @ 5ms/op
├─xchacha20poly1305 x 198 ops/sec @ 5ms/op
├─aes-256-gcm x 76 ops/sec @ 13ms/op
└─aes-256-gcm-siv x 78 ops/sec @ 12ms/op
```
Unauthenticated encryption:
```
encrypt (64B)
├─salsa x 1,287,001 ops/sec @ 777ns/op
├─chacha x 1,555,209 ops/sec @ 643ns/op
├─xsalsa x 938,086 ops/sec @ 1μs/op
└─xchacha x 920,810 ops/sec @ 1μs/op
encrypt (1KB)
├─salsa x 353,107 ops/sec @ 2μs/op
├─chacha x 377,216 ops/sec @ 2μs/op
├─xsalsa x 331,674 ops/sec @ 3μs/op
└─xchacha x 336,247 ops/sec @ 2μs/op
encrypt (8KB)
├─salsa x 57,084 ops/sec @ 17μs/op
├─chacha x 59,520 ops/sec @ 16μs/op
├─xsalsa x 57,097 ops/sec @ 17μs/op
└─xchacha x 58,278 ops/sec @ 17μs/op
encrypt (1MB)
├─salsa x 479 ops/sec @ 2ms/op
├─chacha x 491 ops/sec @ 2ms/op
├─xsalsa x 483 ops/sec @ 2ms/op
└─xchacha x 492 ops/sec @ 2ms/op
AES
encrypt (64B)
├─ctr-256 x 689,179 ops/sec @ 1μs/op
├─cbc-256 x 639,795 ops/sec @ 1μs/op
└─ecb-256 x 668,449 ops/sec @ 1μs/op
encrypt (1KB)
├─ctr-256 x 93,668 ops/sec @ 10μs/op
├─cbc-256 x 94,428 ops/sec @ 10μs/op
└─ecb-256 x 151,699 ops/sec @ 6μs/op
encrypt (8KB)
├─ctr-256 x 13,342 ops/sec @ 74μs/op
├─cbc-256 x 13,664 ops/sec @ 73μs/op
└─ecb-256 x 22,426 ops/sec @ 44μs/op
encrypt (1MB)
├─ctr-256 x 106 ops/sec @ 9ms/op
├─cbc-256 x 109 ops/sec @ 9ms/op
└─ecb-256 x 179 ops/sec @ 5ms/op
```
Compare to other implementations:
```
xsalsa20poly1305 (encrypt, 1MB)
├─tweetnacl x 108 ops/sec @ 9ms/op
└─noble x 190 ops/sec @ 5ms/op
chacha20poly1305 (encrypt, 1MB)
├─node x 1,360 ops/sec @ 735μs/op
├─stablelib x 117 ops/sec @ 8ms/op
└─noble x 193 ops/sec @ 5ms/op
chacha (encrypt, 1MB)
├─node x 2,035 ops/sec @ 491μs/op
├─stablelib x 206 ops/sec @ 4ms/op
└─noble x 474 ops/sec @ 2ms/op
ctr-256 (encrypt, 1MB)
├─node x 3,530 ops/sec @ 283μs/op
├─stablelib x 70 ops/sec @ 14ms/op
├─aesjs x 31 ops/sec @ 32ms/op
├─noble-webcrypto x 4,589 ops/sec @ 217μs/op
└─noble x 107 ops/sec @ 9ms/op
cbc-256 (encrypt, 1MB)
├─node x 993 ops/sec @ 1ms/op
├─stablelib x 63 ops/sec @ 15ms/op
├─aesjs x 29 ops/sec @ 34ms/op
├─noble-webcrypto x 1,087 ops/sec @ 919μs/op
└─noble x 110 ops/sec @ 9ms/op
gcm-256 (encrypt, 1MB)
├─node x 3,196 ops/sec @ 312μs/op
├─stablelib x 27 ops/sec @ 36ms/op
├─noble-webcrypto x 4,059 ops/sec @ 246μs/op
└─noble x 74 ops/sec @ 13ms/op
```
## Upgrading
Upgrading from the `micro-aes-gcm` package is simple:
```js
// prepare
const key = Uint8Array.from([
64, 196, 127, 247, 172, 2, 34, 159, 6, 241, 30, 174, 183, 229, 41, 114, 253, 122, 119, 168, 177,
243, 155, 236, 164, 159, 98, 72, 162, 243, 224, 195,
]);
const message = 'Hello world';
// previous
import * as aes from 'micro-aes-gcm';
const ciphertext = await aes.encrypt(key, aes.utils.utf8ToBytes(message));
const plaintext = await aes.decrypt(key, ciphertext);
console.log(aes.utils.bytesToUtf8(plaintext) === message);
// became =>
import { gcm } from '@noble/ciphers/aes';
import { bytesToUtf8, utf8ToBytes } from '@noble/ciphers/utils';
import { managedNonce } from '@noble/ciphers/webcrypto';
const aes = managedNonce(gcm)(key);
const ciphertext = aes.encrypt(utf8ToBytes(message));
const plaintext = aes.decrypt(ciphertext);
console.log(bytesToUtf8(plaintext) === message);
```
## Contributing & testing
1. Clone the repository
2. `npm install` to install build dependencies like TypeScript
3. `npm run build` to compile TypeScript code
4. `npm run test` will execute all main tests
## Resources
Check out [paulmillr.com/noble](https://paulmillr.com/noble/)
for useful resources, articles, documentation and demos
related to the library.
## License
The MIT License (MIT)
Copyright (c) 2023 Paul Miller [(https://paulmillr.com)](https://paulmillr.com)
Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
See LICENSE file.

node_modules/@noble/ciphers/_arx.d.ts generated vendored Normal file

@@ -0,0 +1,14 @@
import { XorStream } from './utils.js';
export declare const sigma: Uint32Array;
export declare function rotl(a: number, b: number): number;
export type CipherCoreFn = (sigma: Uint32Array, key: Uint32Array, nonce: Uint32Array, output: Uint32Array, counter: number, rounds?: number) => void;
export type ExtendNonceFn = (sigma: Uint32Array, key: Uint32Array, input: Uint32Array, output: Uint32Array) => void;
export type CipherOpts = {
allowShortKeys?: boolean;
extendNonceFn?: ExtendNonceFn;
counterLength?: number;
counterRight?: boolean;
rounds?: number;
};
export declare function createCipher(core: CipherCoreFn, opts: CipherOpts): XorStream;
//# sourceMappingURL=_arx.d.ts.map

node_modules/@noble/ciphers/_arx.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"_arx.d.ts","sourceRoot":"","sources":["src/_arx.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,SAAS,EAAkB,MAAM,YAAY,CAAC;AA4CvD,eAAO,MAAM,KAAK,aAAqB,CAAC;AAExC,wBAAgB,IAAI,CAAC,CAAC,EAAE,MAAM,EAAE,CAAC,EAAE,MAAM,GAAG,MAAM,CAEjD;AAED,MAAM,MAAM,YAAY,GAAG,CACzB,KAAK,EAAE,WAAW,EAClB,GAAG,EAAE,WAAW,EAChB,KAAK,EAAE,WAAW,EAClB,MAAM,EAAE,WAAW,EACnB,OAAO,EAAE,MAAM,EACf,MAAM,CAAC,EAAE,MAAM,KACZ,IAAI,CAAC;AAEV,MAAM,MAAM,aAAa,GAAG,CAC1B,KAAK,EAAE,WAAW,EAClB,GAAG,EAAE,WAAW,EAChB,KAAK,EAAE,WAAW,EAClB,MAAM,EAAE,WAAW,KAChB,IAAI,CAAC;AAEV,MAAM,MAAM,UAAU,GAAG;IACvB,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB,aAAa,CAAC,EAAE,aAAa,CAAC;IAC9B,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,YAAY,CAAC,EAAE,OAAO,CAAC;IACvB,MAAM,CAAC,EAAE,MAAM,CAAC;CACjB,CAAC;AAwDF,wBAAgB,YAAY,CAAC,IAAI,EAAE,YAAY,EAAE,IAAI,EAAE,UAAU,GAAG,SAAS,CAsF5E"}

node_modules/@noble/ciphers/_arx.js generated vendored Normal file

@@ -0,0 +1,175 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.createCipher = exports.rotl = exports.sigma = void 0;
// Basic utils for ARX (add-rotate-xor) salsa and chacha ciphers.
const _assert_js_1 = require("./_assert.js");
const utils_js_1 = require("./utils.js");
/*
RFC8439 requires multi-step cipher stream, where
authKey starts with counter: 0, actual msg with counter: 1.
For this, we need a way to re-use nonce / counter:
const counter = new Uint8Array(4);
chacha(..., counter, ...); // counter is now 1
chacha(..., counter, ...); // counter is now 2
This is complicated:
- 32-bit counters are enough, no need for 64-bit: max ArrayBuffer size in JS is 4GB
- Original papers don't allow mutating counters
- Counter overflow is undefined [^1]
- Idea A: allow providing (nonce | counter) instead of just nonce, re-use it
- Caveat: Cannot be re-used through all cases:
- * chacha has (counter | nonce)
- * xchacha has (nonce16 | counter | nonce16)
- Idea B: separate nonce / counter and provide separate API for counter re-use
- Caveat: there are different counter sizes depending on an algorithm.
- salsa & chacha also differ in structures of key & sigma:
salsa20: s[0] | k(4) | s[1] | nonce(2) | ctr(2) | s[2] | k(4) | s[3]
chacha: s(4) | k(8) | ctr(1) | nonce(3)
chacha20orig: s(4) | k(8) | ctr(2) | nonce(2)
- Idea C: helper method such as `setSalsaState(key, nonce, sigma, data)`
- Caveat: we can't re-use counter array
xchacha [^2] uses the subkey and remaining 8 byte nonce with ChaCha20 as normal
(prefixed by 4 NUL bytes, since [RFC8439] specifies a 12-byte nonce).
[^1]: https://mailarchive.ietf.org/arch/msg/cfrg/gsOnTJzcbgG6OqD8Sc0GO5aR_tU/
[^2]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-xchacha#appendix-A.2
*/
// We can't make top-level var depend on utils.utf8ToBytes
// because it's not present in all envs. Creating a similar fn here
const _utf8ToBytes = (str) => Uint8Array.from(str.split('').map((c) => c.charCodeAt(0)));
const sigma16 = _utf8ToBytes('expand 16-byte k');
const sigma32 = _utf8ToBytes('expand 32-byte k');
const sigma16_32 = (0, utils_js_1.u32)(sigma16);
const sigma32_32 = (0, utils_js_1.u32)(sigma32);
exports.sigma = sigma32_32.slice();
function rotl(a, b) {
return (a << b) | (a >>> (32 - b));
}
exports.rotl = rotl;
// Is byte array aligned to 4 byte offset (u32)?
function isAligned32(b) {
return b.byteOffset % 4 === 0;
}
// Salsa and Chacha block length is always 512-bit
const BLOCK_LEN = 64;
const BLOCK_LEN32 = 16;
// new Uint32Array([2**32]) // => Uint32Array(1) [ 0 ]
// new Uint32Array([2**32-1]) // => Uint32Array(1) [ 4294967295 ]
const MAX_COUNTER = 2 ** 32 - 1;
const U32_EMPTY = new Uint32Array();
function runCipher(core, sigma, key, nonce, data, output, counter, rounds) {
const len = data.length;
const block = new Uint8Array(BLOCK_LEN);
const b32 = (0, utils_js_1.u32)(block);
// Make sure that buffers aligned to 4 bytes
const isAligned = isAligned32(data) && isAligned32(output);
const d32 = isAligned ? (0, utils_js_1.u32)(data) : U32_EMPTY;
const o32 = isAligned ? (0, utils_js_1.u32)(output) : U32_EMPTY;
for (let pos = 0; pos < len; counter++) {
core(sigma, key, nonce, b32, counter, rounds);
if (counter >= MAX_COUNTER)
throw new Error('arx: counter overflow');
const take = Math.min(BLOCK_LEN, len - pos);
// aligned to 4 bytes
if (isAligned && take === BLOCK_LEN) {
const pos32 = pos / 4;
if (pos % 4 !== 0)
throw new Error('arx: invalid block position');
for (let j = 0, posj; j < BLOCK_LEN32; j++) {
posj = pos32 + j;
o32[posj] = d32[posj] ^ b32[j];
}
pos += BLOCK_LEN;
continue;
}
for (let j = 0, posj; j < take; j++) {
posj = pos + j;
output[posj] = data[posj] ^ block[j];
}
pos += take;
}
}
function createCipher(core, opts) {
const { allowShortKeys, extendNonceFn, counterLength, counterRight, rounds } = (0, utils_js_1.checkOpts)({ allowShortKeys: false, counterLength: 8, counterRight: false, rounds: 20 }, opts);
if (typeof core !== 'function')
throw new Error('core must be a function');
(0, _assert_js_1.number)(counterLength);
(0, _assert_js_1.number)(rounds);
(0, _assert_js_1.bool)(counterRight);
(0, _assert_js_1.bool)(allowShortKeys);
return (key, nonce, data, output, counter = 0) => {
(0, _assert_js_1.bytes)(key);
(0, _assert_js_1.bytes)(nonce);
(0, _assert_js_1.bytes)(data);
const len = data.length;
if (!output)
output = new Uint8Array(len);
(0, _assert_js_1.bytes)(output);
(0, _assert_js_1.number)(counter);
if (counter < 0 || counter >= MAX_COUNTER)
throw new Error('arx: counter overflow');
if (output.length < len)
throw new Error(`arx: output (${output.length}) is shorter than data (${len})`);
const toClean = [];
// Key & sigma
// key=16 -> sigma16, k=key|key
// key=32 -> sigma32, k=key
let l = key.length, k, sigma;
if (l === 32) {
k = key.slice();
toClean.push(k);
sigma = sigma32_32;
}
else if (l === 16 && allowShortKeys) {
k = new Uint8Array(32);
k.set(key);
k.set(key, 16);
sigma = sigma16_32;
toClean.push(k);
}
else {
throw new Error(`arx: invalid 32-byte key, got length=${l}`);
}
// Nonce
// salsa20: 8 (8-byte counter)
// chacha20orig: 8 (8-byte counter)
// chacha20: 12 (4-byte counter)
// xsalsa20: 24 (16 -> hsalsa, 8 -> old nonce)
// xchacha20: 24 (16 -> hchacha, 8 -> old nonce)
// Align nonce to 4 bytes
if (!isAligned32(nonce)) {
nonce = nonce.slice();
toClean.push(nonce);
}
const k32 = (0, utils_js_1.u32)(k);
// hsalsa & hchacha: handle extended nonce
if (extendNonceFn) {
if (nonce.length !== 24)
throw new Error(`arx: extended nonce must be 24 bytes`);
extendNonceFn(sigma, k32, (0, utils_js_1.u32)(nonce.subarray(0, 16)), k32);
nonce = nonce.subarray(16);
}
// Handle nonce counter
const nonceNcLen = 16 - counterLength;
if (nonceNcLen !== nonce.length)
throw new Error(`arx: nonce must be ${nonceNcLen} or 16 bytes`);
// Pad counter when nonce is 64 bit
if (nonceNcLen !== 12) {
const nc = new Uint8Array(12);
nc.set(nonce, counterRight ? 0 : 12 - nonce.length);
nonce = nc;
toClean.push(nonce);
}
const n32 = (0, utils_js_1.u32)(nonce);
runCipher(core, sigma, k32, n32, data, output, counter, rounds);
while (toClean.length > 0)
toClean.pop().fill(0);
return output;
};
}
exports.createCipher = createCipher;
//# sourceMappingURL=_arx.js.map

node_modules/@noble/ciphers/_arx.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

node_modules/@noble/ciphers/_assert.d.ts generated vendored Normal file

@@ -0,0 +1,24 @@
declare function number(n: number): void;
declare function bool(b: boolean): void;
export declare function isBytes(a: unknown): a is Uint8Array;
declare function bytes(b: Uint8Array | undefined, ...lengths: number[]): void;
export type Hash = {
(data: Uint8Array): Uint8Array;
blockLen: number;
outputLen: number;
create: any;
};
declare function hash(hash: Hash): void;
declare function exists(instance: any, checkFinished?: boolean): void;
declare function output(out: any, instance: any): void;
export { number, bool, bytes, hash, exists, output };
declare const assert: {
number: typeof number;
bool: typeof bool;
bytes: typeof bytes;
hash: typeof hash;
exists: typeof exists;
output: typeof output;
};
export default assert;
//# sourceMappingURL=_assert.d.ts.map

node_modules/@noble/ciphers/_assert.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"_assert.d.ts","sourceRoot":"","sources":["src/_assert.ts"],"names":[],"mappings":"AAAA,iBAAS,MAAM,CAAC,CAAC,EAAE,MAAM,QAExB;AAED,iBAAS,IAAI,CAAC,CAAC,EAAE,OAAO,QAEvB;AAED,wBAAgB,OAAO,CAAC,CAAC,EAAE,OAAO,GAAG,CAAC,IAAI,UAAU,CAKnD;AAED,iBAAS,KAAK,CAAC,CAAC,EAAE,UAAU,GAAG,SAAS,EAAE,GAAG,OAAO,EAAE,MAAM,EAAE,QAI7D;AAED,MAAM,MAAM,IAAI,GAAG;IACjB,CAAC,IAAI,EAAE,UAAU,GAAG,UAAU,CAAC;IAC/B,QAAQ,EAAE,MAAM,CAAC;IACjB,SAAS,EAAE,MAAM,CAAC;IAClB,MAAM,EAAE,GAAG,CAAC;CACb,CAAC;AACF,iBAAS,IAAI,CAAC,IAAI,EAAE,IAAI,QAKvB;AAED,iBAAS,MAAM,CAAC,QAAQ,EAAE,GAAG,EAAE,aAAa,UAAO,QAGlD;AAED,iBAAS,MAAM,CAAC,GAAG,EAAE,GAAG,EAAE,QAAQ,EAAE,GAAG,QAMtC;AAED,OAAO,EAAE,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,CAAC;AACrD,QAAA,MAAM,MAAM;;;;;;;CAAgD,CAAC;AAC7D,eAAe,MAAM,CAAC"}

node_modules/@noble/ciphers/_assert.js generated vendored Normal file

@@ -0,0 +1,50 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.output = exports.exists = exports.hash = exports.bytes = exports.bool = exports.number = exports.isBytes = void 0;
function number(n) {
if (!Number.isSafeInteger(n) || n < 0)
throw new Error(`positive integer expected, not ${n}`);
}
exports.number = number;
function bool(b) {
if (typeof b !== 'boolean')
throw new Error(`boolean expected, not ${b}`);
}
exports.bool = bool;
function isBytes(a) {
return (a instanceof Uint8Array ||
(a != null && typeof a === 'object' && a.constructor.name === 'Uint8Array'));
}
exports.isBytes = isBytes;
function bytes(b, ...lengths) {
if (!isBytes(b))
throw new Error('Uint8Array expected');
if (lengths.length > 0 && !lengths.includes(b.length))
throw new Error(`Uint8Array expected of length ${lengths}, not of length=${b.length}`);
}
exports.bytes = bytes;
function hash(hash) {
if (typeof hash !== 'function' || typeof hash.create !== 'function')
throw new Error('hash must be wrapped by utils.wrapConstructor');
number(hash.outputLen);
number(hash.blockLen);
}
exports.hash = hash;
function exists(instance, checkFinished = true) {
if (instance.destroyed)
throw new Error('Hash instance has been destroyed');
if (checkFinished && instance.finished)
throw new Error('Hash#digest() has already been called');
}
exports.exists = exists;
function output(out, instance) {
bytes(out);
const min = instance.outputLen;
if (out.length < min) {
throw new Error(`digestInto() expects output buffer of length at least ${min}`);
}
}
exports.output = output;
const assert = { number, bool, bytes, hash, exists, output };
exports.default = assert;
//# sourceMappingURL=_assert.js.map

node_modules/@noble/ciphers/_assert.js.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"_assert.js","sourceRoot":"","sources":["src/_assert.ts"],"names":[],"mappings":";;;AAAA,SAAS,MAAM,CAAC,CAAS;IACvB,IAAI,CAAC,MAAM,CAAC,aAAa,CAAC,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC;QAAE,MAAM,IAAI,KAAK,CAAC,kCAAkC,CAAC,EAAE,CAAC,CAAC;AAChG,CAAC;AA6CQ,wBAAM;AA3Cf,SAAS,IAAI,CAAC,CAAU;IACtB,IAAI,OAAO,CAAC,KAAK,SAAS;QAAE,MAAM,IAAI,KAAK,CAAC,yBAAyB,CAAC,EAAE,CAAC,CAAC;AAC5E,CAAC;AAyCgB,oBAAI;AAvCrB,SAAgB,OAAO,CAAC,CAAU;IAChC,OAAO,CACL,CAAC,YAAY,UAAU;QACvB,CAAC,CAAC,IAAI,IAAI,IAAI,OAAO,CAAC,KAAK,QAAQ,IAAI,CAAC,CAAC,WAAW,CAAC,IAAI,KAAK,YAAY,CAAC,CAC5E,CAAC;AACJ,CAAC;AALD,0BAKC;AAED,SAAS,KAAK,CAAC,CAAyB,EAAE,GAAG,OAAiB;IAC5D,IAAI,CAAC,OAAO,CAAC,CAAC,CAAC;QAAE,MAAM,IAAI,KAAK,CAAC,qBAAqB,CAAC,CAAC;IACxD,IAAI,OAAO,CAAC,MAAM,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,CAAC,CAAC,MAAM,CAAC;QACnD,MAAM,IAAI,KAAK,CAAC,iCAAiC,OAAO,mBAAmB,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC;AAC3F,CAAC;AA4BsB,sBAAK;AApB5B,SAAS,IAAI,CAAC,IAAU;IACtB,IAAI,OAAO,IAAI,KAAK,UAAU,IAAI,OAAO,IAAI,CAAC,MAAM,KAAK,UAAU;QACjE,MAAM,IAAI,KAAK,CAAC,+CAA+C,CAAC,CAAC;IACnE,MAAM,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;IACvB,MAAM,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;AACxB,CAAC;AAe6B,oBAAI;AAblC,SAAS,MAAM,CAAC,QAAa,EAAE,aAAa,GAAG,IAAI;IACjD,IAAI,QAAQ,CAAC,SAAS;QAAE,MAAM,IAAI,KAAK,CAAC,kCAAkC,CAAC,CAAC;IAC5E,IAAI,aAAa,IAAI,QAAQ,CAAC,QAAQ;QAAE,MAAM,IAAI,KAAK,CAAC,uCAAuC,CAAC,CAAC;AACnG,CAAC;AAUmC,wBAAM;AAR1C,SAAS,MAAM,CAAC,GAAQ,EAAE,QAAa;IACrC,KAAK,CAAC,GAAG,CAAC,CAAC;IACX,MAAM,GAAG,GAAG,QAAQ,CAAC,SAAS,CAAC;IAC/B,IAAI,GAAG,CAAC,MAAM,GAAG,GAAG,EAAE,CAAC;QACrB,MAAM,IAAI,KAAK,CAAC,yDAAyD,GAAG,EAAE,CAAC,CAAC;IAClF,CAAC;AACH,CAAC;AAE2C,wBAAM;AAClD,MAAM,MAAM,GAAG,EAAE,MAAM,EAAE,IAAI,EAAE,KAAK,EAAE,IAAI,EAAE,MAAM,EAAE,MAAM,EAAE,CAAC;AAC7D,kBAAe,MAAM,CAAC"}

node_modules/@noble/ciphers/_micro.d.ts generated vendored Normal file

@@ -0,0 +1,70 @@
/*! noble-ciphers - MIT License (c) 2023 Paul Miller (paulmillr.com) */
import { Cipher, XorStream } from './utils.js';
export declare function hsalsa(s: Uint32Array, k: Uint32Array, i: Uint32Array, o32: Uint32Array): void;
export declare function hchacha(s: Uint32Array, k: Uint32Array, i: Uint32Array, o32: Uint32Array): void;
/**
* salsa20, 12-byte nonce.
*/
export declare const salsa20: XorStream;
/**
* xsalsa20, 24-byte nonce.
*/
export declare const xsalsa20: XorStream;
/**
* chacha20 non-RFC, original version by djb. 8-byte nonce, 8-byte counter.
*/
export declare const chacha20orig: XorStream;
/**
* chacha20 RFC 8439 (IETF / TLS). 12-byte nonce, 4-byte counter.
*/
export declare const chacha20: XorStream;
/**
* xchacha20 eXtended-nonce. https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-xchacha
*/
export declare const xchacha20: XorStream;
/**
* 8-round chacha from the original paper.
*/
export declare const chacha8: XorStream;
/**
* 12-round chacha from the original paper.
*/
export declare const chacha12: XorStream;
export declare function poly1305(msg: Uint8Array, key: Uint8Array): Uint8Array;
/**
* xsalsa20-poly1305 eXtended-nonce (24 bytes) salsa.
*/
export declare const xsalsa20poly1305: ((key: Uint8Array, nonce: Uint8Array) => {
encrypt: (plaintext: Uint8Array) => Uint8Array;
decrypt: (ciphertext: Uint8Array) => Uint8Array;
}) & {
blockSize: number;
nonceLength: number;
tagLength: number;
};
/**
* Alias to xsalsa20-poly1305
*/
export declare function secretbox(key: Uint8Array, nonce: Uint8Array): {
seal: (plaintext: Uint8Array) => Uint8Array;
open: (ciphertext: Uint8Array) => Uint8Array;
};
export declare const _poly1305_aead: (fn: XorStream) => (key: Uint8Array, nonce: Uint8Array, AAD?: Uint8Array) => Cipher;
/**
* chacha20-poly1305 12-byte-nonce chacha.
*/
export declare const chacha20poly1305: ((key: Uint8Array, nonce: Uint8Array, AAD?: Uint8Array) => Cipher) & {
blockSize: number;
nonceLength: number;
tagLength: number;
};
/**
* xchacha20-poly1305 eXtended-nonce (24 bytes) chacha.
* With 24-byte nonce, it's safe to use fill it with random (CSPRNG).
*/
export declare const xchacha20poly1305: ((key: Uint8Array, nonce: Uint8Array, AAD?: Uint8Array) => Cipher) & {
blockSize: number;
nonceLength: number;
tagLength: number;
};
//# sourceMappingURL=_micro.d.ts.map

node_modules/@noble/ciphers/_micro.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"_micro.d.ts","sourceRoot":"","sources":["src/_micro.ts"],"names":[],"mappings":"AAAA,uEAAuE;AAEvE,OAAO,EACL,MAAM,EAAE,SAAS,EAElB,MAAM,YAAY,CAAC;AA+EpB,wBAAgB,MAAM,CAAC,CAAC,EAAE,WAAW,EAAE,CAAC,EAAE,WAAW,EAAE,CAAC,EAAE,WAAW,EAAE,GAAG,EAAE,WAAW,QAatF;AAuBD,wBAAgB,OAAO,CAAC,CAAC,EAAE,WAAW,EAAE,CAAC,EAAE,WAAW,EAAE,CAAC,EAAE,WAAW,EAAE,GAAG,EAAE,WAAW,QAavF;AAED;;GAEG;AACH,eAAO,MAAM,OAAO,WAGlB,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,QAAQ,WAGnB,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,YAAY,WAIvB,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,QAAQ,WAGnB,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,SAAS,WAIpB,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,OAAO,WAIlB,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,QAAQ,WAInB,CAAC;AAQH,wBAAgB,QAAQ,CAAC,GAAG,EAAE,UAAU,EAAE,GAAG,EAAE,UAAU,GAAG,UAAU,CAcrE;AA4BD;;GAEG;AACH,eAAO,MAAM,gBAAgB,SAEI,UAAU,SAAS,UAAU;yBAInC,UAAU;0BAST,UAAU;;;;;CAWrC,CAAC;AAEF;;GAEG;AACH,wBAAgB,SAAS,CAAC,GAAG,EAAE,UAAU,EAAE,KAAK,EAAE,UAAU;;;EAG3D;AAED,eAAO,MAAM,cAAc,OACpB,SAAS,WACR,UAAU,SAAS,UAAU,QAAQ,UAAU,KAAG,MAuBvD,CAAC;AAEJ;;GAEG;AACH,eAAO,MAAM,gBAAgB,SA5BrB,UAAU,SAAS,UAAU,QAAQ,UAAU,KAAG,MAAM;;;;CA+B/D,CAAC;AAEF;;;GAGG;AACH,eAAO,MAAM,iBAAiB,SArCtB,UAAU,SAAS,UAAU,QAAQ,UAAU,KAAG,MAAM;;;;CAwC/D,CAAC"}

node_modules/@noble/ciphers/_micro.js generated vendored Normal file

@@ -0,0 +1,295 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.xchacha20poly1305 = exports.chacha20poly1305 = exports._poly1305_aead = exports.secretbox = exports.xsalsa20poly1305 = exports.poly1305 = exports.chacha12 = exports.chacha8 = exports.xchacha20 = exports.chacha20 = exports.chacha20orig = exports.xsalsa20 = exports.salsa20 = exports.hchacha = exports.hsalsa = void 0;
/*! noble-ciphers - MIT License (c) 2023 Paul Miller (paulmillr.com) */
// prettier-ignore
const utils_js_1 = require("./utils.js");
const _arx_js_1 = require("./_arx.js");
const _assert_js_1 = require("./_assert.js");
/*
noble-ciphers-micro: more auditable, but slower version of salsa20, chacha & poly1305.
Implements the same algorithms that are present in other files, but without
unrolled loops (https://en.wikipedia.org/wiki/Loop_unrolling).
*/
function bytesToNumberLE(bytes) {
return (0, utils_js_1.hexToNumber)((0, utils_js_1.bytesToHex)(Uint8Array.from(bytes).reverse()));
}
function numberToBytesLE(n, len) {
return (0, utils_js_1.numberToBytesBE)(n, len).reverse();
}
function salsaQR(x, a, b, c, d) {
x[b] ^= (0, _arx_js_1.rotl)((x[a] + x[d]) | 0, 7);
x[c] ^= (0, _arx_js_1.rotl)((x[b] + x[a]) | 0, 9);
x[d] ^= (0, _arx_js_1.rotl)((x[c] + x[b]) | 0, 13);
x[a] ^= (0, _arx_js_1.rotl)((x[d] + x[c]) | 0, 18);
}
// prettier-ignore
function chachaQR(x, a, b, c, d) {
x[a] = (x[a] + x[b]) | 0;
x[d] = (0, _arx_js_1.rotl)(x[d] ^ x[a], 16);
x[c] = (x[c] + x[d]) | 0;
x[b] = (0, _arx_js_1.rotl)(x[b] ^ x[c], 12);
x[a] = (x[a] + x[b]) | 0;
x[d] = (0, _arx_js_1.rotl)(x[d] ^ x[a], 8);
x[c] = (x[c] + x[d]) | 0;
x[b] = (0, _arx_js_1.rotl)(x[b] ^ x[c], 7);
}
function salsaRound(x, rounds = 20) {
for (let r = 0; r < rounds; r += 2) {
salsaQR(x, 0, 4, 8, 12);
salsaQR(x, 5, 9, 13, 1);
salsaQR(x, 10, 14, 2, 6);
salsaQR(x, 15, 3, 7, 11);
salsaQR(x, 0, 1, 2, 3);
salsaQR(x, 5, 6, 7, 4);
salsaQR(x, 10, 11, 8, 9);
salsaQR(x, 15, 12, 13, 14);
}
}
function chachaRound(x, rounds = 20) {
for (let r = 0; r < rounds; r += 2) {
chachaQR(x, 0, 4, 8, 12);
chachaQR(x, 1, 5, 9, 13);
chachaQR(x, 2, 6, 10, 14);
chachaQR(x, 3, 7, 11, 15);
chachaQR(x, 0, 5, 10, 15);
chachaQR(x, 1, 6, 11, 12);
chachaQR(x, 2, 7, 8, 13);
chachaQR(x, 3, 4, 9, 14);
}
}
function salsaCore(s, k, n, out, cnt, rounds = 20) {
// prettier-ignore
const y = new Uint32Array([
s[0], k[0], k[1], k[2], // "expa" Key Key Key
k[3], s[1], n[0], n[1], // Key "nd 3" Nonce Nonce
cnt, 0, s[2], k[4], // Pos. Pos. "2-by" Key
k[5], k[6], k[7], s[3], // Key Key Key "te k"
]);
const x = y.slice();
salsaRound(x, rounds);
for (let i = 0; i < 16; i++)
out[i] = (y[i] + x[i]) | 0;
}
// prettier-ignore
function hsalsa(s, k, i, o32) {
const x = new Uint32Array([
s[0], k[0], k[1], k[2],
k[3], s[1], i[0], i[1],
i[2], i[3], s[2], k[4],
k[5], k[6], k[7], s[3]
]);
salsaRound(x, 20);
let oi = 0;
o32[oi++] = x[0];
o32[oi++] = x[5];
o32[oi++] = x[10];
o32[oi++] = x[15];
o32[oi++] = x[6];
o32[oi++] = x[7];
o32[oi++] = x[8];
o32[oi++] = x[9];
}
exports.hsalsa = hsalsa;
function chachaCore(s, k, n, out, cnt, rounds = 20) {
// prettier-ignore
const y = new Uint32Array([
s[0], s[1], s[2], s[3], // "expa" "nd 3" "2-by" "te k"
k[0], k[1], k[2], k[3], // Key Key Key Key
k[4], k[5], k[6], k[7], // Key Key Key Key
cnt, n[0], n[1], n[2], // Counter Counter Nonce Nonce
]);
const x = y.slice();
chachaRound(x, rounds);
for (let i = 0; i < 16; i++)
out[i] = (y[i] + x[i]) | 0;
}
// prettier-ignore
function hchacha(s, k, i, o32) {
const x = new Uint32Array([
s[0], s[1], s[2], s[3],
k[0], k[1], k[2], k[3],
k[4], k[5], k[6], k[7],
i[0], i[1], i[2], i[3],
]);
chachaRound(x, 20);
let oi = 0;
o32[oi++] = x[0];
o32[oi++] = x[1];
o32[oi++] = x[2];
o32[oi++] = x[3];
o32[oi++] = x[12];
o32[oi++] = x[13];
o32[oi++] = x[14];
o32[oi++] = x[15];
}
exports.hchacha = hchacha;
/**
* salsa20, 12-byte nonce.
*/
exports.salsa20 = (0, _arx_js_1.createCipher)(salsaCore, {
allowShortKeys: true,
counterRight: true,
});
/**
* xsalsa20, 24-byte nonce.
*/
exports.xsalsa20 = (0, _arx_js_1.createCipher)(salsaCore, {
counterRight: true,
extendNonceFn: hsalsa,
});
/**
* chacha20 non-RFC, original version by djb. 8-byte nonce, 8-byte counter.
*/
exports.chacha20orig = (0, _arx_js_1.createCipher)(chachaCore, {
allowShortKeys: true,
counterRight: false,
counterLength: 8,
});
/**
* chacha20 RFC 8439 (IETF / TLS). 12-byte nonce, 4-byte counter.
*/
exports.chacha20 = (0, _arx_js_1.createCipher)(chachaCore, {
counterRight: false,
counterLength: 4,
});
/**
* xchacha20 eXtended-nonce. https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-xchacha
*/
exports.xchacha20 = (0, _arx_js_1.createCipher)(chachaCore, {
counterRight: false,
counterLength: 8,
extendNonceFn: hchacha,
});
/**
* 8-round chacha from the original paper.
*/
exports.chacha8 = (0, _arx_js_1.createCipher)(chachaCore, {
counterRight: false,
counterLength: 4,
rounds: 8,
});
/**
* 12-round chacha from the original paper.
*/
exports.chacha12 = (0, _arx_js_1.createCipher)(chachaCore, {
counterRight: false,
counterLength: 4,
rounds: 12,
});
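// Editor's illustrative sketch; not part of the vendored file. It shows the
// XorStream shape shared by the exports above: one call XORs the keystream
// into the data, so the same call both encrypts and decrypts. The fixed key
// and nonce are placeholder test values; real ones must come from a CSPRNG,
// and a (key, nonce) pair must never be reused.
{
const key = new Uint8Array(32).fill(1); // 32-byte key (test value only)
const nonce = new Uint8Array(12).fill(2); // 12-byte RFC 8439 nonce (test value)
const msg = new TextEncoder().encode('attack at dawn');
const ct = (0, exports.chacha20)(key, nonce, msg); // encrypt
const pt = (0, exports.chacha20)(key, nonce, ct); // decrypt: same XOR stream
}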
const POW_2_130_5 = BigInt(2) ** BigInt(130) - BigInt(5);
const POW_2_128_1 = BigInt(2) ** BigInt(16 * 8) - BigInt(1);
const CLAMP_R = BigInt('0x0ffffffc0ffffffc0ffffffc0fffffff');
const _0 = BigInt(0);
const _1 = BigInt(1);
// Could be sped up using BigUint64Array, but that would be more complicated
function poly1305(msg, key) {
(0, _assert_js_1.bytes)(msg);
(0, _assert_js_1.bytes)(key);
let acc = _0;
const r = bytesToNumberLE(key.subarray(0, 16)) & CLAMP_R;
const s = bytesToNumberLE(key.subarray(16));
// Process by 16 byte chunks
for (let i = 0; i < msg.length; i += 16) {
const m = msg.subarray(i, i + 16);
const n = bytesToNumberLE(m) | (_1 << BigInt(8 * m.length));
acc = ((acc + n) * r) % POW_2_130_5;
}
const res = (acc + s) & POW_2_128_1;
return numberToBytesLE(res, 16);
}
exports.poly1305 = poly1305;
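// Editor's illustrative sketch; not part of the vendored file. Poly1305 is a
// one-time authenticator: a 32-byte key must authenticate at most one message,
// which is why the AEAD constructions below derive a fresh authKey from the
// stream cipher for every (key, nonce) pair. Test values only.
{
const otk = new Uint8Array(32).fill(3); // one-time key (test value only)
const tag = poly1305(new TextEncoder().encode('msg'), otk); // 16-byte tag
}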
function computeTag(fn, key, nonce, ciphertext, AAD) {
const res = [];
if (AAD) {
res.push(AAD);
const leftover = AAD.length % 16;
if (leftover > 0)
res.push(new Uint8Array(16 - leftover));
}
res.push(ciphertext);
const leftover = ciphertext.length % 16;
if (leftover > 0)
res.push(new Uint8Array(16 - leftover));
// Lengths
const num = new Uint8Array(16);
const view = (0, utils_js_1.createView)(num);
(0, utils_js_1.setBigUint64)(view, 0, BigInt(AAD ? AAD.length : 0), true);
(0, utils_js_1.setBigUint64)(view, 8, BigInt(ciphertext.length), true);
res.push(num);
const authKey = fn(key, nonce, new Uint8Array(32));
return poly1305((0, utils_js_1.concatBytes)(...res), authKey);
}
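// Editor's note: the buffer assembled above follows the RFC 8439 MAC layout,
// pad16(AAD) || pad16(ciphertext) || le64(AAD.length) || le64(ciphertext.length),
// where pad16 right-pads with zero bytes to a 16-byte boundary and authKey is
// the first 32 bytes of keystream for this (key, nonce) pair.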
/**
* xsalsa20-poly1305 eXtended-nonce (24 bytes) salsa.
*/
exports.xsalsa20poly1305 = (0, utils_js_1.wrapCipher)({ blockSize: 64, nonceLength: 24, tagLength: 16 }, function xsalsa20poly1305(key, nonce) {
(0, _assert_js_1.bytes)(key);
(0, _assert_js_1.bytes)(nonce);
return {
encrypt: (plaintext) => {
(0, _assert_js_1.bytes)(plaintext);
const m = (0, utils_js_1.concatBytes)(new Uint8Array(32), plaintext);
const c = (0, exports.xsalsa20)(key, nonce, m);
const authKey = c.subarray(0, 32);
const data = c.subarray(32);
const tag = poly1305(data, authKey);
return (0, utils_js_1.concatBytes)(tag, data);
},
decrypt: (ciphertext) => {
(0, _assert_js_1.bytes)(ciphertext);
if (ciphertext.length < 16)
throw new Error('encrypted data must be at least 16 bytes');
const c = (0, utils_js_1.concatBytes)(new Uint8Array(16), ciphertext);
const authKey = (0, exports.xsalsa20)(key, nonce, new Uint8Array(32));
const tag = poly1305(c.subarray(32), authKey);
if (!(0, utils_js_1.equalBytes)(c.subarray(16, 32), tag))
throw new Error('invalid poly1305 tag');
return (0, exports.xsalsa20)(key, nonce, c).subarray(32);
},
};
});
/**
* Alias to xsalsa20-poly1305
*/
function secretbox(key, nonce) {
const xs = (0, exports.xsalsa20poly1305)(key, nonce);
return { seal: xs.encrypt, open: xs.decrypt };
}
exports.secretbox = secretbox;
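// Editor's illustrative sketch; not part of the vendored file. secretbox
// mirrors NaCl: seal prepends the 16-byte Poly1305 tag, open verifies and
// strips it. Test values only; use a CSPRNG in practice.
{
const key = new Uint8Array(32).fill(4); // test value only
const nonce = new Uint8Array(24).fill(5); // 24-byte nonce (test value only)
const box = secretbox(key, nonce);
const sealed = box.seal(new TextEncoder().encode('hi')); // tag || ciphertext
const opened = box.open(sealed); // throws 'invalid poly1305 tag' if altered
}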
const _poly1305_aead = (fn) => (key, nonce, AAD) => {
const tagLength = 16;
const keyLength = 32;
(0, _assert_js_1.bytes)(key, keyLength);
(0, _assert_js_1.bytes)(nonce);
return {
encrypt: (plaintext) => {
(0, _assert_js_1.bytes)(plaintext);
const res = fn(key, nonce, plaintext, undefined, 1);
const tag = computeTag(fn, key, nonce, res, AAD);
return (0, utils_js_1.concatBytes)(res, tag);
},
decrypt: (ciphertext) => {
(0, _assert_js_1.bytes)(ciphertext);
if (ciphertext.length < tagLength)
throw new Error(`encrypted data must be at least ${tagLength} bytes`);
const passedTag = ciphertext.subarray(-tagLength);
const data = ciphertext.subarray(0, -tagLength);
const tag = computeTag(fn, key, nonce, data, AAD);
if (!(0, utils_js_1.equalBytes)(passedTag, tag))
throw new Error('invalid poly1305 tag');
return fn(key, nonce, data, undefined, 1);
},
};
};
exports._poly1305_aead = _poly1305_aead;
/**
* chacha20-poly1305 12-byte-nonce chacha.
*/
exports.chacha20poly1305 = (0, utils_js_1.wrapCipher)({ blockSize: 64, nonceLength: 12, tagLength: 16 }, (0, exports._poly1305_aead)(exports.chacha20));
/**
* xchacha20-poly1305 eXtended-nonce (24 bytes) chacha.
* With a 24-byte nonce, it's safe to fill it with random bytes (CSPRNG).
*/
exports.xchacha20poly1305 = (0, utils_js_1.wrapCipher)({ blockSize: 64, nonceLength: 24, tagLength: 16 }, (0, exports._poly1305_aead)(exports.xchacha20));
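// Editor's illustrative sketch; not part of the vendored file. With a 24-byte
// nonce, random nonces are safe; decrypt authenticates before returning.
// Test values only.
{
const key = new Uint8Array(32).fill(6); // test value only
const nonce = new Uint8Array(24).fill(7); // random in practice
const aead = (0, exports.xchacha20poly1305)(key, nonce);
const ct = aead.encrypt(new TextEncoder().encode('hello')); // ciphertext || 16-byte tag
const pt = aead.decrypt(ct); // throws 'invalid poly1305 tag' if modified
}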
//# sourceMappingURL=_micro.js.map

1
node_modules/@noble/ciphers/_micro.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

15
node_modules/@noble/ciphers/_poly1305.d.ts generated vendored Normal file

@@ -0,0 +1,15 @@
import { Input, Hash } from './utils.js';
export type CHash = ReturnType<typeof wrapConstructorWithKey>;
export declare function wrapConstructorWithKey<H extends Hash<H>>(hashCons: (key: Input) => Hash<H>): {
(msg: Input, key: Input): Uint8Array;
outputLen: number;
blockLen: number;
create(key: Input): Hash<H>;
};
export declare const poly1305: {
(msg: Input, key: Input): Uint8Array;
outputLen: number;
blockLen: number;
create(key: Input): Hash<Hash<unknown>>;
};
//# sourceMappingURL=_poly1305.d.ts.map

1
node_modules/@noble/ciphers/_poly1305.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"_poly1305.d.ts","sourceRoot":"","sources":["src/_poly1305.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,KAAK,EAAW,IAAI,EAAE,MAAM,YAAY,CAAC;AAkRlD,MAAM,MAAM,KAAK,GAAG,UAAU,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAC9D,wBAAgB,sBAAsB,CAAC,CAAC,SAAS,IAAI,CAAC,CAAC,CAAC,EAAE,QAAQ,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI,CAAC,CAAC,CAAC;UACrE,KAAK,OAAO,KAAK,GAAG,UAAU;;;gBAI7B,KAAK;EAE3B;AAED,eAAO,MAAM,QAAQ;UARC,KAAK,OAAO,KAAK,GAAG,UAAU;;;gBAI7B,KAAK;CAI8C,CAAC"}

268
node_modules/@noble/ciphers/_poly1305.js generated vendored Normal file

@@ -0,0 +1,268 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.poly1305 = exports.wrapConstructorWithKey = void 0;
const _assert_js_1 = require("./_assert.js");
const utils_js_1 = require("./utils.js");
// Poly1305 is a fast and parallel secret-key message-authentication code.
// https://cr.yp.to/mac.html, https://cr.yp.to/mac/poly1305-20050329.pdf
// https://datatracker.ietf.org/doc/html/rfc8439
// Based on Public Domain poly1305-donna https://github.com/floodyberry/poly1305-donna
const u8to16 = (a, i) => (a[i++] & 0xff) | ((a[i++] & 0xff) << 8);
class Poly1305 {
constructor(key) {
this.blockLen = 16;
this.outputLen = 16;
this.buffer = new Uint8Array(16);
this.r = new Uint16Array(10);
this.h = new Uint16Array(10);
this.pad = new Uint16Array(8);
this.pos = 0;
this.finished = false;
key = (0, utils_js_1.toBytes)(key);
(0, _assert_js_1.bytes)(key, 32);
const t0 = u8to16(key, 0);
const t1 = u8to16(key, 2);
const t2 = u8to16(key, 4);
const t3 = u8to16(key, 6);
const t4 = u8to16(key, 8);
const t5 = u8to16(key, 10);
const t6 = u8to16(key, 12);
const t7 = u8to16(key, 14);
// https://github.com/floodyberry/poly1305-donna/blob/e6ad6e091d30d7f4ec2d4f978be1fcfcbce72781/poly1305-donna-16.h#L47
this.r[0] = t0 & 0x1fff;
this.r[1] = ((t0 >>> 13) | (t1 << 3)) & 0x1fff;
this.r[2] = ((t1 >>> 10) | (t2 << 6)) & 0x1f03;
this.r[3] = ((t2 >>> 7) | (t3 << 9)) & 0x1fff;
this.r[4] = ((t3 >>> 4) | (t4 << 12)) & 0x00ff;
this.r[5] = (t4 >>> 1) & 0x1ffe;
this.r[6] = ((t4 >>> 14) | (t5 << 2)) & 0x1fff;
this.r[7] = ((t5 >>> 11) | (t6 << 5)) & 0x1f81;
this.r[8] = ((t6 >>> 8) | (t7 << 8)) & 0x1fff;
this.r[9] = (t7 >>> 5) & 0x007f;
for (let i = 0; i < 8; i++)
this.pad[i] = u8to16(key, 16 + 2 * i);
}
process(data, offset, isLast = false) {
const hibit = isLast ? 0 : 1 << 11;
const { h, r } = this;
const r0 = r[0];
const r1 = r[1];
const r2 = r[2];
const r3 = r[3];
const r4 = r[4];
const r5 = r[5];
const r6 = r[6];
const r7 = r[7];
const r8 = r[8];
const r9 = r[9];
const t0 = u8to16(data, offset + 0);
const t1 = u8to16(data, offset + 2);
const t2 = u8to16(data, offset + 4);
const t3 = u8to16(data, offset + 6);
const t4 = u8to16(data, offset + 8);
const t5 = u8to16(data, offset + 10);
const t6 = u8to16(data, offset + 12);
const t7 = u8to16(data, offset + 14);
let h0 = h[0] + (t0 & 0x1fff);
let h1 = h[1] + (((t0 >>> 13) | (t1 << 3)) & 0x1fff);
let h2 = h[2] + (((t1 >>> 10) | (t2 << 6)) & 0x1fff);
let h3 = h[3] + (((t2 >>> 7) | (t3 << 9)) & 0x1fff);
let h4 = h[4] + (((t3 >>> 4) | (t4 << 12)) & 0x1fff);
let h5 = h[5] + ((t4 >>> 1) & 0x1fff);
let h6 = h[6] + (((t4 >>> 14) | (t5 << 2)) & 0x1fff);
let h7 = h[7] + (((t5 >>> 11) | (t6 << 5)) & 0x1fff);
let h8 = h[8] + (((t6 >>> 8) | (t7 << 8)) & 0x1fff);
let h9 = h[9] + ((t7 >>> 5) | hibit);
let c = 0;
let d0 = c + h0 * r0 + h1 * (5 * r9) + h2 * (5 * r8) + h3 * (5 * r7) + h4 * (5 * r6);
c = d0 >>> 13;
d0 &= 0x1fff;
d0 += h5 * (5 * r5) + h6 * (5 * r4) + h7 * (5 * r3) + h8 * (5 * r2) + h9 * (5 * r1);
c += d0 >>> 13;
d0 &= 0x1fff;
let d1 = c + h0 * r1 + h1 * r0 + h2 * (5 * r9) + h3 * (5 * r8) + h4 * (5 * r7);
c = d1 >>> 13;
d1 &= 0x1fff;
d1 += h5 * (5 * r6) + h6 * (5 * r5) + h7 * (5 * r4) + h8 * (5 * r3) + h9 * (5 * r2);
c += d1 >>> 13;
d1 &= 0x1fff;
let d2 = c + h0 * r2 + h1 * r1 + h2 * r0 + h3 * (5 * r9) + h4 * (5 * r8);
c = d2 >>> 13;
d2 &= 0x1fff;
d2 += h5 * (5 * r7) + h6 * (5 * r6) + h7 * (5 * r5) + h8 * (5 * r4) + h9 * (5 * r3);
c += d2 >>> 13;
d2 &= 0x1fff;
let d3 = c + h0 * r3 + h1 * r2 + h2 * r1 + h3 * r0 + h4 * (5 * r9);
c = d3 >>> 13;
d3 &= 0x1fff;
d3 += h5 * (5 * r8) + h6 * (5 * r7) + h7 * (5 * r6) + h8 * (5 * r5) + h9 * (5 * r4);
c += d3 >>> 13;
d3 &= 0x1fff;
let d4 = c + h0 * r4 + h1 * r3 + h2 * r2 + h3 * r1 + h4 * r0;
c = d4 >>> 13;
d4 &= 0x1fff;
d4 += h5 * (5 * r9) + h6 * (5 * r8) + h7 * (5 * r7) + h8 * (5 * r6) + h9 * (5 * r5);
c += d4 >>> 13;
d4 &= 0x1fff;
let d5 = c + h0 * r5 + h1 * r4 + h2 * r3 + h3 * r2 + h4 * r1;
c = d5 >>> 13;
d5 &= 0x1fff;
d5 += h5 * r0 + h6 * (5 * r9) + h7 * (5 * r8) + h8 * (5 * r7) + h9 * (5 * r6);
c += d5 >>> 13;
d5 &= 0x1fff;
let d6 = c + h0 * r6 + h1 * r5 + h2 * r4 + h3 * r3 + h4 * r2;
c = d6 >>> 13;
d6 &= 0x1fff;
d6 += h5 * r1 + h6 * r0 + h7 * (5 * r9) + h8 * (5 * r8) + h9 * (5 * r7);
c += d6 >>> 13;
d6 &= 0x1fff;
let d7 = c + h0 * r7 + h1 * r6 + h2 * r5 + h3 * r4 + h4 * r3;
c = d7 >>> 13;
d7 &= 0x1fff;
d7 += h5 * r2 + h6 * r1 + h7 * r0 + h8 * (5 * r9) + h9 * (5 * r8);
c += d7 >>> 13;
d7 &= 0x1fff;
let d8 = c + h0 * r8 + h1 * r7 + h2 * r6 + h3 * r5 + h4 * r4;
c = d8 >>> 13;
d8 &= 0x1fff;
d8 += h5 * r3 + h6 * r2 + h7 * r1 + h8 * r0 + h9 * (5 * r9);
c += d8 >>> 13;
d8 &= 0x1fff;
let d9 = c + h0 * r9 + h1 * r8 + h2 * r7 + h3 * r6 + h4 * r5;
c = d9 >>> 13;
d9 &= 0x1fff;
d9 += h5 * r4 + h6 * r3 + h7 * r2 + h8 * r1 + h9 * r0;
c += d9 >>> 13;
d9 &= 0x1fff;
c = ((c << 2) + c) | 0;
c = (c + d0) | 0;
d0 = c & 0x1fff;
c = c >>> 13;
d1 += c;
h[0] = d0;
h[1] = d1;
h[2] = d2;
h[3] = d3;
h[4] = d4;
h[5] = d5;
h[6] = d6;
h[7] = d7;
h[8] = d8;
h[9] = d9;
}
finalize() {
const { h, pad } = this;
const g = new Uint16Array(10);
let c = h[1] >>> 13;
h[1] &= 0x1fff;
for (let i = 2; i < 10; i++) {
h[i] += c;
c = h[i] >>> 13;
h[i] &= 0x1fff;
}
h[0] += c * 5;
c = h[0] >>> 13;
h[0] &= 0x1fff;
h[1] += c;
c = h[1] >>> 13;
h[1] &= 0x1fff;
h[2] += c;
g[0] = h[0] + 5;
c = g[0] >>> 13;
g[0] &= 0x1fff;
for (let i = 1; i < 10; i++) {
g[i] = h[i] + c;
c = g[i] >>> 13;
g[i] &= 0x1fff;
}
g[9] -= 1 << 13;
let mask = (c ^ 1) - 1;
for (let i = 0; i < 10; i++)
g[i] &= mask;
mask = ~mask;
for (let i = 0; i < 10; i++)
h[i] = (h[i] & mask) | g[i];
h[0] = (h[0] | (h[1] << 13)) & 0xffff;
h[1] = ((h[1] >>> 3) | (h[2] << 10)) & 0xffff;
h[2] = ((h[2] >>> 6) | (h[3] << 7)) & 0xffff;
h[3] = ((h[3] >>> 9) | (h[4] << 4)) & 0xffff;
h[4] = ((h[4] >>> 12) | (h[5] << 1) | (h[6] << 14)) & 0xffff;
h[5] = ((h[6] >>> 2) | (h[7] << 11)) & 0xffff;
h[6] = ((h[7] >>> 5) | (h[8] << 8)) & 0xffff;
h[7] = ((h[8] >>> 8) | (h[9] << 5)) & 0xffff;
let f = h[0] + pad[0];
h[0] = f & 0xffff;
for (let i = 1; i < 8; i++) {
f = (((h[i] + pad[i]) | 0) + (f >>> 16)) | 0;
h[i] = f & 0xffff;
}
}
update(data) {
(0, _assert_js_1.exists)(this);
const { buffer, blockLen } = this;
data = (0, utils_js_1.toBytes)(data);
const len = data.length;
for (let pos = 0; pos < len;) {
const take = Math.min(blockLen - this.pos, len - pos);
// Fast path: we have at least one block in input
if (take === blockLen) {
for (; blockLen <= len - pos; pos += blockLen)
this.process(data, pos);
continue;
}
buffer.set(data.subarray(pos, pos + take), this.pos);
this.pos += take;
pos += take;
if (this.pos === blockLen) {
this.process(buffer, 0, false);
this.pos = 0;
}
}
return this;
}
destroy() {
this.h.fill(0);
this.r.fill(0);
this.buffer.fill(0);
this.pad.fill(0);
}
digestInto(out) {
(0, _assert_js_1.exists)(this);
(0, _assert_js_1.output)(out, this);
this.finished = true;
const { buffer, h } = this;
let { pos } = this;
if (pos) {
buffer[pos++] = 1;
// buffer.subarray(pos).fill(0);
for (; pos < 16; pos++)
buffer[pos] = 0;
this.process(buffer, 0, true);
}
this.finalize();
let opos = 0;
for (let i = 0; i < 8; i++) {
out[opos++] = h[i] >>> 0;
out[opos++] = h[i] >>> 8;
}
return out;
}
digest() {
const { buffer, outputLen } = this;
this.digestInto(buffer);
const res = buffer.slice(0, outputLen);
this.destroy();
return res;
}
}
function wrapConstructorWithKey(hashCons) {
const hashC = (msg, key) => hashCons(key).update((0, utils_js_1.toBytes)(msg)).digest();
const tmp = hashCons(new Uint8Array(32));
hashC.outputLen = tmp.outputLen;
hashC.blockLen = tmp.blockLen;
hashC.create = (key) => hashCons(key);
return hashC;
}
exports.wrapConstructorWithKey = wrapConstructorWithKey;
exports.poly1305 = wrapConstructorWithKey((key) => new Poly1305(key));
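// Editor's illustrative sketch; not part of the vendored file. The wrapped
// export can be used one-shot, poly1305(msg, key), or incrementally via
// create() when the message arrives in chunks. Test values only.
{
const otk = new Uint8Array(32).fill(1); // one-time key (test value only)
const mac = exports.poly1305.create(otk);
mac.update(new TextEncoder().encode('chunk1'));
mac.update(new TextEncoder().encode('chunk2'));
const tag = mac.digest(); // 16-byte tag over the concatenated chunks
}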
//# sourceMappingURL=_poly1305.js.map

1
node_modules/@noble/ciphers/_poly1305.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

27
node_modules/@noble/ciphers/_polyval.d.ts generated vendored Normal file

@@ -0,0 +1,27 @@
import { Input, Hash } from './utils.js';
/**
* `mulX_POLYVAL(ByteReverse(H))` from spec
* @param k mutated in place
*/
export declare function _toGHASHKey(k: Uint8Array): Uint8Array;
export type CHash = ReturnType<typeof wrapConstructorWithKey>;
declare function wrapConstructorWithKey<H extends Hash<H>>(hashCons: (key: Input, expectedLength?: number) => Hash<H>): {
(msg: Input, key: Input): Uint8Array;
outputLen: number;
blockLen: number;
create(key: Input, expectedLength?: number): Hash<H>;
};
export declare const ghash: {
(msg: Input, key: Input): Uint8Array;
outputLen: number;
blockLen: number;
create(key: Input, expectedLength?: number): Hash<Hash<unknown>>;
};
export declare const polyval: {
(msg: Input, key: Input): Uint8Array;
outputLen: number;
blockLen: number;
create(key: Input, expectedLength?: number): Hash<Hash<unknown>>;
};
export {};
//# sourceMappingURL=_polyval.d.ts.map

1
node_modules/@noble/ciphers/_polyval.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"_polyval.d.ts","sourceRoot":"","sources":["src/_polyval.ts"],"names":[],"mappings":"AAAA,OAAO,EAAuB,KAAK,EAAE,IAAI,EAAO,MAAM,YAAY,CAAC;AAsCnE;;;GAGG;AACH,wBAAgB,WAAW,CAAC,CAAC,EAAE,UAAU,GAAG,UAAU,CAYrD;AA+KD,MAAM,MAAM,KAAK,GAAG,UAAU,CAAC,OAAO,sBAAsB,CAAC,CAAC;AAC9D,iBAAS,sBAAsB,CAAC,CAAC,SAAS,IAAI,CAAC,CAAC,CAAC,EAC/C,QAAQ,EAAE,CAAC,GAAG,EAAE,KAAK,EAAE,cAAc,CAAC,EAAE,MAAM,KAAK,IAAI,CAAC,CAAC,CAAC;UAEtC,KAAK,OAAO,KAAK,GAAG,UAAU;;;gBAK7B,KAAK,mBAAmB,MAAM;EAEpD;AAED,eAAO,MAAM,KAAK;UATI,KAAK,OAAO,KAAK,GAAG,UAAU;;;gBAK7B,KAAK,mBAAmB,MAAM;CAMpD,CAAC;AACF,eAAO,MAAM,OAAO;UAZE,KAAK,OAAO,KAAK,GAAG,UAAU;;;gBAK7B,KAAK,mBAAmB,MAAM;CASpD,CAAC"}

221
node_modules/@noble/ciphers/_polyval.js generated vendored Normal file

@@ -0,0 +1,221 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.polyval = exports.ghash = exports._toGHASHKey = void 0;
const utils_js_1 = require("./utils.js");
const _assert_js_1 = require("./_assert.js");
// GHash from AES-GCM and its little-endian "mirror image" Polyval from AES-SIV.
// Implemented in terms of GHash with conversion function for keys
// GCM GHASH from NIST SP800-38d, SIV from RFC 8452.
// https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf
// GHASH modulo: x^128 + x^7 + x^2 + x + 1
// POLYVAL modulo: x^128 + x^127 + x^126 + x^121 + 1
const BLOCK_SIZE = 16;
// TODO: rewrite
// temporary padding buffer
const ZEROS16 = /* @__PURE__ */ new Uint8Array(16);
const ZEROS32 = (0, utils_js_1.u32)(ZEROS16);
const POLY = 0xe1; // v = 2*v % POLY
// NOTE: because x + x = 0 (add/sub is same), mul2(x) != x+x
// We can multiply any number using montgomery ladder and this function (works as double, add is simple xor)
const mul2 = (s0, s1, s2, s3) => {
const hiBit = s3 & 1;
return {
s3: (s2 << 31) | (s3 >>> 1),
s2: (s1 << 31) | (s2 >>> 1),
s1: (s0 << 31) | (s1 >>> 1),
s0: (s0 >>> 1) ^ ((POLY << 24) & -(hiBit & 1)), // reduce % poly
};
};
const swapLE = (n) => (((n >>> 0) & 0xff) << 24) |
(((n >>> 8) & 0xff) << 16) |
(((n >>> 16) & 0xff) << 8) |
((n >>> 24) & 0xff) |
0;
/**
* `mulX_POLYVAL(ByteReverse(H))` from spec
* @param k mutated in place
*/
function _toGHASHKey(k) {
k.reverse();
const hiBit = k[15] & 1;
// k >>= 1
let carry = 0;
for (let i = 0; i < k.length; i++) {
const t = k[i];
k[i] = (t >>> 1) | carry;
carry = (t & 1) << 7;
}
k[0] ^= -hiBit & 0xe1; // if (hiBit) n ^= 0xe1000000000000000000000000000000;
return k;
}
exports._toGHASHKey = _toGHASHKey;
const estimateWindow = (bytes) => {
if (bytes > 64 * 1024)
return 8;
if (bytes > 1024)
return 4;
return 2;
};
class GHASH {
// We select bits per window adaptively based on expectedLength
constructor(key, expectedLength) {
this.blockLen = BLOCK_SIZE;
this.outputLen = BLOCK_SIZE;
this.s0 = 0;
this.s1 = 0;
this.s2 = 0;
this.s3 = 0;
this.finished = false;
key = (0, utils_js_1.toBytes)(key);
(0, _assert_js_1.bytes)(key, 16);
const kView = (0, utils_js_1.createView)(key);
let k0 = kView.getUint32(0, false);
let k1 = kView.getUint32(4, false);
let k2 = kView.getUint32(8, false);
let k3 = kView.getUint32(12, false);
// generate table of doubled keys (half of montgomery ladder)
const doubles = [];
for (let i = 0; i < 128; i++) {
doubles.push({ s0: swapLE(k0), s1: swapLE(k1), s2: swapLE(k2), s3: swapLE(k3) });
({ s0: k0, s1: k1, s2: k2, s3: k3 } = mul2(k0, k1, k2, k3));
}
const W = estimateWindow(expectedLength || 1024);
if (![1, 2, 4, 8].includes(W))
throw new Error(`ghash: wrong window size=${W}, should be 1, 2, 4 or 8`);
this.W = W;
const bits = 128; // always 128 bits;
const windows = bits / W;
const windowSize = (this.windowSize = 2 ** W);
const items = [];
// Create precompute table for window of W bits
for (let w = 0; w < windows; w++) {
// truth table: 00, 01, 10, 11
for (let byte = 0; byte < windowSize; byte++) {
// prettier-ignore
let s0 = 0, s1 = 0, s2 = 0, s3 = 0;
for (let j = 0; j < W; j++) {
const bit = (byte >>> (W - j - 1)) & 1;
if (!bit)
continue;
const { s0: d0, s1: d1, s2: d2, s3: d3 } = doubles[W * w + j];
(s0 ^= d0), (s1 ^= d1), (s2 ^= d2), (s3 ^= d3);
}
items.push({ s0, s1, s2, s3 });
}
}
this.t = items;
}
_updateBlock(s0, s1, s2, s3) {
(s0 ^= this.s0), (s1 ^= this.s1), (s2 ^= this.s2), (s3 ^= this.s3);
const { W, t, windowSize } = this;
// prettier-ignore
let o0 = 0, o1 = 0, o2 = 0, o3 = 0;
const mask = (1 << W) - 1; // 2**W will kill performance.
let w = 0;
for (const num of [s0, s1, s2, s3]) {
for (let bytePos = 0; bytePos < 4; bytePos++) {
const byte = (num >>> (8 * bytePos)) & 0xff;
for (let bitPos = 8 / W - 1; bitPos >= 0; bitPos--) {
const bit = (byte >>> (W * bitPos)) & mask;
const { s0: e0, s1: e1, s2: e2, s3: e3 } = t[w * windowSize + bit];
(o0 ^= e0), (o1 ^= e1), (o2 ^= e2), (o3 ^= e3);
w += 1;
}
}
}
this.s0 = o0;
this.s1 = o1;
this.s2 = o2;
this.s3 = o3;
}
update(data) {
data = (0, utils_js_1.toBytes)(data);
(0, _assert_js_1.exists)(this);
const b32 = (0, utils_js_1.u32)(data);
const blocks = Math.floor(data.length / BLOCK_SIZE);
const left = data.length % BLOCK_SIZE;
for (let i = 0; i < blocks; i++) {
this._updateBlock(b32[i * 4 + 0], b32[i * 4 + 1], b32[i * 4 + 2], b32[i * 4 + 3]);
}
if (left) {
ZEROS16.set(data.subarray(blocks * BLOCK_SIZE));
this._updateBlock(ZEROS32[0], ZEROS32[1], ZEROS32[2], ZEROS32[3]);
ZEROS32.fill(0); // clean tmp buffer
}
return this;
}
destroy() {
const { t } = this;
// clean precompute table
for (const elm of t) {
(elm.s0 = 0), (elm.s1 = 0), (elm.s2 = 0), (elm.s3 = 0);
}
}
digestInto(out) {
(0, _assert_js_1.exists)(this);
(0, _assert_js_1.output)(out, this);
this.finished = true;
const { s0, s1, s2, s3 } = this;
const o32 = (0, utils_js_1.u32)(out);
o32[0] = s0;
o32[1] = s1;
o32[2] = s2;
o32[3] = s3;
return out;
}
digest() {
const res = new Uint8Array(BLOCK_SIZE);
this.digestInto(res);
this.destroy();
return res;
}
}
class Polyval extends GHASH {
constructor(key, expectedLength) {
key = (0, utils_js_1.toBytes)(key);
const ghKey = _toGHASHKey(key.slice());
super(ghKey, expectedLength);
ghKey.fill(0);
}
update(data) {
data = (0, utils_js_1.toBytes)(data);
(0, _assert_js_1.exists)(this);
const b32 = (0, utils_js_1.u32)(data);
const left = data.length % BLOCK_SIZE;
const blocks = Math.floor(data.length / BLOCK_SIZE);
for (let i = 0; i < blocks; i++) {
this._updateBlock(swapLE(b32[i * 4 + 3]), swapLE(b32[i * 4 + 2]), swapLE(b32[i * 4 + 1]), swapLE(b32[i * 4 + 0]));
}
if (left) {
ZEROS16.set(data.subarray(blocks * BLOCK_SIZE));
this._updateBlock(swapLE(ZEROS32[3]), swapLE(ZEROS32[2]), swapLE(ZEROS32[1]), swapLE(ZEROS32[0]));
ZEROS32.fill(0); // clean tmp buffer
}
return this;
}
digestInto(out) {
(0, _assert_js_1.exists)(this);
(0, _assert_js_1.output)(out, this);
this.finished = true;
// tmp ugly hack
const { s0, s1, s2, s3 } = this;
const o32 = (0, utils_js_1.u32)(out);
o32[0] = s0;
o32[1] = s1;
o32[2] = s2;
o32[3] = s3;
return out.reverse();
}
}
function wrapConstructorWithKey(hashCons) {
const hashC = (msg, key) => hashCons(key, msg.length).update((0, utils_js_1.toBytes)(msg)).digest();
const tmp = hashCons(new Uint8Array(16), 0);
hashC.outputLen = tmp.outputLen;
hashC.blockLen = tmp.blockLen;
hashC.create = (key, expectedLength) => hashCons(key, expectedLength);
return hashC;
}
exports.ghash = wrapConstructorWithKey((key, expectedLength) => new GHASH(key, expectedLength));
exports.polyval = wrapConstructorWithKey((key, expectedLength) => new Polyval(key, expectedLength));
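// Editor's illustrative sketch; not part of the vendored file. Both hashes
// take a 16-byte key (in GCM it is the AES encryption of the all-zero block)
// and return 16 bytes; neither is a standalone MAC. Test values only.
{
const h = new Uint8Array(16).fill(1); // 16-byte hash key (test value only)
const data = new Uint8Array(32).fill(2); // zero-padded to 16n internally
const g = exports.ghash(data, h); // 16 bytes
const p = exports.polyval(data, h); // little-endian mirror used by AES-SIV
}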
//# sourceMappingURL=_polyval.js.map

1
node_modules/@noble/ciphers/_polyval.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

86
node_modules/@noble/ciphers/aes.d.ts generated vendored Normal file

@@ -0,0 +1,86 @@
import { Cipher, CipherWithOutput } from './utils.js';
export declare function expandKeyLE(key: Uint8Array): Uint32Array;
export declare function expandKeyDecLE(key: Uint8Array): Uint32Array;
declare function encrypt(xk: Uint32Array, s0: number, s1: number, s2: number, s3: number): {
s0: number;
s1: number;
s2: number;
s3: number;
};
declare function decrypt(xk: Uint32Array, s0: number, s1: number, s2: number, s3: number): {
s0: number;
s1: number;
s2: number;
s3: number;
};
declare function ctrCounter(xk: Uint32Array, nonce: Uint8Array, src: Uint8Array, dst?: Uint8Array): Uint8Array;
declare function ctr32(xk: Uint32Array, isLE: boolean, nonce: Uint8Array, src: Uint8Array, dst?: Uint8Array): Uint8Array;
/**
* CTR: counter mode. Creates stream cipher.
* Requires good IV. Parallelizable. OK, but no MAC.
*/
export declare const ctr: ((key: Uint8Array, nonce: Uint8Array) => CipherWithOutput) & {
blockSize: number;
nonceLength: number;
};
export type BlockOpts = {
disablePadding?: boolean;
};
/**
* ECB: Electronic CodeBook. Simple deterministic replacement.
* Dangerous: always maps the same x to the same y. See [AES Penguin](https://words.filippo.io/the-ecb-penguin/).
*/
export declare const ecb: ((key: Uint8Array, opts?: BlockOpts) => CipherWithOutput) & {
blockSize: number;
};
/**
* CBC: Cipher-Block-Chaining. Each plaintext block is XORed with the previous ciphertext block.
* Fragile: needs proper padding. Unauthenticated: needs MAC.
*/
export declare const cbc: ((key: Uint8Array, iv: Uint8Array, opts?: BlockOpts) => CipherWithOutput) & {
blockSize: number;
nonceLength: number;
};
/**
* CFB: Cipher Feedback Mode. The input for the block cipher is the previous cipher output.
* Unauthenticated: needs MAC.
*/
export declare const cfb: ((key: Uint8Array, iv: Uint8Array) => CipherWithOutput) & {
blockSize: number;
nonceLength: number;
};
/**
* GCM: Galois/Counter Mode.
* Good, modern version of CTR, parallel, with MAC.
* Be careful: MACs can be forged.
*/
export declare const gcm: ((key: Uint8Array, nonce: Uint8Array, AAD?: Uint8Array) => Cipher) & {
blockSize: number;
nonceLength: number;
tagLength: number;
};
/**
* AES-GCM-SIV: classic AES-GCM with nonce-misuse resistance.
* Guarantees that, when a nonce is repeated, the only security loss is that identical
* plaintexts will produce identical ciphertexts.
* RFC 8452, https://datatracker.ietf.org/doc/html/rfc8452
*/
export declare const siv: ((key: Uint8Array, nonce: Uint8Array, AAD?: Uint8Array) => Cipher) & {
blockSize: number;
nonceLength: number;
tagLength: number;
};
declare function encryptBlock(xk: Uint32Array, block: Uint8Array): Uint8Array;
declare function decryptBlock(xk: Uint32Array, block: Uint8Array): Uint8Array;
export declare const unsafe: {
expandKeyLE: typeof expandKeyLE;
expandKeyDecLE: typeof expandKeyDecLE;
encrypt: typeof encrypt;
decrypt: typeof decrypt;
encryptBlock: typeof encryptBlock;
decryptBlock: typeof decryptBlock;
ctrCounter: typeof ctrCounter;
ctr32: typeof ctr32;
};
export {};
//# sourceMappingURL=aes.d.ts.map

1
node_modules/@noble/ciphers/aes.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"aes.d.ts","sourceRoot":"","sources":["src/aes.ts"],"names":[],"mappings":"AACA,OAAO,EACO,MAAM,EAAE,gBAAgB,EAErC,MAAM,YAAY,CAAC;AAmGpB,wBAAgB,WAAW,CAAC,GAAG,EAAE,UAAU,GAAG,WAAW,CAmBxD;AAED,wBAAgB,cAAc,CAAC,GAAG,EAAE,UAAU,GAAG,WAAW,CAkB3D;AAwBD,iBAAS,OAAO,CAAC,EAAE,EAAE,WAAW,EAAE,EAAE,EAAE,MAAM,EAAE,EAAE,EAAE,MAAM,EAAE,EAAE,EAAE,MAAM,EAAE,EAAE,EAAE,MAAM;;;;;EAkB/E;AAED,iBAAS,OAAO,CAAC,EAAE,EAAE,WAAW,EAAE,EAAE,EAAE,MAAM,EAAE,EAAE,EAAE,MAAM,EAAE,EAAE,EAAE,MAAM,EAAE,EAAE,EAAE,MAAM;;;;;EAkB/E;AAWD,iBAAS,UAAU,CAAC,EAAE,EAAE,WAAW,EAAE,KAAK,EAAE,UAAU,EAAE,GAAG,EAAE,UAAU,EAAE,GAAG,CAAC,EAAE,UAAU,cAmCxF;AAKD,iBAAS,KAAK,CACZ,EAAE,EAAE,WAAW,EACf,IAAI,EAAE,OAAO,EACb,KAAK,EAAE,UAAU,EACjB,GAAG,EAAE,UAAU,EACf,GAAG,CAAC,EAAE,UAAU,cAiCjB;AAED;;;GAGG;AACH,eAAO,MAAM,GAAG,SAEI,UAAU,SAAS,UAAU,KAAG,gBAAgB;;;CAgBnE,CAAC;AAgDF,MAAM,MAAM,SAAS,GAAG;IAAE,cAAc,CAAC,EAAE,OAAO,CAAA;CAAE,CAAC;AAErD;;;GAGG;AACH,eAAO,MAAM,GAAG,SAEI,UAAU,SAAQ,SAAS,KAAQ,gBAAgB;;CAoCtE,CAAC;AAEF;;;GAGG;AACH,eAAO,MAAM,GAAG,SAEI,UAAU,MAAM,UAAU,SAAQ,SAAS,KAAQ,gBAAgB;;;CA+CtF,CAAC;AAEF;;;GAGG;AACH,eAAO,MAAM,GAAG,SAEI,UAAU,MAAM,UAAU,KAAG,gBAAgB;;;CAqChE,CAAC;AAqBF;;;;GAIG;AACH,eAAO,MAAM,GAAG,SAEI,UAAU,SAAS,UAAU,QAAQ,UAAU,KAAG,MAAM;;;;CAyD3E,CAAC;AAOF;;;;;GAKG;AACH,eAAO,MAAM,GAAG,SAEI,UAAU,SAAS,UAAU,QAAQ,UAAU,KAAG,MAAM;;;;CAqF3E,CAAC;AAUF,iBAAS,YAAY,CAAC,EAAE,EAAE,WAAW,EAAE,KAAK,EAAE,UAAU,cAOvD;AAED,iBAAS,YAAY,CAAC,EAAE,EAAE,WAAW,EAAE,KAAK,EAAE,UAAU,cAOvD;AAID,eAAO,MAAM,MAAM;;;;;;;;;CASlB,CAAC"}

675
node_modules/@noble/ciphers/aes.js generated vendored Normal file

@@ -0,0 +1,675 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.unsafe = exports.siv = exports.gcm = exports.cfb = exports.cbc = exports.ecb = exports.ctr = exports.expandKeyDecLE = exports.expandKeyLE = void 0;
// prettier-ignore
const utils_js_1 = require("./utils.js");
const _polyval_js_1 = require("./_polyval.js");
const _assert_js_1 = require("./_assert.js");
/*
AES (Advanced Encryption Standard) aka Rijndael block cipher.
Data is split into 128-bit blocks. Encrypted in 10/12/14 rounds (128/192/256 bits). In every round:
1. **S-box**, table substitution
2. **Shift rows**, cyclic shift left of all rows of data array
3. **Mix columns**, multiplying every column by fixed polynomial
4. **Add round key**, round_key xor i-th column of array
Resources:
- FIPS-197 https://csrc.nist.gov/files/pubs/fips/197/final/docs/fips-197.pdf
- Original proposal: https://csrc.nist.gov/csrc/media/projects/cryptographic-standards-and-guidelines/documents/aes-development/rijndael-ammended.pdf
*/
const BLOCK_SIZE = 16;
const BLOCK_SIZE32 = 4;
const EMPTY_BLOCK = new Uint8Array(BLOCK_SIZE);
const POLY = 0x11b; // 1 + x + x**3 + x**4 + x**8
// TODO: remove multiplication, binary ops only
function mul2(n) {
return (n << 1) ^ (POLY & -(n >> 7));
}
function mul(a, b) {
let res = 0;
for (; b > 0; b >>= 1) {
// Montgomery ladder
res ^= a & -(b & 1); // if (b&1) res ^=a (but const-time).
a = mul2(a); // a = 2*a
}
return res;
}
// AES S-box is generated using finite field inversion,
// an affine transform, and xor of a constant 0x63.
const sbox = /* @__PURE__ */ (() => {
let t = new Uint8Array(256);
for (let i = 0, x = 1; i < 256; i++, x ^= mul2(x))
t[i] = x;
const box = new Uint8Array(256);
box[0] = 0x63; // first elm
for (let i = 0; i < 255; i++) {
let x = t[255 - i];
x |= x << 8;
box[t[i]] = (x ^ (x >> 4) ^ (x >> 5) ^ (x >> 6) ^ (x >> 7) ^ 0x63) & 0xff;
}
return box;
})();
// Inverted S-box
const invSbox = /* @__PURE__ */ sbox.map((_, j) => sbox.indexOf(j));
// Rotate u32 by 8
const rotr32_8 = (n) => (n << 24) | (n >>> 8);
const rotl32_8 = (n) => (n << 8) | (n >>> 24);
// T-tables are an optimization suggested in section 5.2 of the original proposal
// (omitted from FIPS-197). Changes:
// - LE instead of BE
// - bigger tables: T0 and T1 are merged into T01 table and T2 & T3 into T23;
// so the index is u16 instead of u8. This unexpectedly speeds things up.
function genTtable(sbox, fn) {
if (sbox.length !== 256)
throw new Error('Wrong sbox length');
const T0 = new Uint32Array(256).map((_, j) => fn(sbox[j]));
const T1 = T0.map(rotl32_8);
const T2 = T1.map(rotl32_8);
const T3 = T2.map(rotl32_8);
const T01 = new Uint32Array(256 * 256);
const T23 = new Uint32Array(256 * 256);
const sbox2 = new Uint16Array(256 * 256);
for (let i = 0; i < 256; i++) {
for (let j = 0; j < 256; j++) {
const idx = i * 256 + j;
T01[idx] = T0[i] ^ T1[j];
T23[idx] = T2[i] ^ T3[j];
sbox2[idx] = (sbox[i] << 8) | sbox[j];
}
}
return { sbox, sbox2, T0, T1, T2, T3, T01, T23 };
}
const tableEncoding = /* @__PURE__ */ genTtable(sbox, (s) => (mul(s, 3) << 24) | (s << 16) | (s << 8) | mul(s, 2));
const tableDecoding = /* @__PURE__ */ genTtable(invSbox, (s) => (mul(s, 11) << 24) | (mul(s, 13) << 16) | (mul(s, 9) << 8) | mul(s, 14));
const xPowers = /* @__PURE__ */ (() => {
const p = new Uint8Array(16);
for (let i = 0, x = 1; i < 16; i++, x = mul2(x))
p[i] = x;
return p;
})();
function expandKeyLE(key) {
(0, _assert_js_1.bytes)(key);
const len = key.length;
if (![16, 24, 32].includes(len))
throw new Error(`aes: wrong key size: should be 16, 24 or 32, got: ${len}`);
const { sbox2 } = tableEncoding;
const k32 = (0, utils_js_1.u32)(key);
const Nk = k32.length;
const subByte = (n) => applySbox(sbox2, n, n, n, n);
const xk = new Uint32Array(len + 28); // expanded key
xk.set(k32);
// 4.3.1 Key expansion
for (let i = Nk; i < xk.length; i++) {
let t = xk[i - 1];
if (i % Nk === 0)
t = subByte(rotr32_8(t)) ^ xPowers[i / Nk - 1];
else if (Nk > 6 && i % Nk === 4)
t = subByte(t);
xk[i] = xk[i - Nk] ^ t;
}
return xk;
}
exports.expandKeyLE = expandKeyLE;
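// Editor's illustrative sketch; not part of the vendored file. A 16-byte key
// expands to 16 + 28 = 44 u32 words: 11 four-word round keys covering the
// initial whitening plus 10 rounds (AES-128). Test key only.
{
const xk = expandKeyLE(new Uint8Array(16)); // all-zero AES-128 test key
// xk.length === 44
}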
function expandKeyDecLE(key) {
const encKey = expandKeyLE(key);
const xk = encKey.slice();
const Nk = encKey.length;
const { sbox2 } = tableEncoding;
const { T0, T1, T2, T3 } = tableDecoding;
// Inverse key by chunks of 4 (rounds)
for (let i = 0; i < Nk; i += 4) {
for (let j = 0; j < 4; j++)
xk[i + j] = encKey[Nk - i - 4 + j];
}
encKey.fill(0);
// apply InvMixColumn except first & last round
for (let i = 4; i < Nk - 4; i++) {
const x = xk[i];
const w = applySbox(sbox2, x, x, x, x);
xk[i] = T0[w & 0xff] ^ T1[(w >>> 8) & 0xff] ^ T2[(w >>> 16) & 0xff] ^ T3[w >>> 24];
}
return xk;
}
exports.expandKeyDecLE = expandKeyDecLE;
// Apply tables
function apply0123(T01, T23, s0, s1, s2, s3) {
return (T01[((s0 << 8) & 0xff00) | ((s1 >>> 8) & 0xff)] ^
T23[((s2 >>> 8) & 0xff00) | ((s3 >>> 24) & 0xff)]);
}
function applySbox(sbox2, s0, s1, s2, s3) {
return (sbox2[(s0 & 0xff) | (s1 & 0xff00)] |
(sbox2[((s2 >>> 16) & 0xff) | ((s3 >>> 16) & 0xff00)] << 16));
}
function encrypt(xk, s0, s1, s2, s3) {
const { sbox2, T01, T23 } = tableEncoding;
let k = 0;
(s0 ^= xk[k++]), (s1 ^= xk[k++]), (s2 ^= xk[k++]), (s3 ^= xk[k++]);
const rounds = xk.length / 4 - 2;
for (let i = 0; i < rounds; i++) {
const t0 = xk[k++] ^ apply0123(T01, T23, s0, s1, s2, s3);
const t1 = xk[k++] ^ apply0123(T01, T23, s1, s2, s3, s0);
const t2 = xk[k++] ^ apply0123(T01, T23, s2, s3, s0, s1);
const t3 = xk[k++] ^ apply0123(T01, T23, s3, s0, s1, s2);
(s0 = t0), (s1 = t1), (s2 = t2), (s3 = t3);
}
// last round (without mixcolumns, so using SBOX2 table)
const t0 = xk[k++] ^ applySbox(sbox2, s0, s1, s2, s3);
const t1 = xk[k++] ^ applySbox(sbox2, s1, s2, s3, s0);
const t2 = xk[k++] ^ applySbox(sbox2, s2, s3, s0, s1);
const t3 = xk[k++] ^ applySbox(sbox2, s3, s0, s1, s2);
return { s0: t0, s1: t1, s2: t2, s3: t3 };
}
function decrypt(xk, s0, s1, s2, s3) {
const { sbox2, T01, T23 } = tableDecoding;
let k = 0;
(s0 ^= xk[k++]), (s1 ^= xk[k++]), (s2 ^= xk[k++]), (s3 ^= xk[k++]);
const rounds = xk.length / 4 - 2;
for (let i = 0; i < rounds; i++) {
const t0 = xk[k++] ^ apply0123(T01, T23, s0, s3, s2, s1);
const t1 = xk[k++] ^ apply0123(T01, T23, s1, s0, s3, s2);
const t2 = xk[k++] ^ apply0123(T01, T23, s2, s1, s0, s3);
const t3 = xk[k++] ^ apply0123(T01, T23, s3, s2, s1, s0);
(s0 = t0), (s1 = t1), (s2 = t2), (s3 = t3);
}
// Last round
const t0 = xk[k++] ^ applySbox(sbox2, s0, s3, s2, s1);
const t1 = xk[k++] ^ applySbox(sbox2, s1, s0, s3, s2);
const t2 = xk[k++] ^ applySbox(sbox2, s2, s1, s0, s3);
const t3 = xk[k++] ^ applySbox(sbox2, s3, s2, s1, s0);
return { s0: t0, s1: t1, s2: t2, s3: t3 };
}
function getDst(len, dst) {
if (!dst)
return new Uint8Array(len);
(0, _assert_js_1.bytes)(dst);
if (dst.length < len)
throw new Error(`aes: wrong destination length, expected at least ${len}, got: ${dst.length}`);
return dst;
}
// TODO: investigate merging with ctr32
function ctrCounter(xk, nonce, src, dst) {
(0, _assert_js_1.bytes)(nonce, BLOCK_SIZE);
(0, _assert_js_1.bytes)(src);
const srcLen = src.length;
dst = getDst(srcLen, dst);
const ctr = nonce;
const c32 = (0, utils_js_1.u32)(ctr);
// Fill block (empty, ctr=0)
let { s0, s1, s2, s3 } = encrypt(xk, c32[0], c32[1], c32[2], c32[3]);
const src32 = (0, utils_js_1.u32)(src);
const dst32 = (0, utils_js_1.u32)(dst);
// process blocks
for (let i = 0; i + 4 <= src32.length; i += 4) {
dst32[i + 0] = src32[i + 0] ^ s0;
dst32[i + 1] = src32[i + 1] ^ s1;
dst32[i + 2] = src32[i + 2] ^ s2;
dst32[i + 3] = src32[i + 3] ^ s3;
// Full 128 bit counter with wrap around
let carry = 1;
for (let i = ctr.length - 1; i >= 0; i--) {
carry = (carry + (ctr[i] & 0xff)) | 0;
ctr[i] = carry & 0xff;
carry >>>= 8;
}
({ s0, s1, s2, s3 } = encrypt(xk, c32[0], c32[1], c32[2], c32[3]));
}
// leftovers (less than block)
// It's possible to handle > u32 fast, but is it worth it?
const start = BLOCK_SIZE * Math.floor(src32.length / BLOCK_SIZE32);
if (start < srcLen) {
const b32 = new Uint32Array([s0, s1, s2, s3]);
const buf = (0, utils_js_1.u8)(b32);
for (let i = start, pos = 0; i < srcLen; i++, pos++)
dst[i] = src[i] ^ buf[pos];
}
return dst;
}
// AES CTR with overflowing 32 bit counter
// It's possible to do 32le significantly simpler (and probably faster) by using u32.
// But we need both, and the perf bottleneck is in ghash anyway.
function ctr32(xk, isLE, nonce, src, dst) {
(0, _assert_js_1.bytes)(nonce, BLOCK_SIZE);
(0, _assert_js_1.bytes)(src);
dst = getDst(src.length, dst);
const ctr = nonce; // write new value to nonce, so it can be re-used
const c32 = (0, utils_js_1.u32)(ctr);
const view = (0, utils_js_1.createView)(ctr);
const src32 = (0, utils_js_1.u32)(src);
const dst32 = (0, utils_js_1.u32)(dst);
const ctrPos = isLE ? 0 : 12;
const srcLen = src.length;
// Fill block (empty, ctr=0)
let ctrNum = view.getUint32(ctrPos, isLE); // read current counter value
let { s0, s1, s2, s3 } = encrypt(xk, c32[0], c32[1], c32[2], c32[3]);
// process blocks
for (let i = 0; i + 4 <= src32.length; i += 4) {
dst32[i + 0] = src32[i + 0] ^ s0;
dst32[i + 1] = src32[i + 1] ^ s1;
dst32[i + 2] = src32[i + 2] ^ s2;
dst32[i + 3] = src32[i + 3] ^ s3;
ctrNum = (ctrNum + 1) >>> 0; // u32 wrap
view.setUint32(ctrPos, ctrNum, isLE);
({ s0, s1, s2, s3 } = encrypt(xk, c32[0], c32[1], c32[2], c32[3]));
}
// leftovers (less than a block)
const start = BLOCK_SIZE * Math.floor(src32.length / BLOCK_SIZE32);
if (start < srcLen) {
const b32 = new Uint32Array([s0, s1, s2, s3]);
const buf = (0, utils_js_1.u8)(b32);
for (let i = start, pos = 0; i < srcLen; i++, pos++)
dst[i] = src[i] ^ buf[pos];
}
return dst;
}
/**
* CTR: counter mode. Creates stream cipher.
* Requires good IV. Parallelizable. OK, but no MAC.
*/
exports.ctr = (0, utils_js_1.wrapCipher)({ blockSize: 16, nonceLength: 16 }, function ctr(key, nonce) {
(0, _assert_js_1.bytes)(key);
(0, _assert_js_1.bytes)(nonce, BLOCK_SIZE);
function processCtr(buf, dst) {
const xk = expandKeyLE(key);
const n = nonce.slice();
const out = ctrCounter(xk, n, buf, dst);
xk.fill(0);
n.fill(0);
return out;
}
return {
encrypt: (plaintext, dst) => processCtr(plaintext, dst),
decrypt: (ciphertext, dst) => processCtr(ciphertext, dst),
};
});
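// Editor's illustrative sketch; not part of the vendored file. The full
// 16-byte nonce doubles as the initial counter block, so it must be unique
// per key. Test values only.
{
const key = new Uint8Array(16).fill(1); // AES-128 test key only
const iv = new Uint8Array(16).fill(2); // 16-byte counter block (test value)
const cipher = exports.ctr(key, iv);
const ct = cipher.encrypt(new TextEncoder().encode('data'));
const pt = cipher.decrypt(ct); // same keystream XOR
}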
function validateBlockDecrypt(data) {
(0, _assert_js_1.bytes)(data);
if (data.length % BLOCK_SIZE !== 0) {
throw new Error(`aes/(cbc-ecb).decrypt ciphertext should consist of blocks with size ${BLOCK_SIZE}`);
}
}
function validateBlockEncrypt(plaintext, pcks5, dst) {
let outLen = plaintext.length;
const remaining = outLen % BLOCK_SIZE;
if (!pcks5 && remaining !== 0)
throw new Error('aes/(cbc-ecb): unpadded plaintext with disabled padding');
const b = (0, utils_js_1.u32)(plaintext);
if (pcks5) {
let left = BLOCK_SIZE - remaining;
if (!left)
left = BLOCK_SIZE; // if no bytes left, create empty padding block
outLen = outLen + left;
}
const out = getDst(outLen, dst);
const o = (0, utils_js_1.u32)(out);
return { b, o, out };
}
function validatePCKS(data, pcks5) {
if (!pcks5)
return data;
const len = data.length;
if (!len)
throw new Error(`aes/pcks5: empty ciphertext not allowed`);
const lastByte = data[len - 1];
if (lastByte <= 0 || lastByte > 16)
throw new Error(`aes/pcks5: wrong padding byte: ${lastByte}`);
const out = data.subarray(0, -lastByte);
for (let i = 0; i < lastByte; i++)
if (data[len - i - 1] !== lastByte)
throw new Error(`aes/pcks5: wrong padding`);
return out;
}
function padPCKS(left) {
const tmp = new Uint8Array(16);
const tmp32 = (0, utils_js_1.u32)(tmp);
tmp.set(left);
const paddingByte = BLOCK_SIZE - left.length;
for (let i = BLOCK_SIZE - paddingByte; i < BLOCK_SIZE; i++)
tmp[i] = paddingByte;
return tmp32;
}
/**
* ECB: Electronic CodeBook. Simple deterministic replacement.
* Dangerous: always maps the same x to the same y. See [AES Penguin](https://words.filippo.io/the-ecb-penguin/).
*/
exports.ecb = (0, utils_js_1.wrapCipher)({ blockSize: 16 }, function ecb(key, opts = {}) {
(0, _assert_js_1.bytes)(key);
const pcks5 = !opts.disablePadding;
return {
encrypt: (plaintext, dst) => {
(0, _assert_js_1.bytes)(plaintext);
const { b, o, out: _out } = validateBlockEncrypt(plaintext, pcks5, dst);
const xk = expandKeyLE(key);
let i = 0;
for (; i + 4 <= b.length;) {
const { s0, s1, s2, s3 } = encrypt(xk, b[i + 0], b[i + 1], b[i + 2], b[i + 3]);
(o[i++] = s0), (o[i++] = s1), (o[i++] = s2), (o[i++] = s3);
}
if (pcks5) {
const tmp32 = padPCKS(plaintext.subarray(i * 4));
const { s0, s1, s2, s3 } = encrypt(xk, tmp32[0], tmp32[1], tmp32[2], tmp32[3]);
(o[i++] = s0), (o[i++] = s1), (o[i++] = s2), (o[i++] = s3);
}
xk.fill(0);
return _out;
},
decrypt: (ciphertext, dst) => {
validateBlockDecrypt(ciphertext);
const xk = expandKeyDecLE(key);
const out = getDst(ciphertext.length, dst);
const b = (0, utils_js_1.u32)(ciphertext);
const o = (0, utils_js_1.u32)(out);
for (let i = 0; i + 4 <= b.length;) {
const { s0, s1, s2, s3 } = decrypt(xk, b[i + 0], b[i + 1], b[i + 2], b[i + 3]);
(o[i++] = s0), (o[i++] = s1), (o[i++] = s2), (o[i++] = s3);
}
xk.fill(0);
return validatePCKS(out, pcks5);
},
};
});
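// Editor's illustrative sketch; not part of the vendored file. With default
// PKCS padding a 16-byte input grows to 32 bytes (a full padding block is
// appended). Equal plaintext blocks give equal ciphertext blocks, which leaks
// structure; prefer an authenticated mode such as gcm below. Test values only.
{
const key = new Uint8Array(16).fill(3); // test key only
const cipher = exports.ecb(key);
const ct = cipher.encrypt(new TextEncoder().encode('16 bytes exactly')); // 32 bytes out
const pt = cipher.decrypt(ct); // padding stripped
}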
/**
* CBC: Cipher-Block-Chaining. Each plaintext block is XORed with the previous ciphertext block.
* Fragile: needs proper padding. Unauthenticated: needs MAC.
*/
exports.cbc = (0, utils_js_1.wrapCipher)({ blockSize: 16, nonceLength: 16 }, function cbc(key, iv, opts = {}) {
(0, _assert_js_1.bytes)(key);
(0, _assert_js_1.bytes)(iv, 16);
const pcks5 = !opts.disablePadding;
return {
encrypt: (plaintext, dst) => {
const xk = expandKeyLE(key);
const { b, o, out: _out } = validateBlockEncrypt(plaintext, pcks5, dst);
const n32 = (0, utils_js_1.u32)(iv);
// prettier-ignore
let s0 = n32[0], s1 = n32[1], s2 = n32[2], s3 = n32[3];
let i = 0;
for (; i + 4 <= b.length;) {
(s0 ^= b[i + 0]), (s1 ^= b[i + 1]), (s2 ^= b[i + 2]), (s3 ^= b[i + 3]);
({ s0, s1, s2, s3 } = encrypt(xk, s0, s1, s2, s3));
(o[i++] = s0), (o[i++] = s1), (o[i++] = s2), (o[i++] = s3);
}
if (pcks5) {
const tmp32 = padPCKS(plaintext.subarray(i * 4));
(s0 ^= tmp32[0]), (s1 ^= tmp32[1]), (s2 ^= tmp32[2]), (s3 ^= tmp32[3]);
({ s0, s1, s2, s3 } = encrypt(xk, s0, s1, s2, s3));
(o[i++] = s0), (o[i++] = s1), (o[i++] = s2), (o[i++] = s3);
}
xk.fill(0);
return _out;
},
decrypt: (ciphertext, dst) => {
validateBlockDecrypt(ciphertext);
const xk = expandKeyDecLE(key);
const n32 = (0, utils_js_1.u32)(iv);
const out = getDst(ciphertext.length, dst);
const b = (0, utils_js_1.u32)(ciphertext);
const o = (0, utils_js_1.u32)(out);
// prettier-ignore
let s0 = n32[0], s1 = n32[1], s2 = n32[2], s3 = n32[3];
for (let i = 0; i + 4 <= b.length;) {
// prettier-ignore
const ps0 = s0, ps1 = s1, ps2 = s2, ps3 = s3;
(s0 = b[i + 0]), (s1 = b[i + 1]), (s2 = b[i + 2]), (s3 = b[i + 3]);
const { s0: o0, s1: o1, s2: o2, s3: o3 } = decrypt(xk, s0, s1, s2, s3);
(o[i++] = o0 ^ ps0), (o[i++] = o1 ^ ps1), (o[i++] = o2 ^ ps2), (o[i++] = o3 ^ ps3);
}
xk.fill(0);
return validatePCKS(out, pcks5);
},
};
});
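// Editor's illustrative sketch; not part of the vendored file. The IV must be
// unpredictable per message, and the mode is unauthenticated: pair it with a
// MAC (or use gcm/siv below) before trusting decrypted data. Test values only.
{
const key = new Uint8Array(16).fill(4); // test key only
const iv = new Uint8Array(16).fill(5); // random per message in practice
const cipher = exports.cbc(key, iv);
const ct = cipher.encrypt(new TextEncoder().encode('secret'));
const pt = cipher.decrypt(ct);
}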
/**
* CFB: Cipher Feedback Mode. The input for the block cipher is the previous cipher output.
* Unauthenticated: needs MAC.
*/
exports.cfb = (0, utils_js_1.wrapCipher)({ blockSize: 16, nonceLength: 16 }, function cfb(key, iv) {
(0, _assert_js_1.bytes)(key);
(0, _assert_js_1.bytes)(iv, 16);
function processCfb(src, isEncrypt, dst) {
const xk = expandKeyLE(key);
const srcLen = src.length;
dst = getDst(srcLen, dst);
const src32 = (0, utils_js_1.u32)(src);
const dst32 = (0, utils_js_1.u32)(dst);
const next32 = isEncrypt ? dst32 : src32;
const n32 = (0, utils_js_1.u32)(iv);
// prettier-ignore
let s0 = n32[0], s1 = n32[1], s2 = n32[2], s3 = n32[3];
for (let i = 0; i + 4 <= src32.length;) {
const { s0: e0, s1: e1, s2: e2, s3: e3 } = encrypt(xk, s0, s1, s2, s3);
dst32[i + 0] = src32[i + 0] ^ e0;
dst32[i + 1] = src32[i + 1] ^ e1;
dst32[i + 2] = src32[i + 2] ^ e2;
dst32[i + 3] = src32[i + 3] ^ e3;
(s0 = next32[i++]), (s1 = next32[i++]), (s2 = next32[i++]), (s3 = next32[i++]);
}
// leftovers (less than block)
const start = BLOCK_SIZE * Math.floor(src32.length / BLOCK_SIZE32);
if (start < srcLen) {
({ s0, s1, s2, s3 } = encrypt(xk, s0, s1, s2, s3));
const buf = (0, utils_js_1.u8)(new Uint32Array([s0, s1, s2, s3]));
for (let i = start, pos = 0; i < srcLen; i++, pos++)
dst[i] = src[i] ^ buf[pos];
buf.fill(0);
}
xk.fill(0);
return dst;
}
return {
encrypt: (plaintext, dst) => processCfb(plaintext, true, dst),
decrypt: (ciphertext, dst) => processCfb(ciphertext, false, dst),
};
});
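// Editor's illustrative sketch; not part of the vendored file. CFB behaves
// like a stream cipher here: no padding, ciphertext length equals plaintext
// length, and it is likewise unauthenticated. Test values only.
{
const key = new Uint8Array(16).fill(6); // test key only
const iv = new Uint8Array(16).fill(7); // random per message in practice
const cipher = exports.cfb(key, iv);
const ct = cipher.encrypt(new TextEncoder().encode('abc')); // 3 bytes out
const pt = cipher.decrypt(ct);
}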
// TODO: merge with chacha, however gcm has bitLen while chacha has byteLen
function computeTag(fn, isLE, key, data, AAD) {
const h = fn.create(key, data.length + (AAD?.length || 0));
if (AAD)
h.update(AAD);
h.update(data);
const num = new Uint8Array(16);
const view = (0, utils_js_1.createView)(num);
if (AAD)
(0, utils_js_1.setBigUint64)(view, 0, BigInt(AAD.length * 8), isLE);
(0, utils_js_1.setBigUint64)(view, 8, BigInt(data.length * 8), isLE);
h.update(num);
return h.digest();
}
/**
* GCM: Galois/Counter Mode.
* Good, modern version of CTR, parallel, with MAC.
* Be careful: MACs can be forged.
*/
exports.gcm = (0, utils_js_1.wrapCipher)({ blockSize: 16, nonceLength: 12, tagLength: 16 }, function gcm(key, nonce, AAD) {
(0, _assert_js_1.bytes)(nonce);
// Nonce can be pretty much anything (even 1 byte), but smaller nonces are less secure.
if (nonce.length === 0)
throw new Error('aes/gcm: empty nonce');
const tagLength = 16;
function _computeTag(authKey, tagMask, data) {
const tag = computeTag(_polyval_js_1.ghash, false, authKey, data, AAD);
for (let i = 0; i < tagMask.length; i++)
tag[i] ^= tagMask[i];
return tag;
}
function deriveKeys() {
const xk = expandKeyLE(key);
const authKey = EMPTY_BLOCK.slice();
const counter = EMPTY_BLOCK.slice();
ctr32(xk, false, counter, counter, authKey);
if (nonce.length === 12) {
counter.set(nonce);
}
else {
// Spec (NIST 800-38d) supports variable size nonce.
// Not supported for now, but can be useful.
const nonceLen = EMPTY_BLOCK.slice();
const view = (0, utils_js_1.createView)(nonceLen);
(0, utils_js_1.setBigUint64)(view, 8, BigInt(nonce.length * 8), false);
// ghash(nonce || u64be(0) || u64be(nonceLen*8))
_polyval_js_1.ghash.create(authKey).update(nonce).update(nonceLen).digestInto(counter);
}
const tagMask = ctr32(xk, false, counter, EMPTY_BLOCK);
return { xk, authKey, counter, tagMask };
}
return {
encrypt: (plaintext) => {
(0, _assert_js_1.bytes)(plaintext);
const { xk, authKey, counter, tagMask } = deriveKeys();
const out = new Uint8Array(plaintext.length + tagLength);
ctr32(xk, false, counter, plaintext, out);
const tag = _computeTag(authKey, tagMask, out.subarray(0, out.length - tagLength));
out.set(tag, plaintext.length);
xk.fill(0);
return out;
},
decrypt: (ciphertext) => {
(0, _assert_js_1.bytes)(ciphertext);
if (ciphertext.length < tagLength)
throw new Error(`aes/gcm: ciphertext less than tagLen (${tagLength})`);
const { xk, authKey, counter, tagMask } = deriveKeys();
const data = ciphertext.subarray(0, -tagLength);
const passedTag = ciphertext.subarray(-tagLength);
const tag = _computeTag(authKey, tagMask, data);
if (!(0, utils_js_1.equalBytes)(tag, passedTag))
throw new Error('aes/gcm: invalid ghash tag');
const out = ctr32(xk, false, counter, data);
authKey.fill(0);
tagMask.fill(0);
xk.fill(0);
return out;
},
};
});
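// Editor's illustrative sketch; not part of the vendored file. AAD is
// authenticated but not encrypted; decrypt throws if either the ciphertext or
// the AAD was altered. Use a unique 12-byte nonce per key. Test values only.
{
const key = new Uint8Array(32).fill(1); // AES-256 test key only
const nonce = new Uint8Array(12).fill(2); // unique per message in practice
const aad = new TextEncoder().encode('header');
const aead = exports.gcm(key, nonce, aad);
const ct = aead.encrypt(new TextEncoder().encode('payload')); // ciphertext || 16-byte tag
const pt = aead.decrypt(ct); // throws 'aes/gcm: invalid ghash tag' on tamper
}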
const limit = (name, min, max) => (value) => {
if (!Number.isSafeInteger(value) || min > value || value > max)
throw new Error(`${name}: invalid value=${value}, must be [${min}..${max}]`);
};
/**
* AES-GCM-SIV: classic AES-GCM with nonce-misuse resistance.
* Guarantees that, when a nonce is repeated, the only security loss is that identical
* plaintexts will produce identical ciphertexts.
* RFC 8452, https://datatracker.ietf.org/doc/html/rfc8452
*/
exports.siv = (0, utils_js_1.wrapCipher)({ blockSize: 16, nonceLength: 12, tagLength: 16 }, function siv(key, nonce, AAD) {
const tagLength = 16;
// From RFC 8452: Section 6
const AAD_LIMIT = limit('AAD', 0, 2 ** 36);
const PLAIN_LIMIT = limit('plaintext', 0, 2 ** 36);
const NONCE_LIMIT = limit('nonce', 12, 12);
const CIPHER_LIMIT = limit('ciphertext', 16, 2 ** 36 + 16);
(0, _assert_js_1.bytes)(nonce);
NONCE_LIMIT(nonce.length);
if (AAD) {
(0, _assert_js_1.bytes)(AAD);
AAD_LIMIT(AAD.length);
}
function deriveKeys() {
const len = key.length;
if (len !== 16 && len !== 24 && len !== 32)
throw new Error(`key length must be 16, 24 or 32 bytes, got: ${len} bytes`);
const xk = expandKeyLE(key);
const encKey = new Uint8Array(len);
const authKey = new Uint8Array(16);
const n32 = (0, utils_js_1.u32)(nonce);
// prettier-ignore
let s0 = 0, s1 = n32[0], s2 = n32[1], s3 = n32[2];
let counter = 0;
for (const derivedKey of [authKey, encKey].map(utils_js_1.u32)) {
const d32 = (0, utils_js_1.u32)(derivedKey);
for (let i = 0; i < d32.length; i += 2) {
// aes(u32le(0) || nonce)[:8] || aes(u32le(1) || nonce)[:8] ...
const { s0: o0, s1: o1 } = encrypt(xk, s0, s1, s2, s3);
d32[i + 0] = o0;
d32[i + 1] = o1;
s0 = ++counter; // increment counter inside state
}
}
xk.fill(0);
return { authKey, encKey: expandKeyLE(encKey) };
}
function _computeTag(encKey, authKey, data) {
const tag = computeTag(_polyval_js_1.polyval, true, authKey, data, AAD);
// Compute the expected tag by XORing S_s and the nonce, clearing the
// most significant bit of the last byte and encrypting with the
// message-encryption key.
for (let i = 0; i < 12; i++)
tag[i] ^= nonce[i];
tag[15] &= 0x7f; // Clear the highest bit
// encrypt tag as block
const t32 = (0, utils_js_1.u32)(tag);
// prettier-ignore
let s0 = t32[0], s1 = t32[1], s2 = t32[2], s3 = t32[3];
({ s0, s1, s2, s3 } = encrypt(encKey, s0, s1, s2, s3));
(t32[0] = s0), (t32[1] = s1), (t32[2] = s2), (t32[3] = s3);
return tag;
}
// actual decrypt/encrypt of message.
function processSiv(encKey, tag, input) {
let block = tag.slice();
block[15] |= 0x80; // Force highest bit
return ctr32(encKey, true, block, input);
}
return {
encrypt: (plaintext) => {
(0, _assert_js_1.bytes)(plaintext);
PLAIN_LIMIT(plaintext.length);
const { encKey, authKey } = deriveKeys();
const tag = _computeTag(encKey, authKey, plaintext);
const out = new Uint8Array(plaintext.length + tagLength);
out.set(tag, plaintext.length);
out.set(processSiv(encKey, tag, plaintext));
encKey.fill(0);
authKey.fill(0);
return out;
},
decrypt: (ciphertext) => {
(0, _assert_js_1.bytes)(ciphertext);
CIPHER_LIMIT(ciphertext.length);
const tag = ciphertext.subarray(-tagLength);
const { encKey, authKey } = deriveKeys();
const plaintext = processSiv(encKey, tag, ciphertext.subarray(0, -tagLength));
const expectedTag = _computeTag(encKey, authKey, plaintext);
encKey.fill(0);
authKey.fill(0);
if (!(0, utils_js_1.equalBytes)(tag, expectedTag))
throw new Error('invalid polyval tag');
return plaintext;
},
};
});
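// Editor's illustrative sketch; not part of the vendored file. The nonce must
// be exactly 12 bytes; repeating one only reveals whether two plaintexts were
// identical rather than breaking confidentiality outright. Test values only.
{
const key = new Uint8Array(16).fill(3); // test key only
const nonce = new Uint8Array(12).fill(4); // test nonce only
const aead = exports.siv(key, nonce);
const ct = aead.encrypt(new TextEncoder().encode('payload')); // ciphertext || tag
const pt = aead.decrypt(ct); // throws 'invalid polyval tag' on tamper
}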
function isBytes32(a) {
return (a != null &&
typeof a === 'object' &&
(a instanceof Uint32Array || a.constructor.name === 'Uint32Array'));
}
function encryptBlock(xk, block) {
(0, _assert_js_1.bytes)(block, 16);
if (!isBytes32(xk))
throw new Error('_encryptBlock accepts result of expandKeyLE');
const b32 = (0, utils_js_1.u32)(block);
let { s0, s1, s2, s3 } = encrypt(xk, b32[0], b32[1], b32[2], b32[3]);
(b32[0] = s0), (b32[1] = s1), (b32[2] = s2), (b32[3] = s3);
return block;
}
function decryptBlock(xk, block) {
(0, _assert_js_1.bytes)(block, 16);
if (!isBytes32(xk))
throw new Error('_decryptBlock accepts result of expandKeyLE');
const b32 = (0, utils_js_1.u32)(block);
let { s0, s1, s2, s3 } = decrypt(xk, b32[0], b32[1], b32[2], b32[3]);
(b32[0] = s0), (b32[1] = s1), (b32[2] = s2), (b32[3] = s3);
return block;
}
// Highly unsafe private functions for implementing new modes or ciphers based on AES
// Can change at any time, no API guarantees
exports.unsafe = {
expandKeyLE,
expandKeyDecLE,
encrypt,
decrypt,
encryptBlock,
decryptBlock,
ctrCounter,
ctr32,
};
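// Editor's illustrative sketch; not part of the vendored file. The unsafe
// helpers transform a single 16-byte block in place, with no mode, padding or
// authentication; they exist for building new modes, not for encrypting data.
{
const block = new Uint8Array(16).fill(1); // test block
exports.unsafe.encryptBlock(expandKeyLE(new Uint8Array(16)), block); // in place
exports.unsafe.decryptBlock(expandKeyDecLE(new Uint8Array(16)), block); // restores it
}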
//# sourceMappingURL=aes.js.map

1
node_modules/@noble/ciphers/aes.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

61
node_modules/@noble/ciphers/chacha.d.ts generated vendored Normal file

@@ -0,0 +1,61 @@
import { CipherWithOutput, XorStream } from './utils.js';
/**
* hchacha helper method, used primarily in xchacha, to hash
* key and nonce into key' and nonce'.
* Same as chachaCore, but there doesn't seem to be a way to move the block
* out without a 25% performance hit.
*/
export declare function hchacha(s: Uint32Array, k: Uint32Array, i: Uint32Array, o32: Uint32Array): void;
/**
* Original, non-RFC chacha20 from DJB. 8-byte nonce, 8-byte counter.
*/
export declare const chacha20orig: XorStream;
/**
* ChaCha stream cipher. Conforms to RFC 8439 (IETF, TLS). 12-byte nonce, 4-byte counter.
* With a 12-byte nonce, it's not safe to fill it with random bytes (CSPRNG), due to collision chance.
*/
export declare const chacha20: XorStream;
/**
* XChaCha eXtended-nonce ChaCha. 24-byte nonce.
* With a 24-byte nonce, it's safe to fill it with random bytes (CSPRNG).
* https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-xchacha
*/
export declare const xchacha20: XorStream;
/**
* Reduced 8-round chacha, described in original paper.
*/
export declare const chacha8: XorStream;
/**
* Reduced 12-round chacha, described in original paper.
*/
export declare const chacha12: XorStream;
/**
* AEAD algorithm from RFC 8439.
* Salsa20 and chacha (RFC 8439) use poly1305 differently.
* We could have composed them similar to:
* https://github.com/paulmillr/scure-base/blob/b266c73dde977b1dd7ef40ef7a23cc15aab526b3/index.ts#L250
* But it's hard because of authKey:
* In salsa20, authKey changes position in salsa stream.
* In chacha, authKey can't be computed inside computeTag, since it modifies the counter.
*/
export declare const _poly1305_aead: (xorStream: XorStream) => (key: Uint8Array, nonce: Uint8Array, AAD?: Uint8Array) => CipherWithOutput;
/**
* ChaCha20-Poly1305 from RFC 8439.
* With a 12-byte nonce, it's not safe to fill it with random bytes (CSPRNG), due to collision chance.
*/
export declare const chacha20poly1305: ((key: Uint8Array, nonce: Uint8Array, AAD?: Uint8Array) => CipherWithOutput) & {
blockSize: number;
nonceLength: number;
tagLength: number;
};
/**
* XChaCha20-Poly1305 extended-nonce chacha.
* https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-xchacha
* With a 24-byte nonce, it's safe to fill it with random bytes (CSPRNG).
*/
export declare const xchacha20poly1305: ((key: Uint8Array, nonce: Uint8Array, AAD?: Uint8Array) => CipherWithOutput) & {
blockSize: number;
nonceLength: number;
tagLength: number;
};
//# sourceMappingURL=chacha.d.ts.map

1
node_modules/@noble/ciphers/chacha.d.ts.map generated vendored Normal file

@@ -0,0 +1 @@
{"version":3,"file":"chacha.d.ts","sourceRoot":"","sources":["src/chacha.ts"],"names":[],"mappings":"AACA,OAAO,EACO,gBAAgB,EAAE,SAAS,EACxC,MAAM,YAAY,CAAC;AA6EpB;;;;;GAKG;AAEH,wBAAgB,OAAO,CACrB,CAAC,EAAE,WAAW,EAAE,CAAC,EAAE,WAAW,EAAE,CAAC,EAAE,WAAW,EAAE,GAAG,EAAE,WAAW,QAoDjE;AACD;;GAEG;AACH,eAAO,MAAM,YAAY,WAIvB,CAAC;AACH;;;GAGG;AACH,eAAO,MAAM,QAAQ,WAInB,CAAC;AAEH;;;;GAIG;AACH,eAAO,MAAM,SAAS,WAKpB,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,OAAO,WAIlB,CAAC;AAEH;;GAEG;AACH,eAAO,MAAM,QAAQ,WAInB,CAAC;AAgCH;;;;;;;;GAQG;AACH,eAAO,MAAM,cAAc,cACb,SAAS,WACf,UAAU,SAAS,UAAU,QAAQ,UAAU,KAAG,gBAoCvD,CAAC;AAEJ;;;GAGG;AACH,eAAO,MAAM,gBAAgB,SA1CrB,UAAU,SAAS,UAAU,QAAQ,UAAU,KAAG,gBAAgB;;;;CA6CzE,CAAC;AACF;;;;GAIG;AACH,eAAO,MAAM,iBAAiB,SAnDtB,UAAU,SAAS,UAAU,QAAQ,UAAU,KAAG,gBAAgB;;;;CAsDzE,CAAC"}

323
node_modules/@noble/ciphers/chacha.js generated vendored Normal file

@@ -0,0 +1,323 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.xchacha20poly1305 = exports.chacha20poly1305 = exports._poly1305_aead = exports.chacha12 = exports.chacha8 = exports.xchacha20 = exports.chacha20 = exports.chacha20orig = exports.hchacha = void 0;
// prettier-ignore
const utils_js_1 = require("./utils.js");
const _poly1305_js_1 = require("./_poly1305.js");
const _arx_js_1 = require("./_arx.js");
const _assert_js_1 = require("./_assert.js");
// ChaCha20 stream cipher was released in 2008. ChaCha aims to increase
// the diffusion per round, but had slightly less cryptanalysis.
// https://cr.yp.to/chacha.html, http://cr.yp.to/chacha/chacha-20080128.pdf
/**
* ChaCha core function.
*/
// prettier-ignore
function chachaCore(s, k, n, out, cnt, rounds = 20) {
let y00 = s[0], y01 = s[1], y02 = s[2], y03 = s[3], // "expa" "nd 3" "2-by" "te k"
y04 = k[0], y05 = k[1], y06 = k[2], y07 = k[3], // Key Key Key Key
y08 = k[4], y09 = k[5], y10 = k[6], y11 = k[7], // Key Key Key Key
y12 = cnt, y13 = n[0], y14 = n[1], y15 = n[2]; // Counter Counter Nonce Nonce
// Save state to temporary variables
let x00 = y00, x01 = y01, x02 = y02, x03 = y03, x04 = y04, x05 = y05, x06 = y06, x07 = y07, x08 = y08, x09 = y09, x10 = y10, x11 = y11, x12 = y12, x13 = y13, x14 = y14, x15 = y15;
for (let r = 0; r < rounds; r += 2) {
x00 = (x00 + x04) | 0;
x12 = (0, _arx_js_1.rotl)(x12 ^ x00, 16);
x08 = (x08 + x12) | 0;
x04 = (0, _arx_js_1.rotl)(x04 ^ x08, 12);
x00 = (x00 + x04) | 0;
x12 = (0, _arx_js_1.rotl)(x12 ^ x00, 8);
x08 = (x08 + x12) | 0;
x04 = (0, _arx_js_1.rotl)(x04 ^ x08, 7);
x01 = (x01 + x05) | 0;
x13 = (0, _arx_js_1.rotl)(x13 ^ x01, 16);
x09 = (x09 + x13) | 0;
x05 = (0, _arx_js_1.rotl)(x05 ^ x09, 12);
x01 = (x01 + x05) | 0;
x13 = (0, _arx_js_1.rotl)(x13 ^ x01, 8);
x09 = (x09 + x13) | 0;
x05 = (0, _arx_js_1.rotl)(x05 ^ x09, 7);
x02 = (x02 + x06) | 0;
x14 = (0, _arx_js_1.rotl)(x14 ^ x02, 16);
x10 = (x10 + x14) | 0;
x06 = (0, _arx_js_1.rotl)(x06 ^ x10, 12);
x02 = (x02 + x06) | 0;
x14 = (0, _arx_js_1.rotl)(x14 ^ x02, 8);
x10 = (x10 + x14) | 0;
x06 = (0, _arx_js_1.rotl)(x06 ^ x10, 7);
x03 = (x03 + x07) | 0;
x15 = (0, _arx_js_1.rotl)(x15 ^ x03, 16);
x11 = (x11 + x15) | 0;
x07 = (0, _arx_js_1.rotl)(x07 ^ x11, 12);
x03 = (x03 + x07) | 0;
x15 = (0, _arx_js_1.rotl)(x15 ^ x03, 8);
x11 = (x11 + x15) | 0;
x07 = (0, _arx_js_1.rotl)(x07 ^ x11, 7);
x00 = (x00 + x05) | 0;
x15 = (0, _arx_js_1.rotl)(x15 ^ x00, 16);
x10 = (x10 + x15) | 0;
x05 = (0, _arx_js_1.rotl)(x05 ^ x10, 12);
x00 = (x00 + x05) | 0;
x15 = (0, _arx_js_1.rotl)(x15 ^ x00, 8);
x10 = (x10 + x15) | 0;
x05 = (0, _arx_js_1.rotl)(x05 ^ x10, 7);
x01 = (x01 + x06) | 0;
x12 = (0, _arx_js_1.rotl)(x12 ^ x01, 16);
x11 = (x11 + x12) | 0;
x06 = (0, _arx_js_1.rotl)(x06 ^ x11, 12);
x01 = (x01 + x06) | 0;
x12 = (0, _arx_js_1.rotl)(x12 ^ x01, 8);
x11 = (x11 + x12) | 0;
x06 = (0, _arx_js_1.rotl)(x06 ^ x11, 7);
x02 = (x02 + x07) | 0;
x13 = (0, _arx_js_1.rotl)(x13 ^ x02, 16);
x08 = (x08 + x13) | 0;
x07 = (0, _arx_js_1.rotl)(x07 ^ x08, 12);
x02 = (x02 + x07) | 0;
x13 = (0, _arx_js_1.rotl)(x13 ^ x02, 8);
x08 = (x08 + x13) | 0;
x07 = (0, _arx_js_1.rotl)(x07 ^ x08, 7);
x03 = (x03 + x04) | 0;
x14 = (0, _arx_js_1.rotl)(x14 ^ x03, 16);
x09 = (x09 + x14) | 0;
x04 = (0, _arx_js_1.rotl)(x04 ^ x09, 12);
x03 = (x03 + x04) | 0;
x14 = (0, _arx_js_1.rotl)(x14 ^ x03, 8);
x09 = (x09 + x14) | 0;
x04 = (0, _arx_js_1.rotl)(x04 ^ x09, 7);
}
// Write output
let oi = 0;
out[oi++] = (y00 + x00) | 0;
out[oi++] = (y01 + x01) | 0;
out[oi++] = (y02 + x02) | 0;
out[oi++] = (y03 + x03) | 0;
out[oi++] = (y04 + x04) | 0;
out[oi++] = (y05 + x05) | 0;
out[oi++] = (y06 + x06) | 0;
out[oi++] = (y07 + x07) | 0;
out[oi++] = (y08 + x08) | 0;
out[oi++] = (y09 + x09) | 0;
out[oi++] = (y10 + x10) | 0;
out[oi++] = (y11 + x11) | 0;
out[oi++] = (y12 + x12) | 0;
out[oi++] = (y13 + x13) | 0;
out[oi++] = (y14 + x14) | 0;
out[oi++] = (y15 + x15) | 0;
}
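The unrolled loop above is eight copies of the ChaCha quarter-round per double round: four over the columns of the 4x4 state, then four over its diagonals. A minimal sketch of that quarter-round (illustrative only; the vendored file keeps it inlined for speed):

const rotl = (x: number, n: number): number => (x << n) | (x >>> (32 - n));
// quarterRound(s, 0, 4, 8, 12) is the first column pass above; (0, 5, 10, 15) a diagonal.
function quarterRound(s: Uint32Array, a: number, b: number, c: number, d: number): void {
    s[a] = (s[a] + s[b]) | 0; s[d] = rotl(s[d] ^ s[a], 16);
    s[c] = (s[c] + s[d]) | 0; s[b] = rotl(s[b] ^ s[c], 12);
    s[a] = (s[a] + s[b]) | 0; s[d] = rotl(s[d] ^ s[a], 8);
    s[c] = (s[c] + s[d]) | 0; s[b] = rotl(s[b] ^ s[c], 7);
}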
/**
 * hchacha helper, used primarily in xchacha, to hash the key and the first
 * 16 nonce bytes into a derived subkey (the remaining nonce bytes become the
 * new, shorter nonce). The rounds match chachaCore, but factoring the shared
 * block into one function costs roughly a 25% performance hit, so it stays inlined.
*/
// prettier-ignore
function hchacha(s, k, i, o32) {
let x00 = s[0], x01 = s[1], x02 = s[2], x03 = s[3], x04 = k[0], x05 = k[1], x06 = k[2], x07 = k[3], x08 = k[4], x09 = k[5], x10 = k[6], x11 = k[7], x12 = i[0], x13 = i[1], x14 = i[2], x15 = i[3];
for (let r = 0; r < 20; r += 2) {
x00 = (x00 + x04) | 0;
x12 = (0, _arx_js_1.rotl)(x12 ^ x00, 16);
x08 = (x08 + x12) | 0;
x04 = (0, _arx_js_1.rotl)(x04 ^ x08, 12);
x00 = (x00 + x04) | 0;
x12 = (0, _arx_js_1.rotl)(x12 ^ x00, 8);
x08 = (x08 + x12) | 0;
x04 = (0, _arx_js_1.rotl)(x04 ^ x08, 7);
x01 = (x01 + x05) | 0;
x13 = (0, _arx_js_1.rotl)(x13 ^ x01, 16);
x09 = (x09 + x13) | 0;
x05 = (0, _arx_js_1.rotl)(x05 ^ x09, 12);
x01 = (x01 + x05) | 0;
x13 = (0, _arx_js_1.rotl)(x13 ^ x01, 8);
x09 = (x09 + x13) | 0;
x05 = (0, _arx_js_1.rotl)(x05 ^ x09, 7);
x02 = (x02 + x06) | 0;
x14 = (0, _arx_js_1.rotl)(x14 ^ x02, 16);
x10 = (x10 + x14) | 0;
x06 = (0, _arx_js_1.rotl)(x06 ^ x10, 12);
x02 = (x02 + x06) | 0;
x14 = (0, _arx_js_1.rotl)(x14 ^ x02, 8);
x10 = (x10 + x14) | 0;
x06 = (0, _arx_js_1.rotl)(x06 ^ x10, 7);
x03 = (x03 + x07) | 0;
x15 = (0, _arx_js_1.rotl)(x15 ^ x03, 16);
x11 = (x11 + x15) | 0;
x07 = (0, _arx_js_1.rotl)(x07 ^ x11, 12);
x03 = (x03 + x07) | 0;
x15 = (0, _arx_js_1.rotl)(x15 ^ x03, 8);
x11 = (x11 + x15) | 0;
x07 = (0, _arx_js_1.rotl)(x07 ^ x11, 7);
x00 = (x00 + x05) | 0;
x15 = (0, _arx_js_1.rotl)(x15 ^ x00, 16);
x10 = (x10 + x15) | 0;
x05 = (0, _arx_js_1.rotl)(x05 ^ x10, 12);
x00 = (x00 + x05) | 0;
x15 = (0, _arx_js_1.rotl)(x15 ^ x00, 8);
x10 = (x10 + x15) | 0;
x05 = (0, _arx_js_1.rotl)(x05 ^ x10, 7);
x01 = (x01 + x06) | 0;
x12 = (0, _arx_js_1.rotl)(x12 ^ x01, 16);
x11 = (x11 + x12) | 0;
x06 = (0, _arx_js_1.rotl)(x06 ^ x11, 12);
x01 = (x01 + x06) | 0;
x12 = (0, _arx_js_1.rotl)(x12 ^ x01, 8);
x11 = (x11 + x12) | 0;
x06 = (0, _arx_js_1.rotl)(x06 ^ x11, 7);
x02 = (x02 + x07) | 0;
x13 = (0, _arx_js_1.rotl)(x13 ^ x02, 16);
x08 = (x08 + x13) | 0;
x07 = (0, _arx_js_1.rotl)(x07 ^ x08, 12);
x02 = (x02 + x07) | 0;
x13 = (0, _arx_js_1.rotl)(x13 ^ x02, 8);
x08 = (x08 + x13) | 0;
x07 = (0, _arx_js_1.rotl)(x07 ^ x08, 7);
x03 = (x03 + x04) | 0;
x14 = (0, _arx_js_1.rotl)(x14 ^ x03, 16);
x09 = (x09 + x14) | 0;
x04 = (0, _arx_js_1.rotl)(x04 ^ x09, 12);
x03 = (x03 + x04) | 0;
x14 = (0, _arx_js_1.rotl)(x14 ^ x03, 8);
x09 = (x09 + x14) | 0;
x04 = (0, _arx_js_1.rotl)(x04 ^ x09, 7);
}
let oi = 0;
o32[oi++] = x00;
o32[oi++] = x01;
o32[oi++] = x02;
o32[oi++] = x03;
o32[oi++] = x12;
o32[oi++] = x13;
o32[oi++] = x14;
o32[oi++] = x15;
}
exports.hchacha = hchacha;
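Since hchacha is exported at the word level, a byte-level HChaCha20 wrapper might look like the sketch below. The sigma constant is the standard "expand 32-byte k" block; 4-byte alignment of the inputs and a little-endian host are assumptions of this sketch:

const sigma = new Uint32Array([0x61707865, 0x3320646e, 0x79622d32, 0x6b206574]); // "expa" "nd 3" "2-by" "te k"
function hchacha20(key32: Uint8Array, nonce16: Uint8Array): Uint8Array {
    // Reinterpret bytes as 32-bit words (assumes aligned, little-endian input).
    const k = new Uint32Array(key32.buffer, key32.byteOffset, 8);
    const i = new Uint32Array(nonce16.buffer, nonce16.byteOffset, 4);
    const out = new Uint32Array(8);
    hchacha(sigma, k, i, out); // fills out with rows 0 and 3 of the final state
    return new Uint8Array(out.buffer); // 32-byte derived subkey
}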
/**
* Original, non-RFC chacha20 from DJB. 8-byte nonce, 8-byte counter.
*/
exports.chacha20orig = (0, _arx_js_1.createCipher)(chachaCore, {
counterRight: false,
counterLength: 8,
allowShortKeys: true,
});
/**
* ChaCha stream cipher. Conforms to RFC 8439 (IETF, TLS). 12-byte nonce, 4-byte counter.
 * With a 12-byte nonce, it is not safe to fill it with random bytes (CSPRNG), due to collision chance.
*/
exports.chacha20 = (0, _arx_js_1.createCipher)(chachaCore, {
counterRight: false,
counterLength: 4,
allowShortKeys: false,
});
/**
* XChaCha eXtended-nonce ChaCha. 24-byte nonce.
 * With a 24-byte nonce, it is safe to fill it with random bytes (CSPRNG).
* https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-xchacha
*/
exports.xchacha20 = (0, _arx_js_1.createCipher)(chachaCore, {
counterRight: false,
counterLength: 8,
extendNonceFn: hchacha,
allowShortKeys: false,
});
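In practical terms (a hedged sketch, assuming Web Crypto is available): xchacha20's 24-byte nonce can be drawn at random for every message, while chacha20's 12-byte nonce should be a unique counter, since random 12-byte nonces collide too easily at scale.

// Safe: random 24-byte XChaCha nonce per message.
const xnonce = crypto.getRandomValues(new Uint8Array(24));
// Prefer a counter for 12-byte ChaCha nonces, e.g. the message number, little-endian:
const nonce12 = new Uint8Array(12);
new DataView(nonce12.buffer).setBigUint64(4, 1n, true); // message #1; bytes 0-3 stay zero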
/**
 * Reduced 8-round ChaCha, described in the original paper.
*/
exports.chacha8 = (0, _arx_js_1.createCipher)(chachaCore, {
counterRight: false,
counterLength: 4,
rounds: 8,
});
/**
 * Reduced 12-round ChaCha, described in the original paper.
*/
exports.chacha12 = (0, _arx_js_1.createCipher)(chachaCore, {
counterRight: false,
counterLength: 4,
rounds: 12,
});
const ZEROS16 = /* @__PURE__ */ new Uint8Array(16);
// Pad to digest size with zeros
const updatePadded = (h, msg) => {
h.update(msg);
const left = msg.length % 16;
if (left)
h.update(ZEROS16.subarray(left));
};
const ZEROS32 = /* @__PURE__ */ new Uint8Array(32);
function computeTag(fn, key, nonce, data, AAD) {
const authKey = fn(key, nonce, ZEROS32);
const h = _poly1305_js_1.poly1305.create(authKey);
if (AAD)
updatePadded(h, AAD);
updatePadded(h, data);
const num = new Uint8Array(16);
const view = (0, utils_js_1.createView)(num);
(0, utils_js_1.setBigUint64)(view, 0, BigInt(AAD ? AAD.length : 0), true);
(0, utils_js_1.setBigUint64)(view, 8, BigInt(data.length), true);
h.update(num);
const res = h.digest();
authKey.fill(0);
return res;
}
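computeTag above follows the RFC 8439 layout: Poly1305, keyed with the first 32 bytes of keystream block 0, over pad16(AAD) || pad16(ciphertext) || le64(|AAD|) || le64(|ciphertext|). A small sketch of the resulting MAC input size (helper names here are illustrative, not part of the file):

const pad16 = (n: number): number => (16 - (n % 16)) % 16;
// Total bytes Poly1305 authenticates for given AAD/ciphertext lengths:
const macInputLength = (aadLen: number, ctLen: number): number =>
    aadLen + pad16(aadLen) + ctLen + pad16(ctLen) + 16; // +16 for the two LE64 length fields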
/**
* AEAD algorithm from RFC 8439.
 * Salsa20 and ChaCha (RFC 8439) use Poly1305 differently, which is why the two
 * AEADs were not composed from one generic wrapper, similar to:
 * https://github.com/paulmillr/scure-base/blob/b266c73dde977b1dd7ef40ef7a23cc15aab526b3/index.ts#L250
 * The obstacle is the authKey: in Salsa20 it sits at a different position in the
 * stream, while in ChaCha deriving it consumes keystream block 0 (the counter
 * moves), which is why encryption below starts the stream counter at 1.
*/
const _poly1305_aead = (xorStream) => (key, nonce, AAD) => {
const tagLength = 16;
(0, _assert_js_1.bytes)(key, 32);
(0, _assert_js_1.bytes)(nonce);
return {
encrypt: (plaintext, output) => {
const plength = plaintext.length;
const clength = plength + tagLength;
if (output) {
(0, _assert_js_1.bytes)(output, clength);
}
else {
output = new Uint8Array(clength);
}
xorStream(key, nonce, plaintext, output, 1);
const tag = computeTag(xorStream, key, nonce, output.subarray(0, -tagLength), AAD);
output.set(tag, plength); // append tag
return output;
},
decrypt: (ciphertext, output) => {
const clength = ciphertext.length;
const plength = clength - tagLength;
if (clength < tagLength)
throw new Error(`encrypted data must be at least ${tagLength} bytes`);
if (output) {
(0, _assert_js_1.bytes)(output, plength);
}
else {
output = new Uint8Array(plength);
}
const data = ciphertext.subarray(0, -tagLength);
const passedTag = ciphertext.subarray(-tagLength);
const tag = computeTag(xorStream, key, nonce, data, AAD);
if (!(0, utils_js_1.equalBytes)(passedTag, tag))
throw new Error('invalid tag');
xorStream(key, nonce, data, output, 1);
return output;
},
};
};
exports._poly1305_aead = _poly1305_aead;
/**
* ChaCha20-Poly1305 from RFC 8439.
 * With a 12-byte nonce, it is not safe to fill it with random bytes (CSPRNG), due to collision chance.
*/
exports.chacha20poly1305 = (0, utils_js_1.wrapCipher)({ blockSize: 64, nonceLength: 12, tagLength: 16 }, (0, exports._poly1305_aead)(exports.chacha20));
/**
 * XChaCha20-Poly1305: extended-nonce (24-byte) variant of ChaCha20-Poly1305.
 * https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-xchacha
 * With a 24-byte nonce, it is safe to fill it with random bytes (CSPRNG).
*/
exports.xchacha20poly1305 = (0, utils_js_1.wrapCipher)({ blockSize: 64, nonceLength: 24, tagLength: 16 }, (0, exports._poly1305_aead)(exports.xchacha20));
//# sourceMappingURL=chacha.js.map
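
Both AEAD exports above return the CipherWithOutput shape declared in chacha.d.ts: encrypt and decrypt accept an optional preallocated output buffer, which lets callers avoid a fresh allocation per message. A hedged sketch of that path (key/nonce setup assumptions as in the earlier example):

const key = crypto.getRandomValues(new Uint8Array(32));
const nonce = crypto.getRandomValues(new Uint8Array(24));
const aead = xchacha20poly1305(key, nonce);
const msg = new TextEncoder().encode('reuse me');
const sealed = new Uint8Array(msg.length + 16); // ciphertext plus 16-byte tag
aead.encrypt(msg, sealed);    // writes ciphertext||tag into `sealed`
const opened = new Uint8Array(msg.length);
aead.decrypt(sealed, opened); // verifies the tag, then writes plaintext into `opened`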

1
node_modules/@noble/ciphers/chacha.js.map generated vendored Normal file

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff.