Compare commits

...

6 Commits

Author SHA1 Message Date
Your Name
bc6a7b3f20 Working on API 2025-09-25 16:35:16 -04:00
Your Name
036b0823b9 v0.3.11 - Working on admin api 2025-09-25 11:25:50 -04:00
Your Name
be99595bde v0.3.10 - . 2025-09-24 10:49:48 -04:00
Your Name
01836a4b4c v0.3.9 - API work 2025-09-21 15:53:03 -04:00
Your Name
9f3b3dd773 v0.3.8 - safety push 2025-09-18 10:18:15 -04:00
Your Name
3210b9e752 v0.3.7 - working on cinfig api 2025-09-16 15:52:27 -04:00
16 changed files with 11651 additions and 1879 deletions

513
IMPLEMENT_API.md Normal file
View File

@@ -0,0 +1,513 @@
# Implementation Plan: Enhanced Admin Event API Structure
## Current Issue
The current admin event routing at [`main.c:3248-3268`](src/main.c:3248) has a security vulnerability:
```c
if (event_kind == 23455 || event_kind == 23456) {
    // Admin event processing
    int admin_result = process_admin_event_in_config(event, admin_error, sizeof(admin_error), wsi);
} else {
    // Regular event storage and broadcasting
}
```
**Problem**: Any event with these kinds gets routed to admin processing, regardless of authorization. This allows unauthorized users to send admin events that could be processed as legitimate admin commands.
**Note**: Event kinds 33334 and 33335 are no longer used and have been removed from the admin event routing.
## Required Security Enhancement
Admin events must be validated for proper authorization BEFORE routing to admin processing:
1. **Relay Public Key Check**: Event must have a `p` tag equal to the relay's public key
2. **Admin Signature Check**: Event must be signed by an authorized admin private key
3. **Fallback to Regular Processing**: If authorization fails, treat as regular event (not admin event)
## Implementation Plan
## Phase 1: Add Admin Authorization Validation
### 1.1 Create Consolidated Admin Authorization Function
**Location**: [`src/main.c`](src/main.c) or [`src/config.c`](src/config.c)
```c
/**
* Consolidated admin event authorization validator
* Implements defense-in-depth security for admin events
*
* @param event - The event to validate for admin authorization
* @param error_message - Buffer for detailed error messages
* @param error_size - Size of error message buffer
* @return 0 if authorized, -1 if unauthorized, -2 if validation error
*/
int is_authorized_admin_event(cJSON* event, char* error_message, size_t error_size) {
    if (!event) {
        snprintf(error_message, error_size, "admin_auth: null event");
        return -2;
    }

    // Extract event components
    cJSON* kind_obj = cJSON_GetObjectItem(event, "kind");
    cJSON* pubkey_obj = cJSON_GetObjectItem(event, "pubkey");
    cJSON* tags_obj = cJSON_GetObjectItem(event, "tags");
    if (!kind_obj || !pubkey_obj || !tags_obj) {
        snprintf(error_message, error_size, "admin_auth: missing required fields");
        return -2;
    }

    // Validation Layer 1: Kind Check
    int event_kind = (int)cJSON_GetNumberValue(kind_obj);
    if (event_kind != 23455 && event_kind != 23456) {
        snprintf(error_message, error_size, "admin_auth: not an admin event kind");
        return -1;
    }

    // Validation Layer 2: Relay Targeting Check
    const char* relay_pubkey = get_config_value("relay_pubkey");
    if (!relay_pubkey) {
        snprintf(error_message, error_size, "admin_auth: relay pubkey not configured");
        return -2;
    }

    // Check for 'p' tag targeting this relay
    int has_relay_target = 0;
    if (cJSON_IsArray(tags_obj)) {
        cJSON* tag = NULL;
        cJSON_ArrayForEach(tag, tags_obj) {
            if (cJSON_IsArray(tag) && cJSON_GetArraySize(tag) >= 2) {
                cJSON* tag_name = cJSON_GetArrayItem(tag, 0);
                cJSON* tag_value = cJSON_GetArrayItem(tag, 1);
                if (cJSON_IsString(tag_name) && cJSON_IsString(tag_value)) {
                    const char* name = cJSON_GetStringValue(tag_name);
                    const char* value = cJSON_GetStringValue(tag_value);
                    if (strcmp(name, "p") == 0 && strcmp(value, relay_pubkey) == 0) {
                        has_relay_target = 1;
                        break;
                    }
                }
            }
        }
    }
    if (!has_relay_target) {
        // Admin event for different relay - not unauthorized, just not for us
        snprintf(error_message, error_size, "admin_auth: admin event for different relay");
        return -1;
    }

    // Validation Layer 3: Admin Signature Check (only if targeting this relay)
    const char* event_pubkey = cJSON_GetStringValue(pubkey_obj);
    if (!event_pubkey) {
        snprintf(error_message, error_size, "admin_auth: invalid pubkey format");
        return -2;
    }
    const char* admin_pubkey = get_config_value("admin_pubkey");
    if (!admin_pubkey || strcmp(event_pubkey, admin_pubkey) != 0) {
        // This is the ONLY case where we log as "Unauthorized admin event attempt"
        // because it's targeting THIS relay but from wrong admin
        snprintf(error_message, error_size, "admin_auth: unauthorized admin for this relay");
        log_warning("SECURITY: Unauthorized admin event attempt for this relay");
        return -1;
    }

    // All validation layers passed
    log_info("ADMIN: Admin event authorized");
    return 0;
}
```
### 1.2 Update Event Routing Logic
**Location**: [`main.c:3248`](src/main.c:3248)
```c
// Current problematic code:
if (event_kind == 23455 || event_kind == 23456) {
    // Admin event processing
    int admin_result = process_admin_event_in_config(event, admin_error, sizeof(admin_error), wsi);
} else {
    // Regular event storage and broadcasting
}

// Enhanced secure code with consolidated authorization:
if (result == 0) {
    cJSON* kind_obj = cJSON_GetObjectItem(event, "kind");
    if (kind_obj && cJSON_IsNumber(kind_obj)) {
        int event_kind = (int)cJSON_GetNumberValue(kind_obj);

        // Check if this is an admin event
        if (event_kind == 23455 || event_kind == 23456) {
            // Use consolidated authorization check
            char auth_error[512] = {0};
            int auth_result = is_authorized_admin_event(event, auth_error, sizeof(auth_error));

            if (auth_result == 0) {
                // Authorized admin event - process through admin API
                char admin_error[512] = {0};
                int admin_result = process_admin_event_in_config(event, admin_error, sizeof(admin_error), wsi);
                if (admin_result != 0) {
                    result = -1;
                    strncpy(error_message, admin_error, sizeof(error_message) - 1);
                }
                // Admin events are NOT broadcast to subscriptions
            } else {
                // Unauthorized admin event - treat as regular event
                log_warning("Unauthorized admin event treated as regular event");
                if (store_event(event) != 0) {
                    result = -1;
                    strncpy(error_message, "error: failed to store event", sizeof(error_message) - 1);
                } else {
                    broadcast_event_to_subscriptions(event);
                }
            }
        } else {
            // Regular event - normal processing
            if (store_event(event) != 0) {
                result = -1;
                strncpy(error_message, "error: failed to store event", sizeof(error_message) - 1);
            } else {
                broadcast_event_to_subscriptions(event);
            }
        }
    }
}
```
## Phase 2: Enhanced Admin Event Processing
### 2.1 Admin Event Validation in Config System
**Location**: [`src/config.c`](src/config.c) - [`process_admin_event_in_config()`](src/config.c:2065)
Add additional validation within the admin processing function:
```c
int process_admin_event_in_config(cJSON* event, char* error_buffer, size_t error_buffer_size, struct lws* wsi) {
    // Double-check authorization (defense in depth)
    char auth_error[512] = {0};
    if (is_authorized_admin_event(event, auth_error, sizeof(auth_error)) != 0) {
        snprintf(error_buffer, error_buffer_size, "unauthorized: not a valid admin event");
        return -1;
    }

    // Continue with existing admin event processing...
    // ... rest of function unchanged
}
```
### 2.2 Logging and Monitoring
Add comprehensive logging for admin event attempts:
```c
// In the routing logic - enhanced logging
cJSON* kind_obj = cJSON_GetObjectItem(event, "kind");
cJSON* pubkey_obj = cJSON_GetObjectItem(event, "pubkey");
int event_kind = kind_obj ? (int)cJSON_GetNumberValue(kind_obj) : -1;
const char* event_pubkey = pubkey_obj ? cJSON_GetStringValue(pubkey_obj) : "unknown";

char auth_error[512] = {0};
if (is_authorized_admin_event(event, auth_error, sizeof(auth_error)) == 0) {
    char log_msg[256];
    snprintf(log_msg, sizeof(log_msg),
             "ADMIN EVENT: Authorized admin event (kind=%d) from pubkey=%.16s...",
             event_kind, event_pubkey);
    log_info(log_msg);
} else if (event_kind == 23455 || event_kind == 23456) {
    // This catches unauthorized admin event attempts
    char log_msg[256];
    snprintf(log_msg, sizeof(log_msg),
             "SECURITY: Unauthorized admin event attempt (kind=%d) from pubkey=%.16s...",
             event_kind, event_pubkey);
    log_warning(log_msg);
}
```
## Phase 3: Unified Output Flow Architecture
### 3.1 Current Output Flow Analysis
After analyzing both [`main.c`](src/main.c) and [`config.c`](src/config.c), the **admin event responses already flow through the standard WebSocket output pipeline**. This is the correct architecture and requires no changes.
#### Standard WebSocket Output Pipeline
**Regular Events** ([`main.c:2978-2996`](src/main.c:2978)):
```c
// Database query responses
unsigned char* buf = malloc(LWS_PRE + msg_len);
memcpy(buf + LWS_PRE, msg_str, msg_len);
lws_write(wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
free(buf);
```
**OK Responses** ([`main.c:3342-3375`](src/main.c:3342)):
```c
// Event processing results: ["OK", event_id, success_boolean, message]
unsigned char *buf = malloc(LWS_PRE + response_len);
memcpy(buf + LWS_PRE, response_str, response_len);
lws_write(wsi, buf + LWS_PRE, response_len, LWS_WRITE_TEXT);
free(buf);
```
#### Admin Event Output Pipeline (Already Unified)
**Admin Responses** ([`config.c:2363-2414`](src/config.c:2363)):
```c
// Admin query responses use IDENTICAL pattern
int send_websocket_response_data(struct lws* wsi, cJSON* response_data) {
    unsigned char* buf = malloc(LWS_PRE + response_len);
    memcpy(buf + LWS_PRE, response_str, response_len);
    // Same lws_write() call as regular events
    int result = lws_write(wsi, buf + LWS_PRE, response_len, LWS_WRITE_TEXT);
    free(buf);
    return result;
}
```
### 3.2 Unified Output Flow Confirmation
- **Admin responses already use the same WebSocket transmission mechanism as regular events**
- **Both admin and regular events use identical buffer allocation patterns**
- **Both admin and regular events use the same [`lws_write()`](src/config.c:2393) function**
- **Both admin and regular events follow the same cleanup patterns**
### 3.3 Output Flow Integration Points
The admin event processing in [`config.c:2436`](src/config.c:2436) already integrates correctly with the unified output system:
1. **Admin Query Processing** ([`config.c:2568-2583`](src/config.c:2568)):
- Auth queries return structured JSON via [`send_websocket_response_data()`](src/config.c:2571)
- System commands return status data via [`send_websocket_response_data()`](src/config.c:2631)
2. **Response Format Consistency**:
- Admin responses use standard JSON format
- Regular events use standard Nostr event format
- Both transmitted through same WebSocket pipeline
3. **Error Handling Consistency**:
- Admin errors returned via same WebSocket connection
- Regular event errors returned via OK messages
- Both use identical transmission mechanism
### 3.4 Key Architectural Benefits
**No Changes Required**: The output flow is already unified and correctly implemented.
**Security Separation**: Admin events are processed separately but responses flow through the same secure WebSocket channel.
**Performance Consistency**: Both admin and regular responses use the same optimized transmission path.
**Maintenance Simplicity**: Single WebSocket output pipeline reduces complexity and potential bugs.
### 3.5 Admin Event Flow Summary
```
Admin Event Input → Authorization Check → Admin Processing → Unified WebSocket Output
Regular Event Input → Validation → Storage + Broadcast → Unified WebSocket Output
```
Both flows converge at the **Unified WebSocket Output** stage, which is already correctly implemented.
## Phase 4: Integration Points for Secure Admin Event Routing
### 4.1 Configuration System Integration
**Required Configuration Values**:
- `admin_pubkey` - Public key of authorized administrator
- `relay_pubkey` - Public key of this relay instance
**Integration Points**:
1. [`get_config_value()`](src/config.c) - Used by authorization function
2. [`get_relay_pubkey_cached()`](src/config.c) - Used for relay targeting validation
3. Configuration loading during startup - Must ensure admin/relay pubkeys are available (see the startup check sketched below)
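As an illustration of the startup requirement above, a minimal sketch of a check that both keys are usable before admin routing is enabled. This is a sketch only: the `verify_admin_config_at_startup()` name is hypothetical, and it assumes the `get_config_value()` and `log_warning()` helpers referenced elsewhere in this plan.
```c
// Sketch: verify admin/relay pubkeys are available at startup (hypothetical helper)
static int verify_admin_config_at_startup(void) {
    const char* relay_pubkey = get_config_value("relay_pubkey");
    const char* admin_pubkey = get_config_value("admin_pubkey");

    if (!relay_pubkey || strlen(relay_pubkey) != 64) {
        log_warning("CONFIG: relay_pubkey missing or malformed - admin events cannot be targeted");
        return -1;
    }
    if (!admin_pubkey || strlen(admin_pubkey) != 64) {
        log_warning("CONFIG: admin_pubkey missing or malformed - admin events will be rejected");
        return -1;
    }
    return 0; // Both keys available, admin authorization can operate
}
```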
### 4.3 Forward Declarations Required
**Location**: [`src/main.c`](src/main.c) - Add near other forward declarations (around line 230)
```c
// Forward declarations for enhanced admin event authorization
int is_authorized_admin_event(cJSON* event, char* error_message, size_t error_size);
```
### 4.4 Error Handling Integration
**Enhanced Error Response System**:
```c
// In main.c event processing - enhanced error handling for admin events
if (auth_result != 0) {
    // Admin authorization failed - send detailed OK response
    cJSON* event_id = cJSON_GetObjectItem(event, "id");
    if (event_id && cJSON_IsString(event_id)) {
        cJSON* response = cJSON_CreateArray();
        cJSON_AddItemToArray(response, cJSON_CreateString("OK"));
        cJSON_AddItemToArray(response, cJSON_CreateString(cJSON_GetStringValue(event_id)));
        cJSON_AddItemToArray(response, cJSON_CreateBool(0)); // Failed
        cJSON_AddItemToArray(response, cJSON_CreateString(auth_error));

        // Send via standard WebSocket output pipeline
        char *response_str = cJSON_Print(response);
        if (response_str) {
            size_t response_len = strlen(response_str);
            unsigned char *buf = malloc(LWS_PRE + response_len);
            if (buf) {
                memcpy(buf + LWS_PRE, response_str, response_len);
                lws_write(wsi, buf + LWS_PRE, response_len, LWS_WRITE_TEXT);
                free(buf);
            }
            free(response_str);
        }
        cJSON_Delete(response);
    }
}
```
### 4.5 Logging Integration Points
**Console Logging**: Uses existing [`log_warning()`](src/main.c:993), [`log_info()`](src/main.c:972) functions
**Security Event Categories**:
- Admin authorization success logged via `log_info()`
- Admin authorization failures logged via `log_warning()`
- Admin event processing logged via existing admin logging
## Phase 5: Detailed Function Specifications
### 5.1 Core Authorization Function
**Function**: `is_authorized_admin_event()`
**Location**: [`src/main.c`](src/main.c) or [`src/config.c`](src/config.c)
**Dependencies**:
- `get_config_value()` for admin/relay pubkeys
- `log_warning()` and `log_info()` for logging
- `cJSON` library for event parsing
**Return Values**:
- `0` - Event is authorized for admin processing
- `-1` - Event is unauthorized (treat as regular event)
- `-2` - Validation error (malformed event)
**Error Handling**: Detailed error messages in provided buffer for client feedback
### 5.2 Enhanced Event Routing
**Location**: [`main.c:3248-3340`](src/main.c:3248)
**Integration**: Replaces existing admin event routing logic
**Dependencies**:
- `is_authorized_admin_event()` for authorization
- `process_admin_event_in_config()` for admin processing
- `store_event()` and `broadcast_event_to_subscriptions()` for regular events
**Security Features**:
- Graceful degradation for unauthorized admin events
- Comprehensive logging of authorization attempts
- No broadcast of admin events to subscriptions
- Detailed error responses for failed authorization
### 5.4 Defense-in-Depth Validation
**Primary Validation**: In main event routing logic
**Secondary Validation**: In `process_admin_event_in_config()` function
**Tertiary Validation**: In individual admin command handlers
**Validation Layers**:
1. **Kind Check** - Must be admin event kind (23455/23456)
2. **Relay Targeting Check** - Must have 'p' tag with this relay's pubkey
3. **Admin Signature Check** - Must be signed by authorized admin (only if targeting this relay)
4. **Processing Check** - Additional validation in admin handlers
**Security Logic**:
- If no 'p' tag for this relay → Admin event for different relay (not unauthorized)
- If 'p' tag for this relay + wrong admin signature → "Unauthorized admin event attempt"
## Phase 6: Event Flow Documentation
### 6.1 Complete Event Processing Flow
```
WebSocket Input
      │
      ▼
Unified Validation                ← nostr_validate_unified_request()
      │
      ▼
Kind-Based Routing Check          ← admin kind 23455/23456 ?
      │
      ├── NO (regular event) ──────────────► store_event() + broadcast()
      │                                                 │
      └── YES (admin kind)                              │
               │                                        │
               ▼                                        │
      Admin Authorization Check                         │
               │                                        │
               ├── Unauthorized ──► treat as regular ───┤
               │                                        │
               └── Authorized ──► Admin API             │
                                      │                 ▼
                                      │        WebSocket OK Response
                                      ▼                 │
                             WebSocket Response         │
                                      │                 │
                                      └────────┬────────┘
                                               ▼
                                   Unified WebSocket Output
```
### 6.2 Security Decision Points
1. **Event Kind Check** - Identifies potential admin events
2. **Authorization Validation** - Three-layer security check
3. **Routing Decision** - Admin API vs Regular processing
4. **Response Generation** - Unified output pipeline
5. **Audit Logging** - Security event tracking
### 6.3 Error Handling Paths
**Validation Errors**: Return detailed error messages via OK response
**Authorization Failures**: Log security event + treat as regular event
**Processing Errors**: Return admin-specific error responses
**System Errors**: Fallback to standard error handling
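To make the mapping concrete, a minimal sketch (not the final implementation) of how the `is_authorized_admin_event()` return codes from Section 5.1 could be routed onto these paths, using the same surrounding variables (`event`, `wsi`, `result`, `error_message`) as the routing code in Phase 1.2:
```c
// Sketch: route authorization results onto the error-handling paths above
char auth_error[512] = {0};
int auth_result = is_authorized_admin_event(event, auth_error, sizeof(auth_error));

switch (auth_result) {
    case 0: {
        // Authorized: process through the admin API, respond via unified output
        char admin_error[512] = {0};
        process_admin_event_in_config(event, admin_error, sizeof(admin_error), wsi);
        break;
    }
    case -1:
        // Authorization failure: log and fall back to regular event handling
        log_warning("Unauthorized admin event treated as regular event");
        if (store_event(event) == 0) {
            broadcast_event_to_subscriptions(event);
        }
        break;
    case -2:
    default:
        // Validation error: report the detailed reason back via an OK response
        strncpy(error_message, auth_error, sizeof(error_message) - 1);
        result = -1;
        break;
}
```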
This completes the comprehensive implementation plan for the enhanced admin event API structure with unified output flow architecture.

146
README.md
View File

@@ -22,4 +22,150 @@ Do NOT modify the formatting, add emojis, or change the text. Keep the simple fo
- [ ] NIP-50: Keywords filter
- [ ] NIP-70: Protected Events
## 🔧 Administrator API
C-Relay uses an innovative **event-based administration system** where all configuration and management commands are sent as signed Nostr events using the admin private key generated during first startup. All admin commands use **tag-based parameters** for simplicity and compatibility.
### Authentication
All admin commands require signing with the admin private key displayed during first-time startup. **Save this key securely** - it cannot be recovered and is needed for all administrative operations.
### Event Structure
All admin commands use the same unified event structure with tag-based parameters:
**Admin Command Event:**
```json
{
"id": "event_id",
"pubkey": "admin_public_key",
"created_at": 1234567890,
"kind": 23456,
"content": "<nip44 encrypted command>",
"tags": [
["p", "relay_public_key"],
],
"sig": "event_signature"
}
```
**Admin Response Event:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "<nip44 encrypted response>",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
### Admin Commands
All commands are sent as NIP-44 encrypted content. The following table lists all available commands:
| Command Type | Tag Format | Description |
|--------------|------------|-------------|
| **Configuration Management** |
| `config_update` | `["relay_description", "My Relay"]` | Update relay configuration parameters |
| `config_query` | `["config_query", "list_all_keys"]` | List all available configuration keys |
| **Auth Rules Management** |
| `auth_add_blacklist` | `["blacklist", "pubkey", "abc123..."]` | Add pubkey to blacklist |
| `auth_add_whitelist` | `["whitelist", "pubkey", "def456..."]` | Add pubkey to whitelist |
| `auth_query_all` | `["auth_query", "all"]` | Query all auth rules |
| `auth_query_type` | `["auth_query", "whitelist"]` | Query specific rule type |
| `auth_query_pattern` | `["auth_query", "pattern", "abc123..."]` | Query specific pattern |
| **System Commands** |
| `system_clear_auth` | `["system_command", "clear_all_auth_rules"]` | Clear all auth rules |
| `system_status` | `["system_command", "system_status"]` | Get system status |
### Available Configuration Keys
**Basic Relay Settings:**
- `relay_description`: Relay description text
- `relay_contact`: Contact information
- `max_connections`: Maximum concurrent connections
- `max_subscriptions_per_client`: Max subscriptions per client
- `max_event_tags`: Maximum tags per event
- `max_content_length`: Maximum event content length
**Authentication & Access Control:**
- `auth_enabled`: Enable whitelist/blacklist auth rules (`true`/`false`)
- `nip42_auth_required`: Enable NIP-42 cryptographic authentication (`true`/`false`)
- `nip42_auth_required_kinds`: Event kinds requiring NIP-42 auth (comma-separated)
- `nip42_challenge_timeout`: NIP-42 challenge expiration seconds
**Proof of Work & Validation:**
- `pow_min_difficulty`: Minimum proof-of-work difficulty
- `nip40_expiration_enabled`: Enable event expiration (`true`/`false`)
### Response Format
All admin commands return **signed EVENT responses** via WebSocket following standard Nostr protocol. Responses use JSON content with structured data.
#### Response Examples
**Success Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"status\": \"success\", \"message\": \"Operation completed successfully\"}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**Error Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"status\": \"error\", \"message\": \"Error: invalid configuration value\"}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**Auth Rules Query Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"auth_rules\", \"total_results\": 2, \"data\": [{\"rule_type\": \"blacklist\", \"pattern_type\": \"pubkey\", \"pattern_value\": \"abc123...\", \"action\": \"deny\"}]}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```
**Configuration Query Response:**
```json
["EVENT", "temp_sub_id", {
"id": "response_event_id",
"pubkey": "relay_public_key",
"created_at": 1234567890,
"kind": 23457,
"content": "nip44 encrypted:{\"query_type\": \"config_keys\", \"config_keys\": [\"auth_enabled\", \"max_connections\"], \"descriptions\": {\"auth_enabled\": \"Enable whitelist/blacklist rules\"}}",
"tags": [
["p", "admin_public_key"]
],
"sig": "response_event_signature"
}]
```

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

537
docs/admin_api_plan.md Normal file
View File

@@ -0,0 +1,537 @@
# C-Relay Administrator API Implementation Plan
## Problem Analysis
### Current Issues Identified:
1. **Schema Mismatch**: Storage system (config.c) vs Validation system (request_validator.c) use different column names and values
2. **Missing API Endpoint**: No way to clear auth_rules table for testing
3. **Configuration Gap**: Auth rules enforcement may not be properly enabled
4. **Documentation Gap**: Admin API commands not documented
### Root Cause: Auth Rules Schema Inconsistency
**Current Schema (sql_schema.h lines 140-150):**
```sql
CREATE TABLE auth_rules (
    rule_type TEXT CHECK (rule_type IN ('whitelist', 'blacklist')),
    pattern_type TEXT CHECK (pattern_type IN ('pubkey', 'hash')),
    pattern_value TEXT,
    action TEXT CHECK (action IN ('allow', 'deny')),
    active INTEGER DEFAULT 1
);
```
**Storage Implementation (config.c):**
- Stores: `rule_type='blacklist'`, `pattern_type='pubkey'`, `pattern_value='hex'`, `action='allow'`
**Validation Implementation (request_validator.c):**
- Queries: `rule_type='pubkey_blacklist'`, `rule_target='hex'`, `operation='event'`, `enabled=1`
**MISMATCH**: Validator looks for non-existent columns and wrong rule_type values!
## Proposed Solution Architecture
### Phase 1: API Documentation & Standardization
#### Admin API Commands (via WebSocket with admin private key)
**Kind 23455: Configuration Management (Ephemeral)**
- Update relay settings, limits, authentication policies
- **Standard Mode**: Commands in tags `["config_key", "config_value"]`
- **Encrypted Mode**: Commands NIP-44 encrypted in content `{"encrypted_tags": "..."}`
- Content: Descriptive text or encrypted payload
- Security: Optional NIP-44 encryption for sensitive operations
**Kind 23456: Auth Rules & System Management (Ephemeral)**
- Auth rules: Add/remove/query whitelist/blacklist rules
- System commands: clear rules, status, cache management
- **Standard Mode**: Commands in tags
- Rule format: `["rule_type", "pattern_type", "pattern_value"]`
- Query format: `["auth_query", "filter"]`
- System format: `["system_command", "command_name"]`
- **Encrypted Mode**: Commands NIP-44 encrypted in content `{"encrypted_tags": "..."}`
- Content: Action description + optional encrypted payload
- Security: Optional NIP-44 encryption for sensitive operations
#### Configuration Query Commands (using Kind 23455)
1. **List All Configuration Keys (Standard)**:
```json
{
"kind": 23455,
"content": "Discovery query",
"tags": [["config_query", "list_all_keys"]]
}
```
2. **List All Configuration Keys (Encrypted)**:
```json
{
"kind": 23455,
"content": "{\"query\":\"list_config_keys\",\"encrypted_tags\":\"nip44_encrypted_payload\"}",
"tags": []
}
```
*Encrypted payload contains:* `[["config_query", "list_all_keys"]]`
3. **Get Current Configuration (Standard)**:
```json
{
"kind": 23455,
"content": "Config query",
"tags": [["config_query", "get_current_config"]]
}
```
4. **Get Current Configuration (Encrypted)**:
```json
{
"kind": 23455,
"content": "{\"query\":\"get_config\",\"encrypted_tags\":\"nip44_encrypted_payload\"}",
"tags": []
}
```
*Encrypted payload contains:* `[["config_query", "get_current_config"]]`
#### System Management Commands (using Kind 23456)
1. **Clear All Auth Rules (Standard)**:
```json
{
"kind": 23456,
"content": "{\"action\":\"clear_all\"}",
"tags": [["system_command", "clear_all_auth_rules"]]
}
```
2. **Clear All Auth Rules (Encrypted)**:
```json
{
"kind": 23456,
"content": "{\"action\":\"clear_all\",\"encrypted_tags\":\"nip44_encrypted_payload\"}",
"tags": []
}
```
*Encrypted payload contains:* `[["system_command", "clear_all_auth_rules"]]`
3. **Query All Auth Rules (Standard)**:
```json
{
"kind": 23456,
"content": "{\"query\":\"list_auth_rules\"}",
"tags": [["auth_query", "all"]]
}
```
4. **Query All Auth Rules (Encrypted)**:
```json
{
"kind": 23456,
"content": "{\"query\":\"list_auth_rules\",\"encrypted_tags\":\"nip44_encrypted_payload\"}",
"tags": []
}
```
*Encrypted payload contains:* `[["auth_query", "all"]]`
5. **Add Blacklist Rule (Standard)**:
```json
{
"kind": 23456,
"content": "{\"action\":\"add\"}",
"tags": [["blacklist", "pubkey", "deadbeef1234abcd..."]]
}
```
6. **Add Blacklist Rule (Encrypted)**:
```json
{
"kind": 23456,
"content": "{\"action\":\"add\",\"encrypted_tags\":\"nip44_encrypted_payload\"}",
"tags": []
}
```
*Encrypted payload contains:* `[["blacklist", "pubkey", "deadbeef1234abcd..."]]`
### Phase 2: Auth Rules Schema Alignment
#### Option A: Fix Validator to Match Schema (RECOMMENDED)
**Update request_validator.c:**
```sql
-- OLD (broken):
WHERE rule_type = 'pubkey_blacklist' AND rule_target = ? AND operation = ? AND enabled = 1
-- NEW (correct):
WHERE rule_type = 'blacklist' AND pattern_type = 'pubkey' AND pattern_value = ? AND active = 1
```
**Benefits:**
- Matches actual database schema
- Simpler rule_type values ('blacklist' vs 'pubkey_blacklist')
- Uses existing columns (pattern_value vs rule_target)
- Consistent with storage implementation
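As a rough sketch of what Option A looks like inside the validator, assuming the sqlite3 C API and the `auth_rules` schema shown above (the function name is illustrative, not the final code):
```c
// Sketch: corrected blacklist lookup against the actual auth_rules schema
static int is_pubkey_blacklisted(sqlite3* db, const char* pubkey) {
    const char* sql =
        "SELECT action FROM auth_rules WHERE rule_type = 'blacklist' "
        "AND pattern_type = 'pubkey' AND pattern_value = ? AND active = 1 LIMIT 1";
    sqlite3_stmt* stmt = NULL;
    int blacklisted = 0;

    if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) {
        return 0; // On query failure, treat as not blacklisted
    }
    sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
    if (sqlite3_step(stmt) == SQLITE_ROW) {
        blacklisted = 1; // A matching, active blacklist row exists
    }
    sqlite3_finalize(stmt);
    return blacklisted;
}
```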
#### Option B: Update Schema to Match Validator (NOT RECOMMENDED)
Would require changing schema, migration scripts, and storage logic.
### Phase 3: Implementation Priority
#### High Priority (Critical for blacklist functionality):
1. Fix request_validator.c schema mismatch
2. Ensure auth_required configuration is enabled
3. Update tests to use ephemeral event kinds (23455/23456)
4. Test blacklist enforcement
#### Medium Priority (Enhanced Admin Features):
1. **Implement NIP-44 Encryption Support**:
- Detect empty tags array for Kind 23455/23456 events
- Parse `encrypted_tags` field from content JSON
- Decrypt using admin privkey and relay pubkey
- Process decrypted tags as normal commands
2. Add clear_all_auth_rules system command
3. Add auth rule query functionality (both standard and encrypted modes)
4. Add configuration discovery (list available config keys)
5. Enhanced error reporting in admin API
6. Conflict resolution (same pubkey in whitelist + blacklist)
#### Security Priority (NIP-44 Implementation):
1. **Encryption Detection Logic**: Check for empty tags + encrypted_tags field
2. **Key Pair Management**: Use admin private key + relay public key for NIP-44
3. **Backward Compatibility**: Support both standard and encrypted modes
4. **Error Handling**: Graceful fallback if decryption fails
5. **Performance**: Cache decrypted results to avoid repeated decryption
#### Low Priority (Documentation & Polish):
1. Complete README.md API documentation
2. Example usage scripts
3. Admin client tools
### Phase 4: Expected API Structure
#### README.md Documentation Format:
```markdown
# C-Relay Administrator API
## Authentication
All admin commands require signing with the admin private key generated during first startup.
## Configuration Management (Kind 23455 - Ephemeral)
Update relay configuration parameters or query available settings.
**Configuration Update Event:**
```json
{
"kind": 23455,
"content": "Configuration update",
"tags": [
["config_key1", "config_value1"],
["config_key2", "config_value2"]
]
}
```
**List Available Config Keys:**
```json
{
"kind": 23455,
"content": "{\"query\":\"list_config_keys\",\"description\":\"Get editable config keys\"}",
"tags": [
["config_query", "list_all_keys"]
]
}
```
**Get Current Configuration:**
```json
{
"kind": 23455,
"content": "{\"query\":\"get_config\",\"description\":\"Get current config values\"}",
"tags": [
["config_query", "get_current_config"]
]
}
```
## Auth Rules Management (Kind 23456 - Ephemeral)
Manage whitelist and blacklist rules.
**Add Rule Event:**
```json
{
"kind": 23456,
"content": "{\"action\":\"add\",\"description\":\"Block malicious user\"}",
"tags": [
["blacklist", "pubkey", "deadbeef1234..."]
]
}
```
**Remove Rule Event:**
```json
{
"kind": 23456,
"content": "{\"action\":\"remove\",\"description\":\"Unblock user\"}",
"tags": [
["blacklist", "pubkey", "deadbeef1234..."]
]
}
```
**Query All Auth Rules:**
```json
{
"kind": 23456,
"content": "{\"query\":\"list_auth_rules\",\"description\":\"Get all rules\"}",
"tags": [
["auth_query", "all"]
]
}
```
**Query Whitelist Rules Only:**
```json
{
"kind": 23456,
"content": "{\"query\":\"list_auth_rules\",\"description\":\"Get whitelist\"}",
"tags": [
["auth_query", "whitelist"]
]
}
```
**Check Specific Pattern:**
```json
{
"kind": 23456,
"content": "{\"query\":\"check_pattern\",\"description\":\"Check if pattern exists\"}",
"tags": [
["auth_query", "pattern", "deadbeef1234..."]
]
}
```
## System Management (Kind 23456 - Ephemeral)
System administration commands using the same kind as auth rules.
**Clear All Auth Rules:**
```json
{
"kind": 23456,
"content": "{\"action\":\"clear_all\",\"description\":\"Clear all auth rules\"}",
"tags": [
["system_command", "clear_all_auth_rules"]
]
}
```
**System Status:**
```json
{
"kind": 23456,
"content": "{\"action\":\"system_status\",\"description\":\"Get system status\"}",
"tags": [
["system_command", "system_status"]
]
}
```
## Response Format
All admin commands return JSON responses via WebSocket:
**Success Response:**
```json
["OK", "event_id", true, "success_message"]
```
**Error Response:**
```json
["OK", "event_id", false, "error_message"]
```
## Configuration Keys
- `relay_description`: Relay description text
- `relay_contact`: Contact information
- `auth_enabled`: Enable authentication system
- `max_connections`: Maximum concurrent connections
- `pow_min_difficulty`: Minimum proof-of-work difficulty
- ... (full list of config keys)
## Examples
### Enable Authentication & Add Blacklist
```bash
# 1. Enable auth system
nak event -k 23455 --content "Enable authentication" \
-t "auth_enabled=true" \
--sec $ADMIN_PRIVKEY | nak event ws://localhost:8888
# 2. Add user to blacklist
nak event -k 23456 --content '{"action":"add","description":"Spam user"}' \
-t "blacklist=pubkey;$SPAM_USER_PUBKEY" \
--sec $ADMIN_PRIVKEY | nak event ws://localhost:8888
# 3. Query all auth rules
nak event -k 23456 --content '{"query":"list_auth_rules","description":"Get all rules"}' \
-t "auth_query=all" \
--sec $ADMIN_PRIVKEY | nak event ws://localhost:8888
# 4. Clear all rules for testing
nak event -k 23456 --content '{"action":"clear_all","description":"Clear all rules"}' \
-t "system_command=clear_all_auth_rules" \
--sec $ADMIN_PRIVKEY | nak event ws://localhost:8888
```
## Expected Response Formats
### Configuration Query Response
```json
["EVENT", "subscription_id", {
"kind": 23455,
"content": "{\"config_keys\": [\"auth_enabled\", \"max_connections\"], \"descriptions\": {\"auth_enabled\": \"Enable whitelist/blacklist rules\"}}",
"tags": [["response_type", "config_keys_list"]]
}]
```
### Current Config Response
```json
["EVENT", "subscription_id", {
"kind": 23455,
"content": "{\"current_config\": {\"auth_enabled\": \"true\", \"max_connections\": \"1000\"}}",
"tags": [["response_type", "current_config"]]
}]
```
### Auth Rules Query Response
```json
["EVENT", "subscription_id", {
"kind": 23456,
"content": "{\"auth_rules\": [{\"rule_type\": \"blacklist\", \"pattern_type\": \"pubkey\", \"pattern_value\": \"deadbeef...\"}, {\"rule_type\": \"whitelist\", \"pattern_type\": \"pubkey\", \"pattern_value\": \"cafebabe...\"}]}",
"tags": [["response_type", "auth_rules_list"], ["query_type", "all"]]
}]
```
### Pattern Check Response
```json
["EVENT", "subscription_id", {
"kind": 23456,
"content": "{\"pattern_exists\": true, \"rule_type\": \"blacklist\", \"pattern_value\": \"deadbeef...\"}",
"tags": [["response_type", "pattern_check"], ["pattern", "deadbeef..."]]
}]
```
## Implementation Steps
1. **Document API** (this file) ✅
2. **Update to ephemeral event kinds** ✅
3. **Fix request_validator.c** schema mismatch
4. **Update tests** to use Kind 23455/23456
5. **Add auth rule query functionality**
6. **Add configuration discovery feature**
7. **Test blacklist functionality**
8. **Add remaining system commands**
## Testing Plan
1. Fix schema mismatch and test basic blacklist
2. Add clear_auth_rules and test table cleanup
3. Test whitelist/blacklist conflict scenarios
4. Test all admin API commands end-to-end
5. Update integration tests
This plan addresses the immediate blacklist issue while establishing a comprehensive admin API framework for future expansion.
## NIP-44 Encryption Implementation Details
### Server-Side Detection Logic
```c
// In admin event processing function
bool is_encrypted_command(struct nostr_event *event) {
    // Check if Kind 23455 or 23456 with empty tags
    if ((event->kind == 23455 || event->kind == 23456) &&
        event->tags_count == 0) {
        return true;
    }
    return false;
}

cJSON *decrypt_admin_tags(struct nostr_event *event) {
    cJSON *content_json = cJSON_Parse(event->content);
    if (!content_json) return NULL;

    cJSON *encrypted_tags = cJSON_GetObjectItem(content_json, "encrypted_tags");
    if (!cJSON_IsString(encrypted_tags)) {
        cJSON_Delete(content_json);
        return NULL;
    }

    // Decrypt using NIP-44 with the relay private key and the admin public key
    // (argument order matches nip44_decrypt(ciphertext, recipient_privkey, sender_pubkey) below)
    char *decrypted = nip44_decrypt(
        cJSON_GetStringValue(encrypted_tags),
        relay_private_key,  // Our private key (recipient)
        admin_pubkey        // Shared secret with admin (sender)
    );
    cJSON_Delete(content_json);
    if (!decrypted) return NULL;

    cJSON *decrypted_tags = cJSON_Parse(decrypted);
    free(decrypted);
    return decrypted_tags; // Returns tag array: [["key1", "val1"], ["key2", "val2"]]
}
```
### Admin Event Processing Flow
1. **Receive Event**: Kind 23455/23456 with admin signature
2. **Check Mode**: Empty tags = encrypted, populated tags = standard
3. **Decrypt if Needed**: Extract and decrypt `encrypted_tags` from content
4. **Process Commands**: Use decrypted/standard tags for command processing
5. **Execute**: Same logic for both modes after tag extraction (see the sketch below)
6. **Respond**: Standard response format (optionally encrypt response)
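A minimal sketch of how steps 2-5 could fit together, reusing `is_encrypted_command()` and `decrypt_admin_tags()` from the detection logic above. `process_admin_command_tags()` is a hypothetical name for the shared command logic, and the parsed tag array is passed in rather than assuming a particular field on the event struct:
```c
// Sketch: unified handling of standard vs. encrypted admin commands
int handle_admin_event(struct nostr_event *event, cJSON *event_tags,
                       char *error, size_t error_size) {
    cJSON *tags = event_tags;  // Standard mode: tags taken straight from the event
    int owns_tags = 0;

    if (is_encrypted_command(event)) {
        // Encrypted mode: pull the tag array out of the NIP-44 encrypted content
        tags = decrypt_admin_tags(event);
        if (!tags) {
            snprintf(error, error_size, "admin: failed to decrypt encrypted_tags");
            return -1;  // Graceful fallback: reject instead of guessing
        }
        owns_tags = 1;
    }

    // Same command processing for both modes after tag extraction
    int result = process_admin_command_tags(event, tags, error, error_size);

    if (owns_tags) {
        cJSON_Delete(tags);
    }
    return result;
}
```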
### Security Benefits
- **Command Privacy**: Admin operations invisible in event tags
- **Replay Protection**: NIP-44 includes timestamp/randomness
- **Key Management**: Uses existing admin/relay key pair
- **Backward Compatible**: Standard mode still works
- **Performance**: Only decrypt when needed (empty tags detection)
### NIP-44 Library Integration
The relay will need to integrate a NIP-44 encryption/decryption library:
```c
// Required NIP-44 functions
char* nip44_encrypt(const char* plaintext, const char* sender_privkey, const char* recipient_pubkey);
char* nip44_decrypt(const char* ciphertext, const char* recipient_privkey, const char* sender_pubkey);
```
### Implementation Priority (Updated)
#### Phase 1: Core Infrastructure (Complete)
- [x] Event-based admin authentication system
- [x] Kind 23455/23456 (Configuration/Auth Rules) processing
- [x] Basic configuration parameter updates
- [x] Auth rule add/remove/clear functionality
- [x] Updated to ephemeral event kinds
- [x] Designed NIP-44 encryption support
#### Phase 2: NIP-44 Encryption Support (Next Priority)
- [ ] **Add NIP-44 library dependency** to project
- [ ] **Implement encryption detection logic** (`is_encrypted_command()`)
- [ ] **Add decrypt_admin_tags() function** with NIP-44 support
- [ ] **Update admin command processing** to handle both modes
- [ ] **Test encrypted admin commands** end-to-end
#### Phase 3: Enhanced Features
- [ ] **Auth rule query functionality** (both standard and encrypted modes)
- [ ] **Configuration discovery API** (list available config keys)
- [ ] **Enhanced error messages** with encryption status
- [ ] **Performance optimization** (caching, async decrypt)
#### Phase 4: Schema Fixes (Critical)
- [ ] **Fix request_validator.c** schema mismatch
- [ ] **Enable blacklist enforcement** with encrypted commands
- [ ] **Update tests** to use both standard and encrypted modes
This enhanced admin API provides enterprise-grade security while maintaining ease of use for basic operations.

View File

@@ -198,25 +198,54 @@ fi
echo "Build successful. Proceeding with relay restart..."
# Kill existing relay if running
# Kill existing relay if running - start aggressive immediately
echo "Stopping any existing relay servers..."
pkill -f "c_relay_" 2>/dev/null
sleep 2 # Give time for shutdown
# Check if port is still bound
if lsof -i :8888 >/dev/null 2>&1; then
echo "Port 8888 still in use, force killing..."
fuser -k 8888/tcp 2>/dev/null || echo "No process on port 8888"
# Get all relay processes and kill them immediately with -9
RELAY_PIDS=$(pgrep -f "c_relay_" || echo "")
if [ -n "$RELAY_PIDS" ]; then
echo "Force killing relay processes immediately: $RELAY_PIDS"
kill -9 $RELAY_PIDS 2>/dev/null
else
echo "No existing relay processes found"
fi
# Get any remaining processes
REMAINING_PIDS=$(pgrep -f "c_relay_" || echo "")
if [ -n "$REMAINING_PIDS" ]; then
echo "Force killing remaining processes: $REMAINING_PIDS"
kill -9 $REMAINING_PIDS 2>/dev/null
# Ensure port 8888 is completely free with retry loop
echo "Ensuring port 8888 is available..."
for attempt in {1..15}; do
if ! lsof -i :8888 >/dev/null 2>&1; then
echo "Port 8888 is now free"
break
fi
echo "Attempt $attempt: Port 8888 still in use, force killing..."
# Kill anything using port 8888
fuser -k 8888/tcp 2>/dev/null || true
# Double-check for any remaining relay processes
REMAINING_PIDS=$(pgrep -f "c_relay_" || echo "")
if [ -n "$REMAINING_PIDS" ]; then
echo "Killing remaining relay processes: $REMAINING_PIDS"
kill -9 $REMAINING_PIDS 2>/dev/null || true
fi
sleep 2
if [ $attempt -eq 15 ]; then
echo "ERROR: Could not free port 8888 after 15 attempts"
echo "Current processes using port:"
lsof -i :8888 2>/dev/null || echo "No process details available"
echo "You may need to manually kill processes or reboot"
exit 1
fi
done
# Final safety check - ensure no relay processes remain
FINAL_PIDS=$(pgrep -f "c_relay_" || echo "")
if [ -n "$FINAL_PIDS" ]; then
echo "Final cleanup: killing processes $FINAL_PIDS"
kill -9 $FINAL_PIDS 2>/dev/null || true
sleep 1
else
echo "No existing relay found"
fi
# Clean up PID file

View File

@@ -1 +1 @@
3327716
285781

File diff suppressed because it is too large Load Diff

View File

@@ -4,6 +4,10 @@
#include <sqlite3.h>
#include <cjson/cJSON.h>
#include <time.h>
#include <pthread.h>
// Forward declaration for WebSocket support
struct lws;
// Configuration constants
#define CONFIG_VALUE_MAX_LENGTH 1024
@@ -23,14 +27,71 @@
// Database path for event-based config
extern char g_database_path[512];
// Configuration manager structure
// Unified configuration cache structure (consolidates all caching systems)
typedef struct {
sqlite3* db;
char relay_pubkey[65];
// Critical keys (frequently accessed)
char admin_pubkey[65];
time_t last_config_check;
char config_file_path[512]; // Temporary for compatibility
} config_manager_t;
char relay_pubkey[65];
// Auth config (from request_validator)
int auth_required;
long max_file_size;
int admin_enabled;
int nip42_mode;
int nip42_challenge_timeout;
int nip42_time_tolerance;
// Static buffer for config values (replaces static buffers in get_config_value functions)
char temp_buffer[CONFIG_VALUE_MAX_LENGTH];
// NIP-11 relay information (migrated from g_relay_info in main.c)
struct {
char name[RELAY_NAME_MAX_LENGTH];
char description[RELAY_DESCRIPTION_MAX_LENGTH];
char banner[RELAY_URL_MAX_LENGTH];
char icon[RELAY_URL_MAX_LENGTH];
char pubkey[RELAY_PUBKEY_MAX_LENGTH];
char contact[RELAY_CONTACT_MAX_LENGTH];
char software[RELAY_URL_MAX_LENGTH];
char version[64];
char privacy_policy[RELAY_URL_MAX_LENGTH];
char terms_of_service[RELAY_URL_MAX_LENGTH];
cJSON* supported_nips;
cJSON* limitation;
cJSON* retention;
cJSON* relay_countries;
cJSON* language_tags;
cJSON* tags;
char posting_policy[RELAY_URL_MAX_LENGTH];
cJSON* fees;
char payments_url[RELAY_URL_MAX_LENGTH];
} relay_info;
// NIP-13 PoW configuration (migrated from g_pow_config in main.c)
struct {
int enabled;
int min_pow_difficulty;
int validation_flags;
int require_nonce_tag;
int reject_lower_targets;
int strict_format;
int anti_spam_mode;
} pow_config;
// NIP-40 Expiration configuration (migrated from g_expiration_config in main.c)
struct {
int enabled;
int strict_mode;
int filter_responses;
int delete_expired;
long grace_period;
} expiration_config;
// Cache management
time_t cache_expires;
int cache_valid;
pthread_mutex_t cache_lock;
} unified_config_cache_t;
// Command line options structure for first-time startup
typedef struct {
@@ -39,8 +100,8 @@ typedef struct {
char relay_privkey_override[65]; // Empty string = not set, 64-char hex = override
} cli_options_t;
// Global configuration manager
extern config_manager_t g_config_manager;
// Global unified configuration cache
extern unified_config_cache_t g_unified_cache;
// Core configuration functions (temporary compatibility)
int init_configuration_system(const char* config_dir_override, const char* config_file_override);
@@ -90,4 +151,63 @@ int parse_auth_required_kinds(const char* kinds_str, int* kinds_array, int max_k
int is_nip42_auth_required_for_kind(int event_kind);
int is_nip42_auth_globally_required(void);
// ================================
// NEW ADMIN API FUNCTIONS
// ================================
// Config table management functions (config table created via embedded schema)
const char* get_config_value_from_table(const char* key);
int set_config_value_in_table(const char* key, const char* value, const char* data_type,
const char* description, const char* category, int requires_restart);
int update_config_in_table(const char* key, const char* value);
int populate_default_config_values(void);
int add_pubkeys_to_config_table(void);
// Admin event processing functions (updated with WebSocket support)
int process_admin_event_in_config(cJSON* event, char* error_message, size_t error_size, struct lws* wsi);
int process_admin_config_event(cJSON* event, char* error_message, size_t error_size);
int process_admin_auth_event(cJSON* event, char* error_message, size_t error_size, struct lws* wsi);
// Unified Kind 23456 handler functions
int handle_kind_23456_unified(cJSON* event, char* error_message, size_t error_size, struct lws* wsi);
int handle_auth_query_unified(cJSON* event, const char* query_type, char* error_message, size_t error_size, struct lws* wsi);
int handle_system_command_unified(cJSON* event, const char* command, char* error_message, size_t error_size, struct lws* wsi);
int handle_auth_rule_modification_unified(cJSON* event, char* error_message, size_t error_size);
// WebSocket response functions
int send_websocket_response_data(cJSON* event, cJSON* response_data, struct lws* wsi);
cJSON* build_query_response(const char* query_type, cJSON* results_array, int total_count);
// Auth rules management functions
int add_auth_rule_from_config(const char* rule_type, const char* pattern_type,
const char* pattern_value, const char* action);
int remove_auth_rule_from_config(const char* rule_type, const char* pattern_type,
const char* pattern_value);
// Unified configuration cache management
void force_config_cache_refresh(void);
const char* get_admin_pubkey_cached(void);
const char* get_relay_pubkey_cached(void);
void invalidate_config_cache(void);
int reload_config_from_table(void);
// Hybrid config access functions
const char* get_config_value_hybrid(const char* key);
int is_config_table_ready(void);
// Migration support functions
int initialize_config_system_with_migration(void);
int migrate_config_from_events_to_table(void);
int populate_config_table_from_event(const cJSON* event);
// Startup configuration processing functions
int process_startup_config_event(const cJSON* event);
int process_startup_config_event_with_fallback(const cJSON* event);
// Dynamic event generation functions for WebSocket configuration fetching
cJSON* generate_config_event_from_table(void);
int req_filter_requests_config_events(const cJSON* filter);
cJSON* generate_synthetic_config_event_for_subscription(const char* sub_id, const cJSON* filters);
char* generate_config_event_json(void);
#endif /* CONFIG_H */

File diff suppressed because it is too large Load Diff

View File

@@ -132,24 +132,11 @@ typedef struct {
int time_tolerance_seconds;
} nip42_challenge_manager_t;
// Cached configuration structure
typedef struct {
int auth_required; // Whether authentication is required
long max_file_size; // Maximum file size in bytes
int admin_enabled; // Whether admin interface is enabled
char admin_pubkey[65]; // Admin public key
int nip42_mode; // NIP-42 authentication mode
int nip42_challenge_timeout; // NIP-42 challenge timeout in seconds
int nip42_time_tolerance; // NIP-42 time tolerance in seconds
time_t cache_expires; // When cache expires
int cache_valid; // Whether cache is valid
} auth_config_cache_t;
//=============================================================================
// GLOBAL STATE
//=============================================================================
static auth_config_cache_t g_auth_cache = {0};
// No longer using local auth cache - using unified cache from config.c
static nip42_challenge_manager_t g_challenge_manager = {0};
static int g_validator_initialized = 0;
@@ -222,15 +209,15 @@ int ginxsom_request_validator_init(const char *db_path, const char *app_name) {
return result;
}
// Initialize NIP-42 challenge manager
// Initialize NIP-42 challenge manager using unified config
memset(&g_challenge_manager, 0, sizeof(g_challenge_manager));
g_challenge_manager.timeout_seconds =
g_auth_cache.nip42_challenge_timeout > 0
? g_auth_cache.nip42_challenge_timeout
: 600;
g_challenge_manager.time_tolerance_seconds =
g_auth_cache.nip42_time_tolerance > 0 ? g_auth_cache.nip42_time_tolerance
: 300;
const char* nip42_timeout = get_config_value("nip42_challenge_timeout");
g_challenge_manager.timeout_seconds = nip42_timeout ? atoi(nip42_timeout) : 600;
const char* nip42_tolerance = get_config_value("nip42_time_tolerance");
g_challenge_manager.time_tolerance_seconds = nip42_tolerance ? atoi(nip42_tolerance) : 300;
g_challenge_manager.last_cleanup = time(NULL);
g_validator_initialized = 1;
@@ -243,12 +230,15 @@ int ginxsom_request_validator_init(const char *db_path, const char *app_name) {
* Check if authentication rules are enabled
*/
int nostr_auth_rules_enabled(void) {
// Reload config if cache expired
if (!g_auth_cache.cache_valid || time(NULL) > g_auth_cache.cache_expires) {
reload_auth_config();
// Use unified cache from config.c
const char* auth_enabled = get_config_value("auth_enabled");
if (auth_enabled && strcmp(auth_enabled, "true") == 0) {
return 1;
}
return g_auth_cache.auth_required;
// Also check legacy key
const char* auth_rules_enabled = get_config_value("auth_rules_enabled");
return (auth_rules_enabled && strcmp(auth_rules_enabled, "true") == 0) ? 1 : 0;
}
///////////////////////////////////////////////////////////////////////////////////////
@@ -306,14 +296,12 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
int event_kind = (int)cJSON_GetNumberValue(kind);
// 5. Reload config if needed
if (!g_auth_cache.cache_valid || time(NULL) > g_auth_cache.cache_expires) {
reload_auth_config();
}
// 5. Check configuration using unified cache
int auth_required = nostr_auth_rules_enabled();
char config_msg[256];
sprintf(config_msg, "VALIDATOR_DEBUG: STEP 5 PASSED - Event kind: %d, auth_required: %d\n",
event_kind, g_auth_cache.auth_required);
event_kind, auth_required);
validator_debug_log(config_msg);
/////////////////////////////////////////////////////////////////////
@@ -352,7 +340,9 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
if (event_kind == 22242) {
validator_debug_log("VALIDATOR_DEBUG: STEP 8 - Processing NIP-42 challenge response\n");
if (g_auth_cache.nip42_mode == 0) {
// Check NIP-42 mode using unified cache
const char* nip42_enabled = get_config_value("nip42_auth_enabled");
if (nip42_enabled && strcmp(nip42_enabled, "false") == 0) {
validator_debug_log("VALIDATOR_DEBUG: STEP 8 FAILED - NIP-42 is disabled\n");
cJSON_Delete(event);
return NOSTR_ERROR_NIP42_DISABLED;
@@ -370,7 +360,7 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
/////////////////////////////////////////////////////////////////////
// 9. Check if authentication rules are enabled
if (!g_auth_cache.auth_required) {
if (!auth_required) {
validator_debug_log("VALIDATOR_DEBUG: STEP 9 - Authentication disabled, skipping database auth rules\n");
} else {
// 10. Check database authentication rules (only if auth enabled)
@@ -404,17 +394,23 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
/////////////////////////////////////////////////////////////////////
// 11. NIP-13 Proof of Work validation
if (g_pow_config.enabled && g_pow_config.min_pow_difficulty > 0) {
pthread_mutex_lock(&g_unified_cache.cache_lock);
int pow_enabled = g_unified_cache.pow_config.enabled;
int pow_min_difficulty = g_unified_cache.pow_config.min_pow_difficulty;
int pow_validation_flags = g_unified_cache.pow_config.validation_flags;
pthread_mutex_unlock(&g_unified_cache.cache_lock);
if (pow_enabled && pow_min_difficulty > 0) {
validator_debug_log("VALIDATOR_DEBUG: STEP 11 - Validating NIP-13 Proof of Work\n");
nostr_pow_result_t pow_result;
int pow_validation_result = nostr_validate_pow(event, g_pow_config.min_pow_difficulty,
g_pow_config.validation_flags, &pow_result);
int pow_validation_result = nostr_validate_pow(event, pow_min_difficulty,
pow_validation_flags, &pow_result);
if (pow_validation_result != NOSTR_SUCCESS) {
char pow_msg[256];
sprintf(pow_msg, "VALIDATOR_DEBUG: STEP 11 FAILED - PoW validation failed (error=%d, difficulty=%d/%d)\n",
pow_validation_result, pow_result.actual_difficulty, g_pow_config.min_pow_difficulty);
pow_validation_result, pow_result.actual_difficulty, pow_min_difficulty);
validator_debug_log(pow_msg);
cJSON_Delete(event);
return pow_validation_result;
@@ -553,7 +549,6 @@ void nostr_request_validator_clear_violation(void) {
*/
void ginxsom_request_validator_cleanup(void) {
g_validator_initialized = 0;
memset(&g_auth_cache, 0, sizeof(g_auth_cache));
nostr_request_validator_clear_violation();
}
@@ -573,145 +568,22 @@ void nostr_request_result_free_file_data(nostr_request_result_t *result) {
// HELPER FUNCTIONS
//=============================================================================
/**
* Get cache timeout from environment variable or default
*/
static int get_cache_timeout(void) {
char *no_cache = getenv("GINX_NO_CACHE");
char *cache_timeout = getenv("GINX_CACHE_TIMEOUT");
if (no_cache && strcmp(no_cache, "1") == 0) {
return 0; // No caching
}
if (cache_timeout) {
int timeout = atoi(cache_timeout);
return (timeout >= 0) ? timeout : 300; // Use provided value or default
}
return 300; // Default 5 minutes
}
/**
* Force cache refresh - invalidates current cache
* Force cache refresh - use unified cache system
*/
void nostr_request_validator_force_cache_refresh(void) {
g_auth_cache.cache_valid = 0;
g_auth_cache.cache_expires = 0;
validator_debug_log("VALIDATOR: Cache forcibly invalidated\n");
// Use unified cache refresh from config.c
force_config_cache_refresh();
validator_debug_log("VALIDATOR: Cache forcibly invalidated via unified cache\n");
}
/**
* Reload authentication configuration from unified config table
* This function is no longer needed - configuration is handled by unified cache
*/
static int reload_auth_config(void) {
sqlite3 *db = NULL;
sqlite3_stmt *stmt = NULL;
int rc;
// Clear cache
memset(&g_auth_cache, 0, sizeof(g_auth_cache));
// Open database using global database path
if (strlen(g_database_path) == 0) {
validator_debug_log("VALIDATOR: No database path available\n");
// Use defaults
g_auth_cache.auth_required = 0;
g_auth_cache.max_file_size = 104857600; // 100MB
g_auth_cache.admin_enabled = 0;
g_auth_cache.nip42_mode = 1; // Optional
int cache_timeout = get_cache_timeout();
g_auth_cache.cache_expires = time(NULL) + cache_timeout;
g_auth_cache.cache_valid = 1;
return NOSTR_SUCCESS;
}
rc = sqlite3_open_v2(g_database_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
validator_debug_log("VALIDATOR: Could not open database\n");
// Use defaults
g_auth_cache.auth_required = 0;
g_auth_cache.max_file_size = 104857600; // 100MB
g_auth_cache.admin_enabled = 0;
g_auth_cache.nip42_mode = 1; // Optional
int cache_timeout = get_cache_timeout();
g_auth_cache.cache_expires = time(NULL) + cache_timeout;
g_auth_cache.cache_valid = 1;
return NOSTR_SUCCESS;
}
// Load configuration values from unified config table
const char *config_sql =
"SELECT key, value FROM config WHERE key IN ('require_auth', "
"'auth_rules_enabled', 'max_file_size', 'admin_enabled', 'admin_pubkey', "
"'nip42_require_auth', 'nip42_challenge_timeout', "
"'nip42_time_tolerance')";
rc = sqlite3_prepare_v2(db, config_sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
while (sqlite3_step(stmt) == SQLITE_ROW) {
const char *key = (const char *)sqlite3_column_text(stmt, 0);
const char *value = (const char *)sqlite3_column_text(stmt, 1);
if (!key || !value)
continue;
if (strcmp(key, "require_auth") == 0) {
g_auth_cache.auth_required = (strcmp(value, "true") == 0) ? 1 : 0;
} else if (strcmp(key, "auth_rules_enabled") == 0) {
// Override auth_required with auth_rules_enabled if present (higher
// priority)
g_auth_cache.auth_required = (strcmp(value, "true") == 0) ? 1 : 0;
} else if (strcmp(key, "max_file_size") == 0) {
g_auth_cache.max_file_size = atol(value);
} else if (strcmp(key, "admin_enabled") == 0) {
g_auth_cache.admin_enabled = (strcmp(value, "true") == 0) ? 1 : 0;
} else if (strcmp(key, "admin_pubkey") == 0) {
strncpy(g_auth_cache.admin_pubkey, value,
sizeof(g_auth_cache.admin_pubkey) - 1);
} else if (strcmp(key, "nip42_require_auth") == 0) {
if (strcmp(value, "false") == 0) {
g_auth_cache.nip42_mode = 0; // Disabled
} else if (strcmp(value, "required") == 0) {
g_auth_cache.nip42_mode = 2; // Required
} else if (strcmp(value, "true") == 0) {
g_auth_cache.nip42_mode = 1; // Optional/Enabled
} else {
g_auth_cache.nip42_mode = 1; // Default to Optional/Enabled
}
} else if (strcmp(key, "nip42_challenge_timeout") == 0) {
g_auth_cache.nip42_challenge_timeout = atoi(value);
} else if (strcmp(key, "nip42_time_tolerance") == 0) {
g_auth_cache.nip42_time_tolerance = atoi(value);
}
}
sqlite3_finalize(stmt);
}
sqlite3_close(db);
// Set cache expiration with environment variable support
int cache_timeout = get_cache_timeout();
g_auth_cache.cache_expires = time(NULL) + cache_timeout;
g_auth_cache.cache_valid = 1;
// Set defaults for missing values
if (g_auth_cache.max_file_size == 0) {
g_auth_cache.max_file_size = 104857600; // 100MB
}
// Debug logging
fprintf(stderr,
"VALIDATOR: Configuration loaded from unified config table - "
"auth_required: %d, max_file_size: %ld, nip42_mode: %d, "
"cache_timeout: %d\n",
g_auth_cache.auth_required, g_auth_cache.max_file_size,
g_auth_cache.nip42_mode, cache_timeout);
fprintf(stderr,
"VALIDATOR: NIP-42 mode details - nip42_mode=%d (0=disabled, "
"1=optional/enabled, 2=required)\n",
g_auth_cache.nip42_mode);
// Configuration is now handled by the unified cache in config.c
validator_debug_log("VALIDATOR: Using unified cache system for configuration\n");
return NOSTR_SUCCESS;
}
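/*
 * Illustrative sketch only: one way get_cache_timeout() could implement the
 * environment-variable override mentioned above. The variable name
 * NOSTR_VALIDATOR_CACHE_TIMEOUT and the 300-second default are assumptions
 * for this example, not confirmed behavior; requires <stdlib.h>.
 */
static int get_cache_timeout_sketch(void) {
  /* hypothetical variable name */
  const char *env = getenv("NOSTR_VALIDATOR_CACHE_TIMEOUT");
  if (env && *env) {
    int seconds = atoi(env);
    if (seconds > 0) {
      return seconds; /* positive override wins */
    }
  }
  return 300; /* assumed default: 5 minutes */
}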
@@ -757,28 +629,26 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
// Step 1: Check pubkey blacklist (highest priority)
const char *blacklist_sql =
"SELECT rule_type, description FROM auth_rules WHERE rule_type = "
"'pubkey_blacklist' AND rule_target = ? AND operation = ? AND enabled = "
"1 ORDER BY priority LIMIT 1";
"SELECT rule_type, action FROM auth_rules WHERE rule_type = "
"'blacklist' AND pattern_type = 'pubkey' AND pattern_value = ? LIMIT 1";
rc = sqlite3_prepare_v2(db, blacklist_sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, operation ? operation : "", -1, SQLITE_STATIC);
if (sqlite3_step(stmt) == SQLITE_ROW) {
const char *description = (const char *)sqlite3_column_text(stmt, 1);
const char *action = (const char *)sqlite3_column_text(stmt, 1);
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 1 FAILED - "
"Pubkey blacklisted\n");
char blacklist_msg[256];
sprintf(blacklist_msg,
"VALIDATOR_DEBUG: RULES ENGINE - Blacklist rule matched: %s\n",
description ? description : "Unknown");
"VALIDATOR_DEBUG: RULES ENGINE - Blacklist rule matched: action=%s\n",
action ? action : "deny");
validator_debug_log(blacklist_msg);
// Set specific violation details for status code mapping
strcpy(g_last_rule_violation.violation_type, "pubkey_blacklist");
sprintf(g_last_rule_violation.reason, "%s: Public key blacklisted",
description ? description : "TEST_PUBKEY_BLACKLIST");
sprintf(g_last_rule_violation.reason, "Public key blacklisted: %s",
action ? action : "PUBKEY_BLACKLIST");
sqlite3_finalize(stmt);
sqlite3_close(db);
@@ -792,29 +662,27 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
// Step 2: Check hash blacklist
if (resource_hash) {
const char *hash_blacklist_sql =
"SELECT rule_type, description FROM auth_rules WHERE rule_type = "
"'hash_blacklist' AND rule_target = ? AND operation = ? AND enabled = "
"1 ORDER BY priority LIMIT 1";
"SELECT rule_type, action FROM auth_rules WHERE rule_type = "
"'blacklist' AND pattern_type = 'hash' AND pattern_value = ? LIMIT 1";
rc = sqlite3_prepare_v2(db, hash_blacklist_sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, resource_hash, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, operation ? operation : "", -1, SQLITE_STATIC);
if (sqlite3_step(stmt) == SQLITE_ROW) {
const char *description = (const char *)sqlite3_column_text(stmt, 1);
const char *action = (const char *)sqlite3_column_text(stmt, 1);
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 2 FAILED - "
"Hash blacklisted\n");
char hash_blacklist_msg[256];
sprintf(
hash_blacklist_msg,
"VALIDATOR_DEBUG: RULES ENGINE - Hash blacklist rule matched: %s\n",
description ? description : "Unknown");
"VALIDATOR_DEBUG: RULES ENGINE - Hash blacklist rule matched: action=%s\n",
action ? action : "deny");
validator_debug_log(hash_blacklist_msg);
// Set specific violation details for status code mapping
strcpy(g_last_rule_violation.violation_type, "hash_blacklist");
sprintf(g_last_rule_violation.reason, "%s: File hash blacklisted",
description ? description : "TEST_HASH_BLACKLIST");
sprintf(g_last_rule_violation.reason, "File hash blacklisted: %s",
action ? action : "HASH_BLACKLIST");
sqlite3_finalize(stmt);
sqlite3_close(db);
@@ -831,22 +699,20 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
// Step 3: Check pubkey whitelist
const char *whitelist_sql =
"SELECT rule_type, description FROM auth_rules WHERE rule_type = "
"'pubkey_whitelist' AND rule_target = ? AND operation = ? AND enabled = "
"1 ORDER BY priority LIMIT 1";
"SELECT rule_type, action FROM auth_rules WHERE rule_type = "
"'whitelist' AND pattern_type = 'pubkey' AND pattern_value = ? LIMIT 1";
rc = sqlite3_prepare_v2(db, whitelist_sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, operation ? operation : "", -1, SQLITE_STATIC);
if (sqlite3_step(stmt) == SQLITE_ROW) {
const char *description = (const char *)sqlite3_column_text(stmt, 1);
const char *action = (const char *)sqlite3_column_text(stmt, 1);
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 PASSED - "
"Pubkey whitelisted\n");
char whitelist_msg[256];
sprintf(whitelist_msg,
"VALIDATOR_DEBUG: RULES ENGINE - Whitelist rule matched: %s\n",
description ? description : "Unknown");
"VALIDATOR_DEBUG: RULES ENGINE - Whitelist rule matched: action=%s\n",
action ? action : "allow");
validator_debug_log(whitelist_msg);
sqlite3_finalize(stmt);
sqlite3_close(db);
@@ -859,12 +725,10 @@ static int check_database_auth_rules(const char *pubkey, const char *operation,
// Step 4: Check if any whitelist rules exist - if yes, deny by default
const char *whitelist_exists_sql =
"SELECT COUNT(*) FROM auth_rules WHERE rule_type = 'pubkey_whitelist' "
"AND operation = ? AND enabled = 1 LIMIT 1";
"SELECT COUNT(*) FROM auth_rules WHERE rule_type = 'whitelist' "
"AND pattern_type = 'pubkey' LIMIT 1";
rc = sqlite3_prepare_v2(db, whitelist_exists_sql, -1, &stmt, NULL);
if (rc == SQLITE_OK) {
sqlite3_bind_text(stmt, 1, operation ? operation : "", -1, SQLITE_STATIC);
if (sqlite3_step(stmt) == SQLITE_ROW) {
int whitelist_count = sqlite3_column_int(stmt, 0);
if (whitelist_count > 0) {
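/*
 * Illustrative sketch of the precedence implied by steps 1-4 above; the
 * return convention (1 = allow, 0 = deny) is an assumption for this
 * example, not the function's actual contract.
 */
static int auth_rules_decision_sketch(int pubkey_blacklisted,
                                      int hash_blacklisted,
                                      int pubkey_whitelisted,
                                      int whitelist_rules_exist) {
  if (pubkey_blacklisted || hash_blacklisted)
    return 0; /* steps 1-2: any blacklist match denies */
  if (pubkey_whitelisted)
    return 1; /* step 3: explicit whitelist match allows */
  if (whitelist_rules_exist)
    return 0; /* step 4: whitelist present, pubkey not on it */
  return 1; /* no restricting rules configured */
}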

View File

@@ -1,12 +1,12 @@
/* Embedded SQL Schema for C Nostr Relay
* Generated from db/schema.sql - Do not edit manually
* Schema Version: 6
* Schema Version: 7
*/
#ifndef SQL_SCHEMA_H
#define SQL_SCHEMA_H
/* Schema version constant */
#define EMBEDDED_SCHEMA_VERSION "6"
#define EMBEDDED_SCHEMA_VERSION "7"
/* Embedded SQL schema as C string literal */
static const char* const EMBEDDED_SCHEMA_SQL =
@@ -15,7 +15,7 @@ static const char* const EMBEDDED_SCHEMA_SQL =
-- Event-based configuration system using kind 33334 Nostr events\n\
\n\
-- Schema version tracking\n\
PRAGMA user_version = 6;\n\
PRAGMA user_version = 7;\n\
\n\
-- Enable foreign key support\n\
PRAGMA foreign_keys = ON;\n\
@@ -58,8 +58,8 @@ CREATE TABLE schema_info (\n\
\n\
-- Insert schema metadata\n\
INSERT INTO schema_info (key, value) VALUES\n\
('version', '6'),\n\
('description', 'Event-based Nostr relay schema with secure relay private key storage'),\n\
('version', '7'),\n\
('description', 'Hybrid Nostr relay schema with event-based and table-based configuration'),\n\
('created_at', strftime('%s', 'now'));\n\
\n\
-- Helper views for common queries\n\
@@ -154,6 +154,60 @@ CREATE INDEX idx_auth_rules_pattern ON auth_rules(pattern_type, pattern_value);\
CREATE INDEX idx_auth_rules_type ON auth_rules(rule_type);\n\
CREATE INDEX idx_auth_rules_active ON auth_rules(active);\n\
\n\
-- Configuration Table for Table-Based Config Management\n\
-- Hybrid system supporting both event-based and table-based configuration\n\
CREATE TABLE config (\n\
key TEXT PRIMARY KEY,\n\
value TEXT NOT NULL,\n\
data_type TEXT NOT NULL CHECK (data_type IN ('string', 'integer', 'boolean', 'json')),\n\
description TEXT,\n\
category TEXT DEFAULT 'general',\n\
requires_restart INTEGER DEFAULT 0,\n\
created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),\n\
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))\n\
);\n\
\n\
-- Indexes for config table performance\n\
CREATE INDEX idx_config_category ON config(category);\n\
CREATE INDEX idx_config_restart ON config(requires_restart);\n\
CREATE INDEX idx_config_updated ON config(updated_at DESC);\n\
\n\
-- Trigger to update config timestamp on changes\n\
CREATE TRIGGER update_config_timestamp\n\
AFTER UPDATE ON config\n\
FOR EACH ROW\n\
BEGIN\n\
UPDATE config SET updated_at = strftime('%s', 'now') WHERE key = NEW.key;\n\
END;\n\
\n\
-- Insert default configuration values\n\
INSERT INTO config (key, value, data_type, description, category, requires_restart) VALUES\n\
('relay_description', 'A C Nostr Relay', 'string', 'Relay description', 'general', 0),\n\
('relay_contact', '', 'string', 'Relay contact information', 'general', 0),\n\
('relay_software', 'https://github.com/laanwj/c-relay', 'string', 'Relay software URL', 'general', 0),\n\
('relay_version', '1.0.0', 'string', 'Relay version', 'general', 0),\n\
('relay_port', '8888', 'integer', 'Relay port number', 'network', 1),\n\
('max_connections', '1000', 'integer', 'Maximum concurrent connections', 'network', 1),\n\
('auth_enabled', 'false', 'boolean', 'Enable NIP-42 authentication', 'auth', 0),\n\
('nip42_auth_required_events', 'false', 'boolean', 'Require auth for event publishing', 'auth', 0),\n\
('nip42_auth_required_subscriptions', 'false', 'boolean', 'Require auth for subscriptions', 'auth', 0),\n\
('nip42_auth_required_kinds', '[]', 'json', 'Event kinds requiring authentication', 'auth', 0),\n\
('nip42_challenge_expiration', '600', 'integer', 'Auth challenge expiration seconds', 'auth', 0),\n\
('pow_min_difficulty', '0', 'integer', 'Minimum proof-of-work difficulty', 'validation', 0),\n\
('pow_mode', 'optional', 'string', 'Proof-of-work mode', 'validation', 0),\n\
('nip40_expiration_enabled', 'true', 'boolean', 'Enable event expiration', 'validation', 0),\n\
('nip40_expiration_strict', 'false', 'boolean', 'Strict expiration mode', 'validation', 0),\n\
('nip40_expiration_filter', 'true', 'boolean', 'Filter expired events in queries', 'validation', 0),\n\
('nip40_expiration_grace_period', '60', 'integer', 'Expiration grace period seconds', 'validation', 0),\n\
('max_subscriptions_per_client', '25', 'integer', 'Maximum subscriptions per client', 'limits', 0),\n\
('max_total_subscriptions', '1000', 'integer', 'Maximum total subscriptions', 'limits', 0),\n\
('max_filters_per_subscription', '10', 'integer', 'Maximum filters per subscription', 'limits', 0),\n\
('max_event_tags', '2000', 'integer', 'Maximum tags per event', 'limits', 0),\n\
('max_content_length', '100000', 'integer', 'Maximum event content length', 'limits', 0),\n\
('max_message_length', '131072', 'integer', 'Maximum WebSocket message length', 'limits', 0),\n\
('default_limit', '100', 'integer', 'Default query limit', 'limits', 0),\n\
('max_limit', '5000', 'integer', 'Maximum query limit', 'limits', 0);\n\
\n\
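-- Example usage (illustrative only, kept as comments so it is not executed\n\
-- with the schema): an UPDATE on the config table fires the trigger above\n\
-- and refreshes updated_at; readers can then fetch the value back.\n\
--   UPDATE config SET value = '9000' WHERE key = 'relay_port';\n\
--   SELECT value, updated_at FROM config WHERE key = 'relay_port';\n\
\n\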
-- Persistent Subscriptions Logging Tables (Phase 2)\n\
-- Optional database logging for subscription analytics and debugging\n\
\n\

View File

@@ -1,191 +0,0 @@
#!/usr/bin/env node
// Import the nostr-tools bundle
const fs = require('fs');
const path = require('path');
const { TextEncoder, TextDecoder } = require('util');
// Load nostr.bundle.js
const bundlePath = path.join(__dirname, 'api', 'nostr.bundle.js');
if (!fs.existsSync(bundlePath)) {
console.error('nostr.bundle.js not found at:', bundlePath);
process.exit(1);
}
// Read and eval the bundle to get NostrTools
const bundleCode = fs.readFileSync(bundlePath, 'utf8');
const vm = require('vm');
// Create a more complete browser-like context
const context = {
window: {},
global: {},
console: console,
setTimeout: setTimeout,
setInterval: setInterval,
clearTimeout: clearTimeout,
clearInterval: clearInterval,
Buffer: Buffer,
process: process,
require: require,
module: module,
exports: exports,
__dirname: __dirname,
__filename: __filename,
TextEncoder: TextEncoder,
TextDecoder: TextDecoder,
crypto: require('crypto'),
atob: (str) => Buffer.from(str, 'base64').toString('binary'),
btoa: (str) => Buffer.from(str, 'binary').toString('base64'),
fetch: require('https').get // Basic polyfill, might need adjustment
};
// Add common browser globals to window
context.window.TextEncoder = TextEncoder;
context.window.TextDecoder = TextDecoder;
context.window.crypto = context.crypto;
context.window.atob = context.atob;
context.window.btoa = context.btoa;
context.window.console = console;
context.window.setTimeout = setTimeout;
context.window.setInterval = setInterval;
context.window.clearTimeout = clearTimeout;
context.window.clearInterval = clearInterval;
// Execute bundle in context
vm.createContext(context);
try {
vm.runInContext(bundleCode, context);
} catch (error) {
console.error('Error loading nostr bundle:', error.message);
process.exit(1);
}
// Debug what's available in the context
console.log('Bundle loaded, checking available objects...');
console.log('context.window keys:', Object.keys(context.window));
console.log('context.global keys:', Object.keys(context.global));
// Try different ways to access NostrTools
let NostrTools = context.window.NostrTools || context.NostrTools || context.global.NostrTools;
// If still not found, look for other possible exports
if (!NostrTools) {
console.log('Looking for alternative exports...');
// Check if it's under a different name
const windowKeys = Object.keys(context.window);
const possibleExports = windowKeys.filter(key =>
key.toLowerCase().includes('nostr') ||
key.toLowerCase().includes('tools') ||
typeof context.window[key] === 'object'
);
console.log('Possible nostr-related exports:', possibleExports);
// Try the first one that looks promising
if (possibleExports.length > 0) {
NostrTools = context.window[possibleExports[0]];
console.log(`Trying ${possibleExports[0]}:`, typeof NostrTools);
}
}
if (!NostrTools) {
console.error('NostrTools not found in bundle');
console.error('Bundle might not be compatible with Node.js or needs different loading approach');
process.exit(1);
}
console.log('NostrTools loaded successfully');
console.log('Available methods:', Object.keys(NostrTools));
async function testRelay() {
const relayUrl = 'ws://127.0.0.1:8888';
try {
console.log('\n=== Testing Relay Connection ===');
console.log('Relay URL:', relayUrl);
// Create SimplePool
const pool = new NostrTools.SimplePool();
console.log('SimplePool created');
// Test 1: Query for kind 1 events
console.log('\n--- Test 1: Kind 1 Events ---');
const kind1Events = await pool.querySync([relayUrl], {
kinds: [1],
limit: 5
});
console.log(`Found ${kind1Events.length} kind 1 events`);
kind1Events.forEach((event, index) => {
console.log(`Event ${index + 1}:`, {
id: event.id,
kind: event.kind,
pubkey: event.pubkey.substring(0, 16) + '...',
created_at: new Date(event.created_at * 1000).toISOString(),
content: event.content.substring(0, 50) + (event.content.length > 50 ? '...' : '')
});
});
// Test 2: Query for kind 33334 events (configuration)
console.log('\n--- Test 2: Kind 33334 Events (Configuration) ---');
const configEvents = await pool.querySync([relayUrl], {
kinds: [33334],
limit: 10
});
console.log(`Found ${configEvents.length} kind 33334 events`);
configEvents.forEach((event, index) => {
console.log(`Config Event ${index + 1}:`, {
id: event.id,
kind: event.kind,
pubkey: event.pubkey.substring(0, 16) + '...',
created_at: new Date(event.created_at * 1000).toISOString(),
tags: event.tags.length,
content: event.content
});
// Show some tags
if (event.tags.length > 0) {
console.log(' Sample tags:');
event.tags.slice(0, 5).forEach(tag => {
console.log(` ${tag[0]}: ${tag[1] || ''}`);
});
}
});
// Test 3: Query for any events
console.log('\n--- Test 3: Any Events (limit 3) ---');
const anyEvents = await pool.querySync([relayUrl], {
limit: 3
});
console.log(`Found ${anyEvents.length} total events`);
anyEvents.forEach((event, index) => {
console.log(`Event ${index + 1}:`, {
id: event.id,
kind: event.kind,
pubkey: event.pubkey.substring(0, 16) + '...',
created_at: new Date(event.created_at * 1000).toISOString()
});
});
// Clean up
pool.close([relayUrl]);
console.log('\n=== Test Complete ===');
} catch (error) {
console.error('Relay test failed:', error.message);
console.error('Stack:', error.stack);
}
}
// Run the test
testRelay().then(() => {
console.log('Test finished');
process.exit(0);
}).catch((error) => {
console.error('Test failed:', error);
process.exit(1);
});

View File

@@ -1,93 +0,0 @@
=== NIP-42 Authentication Test Started ===
2025-09-13 08:48:02 - Starting NIP-42 authentication tests
[INFO] === Starting NIP-42 Authentication Tests ===
[INFO] Checking dependencies...
[SUCCESS] Dependencies check complete
[INFO] Test 1: Checking NIP-42 support in relay info
[SUCCESS] NIP-42 is advertised in supported NIPs
2025-09-13 08:48:02 - Supported NIPs: 1,9,11,13,15,20,40,42
[INFO] Test 2: Testing AUTH challenge generation
[INFO] Found admin private key, configuring NIP-42 authentication...
[WARNING] Failed to create configuration event - proceeding with manual test
[INFO] Test 3: Testing complete NIP-42 authentication flow
[INFO] Generated test keypair: test_pubkey
[INFO] Attempting to publish event without authentication...
[INFO] Publishing test event to relay...
2025-09-13 08:48:03 - Event publish result: connecting to ws://localhost:8888... ok.
{"kind":1,"id":"c42a8cbdd1cc6ea3e7fd060919c57386aef0c35da272ba2fa34b45f80934cfca","pubkey":"d0111448b3bd0da6aa699b92163f684291bb43bc213aa54a2ee726c2acde76e8","created_at":1757767683,"tags":[],"content":"NIP-42 test event - should require auth","sig":"d2a2c7efc00e06d8d8582fa05b2ec8cb96979525770dff9ef36a91df6d53807c86115581de2d6058d7d64eebe3b7d7404cc03dbb2ad1e91d140283703c2dec53"}
publishing to ws://localhost:8888... success.
[SUCCESS] Relay requested authentication as expected
[INFO] Test 4: Testing WebSocket AUTH message handling
[INFO] Testing WebSocket connection and AUTH message...
[INFO] Sending test message via WebSocket...
2025-09-13 08:48:03 - WebSocket response:
[INFO] No AUTH challenge in WebSocket response
[INFO] Test 5: Testing NIP-42 configuration options
[INFO] Retrieving current relay configuration...
[SUCCESS] Retrieved configuration events from relay
[SUCCESS] Found NIP-42 configuration:
2025-09-13 08:48:04 - nip42_auth_required_events=false
2025-09-13 08:48:04 - nip42_auth_required_subscriptions=false
2025-09-13 08:48:04 - nip42_auth_required_kinds=4,14
2025-09-13 08:48:04 - nip42_challenge_expiration=600
[INFO] Test 6: Testing NIP-42 performance and stability
[INFO] Testing multiple authentication attempts...
2025-09-13 08:48:05 - Attempt 1: .271641300s - connecting to ws://localhost:8888... ok.
{"kind":1,"id":"916049dbd6835443e8fd553bd12a37ef03060a01fedb099b414ea2cc18b597eb","pubkey":"b383f405d81860ec9b0eebf88612093ab18dc6abd322639b19ac79969599c8c4","created_at":1757767685,"tags":[],"content":"Performance test event 1","sig":"b04e0b38bbb49e0aa3c8a69530071bb08d917c4ba12eae38045a487c43e83f6dc1389ac4640453b0492d9c991df37f71e25ef501fd48c4c11c878e6cb3fa7a84"}
publishing to ws://localhost:8888... success.
2025-09-13 08:48:05 - Attempt 2: .259343520s - connecting to ws://localhost:8888... ok.
{"kind":1,"id":"e4495a56ec6f1ba2759eabbf0128aec615c53acf3e4720be7726dcd7163da703","pubkey":"b383f405d81860ec9b0eebf88612093ab18dc6abd322639b19ac79969599c8c4","created_at":1757767685,"tags":[],"content":"Performance test event 2","sig":"d1efe3f576eeded4e292ec22f2fea12296fa17ed2f87a8cd2dde0444b594ef55f7d74b680aeca11295a16397df5ccc53a938533947aece27efb965e6c643b62c"}
publishing to ws://localhost:8888... success.
2025-09-13 08:48:06 - Attempt 3: .221167032s - connecting to ws://localhost:8888... ok.
{"kind":1,"id":"55035b4c95a2c93a169236c7f5f5bd627838ec13522c88cf82d8b55516560cd9","pubkey":"b383f405d81860ec9b0eebf88612093ab18dc6abd322639b19ac79969599c8c4","created_at":1757767686,"tags":[],"content":"Performance test event 3","sig":"4bd581580a5a2416e6a9af44c055333635832dbf21793517f16100f1366c73437659545a8a712dcc4623a801b9deccd372b36b658309e7102a4300c3f481facb"}
publishing to ws://localhost:8888... success.
2025-09-13 08:48:06 - Attempt 4: .260219496s - connecting to ws://localhost:8888... ok.
{"kind":1,"id":"58dee587a1a0f085ff44441b3074f5ff42715088ee24e694107100df3c63ff2b","pubkey":"b383f405d81860ec9b0eebf88612093ab18dc6abd322639b19ac79969599c8c4","created_at":1757767686,"tags":[],"content":"Performance test event 4","sig":"b6174b0c56138466d3bb228ef2ced1d917f7253b76c624235fa3b661c9fa109c78ae557c4ddaf0e6232aa597608916f0dfba1c192f8b90ffb819c36ac1e4e516"}
publishing to ws://localhost:8888... success.
2025-09-13 08:48:07 - Attempt 5: .260125188s - connecting to ws://localhost:8888... ok.
{"kind":1,"id":"b8069c80f98fff3780eaeb605baf1a5818c9ab05185c1776a28469d2b0b32c6a","pubkey":"b383f405d81860ec9b0eebf88612093ab18dc6abd322639b19ac79969599c8c4","created_at":1757767687,"tags":[],"content":"Performance test event 5","sig":"5130d3a0c778728747b12aae77f2516db5b055d8ec43f413a4b117fcadb6025a49b6f602307bbe758bd97557e326e8735631fd03dc45c9296509e94aa305adf2"}
publishing to ws://localhost:8888... success.
[SUCCESS] Performance test completed: 5/5 successful responses
[INFO] Test 7: Testing kind-specific NIP-42 authentication requirements
[INFO] Generated test keypair for kind-specific tests: test_pubkey
[INFO] Testing kind 1 event (regular note) - should work without authentication...
2025-09-13 08:48:08 - Kind 1 event result: connecting to ws://localhost:8888... ok.
{"kind":1,"id":"f2ac02a5290db3797c0b7b38435920d5db593d333e582454d8ed32da4c141b74","pubkey":"da031504ff61656d1829f723c52f526d7591400fb9e2aecb7b4ef5aeeea66fc7","created_at":1757767688,"tags":[],"content":"Regular note - should not require auth","sig":"8e4272d9cb258fc4b140eb8e8c2e802c3e8b62e34c17c9e545d83c68dfb86ffd2cdd4a8153660b663a46906459aa67719257ac263f21d1f8a6185806e055dcfd"}
publishing to ws://localhost:8888... success.
[SUCCESS] Kind 1 event accepted without authentication (correct behavior)
[INFO] Testing kind 4 event (direct message) - should require authentication...
2025-09-13 08:48:18 - Kind 4 event result: connecting to ws://localhost:8888... ok.
{"kind":4,"id":"935af23e2bf7efd324d86a0c82631e5ebe492edf21920ed0f548faa73a18ac1d","pubkey":"da031504ff61656d1829f723c52f526d7591400fb9e2aecb7b4ef5aeeea66fc7","created_at":1757767688,"tags":[["p,test_pubkey"]],"content":"This is a direct message - should require auth","sig":"b2b86ee394b41505ddbd787c22f4223665770d84a21dd03e74bf4e8fa879ff82dd6b1f7d6921d93f8d89787102c3dc3012e6270d66ca5b5d4b87f1a545481e76"}
publishing to ws://localhost:8888...
[SUCCESS] Kind 4 event requested authentication (correct behavior for DMs)
[INFO] Testing kind 14 event (chat message) - should require authentication...
2025-09-13 08:48:28 - Kind 14 event result: connecting to ws://localhost:8888... ok.
{"kind":14,"id":"aeb1ac58dd465c90ce5a70c7b16e3cc32fae86c221bb2e86ca29934333604669","pubkey":"da031504ff61656d1829f723c52f526d7591400fb9e2aecb7b4ef5aeeea66fc7","created_at":1757767698,"tags":[["p,test_pubkey"]],"content":"Chat message - should require auth","sig":"24e23737e6684e4ef01c08d72304e6f235ce75875b94b37460065f9ead986438435585818ba104e7f78f14345406b5d03605c925042e9c06fed8c99369cd8694"}
publishing to ws://localhost:8888...
[SUCCESS] Kind 14 event requested authentication (correct behavior for DMs)
[INFO] Testing other event kinds - should work without authentication...
2025-09-13 08:48:29 - Kind 0 event result: connecting to ws://localhost:8888... ok.
{"kind":0,"id":"3b2cc834dd874ebbe07c2da9e41c07b3f0c61a57b4d6b7299c2243dbad29f2ca","pubkey":"da031504ff61656d1829f723c52f526d7591400fb9e2aecb7b4ef5aeeea66fc7","created_at":1757767709,"tags":[],"content":"Test event kind 0 - should not require auth","sig":"4f2016fde84d72cf5a5aa4c0ec5de677ef06c7971ca2dd756b02a94c47604fae1c67254703a2df3d17b13fee2d9c45661b76086f29ac93820a4c062fc52dea74"}
publishing to ws://localhost:8888... success.
[SUCCESS] Kind 0 event accepted without authentication (correct)
2025-09-13 08:48:29 - Kind 3 event result: connecting to ws://localhost:8888... ok.
{"kind":3,"id":"6e1ea0b1cbf342feea030fa39226c316e730c5d333fa8333495748afd386ec80","pubkey":"da031504ff61656d1829f723c52f526d7591400fb9e2aecb7b4ef5aeeea66fc7","created_at":1757767709,"tags":[],"content":"Test event kind 3 - should not require auth","sig":"e5f66c5f022497f8888f003a8bfbb5e807a2520d314c80889548efa267f9d6de28d5ee7b0588cc8660f2963ab44e530c8a74d71a227148e5a6843fcef4de2197"}
publishing to ws://localhost:8888... success.
[SUCCESS] Kind 3 event accepted without authentication (correct)
2025-09-13 08:48:30 - Kind 7 event result: connecting to ws://localhost:8888... ok.
{"kind":7,"id":"a64466b9899cad257313e2dced357fd3f87f40bd7e13e29372689aae7c718919","pubkey":"da031504ff61656d1829f723c52f526d7591400fb9e2aecb7b4ef5aeeea66fc7","created_at":1757767710,"tags":[],"content":"Test event kind 7 - should not require auth","sig":"78d18bcb0c2b11b4e2b74bcdfb140564b4563945e983014a279977356e50b57f3c5a262fa55de26dbd4c8d8b9f5beafbe21af869be64079f54a712284f03d9ac"}
publishing to ws://localhost:8888... success.
[SUCCESS] Kind 7 event accepted without authentication (correct)
[INFO] Kind-specific authentication test completed
[INFO] === NIP-42 Test Results Summary ===
[SUCCESS] Dependencies: PASS
[SUCCESS] NIP-42 Support: PASS
[SUCCESS] Auth Challenge: PASS
[SUCCESS] Auth Flow: PASS
[SUCCESS] WebSocket AUTH: PASS
[SUCCESS] Configuration: PASS
[SUCCESS] Performance: PASS
[SUCCESS] Kind-Specific Auth: PASS
[SUCCESS] All NIP-42 tests completed successfully!
[SUCCESS] NIP-42 authentication implementation is working correctly
[INFO] === NIP-42 Authentication Tests Complete ===

967
tests/white_black_list_test.sh Executable file
View File

@@ -0,0 +1,967 @@
#!/bin/bash
# =======================================================================
# C-Relay Whitelist/Blacklist Authentication Rules Test Script
# =======================================================================
#
# This test validates the whitelist and blacklist functionality of the
# C-Relay server through the WebSocket admin API.
#
# Test Credentials (Test Mode):
# - Admin Private Key: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
# - Admin Public Key: 6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3
# - Relay Public Key: 4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa
#
# =======================================================================
set -e # Exit on any error
# =======================================================================
# CONFIGURATION
# =======================================================================
# Test mode credentials (from current relay startup)
ADMIN_PRIVKEY="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
ADMIN_PUBKEY="6a04ab98d9e4774ad806e302dddeb63bea16b5cb5f223ee77478e861bb583eb3"
RELAY_PUBKEY="4f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa"
# Server configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
RELAY_URL="ws://${RELAY_HOST}:${RELAY_PORT}"
# Test configuration
TIMEOUT=5
TEMP_DIR="/tmp/c_relay_test_$$"
# WebSocket connection state
WS_PID=""
WS_INPUT_FIFO=""
WS_OUTPUT_FIFO=""
WS_CONNECTED=0
WS_RESPONSE_LOG=""
# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;34m'
BOLD='\033[1m'
RESET='\033[0m'
# Test tracking
TESTS_RUN=0
TESTS_PASSED=0
TESTS_FAILED=0
# =======================================================================
# UTILITY FUNCTIONS
# =======================================================================
log() {
echo -e "${BLUE}[$(date '+%H:%M:%S')]${RESET} $1"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${RESET} $1"
}
log_error() {
echo -e "${RED}[ERROR]${RESET} $1"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${RESET} $1"
}
log_info() {
echo -e "${BLUE}[INFO]${RESET} $1"
}
increment_test() {
TESTS_RUN=$((TESTS_RUN + 1))
}
pass_test() {
TESTS_PASSED=$((TESTS_PASSED + 1))
log_success "Test $TESTS_RUN: PASSED - $1"
echo ""
echo ""
}
fail_test() {
TESTS_FAILED=$((TESTS_FAILED + 1))
log_error "Test $TESTS_RUN: FAILED - $1"
echo ""
echo ""
}
# Generate test keypairs
generate_test_keypair() {
local name=$1
local privkey_file="${TEMP_DIR}/${name}_privkey"
local pubkey_file="${TEMP_DIR}/${name}_pubkey"
# Generate private key using nak key generate (following pattern from other tests)
local privkey
privkey=$(nak key generate 2>/dev/null)
if [ $? -ne 0 ] || [ -z "$privkey" ]; then
log_error "Failed to generate private key for $name"
return 1
fi
echo "$privkey" > "$privkey_file"
# Derive public key using nak
local pubkey
pubkey=$(nak key public "$privkey" 2>/dev/null)
if [ $? -ne 0 ] || [ -z "$pubkey" ]; then
log_error "Failed to generate public key for $name"
return 1
fi
echo "$pubkey" > "$pubkey_file"
log_info "Generated keypair for $name: pubkey=${pubkey:0:16}..."
# Export for use in calling functions
eval "${name}_PRIVKEY=\"$privkey\""
eval "${name}_PUBKEY=\"$pubkey\""
}
# Send WebSocket message and capture response
send_websocket_message() {
local message="$1"
local expected_response="$2"
local timeout="${3:-$TIMEOUT}"
# Use websocat to send message and capture response (following pattern from tests/1_nip_test.sh)
local response=""
if command -v websocat &> /dev/null; then
# Capture output from websocat (following working pattern from 1_nip_test.sh)
response=$(echo "$message" | timeout "$timeout" websocat "$RELAY_URL" 2>&1 || echo "Connection failed")
# Check if connection failed
if [[ "$response" == *"Connection failed"* ]]; then
log_error "Failed to connect to relay"
return 1
fi
else
log_error "websocat not found - required for WebSocket testing"
log_error "Please install websocat for WebSocket communication"
return 1
fi
echo "$response"
}
# =======================================================================
# PERSISTENT WEBSOCKET CONNECTION MANAGEMENT
# =======================================================================
# Open persistent WebSocket connection
open_websocket_connection() {
log_info "Opening persistent WebSocket connection to $RELAY_URL..."
# Create unique named pipes for this test session
WS_INPUT_FIFO="${TEMP_DIR}/ws_input_$$"
WS_OUTPUT_FIFO="${TEMP_DIR}/ws_output_$$"
WS_RESPONSE_LOG="${TEMP_DIR}/ws_responses_$$"
# Create named pipes
mkfifo "$WS_INPUT_FIFO" "$WS_OUTPUT_FIFO"
# Start websocat in background with bidirectional pipes
# Input: we write to WS_INPUT_FIFO, websocat reads and sends to relay
# Output: websocat receives from relay and writes to WS_OUTPUT_FIFO (see the usage example after this function)
websocat "$RELAY_URL" < "$WS_INPUT_FIFO" > "$WS_OUTPUT_FIFO" &
WS_PID=$!
# Start background response logger
tail -f "$WS_OUTPUT_FIFO" >> "$WS_RESPONSE_LOG" &
local logger_pid=$!
# Keep the input pipe open for the lifetime of the test by holding a writable file descriptor to it
exec {ws_fd}> "$WS_INPUT_FIFO"
# Test connection with a simple REQ message
sleep 1
echo '["REQ","test_conn",{}]' >&${ws_fd}
# Wait for response to confirm connection
local connection_timeout=5
local start_time=$(date +%s)
while [ $(($(date +%s) - start_time)) -lt $connection_timeout ]; do
if [ -s "$WS_RESPONSE_LOG" ]; then
WS_CONNECTED=1
log_success "Persistent WebSocket connection established"
log_info "WebSocket PID: $WS_PID"
return 0
fi
sleep 0.1
done
# Connection failed
log_error "Failed to establish persistent WebSocket connection"
close_websocket_connection
return 1
}
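# Example usage (illustrative only; assumes open_websocket_connection above
# succeeded, so ${ws_fd} and $WS_RESPONSE_LOG are set):
#   echo '["REQ","example",{"kinds":[1],"limit":1}]' >&${ws_fd}
#   sleep 1
#   tail -n 3 "$WS_RESPONSE_LOG"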
# Close persistent WebSocket connection
close_websocket_connection() {
log_info "Closing persistent WebSocket connection..."
if [ -n "$WS_PID" ] && kill -0 "$WS_PID" 2>/dev/null; then
# Close input pipe first
if [ -n "${ws_fd}" ]; then
exec {ws_fd}>&-
fi
# Send close frame and terminate websocat
kill "$WS_PID" 2>/dev/null
wait "$WS_PID" 2>/dev/null
fi
# Kill any remaining background processes
pkill -f "tail -f.*$WS_OUTPUT_FIFO" 2>/dev/null || true
# Clean up pipes
[ -p "$WS_INPUT_FIFO" ] && rm -f "$WS_INPUT_FIFO"
[ -p "$WS_OUTPUT_FIFO" ] && rm -f "$WS_OUTPUT_FIFO"
WS_PID=""
WS_CONNECTED=0
log_info "WebSocket connection closed"
}
# Send event through persistent WebSocket connection
send_websocket_event() {
local event_json="$1"
local timeout_seconds="${2:-10}"
if [ "$WS_CONNECTED" != "1" ]; then
log_error "WebSocket connection not established"
return 1
fi
# Clear previous responses
> "$WS_RESPONSE_LOG"
# Create EVENT message
local event_message="[\"EVENT\",$event_json]"
# Send through persistent connection
echo "$event_message" >&${ws_fd}
# Wait for OK response
local start_time=$(date +%s)
while [ $(($(date +%s) - start_time)) -lt $timeout_seconds ]; do
if grep -q '"OK"' "$WS_RESPONSE_LOG" 2>/dev/null; then
local response=$(tail -1 "$WS_RESPONSE_LOG")
echo "$response"
return 0
fi
sleep 0.1
done
log_error "Timeout waiting for WebSocket response"
return 1
}
# Wait for query response data from relay
wait_for_query_response() {
local timeout_seconds="${1:-10}"
local start_time=$(date +%s)
log_info "Waiting for query response data..."
# Clear any OK responses and wait for JSON data
sleep 0.5 # Brief delay to ensure OK response is processed first
while [ $(($(date +%s) - start_time)) -lt $timeout_seconds ]; do
# Look for JSON response with query data (not just OK responses)
if grep -q '"query_type"' "$WS_RESPONSE_LOG" 2>/dev/null; then
local response=$(grep '"query_type"' "$WS_RESPONSE_LOG" | tail -1)
echo "$response"
return 0
fi
sleep 0.1
done
log_error "Timeout waiting for query response data"
return 1
}
# Create and send auth rule event
send_auth_rule_event() {
local action="$1" # "add" or "remove"
local rule_type="$2" # "whitelist" or "blacklist"
local pattern_type="$3" # "pubkey" or "hash"
local pattern_value="$4" # actual pubkey or hash value
local description="$5" # optional description
log_info "Creating auth rule event: $action $rule_type $pattern_type ${pattern_value:0:16}..."
# Create the auth rule event using nak with correct tag format for the actual implementation
# Server expects tags like ["whitelist", "pubkey", "abc123..."] or ["blacklist", "pubkey", "def456..."] (see the example sketched after this function)
# Using Kind 23456 (ephemeral auth rules management) with proper relay targeting
local event_json
event_json=$(nak event -k 23456 --content "" \
-t "p=$RELAY_PUBKEY" \
-t "$rule_type=$pattern_type=$pattern_value" \
--sec "$ADMIN_PRIVKEY" 2>/dev/null)
if [ $? -ne 0 ] || [ -z "$event_json" ]; then
log_error "Failed to create auth rule event with nak"
return 1
fi
# Send the event through persistent WebSocket connection
log_info "Publishing auth rule event to relay..."
local result
if [ "$WS_CONNECTED" = "1" ]; then
result=$(send_websocket_event "$event_json")
local exit_code=$?
log_info "Auth rule event result: $result"
# Check if response indicates success
if [ $exit_code -eq 0 ] && echo "$result" | grep -q -i '"OK".*true'; then
log_success "Auth rule $action successful"
return 0
else
log_error "Auth rule $action failed: $result (exit code: $exit_code)"
return 1
fi
else
# Fallback to one-shot connection if persistent connection not available
result=$(echo "$event_json" | timeout 10s nak event "$RELAY_URL" 2>&1)
local exit_code=$?
log_info "Auth rule event result: $result"
# Check if response indicates success
if [ $exit_code -eq 0 ] && echo "$result" | grep -q -i "success\|OK.*true\|published"; then
log_success "Auth rule $action successful"
return 0
else
log_error "Auth rule $action failed: $result (exit code: $exit_code)"
return 1
fi
fi
}
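# Example (illustrative only): a call such as
#   send_auth_rule_event "add" "blacklist" "pubkey" "$TEST1_PUBKEY" "Block TEST1"
# is intended to yield a kind 23456 event whose tags follow the layout noted
# above; placeholder values shown, actual nak output may differ:
#   "tags": [["p","<relay pubkey>"], ["blacklist","pubkey","<target pubkey>"]]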
# Clear all auth rules using the new system command functionality
clear_all_auth_rules() {
log_info "Clearing all existing auth rules..."
# Create system command event to clear all auth rules
# Using Kind 23456 (ephemeral auth rules management) with proper relay targeting
local event_json
event_json=$(nak event -k 23456 --content "" \
-t "p=$RELAY_PUBKEY" \
-t "system_command=clear_all_auth_rules" \
--sec "$ADMIN_PRIVKEY" 2>/dev/null)
if [ $? -ne 0 ] || [ -z "$event_json" ]; then
log_error "Failed to create clear auth rules event with nak"
return 1
fi
# Send the event through persistent WebSocket connection
log_info "Sending clear all auth rules command..."
local result
if [ "$WS_CONNECTED" = "1" ]; then
result=$(send_websocket_event "$event_json")
local exit_code=$?
log_info "Clear auth rules result: $result"
# Check if response indicates success
if [ $exit_code -eq 0 ] && echo "$result" | grep -q -i '"OK".*true'; then
log_success "All auth rules cleared successfully"
return 0
else
log_error "Failed to clear auth rules: $result (exit code: $exit_code)"
return 1
fi
else
# Fallback to one-shot connection if persistent connection not available
result=$(echo "$event_json" | timeout 10s nak event "$RELAY_URL" 2>&1)
local exit_code=$?
log_info "Clear auth rules result: $result"
# Check if response indicates success
if [ $exit_code -eq 0 ] && echo "$result" | grep -q -i "success\|OK.*true\|published"; then
log_success "All auth rules cleared successfully"
return 0
else
log_error "Failed to clear auth rules: $result (exit code: $exit_code)"
return 1
fi
fi
}
# Test event publishing with a specific key
test_event_publishing() {
local test_privkey="$1"
local test_pubkey="$2"
local expected_result="$3" # "success" or "blocked"
local description="$4"
log_info "Testing event publishing: $description"
# Create a simple test event (kind 1 - text note) using nak like NIP-42 test
local test_content="Test message from ${test_pubkey:0:16}... at $(date)"
local test_event
test_event=$(nak event -k 1 --content "$test_content" --sec "$test_privkey" 2>/dev/null)
if [ $? -ne 0 ] || [ -z "$test_event" ]; then
log_error "Failed to create test event"
return 1
fi
# Send the event using nak directly (more reliable than websocat)
log_info "Publishing test event to relay..."
local result
result=$(echo "$test_event" | timeout 10s nak event "$RELAY_URL" 2>&1)
local exit_code=$?
log_info "Event publishing result: $result"
# Check result against expectation
if [ "$expected_result" = "success" ]; then
if [ $exit_code -eq 0 ] && echo "$result" | grep -q -i "success\|OK.*true\|published"; then
log_success "Event publishing allowed as expected"
return 0
else
log_error "Event publishing was blocked but should have been allowed: $result"
return 1
fi
else # expected_result = "blocked"
if [ $exit_code -ne 0 ] || echo "$result" | grep -q -i "blocked\|denied\|rejected\|auth.*required\|OK.*false"; then
log_success "Event publishing blocked as expected"
return 0
else
log_error "Event publishing was allowed but should have been blocked: $result"
return 1
fi
fi
}
# =======================================================================
# SETUP AND INITIALIZATION
# =======================================================================
setup_test_environment() {
log "Setting up test environment..."
# Create temporary directory
mkdir -p "$TEMP_DIR"
# Check if required tools are available - like NIP-42 test
log_info "Checking dependencies..."
if ! command -v nak &> /dev/null; then
log_error "nak client not found. Please install: go install github.com/fiatjaf/nak@latest"
exit 1
fi
if ! command -v jq &> /dev/null; then
log_error "jq not found. Please install jq for JSON processing"
exit 1
fi
if ! command -v timeout &> /dev/null; then
log_error "timeout not found. Please install coreutils"
exit 1
fi
if ! command -v websocat &> /dev/null; then
log_error "websocat not found - required for WebSocket testing"
log_error "Please install websocat for WebSocket communication"
exit 1
fi
log_success "Dependencies check complete"
# Generate test keypairs
generate_test_keypair "TEST1"
generate_test_keypair "TEST2"
generate_test_keypair "TEST3"
log_success "Test environment setup complete"
}
# =======================================================================
# TEST FUNCTIONS
# =======================================================================
# Test 1: Admin Authentication
test_admin_authentication() {
increment_test
log "Test $TESTS_RUN: Admin Authentication"
# Create a simple configuration event to test admin authentication
# Using Kind 23456 (admin commands) with proper relay targeting
local config_event
config_event=$(nak event -k 23456 --content "" \
-t "p=$RELAY_PUBKEY" \
-t "system_command=system_status" \
--sec "$ADMIN_PRIVKEY" 2>/dev/null)
if [ $? -ne 0 ]; then
fail_test "Failed to create admin test event"
return
fi
# Send admin event
local message="[\"EVENT\",$config_event]"
local response
response=$(send_websocket_message "$message" "OK" 10)
if echo "$response" | grep -q '"OK".*true'; then
pass_test "Admin authentication successful"
else
fail_test "Admin authentication failed: $response"
fi
}
# Test 2: Auth Rules Storage and Query Test
test_auth_rules_storage_query() {
increment_test
log "Test $TESTS_RUN: Auth Rules Storage and Query Test"
# Clear all existing rules to start fresh
clear_all_auth_rules
# Add a simple blacklist rule
log_info "Adding test blacklist rule..."
if send_auth_rule_event "add" "blacklist" "pubkey" "$TEST1_PUBKEY" "Test storage blacklist entry"; then
log_success "Auth rule added successfully"
# Wait a moment for rule to be processed
sleep 1
# Query all auth rules using admin query
log_info "Querying all auth rules..."
local query_event
query_event=$(nak event -k 23456 --content "" \
-t "p=$RELAY_PUBKEY" \
-t "auth_query=all" \
--sec "$ADMIN_PRIVKEY" 2>/dev/null)
if [ $? -ne 0 ] || [ -z "$query_event" ]; then
fail_test "Failed to create auth query event"
return
fi
# Send the query event
log_info "Sending auth query to relay..."
local query_result
query_result=$(echo "$query_event" | timeout 10s nak event "$RELAY_URL" 2>&1)
local exit_code=$?
log_info "Auth query result: $query_result"
# Check if we got a response and if it contains our test rule
if [ $exit_code -eq 0 ]; then
if echo "$query_result" | grep -q "$TEST1_PUBKEY"; then
pass_test "Auth rule storage and query working - found test rule in query results"
else
fail_test "Auth rule not found in query results - rule may not have been stored"
fi
else
fail_test "Auth query failed: $query_result"
fi
else
fail_test "Failed to add auth rule for storage test"
fi
}
# Test 3: Basic Whitelist Functionality
test_basic_whitelist() {
increment_test
log "Test $TESTS_RUN: Basic Whitelist Functionality"
# Clear all existing rules to start fresh
clear_all_auth_rules
# Add TEST1 pubkey to whitelist
if send_auth_rule_event "add" "whitelist" "pubkey" "$TEST1_PUBKEY" "Test whitelist entry"; then
# Test that whitelisted pubkey can publish
if test_event_publishing "$TEST1_PRIVKEY" "$TEST1_PUBKEY" "success" "whitelisted pubkey"; then
pass_test "Basic whitelist functionality working"
else
fail_test "Whitelisted pubkey could not publish events"
fi
else
fail_test "Failed to add pubkey to whitelist"
fi
}
# Test 4: Basic Blacklist Functionality
test_basic_blacklist() {
increment_test
log "Test $TESTS_RUN: Basic Blacklist Functionality"
# Clear all existing rules to start fresh
clear_all_auth_rules
# Add TEST2 pubkey to blacklist
if send_auth_rule_event "add" "blacklist" "pubkey" "$TEST2_PUBKEY" "Test blacklist entry"; then
# Test that blacklisted pubkey cannot publish
if test_event_publishing "$TEST2_PRIVKEY" "$TEST2_PUBKEY" "blocked" "blacklisted pubkey"; then
pass_test "Basic blacklist functionality working"
else
fail_test "Blacklisted pubkey was able to publish events"
fi
else
fail_test "Failed to add pubkey to blacklist"
fi
}
# Test 5: Rule Removal
test_rule_removal() {
increment_test
log "Test $TESTS_RUN: Rule Removal"
# Clear all existing rules to start fresh
clear_all_auth_rules
# First add TEST2 to blacklist to test removal
if ! send_auth_rule_event "add" "blacklist" "pubkey" "$TEST2_PUBKEY" "Test blacklist for removal"; then
fail_test "Failed to add pubkey to blacklist for removal test"
return
fi
# Remove TEST2 from blacklist
if send_auth_rule_event "remove" "blacklist" "pubkey" "$TEST2_PUBKEY" "Remove test blacklist entry"; then
# Test that previously blacklisted pubkey can now publish
if test_event_publishing "$TEST2_PRIVKEY" "$TEST2_PUBKEY" "success" "previously blacklisted pubkey after removal"; then
pass_test "Rule removal working correctly"
else
fail_test "Previously blacklisted pubkey still cannot publish after removal"
fi
else
fail_test "Failed to remove pubkey from blacklist"
fi
}
# Test 6: Multiple Users Scenario
test_multiple_users() {
increment_test
log "Test $TESTS_RUN: Multiple Users Scenario"
# Clear all existing rules to start fresh
clear_all_auth_rules
# Add TEST1 to whitelist and TEST3 to blacklist
local success_count=0
if send_auth_rule_event "add" "whitelist" "pubkey" "$TEST1_PUBKEY" "Multi-user test whitelist"; then
success_count=$((success_count + 1))
fi
if send_auth_rule_event "add" "blacklist" "pubkey" "$TEST3_PUBKEY" "Multi-user test blacklist"; then
success_count=$((success_count + 1))
fi
if [ $success_count -eq 2 ]; then
# Test whitelisted user can publish
if test_event_publishing "$TEST1_PRIVKEY" "$TEST1_PUBKEY" "success" "whitelisted in multi-user test"; then
# Test blacklisted user cannot publish
if test_event_publishing "$TEST3_PRIVKEY" "$TEST3_PUBKEY" "blocked" "blacklisted in multi-user test"; then
pass_test "Multiple users scenario working correctly"
else
fail_test "Blacklisted user in multi-user scenario was not blocked"
fi
else
fail_test "Whitelisted user in multi-user scenario was blocked"
fi
else
fail_test "Failed to set up multiple users scenario"
fi
}
# Test 7: Priority Testing (Blacklist vs Whitelist)
test_priority_rules() {
increment_test
log "Test $TESTS_RUN: Priority Rules Testing"
# Clear all existing rules to start fresh
clear_all_auth_rules
# Add same pubkey to both whitelist and blacklist
local setup_success=0
if send_auth_rule_event "add" "whitelist" "pubkey" "$TEST2_PUBKEY" "Priority test whitelist"; then
setup_success=$((setup_success + 1))
fi
if send_auth_rule_event "add" "blacklist" "pubkey" "$TEST2_PUBKEY" "Priority test blacklist"; then
setup_success=$((setup_success + 1))
fi
if [ $setup_success -eq 2 ]; then
# Test which rule takes priority (typically blacklist should win)
if test_event_publishing "$TEST2_PRIVKEY" "$TEST2_PUBKEY" "blocked" "pubkey in both whitelist and blacklist"; then
pass_test "Priority rules working correctly (blacklist takes precedence)"
else
# If whitelist wins, that's also valid depending on implementation
log_warning "Whitelist took precedence over blacklist - this may be implementation-specific"
pass_test "Priority rules working (whitelist precedence)"
fi
else
fail_test "Failed to set up priority rules test"
fi
}
# Test 8: Hash-based Blacklist
test_hash_blacklist() {
increment_test
log "Test $TESTS_RUN: Hash-based Blacklist"
# Clear all existing rules to start fresh
clear_all_auth_rules
# Create a test event to get its hash
local test_content="Content to be blacklisted by hash"
local test_event
test_event=$(nak event -k 1 --content "$test_content" --sec "$TEST1_PRIVKEY" 2>/dev/null)
if [ $? -ne 0 ] || [ -z "$test_event" ]; then
fail_test "Failed to create test event for hash blacklist"
return
fi
# Extract event ID (hash) from the event using jq
local event_id
event_id=$(echo "$test_event" | jq -r '.id' 2>/dev/null)
if [ -z "$event_id" ] || [ "$event_id" = "null" ]; then
fail_test "Failed to extract event ID for hash blacklist test"
return
fi
log_info "Testing hash blacklist with event ID: ${event_id:0:16}..."
# Add the event ID to hash blacklist
if send_auth_rule_event "add" "blacklist" "hash" "$event_id" "Test hash blacklist"; then
# Try to publish the same event using nak - should be blocked
log_info "Attempting to publish blacklisted event..."
local result
result=$(echo "$test_event" | timeout 10s nak event "$RELAY_URL" 2>&1)
local exit_code=$?
if [ $exit_code -ne 0 ] || echo "$result" | grep -q -i "blocked\|denied\|rejected\|blacklist"; then
pass_test "Hash-based blacklist working correctly"
else
fail_test "Hash-based blacklist did not block the event: $result"
fi
else
fail_test "Failed to add event hash to blacklist"
fi
}
# Test 9: WebSocket Connection Behavior
test_websocket_behavior() {
increment_test
log "Test $TESTS_RUN: WebSocket Connection Behavior"
# Clear all existing rules to start fresh
clear_all_auth_rules
# Test that the WebSocket connection handles multiple rapid requests
local rapid_success_count=0
for i in {1..3}; do
local test_content="Rapid test message $i"
local test_event
test_event=$(nak event -k 1 --content "$test_content" --sec "$TEST1_PRIVKEY" 2>/dev/null)
if [ $? -eq 0 ]; then
local message="[\"EVENT\",$test_event]"
local response
response=$(send_websocket_message "$message" "OK" 5)
if echo "$response" | grep -q '"OK"'; then
rapid_success_count=$((rapid_success_count + 1))
fi
fi
# Small delay between requests
sleep 0.1
done
if [ $rapid_success_count -ge 2 ]; then
pass_test "WebSocket connection handles multiple requests correctly"
else
fail_test "WebSocket connection failed to handle multiple rapid requests ($rapid_success_count/3 succeeded)"
fi
}
# Test 10: Rule Persistence Verification
test_rule_persistence() {
increment_test
log "Test $TESTS_RUN: Rule Persistence Verification"
# Clear all existing rules to start fresh
clear_all_auth_rules
# Add a rule, then verify it persists by testing enforcement
if send_auth_rule_event "add" "blacklist" "pubkey" "$TEST3_PUBKEY" "Persistence test blacklist"; then
# Wait a moment for rule to be processed
sleep 1
# Test enforcement multiple times to verify persistence
local enforcement_count=0
for i in {1..2}; do
if test_event_publishing "$TEST3_PRIVKEY" "$TEST3_PUBKEY" "blocked" "persistence test attempt $i"; then
enforcement_count=$((enforcement_count + 1))
fi
sleep 0.5
done
if [ $enforcement_count -eq 2 ]; then
pass_test "Rule persistence working correctly"
else
fail_test "Rule persistence failed ($enforcement_count/2 enforcements succeeded)"
fi
else
fail_test "Failed to add rule for persistence test"
fi
}
# Test 11: Cleanup and Final Verification
test_cleanup_verification() {
increment_test
log "Test $TESTS_RUN: Cleanup and Final Verification"
# Remove all test rules
local cleanup_success=0
# Remove whitelist entries
if send_auth_rule_event "remove" "whitelist" "pubkey" "$TEST1_PUBKEY" "Cleanup whitelist"; then
cleanup_success=$((cleanup_success + 1))
fi
# Remove blacklist entries
for pubkey in "$TEST2_PUBKEY" "$TEST3_PUBKEY"; do
if send_auth_rule_event "remove" "blacklist" "pubkey" "$pubkey" "Cleanup blacklist"; then
cleanup_success=$((cleanup_success + 1))
fi
done
if [ $cleanup_success -ge 2 ]; then
# Verify that previously restricted pubkeys can now publish
if test_event_publishing "$TEST3_PRIVKEY" "$TEST3_PUBKEY" "success" "after cleanup verification"; then
pass_test "Cleanup and verification successful"
else
log_warning "Cleanup completed but restrictions may still be active"
pass_test "Cleanup completed (partial verification)"
fi
else
fail_test "Cleanup failed ($cleanup_success rules removed)"
fi
}
# =======================================================================
# MAIN TEST EXECUTION
# =======================================================================
run_all_tests() {
log "Starting comprehensive whitelist/blacklist functionality tests..."
# Setup
setup_test_environment
clear_all_auth_rules
test_admin_authentication
test_auth_rules_storage_query
# test_basic_whitelist
# test_basic_blacklist
# test_rule_removal
# test_multiple_users
# test_priority_rules
# test_hash_blacklist
# test_websocket_behavior
# test_rule_persistence
# test_cleanup_verification
# Test summary
echo ""
echo -e "${BOLD}=== TEST SUMMARY ===${RESET}"
echo -e "Tests run: ${BLUE}$TESTS_RUN${RESET}"
echo -e "Tests passed: ${GREEN}$TESTS_PASSED${RESET}"
echo -e "Tests failed: ${RED}$TESTS_FAILED${RESET}"
echo ""
if [ $TESTS_FAILED -eq 0 ]; then
log_success "All tests passed! Whitelist/blacklist functionality is working correctly."
return 0
else
log_error "$TESTS_FAILED out of $TESTS_RUN tests failed."
return 1
fi
}
# =======================================================================
# CLEANUP FUNCTIONS
# =======================================================================
cleanup() {
log "Cleaning up test environment..."
# Remove temporary directory
if [ -n "$TEMP_DIR" ] && [ -d "$TEMP_DIR" ]; then
rm -rf "$TEMP_DIR"
log_info "Temporary directory removed: $TEMP_DIR"
fi
log "Test cleanup completed."
}
# Set up cleanup trap
trap cleanup EXIT
# =======================================================================
# SCRIPT ENTRY POINT
# =======================================================================
main() {
echo -e "${BOLD}${BLUE}C-Relay Whitelist/Blacklist Authentication Test${RESET}"
echo -e "${BLUE}===============================================${RESET}"
echo ""
# Check if relay is running - using websocat like the working tests
if ! echo '["REQ","connection_test",{}]' | timeout 5 websocat "$RELAY_URL" >/dev/null 2>&1; then
log_error "Cannot connect to relay at $RELAY_URL"
log_error "Please ensure the C-Relay server is running in test mode"
exit 1
fi
log_success "Connected to relay at $RELAY_URL"
# Run all tests
if run_all_tests; then
echo ""
log_success "All whitelist/blacklist tests completed successfully!"
exit 0
else
echo ""
log_error "Some tests failed."
exit 1
fi
}
# Run main function if script is executed directly
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi