Basic relay functionality completed

This commit is contained in:
Your Name
2025-09-04 07:10:13 -04:00
parent 227c579147
commit 662feab881
15 changed files with 2207 additions and 210 deletions

1
.gitignore vendored
View File

@@ -1 +1,2 @@
nostr_core_lib/
nips/

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -115,7 +115,7 @@ verify_database() {
log_info "Database schema version: $schema_version"
# Check that main tables exist
local table_count=$(sqlite3 "$DB_PATH" "SELECT count(*) FROM sqlite_master WHERE type='table' AND name IN ('event', 'tag');")
local table_count=$(sqlite3 "$DB_PATH" "SELECT count(*) FROM sqlite_master WHERE type='table' AND name IN ('events', 'schema_info');")
if [ "$table_count" -eq 2 ]; then
log_success "Core tables created successfully"
else

View File

@@ -1,66 +1,90 @@
-- C Nostr Relay Database Schema
-- Simplified schema with just event and tag tables
-- SQLite database for storing Nostr events
-- SQLite schema for storing Nostr events with JSON tags support
-- ============================================================================
-- DATABASE SETTINGS
-- ============================================================================
-- Schema version tracking
PRAGMA user_version = 2;
PRAGMA encoding = "UTF-8";
PRAGMA journal_mode = WAL;
PRAGMA auto_vacuum = FULL;
PRAGMA synchronous = NORMAL;
-- Enable foreign key support
PRAGMA foreign_keys = ON;
-- ============================================================================
-- EVENT TABLE
-- ============================================================================
-- Optimize for performance
PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
PRAGMA cache_size = 10000;
-- Main event table - stores all Nostr events
CREATE TABLE IF NOT EXISTS event (
-- Core events table with hybrid single-table design
CREATE TABLE events (
id TEXT PRIMARY KEY, -- Nostr event ID (hex string)
pubkey TEXT NOT NULL, -- Public key of event author (hex string)
created_at INTEGER NOT NULL, -- Event creation timestamp (Unix timestamp)
kind INTEGER NOT NULL, -- Event kind (0-65535)
event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
content TEXT NOT NULL, -- Event content (text content only)
sig TEXT NOT NULL -- Event signature (hex string)
sig TEXT NOT NULL, -- Event signature (hex string)
tags JSON NOT NULL DEFAULT '[]', -- Event tags as JSON array
first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now')) -- When relay received event
);
-- Event indexes for performance
CREATE INDEX IF NOT EXISTS event_pubkey_index ON event(pubkey);
CREATE INDEX IF NOT EXISTS event_created_at_index ON event(created_at);
CREATE INDEX IF NOT EXISTS event_kind_index ON event(kind);
CREATE INDEX IF NOT EXISTS event_pubkey_created_at_index ON event(pubkey, created_at);
CREATE INDEX IF NOT EXISTS event_kind_created_at_index ON event(kind, created_at);
-- Core performance indexes
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);
-- ============================================================================
-- TAG TABLE
-- ============================================================================
-- Composite indexes for common query patterns
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
CREATE INDEX idx_events_pubkey_kind ON events(pubkey, kind);
-- Tag table for storing event tags
CREATE TABLE IF NOT EXISTS tag (
id TEXT NOT NULL, -- Nostr event ID (references event.id)
name TEXT NOT NULL, -- Tag name (e.g., "e", "p", "d")
value TEXT NOT NULL, -- Tag value
parameters TEXT, -- Additional tag parameters (JSON string)
FOREIGN KEY(id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
-- Schema information table
CREATE TABLE schema_info (
key TEXT PRIMARY KEY,
value TEXT NOT NULL,
updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);
-- Tag indexes for performance
CREATE INDEX IF NOT EXISTS tag_id_index ON tag(id);
CREATE INDEX IF NOT EXISTS tag_name_index ON tag(name);
CREATE INDEX IF NOT EXISTS tag_name_value_index ON tag(name, value);
CREATE INDEX IF NOT EXISTS tag_id_name_index ON tag(id, name);
-- Insert schema metadata
INSERT INTO schema_info (key, value) VALUES
('version', '2'),
('description', 'Hybrid single-table Nostr relay schema with JSON tags'),
('created_at', strftime('%s', 'now'));
-- ============================================================================
-- PERFORMANCE OPTIMIZATIONS
-- ============================================================================
-- Helper views for common queries
CREATE VIEW recent_events AS
SELECT id, pubkey, created_at, kind, event_type, content
FROM events
WHERE event_type != 'ephemeral'
ORDER BY created_at DESC
LIMIT 1000;
-- Enable query planner optimizations
PRAGMA optimize;
CREATE VIEW event_stats AS
SELECT
event_type,
COUNT(*) as count,
AVG(length(content)) as avg_content_length,
MIN(created_at) as earliest,
MAX(created_at) as latest
FROM events
GROUP BY event_type;
-- Set recommended pragmas for performance
PRAGMA main.synchronous = NORMAL;
PRAGMA foreign_keys = ON;
PRAGMA temp_store = 2; -- use memory for temp tables
PRAGMA main.cache_size = 10000; -- 40MB cache per connection
-- Optimization: Trigger for automatic cleanup of ephemeral events older than 1 hour
CREATE TRIGGER cleanup_ephemeral_events
AFTER INSERT ON events
WHEN NEW.event_type = 'ephemeral'
BEGIN
DELETE FROM events
WHERE event_type = 'ephemeral'
AND first_seen < (strftime('%s', 'now') - 3600);
END;
-- Replaceable event handling trigger
CREATE TRIGGER handle_replaceable_events
AFTER INSERT ON events
WHEN NEW.event_type = 'replaceable'
BEGIN
DELETE FROM events
WHERE pubkey = NEW.pubkey
AND kind = NEW.kind
AND event_type = 'replaceable'
AND id != NEW.id;
END;

View File

@@ -0,0 +1,337 @@
# Advanced Nostr Relay Schema Design
## Overview
This document outlines the design for an advanced multi-table schema that enforces Nostr protocol compliance at the database level, with separate tables for different event types based on their storage and replacement characteristics.
## Event Type Classification
Based on the Nostr specification, events are classified into four categories:
### 1. Regular Events
- **Kinds**: `1000 <= n < 10000` || `4 <= n < 45` || `n == 1` || `n == 2`
- **Storage Policy**: All events stored permanently
- **Examples**: Text notes (1), Reposts (6), Reactions (7), Direct Messages (4)
### 2. Replaceable Events
- **Kinds**: `10000 <= n < 20000` || `n == 0` || `n == 3`
- **Storage Policy**: Only latest per `(pubkey, kind)` combination
- **Replacement Logic**: Latest `created_at`, then lowest `id` lexically
- **Examples**: Metadata (0), Contacts (3), Mute List (10000)
### 3. Ephemeral Events
- **Kinds**: `20000 <= n < 30000`
- **Storage Policy**: Not expected to be stored (optional temporary storage)
- **Examples**: Typing indicators, presence updates, ephemeral messages
### 4. Addressable Events
- **Kinds**: `30000 <= n < 40000`
- **Storage Policy**: Only latest per `(pubkey, kind, d_tag)` combination
- **Replacement Logic**: Same as replaceable events
- **Examples**: Long-form content (30023), Application-specific data
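For reference, the same classification can be written directly in SQL. This is an illustrative sketch only (not part of any schema proposed below), handy for ad-hoc checks in the `sqlite3` shell; the ranges mirror the four categories above:
```sql
-- Classify a kind value using the ranges listed above (example input: 30023)
SELECT CASE
           WHEN (kind >= 1000 AND kind < 10000) OR (kind >= 4 AND kind < 45)
                OR kind IN (1, 2)                              THEN 'regular'
           WHEN (kind >= 10000 AND kind < 20000) OR kind IN (0, 3) THEN 'replaceable'
           WHEN kind >= 20000 AND kind < 30000                 THEN 'ephemeral'
           WHEN kind >= 30000 AND kind < 40000                 THEN 'addressable'
           ELSE 'unknown'
       END AS event_type
FROM (SELECT 30023 AS kind);
```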
## SQLite JSON Capabilities Research
SQLite provides powerful JSON functions that could be leveraged for tag storage:
### Core JSON Functions
```sql
-- Extract specific values
json_extract(column, '$.path')
-- Iterate through arrays
json_each(json_array_column)
-- Flatten nested structures
json_tree(json_column)
-- Validate JSON structure
json_valid(column)
-- Array operations
json_array_length(column)
json_extract(column, '$[0]') -- First element
```
### Tag Query Examples
#### Find all 'e' tag references:
```sql
SELECT
events.id,
json_extract(value, '$[1]') as referenced_event_id,
json_extract(value, '$[2]') as relay_hint,
json_extract(value, '$[3]') as marker
FROM events, json_each(tags)
WHERE json_extract(value, '$[0]') = 'e';
```
#### Find events with specific hashtags:
```sql
SELECT events.id, content
FROM events, json_each(tags)
WHERE json_extract(value, '$[0]') = 't'
AND json_extract(value, '$[1]') = 'bitcoin';
```
#### Extract 'd' tag for addressable events:
```sql
SELECT
events.id,
json_extract(value, '$[1]') as d_tag_value
FROM events, json_each(tags)
WHERE events.id = ?                    -- the addressable event being looked up
AND json_extract(value, '$[0]') = 'd'
LIMIT 1;
```
### JSON Functional Indexes
```sql
-- Index on hashtags
CREATE INDEX idx_hashtags ON events(
json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 't';
-- Index on 'd' tags for addressable events
CREATE INDEX idx_d_tags ON events_addressable(
json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'd';
```
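A caveat on the two index sketches above: SQLite JSON paths do not support a `[*]` wildcard, and expression indexes cannot call table-valued functions such as `json_each()`, so those statements are illustrative rather than executable. In practice, tag lookups lean on the ordinary column indexes and filter tags with `json_each()` in the `WHERE` clause, for example:
```sql
-- Hashtag lookup without a JSON wildcard index: narrow by an indexed column
-- first, then let json_each() scan only the surviving rows.
SELECT e.id
FROM events AS e, json_each(e.tags) AS t
WHERE e.kind = 1                                  -- served by the kind index
  AND json_extract(t.value, '$[0]') = 't'
  AND json_extract(t.value, '$[1]') = 'bitcoin';
```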
## Proposed Schema Design
### Option 1: Separate Tables with JSON Tags
```sql
-- Regular Events (permanent storage)
CREATE TABLE events_regular (
id TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
created_at INTEGER NOT NULL,
kind INTEGER NOT NULL,
content TEXT NOT NULL,
sig TEXT NOT NULL,
tags JSON,
first_seen INTEGER DEFAULT (strftime('%s', 'now')),
CONSTRAINT kind_regular CHECK (
(kind >= 1000 AND kind < 10000) OR
(kind >= 4 AND kind < 45) OR
kind = 1 OR kind = 2
)
);
-- Replaceable Events (latest per pubkey+kind)
CREATE TABLE events_replaceable (
pubkey TEXT NOT NULL,
kind INTEGER NOT NULL,
id TEXT NOT NULL,
created_at INTEGER NOT NULL,
content TEXT NOT NULL,
sig TEXT NOT NULL,
tags JSON,
replaced_at INTEGER DEFAULT (strftime('%s', 'now')),
PRIMARY KEY (pubkey, kind),
CONSTRAINT kind_replaceable CHECK (
(kind >= 10000 AND kind < 20000) OR
kind = 0 OR kind = 3
)
);
-- Ephemeral Events (temporary/optional storage)
CREATE TABLE events_ephemeral (
id TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
created_at INTEGER NOT NULL,
kind INTEGER NOT NULL,
content TEXT NOT NULL,
sig TEXT NOT NULL,
tags JSON,
expires_at INTEGER DEFAULT (strftime('%s', 'now', '+1 hour')),
CONSTRAINT kind_ephemeral CHECK (
kind >= 20000 AND kind < 30000
)
);
-- Addressable Events (latest per pubkey+kind+d_tag)
CREATE TABLE events_addressable (
pubkey TEXT NOT NULL,
kind INTEGER NOT NULL,
d_tag TEXT NOT NULL,
id TEXT NOT NULL,
created_at INTEGER NOT NULL,
content TEXT NOT NULL,
sig TEXT NOT NULL,
tags JSON,
replaced_at INTEGER DEFAULT (strftime('%s', 'now')),
PRIMARY KEY (pubkey, kind, d_tag),
CONSTRAINT kind_addressable CHECK (
kind >= 30000 AND kind < 40000
)
);
```
### Indexes for Performance
```sql
-- Regular events indexes
CREATE INDEX idx_regular_pubkey ON events_regular(pubkey);
CREATE INDEX idx_regular_kind ON events_regular(kind);
CREATE INDEX idx_regular_created_at ON events_regular(created_at);
CREATE INDEX idx_regular_kind_created_at ON events_regular(kind, created_at);
-- Replaceable events indexes
CREATE INDEX idx_replaceable_created_at ON events_replaceable(created_at);
CREATE INDEX idx_replaceable_id ON events_replaceable(id);
-- Ephemeral events indexes
CREATE INDEX idx_ephemeral_expires_at ON events_ephemeral(expires_at);
CREATE INDEX idx_ephemeral_pubkey ON events_ephemeral(pubkey);
-- Addressable events indexes
CREATE INDEX idx_addressable_created_at ON events_addressable(created_at);
CREATE INDEX idx_addressable_id ON events_addressable(id);
-- JSON tag indexes (examples)
CREATE INDEX idx_regular_e_tags ON events_regular(
json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'e';
CREATE INDEX idx_regular_p_tags ON events_regular(
json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'p';
```
### Option 2: Unified Tag Table Approach
```sql
-- Unified tag storage (alternative to JSON)
CREATE TABLE tags_unified (
event_id TEXT NOT NULL,
event_type TEXT NOT NULL, -- 'regular', 'replaceable', 'ephemeral', 'addressable'
tag_index INTEGER NOT NULL, -- Position in tag array
name TEXT NOT NULL,
value TEXT NOT NULL,
param_2 TEXT, -- Third element if present
param_3 TEXT, -- Fourth element if present
param_json TEXT, -- JSON for additional parameters
PRIMARY KEY (event_id, tag_index)
);
CREATE INDEX idx_tags_name_value ON tags_unified(name, value);
CREATE INDEX idx_tags_event_type ON tags_unified(event_type);
```
## Implementation Strategy
### 1. Kind Classification Function (C Code)
```c
typedef enum {
EVENT_TYPE_REGULAR,
EVENT_TYPE_REPLACEABLE,
EVENT_TYPE_EPHEMERAL,
EVENT_TYPE_ADDRESSABLE,
EVENT_TYPE_INVALID
} event_type_t;
event_type_t classify_event_kind(int kind) {
if ((kind >= 1000 && kind < 10000) ||
(kind >= 4 && kind < 45) ||
kind == 1 || kind == 2) {
return EVENT_TYPE_REGULAR;
}
if ((kind >= 10000 && kind < 20000) ||
kind == 0 || kind == 3) {
return EVENT_TYPE_REPLACEABLE;
}
if (kind >= 20000 && kind < 30000) {
return EVENT_TYPE_EPHEMERAL;
}
if (kind >= 30000 && kind < 40000) {
return EVENT_TYPE_ADDRESSABLE;
}
return EVENT_TYPE_INVALID;
}
```
### 2. Replacement Logic for Replaceable Events
```sql
-- Trigger for replaceable events
CREATE TRIGGER replace_event_on_insert
BEFORE INSERT ON events_replaceable
FOR EACH ROW
WHEN EXISTS (
SELECT 1 FROM events_replaceable
WHERE pubkey = NEW.pubkey AND kind = NEW.kind
)
BEGIN
DELETE FROM events_replaceable
WHERE pubkey = NEW.pubkey
AND kind = NEW.kind
AND (
created_at < NEW.created_at OR
(created_at = NEW.created_at AND id > NEW.id)
);
END;
```
### 3. D-Tag Extraction for Addressable Events
```c
char* extract_d_tag(cJSON* tags) {
if (!tags || !cJSON_IsArray(tags)) {
return NULL;
}
cJSON* tag;
cJSON_ArrayForEach(tag, tags) {
if (cJSON_IsArray(tag) && cJSON_GetArraySize(tag) >= 2) {
cJSON* tag_name = cJSON_GetArrayItem(tag, 0);
cJSON* tag_value = cJSON_GetArrayItem(tag, 1);
if (cJSON_IsString(tag_name) && cJSON_IsString(tag_value)) {
if (strcmp(cJSON_GetStringValue(tag_name), "d") == 0) {
return strdup(cJSON_GetStringValue(tag_value));
}
}
}
}
return strdup(""); // Default empty d-tag
}
```
## Advantages of This Design
### 1. Protocol Compliance
- **Enforced at DB level**: Schema constraints prevent invalid event storage
- **Automatic replacement**: Triggers handle replaceable/addressable event logic
- **Type safety**: Separate tables ensure correct handling per event type
### 2. Performance Benefits
- **Targeted indexes**: Each table optimized for its access patterns
- **Reduced storage**: Ephemeral events can be auto-expired
- **Query optimization**: SQLite can optimize queries per table structure
### 3. JSON Tag Benefits
- **Atomic storage**: Tags stored with their event
- **Rich querying**: SQLite JSON functions enable complex tag queries
- **Schema flexibility**: Can handle arbitrary tag structures
- **Functional indexes**: Index specific tag patterns efficiently
## Migration Strategy
1. **Phase 1**: Create new schema alongside existing
2. **Phase 2**: Implement kind classification and routing logic
3. **Phase 3**: Migrate existing data to appropriate tables
4. **Phase 4**: Update application logic to use new tables
5. **Phase 5**: Drop old schema after verification
## Next Steps for Implementation
1. **Prototype JSON performance**: Create test database with sample data (see the sketch after this list)
2. **Benchmark query patterns**: Compare JSON vs normalized approaches
3. **Implement kind classification**: Add routing logic to C code
4. **Create migration scripts**: Handle existing data transformation
5. **Update test suite**: Verify compliance with new schema
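A minimal sketch for items 1–2 above (assumptions: the `sqlite3` CLI, a throwaway database at `/tmp/nostr_bench.db`, and the Option 1 `events_regular` table already created); it populates synthetic rows and times a hashtag lookup:
```sql
.open /tmp/nostr_bench.db
.timer on

-- 50k synthetic kind-1 events; every 10th one is tagged #bitcoin
WITH RECURSIVE n(i) AS (SELECT 1 UNION ALL SELECT i + 1 FROM n WHERE i < 50000)
INSERT INTO events_regular (id, pubkey, created_at, kind, content, sig, tags)
SELECT printf('%064x', i),                          -- fake 64-char hex id
       printf('%064x', i % 100),                    -- 100 synthetic authors
       1700000000 + i,
       1,
       'benchmark note ' || i,
       printf('%064x', i) || printf('%064x', i),    -- fake 128-char signature
       json_array(json_array('t', CASE WHEN i % 10 = 0 THEN 'bitcoin' ELSE 'misc' END))
FROM n;

-- JSON-tag lookup; compare wall time against the normalized tag-table layout
SELECT count(*)
FROM events_regular AS e, json_each(e.tags) AS t
WHERE json_extract(t.value, '$[0]') = 't'
  AND json_extract(t.value, '$[1]') = 'bitcoin';
```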

View File

@@ -0,0 +1,416 @@
# Final Schema Recommendation: Hybrid Single Table Approach
## Executive Summary
After analyzing subscription query complexity, we conclude that **the multi-table approach creates more problems than it solves**. REQ filters do not align with storage semantics: clients filter by kind, author, and tags regardless of event type classification.
**Recommendation: Modified Single Table with Event Type Classification**
## The Multi-Table Problem
### REQ Filter Reality Check
- Clients send: `{"kinds": [1, 0, 30023], "authors": ["pubkey"], "#p": ["target"]}`
- Multi-table requires: 3 separate queries + UNION + complex ordering
- Single table requires: 1 query with simple WHERE conditions
### Query Complexity Explosion
```sql
-- Multi-table nightmare for simple filter
WITH results AS (
SELECT * FROM events_regular WHERE kind = 1 AND pubkey = ?
UNION ALL
SELECT * FROM events_replaceable WHERE kind = 0 AND pubkey = ?
UNION ALL
SELECT * FROM events_addressable WHERE kind = 30023 AND pubkey = ?
)
SELECT r.* FROM results r
JOIN multiple_tag_tables t ON complex_conditions
ORDER BY created_at DESC, id ASC LIMIT ?;
-- vs Single table simplicity
SELECT e.* FROM events e, json_each(e.tags) t
WHERE e.kind IN (1, 0, 30023)
AND e.pubkey = ?
AND json_extract(t.value, '$[0]') = 'p'
AND json_extract(t.value, '$[1]') = ?
ORDER BY e.created_at DESC, e.id ASC LIMIT ?;
```
## Recommended Schema: Hybrid Approach
### Core Design Philosophy
- **Single table for REQ query simplicity**
- **Event type classification for protocol compliance**
- **JSON tags for atomic storage and rich querying**
- **Partial unique constraints for replacement logic**
### Schema Definition
```sql
CREATE TABLE events (
id TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
created_at INTEGER NOT NULL,
kind INTEGER NOT NULL,
event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
content TEXT NOT NULL,
sig TEXT NOT NULL,
tags JSON NOT NULL DEFAULT '[]',
first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
-- Additional fields for addressable events
d_tag TEXT GENERATED ALWAYS AS (
CASE
WHEN event_type = 'addressable' THEN
json_extract(tags, '$[*][1]')
FROM json_each(tags)
WHERE json_extract(value, '$[0]') = 'd'
LIMIT 1
ELSE NULL
END
) STORED,
-- Replacement tracking
replaced_at INTEGER,
-- Protocol compliance constraints
CONSTRAINT unique_replaceable
UNIQUE (pubkey, kind)
WHERE event_type = 'replaceable',
CONSTRAINT unique_addressable
UNIQUE (pubkey, kind, d_tag)
WHERE event_type = 'addressable' AND d_tag IS NOT NULL
);
```
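Two caveats on the DDL above, worth flagging before implementation: SQLite generated columns may not contain subqueries (so the `d_tag` expression as written will not parse), and `UNIQUE` table constraints cannot take a `WHERE` clause. A closer-to-valid sketch keeps `d_tag` as an ordinary column, fills it in with a trigger (or in application code), and expresses the per-type constraints as partial unique indexes; the names here are illustrative:
```sql
-- d_tag as a plain column, filled in after insert
CREATE TRIGGER set_d_tag
AFTER INSERT ON events
WHEN NEW.event_type = 'addressable' AND NEW.d_tag IS NULL
BEGIN
    UPDATE events
    SET d_tag = (SELECT json_extract(t.value, '$[1]')
                 FROM json_each(NEW.tags) AS t
                 WHERE json_extract(t.value, '$[0]') = 'd'
                 LIMIT 1)
    WHERE id = NEW.id;
END;

-- Partial UNIQUE indexes stand in for the per-type table constraints
CREATE UNIQUE INDEX idx_unique_replaceable
    ON events(pubkey, kind) WHERE event_type = 'replaceable';
CREATE UNIQUE INDEX idx_unique_addressable
    ON events(pubkey, kind, d_tag)
    WHERE event_type = 'addressable' AND d_tag IS NOT NULL;
```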
### Event Type Classification Function
```sql
-- Function to determine event type from kind
CREATE VIEW event_type_lookup AS
SELECT
CASE
WHEN (kind >= 1000 AND kind < 10000) OR
(kind >= 4 AND kind < 45) OR
kind = 1 OR kind = 2 THEN 'regular'
WHEN (kind >= 10000 AND kind < 20000) OR
kind = 0 OR kind = 3 THEN 'replaceable'
WHEN kind >= 20000 AND kind < 30000 THEN 'ephemeral'
WHEN kind >= 30000 AND kind < 40000 THEN 'addressable'
ELSE 'unknown'
END as event_type,
kind
FROM (
-- Generate all possible kind values for lookup
WITH RECURSIVE kinds(kind) AS (
SELECT 0
UNION ALL
SELECT kind + 1 FROM kinds WHERE kind < 65535
)
SELECT kind FROM kinds
);
```
### Performance Indexes
```sql
-- Core query patterns
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);
-- Composite indexes for common filters
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_type_created_at ON events(event_type, created_at DESC);
-- JSON tag indexes for common patterns
CREATE INDEX idx_events_e_tags ON events(
json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'e';
CREATE INDEX idx_events_p_tags ON events(
json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'p';
CREATE INDEX idx_events_hashtags ON events(
json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 't';
-- Addressable events d_tag index
CREATE INDEX idx_events_d_tag ON events(d_tag)
WHERE event_type = 'addressable' AND d_tag IS NOT NULL;
```
### Replacement Logic Implementation
#### Replaceable Events Trigger
```sql
CREATE TRIGGER handle_replaceable_events
BEFORE INSERT ON events
FOR EACH ROW
WHEN NEW.event_type = 'replaceable'
BEGIN
-- Delete older replaceable events with same pubkey+kind
DELETE FROM events
WHERE event_type = 'replaceable'
AND pubkey = NEW.pubkey
AND kind = NEW.kind
AND (
created_at < NEW.created_at OR
(created_at = NEW.created_at AND id > NEW.id)
);
END;
```
#### Addressable Events Trigger
```sql
CREATE TRIGGER handle_addressable_events
BEFORE INSERT ON events
FOR EACH ROW
WHEN NEW.event_type = 'addressable'
BEGIN
-- Delete older addressable events with same pubkey+kind+d_tag
DELETE FROM events
WHERE event_type = 'addressable'
AND pubkey = NEW.pubkey
AND kind = NEW.kind
AND d_tag = NEW.d_tag
AND (
created_at < NEW.created_at OR
(created_at = NEW.created_at AND id > NEW.id)
);
END;
```
## Implementation Strategy
### C Code Integration
#### Event Type Classification
```c
typedef enum {
EVENT_TYPE_REGULAR,
EVENT_TYPE_REPLACEABLE,
EVENT_TYPE_EPHEMERAL,
EVENT_TYPE_ADDRESSABLE,
EVENT_TYPE_UNKNOWN
} event_type_t;
event_type_t classify_event_kind(int kind) {
if ((kind >= 1000 && kind < 10000) ||
(kind >= 4 && kind < 45) ||
kind == 1 || kind == 2) {
return EVENT_TYPE_REGULAR;
}
if ((kind >= 10000 && kind < 20000) ||
kind == 0 || kind == 3) {
return EVENT_TYPE_REPLACEABLE;
}
if (kind >= 20000 && kind < 30000) {
return EVENT_TYPE_EPHEMERAL;
}
if (kind >= 30000 && kind < 40000) {
return EVENT_TYPE_ADDRESSABLE;
}
return EVENT_TYPE_UNKNOWN;
}
const char* event_type_to_string(event_type_t type) {
switch (type) {
case EVENT_TYPE_REGULAR: return "regular";
case EVENT_TYPE_REPLACEABLE: return "replaceable";
case EVENT_TYPE_EPHEMERAL: return "ephemeral";
case EVENT_TYPE_ADDRESSABLE: return "addressable";
default: return "unknown";
}
}
```
#### Simplified Event Storage
```c
int store_event(cJSON* event) {
// Extract fields
cJSON* id = cJSON_GetObjectItem(event, "id");
cJSON* pubkey = cJSON_GetObjectItem(event, "pubkey");
cJSON* created_at = cJSON_GetObjectItem(event, "created_at");
cJSON* kind = cJSON_GetObjectItem(event, "kind");
cJSON* content = cJSON_GetObjectItem(event, "content");
cJSON* sig = cJSON_GetObjectItem(event, "sig");
// Classify event type
event_type_t type = classify_event_kind(cJSON_GetNumberValue(kind));
// Serialize tags to JSON (empty array if the event has none)
cJSON* tags = cJSON_GetObjectItem(event, "tags");
char* tags_json = (tags && cJSON_IsArray(tags)) ? cJSON_Print(tags) : strdup("[]");
// Single INSERT statement - database handles replacement via triggers
const char* sql =
"INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
free(tags_json);
return -1;
}
sqlite3_bind_text(stmt, 1, cJSON_GetStringValue(id), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, cJSON_GetStringValue(pubkey), -1, SQLITE_STATIC);
sqlite3_bind_int64(stmt, 3, (sqlite3_int64)cJSON_GetNumberValue(created_at));
sqlite3_bind_int(stmt, 4, (int)cJSON_GetNumberValue(kind));
sqlite3_bind_text(stmt, 5, event_type_to_string(type), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 6, cJSON_GetStringValue(content), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 7, cJSON_GetStringValue(sig), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 8, tags_json, -1, SQLITE_TRANSIENT);
rc = sqlite3_step(stmt);
sqlite3_finalize(stmt);
free(tags_json);
return (rc == SQLITE_DONE) ? 0 : -1;
}
```
#### Simple REQ Query Building
```c
char* build_filter_query(cJSON* filter) {
// Build single query against events table
// Much simpler than multi-table approach
GString* query = g_string_new("SELECT * FROM events WHERE 1=1");
// Handle ids filter
cJSON* ids = cJSON_GetObjectItem(filter, "ids");
if (ids && cJSON_IsArray(ids)) {
g_string_append(query, " AND id IN (");
// Add parameter placeholders
g_string_append(query, ")");
}
// Handle authors filter
cJSON* authors = cJSON_GetObjectItem(filter, "authors");
if (authors && cJSON_IsArray(authors)) {
g_string_append(query, " AND pubkey IN (");
// Add parameter placeholders
g_string_append(query, ")");
}
// Handle kinds filter
cJSON* kinds = cJSON_GetObjectItem(filter, "kinds");
if (kinds && cJSON_IsArray(kinds)) {
g_string_append(query, " AND kind IN (");
// Add parameter placeholders
g_string_append(query, ")");
}
// Handle tag filters (#e, #p, etc.)
cJSON* item;
cJSON_ArrayForEach(item, filter) {
char* key = item->string;
if (key && key[0] == '#' && strlen(key) == 2) {
char tag_name = key[1];
g_string_append_printf(query,
" AND EXISTS (SELECT 1 FROM json_each(tags) "
"WHERE json_extract(value, '$[0]') = '%c' "
"AND json_extract(value, '$[1]') IN (", tag_name);
// Add parameter placeholders
g_string_append(query, "))");
}
}
// Handle time range
cJSON* since = cJSON_GetObjectItem(filter, "since");
if (since) {
g_string_append(query, " AND created_at >= ?");
}
cJSON* until = cJSON_GetObjectItem(filter, "until");
if (until) {
g_string_append(query, " AND created_at <= ?");
}
// Standard ordering and limit
g_string_append(query, " ORDER BY created_at DESC, id ASC");
cJSON* limit = cJSON_GetObjectItem(filter, "limit");
if (limit) {
g_string_append(query, " LIMIT ?");
}
return g_string_free(query, FALSE);
}
```
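The `// Add parameter placeholders` comments above elide the IN-list expansion. A minimal sketch of that step (the helper name `append_placeholders` is hypothetical, not part of the relay code):
```c
// Hypothetical helper: append "?,?,...,?" for an N-element IN (...) list.
static void append_placeholders(GString* query, int count) {
    for (int i = 0; i < count; i++) {
        g_string_append(query, i == 0 ? "?" : ",?");
    }
}

// Usage inside build_filter_query(), e.g. for the "authors" filter:
//   g_string_append(query, " AND pubkey IN (");
//   append_placeholders(query, cJSON_GetArraySize(authors));
//   g_string_append(query, ")");
// The values are then bound with sqlite3_bind_text() in the same order the
// placeholders were appended.
```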
## Benefits of This Approach
### 1. Query Simplicity
- ✅ Single table = simple REQ queries
- ✅ No UNION complexity
- ✅ Familiar SQL patterns
- ✅ Easy LIMIT and ORDER BY handling
### 2. Protocol Compliance
- ✅ Event type classification enforced
- ✅ Replacement logic via triggers
- ✅ Unique constraints prevent duplicates
- ✅ Proper handling of all event types
### 3. Performance
- ✅ Unified indexes across all events
- ✅ No join overhead for basic queries
- ✅ JSON tag indexes for complex filters
- ✅ Single table scan for cross-kind queries
### 4. Implementation Simplicity
- ✅ Minimal changes from current code
- ✅ Database handles replacement logic
- ✅ Simple event storage function
- ✅ No complex routing logic needed
### 5. Future Flexibility
- ✅ Can add columns for new event types
- ✅ Can split tables later if needed
- ✅ Easy to add new indexes
- ✅ Extensible constraint system
## Migration Path
### Phase 1: Schema Update
1. Add `event_type` column to existing events table
2. Add JSON `tags` column
3. Create classification triggers
4. Add partial unique constraints
### Phase 2: Data Migration
1. Classify existing events by kind
2. Convert existing tag table data to JSON
3. Verify constraint compliance
4. Update indexes
### Phase 3: Code Updates
1. Update event storage to use new schema
2. Simplify REQ query building
3. Remove tag table JOIN logic
4. Test subscription filtering
### Phase 4: Optimization
1. Monitor query performance
2. Add specialized indexes as needed
3. Tune replacement triggers
4. Consider ephemeral event cleanup
## Conclusion
This hybrid approach achieves the best of both worlds:
- **Protocol compliance** through event type classification and constraints
- **Query simplicity** through unified storage
- **Performance** through targeted indexes
- **Implementation ease** through minimal complexity
The multi-table approach, while theoretically cleaner, creates a subscription query nightmare that would significantly burden the implementation. The hybrid single-table approach provides all the benefits with manageable complexity.

326
docs/implementation_plan.md Normal file
View File

@@ -0,0 +1,326 @@
# Implementation Plan: Hybrid Schema Migration
## Overview
Migrating from the current two-table design (event + tag tables) to a single event table with JSON tags column and event type classification.
## Current Schema → Target Schema
### Current Schema (to be replaced)
```sql
CREATE TABLE event (
id TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
created_at INTEGER NOT NULL,
kind INTEGER NOT NULL,
content TEXT NOT NULL,
sig TEXT NOT NULL
);
CREATE TABLE tag (
id TEXT NOT NULL, -- references event.id
name TEXT NOT NULL,
value TEXT NOT NULL,
parameters TEXT
);
```
### Target Schema (simplified from final recommendation)
```sql
CREATE TABLE events (
id TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
created_at INTEGER NOT NULL,
kind INTEGER NOT NULL,
event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
content TEXT NOT NULL,
sig TEXT NOT NULL,
tags JSON NOT NULL DEFAULT '[]',
first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
-- Optional: Protocol compliance constraints (can be added later)
CONSTRAINT unique_replaceable
UNIQUE (pubkey, kind) WHERE event_type = 'replaceable',
CONSTRAINT unique_addressable
UNIQUE (pubkey, kind, json_extract(tags, '$[?(@[0]=="d")][1]'))
WHERE event_type = 'addressable'
);
```
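A hedge on the optional constraints above: SQLite's JSON path syntax has no `[?()]` filter expressions, and `UNIQUE` table constraints cannot carry a `WHERE` clause. If these constraints are added later, they would instead take the form of partial unique indexes over ordinary columns, along the lines of:
```sql
CREATE UNIQUE INDEX idx_unique_replaceable
    ON events(pubkey, kind) WHERE event_type = 'replaceable';
-- assumes a plain d_tag column maintained by the application or a trigger
CREATE UNIQUE INDEX idx_unique_addressable
    ON events(pubkey, kind, d_tag)
    WHERE event_type = 'addressable' AND d_tag IS NOT NULL;
```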
## Implementation Steps
### Phase 1: Update Schema File
**File**: `db/schema.sql`
1. Replace current event table definition
2. Remove tag table completely
3. Add new indexes for performance
4. Add event type classification logic
### Phase 2: Update C Code
**File**: `src/main.c`
1. Add event type classification function
2. Update `store_event()` function to use JSON tags
3. Update `retrieve_event()` function to return JSON tags
4. Remove all tag table related code
5. Update REQ query handling to use JSON tag queries
### Phase 3: Update Database Initialization
**File**: `db/init.sh`
1. Update table count validation (expect 1 table instead of 2)
2. Update schema verification logic
### Phase 4: Update Tests
**File**: `tests/1_nip_test.sh`
1. Verify events are stored with JSON tags
2. Test query functionality with new schema
3. Validate event type classification
### Phase 5: Migration Strategy
Create migration script to handle existing data (if any).
## Detailed Implementation
### 1. Event Type Classification
```c
// Add to src/main.c
typedef enum {
EVENT_TYPE_REGULAR,
EVENT_TYPE_REPLACEABLE,
EVENT_TYPE_EPHEMERAL,
EVENT_TYPE_ADDRESSABLE,
EVENT_TYPE_UNKNOWN
} event_type_t;
event_type_t classify_event_kind(int kind) {
if ((kind >= 1000 && kind < 10000) ||
(kind >= 4 && kind < 45) ||
kind == 1 || kind == 2) {
return EVENT_TYPE_REGULAR;
}
if ((kind >= 10000 && kind < 20000) ||
kind == 0 || kind == 3) {
return EVENT_TYPE_REPLACEABLE;
}
if (kind >= 20000 && kind < 30000) {
return EVENT_TYPE_EPHEMERAL;
}
if (kind >= 30000 && kind < 40000) {
return EVENT_TYPE_ADDRESSABLE;
}
return EVENT_TYPE_UNKNOWN;
}
const char* event_type_to_string(event_type_t type) {
switch (type) {
case EVENT_TYPE_REGULAR: return "regular";
case EVENT_TYPE_REPLACEABLE: return "replaceable";
case EVENT_TYPE_EPHEMERAL: return "ephemeral";
case EVENT_TYPE_ADDRESSABLE: return "addressable";
default: return "unknown";
}
}
```
### 2. Updated store_event Function
```c
// Replace existing store_event function
int store_event(cJSON* event) {
if (!g_db || !event) {
return -1;
}
// Extract event fields
cJSON* id = cJSON_GetObjectItem(event, "id");
cJSON* pubkey = cJSON_GetObjectItem(event, "pubkey");
cJSON* created_at = cJSON_GetObjectItem(event, "created_at");
cJSON* kind = cJSON_GetObjectItem(event, "kind");
cJSON* content = cJSON_GetObjectItem(event, "content");
cJSON* sig = cJSON_GetObjectItem(event, "sig");
cJSON* tags = cJSON_GetObjectItem(event, "tags");
if (!id || !pubkey || !created_at || !kind || !content || !sig) {
log_error("Invalid event - missing required fields");
return -1;
}
// Classify event type
event_type_t type = classify_event_kind((int)cJSON_GetNumberValue(kind));
// Serialize tags to JSON (use empty array if no tags)
char* tags_json = NULL;
if (tags && cJSON_IsArray(tags)) {
tags_json = cJSON_Print(tags);
} else {
tags_json = strdup("[]");
}
if (!tags_json) {
log_error("Failed to serialize tags to JSON");
return -1;
}
// Single INSERT statement
const char* sql =
"INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
log_error("Failed to prepare event insert statement");
free(tags_json);
return -1;
}
// Bind parameters
sqlite3_bind_text(stmt, 1, cJSON_GetStringValue(id), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 2, cJSON_GetStringValue(pubkey), -1, SQLITE_STATIC);
sqlite3_bind_int64(stmt, 3, (sqlite3_int64)cJSON_GetNumberValue(created_at));
sqlite3_bind_int(stmt, 4, (int)cJSON_GetNumberValue(kind));
sqlite3_bind_text(stmt, 5, event_type_to_string(type), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 6, cJSON_GetStringValue(content), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 7, cJSON_GetStringValue(sig), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 8, tags_json, -1, SQLITE_TRANSIENT);
// Execute statement
rc = sqlite3_step(stmt);
sqlite3_finalize(stmt);
if (rc != SQLITE_DONE) {
if (rc == SQLITE_CONSTRAINT) {
log_warning("Event already exists in database");
free(tags_json);
return 0; // Not an error, just duplicate
}
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Failed to insert event: %s", sqlite3_errmsg(g_db));
log_error(error_msg);
free(tags_json);
return -1;
}
free(tags_json);
log_success("Event stored in database");
return 0;
}
```
### 3. Updated retrieve_event Function
```c
// Replace existing retrieve_event function
cJSON* retrieve_event(const char* event_id) {
if (!g_db || !event_id) {
return NULL;
}
const char* sql =
"SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE id = ?";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
return NULL;
}
sqlite3_bind_text(stmt, 1, event_id, -1, SQLITE_STATIC);
cJSON* event = NULL;
if (sqlite3_step(stmt) == SQLITE_ROW) {
event = cJSON_CreateObject();
cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
cJSON_AddStringToObject(event, "pubkey", (char*)sqlite3_column_text(stmt, 1));
cJSON_AddNumberToObject(event, "created_at", sqlite3_column_int64(stmt, 2));
cJSON_AddNumberToObject(event, "kind", sqlite3_column_int(stmt, 3));
cJSON_AddStringToObject(event, "content", (char*)sqlite3_column_text(stmt, 4));
cJSON_AddStringToObject(event, "sig", (char*)sqlite3_column_text(stmt, 5));
// Parse tags JSON
const char* tags_json = (char*)sqlite3_column_text(stmt, 6);
if (tags_json) {
cJSON* tags = cJSON_Parse(tags_json);
if (tags) {
cJSON_AddItemToObject(event, "tags", tags);
} else {
cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
}
} else {
cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
}
}
sqlite3_finalize(stmt);
return event;
}
```
## Migration Considerations
### Handling Existing Data
If there's existing data in the current schema:
1. **Export existing events and tags**
2. **Transform tag data to JSON format** (see the sketch after this list)
3. **Classify events by kind**
4. **Import into new schema**
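A sketch of steps 2–3 in SQL (assumptions: the old `event`/`tag` tables and the new `events` table live in the same database file; tag parameters beyond the first two positions are dropped for brevity):
```sql
-- Copy old rows into the new table, classifying by kind and folding the tag
-- table into a JSON array per event.
INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags)
SELECT e.id, e.pubkey, e.created_at, e.kind,
       CASE
           WHEN (e.kind >= 1000 AND e.kind < 10000) OR (e.kind >= 4 AND e.kind < 45)
                OR e.kind IN (1, 2)                                THEN 'regular'
           WHEN (e.kind >= 10000 AND e.kind < 20000) OR e.kind IN (0, 3) THEN 'replaceable'
           WHEN e.kind >= 20000 AND e.kind < 30000                 THEN 'ephemeral'
           WHEN e.kind >= 30000 AND e.kind < 40000                 THEN 'addressable'
           ELSE 'regular'
       END,
       e.content, e.sig,
       COALESCE((SELECT json_group_array(json_array(t.name, t.value))
                 FROM tag AS t WHERE t.id = e.id), '[]')
FROM event AS e;
```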
### Backward Compatibility
- API remains the same - events still have the same JSON structure
- Internal storage changes but external interface is unchanged
- Tests should pass with minimal modifications
## Performance Optimizations
### Essential Indexes
```sql
-- Core performance indexes
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_event_type ON events(event_type);
-- Composite indexes for common query patterns
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
-- JSON tag indexes for common tag patterns
CREATE INDEX idx_events_e_tags ON events(
json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'e';
CREATE INDEX idx_events_p_tags ON events(
json_extract(tags, '$[*][1]')
) WHERE json_extract(tags, '$[*][0]') = 'p';
```
## Next Steps
1. **Switch to code mode** to implement the schema changes
2. **Update db/schema.sql** with new table definition
3. **Modify src/main.c** with new functions
4. **Update db/init.sh** for single table validation
5. **Test with existing test suite**
This approach will provide:
- ✅ Simplified schema management
- ✅ Protocol compliance preparation
- ✅ JSON tag query capabilities
- ✅ Performance optimization opportunities
- ✅ Easy REQ subscription handling
Ready to proceed with implementation?

View File

@@ -0,0 +1,331 @@
# Subscription Query Complexity Analysis
## Overview
This document analyzes how Nostr REQ subscription filters would be implemented across different schema designs, focusing on query complexity, performance implications, and implementation burden.
## Nostr REQ Filter Specification Recap
Clients send REQ messages with filters containing:
- **`ids`**: List of specific event IDs
- **`authors`**: List of pubkeys
- **`kinds`**: List of event kinds
- **`#<letter>`**: Tag filters (e.g., `#e` for event refs, `#p` for pubkey mentions)
- **`since`/`until`**: Time range filters
- **`limit`**: Maximum events to return
### Key Filter Behaviors:
- **Multiple filters = OR logic**: Match any filter
- **Within filter = AND logic**: Match all specified conditions
- **Lists = IN logic**: Match any value in the list
- **Tag filters**: Must have at least one matching tag
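As a sketch of how these rules combine, a REQ carrying two filters maps to the OR (UNION) of two per-filter queries against the current two-table schema shown below, with AND conditions inside each filter (placeholder values):
```sql
-- ["REQ", "sub1", {"kinds": [1], "authors": ["a1", "a2"]}, {"#e": ["e1"]}]
SELECT * FROM event
WHERE kind IN (1)                      -- AND within the first filter
  AND pubkey IN ('a1', 'a2')
UNION                                  -- OR across the two filters
SELECT e.* FROM event e
WHERE EXISTS (SELECT 1 FROM tag t
              WHERE t.id = e.id AND t.name = 'e' AND t.value = 'e1')
ORDER BY created_at DESC, id ASC
LIMIT 500;
```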
## Schema Comparison for REQ Handling
### Current Simple Schema (Single Table)
```sql
CREATE TABLE event (
id TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
created_at INTEGER NOT NULL,
kind INTEGER NOT NULL,
content TEXT NOT NULL,
sig TEXT NOT NULL
);
CREATE TABLE tag (
id TEXT NOT NULL, -- event ID
name TEXT NOT NULL,
value TEXT NOT NULL,
parameters TEXT
);
```
#### Sample REQ Query Implementation:
```sql
-- Filter: {"authors": ["pubkey1", "pubkey2"], "kinds": [1, 6], "#p": ["target_pubkey"]}
SELECT DISTINCT e.*
FROM event e
WHERE e.pubkey IN ('pubkey1', 'pubkey2')
AND e.kind IN (1, 6)
AND EXISTS (
SELECT 1 FROM tag t
WHERE t.id = e.id AND t.name = 'p' AND t.value = 'target_pubkey'
)
ORDER BY e.created_at DESC, e.id ASC
LIMIT ?;
```
### Multi-Table Schema Challenge
With separate tables (`events_regular`, `events_replaceable`, `events_ephemeral`, `events_addressable`), a REQ filter could potentially match events across ALL tables.
#### Problem Example:
Filter: `{"kinds": [1, 0, 20001, 30023]}`
- Kind 1 → `events_regular`
- Kind 0 → `events_replaceable`
- Kind 20001 → `events_ephemeral`
- Kind 30023 → `events_addressable`
This requires **4 separate queries + UNION**, significantly complicating the implementation.
## Multi-Table Query Complexity
### Scenario 1: Cross-Table Kind Filter
```sql
-- Filter: {"kinds": [1, 0, 30023]}
-- Requires querying 3 different tables
SELECT id, pubkey, created_at, kind, content, sig FROM events_regular
WHERE kind = 1
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_replaceable
WHERE kind = 0
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_addressable
WHERE kind = 30023
ORDER BY created_at DESC, id ASC
LIMIT ?;
```
### Scenario 2: Cross-Table Author Filter
```sql
-- Filter: {"authors": ["pubkey1"]}
-- Must check ALL tables for this author
SELECT id, pubkey, created_at, kind, content, sig FROM events_regular
WHERE pubkey = 'pubkey1'
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_replaceable
WHERE pubkey = 'pubkey1'
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_ephemeral
WHERE pubkey = 'pubkey1'
UNION ALL
SELECT id, pubkey, created_at, kind, content, sig FROM events_addressable
WHERE pubkey = 'pubkey1'
ORDER BY created_at DESC, id ASC
LIMIT ?;
```
### Scenario 3: Complex Multi-Condition Filter
```sql
-- Filter: {"authors": ["pubkey1"], "kinds": [1, 0], "#p": ["target"], "since": 1234567890}
-- Extremely complex with multiple UNIONs and tag JOINs
WITH regular_results AS (
SELECT DISTINCT r.*
FROM events_regular r
JOIN tags_regular tr ON r.id = tr.event_id
WHERE r.pubkey = 'pubkey1'
AND r.kind = 1
AND r.created_at >= 1234567890
AND tr.name = 'p' AND tr.value = 'target'
),
replaceable_results AS (
SELECT DISTINCT rp.*
FROM events_replaceable rp
JOIN tags_replaceable trp ON (rp.pubkey, rp.kind) = (trp.event_pubkey, trp.event_kind)
WHERE rp.pubkey = 'pubkey1'
AND rp.kind = 0
AND rp.created_at >= 1234567890
AND trp.name = 'p' AND trp.value = 'target'
)
SELECT * FROM regular_results
UNION ALL
SELECT * FROM replaceable_results
ORDER BY created_at DESC, id ASC
LIMIT ?;
```
## Implementation Burden Analysis
### Single Table Approach
```c
// Simple - one query builder function
char* build_filter_query(cJSON* filters) {
// Build single SELECT with WHERE conditions
// Single ORDER BY and LIMIT
// One execution path
}
```
### Multi-Table Approach
```c
// Complex - requires routing and union logic
char* build_multi_table_query(cJSON* filters) {
// 1. Analyze kinds to determine which tables to query
// 2. Split filters per table type
// 3. Build separate queries for each table
// 4. Union results with complex ORDER BY
// 5. Handle LIMIT across UNION (tricky!)
}
typedef struct {
bool needs_regular;
bool needs_replaceable;
bool needs_ephemeral;
bool needs_addressable;
cJSON* regular_filter;
cJSON* replaceable_filter;
cJSON* ephemeral_filter;
cJSON* addressable_filter;
} filter_routing_t;
```
### Query Routing Complexity
For each REQ filter, we must:
1. **Analyze kinds** → Determine which tables to query
2. **Split filters** → Create per-table filter conditions
3. **Handle tag filters** → Different tag table references per event type
4. **Union results** → Merge with proper ordering
5. **Apply LIMIT** → Complex with UNION queries
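A sketch of step 1, using the `filter_routing_t` struct above and the `classify_event_kind()` function from the relay code (the helper name `route_kinds` is hypothetical):
```c
// Mark which per-type tables the "kinds" filter would force us to query.
static void route_kinds(cJSON* kinds, filter_routing_t* routing) {
    cJSON* k;
    cJSON_ArrayForEach(k, kinds) {
        switch (classify_event_kind((int)cJSON_GetNumberValue(k))) {
            case EVENT_TYPE_REGULAR:     routing->needs_regular = true; break;
            case EVENT_TYPE_REPLACEABLE: routing->needs_replaceable = true; break;
            case EVENT_TYPE_EPHEMERAL:   routing->needs_ephemeral = true; break;
            case EVENT_TYPE_ADDRESSABLE: routing->needs_addressable = true; break;
            default: break;              // unknown kinds match no table
        }
    }
}
```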
## Performance Implications
### Single Table Advantages:
- **Single query execution**
- **One index strategy**
- **Simple LIMIT handling**
- **Unified ORDER BY**
- **No UNION overhead**
### Multi-Table Disadvantages:
- **Multiple query executions**
- **UNION sorting overhead**
- **Complex LIMIT application**
- **Index fragmentation across tables**
- **Result set merging complexity**
## Specific REQ Filter Challenges
### 1. LIMIT Handling with UNION
```sql
-- WRONG: Limit applies to each subquery
(SELECT * FROM events_regular WHERE ... LIMIT 100)
UNION ALL
(SELECT * FROM events_replaceable WHERE ... LIMIT 100)
-- Could return 200 events!
-- CORRECT: Limit applies to final result
SELECT * FROM (
SELECT * FROM events_regular WHERE ...
UNION ALL
SELECT * FROM events_replaceable WHERE ...
ORDER BY created_at DESC, id ASC
) LIMIT 100;
-- But this sorts ALL results before limiting!
```
### 2. Tag Filter Complexity
Each event type needs different tag table joins:
- `events_regular` → `tags_regular`
- `events_replaceable` → `tags_replaceable` (with composite key)
- `events_addressable` → `tags_addressable` (with composite key)
- `events_ephemeral` → `tags_ephemeral`
### 3. Subscription State Management
With multiple tables, subscription state becomes complex:
- Which tables does this subscription monitor?
- How to efficiently check new events across tables?
- Different trigger/notification patterns per table
## Alternative: Unified Event View
### Hybrid Approach: Views Over Multi-Tables
```sql
-- Create unified view for queries
CREATE VIEW all_events AS
SELECT
'regular' as event_type,
id, pubkey, created_at, kind, content, sig
FROM events_regular
UNION ALL
SELECT
'replaceable' as event_type,
id, pubkey, created_at, kind, content, sig
FROM events_replaceable
UNION ALL
SELECT
'ephemeral' as event_type,
id, pubkey, created_at, kind, content, sig
FROM events_ephemeral
UNION ALL
SELECT
'addressable' as event_type,
id, pubkey, created_at, kind, content, sig
FROM events_addressable;
-- Unified tag view
CREATE VIEW all_tags AS
SELECT event_id, name, value, parameters FROM tags_regular
UNION ALL
SELECT CONCAT(event_pubkey, ':', event_kind), name, value, parameters FROM tags_replaceable
UNION ALL
SELECT event_id, name, value, parameters FROM tags_ephemeral
UNION ALL
SELECT CONCAT(event_pubkey, ':', event_kind, ':', d_tag), name, value, parameters FROM tags_addressable;
```
### REQ Query Against Views:
```sql
-- Much simpler - back to single-table complexity
SELECT DISTINCT e.*
FROM all_events e
JOIN all_tags t ON e.id = t.event_id
WHERE e.pubkey IN (?)
AND e.kind IN (?)
AND t.name = 'p' AND t.value = ?
ORDER BY e.created_at DESC, e.id ASC
LIMIT ?;
```
## Recommendation
**The multi-table approach creates significant subscription query complexity that may outweigh its benefits.**
### Key Issues:
1. **REQ filters don't map to event types** - clients filter by kind, author, tags, not storage semantics
2. **UNION query complexity** - much harder to optimize and implement
3. **Subscription management burden** - must monitor multiple tables
4. **Performance uncertainty** - UNION queries may be slower than single table
### Alternative Recommendation:
**Modified Single Table with Event Type Column:**
```sql
CREATE TABLE events (
id TEXT PRIMARY KEY,
pubkey TEXT NOT NULL,
created_at INTEGER NOT NULL,
kind INTEGER NOT NULL,
event_type TEXT NOT NULL, -- 'regular', 'replaceable', 'ephemeral', 'addressable'
content TEXT NOT NULL,
sig TEXT NOT NULL,
tags JSON,
-- Replaceable event fields
replaced_at INTEGER,
-- Addressable event fields
d_tag TEXT,
-- Unique constraints per event type
CONSTRAINT unique_replaceable
UNIQUE (pubkey, kind) WHERE event_type = 'replaceable',
CONSTRAINT unique_addressable
UNIQUE (pubkey, kind, d_tag) WHERE event_type = 'addressable'
);
```
### Benefits:
- **Simple REQ queries** - single table, familiar patterns
- **Type enforcement** - partial unique constraints handle replacement logic
- **Performance** - unified indexes, no UNIONs
- **Implementation simplicity** - minimal changes from current code
- **Future flexibility** - can split tables later if needed
This approach gets the best of both worlds: protocol compliance through constraints, but query simplicity through unified storage.

229
relay.log
View File

@@ -5,7 +5,236 @@
[SUCCESS] WebSocket relay started on ws://127.0.0.1:8888
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 5 rows
[INFO] Total events sent: 5
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 17 rows
[INFO] Total events sent: 17
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 7 rows
[INFO] Total events sent: 7
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND pubkey IN ('aa4fc8665f5696e33db7e1a572e3b0f5b3d615837b0f362dcb1c8068b098c7b4') ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 17 rows
[INFO] Total events sent: 17
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND created_at >= 1756983802 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 6 rows
[INFO] Total events sent: 6
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 17 rows
[INFO] Total events sent: 17
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0,1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 8 rows
[INFO] Total events sent: 8
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 1
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling EVENT message
[SUCCESS] Event stored in database
[SUCCESS] Event stored successfully
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 22 rows
[INFO] Total events sent: 22
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 9 rows
[INFO] Total events sent: 9
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND pubkey IN ('aa4fc8665f5696e33db7e1a572e3b0f5b3d615837b0f362dcb1c8068b098c7b4') ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 22 rows
[INFO] Total events sent: 22
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND created_at >= 1756983945 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 9 rows
[INFO] Total events sent: 9
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 22 rows
[INFO] Total events sent: 22
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (0,1) ORDER BY created_at DESC LIMIT 500
[INFO] Query returned 10 rows
[INFO] Total events sent: 10
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed
[INFO] WebSocket connection established
[INFO] Received WebSocket message
[INFO] Handling REQ message
[INFO] Executing SQL: SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND kind IN (1) ORDER BY created_at DESC LIMIT 1
[INFO] Query returned 1 rows
[INFO] Total events sent: 1
[INFO] Received WebSocket message
[INFO] Subscription closed
[INFO] WebSocket connection closed

View File

@@ -1 +1 @@
320933
417956

BIN
src/main

Binary file not shown.

View File

@@ -81,6 +81,44 @@ void close_database() {
}
}
// Event type classification
typedef enum {
EVENT_TYPE_REGULAR,
EVENT_TYPE_REPLACEABLE,
EVENT_TYPE_EPHEMERAL,
EVENT_TYPE_ADDRESSABLE,
EVENT_TYPE_UNKNOWN
} event_type_t;
event_type_t classify_event_kind(int kind) {
if ((kind >= 1000 && kind < 10000) ||
(kind >= 4 && kind < 45) ||
kind == 1 || kind == 2) {
return EVENT_TYPE_REGULAR;
}
if ((kind >= 10000 && kind < 20000) ||
kind == 0 || kind == 3) {
return EVENT_TYPE_REPLACEABLE;
}
if (kind >= 20000 && kind < 30000) {
return EVENT_TYPE_EPHEMERAL;
}
if (kind >= 30000 && kind < 40000) {
return EVENT_TYPE_ADDRESSABLE;
}
return EVENT_TYPE_UNKNOWN;
}
const char* event_type_to_string(event_type_t type) {
switch (type) {
case EVENT_TYPE_REGULAR: return "regular";
case EVENT_TYPE_REPLACEABLE: return "replaceable";
case EVENT_TYPE_EPHEMERAL: return "ephemeral";
case EVENT_TYPE_ADDRESSABLE: return "addressable";
default: return "unknown";
}
}
// Store event in database
int store_event(cJSON* event) {
if (!g_db || !event) {
@@ -101,15 +139,32 @@ int store_event(cJSON* event) {
return -1;
}
// Classify event type
event_type_t type = classify_event_kind((int)cJSON_GetNumberValue(kind));
// Serialize tags to JSON (use empty array if no tags)
char* tags_json = NULL;
if (tags && cJSON_IsArray(tags)) {
tags_json = cJSON_Print(tags);
} else {
tags_json = strdup("[]");
}
if (!tags_json) {
log_error("Failed to serialize tags to JSON");
return -1;
}
// Prepare SQL statement for event insertion
const char* sql =
"INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
log_error("Failed to prepare event insert statement");
free(tags_json);
return -1;
}
@@ -118,8 +173,10 @@ int store_event(cJSON* event) {
sqlite3_bind_text(stmt, 2, cJSON_GetStringValue(pubkey), -1, SQLITE_STATIC);
sqlite3_bind_int64(stmt, 3, (sqlite3_int64)cJSON_GetNumberValue(created_at));
sqlite3_bind_int(stmt, 4, (int)cJSON_GetNumberValue(kind));
sqlite3_bind_text(stmt, 5, event_type_to_string(type), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 6, cJSON_GetStringValue(content), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 7, cJSON_GetStringValue(sig), -1, SQLITE_STATIC);
sqlite3_bind_text(stmt, 8, tags_json, -1, SQLITE_TRANSIENT);
// Execute statement
rc = sqlite3_step(stmt);
@@ -128,56 +185,17 @@ int store_event(cJSON* event) {
if (rc != SQLITE_DONE) {
if (rc == SQLITE_CONSTRAINT) {
log_warning("Event already exists in database");
free(tags_json);
return 0; // Not an error, just duplicate
}
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Failed to insert event: %s", sqlite3_errmsg(g_db));
log_error(error_msg);
free(tags_json);
return -1;
}
free(tags_json);
log_success("Event stored in database");
return 0;
}
@@ -189,7 +207,7 @@ cJSON* retrieve_event(const char* event_id) {
}
const char* sql =
"SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE id = ?";
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
@@ -210,69 +228,210 @@ cJSON* retrieve_event(const char* event_id) {
cJSON_AddStringToObject(event, "content", (char*)sqlite3_column_text(stmt, 4));
cJSON_AddStringToObject(event, "sig", (char*)sqlite3_column_text(stmt, 5));
// Parse tags JSON
const char* tags_json = (char*)sqlite3_column_text(stmt, 6);
if (tags_json) {
cJSON* tags = cJSON_Parse(tags_json);
if (tags) {
cJSON_AddItemToObject(event, "tags", tags);
} else {
cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
}
} else {
cJSON_AddItemToObject(event, "tags", cJSON_CreateArray());
}
}
sqlite3_finalize(stmt);
return event;
}
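For context, the tags column simply holds whatever JSON cJSON_Print produced for the event's tags array, so a stored value keeps the same array-of-arrays shape the client sent. A hypothetical example (values invented for illustration):
/* Hypothetical stored tags value for a reply note:
 *   [["e","<replied-to event id>"],["p","<mentioned pubkey>"],["subject","Test Event"]]
 * cJSON_Parse() above turns this straight back into the event's "tags" array. */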
// Handle REQ message (subscription) - send events matching filters
int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi) {
log_info("Handling REQ message");
if (!cJSON_IsArray(filters)) {
log_error("REQ filters is not an array");
return 0;
}
int events_sent = 0;
// Process each filter in the array
for (int i = 0; i < cJSON_GetArraySize(filters); i++) {
cJSON* filter = cJSON_GetArrayItem(filters, i);
if (!filter || !cJSON_IsObject(filter)) {
log_warning("Invalid filter object");
continue;
}
// Build SQL query based on filter
char sql[1024] = "SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1";
char* sql_ptr = sql + strlen(sql);
int remaining = sizeof(sql) - strlen(sql);
// Handle kinds filter
cJSON* kinds = cJSON_GetObjectItem(filter, "kinds");
if (kinds && cJSON_IsArray(kinds)) {
int kind_count = cJSON_GetArraySize(kinds);
if (kind_count > 0) {
snprintf(sql_ptr, remaining, " AND kind IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int k = 0; k < kind_count; k++) {
cJSON* kind = cJSON_GetArrayItem(kinds, k);
if (cJSON_IsNumber(kind)) {
if (k > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "%d", (int)cJSON_GetNumberValue(kind));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
}
// Handle authors filter
cJSON* authors = cJSON_GetObjectItem(filter, "authors");
if (authors && cJSON_IsArray(authors)) {
int author_count = cJSON_GetArraySize(authors);
if (author_count > 0) {
snprintf(sql_ptr, remaining, " AND pubkey IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int a = 0; a < author_count; a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
if (a > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(author));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
}
// Handle since filter
cJSON* since = cJSON_GetObjectItem(filter, "since");
if (since && cJSON_IsNumber(since)) {
snprintf(sql_ptr, remaining, " AND created_at >= %ld", (long)cJSON_GetNumberValue(since));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
// Handle until filter
cJSON* until = cJSON_GetObjectItem(filter, "until");
if (until && cJSON_IsNumber(until)) {
snprintf(sql_ptr, remaining, " AND created_at <= %ld", (long)cJSON_GetNumberValue(until));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
// Add ordering and limit
snprintf(sql_ptr, remaining, " ORDER BY created_at DESC");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Handle limit filter
cJSON* limit = cJSON_GetObjectItem(filter, "limit");
if (limit && cJSON_IsNumber(limit)) {
int limit_val = (int)cJSON_GetNumberValue(limit);
if (limit_val > 0 && limit_val <= 5000) {
snprintf(sql_ptr, remaining, " LIMIT %d", limit_val);
}
} else {
// Default limit to prevent excessive queries
snprintf(sql_ptr, remaining, " LIMIT 500");
}
// Debug: Log the SQL query being executed
char debug_msg[1280];
snprintf(debug_msg, sizeof(debug_msg), "Executing SQL: %s", sql);
log_info(debug_msg);
// Execute query and send events
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
if (rc != SQLITE_OK) {
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Failed to prepare subscription query: %s", sqlite3_errmsg(g_db));
log_error(error_msg);
continue;
}
int row_count = 0;
while (sqlite3_step(stmt) == SQLITE_ROW) {
row_count++;
// Build event JSON
cJSON* event = cJSON_CreateObject();
cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
cJSON_AddStringToObject(event, "pubkey", (char*)sqlite3_column_text(stmt, 1));
cJSON_AddNumberToObject(event, "created_at", sqlite3_column_int64(stmt, 2));
cJSON_AddNumberToObject(event, "kind", sqlite3_column_int(stmt, 3));
cJSON_AddStringToObject(event, "content", (char*)sqlite3_column_text(stmt, 4));
cJSON_AddStringToObject(event, "sig", (char*)sqlite3_column_text(stmt, 5));
// Parse tags JSON
const char* tags_json = (char*)sqlite3_column_text(stmt, 6);
cJSON* tags = NULL;
if (tags_json) {
tags = cJSON_Parse(tags_json);
}
if (!tags) {
tags = cJSON_CreateArray();
}
cJSON_AddItemToObject(event, "tags", tags);
// Send EVENT message
cJSON* event_msg = cJSON_CreateArray();
cJSON_AddItemToArray(event_msg, cJSON_CreateString("EVENT"));
cJSON_AddItemToArray(event_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(event_msg, event);
char* msg_str = cJSON_Print(event_msg);
if (msg_str) {
size_t msg_len = strlen(msg_str);
unsigned char* buf = malloc(LWS_PRE + msg_len);
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
lws_write(wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
free(buf);
}
free(msg_str);
}
cJSON_Delete(event_msg);
events_sent++;
}
char row_debug[128];
snprintf(row_debug, sizeof(row_debug), "Query returned %d rows", row_count);
log_info(row_debug);
sqlite3_finalize(stmt);
}
char events_debug[128];
snprintf(events_debug, sizeof(events_debug), "Total events sent: %d", events_sent);
log_info(events_debug);
return events_sent;
}
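Note that the filter translation above splices author pubkeys directly into the SQL text. As a possible hardening step (not part of the current code), the same authors clause could be built with bound parameters instead; a minimal sketch, assuming the events table and the same open sqlite3 handle used elsewhere in this file:

#include <stdio.h>
#include <sqlite3.h>
#include <cJSON.h>   /* header path may differ depending on how cJSON is vendored */

/* Sketch only: prepare "pubkey IN (?,?,...)" with one placeholder per author
 * and bind each pubkey, instead of quoting strings into the SQL text. */
static sqlite3_stmt* prepare_authors_query(sqlite3* db, cJSON* authors) {
    int count = cJSON_GetArraySize(authors);
    if (count <= 0) return NULL;

    char sql[1024];
    int len = snprintf(sql, sizeof(sql),
        "SELECT id, pubkey, created_at, kind, content, sig, tags "
        "FROM events WHERE pubkey IN (");
    for (int i = 0; i < count && len < (int)sizeof(sql) - 64; i++) {
        len += snprintf(sql + len, sizeof(sql) - len, i ? ",?" : "?");
    }
    snprintf(sql + len, sizeof(sql) - len, ") ORDER BY created_at DESC LIMIT 500");

    sqlite3_stmt* stmt = NULL;
    if (sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK) return NULL;

    for (int i = 0; i < count; i++) {
        cJSON* author = cJSON_GetArrayItem(authors, i);
        if (cJSON_IsString(author)) {
            sqlite3_bind_text(stmt, i + 1, cJSON_GetStringValue(author), -1, SQLITE_TRANSIENT);
        }
    }
    return stmt; /* caller runs sqlite3_step() and sqlite3_finalize() */
}

Because SQLITE_TRANSIENT copies each bound string, the cJSON filter object can be freed before the statement is stepped; this is a design sketch, not the relay's current implementation.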
// Handle EVENT message (publish)
@@ -364,13 +523,25 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
} else if (strcmp(msg_type, "REQ") == 0) {
// Handle REQ message
cJSON* sub_id = cJSON_GetArrayItem(json, 1);
if (sub_id && cJSON_IsString(sub_id)) {
const char* subscription_id = cJSON_GetStringValue(sub_id);
strncpy(pss->subscription_id, subscription_id, sizeof(pss->subscription_id) - 1);
pss->subscription_id[sizeof(pss->subscription_id) - 1] = '\0'; // ensure termination
// Create array of filter objects from position 2 onwards
cJSON* filters = cJSON_CreateArray();
int json_size = cJSON_GetArraySize(json);
for (int i = 2; i < json_size; i++) {
cJSON* filter = cJSON_GetArrayItem(json, i);
if (filter) {
cJSON_AddItemToArray(filters, cJSON_Duplicate(filter, 1));
}
}
handle_req_message(subscription_id, filters, wsi);
// Clean up the filters array we created
cJSON_Delete(filters);
// Send EOSE (End of Stored Events)
cJSON* eose_response = cJSON_CreateArray();
@@ -462,14 +633,7 @@ int start_websocket_relay() {
log_success("WebSocket relay started on ws://127.0.0.1:8888");
// Main event loop with proper signal handling
fd_set rfds;
struct timeval tv;
while (g_server_running) {
FD_ZERO(&rfds);
tv.tv_sec = 1;
tv.tv_usec = 0;
int result = lws_service(ws_context, 1000);
if (result < 0) {

View File

@@ -1,7 +1,7 @@
#!/bin/bash
# Comprehensive C-Relay Test - Test event types and subscriptions
# Uses nak to generate and publish various event types, then tests subscriptions
set -e # Exit on any error
@@ -16,7 +16,6 @@ RESET='\033[0m'
# Test configuration
RELAY_URL="ws://127.0.0.1:8888"
TEST_PRIVATE_KEY="nsec1j4c6269y9w0q2er2xjw8sv2ehyrtfxq3jwgdlxj6qfn8z4gjsq5qfvfk99"
TEST_CONTENT="Hello from C-Relay test!"
# Print functions
print_header() {
@@ -39,74 +38,244 @@ print_info() {
echo -e "${BLUE}[INFO]${RESET} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${RESET} $1"
}
# Global arrays to store event IDs for subscription tests
declare -a REGULAR_EVENT_IDS=()
declare -a REPLACEABLE_EVENT_IDS=()
declare -a EPHEMERAL_EVENT_IDS=()
declare -a ADDRESSABLE_EVENT_IDS=()
# Helper function to publish event and extract ID
publish_event() {
local event_json="$1"
local event_type="$2"
local description="$3"
# Extract event ID
local event_id=$(echo "$event_json" | jq -r '.id' 2>/dev/null)
if [[ "$event_id" == "null" || -z "$event_id" ]]; then
print_error "Could not extract event ID from $description"
return 1
fi
print_info "Publishing $description..."
# Create EVENT message in Nostr format
local event_message="[\"EVENT\",$event_json]"
# Publish to relay
local response=""
if command -v websocat &> /dev/null; then
response=$(echo "$event_message" | timeout 5s websocat "$RELAY_URL" 2>&1 || echo "Connection failed")
else
print_error "websocat not found - required for testing"
return 1
fi
# Check response
if [[ "$response" == *"Connection failed"* ]]; then
print_error "Failed to connect to relay for $description"
return 1
elif [[ "$response" == *"true"* ]]; then
print_success "$description uploaded (ID: ${event_id:0:16}...)"
# Store event ID in appropriate array
case "$event_type" in
"regular") REGULAR_EVENT_IDS+=("$event_id") ;;
"replaceable") REPLACEABLE_EVENT_IDS+=("$event_id") ;;
"ephemeral") EPHEMERAL_EVENT_IDS+=("$event_id") ;;
"addressable") ADDRESSABLE_EVENT_IDS+=("$event_id") ;;
esac
echo # Add blank line for readability
return 0
else
print_warning "$description might have failed: $response"
echo # Add blank line for readability
return 1
fi
}
# Test subscription with filters
test_subscription() {
local sub_id="$1"
local filter="$2"
local description="$3"
local expected_count="$4"
print_step "Testing subscription: $description"
# Create REQ message
local req_message="[\"REQ\",\"$sub_id\",$filter]"
print_info "Testing filter: $filter"
# Send subscription and collect events
local response=""
if command -v websocat &> /dev/null; then
response=$(echo -e "$req_message\n[\"CLOSE\",\"$sub_id\"]" | timeout 3s websocat "$RELAY_URL" 2>/dev/null || echo "")
fi
# Count EVENT responses (lines containing ["EVENT","sub_id",...])
local event_count=0
if [[ -n "$response" ]]; then
event_count=$(echo "$response" | grep -c "\"EVENT\"" 2>/dev/null || true)
event_count=${event_count:-0}
fi
if [[ "$expected_count" == "any" ]]; then
if [[ $event_count -gt 0 ]]; then
print_success "$description - Found $event_count events"
else
print_warning "$description - No events found"
fi
elif [[ $event_count -eq $expected_count ]]; then
print_success "$description - Found expected $event_count events"
else
print_warning "$description - Expected $expected_count events, found $event_count"
fi
# Show a few sample events for verification (first 2)
if [[ $event_count -gt 0 && "$description" == "All events" ]]; then
print_info "Sample events (first 2):"
echo "$response" | grep "\"EVENT\"" | head -2 | while IFS= read -r line; do
local event_content=$(echo "$line" | jq -r '.[2].content' 2>/dev/null || echo "N/A")
local event_kind=$(echo "$line" | jq -r '.[2].kind' 2>/dev/null || echo "N/A")
local event_id=$(echo "$line" | jq -r '.[2].id' 2>/dev/null || echo "N/A")
echo " - ID: ${event_id:0:16}... Kind: $event_kind Content: ${event_content:0:30}..."
done
fi
echo # Add blank line for readability
return 0
}
# Main test function
run_comprehensive_test() {
print_header "C-Relay Comprehensive Test"
# Check dependencies
print_step "Checking dependencies..."
if ! command -v nak &> /dev/null; then
print_error "nak command not found"
print_info "Please install nak: go install github.com/fiatjaf/nak@latest"
return 1
fi
print_success "nak found"
if ! command -v websocat &> /dev/null; then
print_error "websocat command not found"
print_info "Please install websocat for testing"
return 1
fi
if ! command -v jq &> /dev/null; then
print_error "jq command not found"
print_info "Please install jq for JSON processing"
return 1
fi
print_success "All dependencies found"
print_header "PHASE 1: Publishing Various Event Types"
# Test 1: Regular Events (kind 1)
print_step "Creating regular events (kind 1)..."
local regular1=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Regular event #1" -k 1 --ts $(($(date +%s) - 100)) -t "type=regular" -t "test=phase1" 2>/dev/null)
local regular2=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Regular event #2 with tags" -k 1 --ts $(($(date +%s) - 90)) -e "previous_event_id" -p "test_pubkey" -t "type=regular" -t "test=phase1" 2>/dev/null)
publish_event "$regular1" "regular" "Regular event #1"
publish_event "$regular2" "regular" "Regular event #2"
# Test 2: Replaceable Events (kind 0 - metadata)
print_step "Creating replaceable events (kind 0)..."
local replaceable1=$(nak event --sec "$TEST_PRIVATE_KEY" -c '{"name":"Test User","about":"Testing C-Relay"}' -k 0 --ts $(($(date +%s) - 80)) -t "type=replaceable" 2>/dev/null)
local replaceable2=$(nak event --sec "$TEST_PRIVATE_KEY" -c '{"name":"Test User Updated","about":"Updated profile"}' -k 0 --ts $(($(date +%s) - 70)) -t "type=replaceable" 2>/dev/null)
publish_event "$replaceable1" "replaceable" "Replaceable event #1 (metadata)"
publish_event "$replaceable2" "replaceable" "Replaceable event #2 (metadata update)"
# Test 3: Ephemeral Events (kind 20000+)
print_step "Creating ephemeral events (kind 20001)..."
local ephemeral1=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Ephemeral event - should not be stored permanently" -k 20001 --ts $(date +%s) -t "type=ephemeral" 2>/dev/null)
publish_event "$ephemeral1" "ephemeral" "Ephemeral event"
# Test 4: Addressable Events (kind 30000+)
print_step "Creating addressable events (kind 30001)..."
local addressable1=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Addressable event with d-tag" -k 30001 --ts $(($(date +%s) - 50)) -t "d=test-article" -t "type=addressable" 2>/dev/null)
local addressable2=$(nak event --sec "$TEST_PRIVATE_KEY" -c "Updated addressable event" -k 30001 --ts $(($(date +%s) - 40)) -t "d=test-article" -t "type=addressable" -t "updated=true" 2>/dev/null)
publish_event "$addressable1" "addressable" "Addressable event #1"
publish_event "$addressable2" "addressable" "Addressable event #2 (update)"
# Brief pause to let events settle
sleep 2
print_header "PHASE 2: Testing Subscriptions and Filters"
# Test subscription filters
print_step "Testing various subscription filters..."
# Test 1: Get all events
test_subscription "test_all" '{}' "All events" "any"
# Test 2: Get events by kind
test_subscription "test_kind1" '{"kinds":[1]}' "Kind 1 events only" "2"
test_subscription "test_kind0" '{"kinds":[0]}' "Kind 0 events only" "any"
# Test 3: Get events by author (pubkey)
local test_pubkey=$(echo "$regular1" | jq -r '.pubkey' 2>/dev/null)
test_subscription "test_author" "{\"authors\":[\"$test_pubkey\"]}" "Events by specific author" "any"
# Test 4: Get recent events (time-based)
local recent_timestamp=$(($(date +%s) - 200))
test_subscription "test_recent" "{\"since\":$recent_timestamp}" "Recent events" "any"
# Test 5: Get events with specific tags
test_subscription "test_tag_type" '{"#type":["regular"]}' "Events with type=regular tag" "any"
# Test 6: Multiple kinds
test_subscription "test_multi_kinds" '{"kinds":[0,1]}' "Multiple kinds (0,1)" "any"
# Test 7: Limit results
test_subscription "test_limit" '{"kinds":[1],"limit":1}' "Limited to 1 event" "1"
print_header "PHASE 3: Database Verification"
# Check what's actually stored in the database
print_step "Verifying database contents..."
if command -v sqlite3 &> /dev/null; then
print_info "Events by type in database:"
sqlite3 db/c_nostr_relay.db "SELECT event_type, COUNT(*) as count FROM events GROUP BY event_type;" | while read line; do
echo " $line"
done
print_info "Recent events in database:"
sqlite3 db/c_nostr_relay.db "SELECT substr(id, 1, 16) || '...' as short_id, event_type, kind, substr(content, 1, 30) || '...' as short_content FROM events ORDER BY created_at DESC LIMIT 5;" | while read line; do
echo " $line"
done
print_success "Database verification complete"
else
print_warning "sqlite3 not available for database verification"
fi
return 0
}
# Run the comprehensive test
print_header "Starting C-Relay Comprehensive Test Suite"
echo
if run_comprehensive_test; then
echo
print_success "All tests completed successfully!"
print_info "The C-Relay hybrid schema implementation is working correctly"
exit 0
else
echo
print_error "Test failed"
print_error "Some tests failed"
exit 1
fi