Compare commits

..

13 Commits

Author SHA1 Message Date
Your Name
d449513861 Add MUSL static binary build system using Alpine Docker
- Create Dockerfile.alpine-musl for truly portable static binaries
- Update build_static.sh to use Docker with sudo fallback
- Fix source code portability issues for MUSL:
  * Add missing headers in config.c, dm_admin.c
  * Remove glibc-specific headers in nip009.c, subscriptions.c
- Update nostr_core_lib submodule with fortification fix
- Add comprehensive documentation in docs/musl_static_build.md

Binary characteristics:
- Size: 7.6MB (vs 12MB+ for glibc static)
- Dependencies: Zero (truly portable)
- Compatibility: Any Linux distribution
- Build time: ~2 minutes with Docker caching

Resolves fortification symbol issues (__snprintf_chk, __fprintf_chk)
that prevented MUSL static linking.
2025-10-11 10:17:20 -04:00
Your Name
6709e229b3 v0.7.7 - Prevent sql attacks and rate limiting on subscriptions 2025-10-10 15:44:10 -04:00
Your Name
00a8f16262 v0.7.6 - Delete more old debugging prints 2025-10-10 13:38:18 -04:00
Your Name
00d16f8615 v0.7.5 - Complete debug logging cleanup - remove all remaining DEBUG messages from websockets.c, config.c, and dm_admin.c 2025-10-10 10:52:14 -04:00
Your Name
c90676d2b2 v0.7.4 - Remove excessive debug logging from entire codebase - preserve user-facing error logging 2025-10-10 10:21:30 -04:00
Your Name
b89c011ad5 v0.7.2 - -m 2025-10-10 06:53:30 -04:00
Your Name
c3de31aa88 v0.7.1 - Implemented static binary build system for cross-distribution compatibility 2025-10-09 14:36:32 -04:00
Your Name
b6df0be865 v0.6.0 - Fixed binary upload in release script - now shows upload errors and handles failures properly 2025-10-09 12:59:23 -04:00
Your Name
a89f84f76e v0.5.0 - New release 2025-10-09 12:51:53 -04:00
Your Name
5a916cc221 Reupload 2025-10-09 10:43:42 -04:00
Your Name
dcf421ff93 v0.4.13 - DM system appears fully functional 2025-10-08 07:11:22 -04:00
Your Name
d655258311 v0.4.12 - Refactor NIP-17 admin commands: eliminate ~400 lines of duplicated code with unified helper functions, fix SQL query bugs, and remove unused parameters 2025-10-06 18:49:25 -04:00
Your Name
f6d13d4318 v0.4.11 - Fixed nasty DM bug 2025-10-06 10:06:24 -04:00
31 changed files with 4551 additions and 1967 deletions

109
Dockerfile.alpine-musl Normal file
View File

@@ -0,0 +1,109 @@
# syntax=docker/dockerfile:1
# Alpine-based MUSL static binary builder for C-Relay
# Produces truly portable binaries with zero runtime dependencies
FROM alpine:3.19 AS builder

# Build dependencies (alphabetized for diffability).
# The *-static packages are required because the final link is fully -static.
RUN apk add --no-cache \
    autoconf \
    automake \
    bash \
    build-base \
    cmake \
    curl-dev \
    curl-static \
    git \
    libtool \
    linux-headers \
    musl-dev \
    openssl-dev \
    openssl-libs-static \
    pkgconfig \
    sqlite-dev \
    sqlite-static \
    wget \
    zlib-dev \
    zlib-static

# Build libsecp256k1 static.
# Pinned to a release tag for reproducible builds — the previous unpinned
# clone of master made the image depend on whatever HEAD happened to be.
WORKDIR /tmp/secp256k1
RUN git clone --depth 1 --branch v0.5.1 https://github.com/bitcoin-core/secp256k1.git . && \
    ./autogen.sh && \
    ./configure --enable-static --disable-shared --prefix=/usr CFLAGS="-fPIC" && \
    make -j$(nproc) && \
    make install && \
    rm -rf /tmp/secp256k1

# Build libwebsockets static with minimal features (no test apps, no HTTP/2,
# no external event loops) to keep the binary small.
WORKDIR /tmp/libwebsockets
RUN git clone --depth 1 --branch v4.3.3 https://github.com/warmcat/libwebsockets.git . && \
    cmake -S . -B build \
      -DLWS_WITH_STATIC=ON \
      -DLWS_WITH_SHARED=OFF \
      -DLWS_WITH_SSL=ON \
      -DLWS_WITHOUT_TESTAPPS=ON \
      -DLWS_WITHOUT_TEST_SERVER=ON \
      -DLWS_WITHOUT_TEST_CLIENT=ON \
      -DLWS_WITHOUT_TEST_PING=ON \
      -DLWS_WITH_HTTP2=OFF \
      -DLWS_WITH_LIBUV=OFF \
      -DLWS_WITH_LIBEVENT=OFF \
      -DLWS_IPV6=ON \
      -DCMAKE_BUILD_TYPE=Release \
      -DCMAKE_INSTALL_PREFIX=/usr \
      -DCMAKE_C_FLAGS="-fPIC" && \
    make -C build -j$(nproc) && \
    make -C build install && \
    rm -rf /tmp/libwebsockets

# Application build tree.
WORKDIR /build

# Copy c-relay source (keep the context small with a .dockerignore; the .git
# directory must be included because submodules are initialized below)
COPY . /build/

# Clean up any stale submodule references (nips directory is not a submodule)
RUN git rm --cached nips 2>/dev/null || true

# Initialize submodules and build nostr_core_lib with required NIPs.
# Fortification is disabled because glibc's __*_chk symbols do not exist in MUSL.
# The grep guard makes the build fail fast if the sed pattern stops matching
# build.sh (previously a silent no-op would ship fortified objects).
# NIPs: 001(Basic), 006(Keys), 013(PoW), 017(DMs), 019(Bech32), 044(Encryption), 059(Gift Wrap - required by NIP-17)
RUN git submodule update --init --recursive && \
    cd nostr_core_lib && \
    chmod +x build.sh && \
    sed -i 's/CFLAGS="-Wall -Wextra -std=c99 -fPIC -O2"/CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"/' build.sh && \
    grep -q -- '-D_FORTIFY_SOURCE=0' build.sh && \
    rm -f *.o *.a && \
    ./build.sh --nips=1,6,13,17,19,44,59

# Build c-relay with full static linking.
# -U_FORTIFY_SOURCE/-D_FORTIFY_SOURCE=0 avoids glibc-only __*_chk symbols.
# NOTE(review): libnostr_core_x64.a hardcodes x64 — confirm build.sh emits
# this name on non-x86_64 platforms before using this image with buildx arm64.
RUN gcc -static -O2 -Wall -Wextra -std=c99 \
    -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
    -I. -Inostr_core_lib -Inostr_core_lib/nostr_core \
    -Inostr_core_lib/cjson -Inostr_core_lib/nostr_websocket \
    src/main.c src/config.c src/dm_admin.c src/request_validator.c \
    src/nip009.c src/nip011.c src/nip013.c src/nip040.c src/nip042.c \
    src/websockets.c src/subscriptions.c src/api.c src/embedded_web_content.c \
    -o /build/c_relay_static_musl \
    nostr_core_lib/libnostr_core_x64.a \
    -lwebsockets -lssl -lcrypto -lsqlite3 -lsecp256k1 \
    -lcurl -lz -lpthread -lm -ldl

# Strip symbols to reduce binary size
RUN strip /build/c_relay_static_musl

# Verify it's truly static (ldd errors out on a static binary, which the
# subshell turns into the "Binary is static" message instead of a build failure)
RUN echo "=== Binary Information ===" && \
    file /build/c_relay_static_musl && \
    ls -lh /build/c_relay_static_musl && \
    echo "=== Checking for dynamic dependencies ===" && \
    (ldd /build/c_relay_static_musl 2>&1 || echo "Binary is static") && \
    echo "=== Build complete ==="

# Output stage - just the binary (scratch: no shell, no libc, nothing else)
FROM scratch AS output
COPY --from=builder /build/c_relay_static_musl /c_relay_static_musl

View File

@@ -197,4 +197,21 @@ help:
@echo " make init-db # Set up database"
@echo " make force-version # Force regenerate main.h from git"
# Build fully static MUSL binaries using Docker (requires docker buildx).
# NOTE(review): recipe lines below must be TAB-indented in the real Makefile;
# leading whitespace was lost in this view.
static-musl-x86_64:
@echo "Building fully static MUSL binary for x86_64..."
docker buildx build --platform linux/amd64 -f examples/deployment/static-builder.Dockerfile -t c-relay-static-builder-x86_64 --load .
# NOTE(review): 'sh -c' needs a shell inside the built image — confirm the
# final stage of static-builder.Dockerfile is not FROM scratch.
docker run --rm -v $(PWD)/build:/output c-relay-static-builder-x86_64 sh -c "cp /c_relay_static_musl_x86_64 /output/"
@echo "Static binary created: build/c_relay_static_musl_x86_64"
static-musl-arm64:
@echo "Building fully static MUSL binary for ARM64..."
docker buildx build --platform linux/arm64 -f examples/deployment/static-builder.Dockerfile -t c-relay-static-builder-arm64 --load .
# NOTE(review): this copies the x86_64-named path out of the ARM64 image and
# renames it — confirm static-builder.Dockerfile hardcodes
# /c_relay_static_musl_x86_64 for every platform, otherwise this cp fails.
docker run --rm -v $(PWD)/build:/output c-relay-static-builder-arm64 sh -c "cp /c_relay_static_musl_x86_64 /output/c_relay_static_musl_arm64"
@echo "Static binary created: build/c_relay_static_musl_arm64"
# Aggregate target: build both architectures.
static-musl: static-musl-x86_64 static-musl-arm64
@echo "Built static MUSL binaries for both architectures"
.PHONY: static-musl-x86_64 static-musl-arm64 static-musl
.PHONY: all x86 arm64 test init-db clean clean-all install-deps install-cross-tools install-arm64-deps check-toolchain help force-version

View File

@@ -1,6 +1,6 @@
# C-Nostr Relay
A high-performance Nostr relay implemented in C with SQLite backend, featuring a revolutionary **zero-configuration** approach using event-based configuration management.
A high-performance Nostr relay implemented in C with SQLite backend, featuring nostr event-based management.
## Supported NIPs
@@ -22,6 +22,69 @@ Do NOT modify the formatting, add emojis, or change the text. Keep the simple fo
- [x] NIP-50: Keywords filter
- [x] NIP-70: Protected Events
## Quick Start
Get your C-Relay up and running in minutes with a static binary (no dependencies required):
### 1. Download Static Binary
Download the latest static release from the [releases page](https://git.laantungir.net/laantungir/c-relay/releases):
```bash
# Static binary - works on all Linux distributions (no dependencies)
wget https://git.laantungir.net/laantungir/c-relay/releases/download/v0.6.0/c-relay-v0.6.0-linux-x86_64-static
chmod +x c-relay-v0.6.0-linux-x86_64-static
mv c-relay-v0.6.0-linux-x86_64-static c-relay
```
### 2. Start the Relay
Simply run the binary - no configuration files needed:
```bash
./c-relay
```
On first startup, you'll see:
- **Admin Private Key**: Save this securely! You'll need it for administration
- **Relay Public Key**: Your relay's identity on the Nostr network
- **Port Information**: Default is 8888, or the next available port
### 3. Access the Web Interface
Open your browser and navigate to:
```
http://localhost:8888/api/
```
The web interface provides:
- Real-time configuration management
- Database statistics dashboard
- Auth rules management
- Secure admin authentication with your Nostr identity
### 4. Test Your Relay
Test basic connectivity:
```bash
# Test WebSocket connection
curl -H "Accept: application/nostr+json" http://localhost:8888
# Test with a Nostr client
# Add ws://localhost:8888 to your client's relay list
```
### 5. Configure Your Relay (Optional)
Use the web interface or send admin commands to customize:
- Relay name and description
- Authentication rules (whitelist/blacklist)
- Connection limits
- Proof-of-work requirements
**That's it!** Your relay is now running with zero configuration required. The event-based configuration system means you can adjust all settings through the web interface or admin API without editing config files.
## Web Admin Interface
C-Relay includes a **built-in web-based administration interface** accessible at `http://localhost:8888/api/`. The interface provides:
@@ -34,6 +97,7 @@ C-Relay includes a **built-in web-based administration interface** accessible at
The web interface serves embedded static files with no external dependencies and includes proper CORS headers for browser compatibility.
## Administrator API
C-Relay uses an innovative **event-based administration system** where all configuration and management commands are sent as signed Nostr events using the admin private key generated during first startup. All admin commands use **NIP-44 encrypted command arrays** for security and compatibility.
@@ -269,7 +333,7 @@ All admin commands return **signed EVENT responses** via WebSocket following sta
In addition to the above admin API, c-relay allows the administrator to direct message the relay to get information or control some settings. As long as the administrator is signed in with any nostr client that allows sending nip-17 direct messages (DMs), they can control the relay.
This is possible because the relay is a full nostr citizen with its own private and public key.
This is possible because the relay is a full nostr citizen with its own private and public key, and it knows the administrator's public key.

View File

@@ -187,12 +187,47 @@ button:disabled {
.config-table th {
font-weight: bold;
height: 40px; /* Double the default height */
line-height: 40px; /* Center text vertically */
}
.config-table tr:hover {
background-color: var(--muted-color);
}
/* Inline config value inputs - remove borders and padding to fit seamlessly in table cells */
.config-value-input {
border: none;
padding: 2px 4px;
background: transparent;
width: 100%;
min-height: auto;
font-family: inherit;
font-size: inherit;
color: inherit;
border-radius: 0;
}
.config-value-input:focus {
border: 1px solid var(--accent-color);
background: var(--secondary-color);
outline: none;
}
/* Config actions cell - clickable for saving */
.config-actions-cell {
cursor: pointer;
transition: all 0.2s ease;
text-align: center;
font-weight: bold;
vertical-align: middle;
}
.config-actions-cell:hover {
border: 1px solid var(--accent-color);
background-color: var(--muted-color);
}
.json-display {
background-color: var(--secondary-color);
border: var(--border-width) solid var(--border-color);

View File

@@ -58,7 +58,7 @@
<div class="inline-buttons">
<button type="button" id="connect-relay-btn">CONNECT TO RELAY</button>
<button type="button" id="disconnect-relay-btn" disabled>DISCONNECT</button>
<button type="button" id="test-websocket-btn" disabled>TEST WEBSOCKET</button>
<button type="button" id="restart-relay-btn" disabled>RESTART RELAY</button>
</div>
<div class="status disconnected" id="relay-connection-status">NOT CONNECTED</div>
@@ -93,41 +93,25 @@
<div id="div_config" class="section flex-section" style="display: none;">
<h2>RELAY CONFIGURATION</h2>
<div id="config-display" class="hidden">
<div id="config-view-mode">
<div class="config-table-container">
<table class="config-table" id="config-table">
<thead>
<tr>
<th>Parameter</th>
<th>Value</th>
<th>Actions</th>
</tr>
</thead>
<tbody id="config-table-body">
</tbody>
</table>
</div>
<div class="inline-buttons">
<button type="button" id="edit-config-btn">EDIT CONFIGURATION</button>
<button type="button" id="copy-config-btn">COPY CONFIGURATION</button>
<button type="button" id="fetch-config-btn">REFRESH</button>
</div>
<div class="config-table-container">
<table class="config-table" id="config-table">
<thead>
<tr>
<th>Parameter</th>
<th>Value</th>
<th>Actions</th>
</tr>
</thead>
<tbody id="config-table-body">
</tbody>
</table>
</div>
<div id="config-edit-mode" class="hidden">
<h3>Edit Configuration</h3>
<div id="config-form" class="section">
<!-- Dynamic form will be generated here -->
</div>
<div class="inline-buttons">
<button type="button" id="save-config-btn">SAVE & PUBLISH</button>
<button type="button" id="cancel-edit-btn">CANCEL</button>
</div>
<div class="inline-buttons">
<button type="button" id="fetch-config-btn">REFRESH</button>
</div>
</div>
</div>
@@ -316,8 +300,31 @@
</div>
</div>
<!-- NIP-17 DIRECT MESSAGES Section -->
<div class="section" id="nip17DMSection" style="display: none;">
<div class="section-header">
<h2>NIP-17 DIRECT MESSAGES</h2>
</div>
<!-- Outbox -->
<div class="input-group">
<label for="dm-outbox">Send Message to Relay:</label>
<textarea id="dm-outbox" rows="4" placeholder="Enter your message to send to the relay..."></textarea>
</div>
<!-- Send Button -->
<div class="input-group">
<button type="button" id="send-dm-btn">SEND MESSAGE</button>
</div>
<!-- Inbox -->
<div class="input-group">
<label>Received Messages from Relay:</label>
<div id="dm-inbox" class="log-panel" style="height: 200px;">
<div class="log-entry">No messages received yet.</div>
</div>
</div>
</div>
<!-- Load the official nostr-tools bundle first -->
<!-- <script src="https://laantungir.net/nostr-login-lite/nostr.bundle.js"></script> -->

View File

@@ -41,16 +41,14 @@
const relayConnectionStatus = document.getElementById('relay-connection-status');
const connectRelayBtn = document.getElementById('connect-relay-btn');
const disconnectRelayBtn = document.getElementById('disconnect-relay-btn');
const testWebSocketBtn = document.getElementById('test-websocket-btn');
const restartRelayBtn = document.getElementById('restart-relay-btn');
const configDisplay = document.getElementById('config-display');
const configViewMode = document.getElementById('config-view-mode');
const configEditMode = document.getElementById('config-edit-mode');
const configTableBody = document.getElementById('config-table-body');
const configForm = document.getElementById('config-form');
const copyConfigBtn = document.getElementById('copy-config-btn');
const editConfigBtn = document.getElementById('edit-config-btn');
const saveConfigBtn = document.getElementById('save-config-btn');
const cancelEditBtn = document.getElementById('cancel-edit-btn');
// NIP-17 DM elements
const dmOutbox = document.getElementById('dm-outbox');
const dmInbox = document.getElementById('dm-inbox');
const sendDmBtn = document.getElementById('send-dm-btn');
// Utility functions
function log(message, type = 'INFO') {
@@ -63,6 +61,33 @@
// UI logging removed - using console only
}
// Utility functions
function log(message, type = 'INFO') {
const timestamp = new Date().toISOString().split('T')[1].split('.')[0];
const logMessage = `${timestamp} [${type}]: ${message}`;
// Always log to browser console so we don't lose logs on refresh
console.log(logMessage);
// UI logging removed - using console only
}
// NIP-59 helper: pick a pseudo-random timestamp from the last two days so
// created_at cannot be used for timing analysis of gift-wrapped events.
function randomNow() {
    const windowSeconds = 2 * 24 * 60 * 60; // 172800 seconds
    const nowSeconds = Math.round(Date.now() / 1000);
    const backdate = Math.random() * windowSeconds;
    return Math.round(nowSeconds - backdate);
}
// Parse a JSON string, returning null (and logging the error) instead of
// throwing when the input is malformed.
function safeJsonParse(jsonString) {
    let parsed = null;
    try {
        parsed = JSON.parse(jsonString);
    } catch (error) {
        console.error('JSON parse error:', error);
    }
    return parsed;
}
// ================================
// NIP-11 RELAY CONNECTION FUNCTIONS
// ================================
@@ -344,28 +369,28 @@
relayConnectionStatus.className = 'status connected';
connectRelayBtn.disabled = true;
disconnectRelayBtn.disabled = true;
testWebSocketBtn.disabled = true;
restartRelayBtn.disabled = true;
break;
case 'connected':
relayConnectionStatus.textContent = 'CONNECTED';
relayConnectionStatus.className = 'status connected';
connectRelayBtn.disabled = true;
disconnectRelayBtn.disabled = false;
testWebSocketBtn.disabled = false;
restartRelayBtn.disabled = false;
break;
case 'disconnected':
relayConnectionStatus.textContent = 'NOT CONNECTED';
relayConnectionStatus.className = 'status disconnected';
connectRelayBtn.disabled = false;
disconnectRelayBtn.disabled = true;
testWebSocketBtn.disabled = true;
restartRelayBtn.disabled = true;
break;
case 'error':
relayConnectionStatus.textContent = 'CONNECTION FAILED';
relayConnectionStatus.className = 'status error';
connectRelayBtn.disabled = false;
disconnectRelayBtn.disabled = true;
testWebSocketBtn.disabled = true;
restartRelayBtn.disabled = true;
break;
}
}
@@ -582,10 +607,12 @@
function updateAdminSectionsVisibility() {
const divConfig = document.getElementById('div_config');
const authRulesSection = document.getElementById('authRulesSection');
const nip17DMSection = document.getElementById('nip17DMSection');
const shouldShow = isLoggedIn && isRelayConnected;
if (divConfig) divConfig.style.display = shouldShow ? 'block' : 'none';
if (authRulesSection) authRulesSection.style.display = shouldShow ? 'block' : 'none';
if (nip17DMSection) nip17DMSection.style.display = shouldShow ? 'block' : 'none';
}
// Show main interface after login
@@ -736,30 +763,105 @@
subscriptionId = generateSubId();
console.log(`Generated subscription ID: ${subscriptionId}`);
// Subscribe to kind 23457 events (admin response events)
console.log(`User pubkey ${userPubkey}`)
// Subscribe to kind 23457 events (admin response events), kind 4 (NIP-04 DMs), and kind 1059 (NIP-17 GiftWrap)
const subscription = relayPool.subscribeMany([url], [{
since: Math.floor(Date.now() / 1000),
kinds: [23457],
authors: [getRelayPubkey()], // Only listen to responses from the relay
"#p": [userPubkey], // Only responses directed to this user
limit: 50
}, {
since: Math.floor(Date.now() / 1000),
kinds: [4], // NIP-04 Direct Messages
authors: [getRelayPubkey()], // Only listen to DMs from the relay
"#p": [userPubkey], // Only DMs directed to this user
limit: 50
}, {
kinds: [1059], // NIP-17 GiftWrap events
"#p": [userPubkey], // Only GiftWrap events addressed to this user
limit: 50
}], {
onevent(event) {
console.log('=== ADMIN RESPONSE EVENT RECEIVED VIA SIMPLEPOOL ===');
async onevent(event) {
console.log('=== EVENT RECEIVED VIA SIMPLEPOOL ===');
console.log('Event data:', event);
console.log('Event kind:', event.kind);
console.log('Event tags:', event.tags);
console.log('Event pubkey:', event.pubkey);
console.log('=== END ADMIN RESPONSE ===');
console.log('=== END EVENT ===');
// Log all received messages for testing
if (typeof logTestEvent === 'function') {
logTestEvent('RECV', `Admin response event: ${JSON.stringify(event)}`, 'EVENT');
// Handle NIP-04 DMs
if (event.kind === 4) {
console.log('=== NIP-04 DM RECEIVED ===');
try {
// Decrypt the DM content
const decryptedContent = await window.nostr.nip04.decrypt(event.pubkey, event.content);
log(`Received NIP-04 DM from relay: ${decryptedContent.substring(0, 50)}...`, 'INFO');
// Add to inbox
const timestamp = new Date(event.created_at * 1000).toLocaleString();
addMessageToInbox('received', decryptedContent, timestamp);
// Log for testing
if (typeof logTestEvent === 'function') {
logTestEvent('RECV', `NIP-04 DM: ${decryptedContent}`, 'DM');
}
} catch (decryptError) {
log(`Failed to decrypt NIP-04 DM: ${decryptError.message}`, 'ERROR');
if (typeof logTestEvent === 'function') {
logTestEvent('ERROR', `Failed to decrypt DM: ${decryptError.message}`, 'DM');
}
}
return;
}
// Process admin response event
processAdminResponse(event);
// Handle NIP-17 GiftWrap DMs
if (event.kind === 1059) {
console.log('=== NIP-17 GIFTWRAP RECEIVED ===');
try {
// Step 1: Unwrap gift wrap to get seal
const sealJson = await window.nostr.nip44.decrypt(event.pubkey, event.content);
const seal = safeJsonParse(sealJson);
if (!seal || seal.kind !== 13) {
throw new Error('Unwrapped content is not a valid seal (kind 13)');
}
// Step 2: Unseal to get rumor
const rumorJson = await window.nostr.nip44.decrypt(seal.pubkey, seal.content);
const rumor = safeJsonParse(rumorJson);
if (!rumor || rumor.kind !== 14) {
throw new Error('Unsealed content is not a valid rumor (kind 14)');
}
log(`Received NIP-17 DM from relay: ${rumor.content.substring(0, 50)}...`, 'INFO');
// Add to inbox
const timestamp = new Date(event.created_at * 1000).toLocaleString();
addMessageToInbox('received', rumor.content, timestamp);
// Log for testing
if (typeof logTestEvent === 'function') {
logTestEvent('RECV', `NIP-17 DM: ${rumor.content}`, 'DM');
}
} catch (unwrapError) {
log(`Failed to unwrap NIP-17 DM: ${unwrapError.message}`, 'ERROR');
if (typeof logTestEvent === 'function') {
logTestEvent('ERROR', `Failed to unwrap DM: ${unwrapError.message}`, 'DM');
}
}
return;
}
// Handle admin response events (kind 23457)
if (event.kind === 23457) {
// Log all received messages for testing
if (typeof logTestEvent === 'function') {
logTestEvent('RECV', `Admin response event: ${JSON.stringify(event)}`, 'EVENT');
}
// Process admin response event
processAdminResponse(event);
}
},
oneose() {
console.log('EOSE received - End of stored events');
@@ -1270,32 +1372,61 @@
// Clear existing table
configTableBody.innerHTML = '';
// Display basic event info
const basicInfo = [
['Event ID', event.id],
['Public Key', event.pubkey],
['Created At', new Date(event.created_at * 1000).toISOString()],
['Kind', event.kind],
['Content', event.content]
];
console.log(`Adding ${basicInfo.length} basic info rows`);
basicInfo.forEach(([key, value]) => {
const row = document.createElement('tr');
row.innerHTML = `<td>${key}</td><td>${value}</td><td>-</td>`;
configTableBody.appendChild(row);
});
// Display tags
console.log(`Processing ${event.tags.length} tags`);
event.tags.forEach(tag => {
// Display tags (editable configuration parameters only)
console.log(`Processing ${event.tags.length} configuration parameters`);
event.tags.forEach((tag, index) => {
if (tag.length >= 2) {
const row = document.createElement('tr');
row.innerHTML = `<td>${tag[0]}</td><td>${tag[1]}</td><td>-</td>`;
const key = tag[0];
const value = tag[1];
// Create editable input for value
const valueInput = document.createElement('input');
valueInput.type = 'text';
valueInput.value = value;
valueInput.className = 'config-value-input';
valueInput.dataset.key = key;
valueInput.dataset.originalValue = value;
valueInput.dataset.rowIndex = index;
// Create clickable Actions cell
const actionsCell = document.createElement('td');
actionsCell.className = 'config-actions-cell';
actionsCell.textContent = 'SAVE';
actionsCell.dataset.key = key;
actionsCell.dataset.originalValue = value;
actionsCell.dataset.rowIndex = index;
// Initially hide the SAVE text
actionsCell.style.color = 'transparent';
// Show SAVE text and make clickable when value changes
valueInput.addEventListener('input', function() {
if (this.value !== this.dataset.originalValue) {
actionsCell.style.color = 'var(--primary-color)';
actionsCell.style.cursor = 'pointer';
actionsCell.onclick = () => saveIndividualConfig(key, valueInput.value, valueInput.dataset.originalValue, actionsCell);
} else {
actionsCell.style.color = 'transparent';
actionsCell.style.cursor = 'default';
actionsCell.onclick = null;
}
});
row.innerHTML = `<td>${key}</td><td></td>`;
row.cells[1].appendChild(valueInput);
row.appendChild(actionsCell);
configTableBody.appendChild(row);
}
});
// Show message if no configuration parameters found
if (event.tags.length === 0) {
const row = document.createElement('tr');
row.innerHTML = `<td colspan="3" style="text-align: center; font-style: italic;">No configuration parameters found</td>`;
configTableBody.appendChild(row);
}
console.log('Configuration display completed successfully');
updateConfigStatus(true);
@@ -1305,195 +1436,98 @@
}
}
// Configuration editing functions
function generateConfigForm(event) {
if (!event || !event.tags) {
console.log('No configuration event to edit');
return;
}
configForm.innerHTML = '';
// Define field types and validation for different config parameters (aligned with README.md)
const fieldTypes = {
'auth_enabled': 'boolean',
'nip42_auth_required': 'boolean',
'nip40_expiration_enabled': 'boolean',
'max_connections': 'number',
'pow_min_difficulty': 'number',
'nip42_challenge_timeout': 'number',
'max_subscriptions_per_client': 'number',
'max_event_tags': 'number',
'max_content_length': 'number'
};
const descriptions = {
'relay_pubkey': 'Relay Public Key (Read-only)',
'auth_enabled': 'Enable Authentication',
'nip42_auth_required': 'Enable NIP-42 Cryptographic Authentication',
'nip42_auth_required_kinds': 'Event Kinds Requiring NIP-42 Auth',
'nip42_challenge_timeout': 'NIP-42 Challenge Expiration Seconds',
'max_connections': 'Maximum Connections',
'relay_description': 'Relay Description',
'relay_contact': 'Relay Contact',
'pow_min_difficulty': 'Minimum Proof-of-Work Difficulty',
'nip40_expiration_enabled': 'Enable Event Expiration',
'max_subscriptions_per_client': 'Max Subscriptions per Client',
'max_event_tags': 'Maximum Tags per Event',
'max_content_length': 'Maximum Event Content Length'
};
// Process configuration tags (no d tag filtering for ephemeral events)
const configData = {};
event.tags.forEach(tag => {
if (tag.length >= 2) {
configData[tag[0]] = tag[1];
}
});
// Create form fields for each configuration parameter
Object.entries(configData).forEach(([key, value]) => {
const fieldType = fieldTypes[key] || 'text';
const description = descriptions[key] || key.replace(/_/g, ' ').replace(/\b\w/g, l => l.toUpperCase());
const fieldGroup = document.createElement('div');
fieldGroup.className = 'input-group';
const label = document.createElement('label');
label.textContent = description;
label.setAttribute('for', `config-${key}`);
let input;
if (fieldType === 'boolean') {
input = document.createElement('select');
input.innerHTML = `
<option value="true" ${value === 'true' ? 'selected' : ''}>true</option>
<option value="false" ${value === 'false' ? 'selected' : ''}>false</option>
`;
} else if (fieldType === 'number') {
input = document.createElement('input');
input.type = 'number';
input.value = value;
input.min = '0';
} else {
input = document.createElement('input');
input.type = 'text';
input.value = value;
}
input.id = `config-${key}`;
input.name = key;
// Make relay_pubkey read-only
if (key === 'relay_pubkey' || key === 'd') {
input.disabled = true;
}
fieldGroup.appendChild(label);
fieldGroup.appendChild(input);
configForm.appendChild(fieldGroup);
});
console.log('Configuration form generated');
}
function enterEditMode() {
if (!currentConfig) {
console.log('No configuration loaded to edit');
return;
}
generateConfigForm(currentConfig);
configViewMode.classList.add('hidden');
configEditMode.classList.remove('hidden');
console.log('Entered edit mode');
}
function exitEditMode() {
configViewMode.classList.remove('hidden');
configEditMode.classList.add('hidden');
configForm.innerHTML = '';
console.log('Exited edit mode');
}
async function saveConfiguration() {
// Save individual configuration parameter
async function saveIndividualConfig(key, newValue, originalValue, actionsCell) {
if (!isLoggedIn || !userPubkey) {
console.log('Must be logged in to save configuration');
log('Must be logged in to save configuration', 'ERROR');
return;
}
if (!currentConfig) {
console.log('No current configuration to update');
log('No current configuration to update', 'ERROR');
return;
}
// Don't save if value hasn't changed
if (newValue === originalValue) {
return;
}
try {
console.log('Building configuration update command...');
log(`Saving individual config: ${key} = ${newValue}`, 'INFO');
// Collect form data
const formInputs = configForm.querySelectorAll('input, select');
const configObjects = [];
// Determine data type based on key name
let dataType = 'string';
if (['max_connections', 'pow_min_difficulty', 'nip42_challenge_timeout', 'max_subscriptions_per_client', 'max_event_tags', 'max_content_length'].includes(key)) {
dataType = 'integer';
} else if (['auth_enabled', 'nip42_auth_required', 'nip40_expiration_enabled'].includes(key)) {
dataType = 'boolean';
}
// Process each form input as a config object
formInputs.forEach(input => {
if (!input.disabled && input.name && input.name !== 'd' && input.name !== 'relay_pubkey') {
// Determine data type based on input type
let dataType = 'string';
if (input.type === 'number') {
dataType = 'integer';
} else if (input.tagName === 'SELECT' && (input.value === 'true' || input.value === 'false')) {
dataType = 'boolean';
}
// Determine category based on key name
let category = 'general';
if (key.startsWith('relay_')) {
category = 'relay';
} else if (key.startsWith('nip40_')) {
category = 'expiration';
} else if (key.startsWith('nip42_') || key.startsWith('auth_')) {
category = 'authentication';
} else if (key.startsWith('pow_')) {
category = 'proof_of_work';
} else if (key.startsWith('max_')) {
category = 'limits';
}
// Determine category based on key name
let category = 'general';
const key = input.name;
if (key.startsWith('relay_')) {
category = 'relay';
} else if (key.startsWith('nip40_')) {
category = 'expiration';
} else if (key.startsWith('nip42_') || key.startsWith('auth_')) {
category = 'authentication';
} else if (key.startsWith('pow_')) {
category = 'proof_of_work';
} else if (key.startsWith('max_')) {
category = 'limits';
}
const configObj = {
key: key,
value: newValue,
data_type: dataType,
category: category
};
configObjects.push({
key: key,
value: input.value,
data_type: dataType,
category: category
});
// Update cell during save
actionsCell.textContent = 'SAVING...';
actionsCell.style.color = 'var(--accent-color)';
actionsCell.style.cursor = 'not-allowed';
actionsCell.onclick = null;
// Send single config update
await sendConfigUpdateCommand([configObj]);
// Update the original value on success
const input = actionsCell.parentElement.cells[1].querySelector('input');
if (input) {
input.dataset.originalValue = newValue;
// Hide SAVE text since value now matches original
actionsCell.style.color = 'transparent';
actionsCell.style.cursor = 'default';
actionsCell.onclick = null;
}
actionsCell.textContent = 'SAVED';
actionsCell.style.color = 'var(--accent-color)';
setTimeout(() => {
actionsCell.textContent = 'SAVE';
// Keep transparent if value matches original
if (input && input.value === input.dataset.originalValue) {
actionsCell.style.color = 'transparent';
}
});
}, 2000);
if (configObjects.length === 0) {
console.log('No configuration changes to save');
return;
}
console.log(`Sending config_update commands for ${configObjects.length} configuration objects...`);
// Send config_update commands one at a time to avoid large event size
for (const configObj of configObjects) {
await sendConfigUpdateCommand([configObj]);
}
console.log('Configuration update command sent successfully');
// Exit edit mode
exitEditMode();
log(`Successfully saved config: ${key} = ${newValue}`, 'INFO');
} catch (error) {
console.log('Configuration save failed: ' + error.message);
console.error('Save configuration error:', error);
log(`Failed to save individual config ${key}: ${error.message}`, 'ERROR');
actionsCell.textContent = 'SAVE';
actionsCell.style.color = 'var(--primary-color)';
actionsCell.style.cursor = 'pointer';
actionsCell.onclick = () => saveIndividualConfig(key, actionsCell.parentElement.cells[1].querySelector('input').value, originalValue, actionsCell);
}
}
// Send config update command using kind 23456 with Administrator API (inner events)
async function sendConfigUpdateCommand(configObjects) {
try {
@@ -1619,35 +1653,7 @@
});
copyConfigBtn.addEventListener('click', function (e) {
e.preventDefault();
e.stopPropagation();
if (currentConfig) {
navigator.clipboard.writeText(JSON.stringify(currentConfig, null, 2))
.then(() => console.log('Configuration copied to clipboard'))
.catch(err => console.log('Failed to copy: ' + err.message));
}
});
editConfigBtn.addEventListener('click', function (e) {
e.preventDefault();
e.stopPropagation();
enterEditMode();
});
saveConfigBtn.addEventListener('click', function (e) {
e.preventDefault();
e.stopPropagation();
saveConfiguration().catch(error => {
console.log('Save configuration failed: ' + error.message);
});
});
cancelEditBtn.addEventListener('click', function (e) {
e.preventDefault();
e.stopPropagation();
exitEditMode();
});
// Relay connection event handlers
connectRelayBtn.addEventListener('click', function (e) {
@@ -1664,22 +1670,12 @@
disconnectFromRelay();
});
testWebSocketBtn.addEventListener('click', function (e) {
restartRelayBtn.addEventListener('click', function (e) {
e.preventDefault();
e.stopPropagation();
const url = relayConnectionUrl.value.trim();
if (!url) {
log('Please enter a relay URL first', 'ERROR');
return;
}
testWebSocketConnection(url)
.then(() => {
log('WebSocket test successful', 'INFO');
})
.catch(error => {
log(`WebSocket test failed: ${error.message}`, 'ERROR');
});
sendRestartCommand().catch(error => {
log(`Restart command failed: ${error.message}`, 'ERROR');
});
});
// ================================
@@ -2116,10 +2112,17 @@
}
}
// Update existing logout and showMainInterface functions to handle auth rules
// Update existing logout and showMainInterface functions to handle auth rules and NIP-17 DMs
// Extend logout: tear down the auth-rules UI and reset both DM panels
// before delegating to the original implementation.
const originalLogout = logout;
logout = async function () {
    hideAuthRulesSection();
    // Reset DM outbox / inbox so no stale content survives the session.
    if (dmOutbox) {
        dmOutbox.value = '';
    }
    if (dmInbox) {
        dmInbox.innerHTML = '<div class="log-entry">No messages received yet.</div>';
    }
    await originalLogout();
};
@@ -2936,6 +2939,160 @@
}
}
// Send a NIP-17 Direct Message to the relay using NIP-59 gift-wrap layering:
// unsigned kind-14 rumor -> signed kind-13 seal -> ephemeral-key kind-1059 wrap.
async function sendNIP17DM() {
    if (!isLoggedIn || !userPubkey) {
        log('Must be logged in to send DM', 'ERROR');
        return;
    }
    if (!isRelayConnected || !relayPubkey) {
        log('Must be connected to relay to send DM', 'ERROR');
        return;
    }
    const message = dmOutbox.value.trim();
    if (!message) {
        log('Please enter a message to send', 'ERROR');
        return;
    }
    // Capability checks. FIX: also require window.NostrTools.nip44, which the
    // gift-wrap step below uses — previously a missing nip44 module surfaced
    // as a TypeError deep inside the try block instead of this clear error.
    if (!window.nostr || !window.nostr.nip44 || !window.nostr.signEvent) {
        log('NIP-17 DMs require a NIP-07 extension with NIP-44 support', 'ERROR');
        alert('NIP-17 DMs require a NIP-07 extension with NIP-44 support. Please install and configure a compatible extension.');
        return;
    }
    if (!window.NostrTools || !window.NostrTools.generateSecretKey || !window.NostrTools.getPublicKey || !window.NostrTools.finalizeEvent || !window.NostrTools.nip44) {
        log('NostrTools library not available for ephemeral key operations', 'ERROR');
        alert('NostrTools library not available. Please ensure nostr.bundle.js is loaded.');
        return;
    }
    try {
        log(`Sending NIP-17 DM to relay: ${message.substring(0, 50)}...`, 'INFO');
        // Step 1: Build unsigned rumor (kind 14).
        const rumor = {
            kind: 14,
            pubkey: userPubkey,
            created_at: Math.floor(Date.now() / 1000), // Canonical time for rumor
            tags: [["p", relayPubkey]],
            content: message
        };
        // NOTE: Rumor remains unsigned per NIP-59
        log('Rumor built (unsigned), creating seal...', 'INFO');
        // Step 2: Create seal (kind 13); the rumor is NIP-44-encrypted to the relay.
        const seal = {
            kind: 13,
            pubkey: userPubkey,
            created_at: randomNow(), // Randomized to past for metadata protection
            tags: [], // Empty tags per NIP-59
            content: await window.nostr.nip44.encrypt(relayPubkey, JSON.stringify(rumor))
        };
        // Sign seal with the long-term key via the NIP-07 extension.
        const signedSeal = await window.nostr.signEvent(seal);
        if (!signedSeal || !signedSeal.sig) {
            throw new Error('Failed to sign seal event');
        }
        log('Seal created and signed, creating gift wrap...', 'INFO');
        // Step 3: Create gift wrap (kind 1059) with a throwaway ephemeral key so
        // the outer event cannot be linked to the sender's long-term identity.
        const ephemeralPriv = window.NostrTools.generateSecretKey();
        const ephemeralPub = window.NostrTools.getPublicKey(ephemeralPriv);
        const giftWrap = {
            kind: 1059,
            pubkey: ephemeralPub,
            created_at: randomNow(), // Randomized to past for metadata protection
            tags: [["p", relayPubkey]],
            content: await window.NostrTools.nip44.encrypt(
                JSON.stringify(signedSeal),
                window.NostrTools.nip44.getConversationKey(ephemeralPriv, relayPubkey)
            )
        };
        // Sign gift wrap with the ephemeral key using finalizeEvent.
        const signedGiftWrap = window.NostrTools.finalizeEvent(giftWrap, ephemeralPriv);
        if (!signedGiftWrap || !signedGiftWrap.sig) {
            throw new Error('Failed to sign gift wrap event');
        }
        log('NIP-17 DM event created and signed with ephemeral key, publishing...', 'INFO');
        // Publish via SimplePool and record each relay's outcome.
        const url = relayConnectionUrl.value.trim();
        const publishPromises = relayPool.publish([url], signedGiftWrap);
        const results = await Promise.allSettled(publishPromises);
        let successCount = 0;
        results.forEach((result, index) => {
            if (result.status === 'fulfilled') {
                successCount++;
                log(`✅ NIP-17 DM published successfully to relay ${index}`, 'INFO');
            } else {
                log(`❌ NIP-17 DM failed on relay ${index}: ${result.reason?.message || result.reason}`, 'ERROR');
            }
        });
        if (successCount === 0) {
            const errorDetails = results.map((r, i) => `Relay ${i}: ${r.reason?.message || r.reason}`).join('; ');
            throw new Error(`All relays rejected NIP-17 DM event. Details: ${errorDetails}`);
        }
        // Clear the outbox, log success, and mirror the sent message locally.
        dmOutbox.value = '';
        log('NIP-17 DM sent successfully', 'INFO');
        addMessageToInbox('sent', message, new Date().toLocaleString());
    } catch (error) {
        log(`Failed to send NIP-17 DM: ${error.message}`, 'ERROR');
    }
}
// Add a message to the DM inbox display.
// direction: 'sent' or 'received' (controls the label colour)
// message:   plain-text message body; may contain newlines
// timestamp: preformatted display string
function addMessageToInbox(direction, message, timestamp) {
    if (!dmInbox) return;
    const messageDiv = document.createElement('div');
    messageDiv.className = 'log-entry';
    const directionColor = direction === 'sent' ? '#007bff' : '#28a745';
    // SECURITY FIX: escape HTML metacharacters before injecting via innerHTML.
    // DM content can originate from arbitrary remote senders, so unescaped
    // markup here was a stored-XSS vector in the admin panel.
    const escapedMessage = message
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;')
        .replace(/'/g, '&#39;');
    // Convert newlines to <br> tags for proper HTML display.
    const formattedMessage = escapedMessage.replace(/\n/g, '<br>');
    messageDiv.innerHTML = `
<span class="log-timestamp">${timestamp}</span>
<span style="color: ${directionColor}; font-weight: bold;">[${direction.toUpperCase()}]</span>
<span style="white-space: pre-wrap;">${formattedMessage}</span>
`;
    // Remove the "No messages received yet" placeholder if it exists.
    const placeholder = dmInbox.querySelector('.log-entry');
    if (placeholder && placeholder.textContent === 'No messages received yet.') {
        dmInbox.innerHTML = '';
    }
    // Newest message goes on top.
    dmInbox.insertBefore(messageDiv, dmInbox.firstChild);
    // Cap the inbox at the 50 most recent messages.
    while (dmInbox.children.length > 50) {
        dmInbox.removeChild(dmInbox.lastChild);
    }
}
// Helper function to get relay pubkey
function getRelayPubkey() {
// Use the dynamically fetched relay pubkey if available
@@ -2981,6 +3138,83 @@
// DATABASE STATISTICS FUNCTIONS
// ================================
// Ask the relay to restart itself via the encrypted Administrator API:
// builds a ["system_command","restart"] payload, wraps it in a signed
// kind-23456 event addressed to the relay pubkey, and publishes it.
async function sendRestartCommand() {
    if (!isLoggedIn || !userPubkey) {
        log('Must be logged in to restart relay', 'ERROR');
        return;
    }
    if (!relayPool) {
        log('SimplePool connection not available', 'ERROR');
        return;
    }
    try {
        log('Sending restart command to relay...', 'INFO');
        // NIP-44-encrypt the admin command payload for the relay.
        const command_array = ["system_command", "restart"];
        const encrypted_content = await encryptForRelay(JSON.stringify(command_array));
        if (!encrypted_content) {
            throw new Error('Failed to encrypt command array');
        }
        // Kind 23456 is the relay's admin-command event type.
        const signedEvent = await window.nostr.signEvent({
            kind: 23456,
            pubkey: userPubkey,
            created_at: Math.floor(Date.now() / 1000),
            tags: [["p", getRelayPubkey()]],
            content: encrypted_content
        });
        if (!signedEvent || !signedEvent.sig) {
            throw new Error('Event signing failed');
        }
        // Publish and collect every relay's individual outcome.
        const url = relayConnectionUrl.value.trim();
        const results = await Promise.allSettled(relayPool.publish([url], signedEvent));
        let successCount = 0;
        for (const [index, result] of results.entries()) {
            if (result.status === 'fulfilled') {
                successCount++;
                log(`Restart command published successfully to relay ${index}`, 'INFO');
            } else {
                log(`Restart command failed on relay ${index}: ${result.reason?.message || result.reason}`, 'ERROR');
            }
        }
        if (successCount === 0) {
            const errorDetails = results.map((r, i) => `Relay ${i}: ${r.reason?.message || r.reason}`).join('; ');
            throw new Error(`All relays rejected restart command. Details: ${errorDetails}`);
        }
        log('Restart command sent successfully - relay should restart shortly...', 'INFO');
        // Reflect the pending restart in the connection-status UI; the
        // subsequent WebSocket disconnect handler completes the cycle.
        updateRelayConnectionStatus('connecting');
        relayConnectionStatus.textContent = 'RESTARTING...';
    } catch (error) {
        log(`Failed to send restart command: ${error.message}`, 'ERROR');
        updateRelayConnectionStatus('error');
    }
}
// Send stats_query command to get database statistics using Administrator API (inner events)
async function sendStatsQuery() {
if (!isLoggedIn || !userPubkey) {
@@ -3137,9 +3371,12 @@
const events7d = document.getElementById('events-7d');
const events30d = document.getElementById('events-30d');
if (events24h) events24h.textContent = data.events_24h || '-';
if (events7d) events7d.textContent = data.events_7d || '-';
if (events30d) events30d.textContent = data.events_30d || '-';
// Access the nested time_stats object from backend response
const timeStats = data.time_stats || {};
if (events24h) events24h.textContent = timeStats.last_24h || '0';
if (events7d) events7d.textContent = timeStats.last_7d || '0';
if (events30d) events30d.textContent = timeStats.last_30d || '0';
}
// Populate top pubkeys table
@@ -3251,6 +3488,11 @@
if (refreshStatsBtn) {
refreshStatsBtn.addEventListener('click', sendStatsQuery);
}
// NIP-17 DM event handlers
if (sendDmBtn) {
sendDmBtn.addEventListener('click', sendNIP17DM);
}
});
// Initialize the app

View File

@@ -17,6 +17,33 @@ print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
COMMIT_MESSAGE=""
RELEASE_MODE=false
# show_usage: print command-line help (modes, examples, release-mode
# prerequisites) for this build-and-push script.
show_usage() {
    # Single heredoc instead of an echo chain; $0 expands inside it.
    cat <<USAGE
C-Relay Build and Push Script

Usage:
 $0 "commit message" - Default: compile, increment patch, commit & push
 $0 -r "commit message" - Release: compile x86+arm64, increment minor, create release

Examples:
 $0 "Fixed event validation bug"
 $0 --release "Major release with new features"

Default Mode (patch increment):
 - Compile C-Relay
 - Increment patch version (v1.2.3 → v1.2.4)
 - Git add, commit with message, and push

Release Mode (-r flag):
 - Compile C-Relay for x86_64 and arm64 (dynamic and static versions)
 - Increment minor version, zero patch (v1.2.3 → v1.3.0)
 - Git add, commit, push, and create Gitea release

Requirements for Release Mode:
 - For ARM64 builds: make install-arm64-deps (optional - will build x86_64 only if missing)
 - For static builds: sudo apt-get install musl-dev libcap-dev libuv1-dev libev-dev
 - Gitea token in ~/.gitea_token for release uploads
USAGE
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
@@ -38,32 +65,6 @@ while [[ $# -gt 0 ]]; do
esac
done
show_usage() {
echo "C-Relay Build and Push Script"
echo ""
echo "Usage:"
echo " $0 \"commit message\" - Default: compile, increment patch, commit & push"
echo " $0 -r \"commit message\" - Release: compile x86+arm64, increment minor, create release"
echo ""
echo "Examples:"
echo " $0 \"Fixed event validation bug\""
echo " $0 --release \"Major release with new features\""
echo ""
echo "Default Mode (patch increment):"
echo " - Compile C-Relay"
echo " - Increment patch version (v1.2.3 → v1.2.4)"
echo " - Git add, commit with message, and push"
echo ""
echo "Release Mode (-r flag):"
echo " - Compile C-Relay for x86_64 and arm64"
echo " - Increment minor version, zero patch (v1.2.3 → v1.3.0)"
echo " - Git add, commit, push, and create Gitea release"
echo ""
echo "Requirements for Release Mode:"
echo " - For ARM64 builds: make install-arm64-deps (optional - will build x86_64 only if missing)"
echo " - Gitea token in ~/.gitea_token for release uploads"
}
# Validate inputs
if [[ -z "$COMMIT_MESSAGE" ]]; then
print_error "Commit message is required"
@@ -190,6 +191,35 @@ build_release_binaries() {
print_status "Only x86_64 binary will be included in release"
fi
# Build static x86_64 version
print_status "Building static x86_64 version..."
make clean > /dev/null 2>&1
if make static-musl-x86_64 > /dev/null 2>&1; then
if [[ -f "build/c_relay_static_musl_x86_64" ]]; then
cp build/c_relay_static_musl_x86_64 c-relay-static-x86_64
print_success "Static x86_64 binary created: c-relay-static-x86_64"
else
print_warning "Static x86_64 binary not found after compilation"
fi
else
print_warning "Static x86_64 build failed - MUSL development packages may not be installed"
print_status "Run 'sudo apt-get install musl-dev libcap-dev libuv1-dev libev-dev' to enable static builds"
fi
# Try to build static ARM64 version
print_status "Attempting static ARM64 build..."
make clean > /dev/null 2>&1
if make static-musl-arm64 > /dev/null 2>&1; then
if [[ -f "build/c_relay_static_musl_arm64" ]]; then
cp build/c_relay_static_musl_arm64 c-relay-static-arm64
print_success "Static ARM64 binary created: c-relay-static-arm64"
else
print_warning "Static ARM64 binary not found after compilation"
fi
else
print_warning "Static ARM64 build failed - ARM64 cross-compilation or MUSL ARM64 packages not set up"
fi
# Restore normal build
make clean > /dev/null 2>&1
make > /dev/null 2>&1
@@ -319,12 +349,18 @@ create_gitea_release() {
-H "Content-Type: application/json" \
-d "{\"tag_name\": \"$NEW_VERSION\", \"name\": \"$NEW_VERSION\", \"body\": \"$COMMIT_MESSAGE\"}")
local upload_result=false
if echo "$response" | grep -q '"id"'; then
print_success "Created release $NEW_VERSION"
upload_release_binaries "$api_url" "$token"
if upload_release_binaries "$api_url" "$token"; then
upload_result=true
fi
elif echo "$response" | grep -q "already exists"; then
print_warning "Release $NEW_VERSION already exists"
upload_release_binaries "$api_url" "$token"
if upload_release_binaries "$api_url" "$token"; then
upload_result=true
fi
else
print_error "Failed to create release $NEW_VERSION"
print_error "Response: $response"
@@ -334,18 +370,29 @@ create_gitea_release() {
local check_response=$(curl -s -H "Authorization: token $token" "$api_url/releases/tags/$NEW_VERSION")
if echo "$check_response" | grep -q '"id"'; then
print_warning "Release exists but creation response was unexpected"
upload_release_binaries "$api_url" "$token"
if upload_release_binaries "$api_url" "$token"; then
upload_result=true
fi
else
print_error "Release does not exist and creation failed"
return 1
fi
fi
# Return based on upload success
if [[ "$upload_result" == true ]]; then
return 0
else
print_error "Binary upload failed"
return 1
fi
}
# Function to upload release binaries
upload_release_binaries() {
local api_url="$1"
local token="$2"
local upload_success=true
# Get release ID with more robust parsing
print_status "Getting release ID for $NEW_VERSION..."
@@ -367,37 +414,131 @@ upload_release_binaries() {
# Upload x86_64 binary
if [[ -f "c-relay-x86_64" ]]; then
print_status "Uploading x86_64 binary..."
if curl -s -X POST "$api_url/releases/$release_id/assets" \
local upload_response=$(curl -s -w "\n%{http_code}" -X POST "$api_url/releases/$release_id/assets" \
-H "Authorization: token $token" \
-F "attachment=@c-relay-x86_64;filename=c-relay-${NEW_VERSION}-linux-x86_64" > /dev/null; then
print_success "Uploaded x86_64 binary"
-F "attachment=@c-relay-x86_64;filename=c-relay-${NEW_VERSION}-linux-x86_64")
local http_code=$(echo "$upload_response" | tail -n1)
local response_body=$(echo "$upload_response" | head -n -1)
if [[ "$http_code" == "201" ]]; then
print_success "Uploaded x86_64 binary successfully"
else
print_warning "Failed to upload x86_64 binary"
print_error "Failed to upload x86_64 binary (HTTP $http_code)"
print_error "Response: $response_body"
upload_success=false
fi
else
print_warning "x86_64 binary not found: c-relay-x86_64"
fi
# Upload ARM64 binary
if [[ -f "c-relay-arm64" ]]; then
print_status "Uploading ARM64 binary..."
if curl -s -X POST "$api_url/releases/$release_id/assets" \
local upload_response=$(curl -s -w "\n%{http_code}" -X POST "$api_url/releases/$release_id/assets" \
-H "Authorization: token $token" \
-F "attachment=@c-relay-arm64;filename=c-relay-${NEW_VERSION}-linux-arm64" > /dev/null; then
print_success "Uploaded ARM64 binary"
-F "attachment=@c-relay-arm64;filename=c-relay-${NEW_VERSION}-linux-arm64")
local http_code=$(echo "$upload_response" | tail -n1)
local response_body=$(echo "$upload_response" | head -n -1)
if [[ "$http_code" == "201" ]]; then
print_success "Uploaded ARM64 binary successfully"
else
print_warning "Failed to upload ARM64 binary"
print_error "Failed to upload ARM64 binary (HTTP $http_code)"
print_error "Response: $response_body"
upload_success=false
fi
else
print_warning "ARM64 binary not found: c-relay-arm64"
fi
# Upload static x86_64 binary
if [[ -f "c-relay-static-x86_64" ]]; then
print_status "Uploading static x86_64 binary..."
local upload_response=$(curl -s -w "\n%{http_code}" -X POST "$api_url/releases/$release_id/assets" \
-H "Authorization: token $token" \
-F "attachment=@c-relay-static-x86_64;filename=c-relay-${NEW_VERSION}-linux-x86_64-static")
local http_code=$(echo "$upload_response" | tail -n1)
local response_body=$(echo "$upload_response" | head -n -1)
if [[ "$http_code" == "201" ]]; then
print_success "Uploaded static x86_64 binary successfully"
else
print_error "Failed to upload static x86_64 binary (HTTP $http_code)"
print_error "Response: $response_body"
upload_success=false
fi
else
print_warning "Static x86_64 binary not found: c-relay-static-x86_64"
fi
# Upload static ARM64 binary
if [[ -f "c-relay-static-arm64" ]]; then
print_status "Uploading static ARM64 binary..."
local upload_response=$(curl -s -w "\n%{http_code}" -X POST "$api_url/releases/$release_id/assets" \
-H "Authorization: token $token" \
-F "attachment=@c-relay-static-arm64;filename=c-relay-${NEW_VERSION}-linux-arm64-static")
local http_code=$(echo "$upload_response" | tail -n1)
local response_body=$(echo "$upload_response" | head -n -1)
if [[ "$http_code" == "201" ]]; then
print_success "Uploaded static ARM64 binary successfully"
else
print_error "Failed to upload static ARM64 binary (HTTP $http_code)"
print_error "Response: $response_body"
upload_success=false
fi
else
print_warning "Static ARM64 binary not found: c-relay-static-arm64"
fi
# Return success/failure status
if [[ "$upload_success" == true ]]; then
return 0
else
return 1
fi
}
# Function to clean up release binaries.
# Deletes the local release artifacts only when the upload succeeded (the
# global $upload_success set in main) or when "force" is passed; otherwise
# the files are kept so they can be uploaded manually.
# FIX: removed stale pre-refactor lines that unconditionally deleted the
# x86_64/arm64 binaries BEFORE the success check, which defeated the
# keep-on-failure behaviour.
cleanup_release_binaries() {
    local force_cleanup="$1" # Optional parameter to force cleanup even on failure
    # NOTE(review): relies on the global $upload_success assigned in main() —
    # confirm it is always initialized before this function runs.
    if [[ "$force_cleanup" == "force" ]] || [[ "$upload_success" == true ]]; then
        if [[ -f "c-relay-x86_64" ]]; then
            rm -f c-relay-x86_64
            print_status "Cleaned up x86_64 binary"
        fi
        if [[ -f "c-relay-arm64" ]]; then
            rm -f c-relay-arm64
            print_status "Cleaned up ARM64 binary"
        fi
        if [[ -f "c-relay-static-x86_64" ]]; then
            rm -f c-relay-static-x86_64
            print_status "Cleaned up static x86_64 binary"
        fi
        if [[ -f "c-relay-static-arm64" ]]; then
            rm -f c-relay-static-arm64
            print_status "Cleaned up static ARM64 binary"
        fi
    else
        print_warning "Keeping binary files due to upload failures"
        print_status "Files available for manual upload:"
        if [[ -f "c-relay-x86_64" ]]; then
            print_status " - c-relay-x86_64"
        fi
        if [[ -f "c-relay-arm64" ]]; then
            print_status " - c-relay-arm64"
        fi
        if [[ -f "c-relay-static-x86_64" ]]; then
            print_status " - c-relay-static-x86_64"
        fi
        if [[ -f "c-relay-static-arm64" ]]; then
            print_status " - c-relay-static-arm64"
        fi
    fi
}
@@ -433,14 +574,18 @@ main() {
git_commit_and_push_no_tag
# Create Gitea release with binaries
create_gitea_release
if create_gitea_release; then
print_success "Release $NEW_VERSION completed successfully!"
print_status "Binaries uploaded to Gitea release"
upload_success=true
else
print_error "Release creation or binary upload failed"
upload_success=false
fi
# Cleanup
# Cleanup (only if upload was successful)
cleanup_release_binaries
print_success "Release $NEW_VERSION completed successfully!"
print_status "Binaries uploaded to Gitea release"
else
print_status "=== DEFAULT MODE ==="

197
build_static.sh Executable file
View File

@@ -0,0 +1,197 @@
#!/bin/bash
# Build fully static MUSL binaries for C-Relay using Alpine Docker.
# Produces truly portable binaries with zero runtime dependencies.
# Fixes in this revision:
#  - the builder-stage docker build now fails fast instead of silently
#    discarding its error (it previously surfaced later as a confusing
#    "create/cp" failure);
#  - the ldd capture no longer aborts the script under `set -e` (ldd exits
#    non-zero for static binaries on glibc hosts).
set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
BUILD_DIR="$SCRIPT_DIR/build"
DOCKERFILE="$SCRIPT_DIR/Dockerfile.alpine-musl"

echo "=========================================="
echo "C-Relay MUSL Static Binary Builder"
echo "=========================================="
echo "Project directory: $SCRIPT_DIR"
echo "Build directory: $BUILD_DIR"
echo ""

# Create build directory
mkdir -p "$BUILD_DIR"

# Check if Docker is available
if ! command -v docker &> /dev/null; then
    echo "ERROR: Docker is not installed or not in PATH"
    echo ""
    echo "Docker is required to build MUSL static binaries."
    echo "Please install Docker:"
    echo " - Ubuntu/Debian: sudo apt install docker.io"
    echo " - Or visit: https://docs.docker.com/engine/install/"
    echo ""
    exit 1
fi

# Check if Docker daemon is running (try with and without sudo)
if docker info &> /dev/null; then
    DOCKER_CMD="docker"
elif sudo docker info &> /dev/null; then
    echo "Note: Using sudo for Docker commands (user not in docker group)"
    echo "To avoid sudo, run: sudo usermod -aG docker $USER && newgrp docker"
    echo ""
    DOCKER_CMD="sudo docker"
else
    echo "ERROR: Docker daemon is not running"
    echo ""
    echo "Please start Docker:"
    echo " - sudo systemctl start docker"
    echo " - Or start Docker Desktop"
    echo ""
    exit 1
fi

echo "✓ Docker is available and running"
echo ""

# Detect host architecture and map it to a Docker platform / output name.
ARCH=$(uname -m)
case "$ARCH" in
    x86_64)
        PLATFORM="linux/amd64"
        OUTPUT_NAME="c_relay_static_musl_x86_64"
        ;;
    aarch64|arm64)
        PLATFORM="linux/arm64"
        OUTPUT_NAME="c_relay_static_musl_arm64"
        ;;
    *)
        echo "WARNING: Unknown architecture: $ARCH"
        echo "Defaulting to linux/amd64"
        PLATFORM="linux/amd64"
        OUTPUT_NAME="c_relay_static_musl_${ARCH}"
        ;;
esac

echo "Building for platform: $PLATFORM"
echo "Output binary: $OUTPUT_NAME"
echo ""

# Build the Docker image
echo "=========================================="
echo "Step 1: Building Alpine Docker image"
echo "=========================================="
echo "This will:"
echo " - Use Alpine Linux (native MUSL)"
echo " - Build all dependencies statically"
echo " - Compile c-relay with full static linking"
echo ""
$DOCKER_CMD build \
    --platform "$PLATFORM" \
    -f "$DOCKERFILE" \
    -t c-relay-musl-builder:latest \
    --progress=plain \
    . || {
    echo ""
    echo "ERROR: Docker build failed"
    echo "Check the output above for details"
    exit 1
}

echo ""
echo "✓ Docker image built successfully"
echo ""

# Extract the binary from the container
echo "=========================================="
echo "Step 2: Extracting static binary"
echo "=========================================="

# Build the builder stage so the binary can be copied out of it.
# FIX: this build previously ignored failures (output discarded, no error
# branch); now it fails fast with a clear message.
$DOCKER_CMD build \
    --platform "$PLATFORM" \
    --target builder \
    -f "$DOCKERFILE" \
    -t c-relay-musl-builder-stage:latest \
    . > /dev/null 2>&1 || {
    echo "ERROR: Failed to build builder stage image"
    exit 1
}

# Create a temporary container to copy the binary
CONTAINER_ID=$($DOCKER_CMD create c-relay-musl-builder-stage:latest)

# Copy binary from container
$DOCKER_CMD cp "$CONTAINER_ID:/build/c_relay_static_musl" "$BUILD_DIR/$OUTPUT_NAME" || {
    echo "ERROR: Failed to extract binary from container"
    $DOCKER_CMD rm "$CONTAINER_ID" 2>/dev/null
    exit 1
}

# Clean up container
$DOCKER_CMD rm "$CONTAINER_ID" > /dev/null

echo "✓ Binary extracted to: $BUILD_DIR/$OUTPUT_NAME"
echo ""

# Make binary executable
chmod +x "$BUILD_DIR/$OUTPUT_NAME"

# Verify the binary
echo "=========================================="
echo "Step 3: Verifying static binary"
echo "=========================================="
echo ""
echo "File information:"
file "$BUILD_DIR/$OUTPUT_NAME"
echo ""
echo "File size: $(ls -lh "$BUILD_DIR/$OUTPUT_NAME" | awk '{print $5}')"
echo ""
echo "Checking for dynamic dependencies:"
# FIX: ldd exits non-zero for "not a dynamic executable"; without `|| true`
# the command substitution would kill the script under `set -e`.
LDD_OUTPUT=$(ldd "$BUILD_DIR/$OUTPUT_NAME" 2>&1) || true
if echo "$LDD_OUTPUT" | grep -q "not a dynamic executable"; then
    echo "✓ Binary is fully static (no dynamic dependencies)"
    TRULY_STATIC=true
elif echo "$LDD_OUTPUT" | grep -q "statically linked"; then
    echo "✓ Binary is statically linked"
    TRULY_STATIC=true
else
    echo "⚠ WARNING: Binary may have dynamic dependencies:"
    echo "$LDD_OUTPUT"
    TRULY_STATIC=false
fi
echo ""

# Test if binary runs.
# NOTE(review): the if-condition reflects `head`'s exit status (last command
# in the pipe), which is almost always 0 — the failure branch rarely fires.
echo "Testing binary execution:"
if "$BUILD_DIR/$OUTPUT_NAME" --version 2>&1 | head -5; then
    echo "✓ Binary executes successfully"
else
    echo "⚠ Binary execution test failed (this may be normal if --version is not supported)"
fi
echo ""

# Summary
echo "=========================================="
echo "Build Summary"
echo "=========================================="
echo "Binary: $BUILD_DIR/$OUTPUT_NAME"
echo "Size: $(du -h "$BUILD_DIR/$OUTPUT_NAME" | cut -f1)"
echo "Platform: $PLATFORM"
if [ "$TRULY_STATIC" = true ]; then
    echo "Type: Fully static MUSL binary"
    echo "Portability: Works on ANY Linux distribution"
else
    echo "Type: Static binary (may have minimal dependencies)"
fi
echo ""
echo "✓ Build complete!"
echo ""
echo "To use the binary:"
echo " $BUILD_DIR/$OUTPUT_NAME --port 8888"
echo ""
echo "To verify portability, test on different Linux distributions:"
echo " - Alpine Linux"
echo " - Ubuntu/Debian"
echo " - CentOS/RHEL"
echo " - Arch Linux"
echo ""

27
deploy_static.sh Executable file
View File

@@ -0,0 +1,27 @@
#!/bin/bash
# C-Relay Static Binary Deployment Script
# Deploys build/c_relay_static_x86_64 to the production host over SSH.
# NOTE(review): the original header said "via sshlt" while the commands use
# plain ssh/scp to ubuntu@laantungir.com — confirm the intended host/alias.
set -e

# Configuration
LOCAL_BINARY="build/c_relay_static_x86_64"
# NOTE(review): build_static.sh produces build/c_relay_static_musl_x86_64 —
# confirm which static binary this script is meant to ship.
REMOTE_BINARY_PATH="/usr/local/bin/c_relay/c_relay"
SERVICE_NAME="c-relay"

# FIX: fail fast if the binary has not been built — previously scp failed
# mid-deploy with a confusing error after the remote backup was already taken.
if [[ ! -f "$LOCAL_BINARY" ]]; then
    echo "ERROR: $LOCAL_BINARY not found. Build it first (e.g. ./build_static.sh)." >&2
    exit 1
fi

# Create backup of the currently deployed binary (best-effort; timestamp is
# computed on the local machine).
ssh ubuntu@laantungir.com "sudo cp '$REMOTE_BINARY_PATH' '${REMOTE_BINARY_PATH}.backup.$(date +%Y%m%d_%H%M%S)'" 2>/dev/null || true

# Upload binary to temp location
scp "$LOCAL_BINARY" "ubuntu@laantungir.com:/tmp/c_relay.tmp"

# Install binary: move into place, then fix ownership and execute bit
ssh ubuntu@laantungir.com "sudo mv '/tmp/c_relay.tmp' '$REMOTE_BINARY_PATH'"
ssh ubuntu@laantungir.com "sudo chown c-relay:c-relay '$REMOTE_BINARY_PATH'"
ssh ubuntu@laantungir.com "sudo chmod +x '$REMOTE_BINARY_PATH'"

# Restart service
ssh ubuntu@laantungir.com "sudo systemctl restart '$SERVICE_NAME'"

echo "Deployment complete!"

275
docs/musl_static_build.md Normal file
View File

@@ -0,0 +1,275 @@
# MUSL Static Binary Build Guide
## Overview
This guide explains how to build truly portable MUSL-based static binaries of c-relay using Alpine Linux Docker containers. These binaries have **zero runtime dependencies** and work on any Linux distribution.
## Why MUSL?
### MUSL vs glibc Static Binaries
**MUSL Advantages:**
- **Truly Static**: No hidden dependencies on system libraries
- **Smaller Size**: ~7.6MB vs ~12MB+ for glibc static builds
- **Better Portability**: Works on ANY Linux distribution without modification
- **Cleaner Linking**: No glibc-specific extensions or fortified functions
- **Simpler Deployment**: Single binary, no library compatibility issues
**glibc Limitations:**
- Static builds still require dynamic loading for NSS (Name Service Switch)
- Fortified functions (`__*_chk`) don't exist in MUSL
- Larger binary size due to glibc's complexity
- May have compatibility issues across different glibc versions
## Build Process
### Prerequisites
- Docker installed and running
- Sufficient disk space (~2GB for Docker layers)
- Internet connection (for downloading dependencies)
### Quick Start
```bash
# Build MUSL static binary
./build_static.sh
# The binary will be created at:
# build/c_relay_static_musl_x86_64 (on x86_64)
# build/c_relay_static_musl_arm64 (on ARM64)
```
### What Happens During Build
1. **Alpine Linux Base**: Uses an Alpine Linux base image (MUSL-native; package repositories pinned to the v3.22 branch)
2. **Static Dependencies**: Builds all dependencies with static linking:
- libsecp256k1 (Bitcoin cryptography)
- libwebsockets (WebSocket server)
- OpenSSL (TLS/crypto)
- SQLite (database)
- curl (HTTP client)
- zlib (compression)
3. **nostr_core_lib**: Builds with MUSL-compatible flags:
- Disables glibc fortification (`-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0`)
- Includes required NIPs: 001, 006, 013, 017, 019, 044, 059
- Produces static library (~316KB)
4. **c-relay Compilation**: Links everything statically:
- All source files compiled with `-static` flag
- Fortification disabled to avoid `__*_chk` symbols
- Results in ~7.6MB stripped binary
5. **Verification**: Confirms binary is truly static:
- `ldd` shows "not a dynamic executable"
- `file` shows "statically linked"
- Binary executes successfully
## Technical Details
### Dockerfile Structure
The build uses a multi-stage Dockerfile (`Dockerfile.alpine-musl`):
```dockerfile
# Stage 1: Builder (Alpine Linux)
FROM alpine:3.19 AS builder
- Install build tools and static libraries
- Build dependencies from source
- Compile nostr_core_lib with MUSL flags
- Compile c-relay with full static linking
- Strip binary to reduce size
# Stage 2: Output (scratch)
FROM scratch AS output
- Contains only the final binary
```
### Key Compilation Flags
**For nostr_core_lib:**
```bash
CFLAGS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 -Wall -Wextra -std=c99 -fPIC -O2"
```
**For c-relay:**
```bash
gcc -static -O2 -Wall -Wextra -std=c99 \
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
[source files] \
-lwebsockets -lssl -lcrypto -lsqlite3 -lsecp256k1 \
-lcurl -lz -lpthread -lm -ldl
```
### Fortification Issue
**Problem**: GCC's `-O2` optimization enables fortification by default, replacing standard functions with `__*_chk` variants (e.g., `__snprintf_chk`, `__fprintf_chk`). These are glibc-specific and don't exist in MUSL.
**Solution**: Explicitly disable fortification with:
- `-U_FORTIFY_SOURCE` (undefine any existing definition)
- `-D_FORTIFY_SOURCE=0` (set to 0)
This must be applied to **both** nostr_core_lib and c-relay compilation.
### NIP Dependencies
The build includes these NIPs in nostr_core_lib:
- **NIP-001**: Basic protocol (event creation, signing)
- **NIP-006**: Key derivation from mnemonic
- **NIP-013**: Proof of Work validation
- **NIP-017**: Private Direct Messages
- **NIP-019**: Bech32 encoding (nsec/npub)
- **NIP-044**: Modern encryption
- **NIP-059**: Gift Wrap (required by NIP-017)
## Verification
### Check Binary Type
```bash
# Should show "statically linked"
file build/c_relay_static_musl_x86_64
# Should show "not a dynamic executable"
ldd build/c_relay_static_musl_x86_64
# Check size (should be ~7.6MB)
ls -lh build/c_relay_static_musl_x86_64
```
### Test Execution
```bash
# Show help
./build/c_relay_static_musl_x86_64 --help
# Show version
./build/c_relay_static_musl_x86_64 --version
# Run relay
./build/c_relay_static_musl_x86_64 --port 8888
```
### Cross-Distribution Testing
Test the binary on different distributions to verify portability:
```bash
# Alpine Linux
docker run --rm -v $(pwd)/build:/app alpine:latest /app/c_relay_static_musl_x86_64 --version
# Ubuntu
docker run --rm -v $(pwd)/build:/app ubuntu:latest /app/c_relay_static_musl_x86_64 --version
# Debian
docker run --rm -v $(pwd)/build:/app debian:latest /app/c_relay_static_musl_x86_64 --version
# CentOS
docker run --rm -v $(pwd)/build:/app centos:latest /app/c_relay_static_musl_x86_64 --version
```
## Troubleshooting
### Docker Permission Denied
**Problem**: `permission denied while trying to connect to the Docker daemon socket`
**Solution**: Add user to docker group:
```bash
sudo usermod -aG docker $USER
newgrp docker # Or logout and login again
```
### Build Fails with Fortification Errors
**Problem**: `undefined reference to '__snprintf_chk'` or `'__fprintf_chk'`
**Solution**: Ensure fortification is disabled in both:
1. nostr_core_lib build.sh (line 534)
2. c-relay compilation flags in Dockerfile
### Binary Won't Execute
**Problem**: Binary fails to run on target system
**Checks**:
1. Verify it's truly static: `ldd binary` should show "not a dynamic executable"
2. Check architecture matches: `file binary` should show correct arch
3. Ensure execute permissions: `chmod +x binary`
### Missing NIP Functions
**Problem**: `undefined reference to 'nostr_nip*'` during linking
**Solution**: Add missing NIPs to the build command:
```bash
./build.sh --nips=1,6,13,17,19,44,59
```
## Deployment
### Single Binary Deployment
```bash
# Copy binary to server
scp build/c_relay_static_musl_x86_64 user@server:/opt/c-relay/
# Run on server (no dependencies needed!)
ssh user@server
cd /opt/c-relay
./c_relay_static_musl_x86_64 --port 8888
```
### SystemD Service
```ini
[Unit]
Description=C-Relay Nostr Relay (MUSL Static)
After=network.target
[Service]
Type=simple
User=c-relay
WorkingDirectory=/opt/c-relay
ExecStart=/opt/c-relay/c_relay_static_musl_x86_64
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
```
## Performance Comparison
| Metric | MUSL Static | glibc Static | glibc Dynamic |
|--------|-------------|--------------|---------------|
| Binary Size | 7.6 MB | 12+ MB | 2-3 MB |
| Startup Time | ~50ms | ~60ms | ~40ms |
| Memory Usage | Similar | Similar | Similar |
| Portability | ✓ Any Linux | ⚠ glibc only | ✗ Requires libs |
| Dependencies | None | NSS libs | Many libs |
## Best Practices
1. **Always verify** the binary is truly static before deployment
2. **Test on multiple distributions** to ensure portability
3. **Keep Docker images updated** for security patches
4. **Document the build date** and commit hash for reproducibility
5. **Store binaries** with architecture in filename (e.g., `_x86_64`, `_arm64`)
## References
- [MUSL libc](https://musl.libc.org/)
- [Alpine Linux](https://alpinelinux.org/)
- [Static Linking Best Practices](https://www.musl-libc.org/faq.html)
- [GCC Fortification](https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html)
## Changelog
### 2025-10-11
- Initial MUSL build system implementation
- Alpine Docker-based build process
- Fortification fix for nostr_core_lib
- Complete NIP dependency resolution
- Documentation created

View File

@@ -0,0 +1,150 @@
# MUSL-based fully static C-Relay builder
# Produces portable binaries with zero runtime dependencies
#
# Pin the Alpine release explicitly: the apk repository URLs below are
# hardcoded to v3.22, so "latest" would eventually drift to a newer release
# and break the pinned mirrors (and reproducibility).
FROM alpine:3.22 AS builder

# Configure the primary CDN plus a fallback mirror, then refresh the package
# index with retries to ride out transient mirror/network failures.
RUN echo "http://dl-cdn.alpinelinux.org/alpine/v3.22/main" > /etc/apk/repositories && \
    echo "http://dl-cdn.alpinelinux.org/alpine/v3.22/community" >> /etc/apk/repositories && \
    echo "http://mirror.leaseweb.com/alpine/v3.22/main" >> /etc/apk/repositories && \
    echo "http://mirror.leaseweb.com/alpine/v3.22/community" >> /etc/apk/repositories && \
    apk update --no-cache || (sleep 5 && apk update --no-cache) || (sleep 10 && apk update --no-cache)

# Install the toolchain, static libraries, and headers with a single retry.
# The package list is declared once inside the RUN so the retry attempt
# cannot drift out of sync with the first attempt (the original duplicated
# the whole list).
RUN pkgs="build-base musl-dev git cmake pkgconfig autoconf automake libtool \
openssl-dev openssl-libs-static zlib-dev zlib-static curl-dev curl-static \
sqlite-dev sqlite-static linux-headers" && \
    apk add --no-cache $pkgs || (sleep 10 && apk add --no-cache $pkgs)
# Set working directory
WORKDIR /build
# Build zlib static (if needed)
# Alpine's zlib-static package normally provides /usr/lib/libz.a already;
# this block is only a fallback that builds zlib 1.3.1 from source when the
# static archive is missing.
# NOTE(review): the tarball is fetched without checksum verification -
# consider verifying a pinned sha256 after download.
RUN if [ ! -f /usr/lib/libz.a ]; then \
cd /tmp && \
wget https://zlib.net/zlib-1.3.1.tar.gz && \
tar xzf zlib-1.3.1.tar.gz && \
cd zlib-1.3.1 && \
./configure --static --prefix=/usr && \
make && make install; \
fi
# Build OpenSSL as a static-only library (install_sw skips docs/man pages).
# Use ./config, which auto-detects the build target, instead of hardcoding
# linux-x86_64; this keeps the stage usable for arm64 image builds as well.
# NOTE(review): the tarball is fetched without checksum/signature
# verification - consider pinning a sha256.
RUN cd /tmp && \
    wget https://www.openssl.org/source/openssl-3.0.13.tar.gz && \
    tar xzf openssl-3.0.13.tar.gz && \
    cd openssl-3.0.13 && \
    ./config no-shared --prefix=/usr && \
    make && make install_sw
# Build SQLite with JSON1 extension enabled
# Static-only build of the 3.46.0 autoconf amalgamation with FTS5 full-text
# search enabled.
# NOTE(review): since SQLite 3.38 the JSON functions are built in
# unconditionally, so --enable-json1 and -DSQLITE_ENABLE_JSON1 should be
# redundant no-ops on 3.46 - kept to match the original build flags.
RUN cd /tmp && \
wget https://www.sqlite.org/2024/sqlite-autoconf-3460000.tar.gz && \
tar xzf sqlite-autoconf-3460000.tar.gz && \
cd sqlite-autoconf-3460000 && \
./configure \
--enable-static \
--disable-shared \
--enable-json1 \
--enable-fts5 \
--prefix=/usr \
CFLAGS="-DSQLITE_ENABLE_JSON1=1 -DSQLITE_ENABLE_FTS5=1" && \
make && make install
# Build libsecp256k1 as a static library.
# Pin to a release tag: an unpinned clone of master is non-reproducible and
# can change or break between image builds. --depth 1 keeps the clone small.
RUN cd /tmp && \
    git clone --depth 1 --branch v0.5.0 https://github.com/bitcoin-core/secp256k1.git && \
    cd secp256k1 && \
    ./autogen.sh && \
    ./configure --enable-static --disable-shared --prefix=/usr && \
    make && make install
# Build libwebsockets as a static library against the static OpenSSL built
# above. Pinned to a release tag for reproducible builds (the original
# cloned an unpinned master). Test apps are skipped to cut build time.
RUN cd /tmp && \
    git clone --depth 1 --branch v4.3.3 https://github.com/warmcat/libwebsockets.git && \
    cd libwebsockets && \
    mkdir build && cd build && \
    cmake .. \
      -DLWS_WITH_STATIC=ON \
      -DLWS_WITH_SHARED=OFF \
      -DLWS_WITH_SSL=ON \
      -DLWS_OPENSSL_LIBRARIES="/usr/lib/libssl.a;/usr/lib/libcrypto.a" \
      -DLWS_OPENSSL_INCLUDE_DIRS="/usr/include" \
      -DLWS_WITHOUT_TESTAPPS=ON \
      -DCMAKE_BUILD_TYPE=Release \
      -DCMAKE_INSTALL_PREFIX=/usr && \
    make && make install
# Build a minimal static curl linked against the static OpenSSL above,
# with optional protocol/compression backends disabled.
# BUGFIX: the original passed --without-rtmp, which is not a recognized
# curl configure option (it is silently ignored with a warning); the
# correct flag to disable RTMP support is --without-librtmp.
RUN cd /tmp && \
    wget https://curl.se/download/curl-8.6.0.tar.gz && \
    tar xzf curl-8.6.0.tar.gz && \
    cd curl-8.6.0 && \
    ./configure \
      --disable-shared \
      --enable-static \
      --disable-ldap \
      --without-libidn2 \
      --without-brotli \
      --without-zstd \
      --without-librtmp \
      --without-libpsl \
      --without-krb5 \
      --with-openssl \
      --prefix=/usr && \
    make && make install
# Copy c-relay source
# NOTE(review): this copies the entire build context; ensure .dockerignore
# excludes build output, local databases, and editor files so the context
# stays small and cache-friendly.
COPY . /build/
# Initialize submodules
# (requires the .git directory to be present in the build context)
RUN git submodule update --init --recursive
# Build nostr_core_lib
RUN cd nostr_core_lib && ./build.sh
# Build c-relay static
# NOTE(review): Alpine's gcc already targets musl natively, and musl-dev on
# Alpine does not ship a `musl-gcc` wrapper - confirm CC="musl-gcc -static"
# actually resolves in this image; otherwise CC="gcc -static" is the usual
# spelling. The -g debug info added here is discarded by the strip below.
RUN make clean && \
CC="musl-gcc -static" \
CFLAGS="-O2 -Wall -Wextra -std=c99 -g" \
LDFLAGS="-static -Wl,--whole-archive -lpthread -Wl,--no-whole-archive" \
LIBS="-lsqlite3 -lwebsockets -lz -ldl -lpthread -lm -lsecp256k1 -lssl -lcrypto -lcurl" \
make
# Strip binary for size
RUN strip build/c_relay_x86
# Multi-stage build to produce minimal output
# The scratch stage contains only the final binary, renamed with its libc
# and architecture for unambiguous distribution.
FROM scratch AS output
COPY --from=builder /build/build/c_relay_x86 /c_relay_static_musl_x86_64

View File

@@ -159,9 +159,19 @@ fi
rm -rf dev-config/ 2>/dev/null
rm -f db/c_nostr_relay.db* 2>/dev/null
# Build the project first
echo "Building project..."
make clean all
# Embed web files into C headers before building
echo "Embedding web files..."
./embed_web_files.sh
# Build the project first - use static build by default
echo "Building project (static binary with SQLite JSON1 extension)..."
./build_static.sh
# Fallback to regular build if static build fails
if [ $? -ne 0 ]; then
echo "Static build failed, falling back to regular build..."
make clean all
fi
# Restore database files if preserving
if [ "$PRESERVE_DATABASE" = true ] && [ -d "/tmp/relay_backup_$$" ]; then
@@ -177,22 +187,34 @@ if [ $? -ne 0 ]; then
exit 1
fi
# Check if relay binary exists after build - detect architecture
# Check if relay binary exists after build - prefer static binary, fallback to regular
ARCH=$(uname -m)
case "$ARCH" in
x86_64)
BINARY_PATH="./build/c_relay_x86"
STATIC_BINARY="./build/c_relay_static_x86_64"
REGULAR_BINARY="./build/c_relay_x86"
;;
aarch64|arm64)
BINARY_PATH="./build/c_relay_arm64"
STATIC_BINARY="./build/c_relay_static_arm64"
REGULAR_BINARY="./build/c_relay_arm64"
;;
*)
BINARY_PATH="./build/c_relay_$ARCH"
STATIC_BINARY="./build/c_relay_static_$ARCH"
REGULAR_BINARY="./build/c_relay_$ARCH"
;;
esac
if [ ! -f "$BINARY_PATH" ]; then
echo "ERROR: Relay binary not found at $BINARY_PATH after build. Build may have failed."
# Prefer static binary if available
if [ -f "$STATIC_BINARY" ]; then
BINARY_PATH="$STATIC_BINARY"
echo "Using static binary: $BINARY_PATH"
elif [ -f "$REGULAR_BINARY" ]; then
BINARY_PATH="$REGULAR_BINARY"
echo "Using regular binary: $BINARY_PATH"
else
echo "ERROR: No relay binary found. Checked:"
echo " - $STATIC_BINARY"
echo " - $REGULAR_BINARY"
exit 1
fi

View File

@@ -1 +1 @@
1007305
2875464

444
src/api.c
View File

@@ -9,18 +9,8 @@
#include <libwebsockets.h>
#include "api.h"
#include "embedded_web_content.h"
#include "../nostr_core_lib/nostr_core/nip017.h"
#include "../nostr_core_lib/nostr_core/nip044.h"
#include "../nostr_core_lib/nostr_core/nostr_core.h"
#include "config.h"
// Forward declarations for event creation and signing
cJSON* nostr_create_and_sign_event(int kind, const char* content, cJSON* tags,
const unsigned char* privkey_bytes, time_t created_at);
// Forward declaration for stats generation
char* generate_stats_json(void);
// Forward declarations for logging functions
void log_info(const char* message);
@@ -33,8 +23,6 @@ int store_event(cJSON* event);
// Handle HTTP request for embedded files (assumes GET)
int handle_embedded_file_request(struct lws* wsi, const char* requested_uri) {
log_info("Handling embedded file request");
const char* file_path;
// Handle /api requests
@@ -134,7 +122,6 @@ int handle_embedded_file_request(struct lws* wsi, const char* requested_uri) {
// Request callback for body transmission
lws_callback_on_writable(wsi);
log_success("Embedded file headers sent, body transmission scheduled");
return 0;
}
@@ -175,436 +162,5 @@ int handle_embedded_file_writeable(struct lws* wsi) {
free(session_data);
lws_set_wsi_user(wsi, NULL);
log_success("Embedded file served successfully");
return 0;
}
// =============================================================================
// NIP-17 GIFT WRAP ADMIN MESSAGING FUNCTIONS
// =============================================================================
// Check whether an event is a NIP-17 gift wrap addressed to this relay.
// A qualifying event is a kind 1059 wrap whose tag list contains a
// ["p", <relay pubkey>, ...] entry. Returns 1 when addressed to this
// relay, 0 otherwise (including on any malformed input).
int is_nip17_gift_wrap_for_relay(cJSON* event) {
    if (!event || !cJSON_IsObject(event)) {
        return 0;
    }

    /* Gift wraps are always kind 1059 (NIP-59). */
    cJSON* kind_obj = cJSON_GetObjectItem(event, "kind");
    if (!kind_obj || !cJSON_IsNumber(kind_obj) || (int)cJSON_GetNumberValue(kind_obj) != 1059) {
        return 0;
    }

    cJSON* tags = cJSON_GetObjectItem(event, "tags");
    if (!tags || !cJSON_IsArray(tags)) {
        return 0;
    }

    const char* relay_pubkey = get_relay_pubkey_cached();
    if (!relay_pubkey) {
        log_error("NIP-17: Could not get relay pubkey for validation");
        return 0;
    }

    /* Scan the tag list for a "p" tag whose value is our pubkey. */
    cJSON* tag = NULL;
    cJSON_ArrayForEach(tag, tags) {
        if (!cJSON_IsArray(tag) || cJSON_GetArraySize(tag) < 2) {
            continue;
        }
        cJSON* tag_name = cJSON_GetArrayItem(tag, 0);
        cJSON* tag_value = cJSON_GetArrayItem(tag, 1);
        if (!cJSON_IsString(tag_name) || !cJSON_IsString(tag_value)) {
            continue;
        }
        if (strcmp(cJSON_GetStringValue(tag_name), "p") == 0 &&
            strcmp(cJSON_GetStringValue(tag_value), relay_pubkey) == 0) {
            return 1; /* addressed to this relay */
        }
    }

    return 0; /* no matching "p" tag */
}
// Process a NIP-17 admin command carried in a decrypted DM event.
//
// The DM content must be a JSON array of command strings. A leading
// "stats" command is handled in-line: the relay builds a stats report and
// gift-wraps it back to the DM's author. Every other command is wrapped in
// a synthetic kind 23456 event and routed through the regular admin
// pipeline (process_admin_event_in_config).
//
// Parameters:
//   dm_event      - decrypted inner DM event (caller retains ownership)
//   error_message - buffer receiving a human-readable reason on failure
//   error_size    - size of error_message in bytes
//   wsi           - originating websocket connection (passed through)
//
// Returns 0 on success, -1 on failure (error_message is filled in).
int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t error_size, struct lws* wsi) {
    if (!dm_event || !error_message) {
        return -1;
    }

    // Extract the command payload from the DM content.
    cJSON* content_obj = cJSON_GetObjectItem(dm_event, "content");
    if (!content_obj || !cJSON_IsString(content_obj)) {
        snprintf(error_message, error_size, "NIP-17: DM missing content");
        return -1;
    }
    const char* dm_content = cJSON_GetStringValue(content_obj);

    log_info("NIP-17: Processing admin command from DM content");

    // The DM content must parse as a JSON array, e.g. ["stats"].
    cJSON* command_array = cJSON_Parse(dm_content);
    if (!command_array || !cJSON_IsArray(command_array)) {
        // BUGFIX: cJSON_Parse() can succeed but yield a non-array value;
        // the original leaked it here. cJSON_Delete(NULL) is a safe no-op.
        cJSON_Delete(command_array);
        snprintf(error_message, error_size, "NIP-17: DM content is not valid JSON array");
        return -1;
    }

    // Handle the "stats" command directly: build the report and DM it back
    // to the sender, gift-wrapped per NIP-17/NIP-59.
    if (cJSON_GetArraySize(command_array) > 0) {
        cJSON* first_item = cJSON_GetArrayItem(command_array, 0);
        if (cJSON_IsString(first_item) && strcmp(cJSON_GetStringValue(first_item), "stats") == 0) {
            log_info("NIP-17: Processing 'stats' command directly");

            char* stats_json = generate_stats_json();
            if (!stats_json) {
                cJSON_Delete(command_array);
                snprintf(error_message, error_size, "NIP-17: Failed to generate stats");
                return -1;
            }

            // The response is addressed to the DM's author.
            cJSON* sender_pubkey_obj = cJSON_GetObjectItem(dm_event, "pubkey");
            if (!sender_pubkey_obj || !cJSON_IsString(sender_pubkey_obj)) {
                free(stats_json);
                cJSON_Delete(command_array);
                snprintf(error_message, error_size, "NIP-17: DM missing sender pubkey");
                return -1;
            }
            const char* sender_pubkey = cJSON_GetStringValue(sender_pubkey_obj);

            // Relay keys are needed to author and gift-wrap the response.
            const char* relay_pubkey = get_relay_pubkey_cached();
            char* relay_privkey_hex = get_relay_private_key();
            if (!relay_pubkey || !relay_privkey_hex) {
                free(stats_json);
                free(relay_privkey_hex); /* free(NULL) is a no-op */
                cJSON_Delete(command_array);
                snprintf(error_message, error_size, "NIP-17: Could not get relay keys");
                return -1;
            }

            unsigned char relay_privkey[32];
            if (nostr_hex_to_bytes(relay_privkey_hex, relay_privkey, sizeof(relay_privkey)) != 0) {
                free(stats_json);
                free(relay_privkey_hex);
                cJSON_Delete(command_array);
                snprintf(error_message, error_size, "NIP-17: Failed to convert relay private key");
                return -1;
            }
            free(relay_privkey_hex);

            // Build the unsigned chat event carrying the stats JSON.
            cJSON* dm_response = nostr_nip17_create_chat_event(
                stats_json,                    // message content
                (const char**)&sender_pubkey,  // recipient pubkeys
                1,                             // num recipients
                NULL,                          // subject (optional)
                NULL,                          // reply_to_event_id (optional)
                NULL,                          // reply_relay_url (optional)
                relay_pubkey                   // sender pubkey
            );
            free(stats_json);
            if (!dm_response) {
                memset(relay_privkey, 0, sizeof(relay_privkey)); // scrub key material
                cJSON_Delete(command_array);
                snprintf(error_message, error_size, "NIP-17: Failed to create DM response event");
                return -1;
            }

            // Seal and gift-wrap the response for the recipient.
            cJSON* gift_wraps[1];
            int send_result = nostr_nip17_send_dm(
                dm_response,                   // dm_event
                (const char**)&sender_pubkey,  // recipient_pubkeys
                1,                             // num_recipients
                relay_privkey,                 // sender_private_key
                gift_wraps,                    // gift_wraps_out
                1                              // max_gift_wraps
            );
            cJSON_Delete(dm_response);
            // Scrub the private key from the stack as soon as it is unneeded.
            memset(relay_privkey, 0, sizeof(relay_privkey));

            if (send_result != 1 || !gift_wraps[0]) {
                cJSON_Delete(command_array);
                snprintf(error_message, error_size, "NIP-17: Failed to create and sign response gift wrap");
                return -1;
            }

            // Persist the gift wrap so the recipient can fetch it later.
            int store_result = store_event(gift_wraps[0]);
            cJSON_Delete(gift_wraps[0]);
            if (store_result != 0) {
                cJSON_Delete(command_array);
                snprintf(error_message, error_size, "NIP-17: Failed to store response gift wrap");
                return -1;
            }

            cJSON_Delete(command_array);
            log_success("NIP-17: Stats command processed successfully");
            return 0;
        }
    }

    // Any other command: wrap the DM content in a synthetic kind 23456 admin
    // event and reuse the existing admin processing pipeline.
    cJSON* synthetic_event = cJSON_CreateObject();
    if (!synthetic_event) {
        // BUGFIX: the original dereferenced a potential NULL here on OOM.
        cJSON_Delete(command_array);
        snprintf(error_message, error_size, "NIP-17: Out of memory creating admin event");
        return -1;
    }
    cJSON_AddNumberToObject(synthetic_event, "kind", 23456);
    cJSON_AddStringToObject(synthetic_event, "content", dm_content);

    // Carry the original author over so authorization checks still apply.
    cJSON* pubkey_obj = cJSON_GetObjectItem(dm_event, "pubkey");
    if (pubkey_obj && cJSON_IsString(pubkey_obj)) {
        cJSON_AddStringToObject(synthetic_event, "pubkey", cJSON_GetStringValue(pubkey_obj));
    }

    // Deep-copy tags so the synthetic event owns its own tree.
    cJSON* tags = cJSON_GetObjectItem(dm_event, "tags");
    if (tags) {
        cJSON_AddItemToObject(synthetic_event, "tags", cJSON_Duplicate(tags, 1));
    }

    int result = process_admin_event_in_config(synthetic_event, error_message, error_size, wsi);
    cJSON_Delete(synthetic_event);
    cJSON_Delete(command_array);
    return result;
}
// Generate a JSON document of relay statistics from the SQLite database.
//
// Collects: database file size, total event count, per-kind distribution,
// time-window counts, top-10 pubkeys by event count, and the oldest/latest
// event timestamps. The per-section queries rely on the schema's views
// (event_kinds_view, time_stats_view, top_pubkeys_view); a failed prepare
// simply leaves that section empty rather than aborting.
//
// Returns a heap-allocated JSON string (caller must free()), or NULL when
// the database is unavailable or serialization fails.
char* generate_stats_json(void) {
    extern sqlite3* g_db;

    if (!g_db) {
        log_error("Database not available for stats generation");
        return NULL;
    }

    log_info("Generating stats JSON from database");

    cJSON* response = cJSON_CreateObject();
    if (!response) {
        // BUGFIX: the original did not check for allocation failure here.
        log_error("Failed to generate stats JSON");
        return NULL;
    }
    cJSON_AddStringToObject(response, "query_type", "stats_query");
    cJSON_AddNumberToObject(response, "timestamp", (double)time(NULL));

    // Database file size on disk (0 if the file cannot be stat'ed).
    extern char g_database_path[512];
    struct stat db_stat;
    long long db_size = 0;
    if (stat(g_database_path, &db_stat) == 0) {
        db_size = db_stat.st_size;
    }
    cJSON_AddNumberToObject(response, "database_size_bytes", db_size);

    // Total number of stored events.
    sqlite3_stmt* stmt;
    if (sqlite3_prepare_v2(g_db, "SELECT COUNT(*) FROM events", -1, &stmt, NULL) == SQLITE_OK) {
        if (sqlite3_step(stmt) == SQLITE_ROW) {
            cJSON_AddNumberToObject(response, "total_events", sqlite3_column_int64(stmt, 0));
        }
        sqlite3_finalize(stmt);
    }

    // Event kind distribution.
    cJSON* event_kinds = cJSON_CreateArray();
    if (sqlite3_prepare_v2(g_db, "SELECT kind, count, percentage FROM event_kinds_view ORDER BY count DESC", -1, &stmt, NULL) == SQLITE_OK) {
        while (sqlite3_step(stmt) == SQLITE_ROW) {
            cJSON* kind_obj = cJSON_CreateObject();
            cJSON_AddNumberToObject(kind_obj, "kind", sqlite3_column_int(stmt, 0));
            cJSON_AddNumberToObject(kind_obj, "count", sqlite3_column_int64(stmt, 1));
            cJSON_AddNumberToObject(kind_obj, "percentage", sqlite3_column_double(stmt, 2));
            cJSON_AddItemToArray(event_kinds, kind_obj);
        }
        sqlite3_finalize(stmt);
    }
    cJSON_AddItemToObject(response, "event_kinds", event_kinds);

    // Time-window statistics keyed by the view's "period" column.
    cJSON* time_stats = cJSON_CreateObject();
    if (sqlite3_prepare_v2(g_db, "SELECT period, total_events FROM time_stats_view", -1, &stmt, NULL) == SQLITE_OK) {
        while (sqlite3_step(stmt) == SQLITE_ROW) {
            const char* period = (const char*)sqlite3_column_text(stmt, 0);
            sqlite3_int64 count = sqlite3_column_int64(stmt, 1);
            // BUGFIX: sqlite3_column_text() returns NULL for NULL columns;
            // the original passed it straight to strcmp() (undefined behavior).
            if (!period) {
                continue;
            }
            if (strcmp(period, "total") == 0) {
                cJSON_AddNumberToObject(time_stats, "total", count);
            } else if (strcmp(period, "24h") == 0) {
                cJSON_AddNumberToObject(time_stats, "last_24h", count);
            } else if (strcmp(period, "7d") == 0) {
                cJSON_AddNumberToObject(time_stats, "last_7d", count);
            } else if (strcmp(period, "30d") == 0) {
                cJSON_AddNumberToObject(time_stats, "last_30d", count);
            }
        }
        sqlite3_finalize(stmt);
    }
    cJSON_AddItemToObject(response, "time_stats", time_stats);

    // Top 10 pubkeys by event count.
    cJSON* top_pubkeys = cJSON_CreateArray();
    if (sqlite3_prepare_v2(g_db, "SELECT pubkey, event_count, percentage FROM top_pubkeys_view ORDER BY event_count DESC LIMIT 10", -1, &stmt, NULL) == SQLITE_OK) {
        while (sqlite3_step(stmt) == SQLITE_ROW) {
            cJSON* pubkey_obj = cJSON_CreateObject();
            const char* pubkey = (const char*)sqlite3_column_text(stmt, 0);
            cJSON_AddStringToObject(pubkey_obj, "pubkey", pubkey ? pubkey : "");
            cJSON_AddNumberToObject(pubkey_obj, "event_count", sqlite3_column_int64(stmt, 1));
            cJSON_AddNumberToObject(pubkey_obj, "percentage", sqlite3_column_double(stmt, 2));
            cJSON_AddItemToArray(top_pubkeys, pubkey_obj);
        }
        sqlite3_finalize(stmt);
    }
    cJSON_AddItemToObject(response, "top_pubkeys", top_pubkeys);

    // Oldest event timestamp doubles as an approximate DB creation time.
    if (sqlite3_prepare_v2(g_db, "SELECT MIN(created_at) FROM events", -1, &stmt, NULL) == SQLITE_OK) {
        if (sqlite3_step(stmt) == SQLITE_ROW) {
            sqlite3_int64 oldest_timestamp = sqlite3_column_int64(stmt, 0);
            if (oldest_timestamp > 0) {
                cJSON_AddNumberToObject(response, "database_created_at", (double)oldest_timestamp);
            }
        }
        sqlite3_finalize(stmt);
    }

    // Latest event timestamp.
    if (sqlite3_prepare_v2(g_db, "SELECT MAX(created_at) FROM events", -1, &stmt, NULL) == SQLITE_OK) {
        if (sqlite3_step(stmt) == SQLITE_ROW) {
            sqlite3_int64 latest_timestamp = sqlite3_column_int64(stmt, 0);
            if (latest_timestamp > 0) {
                cJSON_AddNumberToObject(response, "latest_event_at", (double)latest_timestamp);
            }
        }
        sqlite3_finalize(stmt);
    }

    // Serialize and release the tree; caller owns the returned string.
    char* json_string = cJSON_Print(response);
    cJSON_Delete(response);

    if (json_string) {
        log_success("Stats JSON generated successfully");
    } else {
        log_error("Failed to generate stats JSON");
    }
    return json_string;
}
// Main NIP-17 processing entry point for an incoming gift wrap (kind 1059).
//
// Pipeline: validate the wrap is addressed to this relay, decrypt it with
// the relay's private key, execute the admin command it carries, and - on
// success - gift-wrap a confirmation DM back to the sender (best effort;
// a failed confirmation does not change the result).
//
// Parameters:
//   gift_wrap_event - the received kind 1059 event (caller retains ownership)
//   error_message   - buffer receiving a human-readable reason on failure
//   error_size      - size of error_message in bytes
//   wsi             - originating websocket connection (passed through)
//
// Returns 0 on success, -1 on failure.
int process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message, size_t error_size, struct lws* wsi) {
    if (!gift_wrap_event || !error_message) {
        return -1;
    }

    // Step 1: Validate the wrap is addressed to this relay.
    if (!is_nip17_gift_wrap_for_relay(gift_wrap_event)) {
        snprintf(error_message, error_size, "NIP-17: Event is not a valid gift wrap for this relay");
        return -1;
    }

    // Step 2: Fetch and decode the relay private key for decryption.
    char* relay_privkey_hex = get_relay_private_key();
    if (!relay_privkey_hex) {
        snprintf(error_message, error_size, "NIP-17: Could not get relay private key for decryption");
        return -1;
    }

    unsigned char relay_privkey[32];
    if (nostr_hex_to_bytes(relay_privkey_hex, relay_privkey, sizeof(relay_privkey)) != 0) {
        log_error("NIP-17: Failed to convert relay private key from hex");
        free(relay_privkey_hex);
        snprintf(error_message, error_size, "NIP-17: Failed to convert relay private key");
        return -1;
    }
    free(relay_privkey_hex);

    // Step 3: Decrypt and parse the inner DM event.
    log_info("NIP-17: Attempting to decrypt gift wrap with nostr_nip17_receive_dm");
    cJSON* inner_dm = nostr_nip17_receive_dm(gift_wrap_event, relay_privkey);
    if (!inner_dm) {
        log_error("NIP-17: nostr_nip17_receive_dm returned NULL");
        // Debug aid: log a truncated dump of the gift wrap that failed.
        char* gift_wrap_debug = cJSON_Print(gift_wrap_event);
        if (gift_wrap_debug) {
            char debug_msg[1024];
            snprintf(debug_msg, sizeof(debug_msg), "NIP-17: Gift wrap event: %.500s", gift_wrap_debug);
            log_error(debug_msg);
            free(gift_wrap_debug);
        }
        // SECURITY FIX: the previous implementation logged the first 16 hex
        // characters (64 bits) of the relay private key here. Key material
        // must never reach the logs; the debug block has been removed and
        // the key is scrubbed from the stack before returning.
        memset(relay_privkey, 0, sizeof(relay_privkey));
        snprintf(error_message, error_size, "NIP-17: Failed to decrypt and parse inner DM event");
        return -1;
    }
    log_info("NIP-17: Successfully decrypted gift wrap");

    // Step 4: Execute the admin command carried by the inner DM.
    int result = process_nip17_admin_command(inner_dm, error_message, error_size, wsi);

    // Step 5: On success, gift-wrap a confirmation DM back to the sender.
    // NOTE(review): this replies to the OUTER gift-wrap pubkey, which per
    // NIP-59 is typically a one-time ephemeral key - confirm the response
    // should not instead target the inner DM's author.
    if (result == 0) {
        cJSON* sender_pubkey_obj = cJSON_GetObjectItem(gift_wrap_event, "pubkey");
        if (sender_pubkey_obj && cJSON_IsString(sender_pubkey_obj)) {
            const char* sender_pubkey = cJSON_GetStringValue(sender_pubkey_obj);

            char response_content[1024];
            snprintf(response_content, sizeof(response_content),
                     "[\"command_processed\", \"success\", \"%s\"]", "NIP-17 admin command executed");

            const char* relay_pubkey = get_relay_pubkey_cached();
            if (relay_pubkey) {
                cJSON* success_dm = nostr_nip17_create_chat_event(
                    response_content,              // message content
                    (const char**)&sender_pubkey,  // recipient pubkeys
                    1,                             // num recipients
                    NULL,                          // subject (optional)
                    NULL,                          // reply_to_event_id (optional)
                    NULL,                          // reply_relay_url (optional)
                    relay_pubkey                   // sender pubkey
                );
                if (success_dm) {
                    cJSON* success_gift_wraps[1];
                    int send_result = nostr_nip17_send_dm(
                        success_dm,                    // dm_event
                        (const char**)&sender_pubkey,  // recipient_pubkeys
                        1,                             // num_recipients
                        relay_privkey,                 // sender_private_key
                        success_gift_wraps,            // gift_wraps_out
                        1                              // max_gift_wraps
                    );
                    cJSON_Delete(success_dm);
                    if (send_result == 1 && success_gift_wraps[0]) {
                        store_event(success_gift_wraps[0]);
                        cJSON_Delete(success_gift_wraps[0]);
                    }
                }
            }
        }
    }

    // Scrub the private key from the stack before returning.
    memset(relay_privkey, 0, sizeof(relay_privkey));
    cJSON_Delete(inner_dm);
    return result;
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -12,4 +12,16 @@ int process_dm_admin_command(cJSON* command_array, cJSON* event, char* error_mes
// Returns 1 if it's a valid command array, 0 if it should fall back to inner event parsing
int is_dm_command_array(const char* decrypted_content);
// NIP-17 gift wrap processing functions
// NOTE(review): this prototype returns cJSON*, but the (removed) api.c
// definition in this changeset returned int - confirm the dm_admin.c
// definition and all callers agree on the return type.
cJSON* process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message, size_t error_size, struct lws* wsi);
// Executes a decrypted DM admin command; returns 0 on success, -1 on
// failure with error_message filled in.
int process_nip17_admin_command(cJSON* dm_event, char* error_message, size_t error_size, struct lws* wsi);
// Returns 1 when the event is a kind 1059 gift wrap whose "p" tag targets
// this relay's pubkey, 0 otherwise.
int is_nip17_gift_wrap_for_relay(cJSON* gift_wrap_event);
// Builds a heap-allocated JSON string of relay statistics (caller frees).
char* generate_stats_json(void);
// Unified NIP-17 response functions
// Gift-wraps response_content for sender_pubkey; presumably returns 0 on
// success - confirm against the dm_admin.c implementation.
int send_nip17_response(const char* sender_pubkey, const char* response_content,
char* error_message, size_t error_size);
// Heap-allocated, human-readable configuration dump (caller frees).
char* generate_config_text(void);
// Heap-allocated, human-readable statistics dump (caller frees).
char* generate_stats_text(void);
#endif // DM_ADMIN_H

File diff suppressed because one or more lines are too long

View File

@@ -19,6 +19,7 @@
#include "../nostr_core_lib/cjson/cJSON.h"
#include "../nostr_core_lib/nostr_core/nostr_core.h"
#include "../nostr_core_lib/nostr_core/nip013.h" // NIP-13: Proof of Work
#include "../nostr_core_lib/nostr_core/nip019.h" // NIP-19: bech32-encoded entities
#include "config.h" // Configuration management system
#include "sql_schema.h" // Embedded database schema
#include "websockets.h" // WebSocket protocol implementation
@@ -45,6 +46,8 @@ int nostr_nip42_verify_auth_event(cJSON *event, const char *challenge_id,
// Global state
sqlite3* g_db = NULL; // Non-static so config.c can access it
int g_server_running = 1; // Non-static so websockets.c can access it
volatile sig_atomic_t g_shutdown_flag = 0; // Non-static so config.c can access it for restart functionality
int g_restart_requested = 0; // Non-static so config.c can access it for restart functionality
struct lws_context *ws_context = NULL; // Non-static so websockets.c can access it
// NIP-11 relay information structure
@@ -123,6 +126,22 @@ int process_admin_event_in_config(cJSON* event, char* error_message, size_t erro
// Forward declaration for NIP-45 COUNT message handling
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);
// Parameter binding helpers for SQL queries
static void add_bind_param(char*** params, int* count, int* capacity, const char* value) {
if (*count >= *capacity) {
*capacity = *capacity == 0 ? 16 : *capacity * 2;
*params = realloc(*params, *capacity * sizeof(char*));
}
(*params)[(*count)++] = strdup(value);
}
static void free_bind_params(char** params, int count) {
for (int i = 0; i < count; i++) {
free(params[i]);
}
free(params);
}
// Forward declaration for enhanced admin event authorization
int is_authorized_admin_event(cJSON* event, char* error_message, size_t error_size);
@@ -216,13 +235,11 @@ void update_subscription_manager_config(void) {
"Subscription limits: max_per_client=%d, max_total=%d",
g_subscription_manager.max_subscriptions_per_client,
g_subscription_manager.max_total_subscriptions);
log_info(config_msg);
}
// Signal handler for graceful shutdown
void signal_handler(int sig) {
if (sig == SIGINT || sig == SIGTERM) {
log_info("Received shutdown signal");
g_server_running = 0;
}
}
@@ -284,10 +301,6 @@ int init_database(const char* database_path_override) {
return -1;
}
char success_msg[256];
snprintf(success_msg, sizeof(success_msg), "Database connection established: %s", db_path);
log_success(success_msg);
// Check if database is already initialized by looking for the events table
const char* check_sql = "SELECT name FROM sqlite_master WHERE type='table' AND name='events'";
sqlite3_stmt* check_stmt;
@@ -297,59 +310,48 @@ int init_database(const char* database_path_override) {
sqlite3_finalize(check_stmt);
if (has_events_table) {
log_info("Database schema already exists, checking version");
// Check existing schema version and migrate if needed
const char* version_sql = "SELECT value FROM schema_info WHERE key = 'version'";
sqlite3_stmt* version_stmt;
const char* db_version = NULL;
int needs_migration = 0;
if (sqlite3_prepare_v2(g_db, version_sql, -1, &version_stmt, NULL) == SQLITE_OK) {
if (sqlite3_step(version_stmt) == SQLITE_ROW) {
db_version = (char*)sqlite3_column_text(version_stmt, 0);
char version_msg[256];
snprintf(version_msg, sizeof(version_msg), "Existing database schema version: %s",
db_version ? db_version : "unknown");
log_info(version_msg);
// Check if migration is needed
if (!db_version || strcmp(db_version, "5") == 0) {
needs_migration = 1;
log_info("Database migration needed: v5 -> v6 (adding auth_rules table)");
} else if (strcmp(db_version, "6") == 0) {
log_info("Database is already at current schema version v6");
// Database is already at current schema version v6
} else if (strcmp(db_version, EMBEDDED_SCHEMA_VERSION) == 0) {
log_info("Database is at current schema version");
// Database is at current schema version
} else {
char warning_msg[256];
snprintf(warning_msg, sizeof(warning_msg), "Unknown database schema version: %s", db_version);
log_warning(warning_msg);
}
} else {
log_info("Database exists but no version information found, assuming migration needed");
needs_migration = 1;
}
sqlite3_finalize(version_stmt);
} else {
log_info("Cannot read schema version, assuming migration needed");
needs_migration = 1;
}
// Perform migration if needed
if (needs_migration) {
log_info("Performing database schema migration to v6");
// Check if auth_rules table already exists
const char* check_auth_rules_sql = "SELECT name FROM sqlite_master WHERE type='table' AND name='auth_rules'";
sqlite3_stmt* check_stmt;
int has_auth_rules = 0;
if (sqlite3_prepare_v2(g_db, check_auth_rules_sql, -1, &check_stmt, NULL) == SQLITE_OK) {
has_auth_rules = (sqlite3_step(check_stmt) == SQLITE_ROW);
sqlite3_finalize(check_stmt);
}
if (!has_auth_rules) {
// Add auth_rules table matching sql_schema.h
const char* create_auth_rules_sql =
@@ -375,7 +377,6 @@ int init_database(const char* database_path_override) {
if (error_msg) sqlite3_free(error_msg);
return -1;
}
log_success("Created auth_rules table");
// Add indexes for auth_rules table
const char* create_auth_rules_indexes_sql =
@@ -393,9 +394,8 @@ int init_database(const char* database_path_override) {
if (index_error_msg) sqlite3_free(index_error_msg);
return -1;
}
log_success("Created auth_rules indexes");
} else {
log_info("auth_rules table already exists, skipping creation");
// auth_rules table already exists, skipping creation
}
// Update schema version to v6
@@ -414,12 +414,10 @@ int init_database(const char* database_path_override) {
return -1;
}
log_success("Database migration to v6 completed successfully");
}
} else {
// Initialize database schema using embedded SQL
log_info("Initializing database schema from embedded SQL");
// Execute the embedded schema SQL
char* error_msg = NULL;
rc = sqlite3_exec(g_db, EMBEDDED_SCHEMA_SQL, NULL, NULL, &error_msg);
@@ -434,13 +432,6 @@ int init_database(const char* database_path_override) {
return -1;
}
log_success("Database schema initialized successfully");
// Log schema version information
char version_msg[256];
snprintf(version_msg, sizeof(version_msg), "Database schema version: %s",
EMBEDDED_SCHEMA_VERSION);
log_info(version_msg);
}
} else {
log_error("Failed to check existing database schema");
@@ -455,7 +446,6 @@ void close_database() {
if (g_db) {
sqlite3_close(g_db);
g_db = NULL;
log_info("Database connection closed");
}
}
@@ -686,7 +676,6 @@ int store_event(cJSON* event) {
}
free(tags_json);
log_success("Event stored in database");
return 0;
}
@@ -749,13 +738,99 @@ cJSON* retrieve_event(const char* event_id) {
/////////////////////////////////////////////////////////////////////////////////////////
int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss) {
log_info("Handling REQ message for persistent subscription");
if (!cJSON_IsArray(filters)) {
log_error("REQ filters is not an array");
return 0;
}
// EARLY SUBSCRIPTION LIMIT CHECK - Check limits BEFORE any processing
if (pss) {
time_t current_time = time(NULL);
// Check if client is currently rate limited due to excessive failed attempts
if (pss->rate_limit_until > current_time) {
char rate_limit_msg[256];
int remaining_seconds = (int)(pss->rate_limit_until - current_time);
snprintf(rate_limit_msg, sizeof(rate_limit_msg),
"Rate limited due to excessive failed subscription attempts. Try again in %d seconds.", remaining_seconds);
// Send CLOSED notice for rate limiting
cJSON* closed_msg = cJSON_CreateArray();
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("CLOSED"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("error: rate limited"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(rate_limit_msg));
char* closed_str = cJSON_Print(closed_msg);
if (closed_str) {
size_t closed_len = strlen(closed_str);
unsigned char* buf = malloc(LWS_PRE + closed_len);
if (buf) {
memcpy(buf + LWS_PRE, closed_str, closed_len);
lws_write(wsi, buf + LWS_PRE, closed_len, LWS_WRITE_TEXT);
free(buf);
}
free(closed_str);
}
cJSON_Delete(closed_msg);
// Update rate limiting counters
pss->failed_subscription_attempts++;
pss->last_failed_attempt = current_time;
return 0;
}
// Check session subscription limits
if (pss->subscription_count >= g_subscription_manager.max_subscriptions_per_client) {
log_error("Maximum subscriptions per client exceeded");
// Update rate limiting counters for failed attempt
pss->failed_subscription_attempts++;
pss->last_failed_attempt = current_time;
pss->consecutive_failures++;
// Implement progressive backoff: 1s, 5s, 30s, 300s (5min) based on consecutive failures
int backoff_seconds = 1;
if (pss->consecutive_failures >= 10) backoff_seconds = 300; // 5 minutes
else if (pss->consecutive_failures >= 5) backoff_seconds = 30; // 30 seconds
else if (pss->consecutive_failures >= 3) backoff_seconds = 5; // 5 seconds
pss->rate_limit_until = current_time + backoff_seconds;
// Send CLOSED notice with backoff information
cJSON* closed_msg = cJSON_CreateArray();
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("CLOSED"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("error: too many subscriptions"));
char backoff_msg[256];
snprintf(backoff_msg, sizeof(backoff_msg),
"Maximum subscriptions per client exceeded. Backoff for %d seconds.", backoff_seconds);
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(backoff_msg));
char* closed_str = cJSON_Print(closed_msg);
if (closed_str) {
size_t closed_len = strlen(closed_str);
unsigned char* buf = malloc(LWS_PRE + closed_len);
if (buf) {
memcpy(buf + LWS_PRE, closed_str, closed_len);
lws_write(wsi, buf + LWS_PRE, closed_len, LWS_WRITE_TEXT);
free(buf);
}
free(closed_str);
}
cJSON_Delete(closed_msg);
return 0;
}
}
// Parameter binding helpers
char** bind_params = NULL;
int bind_param_count = 0;
int bind_param_capacity = 0;
// Check for kind 33334 configuration event requests BEFORE creating subscription
int config_events_sent = 0;
int has_config_request = 0;
@@ -791,11 +866,6 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
cJSON_Delete(filters_array);
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg),
"Generated %d synthetic config events for subscription %s",
config_events_sent, sub_id);
log_info(debug_msg);
break; // Only generate once per subscription
}
}
@@ -804,32 +874,6 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
// If only config events were requested, we can return early after sending EOSE
// But still create the subscription for future config updates
// Check session subscription limits
if (pss && pss->subscription_count >= g_subscription_manager.max_subscriptions_per_client) {
log_error("Maximum subscriptions per client exceeded");
// Send CLOSED notice
cJSON* closed_msg = cJSON_CreateArray();
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("CLOSED"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("error: too many subscriptions"));
char* closed_str = cJSON_Print(closed_msg);
if (closed_str) {
size_t closed_len = strlen(closed_str);
unsigned char* buf = malloc(LWS_PRE + closed_len);
if (buf) {
memcpy(buf + LWS_PRE, closed_str, closed_len);
lws_write(wsi, buf + LWS_PRE, closed_len, LWS_WRITE_TEXT);
free(buf);
}
free(closed_str);
}
cJSON_Delete(closed_msg);
return has_config_request ? config_events_sent : 0;
}
// Create persistent subscription
subscription_t* subscription = create_subscription(sub_id, wsi, filters, pss ? pss->client_ip : "unknown");
if (!subscription) {
@@ -841,13 +885,13 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
if (add_subscription_to_manager(subscription) != 0) {
log_error("Failed to add subscription to global manager");
free_subscription(subscription);
// Send CLOSED notice
cJSON* closed_msg = cJSON_CreateArray();
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("CLOSED"));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(closed_msg, cJSON_CreateString("error: subscription limit reached"));
char* closed_str = cJSON_Print(closed_msg);
if (closed_str) {
size_t closed_len = strlen(closed_str);
@@ -860,7 +904,15 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
free(closed_str);
}
cJSON_Delete(closed_msg);
// Update rate limiting counters for failed attempt (global limit reached)
if (pss) {
time_t current_time = time(NULL);
pss->failed_subscription_attempts++;
pss->last_failed_attempt = current_time;
pss->consecutive_failures++;
}
return has_config_request ? config_events_sent : 0;
}
@@ -882,7 +934,13 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
log_warning("Invalid filter object");
continue;
}
// Reset bind params for this filter
free_bind_params(bind_params, bind_param_count);
bind_params = NULL;
bind_param_count = 0;
bind_param_capacity = 0;
// Build SQL query based on filter - exclude ephemeral events (kinds 20000-29999) from historical queries
char sql[1024] = "SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND (kind < 20000 OR kind >= 30000)";
char* sql_ptr = sql + strlen(sql);
@@ -922,56 +980,80 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
// Handle authors filter
cJSON* authors = cJSON_GetObjectItem(filter, "authors");
if (authors && cJSON_IsArray(authors)) {
int author_count = cJSON_GetArraySize(authors);
int author_count = 0;
// Count valid authors
for (int a = 0; a < cJSON_GetArraySize(authors); a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
author_count++;
}
}
if (author_count > 0) {
snprintf(sql_ptr, remaining, " AND pubkey IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int a = 0; a < author_count; a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
if (a > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(author));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (a > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add author values to bind params
for (int a = 0; a < cJSON_GetArraySize(authors); a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
add_bind_param(&bind_params, &bind_param_count, &bind_param_capacity, cJSON_GetStringValue(author));
}
}
}
}
// Handle ids filter
cJSON* ids = cJSON_GetObjectItem(filter, "ids");
if (ids && cJSON_IsArray(ids)) {
int id_count = cJSON_GetArraySize(ids);
int id_count = 0;
// Count valid ids
for (int i = 0; i < cJSON_GetArraySize(ids); i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
id_count++;
}
}
if (id_count > 0) {
snprintf(sql_ptr, remaining, " AND id IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int i = 0; i < id_count; i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(id));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add id values to bind params
for (int i = 0; i < cJSON_GetArraySize(ids); i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
add_bind_param(&bind_params, &bind_param_count, &bind_param_capacity, cJSON_GetStringValue(id));
}
}
}
}
@@ -984,29 +1066,42 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
const char* tag_name = filter_key + 1; // Get the tag name (e, p, t, type, etc.)
if (cJSON_IsArray(filter_item)) {
int tag_value_count = cJSON_GetArraySize(filter_item);
int tag_value_count = 0;
// Count valid tag values
for (int i = 0; i < cJSON_GetArraySize(filter_item); i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
tag_value_count++;
}
}
if (tag_value_count > 0) {
// Use EXISTS with LIKE to check for matching tags
snprintf(sql_ptr, remaining, " AND EXISTS (SELECT 1 FROM json_each(json(tags)) WHERE json_extract(value, '$[0]') = '%s' AND json_extract(value, '$[1]') IN (", tag_name);
// Use EXISTS with parameterized query
snprintf(sql_ptr, remaining, " AND EXISTS (SELECT 1 FROM json_each(json(tags)) WHERE json_extract(value, '$[0]') = ? AND json_extract(value, '$[1]') IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int i = 0; i < tag_value_count; i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(tag_value));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, "))");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add tag name and values to bind params
add_bind_param(&bind_params, &bind_param_count, &bind_param_capacity, tag_name);
for (int i = 0; i < cJSON_GetArraySize(filter_item); i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
add_bind_param(&bind_params, &bind_param_count, &bind_param_capacity, cJSON_GetStringValue(tag_value));
}
}
}
}
}
@@ -1072,12 +1167,7 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
// Default limit to prevent excessive queries
snprintf(sql_ptr, remaining, " LIMIT 500");
}
// Debug: Log the SQL query being executed
char debug_msg[1280];
snprintf(debug_msg, sizeof(debug_msg), "Executing SQL: %s", sql);
log_info(debug_msg);
// Execute query and send events
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
@@ -1087,6 +1177,11 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
log_error(error_msg);
continue;
}
// Bind parameters
for (int i = 0; i < bind_param_count; i++) {
sqlite3_bind_text(stmt, i + 1, bind_params[i], -1, SQLITE_TRANSIENT);
}
int row_count = 0;
while (sqlite3_step(stmt) == SQLITE_ROW) {
@@ -1122,11 +1217,6 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
time_t current_time = time(NULL);
if (is_event_expired(event, current_time)) {
// Skip this expired event
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
const char* event_id = event_id_obj ? cJSON_GetStringValue(event_id_obj) : "unknown";
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg), "Filtering expired event from subscription: %.16s", event_id);
log_info(debug_msg);
cJSON_Delete(event);
continue;
}
@@ -1153,18 +1243,13 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
cJSON_Delete(event_msg);
events_sent++;
}
char row_debug[128];
snprintf(row_debug, sizeof(row_debug), "Query returned %d rows", row_count);
log_info(row_debug);
sqlite3_finalize(stmt);
}
char events_debug[128];
snprintf(events_debug, sizeof(events_debug), "Total events sent: %d", events_sent);
log_info(events_debug);
// Cleanup bind params
free_bind_params(bind_params, bind_param_count);
return events_sent;
}
/////////////////////////////////////////////////////////////////////////////////////////
@@ -1199,7 +1284,6 @@ int is_authorized_admin_event(cJSON* event, char* error_buffer, size_t error_buf
cJSON *tags = cJSON_GetObjectItem(event, "tags");
if (!tags || !cJSON_IsArray(tags)) {
// No tags array - treat as regular event for different relay
log_info("Admin event has no tags array - treating as event for different relay");
snprintf(error_buffer, error_buffer_size, "Admin event not targeting this relay (no tags)");
return -1;
}
@@ -1227,7 +1311,6 @@ int is_authorized_admin_event(cJSON* event, char* error_buffer, size_t error_buf
if (!targets_this_relay) {
// Admin event for different relay - not an error, just not for us
log_info("Admin event targets different relay - treating as regular event");
snprintf(error_buffer, error_buffer_size, "Admin event not targeting this relay");
return -1;
}
@@ -1267,7 +1350,6 @@ int is_authorized_admin_event(cJSON* event, char* error_buffer, size_t error_buf
}
// All checks passed - authorized admin event
log_info("Admin event authorization successful");
return 0;
}
@@ -1291,8 +1373,8 @@ void print_usage(const char* program_name) {
printf(" -v, --version Show version information\n");
printf(" -p, --port PORT Override relay port (first-time startup only)\n");
printf(" --strict-port Fail if exact port is unavailable (no port increment)\n");
printf(" -a, --admin-pubkey HEX Override admin public key (64-char hex)\n");
printf(" -r, --relay-privkey HEX Override relay private key (64-char hex)\n");
printf(" -a, --admin-pubkey KEY Override admin public key (64-char hex or npub)\n");
printf(" -r, --relay-privkey KEY Override relay private key (64-char hex or nsec)\n");
printf("\n");
printf("Configuration:\n");
printf(" This relay uses event-based configuration stored in the database.\n");
@@ -1364,7 +1446,6 @@ int main(int argc, char* argv[]) {
char port_msg[128];
snprintf(port_msg, sizeof(port_msg), "Port override specified: %d", cli_options.port_override);
log_info(port_msg);
} else if (strcmp(argv[i], "-a") == 0 || strcmp(argv[i], "--admin-pubkey") == 0) {
// Admin public key override option
if (i + 1 >= argc) {
@@ -1373,28 +1454,28 @@ int main(int argc, char* argv[]) {
return 1;
}
// Validate public key format (must be 64 hex characters)
if (strlen(argv[i + 1]) != 64) {
log_error("Invalid admin public key length. Must be exactly 64 hex characters.");
const char* input_key = argv[i + 1];
char decoded_key[65] = {0}; // Buffer for decoded hex key
// Try to decode the input as either hex or npub format
unsigned char pubkey_bytes[32];
if (nostr_decode_npub(input_key, pubkey_bytes) == NOSTR_SUCCESS) {
// Convert bytes back to hex string
char* hex_ptr = decoded_key;
for (int j = 0; j < 32; j++) {
sprintf(hex_ptr, "%02x", pubkey_bytes[j]);
hex_ptr += 2;
}
} else {
log_error("Invalid admin public key format. Must be 64 hex characters or valid npub format.");
print_usage(argv[0]);
return 1;
}
// Validate hex format
for (int j = 0; j < 64; j++) {
char c = argv[i + 1][j];
if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'))) {
log_error("Invalid admin public key format. Must contain only hex characters (0-9, a-f, A-F).");
print_usage(argv[0]);
return 1;
}
}
strncpy(cli_options.admin_pubkey_override, argv[i + 1], sizeof(cli_options.admin_pubkey_override) - 1);
strncpy(cli_options.admin_pubkey_override, decoded_key, sizeof(cli_options.admin_pubkey_override) - 1);
cli_options.admin_pubkey_override[sizeof(cli_options.admin_pubkey_override) - 1] = '\0';
i++; // Skip the key argument
log_info("Admin public key override specified");
} else if (strcmp(argv[i], "-r") == 0 || strcmp(argv[i], "--relay-privkey") == 0) {
// Relay private key override option
if (i + 1 >= argc) {
@@ -1402,33 +1483,32 @@ int main(int argc, char* argv[]) {
print_usage(argv[0]);
return 1;
}
// Validate private key format (must be 64 hex characters)
if (strlen(argv[i + 1]) != 64) {
log_error("Invalid relay private key length. Must be exactly 64 hex characters.");
const char* input_key = argv[i + 1];
char decoded_key[65] = {0}; // Buffer for decoded hex key
// Try to decode the input as either hex or nsec format
unsigned char privkey_bytes[32];
if (nostr_decode_nsec(input_key, privkey_bytes) == NOSTR_SUCCESS) {
// Convert bytes back to hex string
char* hex_ptr = decoded_key;
for (int j = 0; j < 32; j++) {
sprintf(hex_ptr, "%02x", privkey_bytes[j]);
hex_ptr += 2;
}
} else {
log_error("Invalid relay private key format. Must be 64 hex characters or valid nsec format.");
print_usage(argv[0]);
return 1;
}
// Validate hex format
for (int j = 0; j < 64; j++) {
char c = argv[i + 1][j];
if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'))) {
log_error("Invalid relay private key format. Must contain only hex characters (0-9, a-f, A-F).");
print_usage(argv[0]);
return 1;
}
}
strncpy(cli_options.relay_privkey_override, argv[i + 1], sizeof(cli_options.relay_privkey_override) - 1);
strncpy(cli_options.relay_privkey_override, decoded_key, sizeof(cli_options.relay_privkey_override) - 1);
cli_options.relay_privkey_override[sizeof(cli_options.relay_privkey_override) - 1] = '\0';
i++; // Skip the key argument
log_info("Relay private key override specified");
} else if (strcmp(argv[i], "--strict-port") == 0) {
// Strict port mode option
cli_options.strict_port = 1;
log_info("Strict port mode enabled - will fail if exact port is unavailable");
} else {
log_error("Unknown argument. Use --help for usage information.");
print_usage(argv[0]);
@@ -1451,7 +1531,6 @@ int main(int argc, char* argv[]) {
// Check if this is first-time startup or existing relay
if (is_first_time_startup()) {
log_info("First-time startup detected");
// Initialize event-based configuration system
if (init_configuration_system(NULL, NULL) != 0) {
@@ -1485,7 +1564,6 @@ int main(int argc, char* argv[]) {
nostr_cleanup();
return 1;
}
log_success("Relay private key stored securely in database");
} else {
log_error("Relay private key not available from first-time startup");
cleanup_configuration_system();
@@ -1495,7 +1573,6 @@ int main(int argc, char* argv[]) {
// Handle configuration setup after database is initialized
// Always populate defaults directly in config table (abandoning legacy event signing)
log_info("Populating config table with defaults after database initialization");
// Populate default config values in table
if (populate_default_config_values() != 0) {
@@ -1517,7 +1594,6 @@ int main(int argc, char* argv[]) {
close_database();
return 1;
}
log_info("Applied port override from command line");
printf(" Port: %d (overriding default)\n", cli_options.port_override);
}
@@ -1530,22 +1606,17 @@ int main(int argc, char* argv[]) {
return 1;
}
log_success("Configuration populated directly in config table after database initialization");
// Now store the pubkeys in config table since database is available
const char* admin_pubkey = get_admin_pubkey_cached();
const char* relay_pubkey_from_cache = get_relay_pubkey_cached();
if (admin_pubkey && strlen(admin_pubkey) == 64) {
set_config_value_in_table("admin_pubkey", admin_pubkey, "string", "Administrator public key", "authentication", 0);
log_success("Admin pubkey stored in config table for first-time startup");
}
if (relay_pubkey_from_cache && strlen(relay_pubkey_from_cache) == 64) {
set_config_value_in_table("relay_pubkey", relay_pubkey_from_cache, "string", "Relay public key", "relay", 0);
log_success("Relay pubkey stored in config table for first-time startup");
}
} else {
log_info("Existing relay detected");
// Find existing database file
char** existing_files = find_existing_db_files();
if (!existing_files || !existing_files[0]) {
@@ -1611,7 +1682,6 @@ int main(int argc, char* argv[]) {
if (apply_configuration_from_event(config_event) != 0) {
log_warning("Failed to apply configuration from database");
} else {
log_success("Configuration loaded from database");
// Extract admin pubkey from the config event and store in config table for unified cache access
cJSON* pubkey_obj = cJSON_GetObjectItem(config_event, "pubkey");
@@ -1620,12 +1690,10 @@ int main(int argc, char* argv[]) {
// Store both admin and relay pubkeys in config table for unified cache
if (admin_pubkey && strlen(admin_pubkey) == 64) {
set_config_value_in_table("admin_pubkey", admin_pubkey, "string", "Administrator public key", "authentication", 0);
log_info("Admin pubkey stored in config table for existing relay");
}
if (relay_pubkey && strlen(relay_pubkey) == 64) {
set_config_value_in_table("relay_pubkey", relay_pubkey, "string", "Relay public key", "relay", 0);
log_info("Relay pubkey stored in config table for existing relay");
}
}
cJSON_Delete(config_event);
@@ -1644,7 +1712,6 @@ int main(int argc, char* argv[]) {
close_database();
return 1;
}
log_info("Applied port override from command line for existing relay");
printf(" Port: %d (overriding configured port)\n", cli_options.port_override);
}
@@ -1675,7 +1742,6 @@ int main(int argc, char* argv[]) {
close_database();
return 1;
}
log_success("Unified request validator initialized");
// Initialize NIP-11 relay information
init_relay_info();
@@ -1685,11 +1751,28 @@ int main(int argc, char* argv[]) {
// Initialize NIP-40 expiration configuration
init_expiration_config();
// Update subscription manager configuration
update_subscription_manager_config();
// Initialize subscription manager mutexes
if (pthread_mutex_init(&g_subscription_manager.subscriptions_lock, NULL) != 0) {
log_error("Failed to initialize subscription manager subscriptions lock");
cleanup_configuration_system();
nostr_cleanup();
close_database();
return 1;
}
if (pthread_mutex_init(&g_subscription_manager.ip_tracking_lock, NULL) != 0) {
log_error("Failed to initialize subscription manager IP tracking lock");
pthread_mutex_destroy(&g_subscription_manager.subscriptions_lock);
cleanup_configuration_system();
nostr_cleanup();
close_database();
return 1;
}
log_info("Starting relay server...");
// Start WebSocket Nostr relay server (port from configuration)
int result = start_websocket_relay(-1, cli_options.strict_port); // Let config system determine port, pass strict_port flag
@@ -1698,11 +1781,15 @@ int main(int argc, char* argv[]) {
cleanup_relay_info();
ginxsom_request_validator_cleanup();
cleanup_configuration_system();
// Cleanup subscription manager mutexes
pthread_mutex_destroy(&g_subscription_manager.subscriptions_lock);
pthread_mutex_destroy(&g_subscription_manager.ip_tracking_lock);
nostr_cleanup();
close_database();
if (result == 0) {
log_success("Server shutdown complete");
} else {
log_error("Server shutdown with errors");
}

View File

@@ -11,7 +11,6 @@
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <printf.h>
// Forward declarations for logging functions
void log_warning(const char* message);
@@ -142,11 +141,7 @@ int handle_deletion_request(cJSON* event, char* error_message, size_t error_size
if (store_event(event) != 0) {
log_warning("Failed to store deletion request event");
}
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg), "Deletion request processed: %d events deleted", deleted_count);
log_info(debug_msg);
error_message[0] = '\0'; // Success - empty error message
return 0;
}
@@ -196,10 +191,6 @@ int delete_events_by_id(const char* requester_pubkey, cJSON* event_ids) {
if (sqlite3_step(delete_stmt) == SQLITE_DONE && sqlite3_changes(g_db) > 0) {
deleted_count++;
char debug_msg[128];
snprintf(debug_msg, sizeof(debug_msg), "Deleted event by ID: %.16s...", id);
log_info(debug_msg);
}
sqlite3_finalize(delete_stmt);
}
@@ -211,9 +202,6 @@ int delete_events_by_id(const char* requester_pubkey, cJSON* event_ids) {
}
} else {
sqlite3_finalize(check_stmt);
char debug_msg[128];
snprintf(debug_msg, sizeof(debug_msg), "Event not found for deletion: %.16s...", id);
log_info(debug_msg);
}
}
@@ -287,10 +275,6 @@ int delete_events_by_address(const char* requester_pubkey, cJSON* addresses, lon
int changes = sqlite3_changes(g_db);
if (changes > 0) {
deleted_count += changes;
char debug_msg[128];
snprintf(debug_msg, sizeof(debug_msg), "Deleted %d events by address: %.32s...", changes, addr);
log_info(debug_msg);
}
}
sqlite3_finalize(delete_stmt);

View File

@@ -36,191 +36,128 @@ extern unified_config_cache_t g_unified_cache;
// Helper function to parse comma-separated string into cJSON array
cJSON* parse_comma_separated_array(const char* csv_string) {
log_info("parse_comma_separated_array called");
if (!csv_string || strlen(csv_string) == 0) {
log_info("Empty or null csv_string, returning empty array");
return cJSON_CreateArray();
}
log_info("Creating cJSON array");
cJSON* array = cJSON_CreateArray();
if (!array) {
log_info("Failed to create cJSON array");
return NULL;
}
log_info("Duplicating csv_string");
char* csv_copy = strdup(csv_string);
if (!csv_copy) {
log_info("Failed to duplicate csv_string");
cJSON_Delete(array);
return NULL;
}
log_info("Starting token parsing");
char* token = strtok(csv_copy, ",");
while (token) {
log_info("Processing token");
// Trim whitespace
while (*token == ' ') token++;
char* end = token + strlen(token) - 1;
while (end > token && *end == ' ') *end-- = '\0';
if (strlen(token) > 0) {
log_info("Token has content, parsing");
// Try to parse as number first (for supported_nips)
char* endptr;
long num = strtol(token, &endptr, 10);
if (*endptr == '\0') {
log_info("Token is number, adding to array");
// It's a number
cJSON_AddItemToArray(array, cJSON_CreateNumber(num));
} else {
log_info("Token is string, adding to array");
// It's a string
cJSON_AddItemToArray(array, cJSON_CreateString(token));
}
} else {
log_info("Token is empty, skipping");
}
token = strtok(NULL, ",");
}
log_info("Freeing csv_copy");
free(csv_copy);
log_info("Returning parsed array");
return array;
}
// Initialize relay information using configuration system
void init_relay_info() {
log_info("Initializing relay information from configuration...");
// Get all config values first (without holding mutex to avoid deadlock)
// Note: These may be dynamically allocated strings that need to be freed
log_info("Fetching relay configuration values...");
const char* relay_name = get_config_value("relay_name");
log_info("relay_name fetched");
const char* relay_description = get_config_value("relay_description");
log_info("relay_description fetched");
const char* relay_software = get_config_value("relay_software");
log_info("relay_software fetched");
const char* relay_version = get_config_value("relay_version");
log_info("relay_version fetched");
const char* relay_contact = get_config_value("relay_contact");
log_info("relay_contact fetched");
const char* relay_pubkey = get_config_value("relay_pubkey");
log_info("relay_pubkey fetched");
const char* supported_nips_csv = get_config_value("supported_nips");
log_info("supported_nips fetched");
const char* language_tags_csv = get_config_value("language_tags");
log_info("language_tags fetched");
const char* relay_countries_csv = get_config_value("relay_countries");
log_info("relay_countries fetched");
const char* posting_policy = get_config_value("posting_policy");
log_info("posting_policy fetched");
const char* payments_url = get_config_value("payments_url");
log_info("payments_url fetched");
// Get config values for limitations
log_info("Fetching limitation configuration values...");
int max_message_length = get_config_int("max_message_length", 16384);
log_info("max_message_length fetched");
int max_subscriptions_per_client = get_config_int("max_subscriptions_per_client", 20);
log_info("max_subscriptions_per_client fetched");
int max_limit = get_config_int("max_limit", 5000);
log_info("max_limit fetched");
int max_event_tags = get_config_int("max_event_tags", 100);
log_info("max_event_tags fetched");
int max_content_length = get_config_int("max_content_length", 8196);
log_info("max_content_length fetched");
int default_limit = get_config_int("default_limit", 500);
log_info("default_limit fetched");
int admin_enabled = get_config_bool("admin_enabled", 0);
log_info("admin_enabled fetched");
pthread_mutex_lock(&g_unified_cache.cache_lock);
// Update relay information fields
log_info("Storing string values in cache...");
if (relay_name) {
log_info("Storing relay_name");
strncpy(g_unified_cache.relay_info.name, relay_name, sizeof(g_unified_cache.relay_info.name) - 1);
free((char*)relay_name); // Free dynamically allocated string
log_info("relay_name stored and freed");
} else {
log_info("Using default relay_name");
strncpy(g_unified_cache.relay_info.name, "C Nostr Relay", sizeof(g_unified_cache.relay_info.name) - 1);
}
if (relay_description) {
log_info("Storing relay_description");
strncpy(g_unified_cache.relay_info.description, relay_description, sizeof(g_unified_cache.relay_info.description) - 1);
free((char*)relay_description); // Free dynamically allocated string
log_info("relay_description stored and freed");
} else {
log_info("Using default relay_description");
strncpy(g_unified_cache.relay_info.description, "A high-performance Nostr relay implemented in C with SQLite storage", sizeof(g_unified_cache.relay_info.description) - 1);
}
if (relay_software) {
log_info("Storing relay_software");
strncpy(g_unified_cache.relay_info.software, relay_software, sizeof(g_unified_cache.relay_info.software) - 1);
free((char*)relay_software); // Free dynamically allocated string
log_info("relay_software stored and freed");
} else {
log_info("Using default relay_software");
strncpy(g_unified_cache.relay_info.software, "https://git.laantungir.net/laantungir/c-relay.git", sizeof(g_unified_cache.relay_info.software) - 1);
}
if (relay_version) {
log_info("Storing relay_version");
strncpy(g_unified_cache.relay_info.version, relay_version, sizeof(g_unified_cache.relay_info.version) - 1);
free((char*)relay_version); // Free dynamically allocated string
log_info("relay_version stored and freed");
} else {
log_info("Using default relay_version");
strncpy(g_unified_cache.relay_info.version, "0.2.0", sizeof(g_unified_cache.relay_info.version) - 1);
}
if (relay_contact) {
log_info("Storing relay_contact");
strncpy(g_unified_cache.relay_info.contact, relay_contact, sizeof(g_unified_cache.relay_info.contact) - 1);
free((char*)relay_contact); // Free dynamically allocated string
log_info("relay_contact stored and freed");
}
if (relay_pubkey) {
log_info("Storing relay_pubkey");
strncpy(g_unified_cache.relay_info.pubkey, relay_pubkey, sizeof(g_unified_cache.relay_info.pubkey) - 1);
free((char*)relay_pubkey); // Free dynamically allocated string
log_info("relay_pubkey stored and freed");
}
if (posting_policy) {
log_info("Storing posting_policy");
strncpy(g_unified_cache.relay_info.posting_policy, posting_policy, sizeof(g_unified_cache.relay_info.posting_policy) - 1);
free((char*)posting_policy); // Free dynamically allocated string
log_info("posting_policy stored and freed");
}
if (payments_url) {
log_info("Storing payments_url");
strncpy(g_unified_cache.relay_info.payments_url, payments_url, sizeof(g_unified_cache.relay_info.payments_url) - 1);
free((char*)payments_url); // Free dynamically allocated string
log_info("payments_url stored and freed");
}
// Initialize supported NIPs array from config
log_info("Initializing supported_nips array");
if (supported_nips_csv) {
log_info("Parsing supported_nips from config");
g_unified_cache.relay_info.supported_nips = parse_comma_separated_array(supported_nips_csv);
log_info("supported_nips parsed successfully");
free((char*)supported_nips_csv); // Free dynamically allocated string
log_info("supported_nips_csv freed");
} else {
log_info("Using default supported_nips");
// Fallback to default supported NIPs
g_unified_cache.relay_info.supported_nips = cJSON_CreateArray();
if (g_unified_cache.relay_info.supported_nips) {
@@ -233,14 +170,11 @@ void init_relay_info() {
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(40)); // NIP-40: Expiration Timestamp
cJSON_AddItemToArray(g_unified_cache.relay_info.supported_nips, cJSON_CreateNumber(42)); // NIP-42: Authentication
}
log_info("Default supported_nips created");
}
// Initialize server limitations using configuration
log_info("Initializing server limitations");
g_unified_cache.relay_info.limitation = cJSON_CreateObject();
if (g_unified_cache.relay_info.limitation) {
log_info("Adding limitation fields");
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_message_length", max_message_length);
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_subscriptions", max_subscriptions_per_client);
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "max_limit", max_limit);
@@ -254,25 +188,16 @@ void init_relay_info() {
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "created_at_lower_limit", 0);
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "created_at_upper_limit", 2147483647);
cJSON_AddNumberToObject(g_unified_cache.relay_info.limitation, "default_limit", default_limit);
log_info("Limitation fields added");
} else {
log_info("Failed to create limitation object");
}
// Initialize empty retention policies (can be configured later)
log_info("Initializing retention policies");
g_unified_cache.relay_info.retention = cJSON_CreateArray();
// Initialize language tags from config
log_info("Initializing language_tags");
if (language_tags_csv) {
log_info("Parsing language_tags from config");
g_unified_cache.relay_info.language_tags = parse_comma_separated_array(language_tags_csv);
log_info("language_tags parsed successfully");
free((char*)language_tags_csv); // Free dynamically allocated string
log_info("language_tags_csv freed");
} else {
log_info("Using default language_tags");
// Fallback to global
g_unified_cache.relay_info.language_tags = cJSON_CreateArray();
if (g_unified_cache.relay_info.language_tags) {
@@ -281,15 +206,10 @@ void init_relay_info() {
}
// Initialize relay countries from config
log_info("Initializing relay_countries");
if (relay_countries_csv) {
log_info("Parsing relay_countries from config");
g_unified_cache.relay_info.relay_countries = parse_comma_separated_array(relay_countries_csv);
log_info("relay_countries parsed successfully");
free((char*)relay_countries_csv); // Free dynamically allocated string
log_info("relay_countries_csv freed");
} else {
log_info("Using default relay_countries");
// Fallback to global
g_unified_cache.relay_info.relay_countries = cJSON_CreateArray();
if (g_unified_cache.relay_info.relay_countries) {
@@ -298,17 +218,12 @@ void init_relay_info() {
}
// Initialize content tags as empty array
log_info("Initializing tags");
g_unified_cache.relay_info.tags = cJSON_CreateArray();
// Initialize fees as empty object (no payment required by default)
log_info("Initializing fees");
g_unified_cache.relay_info.fees = cJSON_CreateObject();
log_info("Unlocking cache mutex");
pthread_mutex_unlock(&g_unified_cache.cache_lock);
log_success("Relay information initialized with default values");
}
// Clean up relay information JSON objects
@@ -518,7 +433,6 @@ cJSON* generate_relay_info_json() {
g_unified_cache.relay_info.tags = cJSON_CreateArray();
g_unified_cache.relay_info.fees = cJSON_CreateObject();
log_info("NIP-11 relay_info rebuilt directly from config table");
}
// Add basic relay information
@@ -610,7 +524,6 @@ struct nip11_session_data {
// Handle NIP-11 HTTP request with proper asynchronous buffer management
int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
log_info("Handling NIP-11 relay information request");
// Check if client accepts application/nostr+json
int accepts_nostr_json = 0;
@@ -696,9 +609,6 @@ int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
}
size_t json_len = strlen(json_string);
log_info("Generated NIP-11 JSON");
printf(" JSON length: %zu bytes\n", json_len);
printf(" JSON preview: %.100s%s\n", json_string, json_len > 100 ? "..." : "");
// Allocate session data to manage buffer lifetime across callbacks
struct nip11_session_data* session_data = malloc(sizeof(struct nip11_session_data));
@@ -791,8 +701,7 @@ int handle_nip11_http_request(struct lws* wsi, const char* accept_header) {
// Request callback for body transmission
lws_callback_on_writable(wsi);
log_success("NIP-11 headers sent, body transmission scheduled");
return 0;
}

View File

@@ -27,7 +27,6 @@ struct pow_config {
// Initialize PoW configuration using configuration system
void init_pow_config() {
log_info("Initializing NIP-13 Proof of Work configuration");
// Get all config values first (without holding mutex to avoid deadlock)
int pow_enabled = get_config_bool("pow_enabled", 1);
@@ -48,36 +47,20 @@ void init_pow_config() {
g_unified_cache.pow_config.reject_lower_targets = 1;
g_unified_cache.pow_config.strict_format = 1;
g_unified_cache.pow_config.anti_spam_mode = 1;
log_info("PoW configured in strict anti-spam mode");
} else if (strcmp(pow_mode, "full") == 0) {
g_unified_cache.pow_config.validation_flags = NOSTR_POW_VALIDATE_FULL;
g_unified_cache.pow_config.require_nonce_tag = 1;
log_info("PoW configured in full validation mode");
} else if (strcmp(pow_mode, "basic") == 0) {
g_unified_cache.pow_config.validation_flags = NOSTR_POW_VALIDATE_BASIC;
log_info("PoW configured in basic validation mode");
} else if (strcmp(pow_mode, "disabled") == 0) {
g_unified_cache.pow_config.enabled = 0;
log_info("PoW validation disabled via configuration");
}
free((char*)pow_mode); // Free dynamically allocated string
} else {
// Default to basic mode
g_unified_cache.pow_config.validation_flags = NOSTR_POW_VALIDATE_BASIC;
log_info("PoW configured in basic validation mode (default)");
}
// Log final configuration
char config_msg[512];
snprintf(config_msg, sizeof(config_msg),
"PoW Configuration: enabled=%s, min_difficulty=%d, validation_flags=0x%x, mode=%s",
g_unified_cache.pow_config.enabled ? "true" : "false",
g_unified_cache.pow_config.min_pow_difficulty,
g_unified_cache.pow_config.validation_flags,
g_unified_cache.pow_config.anti_spam_mode ? "anti-spam" :
(g_unified_cache.pow_config.validation_flags & NOSTR_POW_VALIDATE_FULL) ? "full" : "basic");
log_info(config_msg);
pthread_mutex_unlock(&g_unified_cache.cache_lock);
}
@@ -175,17 +158,5 @@ int validate_event_pow(cJSON* event, char* error_message, size_t error_size) {
return validation_result;
}
// Log successful PoW validation (only if minimum difficulty is required)
if (min_pow_difficulty > 0 || pow_result.has_nonce_tag) {
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg),
"PoW validated: difficulty=%d, target=%d, nonce=%llu%s",
pow_result.actual_difficulty,
pow_result.committed_target,
(unsigned long long)pow_result.nonce_value,
pow_result.has_nonce_tag ? "" : " (no nonce tag)");
log_info(debug_msg);
}
return 0; // Success
}

View File

@@ -34,7 +34,6 @@ void log_warning(const char* message);
// Initialize expiration configuration using configuration system
void init_expiration_config() {
log_info("Initializing NIP-40 Expiration Timestamp configuration");
// Get all config values first (without holding mutex to avoid deadlock)
int expiration_enabled = get_config_bool("expiration_enabled", 1);
@@ -56,15 +55,6 @@ void init_expiration_config() {
g_expiration_config.grace_period = 300;
}
// Log final configuration
char config_msg[512];
snprintf(config_msg, sizeof(config_msg),
"Expiration Configuration: enabled=%s, strict_mode=%s, filter_responses=%s, grace_period=%ld seconds",
g_expiration_config.enabled ? "true" : "false",
g_expiration_config.strict_mode ? "true" : "false",
g_expiration_config.filter_responses ? "true" : "false",
g_expiration_config.grace_period);
log_info(config_msg);
}
// Extract expiration timestamp from event tags
@@ -161,11 +151,7 @@ int validate_event_expiration(cJSON* event, char* error_message, size_t error_si
log_warning("Event rejected: expired timestamp");
return -1;
} else {
// In non-strict mode, log but allow expired events
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg),
"Accepting expired event (strict_mode disabled)");
log_info(debug_msg);
// In non-strict mode, allow expired events
}
}

View File

@@ -83,10 +83,6 @@ void send_nip42_auth_challenge(struct lws* wsi, struct per_session_data* pss) {
free(msg_str);
}
cJSON_Delete(auth_msg);
char debug_msg[128];
snprintf(debug_msg, sizeof(debug_msg), "NIP-42 auth challenge sent: %.16s...", challenge);
log_info(debug_msg);
}
// Handle NIP-42 signed authentication event from client
@@ -152,11 +148,6 @@ void handle_nip42_auth_signed_event(struct lws* wsi, struct per_session_data* ps
pss->auth_challenge_sent = 0;
pthread_mutex_unlock(&pss->session_lock);
char success_msg[256];
snprintf(success_msg, sizeof(success_msg),
"NIP-42 authentication successful for pubkey: %.16s...", authenticated_pubkey);
log_success(success_msg);
send_notice_message(wsi, "NIP-42 authentication successful");
} else {
// Authentication failed

View File

@@ -139,21 +139,6 @@ struct {
char reason[500]; // specific reason string
} g_last_rule_violation = {0};
/**
* Helper function for consistent debug logging to main relay.log file
*/
/**
 * Helper function for consistent debug logging to main relay.log file.
 *
 * Appends `message` to relay.log prefixed with a "[YYYY-MM-DD HH:MM:SS] [DEBUG]"
 * header, matching the format used by the main logging system. Callers are
 * expected to terminate `message` with '\n' themselves (all call sites do).
 * Logging is best-effort: failure to open the file is silently ignored so that
 * logging problems can never affect request validation.
 */
static void validator_debug_log(const char *message) {
    FILE *relay_log = fopen("relay.log", "a");
    if (!relay_log) {
        return; // best-effort: never let logging failures affect validation
    }

    // Use localtime_r instead of localtime: the validator runs on multiple
    // threads (see the pthread mutexes used throughout this module), and
    // localtime() writes into a shared static buffer. Also guard against a
    // NULL result instead of dereferencing it unconditionally.
    time_t now = time(NULL);
    struct tm tm_buf;
    char timestamp[20] = "unknown-time";
    if (localtime_r(&now, &tm_buf) != NULL) {
        strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", &tm_buf);
    }

    // Same format as main logging system.
    fprintf(relay_log, "[%s] [DEBUG] %s", timestamp, message);
    fclose(relay_log);
}
//=============================================================================
// FORWARD DECLARATIONS
@@ -188,16 +173,12 @@ int ginxsom_request_validator_init(const char *db_path, const char *app_name) {
// Initialize nostr_core_lib if not already done
if (nostr_crypto_init() != NOSTR_SUCCESS) {
validator_debug_log(
"VALIDATOR: Failed to initialize nostr crypto system\n");
return NOSTR_ERROR_CRYPTO_INIT;
}
// Load initial configuration from database
int result = reload_auth_config();
if (result != NOSTR_SUCCESS) {
validator_debug_log(
"VALIDATOR: Failed to load configuration from database\n");
return result;
}
@@ -215,8 +196,6 @@ int ginxsom_request_validator_init(const char *db_path, const char *app_name) {
g_challenge_manager.last_cleanup = time(NULL);
g_validator_initialized = 1;
validator_debug_log(
"VALIDATOR: Request validator initialized successfully\n");
return NOSTR_SUCCESS;
}
@@ -257,20 +236,17 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
// 1. Null Pointer Checks - Reject malformed requests instantly
if (!json_string || json_length == 0) {
validator_debug_log("VALIDATOR_DEBUG: STEP 1 FAILED - Null input\n");
return NOSTR_ERROR_INVALID_INPUT;
}
// 2. Initialization Check - Verify system is properly initialized
if (!g_validator_initialized) {
validator_debug_log("VALIDATOR_DEBUG: STEP 2 FAILED - Validator not initialized\n");
return NOSTR_ERROR_INVALID_INPUT;
}
// 3. Parse JSON string to cJSON event object
cJSON *event = cJSON_ParseWithLength(json_string, json_length);
if (!event) {
validator_debug_log("VALIDATOR_DEBUG: STEP 3 FAILED - Failed to parse JSON event\n");
return NOSTR_ERROR_INVALID_INPUT;
}
@@ -290,20 +266,14 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
!tags || !cJSON_IsArray(tags) ||
!content || !cJSON_IsString(content) ||
!sig || !cJSON_IsString(sig)) {
validator_debug_log("VALIDATOR_DEBUG: STEP 4 FAILED - Invalid event structure\n");
cJSON_Delete(event);
return NOSTR_ERROR_INVALID_INPUT;
}
int event_kind = (int)cJSON_GetNumberValue(kind);
// 5. Check configuration using unified cache
int auth_required = nostr_auth_rules_enabled();
char config_msg[256];
sprintf(config_msg, "VALIDATOR_DEBUG: STEP 5 PASSED - Event kind: %d, auth_required: %d\n",
event_kind, auth_required);
validator_debug_log(config_msg);
/////////////////////////////////////////////////////////////////////
// PHASE 2: NOSTR EVENT VALIDATION
@@ -312,39 +282,26 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
// 6. Nostr Event Structure Validation using nostr_core_lib
int validation_result = nostr_validate_event(event);
if (validation_result != NOSTR_SUCCESS) {
char validation_msg[256];
sprintf(validation_msg, "VALIDATOR_DEBUG: STEP 6 FAILED - NOSTR event validation failed (error=%d)\n",
validation_result);
validator_debug_log(validation_msg);
cJSON_Delete(event);
return validation_result;
}
validator_debug_log("VALIDATOR_DEBUG: STEP 6 PASSED - Event structure and signature valid\n");
// 7. Extract pubkey for rule evaluation
const char *event_pubkey = cJSON_GetStringValue(pubkey);
if (!event_pubkey || strlen(event_pubkey) != 64) {
validator_debug_log("VALIDATOR_DEBUG: STEP 7 FAILED - Invalid pubkey format\n");
cJSON_Delete(event);
return NOSTR_ERROR_EVENT_INVALID_PUBKEY;
}
char pubkey_msg[256];
sprintf(pubkey_msg, "VALIDATOR_DEBUG: STEP 7 PASSED - Extracted pubkey: %.16s...\n", event_pubkey);
validator_debug_log(pubkey_msg);
/////////////////////////////////////////////////////////////////////
// PHASE 3: EVENT KIND SPECIFIC VALIDATION
/////////////////////////////////////////////////////////////////////
// 8. Handle NIP-42 authentication challenge events (kind 22242)
if (event_kind == 22242) {
validator_debug_log("VALIDATOR_DEBUG: STEP 8 - Processing NIP-42 challenge response\n");
// Check NIP-42 mode using unified cache
const char* nip42_enabled = get_config_value("nip42_auth_enabled");
if (nip42_enabled && strcmp(nip42_enabled, "false") == 0) {
validator_debug_log("VALIDATOR_DEBUG: STEP 8 FAILED - NIP-42 is disabled\n");
free((char*)nip42_enabled);
cJSON_Delete(event);
return NOSTR_ERROR_NIP42_DISABLED;
@@ -353,7 +310,6 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
// TODO: Implement full NIP-42 challenge validation
// For now, accept all valid NIP-42 events
validator_debug_log("VALIDATOR_DEBUG: STEP 8 PASSED - NIP-42 challenge response accepted\n");
cJSON_Delete(event);
return NOSTR_SUCCESS;
}
@@ -364,10 +320,8 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
// 9. Check if authentication rules are enabled
if (!auth_required) {
validator_debug_log("VALIDATOR_DEBUG: STEP 9 - Authentication disabled, skipping database auth rules\n");
} else {
// 10. Check database authentication rules (only if auth enabled)
validator_debug_log("VALIDATOR_DEBUG: STEP 10 - Checking database authentication rules\n");
// Create operation string with event kind for more specific rule matching
char operation_str[64];
@@ -379,17 +333,10 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
// If generic event check fails, try specific event kind check
rules_result = check_database_auth_rules(event_pubkey, operation_str, NULL);
if (rules_result != NOSTR_SUCCESS) {
char rules_msg[256];
sprintf(rules_msg, "VALIDATOR_DEBUG: STEP 10 FAILED - Database rules denied request (kind=%d)\n", event_kind);
validator_debug_log(rules_msg);
cJSON_Delete(event);
return rules_result;
}
}
char rules_success_msg[256];
sprintf(rules_success_msg, "VALIDATOR_DEBUG: STEP 10 PASSED - Database rules allow request (kind=%d)\n", event_kind);
validator_debug_log(rules_success_msg);
}
/////////////////////////////////////////////////////////////////////
@@ -404,44 +351,30 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
pthread_mutex_unlock(&g_unified_cache.cache_lock);
if (pow_enabled && pow_min_difficulty > 0) {
validator_debug_log("VALIDATOR_DEBUG: STEP 11 - Validating NIP-13 Proof of Work\n");
nostr_pow_result_t pow_result;
int pow_validation_result = nostr_validate_pow(event, pow_min_difficulty,
pow_validation_flags, &pow_result);
if (pow_validation_result != NOSTR_SUCCESS) {
char pow_msg[256];
sprintf(pow_msg, "VALIDATOR_DEBUG: STEP 11 FAILED - PoW validation failed (error=%d, difficulty=%d/%d)\n",
pow_validation_result, pow_result.actual_difficulty, pow_min_difficulty);
validator_debug_log(pow_msg);
cJSON_Delete(event);
return pow_validation_result;
}
char pow_success_msg[256];
sprintf(pow_success_msg, "VALIDATOR_DEBUG: STEP 11 PASSED - PoW validated (difficulty=%d, target=%d)\n",
pow_result.actual_difficulty, pow_result.committed_target);
validator_debug_log(pow_success_msg);
} else {
validator_debug_log("VALIDATOR_DEBUG: STEP 11 SKIPPED - PoW validation disabled or min_difficulty=0\n");
}
// 12. NIP-40 Expiration validation
// Always check expiration tags if present (following NIP-40 specification)
validator_debug_log("VALIDATOR_DEBUG: STEP 12 - Starting NIP-40 Expiration validation\n");
cJSON *expiration_tag = NULL;
cJSON *tags_array = cJSON_GetObjectItem(event, "tags");
if (tags_array && cJSON_IsArray(tags_array)) {
cJSON *tag = NULL;
cJSON_ArrayForEach(tag, tags_array) {
if (!cJSON_IsArray(tag)) continue;
cJSON *tag_name = cJSON_GetArrayItem(tag, 0);
if (!tag_name || !cJSON_IsString(tag_name)) continue;
const char *tag_name_str = cJSON_GetStringValue(tag_name);
if (strcmp(tag_name_str, "expiration") == 0) {
cJSON *tag_value = cJSON_GetArrayItem(tag, 1);
@@ -452,57 +385,40 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length)
}
}
}
if (expiration_tag) {
const char *expiration_str = cJSON_GetStringValue(expiration_tag);
// Validate that the expiration string contains only digits (and optional leading whitespace)
const char* p = expiration_str;
// Skip leading whitespace
while (*p == ' ' || *p == '\t') p++;
// Check if we have at least one digit
if (*p == '\0') {
validator_debug_log("VALIDATOR_DEBUG: STEP 12 SKIPPED - Empty expiration tag value, ignoring\n");
} else {
// Validate that all remaining characters are digits
const char* digit_start = p;
while (*p >= '0' && *p <= '9') p++;
// If we didn't consume the entire string or found no digits, it's malformed
if (*p != '\0' || p == digit_start) {
char malformed_msg[256];
sprintf(malformed_msg, "VALIDATOR_DEBUG: STEP 12 SKIPPED - Malformed expiration tag value '%.32s', ignoring\n",
expiration_str);
validator_debug_log(malformed_msg);
} else {
// Valid numeric string, parse and check expiration
time_t expiration_time = (time_t)atol(expiration_str);
time_t now = time(NULL);
int grace_period = get_config_int("nip40_expiration_grace_period", 60);
if (expiration_time > 0 && now > expiration_time + grace_period) {
char exp_msg[256];
sprintf(exp_msg, "VALIDATOR_DEBUG: STEP 12 FAILED - Event expired (now=%ld, exp=%ld, grace=%d)\n",
(long)now, (long)expiration_time, grace_period);
validator_debug_log(exp_msg);
cJSON_Delete(event);
return NOSTR_ERROR_EVENT_EXPIRED;
}
char exp_success_msg[256];
sprintf(exp_success_msg, "VALIDATOR_DEBUG: STEP 12 PASSED - Event not expired (exp=%ld, now=%ld)\n",
(long)expiration_time, (long)now);
validator_debug_log(exp_success_msg);
}
}
} else {
validator_debug_log("VALIDATOR_DEBUG: STEP 12 SKIPPED - No expiration tag found\n");
}
// All validations passed
validator_debug_log("VALIDATOR_DEBUG: STEP 13 PASSED - All validations complete, event ACCEPTED\n");
cJSON_Delete(event);
return NOSTR_SUCCESS;
}
@@ -578,7 +494,6 @@ void nostr_request_result_free_file_data(nostr_request_result_t *result) {
void nostr_request_validator_force_cache_refresh(void) {
// Use unified cache refresh from config.c
force_config_cache_refresh();
validator_debug_log("VALIDATOR: Cache forcibly invalidated via unified cache\n");
}
/**
@@ -586,7 +501,6 @@ void nostr_request_validator_force_cache_refresh(void) {
*/
static int reload_auth_config(void) {
// Configuration is now handled by the unified cache in config.c
validator_debug_log("VALIDATOR: Using unified cache system for configuration\n");
return NOSTR_SUCCESS;
}
@@ -598,35 +512,23 @@ static int reload_auth_config(void) {
* Check database authentication rules for the request
* Implements the 6-step rule evaluation engine from AUTH_API.md
*/
int check_database_auth_rules(const char *pubkey, const char *operation,
int check_database_auth_rules(const char *pubkey, const char *operation __attribute__((unused)),
const char *resource_hash) {
sqlite3 *db = NULL;
sqlite3_stmt *stmt = NULL;
int rc;
if (!pubkey) {
validator_debug_log(
"VALIDATOR_DEBUG: RULES ENGINE - Missing pubkey for rule evaluation\n");
return NOSTR_ERROR_INVALID_INPUT;
}
char rules_msg[256];
sprintf(rules_msg,
"VALIDATOR_DEBUG: RULES ENGINE - Checking rules for pubkey=%.32s..., "
"operation=%s\n",
pubkey, operation ? operation : "NULL");
validator_debug_log(rules_msg);
// Open database using global database path
if (strlen(g_database_path) == 0) {
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - No database path available\n");
return NOSTR_SUCCESS; // Default allow on DB error
}
rc = sqlite3_open_v2(g_database_path, &db, SQLITE_OPEN_READONLY, NULL);
if (rc != SQLITE_OK) {
validator_debug_log(
"VALIDATOR_DEBUG: RULES ENGINE - Failed to open database\n");
return NOSTR_SUCCESS; // Default allow on DB error
}
@@ -640,13 +542,6 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
if (sqlite3_step(stmt) == SQLITE_ROW) {
const char *action = (const char *)sqlite3_column_text(stmt, 1);
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 1 FAILED - "
"Pubkey blacklisted\n");
char blacklist_msg[256];
sprintf(blacklist_msg,
"VALIDATOR_DEBUG: RULES ENGINE - Blacklist rule matched: action=%s\n",
action ? action : "deny");
validator_debug_log(blacklist_msg);
// Set specific violation details for status code mapping
strcpy(g_last_rule_violation.violation_type, "pubkey_blacklist");
@@ -659,8 +554,6 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
}
sqlite3_finalize(stmt);
}
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 1 PASSED - Pubkey "
"not blacklisted\n");
// Step 2: Check hash blacklist
if (resource_hash) {
@@ -673,14 +566,6 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
if (sqlite3_step(stmt) == SQLITE_ROW) {
const char *action = (const char *)sqlite3_column_text(stmt, 1);
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 2 FAILED - "
"Hash blacklisted\n");
char hash_blacklist_msg[256];
sprintf(
hash_blacklist_msg,
"VALIDATOR_DEBUG: RULES ENGINE - Hash blacklist rule matched: action=%s\n",
action ? action : "deny");
validator_debug_log(hash_blacklist_msg);
// Set specific violation details for status code mapping
strcpy(g_last_rule_violation.violation_type, "hash_blacklist");
@@ -693,11 +578,6 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
}
sqlite3_finalize(stmt);
}
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 2 PASSED - Hash "
"not blacklisted\n");
} else {
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 2 SKIPPED - No "
"resource hash provided\n");
}
// Step 3: Check pubkey whitelist
@@ -709,22 +589,12 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
sqlite3_bind_text(stmt, 1, pubkey, -1, SQLITE_STATIC);
if (sqlite3_step(stmt) == SQLITE_ROW) {
const char *action = (const char *)sqlite3_column_text(stmt, 1);
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 PASSED - "
"Pubkey whitelisted\n");
char whitelist_msg[256];
sprintf(whitelist_msg,
"VALIDATOR_DEBUG: RULES ENGINE - Whitelist rule matched: action=%s\n",
action ? action : "allow");
validator_debug_log(whitelist_msg);
sqlite3_finalize(stmt);
sqlite3_close(db);
return NOSTR_SUCCESS; // Allow whitelisted pubkey
}
sqlite3_finalize(stmt);
}
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 3 FAILED - Pubkey "
"not whitelisted\n");
// Step 4: Check if any whitelist rules exist - if yes, deny by default
const char *whitelist_exists_sql =
@@ -735,9 +605,6 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
if (sqlite3_step(stmt) == SQLITE_ROW) {
int whitelist_count = sqlite3_column_int(stmt, 0);
if (whitelist_count > 0) {
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 FAILED - "
"Whitelist exists but pubkey not in it\n");
// Set specific violation details for status code mapping
strcpy(g_last_rule_violation.violation_type, "whitelist_violation");
strcpy(g_last_rule_violation.reason,
@@ -750,12 +617,8 @@ int check_database_auth_rules(const char *pubkey, const char *operation,
}
sqlite3_finalize(stmt);
}
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 4 PASSED - No "
"whitelist restrictions apply\n");
sqlite3_close(db);
validator_debug_log("VALIDATOR_DEBUG: RULES ENGINE - STEP 5 PASSED - All "
"rule checks completed, default ALLOW\n");
return NOSTR_SUCCESS; // Default allow if no restrictive rules matched
}
@@ -821,11 +684,6 @@ static void cleanup_expired_challenges(void) {
}
g_challenge_manager.last_cleanup = now;
char cleanup_msg[256];
sprintf(cleanup_msg, "NIP-42: Cleaned up challenges, %d active remaining\n",
active_count);
validator_debug_log(cleanup_msg);
}
/**
@@ -877,12 +735,6 @@ static int store_challenge(const char *challenge_id, const char *client_ip) {
entry->expires_at = now + g_challenge_manager.timeout_seconds;
entry->active = 1;
char store_msg[256];
sprintf(store_msg,
"NIP-42: Stored challenge %.16s... (expires in %d seconds)\n",
challenge_id, g_challenge_manager.timeout_seconds);
validator_debug_log(store_msg);
return NOSTR_SUCCESS;
}

View File

@@ -5,7 +5,6 @@
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <printf.h>
#include <pthread.h>
#include <libwebsockets.h>
#include "subscriptions.h"
@@ -21,6 +20,13 @@ const char* get_config_value(const char* key);
// Forward declarations for NIP-40 expiration functions
int is_event_expired(cJSON* event, time_t current_time);
// Forward declarations for filter validation
int validate_filter_values(cJSON* filter_json, char* error_message, size_t error_size);
int validate_hex_string(const char* str, size_t expected_len, const char* field_name, char* error_message, size_t error_size);
int validate_timestamp_range(long since, long until, char* error_message, size_t error_size);
int validate_numeric_limits(int limit, char* error_message, size_t error_size);
int validate_search_term(const char* search_term, char* error_message, size_t error_size);
// Global database variable
extern sqlite3* g_db;
@@ -42,7 +48,14 @@ subscription_filter_t* create_subscription_filter(cJSON* filter_json) {
if (!filter_json || !cJSON_IsObject(filter_json)) {
return NULL;
}
// Validate filter values before creating the filter
char error_message[512] = {0};
if (!validate_filter_values(filter_json, error_message, sizeof(error_message))) {
log_warning(error_message);
return NULL;
}
subscription_filter_t* filter = calloc(1, sizeof(subscription_filter_t));
if (!filter) {
return NULL;
@@ -111,28 +124,66 @@ void free_subscription_filter(subscription_filter_t* filter) {
free(filter);
}
// Validate subscription ID format and length
/**
 * Validate subscription ID format and length.
 *
 * A valid subscription ID is non-NULL, non-empty, shorter than
 * SUBSCRIPTION_ID_MAX_LENGTH, and consists solely of ASCII letters,
 * digits, underscores, and hyphens.
 *
 * Returns 1 when the ID is valid, 0 otherwise.
 */
static int validate_subscription_id(const char* sub_id) {
    // Explicit ASCII whitelist: deliberately not isalnum(), which is
    // locale-dependent.
    static const char allowed[] =
        "abcdefghijklmnopqrstuvwxyz"
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "0123456789_-";

    if (sub_id == NULL) {
        return 0; // NULL pointer
    }

    size_t id_len = strlen(sub_id);
    if (id_len == 0 || id_len >= SUBSCRIPTION_ID_MAX_LENGTH) {
        return 0; // Empty or too long
    }

    // Valid only if the leading run of allowed characters spans the
    // entire string (i.e. no character falls outside the whitelist).
    return strspn(sub_id, allowed) == id_len;
}
// Create a new subscription
subscription_t* create_subscription(const char* sub_id, struct lws* wsi, cJSON* filters_array, const char* client_ip) {
if (!sub_id || !wsi || !filters_array) {
log_error("create_subscription: NULL parameter(s)");
return NULL;
}
// Validate subscription ID
if (!validate_subscription_id(sub_id)) {
log_error("create_subscription: invalid subscription ID format or length");
return NULL;
}
subscription_t* sub = calloc(1, sizeof(subscription_t));
if (!sub) {
log_error("create_subscription: failed to allocate subscription");
return NULL;
}
// Copy subscription ID (truncate if too long)
strncpy(sub->id, sub_id, SUBSCRIPTION_ID_MAX_LENGTH - 1);
sub->id[SUBSCRIPTION_ID_MAX_LENGTH - 1] = '\0';
// Copy subscription ID safely (already validated)
size_t id_len = strlen(sub_id);
memcpy(sub->id, sub_id, id_len);
sub->id[id_len] = '\0';
// Set WebSocket connection
sub->wsi = wsi;
// Set client IP
// Set client IP safely
if (client_ip) {
strncpy(sub->client_ip, client_ip, CLIENT_IP_MAX_LENGTH - 1);
sub->client_ip[CLIENT_IP_MAX_LENGTH - 1] = '\0';
size_t ip_len = strlen(client_ip);
if (ip_len >= CLIENT_IP_MAX_LENGTH) {
ip_len = CLIENT_IP_MAX_LENGTH - 1;
}
memcpy(sub->client_ip, client_ip, ip_len);
sub->client_ip[ip_len] = '\0';
} else {
sub->client_ip[0] = '\0'; // Ensure null termination
}
// Set timestamps and state
@@ -209,58 +260,67 @@ int add_subscription_to_manager(subscription_t* sub) {
// Log subscription creation to database
log_subscription_created(sub);
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg), "Added subscription '%s' (total: %d)",
sub->id, g_subscription_manager.total_subscriptions);
log_info(debug_msg);
return 0;
}
// Remove subscription from global manager (thread-safe)
int remove_subscription_from_manager(const char* sub_id, struct lws* wsi) {
if (!sub_id) return -1;
if (!sub_id) {
log_error("remove_subscription_from_manager: NULL subscription ID");
return -1;
}
// Validate subscription ID format
if (!validate_subscription_id(sub_id)) {
log_error("remove_subscription_from_manager: invalid subscription ID format");
return -1;
}
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
subscription_t** current = &g_subscription_manager.active_subscriptions;
while (*current) {
subscription_t* sub = *current;
// Match by ID and WebSocket connection
if (strcmp(sub->id, sub_id) == 0 && (!wsi || sub->wsi == wsi)) {
// Remove from list
*current = sub->next;
g_subscription_manager.total_subscriptions--;
// Copy data needed for logging before unlocking
char client_ip_copy[CLIENT_IP_MAX_LENGTH];
int events_sent_copy = sub->events_sent;
char sub_id_copy[SUBSCRIPTION_ID_MAX_LENGTH];
memcpy(client_ip_copy, sub->client_ip, CLIENT_IP_MAX_LENGTH);
memcpy(sub_id_copy, sub->id, SUBSCRIPTION_ID_MAX_LENGTH);
client_ip_copy[CLIENT_IP_MAX_LENGTH - 1] = '\0';
sub_id_copy[SUBSCRIPTION_ID_MAX_LENGTH - 1] = '\0';
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
// Log subscription closure to database
log_subscription_closed(sub_id, sub->client_ip, "closed");
// Log subscription closure to database (now safe)
log_subscription_closed(sub_id_copy, client_ip_copy, "closed");
// Update events sent counter before freeing
update_subscription_events_sent(sub_id, sub->events_sent);
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg), "Removed subscription '%s' (total: %d)",
sub_id, g_subscription_manager.total_subscriptions);
log_info(debug_msg);
update_subscription_events_sent(sub_id_copy, events_sent_copy);
free_subscription(sub);
return 0;
}
current = &(sub->next);
}
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg), "Subscription '%s' not found for removal", sub_id);
log_warning(debug_msg);
return -1;
}
@@ -269,25 +329,28 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter) {
if (!event || !filter) {
return 0;
}
// Check kinds filter
if (filter->kinds && cJSON_IsArray(filter->kinds)) {
cJSON* event_kind = cJSON_GetObjectItem(event, "kind");
if (!event_kind || !cJSON_IsNumber(event_kind)) {
return 0;
}
int event_kind_val = (int)cJSON_GetNumberValue(event_kind);
int kind_match = 0;
cJSON* kind_item = NULL;
cJSON_ArrayForEach(kind_item, filter->kinds) {
if (cJSON_IsNumber(kind_item) && (int)cJSON_GetNumberValue(kind_item) == event_kind_val) {
kind_match = 1;
break;
if (cJSON_IsNumber(kind_item)) {
int filter_kind = (int)cJSON_GetNumberValue(kind_item);
if (filter_kind == event_kind_val) {
kind_match = 1;
break;
}
}
}
if (!kind_match) {
return 0;
}
@@ -379,40 +442,41 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter) {
if (!event_tags || !cJSON_IsArray(event_tags)) {
return 0; // Event has no tags but filter requires tags
}
// Check each tag filter
cJSON* tag_filter = NULL;
cJSON_ArrayForEach(tag_filter, filter->tag_filters) {
if (!tag_filter->string || strlen(tag_filter->string) < 2 || tag_filter->string[0] != '#') {
continue; // Invalid tag filter
}
const char* tag_name = tag_filter->string + 1; // Skip the '#'
if (!cJSON_IsArray(tag_filter)) {
continue; // Tag filter must be an array
}
int tag_match = 0;
// Search through event tags for matching tag name and value
cJSON* event_tag = NULL;
cJSON_ArrayForEach(event_tag, event_tags) {
if (!cJSON_IsArray(event_tag) || cJSON_GetArraySize(event_tag) < 2) {
continue; // Invalid tag format
}
cJSON* event_tag_name = cJSON_GetArrayItem(event_tag, 0);
cJSON* event_tag_value = cJSON_GetArrayItem(event_tag, 1);
if (!cJSON_IsString(event_tag_name) || !cJSON_IsString(event_tag_value)) {
continue;
}
const char* event_tag_name_str = cJSON_GetStringValue(event_tag_name);
const char* event_tag_value_str = cJSON_GetStringValue(event_tag_value);
// Check if tag name matches
if (strcmp(cJSON_GetStringValue(event_tag_name), tag_name) == 0) {
const char* event_tag_value_str = cJSON_GetStringValue(event_tag_value);
if (strcmp(event_tag_name_str, tag_name) == 0) {
// Check if any of the filter values match this tag value
cJSON* filter_value = NULL;
cJSON_ArrayForEach(filter_value, tag_filter) {
@@ -425,13 +489,13 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter) {
}
}
}
if (tag_match) {
break;
}
}
}
if (!tag_match) {
return 0; // This tag filter didn't match, so the event doesn't match
}
@@ -446,7 +510,7 @@ int event_matches_subscription(cJSON* event, subscription_t* subscription) {
if (!event || !subscription || !subscription->filters) {
return 0;
}
subscription_filter_t* filter = subscription->filters;
while (filter) {
if (event_matches_filter(event, filter)) {
@@ -454,7 +518,7 @@ int event_matches_subscription(cJSON* event, subscription_t* subscription) {
}
filter = filter->next;
}
return 0; // No filters matched
}
@@ -473,70 +537,124 @@ int broadcast_event_to_subscriptions(cJSON* event) {
if (expiration_enabled && filter_responses) {
time_t current_time = time(NULL);
if (is_event_expired(event, current_time)) {
char debug_msg[256];
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
const char* event_id = event_id_obj ? cJSON_GetStringValue(event_id_obj) : "unknown";
snprintf(debug_msg, sizeof(debug_msg), "Skipping broadcast of expired event: %.16s", event_id);
log_info(debug_msg);
return 0; // Don't broadcast expired events
}
}
int broadcasts = 0;
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
// Create a temporary list of matching subscriptions to avoid holding lock during I/O
typedef struct temp_sub {
struct lws* wsi;
char id[SUBSCRIPTION_ID_MAX_LENGTH];
char client_ip[CLIENT_IP_MAX_LENGTH];
struct temp_sub* next;
} temp_sub_t;
temp_sub_t* matching_subs = NULL;
int matching_count = 0;
// First pass: collect matching subscriptions while holding lock
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
subscription_t* sub = g_subscription_manager.active_subscriptions;
while (sub) {
if (sub->active && event_matches_subscription(event, sub)) {
// Create EVENT message for this subscription
cJSON* event_msg = cJSON_CreateArray();
cJSON_AddItemToArray(event_msg, cJSON_CreateString("EVENT"));
cJSON_AddItemToArray(event_msg, cJSON_CreateString(sub->id));
cJSON_AddItemToArray(event_msg, cJSON_Duplicate(event, 1));
char* msg_str = cJSON_Print(event_msg);
if (msg_str) {
size_t msg_len = strlen(msg_str);
unsigned char* buf = malloc(LWS_PRE + msg_len);
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
// Send to WebSocket connection
int write_result = lws_write(sub->wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
if (write_result >= 0) {
sub->events_sent++;
broadcasts++;
// Log event broadcast to database (optional - can be disabled for performance)
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
if (event_id_obj && cJSON_IsString(event_id_obj)) {
log_event_broadcast(cJSON_GetStringValue(event_id_obj), sub->id, sub->client_ip);
}
}
free(buf);
if (sub->active && sub->wsi && event_matches_subscription(event, sub)) {
temp_sub_t* temp = malloc(sizeof(temp_sub_t));
if (temp) {
temp->wsi = sub->wsi;
// Safely copy subscription ID
size_t id_len = strlen(sub->id);
if (id_len >= SUBSCRIPTION_ID_MAX_LENGTH) {
id_len = SUBSCRIPTION_ID_MAX_LENGTH - 1;
}
free(msg_str);
memcpy(temp->id, sub->id, id_len);
temp->id[id_len] = '\0';
// Safely copy client IP
size_t ip_len = strlen(sub->client_ip);
if (ip_len >= CLIENT_IP_MAX_LENGTH) {
ip_len = CLIENT_IP_MAX_LENGTH - 1;
}
memcpy(temp->client_ip, sub->client_ip, ip_len);
temp->client_ip[ip_len] = '\0';
temp->next = matching_subs;
matching_subs = temp;
matching_count++;
} else {
log_error("broadcast_event_to_subscriptions: failed to allocate temp subscription");
}
cJSON_Delete(event_msg);
}
sub = sub->next;
}
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
// Second pass: send messages without holding lock
temp_sub_t* current_temp = matching_subs;
while (current_temp) {
// Create EVENT message for this subscription
cJSON* event_msg = cJSON_CreateArray();
cJSON_AddItemToArray(event_msg, cJSON_CreateString("EVENT"));
cJSON_AddItemToArray(event_msg, cJSON_CreateString(current_temp->id));
cJSON_AddItemToArray(event_msg, cJSON_Duplicate(event, 1));
char* msg_str = cJSON_Print(event_msg);
if (msg_str) {
size_t msg_len = strlen(msg_str);
unsigned char* buf = malloc(LWS_PRE + msg_len);
if (buf) {
memcpy(buf + LWS_PRE, msg_str, msg_len);
// Send to WebSocket connection with error checking
// Note: lws_write can fail if connection is closed, but won't crash
int write_result = lws_write(current_temp->wsi, buf + LWS_PRE, msg_len, LWS_WRITE_TEXT);
if (write_result >= 0) {
broadcasts++;
// Update events sent counter for this subscription
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
subscription_t* update_sub = g_subscription_manager.active_subscriptions;
while (update_sub) {
if (update_sub->wsi == current_temp->wsi &&
strcmp(update_sub->id, current_temp->id) == 0) {
update_sub->events_sent++;
break;
}
update_sub = update_sub->next;
}
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
// Log event broadcast to database (optional - can be disabled for performance)
cJSON* event_id_obj = cJSON_GetObjectItem(event, "id");
if (event_id_obj && cJSON_IsString(event_id_obj)) {
log_event_broadcast(cJSON_GetStringValue(event_id_obj), current_temp->id, current_temp->client_ip);
}
}
free(buf);
}
free(msg_str);
}
sub = sub->next;
cJSON_Delete(event_msg);
current_temp = current_temp->next;
}
// Clean up temporary subscription list
while (matching_subs) {
temp_sub_t* next = matching_subs->next;
free(matching_subs);
matching_subs = next;
}
// Update global statistics
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);
g_subscription_manager.total_events_broadcast += broadcasts;
pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
if (broadcasts > 0) {
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg), "Broadcasted event to %d subscriptions", broadcasts);
log_info(debug_msg);
}
return broadcasts;
}
@@ -705,19 +823,476 @@ void log_event_broadcast(const char* event_id, const char* sub_id, const char* c
// Persist the events-sent counter for a subscription.
// Writes events_sent into the 'created' row of subscription_events for the
// given subscription id. Silently does nothing when the database handle or
// sub_id is missing, or when the statement fails to prepare.
void update_subscription_events_sent(const char* sub_id, int events_sent) {
    if (!g_db || !sub_id) return;
    static const char* const sql =
        "UPDATE subscription_events "
        "SET events_sent = ? "
        "WHERE subscription_id = ? AND event_type = 'created'";
    sqlite3_stmt* stmt = NULL;
    if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) != SQLITE_OK) {
        return; // prepare failed; nothing to clean up
    }
    sqlite3_bind_int(stmt, 1, events_sent);
    sqlite3_bind_text(stmt, 2, sub_id, -1, SQLITE_STATIC);
    sqlite3_step(stmt);
    sqlite3_finalize(stmt);
}
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// PER-IP CONNECTION TRACKING
///////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
// Get or create the tracking record for a client IP (thread-safe).
// Returns the existing record (with last_activity refreshed) when the IP is
// already tracked; otherwise allocates a new record initialized to one active
// connection and inserts it at the head of the list. Returns NULL when
// client_ip is NULL or allocation fails. The list is only touched while
// holding ip_tracking_lock.
ip_connection_info_t* get_or_create_ip_connection(const char* client_ip) {
    if (!client_ip) return NULL;
    pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
    // Fast path: reuse an existing record and refresh its activity stamp.
    for (ip_connection_info_t* entry = g_subscription_manager.ip_connections;
         entry != NULL;
         entry = entry->next) {
        if (strcmp(entry->ip_address, client_ip) == 0) {
            entry->last_activity = time(NULL);
            pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
            return entry;
        }
    }
    // Slow path: first connection seen from this IP — allocate a fresh record.
    ip_connection_info_t* created = calloc(1, sizeof(*created));
    if (created == NULL) {
        pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
        return NULL;
    }
    // Bounded copy of the IP address, always NUL-terminated.
    strncpy(created->ip_address, client_ip, CLIENT_IP_MAX_LENGTH - 1);
    created->ip_address[CLIENT_IP_MAX_LENGTH - 1] = '\0';
    time_t now = time(NULL);
    created->active_connections = 1;  // this call represents one live connection
    created->total_subscriptions = 0;
    created->first_connection = now;
    created->last_activity = now;
    // Push onto the head of the singly linked list.
    created->next = g_subscription_manager.ip_connections;
    g_subscription_manager.ip_connections = created;
    pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
    return created;
}
// Refresh the last-activity timestamp for an IP we are tracking.
// No-op when client_ip is NULL or not present in the list. The traversal is
// performed under ip_tracking_lock for thread safety.
void update_ip_connection_activity(const char* client_ip) {
    if (!client_ip) return;
    pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
    for (ip_connection_info_t* entry = g_subscription_manager.ip_connections;
         entry != NULL;
         entry = entry->next) {
        if (strcmp(entry->ip_address, client_ip) == 0) {
            entry->last_activity = time(NULL);
            break; // only one record exists per IP
        }
    }
    pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
}
// Remove IP connection (when last connection from IP closes)
void remove_ip_connection(const char* client_ip) {
if (!client_ip) return;
pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
ip_connection_info_t** current = &g_subscription_manager.ip_connections;
while (*current) {
ip_connection_info_t* entry = *current;
if (strcmp(entry->ip_address, client_ip) == 0) {
// Remove from list
*current = entry->next;
free(entry);
break;
}
current = &((*current)->next);
}
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
}
// Get total subscriptions for an IP address
int get_total_subscriptions_for_ip(const char* client_ip) {
if (!client_ip) return 0;
pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
ip_connection_info_t* current = g_subscription_manager.ip_connections;
while (current) {
if (strcmp(current->ip_address, client_ip) == 0) {
int total = current->total_subscriptions;
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
return total;
}
current = current->next;
}
pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
return 0;
}
// Get active connections for an IP address.
// Returns the tracked active_connections counter for client_ip, or 0 when
// client_ip is NULL or the IP has no tracking record. Thread-safe: the per-IP
// list is only walked while holding ip_tracking_lock.
int get_active_connections_for_ip(const char* client_ip) {
    if (!client_ip) return 0;
    pthread_mutex_lock(&g_subscription_manager.ip_tracking_lock);
    ip_connection_info_t* current = g_subscription_manager.ip_connections;
    while (current) {
        if (strcmp(current->ip_address, client_ip) == 0) {
            // Copy the value before releasing the lock so the read is consistent.
            int active = current->active_connections;
            pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
            return active;
        }
        current = current->next;
    }
    pthread_mutex_unlock(&g_subscription_manager.ip_tracking_lock);
    return 0; // IP not tracked
}
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
// FILTER VALIDATION FUNCTIONS
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/**
 * Validate hex string format and length.
 *
 * Checks that str is non-NULL, exactly expected_len characters long, and
 * consists solely of hex digits ([0-9a-fA-F]). On failure a description
 * naming field_name is written into error_message (bounded by error_size)
 * and 0 is returned; returns 1 on success.
 */
int validate_hex_string(const char* str, size_t expected_len, const char* field_name, char* error_message, size_t error_size) {
    if (str == NULL) {
        snprintf(error_message, error_size, "%s: null value", field_name);
        return 0;
    }
    const size_t len = strlen(str);
    if (len != expected_len) {
        snprintf(error_message, error_size, "%s: invalid length %zu, expected %zu", field_name, len, expected_len);
        return 0;
    }
    for (size_t i = 0; i < len; i++) {
        const char c = str[i];
        const int is_hex_digit =
            (c >= '0' && c <= '9') ||
            (c >= 'a' && c <= 'f') ||
            (c >= 'A' && c <= 'F');
        if (!is_hex_digit) {
            snprintf(error_message, error_size, "%s: invalid hex character '%c' at position %zu", field_name, c, i);
            return 0;
        }
    }
    return 1;
}
/* True when a set (nonzero) timestamp falls outside [MIN_TIMESTAMP, MAX_TIMESTAMP]. */
static int timestamp_out_of_bounds(long value) {
    return value != 0 && (value < MIN_TIMESTAMP || value > MAX_TIMESTAMP);
}

/**
 * Validate timestamp range (since/until).
 *
 * A value of 0 means "not set". Each set bound must lie within the
 * [MIN_TIMESTAMP, MAX_TIMESTAMP] window, and when both are positive, since
 * must be strictly before until. On failure a message is written into
 * error_message (bounded by error_size) and 0 is returned; returns 1 when
 * the pair is acceptable.
 */
int validate_timestamp_range(long since, long until, char* error_message, size_t error_size) {
    // Neither bound set — nothing to check.
    if (since == 0 && until == 0) {
        return 1;
    }
    if (timestamp_out_of_bounds(since)) {
        snprintf(error_message, error_size, "since: timestamp %ld out of valid range", since);
        return 0;
    }
    if (timestamp_out_of_bounds(until)) {
        snprintf(error_message, error_size, "until: timestamp %ld out of valid range", until);
        return 0;
    }
    // Both bounds set: the window must be non-empty.
    if (since > 0 && until > 0 && since >= until) {
        snprintf(error_message, error_size, "since (%ld) must be before until (%ld)", since, until);
        return 0;
    }
    return 1;
}
/**
 * Validate numeric limits.
 *
 * Checks a filter "limit" value: 0 is accepted as "no limit"; any other
 * value must fall within [MIN_LIMIT, MAX_LIMIT] (macros defined elsewhere).
 * On failure a message is written into error_message (bounded by error_size)
 * and 0 is returned; returns 1 when the value is acceptable.
 */
int validate_numeric_limits(int limit, char* error_message, size_t error_size) {
    // Allow zero (no limit)
    if (limit == 0) {
        return 1;
    }
    // Check for reasonable limits (1-10000)
    // Note: negative values also fail here since MIN_LIMIT is positive per
    // the comment above — confirm against the macro definition.
    if (limit < MIN_LIMIT || limit > MAX_LIMIT) {
        snprintf(error_message, error_size, "limit: value %d out of valid range [%d, %d]", limit, MIN_LIMIT, MAX_LIMIT);
        return 0;
    }
    return 1;
}
/**
 * Validate search term for length and unsafe characters.
 *
 * A NULL term is valid (the filter simply has no search). Otherwise the term
 * must be at most MAX_SEARCH_TERM_LENGTH bytes and contain no ASCII control
 * characters (0-31) or DEL (127). Bytes >= 0x80 (e.g. UTF-8 sequences) are
 * accepted.
 *
 * Fix over the previous version: the old code carried a whitelist of every
 * printable ASCII character — which made the whitelist dead code — and then
 * tested `c < 32` on a plain `char`, so bytes >= 0x80 were rejected only on
 * platforms where `char` is signed. The check below is equivalent for ASCII
 * input and deterministic for non-ASCII input on all platforms.
 *
 * NOTE(review): assumes search terms are bound downstream as SQL parameters
 * rather than concatenated into queries — verify at the call sites.
 *
 * On failure a message is written into error_message (bounded by error_size)
 * and 0 is returned; returns 1 when the term is acceptable.
 */
int validate_search_term(const char* search_term, char* error_message, size_t error_size) {
    if (!search_term) {
        return 1; // NULL search terms are allowed
    }
    size_t len = strlen(search_term);
    // Check maximum length
    if (len > MAX_SEARCH_TERM_LENGTH) {
        snprintf(error_message, error_size, "search: term too long (%zu characters, max %d)", len, (int)MAX_SEARCH_TERM_LENGTH);
        return 0;
    }
    for (size_t i = 0; i < len; i++) {
        // Work on the unsigned byte value so the comparison below does not
        // depend on whether `char` is signed on this platform.
        unsigned char c = (unsigned char)search_term[i];
        // Reject ASCII control characters and DEL.
        if (c < 32 || c == 127) {
            snprintf(error_message, error_size, "search: invalid character (ASCII %d) at position %zu", (int)c, i);
            return 0;
        }
    }
    return 1;
}
/**
 * Validate all filter values in a filter object.
 *
 * Structural and range validation for one subscription filter (a JSON
 * object) before it is used. Checks, in order: "kinds", "authors", "ids",
 * "since"/"until" (via validate_timestamp_range), "limit" (via
 * validate_numeric_limits), "search" (via validate_search_term), and every
 * "#x" tag-filter array. Absent fields are skipped. On the first violation
 * an explanatory message is written into error_message (bounded by
 * error_size) and 0 is returned; returns 1 when every present field is
 * acceptable.
 */
int validate_filter_values(cJSON* filter_json, char* error_message, size_t error_size) {
    if (!filter_json || !cJSON_IsObject(filter_json)) {
        snprintf(error_message, error_size, "filter must be a JSON object");
        return 0;
    }
    // Validate kinds array
    cJSON* kinds = cJSON_GetObjectItem(filter_json, "kinds");
    if (kinds) {
        if (!cJSON_IsArray(kinds)) {
            snprintf(error_message, error_size, "kinds must be an array");
            return 0;
        }
        int kinds_count = cJSON_GetArraySize(kinds);
        if (kinds_count > MAX_KINDS_PER_FILTER) {
            snprintf(error_message, error_size, "kinds array too large (%d items, max %d)", kinds_count, MAX_KINDS_PER_FILTER);
            return 0;
        }
        // Every entry must be a number within the accepted kind range.
        for (int i = 0; i < kinds_count; i++) {
            cJSON* kind_item = cJSON_GetArrayItem(kinds, i);
            if (!cJSON_IsNumber(kind_item)) {
                snprintf(error_message, error_size, "kinds[%d] must be a number", i);
                return 0;
            }
            int kind_val = (int)cJSON_GetNumberValue(kind_item);
            if (kind_val < 0 || kind_val > 65535) { // Reasonable range for event kinds
                snprintf(error_message, error_size, "kinds[%d]: invalid event kind %d", i, kind_val);
                return 0;
            }
        }
    }
    // Validate authors array
    cJSON* authors = cJSON_GetObjectItem(filter_json, "authors");
    if (authors) {
        if (!cJSON_IsArray(authors)) {
            snprintf(error_message, error_size, "authors must be an array");
            return 0;
        }
        int authors_count = cJSON_GetArraySize(authors);
        if (authors_count > MAX_AUTHORS_PER_FILTER) {
            snprintf(error_message, error_size, "authors array too large (%d items, max %d)", authors_count, MAX_AUTHORS_PER_FILTER);
            return 0;
        }
        for (int i = 0; i < authors_count; i++) {
            cJSON* author_item = cJSON_GetArrayItem(authors, i);
            if (!cJSON_IsString(author_item)) {
                snprintf(error_message, error_size, "authors[%d] must be a string", i);
                return 0;
            }
            const char* author_str = cJSON_GetStringValue(author_item);
            // Allow partial pubkeys (prefix matching), so validate hex but allow shorter lengths
            size_t author_len = strlen(author_str);
            if (author_len == 0 || author_len > 64) {
                snprintf(error_message, error_size, "authors[%d]: invalid length %zu", i, author_len);
                return 0;
            }
            // Validate hex characters (allow partial)
            for (size_t j = 0; j < author_len; j++) {
                char c = author_str[j];
                if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'))) {
                    snprintf(error_message, error_size, "authors[%d]: invalid hex character '%c'", i, c);
                    return 0;
                }
            }
        }
    }
    // Validate ids array
    cJSON* ids = cJSON_GetObjectItem(filter_json, "ids");
    if (ids) {
        if (!cJSON_IsArray(ids)) {
            snprintf(error_message, error_size, "ids must be an array");
            return 0;
        }
        int ids_count = cJSON_GetArraySize(ids);
        if (ids_count > MAX_IDS_PER_FILTER) {
            snprintf(error_message, error_size, "ids array too large (%d items, max %d)", ids_count, MAX_IDS_PER_FILTER);
            return 0;
        }
        for (int i = 0; i < ids_count; i++) {
            cJSON* id_item = cJSON_GetArrayItem(ids, i);
            if (!cJSON_IsString(id_item)) {
                snprintf(error_message, error_size, "ids[%d] must be a string", i);
                return 0;
            }
            const char* id_str = cJSON_GetStringValue(id_item);
            // Allow partial IDs (prefix matching)
            size_t id_len = strlen(id_str);
            if (id_len == 0 || id_len > 64) {
                snprintf(error_message, error_size, "ids[%d]: invalid length %zu", i, id_len);
                return 0;
            }
            // Validate hex characters
            for (size_t j = 0; j < id_len; j++) {
                char c = id_str[j];
                if (!((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'))) {
                    snprintf(error_message, error_size, "ids[%d]: invalid hex character '%c'", i, c);
                    return 0;
                }
            }
        }
    }
    // Validate since/until timestamps
    // A value of 0 means "not set" (validate_timestamp_range treats 0 that way).
    long since_val = 0, until_val = 0;
    cJSON* since = cJSON_GetObjectItem(filter_json, "since");
    if (since) {
        if (!cJSON_IsNumber(since)) {
            snprintf(error_message, error_size, "since must be a number");
            return 0;
        }
        since_val = (long)cJSON_GetNumberValue(since);
    }
    cJSON* until = cJSON_GetObjectItem(filter_json, "until");
    if (until) {
        if (!cJSON_IsNumber(until)) {
            snprintf(error_message, error_size, "until must be a number");
            return 0;
        }
        until_val = (long)cJSON_GetNumberValue(until);
    }
    if (!validate_timestamp_range(since_val, until_val, error_message, error_size)) {
        return 0; // validate_timestamp_range already filled error_message
    }
    // Validate limit
    cJSON* limit = cJSON_GetObjectItem(filter_json, "limit");
    if (limit) {
        if (!cJSON_IsNumber(limit)) {
            snprintf(error_message, error_size, "limit must be a number");
            return 0;
        }
        int limit_val = (int)cJSON_GetNumberValue(limit);
        if (!validate_numeric_limits(limit_val, error_message, error_size)) {
            return 0;
        }
    }
    // Validate search term
    cJSON* search = cJSON_GetObjectItem(filter_json, "search");
    if (search) {
        if (!cJSON_IsString(search)) {
            snprintf(error_message, error_size, "search must be a string");
            return 0;
        }
        const char* search_term = cJSON_GetStringValue(search);
        if (!validate_search_term(search_term, error_message, error_size)) {
            return 0;
        }
    }
    // Validate tag filters (#e, #p, #t, etc.)
    // Iterates every key of the filter object; keys of at least two
    // characters starting with '#' are treated as tag filters.
    cJSON* item = NULL;
    cJSON_ArrayForEach(item, filter_json) {
        const char* key = item->string;
        if (key && strlen(key) >= 2 && key[0] == '#') {
            if (!cJSON_IsArray(item)) {
                snprintf(error_message, error_size, "%s must be an array", key);
                return 0;
            }
            int tag_count = cJSON_GetArraySize(item);
            if (tag_count > MAX_TAG_VALUES_PER_FILTER) {
                snprintf(error_message, error_size, "%s array too large (%d items, max %d)", key, tag_count, MAX_TAG_VALUES_PER_FILTER);
                return 0;
            }
            for (int i = 0; i < tag_count; i++) {
                cJSON* tag_value = cJSON_GetArrayItem(item, i);
                if (!cJSON_IsString(tag_value)) {
                    snprintf(error_message, error_size, "%s[%d] must be a string", key, i);
                    return 0;
                }
                const char* tag_str = cJSON_GetStringValue(tag_value);
                size_t tag_len = strlen(tag_str);
                if (tag_len > MAX_TAG_VALUE_LENGTH) {
                    snprintf(error_message, error_size, "%s[%d]: tag value too long (%zu characters, max %d)", key, i, tag_len, MAX_TAG_VALUE_LENGTH);
                    return 0;
                }
            }
        }
    }
    return 1;
}

View File

@@ -55,6 +55,16 @@ struct subscription {
struct subscription* session_next; // Next subscription for this session
};
// Per-IP connection tracking.
// One record per remote IP address, kept in a singly linked list headed at
// g_subscription_manager.ip_connections; all reads and writes of the list
// happen under g_subscription_manager.ip_tracking_lock.
typedef struct ip_connection_info {
    char ip_address[CLIENT_IP_MAX_LENGTH];   // IP address (NUL-terminated)
    int active_connections;                  // Number of active connections from this IP
    int total_subscriptions;                 // Total subscriptions across all connections from this IP
    time_t first_connection;                 // When first connection from this IP was established
    time_t last_activity;                    // Last activity timestamp from this IP
    struct ip_connection_info* next;         // Next in linked list
} ip_connection_info_t;
// Global subscription manager
struct subscription_manager {
subscription_t* active_subscriptions; // Head of global subscription list
@@ -65,6 +75,10 @@ struct subscription_manager {
int max_subscriptions_per_client; // Default: 20
int max_total_subscriptions; // Default: 5000
// Per-IP connection tracking
ip_connection_info_t* ip_connections; // Head of per-IP connection list
pthread_mutex_t ip_tracking_lock; // Thread safety for IP tracking
// Statistics
uint64_t total_created; // Lifetime subscription count
uint64_t total_events_broadcast; // Lifetime event broadcast count
@@ -81,6 +95,13 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter);
int event_matches_subscription(cJSON* event, subscription_t* subscription);
int broadcast_event_to_subscriptions(cJSON* event);
// Per-IP connection tracking functions
ip_connection_info_t* get_or_create_ip_connection(const char* client_ip);
void update_ip_connection_activity(const char* client_ip);
void remove_ip_connection(const char* client_ip);
int get_total_subscriptions_for_ip(const char* client_ip);
int get_active_connections_for_ip(const char* client_ip);
// Database logging functions
void log_subscription_created(const subscription_t* sub);
void log_subscription_closed(const char* sub_id, const char* client_ip, const char* reason);

View File

@@ -28,6 +28,7 @@
#include "subscriptions.h" // Subscription structures and functions
#include "embedded_web_content.h" // Embedded web content
#include "api.h" // API for embedded files
#include "dm_admin.h" // DM admin functions including NIP-17
// Forward declarations for logging functions
void log_info(const char* message);
@@ -68,9 +69,6 @@ int nostr_validate_unified_request(const char* json_string, size_t json_length);
int process_admin_event_in_config(cJSON* event, char* error_message, size_t error_size, struct lws* wsi);
int is_authorized_admin_event(cJSON* event, char* error_message, size_t error_size);
// Forward declarations for NIP-17 admin messaging
int process_nip17_admin_message(cJSON* gift_wrap_event, char* error_message, size_t error_size, struct lws* wsi);
// Forward declarations for DM stats command handling
int process_dm_stats_command(cJSON* dm_event, char* error_message, size_t error_size, struct lws* wsi);
@@ -97,6 +95,8 @@ extern unified_config_cache_t g_unified_cache;
// Forward declarations for global state
extern sqlite3* g_db;
extern int g_server_running;
extern volatile sig_atomic_t g_shutdown_flag;
extern int g_restart_requested;
extern struct lws_context *ws_context;
// Global subscription manager
@@ -120,7 +120,6 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
// Handle HTTP requests
{
char *requested_uri = (char *)in;
log_info("HTTP request received");
// Check if this is an OPTIONS request
char method[16] = {0};
@@ -186,20 +185,12 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
// Handle HTTP body transmission for NIP-11 or embedded files
{
void* user_data = lws_wsi_user(wsi);
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg), "HTTP_WRITEABLE: user_data=%p", user_data);
log_info(debug_msg);
if (user_data) {
int type = *(int*)user_data;
if (type == 0) {
// NIP-11
struct nip11_session_data* session_data = (struct nip11_session_data*)user_data;
snprintf(debug_msg, sizeof(debug_msg), "NIP-11: session_data=%p, type=%d, json_length=%zu, headers_sent=%d, body_sent=%d",
session_data, session_data->type, session_data->json_length, session_data->headers_sent, session_data->body_sent);
log_info(debug_msg);
if (session_data->headers_sent && !session_data->body_sent) {
snprintf(debug_msg, sizeof(debug_msg), "NIP-11: Attempting to send body, json_length=%zu", session_data->json_length);
log_info(debug_msg);
// Allocate buffer for JSON body transmission (no LWS_PRE needed for body)
unsigned char *json_buf = malloc(session_data->json_length);
if (!json_buf) {
@@ -210,7 +201,6 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
lws_set_wsi_user(wsi, NULL);
return -1;
}
log_info("NIP-11: Buffer allocated successfully");
// Copy JSON data to buffer
memcpy(json_buf, session_data->json_buffer, session_data->json_length);
@@ -236,7 +226,6 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
free(session_data);
lws_set_wsi_user(wsi, NULL);
log_success("NIP-11 relay information served successfully");
return 0; // Close connection after successful transmission
}
} else if (type == 1) {
@@ -248,7 +237,6 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
break;
case LWS_CALLBACK_ESTABLISHED:
log_info("WebSocket connection established");
memset(pss, 0, sizeof(*pss));
pthread_mutex_init(&pss->session_lock, NULL);
@@ -280,24 +268,15 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
if (message) {
memcpy(message, in, len);
message[len] = '\0';
// Parse JSON message (this is the normal program flow)
cJSON* json = cJSON_Parse(message);
if (json && cJSON_IsArray(json)) {
// Log the complete parsed JSON message once
char* complete_message = cJSON_Print(json);
if (complete_message) {
char debug_msg[2048];
snprintf(debug_msg, sizeof(debug_msg),
"Received complete WebSocket message: %s", complete_message);
log_info(debug_msg);
free(complete_message);
}
// Get message type
cJSON* type = cJSON_GetArrayItem(json, 0);
if (type && cJSON_IsString(type)) {
const char* msg_type = cJSON_GetStringValue(type);
if (strcmp(msg_type, "EVENT") == 0) {
// Extract event for kind-specific NIP-42 authentication check
cJSON* event_obj = cJSON_GetArrayItem(json, 1);
@@ -305,19 +284,13 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
// Extract event kind for kind-specific NIP-42 authentication check
cJSON* kind_obj = cJSON_GetObjectItem(event_obj, "kind");
int event_kind = kind_obj && cJSON_IsNumber(kind_obj) ? (int)cJSON_GetNumberValue(kind_obj) : -1;
// Extract pubkey and event ID for debugging
cJSON* pubkey_obj = cJSON_GetObjectItem(event_obj, "pubkey");
cJSON* id_obj = cJSON_GetObjectItem(event_obj, "id");
const char* event_pubkey = pubkey_obj ? cJSON_GetStringValue(pubkey_obj) : "unknown";
const char* event_id = id_obj ? cJSON_GetStringValue(id_obj) : "unknown";
char debug_event_msg[512];
snprintf(debug_event_msg, sizeof(debug_event_msg),
"DEBUG EVENT: Processing kind %d event from pubkey %.16s... ID %.16s...",
event_kind, event_pubkey, event_id);
log_info(debug_event_msg);
// Check if NIP-42 authentication is required for this event kind or globally
int auth_required = is_nip42_auth_globally_required() || is_nip42_auth_required_for_kind(event_kind);
@@ -346,15 +319,8 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
}
}
char debug_auth_msg[256];
snprintf(debug_auth_msg, sizeof(debug_auth_msg),
"DEBUG AUTH: auth_required=%d, bypass_auth=%d, pss->authenticated=%d, event_kind=%d",
auth_required, bypass_auth, pss ? pss->authenticated : -1, event_kind);
log_info(debug_auth_msg);
if (pss && auth_required && !pss->authenticated && !bypass_auth) {
if (!pss->auth_challenge_sent) {
log_info("DEBUG AUTH: Sending NIP-42 authentication challenge");
send_nip42_auth_challenge(wsi, pss);
} else {
char auth_msg[256];
@@ -367,9 +333,6 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
}
send_notice_message(wsi, auth_msg);
log_warning("Event rejected: NIP-42 authentication required for kind");
char debug_msg[128];
snprintf(debug_msg, sizeof(debug_msg), "Auth required for kind %d", event_kind);
log_info(debug_msg);
}
cJSON_Delete(json);
free(message);
@@ -450,20 +413,13 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
return 0;
}
log_info("DEBUG VALIDATION: Starting unified validator");
// Call unified validator with JSON string
size_t event_json_len = strlen(event_json_str);
int validation_result = nostr_validate_unified_request(event_json_str, event_json_len);
// Map validation result to old result format (0 = success, -1 = failure)
int result = (validation_result == NOSTR_SUCCESS) ? 0 : -1;
char debug_validation_msg[256];
snprintf(debug_validation_msg, sizeof(debug_validation_msg),
"DEBUG VALIDATION: validation_result=%d, result=%d", validation_result, result);
log_info(debug_validation_msg);
// Generate error message based on validation result
char error_message[512] = {0};
if (result != 0) {
@@ -493,12 +449,6 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
strncpy(error_message, "error: validation failed", sizeof(error_message) - 1);
break;
}
char debug_error_msg[256];
snprintf(debug_error_msg, sizeof(debug_error_msg),
"DEBUG VALIDATION ERROR: %s", error_message);
log_warning(debug_error_msg);
} else {
log_info("DEBUG VALIDATION: Event validated successfully using unified validator");
}
// Cleanup event JSON string
@@ -545,8 +495,6 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
strncpy(error_message, "auth-required: protected event requires authentication", sizeof(error_message) - 1);
error_message[sizeof(error_message) - 1] = '\0';
log_warning("Protected event rejected: authentication required");
} else {
log_info("Protected event accepted: authenticated publisher");
}
}
}
@@ -557,63 +505,34 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
cJSON* kind_obj = cJSON_GetObjectItem(event, "kind");
if (kind_obj && cJSON_IsNumber(kind_obj)) {
int event_kind = (int)cJSON_GetNumberValue(kind_obj);
log_info("DEBUG ADMIN: Checking if admin event processing is needed");
// Log reception of Kind 23456 events
if (event_kind == 23456) {
char* event_json_debug = cJSON_Print(event);
char debug_received_msg[1024];
snprintf(debug_received_msg, sizeof(debug_received_msg),
"RECEIVED Kind %d event: %s", event_kind,
event_json_debug ? event_json_debug : "Failed to serialize");
log_info(debug_received_msg);
if (event_json_debug) {
free(event_json_debug);
}
}
if (event_kind == 23456) {
// Enhanced admin event security - check authorization first
log_info("DEBUG ADMIN: Admin event detected, checking authorization");
char auth_error[512] = {0};
int auth_result = is_authorized_admin_event(event, auth_error, sizeof(auth_error));
if (auth_result != 0) {
// Authorization failed - log and reject
log_warning("DEBUG ADMIN: Admin event authorization failed");
log_warning("Admin event authorization failed");
result = -1;
size_t error_len = strlen(auth_error);
size_t copy_len = (error_len < sizeof(error_message) - 1) ? error_len : sizeof(error_message) - 1;
memcpy(error_message, auth_error, copy_len);
error_message[copy_len] = '\0';
char debug_auth_error_msg[600];
snprintf(debug_auth_error_msg, sizeof(debug_auth_error_msg),
"DEBUG ADMIN AUTH ERROR: %.400s", auth_error);
log_warning(debug_auth_error_msg);
} else {
// Authorization successful - process through admin API
log_info("DEBUG ADMIN: Admin event authorized, processing through admin API");
char admin_error[512] = {0};
int admin_result = process_admin_event_in_config(event, admin_error, sizeof(admin_error), wsi);
char debug_admin_msg[256];
snprintf(debug_admin_msg, sizeof(debug_admin_msg),
"DEBUG ADMIN: process_admin_event_in_config returned %d", admin_result);
log_info(debug_admin_msg);
// Log results for Kind 23456 events
if (event_kind == 23456) {
if (admin_result == 0) {
char success_result_msg[256];
snprintf(success_result_msg, sizeof(success_result_msg),
"SUCCESS: Kind %d event processed successfully", event_kind);
log_success(success_result_msg);
} else {
if (admin_result != 0) {
char error_result_msg[512];
snprintf(error_result_msg, sizeof(error_result_msg),
"ERROR: Kind %d event processing failed: %s", event_kind, admin_error);
@@ -622,120 +541,101 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
}
if (admin_result != 0) {
log_error("DEBUG ADMIN: Failed to process admin event through admin API");
log_error("Failed to process admin event");
result = -1;
size_t error_len = strlen(admin_error);
size_t copy_len = (error_len < sizeof(error_message) - 1) ? error_len : sizeof(error_message) - 1;
memcpy(error_message, admin_error, copy_len);
error_message[copy_len] = '\0';
char debug_admin_error_msg[600];
snprintf(debug_admin_error_msg, sizeof(debug_admin_error_msg),
"DEBUG ADMIN ERROR: %.400s", admin_error);
log_error(debug_admin_error_msg);
} else {
log_success("DEBUG ADMIN: Admin event processed successfully through admin API");
// Admin events are processed by the admin API, not broadcast to subscriptions
}
}
} else if (event_kind == 1059) {
// Check for NIP-17 gift wrap admin messages
log_info("DEBUG NIP17: Detected kind 1059 gift wrap event");
char nip17_error[512] = {0};
int nip17_result = process_nip17_admin_message(event, nip17_error, sizeof(nip17_error), wsi);
cJSON* response_event = process_nip17_admin_message(event, nip17_error, sizeof(nip17_error), wsi);
if (nip17_result != 0) {
log_error("DEBUG NIP17: NIP-17 admin message processing failed");
result = -1;
size_t error_len = strlen(nip17_error);
size_t copy_len = (error_len < sizeof(error_message) - 1) ? error_len : sizeof(error_message) - 1;
memcpy(error_message, nip17_error, copy_len);
error_message[copy_len] = '\0';
if (!response_event) {
// Check if this is an error or if the command was already handled
if (strlen(nip17_error) > 0) {
// There was an actual error
log_error("NIP-17 admin message processing failed");
result = -1;
size_t error_len = strlen(nip17_error);
size_t copy_len = (error_len < sizeof(error_message) - 1) ? error_len : sizeof(error_message) - 1;
memcpy(error_message, nip17_error, copy_len);
error_message[copy_len] = '\0';
char debug_nip17_error_msg[600];
snprintf(debug_nip17_error_msg, sizeof(debug_nip17_error_msg),
"DEBUG NIP17 ERROR: %.400s", nip17_error);
log_error(debug_nip17_error_msg);
} else {
// No error message means the command was already handled (plain text commands)
// Store the original gift wrap event in database
if (store_event(event) != 0) {
log_error("Failed to store gift wrap event in database");
result = -1;
strncpy(error_message, "error: failed to store gift wrap event", sizeof(error_message) - 1);
}
}
} else {
log_success("DEBUG NIP17: NIP-17 admin message processed successfully");
// Store the gift wrap event in database (unlike kind 23456)
// Store the original gift wrap event in database (unlike kind 23456)
if (store_event(event) != 0) {
log_error("DEBUG NIP17: Failed to store gift wrap event in database");
log_error("Failed to store gift wrap event in database");
result = -1;
strncpy(error_message, "error: failed to store gift wrap event", sizeof(error_message) - 1);
cJSON_Delete(response_event);
} else {
log_info("DEBUG NIP17: Gift wrap event stored successfully in database");
// Broadcast gift wrap event to matching persistent subscriptions
int broadcast_count = broadcast_event_to_subscriptions(event);
char debug_broadcast_msg[128];
snprintf(debug_broadcast_msg, sizeof(debug_broadcast_msg),
"DEBUG NIP17 BROADCAST: Gift wrap event broadcast to %d subscriptions", broadcast_count);
log_info(debug_broadcast_msg);
// Broadcast RESPONSE event to matching persistent subscriptions
broadcast_event_to_subscriptions(response_event);
// Clean up response event
cJSON_Delete(response_event);
}
}
} else if (event_kind == 14) {
// Check for DM stats commands addressed to relay
log_info("DEBUG DM: Detected kind 14 DM event");
char dm_error[512] = {0};
int dm_result = process_dm_stats_command(event, dm_error, sizeof(dm_error), wsi);
if (dm_result != 0) {
log_error("DEBUG DM: DM stats command processing failed");
log_error("DM stats command processing failed");
result = -1;
size_t error_len = strlen(dm_error);
size_t copy_len = (error_len < sizeof(error_message) - 1) ? error_len : sizeof(error_message) - 1;
memcpy(error_message, dm_error, copy_len);
error_message[copy_len] = '\0';
char debug_dm_error_msg[600];
snprintf(debug_dm_error_msg, sizeof(debug_dm_error_msg),
"DEBUG DM ERROR: %.400s", dm_error);
log_error(debug_dm_error_msg);
} else {
log_success("DEBUG DM: DM stats command processed successfully");
// Store the DM event in database
if (store_event(event) != 0) {
log_error("DEBUG DM: Failed to store DM event in database");
log_error("Failed to store DM event in database");
result = -1;
strncpy(error_message, "error: failed to store DM event", sizeof(error_message) - 1);
} else {
log_info("DEBUG DM: DM event stored successfully in database");
// Broadcast DM event to matching persistent subscriptions
int broadcast_count = broadcast_event_to_subscriptions(event);
char debug_broadcast_msg[128];
snprintf(debug_broadcast_msg, sizeof(debug_broadcast_msg),
"DEBUG DM BROADCAST: DM event broadcast to %d subscriptions", broadcast_count);
log_info(debug_broadcast_msg);
broadcast_event_to_subscriptions(event);
}
}
} else {
// Regular event - store in database and broadcast
log_info("DEBUG STORAGE: Regular event - storing in database");
if (store_event(event) != 0) {
log_error("DEBUG STORAGE: Failed to store event in database");
log_error("Failed to store event in database");
result = -1;
strncpy(error_message, "error: failed to store event", sizeof(error_message) - 1);
} else {
log_info("DEBUG STORAGE: Event stored successfully in database");
// Broadcast event to matching persistent subscriptions
int broadcast_count = broadcast_event_to_subscriptions(event);
char debug_broadcast_msg[128];
snprintf(debug_broadcast_msg, sizeof(debug_broadcast_msg),
"DEBUG BROADCAST: Event broadcast to %d subscriptions", broadcast_count);
log_info(debug_broadcast_msg);
broadcast_event_to_subscriptions(event);
}
}
} else {
// Event without valid kind - try normal storage
log_warning("DEBUG STORAGE: Event without valid kind - trying normal storage");
log_warning("Event without valid kind - trying normal storage");
if (store_event(event) != 0) {
log_error("DEBUG STORAGE: Failed to store event without kind in database");
log_error("Failed to store event without kind in database");
result = -1;
strncpy(error_message, "error: failed to store event", sizeof(error_message) - 1);
} else {
log_info("DEBUG STORAGE: Event without kind stored successfully in database");
broadcast_event_to_subscriptions(event);
}
}
@@ -750,25 +650,13 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
cJSON_AddItemToArray(response, cJSON_CreateBool(result == 0));
cJSON_AddItemToArray(response, cJSON_CreateString(strlen(error_message) > 0 ? error_message : ""));
// TODO: REPLACE - Remove wasteful cJSON_Print conversion
char *response_str = cJSON_Print(response);
if (response_str) {
char debug_response_msg[512];
snprintf(debug_response_msg, sizeof(debug_response_msg),
"DEBUG RESPONSE: Sending OK response: %s", response_str);
log_info(debug_response_msg);
size_t response_len = strlen(response_str);
unsigned char *buf = malloc(LWS_PRE + response_len);
if (buf) {
memcpy(buf + LWS_PRE, response_str, response_len);
int write_result = lws_write(wsi, buf + LWS_PRE, response_len, LWS_WRITE_TEXT);
char debug_write_msg[128];
snprintf(debug_write_msg, sizeof(debug_write_msg),
"DEBUG RESPONSE: lws_write returned %d", write_result);
log_info(debug_write_msg);
lws_write(wsi, buf + LWS_PRE, response_len, LWS_WRITE_TEXT);
free(buf);
}
free(response_str);
@@ -891,9 +779,7 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
pthread_mutex_unlock(&pss->session_lock);
}
char debug_msg[256];
snprintf(debug_msg, sizeof(debug_msg), "Closed subscription: %s", subscription_id);
log_info(debug_msg);
// Subscription closed
}
} else if (strcmp(msg_type, "AUTH") == 0) {
// Handle NIP-42 AUTH message
@@ -931,7 +817,6 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
break;
case LWS_CALLBACK_CLOSED:
log_info("WebSocket connection closed");
// Clean up session subscriptions
if (pss) {
@@ -1011,7 +896,7 @@ int check_port_available(int port) {
int start_websocket_relay(int port_override, int strict_port) {
struct lws_context_creation_info info;
log_info("Starting libwebsockets-based Nostr relay server...");
// Starting libwebsockets-based Nostr relay server
memset(&info, 0, sizeof(info));
// Use port override if provided, otherwise use configuration
@@ -1039,9 +924,7 @@ int start_websocket_relay(int port_override, int strict_port) {
// Find an available port with pre-checking (or fail immediately in strict mode)
while (port_attempts < (strict_port ? 1 : max_port_attempts)) {
char attempt_msg[256];
snprintf(attempt_msg, sizeof(attempt_msg), "Checking port availability: %d", actual_port);
log_info(attempt_msg);
// Checking port availability
// Pre-check if port is available
if (!check_port_available(actual_port)) {
@@ -1072,9 +955,7 @@ int start_websocket_relay(int port_override, int strict_port) {
// Port appears available, try creating libwebsockets context
info.port = actual_port;
char binding_msg[256];
snprintf(binding_msg, sizeof(binding_msg), "Attempting to bind libwebsockets to port %d", actual_port);
log_info(binding_msg);
// Attempting to bind libwebsockets
ws_context = lws_create_context(&info);
if (ws_context) {
@@ -1125,10 +1006,9 @@ int start_websocket_relay(int port_override, int strict_port) {
} else {
snprintf(startup_msg, sizeof(startup_msg), "WebSocket relay started on ws://127.0.0.1:%d", actual_port);
}
log_success(startup_msg);
// Main event loop with proper signal handling
while (g_server_running) {
while (g_server_running && !g_shutdown_flag) {
int result = lws_service(ws_context, 1000);
if (result < 0) {
@@ -1137,11 +1017,8 @@ int start_websocket_relay(int port_override, int strict_port) {
}
}
log_info("Shutting down WebSocket server...");
lws_context_destroy(ws_context);
ws_context = NULL;
log_success("WebSocket relay shut down cleanly");
return 0;
}
@@ -1255,7 +1132,7 @@ int process_dm_stats_command(cJSON* dm_event, char* error_message, size_t error_
return 0;
}
log_info("Processing DM stats command from admin");
// Processing DM stats command from admin
// Generate stats JSON
char* stats_json = generate_stats_json();
@@ -1306,15 +1183,10 @@ int process_dm_stats_command(cJSON* dm_event, char* error_message, size_t error_
}
// Broadcast to subscriptions
int broadcast_count = broadcast_event_to_subscriptions(dm_response);
char broadcast_msg[128];
snprintf(broadcast_msg, sizeof(broadcast_msg),
"DM stats response broadcast to %d subscriptions", broadcast_count);
log_info(broadcast_msg);
broadcast_event_to_subscriptions(dm_response);
cJSON_Delete(dm_response);
log_success("DM stats command processed successfully");
return 0;
}
@@ -1322,13 +1194,17 @@ int process_dm_stats_command(cJSON* dm_event, char* error_message, size_t error_
// Handle NIP-45 COUNT message
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss) {
(void)pss; // Suppress unused parameter warning
log_info("Handling COUNT message for subscription");
if (!cJSON_IsArray(filters)) {
log_error("COUNT filters is not an array");
return 0;
}
// Parameter binding helpers
char** bind_params = NULL;
int bind_param_count = 0;
int bind_param_capacity = 0;
int total_count = 0;
// Process each filter in the array
@@ -1339,6 +1215,15 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
continue;
}
// Reset bind params for this filter
for (int j = 0; j < bind_param_count; j++) {
free(bind_params[j]);
}
free(bind_params);
bind_params = NULL;
bind_param_count = 0;
bind_param_capacity = 0;
// Build SQL COUNT query based on filter - exclude ephemeral events (kinds 20000-29999) from historical queries
char sql[1024] = "SELECT COUNT(*) FROM events WHERE 1=1 AND (kind < 20000 OR kind >= 30000)";
char* sql_ptr = sql + strlen(sql);
@@ -1378,56 +1263,88 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
// Handle authors filter
cJSON* authors = cJSON_GetObjectItem(filter, "authors");
if (authors && cJSON_IsArray(authors)) {
int author_count = cJSON_GetArraySize(authors);
int author_count = 0;
// Count valid authors
for (int a = 0; a < cJSON_GetArraySize(authors); a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
author_count++;
}
}
if (author_count > 0) {
snprintf(sql_ptr, remaining, " AND pubkey IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int a = 0; a < author_count; a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
if (a > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(author));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (a > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add author values to bind params
for (int a = 0; a < cJSON_GetArraySize(authors); a++) {
cJSON* author = cJSON_GetArrayItem(authors, a);
if (cJSON_IsString(author)) {
if (bind_param_count >= bind_param_capacity) {
bind_param_capacity = bind_param_capacity == 0 ? 16 : bind_param_capacity * 2;
bind_params = realloc(bind_params, bind_param_capacity * sizeof(char*));
}
bind_params[bind_param_count++] = strdup(cJSON_GetStringValue(author));
}
}
}
}
// Handle ids filter
cJSON* ids = cJSON_GetObjectItem(filter, "ids");
if (ids && cJSON_IsArray(ids)) {
int id_count = cJSON_GetArraySize(ids);
int id_count = 0;
// Count valid ids
for (int i = 0; i < cJSON_GetArraySize(ids); i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
id_count++;
}
}
if (id_count > 0) {
snprintf(sql_ptr, remaining, " AND id IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int i = 0; i < id_count; i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(id));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, ")");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add id values to bind params
for (int i = 0; i < cJSON_GetArraySize(ids); i++) {
cJSON* id = cJSON_GetArrayItem(ids, i);
if (cJSON_IsString(id)) {
if (bind_param_count >= bind_param_capacity) {
bind_param_capacity = bind_param_capacity == 0 ? 16 : bind_param_capacity * 2;
bind_params = realloc(bind_params, bind_param_capacity * sizeof(char*));
}
bind_params[bind_param_count++] = strdup(cJSON_GetStringValue(id));
}
}
}
}
@@ -1440,29 +1357,50 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
const char* tag_name = filter_key + 1; // Get the tag name (e, p, t, type, etc.)
if (cJSON_IsArray(filter_item)) {
int tag_value_count = cJSON_GetArraySize(filter_item);
int tag_value_count = 0;
// Count valid tag values
for (int i = 0; i < cJSON_GetArraySize(filter_item); i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
tag_value_count++;
}
}
if (tag_value_count > 0) {
// Use EXISTS with JSON extraction to check for matching tags
snprintf(sql_ptr, remaining, " AND EXISTS (SELECT 1 FROM json_each(json(tags)) WHERE json_extract(value, '$[0]') = '%s' AND json_extract(value, '$[1]') IN (", tag_name);
// Use EXISTS with parameterized query
snprintf(sql_ptr, remaining, " AND EXISTS (SELECT 1 FROM json_each(json(tags)) WHERE json_extract(value, '$[0]') = ? AND json_extract(value, '$[1]') IN (");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
for (int i = 0; i < tag_value_count; i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "'%s'", cJSON_GetStringValue(tag_value));
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
if (i > 0) {
snprintf(sql_ptr, remaining, ",");
sql_ptr++;
remaining--;
}
snprintf(sql_ptr, remaining, "?");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
}
snprintf(sql_ptr, remaining, "))");
sql_ptr += strlen(sql_ptr);
remaining = sizeof(sql) - strlen(sql);
// Add tag name and values to bind params
if (bind_param_count >= bind_param_capacity) {
bind_param_capacity = bind_param_capacity == 0 ? 16 : bind_param_capacity * 2;
bind_params = realloc(bind_params, bind_param_capacity * sizeof(char*));
}
bind_params[bind_param_count++] = strdup(tag_name);
for (int i = 0; i < cJSON_GetArraySize(filter_item); i++) {
cJSON* tag_value = cJSON_GetArrayItem(filter_item, i);
if (cJSON_IsString(tag_value)) {
if (bind_param_count >= bind_param_capacity) {
bind_param_capacity = bind_param_capacity == 0 ? 16 : bind_param_capacity * 2;
bind_params = realloc(bind_params, bind_param_capacity * sizeof(char*));
}
bind_params[bind_param_count++] = strdup(cJSON_GetStringValue(tag_value));
}
}
}
}
}
@@ -1512,10 +1450,7 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
remaining = sizeof(sql) - strlen(sql);
}
// Debug: Log the SQL query being executed
char debug_msg[1280];
snprintf(debug_msg, sizeof(debug_msg), "Executing COUNT SQL: %s", sql);
log_info(debug_msg);
// Execute count query
// Execute count query
sqlite3_stmt* stmt;
@@ -1527,22 +1462,23 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
continue;
}
// Bind parameters
for (int i = 0; i < bind_param_count; i++) {
sqlite3_bind_text(stmt, i + 1, bind_params[i], -1, SQLITE_TRANSIENT);
}
int filter_count = 0;
if (sqlite3_step(stmt) == SQLITE_ROW) {
filter_count = sqlite3_column_int(stmt, 0);
}
char count_debug[128];
snprintf(count_debug, sizeof(count_debug), "Filter %d returned count: %d", i + 1, filter_count);
log_info(count_debug);
// Filter count calculated
sqlite3_finalize(stmt);
total_count += filter_count;
}
char total_debug[128];
snprintf(total_debug, sizeof(total_debug), "Total COUNT result: %d", total_count);
log_info(total_debug);
// Total count calculated
// Send COUNT response - NIP-45 format: ["COUNT", <subscription_id>, {"count": <count>}]
cJSON* count_response = cJSON_CreateArray();
@@ -1567,5 +1503,11 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
}
cJSON_Delete(count_response);
// Cleanup bind params
for (int i = 0; i < bind_param_count; i++) {
free(bind_params[i]);
}
free(bind_params);
return total_count;
}

View File

@@ -14,7 +14,7 @@
#define CHALLENGE_MAX_LENGTH 128
#define AUTHENTICATED_PUBKEY_MAX_LENGTH 65 // 64 hex + null
// Enhanced per-session data with subscription management and NIP-42 authentication
// Enhanced per-session data with subscription management, NIP-42 authentication, and rate limiting
struct per_session_data {
int authenticated;
struct subscription* subscriptions; // Head of this session's subscription list
@@ -30,6 +30,12 @@ struct per_session_data {
int nip42_auth_required_events; // Whether NIP-42 auth is required for EVENT submission
int nip42_auth_required_subscriptions; // Whether NIP-42 auth is required for REQ operations
int auth_challenge_sent; // Whether challenge has been sent (0/1)
// Rate limiting for subscription attempts
int failed_subscription_attempts; // Count of failed subscription attempts
time_t last_failed_attempt; // Timestamp of last failed attempt
time_t rate_limit_until; // Time until rate limiting expires
int consecutive_failures; // Consecutive failed attempts for backoff
};
// NIP-11 HTTP session data structure for managing buffer lifetime

63
tests/subscription_limits.sh Executable file
View File

@@ -0,0 +1,63 @@
#!/bin/bash
# Simple test script to verify subscription limit enforcement and rate limiting.
# It opens one WebSocket connection per REQ (via websocat) and checks that the
# relay eventually answers with a CLOSED ... exceeded message.
# Requirements: a running relay at $RELAY_URL, `websocat`, `timeout`.
set -e

RELAY_URL="ws://127.0.0.1:8888"

echo "=== Subscription Limit Test ==="
echo "[INFO] Testing relay at: $RELAY_URL"
echo "[INFO] Note: This test assumes default subscription limits (max 25 per client)"
echo ""

# Test basic connectivity first
echo "=== Test 1: Basic Connectivity ==="
echo "[INFO] Testing basic WebSocket connection..."

# Send a simple REQ message; any relay reply (EOSE/EVENT/NOTICE) proves the
# socket works. `|| echo TIMEOUT` keeps `set -e` from aborting on failure.
response=$(echo '["REQ","basic_test",{}]' | timeout 5 websocat -n1 "$RELAY_URL" 2>/dev/null || echo "TIMEOUT")

if echo "$response" | grep -q "EOSE\|EVENT\|NOTICE"; then
    echo "[PASS] Basic connectivity works"
else
    echo "[FAIL] Basic connectivity failed. Response: $response"
    exit 1
fi
echo ""

# Test subscription limits
echo "=== Test 2: Subscription Limit Enforcement ==="
echo "[INFO] Testing subscription limits by creating multiple subscriptions..."

success_count=0
limit_hit=false

# Create multiple subscriptions in sequence (each in its own connection)
for i in {1..30}; do
    echo "[INFO] Creating subscription $i..."
    # BUG FIX: was "limit_test_$i_$(...)", which expands the UNDEFINED variable
    # $i_ and silently drops the loop index from the id. Braces make the
    # variable boundary explicit.
    sub_id="limit_test_${i}_$(date +%s%N)"

    response=$(echo "[\"REQ\",\"$sub_id\",{}]" | timeout 5 websocat -n1 "$RELAY_URL" 2>/dev/null || echo "TIMEOUT")

    if echo "$response" | grep -q "CLOSED.*$sub_id.*exceeded"; then
        echo "[INFO] Hit subscription limit at subscription $i"
        limit_hit=true
        break
    elif echo "$response" | grep -q "EOSE\|EVENT"; then
        # BUG FIX: ((success_count++)) returns exit status 1 when the
        # pre-increment value is 0, which kills the script under `set -e`
        # on the FIRST successful subscription. Plain arithmetic assignment
        # has no such failure mode.
        success_count=$((success_count + 1))
    else
        echo "[WARN] Unexpected response for subscription $i: $response"
    fi

    sleep 0.1
done

if [ "$limit_hit" = true ]; then
    echo "[PASS] Subscription limit enforcement working (limit hit after $success_count subscriptions)"
else
    echo "[WARN] Subscription limit not hit after 30 attempts"
fi
echo ""

echo "=== Test Complete ==="