#!/bin/bash

# Performance Benchmarking Suite for C-Relay
# Measures performance metrics and throughput

set -e

# Configuration
RELAY_HOST="127.0.0.1"
RELAY_PORT="8888"
BENCHMARK_DURATION=30 # seconds

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
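
# All benchmark traffic is plain Nostr client messages over a WebSocket (NIP-01):
#   ["REQ","<subscription_id>",{<filter>}]    open a subscription
#   ["CLOSE","<subscription_id>"]             close a subscription
#   ["COUNT","<subscription_id>",{<filter>}]  request a result count (NIP-45)
# benchmark_request below treats EOSE, EVENT, OK, and COUNT responses as success.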

# Metrics tracking
TOTAL_REQUESTS=0
SUCCESSFUL_REQUESTS=0
FAILED_REQUESTS=0
TOTAL_RESPONSE_TIME=0
MIN_RESPONSE_TIME=999999
MAX_RESPONSE_TIME=0

# Function to benchmark single request
benchmark_request() {
    local message="$1"
    local start_time
    local end_time
    local response_time
    local success=0

    start_time=$(date +%s%N)
    local response
    response=$(echo "$message" | timeout 5 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT 2>/dev/null | head -1 || echo 'TIMEOUT')
    end_time=$(date +%s%N)

    response_time=$(( (end_time - start_time) / 1000000 )) # Convert to milliseconds

    # Per NIP-01/NIP-45, EOSE, EVENT, OK, and COUNT frames all indicate the relay
    # handled the request; anything else (including TIMEOUT) counts as a failure.
    if [[ "$response" == *"EOSE"* ]] || [[ "$response" == *"EVENT"* ]] || [[ "$response" == *"OK"* ]] || [[ "$response" == *"COUNT"* ]]; then
        success=1
    fi

    # Return: response_time:success
    echo "$response_time:$success"
}
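
# Example: a single timed round-trip against the configured relay. The function
# prints "<milliseconds>:<0|1>", e.g. "42:1" for a successful 42 ms request:
#   result=$(benchmark_request '["REQ","example",{}]')
#   IFS=':' read -r ms ok <<< "$result"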

# Function to run throughput benchmark
run_throughput_benchmark() {
    local test_name="$1"
    local message="$2"
    local concurrent_clients="${3:-10}"
    local test_duration="${4:-$BENCHMARK_DURATION}"

    echo "=========================================="
    echo "Throughput Benchmark: $test_name"
    echo "=========================================="
    echo "Concurrent clients: $concurrent_clients"
    echo "Duration: ${test_duration}s"
    echo ""

    # Reset metrics
    TOTAL_REQUESTS=0
    SUCCESSFUL_REQUESTS=0
    FAILED_REQUESTS=0
    TOTAL_RESPONSE_TIME=0
    MIN_RESPONSE_TIME=999999
    MAX_RESPONSE_TIME=0

    local start_time
    start_time=$(date +%s)

    # Launch concurrent clients; each writes its results to a temp file, because
    # the stdout of a backgrounded subshell cannot be captured through `wait`.
    local pids=()
    local result_files=()
    local client_results=()

    for i in $(seq 1 "$concurrent_clients"); do
        local result_file
        result_file=$(mktemp)
        result_files+=("$result_file")
        (
            local client_start
            client_start=$(date +%s)
            local client_requests=0
            local client_total_response_time=0
            local client_successful_requests=0
            local client_min_time=999999
            local client_max_time=0

            while [[ $(($(date +%s) - client_start)) -lt $test_duration ]]; do
                local result
                result=$(benchmark_request "$message")
                local response_time success
                IFS=':' read -r response_time success <<< "$result"

                client_total_response_time=$((client_total_response_time + response_time))
                client_requests=$((client_requests + 1))

                if [[ "$success" == "1" ]]; then
                    client_successful_requests=$((client_successful_requests + 1))
                fi

                if [[ $response_time -lt client_min_time ]]; then
                    client_min_time=$response_time
                fi

                if [[ $response_time -gt client_max_time ]]; then
                    client_max_time=$response_time
                fi

                # Small delay to prevent overwhelming the relay
                sleep 0.01
            done

            # Client results: requests:successful:total_response_time:min_time:max_time
            echo "$client_requests:$client_successful_requests:$client_total_response_time:$client_min_time:$client_max_time" > "$result_file"
        ) &
        pids+=($!)
    done

    # Wait for all clients to complete, then collect their results
    for pid in "${pids[@]}"; do
        wait "$pid"
    done

    for result_file in "${result_files[@]}"; do
        client_results+=("$(cat "$result_file")")
        rm -f "$result_file"
    done

    local end_time
    end_time=$(date +%s)
    local actual_duration=$((end_time - start_time))

    # Process client results
    local total_requests=0
    local successful_requests=0
    local total_response_time=0
    local min_response_time=999999
    local max_response_time=0

    for client_result in "${client_results[@]}"; do
        IFS=':' read -r client_requests client_successful client_total_time client_min_time client_max_time <<< "$client_result"

        total_requests=$((total_requests + client_requests))
        successful_requests=$((successful_requests + client_successful))
        total_response_time=$((total_response_time + client_total_time))

        if [[ $client_min_time -lt min_response_time ]]; then
            min_response_time=$client_min_time
        fi

        if [[ $client_max_time -gt max_response_time ]]; then
            max_response_time=$client_max_time
        fi
    done

    # Calculate metrics
    local avg_response_time="N/A"
    if [[ $successful_requests -gt 0 ]]; then
        avg_response_time="$((total_response_time / successful_requests))ms"
    fi

    local requests_per_second="N/A"
    if [[ $actual_duration -gt 0 ]]; then
        requests_per_second="$((total_requests / actual_duration))"
    fi

    local success_rate="N/A"
    if [[ $total_requests -gt 0 ]]; then
        success_rate="$((successful_requests * 100 / total_requests))%"
    fi

    local failed_requests=$((total_requests - successful_requests))

    # Report results
    echo "=== Benchmark Results ==="
    echo "Total requests: $total_requests"
    echo "Successful requests: $successful_requests"
    echo "Failed requests: $failed_requests"
    echo "Success rate: $success_rate"
    echo "Requests per second: $requests_per_second"
    echo "Average response time: $avg_response_time"
    echo "Min response time: ${min_response_time}ms"
    echo "Max response time: ${max_response_time}ms"
    echo "Actual duration: ${actual_duration}s"
    echo ""

    # Performance assessment (skipped when throughput could not be computed)
    if [[ "$requests_per_second" == "N/A" ]]; then
        echo -e "${YELLOW}⚠ Throughput could not be computed${NC}"
    elif [[ $requests_per_second -gt 1000 ]]; then
        echo -e "${GREEN}✓ EXCELLENT throughput${NC}"
    elif [[ $requests_per_second -gt 500 ]]; then
        echo -e "${GREEN}✓ GOOD throughput${NC}"
    elif [[ $requests_per_second -gt 100 ]]; then
        echo -e "${YELLOW}⚠ MODERATE throughput${NC}"
    else
        echo -e "${RED}✗ LOW throughput${NC}"
    fi
}
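
# Example: 5 concurrent clients sending an empty-filter REQ for 10 seconds.
#   run_throughput_benchmark "Smoke Test" '["REQ","smoke",{}]' 5 10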

# Function to benchmark memory usage patterns
benchmark_memory_usage() {
    echo "=========================================="
    echo "Memory Usage Benchmark"
    echo "=========================================="

    # Memory figures are the RSS column of `ps aux` (in KB) for the first
    # process whose command line matches "c_relay".
    local initial_memory
    initial_memory=$(ps aux | grep c_relay | grep -v grep | awk '{print $6}' | head -1)

    if [[ -z "$initial_memory" ]]; then
        echo -e "${YELLOW}⚠ No running c_relay process found; skipping memory benchmark${NC}"
        return
    fi

    echo "Initial memory usage: ${initial_memory}KB"

    # Create an increasing number of subscriptions
    for i in 10 25 50 100; do
        echo -n "Testing with $i concurrent subscriptions... "

        # Create subscriptions
        for j in $(seq 1 "$i"); do
            echo "[\"REQ\",\"mem_test_${j}\",{}]" | timeout 2 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 &
        done

        sleep 2

        local current_memory
        current_memory=$(ps aux | grep c_relay | grep -v grep | awk '{print $6}' | head -1)
        local memory_increase=$((current_memory - initial_memory))

        echo "${current_memory}KB (+${memory_increase}KB)"

        # Clean up subscriptions
        for j in $(seq 1 "$i"); do
            echo "[\"CLOSE\",\"mem_test_${j}\"]" | timeout 2 websocat -B 1048576 ws://$RELAY_HOST:$RELAY_PORT >/dev/null 2>&1 &
        done

        sleep 1
    done

    local final_memory
    final_memory=$(ps aux | grep c_relay | grep -v grep | awk '{print $6}' | head -1)
    echo "Final memory usage: ${final_memory}KB"
}
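
# Sourcing this file instead of executing it makes the functions above available
# for ad-hoc use (the guard below skips the full benchmark run), e.g.:
#   benchmark_memory_usage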

# Only run main code if script is executed directly (not sourced)
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    echo "=========================================="
    echo "C-Relay Performance Benchmarking Suite"
    echo "=========================================="
    echo "Benchmarking relay at ws://$RELAY_HOST:$RELAY_PORT"
    echo ""
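
    # Every benchmark shells out to websocat for WebSocket I/O; fail fast with a
    # clear message if it is missing rather than reporting each request as a timeout.
    if ! command -v websocat >/dev/null 2>&1; then
        echo -e "${RED}websocat not found in PATH. Install it before running the benchmarks.${NC}"
        exit 1
    fi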

    # Test basic connectivity
    echo "=== Connectivity Test ==="
    connectivity_result=$(benchmark_request '["REQ","bench_test",{}]')
    IFS=':' read -r response_time success <<< "$connectivity_result"
    if [[ "$success" != "1" ]]; then
        echo -e "${RED}Cannot connect to relay. Aborting benchmarks.${NC}"
        exit 1
    fi
    echo -e "${GREEN}✓ Relay is accessible${NC}"
    echo ""
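
    # Each benchmark embeds $(date +%s%N) in its subscription id so successive
    # runs do not reuse ids from earlier benchmarks.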

    # Run throughput benchmarks
    run_throughput_benchmark "Simple REQ Throughput" '["REQ","throughput_'$(date +%s%N)'",{}]' 10 15
    echo ""

    run_throughput_benchmark "Complex Filter Throughput" '["REQ","complex_'$(date +%s%N)'",{"kinds":[1,2,3],"#e":["test"],"limit":10}]' 10 15
    echo ""

    run_throughput_benchmark "COUNT Message Throughput" '["COUNT","count_'$(date +%s%N)'",{}]' 10 15
    echo ""

    run_throughput_benchmark "High Load Throughput" '["REQ","high_load_'$(date +%s%N)'",{}]' 25 20
    echo ""

    # Memory usage benchmark
    benchmark_memory_usage
    echo ""

    echo "=========================================="
    echo "Benchmarking Complete"
    echo "=========================================="
    echo "Performance benchmarks completed. Review results above for optimization opportunities."
fi