Updated build.sh to automatically commit and push changes after successful build

Your Name
2025-08-14 20:08:19 -04:00
parent b5e115ab26
commit ef2b13e511
8 changed files with 8057 additions and 63 deletions


@@ -0,0 +1,41 @@
This library is fully statically linked. There should be no external dependencies.
When building, use build.sh, not make.
Use it as follows: build.sh -m "useful comment on changes being made"
When making TUI menus, use the first letter of the command as the keypress that executes it. For example, if the command is "Open file", pressing "o" (upper or lower case) should open the file. Prefer this over number-keyed menus when possible. In the command label, underline the letter that triggers the command.
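
Below is a minimal C sketch of this convention, assuming an ANSI-capable terminal for the underline escape codes; the menu entries and helper names are illustrative only, not part of this project.

```c
#include <ctype.h>
#include <stdio.h>

/* Print a menu entry with its hotkey underlined (SGR 4 on, SGR 24 off). */
static void print_menu_item(char hotkey, const char *rest) {
    printf("  \x1b[4m%c\x1b[24m%s\n", hotkey, rest);
}

/* Match a keypress against a hotkey case-insensitively. */
static int is_command(int keypress, char hotkey) {
    return tolower((unsigned char)keypress) == tolower((unsigned char)hotkey);
}

int main(void) {
    print_menu_item('O', "pen file");
    print_menu_item('D', "elete file");

    int key = getchar();
    if (is_command(key, 'O')) {
        printf("open selected\n");
    } else if (is_command(key, 'D')) {
        printf("delete selected\n");
    }
    return 0;
}
```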
When deleting, everything gets moved to the Trash folder.
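
A minimal sketch of the delete-to-Trash rule follows; the project-local "Trash/" directory and the helper name are assumptions for illustration, not something this commit defines.

```c
#include <stdio.h>      /* rename, snprintf, perror */
#include <string.h>     /* strnlen, strncpy */
#include <sys/stat.h>   /* mkdir */
#include <libgen.h>     /* basename */
#include <limits.h>     /* PATH_MAX */

/* Instead of unlinking, move the file into a Trash/ directory (assumed location).
 * Note: rename() fails across filesystems (EXDEV); a full implementation would
 * copy and then unlink in that case. */
static int move_to_trash(const char *path) {
    char dest[PATH_MAX];
    char copy[PATH_MAX];

    if (strnlen(path, PATH_MAX) >= PATH_MAX) {
        return -1; /* path too long */
    }
    strncpy(copy, path, sizeof(copy) - 1);
    copy[sizeof(copy) - 1] = '\0';

    mkdir("Trash", 0755); /* ignore error if it already exists */

    /* basename() may modify its argument, so pass the copy */
    if (snprintf(dest, sizeof(dest), "Trash/%s", basename(copy)) >= (int)sizeof(dest)) {
        return -1; /* would truncate */
    }
    return rename(path, dest); /* 0 on success, -1 on error (sets errno) */
}

int main(void) {
    if (move_to_trash("old_notes.txt") != 0) { /* illustrative filename */
        perror("move_to_trash");
        return 1;
    }
    return 0;
}
```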
MAKEFILE POLICY: There should be only ONE Makefile in the entire project. All build logic (library, tests, examples, websocket) must be consolidated into the root Makefile. Do not create separate Makefiles in subdirectories as this creates testing inconsistencies where you test with one Makefile but run with another.
## Buffer Size Guidelines
### Path Handling
- Always use buffers of size 1024 or PATH_MAX (4096) for file paths
- When concatenating paths with snprintf, ensure buffer is at least 2x the expected maximum input
- Use safer path construction patterns that check lengths before concatenation (see the sketch after this section)
### String Formatting Safety
- Before using snprintf with dynamic strings, validate that buffer size >= sum of all input string lengths + format characters + 1
- Use strnlen() to check actual string lengths before formatting
- Consider using asprintf() for dynamic allocation when exact size is unknown
- Add length validation before snprintf calls
### Compiler Warning Prevention
- Always size string buffers generously (minimum 1024 for paths, 512 for general strings)
- Use buffer size calculations: `size >= strlen(str1) + strlen(str2) + format_overhead + 1`
- Add runtime length checks before snprintf operations
- Consider using safer alternatives like strlcpy/strlcat if available
### Code Patterns to Avoid
- Fixed-size buffers (512 bytes) for path operations where inputs could be 255+ bytes each
- Concatenating unchecked strings with snprintf
- Assuming maximum path component sizes without validation
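
The sketch below applies these guidelines: strnlen() length checks before an snprintf concatenation, plus an asprintf() fallback for unknown sizes. The function names are illustrative, not part of this codebase.

```c
#define _GNU_SOURCE   /* for asprintf on glibc */
#include <limits.h>   /* PATH_MAX */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Join dir + "/" + name into a caller-supplied buffer, checking lengths first.
 * Returns 0 on success, -1 if the result would not fit. */
static int join_path(char *out, size_t out_size, const char *dir, const char *name) {
    size_t dir_len  = strnlen(dir, PATH_MAX);
    size_t name_len = strnlen(name, PATH_MAX);

    /* dir + '/' + name + '\0' must fit: strlen(str1) + strlen(str2) + overhead + 1 */
    if (dir_len + 1 + name_len + 1 > out_size) {
        return -1;
    }
    snprintf(out, out_size, "%s/%s", dir, name);
    return 0;
}

int main(void) {
    char path[PATH_MAX];   /* generously sized fixed buffer, per the guidelines */

    if (join_path(path, sizeof(path), "/var/data", "events.json") == 0) {
        printf("fixed buffer: %s\n", path);
    }

    /* When the exact size is unknown, asprintf() allocates the right amount. */
    char *dynamic = NULL;
    if (asprintf(&dynamic, "%s/%s", "/var/data", "events.json") != -1) {
        printf("dynamic:      %s\n", dynamic);
        free(dynamic);
    }
    return 0;
}
```

The length check mirrors the `size >= strlen(str1) + strlen(str2) + format_overhead + 1` rule above; asprintf() covers the case where no sensible fixed maximum exists.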

3 .vscode/settings.json vendored Normal file

@@ -0,0 +1,3 @@
{
"git.ignoreLimitWarning": true
}

1 VERSION Normal file

@@ -0,0 +1 @@
0.1.22

284 build.sh Executable file

@@ -0,0 +1,284 @@
#!/bin/bash
# Event Miner Build Script
# Provides convenient build targets with automatic version management
# Automatically increments patch version with each build
set -e # Exit on any error
# Project configuration
PROJECT_NAME="event_miner"
PROJECT_NAME_UPPER="EVENT_MINER"
MAIN_BINARY="event_miner"
SRC_DIR="."
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Function to print colored output
print_status() {
echo -e "${BLUE}[INFO]${NC} $1"
}
print_success() {
echo -e "${GREEN}[SUCCESS]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARNING]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Function to automatically increment version
increment_version() {
print_status "Incrementing version..."
# Check if we're in a git repository
if ! git rev-parse --git-dir > /dev/null 2>&1; then
print_warning "Not in a git repository - skipping version increment"
return 0
fi
# Get the highest version tag (not necessarily the most recent chronologically)
LATEST_TAG=$(git tag -l 'v*.*.*' | sort -V | tail -n 1 || echo "v0.1.0")
if [[ -z "$LATEST_TAG" ]]; then
LATEST_TAG="v0.1.0"
fi
# Extract version components (remove 'v' prefix if present)
VERSION=${LATEST_TAG#v}
# Parse major.minor.patch
if [[ $VERSION =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then
MAJOR=${BASH_REMATCH[1]}
MINOR=${BASH_REMATCH[2]}
PATCH=${BASH_REMATCH[3]}
else
print_error "Invalid version format in tag: $LATEST_TAG"
print_error "Expected format: v0.1.0"
return 1
fi
# Increment patch version
NEW_PATCH=$((PATCH + 1))
NEW_VERSION="v${MAJOR}.${MINOR}.${NEW_PATCH}"
print_status "Current version: $LATEST_TAG"
print_status "New version: $NEW_VERSION"
# Create new git tag
if git tag "$NEW_VERSION" 2>/dev/null; then
print_success "Created new version tag: $NEW_VERSION"
else
print_warning "Tag $NEW_VERSION already exists - using existing version"
NEW_VERSION=$LATEST_TAG
fi
# Update VERSION file for compatibility
echo "${NEW_VERSION#v}" > VERSION
print_success "Updated VERSION file to ${NEW_VERSION#v}"
}
# Function to show usage
show_usage() {
echo "Event Miner Build Script"
echo "========================"
echo ""
echo "Usage: $0 [target] [-m \"commit message\"]"
echo ""
echo "Available targets:"
echo " clean - Clean all build artifacts"
echo " build - Build event_miner binary (default)"
echo " test - Run tests"
echo " install - Install event_miner to /usr/local/bin"
echo " uninstall - Remove event_miner from /usr/local/bin"
echo " help - Show this help message"
echo ""
echo "Options:"
echo " -m \"message\" - Commit message (recommended for documentation)"
echo ""
echo "Binary output: $MAIN_BINARY"
echo "Source files: event_miner.c"
echo ""
echo "Note: Each build automatically increments the patch version,"
echo " creates git tags, and updates the VERSION file."
}
# Parse command line arguments
COMMIT_MESSAGE=""
TARGET=""
while [[ $# -gt 0 ]]; do
case $1 in
-m)
COMMIT_MESSAGE="$2"
shift 2
;;
clean|build|examples|test|install|uninstall|help|--help|-h)
TARGET="$1"
shift
;;
*)
if [[ -z "$TARGET" ]]; then
TARGET="$1"
else
print_error "Unknown option: $1"
show_usage
exit 1
fi
shift
;;
esac
done
# Set default target
TARGET=${TARGET:-build}
# Handle targets
case "$TARGET" in
clean)
print_status "Cleaning build artifacts..."
if [[ -f "Makefile" ]]; then
make clean
else
rm -f *.a *.so *.o "$MAIN_BINARY"
rm -f "${SRC_DIR}/version.h" "${SRC_DIR}/version.c"
fi
print_success "Clean completed"
;;
build)
if [[ -n "$COMMIT_MESSAGE" ]]; then
print_status "Build message: $COMMIT_MESSAGE"
fi
increment_version
print_status "Building $MAIN_BINARY..."
if [[ -f "Makefile" ]]; then
make clean
make
else
print_error "No Makefile found. Please create a Makefile for your project."
print_error "The build script works as a wrapper around make."
exit 1
fi
# Check if binary was built
if [[ -f "$MAIN_BINARY" ]]; then
SIZE=$(stat -c%s "$MAIN_BINARY" 2>/dev/null || stat -f%z "$MAIN_BINARY" 2>/dev/null || echo "unknown")
print_success "Event miner built successfully (${SIZE} bytes)"
ls -la "$MAIN_BINARY"
# Commit and push changes if we have a commit message and we're in a git repository
if [[ -n "$COMMIT_MESSAGE" ]] && git rev-parse --git-dir > /dev/null 2>&1; then
print_status "Adding changes to git..."
git add .
print_status "Committing changes..."
git commit -m "$COMMIT_MESSAGE"
print_success "Changes committed with message: $COMMIT_MESSAGE"
print_status "Pushing to remote repository..."
git push origin HEAD && git push --tags
print_success "Changes and tags pushed to remote repository"
fi
else
print_error "Failed to build $MAIN_BINARY binary"
exit 1
fi
;;
examples)
if [[ ! -d "examples" ]]; then
print_error "No examples/ directory found"
exit 1
fi
increment_version
print_status "Building examples..."
if [[ -f "Makefile" ]]; then
make clean
make
if make examples 2>/dev/null; then
print_success "Examples built successfully"
ls -la examples/
else
print_warning "Examples target not found in Makefile"
fi
else
print_error "No Makefile found for building examples"
exit 1
fi
;;
test)
print_status "Running tests..."
if [[ -f "Makefile" ]]; then
make clean
make
if make test 2>/dev/null; then
print_success "All tests passed"
else
print_warning "Test target not found in Makefile, checking tests/ directory"
if [[ -d "tests" ]]; then
cd tests && make test 2>/dev/null && cd ..
print_success "Tests completed"
else
print_error "No tests found"
exit 1
fi
fi
else
print_error "No Makefile found for running tests"
exit 1
fi
;;
install)
increment_version
print_status "Installing library to system..."
if [[ -f "Makefile" ]]; then
make clean
make
sudo make install
print_success "Library installed to /usr/local"
else
print_error "No Makefile found for installation"
exit 1
fi
;;
uninstall)
print_status "Uninstalling library from system..."
if [[ -f "Makefile" ]]; then
sudo make uninstall
print_success "Library uninstalled"
else
print_error "No Makefile found for uninstallation"
exit 1
fi
;;
help|--help|-h)
show_usage
;;
*)
print_error "Unknown target: $TARGET"
echo ""
show_usage
exit 1
;;
esac

7552 debug.log

File diff suppressed because it is too large.

Binary file not shown.

event_miner.c

@@ -17,18 +17,46 @@
#include <getopt.h>
#include <errno.h>
#include "nostr_core_lib/nostr_core/nostr_core.h"
#include "nostr_core_lib/nostr_core/cJSON.h"
#include "nostr_core_lib/cjson/cJSON.h"
// Constants
#define MAX_EVENT_SIZE 1048576 // 1MB max event size
#define DEFAULT_THREADS 4
#define DEFAULT_POW 2
// Data structures
// Forward declarations for callbacks
typedef struct mining_context mining_context_t;
// Callback function types
typedef void (*solution_callback_t)(cJSON* solution, void* user_data);
typedef void (*progress_callback_t)(int thread_id, uint64_t attempts, void* user_data);
typedef void (*error_callback_t)(int thread_id, int error_code, void* user_data);
// Main context for control decisions
typedef struct {
volatile int solution_found;
volatile int timeout_reached;
cJSON* result_event;
pthread_mutex_t result_mutex;
} main_context_t;
// Mining context for workers (keeping legacy fields for now during transition)
struct mining_context {
cJSON* event;
unsigned char private_key[32];
int target_difficulty;
int thread_id;
// Callbacks for reporting (no control decisions)
solution_callback_t solution_callback;
progress_callback_t progress_callback;
error_callback_t error_callback;
void* user_data;
// Control flag (only main thread modifies)
volatile int should_stop;
// Legacy fields for compatibility during transition
volatile int found;
cJSON* result_event;
pthread_mutex_t mutex;
@@ -36,14 +64,14 @@ typedef struct {
time_t start_time;
int timeout_seconds;
int thread_count;
} mining_context_t;
};
typedef struct {
int pow;
char* nsec;
int threads;
char* event_file;
int timeout_min;
int timeout_sec;
int help;
} args_t;
@@ -56,6 +84,11 @@ static void* miner_thread(void* arg);
static int mine_event(mining_context_t* ctx);
static void cleanup_context(mining_context_t* ctx);
// Callback implementations
static void solution_found_callback(cJSON* solution, void* user_data);
static void progress_report_callback(int thread_id, uint64_t attempts, void* user_data);
static void error_report_callback(int thread_id, int error_code, void* user_data);
// Usage information
static void usage(const char* prog_name) {
fprintf(stderr, "Usage: %s -pow <difficulty> -nsec <private_key> -threads <count> [options]\n\n", prog_name);
@@ -65,11 +98,11 @@ static void usage(const char* prog_name) {
fprintf(stderr, " -threads <count> Number of mining threads to use\n\n");
fprintf(stderr, "Optional arguments:\n");
fprintf(stderr, " -e <filename> Read event from file (default: stdin)\n");
fprintf(stderr, " --timeout_min <min> Timeout in minutes (default: no timeout)\n");
fprintf(stderr, " --timeout_sec <sec> Timeout in seconds (default: no timeout)\n");
fprintf(stderr, " -h, --help Show this help message\n\n");
fprintf(stderr, "Examples:\n");
fprintf(stderr, " echo '{\"kind\":1,...}' | %s -pow 4 -nsec nsec1... -threads 8\n", prog_name);
fprintf(stderr, " %s -pow 4 -nsec abc123... -threads 8 -e event.json --timeout_min 10\n", prog_name);
fprintf(stderr, " %s -pow 4 -nsec abc123... -threads 8 -e event.json --timeout_sec 60\n", prog_name);
}
// Parse command line arguments
@@ -110,10 +143,10 @@ static int parse_arguments(int argc, char* argv[], args_t* args) {
return -1;
}
i++; // Skip the next argument
} else if (strcmp(argv[i], "--timeout_min") == 0 && i + 1 < argc) {
args->timeout_min = atoi(argv[i + 1]);
if (args->timeout_min <= 0) {
fprintf(stderr, "Error: timeout_min must be a positive integer\n");
} else if (strcmp(argv[i], "--timeout_sec") == 0 && i + 1 < argc) {
args->timeout_sec = atoi(argv[i + 1]);
if (args->timeout_sec <= 0) {
fprintf(stderr, "Error: timeout_sec must be a positive integer\n");
return -1;
}
i++; // Skip the next argument
@@ -204,13 +237,44 @@ static char* read_stdin_json(void) {
return buffer;
}
// Mining thread function
// Callback implementations
static void solution_found_callback(cJSON* solution, void* user_data) {
main_context_t* main_ctx = (main_context_t*)user_data;
pthread_mutex_lock(&main_ctx->result_mutex);
if (!main_ctx->solution_found) {
main_ctx->solution_found = 1;
main_ctx->result_event = cJSON_Duplicate(solution, 1);
}
pthread_mutex_unlock(&main_ctx->result_mutex);
}
static void progress_report_callback(int thread_id, uint64_t attempts, void* user_data) {
// Progress callback - placeholder for future implementation
// For now, do nothing as requested
(void)thread_id; // Suppress unused parameter warning
(void)attempts; // Suppress unused parameter warning
(void)user_data; // Suppress unused parameter warning
}
static void error_report_callback(int thread_id, int error_code, void* user_data) {
// Error callback for debugging - placeholder for future implementation
// For now, do nothing but could be used for debugging thread issues
(void)thread_id; // Suppress unused parameter warning
(void)error_code; // Suppress unused parameter warning
(void)user_data; // Suppress unused parameter warning
}
// Mining thread function - New callback-based approach
static void* miner_thread(void* arg) {
mining_context_t* ctx = (mining_context_t*)arg;
// Create a copy of the event for this thread
char* event_str = cJSON_Print(ctx->event);
if (!event_str) {
if (ctx->error_callback) {
ctx->error_callback(ctx->thread_id, -1, ctx->user_data);
}
return NULL;
}
@@ -218,106 +282,149 @@ static void* miner_thread(void* arg) {
free(event_str);
if (!local_event) {
if (ctx->error_callback) {
ctx->error_callback(ctx->thread_id, -2, ctx->user_data);
}
return NULL;
}
// Mine until solution found or timeout
while (!ctx->found) {
// Check timeout
if (ctx->timeout_seconds > 0) {
time_t current_time = time(NULL);
if (current_time - ctx->start_time >= ctx->timeout_seconds) {
pthread_mutex_lock(&ctx->mutex);
if (!ctx->found) {
ctx->found = -1; // Timeout flag
}
pthread_cond_broadcast(&ctx->cond);
pthread_mutex_unlock(&ctx->mutex);
break;
}
}
uint64_t attempts = 0;
// Mine until solution found or signaled to stop by main thread
while (!ctx->should_stop) {
// Attempt mining
int result = nostr_add_proof_of_work(local_event, ctx->private_key,
ctx->target_difficulty, NULL, NULL);
attempts++;
if (result == NOSTR_SUCCESS) {
pthread_mutex_lock(&ctx->mutex);
if (!ctx->found) {
ctx->found = 1; // Success flag
ctx->result_event = cJSON_Duplicate(local_event, 1);
pthread_cond_broadcast(&ctx->cond);
// Found solution - report to main thread via callback
if (ctx->solution_callback) {
ctx->solution_callback(local_event, ctx->user_data);
}
pthread_mutex_unlock(&ctx->mutex);
break;
break; // Exit after reporting solution
}
// Small delay to prevent CPU overuse
usleep(1000); // 1ms
// Progress reporting (currently disabled but ready for future use)
if (ctx->progress_callback && attempts % 10000 == 0) {
ctx->progress_callback(ctx->thread_id, attempts, ctx->user_data);
}
// Small delay to prevent CPU overuse and allow responsive stopping
usleep(100); // 0.1ms - more responsive to should_stop signal
}
cJSON_Delete(local_event);
return NULL;
}
// Main mining function
// Main mining function - New hub-and-spoke model
static int mine_event(mining_context_t* ctx) {
// Initialize synchronization objects
if (pthread_mutex_init(&ctx->mutex, NULL) != 0) {
fprintf(stderr, "Error: Failed to initialize mutex\n");
// Set up main context for centralized control
main_context_t main_ctx;
memset(&main_ctx, 0, sizeof(main_context_t));
// Initialize result mutex
if (pthread_mutex_init(&main_ctx.result_mutex, NULL) != 0) {
fprintf(stderr, "Error: Failed to initialize result mutex\n");
return -1;
}
if (pthread_cond_init(&ctx->cond, NULL) != 0) {
fprintf(stderr, "Error: Failed to initialize condition variable\n");
pthread_mutex_destroy(&ctx->mutex);
// Set up callback system
ctx->solution_callback = solution_found_callback;
ctx->progress_callback = progress_report_callback;
ctx->error_callback = error_report_callback;
ctx->user_data = &main_ctx;
ctx->should_stop = 0;
// Create individual worker contexts (each gets its own thread_id)
mining_context_t* worker_contexts = malloc(ctx->thread_count * sizeof(mining_context_t));
if (!worker_contexts) {
fprintf(stderr, "Error: Memory allocation failed for worker contexts\n");
pthread_mutex_destroy(&main_ctx.result_mutex);
return -1;
}
// Copy base context to each worker and set thread_id
for (int i = 0; i < ctx->thread_count; i++) {
memcpy(&worker_contexts[i], ctx, sizeof(mining_context_t));
worker_contexts[i].thread_id = i;
}
// Create worker threads
pthread_t* threads = malloc(ctx->thread_count * sizeof(pthread_t));
if (!threads) {
fprintf(stderr, "Error: Memory allocation failed\n");
pthread_mutex_destroy(&ctx->mutex);
pthread_cond_destroy(&ctx->cond);
fprintf(stderr, "Error: Memory allocation failed for threads\n");
free(worker_contexts);
pthread_mutex_destroy(&main_ctx.result_mutex);
return -1;
}
ctx->start_time = time(NULL);
time_t start_time = time(NULL);
// Start threads
for (int i = 0; i < ctx->thread_count; i++) {
if (pthread_create(&threads[i], NULL, miner_thread, ctx) != 0) {
if (pthread_create(&threads[i], NULL, miner_thread, &worker_contexts[i]) != 0) {
fprintf(stderr, "Error: Failed to create thread %d\n", i);
// Clean up already created threads
ctx->found = -2; // Error flag
// Stop already running threads
for (int j = 0; j < ctx->thread_count; j++) {
worker_contexts[j].should_stop = 1;
}
// Wait for threads that were created
for (int j = 0; j < i; j++) {
pthread_join(threads[j], NULL);
}
free(threads);
pthread_mutex_destroy(&ctx->mutex);
pthread_cond_destroy(&ctx->cond);
free(worker_contexts);
pthread_mutex_destroy(&main_ctx.result_mutex);
return -1;
}
}
// Wait for solution or timeout
pthread_mutex_lock(&ctx->mutex);
while (ctx->found == 0) {
pthread_cond_wait(&ctx->cond, &ctx->mutex);
// Main thread control loop - centralized monitoring
int result = 0;
while (!main_ctx.solution_found && !main_ctx.timeout_reached) {
// Check for timeout
if (ctx->timeout_seconds > 0) {
time_t current_time = time(NULL);
if (current_time - start_time >= ctx->timeout_seconds) {
main_ctx.timeout_reached = 1;
result = -1; // Timeout
break;
}
}
// Small sleep to avoid busy waiting
usleep(10000); // 10ms
}
// Signal all workers to stop
for (int i = 0; i < ctx->thread_count; i++) {
worker_contexts[i].should_stop = 1;
}
pthread_mutex_unlock(&ctx->mutex);
// Wait for all threads to finish
for (int i = 0; i < ctx->thread_count; i++) {
pthread_join(threads[i], NULL);
}
free(threads);
pthread_mutex_destroy(&ctx->mutex);
pthread_cond_destroy(&ctx->cond);
// Handle results
if (main_ctx.solution_found && main_ctx.result_event) {
ctx->result_event = main_ctx.result_event; // Transfer ownership
result = 1; // Success
} else if (main_ctx.timeout_reached) {
result = -1; // Timeout
} else {
result = -2; // Error
}
return ctx->found;
// Cleanup
free(threads);
free(worker_contexts);
pthread_mutex_destroy(&main_ctx.result_mutex);
return result;
}
// Cleanup context
@@ -393,7 +500,7 @@ int main(int argc, char* argv[]) {
// Set mining parameters
ctx.target_difficulty = args.pow;
ctx.thread_count = args.threads;
ctx.timeout_seconds = args.timeout_min > 0 ? args.timeout_min * 60 : 0;
ctx.timeout_seconds = args.timeout_sec > 0 ? args.timeout_sec : 0;
// Start mining
int mining_result = mine_event(&ctx);
@@ -409,7 +516,7 @@ int main(int argc, char* argv[]) {
exit_code = 1;
}
} else if (mining_result == -1) {
fprintf(stderr, "Error: Mining timeout reached\n");
printf("timeout\n");
exit_code = 1;
} else {
fprintf(stderr, "Error: Mining failed\n");

6 update_nostr_core.sh Executable file

@@ -0,0 +1,6 @@
#!/bin/bash
git submodule update --remote nostr_core_lib
git add nostr_core_lib
git commit -m "Update nostr_core_lib submodule to latest version"