Mirror of https://github.com/fiatjaf/nak.git, synced 2026-01-26 04:18:50 +00:00.

Compare commits (52 commits):

686d960f62, af04838153, c6da13649d, acd6227dd0, 00fbda9af7, e838de9b72, 6dfbed4413, 0e283368ed, 38775e0d93, fabcad3f61, 69e4895e48, 81524de04f, 8334474f96, 87f27e214e, 32999917b4, a19a179548, 9b684f2c65, 6d87887855, e9c4deaf6d, 965a312b46, 2e4079f92c, 5b64795015, 5d4fe434c3, b95665d986, 3be80c29df, e01cfbde47, e91d4429ec, 21423b4a21, 1b7f3162b5, 8f38468103, 9bf728d850, 8396738fe2, c1d1682d6e, 6f00ff4c73, 68bbece3db, a83b23d76b, a288cc47a4, 5ee7670ba8, b973b476bc, 252612b12f, 4b8b6bb3de, df491be232, 1dab81f77c, 11228d7082, a422b5f708, 852fe6bdfb, 210cf66d5f, f9335b0ab4, 16916d7d95, 3ff4dbe196, 2de3ff78ee, 03c1bf832e
.github/workflows/release-cli.yml (vendored) — 222 changed lines
@@ -4,46 +4,210 @@ on:
  push:
    tags:
      - '*'
  pull_request:
    branches: [ main, master ]

permissions:
  contents: write

jobs:
  make-release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/create-release@latest
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: ${{ github.ref }}
  build-all-for-all:
    runs-on: ubuntu-latest
    needs:
      - make-release
    strategy:
      fail-fast: false
      matrix:
        goos: [linux, freebsd, darwin, windows]
        goos: [linux, freebsd, windows]
        goarch: [amd64, arm64, riscv64]
        exclude:
          - goarch: arm64
            goos: windows
          - goarch: riscv64
            goos: windows
          - goarch: riscv64
            goos: darwin
          - goarch: arm64
            goos: freebsd
          - goarch: arm64
            goos: windows
          - goarch: riscv64
            goos: windows
          - goarch: arm64
            goos: freebsd
          - goarch: riscv64
            goos: freebsd
    steps:
      - uses: actions/checkout@v3
      - uses: wangyoucao577/go-release-action@v1.40
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          ldflags: -X main.version=${{ github.ref_name }}
          overwrite: true
          md5sum: false
          sha256sum: false
          compress_assets: false
          go-version: 'stable'
      - name: Install FUSE dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libfuse-dev
      - name: Install cross-compilation tools for ARM64
        if: matrix.goarch == 'arm64' && matrix.goos == 'linux'
        run: |
          sudo apt-get install -y gcc-aarch64-linux-gnu
      - name: Install cross-compilation tools for RISC-V
        if: matrix.goarch == 'riscv64' && matrix.goos == 'linux'
        run: |
          sudo apt-get install -y gcc-riscv64-linux-gnu
      - name: Install FreeBSD SDK
        if: matrix.goos == 'freebsd'
        run: |
          sudo apt-get update
          sudo apt-get install -y clang lld binutils-riscv64-unknown-elf 2>/dev/null || true
          mkdir -p /tmp/freebsd-sdk
          cd /tmp/freebsd-sdk
          # Determine download path based on architecture
          case "${{ matrix.goarch }}" in
            arm64) DLPATH="arm64" ;;
            riscv64) DLPATH="riscv" ;;
            *) DLPATH="amd64" ;;
          esac
          wget -q "https://download.freebsd.org/releases/${DLPATH}/14.3-RELEASE/base.txz" && tar -xf base.txz
          # Download and extract libfuse source
          mkdir -p libfuse && cd libfuse
          wget -q https://github.com/libfuse/libfuse/releases/download/fuse-2.9.9/fuse-2.9.9.tar.gz
          tar -xzf fuse-2.9.9.tar.gz --strip-components=1
          mkdir -p /tmp/freebsd-sdk/usr/include
          cp include/fuse.h /tmp/freebsd-sdk/usr/include/
          cp include/fuse_*.h /tmp/freebsd-sdk/usr/include/ 2>/dev/null || true
          cd /tmp/freebsd-sdk
      - name: Set cross-compiler
        id: set-cc
        run: |
          if [ "${{ matrix.goarch }}" = "arm64" ] && [ "${{ matrix.goos }}" = "linux" ]; then
            echo "CC=aarch64-linux-gnu-gcc" >> $GITHUB_ENV
          elif [ "${{ matrix.goarch }}" = "riscv64" ] && [ "${{ matrix.goos }}" = "linux" ]; then
            echo "CC=riscv64-linux-gnu-gcc" >> $GITHUB_ENV
          elif [ "${{ matrix.goos }}" = "freebsd" ]; then
            TRIPLE="x86_64-unknown-freebsd14.3"
            [ "${{ matrix.goarch }}" = "arm64" ] && TRIPLE="aarch64-unknown-freebsd14.3"
            [ "${{ matrix.goarch }}" = "riscv64" ] && TRIPLE="riscv64-unknown-freebsd14.3"
            echo "CC=clang --target=$TRIPLE --sysroot=/tmp/freebsd-sdk" >> $GITHUB_ENV
            echo "CGO_CFLAGS=-isystem /tmp/freebsd-sdk/usr/include --target=$TRIPLE" >> $GITHUB_ENV
            echo "CGO_LDFLAGS=-L/tmp/freebsd-sdk/usr/lib -L/tmp/freebsd-sdk/lib -L/tmp/freebsd-sdk/usr/lib64 --target=$TRIPLE" >> $GITHUB_ENV
          fi
      - name: Build binary
        env:
          GOOS: ${{ matrix.goos }}
          GOARCH: ${{ matrix.goarch }}
          CGO_ENABLED: ${{ matrix.goos == 'windows' && '0' || '1' }}
        run: |
          go build -ldflags "-X main.version=${{ github.ref_name }}" -o nak-${{ github.ref_name }}-${{ matrix.goos }}-${{ matrix.goarch }}
      - name: Upload Release Asset
        if: startsWith(github.ref, 'refs/tags/')
        uses: softprops/action-gh-release@v1
        with:
          files: ./nak-${{ github.ref_name }}-${{ matrix.goos }}-${{ matrix.goarch }}

  build-darwin:
    runs-on: macos-latest
    strategy:
      fail-fast: false
      matrix:
        goarch: [amd64, arm64]
    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: 'stable'
      - name: Install macFUSE
        run: brew install --cask macfuse
      - name: Build binary
        env:
          GOOS: darwin
          GOARCH: ${{ matrix.goarch }}
          CGO_ENABLED: '1'
        run: |
          go build -ldflags "-X main.version=${{ github.ref_name }}" -o nak-${{ github.ref_name }}-darwin-${{ matrix.goarch }}
      - name: Upload Release Asset
        if: startsWith(github.ref, 'refs/tags/')
        uses: softprops/action-gh-release@v1
        with:
          files: ./nak-${{ github.ref_name }}-darwin-${{ matrix.goarch }}

  smoke-test-linux-amd64:
    runs-on: ubuntu-latest
    needs:
      - build-all-for-all
    steps:
      - name: download and smoke test latest binary
        run: |
          set -eo pipefail # exit on error, and on pipe failures

          echo "downloading nak binary from releases"
          RELEASE_URL="https://api.github.com/repos/fiatjaf/nak/releases/latest"
          wget $(wget -q -O - ${RELEASE_URL} | jq -r '.assets[] | select(.name | contains("linux-amd64")) | .browser_download_url') -O nak -nv
          chmod +x nak

          echo "printing version..."
          ./nak --version

          # generate and manipulate keys
          echo "testing key operations..."
          SECRET_KEY=$(./nak key generate)
          PUBLIC_KEY=$(echo $SECRET_KEY | ./nak key public)
          echo "generated key pair: $SECRET_KEY => $PUBLIC_KEY"

          # create events
          echo "testing event creation..."
          ./nak event -c "hello world"
          HELLOWORLD=$(./nak event -c "hello world")
          echo " hello world again: $HELLOWORLD"
          ./nak event --ts "2 days ago" -c "event with timestamp"
          ./nak event -k 1 -t "t=test" -c "event with tag"

          # test NIP-19 encoding/decoding
          echo "testing NIP-19 encoding/decoding..."
          NSEC=$(echo $SECRET_KEY | ./nak encode nsec)
          echo "encoded nsec: $NSEC"
          ./nak encode npub 79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
          EVENT_ID="5ae731bbc7711f78513da14927c48cc7143a91e6cad0565fdc4d73b8967a7d59"
          NEVENT1=$(./nak encode nevent $EVENT_ID)
          echo "encoded nevent1: $NEVENT1"
          ./nak decode $NEVENT1
          ./nak decode npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6

          # test event verification
          echo "testing event verification..."
          # create an event and verify it
          VERIFY_EVENT=$(./nak event -c "verify me")
          echo $VERIFY_EVENT | ./nak verify

          # test PoW
          echo "testing pow..."
          ./nak event -c "testing pow" --pow 8

          # test NIP-49 key encryption/decryption
          echo "testing NIP-49 key encryption/decryption..."
          ENCRYPTED_KEY=$(./nak key encrypt $SECRET_KEY "testpassword")
          echo "encrypted key: ${ENCRYPTED_KEY:0:20}..."
          DECRYPTED_KEY=$(./nak key decrypt $ENCRYPTED_KEY "testpassword")
          if [ "$DECRYPTED_KEY" != "$SECRET_KEY" ]; then
            echo "nip-49 encryption/decryption test failed!"
            exit 1
          fi

          # test multi-value tags
          echo "testing multi-value tags..."
          ./nak event --ts "yesterday" -t "e=f59911b561c37c90b01e9e5c2557307380835c83399756f4d62d8167227e420a;wss://relay.example.com;root" -c "testing multi-value tags"

          # test relay operations (with a public relay)
          echo "testing publishing..."
          # publish a simple event to a public relay
          EVENT_JSON=$(./nak event --sec $SECRET_KEY -c "test from nak smoke test" nos.lol < /dev/null)
          EVENT_ID=$(echo $EVENT_JSON | jq -r .id)
          echo "published event ID: $EVENT_ID"

          # wait a moment for propagation
          sleep 2

          # fetch the event we just published
          ./nak req -i $EVENT_ID nos.lol

          # test serving (just start and immediately kill)
          echo "testing serve command..."
          timeout 2s ./nak serve || true

          # test filesystem mount (just start and immediately kill)
          echo "testing fs mount command..."
          mkdir -p /tmp/nostr-mount
          timeout 2s ./nak fs --sec $SECRET_KEY /tmp/nostr-mount || true

          echo "all tests passed"
.github/workflows/smoke-test-release.yml (vendored) — 97 lines removed (file deleted)
@@ -1,97 +0,0 @@
name: Smoke test the binary

on:
  workflow_run:
    workflows: ["build cli for all platforms"]
    types:
      - completed
    branches:
      - master

jobs:
  smoke-test-linux-amd64:
    runs-on: ubuntu-latest
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    steps:
      - name: Download and smoke test latest binary
        run: |
          set -eo pipefail # Exit on error, and on pipe failures

          echo "Downloading nak binary from releases"
          RELEASE_URL="https://api.github.com/repos/fiatjaf/nak/releases/latest"
          wget $(wget -q -O - ${RELEASE_URL} | jq -r '.assets[] | select(.name | contains("linux-amd64")) | .browser_download_url') -O nak -nv
          chmod +x nak

          echo "Running basic tests..."
          ./nak --version

          # Generate and manipulate keys
          echo "Testing key operations..."
          SECRET_KEY=$(./nak key generate)
          PUBLIC_KEY=$(echo $SECRET_KEY | ./nak key public)
          echo "Generated key pair: $PUBLIC_KEY"

          # Create events
          echo "Testing event creation..."
          ./nak event -c "hello world"
          ./nak event --ts "2 days ago" -c "event with timestamp"
          ./nak event -k 1 -t "t=test" -c "event with tag"

          # Test NIP-19 encoding/decoding
          echo "Testing NIP-19 encoding/decoding..."
          NSEC=$(echo $SECRET_KEY | ./nak encode nsec)
          echo "Encoded nsec: $NSEC"
          ./nak encode npub 79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
          NOTE_ID="5ae731bbc7711f78513da14927c48cc7143a91e6cad0565fdc4d73b8967a7d59"
          NOTE1=$(./nak encode note $NOTE_ID)
          echo "Encoded note1: $NOTE1"
          ./nak decode $NOTE1
          ./nak decode npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6

          # Test event verification
          echo "Testing event verification..."
          # Create an event and verify it
          VERIFY_EVENT=$(./nak event -c "verify me")
          echo $VERIFY_EVENT | ./nak verify

          # Test PoW
          echo "Testing PoW..."
          ./nak event -c "testing pow" --pow 8

          # Test NIP-49 key encryption/decryption
          echo "Testing NIP-49 key encryption/decryption..."
          ENCRYPTED_KEY=$(./nak key encrypt $SECRET_KEY "testpassword")
          echo "Encrypted key: ${ENCRYPTED_KEY:0:20}..."
          DECRYPTED_KEY=$(./nak key decrypt $ENCRYPTED_KEY "testpassword")
          if [ "$DECRYPTED_KEY" != "$SECRET_KEY" ]; then
            echo "NIP-49 encryption/decryption test failed!"
            exit 1
          fi

          # Test multi-value tags
          echo "Testing multi-value tags..."
          ./nak event --ts "yesterday" -t "e=f59911b561c37c90b01e9e5c2557307380835c83399756f4d62d8167227e420a;wss://relay.example.com;root" -c "Testing multi-value tags"

          # Test relay operations (with a public relay)
          echo "Testing relay operations..."
          # Publish a simple event to a public relay
          EVENT_JSON=$(./nak event --sec $SECRET_KEY -c "Test from nak smoke test" nos.lol)
          EVENT_ID=$(echo $EVENT_JSON | jq -r .id)
          echo "Published event ID: $EVENT_ID"

          # Wait a moment for propagation
          sleep 2

          # Fetch the event we just published
          ./nak req -i $EVENT_ID nos.lol

          # Test serving (just start and immediately kill)
          echo "Testing serve command..."
          timeout 2s ./nak serve || true

          # Test filesystem mount (just start and immediately kill)
          echo "Testing fs mount command..."
          mkdir -p /tmp/nostr-mount
          timeout 2s ./nak fs --sec $SECRET_KEY /tmp/nostr-mount || true

          echo "All tests passed"
bunker.go — 10 changed lines
@@ -329,6 +329,10 @@ var bunker = &cli.Command{

	// asking user for authorization
	signer.AuthorizeRequest = func(harmless bool, from nostr.PubKey, secret string) bool {
		if slices.Contains(config.AuthorizedKeys, from) || slices.Contains(authorizedSecrets, secret) {
			return true
		}

		if secret == newSecret {
			// store this key
			config.AuthorizedKeys = appendUnique(config.AuthorizedKeys, from)
@@ -343,9 +347,11 @@ var bunker = &cli.Command{
			if persist != nil {
				persist()
			}

			return true
		}

		return slices.Contains(config.AuthorizedKeys, from) || slices.Contains(authorizedSecrets, secret)
		return false
	}

	for ie := range events {
@@ -366,6 +372,7 @@ var bunker = &cli.Command{
	handlerWg.Add(len(relayURLs))
	for _, relayURL := range relayURLs {
		go func(relayURL string) {
			defer handlerWg.Done()
			if relay, _ := sys.Pool.EnsureRelay(relayURL); relay != nil {
				err := relay.Publish(ctx, eventResponse)
				printLock.Lock()
@@ -375,7 +382,6 @@ var bunker = &cli.Command{
					log("* failed to send response: %s\n", err)
				}
				printLock.Unlock()
				handlerWg.Done()
			}
		}(relayURL)
	}
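The last hunks above move the WaitGroup release into a `defer handlerWg.Done()` at the top of the goroutine and drop the old inline `handlerWg.Done()`, so the counter is decremented even when `EnsureRelay` returns no relay. A minimal standalone sketch of the same pattern, with `publish` as a hypothetical stand-in for the real relay call:

package main

import (
	"fmt"
	"sync"
)

// publish is a hypothetical stand-in for relay.Publish in the real code.
func publish(url string) error { return nil }

func main() {
	relayURLs := []string{"wss://a.example", "wss://b.example"}
	var wg sync.WaitGroup
	wg.Add(len(relayURLs))
	for _, url := range relayURLs {
		go func(url string) {
			defer wg.Done() // runs on every exit path, including early returns
			if err := publish(url); err != nil {
				fmt.Printf("* failed to send response: %s\n", err)
			}
		}(url)
	}
	wg.Wait()
}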
dekey.go (new file) — 435 lines
@@ -0,0 +1,435 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/nip44"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/fatih/color"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var dekey = &cli.Command{
|
||||
Name: "dekey",
|
||||
Usage: "handles NIP-4E decoupled encryption keys",
|
||||
Description: "maybe this picture will explain better than I can do here for now: https://cdn.azzamo.net/89c543d261ad0d665c1dea78f91e527c2e39e7fe503b440265a3c47e63c9139f.png",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: append(defaultKeyFlags,
|
||||
&cli.StringFlag{
|
||||
Name: "device",
|
||||
Usage: "name of this device that will be published and displayed on other clients",
|
||||
Value: func() string {
|
||||
if hostname, err := os.Hostname(); err == nil {
|
||||
return "nak@" + hostname
|
||||
}
|
||||
return "nak@unknown"
|
||||
}(),
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "rotate",
|
||||
Usage: "force the creation of a new decoupled encryption key, effectively invalidating any previous ones",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "authorize-all",
|
||||
Aliases: []string{"yolo"},
|
||||
Usage: "do not ask for confirmation, just automatically send the decoupled encryption key to all devices that exist",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "reject-all",
|
||||
Usage: "do not ask for confirmation, just not send the decoupled encryption key to any device",
|
||||
},
|
||||
),
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
userPub, err := kr.GetPublicKey(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get user public key: %w", err)
|
||||
}
|
||||
|
||||
configPath := c.String("config-path")
|
||||
deviceName := c.String("device")
|
||||
|
||||
log("handling device key for %s as %s\n",
|
||||
color.YellowString(deviceName),
|
||||
color.CyanString(nip19.EncodeNpub(userPub)),
|
||||
)
|
||||
// check if we already have a local-device secret key
|
||||
deviceKeyPath := filepath.Join(configPath, "dekey", "device-key")
|
||||
var deviceSec nostr.SecretKey
|
||||
if data, err := os.ReadFile(deviceKeyPath); err == nil {
|
||||
log(color.GreenString("found existing device key\n"))
|
||||
deviceSec, err = nostr.SecretKeyFromHex(string(data))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid device key in %s: %w", deviceKeyPath, err)
|
||||
}
|
||||
} else {
|
||||
log(color.YellowString("generating new device key\n"))
|
||||
// create one
|
||||
deviceSec = nostr.Generate()
|
||||
os.MkdirAll(filepath.Dir(deviceKeyPath), 0700)
|
||||
if err := os.WriteFile(deviceKeyPath, []byte(deviceSec.Hex()), 0600); err != nil {
|
||||
return fmt.Errorf("failed to write device key: %w", err)
|
||||
}
|
||||
log(color.GreenString("device key generated and stored\n"))
|
||||
}
|
||||
devicePub := deviceSec.Public()
|
||||
|
||||
// get relays for the user
|
||||
log("fetching write relays for %s\n", color.CyanString(nip19.EncodeNpub(userPub)))
|
||||
relays := sys.FetchWriteRelays(ctx, userPub)
|
||||
relayList := connectToAllRelays(ctx, c, relays, nil, nostr.PoolOptions{})
|
||||
if len(relayList) == 0 {
|
||||
return fmt.Errorf("no relays to use")
|
||||
}
|
||||
|
||||
// check for kind:10044
|
||||
log("- checking for decoupled encryption key (kind:10044)\n")
|
||||
keyAnnouncementResult := sys.Pool.FetchManyReplaceable(ctx, relays, nostr.Filter{
|
||||
Kinds: []nostr.Kind{10044},
|
||||
Authors: []nostr.PubKey{userPub},
|
||||
}, nostr.SubscriptionOptions{Label: "nak-nip4e"})
|
||||
var eSec nostr.SecretKey
|
||||
var ePub nostr.PubKey
|
||||
|
||||
var generateNewEncryptionKey bool
|
||||
keyAnnouncementEvent, ok := keyAnnouncementResult.Load(nostr.ReplaceableKey{PubKey: userPub, D: ""})
|
||||
if !ok {
|
||||
log("- no decoupled encryption key found, generating new one\n")
|
||||
generateNewEncryptionKey = true
|
||||
} else {
|
||||
// get the pub from the tag
|
||||
for _, tag := range keyAnnouncementEvent.Tags {
|
||||
if len(tag) >= 2 && tag[0] == "n" {
|
||||
ePub, _ = nostr.PubKeyFromHex(tag[1])
|
||||
break
|
||||
}
|
||||
}
|
||||
if ePub == nostr.ZeroPK {
|
||||
return fmt.Errorf("got invalid kind:10044 event, no 'n' tag")
|
||||
}
|
||||
|
||||
log(". a decoupled encryption public key already exists: %s\n", color.CyanString(ePub.Hex()))
|
||||
if c.Bool("rotate") {
|
||||
log(color.GreenString("rotating it by generating a new one\n"))
|
||||
generateNewEncryptionKey = true
|
||||
}
|
||||
}
|
||||
|
||||
if generateNewEncryptionKey {
|
||||
// generate main secret key
|
||||
eSec = nostr.Generate()
|
||||
ePub = eSec.Public()
|
||||
|
||||
// store it
|
||||
eKeyPath := filepath.Join(configPath, "dekey", "p", userPub.Hex(), "e", ePub.Hex())
|
||||
os.MkdirAll(filepath.Dir(eKeyPath), 0700)
|
||||
if err := os.WriteFile(eKeyPath, []byte(eSec.Hex()), 0600); err != nil {
|
||||
return fmt.Errorf("failed to write decoupled encryption key: %w", err)
|
||||
}
|
||||
log("decoupled encryption key generated and stored, public key: %s\n", color.CyanString(ePub.Hex()))
|
||||
|
||||
// publish kind:10044
|
||||
log("publishing decoupled encryption public key (kind:10044)\n")
|
||||
evt10044 := nostr.Event{
|
||||
Kind: 10044,
|
||||
Content: "",
|
||||
CreatedAt: nostr.Now(),
|
||||
Tags: nostr.Tags{
|
||||
{"n", ePub.Hex()},
|
||||
},
|
||||
}
|
||||
if err := kr.SignEvent(ctx, &evt10044); err != nil {
|
||||
return fmt.Errorf("failed to sign kind:10044: %w", err)
|
||||
}
|
||||
if err := publishFlow(ctx, c, kr, evt10044, relayList); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// check if we have the key
|
||||
eKeyPath := filepath.Join(configPath, "dekey", "p", userPub.Hex(), "e", ePub.Hex())
|
||||
if data, err := os.ReadFile(eKeyPath); err == nil {
|
||||
log(color.GreenString("- and we have it locally already\n"))
|
||||
eSec, err = nostr.SecretKeyFromHex(string(data))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid main key: %w", err)
|
||||
}
|
||||
if eSec.Public() != ePub {
|
||||
return fmt.Errorf("stored decoupled encryption key is corrupted: %w", err)
|
||||
}
|
||||
} else {
|
||||
log("- decoupled encryption key not found locally, attempting to fetch the key from other devices\n")
|
||||
|
||||
// check if our kind:4454 is already published
|
||||
log("- checking for existing device announcement (kind:4454)\n")
|
||||
ourDeviceAnnouncementEvents := make([]nostr.Event, 0, 1)
|
||||
for evt := range sys.Pool.FetchMany(ctx, relays, nostr.Filter{
|
||||
Kinds: []nostr.Kind{4454},
|
||||
Authors: []nostr.PubKey{userPub},
|
||||
Tags: nostr.TagMap{
|
||||
"P": []string{devicePub.Hex()},
|
||||
},
|
||||
Limit: 1,
|
||||
}, nostr.SubscriptionOptions{Label: "nak-nip4e"}) {
|
||||
ourDeviceAnnouncementEvents = append(ourDeviceAnnouncementEvents, evt.Event)
|
||||
}
|
||||
if len(ourDeviceAnnouncementEvents) == 0 {
|
||||
log(". no device announcement found, publishing kind:4454 for %s\n", color.YellowString(deviceName))
|
||||
// publish kind:4454
|
||||
evt := nostr.Event{
|
||||
Kind: 4454,
|
||||
Content: "",
|
||||
CreatedAt: nostr.Now(),
|
||||
Tags: nostr.Tags{
|
||||
{"client", deviceName},
|
||||
{"P", devicePub.Hex()},
|
||||
},
|
||||
}
|
||||
|
||||
// sign with main key
|
||||
if err := kr.SignEvent(ctx, &evt); err != nil {
|
||||
return fmt.Errorf("failed to sign device event: %w", err)
|
||||
}
|
||||
|
||||
// publish
|
||||
if err := publishFlow(ctx, c, kr, evt, relayList); err != nil {
|
||||
return err
|
||||
}
|
||||
log(color.GreenString(". device announcement published\n"))
|
||||
ourDeviceAnnouncementEvents = append(ourDeviceAnnouncementEvents, evt)
|
||||
} else {
|
||||
log(color.GreenString(". device already registered\n"))
|
||||
}
|
||||
|
||||
// see if some other device has shared the key with us from kind:4455
|
||||
for eKeyMsg := range sys.Pool.FetchMany(ctx, relays, nostr.Filter{
|
||||
Kinds: []nostr.Kind{4455},
|
||||
Tags: nostr.TagMap{
|
||||
"p": []string{devicePub.Hex()},
|
||||
},
|
||||
Since: keyAnnouncementEvent.CreatedAt + 1,
|
||||
}, nostr.SubscriptionOptions{Label: "nak-nip4e"}) {
|
||||
var senderPub nostr.PubKey
|
||||
for _, tag := range eKeyMsg.Tags {
|
||||
if len(tag) >= 2 && tag[0] == "P" {
|
||||
senderPub, _ = nostr.PubKeyFromHex(tag[1])
|
||||
break
|
||||
}
|
||||
}
|
||||
if senderPub == nostr.ZeroPK {
|
||||
continue
|
||||
}
|
||||
ss, err := nip44.GenerateConversationKey(senderPub, deviceSec)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
eSecHex, err := nip44.Decrypt(eKeyMsg.Content, ss)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
eSec, err = nostr.SecretKeyFromHex(eSecHex)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// check if it matches mainPub
|
||||
if eSec.Public() == ePub {
|
||||
log(color.GreenString("successfully received decoupled encryption key from another device\n"))
|
||||
// store it
|
||||
os.MkdirAll(filepath.Dir(eKeyPath), 0700)
|
||||
os.WriteFile(eKeyPath, []byte(eSecHex), 0600)
|
||||
|
||||
// delete our 4454 if we had one, since we received the key
|
||||
if len(ourDeviceAnnouncementEvents) > 0 {
|
||||
log("deleting our device announcement (kind:4454) since we received the decoupled encryption key\n")
|
||||
deletion4454 := nostr.Event{
|
||||
CreatedAt: nostr.Now(),
|
||||
Kind: 5,
|
||||
Tags: nostr.Tags{
|
||||
{"e", ourDeviceAnnouncementEvents[0].ID.Hex()},
|
||||
},
|
||||
}
|
||||
if err := kr.SignEvent(ctx, &deletion4454); err != nil {
|
||||
log(color.RedString("failed to sign 4454 deletion: %v\n"), err)
|
||||
} else if err := publishFlow(ctx, c, kr, deletion4454, relayList); err != nil {
|
||||
log(color.RedString("failed to publish 4454 deletion: %v\n"), err)
|
||||
} else {
|
||||
log(color.GreenString("- device announcement deleted\n"))
|
||||
}
|
||||
}
|
||||
|
||||
// delete the 4455 we just decrypted
|
||||
log("deleting the key message (kind:4455) we just decrypted\n")
|
||||
deletion4455 := nostr.Event{
|
||||
CreatedAt: nostr.Now(),
|
||||
Kind: 5,
|
||||
Tags: nostr.Tags{
|
||||
{"e", eKeyMsg.ID.Hex()},
|
||||
},
|
||||
}
|
||||
if err := kr.SignEvent(ctx, &deletion4455); err != nil {
|
||||
log(color.RedString("failed to sign 4455 deletion: %v\n"), err)
|
||||
} else if err := publishFlow(ctx, c, kr, deletion4455, relayList); err != nil {
|
||||
log(color.RedString("failed to publish 4455 deletion: %v\n"), err)
|
||||
} else {
|
||||
log(color.GreenString("- key message deleted\n"))
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if eSec == [32]byte{} {
|
||||
log("decoupled encryption secret key not available, must be sent from another device to %s first\n",
|
||||
color.YellowString(deviceName))
|
||||
return nil
|
||||
}
|
||||
log(color.GreenString("- decoupled encryption key ready\n"))
|
||||
|
||||
// now we have mainSec, check for other kind:4454 events newer than the 10044
|
||||
log("- checking for other devices and key messages so we can send the key\n")
|
||||
keyMsgs := make([]string, 0, 5)
|
||||
for keyOrDeviceEvt := range sys.Pool.FetchMany(ctx, relays, nostr.Filter{
|
||||
Kinds: []nostr.Kind{4454, 4455},
|
||||
Authors: []nostr.PubKey{userPub},
|
||||
Since: keyAnnouncementEvent.CreatedAt + 1,
|
||||
}, nostr.SubscriptionOptions{Label: "nak-nip4e"}) {
|
||||
if keyOrDeviceEvt.Kind == 4455 {
|
||||
// got key event
|
||||
keyEvent := keyOrDeviceEvt
|
||||
|
||||
// assume a key msg will always come before its associated devicemsg
|
||||
// so just store them here:
|
||||
pubkeyTag := keyEvent.Tags.Find("p")
|
||||
if pubkeyTag == nil {
|
||||
continue
|
||||
}
|
||||
keyMsgs = append(keyMsgs, pubkeyTag[1])
|
||||
} else if keyOrDeviceEvt.Kind == 4454 {
|
||||
// device event
|
||||
deviceEvt := keyOrDeviceEvt
|
||||
|
||||
// skip ourselves
|
||||
if deviceEvt.Tags.FindWithValue("P", devicePub.Hex()) != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// if there is a clock skew (current time is earlier than the time of this device's announcement) skip it
|
||||
if nostr.Now() < deviceEvt.CreatedAt {
|
||||
continue
|
||||
}
|
||||
|
||||
// if this already has a corresponding keyMsg then skip it
|
||||
pubkeyTag := deviceEvt.Tags.Find("P")
|
||||
if pubkeyTag == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if slices.Contains(keyMsgs, pubkeyTag[1]) {
|
||||
continue
|
||||
}
|
||||
|
||||
deviceTag := deviceEvt.Tags.Find("client")
|
||||
if deviceTag == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// here we know we're dealing with a deviceMsg without a corresponding keyMsg
|
||||
// so we have to build a keyMsg for them
|
||||
theirDevice, err := nostr.PubKeyFromHex(pubkeyTag[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if c.Bool("authorize-all") {
|
||||
// will proceed
|
||||
} else if c.Bool("reject-all") {
|
||||
log(" - skipping %s\n", color.YellowString(deviceTag[1]))
|
||||
continue
|
||||
} else {
|
||||
var proceed bool
|
||||
if err := survey.AskOne(&survey.Confirm{
|
||||
Message: fmt.Sprintf("share decoupled encryption key with %s"+colors.bold("?"),
|
||||
color.YellowString(deviceTag[1])),
|
||||
}, &proceed); err != nil {
|
||||
return err
|
||||
}
|
||||
if proceed {
|
||||
// will proceed
|
||||
} else {
|
||||
// won't proceed
|
||||
var deleteDevice bool
|
||||
if err := survey.AskOne(&survey.Confirm{
|
||||
Message: fmt.Sprintf(" delete %s"+colors.bold("'s announcement?"), color.YellowString(deviceTag[1])),
|
||||
}, &deleteDevice); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if deleteDevice {
|
||||
log(" - deleting %s\n", color.YellowString(deviceTag[1]))
|
||||
deletion := nostr.Event{
|
||||
CreatedAt: nostr.Now(),
|
||||
Kind: 5,
|
||||
Tags: nostr.Tags{
|
||||
{"e", deviceEvt.ID.Hex()},
|
||||
},
|
||||
}
|
||||
if err := kr.SignEvent(ctx, &deletion); err != nil {
|
||||
return fmt.Errorf("failed to sign deletion '%s': %w", deletion.GetID().Hex(), err)
|
||||
}
|
||||
if err := publishFlow(ctx, c, kr, deletion, relayList); err != nil {
|
||||
return fmt.Errorf("publish flow failed: %w", err)
|
||||
}
|
||||
} else {
|
||||
log(" - skipped\n")
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
log("- sending decoupled encryption key to new device %s\n", color.YellowString(deviceTag[1]))
|
||||
ss, err := nip44.GenerateConversationKey(theirDevice, deviceSec)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
ciphertext, err := nip44.Encrypt(eSec.Hex(), ss)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
evt4455 := nostr.Event{
|
||||
Kind: 4455,
|
||||
Content: ciphertext,
|
||||
CreatedAt: nostr.Now(),
|
||||
Tags: nostr.Tags{
|
||||
{"p", theirDevice.Hex()},
|
||||
{"P", devicePub.Hex()},
|
||||
},
|
||||
}
|
||||
if err := kr.SignEvent(ctx, &evt4455); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := publishFlow(ctx, c, kr, evt4455, relayList); err != nil {
|
||||
log(color.RedString("failed to publish key message: %v\n"), err)
|
||||
} else {
|
||||
log(" - decoupled encryption key sent to %s\n", color.GreenString(deviceTag[1]))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stdout(ePub.Hex())
|
||||
return nil
|
||||
},
|
||||
}
|
||||
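The dekey flow above repeatedly scans a kind:10044 announcement for its "n" tag to learn the decoupled encryption public key. A condensed, hedged sketch of just that tag lookup, using plain string slices instead of the nostr.Tags type from the file:

package main

import (
	"errors"
	"fmt"
)

// findDecoupledPubKey extracts the hex pubkey from the first "n" tag of a
// kind:10044 announcement, mirroring the loop used in dekey.go above.
// Tags are simplified to [][]string here; the real code uses nostr.Tags.
func findDecoupledPubKey(tags [][]string) (string, error) {
	for _, tag := range tags {
		if len(tag) >= 2 && tag[0] == "n" {
			return tag[1], nil
		}
	}
	return "", errors.New("got invalid kind:10044 event, no 'n' tag")
}

func main() {
	tags := [][]string{{"n", "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"}}
	pub, err := findDecoupledPubKey(tags)
	fmt.Println(pub, err)
}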
encode.go — 55 changed lines
@@ -25,13 +25,6 @@ var encode = &cli.Command{
  "relays":["wss://nada.zero"],
  "author":"ebb6ff85430705651b311ed51328767078fd790b14f02d22efba68d5513376bc"
} | nak encode`,
	Flags: []cli.Flag{
		&cli.StringSliceFlag{
			Name: "relay",
			Aliases: []string{"r"},
			Usage: "attach relay hints to naddr code",
		},
	},
	DisableSliceFlagSeparator: true,
	Action: func(ctx context.Context, c *cli.Command) error {
		if c.Args().Len() != 0 {
@@ -126,7 +119,12 @@ var encode = &cli.Command{
		&cli.StringSliceFlag{
			Name: "relay",
			Aliases: []string{"r"},
			Usage: "attach relay hints to nprofile code",
			Usage: "attach relay hints to the code",
		},
		&BoolIntFlag{
			Name: "outbox",
			Usage: "automatically appends outbox relays to the code",
			Value: 3,
		},
	},
	DisableSliceFlagSeparator: true,
@@ -139,6 +137,13 @@ var encode = &cli.Command{
		}

		relays := c.StringSlice("relay")

		if getBoolInt(c, "outbox") > 0 {
			for _, r := range sys.FetchOutboxRelays(ctx, pk, int(getBoolInt(c, "outbox"))) {
				relays = appendUnique(relays, r)
			}
		}

		if err := normalizeAndValidateRelayURLs(relays); err != nil {
			return err
		}
@@ -159,6 +164,16 @@ var encode = &cli.Command{
			Aliases: []string{"a"},
			Usage: "attach an author pubkey as a hint to the nevent code",
		},
		&cli.StringSliceFlag{
			Name: "relay",
			Aliases: []string{"r"},
			Usage: "attach relay hints to the code",
		},
		&BoolIntFlag{
			Name: "outbox",
			Usage: "automatically appends outbox relays to the code",
			Value: 3,
		},
	},
	DisableSliceFlagSeparator: true,
	Action: func(ctx context.Context, c *cli.Command) error {
@@ -171,6 +186,13 @@ var encode = &cli.Command{

		author := getPubKey(c, "author")
		relays := c.StringSlice("relay")

		if getBoolInt(c, "outbox") > 0 && author != nostr.ZeroPK {
			for _, r := range sys.FetchOutboxRelays(ctx, author, int(getBoolInt(c, "outbox"))) {
				relays = appendUnique(relays, r)
			}
		}

		if err := normalizeAndValidateRelayURLs(relays); err != nil {
			return err
		}
@@ -204,6 +226,16 @@ var encode = &cli.Command{
			Usage: "kind of referred replaceable event",
			Required: true,
		},
		&cli.StringSliceFlag{
			Name: "relay",
			Aliases: []string{"r"},
			Usage: "attach relay hints to the code",
		},
		&BoolIntFlag{
			Name: "outbox",
			Usage: "automatically appends outbox relays to the code",
			Value: 3,
		},
	},
	DisableSliceFlagSeparator: true,
	Action: func(ctx context.Context, c *cli.Command) error {
@@ -224,6 +256,13 @@ var encode = &cli.Command{
		}

		relays := c.StringSlice("relay")

		if getBoolInt(c, "outbox") > 0 {
			for _, r := range sys.FetchOutboxRelays(ctx, pubkey, int(getBoolInt(c, "outbox"))) {
				relays = appendUnique(relays, r)
			}
		}

		if err := normalizeAndValidateRelayURLs(relays); err != nil {
			return err
		}

@@ -17,7 +17,7 @@ var encrypt = &cli.Command{
		defaultKeyFlags,
		&PubKeyFlag{
			Name: "recipient-pubkey",
			Aliases: []string{"p", "tgt", "target", "pubkey"},
			Aliases: []string{"p", "tgt", "target", "pubkey", "to"},
			Required: true,
		},
		&cli.BoolFlag{
@@ -79,7 +79,7 @@ var decrypt = &cli.Command{
		defaultKeyFlags,
		&PubKeyFlag{
			Name: "sender-pubkey",
			Aliases: []string{"p", "src", "source", "pubkey"},
			Aliases: []string{"p", "src", "source", "pubkey", "from"},
			Required: true,
		},
		&cli.BoolFlag{
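Several hunks above fold outbox relays into the hint list with `appendUnique`, whose definition is not shown in this diff. A hedged sketch of what such a helper plausibly looks like (the actual implementation in the repo may differ):

package main

import "fmt"

// appendUnique appends items to a slice, skipping values already present.
// This is an assumed implementation; the real helper in nak is not shown here.
func appendUnique[T comparable](list []T, items ...T) []T {
	for _, item := range items {
		exists := false
		for _, existing := range list {
			if existing == item {
				exists = true
				break
			}
		}
		if !exists {
			list = append(list, item)
		}
	}
	return list
}

func main() {
	relays := []string{"wss://relay.damus.io"}
	relays = appendUnique(relays, "wss://relay.damus.io", "wss://nos.lol")
	fmt.Println(relays) // [wss://relay.damus.io wss://nos.lol]
}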
event.go — 3 changed lines
@@ -24,7 +24,7 @@ const (
	CATEGORY_EXTRAS = "EXTRAS"
)

var event = &cli.Command{
var eventCmd = &cli.Command{
	Name: "event",
	Usage: "generates an encoded event and either prints it or sends it to a set of relays",
	Description: `outputs an event built with the flags. if one or more relays are given as arguments, an attempt is also made to publish the event to these relays.
@@ -155,6 +155,7 @@ example:
			os.Exit(3)
		}
	}

	kr, sec, err := gatherKeyerFromArguments(ctx, c)
	if err != nil {
		return err

@@ -9,7 +9,7 @@ import (
	"github.com/urfave/cli/v3"
)

var filter = &cli.Command{
var filterCmd = &cli.Command{
	Name: "filter",
	Usage: "applies an event filter to an event to see if it matches.",
	Description: `
flags.go — 56 changed lines
@@ -11,6 +11,62 @@ import (
	"github.com/urfave/cli/v3"
)

type (
	BoolIntFlag = cli.FlagBase[int, struct{}, boolIntValue]
)

type boolIntValue struct {
	int            int
	defaultWhenSet int
	hasDefault     bool
	hasBeenSet     bool
}

var _ cli.ValueCreator[int, struct{}] = boolIntValue{}

func (t boolIntValue) Create(val int, p *int, c struct{}) cli.Value {
	*p = val

	return &boolIntValue{
		defaultWhenSet: val,
		hasDefault:     true,
	}
}

func (t boolIntValue) IsBoolFlag() bool {
	return true
}

func (t boolIntValue) ToString(b int) string { return "<<>>" }

func (t *boolIntValue) Set(value string) error {
	t.hasBeenSet = true
	if value == "true" {
		if t.hasDefault {
			t.int = t.defaultWhenSet
		} else {
			t.int = 1
		}
		return nil
	} else {
		var err error
		t.int, err = strconv.Atoi(value)
		return err
	}
}

func (t *boolIntValue) String() string { return fmt.Sprintf("%#v", t.int) }
func (t *boolIntValue) Value() int     { return t.int }
func (t *boolIntValue) Get() any       { return t.int }

func getBoolInt(cmd *cli.Command, name string) int {
	return cmd.Value(name).(int)
}

//
//
//

type NaturalTimeFlag = cli.FlagBase[nostr.Timestamp, struct{}, naturalTimeValue]

type naturalTimeValue struct {
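The BoolIntFlag added above behaves as a boolean that carries an integer: passing a bare `--outbox` keeps the declared default (3 in encode.go), while `--outbox=5` parses the number. A condensed restatement of the Set logic in isolation (the real flag wiring goes through urfave/cli's FlagBase as shown above):

package main

import (
	"fmt"
	"strconv"
)

// setBoolInt reproduces boolIntValue.Set in isolation: a bare boolean flag
// arrives as the string "true" and falls back to the declared default, while
// an explicit number is parsed with strconv.Atoi.
func setBoolInt(raw string, defaultWhenSet int) (int, error) {
	if raw == "true" {
		return defaultWhenSet, nil
	}
	return strconv.Atoi(raw)
}

func main() {
	// `nak encode nprofile --outbox <pubkey>`   -> flag value "true" -> 3
	// `nak encode nprofile --outbox=5 <pubkey>` -> flag value "5"    -> 5
	v1, _ := setBoolInt("true", 3)
	v2, _ := setBoolInt("5", 3)
	fmt.Println(v1, v2) // 3 5
}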
fs.go — 50 changed lines
@@ -13,10 +13,8 @@ import (
	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/keyer"
	"github.com/fatih/color"
	"github.com/fiatjaf/nak/nostrfs"
	"github.com/hanwen/go-fuse/v2/fs"
	"github.com/hanwen/go-fuse/v2/fuse"
	"github.com/urfave/cli/v3"
	"github.com/winfsp/cgofuse/fuse"
)

var fsCmd = &cli.Command{
@@ -64,7 +62,7 @@ var fsCmd = &cli.Command{
		apat = time.Hour * 24 * 365 * 3
	}

	root := nostrfs.NewNostrRoot(
	root := NewFSRoot(
		context.WithValue(
			context.WithValue(
				ctx,
@@ -75,7 +73,7 @@ var fsCmd = &cli.Command{
		sys,
		kr,
		mountpoint,
		nostrfs.Options{
		FSOptions{
			AutoPublishNotesTimeout: apnt,
			AutoPublishArticlesTimeout: apat,
		},
@@ -83,21 +81,22 @@ var fsCmd = &cli.Command{

	// create the server
	log("- mounting at %s... ", color.HiCyanString(mountpoint))
	timeout := time.Second * 120
	server, err := fs.Mount(mountpoint, root, &fs.Options{
		MountOptions: fuse.MountOptions{
			Debug: isVerbose,
			Name: "nak",
			FsName: "nak",
			RememberInodes: true,
		},
		AttrTimeout: &timeout,
		EntryTimeout: &timeout,
		Logger: nostr.DebugLogger,
	})
	if err != nil {
		return fmt.Errorf("mount failed: %w", err)

	// create cgofuse host
	host := fuse.NewFileSystemHost(root)
	host.SetCapReaddirPlus(true)
	host.SetUseIno(true)

	// mount the filesystem
	mountArgs := []string{"-s", mountpoint}
	if isVerbose {
		mountArgs = append([]string{"-d"}, mountArgs...)
	}

	go func() {
		host.Mount("", mountArgs)
	}()

	log("ok.\n")

	// setup signal handling for clean unmount
@@ -107,17 +106,12 @@ var fsCmd = &cli.Command{
	go func() {
		<-ch
		log("- unmounting... ")
		err := server.Unmount()
		if err != nil {
			chErr <- fmt.Errorf("unmount failed: %w", err)
		} else {
			log("ok\n")
			chErr <- nil
		}
		// cgofuse doesn't have explicit unmount, it unmounts on process exit
		log("ok\n")
		chErr <- nil
	}()

	// serve the filesystem until unmounted
	server.Wait()
	// wait for signals
	return <-chErr
	},
}

@@ -1,4 +1,4 @@
//go:build windows || openbsd
//go:build openbsd

package main

@@ -15,6 +15,6 @@ var fsCmd = &cli.Command{
	Description: `doesn't work on Windows and OpenBSD.`,
	DisableSliceFlagSeparator: true,
	Action: func(ctx context.Context, c *cli.Command) error {
		return fmt.Errorf("this doesn't work on Windows and OpenBSD.")
		return fmt.Errorf("this doesn't work on OpenBSD.")
	},
}
fs_root.go (new file) — 1179 lines (file diff suppressed because it is too large)
fs_windows.go (new file) — 139 lines
@@ -0,0 +1,139 @@
//go:build windows

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/keyer"
	"github.com/fatih/color"
	"github.com/urfave/cli/v3"
	"github.com/winfsp/cgofuse/fuse"
)

var fsCmd = &cli.Command{
	Name: "fs",
	Usage: "mount a FUSE filesystem that exposes Nostr events as files.",
	Description: `(experimental)`,
	ArgsUsage: "<mountpoint>",
	Flags: append(defaultKeyFlags,
		&PubKeyFlag{
			Name: "pubkey",
			Usage: "public key from where to to prepopulate directories",
		},
		&cli.DurationFlag{
			Name: "auto-publish-notes",
			Usage: "delay after which new notes will be auto-published, set to -1 to not publish.",
			Value: time.Second * 30,
		},
		&cli.DurationFlag{
			Name: "auto-publish-articles",
			Usage: "delay after which edited articles will be auto-published.",
			Value: time.Hour * 24 * 365 * 2,
			DefaultText: "basically infinite",
		},
	),
	DisableSliceFlagSeparator: true,
	Action: func(ctx context.Context, c *cli.Command) error {
		mountpoint := c.Args().First()
		if mountpoint == "" {
			return fmt.Errorf("must be called with a directory path to serve as the mountpoint as an argument")
		}

		var kr nostr.User
		if signer, _, err := gatherKeyerFromArguments(ctx, c); err == nil {
			kr = signer
		} else {
			kr = keyer.NewReadOnlyUser(getPubKey(c, "pubkey"))
		}

		apnt := c.Duration("auto-publish-notes")
		if apnt < 0 {
			apnt = time.Hour * 24 * 365 * 3
		}
		apat := c.Duration("auto-publish-articles")
		if apat < 0 {
			apat = time.Hour * 24 * 365 * 3
		}

		root := NewFSRoot(
			context.WithValue(
				context.WithValue(
					ctx,
					"log", log,
				),
				"logverbose", logverbose,
			),
			sys,
			kr,
			mountpoint,
			FSOptions{
				AutoPublishNotesTimeout: apnt,
				AutoPublishArticlesTimeout: apat,
			},
		)

		// create the server
		log("- mounting at %s... ", color.HiCyanString(mountpoint))

		// create cgofuse host
		host := fuse.NewFileSystemHost(root)
		host.SetCapReaddirPlus(true)
		host.SetUseIno(true)

		// mount the filesystem - Windows/WinFsp version
		// based on rclone cmount implementation
		mountArgs := []string{
			"-o", "uid=-1",
			"-o", "gid=-1",
			"--FileSystemName=nak",
		}

		// check if mountpoint is a drive letter or directory
		isDriveLetter := len(mountpoint) == 2 && mountpoint[1] == ':'

		if !isDriveLetter {
			// winFsp primarily supports drive letters on Windows
			// directory mounting may not work reliably
			log("WARNING: directory mounting may not work on Windows (WinFsp limitation)\n")
			log("         consider using a drive letter instead (e.g., 'nak fs Z:')\n")

			// for directory mounts, follow rclone's approach:
			// 1. check that mountpoint doesn't already exist
			if _, err := os.Stat(mountpoint); err == nil {
				return fmt.Errorf("mountpoint path already exists: %s (must not exist before mounting)", mountpoint)
			} else if !os.IsNotExist(err) {
				return fmt.Errorf("failed to check mountpoint: %w", err)
			}

			// 2. check that parent directory exists
			parent := filepath.Join(mountpoint, "..")
			if _, err := os.Stat(parent); err != nil {
				if os.IsNotExist(err) {
					return fmt.Errorf("parent of mountpoint directory does not exist: %s", parent)
				}
				return fmt.Errorf("failed to check parent directory: %w", err)
			}

			// 3. use network mode for directory mounts
			mountArgs = append(mountArgs, "--VolumePrefix=\\nak\\"+filepath.Base(mountpoint))
		}

		if isVerbose {
			mountArgs = append(mountArgs, "-o", "debug")
		}
		mountArgs = append(mountArgs, mountpoint)

		log("ok.\n")

		if !host.Mount("", mountArgs) {
			return fmt.Errorf("failed to mount filesystem")
		}
		return nil
	},
}
gift.go (new file) — 359 lines
@@ -0,0 +1,359 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/keyer"
|
||||
"fiatjaf.com/nostr/nip44"
|
||||
"github.com/fatih/color"
|
||||
"github.com/mailru/easyjson"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var gift = &cli.Command{
|
||||
Name: "gift",
|
||||
Usage: "gift-wraps (or unwraps) an event according to NIP-59",
|
||||
Description: `example:
|
||||
nak event | nak gift wrap --sec <sec-a> -p <sec-b> | nak gift unwrap --sec <sec-b> --from <pub-a>
|
||||
|
||||
a decoupled key (if it has been created or received with "nak dekey" previously) will be used by default.`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: defaultKeyFlags,
|
||||
Commands: []*cli.Command{
|
||||
{
|
||||
Name: "wrap",
|
||||
Flags: []cli.Flag{
|
||||
&PubKeyFlag{
|
||||
Name: "recipient-pubkey",
|
||||
Aliases: []string{"p", "tgt", "target", "pubkey", "to"},
|
||||
Required: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "use-our-identity-key",
|
||||
Usage: "Encrypt with the key given to --sec directly even when a decoupled key exists for the sender.",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "use-their-identity-key",
|
||||
Usage: "Encrypt to the public key given as --recipient-pubkey directly even when a decoupled key exists for the receiver.",
|
||||
},
|
||||
},
|
||||
Usage: "turns an event into a rumor (unsigned) then gift-wraps it to the recipient",
|
||||
Description: `example:
|
||||
nak event -c 'hello' | nak gift wrap --sec <my-secret-key> -p <target-public-key>`,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// get sender pubkey (ourselves)
|
||||
sender, err := kr.GetPublicKey(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get sender pubkey: %w", err)
|
||||
}
|
||||
|
||||
var using bool
|
||||
|
||||
var cipher nostr.Cipher = kr
|
||||
// use decoupled key if it exists
|
||||
using = false
|
||||
if !c.Bool("use-our-identity-key") {
|
||||
configPath := c.String("config-path")
|
||||
eSec, has, err := getDecoupledEncryptionSecretKey(ctx, configPath, sender)
|
||||
if has {
|
||||
if err != nil {
|
||||
return fmt.Errorf("our decoupled encryption key exists, but we failed to get it: %w; call `nak dekey` to attempt a fix or call this again with --encrypt-with-our-identity-key to bypass", err)
|
||||
}
|
||||
cipher = keyer.NewPlainKeySigner(eSec)
|
||||
log("- using our decoupled encryption key %s\n", color.CyanString(eSec.Public().Hex()))
|
||||
using = true
|
||||
}
|
||||
}
|
||||
if !using {
|
||||
log("- using our identity key %s\n", color.CyanString(sender.Hex()))
|
||||
}
|
||||
|
||||
recipient := getPubKey(c, "recipient-pubkey")
|
||||
using = false
|
||||
if !c.Bool("use-their-identity-key") {
|
||||
if theirEPub, exists := getDecoupledEncryptionPublicKey(ctx, recipient); exists {
|
||||
recipient = theirEPub
|
||||
using = true
|
||||
log("- using their decoupled encryption public key %s\n", color.CyanString(theirEPub.Hex()))
|
||||
}
|
||||
}
|
||||
if !using {
|
||||
log("- using their identity public key %s\n", color.CyanString(recipient.Hex()))
|
||||
}
|
||||
|
||||
// read event from stdin
|
||||
for eventJSON := range getJsonsOrBlank() {
|
||||
if eventJSON == "{}" {
|
||||
continue
|
||||
}
|
||||
|
||||
var originalEvent nostr.Event
|
||||
if err := easyjson.Unmarshal([]byte(eventJSON), &originalEvent); err != nil {
|
||||
return fmt.Errorf("invalid event JSON: %w", err)
|
||||
}
|
||||
|
||||
// turn into rumor (unsigned event)
|
||||
rumor := originalEvent
|
||||
rumor.Sig = [64]byte{} // remove signature
|
||||
rumor.PubKey = sender
|
||||
rumor.ID = rumor.GetID() // compute ID
|
||||
|
||||
// create seal
|
||||
rumorJSON, _ := easyjson.Marshal(rumor)
|
||||
encryptedRumor, err := cipher.Encrypt(ctx, string(rumorJSON), recipient)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encrypt rumor: %w", err)
|
||||
}
|
||||
seal := &nostr.Event{
|
||||
Kind: 13,
|
||||
Content: encryptedRumor,
|
||||
PubKey: sender,
|
||||
CreatedAt: randomNow(),
|
||||
Tags: nostr.Tags{},
|
||||
}
|
||||
if err := kr.SignEvent(ctx, seal); err != nil {
|
||||
return fmt.Errorf("failed to sign seal: %w", err)
|
||||
}
|
||||
|
||||
// create gift wrap
|
||||
ephemeral := nostr.Generate()
|
||||
sealJSON, _ := easyjson.Marshal(seal)
|
||||
convkey, err := nip44.GenerateConversationKey(recipient, ephemeral)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate conversation key: %w", err)
|
||||
}
|
||||
encryptedSeal, err := nip44.Encrypt(string(sealJSON), convkey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encrypt seal: %w", err)
|
||||
}
|
||||
wrap := &nostr.Event{
|
||||
Kind: 1059,
|
||||
Content: encryptedSeal,
|
||||
CreatedAt: randomNow(),
|
||||
Tags: nostr.Tags{{"p", recipient.Hex()}},
|
||||
}
|
||||
wrap.Sign(ephemeral)
|
||||
|
||||
// print the gift-wrap
|
||||
wrapJSON, err := easyjson.Marshal(wrap)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to marshal gift wrap: %w", err)
|
||||
}
|
||||
stdout(string(wrapJSON))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "unwrap",
|
||||
Usage: "decrypts a gift-wrap event sent by the sender to us and exposes its internal rumor (unsigned event).",
|
||||
Description: `example:
|
||||
nak req -p <my-public-key> -k 1059 dmrelay.com | nak gift unwrap --sec <my-secret-key>`,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// get receiver public key (ourselves)
|
||||
receiver, err := kr.GetPublicKey(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ciphers := []nostr.Cipher{kr}
|
||||
// use decoupled key if it exists
|
||||
configPath := c.String("config-path")
|
||||
eSec, has, err := getDecoupledEncryptionSecretKey(ctx, configPath, receiver)
|
||||
if has {
|
||||
if err != nil {
|
||||
return fmt.Errorf("our decoupled encryption key exists, but we failed to get it: %w; call `nak dekey` to attempt a fix or call this again with --use-direct to bypass", err)
|
||||
}
|
||||
ciphers = append(ciphers, kr)
|
||||
ciphers[0] = keyer.NewPlainKeySigner(eSec) // pub decoupled key first
|
||||
}
|
||||
|
||||
// read gift-wrapped event from stdin
|
||||
for wrapJSON := range getJsonsOrBlank() {
|
||||
if wrapJSON == "{}" {
|
||||
continue
|
||||
}
|
||||
|
||||
var wrap nostr.Event
|
||||
if err := easyjson.Unmarshal([]byte(wrapJSON), &wrap); err != nil {
|
||||
return fmt.Errorf("invalid gift wrap JSON: %w", err)
|
||||
}
|
||||
|
||||
if wrap.Kind != 1059 {
|
||||
return fmt.Errorf("not a gift wrap event (kind %d)", wrap.Kind)
|
||||
}
|
||||
|
||||
// decrypt seal (in the process also find out if they encrypted it to our identity key or to our decoupled key)
|
||||
var cipher nostr.Cipher
|
||||
var seal nostr.Event
|
||||
|
||||
// try both the receiver identity key and decoupled key
|
||||
err = nil
|
||||
for c, potentialCipher := range ciphers {
|
||||
switch c {
|
||||
case 0:
|
||||
log("- trying the receiver's decoupled encryption key %s\n", color.CyanString(eSec.Public().Hex()))
|
||||
case 1:
|
||||
log("- trying the receiver's identity key %s\n", color.CyanString(receiver.Hex()))
|
||||
}
|
||||
|
||||
sealj, thisErr := potentialCipher.Decrypt(ctx, wrap.Content, wrap.PubKey)
|
||||
if thisErr != nil {
|
||||
err = thisErr
|
||||
continue
|
||||
}
|
||||
if thisErr := easyjson.Unmarshal([]byte(sealj), &seal); thisErr != nil {
|
||||
err = fmt.Errorf("invalid seal JSON: %w", thisErr)
|
||||
continue
|
||||
}
|
||||
|
||||
cipher = potentialCipher
|
||||
break
|
||||
}
|
||||
if seal.ID == nostr.ZeroID {
|
||||
// if both ciphers failed above we'll reach here
|
||||
return fmt.Errorf("failed to decrypt seal: %w", err)
|
||||
}
|
||||
|
||||
if seal.Kind != 13 {
|
||||
return fmt.Errorf("not a seal event (kind %d)", seal.Kind)
|
||||
}
|
||||
|
||||
senderEncryptionPublicKeys := []nostr.PubKey{seal.PubKey}
|
||||
if theirEPub, exists := getDecoupledEncryptionPublicKey(ctx, seal.PubKey); exists {
|
||||
senderEncryptionPublicKeys = append(senderEncryptionPublicKeys, seal.PubKey)
|
||||
senderEncryptionPublicKeys[0] = theirEPub // put decoupled key first
|
||||
}
|
||||
|
||||
// decrypt rumor (at this point we know what cipher is the one they encrypted to)
|
||||
// (but we don't know if they have encrypted with their identity key or their decoupled key, so try both)
|
||||
var rumor nostr.Event
|
||||
err = nil
|
||||
for s, senderEncryptionPublicKey := range senderEncryptionPublicKeys {
|
||||
switch s {
|
||||
case 0:
|
||||
log("- trying the sender's decoupled encryption public key %s\n", color.CyanString(senderEncryptionPublicKey.Hex()))
|
||||
case 1:
|
||||
log("- trying the sender's identity public key %s\n", color.CyanString(senderEncryptionPublicKey.Hex()))
|
||||
}
|
||||
|
||||
rumorj, thisErr := cipher.Decrypt(ctx, seal.Content, senderEncryptionPublicKey)
|
||||
if thisErr != nil {
|
||||
err = fmt.Errorf("failed to decrypt rumor: %w", thisErr)
|
||||
continue
|
||||
}
|
||||
if thisErr := easyjson.Unmarshal([]byte(rumorj), &rumor); thisErr != nil {
|
||||
err = fmt.Errorf("invalid rumor JSON: %w", thisErr)
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
if rumor.ID == nostr.ZeroID {
|
||||
return fmt.Errorf("failed to decrypt rumor: %w", err)
|
||||
}
|
||||
|
||||
// output the unwrapped event (rumor)
|
||||
stdout(rumor.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func randomNow() nostr.Timestamp {
|
||||
const twoDays = 2 * 24 * 60 * 60
|
||||
now := time.Now().Unix()
|
||||
randomOffset := rand.Int63n(twoDays)
|
||||
return nostr.Timestamp(now - randomOffset)
|
||||
}
|
||||
|
||||
func getDecoupledEncryptionSecretKey(ctx context.Context, configPath string, pubkey nostr.PubKey) (nostr.SecretKey, bool, error) {
|
||||
relays := sys.FetchWriteRelays(ctx, pubkey)
|
||||
|
||||
keyAnnouncementResult := sys.Pool.FetchManyReplaceable(ctx, relays, nostr.Filter{
|
||||
Kinds: []nostr.Kind{10044},
|
||||
Authors: []nostr.PubKey{pubkey},
|
||||
}, nostr.SubscriptionOptions{Label: "nak-nip4e-gift"})
|
||||
|
||||
keyAnnouncementEvent, ok := keyAnnouncementResult.Load(nostr.ReplaceableKey{PubKey: pubkey, D: ""})
|
||||
if ok {
|
||||
var ePub nostr.PubKey
|
||||
|
||||
// get the pub from the tag
|
||||
for _, tag := range keyAnnouncementEvent.Tags {
|
||||
if len(tag) >= 2 && tag[0] == "n" {
|
||||
ePub, _ = nostr.PubKeyFromHex(tag[1])
|
||||
break
|
||||
}
|
||||
}
|
||||
if ePub == nostr.ZeroPK {
|
||||
return [32]byte{}, true, fmt.Errorf("got invalid kind:10044 event, no 'n' tag")
|
||||
}
|
||||
|
||||
// check if we have the key
|
||||
eKeyPath := filepath.Join(configPath, "dekey", "p", pubkey.Hex(), "e", ePub.Hex())
|
||||
if data, err := os.ReadFile(eKeyPath); err == nil {
|
||||
eSec, err := nostr.SecretKeyFromHex(string(data))
|
||||
if err != nil {
|
||||
return [32]byte{}, true, fmt.Errorf("invalid main key: %w", err)
|
||||
}
|
||||
if eSec.Public() != ePub {
|
||||
return [32]byte{}, true, fmt.Errorf("stored decoupled encryption key is corrupted: %w", err)
|
||||
}
|
||||
|
||||
return eSec, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return [32]byte{}, false, nil
|
||||
}
|
||||
|
||||
func getDecoupledEncryptionPublicKey(ctx context.Context, pubkey nostr.PubKey) (nostr.PubKey, bool) {
	relays := sys.FetchWriteRelays(ctx, pubkey)

	keyAnnouncementResult := sys.Pool.FetchManyReplaceable(ctx, relays, nostr.Filter{
		Kinds:   []nostr.Kind{10044},
		Authors: []nostr.PubKey{pubkey},
	}, nostr.SubscriptionOptions{Label: "nak-nip4e-gift"})

	keyAnnouncementEvent, ok := keyAnnouncementResult.Load(nostr.ReplaceableKey{PubKey: pubkey, D: ""})
	if ok {
		var ePub nostr.PubKey

		// get the pub from the tag
		for _, tag := range keyAnnouncementEvent.Tags {
			if len(tag) >= 2 && tag[0] == "n" {
				ePub, _ = nostr.PubKeyFromHex(tag[1])
				break
			}
		}
		if ePub == nostr.ZeroPK {
			return nostr.ZeroPK, false
		}

		return ePub, true
	}

	return nostr.ZeroPK, false
}
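// Sketch only (not part of this diff): the "n"-tag scan above is duplicated between
// getDecoupledEncryptionSecretKey and getDecoupledEncryptionPublicKey; a shared helper
// along these lines would remove the duplication. The function name is hypothetical.
func encryptionPubKeyFromAnnouncement(evt nostr.Event) (nostr.PubKey, bool) {
	for _, tag := range evt.Tags {
		if len(tag) >= 2 && tag[0] == "n" {
			if ePub, err := nostr.PubKeyFromHex(tag[1]); err == nil && ePub != nostr.ZeroPK {
				return ePub, true
			}
			break
		}
	}
	return nostr.ZeroPK, false
}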
594
git.go
@@ -8,6 +8,7 @@ import (
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
@@ -92,6 +93,9 @@ aside from those, there is also:
|
||||
}
|
||||
}
|
||||
|
||||
var defaultOwner string
|
||||
var defaultIdentifier string
|
||||
|
||||
// check if nip34.json already exists
|
||||
existingConfig, err := readNip34ConfigFile("")
|
||||
if err == nil {
|
||||
@@ -99,47 +103,118 @@ aside from those, there is also:
|
||||
if !c.Bool("force") && !c.Bool("interactive") {
|
||||
return fmt.Errorf("nip34.json already exists, use --force to overwrite or --interactive to update")
|
||||
}
|
||||
}
|
||||
|
||||
// get repository base directory name for defaults
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get current directory: %w", err)
|
||||
}
|
||||
baseName := filepath.Base(cwd)
|
||||
|
||||
// get earliest unique commit
|
||||
var earliestCommit string
|
||||
if output, err := exec.Command("git", "rev-list", "--max-parents=0", "HEAD").Output(); err == nil {
|
||||
earliest := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
if len(earliest) > 0 {
|
||||
earliestCommit = earliest[0]
|
||||
}
|
||||
}
|
||||
|
||||
// extract clone URLs from nostr:// git remotes
|
||||
// (this is just for migrating from ngit)
|
||||
var defaultCloneURLs []string
|
||||
if output, err := exec.Command("git", "remote", "-v").Output(); err == nil {
|
||||
remotes := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
for _, remote := range remotes {
|
||||
if strings.Contains(remote, "nostr://") {
|
||||
parts := strings.Fields(remote)
|
||||
if len(parts) >= 2 {
|
||||
nostrURL := parts[1]
|
||||
// parse nostr://npub.../relay_hostname/identifier
|
||||
if owner, identifier, relays, err := parseRepositoryAddress(ctx, nostrURL); err == nil && len(relays) > 0 {
|
||||
relayURL := relays[0]
|
||||
// convert to https://relay_hostname/npub.../identifier.git
|
||||
cloneURL := fmt.Sprintf("http%s/%s/%s.git",
|
||||
relayURL[2:], nip19.EncodeNpub(owner), identifier)
|
||||
defaultCloneURLs = appendUnique(defaultCloneURLs, cloneURL)
|
||||
defaultIdentifier = existingConfig.Identifier
|
||||
defaultOwner = existingConfig.Owner
|
||||
} else {
|
||||
// extract info from nostr:// git remotes (this is just for migrating from ngit)
|
||||
if output, err := exec.Command("git", "remote", "-v").Output(); err == nil {
|
||||
remotes := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
for _, remote := range remotes {
|
||||
if strings.Contains(remote, "nostr://") {
|
||||
parts := strings.Fields(remote)
|
||||
if len(parts) >= 2 {
|
||||
nostrURL := parts[1]
|
||||
// parse nostr://npub.../relay_hostname/identifier
|
||||
if remoteOwner, remoteIdentifier, relays, err := parseRepositoryAddress(ctx, nostrURL); err == nil && len(relays) > 0 {
|
||||
defaultIdentifier = remoteIdentifier
|
||||
defaultOwner = nip19.EncodeNpub(remoteOwner)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get repository base directory name for defaults
|
||||
if defaultIdentifier == "" {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get current directory: %w", err)
|
||||
}
|
||||
defaultIdentifier = filepath.Base(cwd)
|
||||
}
|
||||
|
||||
// prompt for identifier first
|
||||
var identifier string
|
||||
if c.String("identifier") != "" {
|
||||
identifier = c.String("identifier")
|
||||
} else if c.Bool("interactive") {
|
||||
if err := survey.AskOne(&survey.Input{
|
||||
Message: "identifier",
|
||||
Default: defaultIdentifier,
|
||||
}, &identifier); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
identifier = defaultIdentifier
|
||||
}
|
||||
|
||||
// prompt for owner pubkey
|
||||
var owner nostr.PubKey
|
||||
var ownerStr string
|
||||
if c.String("owner") != "" {
|
||||
owner, err = parsePubKey(ownerStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid owner pubkey: %w", err)
|
||||
}
|
||||
ownerStr = nip19.EncodeNpub(owner)
|
||||
} else if c.Bool("interactive") {
|
||||
for {
|
||||
if err := survey.AskOne(&survey.Input{
|
||||
Message: "owner (npub or hex)",
|
||||
Default: defaultOwner,
|
||||
}, &ownerStr); err != nil {
|
||||
return err
|
||||
}
|
||||
owner, err = parsePubKey(ownerStr)
|
||||
if err == nil {
|
||||
ownerStr = nip19.EncodeNpub(owner)
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("owner pubkey is required (use --owner or --interactive)")
|
||||
}
|
||||
|
||||
// try to fetch existing repository announcement (kind 30617)
|
||||
var fetchedRepo *nip34.Repository
|
||||
if existingConfig.Identifier == "" {
|
||||
log(" searching for existing events... ")
|
||||
repo, _, _, err := fetchRepositoryAndState(ctx, owner, identifier, nil)
|
||||
if err == nil && repo.Event.ID != nostr.ZeroID {
|
||||
fetchedRepo = &repo
|
||||
log("found one from %s.\n", repo.Event.CreatedAt.Time().Format(time.DateOnly))
|
||||
} else {
|
||||
log("none found.\n")
|
||||
}
|
||||
}
|
||||
|
||||
// set config with fetched values or defaults
|
||||
var config Nip34Config
|
||||
if fetchedRepo != nil {
|
||||
config = RepositoryToConfig(*fetchedRepo)
|
||||
} else if existingConfig.Identifier != "" {
|
||||
config = existingConfig
|
||||
} else {
|
||||
// get earliest unique commit
|
||||
var earliestCommit string
|
||||
if output, err := exec.Command("git", "rev-list", "--max-parents=0", "HEAD").Output(); err == nil {
|
||||
earliestCommit = strings.TrimSpace(string(output))
|
||||
}
|
||||
|
||||
config = Nip34Config{
|
||||
Identifier: identifier,
|
||||
Owner: ownerStr,
|
||||
Name: identifier,
|
||||
Description: "",
|
||||
Web: []string{},
|
||||
GraspServers: []string{"gitnostr.com", "relay.ngit.dev"},
|
||||
EarliestUniqueCommit: earliestCommit,
|
||||
Maintainers: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
// helper to get value from flags, existing config, or default
|
||||
getValue := func(existingVal, flagVal, defaultVal string) string {
|
||||
if flagVal != "" {
|
||||
@@ -161,21 +236,88 @@ aside from those, there is also:
|
||||
return defaultVals
|
||||
}
|
||||
|
||||
config := Nip34Config{
|
||||
Identifier: getValue(existingConfig.Identifier, c.String("identifier"), baseName),
|
||||
Name: getValue(existingConfig.Name, c.String("name"), baseName),
|
||||
Description: getValue(existingConfig.Description, c.String("description"), ""),
|
||||
Web: getSliceValue(existingConfig.Web, c.StringSlice("web"), []string{}),
|
||||
Owner: getValue(existingConfig.Owner, c.String("owner"), ""),
|
||||
GraspServers: getSliceValue(existingConfig.GraspServers, c.StringSlice("grasp-servers"), []string{"gitnostr.com", "relay.ngit.dev"}),
|
||||
EarliestUniqueCommit: getValue(existingConfig.EarliestUniqueCommit, c.String("earliest-unique-commit"), earliestCommit),
|
||||
Maintainers: getSliceValue(existingConfig.Maintainers, c.StringSlice("maintainers"), []string{}),
|
||||
}
|
||||
// override with flags and existing config
|
||||
config.Identifier = getValue(existingConfig.Identifier, c.String("identifier"), config.Identifier)
|
||||
config.Name = getValue(existingConfig.Name, c.String("name"), config.Name)
|
||||
config.Description = getValue(existingConfig.Description, c.String("description"), config.Description)
|
||||
config.Web = getSliceValue(existingConfig.Web, c.StringSlice("web"), config.Web)
|
||||
config.Owner = getValue(existingConfig.Owner, c.String("owner"), config.Owner)
|
||||
config.GraspServers = getSliceValue(existingConfig.GraspServers, c.StringSlice("grasp-servers"), config.GraspServers)
|
||||
config.EarliestUniqueCommit = getValue(existingConfig.EarliestUniqueCommit, c.String("earliest-unique-commit"), config.EarliestUniqueCommit)
|
||||
config.Maintainers = getSliceValue(existingConfig.Maintainers, c.StringSlice("maintainers"), config.Maintainers)
|
||||
|
||||
if c.Bool("interactive") {
|
||||
if err := promptForConfig(&config); err != nil {
|
||||
// prompt for name
|
||||
if err := survey.AskOne(&survey.Input{
|
||||
Message: "name",
|
||||
Default: config.Name,
|
||||
}, &config.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// prompt for description
|
||||
if err := survey.AskOne(&survey.Input{
|
||||
Message: "description",
|
||||
Default: config.Description,
|
||||
}, &config.Description); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// prompt for grasp servers
|
||||
graspServers, err := promptForStringList("grasp servers", config.GraspServers, []string{
|
||||
"gitnostr.com",
|
||||
"relay.ngit.dev",
|
||||
"pyramid.fiatjaf.com",
|
||||
"git.shakespeare.dyi",
|
||||
}, graspServerHost, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.GraspServers = graspServers
|
||||
|
||||
// prompt for web URLs
|
||||
webURLs, err := promptForStringList("web URLs", config.Web, []string{
|
||||
fmt.Sprintf("https://viewsource.win/%s/%s",
|
||||
nip19.EncodeNpub(nostr.MustPubKeyFromHex(config.Owner)),
|
||||
config.Identifier,
|
||||
),
|
||||
fmt.Sprintf("https://gitworkshop.dev/%s/%s",
|
||||
nip19.EncodeNpub(nostr.MustPubKeyFromHex(config.Owner)),
|
||||
config.Identifier,
|
||||
),
|
||||
}, func(s string) string {
|
||||
return "http" + nostr.NormalizeURL(s)[2:]
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Web = webURLs
|
||||
|
||||
// prompt for earliest unique commit
|
||||
if err := survey.AskOne(&survey.Input{
|
||||
Message: "earliest unique commit",
|
||||
Default: config.EarliestUniqueCommit,
|
||||
}, &config.EarliestUniqueCommit); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Prompt for maintainers
|
||||
maintainers, err := promptForStringList("maintainers", config.Maintainers, []string{}, nil, func(s string) bool {
|
||||
pk, err := parsePubKey(s)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if pk.Hex() == config.Owner {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Maintainers = maintainers
|
||||
|
||||
log("\n")
|
||||
}
|
||||
|
||||
if err := config.Validate(); err != nil {
|
||||
@@ -197,7 +339,7 @@ aside from those, there is also:
|
||||
|
||||
log("edit %s if needed, then run %s to publish.\n",
|
||||
color.CyanString("nip34.json"),
|
||||
color.CyanString("nak git announce"))
|
||||
color.CyanString("nak git sync"))
|
||||
|
||||
return nil
|
||||
},
|
||||
@@ -229,7 +371,7 @@ aside from those, there is also:
|
||||
}
|
||||
|
||||
// fetch repository metadata and state
|
||||
repo, state, err := fetchRepositoryAndState(ctx, owner, identifier, relayHints)
|
||||
repo, _, state, err := fetchRepositoryAndState(ctx, owner, identifier, relayHints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -266,22 +408,7 @@ aside from those, there is also:
|
||||
}
|
||||
|
||||
// write nip34.json inside cloned directory
|
||||
localConfig := Nip34Config{
|
||||
Identifier: repo.ID,
|
||||
Name: repo.Name,
|
||||
Description: repo.Description,
|
||||
Web: repo.Web,
|
||||
Owner: nip19.EncodeNpub(repo.Event.PubKey),
|
||||
GraspServers: make([]string, 0, len(repo.Relays)),
|
||||
EarliestUniqueCommit: repo.EarliestUniqueCommitID,
|
||||
Maintainers: make([]string, 0, len(repo.Maintainers)),
|
||||
}
|
||||
for _, r := range repo.Relays {
|
||||
localConfig.GraspServers = append(localConfig.GraspServers, nostr.NormalizeURL(r))
|
||||
}
|
||||
for _, m := range repo.Maintainers {
|
||||
localConfig.Maintainers = append(localConfig.Maintainers, nip19.EncodeNpub(m))
|
||||
}
|
||||
localConfig := RepositoryToConfig(repo)
|
||||
|
||||
if err := localConfig.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid config: %w", err)
|
||||
@@ -332,11 +459,17 @@ aside from those, there is also:
|
||||
{
|
||||
Name: "push",
|
||||
Usage: "push git changes",
|
||||
Flags: append(defaultKeyFlags, &cli.BoolFlag{
|
||||
Name: "force",
|
||||
Aliases: []string{"f"},
|
||||
Usage: "force push to git remotes",
|
||||
}),
|
||||
Flags: append(defaultKeyFlags,
|
||||
&cli.BoolFlag{
|
||||
Name: "force",
|
||||
Aliases: []string{"f"},
|
||||
Usage: "force push to git remotes",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "tags",
|
||||
Usage: "push all refs under refs/tags",
|
||||
},
|
||||
),
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
// setup signer
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
@@ -403,6 +536,40 @@ aside from those, there is also:
|
||||
log("- setting HEAD to branch %s\n", color.CyanString(remoteBranch))
|
||||
}
|
||||
|
||||
if c.Bool("tags") {
|
||||
// add all refs/tags
|
||||
output, err := exec.Command("git", "show-ref", "--tags").Output()
|
||||
if err != nil && err.Error() != "exit status 1" {
|
||||
// exit status 1 is returned when there are no tags, which should be ok for us
|
||||
return fmt.Errorf("failed to get local tags: %s", err)
|
||||
} else {
|
||||
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
commitHash := parts[0]
|
||||
ref := parts[1]
|
||||
|
||||
tagName := strings.TrimPrefix(ref, "refs/tags/")
|
||||
|
||||
if !c.Bool("force") {
|
||||
// if --force is not passed then we can't overwrite tags
|
||||
if existingHash, exists := state.Tags[tagName]; exists && existingHash != commitHash {
|
||||
return fmt.Errorf("tag %s that is already published pointing to %s, call with --force to overwrite", tagName, existingHash)
|
||||
}
|
||||
}
|
||||
state.Tags[tagName] = commitHash
|
||||
log("- setting tag %s to commit %s\n", color.CyanString(tagName), color.CyanString(commitHash))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// create and sign the new state event
|
||||
newStateEvent := state.ToEvent()
|
||||
err = kr.SignEvent(ctx, &newStateEvent)
|
||||
@@ -423,16 +590,19 @@ aside from those, there is also:
|
||||
pushSuccesses := 0
|
||||
for _, relay := range repo.Relays {
|
||||
relayURL := nostr.NormalizeURL(relay)
|
||||
remoteName := "nip34/grasp/" + strings.TrimPrefix(relayURL, "wss://")
|
||||
remoteName = strings.TrimPrefix(remoteName, "ws://")
|
||||
remoteName := gitRemoteName(relayURL)
|
||||
|
||||
log("pushing to %s...\n", color.CyanString(remoteName))
|
||||
pushArgs := []string{"push", remoteName, fmt.Sprintf("%s:refs/heads/%s", localBranch, remoteBranch)}
|
||||
if c.Bool("force") {
|
||||
pushArgs = append(pushArgs, "--force")
|
||||
}
|
||||
if c.Bool("tags") {
|
||||
pushArgs = append(pushArgs, "--tags")
|
||||
}
|
||||
pushCmd := exec.Command("git", pushArgs...)
|
||||
pushCmd.Stderr = os.Stderr
|
||||
pushCmd.Stdout = os.Stdout
|
||||
if err := pushCmd.Run(); err != nil {
|
||||
log("! failed to push to %s: %v\n", color.YellowString(remoteName), err)
|
||||
} else {
|
||||
@@ -617,50 +787,45 @@ aside from those, there is also:
|
||||
|
||||
func promptForStringList(
|
||||
name string,
|
||||
existing []string,
|
||||
defaults []string,
|
||||
alternatives []string,
|
||||
normalize func(string) string,
|
||||
validate func(string) bool,
|
||||
) ([]string, error) {
|
||||
options := make([]string, 0, len(defaults)+len(existing)+1)
|
||||
options := make([]string, 0, len(defaults)+len(alternatives)+1)
|
||||
options = append(options, defaults...)
|
||||
options = append(options, "add another")
|
||||
|
||||
// add existing not in options
|
||||
for _, item := range existing {
|
||||
for _, item := range alternatives {
|
||||
if !slices.Contains(options, item) {
|
||||
options = append(options, item)
|
||||
}
|
||||
}
|
||||
|
||||
selected := make([]string, len(existing))
|
||||
copy(selected, existing)
|
||||
options = append(options, "add another")
|
||||
|
||||
selected := make([]string, len(defaults))
|
||||
copy(selected, defaults)
|
||||
|
||||
for {
|
||||
prompt := &survey.MultiSelect{
|
||||
newSelected := []string{}
|
||||
if err := survey.AskOne(&survey.MultiSelect{
|
||||
Message: name,
|
||||
Options: options,
|
||||
Default: selected,
|
||||
PageSize: 20,
|
||||
}
|
||||
|
||||
if err := survey.AskOne(prompt, &selected); err != nil {
|
||||
}, &newSelected); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selected = newSelected
|
||||
|
||||
if slices.Contains(selected, "add another") {
|
||||
selected = slices.DeleteFunc(selected, func(s string) bool { return s == "add another" })
|
||||
|
||||
singular := name
|
||||
if strings.HasSuffix(singular, "s") {
|
||||
singular = singular[:len(singular)-1]
|
||||
}
|
||||
|
||||
newPrompt := &survey.Input{
|
||||
Message: fmt.Sprintf("enter new %s", singular),
|
||||
}
|
||||
var newItem string
|
||||
if err := survey.AskOne(newPrompt, &newItem); err != nil {
|
||||
if err := survey.AskOne(&survey.Input{
|
||||
Message: fmt.Sprintf("enter new %s", strings.TrimSuffix(name, "s")),
|
||||
}, &newItem); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -690,97 +855,6 @@ func promptForStringList(
|
||||
return selected, nil
|
||||
}
|
||||
|
||||
func promptForConfig(config *Nip34Config) error {
|
||||
log("\nenter repository details (use arrow keys to navigate, space to select/deselect, enter to confirm):\n\n")
|
||||
|
||||
// prompt for identifier
|
||||
identifierPrompt := &survey.Input{
|
||||
Message: "identifier",
|
||||
Default: config.Identifier,
|
||||
}
|
||||
if err := survey.AskOne(identifierPrompt, &config.Identifier); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// prompt for name
|
||||
namePrompt := &survey.Input{
|
||||
Message: "name",
|
||||
Default: config.Name,
|
||||
}
|
||||
if err := survey.AskOne(namePrompt, &config.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// prompt for description
|
||||
descPrompt := &survey.Input{
|
||||
Message: "description",
|
||||
Default: config.Description,
|
||||
}
|
||||
if err := survey.AskOne(descPrompt, &config.Description); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// prompt for owner
|
||||
for {
|
||||
ownerPrompt := &survey.Input{
|
||||
Message: "owner (npub or hex)",
|
||||
Default: config.Owner,
|
||||
}
|
||||
if err := survey.AskOne(ownerPrompt, &config.Owner); err != nil {
|
||||
return err
|
||||
}
|
||||
if pubkey, err := parsePubKey(config.Owner); err == nil {
|
||||
config.Owner = pubkey.Hex()
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// prompt for grasp servers
|
||||
graspServers, err := promptForStringList("grasp servers", config.GraspServers, []string{
|
||||
"gitnostr.com",
|
||||
"relay.ngit.dev",
|
||||
"pyramid.fiatjaf.com",
|
||||
"git.shakespeare.dyi",
|
||||
}, graspServerHost, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.GraspServers = graspServers
|
||||
|
||||
// prompt for web URLs
|
||||
webURLs, err := promptForStringList("web URLs", config.Web, []string{
|
||||
fmt.Sprintf("https://gitworkshop.dev/%s/%s",
|
||||
nip19.EncodeNpub(nostr.MustPubKeyFromHex(config.Owner)),
|
||||
config.Identifier,
|
||||
),
|
||||
}, func(s string) string {
|
||||
return "http" + nostr.NormalizeURL(s)[2:]
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Web = webURLs
|
||||
|
||||
// Prompt for maintainers
|
||||
maintainers, err := promptForStringList("maintainers", config.Maintainers, []string{}, nil, func(s string) bool {
|
||||
pk, err := parsePubKey(s)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if pk.Hex() == config.Owner {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
config.Maintainers = maintainers
|
||||
|
||||
log("\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
func gitSync(ctx context.Context, signer nostr.Keyer) (nip34.Repository, *nip34.RepositoryState, error) {
|
||||
// read current nip34.json
|
||||
localConfig, err := readNip34ConfigFile("")
|
||||
@@ -795,9 +869,26 @@ func gitSync(ctx context.Context, signer nostr.Keyer) (nip34.Repository, *nip34.
|
||||
}
|
||||
|
||||
// fetch repository announcement and state from relays
|
||||
repo, state, err := fetchRepositoryAndState(ctx, owner, localConfig.Identifier, localConfig.GraspServers)
|
||||
if err != nil && repo.Event.ID == nostr.ZeroID {
|
||||
log("couldn't fetch repository metadata (%s), will publish now\n", err)
|
||||
repo, upToDateRelays, state, err := fetchRepositoryAndState(ctx, owner, localConfig.Identifier, localConfig.GraspServers)
|
||||
notUpToDate := func(graspServer string) bool {
|
||||
return !slices.Contains(upToDateRelays, nostr.NormalizeURL(graspServer))
|
||||
}
|
||||
if upToDateRelays == nil || slices.ContainsFunc(localConfig.GraspServers, notUpToDate) {
|
||||
var relays []string
|
||||
if upToDateRelays == nil {
|
||||
// condition 1
|
||||
relays = append(sys.FetchOutboxRelays(ctx, owner, 3), localConfig.GraspServers...)
|
||||
log("couldn't fetch repository metadata (%s), will publish now\n", err)
|
||||
} else {
|
||||
// condition 2
|
||||
relays = make([]string, 0, len(localConfig.GraspServers)-1)
|
||||
for _, gs := range localConfig.GraspServers {
|
||||
if notUpToDate(gs) {
|
||||
relays = append(relays, graspServerHost(gs))
|
||||
}
|
||||
}
|
||||
log("some grasp servers (%v) are not up-to-date, will publish to them\n", relays)
|
||||
}
|
||||
// create a local repository object from config and publish it
|
||||
localRepo := localConfig.ToRepository()
|
||||
|
||||
@@ -814,7 +905,6 @@ func gitSync(ctx context.Context, signer nostr.Keyer) (nip34.Repository, *nip34.
|
||||
return repo, state, fmt.Errorf("failed to sign announcement: %w", err)
|
||||
}
|
||||
|
||||
relays := append(sys.FetchOutboxRelays(ctx, owner, 3), localConfig.GraspServers...)
|
||||
for res := range sys.Pool.PublishMany(ctx, relays, event) {
|
||||
if res.Error != nil {
|
||||
log("! error publishing to %s: %v\n", color.YellowString(res.RelayURL), res.Error)
|
||||
@@ -914,7 +1004,7 @@ func gitSync(ctx context.Context, signer nostr.Keyer) (nip34.Repository, *nip34.
|
||||
func fetchFromRemotes(ctx context.Context, targetDir string, repo nip34.Repository) {
|
||||
// fetch from each grasp remote
|
||||
for _, grasp := range repo.Relays {
|
||||
remoteName := "nip34/grasp/" + strings.Split(grasp, "/")[2]
|
||||
remoteName := gitRemoteName(grasp)
|
||||
|
||||
logverbose("fetching from %s...\n", remoteName)
|
||||
fetchCmd := exec.Command("git", "fetch", remoteName)
|
||||
@@ -940,45 +1030,69 @@ func gitSetupRemotes(ctx context.Context, dir string, repo nip34.Repository) {
|
||||
return
|
||||
}
|
||||
|
||||
// delete all nip34/grasp/ remotes
|
||||
// delete all nip34/grasp/ remotes that we don't have anymore in repo
|
||||
remotes := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
for i, remote := range remotes {
|
||||
remote = strings.TrimSpace(remote)
|
||||
remotes[i] = remote
|
||||
|
||||
if strings.HasPrefix(remote, "nip34/grasp/") {
|
||||
if !slices.Contains(repo.Relays, nostr.NormalizeURL(remote[12:])) {
|
||||
delCmd := exec.Command("git", "remote", "remove", remote)
|
||||
if dir != "" {
|
||||
delCmd.Dir = dir
|
||||
}
|
||||
if err := delCmd.Run(); err != nil {
|
||||
logverbose("failed to remove remote %s: %v\n", remote, err)
|
||||
graspURL := rebuildGraspURLFromRemote(remote)
|
||||
|
||||
getUrlCmd := exec.Command("git", "remote", "get-url", remote)
|
||||
if dir != "" {
|
||||
getUrlCmd.Dir = dir
|
||||
}
|
||||
if output, err := getUrlCmd.Output(); err != nil {
|
||||
panic(fmt.Errorf("failed to read remote (%s) url from git: %s", remote, err))
|
||||
} else {
|
||||
// check if the remote url is correct so we can update it if not
|
||||
gitURL := fmt.Sprintf("http%s/%s/%s.git", nostr.NormalizeURL(graspURL)[2:], nip19.EncodeNpub(repo.PubKey), repo.ID)
|
||||
if strings.TrimSpace(string(output)) != gitURL {
|
||||
goto delete
|
||||
}
|
||||
}
|
||||
|
||||
// check if this remote is not present in our grasp list anymore
|
||||
if !slices.Contains(repo.Relays, nostr.NormalizeURL(graspURL)) {
|
||||
goto delete
|
||||
}
|
||||
|
||||
continue
|
||||
|
||||
delete:
|
||||
logverbose("deleting remote %s\n", remote)
|
||||
delCmd := exec.Command("git", "remote", "remove", remote)
|
||||
if dir != "" {
|
||||
delCmd.Dir = dir
|
||||
}
|
||||
if err := delCmd.Run(); err != nil {
|
||||
logverbose("failed to remove remote %s: %v\n", remote, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// create new remotes for each grasp server
|
||||
remotes = strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
for _, relay := range repo.Relays {
|
||||
remote := "nip34/grasp/" + strings.TrimPrefix(relay, "wss://")
|
||||
remote := gitRemoteName(relay)
|
||||
gitURL := fmt.Sprintf("http%s/%s/%s.git", nostr.NormalizeURL(relay)[2:], nip19.EncodeNpub(repo.PubKey), repo.ID)
|
||||
|
||||
if !slices.Contains(remotes, remote) {
|
||||
// construct the git URL
|
||||
gitURL := fmt.Sprintf("http%s/%s/%s.git",
|
||||
relay[2:], nip19.EncodeNpub(repo.PubKey), repo.ID)
|
||||
if slices.Contains(remotes, remote) {
|
||||
continue
|
||||
}
|
||||
|
||||
addCmd := exec.Command("git", "remote", "add", remote, gitURL)
|
||||
if dir != "" {
|
||||
addCmd.Dir = dir
|
||||
}
|
||||
if out, err := addCmd.Output(); err != nil {
|
||||
var stderr string
|
||||
if exiterr, ok := err.(*exec.ExitError); ok {
|
||||
stderr = string(exiterr.Stderr)
|
||||
}
|
||||
logverbose("failed to add remote %s: %s %s\n", remote, stderr, string(out))
|
||||
logverbose("adding new remote for '%s'\n", relay)
|
||||
addCmd := exec.Command("git", "remote", "add", remote, gitURL)
|
||||
if dir != "" {
|
||||
addCmd.Dir = dir
|
||||
}
|
||||
if out, err := addCmd.Output(); err != nil {
|
||||
var stderr string
|
||||
if exiterr, ok := err.(*exec.ExitError); ok {
|
||||
stderr = string(exiterr.Stderr)
|
||||
}
|
||||
logverbose("failed to add remote %s: %s %s\n", remote, stderr, string(out))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -994,7 +1108,7 @@ func gitUpdateRefs(ctx context.Context, dir string, state nip34.RepositoryState)
|
||||
lines := strings.Split(string(output), "\n")
|
||||
for _, line := range lines {
|
||||
parts := strings.Fields(line)
|
||||
if len(parts) >= 2 && strings.Contains(parts[1], "refs/remotes/nip34/state/") {
|
||||
if len(parts) >= 2 && strings.Contains(parts[1], "refs/heads/nip34/state/") {
|
||||
delCmd := exec.Command("git", "update-ref", "-d", parts[1])
|
||||
if dir != "" {
|
||||
delCmd.Dir = dir
|
||||
@@ -1011,7 +1125,7 @@ func gitUpdateRefs(ctx context.Context, dir string, state nip34.RepositoryState)
|
||||
branchName = "refs/heads/" + branchName
|
||||
}
|
||||
|
||||
refName := "refs/remotes/nip34/state/" + strings.TrimPrefix(branchName, "refs/heads/")
|
||||
refName := "refs/heads/nip34/state/" + strings.TrimPrefix(branchName, "refs/heads/")
|
||||
updateCmd := exec.Command("git", "update-ref", refName, commit)
|
||||
if dir != "" {
|
||||
updateCmd.Dir = dir
|
||||
@@ -1024,7 +1138,7 @@ func gitUpdateRefs(ctx context.Context, dir string, state nip34.RepositoryState)
|
||||
// create ref for HEAD
|
||||
if state.HEAD != "" {
|
||||
if headCommit, ok := state.Branches[state.HEAD]; ok {
|
||||
headRefName := "refs/remotes/nip34/state/HEAD"
|
||||
headRefName := "refs/heads/nip34/state/HEAD"
|
||||
updateCmd := exec.Command("git", "update-ref", headRefName, headCommit)
|
||||
if dir != "" {
|
||||
updateCmd.Dir = dir
|
||||
@@ -1041,7 +1155,7 @@ func fetchRepositoryAndState(
|
||||
pubkey nostr.PubKey,
|
||||
identifier string,
|
||||
relayHints []string,
|
||||
) (repo nip34.Repository, state *nip34.RepositoryState, err error) {
|
||||
) (repo nip34.Repository, upToDateRelays []string, state *nip34.RepositoryState, err error) {
|
||||
// fetch repository announcement (30617)
|
||||
relays := appendUnique(relayHints, sys.FetchOutboxRelays(ctx, pubkey, 3)...)
|
||||
for ie := range sys.Pool.FetchMany(ctx, relays, nostr.Filter{
|
||||
@@ -1051,13 +1165,24 @@ func fetchRepositoryAndState(
|
||||
"d": []string{identifier},
|
||||
},
|
||||
Limit: 2,
|
||||
}, nostr.SubscriptionOptions{Label: "nak-git"}) {
|
||||
}, nostr.SubscriptionOptions{
|
||||
Label: "nak-git",
|
||||
CheckDuplicate: func(id nostr.ID, relay string) bool {
|
||||
return false
|
||||
},
|
||||
}) {
|
||||
if ie.Event.CreatedAt > repo.CreatedAt {
|
||||
repo = nip34.ParseRepository(ie.Event)
|
||||
|
||||
// reset this list as the previous was for relays with the older version
|
||||
upToDateRelays = []string{ie.Relay.URL}
|
||||
} else if ie.Event.CreatedAt == repo.CreatedAt {
|
||||
// we discard this because it's the same, but this relay is up-to-date
|
||||
upToDateRelays = append(upToDateRelays, ie.Relay.URL)
|
||||
}
|
||||
}
|
||||
if repo.Event.ID == nostr.ZeroID {
|
||||
return repo, state, fmt.Errorf("no repository announcement (kind 30617) found for %s", identifier)
|
||||
return repo, upToDateRelays, state, fmt.Errorf("no repository announcement (kind 30617) found for %s", identifier)
|
||||
}
|
||||
|
||||
// fetch repository state (30618)
|
||||
@@ -1087,10 +1212,10 @@ func fetchRepositoryAndState(
|
||||
}
|
||||
}
|
||||
if stateErr != nil {
|
||||
return repo, state, stateErr
|
||||
return repo, upToDateRelays, state, stateErr
|
||||
}
|
||||
|
||||
return repo, state, nil
|
||||
return repo, upToDateRelays, state, nil
|
||||
}
|
||||
|
||||
type StateErr struct{ string }
|
||||
@@ -1367,8 +1492,6 @@ func figureOutBranches(c *cli.Command, refspec string, isPush bool) (
|
||||
return localBranch, remoteBranch, nil
|
||||
}
|
||||
|
||||
func graspServerHost(s string) string { return strings.SplitN(nostr.NormalizeURL(s), "/", 3)[2] }
|
||||
|
||||
type Nip34Config struct {
|
||||
Identifier string `json:"identifier"`
|
||||
Name string `json:"name"`
|
||||
@@ -1380,6 +1503,26 @@ type Nip34Config struct {
|
||||
Maintainers []string `json:"maintainers"`
|
||||
}
|
||||
|
||||
func RepositoryToConfig(repo nip34.Repository) Nip34Config {
	config := Nip34Config{
		Identifier:           repo.ID,
		Name:                 repo.Name,
		Description:          repo.Description,
		Web:                  repo.Web,
		Owner:                nip19.EncodeNpub(repo.Event.PubKey),
		GraspServers:         make([]string, 0, len(repo.Relays)),
		EarliestUniqueCommit: repo.EarliestUniqueCommitID,
		Maintainers:          make([]string, 0, len(repo.Maintainers)),
	}
	for _, r := range repo.Relays {
		config.GraspServers = append(config.GraspServers, graspServerHost(r))
	}
	for _, m := range repo.Maintainers {
		config.Maintainers = append(config.Maintainers, nip19.EncodeNpub(m))
	}
	return config
}
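// Usage sketch (not part of this diff): RepositoryToConfig is roughly the inverse of
// Nip34Config.ToRepository for the fields both sides carry — relay URLs become bare grasp
// hosts and pubkeys become npubs — and it is what `nak git clone` uses above to derive a
// local nip34.json from a fetched kind 30617 announcement. Roughly:
//
//	config := RepositoryToConfig(repo)
//	data, _ := json.MarshalIndent(config, "", "  ") // exact serialization is an assumption
//	_ = os.WriteFile("nip34.json", data, 0644)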
func (localConfig Nip34Config) Validate() error {
|
||||
_, err := parsePubKey(localConfig.Owner)
|
||||
if err != nil {
|
||||
@@ -1430,3 +1573,18 @@ func (localConfig Nip34Config) ToRepository() nip34.Repository {
|
||||
|
||||
return localRepo
|
||||
}
|
||||
|
||||
func gitRemoteName(graspURL string) string {
	host := graspServerHost(graspURL)
	host = strings.Replace(host, ":", "__", 1)
	return "nip34/grasp/" + host
}

func rebuildGraspURLFromRemote(remoteName string) string {
	host := strings.TrimPrefix(remoteName, "nip34/grasp/")
	return strings.Replace(host, "__", ":", 1)
}

func graspServerHost(s string) string {
	return strings.SplitN(nostr.NormalizeURL(s), "/", 3)[2]
}
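// Usage sketch (not part of this diff): gitRemoteName and rebuildGraspURLFromRemote are
// meant to round-trip a grasp server host through a git remote name, escaping ":" as "__"
// since colons are not valid in ref names. The host below is made up; outputs are roughly:
//
//	gitRemoteName("wss://relay.example.com:4443")                    // "nip34/grasp/relay.example.com__4443"
//	rebuildGraspURLFromRemote("nip34/grasp/relay.example.com__4443") // "relay.example.com:4443"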
35
go.mod
@@ -1,18 +1,18 @@
|
||||
module github.com/fiatjaf/nak
|
||||
|
||||
go 1.24.1
|
||||
go 1.25
|
||||
|
||||
require (
|
||||
fiatjaf.com/lib v0.3.1
|
||||
fiatjaf.com/nostr v0.0.0-20251124002842-de54dd1fa4b8
|
||||
fiatjaf.com/nostr v0.0.0-20251230181913-e52ffa631bd6
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||
github.com/bep/debounce v1.2.1
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.6
|
||||
github.com/charmbracelet/glamour v0.10.0
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0
|
||||
github.com/fatih/color v1.16.0
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/liamg/magic v0.0.1
|
||||
github.com/liamg/magic v0.0.1 // indirect
|
||||
github.com/mailru/easyjson v0.9.1
|
||||
github.com/mark3labs/mcp-go v0.8.3
|
||||
github.com/markusmobius/go-dateparser v1.2.3
|
||||
@@ -22,27 +22,38 @@ require (
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/urfave/cli/v3 v3.0.0-beta1
|
||||
github.com/winfsp/cgofuse v1.6.0
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
|
||||
golang.org/x/sync v0.18.0
|
||||
golang.org/x/term v0.32.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 // indirect
|
||||
github.com/FastFilter/xorfilter v0.2.1 // indirect
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 // indirect
|
||||
github.com/PowerDNS/lmdb-go v1.9.3 // indirect
|
||||
github.com/alecthomas/chroma/v2 v2.14.0 // indirect
|
||||
github.com/andybalholm/brotli v1.1.1 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/aymerick/douceur v0.2.0 // indirect
|
||||
github.com/bluekeyes/go-gitdiff v0.7.1 // indirect
|
||||
github.com/btcsuite/btcd v0.24.2 // indirect
|
||||
github.com/btcsuite/btcd/btcutil v1.1.5 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
|
||||
github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 // indirect
|
||||
github.com/charmbracelet/x/ansi v0.8.0 // indirect
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
|
||||
github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf // indirect
|
||||
github.com/charmbracelet/x/term v0.2.1 // indirect
|
||||
github.com/chzyer/logex v1.1.10 // indirect
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect
|
||||
github.com/coder/websocket v1.8.14 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect
|
||||
github.com/dgraph-io/ristretto/v2 v2.3.0 // indirect
|
||||
github.com/dlclark/regexp2 v1.11.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/elliotchance/pie/v2 v2.7.0 // indirect
|
||||
github.com/elnosh/gonuts v0.4.2 // indirect
|
||||
@@ -50,19 +61,25 @@ require (
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-git/go-git/v5 v5.16.3 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/css v1.0.1 // indirect
|
||||
github.com/hablullah/go-hijri v1.0.2 // indirect
|
||||
github.com/hablullah/go-juliandays v1.0.0 // indirect
|
||||
github.com/jalaali/go-jalaali v0.0.0-20210801064154-80525e88d958 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/magefile/mage v1.14.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/microcosm-cc/bluemonday v1.0.27 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/muesli/reflow v0.3.0 // indirect
|
||||
github.com/muesli/termenv v0.16.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rs/cors v1.11.1 // indirect
|
||||
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 // indirect
|
||||
github.com/templexxx/cpu v0.0.1 // indirect
|
||||
@@ -75,7 +92,9 @@ require (
|
||||
github.com/valyala/fasthttp v1.59.0 // indirect
|
||||
github.com/wasilibs/go-re2 v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/bbolt v1.4.2 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
github.com/yuin/goldmark v1.7.8 // indirect
|
||||
github.com/yuin/goldmark-emoji v1.0.5 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/sys v0.35.0 // indirect
|
||||
|
||||
75
go.sum
@@ -1,19 +1,30 @@
|
||||
fiatjaf.com/lib v0.3.1 h1:/oFQwNtFRfV+ukmOCxfBEAuayoLwXp4wu2/fz5iHpwA=
|
||||
fiatjaf.com/lib v0.3.1/go.mod h1:Ycqq3+mJ9jAWu7XjbQI1cVr+OFgnHn79dQR5oTII47g=
|
||||
fiatjaf.com/nostr v0.0.0-20251124002842-de54dd1fa4b8 h1:R16mnlJ3qvVar7G4rzY+Z+mEAf2O6wpHTlRlHAt2Od8=
|
||||
fiatjaf.com/nostr v0.0.0-20251124002842-de54dd1fa4b8/go.mod h1:QEGyTgAjjTFwDx2BJGZiCdmoAcWA/G+sQy7wDqKzSPU=
|
||||
fiatjaf.com/nostr v0.0.0-20251230181913-e52ffa631bd6 h1:yH+cU9ZNgUdMCRa5eS3pmqTPP/QdZtSmQAIrN/U5nEc=
|
||||
fiatjaf.com/nostr v0.0.0-20251230181913-e52ffa631bd6/go.mod h1:ue7yw0zHfZj23Ml2kVSdBx0ENEaZiuvGxs/8VEN93FU=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
|
||||
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
|
||||
github.com/FastFilter/xorfilter v0.2.1 h1:lbdeLG9BdpquK64ZsleBS8B4xO/QW1IM0gMzF7KaBKc=
|
||||
github.com/FastFilter/xorfilter v0.2.1/go.mod h1:aumvdkhscz6YBZF9ZA/6O4fIoNod4YR50kIVGGZ7l9I=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3/go.mod h1:we0YA5CsBbH5+/NUzC/AlMmxaDtWlXeNsqrwXjTzmzA=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
|
||||
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
|
||||
github.com/PowerDNS/lmdb-go v1.9.3 h1:AUMY2pZT8WRpkEv39I9Id3MuoHd+NZbTVpNhruVkPTg=
|
||||
github.com/PowerDNS/lmdb-go v1.9.3/go.mod h1:TE0l+EZK8Z1B4dx070ZxkWTlp8RG1mjN0/+FkFRQMtU=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
|
||||
github.com/alecthomas/assert/v2 v2.7.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
|
||||
github.com/alecthomas/chroma/v2 v2.14.0 h1:R3+wzpnUArGcQz7fCETQBzO5n9IMNi13iIs46aU4V9E=
|
||||
github.com/alecthomas/chroma/v2 v2.14.0/go.mod h1:QolEbTfmUHIMVpBqxeDnNBj2uoeI4EbYP4i6n68SG4I=
|
||||
github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
|
||||
github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
|
||||
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
|
||||
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
|
||||
github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
|
||||
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
|
||||
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
|
||||
github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
|
||||
github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
|
||||
github.com/bluekeyes/go-gitdiff v0.7.1 h1:graP4ElLRshr8ecu0UtqfNTCHrtSyZd3DABQm/DWesQ=
|
||||
@@ -48,6 +59,22 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
|
||||
github.com/charmbracelet/glamour v0.10.0 h1:MtZvfwsYCx8jEPFJm3rIBFIMZUfUJ765oX8V6kXldcY=
|
||||
github.com/charmbracelet/glamour v0.10.0/go.mod h1:f+uf+I/ChNmqo087elLnVdCiVgjSKWuXa/l6NU2ndYk=
|
||||
github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834 h1:ZR7e0ro+SZZiIZD7msJyA+NjkCNNavuiPBLgerbOziE=
|
||||
github.com/charmbracelet/lipgloss v1.1.1-0.20250404203927-76690c660834/go.mod h1:aKC/t2arECF6rNOnaKaVU6y4t4ZeHQzqfxedE/VkVhA=
|
||||
github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE=
|
||||
github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a h1:G99klV19u0QnhiizODirwVksQB91TJKV/UaTnACcG30=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20240806155701-69247e0abc2a/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
|
||||
github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf h1:rLG0Yb6MQSDKdB52aGX55JT1oi0P0Kuaj7wi1bLUpnI=
|
||||
github.com/charmbracelet/x/exp/slice v0.0.0-20250327172914-2fdc97757edf/go.mod h1:B3UgsnsBZS/eX42BlaNiJkD1pPOUa+oF1IYC6Yd2CEU=
|
||||
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
|
||||
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
|
||||
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
|
||||
@@ -56,6 +83,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWs
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
|
||||
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
|
||||
github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -72,6 +100,8 @@ github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXR
|
||||
github.com/dgraph-io/ristretto/v2 v2.3.0/go.mod h1:gpoRV3VzrEY1a9dWAYV6T1U7YzfgttXdd/ZzL1s9OZM=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
|
||||
github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
@@ -105,13 +135,16 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
|
||||
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hablullah/go-hijri v1.0.2 h1:drT/MZpSZJQXo7jftf5fthArShcaMtsal0Zf/dnmp6k=
|
||||
github.com/hablullah/go-hijri v1.0.2/go.mod h1:OS5qyYLDjORXzK4O1adFw9Q5WfhOcMdAKglDkcTxgWQ=
|
||||
github.com/hablullah/go-juliandays v1.0.0 h1:A8YM7wIj16SzlKT0SRJc9CD29iiaUzpBLzh5hr0/5p0=
|
||||
github.com/hablullah/go-juliandays v1.0.0/go.mod h1:0JOYq4oFOuDja+oospuc61YoX+uNEn7Z6uHYTbBzdGc=
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2 h1:SbJP1sUP+n1UF8NXBA14BuojmTez+mDgOk0bC057HQw=
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2/go.mod h1:ugNaD/iv5JYyS1Rcvi57Wz7/vrLQJo10mmketmoef48=
|
||||
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
|
||||
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jalaali/go-jalaali v0.0.0-20210801064154-80525e88d958 h1:qxLoi6CAcXVzjfvu+KXIXJOAsQB62LXjsfbOaErsVzE=
|
||||
@@ -132,10 +165,10 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/liamg/magic v0.0.1 h1:Ru22ElY+sCh6RvRTWjQzKKCxsEco8hE0co8n1qe7TBM=
|
||||
github.com/liamg/magic v0.0.1/go.mod h1:yQkOmZZI52EA+SQ2xyHpVw8fNvTBruF873Y+Vt6S+fk=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/magefile/mage v1.14.0 h1:6QDX3g6z1YvJ4olPhT1wksUcSa/V0a1B+pJb73fBjyo=
|
||||
github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
|
||||
github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8=
|
||||
@@ -150,19 +183,26 @@ github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stg
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-tty v0.0.7 h1:KJ486B6qI8+wBO7kQxYgmmEFDaFEE96JMBQ7h400N8Q=
|
||||
github.com/mattn/go-tty v0.0.7/go.mod h1:f2i5ZOvXBU/tCABmLmOfzLz9azMo5wdAaElRNnJKr+k=
|
||||
github.com/mdp/qrterminal/v3 v3.2.1 h1:6+yQjiiOsSuXT5n9/m60E54vdgFsw0zhADHhHLrFet4=
|
||||
github.com/mdp/qrterminal/v3 v3.2.1/go.mod h1:jOTmXvnBsMy5xqLniO0R++Jmjs2sTm9dFSuQ5kpz/SU=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
|
||||
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
|
||||
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
|
||||
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
|
||||
github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
|
||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
@@ -178,6 +218,10 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
|
||||
@@ -217,13 +261,20 @@ github.com/wasilibs/go-re2 v1.3.0 h1:LFhBNzoStM3wMie6rN2slD1cuYH2CGiHpvNL3UtcsMw
|
||||
github.com/wasilibs/go-re2 v1.3.0/go.mod h1:AafrCXVvGRJJOImMajgJ2M7rVmWyisVK7sFshbxnVrg=
|
||||
github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ=
|
||||
github.com/wasilibs/nottinygc v0.4.0/go.mod h1:oDcIotskuYNMpqMF23l7Z8uzD4TC0WXHK8jetlB3HIo=
|
||||
github.com/winfsp/cgofuse v1.6.0 h1:re3W+HTd0hj4fISPBqfsrwyvPFpzqhDu8doJ9nOPDB0=
|
||||
github.com/winfsp/cgofuse v1.6.0/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
|
||||
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
|
||||
github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
|
||||
github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic=
|
||||
github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
|
||||
github.com/yuin/goldmark-emoji v1.0.5 h1:EMVWyCGPlXJfUXBXpuMu+ii3TIaxbVBnEX9uaDC4cIk=
|
||||
github.com/yuin/goldmark-emoji v1.0.5/go.mod h1:tTkZEbwu5wkPmgTcitqddVxY9osFZiavD+r4AzQrh1U=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
|
||||
12
helpers.go
@@ -46,8 +46,14 @@ var (
)

func isPiped() bool {
	stat, _ := os.Stdin.Stat()
	return stat.Mode()&os.ModeCharDevice == 0
	stat, err := os.Stdin.Stat()
	if err != nil {
		panic(err)
	}

	mode := stat.Mode()
	is := mode&os.ModeCharDevice == 0
	return is
}

func getJsonsOrBlank() iter.Seq[string] {
@@ -76,7 +82,7 @@ func getJsonsOrBlank() {
			return true
		})

		if !hasStdin && !isPiped() {
		if !hasStdin {
			yield("{}")
		}
13
key.go
@@ -279,12 +279,13 @@ func getSecretKeysFromStdinLinesOrSlice(ctx context.Context, _ *cli.Command, key
				continue
			}
			sk = data.(nostr.SecretKey)
		}

		sk, err := nostr.SecretKeyFromHex(sec)
		if err != nil {
			ctx = lineProcessingError(ctx, "invalid hex key: %s", err)
			continue
		} else {
			var err error
			sk, err = nostr.SecretKeyFromHex(sec)
			if err != nil {
				ctx = lineProcessingError(ctx, "invalid hex key: %s", err)
				continue
			}
		}

		ch <- sk
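// Illustrative sketch (not part of this diff): the change above moves the hex parse into
// an explicit `else` branch, so a key that was already produced by the branch above
// (e.g. set via the `data.(nostr.SecretKey)` type assertion) is no longer re-parsed as
// hex and rejected. The resulting control flow, roughly, with a hypothetical condition name:
//
//	if isEncryptedOrSpecial(sec) {
//		sk = data.(nostr.SecretKey)
//	} else {
//		var err error
//		sk, err = nostr.SecretKeyFromHex(sec)
//		if err != nil {
//			continue // skip invalid hex lines
//		}
//	}
//	ch <- sk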
43
lmdb.go
Normal file
@@ -0,0 +1,43 @@
//go:build linux && !riscv64 && !arm64

package main

import (
	"os"
	"path/filepath"

	"fiatjaf.com/nostr/eventstore/lmdb"
	"fiatjaf.com/nostr/eventstore/nullstore"
	"fiatjaf.com/nostr/sdk"
	"fiatjaf.com/nostr/sdk/hints/lmdbh"
	lmdbkv "fiatjaf.com/nostr/sdk/kvstore/lmdb"
	"github.com/urfave/cli/v3"
)

func setupLocalDatabases(c *cli.Command, sys *sdk.System) {
	configPath := c.String("config-path")
	if configPath != "" {
		hintsPath := filepath.Join(configPath, "outbox/hints")
		os.MkdirAll(hintsPath, 0755)
		_, err := lmdbh.NewLMDBHints(hintsPath)
		if err != nil {
			log("failed to create lmdb hints db at '%s': %s\n", hintsPath, err)
		}

		eventsPath := filepath.Join(configPath, "events")
		os.MkdirAll(eventsPath, 0755)
		sys.Store = &lmdb.LMDBBackend{Path: eventsPath}
		if err := sys.Store.Init(); err != nil {
			log("failed to create boltdb events db at '%s': %s\n", eventsPath, err)
			sys.Store = &nullstore.NullStore{}
		}

		kvPath := filepath.Join(configPath, "kvstore")
		os.MkdirAll(kvPath, 0755)
		if kv, err := lmdbkv.NewStore(kvPath); err != nil {
			log("failed to create boltdb kvstore db at '%s': %s\n", kvPath, err)
		} else {
			sys.KVStore = kv
		}
	}
}
18 main.go

@@ -2,7 +2,6 @@ package main

import (
	"context"
	"fmt"
	"net/http"
	"net/textproto"
	"os"
@@ -26,9 +25,9 @@ var app = &cli.Command{
	Usage:                     "the nostr army knife command-line tool",
	DisableSliceFlagSeparator: true,
	Commands: []*cli.Command{
		event,
		eventCmd,
		req,
		filter,
		filterCmd,
		fetch,
		count,
		decode,
@@ -40,15 +39,20 @@ var app = &cli.Command{
		bunker,
		serve,
		blossomCmd,
		dekey,
		encrypt,
		decrypt,
		outbox,
		gift,
		outboxCmd,
		wallet,
		mcpServer,
		curl,
		fsCmd,
		publish,
		git,
		nip,
		syncCmd,
		spell,
	},
	Version: version,
	Flags: []cli.Flag{
@@ -59,7 +63,7 @@ var app = &cli.Command{
			if home, err := os.UserHomeDir(); err == nil {
				return filepath.Join(home, ".config/nak")
			} else {
				return filepath.Join("/dev/null")
				return ""
			}
		})(),
	},
@@ -95,9 +99,7 @@ var app = &cli.Command{
	Before: func(ctx context.Context, c *cli.Command) (context.Context, error) {
		sys = sdk.NewSystem()

		if err := initializeOutboxHintsDB(c, sys); err != nil {
			return ctx, fmt.Errorf("failed to initialize outbox hints: %w", err)
		}
		setupLocalDatabases(c, sys)

		sys.Pool = nostr.NewPool(nostr.PoolOptions{
			AuthorKindQueryMiddleware: sys.TrackQueryAttempts,
201 nip.go (new file)

@@ -0,0 +1,201 @@
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"os/exec"
	"runtime"
	"strings"

	"github.com/charmbracelet/glamour"
	"github.com/urfave/cli/v3"
)

type nipInfo struct {
	nip, desc, link string
}

var nip = &cli.Command{
	Name:  "nip",
	Usage: "list NIPs or get the description of a NIP from its number",
	Description: `lists NIPs, fetches and displays NIP text, or opens a NIP page in the browser.

examples:
		nak nip          # list all NIPs
		nak nip 29       # shows nip29 details
		nak nip open 29  # opens nip29 in browser`,
	ArgsUsage: "[NIP number]",
	Commands: []*cli.Command{
		{
			Name:  "open",
			Usage: "open the NIP page in the browser",
			Action: func(ctx context.Context, c *cli.Command) error {
				reqNum := c.Args().First()
				if reqNum == "" {
					return fmt.Errorf("missing NIP number")
				}

				normalize := func(s string) string {
					s = strings.ToLower(s)
					s = strings.TrimPrefix(s, "nip-")
					s = strings.TrimLeft(s, "0")
					if s == "" {
						s = "0"
					}
					return s
				}

				reqNum = normalize(reqNum)

				foundLink := ""
				for info := range listnips() {
					nipNum := normalize(info.nip)
					if nipNum == reqNum {
						foundLink = info.link
						break
					}
				}

				if foundLink == "" {
					return fmt.Errorf("NIP-%s not found", strings.ToUpper(reqNum))
				}

				url := "https://github.com/nostr-protocol/nips/blob/master/" + foundLink
				fmt.Println("Opening " + url)

				var cmd *exec.Cmd
				switch runtime.GOOS {
				case "darwin":
					cmd = exec.Command("open", url)
				case "windows":
					cmd = exec.Command("cmd", "/c", "start", url)
				default:
					cmd = exec.Command("xdg-open", url)
				}

				return cmd.Start()
			},
		},
	},
	Action: func(ctx context.Context, c *cli.Command) error {
		reqNum := c.Args().First()
		if reqNum == "" {
			// list all NIPs
			for info := range listnips() {
				stdout(info.nip + ": " + info.desc)
			}
			return nil
		}

		normalize := func(s string) string {
			s = strings.ToLower(s)
			s = strings.TrimPrefix(s, "nip-")
			s = strings.TrimLeft(s, "0")
			if s == "" {
				s = "0"
			}
			return s
		}

		reqNum = normalize(reqNum)

		var foundLink string
		for info := range listnips() {
			nipNum := normalize(info.nip)

			if nipNum == reqNum {
				foundLink = info.link
				break
			}
		}

		if foundLink == "" {
			return fmt.Errorf("NIP-%s not found", strings.ToUpper(reqNum))
		}

		// fetch the NIP markdown
		url := "https://raw.githubusercontent.com/nostr-protocol/nips/master/" + foundLink
		resp, err := http.Get(url)
		if err != nil {
			return fmt.Errorf("failed to fetch NIP: %w", err)
		}
		defer resp.Body.Close()

		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed to read NIP: %w", err)
		}

		// render markdown
		rendered, err := glamour.Render(string(body), "auto")
		if err != nil {
			return fmt.Errorf("failed to render markdown: %w", err)
		}

		fmt.Print(rendered)
		return nil
	},
}

func listnips() <-chan nipInfo {
	ch := make(chan nipInfo)
	go func() {
		defer close(ch)
		resp, err := http.Get("https://raw.githubusercontent.com/nostr-protocol/nips/master/README.md")
		if err != nil {
			// TODO: handle error? but since chan, maybe send error somehow, but for now, just close
			return
		}
		defer resp.Body.Close()

		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return
		}
		bodyStr := string(body)
		epoch := strings.Index(bodyStr, "## List")
		if epoch == -1 {
			return
		}

		lines := strings.SplitSeq(bodyStr[epoch+8:], "\n")
		for line := range lines {
			line = strings.TrimSpace(line)
			if strings.HasPrefix(line, "##") {
				break
			}
			if !strings.HasPrefix(line, "- [NIP-") {
				continue
			}

			start := strings.Index(line, "[")
			end := strings.Index(line, "]")
			if start == -1 || end == -1 || end < start {
				continue
			}

			content := line[start+1 : end]

			parts := strings.SplitN(content, ":", 2)
			if len(parts) != 2 {
				continue
			}

			nipPart := parts[0]
			descPart := parts[1]

			rest := line[end+1:]
			linkStart := strings.Index(rest, "(")
			linkEnd := strings.Index(rest, ")")
			link := ""
			if linkStart != -1 && linkEnd != -1 && linkEnd > linkStart {
				link = rest[linkStart+1 : linkEnd]
			}

			ch <- nipInfo{nipPart, strings.TrimSpace(descPart), link}
		}
	}()
	return ch
}
11 non_lmdb.go (new file)

@@ -0,0 +1,11 @@
//go:build !linux || riscv64 || arm64

package main

import (
	"fiatjaf.com/nostr/sdk"
	"github.com/urfave/cli/v3"
)

func setupLocalDatabases(c *cli.Command, sys *sdk.System) {
}
@@ -1,56 +0,0 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
"fiatjaf.com/nostr"
|
||||
)
|
||||
|
||||
type AsyncFile struct {
|
||||
fs.Inode
|
||||
ctx context.Context
|
||||
fetched atomic.Bool
|
||||
data []byte
|
||||
ts nostr.Timestamp
|
||||
load func() ([]byte, nostr.Timestamp)
|
||||
}
|
||||
|
||||
var (
|
||||
_ = (fs.NodeOpener)((*AsyncFile)(nil))
|
||||
_ = (fs.NodeGetattrer)((*AsyncFile)(nil))
|
||||
)
|
||||
|
||||
func (af *AsyncFile) Getattr(ctx context.Context, f fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
if af.fetched.CompareAndSwap(false, true) {
|
||||
af.data, af.ts = af.load()
|
||||
}
|
||||
|
||||
out.Size = uint64(len(af.data))
|
||||
out.Mtime = uint64(af.ts)
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (af *AsyncFile) Open(ctx context.Context, flags uint32) (fs.FileHandle, uint32, syscall.Errno) {
|
||||
if af.fetched.CompareAndSwap(false, true) {
|
||||
af.data, af.ts = af.load()
|
||||
}
|
||||
|
||||
return nil, fuse.FOPEN_KEEP_CACHE, 0
|
||||
}
|
||||
|
||||
func (af *AsyncFile) Read(
|
||||
ctx context.Context,
|
||||
f fs.FileHandle,
|
||||
dest []byte,
|
||||
off int64,
|
||||
) (fuse.ReadResult, syscall.Errno) {
|
||||
end := int(off) + len(dest)
|
||||
if end > len(af.data) {
|
||||
end = len(af.data)
|
||||
}
|
||||
return fuse.ReadResultData(af.data[off:end]), 0
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type DeterministicFile struct {
|
||||
fs.Inode
|
||||
get func() (ctime, mtime uint64, data string)
|
||||
}
|
||||
|
||||
var (
|
||||
_ = (fs.NodeOpener)((*DeterministicFile)(nil))
|
||||
_ = (fs.NodeReader)((*DeterministicFile)(nil))
|
||||
_ = (fs.NodeGetattrer)((*DeterministicFile)(nil))
|
||||
)
|
||||
|
||||
func (r *NostrRoot) NewDeterministicFile(get func() (ctime, mtime uint64, data string)) *DeterministicFile {
|
||||
return &DeterministicFile{
|
||||
get: get,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *DeterministicFile) Open(ctx context.Context, flags uint32) (fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
|
||||
return nil, fuse.FOPEN_KEEP_CACHE, fs.OK
|
||||
}
|
||||
|
||||
func (f *DeterministicFile) Getattr(ctx context.Context, fh fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
var content string
|
||||
out.Mode = 0444
|
||||
out.Ctime, out.Mtime, content = f.get()
|
||||
out.Size = uint64(len(content))
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (f *DeterministicFile) Read(ctx context.Context, fh fs.FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
|
||||
_, _, content := f.get()
|
||||
data := unsafe.Slice(unsafe.StringData(content), len(content))
|
||||
|
||||
end := int(off) + len(dest)
|
||||
if end > len(data) {
|
||||
end = len(data)
|
||||
}
|
||||
return fuse.ReadResultData(data[off:end]), fs.OK
|
||||
}
|
||||
@@ -1,408 +0,0 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"fiatjaf.com/lib/debouncer"
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/nip27"
|
||||
"fiatjaf.com/nostr/nip73"
|
||||
"fiatjaf.com/nostr/nip92"
|
||||
sdk "fiatjaf.com/nostr/sdk"
|
||||
"github.com/fatih/color"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type EntityDir struct {
|
||||
fs.Inode
|
||||
root *NostrRoot
|
||||
|
||||
publisher *debouncer.Debouncer
|
||||
event *nostr.Event
|
||||
updating struct {
|
||||
title string
|
||||
content string
|
||||
publishedAt uint64
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
_ = (fs.NodeOnAdder)((*EntityDir)(nil))
|
||||
_ = (fs.NodeGetattrer)((*EntityDir)(nil))
|
||||
_ = (fs.NodeSetattrer)((*EntityDir)(nil))
|
||||
_ = (fs.NodeCreater)((*EntityDir)(nil))
|
||||
_ = (fs.NodeUnlinker)((*EntityDir)(nil))
|
||||
)
|
||||
|
||||
func (e *EntityDir) Getattr(_ context.Context, f fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
out.Ctime = uint64(e.event.CreatedAt)
|
||||
if e.updating.publishedAt != 0 {
|
||||
out.Mtime = e.updating.publishedAt
|
||||
} else {
|
||||
out.Mtime = e.PublishedAt()
|
||||
}
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (e *EntityDir) Create(
|
||||
_ context.Context,
|
||||
name string,
|
||||
flags uint32,
|
||||
mode uint32,
|
||||
out *fuse.EntryOut,
|
||||
) (node *fs.Inode, fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
|
||||
if name == "publish" && e.publisher.IsRunning() {
|
||||
// this causes the publish process to be triggered faster
|
||||
log := e.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
log("publishing now!\n")
|
||||
e.publisher.Flush()
|
||||
return nil, nil, 0, syscall.ENOTDIR
|
||||
}
|
||||
|
||||
return nil, nil, 0, syscall.ENOTSUP
|
||||
}
|
||||
|
||||
func (e *EntityDir) Unlink(ctx context.Context, name string) syscall.Errno {
|
||||
switch name {
|
||||
case "content" + kindToExtension(e.event.Kind):
|
||||
e.updating.content = e.event.Content
|
||||
return syscall.ENOTDIR
|
||||
case "title":
|
||||
e.updating.title = e.Title()
|
||||
return syscall.ENOTDIR
|
||||
default:
|
||||
return syscall.EINTR
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EntityDir) Setattr(_ context.Context, _ fs.FileHandle, in *fuse.SetAttrIn, _ *fuse.AttrOut) syscall.Errno {
|
||||
e.updating.publishedAt = in.Mtime
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (e *EntityDir) OnAdd(_ context.Context) {
|
||||
log := e.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
|
||||
e.AddChild("@author", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(e.root.wd + "/" + nip19.EncodeNpub(e.event.PubKey)),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
|
||||
e.AddChild("event.json", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&DeterministicFile{
|
||||
get: func() (ctime uint64, mtime uint64, data string) {
|
||||
eventj, _ := json.MarshalIndent(e.event, "", " ")
|
||||
return uint64(e.event.CreatedAt),
|
||||
uint64(e.event.CreatedAt),
|
||||
unsafe.String(unsafe.SliceData(eventj), len(eventj))
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
e.AddChild("identifier", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(e.event.Tags.GetD()),
|
||||
Attr: fuse.Attr{
|
||||
Mode: 0444,
|
||||
Ctime: uint64(e.event.CreatedAt),
|
||||
Mtime: uint64(e.event.CreatedAt),
|
||||
Size: uint64(len(e.event.Tags.GetD())),
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
if e.root.signer == nil || e.root.rootPubKey != e.event.PubKey {
|
||||
// read-only
|
||||
e.AddChild("title", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&DeterministicFile{
|
||||
get: func() (ctime uint64, mtime uint64, data string) {
|
||||
return uint64(e.event.CreatedAt), e.PublishedAt(), e.Title()
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
e.AddChild("content."+kindToExtension(e.event.Kind), e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&DeterministicFile{
|
||||
get: func() (ctime uint64, mtime uint64, data string) {
|
||||
return uint64(e.event.CreatedAt), e.PublishedAt(), e.event.Content
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
} else {
|
||||
// writeable
|
||||
e.updating.title = e.Title()
|
||||
e.updating.publishedAt = e.PublishedAt()
|
||||
e.updating.content = e.event.Content
|
||||
|
||||
e.AddChild("title", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
e.root.NewWriteableFile(e.updating.title, uint64(e.event.CreatedAt), e.updating.publishedAt, func(s string) {
|
||||
log("title updated")
|
||||
e.updating.title = strings.TrimSpace(s)
|
||||
e.handleWrite()
|
||||
}),
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
e.AddChild("content."+kindToExtension(e.event.Kind), e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
e.root.NewWriteableFile(e.updating.content, uint64(e.event.CreatedAt), e.updating.publishedAt, func(s string) {
|
||||
log("content updated")
|
||||
e.updating.content = strings.TrimSpace(s)
|
||||
e.handleWrite()
|
||||
}),
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
}
|
||||
|
||||
var refsdir *fs.Inode
|
||||
i := 0
|
||||
for ref := range nip27.Parse(e.event.Content) {
|
||||
if _, isExternal := ref.Pointer.(nip73.ExternalPointer); isExternal {
|
||||
continue
|
||||
}
|
||||
i++
|
||||
|
||||
if refsdir == nil {
|
||||
refsdir = e.NewPersistentInode(e.root.ctx, &fs.Inode{}, fs.StableAttr{Mode: syscall.S_IFDIR})
|
||||
e.root.AddChild("references", refsdir, true)
|
||||
}
|
||||
refsdir.AddChild(fmt.Sprintf("ref_%02d", i), refsdir.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(e.root.wd + "/" + nip19.EncodePointer(ref.Pointer)),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
|
||||
var imagesdir *fs.Inode
|
||||
addImage := func(url string) {
|
||||
if imagesdir == nil {
|
||||
in := &fs.Inode{}
|
||||
imagesdir = e.NewPersistentInode(e.root.ctx, in, fs.StableAttr{Mode: syscall.S_IFDIR})
|
||||
e.AddChild("images", imagesdir, true)
|
||||
}
|
||||
imagesdir.AddChild(filepath.Base(url), imagesdir.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&AsyncFile{
|
||||
ctx: e.root.ctx,
|
||||
load: func() ([]byte, nostr.Timestamp) {
|
||||
ctx, cancel := context.WithTimeout(e.root.ctx, time.Second*20)
|
||||
defer cancel()
|
||||
r, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
if err != nil {
|
||||
log("failed to load image %s: %s\n", url, err)
|
||||
return nil, 0
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
log("failed to load image %s: %s\n", url, err)
|
||||
return nil, 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode >= 300 {
|
||||
log("failed to load image %s: %s\n", url, err)
|
||||
return nil, 0
|
||||
}
|
||||
w := &bytes.Buffer{}
|
||||
io.Copy(w, resp.Body)
|
||||
return w.Bytes(), 0
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
}
|
||||
|
||||
images := nip92.ParseTags(e.event.Tags)
|
||||
for _, imeta := range images {
|
||||
if imeta.URL == "" {
|
||||
continue
|
||||
}
|
||||
addImage(imeta.URL)
|
||||
}
|
||||
|
||||
if tag := e.event.Tags.Find("image"); tag != nil {
|
||||
addImage(tag[1])
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EntityDir) IsNew() bool {
|
||||
return e.event.CreatedAt == 0
|
||||
}
|
||||
|
||||
func (e *EntityDir) PublishedAt() uint64 {
|
||||
if tag := e.event.Tags.Find("published_at"); tag != nil {
|
||||
publishedAt, _ := strconv.ParseUint(tag[1], 10, 64)
|
||||
return publishedAt
|
||||
}
|
||||
return uint64(e.event.CreatedAt)
|
||||
}
|
||||
|
||||
func (e *EntityDir) Title() string {
|
||||
if tag := e.event.Tags.Find("title"); tag != nil {
|
||||
return tag[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (e *EntityDir) handleWrite() {
|
||||
log := e.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
logverbose := e.root.ctx.Value("logverbose").(func(msg string, args ...any))
|
||||
|
||||
if e.root.opts.AutoPublishArticlesTimeout.Hours() < 24*365 {
|
||||
if e.publisher.IsRunning() {
|
||||
log(", timer reset")
|
||||
}
|
||||
log(", publishing the ")
|
||||
if e.IsNew() {
|
||||
log("new")
|
||||
} else {
|
||||
log("updated")
|
||||
}
|
||||
log(" event in %d seconds...\n", int(e.root.opts.AutoPublishArticlesTimeout.Seconds()))
|
||||
} else {
|
||||
log(".\n")
|
||||
}
|
||||
if !e.publisher.IsRunning() {
|
||||
log("- `touch publish` to publish immediately\n")
|
||||
log("- `rm title content." + kindToExtension(e.event.Kind) + "` to erase and cancel the edits\n")
|
||||
}
|
||||
|
||||
e.publisher.Call(func() {
|
||||
if e.Title() == e.updating.title && e.event.Content == e.updating.content {
|
||||
log("not modified, publish canceled.\n")
|
||||
return
|
||||
}
|
||||
|
||||
evt := nostr.Event{
|
||||
Kind: e.event.Kind,
|
||||
Content: e.updating.content,
|
||||
Tags: make(nostr.Tags, len(e.event.Tags)),
|
||||
CreatedAt: nostr.Now(),
|
||||
}
|
||||
copy(evt.Tags, e.event.Tags) // copy tags because that's the rule
|
||||
if e.updating.title != "" {
|
||||
if titleTag := evt.Tags.Find("title"); titleTag != nil {
|
||||
titleTag[1] = e.updating.title
|
||||
} else {
|
||||
evt.Tags = append(evt.Tags, nostr.Tag{"title", e.updating.title})
|
||||
}
|
||||
}
|
||||
|
||||
// "published_at" tag
|
||||
publishedAtStr := strconv.FormatUint(e.updating.publishedAt, 10)
|
||||
if publishedAtStr != "0" {
|
||||
if publishedAtTag := evt.Tags.Find("published_at"); publishedAtTag != nil {
|
||||
publishedAtTag[1] = publishedAtStr
|
||||
} else {
|
||||
evt.Tags = append(evt.Tags, nostr.Tag{"published_at", publishedAtStr})
|
||||
}
|
||||
}
|
||||
|
||||
// add "p" tags from people mentioned and "q" tags from events mentioned
|
||||
for ref := range nip27.Parse(evt.Content) {
|
||||
if _, isExternal := ref.Pointer.(nip73.ExternalPointer); isExternal {
|
||||
continue
|
||||
}
|
||||
|
||||
tag := ref.Pointer.AsTag()
|
||||
key := tag[0]
|
||||
val := tag[1]
|
||||
if key == "e" || key == "a" {
|
||||
key = "q"
|
||||
}
|
||||
if existing := evt.Tags.FindWithValue(key, val); existing == nil {
|
||||
evt.Tags = append(evt.Tags, tag)
|
||||
}
|
||||
}
|
||||
|
||||
// sign and publish
|
||||
if err := e.root.signer.SignEvent(e.root.ctx, &evt); err != nil {
|
||||
log("failed to sign: '%s'.\n", err)
|
||||
return
|
||||
}
|
||||
logverbose("%s\n", evt)
|
||||
|
||||
relays := e.root.sys.FetchWriteRelays(e.root.ctx, e.root.rootPubKey)
|
||||
if len(relays) == 0 {
|
||||
relays = e.root.sys.FetchOutboxRelays(e.root.ctx, e.root.rootPubKey, 6)
|
||||
}
|
||||
|
||||
log("publishing to %d relays... ", len(relays))
|
||||
success := false
|
||||
first := true
|
||||
for res := range e.root.sys.Pool.PublishMany(e.root.ctx, relays, evt) {
|
||||
cleanUrl, _ := strings.CutPrefix(res.RelayURL, "wss://")
|
||||
if !first {
|
||||
log(", ")
|
||||
}
|
||||
first = false
|
||||
|
||||
if res.Error != nil {
|
||||
log("%s: %s", color.RedString(cleanUrl), res.Error)
|
||||
} else {
|
||||
success = true
|
||||
log("%s: ok", color.GreenString(cleanUrl))
|
||||
}
|
||||
}
|
||||
log("\n")
|
||||
|
||||
if success {
|
||||
e.event = &evt
|
||||
log("event updated locally.\n")
|
||||
e.updating.publishedAt = uint64(evt.CreatedAt) // set this so subsequent edits get the correct value
|
||||
} else {
|
||||
log("failed.\n")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (r *NostrRoot) FetchAndCreateEntityDir(
|
||||
parent fs.InodeEmbedder,
|
||||
extension string,
|
||||
pointer nostr.EntityPointer,
|
||||
) (*fs.Inode, error) {
|
||||
event, _, err := r.sys.FetchSpecificEvent(r.ctx, pointer, sdk.FetchSpecificEventParameters{
|
||||
WithRelays: false,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch: %w", err)
|
||||
}
|
||||
|
||||
return r.CreateEntityDir(parent, event), nil
|
||||
}
|
||||
|
||||
func (r *NostrRoot) CreateEntityDir(
|
||||
parent fs.InodeEmbedder,
|
||||
event *nostr.Event,
|
||||
) *fs.Inode {
|
||||
return parent.EmbeddedInode().NewPersistentInode(
|
||||
r.ctx,
|
||||
&EntityDir{root: r, event: event, publisher: debouncer.New(r.opts.AutoPublishArticlesTimeout)},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
)
|
||||
}
|
||||
@@ -1,241 +0,0 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip10"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/nip22"
|
||||
"fiatjaf.com/nostr/nip27"
|
||||
"fiatjaf.com/nostr/nip73"
|
||||
"fiatjaf.com/nostr/nip92"
|
||||
sdk "fiatjaf.com/nostr/sdk"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type EventDir struct {
|
||||
fs.Inode
|
||||
ctx context.Context
|
||||
wd string
|
||||
evt *nostr.Event
|
||||
}
|
||||
|
||||
var _ = (fs.NodeGetattrer)((*EventDir)(nil))
|
||||
|
||||
func (e *EventDir) Getattr(_ context.Context, f fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
out.Mtime = uint64(e.evt.CreatedAt)
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (r *NostrRoot) FetchAndCreateEventDir(
|
||||
parent fs.InodeEmbedder,
|
||||
pointer nostr.EventPointer,
|
||||
) (*fs.Inode, error) {
|
||||
event, _, err := r.sys.FetchSpecificEvent(r.ctx, pointer, sdk.FetchSpecificEventParameters{
|
||||
WithRelays: false,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch: %w", err)
|
||||
}
|
||||
|
||||
return r.CreateEventDir(parent, event), nil
|
||||
}
|
||||
|
||||
func (r *NostrRoot) CreateEventDir(
|
||||
parent fs.InodeEmbedder,
|
||||
event *nostr.Event,
|
||||
) *fs.Inode {
|
||||
h := parent.EmbeddedInode().NewPersistentInode(
|
||||
r.ctx,
|
||||
&EventDir{ctx: r.ctx, wd: r.wd, evt: event},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR, Ino: binary.BigEndian.Uint64(event.ID[8:16])},
|
||||
)
|
||||
|
||||
h.AddChild("@author", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nip19.EncodeNpub(event.PubKey)),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
|
||||
eventj, _ := json.MarshalIndent(event, "", " ")
|
||||
h.AddChild("event.json", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: eventj,
|
||||
Attr: fuse.Attr{
|
||||
Mode: 0444,
|
||||
Ctime: uint64(event.CreatedAt),
|
||||
Mtime: uint64(event.CreatedAt),
|
||||
Size: uint64(len(event.Content)),
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
h.AddChild("id", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(event.ID.Hex()),
|
||||
Attr: fuse.Attr{
|
||||
Mode: 0444,
|
||||
Ctime: uint64(event.CreatedAt),
|
||||
Mtime: uint64(event.CreatedAt),
|
||||
Size: uint64(64),
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
h.AddChild("content.txt", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(event.Content),
|
||||
Attr: fuse.Attr{
|
||||
Mode: 0444,
|
||||
Ctime: uint64(event.CreatedAt),
|
||||
Mtime: uint64(event.CreatedAt),
|
||||
Size: uint64(len(event.Content)),
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
var refsdir *fs.Inode
|
||||
i := 0
|
||||
for ref := range nip27.Parse(event.Content) {
|
||||
if _, isExternal := ref.Pointer.(nip73.ExternalPointer); isExternal {
|
||||
continue
|
||||
}
|
||||
i++
|
||||
|
||||
if refsdir == nil {
|
||||
refsdir = h.NewPersistentInode(r.ctx, &fs.Inode{}, fs.StableAttr{Mode: syscall.S_IFDIR})
|
||||
h.AddChild("references", refsdir, true)
|
||||
}
|
||||
refsdir.AddChild(fmt.Sprintf("ref_%02d", i), refsdir.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nip19.EncodePointer(ref.Pointer)),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
|
||||
var imagesdir *fs.Inode
|
||||
images := nip92.ParseTags(event.Tags)
|
||||
for _, imeta := range images {
|
||||
if imeta.URL == "" {
|
||||
continue
|
||||
}
|
||||
if imagesdir == nil {
|
||||
in := &fs.Inode{}
|
||||
imagesdir = h.NewPersistentInode(r.ctx, in, fs.StableAttr{Mode: syscall.S_IFDIR})
|
||||
h.AddChild("images", imagesdir, true)
|
||||
}
|
||||
imagesdir.AddChild(filepath.Base(imeta.URL), imagesdir.NewPersistentInode(
|
||||
r.ctx,
|
||||
&AsyncFile{
|
||||
ctx: r.ctx,
|
||||
load: func() ([]byte, nostr.Timestamp) {
|
||||
ctx, cancel := context.WithTimeout(r.ctx, time.Second*20)
|
||||
defer cancel()
|
||||
r, err := http.NewRequestWithContext(ctx, "GET", imeta.URL, nil)
|
||||
if err != nil {
|
||||
return nil, 0
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
return nil, 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode >= 300 {
|
||||
return nil, 0
|
||||
}
|
||||
w := &bytes.Buffer{}
|
||||
io.Copy(w, resp.Body)
|
||||
return w.Bytes(), 0
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
}
|
||||
|
||||
if event.Kind == 1 {
|
||||
if pointer := nip10.GetThreadRoot(event.Tags); pointer != nil {
|
||||
nevent := nip19.EncodePointer(pointer)
|
||||
h.AddChild("@root", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nevent),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
if pointer := nip10.GetImmediateParent(event.Tags); pointer != nil {
|
||||
nevent := nip19.EncodePointer(pointer)
|
||||
h.AddChild("@parent", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nevent),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
} else if event.Kind == 1111 {
|
||||
if pointer := nip22.GetThreadRoot(event.Tags); pointer != nil {
|
||||
if xp, ok := pointer.(nip73.ExternalPointer); ok {
|
||||
h.AddChild("@root", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(`<!doctype html><meta http-equiv="refresh" content="0; url=` + xp.Thing + `" />`),
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
} else {
|
||||
nevent := nip19.EncodePointer(pointer)
|
||||
h.AddChild("@parent", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nevent),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
}
|
||||
if pointer := nip22.GetImmediateParent(event.Tags); pointer != nil {
|
||||
if xp, ok := pointer.(nip73.ExternalPointer); ok {
|
||||
h.AddChild("@parent", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(`<!doctype html><meta http-equiv="refresh" content="0; url=` + xp.Thing + `" />`),
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
} else {
|
||||
nevent := nip19.EncodePointer(pointer)
|
||||
h.AddChild("@parent", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nevent),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
@@ -1,16 +0,0 @@
package nostrfs

import (
	"fiatjaf.com/nostr"
)

func kindToExtension(kind nostr.Kind) string {
	switch kind {
	case 30023:
		return "md"
	case 30818:
		return "adoc"
	default:
		return "txt"
	}
}
@@ -1,261 +0,0 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"github.com/fatih/color"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
"github.com/liamg/magic"
|
||||
)
|
||||
|
||||
type NpubDir struct {
|
||||
fs.Inode
|
||||
root *NostrRoot
|
||||
pointer nostr.ProfilePointer
|
||||
fetched atomic.Bool
|
||||
}
|
||||
|
||||
var _ = (fs.NodeOnAdder)((*NpubDir)(nil))
|
||||
|
||||
func (r *NostrRoot) CreateNpubDir(
|
||||
parent fs.InodeEmbedder,
|
||||
pointer nostr.ProfilePointer,
|
||||
signer nostr.Signer,
|
||||
) *fs.Inode {
|
||||
npubdir := &NpubDir{root: r, pointer: pointer}
|
||||
return parent.EmbeddedInode().NewPersistentInode(
|
||||
r.ctx,
|
||||
npubdir,
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR, Ino: binary.BigEndian.Uint64(pointer.PublicKey[8:16])},
|
||||
)
|
||||
}
|
||||
|
||||
func (h *NpubDir) OnAdd(_ context.Context) {
|
||||
log := h.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
|
||||
relays := h.root.sys.FetchOutboxRelays(h.root.ctx, h.pointer.PublicKey, 2)
|
||||
log("- adding folder for %s with relays %s\n",
|
||||
color.HiYellowString(nip19.EncodePointer(h.pointer)), color.HiGreenString("%v", relays))
|
||||
|
||||
h.AddChild("pubkey", h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&fs.MemRegularFile{Data: []byte(h.pointer.PublicKey.Hex() + "\n"), Attr: fuse.Attr{Mode: 0444}},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
go func() {
|
||||
pm := h.root.sys.FetchProfileMetadata(h.root.ctx, h.pointer.PublicKey)
|
||||
if pm.Event == nil {
|
||||
return
|
||||
}
|
||||
|
||||
metadataj, _ := json.MarshalIndent(pm, "", " ")
|
||||
h.AddChild(
|
||||
"metadata.json",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: metadataj,
|
||||
Attr: fuse.Attr{
|
||||
Mtime: uint64(pm.Event.CreatedAt),
|
||||
Mode: 0444,
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
),
|
||||
true,
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(h.root.ctx, time.Second*20)
|
||||
defer cancel()
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", pm.Picture, nil)
|
||||
if err == nil {
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < 300 {
|
||||
b := &bytes.Buffer{}
|
||||
io.Copy(b, resp.Body)
|
||||
|
||||
ext := "png"
|
||||
if ft, err := magic.Lookup(b.Bytes()); err == nil {
|
||||
ext = ft.Extension
|
||||
}
|
||||
|
||||
h.AddChild("picture."+ext, h.NewPersistentInode(
|
||||
ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: b.Bytes(),
|
||||
Attr: fuse.Attr{
|
||||
Mtime: uint64(pm.Event.CreatedAt),
|
||||
Mode: 0444,
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if h.GetChild("notes") == nil {
|
||||
h.AddChild(
|
||||
"notes",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{1},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: true,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
createable: true,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("comments") == nil {
|
||||
h.AddChild(
|
||||
"comments",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{1111},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: true,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("photos") == nil {
|
||||
h.AddChild(
|
||||
"photos",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{20},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: true,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("videos") == nil {
|
||||
h.AddChild(
|
||||
"videos",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{21, 22},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: false,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("highlights") == nil {
|
||||
h.AddChild(
|
||||
"highlights",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{9802},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: false,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("articles") == nil {
|
||||
h.AddChild(
|
||||
"articles",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{30023},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: false,
|
||||
relays: relays,
|
||||
replaceable: true,
|
||||
createable: true,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("wiki") == nil {
|
||||
h.AddChild(
|
||||
"wiki",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{30818},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: false,
|
||||
relays: relays,
|
||||
replaceable: true,
|
||||
createable: true,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
}
|
||||
130 nostrfs/root.go
@@ -1,130 +0,0 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip05"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/sdk"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
AutoPublishNotesTimeout time.Duration
|
||||
AutoPublishArticlesTimeout time.Duration
|
||||
}
|
||||
|
||||
type NostrRoot struct {
|
||||
fs.Inode
|
||||
|
||||
ctx context.Context
|
||||
wd string
|
||||
sys *sdk.System
|
||||
rootPubKey nostr.PubKey
|
||||
signer nostr.Signer
|
||||
|
||||
opts Options
|
||||
}
|
||||
|
||||
var _ = (fs.NodeOnAdder)((*NostrRoot)(nil))
|
||||
|
||||
func NewNostrRoot(ctx context.Context, sys *sdk.System, user nostr.User, mountpoint string, o Options) *NostrRoot {
|
||||
pubkey, _ := user.GetPublicKey(ctx)
|
||||
abs, _ := filepath.Abs(mountpoint)
|
||||
|
||||
var signer nostr.Signer
|
||||
if user != nil {
|
||||
signer, _ = user.(nostr.Signer)
|
||||
}
|
||||
|
||||
return &NostrRoot{
|
||||
ctx: ctx,
|
||||
sys: sys,
|
||||
rootPubKey: pubkey,
|
||||
signer: signer,
|
||||
wd: abs,
|
||||
|
||||
opts: o,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *NostrRoot) OnAdd(_ context.Context) {
|
||||
if r.rootPubKey == nostr.ZeroPK {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
|
||||
// add our contacts
|
||||
fl := r.sys.FetchFollowList(r.ctx, r.rootPubKey)
|
||||
for _, f := range fl.Items {
|
||||
pointer := nostr.ProfilePointer{PublicKey: f.Pubkey, Relays: []string{f.Relay}}
|
||||
r.AddChild(
|
||||
nip19.EncodeNpub(f.Pubkey),
|
||||
r.CreateNpubDir(r, pointer, nil),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
// add ourselves
|
||||
npub := nip19.EncodeNpub(r.rootPubKey)
|
||||
if r.GetChild(npub) == nil {
|
||||
pointer := nostr.ProfilePointer{PublicKey: r.rootPubKey}
|
||||
|
||||
r.AddChild(
|
||||
npub,
|
||||
r.CreateNpubDir(r, pointer, r.signer),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
// add a link to ourselves
|
||||
r.AddChild("@me", r.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{Data: []byte(r.wd + "/" + npub)},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}()
|
||||
}
|
||||
|
||||
func (r *NostrRoot) Lookup(_ context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
|
||||
out.SetEntryTimeout(time.Minute * 5)
|
||||
|
||||
child := r.GetChild(name)
|
||||
if child != nil {
|
||||
return child, fs.OK
|
||||
}
|
||||
|
||||
if pp, err := nip05.QueryIdentifier(r.ctx, name); err == nil {
|
||||
return r.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{Data: []byte(r.wd + "/" + nip19.EncodePointer(*pp))},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), fs.OK
|
||||
}
|
||||
|
||||
pointer, err := nip19.ToPointer(name)
|
||||
if err != nil {
|
||||
return nil, syscall.ENOENT
|
||||
}
|
||||
|
||||
switch p := pointer.(type) {
|
||||
case nostr.ProfilePointer:
|
||||
npubdir := r.CreateNpubDir(r, p, nil)
|
||||
return npubdir, fs.OK
|
||||
case nostr.EventPointer:
|
||||
eventdir, err := r.FetchAndCreateEventDir(r, p)
|
||||
if err != nil {
|
||||
return nil, syscall.ENOENT
|
||||
}
|
||||
return eventdir, fs.OK
|
||||
default:
|
||||
return nil, syscall.ENOENT
|
||||
}
|
||||
}
|
||||
@@ -1,267 +0,0 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
|
||||
"fiatjaf.com/lib/debouncer"
|
||||
"fiatjaf.com/nostr"
|
||||
"github.com/fatih/color"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type ViewDir struct {
|
||||
fs.Inode
|
||||
root *NostrRoot
|
||||
fetched atomic.Bool
|
||||
filter nostr.Filter
|
||||
paginate bool
|
||||
relays []string
|
||||
replaceable bool
|
||||
createable bool
|
||||
publisher *debouncer.Debouncer
|
||||
publishing struct {
|
||||
note string
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
_ = (fs.NodeOpendirer)((*ViewDir)(nil))
|
||||
_ = (fs.NodeGetattrer)((*ViewDir)(nil))
|
||||
_ = (fs.NodeMkdirer)((*ViewDir)(nil))
|
||||
_ = (fs.NodeSetattrer)((*ViewDir)(nil))
|
||||
_ = (fs.NodeCreater)((*ViewDir)(nil))
|
||||
_ = (fs.NodeUnlinker)((*ViewDir)(nil))
|
||||
)
|
||||
|
||||
func (f *ViewDir) Setattr(_ context.Context, _ fs.FileHandle, _ *fuse.SetAttrIn, _ *fuse.AttrOut) syscall.Errno {
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (n *ViewDir) Create(
|
||||
_ context.Context,
|
||||
name string,
|
||||
flags uint32,
|
||||
mode uint32,
|
||||
out *fuse.EntryOut,
|
||||
) (node *fs.Inode, fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
|
||||
if !n.createable || n.root.rootPubKey != n.filter.Authors[0] {
|
||||
return nil, nil, 0, syscall.EPERM
|
||||
}
|
||||
if n.publisher == nil {
|
||||
n.publisher = debouncer.New(n.root.opts.AutoPublishNotesTimeout)
|
||||
}
|
||||
if n.filter.Kinds[0] != 1 {
|
||||
return nil, nil, 0, syscall.ENOTSUP
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "new":
|
||||
log := n.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
|
||||
if n.publisher.IsRunning() {
|
||||
log("pending note updated, timer reset.")
|
||||
} else {
|
||||
log("new note detected")
|
||||
if n.root.opts.AutoPublishNotesTimeout.Hours() < 24*365 {
|
||||
log(", publishing it in %d seconds...\n", int(n.root.opts.AutoPublishNotesTimeout.Seconds()))
|
||||
} else {
|
||||
log(".\n")
|
||||
}
|
||||
log("- `touch publish` to publish immediately\n")
|
||||
log("- `rm new` to erase and cancel the publication.\n")
|
||||
}
|
||||
|
||||
n.publisher.Call(n.publishNote)
|
||||
|
||||
first := true
|
||||
|
||||
return n.NewPersistentInode(
|
||||
n.root.ctx,
|
||||
n.root.NewWriteableFile(n.publishing.note, uint64(nostr.Now()), uint64(nostr.Now()), func(s string) {
|
||||
if !first {
|
||||
log("pending note updated, timer reset.\n")
|
||||
}
|
||||
first = false
|
||||
n.publishing.note = strings.TrimSpace(s)
|
||||
n.publisher.Call(n.publishNote)
|
||||
}),
|
||||
fs.StableAttr{},
|
||||
), nil, 0, fs.OK
|
||||
case "publish":
|
||||
if n.publisher.IsRunning() {
|
||||
// this causes the publish process to be triggered faster
|
||||
log := n.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
log("publishing now!\n")
|
||||
n.publisher.Flush()
|
||||
return nil, nil, 0, syscall.ENOTDIR
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil, 0, syscall.ENOTSUP
|
||||
}
|
||||
|
||||
func (n *ViewDir) Unlink(ctx context.Context, name string) syscall.Errno {
|
||||
if !n.createable || n.root.rootPubKey != n.filter.Authors[0] {
|
||||
return syscall.EPERM
|
||||
}
|
||||
if n.publisher == nil {
|
||||
n.publisher = debouncer.New(n.root.opts.AutoPublishNotesTimeout)
|
||||
}
|
||||
if n.filter.Kinds[0] != 1 {
|
||||
return syscall.ENOTSUP
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "new":
|
||||
log := n.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
log("publishing canceled.\n")
|
||||
n.publisher.Stop()
|
||||
n.publishing.note = ""
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
return syscall.ENOTSUP
|
||||
}
|
||||
|
||||
func (n *ViewDir) publishNote() {
|
||||
log := n.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
|
||||
log("publishing note...\n")
|
||||
evt := nostr.Event{
|
||||
Kind: 1,
|
||||
CreatedAt: nostr.Now(),
|
||||
Content: n.publishing.note,
|
||||
Tags: make(nostr.Tags, 0, 2),
|
||||
}
|
||||
|
||||
// our write relays
|
||||
relays := n.root.sys.FetchWriteRelays(n.root.ctx, n.root.rootPubKey)
|
||||
if len(relays) == 0 {
|
||||
relays = n.root.sys.FetchOutboxRelays(n.root.ctx, n.root.rootPubKey, 6)
|
||||
}
|
||||
|
||||
// massage and extract tags from raw text
|
||||
targetRelays := n.root.sys.PrepareNoteEvent(n.root.ctx, &evt)
|
||||
relays = nostr.AppendUnique(relays, targetRelays...)
|
||||
|
||||
// sign and publish
|
||||
if err := n.root.signer.SignEvent(n.root.ctx, &evt); err != nil {
|
||||
log("failed to sign: %s\n", err)
|
||||
return
|
||||
}
|
||||
log(evt.String() + "\n")
|
||||
|
||||
log("publishing to %d relays... ", len(relays))
|
||||
success := false
|
||||
first := true
|
||||
for res := range n.root.sys.Pool.PublishMany(n.root.ctx, relays, evt) {
|
||||
cleanUrl, _ := strings.CutPrefix(res.RelayURL, "wss://")
|
||||
if !first {
|
||||
log(", ")
|
||||
}
|
||||
first = false
|
||||
|
||||
if res.Error != nil {
|
||||
log("%s: %s", color.RedString(cleanUrl), res.Error)
|
||||
} else {
|
||||
success = true
|
||||
log("%s: ok", color.GreenString(cleanUrl))
|
||||
}
|
||||
}
|
||||
log("\n")
|
||||
|
||||
if success {
|
||||
n.RmChild("new")
|
||||
n.AddChild(evt.ID.Hex(), n.root.CreateEventDir(n, &evt), true)
|
||||
log("event published as %s and updated locally.\n", color.BlueString(evt.ID.Hex()))
|
||||
}
|
||||
}
|
||||
|
||||
func (n *ViewDir) Getattr(_ context.Context, f fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
now := nostr.Now()
|
||||
if n.filter.Until != 0 {
|
||||
now = n.filter.Until
|
||||
}
|
||||
aMonthAgo := now - 30*24*60*60
|
||||
out.Mtime = uint64(aMonthAgo)
|
||||
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (n *ViewDir) Opendir(ctx context.Context) syscall.Errno {
|
||||
if n.fetched.CompareAndSwap(true, true) {
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
if n.paginate {
|
||||
now := nostr.Now()
|
||||
if n.filter.Until != 0 {
|
||||
now = n.filter.Until
|
||||
}
|
||||
aMonthAgo := now - 30*24*60*60
|
||||
n.filter.Since = aMonthAgo
|
||||
|
||||
filter := n.filter
|
||||
filter.Until = aMonthAgo
|
||||
|
||||
n.AddChild("@previous", n.NewPersistentInode(
|
||||
n.root.ctx,
|
||||
&ViewDir{
|
||||
root: n.root,
|
||||
filter: filter,
|
||||
relays: n.relays,
|
||||
replaceable: n.replaceable,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
), true)
|
||||
}
|
||||
|
||||
if n.replaceable {
|
||||
for rkey, evt := range n.root.sys.Pool.FetchManyReplaceable(n.root.ctx, n.relays, n.filter, nostr.SubscriptionOptions{
|
||||
Label: "nakfs",
|
||||
}).Range {
|
||||
name := rkey.D
|
||||
if name == "" {
|
||||
name = "_"
|
||||
}
|
||||
if n.GetChild(name) == nil {
|
||||
n.AddChild(name, n.root.CreateEntityDir(n, &evt), true)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for ie := range n.root.sys.Pool.FetchMany(n.root.ctx, n.relays, n.filter,
|
||||
nostr.SubscriptionOptions{
|
||||
Label: "nakfs",
|
||||
}) {
|
||||
if n.GetChild(ie.Event.ID.Hex()) == nil {
|
||||
n.AddChild(ie.Event.ID.Hex(), n.root.CreateEventDir(n, &ie.Event), true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (n *ViewDir) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
|
||||
if !n.createable || n.root.signer == nil || n.root.rootPubKey != n.filter.Authors[0] {
|
||||
return nil, syscall.ENOTSUP
|
||||
}
|
||||
|
||||
if n.replaceable {
|
||||
// create a template event that can later be modified and published as new
|
||||
return n.root.CreateEntityDir(n, &nostr.Event{
|
||||
PubKey: n.root.rootPubKey,
|
||||
CreatedAt: 0,
|
||||
Kind: n.filter.Kinds[0],
|
||||
Tags: nostr.Tags{
|
||||
nostr.Tag{"d", name},
|
||||
},
|
||||
}), fs.OK
|
||||
}
|
||||
|
||||
return nil, syscall.ENOTSUP
|
||||
}
|
||||
@@ -1,93 +0,0 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type WriteableFile struct {
|
||||
fs.Inode
|
||||
root *NostrRoot
|
||||
mu sync.Mutex
|
||||
data []byte
|
||||
attr fuse.Attr
|
||||
onWrite func(string)
|
||||
}
|
||||
|
||||
var (
|
||||
_ = (fs.NodeOpener)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeReader)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeWriter)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeGetattrer)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeSetattrer)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeFlusher)((*WriteableFile)(nil))
|
||||
)
|
||||
|
||||
func (r *NostrRoot) NewWriteableFile(data string, ctime, mtime uint64, onWrite func(string)) *WriteableFile {
|
||||
return &WriteableFile{
|
||||
root: r,
|
||||
data: []byte(data),
|
||||
attr: fuse.Attr{
|
||||
Mode: 0666,
|
||||
Ctime: ctime,
|
||||
Mtime: mtime,
|
||||
Size: uint64(len(data)),
|
||||
},
|
||||
onWrite: onWrite,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Open(ctx context.Context, flags uint32) (fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
|
||||
return nil, fuse.FOPEN_KEEP_CACHE, fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Write(ctx context.Context, fh fs.FileHandle, data []byte, off int64) (uint32, syscall.Errno) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
offset := int(off)
|
||||
end := offset + len(data)
|
||||
if len(f.data) < end {
|
||||
newData := make([]byte, offset+len(data))
|
||||
copy(newData, f.data)
|
||||
f.data = newData
|
||||
}
|
||||
copy(f.data[offset:], data)
|
||||
f.data = f.data[0:end]
|
||||
|
||||
f.onWrite(string(f.data))
|
||||
return uint32(len(data)), fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Getattr(ctx context.Context, fh fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
out.Attr = f.attr
|
||||
out.Attr.Size = uint64(len(f.data))
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Setattr(_ context.Context, _ fs.FileHandle, in *fuse.SetAttrIn, _ *fuse.AttrOut) syscall.Errno {
|
||||
f.attr.Mtime = in.Mtime
|
||||
f.attr.Atime = in.Atime
|
||||
f.attr.Ctime = in.Ctime
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Flush(ctx context.Context, fh fs.FileHandle) syscall.Errno {
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Read(ctx context.Context, fh fs.FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
end := int(off) + len(dest)
|
||||
if end > len(f.data) {
|
||||
end = len(f.data)
|
||||
}
|
||||
return fuse.ReadResultData(f.data[off:end]), fs.OK
|
||||
}
|
||||
61 outbox.go

@@ -3,80 +3,21 @@ package main
import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"fiatjaf.com/nostr/sdk"
	"fiatjaf.com/nostr/sdk/hints/bbolth"
	"github.com/fatih/color"
	"github.com/urfave/cli/v3"
)

var (
	hintsFilePath   string
	hintsFileExists bool
)

func initializeOutboxHintsDB(c *cli.Command, sys *sdk.System) error {
	configPath := c.String("config-path")
	if configPath != "" {
		hintsFilePath = filepath.Join(configPath, "outbox/hints.db")
	}
	if hintsFilePath != "" {
		if _, err := os.Stat(hintsFilePath); err == nil {
			hintsFileExists = true
		} else if !os.IsNotExist(err) {
			return err
		}
	}
	if hintsFileExists && hintsFilePath != "" {
		hintsdb, err := bbolth.NewBoltHints(hintsFilePath)
		if err == nil {
			sys.Hints = hintsdb
		}
	}

	return nil
}

var outbox = &cli.Command{
var outboxCmd = &cli.Command{
	Name:                      "outbox",
	Usage:                     "manage outbox relay hints database",
	DisableSliceFlagSeparator: true,
	Commands: []*cli.Command{
		{
			Name:                      "init",
			Usage:                     "initialize the outbox hints database",
			DisableSliceFlagSeparator: true,
			Action: func(ctx context.Context, c *cli.Command) error {
				if hintsFileExists {
					return nil
				}
				if hintsFilePath == "" {
					return fmt.Errorf("couldn't find a place to store the hints, pass --config-path to fix.")
				}

				os.MkdirAll(hintsFilePath, 0755)
				_, err := bbolth.NewBoltHints(hintsFilePath)
				if err != nil {
					return fmt.Errorf("failed to create bolt hints db at '%s': %w", hintsFilePath, err)
				}

				log("initialized hints database at %s\n", hintsFilePath)
				return nil
			},
		},
		{
			Name:                      "list",
			Usage:                     "list outbox relays for a given pubkey",
			ArgsUsage:                 "<pubkey>",
			DisableSliceFlagSeparator: true,
			Action: func(ctx context.Context, c *cli.Command) error {
				if !hintsFileExists {
					log(color.YellowString("running with temporary fragile data.\n"))
					log(color.YellowString("call `nak outbox init` to setup persistence.\n"))
				}

				if c.Args().Len() != 1 {
					return fmt.Errorf("expected exactly one argument (pubkey)")
				}
237 req.go
@@ -9,6 +9,7 @@ import (
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/eventstore"
|
||||
@@ -77,11 +78,6 @@ example:
|
||||
Name: "paginate-interval",
|
||||
Usage: "time between queries when using --paginate",
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "paginate-global-limit",
|
||||
Usage: "global limit at which --paginate should stop",
|
||||
DefaultText: "uses the value given by --limit/-l or infinite",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "bare",
|
||||
Usage: "when printing the filter, print just the filter, not enveloped in a [\"REQ\", ...] array",
|
||||
@@ -96,6 +92,10 @@ example:
|
||||
Usage: "after connecting, for a nip42 \"AUTH\" message to be received, act on it and only then send the \"REQ\"",
|
||||
Category: CATEGORY_SIGNER,
},
&cli.BoolFlag{
Name: "spell",
Usage: "output a spell event (kind 777) instead of a filter",
},
)...,
),
ArgsUsage: "[relay...]",
@@ -115,7 +115,16 @@ example:
return fmt.Errorf("incompatible flags --paginate and --outbox")
}

if c.Bool("bare") && c.Bool("spell") {
return fmt.Errorf("incompatible flags --bare and --spell")
}

relayUrls := c.Args().Slice()

if len(relayUrls) > 0 && (c.Bool("bare") || c.Bool("spell")) {
return fmt.Errorf("relay URLs are incompatible with --bare or --spell")
}

if len(relayUrls) > 0 && !negentropy {
// this is used both for the normal AUTH (after "auth-required:" is received) or forced pre-auth
// connect to all relays we expect to use in this call in parallel
@@ -226,100 +235,29 @@ example:
}
}
} else {
var results chan nostr.RelayEvent
opts := nostr.SubscriptionOptions{
Label: "nak-req",
}

if c.Bool("paginate") {
paginator := sys.Pool.PaginatorWithInterval(c.Duration("paginate-interval"))
results = paginator(ctx, relayUrls, filter, opts)
} else if c.Bool("outbox") {
defs := make([]nostr.DirectedFilter, 0, len(filter.Authors)*2)

// hardcoded relays, if any
for _, relayUrl := range relayUrls {
defs = append(defs, nostr.DirectedFilter{
Filter: filter,
Relay: relayUrl,
})
}

// relays for each pubkey
errg := errgroup.Group{}
errg.SetLimit(16)
mu := sync.Mutex{}
for _, pubkey := range filter.Authors {
errg.Go(func() error {
n := int(c.Uint("outbox-relays-per-pubkey"))
for _, url := range sys.FetchOutboxRelays(ctx, pubkey, n) {
if slices.Contains(relayUrls, url) {
// already hardcoded, ignore
continue
}
if !nostr.IsValidRelayURL(url) {
continue
}

matchUrl := func(def nostr.DirectedFilter) bool { return def.Relay == url }
idx := slices.IndexFunc(defs, matchUrl)
if idx == -1 {
// new relay, add it
mu.Lock()
// check again after locking to prevent races
idx = slices.IndexFunc(defs, matchUrl)
if idx == -1 {
// then add it
filter := filter.Clone()
filter.Authors = []nostr.PubKey{pubkey}
defs = append(defs, nostr.DirectedFilter{
Filter: filter,
Relay: url,
})
mu.Unlock()
continue // done with this relay url
}

// otherwise we'll just use the idx
mu.Unlock()
}

// existing relay, add this pubkey
defs[idx].Authors = append(defs[idx].Authors, pubkey)
}

return nil
})
}
errg.Wait()

if c.Bool("stream") {
results = sys.Pool.BatchedSubscribeMany(ctx, defs, opts)
} else {
results = sys.Pool.BatchedQueryMany(ctx, defs, opts)
}
} else {
if c.Bool("stream") {
results = sys.Pool.SubscribeMany(ctx, relayUrls, filter, opts)
} else {
results = sys.Pool.FetchMany(ctx, relayUrls, filter, opts)
}
}

for ie := range results {
stdout(ie.Event)
}
performReq(ctx, filter, relayUrls, c.Bool("stream"), c.Bool("outbox"), c.Uint("outbox-relays-per-pubkey"), c.Bool("paginate"), c.Duration("paginate-interval"), "nak-req")
}
} else {
// no relays given, will just print the filter
// no relays given, will just print the filter or spell
var result string
if c.Bool("bare") {
if c.Bool("spell") {
// output a spell event instead of a filter
kr, _, err := gatherKeyerFromArguments(ctx, c)
if err != nil {
return err
}
spellEvent := createSpellEvent(ctx, filter, kr)
j, _ := json.Marshal(spellEvent)
result = string(j)
} else if c.Bool("bare") {
// bare filter output
result = filter.String()
} else {
// normal filter
j, _ := json.Marshal(nostr.ReqEnvelope{SubscriptionID: "nak", Filters: []nostr.Filter{filter}})
result = string(j)
}

}
stdout(result)
}
}
@@ -329,6 +267,123 @@ example:
},
}

func performReq(
ctx context.Context,
filter nostr.Filter,
relayUrls []string,
stream bool,
outbox bool,
outboxRelaysPerPubKey uint64,
paginate bool,
paginateInterval time.Duration,
label string,
) {
var results chan nostr.RelayEvent
var closeds chan nostr.RelayClosed

opts := nostr.SubscriptionOptions{
Label: label,
}

if paginate {
paginator := sys.Pool.PaginatorWithInterval(paginateInterval)
results = paginator(ctx, relayUrls, filter, opts)
} else if outbox {
defs := make([]nostr.DirectedFilter, 0, len(filter.Authors)*2)

for _, relayUrl := range relayUrls {
defs = append(defs, nostr.DirectedFilter{
Filter: filter,
Relay: relayUrl,
})
}

// relays for each pubkey
errg := errgroup.Group{}
errg.SetLimit(16)
mu := sync.Mutex{}
logverbose("gathering outbox relays for %d authors...\n", len(filter.Authors))
for _, pubkey := range filter.Authors {
errg.Go(func() error {
n := int(outboxRelaysPerPubKey)
for _, url := range sys.FetchOutboxRelays(ctx, pubkey, n) {
if slices.Contains(relayUrls, url) {
// already specified globally, ignore
continue
}
if !nostr.IsValidRelayURL(url) {
continue
}

matchUrl := func(def nostr.DirectedFilter) bool { return def.Relay == url }
idx := slices.IndexFunc(defs, matchUrl)
if idx == -1 {
// new relay, add it
mu.Lock()
// check again after locking to prevent races
idx = slices.IndexFunc(defs, matchUrl)
if idx == -1 {
// then add it
filter := filter.Clone()
filter.Authors = []nostr.PubKey{pubkey}
defs = append(defs, nostr.DirectedFilter{
Filter: filter,
Relay: url,
})
mu.Unlock()
continue // done with this relay url
}

// otherwise we'll just use the idx
mu.Unlock()
}

// existing relay, add this pubkey
defs[idx].Authors = append(defs[idx].Authors, pubkey)
}

return nil
})
}
errg.Wait()

if stream {
logverbose("running subscription with %d directed filters...\n", len(defs))
results, closeds = sys.Pool.BatchedSubscribeManyNotifyClosed(ctx, defs, opts)
} else {
logverbose("running query with %d directed filters...\n", len(defs))
results, closeds = sys.Pool.BatchedQueryManyNotifyClosed(ctx, defs, opts)
}
} else {
if stream {
logverbose("running subscription to %d relays...\n", len(relayUrls))
results, closeds = sys.Pool.SubscribeManyNotifyClosed(ctx, relayUrls, filter, opts)
} else {
logverbose("running query to %d relays...\n", len(relayUrls))
results, closeds = sys.Pool.FetchManyNotifyClosed(ctx, relayUrls, filter, opts)
}
}

readevents:
for {
select {
case ie, ok := <-results:
if !ok {
break readevents
}
stdout(ie.Event)
case closed := <-closeds:
if closed.HandledAuth {
logverbose("%s CLOSED: %s\n", closed.Relay.URL, closed.Reason)
} else {
log("%s CLOSED: %s\n", closed.Relay.URL, closed.Reason)
}
case <-ctx.Done():
break readevents
}
}
}

var reqFilterFlags = []cli.Flag{
&PubKeySliceFlag{
Name: "author",
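
The extracted performReq helper above takes a long positional parameter list that is easy to misread, so here is a commented, illustrative call; the relay URL is a placeholder and the flag values are made up, but the shape mirrors the two real call sites ("nak-req" above and "nak-spell" in spell.go below):

// illustrative only, not part of the change
performReq(ctx, filter,
[]string{"wss://relay.example.com"}, // relayUrls: hardcoded relays (may be empty when outbox is used)
true, // stream: keep the subscription open instead of stopping at EOSE
false, // outbox: expand filter.Authors into per-author relay hints
3, // outboxRelaysPerPubKey: only consulted when outbox is true
false, 0, // paginate and paginateInterval, as driven by `nak req --paginate`
"nak-req") // label attached to the subscription for logging
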
17 serve.go
@@ -51,6 +51,12 @@ var serve = &cli.Command{
|
||||
Name: "grasp",
|
||||
Usage: "enable grasp server",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "grasp-path",
|
||||
Usage: "where to store the repositories",
|
||||
TakesFile: true,
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "blossom",
|
||||
Usage: "enable blossom server",
|
||||
@@ -135,10 +141,13 @@ var serve = &cli.Command{
|
||||
}
|
||||
|
||||
if c.Bool("grasp") {
|
||||
var err error
|
||||
repoDir, err = os.MkdirTemp("", "nak-serve-grasp-repos-")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create grasp repos directory: %w", err)
|
||||
repoDir = c.String("grasp-path")
|
||||
if repoDir == "" {
|
||||
var err error
|
||||
repoDir, err = os.MkdirTemp("", "nak-serve-grasp-repos-")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create grasp repos directory: %w", err)
|
||||
}
|
||||
}
|
||||
g := grasp.New(rl, repoDir)
|
||||
g.OnRead = func(ctx context.Context, pubkey nostr.PubKey, repo string) (reject bool, reason string) {
|
||||
|
||||
547 spell.go Normal file
@@ -0,0 +1,547 @@
package main

import (
"bufio"
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"

"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip19"
"fiatjaf.com/nostr/sdk/hints"
"github.com/fatih/color"
"github.com/markusmobius/go-dateparser"
"github.com/urfave/cli/v3"
)

var spell = &cli.Command{
Name: "spell",
Usage: "downloads a spell event and executes its REQ request",
ArgsUsage: "[nevent_code]",
Description: `fetches a spell event (kind 777) and executes the REQ command encoded in its tags.`,
Flags: append(defaultKeyFlags,
&cli.StringFlag{
Name: "pub",
Usage: "public key to run spells in the context of (if you don't want to pass a --sec)",
},
&cli.UintFlag{
Name: "outbox-relays-per-pubkey",
Aliases: []string{"n"},
Usage: "number of outbox relays to use for each pubkey",
Value: 3,
},
),
Action: func(ctx context.Context, c *cli.Command) error {
configPath := c.String("config-path")
os.MkdirAll(filepath.Join(configPath, "spells"), 0755)

// load history from file
var history []SpellHistoryEntry
historyPath := filepath.Join(configPath, "spells/history")
file, err := os.Open(historyPath)
if err == nil {
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
var entry SpellHistoryEntry
if err := json.Unmarshal([]byte(scanner.Text()), &entry); err != nil {
continue // skip invalid entries
}
history = append(history, entry)
}
}

if c.Args().Len() == 0 {
// check if we have input from stdin
for stdinEvent := range getJsonsOrBlank() {
if stdinEvent == "{}" {
break
}

var spell nostr.Event
if err := json.Unmarshal([]byte(stdinEvent), &spell); err != nil {
return fmt.Errorf("failed to parse spell event from stdin: %w", err)
}
if spell.Kind != 777 {
return fmt.Errorf("event is not a spell (expected kind 777, got %d)", spell.Kind)
}

return runSpell(ctx, c, historyPath, history, nostr.EventPointer{ID: spell.ID}, spell)
}

// no stdin input, show recent spells
log("recent spells:\n")
for i, entry := range history {
if i >= 10 {
break
}

displayName := entry.Name
if displayName == "" {
displayName = entry.Content
if len(displayName) > 28 {
displayName = displayName[:27] + "…"
}
}
if displayName != "" {
displayName = color.HiMagentaString(displayName) + ": "
}

desc := entry.Content
if len(desc) > 50 {
desc = desc[0:49] + "…"
}

lastUsed := entry.LastUsed.Format("2006-01-02 15:04")
stdout(fmt.Sprintf(" %s %s%s - %s",
color.BlueString(entry.Identifier),
displayName,
color.YellowString(lastUsed),
desc,
))
}

return nil
}

// decode nevent to get the spell event
var pointer nostr.EventPointer
identifier := c.Args().First()
prefix, value, err := nip19.Decode(identifier)
if err == nil {
if prefix != "nevent" {
return fmt.Errorf("expected nevent code, got %s", prefix)
}
pointer = value.(nostr.EventPointer)
} else {
// search our history
for _, entry := range history {
if entry.Identifier == identifier || entry.Name == identifier {
pointer = entry.Pointer
break
}
}
}

if pointer.ID == nostr.ZeroID {
return fmt.Errorf("invalid spell reference")
}

// first try to fetch spell from sys.Store
var spell nostr.Event
found := false
for evt := range sys.Store.QueryEvents(nostr.Filter{IDs: []nostr.ID{pointer.ID}}, 1) {
spell = evt
found = true
break
}

var relays []string
if !found {
// if not found in store, fetch from external relays
relays = pointer.Relays
if pointer.Author != nostr.ZeroPK {
for _, url := range relays {
sys.Hints.Save(pointer.Author, nostr.NormalizeURL(url), hints.LastInHint, nostr.Now())
}
relays = append(relays, sys.FetchOutboxRelays(ctx, pointer.Author, 3)...)
}
result := sys.Pool.QuerySingle(ctx, relays, nostr.Filter{IDs: []nostr.ID{pointer.ID}},
nostr.SubscriptionOptions{Label: "nak-spell-f"})
if result == nil {
return fmt.Errorf("spell event not found")
}
spell = result.Event
}
if spell.Kind != 777 {
return fmt.Errorf("event is not a spell (expected kind 777, got %d)", spell.Kind)
}

return runSpell(ctx, c, historyPath, history, pointer, spell)
},
}

func runSpell(
ctx context.Context,
c *cli.Command,
historyPath string,
history []SpellHistoryEntry,
pointer nostr.EventPointer,
spell nostr.Event,
) error {
// parse spell tags to build REQ filter
spellFilter, err := buildSpellReq(ctx, c, spell.Tags)
if err != nil {
return fmt.Errorf("failed to parse spell tags: %w", err)
}

// determine relays to query
var spellRelays []string
var outbox bool
relaysTag := spell.Tags.Find("relays")
if relaysTag == nil {
// if this tag doesn't exist assume $outbox
relaysTag = nostr.Tag{"relays", "$outbox"}
}
for i := 1; i < len(relaysTag); i++ {
switch relaysTag[i] {
case "$outbox":
outbox = true
default:
spellRelays = append(spellRelays, relaysTag[i])
}
}

stream := !spell.Tags.Has("close-on-eose")

// fill in the author if we didn't have it
pointer.Author = spell.PubKey

// save spell to sys.Store
if err := sys.Store.SaveEvent(spell); err != nil {
logverbose("failed to save spell to store: %v\n", err)
}

// add to history before execution
{
idStr := nip19.EncodeNevent(spell.ID, nil, nostr.ZeroPK)
identifier := "spell" + idStr[len(idStr)-7:]
nameTag := spell.Tags.Find("name")
var name string
if nameTag != nil {
name = nameTag[1]
}
if len(history) > 100 {
history = history[:100]
}
// write back to file
file, err := os.Create(historyPath)
if err != nil {
return err
}
data, _ := json.Marshal(SpellHistoryEntry{
Identifier: identifier,
Name: name,
Content: spell.Content,
LastUsed: time.Now(),
Pointer: pointer,
})
file.Write(data)
file.Write([]byte{'\n'})
for i, entry := range history {
if entry.Identifier == identifier {
continue
}

data, _ := json.Marshal(entry)
file.Write(data)
file.Write([]byte{'\n'})

// limit history size (keep last 100)
if i == 100 {
break
}
}
file.Close()

logverbose("executing %s: %s relays=%v outbox=%v stream=%v\n",
identifier, spellFilter, spellRelays, outbox, stream)
}

// execute
logSpellDetails(spell)
performReq(ctx, spellFilter, spellRelays, stream, outbox, c.Uint("outbox-relays-per-pubkey"), false, 0, "nak-spell")

return nil
}

func buildSpellReq(ctx context.Context, c *cli.Command, tags nostr.Tags) (nostr.Filter, error) {
filter := nostr.Filter{}

getMe := func() (nostr.PubKey, error) {
if !c.IsSet("sec") && !c.IsSet("prompt-sec") && c.IsSet("pub") {
return parsePubKey(c.String("pub"))
}

kr, _, err := gatherKeyerFromArguments(ctx, c)
if err != nil {
return nostr.ZeroPK, fmt.Errorf("failed to get keyer: %w", err)
}

pubkey, err := kr.GetPublicKey(ctx)
if err != nil {
return nostr.ZeroPK, fmt.Errorf("failed to get public key from keyer: %w", err)
}

return pubkey, nil
}

for _, tag := range tags {
if len(tag) == 0 {
continue
}

switch tag[0] {
case "cmd":
if len(tag) < 2 || tag[1] != "REQ" {
return nostr.Filter{}, fmt.Errorf("only REQ commands are supported")
}

case "k":
for i := 1; i < len(tag); i++ {
if kind, err := strconv.Atoi(tag[i]); err == nil {
filter.Kinds = append(filter.Kinds, nostr.Kind(kind))
}
}

case "authors":
for i := 1; i < len(tag); i++ {
switch tag[i] {
case "$me":
me, err := getMe()
if err != nil {
return nostr.Filter{}, err
}
filter.Authors = append(filter.Authors, me)
case "$contacts":
me, err := getMe()
if err != nil {
return nostr.Filter{}, err
}
for _, f := range sys.FetchFollowList(ctx, me).Items {
filter.Authors = append(filter.Authors, f.Pubkey)
}
default:
pubkey, err := nostr.PubKeyFromHex(tag[i])
if err != nil {
return nostr.Filter{}, fmt.Errorf("invalid pubkey '%s' in 'authors': %w", tag[i], err)
}
filter.Authors = append(filter.Authors, pubkey)
}
}

case "ids":
for i := 1; i < len(tag); i++ {
id, err := nostr.IDFromHex(tag[i])
if err != nil {
return nostr.Filter{}, fmt.Errorf("invalid id '%s' in 'authors': %w", tag[i], err)
|
||||
}
filter.IDs = append(filter.IDs, id)
}

case "tag":
if len(tag) < 3 {
continue
}
tagName := tag[1]
if filter.Tags == nil {
filter.Tags = make(nostr.TagMap)
}
for i := 2; i < len(tag); i++ {
switch tag[i] {
case "$me":
me, err := getMe()
if err != nil {
return nostr.Filter{}, err
}
filter.Tags[tagName] = append(filter.Tags[tagName], me.Hex())
case "$contacts":
me, err := getMe()
if err != nil {
return nostr.Filter{}, err
}
for _, f := range sys.FetchFollowList(ctx, me).Items {
filter.Tags[tagName] = append(filter.Tags[tagName], f.Pubkey.Hex())
}
default:
filter.Tags[tagName] = append(filter.Tags[tagName], tag[i])
}
}

case "limit":
if len(tag) >= 2 {
if limit, err := strconv.Atoi(tag[1]); err == nil {
filter.Limit = limit
}
}

case "since":
if len(tag) >= 2 {
date, err := dateparser.Parse(&dateparser.Configuration{
DefaultTimezone: time.Local,
CurrentTime: time.Now(),
}, tag[1])
if err != nil {
return nostr.Filter{}, fmt.Errorf("invalid date %s: %w", tag[1], err)
}
filter.Since = nostr.Timestamp(date.Time.Unix())
}

case "until":
if len(tag) >= 2 {
date, err := dateparser.Parse(&dateparser.Configuration{
DefaultTimezone: time.Local,
CurrentTime: time.Now(),
}, tag[1])
if err != nil {
return nostr.Filter{}, fmt.Errorf("invalid date %s: %w", tag[1], err)
}
filter.Until = nostr.Timestamp(date.Time.Unix())
}

case "search":
if len(tag) >= 2 {
filter.Search = tag[1]
}
}
}

return filter, nil
}

func parseRelativeTime(timeStr string) (nostr.Timestamp, error) {
// Handle special cases
switch timeStr {
case "now":
return nostr.Now(), nil
}

// Try to parse as relative time (e.g., "7d", "1h", "30m")
if strings.HasSuffix(timeStr, "d") {
days := strings.TrimSuffix(timeStr, "d")
if daysInt, err := strconv.Atoi(days); err == nil {
return nostr.Now() - nostr.Timestamp(daysInt*24*60*60), nil
}
} else if strings.HasSuffix(timeStr, "h") {
hours := strings.TrimSuffix(timeStr, "h")
if hoursInt, err := strconv.Atoi(hours); err == nil {
return nostr.Now() - nostr.Timestamp(hoursInt*60*60), nil
}
} else if strings.HasSuffix(timeStr, "m") {
minutes := strings.TrimSuffix(timeStr, "m")
if minutesInt, err := strconv.Atoi(minutes); err == nil {
return nostr.Now() - nostr.Timestamp(minutesInt*60), nil
}
}

// try to parse as direct timestamp
if ts, err := strconv.ParseInt(timeStr, 10, 64); err == nil {
return nostr.Timestamp(ts), nil
}

return 0, fmt.Errorf("invalid time format: %s", timeStr)
}
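
To make the accepted time formats concrete, a small illustrative use of parseRelativeTime (values arbitrary, errors ignored for brevity):

// illustrative only
sevenDaysAgo, _ := parseRelativeTime("7d")         // now minus 7*24*60*60 seconds
rightNow, _ := parseRelativeTime("now")            // the current timestamp
exact, _ := parseRelativeTime("1700000000")        // bare numbers are taken as unix timestamps
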
type SpellHistoryEntry struct {
Identifier string `json:"_id"`
Name string `json:"name,omitempty"`
Content string `json:"content,omitempty"`
LastUsed time.Time `json:"last_used"`
Pointer nostr.EventPointer `json:"pointer"`
}

func logSpellDetails(spell nostr.Event) {
nameTag := spell.Tags.Find("name")
name := ""
if nameTag != nil {
name = nameTag[1]
if len(name) > 28 {
name = name[:27] + "…"
}
}
if name != "" {
name = ": " + color.HiMagentaString(name)
}

desc := spell.Content
if len(desc) > 50 {
desc = desc[0:49] + "…"
}

idStr := nip19.EncodeNevent(spell.ID, nil, nostr.ZeroPK)
identifier := "spell" + idStr[len(idStr)-7:]

log("running %s%s - %s\n",
color.BlueString(identifier),
name,
desc,
)
}

func createSpellEvent(ctx context.Context, filter nostr.Filter, kr nostr.Keyer) nostr.Event {
spell := nostr.Event{
Kind: 777,
Tags: make(nostr.Tags, 0),
}

// add cmd tag
spell.Tags = append(spell.Tags, nostr.Tag{"cmd", "REQ"})

// add kinds
if len(filter.Kinds) > 0 {
kindTag := nostr.Tag{"k"}
for _, kind := range filter.Kinds {
kindTag = append(kindTag, strconv.Itoa(int(kind)))
}
spell.Tags = append(spell.Tags, kindTag)
}

// add authors
if len(filter.Authors) > 0 {
authorsTag := nostr.Tag{"authors"}
for _, author := range filter.Authors {
authorsTag = append(authorsTag, author.Hex())
}
spell.Tags = append(spell.Tags, authorsTag)
}

// add ids
if len(filter.IDs) > 0 {
idsTag := nostr.Tag{"ids"}
for _, id := range filter.IDs {
idsTag = append(idsTag, id.Hex())
}
spell.Tags = append(spell.Tags, idsTag)
}

// add tags
for tagName, values := range filter.Tags {
if len(values) > 0 {
tag := nostr.Tag{"tag", tagName}
for _, value := range values {
tag = append(tag, value)
}
spell.Tags = append(spell.Tags, tag)
}
}

// add limit
if filter.Limit > 0 {
spell.Tags = append(spell.Tags, nostr.Tag{"limit", strconv.Itoa(filter.Limit)})
}

// add since
if filter.Since > 0 {
spell.Tags = append(spell.Tags, nostr.Tag{"since", strconv.FormatInt(int64(filter.Since), 10)})
}

// add until
if filter.Until > 0 {
spell.Tags = append(spell.Tags, nostr.Tag{"until", strconv.FormatInt(int64(filter.Until), 10)})
}

// add search
if filter.Search != "" {
spell.Tags = append(spell.Tags, nostr.Tag{"search", filter.Search})
}

if err := kr.SignEvent(ctx, &spell); err != nil {
log("failed to sign spell: %s\n", err)
}

return spell
}
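
To make the tag vocabulary above concrete, this is a hypothetical spell event of the shape createSpellEvent produces and buildSpellReq/runSpell consume; the name, content and values are invented, and id, pubkey and sig are omitted:

// a made-up spell: notes from my contacts, streamed from their outbox relays
exampleSpell := nostr.Event{
Kind: 777,
Content: "timeline of people I follow",
Tags: nostr.Tags{
{"cmd", "REQ"},            // the only supported command
{"name", "my feed"},       // optional, used for display and history lookup
{"k", "1"},                // kinds
{"authors", "$contacts"},  // "$me" and raw hex pubkeys also work
{"limit", "50"},
{"relays", "$outbox"},     // also the default when the tag is missing
// no "close-on-eose" tag, so the subscription stays open
},
}
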
464 sync.go Normal file
@@ -0,0 +1,464 @@
package main

import (
"bytes"
"context"
"errors"
"fmt"
"sync"

"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip77"
"fiatjaf.com/nostr/nip77/negentropy"
"fiatjaf.com/nostr/nip77/negentropy/storage"
"github.com/urfave/cli/v3"
)

var syncCmd = &cli.Command{
Name: "sync",
Usage: "sync events between two relays using negentropy",
Description: `uses nip77 negentropy to sync events between two relays`,
ArgsUsage: "<relay1> <relay2>",
Flags: reqFilterFlags,
Action: func(ctx context.Context, c *cli.Command) error {
args := c.Args().Slice()
if len(args) != 2 {
return fmt.Errorf("need exactly two relay URLs: source and target")
}

filter := nostr.Filter{}
if err := applyFlagsToFilter(c, &filter); err != nil {
return err
}

peerA, err := NewRelayThirdPartyRemote(ctx, args[0])
if err != nil {
return fmt.Errorf("error setting up %s: %w", args[0], err)
}

peerB, err := NewRelayThirdPartyRemote(ctx, args[1])
if err != nil {
return fmt.Errorf("error setting up %s: %w", args[1], err)
}

tpn := NewThirdPartyNegentropy(
peerA,
peerB,
filter,
)

wg := sync.WaitGroup{}

wg.Go(func() {
err = tpn.Run(ctx)
})

wg.Go(func() {
type op struct {
src *nostr.Relay
dst *nostr.Relay
ids []nostr.ID
}

pending := []op{
{peerA.relay, peerB.relay, make([]nostr.ID, 0, 30)},
{peerB.relay, peerA.relay, make([]nostr.ID, 0, 30)},
}

for delta := range tpn.Deltas {
have := delta.Have.relay
havenot := delta.HaveNot.relay
logverbose("%s has %s, %s doesn't.\n", have.URL, delta.ID.Hex(), havenot.URL)

idx := 0 // peerA
if have == peerB.relay {
idx = 1 // peerB
}
pending[idx].ids = append(pending[idx].ids, delta.ID)

// every 30 ids do a fetch-and-publish
if len(pending[idx].ids) == 30 {
for evt := range pending[idx].src.QueryEvents(nostr.Filter{IDs: pending[idx].ids}) {
pending[idx].dst.Publish(ctx, evt)
}
pending[idx].ids = pending[idx].ids[:0]
}
}

// do it for the remaining ids
for _, op := range pending {
if len(op.ids) > 0 {
for evt := range op.src.QueryEvents(nostr.Filter{IDs: op.ids}) {
op.dst.Publish(ctx, evt)
}
}
}
})

wg.Wait()

return err
},
}

type ThirdPartyNegentropy struct {
PeerA *RelayThirdPartyRemote
PeerB *RelayThirdPartyRemote
Filter nostr.Filter

Deltas chan Delta
}

type Delta struct {
ID nostr.ID
Have *RelayThirdPartyRemote
HaveNot *RelayThirdPartyRemote
}

type boundKey string

func getBoundKey(b negentropy.Bound) boundKey {
return boundKey(fmt.Sprintf("%d:%x", b.Timestamp, b.IDPrefix))
}

type RelayThirdPartyRemote struct {
relay *nostr.Relay
messages chan string
err error
}

func NewRelayThirdPartyRemote(ctx context.Context, url string) (*RelayThirdPartyRemote, error) {
rtpr := &RelayThirdPartyRemote{
messages: make(chan string, 3),
}

var err error
rtpr.relay, err = nostr.RelayConnect(ctx, url, nostr.RelayOptions{
CustomHandler: func(data string) {
envelope := nip77.ParseNegMessage(data)
if envelope == nil {
return
}
switch env := envelope.(type) {
case *nip77.OpenEnvelope, *nip77.CloseEnvelope:
rtpr.err = fmt.Errorf("unexpected %s received from relay", env.Label())
return
case *nip77.ErrorEnvelope:
rtpr.err = fmt.Errorf("relay returned a %s: %s", env.Label(), env.Reason)
return
case *nip77.MessageEnvelope:
rtpr.messages <- env.Message
}
},
})
if err != nil {
return nil, err
}

return rtpr, nil
}

func (rtpr *RelayThirdPartyRemote) SendInitialMessage(filter nostr.Filter, msg string) error {
msgj, _ := json.Marshal(nip77.OpenEnvelope{
SubscriptionID: "sync3",
Filter: filter,
Message: msg,
})
return rtpr.relay.WriteWithError(msgj)
}

func (rtpr *RelayThirdPartyRemote) SendMessage(msg string) error {
msgj, _ := json.Marshal(nip77.MessageEnvelope{
SubscriptionID: "sync3",
Message: msg,
})
return rtpr.relay.WriteWithError(msgj)
}

func (rtpr *RelayThirdPartyRemote) SendClose() error {
msgj, _ := json.Marshal(nip77.CloseEnvelope{
SubscriptionID: "sync3",
})
return rtpr.relay.WriteWithError(msgj)
}

var thirdPartyRemoteEndOfMessages = errors.New("the-end")

func (rtpr *RelayThirdPartyRemote) Receive() (string, error) {
if rtpr.err != nil {
return "", rtpr.err
}
if msg, ok := <-rtpr.messages; ok {
return msg, nil
}
return "", thirdPartyRemoteEndOfMessages
}

func NewThirdPartyNegentropy(peerA, peerB *RelayThirdPartyRemote, filter nostr.Filter) *ThirdPartyNegentropy {
return &ThirdPartyNegentropy{
PeerA: peerA,
PeerB: peerB,
Filter: filter,
Deltas: make(chan Delta, 100),
}
}

func (n *ThirdPartyNegentropy) Run(ctx context.Context) error {
peerAIds := make(map[nostr.ID]struct{})
peerBIds := make(map[nostr.ID]struct{})
peerASkippedBounds := make(map[boundKey]struct{})
peerBSkippedBounds := make(map[boundKey]struct{})

// send an empty message to A to start things up
initialMsg := createInitialMessage()
err := n.PeerA.SendInitialMessage(n.Filter, initialMsg)
if err != nil {
return err
}

hasSentInitialMessageToB := false

for {
// receive message from A
msgA, err := n.PeerA.Receive()
if err != nil {
return err
}
msgAb, _ := nostr.HexDecodeString(msgA)
if len(msgAb) == 1 {
break
}

msgToB, err := parseMessageBuildNext(
msgA,
peerBSkippedBounds,
func(id nostr.ID) {
if _, exists := peerBIds[id]; exists {
delete(peerBIds, id)
} else {
peerAIds[id] = struct{}{}
}
},
func(boundKey boundKey) {
peerASkippedBounds[boundKey] = struct{}{}
},
)
if err != nil {
return err
}

// emit deltas from B after receiving message from A
for id := range peerBIds {
select {
case n.Deltas <- Delta{ID: id, Have: n.PeerB, HaveNot: n.PeerA}:
case <-ctx.Done():
return context.Cause(ctx)
}
delete(peerBIds, id)
}

if len(msgToB) == 2 {
// exit condition (no more messages to send)
break
}

// send message to B
if hasSentInitialMessageToB {
err = n.PeerB.SendMessage(msgToB)
} else {
err = n.PeerB.SendInitialMessage(n.Filter, msgToB)
hasSentInitialMessageToB = true
}
if err != nil {
return err
}

// receive message from B
msgB, err := n.PeerB.Receive()
if err != nil {
return err
}
msgBb, _ := nostr.HexDecodeString(msgB)
if len(msgBb) == 1 {
break
}

msgToA, err := parseMessageBuildNext(
msgB,
peerASkippedBounds,
func(id nostr.ID) {
if _, exists := peerAIds[id]; exists {
delete(peerAIds, id)
} else {
peerBIds[id] = struct{}{}
}
},
func(boundKey boundKey) {
peerBSkippedBounds[boundKey] = struct{}{}
},
)
if err != nil {
return err
}

// emit deltas from A after receiving message from B
for id := range peerAIds {
select {
case n.Deltas <- Delta{ID: id, Have: n.PeerA, HaveNot: n.PeerB}:
case <-ctx.Done():
return context.Cause(ctx)
}
delete(peerAIds, id)
}

if len(msgToA) == 2 {
// exit condition (no more messages to send)
break
}

// send message to A
err = n.PeerA.SendMessage(msgToA)
if err != nil {
return err
}
}

// emit remaining deltas before exit
for id := range peerAIds {
select {
case n.Deltas <- Delta{ID: id, Have: n.PeerA, HaveNot: n.PeerB}:
case <-ctx.Done():
return context.Cause(ctx)
}
}
for id := range peerBIds {
select {
case n.Deltas <- Delta{ID: id, Have: n.PeerB, HaveNot: n.PeerA}:
case <-ctx.Done():
return context.Cause(ctx)
}
}

n.PeerA.SendClose()
n.PeerB.SendClose()
close(n.Deltas)

return nil
}

func createInitialMessage() string {
output := bytes.NewBuffer(make([]byte, 0, 64))
output.WriteByte(negentropy.ProtocolVersion)

dummy := negentropy.BoundWriter{}
dummy.WriteBound(output, negentropy.InfiniteBound)
output.WriteByte(byte(negentropy.FingerprintMode))

// hardcoded random fingerprint
fingerprint := [negentropy.FingerprintSize]byte{
0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
}
output.Write(fingerprint[:])

return nostr.HexEncodeToString(output.Bytes())
}

func parseMessageBuildNext(
msg string,
skippedBounds map[boundKey]struct{},
idCallback func(id nostr.ID),
skipCallback func(boundKey boundKey),
) (string, error) {
msgb, err := nostr.HexDecodeString(msg)
if err != nil {
return "", err
}

br := &negentropy.BoundReader{}
bw := &negentropy.BoundWriter{}

nextMsg := bytes.NewBuffer(make([]byte, 0, len(msgb)))
acc := &storage.Accumulator{} // this will be used for building our own fingerprints and also as a placeholder

reader := bytes.NewReader(msgb)
pv, err := reader.ReadByte()
if err != nil {
return "", err
}
if pv != negentropy.ProtocolVersion {
return "", fmt.Errorf("unsupported protocol version %v", pv)
}

nextMsg.WriteByte(pv)

for reader.Len() > 0 {
bound, err := br.ReadBound(reader)
if err != nil {
return "", err
}

modeVal, err := negentropy.ReadVarInt(reader)
if err != nil {
return "", err
}
mode := negentropy.Mode(modeVal)

switch mode {
case negentropy.SkipMode:
skipCallback(getBoundKey(bound))
if _, skipped := skippedBounds[getBoundKey(bound)]; !skipped {
bw.WriteBound(nextMsg, bound)
negentropy.WriteVarInt(nextMsg, int(negentropy.SkipMode))
}

case negentropy.FingerprintMode:
_, err = reader.Read(acc.Buf[0:negentropy.FingerprintSize] /* use this buffer as a dummy */)
if err != nil {
return "", err
}

if _, skipped := skippedBounds[getBoundKey(bound)]; !skipped {
bw.WriteBound(nextMsg, bound)
negentropy.WriteVarInt(nextMsg, int(negentropy.FingerprintMode))
nextMsg.Write(acc.Buf[0:negentropy.FingerprintSize] /* idem */)
}
case negentropy.IdListMode:
// when receiving an idlist we will never send this bound again to this peer
skipCallback(getBoundKey(bound))

// and instead of sending these ids to the other peer we'll send a fingerprint
acc.Reset()

numIds, err := negentropy.ReadVarInt(reader)
if err != nil {
return "", err
}

for range numIds {
id := nostr.ID{}

_, err = reader.Read(id[:])
if err != nil {
return "", err
}

idCallback(id)

acc.AddBytes(id[:])
}

if _, skipped := skippedBounds[getBoundKey(bound)]; !skipped {
fingerprint := acc.GetFingerprint(numIds)

bw.WriteBound(nextMsg, bound)
negentropy.WriteVarInt(nextMsg, int(negentropy.FingerprintMode))
nextMsg.Write(fingerprint[:])
}
default:
return "", fmt.Errorf("unknown mode %v", mode)
}
}

return nostr.HexEncodeToString(nextMsg.Bytes()), nil
}
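
For reference, the Deltas channel is the only output of the reconciliation above: each Delta names an event id plus which side has it and which side lacks it, so a consumer only has to copy in one direction. A minimal, unbatched sketch of what syncCmd does above (illustrative only; it drops the error handling and the 30-id batching):

// assumes peerA/peerB and tpn set up as in syncCmd above
go func() { _ = tpn.Run(ctx) }() // real code should surface this error
for d := range tpn.Deltas {
for evt := range d.Have.relay.QueryEvents(nostr.Filter{IDs: []nostr.ID{d.ID}}) {
d.HaveNot.relay.Publish(ctx, evt)
}
}
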
Block a user