mirror of https://github.com/fiatjaf/nak.git (synced 2025-12-08 16:48:51 +00:00)
Compare commits: v0.1.0 ... 11a690b1c6 (296 commits)
.dockerignore (new file, 36 lines)
@@ -0,0 +1,36 @@
# git files
.git
.gitignore

# documentation
README.md
LICENSE

# development files
justfile
*.md

# test files
*_test.go
cli_test.go

# build artifacts
nak
*.exe
mnt

# ide and editor files
.vscode
.idea
*.swp
*.swo
*~

# os generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
.github/workflows/release-cli.yml (vendored, 9 lines changed)
@@ -25,10 +25,16 @@ jobs:
    strategy:
      matrix:
        goos: [linux, freebsd, darwin, windows]
        goarch: [amd64, arm64]
        goarch: [amd64, arm64, riscv64]
        exclude:
          - goarch: arm64
            goos: windows
          - goarch: riscv64
            goos: windows
          - goarch: riscv64
            goos: darwin
          - goarch: arm64
            goos: freebsd
    steps:
      - uses: actions/checkout@v3
      - uses: wangyoucao577/go-release-action@v1.40
@@ -36,6 +42,7 @@ jobs:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          goos: ${{ matrix.goos }}
          goarch: ${{ matrix.goarch }}
          ldflags: -X main.version=${{ github.ref_name }}
          overwrite: true
          md5sum: false
          sha256sum: false
.github/workflows/smoke-test-release.yml (vendored, new file, 97 lines)
@@ -0,0 +1,97 @@
name: Smoke test the binary

on:
  workflow_run:
    workflows: ["build cli for all platforms"]
    types:
      - completed
    branches:
      - master

jobs:
  smoke-test-linux-amd64:
    runs-on: ubuntu-latest
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    steps:
      - name: Download and smoke test latest binary
        run: |
          set -eo pipefail # Exit on error, and on pipe failures

          echo "Downloading nak binary from releases"
          RELEASE_URL="https://api.github.com/repos/fiatjaf/nak/releases/latest"
          wget $(wget -q -O - ${RELEASE_URL} | jq -r '.assets[] | select(.name | contains("linux-amd64")) | .browser_download_url') -O nak -nv
          chmod +x nak

          echo "Running basic tests..."
          ./nak --version

          # Generate and manipulate keys
          echo "Testing key operations..."
          SECRET_KEY=$(./nak key generate)
          PUBLIC_KEY=$(echo $SECRET_KEY | ./nak key public)
          echo "Generated key pair: $PUBLIC_KEY"

          # Create events
          echo "Testing event creation..."
          ./nak event -c "hello world"
          ./nak event --ts "2 days ago" -c "event with timestamp"
          ./nak event -k 1 -t "t=test" -c "event with tag"

          # Test NIP-19 encoding/decoding
          echo "Testing NIP-19 encoding/decoding..."
          NSEC=$(echo $SECRET_KEY | ./nak encode nsec)
          echo "Encoded nsec: $NSEC"
          ./nak encode npub 79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
          NOTE_ID="5ae731bbc7711f78513da14927c48cc7143a91e6cad0565fdc4d73b8967a7d59"
          NOTE1=$(./nak encode note $NOTE_ID)
          echo "Encoded note1: $NOTE1"
          ./nak decode $NOTE1
          ./nak decode npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6

          # Test event verification
          echo "Testing event verification..."
          # Create an event and verify it
          VERIFY_EVENT=$(./nak event -c "verify me")
          echo $VERIFY_EVENT | ./nak verify

          # Test PoW
          echo "Testing PoW..."
          ./nak event -c "testing pow" --pow 8

          # Test NIP-49 key encryption/decryption
          echo "Testing NIP-49 key encryption/decryption..."
          ENCRYPTED_KEY=$(./nak key encrypt $SECRET_KEY "testpassword")
          echo "Encrypted key: ${ENCRYPTED_KEY:0:20}..."
          DECRYPTED_KEY=$(./nak key decrypt $ENCRYPTED_KEY "testpassword")
          if [ "$DECRYPTED_KEY" != "$SECRET_KEY" ]; then
            echo "NIP-49 encryption/decryption test failed!"
            exit 1
          fi

          # Test multi-value tags
          echo "Testing multi-value tags..."
          ./nak event --ts "yesterday" -t "e=f59911b561c37c90b01e9e5c2557307380835c83399756f4d62d8167227e420a;wss://relay.example.com;root" -c "Testing multi-value tags"

          # Test relay operations (with a public relay)
          echo "Testing relay operations..."
          # Publish a simple event to a public relay
          EVENT_JSON=$(./nak event --sec $SECRET_KEY -c "Test from nak smoke test" nos.lol)
          EVENT_ID=$(echo $EVENT_JSON | jq -r .id)
          echo "Published event ID: $EVENT_ID"

          # Wait a moment for propagation
          sleep 2

          # Fetch the event we just published
          ./nak req -i $EVENT_ID nos.lol

          # Test serving (just start and immediately kill)
          echo "Testing serve command..."
          timeout 2s ./nak serve || true

          # Test filesystem mount (just start and immediately kill)
          echo "Testing fs mount command..."
          mkdir -p /tmp/nostr-mount
          timeout 2s ./nak fs --sec $SECRET_KEY /tmp/nostr-mount || true

          echo "All tests passed"
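
The same checks can also be run by hand against a locally built binary instead of the downloaded release asset. A minimal sketch, assuming a Go toolchain is installed and the repository root is the working directory:

```shell
# build nak from source and repeat a few of the smoke-test commands above
go build -o nak .
./nak --version
SECRET_KEY=$(./nak key generate)
echo $SECRET_KEY | ./nak key public
./nak event -c "hello world"
```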
.gitignore (vendored, 2 lines changed)
@@ -1 +1,3 @@
nak
mnt
nak.exe

@@ -1,2 +0,0 @@
version = 3.5.8
runner.dialect = scala3
Dockerfile (new file, 49 lines)
@@ -0,0 +1,49 @@
# build stage
FROM golang:1.24-alpine AS builder

# install git and ca-certificates (needed for fetching dependencies)
RUN apk add --no-cache git ca-certificates

# set working directory
WORKDIR /app

# copy go mod files first for better caching
COPY go.mod go.sum ./

# download dependencies
RUN go mod download

# copy source code
COPY . .

# build the application
# use cgo_enabled=0 to create a static binary
# use -ldflags to strip debug info and reduce binary size
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-w -s" -o nak .

# runtime stage
FROM alpine:latest

# install ca-certificates for https requests (needed for relay connections)
RUN apk --no-cache add ca-certificates

# create a non-root user
RUN adduser -D -s /bin/sh nakuser

# set working directory
WORKDIR /home/nakuser

# copy the binary from builder stage
COPY --from=builder /app/nak /usr/local/bin/nak

# make sure the binary is executable
RUN chmod +x /usr/local/bin/nak

# switch to non-root user
USER nakuser

# set the entrypoint
ENTRYPOINT ["nak"]

# default command (show help)
CMD ["--help"]
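
As a usage sketch of this image (the `nak` image tag is just an example, matching the `docker build -t nak .` command in the README below):

```shell
# build the image; the default CMD prints the help text
docker build -t nak .
docker run nak
# any arguments replace the default CMD and are passed to the nak entrypoint
docker run nak event -c 'hello from docker'
```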
README.md (267 lines changed)
@@ -3,6 +3,8 @@
|
||||
install with `go install github.com/fiatjaf/nak@latest` or
|
||||
[download a binary](https://github.com/fiatjaf/nak/releases).
|
||||
|
||||
or get the source with `git clone https://github.com/fiatjaf/nak` then install with `go install` or run with docker using `docker build -t nak . && docker run nak event`.
|
||||
|
||||
## what can you do with it?
|
||||
|
||||
take a look at the help text that comes in it to learn all possibilities, but here are some:
|
||||
@@ -16,7 +18,7 @@ take a look at the help text that comes in it to learn all possibilities, but he
|
||||
|
||||
### make a nostr event with custom content and tags, sign it with a different key and publish it to two relays
|
||||
```shell
|
||||
~> nak event --sec 02 -c 'good morning' --tag t=gm wss://nostr-pub.wellorder.net wss://relay.damus.io
|
||||
~> nak event --sec 02 -c 'good morning' --tag t=gm nostr-pub.wellorder.net relay.damus.io
|
||||
{"id":"e20978737ab7cd36eca300a65f11738176123f2e0c23054544b18fe493e2aa1a","pubkey":"c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5","created_at":1698632753,"kind":1,"tags":[["t","gm"]],"content":"good morning","sig":"5687c1a97066c349cb3bde0c0719fd1652a13403ba6aca7557b646307ee6718528cd86989db08bf6a7fd04bea0b0b87c1dd1b78c2d21b80b80eebab7f40b8916"}
|
||||
publishing to wss://nostr-pub.wellorder.net... success.
|
||||
publishing to wss://relay.damus.io... success.
|
||||
@@ -24,7 +26,7 @@ publishing to wss://relay.damus.io... success.
|
||||
|
||||
### query a bunch of relays for a tag with a limit of 2 for each, print their content
|
||||
```shell
|
||||
~> nak req -k 1 -t t=gm -l 2 wss://nostr.mom wss://nostr.wine wss://nostr-pub.wellorder.net | jq .content
|
||||
~> nak req -k 1 -t t=gm -l 2 nostr.mom nostr.wine nostr-pub.wellorder.net | jq .content
|
||||
"#GM, you sovereign savage #freeple of the #nostrverse. Let's cause some #nostroversy. "
|
||||
"ITM slaves!\n#gm https://image.nostr.build/cbbcdf80bfc302a6678ecf9387c87d87deca3e0e288a12e262926c34feb3f6aa.jpg "
|
||||
"good morning"
|
||||
@@ -34,17 +36,22 @@ publishing to wss://relay.damus.io... success.
|
||||
|
||||
### decode a nip19 note1 code, add a relay hint, encode it back to nevent1
|
||||
```shell
|
||||
~> nak decode note1ttnnrw78wy0hs5fa59yj03yvcu2r4y0xetg9vh7uf4em39n604vsyp37f2 | jq -r .id | nak encode nevent -r wss://nostr.zbd.gg
|
||||
~> nak decode note1ttnnrw78wy0hs5fa59yj03yvcu2r4y0xetg9vh7uf4em39n604vsyp37f2 | jq -r .id | nak encode nevent -r nostr.zbd.gg
|
||||
nevent1qqs94ee3h0rhz8mc2y76zjf8cjxvw9p6j8nv45zktlwy6uacjea86kgpzfmhxue69uhkummnw3ezu7nzvshxwec8zw8h7
|
||||
~> nak decode nevent1qqs94ee3h0rhz8mc2y76zjf8cjxvw9p6j8nv45zktlwy6uacjea86kgpzfmhxue69uhkummnw3ezu7nzvshxwec8zw8h7
|
||||
{
|
||||
"id": "5ae731bbc7711f78513da14927c48cc7143a91e6cad0565fdc4d73b8967a7d59",
|
||||
"relays": [
|
||||
"wss://nostr.zbd.gg"
|
||||
"nostr.zbd.gg"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### fetch all events except those that are present in a given line-delimited json file (negentropy sync)
|
||||
```shell
|
||||
~> nak req --only-missing ./events.jsonl -k 30617 pyramid.fiatjaf.com
|
||||
```
|
||||
|
||||
### fetch an event using relay and author hints automatically from a nevent1 code, pretty-print it
|
||||
```shell
|
||||
nak fetch nevent1qqs2e3k48vtrkzjm8vvyzcmsmkf58unrxtq2k4h5yspay6vhcqm4wqcpz9mhxue69uhkummnw3ezuamfdejj7q3ql2vyh47mk2p0qlsku7hg0vn29faehy9hy34ygaclpn66ukqp3afqxpqqqqqqz7ttjyq | jq
|
||||
@@ -61,7 +68,7 @@ nak fetch nevent1qqs2e3k48vtrkzjm8vvyzcmsmkf58unrxtq2k4h5yspay6vhcqm4wqcpz9mhxue
|
||||
|
||||
### republish an event from one relay to multiple others
|
||||
```shell
|
||||
~> nak req -i e20978737ab7cd36eca300a65f11738176123f2e0c23054544b18fe493e2aa1a wss://nostr.wine/ wss://nostr-pub.wellorder.net | nak event wss://nostr.wine wss://offchain.pub wss://public.relaying.io wss://eden.nostr.land wss://atlas.nostr.land wss://relayable.org
|
||||
~> nak req -i e20978737ab7cd36eca300a65f11738176123f2e0c23054544b18fe493e2aa1a nostr.wine/ nostr-pub.wellorder.net | nak event nostr.wine offchain.pub public.relaying.io eden.nostr.land atlas.nostr.land relayable.org
|
||||
{"id":"e20978737ab7cd36eca300a65f11738176123f2e0c23054544b18fe493e2aa1a","pubkey":"c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5","created_at":1698632753,"kind":1,"tags":[["t","gm"]],"content":"good morning","sig":"5687c1a97066c349cb3bde0c0719fd1652a13403ba6aca7557b646307ee6718528cd86989db08bf6a7fd04bea0b0b87c1dd1b78c2d21b80b80eebab7f40b8916"}
|
||||
publishing to wss://nostr.wine... failed: msg: blocked: not an active paid member
|
||||
publishing to wss://offchain.pub... success.
|
||||
@@ -77,7 +84,253 @@ publishing to wss://relayable.org... success.
|
||||
invalid .id, expected 05bd99d54cb835f427e0092c4275ee44c7ff51219eff417c19f70c9e2c53ad5a, got 05bd99d54cb835f327e0092c4275ee44c7ff51219eff417c19f70c9e2c53ad5a
|
||||
```
|
||||
|
||||
### fetch all quoted events by a given pubkey in their last 100 notes
|
||||
### fetch all quoted events by a given pubkey in their last 10 notes of 2023
|
||||
```shell
|
||||
nak req -l 100 -k 1 -a 2edbcea694d164629854a52583458fd6d965b161e3c48b57d3aff01940558884 wss://relay.damus.io | jq -r '.content | match("nostr:((note1|nevent1)[a-z0-9]+)";"g") | .captures[0].string' | nak decode | jq -cr '{ids: [.id]}' | nak req wss://relay.damus.io
|
||||
~> nak req -l 10 -k 1 --until 'December 31 2023' -a 2edbcea694d164629854a52583458fd6d965b161e3c48b57d3aff01940558884 relay.damus.io | jq -r '.content | match("nostr:((note1|nevent1)[a-z0-9]+)";"g") | .captures[0].string' | nak decode | jq -cr '{ids: [.id]}' | nak req relay.damus.io
|
||||
connecting to relay.damus.io...
|
||||
ok.
|
||||
{"kind":1,"id":"0000000a5109c9747e3847282fcaef3d221d1be5e864ced7b2099d416a18d15a","pubkey":"7bdef7be22dd8e59f4600e044aa53a1cf975a9dc7d27df5833bc77db784a5805","created_at":1703869609,"tags":[["nonce","12912720851599460299","25"]],"content":"https://image.nostr.build/5eb40d3cae799bc572763b8f8bee95643344fa392d280efcb0fd28a935879e2a.png\n\nNostr is not dying.\nIt is just a physiological and healthy slowdown on the part of all those who have made this possible in such a short time, sharing extraordinary enthusiasm. This is necessary to regain a little energy, it will allow some things to be cleaned up and more focused goals to be set.\n\nIt is like the caterpillar that is about to become a butterfly, it has to stop moving, acting, doing all the time; it has to do one last silent work and rest, letting time go by. And then a new phase of life can begin.\n\nWe have an amazing 2024 ahead.\nThank you all, who have given so much and believe in Nostr.\n\nPS: an interesting cue suggested by this image, you cannot have both silk and butterfly, you have to choose: a precious and sophisticated ornament, or the living, colorful beauty of freedom.","sig":"16fe157fb13dba2474d510db5253edc409b465515371015a91b26b8f39e5aa873453bc366947c37463c49466f5fceb7dea0485432f979a03471c8f76b73e553c"}
|
||||
{"kind":1,"id":"ac0cc72dfee39f41d94568f574e7b613d3979facbd7b477a16b52eb763db4b6e","pubkey":"2250f69694c2a43929e77e5de0f6a61ae5e37a1ee6d6a3baef1706ed9901248b","created_at":1703873865,"tags":[["r","https://zine.wavlake.com/2023-year-in-review/"]],"content":"It's been an incredible year for us here at Wavlake and we wanted to take a moment to look back and see how far we've come since launch. Read more.. https://zine.wavlake.com/2023-year-in-review/","sig":"189e354f67f48f3046fd762c83f9bf3a776d502d514e2839a1b459c30107a02453304ef695cdc7d254724041feec3800806b21eb76259df87144aaef821ace5b"}
|
||||
{"kind":1,"id":"6215766c5aadfaf51488134682f7d28f237218b5405da2fc11d1fefe1ebf8154","pubkey":"4ce6abbd68dab6e9fdf6e8e9912a8e12f9b539e078c634c55a9bff2994a514dd","created_at":1703879775,"tags":[["imeta","url https://video.nostr.build/7b4e7c326fa4fcba58a40914ce9db4f060bd917878f2194f6d139948b085ebb9.mp4","blurhash eHD,QG_4ogMu_3to%O-:MwM_IWRjx^-pIUoe-;t7%Nt7%gV?M{WBxu","dim 480x268"],["t","zaps"],["t","powakili23"],["p","4f82bced42584a6acfced2a657b5acabc4f90d75a95ed3ff888f3b04b8928630"],["p","ce75bae2349804caa5f4de8ae8f775bb558135f412441d9e32f88e4226c5d165"],["p","94bd495b78f8f6e5aff8ebc90e052d3a409d1f9d82e43ab56ca2cafb81b18ddf"],["p","50ff5b7ebeac1cc0d03dc878be8a59f1b63d45a7d5e60ade4b6f6f31eca25954"],["p","f300cf2bdf9808ed229dfa468260753a0b179935bdb87612b6d4f5b9fe3fc7cf"],["r","https://geyser.fund/entry/2636"],["r","https://video.nostr.build/7b4e7c326fa4fcba58a40914ce9db4f060bd917878f2194f6d139948b085ebb9.mp4"]],"content":"POWA - HQ UPDATE - DEC 2023\nTLDR: plan to open January 2024, 1 million Sats to go to reach milestone. #zaps go to fund this project. ⚡️powa@geyser.fund\n\nHello,\n\nFirst and foremost, I’d like to thank you for the incredible support shown for this project. It’s been an absolute honor to oversee this Proof of Work initiative.\n\nI am thrilled to announce that we are right on track for the grand opening in January 2024.\n\nCurrently, we're just over 1 million Sats away from reaching our target for this phase.\n\nPlease take a moment to enjoy the video and stay tuned for further updates about POWA. \n\nMan Like Who?\nMan Like Kweks!\n🇹🇿⚡️💜🏔️\n#powakili23\nnostr:npub1f7ptem2ztp9x4n7w62n90ddv40z0jrt4490d8lug3uasfwyjsccqkknerm nostr:npub1ee6m4c35nqzv4f05m69w3am4hd2czd05zfzpm83jlz8yyfk969js78tfcv nostr:npub1jj75jkmclrmwttlca0ysupfd8fqf68uastjr4dtv5t90hqd33h0s4gcksp nostr:npub12rl4kl474swvp5paeputazje7xmr63d86hnq4hjtdahnrm9zt92qgq500s nostr:npub17vqv727lnqyw6g5alfrgycr48g930xf4hku8vy4k6n6mnl3lcl8sglecc5 \n\nhttps://geyser.fund/entry/2636 https://video.nostr.build/7b4e7c326fa4fcba58a40914ce9db4f060bd917878f2194f6d139948b085ebb9.mp4 ","sig":"97d13c17d91c319f343cc770222d6d4a0a714d0e7e4ef43373adaf215a4c077f0bdf12bac488c74dbd4d55718d46c17a617b93c8660736b70bcd61a8820ece67"}
|
||||
...
|
||||
```
|
||||
|
||||
### sign an event collaboratively with multiple parties using musig2
|
||||
```shell
|
||||
~> nak event --sec 1234 -k 1 -c 'hello from a combined key' --musig 2
|
||||
the following code should be saved secretly until the next step and included with --musig-nonce-secret:
|
||||
QebOT03ERmV7km22CqEqBPFmzAkgxQzGGbR7Si8yIZCBrd1N9A3LKwGLO71kbgXZ9EYFKpjiwun4u0mj5Tq6vwM3pK7x+EI8oHbkt9majKv/QN24Ix8qnwEIHxXX+mXBug==
|
||||
|
||||
this should be sent to the next signer and they should call this on their side:
|
||||
nak event --sec <insert-secret-key> --musig 2 -k 1 -ts 1720821287 -c 'hello from a combined key' --musig-pubkey 0337a4aef1f8423ca076e4b7d99a8cabff40ddb8231f2a9f01081f15d7fa65c1ba --musig-nonce 0285af37c6c43638cda2c773098e867c749ddf1e9d096b78686c5d000603935ad3025c4a1e042eb6b0dcfd864d1e072d2ce8da06f2c0dcf13fd7d1fcef0dd26dbc92
|
||||
```
|
||||
|
||||
demo videos with [2](https://njump.me/nevent1qqs8pmmae89agph80928l6gjm0wymechqazv80jwqrqy4cgk08epjaczyqalp33lewf5vdq847t6te0wvnags0gs0mu72kz8938tn24wlfze674zkzz), [3](https://njump.me/nevent1qqsrp320drqcnmnam6jvmdd4lgdvh2ay0xrdesrvy6q9qqdfsk7r55qzyqalp33lewf5vdq847t6te0wvnags0gs0mu72kz8938tn24wlfze6c32d4m) and [4](https://njump.me/nevent1qqsre84xe6qpagf2w2xjtjwc95j4dd5ccue68gxl8grkd6t6hjhaj5qzyqalp33lewf5vdq847t6te0wvnags0gs0mu72kz8938tn24wlfze6t8t7ak) parties.
|
||||
|
||||
### generate a private key
|
||||
```shell
|
||||
~> nak key generate
|
||||
7b94e287b1fafa694ded1619b27de7effd3646104a158e187ff4edc56bc6148d
|
||||
```
|
||||
|
||||
### encrypt key with NIP-49
|
||||
```shell
|
||||
~> nak key encrypt 7b94e287b1fafa694ded1619b27de7effd3646104a158e187ff4edc56bc6148d mypassword
|
||||
ncryptsec1qggx54cg270zy9y8krwmfz29jyypsuxken2fkk99gr52qhje968n6mwkrfstqaqhq9eq94pnzl4nff437l4lp4ur2cs4f9um8738s35l2esx2tas48thtfhrk5kq94pf9j2tpk54yuermra0xu6hl5ls
|
||||
```
|
||||
|
||||
### decrypt key with NIP-49
|
||||
```shell
|
||||
~> nak key decrypt ncryptsec1qggx54cg270zy9y8krwmfz29jyypsuxken2fkk99gr52qhje968n6mwkrfstqaqhq9eq94pnzl4nff437l4lp4ur2cs4f9um8738s35l2esx2tas48thtfhrk5kq94pf9j2tpk54yuermra0xu6hl5ls mypassword
|
||||
7b94e287b1fafa694ded1619b27de7effd3646104a158e187ff4edc56bc6148d
|
||||
~>
|
||||
~> nak key decrypt ncryptsec1qggx54cg270zy9y8krwmfz29jyypsuxken2fkk99gr52qhje968n6mwkrfstqaqhq9eq94pnzl4nff437l4lp4ur2cs4f9um8738s35l2esx2tas48thtfhrk5kq94pf9j2tpk54yuermra0xu6hl5ls
|
||||
type the password to decrypt your secret key: **********
|
||||
7b94e287b1fafa694ded1619b27de7effd3646104a158e187ff4edc56bc6148d
|
||||
```
|
||||
|
||||
### get a public key from a private key
|
||||
```shell
|
||||
~> nak key public 7b94e287b1fafa694ded1619b27de7effd3646104a158e187ff4edc56bc6148d
|
||||
985d66d2644dfa7676e26046914470d66ebc7fa783a3f57f139fde32d0d631d7
|
||||
```
|
||||
|
||||
### sign an event using a bunker provider (amber, promenade etc)
|
||||
```shell
|
||||
~> export NOSTR_CLIENT_KEY="$(nak key generate)"
|
||||
~> nak event --sec 'bunker://a9e0f110f636f3191644110c19a33448daf09d7cda9708a769e91b7e91340208?relay=wss%3A%2F%2Frelay.damus.io&relay=wss%3A%2F%2Frelay.nsecbunker.com&relay=wss%3A%2F%2Fnos.lol&secret=TWfGbjQCLxUf' -c 'hello from bunker'
|
||||
```
|
||||
|
||||
(in most cases it's better to set `NOSTR_CLIENT_KEY` permanently on your shell, as that identity will be recorded by the bunker provider.)
|
||||
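
for example, a minimal sketch of making that permanent, assuming a bash shell (the key is generated once and written literally into the file):

```shell
~> echo "export NOSTR_CLIENT_KEY=$(nak key generate)" >> ~/.bashrc
```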
|
||||
### sign an event using a NIP-49 encrypted key
|
||||
```shell
|
||||
~> nak event --sec ncryptsec1qggx54cg270zy9y8krwmfz29jyypsuxken2fkk99gr52qhje968n6mwkrfstqaqhq9eq94pnzl4nff437l4lp4ur2cs4f9um8738s35l2esx2tas48thtfhrk5kq94pf9j2tpk54yuermra0xu6hl5ls -c 'hello from encrypted key'
|
||||
type the password to decrypt your secret key: **********
|
||||
{"kind":1,"id":"8aa5c931fb1da507f14801de6a1814b7f0baae984dc502b9889f347f5aa3cc4e","pubkey":"985d66d2644dfa7676e26046914470d66ebc7fa783a3f57f139fde32d0d631d7","created_at":1720822280,"tags":[],"content":"hello from encrypted key","sig":"9d1c9e56e87f787cc5b6191ec47690ce59fa4bef105b56297484253953e18fb930f6683f007e84a9ce9dc9a25b20c191c510629156dcd24bd16e15d302d20944"}
|
||||
```
|
||||
|
||||
### talk to a relay's NIP-86 management API
|
||||
```shell
|
||||
nak admin allowpubkey --sec ncryptsec1qggx54cg270zy9y8krwmfz29jyypsuxken2fkk99gr52qhje968n6mwkrfstqaqhq9eq94pnzl4nff437l4lp4ur2cs4f9um8738s35l2esx2tas48thtfhrk5kq94pf9j2tpk54yuermra0xu6hl5ls --pubkey a9e0f110f636f3191644110c19a33448daf09d7cda9708a769e91b7e91340208 pyramid.fiatjaf.com
|
||||
type the password to decrypt your secret key: **********
|
||||
calling 'allowpubkey' on https://pyramid.fiatjaf.com...
|
||||
{
|
||||
"result": null,
|
||||
"error": "failed to add to whitelist: pubkey 985d66d2644dfa7676e26046914470d66ebc7fa783a3f57f139fde32d0d631d7 doesn't have permission to invite"
|
||||
}
|
||||
```
|
||||
|
||||
### start a bunker locally
|
||||
```shell
|
||||
~> nak bunker --sec ncryptsec1qggrp80ptf0s7kyl0r38ktzg60fem85m89uz7um6rjn4pnep2nnvcgqm8h7q36c76z9sypatdh4fmw6etfxu99mv5cxkw4ymcsryw0zz7evyuplsgvnj5yysf449lq94klzvnahsw2lzxflvcq4qpf5q -k 3fbf7fbb2a2111e205f74aca0166e29e421729c9a07bc45aa85d39535b47c9ed relay.damus.io nos.lol relay.nsecbunker.com
|
||||
connecting to relay.damus.io... ok.
|
||||
connecting to nos.lol... ok.
|
||||
connecting to relay.nsecbunker.com... ok.
|
||||
type the password to decrypt your secret key: ***
|
||||
listening at [wss://relay.damus.io wss://nos.lol wss://relay.nsecbunker.com]:
|
||||
pubkey: f59911b561c37c90b01e9e5c2557307380835c83399756f4d62d8167227e420a
|
||||
npub: npub17kv3rdtpcd7fpvq7newz24eswwqgxhyr8xt4daxk9kqkwgn7gg9q4gy8vf
|
||||
authorized keys:
|
||||
- 3fbf7fbb2a2111e205f74aca0166e29e421729c9a07bc45aa85d39535b47c9ed
|
||||
to restart: nak bunker --sec ncryptsec1qggrp80ptf0s7kyl0r38ktzg60fem85m89uz7um6rjn4pnep2nnvcgqm8h7q36c76z9sypatdh4fmw6etfxu99mv5cxkw4ymcsryw0zz7evyuplsgvnj5yysf449lq94klzvnahsw2lzxflvcq4qpf5q -k 3fbf7fbb2a2111e205f74aca0166e29e421729c9a07bc45aa85d39535b47c9ed relay.damus.io nos.lol relay.nsecbunker.com
|
||||
bunker: bunker://f59911b561c37c90b01e9e5c2557307380835c83399756f4d62d8167227e420a?relay=wss%3A%2F%2Frelay.damus.io&relay=wss%3A%2F%2Fnos.lol&relay=wss%3A%2F%2Frelay.nsecbunker.com&secret=XuuiMbcLwuwL
|
||||
```
|
||||
|
||||
you can also display a QR code for the bunker URI by adding the `--qrcode` flag:
|
||||
|
||||
```shell
|
||||
~> nak bunker --qrcode --sec ncryptsec1... relay.damus.io
|
||||
```
|
||||
|
||||
### start a bunker that persists its metadata (secret key, relays, authorized client pubkeys) to disc
|
||||
```shell
|
||||
~> nak bunker --persist --sec ncryptsec1... relay.nsec.app nos.lol
|
||||
```
|
||||
|
||||
then later just
|
||||
|
||||
```shell
|
||||
~> nak bunker --persist
|
||||
```
|
||||
|
||||
or give it a named profile:
|
||||
|
||||
```shell
|
||||
~> nak bunker --profile myself ...
|
||||
```
|
||||
|
||||
### generate a NIP-70 protected event with a date set to two weeks ago and some multi-value tags
|
||||
```shell
|
||||
~> nak event --ts 'two weeks ago' -t '-' -t 'e=f59911b561c37c90b01e9e5c2557307380835c83399756f4d62d8167227e420a;wss://relay.whatever.com;root;a9e0f110f636f3191644110c19a33448daf09d7cda9708a769e91b7e91340208' -t 'p=a9e0f110f636f3191644110c19a33448daf09d7cda9708a769e91b7e91340208;wss://p-relay.com' -c 'I know the future'
|
||||
{"kind":1,"id":"f030fccd90c783858dfcee204af94826cf0f1c85d6fc85a0087e9e5172419393","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1719677535,"tags":[["-"],["e","f59911b561c37c90b01e9e5c2557307380835c83399756f4d62d8167227e420a","wss://relay.whatever.com","root","a9e0f110f636f3191644110c19a33448daf09d7cda9708a769e91b7e91340208"],["p","a9e0f110f636f3191644110c19a33448daf09d7cda9708a769e91b7e91340208","wss://p-relay.com"]],"content":"I know the future","sig":"8b36a74e29df8bc12bed66896820da6940d4d9409721b3ed2e910c838833a178cb45fd5bb1c6eb6adc66ab2808bfac9f6644a2c55a6570bb2ad90f221c9c7551"}
|
||||
```
|
||||
|
||||
### download the latest 50000 notes from a relay, regardless of their natural query limits, by paginating requests
|
||||
```shell
|
||||
~> nak req -k 1 --limit 50000 --paginate --paginate-interval 2s nos.lol > events.jsonl
|
||||
~> wc -l events.jsonl
|
||||
50000 events.jsonl
|
||||
```
|
||||
|
||||
### run a somewhat verbose local relay for test purposes
|
||||
```shell
|
||||
~> nak serve
|
||||
> relay running at ws://localhost:10547
|
||||
got request {"kinds":[1],"authors":["79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"],"since":1724082362}
|
||||
got event {"kind":1,"id":"e3c6bf630d6deea74c0ee2f7f7ba6da55a627498a32f1e72029229bb1810bce3","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1724082366,"tags":[],"content":"two","sig":"34261cf226c3fee2df24e55a89f43f5349c98a64bce46bdc46807b0329f334cea93e9e8bc285c1259a5684cf23f5e507c8e6dad47a31a6615d706b1130d09e69"}
|
||||
got event {"kind":1,"id":"0bbb397c8f87ae557650b9d6ee847292df8e530c458ffea1b24bdcb7bed0ec5e","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1724082369,"tags":[],"content":"three","sig":"aa1cb7d5f0f03f358fc4c0a4351a4f1c66e3a7627021b618601c56ba598b825b6d95d9c8720a4c60666a7eb21e17018cf326222f9f574a9396f2f2da7f007546"}
|
||||
• events stored: 2, subscriptions opened: 1
|
||||
got event {"kind":1,"id":"029ebff759dd54dbd01b929f879fea5802de297e1c3768ca16d9b97cc8bca38f","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1724082371,"tags":[],"content":"four","sig":"9816de517d87d4c3ede57c1c50e3c237486794241afadcd891e1acbba2c5e672286090e6ad3402b047d69bae8095bc4e20e57ac70d92386dfa26db216379330f"}
|
||||
got event {"kind":1,"id":"fe6489fa6fbb925be839377b9b7049d73be755dc2bdad97ff6dd9eecbf8b3a32","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1724082383,"tags":[],"content":"five","sig":"865ce5e32eead5bdb950ac1fbc55bc92dde26818ee3136634538ec42914de179a51e672c2d4269d4362176e5e8cd5e08e69b35b91c6c2af867e129b93d607635"}
|
||||
got request {"kinds":[30818]}
|
||||
• events stored: 4, subscriptions opened: 1
|
||||
```
|
||||
|
||||
### enable negentropy (nip77) support in your development relay
|
||||
```shell
|
||||
~> nak serve --negentropy
|
||||
```
|
||||
|
||||
### run a grasp server (with a relay)
|
||||
```shell
|
||||
~> nak serve --grasp
|
||||
```
|
||||
|
||||
### run a blossom server (with a relay)
|
||||
```shell
|
||||
~> nak serve --blossom
|
||||
```
|
||||
|
||||
### make an event with a PoW target
|
||||
```shell
|
||||
~> nak event -c 'hello getwired.app and labour.fiatjaf.com' --pow 24
|
||||
{"kind":1,"id":"0000009dcc7c62056eafdb41fac817379ec2becf0ce27c5fbe98d0735d968147","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1724160828,"tags":[["nonce","515504","24"]],"content":"hello getwired.app and labour.fiatjaf.com","sig":"7edb988065ccc12779fe99270945b212f3723838f315d76d5e90e9ffa27198f13fa556614295f518d968d55bab81878167d4162b3a7cf81a6b423c6761bd504c"}
|
||||
```
|
||||
|
||||
### make a nostr event signed with a key given as an environment variable
|
||||
|
||||
```shell
|
||||
~> export NOSTR_SECRET_KEY=ncryptsec1qggyy9vw0nclmw8ly9caz6aa7f85a4ufhsct64uva337pulsdw00n6twa2lzhzk2znzsyu60urx9s08lx00ke6ual3lszyn5an9zarm6s70lw5lj6dv3mj3f9p4tvp0we6qyz4gp420mapfmvqheuttv
|
||||
~> nak event -c 'it supports keys as hex, nsec or ncryptsec'
|
||||
type the password to decrypt your secret key: ********
|
||||
{"kind":1,"id":"5cbf3feb9a7d99c3ee2a88693a591caca1a8348fea427b3652c27f7a8a76af48","pubkey":"b00bcab55375d8c7b731dd9841f6d805ff1cf6fdc945e7326786deb5ddac6ce4","created_at":1724247924,"tags":[],"content":"it supports keys as hex, nsec or ncryptsec","sig":"fb3fd170bc10e5042322c7a05dd4bbd8ac9947b39026b8a7afd1ee02524e8e3aa1d9554e9c7b6181ca1b45cab01cd06643bdffa5ce678b475e6b185e1c14b085"}
|
||||
```
|
||||
|
||||
### download some helpful `jq` functions for dealing with nostr events
|
||||
```shell
|
||||
~> nak req -i 412f2d3e73acc312942c055ac2a695dc60bf58ff97e06689a8a79e97796c4cdb relay.westernbtc.com | jq -r .content > ~/.jq
|
||||
```
|
||||
|
||||
### watch a NIP-53 livestream (zap.stream, amethyst, shosho etc)
|
||||
```shell
|
||||
~> # this requires the jq utils from the step above
|
||||
~> mpv $(nak fetch naddr1qqjxvvm9xscnsdtx95cxvcfk956rsvtx943rje3k95mx2dp389jnwwrp8ymxgqg4waehxw309aex2mrp0yhxgctdw4eju6t09upzpn6956apxcad0mfp8grcuugdysg44eepex68h50t73zcathmfs49qvzqqqrkvu7ed38k | jq -r 'tag_value("streaming")')
|
||||
~>
|
||||
~> # or without the utils
|
||||
~> mpv $(nak fetch naddr1qqjxvvm9xscnsdtx95cxvcfk956rsvtx943rje3k95mx2dp389jnwwrp8ymxgqg4waehxw309aex2mrp0yhxgctdw4eju6t09upzpn6956apxcad0mfp8grcuugdysg44eepex68h50t73zcathmfs49qvzqqqrkvu7ed38k | jq -r '.tags | map(select(.[0] == "streaming") | .[1])[0]')
|
||||
```
|
||||
|
||||
### download a NIP-35 torrent from an `nevent`
|
||||
```shell
|
||||
~> # this requires the jq utils from two steps above
|
||||
~> aria2c $(nak fetch nevent1qqsdsg6x7uujekac4ga7k7qa9q9sx8gqj7xzjf5w9us0dm0ghvf4ugspp4mhxue69uhkummn9ekx7mq6dw9y4 | jq -r '"magnet:?xt=urn:btih:\(tag_value("x"))&dn=\(tag_value("title"))&tr=http%3A%2F%2Ftracker.loadpeers.org%3A8080%2FxvRKfvAlnfuf5EfxTT5T0KIVPtbqAHnX%2Fannounce&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A6969%2Fannounce&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.torrent.eu.org%3A451%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337&tr=\(tags("tracker") | map(.[1] | @uri) | join("&tr="))"')
|
||||
```
|
||||
|
||||
### mount Nostr as a FUSE filesystem and publish a note
|
||||
```shell
|
||||
~> nak fs --sec 01 ~/nostr
|
||||
- mounting at /home/user/nostr... ok.
|
||||
~> cd ~/nostr/npub1xxxxxx/notes/
|
||||
~> echo "satellites are bad!" > new
|
||||
pending note updated, timer reset.
|
||||
- `touch publish` to publish immediately
|
||||
- `rm new` to erase and cancel the publication.
|
||||
~> touch publish
|
||||
publishing now!
|
||||
{"id":"f1cbfa6...","pubkey":"...","content":"satellites are bad!","sig":"..."}
|
||||
publishing to 3 relays... offchain.pub: ok, nostr.wine: ok, pyramid.fiatjaf.com: ok
|
||||
event published as f1cbfa6... and updated locally.
|
||||
```
|
||||
|
||||
### list NIP-60 wallet tokens and send some
|
||||
```shell
|
||||
~> nak wallet tokens
|
||||
91a10b6fc8bbe7ef2ad9ad0142871d80468b697716d9d2820902db304ff1165e 500 cashu.space
|
||||
cac7f89f0611021984d92a7daca219e4cd1c9798950e50e952bba7cde1ac1337 1000 legend.lnbits.com
|
||||
~> nak wallet send 100
|
||||
cashuA1psxqyry8...
|
||||
~> nak wallet pay lnbc1...
|
||||
```
|
||||
|
||||
### upload and download files with blossom
|
||||
```shell
|
||||
~> nak blossom --server blossom.azzamo.net --sec 01 upload image.png
|
||||
{"sha256":"38c51756f3e9fedf039488a1f6e513286f6743194e7a7f25effdc84a0ee4c2cf","url":"https://blossom.azzamo.net/38c51756f3e9fedf039488a1f6e513286f6743194e7a7f25effdc84a0ee4c2cf.png"}
|
||||
~> nak blossom --server aegis.utxo.one download acc8ea43d4e6b706f68b249144364f446854b7f63ba1927371831c05dcf0256c -o downloaded.png
|
||||
```
|
||||
|
||||
### publish a fully formed event with correct tags, URIs and to the correct read and write relays
|
||||
```shell
|
||||
echo "#surely you're joking, mr npub1l2vyh47mk2p0qlsku7hg0vn29faehy9hy34ygaclpn66ukqp3afqutajft olas.app is broken again" | nak publish
|
||||
|
||||
# it will add the hashtag, turn the npub1 code into a nostr:npub1 URI, turn the olas.app string into https://olas.app, add the "p" tag (and "q" tags too if you were mentioning an nevent1 code or naddr1 code) and finally publish it to your "write" relays and to any mentioned person (or author of mentioned events)'s "read" relays.
|
||||
# there is also a --reply flag that you can pass an nevent, naddr or hex id to and it will do the right thing (including setting the correct kind to either 1 or 1111).
|
||||
# and there is a --confirm flag that gives you a chance to confirm before actually publishing the result to relays.
|
||||
```
|
||||
|
||||
### record and publish an audio note (yakbak, nostur etc) signed from a bunker
|
||||
```shell
|
||||
ffmpeg -f alsa -i default -f webm -t 00:00:03 pipe:1 | nak blossom --server blossom.primal.net upload | jq -rc '{content: .url}' | nak event -k 1222 --sec 'bunker://urlgoeshere' pyramid.fiatjaf.com nostr.wine
|
||||
```
|
||||
|
||||
### from a file with events get only those that have kind 1111 and were created by a given pubkey
|
||||
```shell
|
||||
~> cat all.jsonl | nak filter -k 1111 -a 117673e191b10fe1aedf1736ee74de4cffd4c132ca701960b70a5abad5870faa > filtered.jsonl
|
||||
```
|
||||
|
||||
### use negentropy (nip77) to only fetch the ids for a given query
|
||||
```shell
|
||||
~> nak req --ids-only -k 1111 -a npub1vyrx2prp0mne8pczrcvv38ahn5wahsl8hlceeu3f3aqyvmu8zh5s7kfy55 relay.damus.io
|
||||
```
|
||||
|
||||
admin.go (new file, 186 lines)
@@ -0,0 +1,186 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip86"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var admin = &cli.Command{
|
||||
Name: "admin",
|
||||
Usage: "manage relays using the relay management API",
|
||||
Description: `examples:
|
||||
nak admin allowpubkey myrelay.com --pubkey 1234... --reason "good user"
|
||||
nak admin banpubkey myrelay.com --pubkey 1234... --reason "spam"
|
||||
nak admin listallowedpubkeys myrelay.com
|
||||
nak admin changerelayname myrelay.com --name "My Relay"`,
|
||||
ArgsUsage: "<relay-url>",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: defaultKeyFlags,
|
||||
Commands: (func() []*cli.Command {
|
||||
methods := []struct {
|
||||
method string
|
||||
args []string
|
||||
}{
|
||||
{"allowpubkey", []string{"pubkey", "reason"}},
|
||||
{"banpubkey", []string{"pubkey", "reason"}},
|
||||
{"listallowedpubkeys", nil},
|
||||
{"listbannedpubkeys", nil},
|
||||
{"listeventsneedingmoderation", nil},
|
||||
{"allowevent", []string{"id", "reason"}},
|
||||
{"banevent", []string{"id", "reason"}},
|
||||
{"listbannedevents", nil},
|
||||
{"changerelayname", []string{"name"}},
|
||||
{"changerelaydescription", []string{"description"}},
|
||||
{"changerelayicon", []string{"icon"}},
|
||||
{"allowkind", []string{"kind"}},
|
||||
{"disallowkind", []string{"kind"}},
|
||||
{"listallowedkinds", nil},
|
||||
{"blockip", []string{"ip", "reason"}},
|
||||
{"unblockip", []string{"ip", "reason"}},
|
||||
{"listblockedips", nil},
|
||||
}
|
||||
|
||||
commands := make([]*cli.Command, 0, len(methods))
|
||||
for _, def := range methods {
|
||||
def := def
|
||||
|
||||
flags := make([]cli.Flag, len(def.args), len(def.args)+4)
|
||||
for i, argName := range def.args {
|
||||
flags[i] = declareFlag(argName)
|
||||
}
|
||||
|
||||
cmd := &cli.Command{
|
||||
Name: def.method,
|
||||
Usage: fmt.Sprintf(`the "%s" relay management RPC call`, def.method),
|
||||
Description: fmt.Sprintf(
|
||||
`the "%s" management RPC call, see https://nips.nostr.com/86 for more information`, def.method),
|
||||
Flags: flags,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
params := make([]any, len(def.args))
|
||||
for i, argName := range def.args {
|
||||
params[i] = getArgument(c, argName)
|
||||
}
|
||||
req := nip86.Request{Method: def.method, Params: params}
|
||||
reqj, _ := json.Marshal(req)
|
||||
|
||||
relayUrls := c.Args().Slice()
|
||||
if len(relayUrls) == 0 {
|
||||
stdout(string(reqj))
|
||||
return nil
|
||||
}
|
||||
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, relayUrl := range relayUrls {
|
||||
httpUrl := "http" + nostr.NormalizeURL(relayUrl)[2:]
|
||||
log("calling '%s' on %s... ", def.method, httpUrl)
|
||||
body := bytes.NewBuffer(nil)
|
||||
body.Write(reqj)
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", httpUrl, body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
// Authorization
|
||||
payloadHash := sha256.Sum256(reqj)
|
||||
tokenEvent := nostr.Event{
|
||||
Kind: 27235,
|
||||
CreatedAt: nostr.Now(),
|
||||
Tags: nostr.Tags{
|
||||
{"u", httpUrl},
|
||||
{"method", "POST"},
|
||||
{"payload", hex.EncodeToString(payloadHash[:])},
|
||||
},
|
||||
}
|
||||
if err := kr.SignEvent(ctx, &tokenEvent); err != nil {
|
||||
return fmt.Errorf("failed to sign token event: %w", err)
|
||||
}
|
||||
evtj, _ := json.Marshal(tokenEvent)
|
||||
req.Header.Set("Authorization", "Nostr "+base64.StdEncoding.EncodeToString(evtj))
|
||||
|
||||
// Content-Type
|
||||
req.Header.Set("Content-Type", "application/nostr+json+rpc")
|
||||
|
||||
// make request to relay
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
log("failed: %s\n", err)
|
||||
continue
|
||||
}
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log("failed to read response: %s\n", err)
|
||||
continue
|
||||
}
|
||||
if resp.StatusCode >= 300 {
|
||||
log("failed with status %d\n", resp.StatusCode)
|
||||
bodyPrintable := string(b)
|
||||
if len(bodyPrintable) > 300 {
|
||||
bodyPrintable = bodyPrintable[0:297] + "..."
|
||||
}
|
||||
log(bodyPrintable)
|
||||
continue
|
||||
}
|
||||
var response nip86.Response
|
||||
if err := json.Unmarshal(b, &response); err != nil {
|
||||
log("bad json response: %s\n", err)
|
||||
bodyPrintable := string(b)
|
||||
if len(bodyPrintable) > 300 {
|
||||
bodyPrintable = bodyPrintable[0:297] + "..."
|
||||
}
|
||||
log(bodyPrintable)
|
||||
continue
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
// print the result
|
||||
log("\n")
|
||||
pretty, _ := json.MarshalIndent(response, "", " ")
|
||||
stdout(string(pretty))
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
commands = append(commands, cmd)
|
||||
}
|
||||
|
||||
return commands
|
||||
})(),
|
||||
}
|
||||
|
||||
func declareFlag(argName string) cli.Flag {
|
||||
usage := "parameter for this management RPC call, see https://nips.nostr.com/86 for more information."
|
||||
switch argName {
|
||||
case "kind":
|
||||
return &cli.IntFlag{Name: argName, Required: true, Usage: usage}
|
||||
case "reason":
|
||||
return &cli.StringFlag{Name: argName, Usage: usage}
|
||||
default:
|
||||
return &cli.StringFlag{Name: argName, Required: true, Usage: usage}
|
||||
}
|
||||
}
|
||||
|
||||
func getArgument(c *cli.Command, argName string) any {
|
||||
switch argName {
|
||||
case "kind":
|
||||
return c.Int(argName)
|
||||
default:
|
||||
return c.String(argName)
|
||||
}
|
||||
}
|
||||
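
As a usage sketch of the command defined above: when no relay URL argument is given, it just prints the NIP-86 request body it would have sent (the `method`/`params` field names below follow the NIP-86 convention assumed for the `nip86.Request` type; the pubkey is reused from the README example):

```shell
~> nak admin banpubkey --pubkey a9e0f110f636f3191644110c19a33448daf09d7cda9708a769e91b7e91340208 --reason spam
{"method":"banpubkey","params":["a9e0f110f636f3191644110c19a33448daf09d7cda9708a769e91b7e91340208","spam"]}
```

With a relay URL appended, the same body is POSTed with `Content-Type: application/nostr+json+rpc` and an `Authorization: Nostr <base64-encoded kind-27235 event>` header signed by the key given via `--sec`.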
blossom.go (new file, 250 lines)
@@ -0,0 +1,250 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"fiatjaf.com/nostr/keyer"
|
||||
"fiatjaf.com/nostr/nipb0/blossom"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var blossomCmd = &cli.Command{
|
||||
Name: "blossom",
|
||||
Suggest: true,
|
||||
UseShortOptionHandling: true,
|
||||
Usage: "an army knife for blossom things",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: append(defaultKeyFlags,
|
||||
&cli.StringFlag{
|
||||
Name: "server",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "the hostname of the target mediaserver",
|
||||
Required: true,
|
||||
},
|
||||
),
|
||||
Commands: []*cli.Command{
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "lists blobs from a pubkey",
|
||||
Description: `takes one pubkey passed as an argument or derives one from the --sec supplied. if that is given then it will also pre-authorize the list, which some servers may require.`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
ArgsUsage: "[pubkey]",
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
var client *blossom.Client
|
||||
pubkey := c.Args().First()
|
||||
if pubkey != "" {
|
||||
pk, err := parsePubKey(pubkey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid public key '%s': %w", pubkey, err)
|
||||
}
|
||||
client = blossom.NewClient(c.String("server"), keyer.NewReadOnlySigner(pk))
|
||||
} else {
|
||||
var err error
|
||||
client, err = getBlossomClient(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
bds, err := client.List(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, bd := range bds {
|
||||
stdout(bd)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "upload",
|
||||
Usage: "uploads a file to a specific mediaserver.",
|
||||
Description: `takes any number of local file paths and uploads them to a mediaserver, printing the resulting blob descriptions when successful.`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
ArgsUsage: "[files...]",
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
client, err := getBlossomClient(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if isPiped() {
|
||||
// get file from stdin
|
||||
if c.Args().Len() > 0 {
|
||||
return fmt.Errorf("do not pass arguments when piping from stdin")
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read stdin: %w", err)
|
||||
}
|
||||
|
||||
bd, err := client.UploadBlob(ctx, bytes.NewReader(data), "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
j, _ := json.Marshal(bd)
|
||||
stdout(string(j))
|
||||
} else {
|
||||
// get filenames from arguments
|
||||
hasError := false
|
||||
for _, fpath := range c.Args().Slice() {
|
||||
bd, err := client.UploadFilePath(ctx, fpath)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", err)
|
||||
hasError = true
|
||||
continue
|
||||
}
|
||||
|
||||
j, _ := json.Marshal(bd)
|
||||
stdout(string(j))
|
||||
}
|
||||
|
||||
if hasError {
|
||||
os.Exit(3)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "download",
|
||||
Usage: "downloads files from mediaservers",
|
||||
Description: `takes any number of sha256 hashes as hex, downloads them and prints them to stdout (unless --output is specified).`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
ArgsUsage: "[sha256...]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringSliceFlag{
|
||||
Name: "output",
|
||||
Aliases: []string{"o"},
|
||||
Usage: "file name to save downloaded file to, can be passed multiple times when downloading multiple hashes",
|
||||
},
|
||||
},
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
client, err := getBlossomClient(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
outputs := c.StringSlice("output")
|
||||
|
||||
hasError := false
|
||||
for i, hash := range c.Args().Slice() {
|
||||
if len(outputs)-1 >= i && outputs[i] != "--" {
|
||||
// save to this file
|
||||
err := client.DownloadToFile(ctx, hash, outputs[i])
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", err)
|
||||
hasError = true
|
||||
}
|
||||
} else {
|
||||
// if output wasn't specified, print to stdout
|
||||
data, err := client.Download(ctx, hash)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", err)
|
||||
hasError = true
|
||||
continue
|
||||
}
|
||||
stdout(data)
|
||||
}
|
||||
}
|
||||
|
||||
if hasError {
|
||||
os.Exit(2)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "del",
|
||||
Aliases: []string{"delete"},
|
||||
Usage: "deletes a file from a mediaserver",
|
||||
Description: `takes any number of sha256 hashes, signs authorizations and deletes them from the current mediaserver.
|
||||
|
||||
if any of the files are not deleted the command will fail, otherwise it will succeed. it will also print error messages to stderr and the hashes it successfully deletes to stdout.`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
ArgsUsage: "[sha256...]",
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
client, err := getBlossomClient(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hasError := false
|
||||
for _, hash := range c.Args().Slice() {
|
||||
err := client.Delete(ctx, hash)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s\n", err)
|
||||
hasError = true
|
||||
continue
|
||||
}
|
||||
|
||||
stdout(hash)
|
||||
}
|
||||
|
||||
if hasError {
|
||||
os.Exit(3)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "check",
|
||||
Usage: "asks the mediaserver if it has the specified hashes.",
|
||||
Description: `uses the HEAD request to succinctly check if the server has the specified sha256 hash.
|
||||
|
||||
if any of the files are not found the command will fail, otherwise it will succeed. it will also print error messages to stderr and the hashes it finds to stdout.`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
ArgsUsage: "[sha256...]",
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
client, err := getBlossomClient(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hasError := false
|
||||
for _, hash := range c.Args().Slice() {
|
||||
err := client.Check(ctx, hash)
|
||||
if err != nil {
|
||||
hasError = true
|
||||
fmt.Fprintf(os.Stderr, "%s\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
stdout(hash)
|
||||
}
|
||||
|
||||
if hasError {
|
||||
os.Exit(2)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "mirror",
|
||||
Usage: "",
|
||||
Description: ``,
|
||||
DisableSliceFlagSeparator: true,
|
||||
ArgsUsage: "",
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func getBlossomClient(ctx context.Context, c *cli.Command) (*blossom.Client, error) {
|
||||
keyer, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blossom.NewClient(c.String("server"), keyer), nil
|
||||
}
|
||||
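
A quick usage sketch of these subcommands (server names are reused from the README examples above; the sha256 values are placeholders):

```shell
~> nak blossom --server blossom.azzamo.net list a9e0f110f636f3191644110c19a33448daf09d7cda9708a769e91b7e91340208
~> nak blossom --server blossom.azzamo.net check <sha256>
~> nak blossom --server blossom.azzamo.net --sec 01 del <sha256>
```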
bunker.go (new file, 491 lines)
@@ -0,0 +1,491 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/nip46"
|
||||
"github.com/fatih/color"
|
||||
"github.com/mdp/qrterminal/v3"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
const PERSISTENCE = "PERSISTENCE"
|
||||
|
||||
var bunker = &cli.Command{
|
||||
Name: "bunker",
|
||||
Usage: "starts a nip46 signer daemon with the given --sec key",
|
||||
ArgsUsage: "[relay...]",
|
||||
Description: ``,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "persist",
|
||||
Usage: "whether to read and store authorized keys from and to a config file",
|
||||
Category: PERSISTENCE,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "profile",
|
||||
Value: "default",
|
||||
Usage: "config file name to use for --persist mode (implies that if provided) -- based on --config-path, i.e. ~/.config/nak/",
|
||||
OnlyOnce: true,
|
||||
Category: PERSISTENCE,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "sec",
|
||||
Usage: "secret key to sign the event, as hex or nsec",
|
||||
DefaultText: "the key '1'",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "prompt-sec",
|
||||
Usage: "prompt the user to paste a hex or nsec with which to sign the event",
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "authorized-secrets",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "secrets for which we will always respond",
|
||||
},
|
||||
&PubKeySliceFlag{
|
||||
Name: "authorized-keys",
|
||||
Aliases: []string{"k"},
|
||||
Usage: "pubkeys for which we will always respond",
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "relay",
|
||||
Usage: "relays to connect to (can also be provided as naked arguments)",
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "qrcode",
|
||||
Usage: "display a QR code for the bunker URI",
|
||||
},
|
||||
},
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
// read config from file
|
||||
config := struct {
|
||||
AuthorizedKeys []nostr.PubKey `json:"authorized-keys"`
|
||||
Secret plainOrEncryptedKey `json:"sec"`
|
||||
Relays []string `json:"relays"`
|
||||
}{
|
||||
AuthorizedKeys: make([]nostr.PubKey, 0, 3),
|
||||
}
|
||||
baseRelaysUrls := appendUnique(c.Args().Slice(), c.StringSlice("relay")...)
|
||||
for i, url := range baseRelaysUrls {
|
||||
baseRelaysUrls[i] = nostr.NormalizeURL(url)
|
||||
}
|
||||
baseAuthorizedKeys := getPubKeySlice(c, "authorized-keys")
|
||||
|
||||
var baseSecret plainOrEncryptedKey
|
||||
{
|
||||
sec := c.String("sec")
|
||||
if c.Bool("prompt-sec") {
|
||||
var err error
|
||||
sec, err = askPassword("type your secret key as ncryptsec, nsec or hex: ", nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get secret key: %w", err)
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(sec, "ncryptsec1") {
|
||||
baseSecret.Encrypted = &sec
|
||||
} else if sec != "" {
|
||||
if prefix, ski, err := nip19.Decode(sec); err == nil && prefix == "nsec" {
|
||||
sk := ski.(nostr.SecretKey)
|
||||
baseSecret.Plain = &sk
|
||||
} else if sk, err := nostr.SecretKeyFromHex(sec); err != nil {
|
||||
return fmt.Errorf("invalid secret key: %w", err)
|
||||
} else {
|
||||
baseSecret.Plain = &sk
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// default case: persist() is nil
|
||||
var persist func()
|
||||
|
||||
if c.Bool("persist") || c.IsSet("profile") {
|
||||
path := filepath.Join(c.String("config-path"), "bunker")
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
path = filepath.Join(path, c.String("profile"))
|
||||
|
||||
persist = func() {
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
log(color.RedString("failed to persist: %w\n"), err)
|
||||
os.Exit(4)
|
||||
}
|
||||
data, err := json.MarshalIndent(config, "", " ")
|
||||
if err != nil {
|
||||
log(color.RedString("failed to persist: %w\n"), err)
|
||||
os.Exit(4)
|
||||
}
|
||||
if err := os.WriteFile(path, data, 0600); err != nil {
|
||||
log(color.RedString("failed to persist: %w\n"), err)
|
||||
os.Exit(4)
|
||||
}
|
||||
}
|
||||
|
||||
log(color.YellowString("reading config from %s\n"), path)
|
||||
b, err := os.ReadFile(path)
|
||||
if err == nil {
|
||||
if err := json.Unmarshal(b, &config); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
for i, url := range config.Relays {
|
||||
config.Relays[i] = nostr.NormalizeURL(url)
|
||||
}
|
||||
config.Relays = appendUnique(config.Relays, baseRelaysUrls...)
|
||||
config.AuthorizedKeys = appendUnique(config.AuthorizedKeys, baseAuthorizedKeys...)
|
||||
|
||||
if config.Secret.Plain == nil && config.Secret.Encrypted == nil {
|
||||
// we don't have any secret key stored, so just use whatever was given via flags
|
||||
config.Secret = baseSecret
|
||||
} else if baseSecret.Plain == nil && baseSecret.Encrypted == nil {
|
||||
// we didn't provide any keys, so we just use the stored one
|
||||
} else {
|
||||
// we have a secret key stored
|
||||
// if we also provided a key we check if they match and fail otherwise
|
||||
if !baseSecret.equals(config.Secret) {
|
||||
return fmt.Errorf("--sec provided conflicts with stored, you should create a new --profile or omit the --sec flag")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
config.Secret = baseSecret
|
||||
config.Relays = baseRelaysUrls
|
||||
config.AuthorizedKeys = baseAuthorizedKeys
|
||||
}
|
||||
|
||||
// if we got here without any keys set (no flags, first time using a profile), use the default
|
||||
if config.Secret.Plain == nil && config.Secret.Encrypted == nil {
|
||||
sec := os.Getenv("NOSTR_SECRET_KEY")
|
||||
if sec == "" {
|
||||
sec = defaultKey
|
||||
}
|
||||
sk, err := nostr.SecretKeyFromHex(sec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("default key is wrong: %w", err)
|
||||
}
|
||||
config.Secret.Plain = &sk
|
||||
}
|
||||
|
||||
if len(config.Relays) == 0 {
|
||||
return fmt.Errorf("no relays given")
|
||||
}
|
||||
|
||||
// decrypt key here if necessary
|
||||
var sec nostr.SecretKey
|
||||
if config.Secret.Plain != nil {
|
||||
sec = *config.Secret.Plain
|
||||
} else {
|
||||
plain, err := promptDecrypt(*config.Secret.Encrypted)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decrypt: %w", err)
|
||||
}
|
||||
sec = plain
|
||||
}
|
||||
|
||||
if persist != nil {
|
||||
persist()
|
||||
}
|
||||
|
||||
// try to connect to the relays here
|
||||
qs := url.Values{}
|
||||
relayURLs := make([]string, 0, len(config.Relays))
|
||||
relays := connectToAllRelays(ctx, c, config.Relays, nil, nostr.PoolOptions{})
|
||||
if len(relays) == 0 {
|
||||
log("failed to connect to any of the given relays.\n")
|
||||
os.Exit(3)
|
||||
}
|
||||
for _, relay := range relays {
|
||||
relayURLs = append(relayURLs, relay.URL)
|
||||
qs.Add("relay", relay.URL)
|
||||
}
|
||||
if len(relayURLs) == 0 {
|
||||
return fmt.Errorf("not connected to any relays: please specify at least one")
|
||||
}
|
||||
|
||||
// other arguments
|
||||
authorizedSecrets := c.StringSlice("authorized-secrets")
|
||||
|
||||
// this will be used to auto-authorize the next client that connects and isn't pre-authorized
|
||||
// it will be stored
|
||||
newSecret := randString(12)
|
||||
|
||||
// static information
|
||||
pubkey := sec.Public()
|
||||
npub := nip19.EncodeNpub(pubkey)
|
||||
|
||||
// this function will be called every now and then
|
||||
printBunkerInfo := func() {
|
||||
qs.Set("secret", newSecret)
|
||||
bunkerURI := fmt.Sprintf("bunker://%s?%s", pubkey.Hex(), qs.Encode())
|
||||
|
||||
authorizedKeysStr := ""
|
||||
if len(config.AuthorizedKeys) != 0 {
|
||||
authorizedKeysStr = "\n authorized keys:"
|
||||
for _, pubkey := range config.AuthorizedKeys {
|
||||
authorizedKeysStr += "\n - " + colors.italic(pubkey.Hex())
|
||||
}
|
||||
}
|
||||
|
||||
authorizedSecretsStr := ""
|
||||
if len(authorizedSecrets) != 0 {
|
||||
authorizedSecretsStr = "\n authorized secrets:\n - " + colors.italic(strings.Join(authorizedSecrets, "\n - "))
|
||||
}
|
||||
|
||||
preauthorizedFlags := ""
|
||||
for _, k := range config.AuthorizedKeys {
|
||||
preauthorizedFlags += " -k " + k.Hex()
|
||||
}
|
||||
for _, s := range authorizedSecrets {
|
||||
preauthorizedFlags += " -s " + s
|
||||
}
|
||||
|
||||
secretKeyFlag := ""
|
||||
if sec := c.String("sec"); sec != "" {
|
||||
secretKeyFlag = "--sec " + sec
|
||||
}
|
||||
|
||||
relayURLsPossiblyWithoutSchema := make([]string, len(relayURLs))
|
||||
for i, url := range relayURLs {
|
||||
if strings.HasPrefix(url, "wss://") {
|
||||
relayURLsPossiblyWithoutSchema[i] = url[6:]
|
||||
} else {
|
||||
relayURLsPossiblyWithoutSchema[i] = url
|
||||
}
|
||||
}
|
||||
|
||||
// only print the restart command if not persisting:
|
||||
if persist == nil {
|
||||
restartCommand := fmt.Sprintf("nak bunker %s%s %s",
|
||||
secretKeyFlag,
|
||||
preauthorizedFlags,
|
||||
strings.Join(relayURLsPossiblyWithoutSchema, " "),
|
||||
)
|
||||
|
||||
log("listening at %v:\n pubkey: %s \n npub: %s%s%s\n to restart: %s\n bunker: %s\n\n",
|
||||
colors.bold(relayURLs),
|
||||
colors.bold(pubkey.Hex()),
|
||||
colors.bold(npub),
|
||||
authorizedKeysStr,
|
||||
authorizedSecretsStr,
|
||||
color.CyanString(restartCommand),
|
||||
colors.bold(bunkerURI),
|
||||
)
|
||||
} else {
|
||||
// otherwise just print the data
|
||||
log("listening at %v:\n pubkey: %s \n npub: %s%s%s\n bunker: %s\n\n",
|
||||
colors.bold(relayURLs),
|
||||
colors.bold(pubkey.Hex()),
|
||||
colors.bold(npub),
|
||||
authorizedKeysStr,
|
||||
authorizedSecretsStr,
|
||||
colors.bold(bunkerURI),
|
||||
)
|
||||
}
|
||||
|
||||
// print QR code if requested
|
||||
if c.Bool("qrcode") {
|
||||
log("QR Code for bunker URI:\n")
|
||||
qrterminal.Generate(bunkerURI, qrterminal.L, os.Stdout)
|
||||
log("\n\n")
|
||||
}
|
||||
}
|
||||
printBunkerInfo()
|
||||
|
||||
// subscribe to relays
|
||||
events := sys.Pool.SubscribeMany(ctx, relayURLs, nostr.Filter{
|
||||
Kinds: []nostr.Kind{nostr.KindNostrConnect},
|
||||
Tags: nostr.TagMap{"p": []string{pubkey.Hex()}},
|
||||
Since: nostr.Now(),
|
||||
LimitZero: true,
|
||||
}, nostr.SubscriptionOptions{
|
||||
Label: "nak-bunker",
|
||||
})
|
||||
|
||||
signer := nip46.NewStaticKeySigner(sec)
|
||||
handlerWg := sync.WaitGroup{}
|
||||
printLock := sync.Mutex{}
|
||||
|
||||
// just a gimmick
|
||||
var cancelPreviousBunkerInfoPrint context.CancelFunc
|
||||
_, cancel := context.WithCancel(ctx)
|
||||
cancelPreviousBunkerInfoPrint = cancel
|
||||
|
||||
// asking user for authorization
|
||||
signer.AuthorizeRequest = func(harmless bool, from nostr.PubKey, secret string) bool {
|
||||
if secret == newSecret {
|
||||
// store this key
|
||||
config.AuthorizedKeys = appendUnique(config.AuthorizedKeys, from)
|
||||
// discard this and generate a new secret
|
||||
newSecret = randString(12)
|
||||
// print bunker info again after this
|
||||
go func() {
|
||||
time.Sleep(3 * time.Second)
|
||||
printBunkerInfo()
|
||||
}()
|
||||
|
||||
if persist != nil {
|
||||
persist()
|
||||
}
|
||||
}
|
||||
|
||||
return slices.Contains(config.AuthorizedKeys, from) || slices.Contains(authorizedSecrets, secret)
|
||||
}
|
||||
|
||||
for ie := range events {
|
||||
cancelPreviousBunkerInfoPrint() // this prevents us from printing a million bunker info blocks
|
||||
|
||||
// handle the NIP-46 request event
|
||||
req, resp, eventResponse, err := signer.HandleRequest(ctx, ie.Event)
|
||||
if err != nil {
|
||||
log("< failed to handle request from %s: %s\n", ie.Event.PubKey, err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
jreq, _ := json.MarshalIndent(req, "", " ")
|
||||
log("- got request from '%s': %s\n", color.New(color.Bold, color.FgBlue).Sprint(ie.Event.PubKey.Hex()), string(jreq))
|
||||
jresp, _ := json.MarshalIndent(resp, "", " ")
|
||||
log("~ responding with %s\n", string(jresp))
|
||||
|
||||
handlerWg.Add(len(relayURLs))
|
||||
for _, relayURL := range relayURLs {
|
||||
go func(relayURL string) {
|
||||
// always mark this relay as handled, even when EnsureRelay fails, so handlerWg.Wait() can't hang
defer handlerWg.Done()
if relay, _ := sys.Pool.EnsureRelay(relayURL); relay != nil {
|
||||
err := relay.Publish(ctx, eventResponse)
|
||||
printLock.Lock()
|
||||
if err == nil {
|
||||
log("* sent response through %s\n", relay.URL)
|
||||
} else {
|
||||
log("* failed to send response: %s\n", err)
|
||||
}
|
||||
printLock.Unlock()
|
||||
}
|
||||
}(relayURL)
|
||||
}
|
||||
handlerWg.Wait()
|
||||
|
||||
// just after handling one request we trigger this
|
||||
go func() {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
cancelPreviousBunkerInfoPrint = cancel
|
||||
// the idea is that we will print the bunker URL again so it is easier for users to copy-paste
|
||||
// but we will only do so if the bunker has been inactive for more than 5 minutes
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-time.After(time.Minute * 5):
|
||||
log("\n")
|
||||
printBunkerInfo()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Commands: []*cli.Command{
|
||||
{
|
||||
Name: "connect",
|
||||
Usage: "use the client-initiated NostrConnect flow of NIP46",
|
||||
ArgsUsage: "<nostrconnect-uri>",
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
if c.Args().Len() != 1 {
|
||||
return fmt.Errorf("must be called with a nostrconnect://... uri")
|
||||
}
|
||||
|
||||
uri, err := url.Parse(c.Args().First())
|
||||
if err != nil || uri.Scheme != "nostrconnect" {
|
||||
return fmt.Errorf("invalid uri")
|
||||
}
|
||||
|
||||
// TODO
|
||||
|
||||
return fmt.Errorf("this is not implemented yet")
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
type plainOrEncryptedKey struct {
|
||||
Plain *nostr.SecretKey
|
||||
Encrypted *string
|
||||
}
|
||||
|
||||
func (pe plainOrEncryptedKey) MarshalJSON() ([]byte, error) {
|
||||
if pe.Plain != nil {
|
||||
res := make([]byte, 66)
|
||||
hex.Encode(res[1:], (*pe.Plain)[:])
|
||||
res[0] = '"'
|
||||
res[65] = '"'
|
||||
return res, nil
|
||||
} else if pe.Encrypted != nil {
|
||||
return json.Marshal(*pe.Encrypted)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("no key to marshal")
|
||||
}
|
||||
|
||||
func (pe *plainOrEncryptedKey) UnmarshalJSON(buf []byte) error {
|
||||
if len(buf) == 66 {
|
||||
sk, err := nostr.SecretKeyFromHex(string(buf[1 : 1+64]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pe.Plain = &sk
|
||||
return nil
|
||||
} else if bytes.HasPrefix(buf, []byte("\"nsec")) {
|
||||
_, v, err := nip19.Decode(string(buf[1 : len(buf)-1]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sk := v.(nostr.SecretKey)
|
||||
pe.Plain = &sk
|
||||
return nil
|
||||
} else if bytes.HasPrefix(buf, []byte("\"ncryptsec1")) {
|
||||
ncryptsec := string(buf[1 : len(buf)-1])
|
||||
pe.Encrypted = &ncryptsec
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unrecognized key format '%s'", string(buf))
|
||||
}
|
||||
|
||||
func (a plainOrEncryptedKey) equals(b plainOrEncryptedKey) bool {
|
||||
if a.Plain == nil && b.Plain != nil {
|
||||
return false
|
||||
}
|
||||
if a.Plain != nil && b.Plain == nil {
|
||||
return false
|
||||
}
|
||||
if a.Plain != nil && b.Plain != nil && *a.Plain != *b.Plain {
|
||||
return false
|
||||
}
|
||||
|
||||
if a.Encrypted == nil && b.Encrypted != nil {
|
||||
return false
|
||||
}
|
||||
if a.Encrypted != nil && b.Encrypted == nil {
|
||||
return false
|
||||
}
|
||||
if a.Encrypted != nil && b.Encrypted != nil && *a.Encrypted != *b.Encrypted {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
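For reference, a minimal sketch of how the plainOrEncryptedKey serialization above behaves, assuming the fiatjaf.com/nostr API used throughout this diff (the key value is just the throwaway key '1'): a plain key is written as 64 hex characters in quotes, an ncryptsec string is written verbatim, and UnmarshalJSON accepts hex, nsec and ncryptsec forms.

package main

import (
	"encoding/json"
	"fmt"

	"fiatjaf.com/nostr"
)

func exampleKeyRoundTrip() {
	sk, _ := nostr.SecretKeyFromHex("0000000000000000000000000000000000000000000000000000000000000001")
	k := plainOrEncryptedKey{Plain: &sk}

	b, _ := json.Marshal(k) // 66 bytes: the 64 hex chars wrapped in quotes
	fmt.Println(string(b))

	var back plainOrEncryptedKey
	_ = json.Unmarshal(b, &back) // back.Plain is set, back.Encrypted stays nil
	fmt.Println(back.equals(k))  // true
}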
|
||||
231
cli_test.go
Normal file
@@ -0,0 +1,231 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
stdjson "encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// these tests are tricky because commands and flags are declared as globals and values set in one call may persist
|
||||
// to the next. for example, if in the first test we set --limit 2 and then don't specify --limit in the second, then
|
||||
// it will still return true for cmd.IsSet("limit") and then we will set .LimitZero = true
|
||||
|
||||
func call(t *testing.T, cmd string) string {
|
||||
var output strings.Builder
|
||||
stdout = func(a ...any) {
|
||||
output.WriteString(fmt.Sprint(a...))
|
||||
output.WriteString("\n")
|
||||
}
|
||||
err := app.Run(t.Context(), strings.Split(cmd, " "))
|
||||
require.NoError(t, err)
|
||||
|
||||
return strings.TrimSpace(output.String())
|
||||
}
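A hypothetical test illustrating the flag-leak caveat described in the comment above (this test is not part of the diff; it only shows why ordering matters when commands and flags are package-level globals):

func TestFlagLeakIllustration(t *testing.T) {
	// the first call marks the global "limit" flag as set...
	_ = call(t, "nak req -k 1 --limit 2")

	// ...so in this second call cmd.IsSet("limit") may still report true even though
	// --limit was not passed, and code keyed off IsSet (like LimitZero) can misbehave
	_ = call(t, "nak req -k 1")
}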
|
||||
|
||||
func TestEventBasic(t *testing.T) {
|
||||
output := call(t, "nak event --ts 1699485669")
|
||||
|
||||
var evt nostr.Event
|
||||
err := stdjson.Unmarshal([]byte(output), &evt)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, nostr.Kind(1), evt.Kind)
|
||||
require.Equal(t, nostr.Timestamp(1699485669), evt.CreatedAt)
|
||||
require.Equal(t, "hello from the nostr army knife", evt.Content)
|
||||
require.Equal(t, "36d88cf5fcc449f2390a424907023eda7a74278120eebab8d02797cd92e7e29c", evt.ID.Hex())
|
||||
require.Equal(t, "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", evt.PubKey.Hex())
|
||||
require.Equal(t, "68e71a192e8abcf8582a222434ac823ecc50607450ebe8cc4c145eb047794cc382dc3f888ce879d2f404f5ba6085a47601360a0fa2dd4b50d317bd0c6197c2c2", hex.EncodeToString(evt.Sig[:]))
|
||||
}
|
||||
|
||||
func TestEventComplex(t *testing.T) {
|
||||
output := call(t, "nak event --ts 1699485669 -k 11 -c skjdbaskd --sec 17 -t t=spam -e 36d88cf5fcc449f2390a424907023eda7a74278120eebab8d02797cd92e7e29c -t r=https://abc.def?name=foobar;nothing")
|
||||
|
||||
var evt nostr.Event
|
||||
err := stdjson.Unmarshal([]byte(output), &evt)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, nostr.Kind(11), evt.Kind)
|
||||
require.Equal(t, nostr.Timestamp(1699485669), evt.CreatedAt)
|
||||
require.Equal(t, "skjdbaskd", evt.Content)
|
||||
require.Equal(t, "19aba166dcf354bf5ef64f4afe69ada1eb851495001ee05e07d393ee8c8ea179", evt.ID.Hex())
|
||||
require.Equal(t, "2fa2104d6b38d11b0230010559879124e42ab8dfeff5ff29dc9cdadd4ecacc3f", evt.PubKey.Hex())
|
||||
require.Equal(t, "cf452def4a68341c897c3fc96fa34dc6895a5b8cc266d4c041bcdf758ec992ec5adb8b0179e98552aaaf9450526a26d7e62e413b15b1c57e0cfc8db6b29215d7", hex.EncodeToString(evt.Sig[:]))
|
||||
|
||||
require.Len(t, evt.Tags, 3)
|
||||
require.Equal(t, nostr.Tag{"t", "spam"}, evt.Tags[0])
|
||||
require.Equal(t, nostr.Tag{"r", "https://abc.def?name=foobar", "nothing"}, evt.Tags[1])
|
||||
require.Equal(t, nostr.Tag{"e", "36d88cf5fcc449f2390a424907023eda7a74278120eebab8d02797cd92e7e29c"}, evt.Tags[2])
|
||||
}
|
||||
|
||||
func TestEncode(t *testing.T) {
|
||||
require.Equal(t,
|
||||
"npub156n8a7wuhwk9tgrzjh8gwzc8q2dlekedec5djk0js9d3d7qhnq3qjpdq28",
|
||||
call(t, "nak encode npub a6a67ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179822"),
|
||||
)
|
||||
require.Equal(t,
|
||||
`nprofile1qqs2dfn7l8wthtz45p3ftn58pvrs9xlumvkuu2xet8egzkcklqtesgspz9mhxue69uhk27rpd4cxcefwvdhk6fl5jug
|
||||
nprofile1qqs22kfpwwt4mmvlsd4f2uh23vg60ctvadnyvntx659jw93l0upe6tqpz9mhxue69uhk27rpd4cxcefwvdhk64h265a`,
|
||||
call(t, "nak encode nprofile -r wss://example.com a6a67ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179822 a5592173975ded9f836a9572ea8b11a7e16ceb66464d66d50b27163f7f039d2c"),
|
||||
)
|
||||
}
|
||||
|
||||
func TestDecodeNaddr(t *testing.T) {
|
||||
output := call(t, "nak decode naddr1qqyrgcmyxe3kvefhqyxhwumn8ghj7mn0wvhxcmmvqgs9kqvr4dkruv3t7n2pc6e6a7v9v2s5fprmwjv4gde8c4fe5y29v0srqsqqql9ngrt6tu")
|
||||
|
||||
var result map[string]interface{}
|
||||
err := stdjson.Unmarshal([]byte(output), &result)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "5b0183ab6c3e322bf4d41c6b3aef98562a144847b7499543727c5539a114563e", result["pubkey"])
|
||||
require.Equal(t, float64(31923), result["kind"])
|
||||
require.Equal(t, "4cd6cfe7", result["identifier"])
|
||||
require.Equal(t, []interface{}{"wss://nos.lol"}, result["relays"])
|
||||
}
|
||||
|
||||
func TestDecodePubkey(t *testing.T) {
|
||||
output := call(t, "nak decode -p npub10xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqpkge6d npub1ccz8l9zpa47k6vz9gphftsrumpw80rjt3nhnefat4symjhrsnmjs38mnyd")
|
||||
|
||||
expected := "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798\nc6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5"
|
||||
require.Equal(t, expected, output)
|
||||
}
|
||||
|
||||
func TestDecodeMultipleNpubs(t *testing.T) {
|
||||
output := call(t, "nak decode npub1l2vyh47mk2p0qlsku7hg0vn29faehy9hy34ygaclpn66ukqp3afqutajft npub10000003zmk89narqpczy4ff6rnuht2wu05na7kpnh3mak7z2tqzsv8vwqk")
|
||||
require.Len(t, strings.Split(output, "\n"), 2)
|
||||
}
|
||||
|
||||
func TestDecodeEventId(t *testing.T) {
|
||||
output := call(t, "nak decode -e nevent1qyd8wumn8ghj7urewfsk66ty9enxjct5dfskvtnrdakj7qgmwaehxw309aex2mrp0yh8wetnw3jhymnzw33jucm0d5hszxthwden5te0wfjkccte9eekummjwsh8xmmrd9skctcpzamhxue69uhkzarvv9ejumn0wd68ytnvv9hxgtcqyqllp5v5j0nxr74fptqxkhvfv0h3uj870qpk3ln8a58agyxl3fka296ewr8 nevent1qqswh48lurxs8u0pll9qj2rzctvjncwhstpzlstq59rdtzlty79awns5hl5uf")
|
||||
|
||||
expected := "3ff0d19493e661faa90ac06b5d8963ef1e48fe780368fe67ed0fd410df8a6dd5\nebd4ffe0cd03f1e1ffca092862c2d929e1d782c22fc160a146d58beb278bd74e"
|
||||
require.Equal(t, expected, output)
|
||||
}
|
||||
|
||||
func TestReq(t *testing.T) {
|
||||
output := call(t, "nak req -k 1 -l 18 -a 2fa2104d6b38d11b0230010559879124e42ab8dfeff5ff29dc9cdadd4ecacc3f -e aec4de6d051a7c2b6ca2d087903d42051a31e07fb742f1240970084822de10a6")
|
||||
|
||||
var result []interface{}
|
||||
err := stdjson.Unmarshal([]byte(output), &result)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "REQ", result[0])
|
||||
require.Equal(t, "nak", result[1])
|
||||
|
||||
filter := result[2].(map[string]interface{})
|
||||
require.Equal(t, []interface{}{float64(1)}, filter["kinds"])
|
||||
require.Equal(t, []interface{}{"2fa2104d6b38d11b0230010559879124e42ab8dfeff5ff29dc9cdadd4ecacc3f"}, filter["authors"])
|
||||
require.Equal(t, float64(18), filter["limit"])
|
||||
require.Equal(t, []interface{}{"aec4de6d051a7c2b6ca2d087903d42051a31e07fb742f1240970084822de10a6"}, filter["#e"])
|
||||
}
|
||||
|
||||
func TestMultipleFetch(t *testing.T) {
|
||||
output := call(t, "nak fetch naddr1qqyrgcmyxe3kvefhqyxhwumn8ghj7mn0wvhxcmmvqgs9kqvr4dkruv3t7n2pc6e6a7v9v2s5fprmwjv4gde8c4fe5y29v0srqsqqql9ngrt6tu nevent1qyd8wumn8ghj7urewfsk66ty9enxjct5dfskvtnrdakj7qgmwaehxw309aex2mrp0yh8wetnw3jhymnzw33jucm0d5hszxthwden5te0wfjkccte9eekummjwsh8xmmrd9skctcpzamhxue69uhkzarvv9ejumn0wd68ytnvv9hxgtcqyqllp5v5j0nxr74fptqxkhvfv0h3uj870qpk3ln8a58agyxl3fka296ewr8")
|
||||
|
||||
var events []nostr.Event
|
||||
for _, line := range strings.Split(output, "\n") {
|
||||
var evt nostr.Event
|
||||
err := stdjson.Unmarshal([]byte(line), &evt)
|
||||
require.NoError(t, err)
|
||||
events = append(events, evt)
|
||||
}
|
||||
|
||||
require.Len(t, events, 2)
|
||||
|
||||
// first event validation
|
||||
require.Equal(t, nostr.Kind(31923), events[0].Kind)
|
||||
require.Equal(t, "9ae5014573fc75ced00b343868d2cd9343ebcbbae50591c6fa8ae1cd99568f05", events[0].ID.Hex())
|
||||
require.Equal(t, "5b0183ab6c3e322bf4d41c6b3aef98562a144847b7499543727c5539a114563e", events[0].PubKey.Hex())
|
||||
require.Equal(t, nostr.Timestamp(1707764605), events[0].CreatedAt)
|
||||
|
||||
// second event validation
|
||||
require.Equal(t, nostr.Kind(1), events[1].Kind)
|
||||
require.Equal(t, "3ff0d19493e661faa90ac06b5d8963ef1e48fe780368fe67ed0fd410df8a6dd5", events[1].ID.Hex())
|
||||
require.Equal(t, "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d", events[1].PubKey.Hex())
|
||||
require.Equal(t, nostr.Timestamp(1710759386), events[1].CreatedAt)
|
||||
}
|
||||
|
||||
func TestKeyPublic(t *testing.T) {
|
||||
output := call(t, "nak key public 3ff0d19493e661faa90ac06b5d8963ef1e48fe780368fe67ed0fd410df8a6dd5 3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d")
|
||||
|
||||
expected := "70f7120d065870513a6bddb61c8d400ad1e43449b1900ffdb5551e4c421375c8\n718d756f60cf5179ef35b39dc6db3ff58f04c0734f81f6d4410f0b047ddf9029"
|
||||
require.Equal(t, expected, output)
|
||||
}
|
||||
|
||||
func TestKeyDecrypt(t *testing.T) {
|
||||
output := call(t, "nak key decrypt ncryptsec1qgg2gx2a7hxpsse2zulrv7m8qwccvl3mh8e9k8vtz3wpyrwuuclaq73gz7ddt5kpa93qyfhfjakguuf8uhw90jn6mszh7kqeh9mxzlyw8hy75fluzx4h75frwmu2yngsq7hx7w32d0vdyxyns5g6rqft banana")
|
||||
require.Equal(t, "718d756f60cf5179ef35b39dc6db3ff58f04c0734f81f6d4410f0b047ddf9029", output)
|
||||
}
|
||||
|
||||
func TestReqIdFromRelay(t *testing.T) {
|
||||
output := call(t, "nak req -i 20a6606ed548fe7107533cf3416ce1aa5e957c315c2a40249e12bd9873dca7da --limit 1 nos.lol")
|
||||
|
||||
var evt nostr.Event
|
||||
err := stdjson.Unmarshal([]byte(output), &evt)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, nostr.Kind(1), evt.Kind)
|
||||
require.Equal(t, "20a6606ed548fe7107533cf3416ce1aa5e957c315c2a40249e12bd9873dca7da", evt.ID.Hex())
|
||||
require.Equal(t, "dd664d5e4016433a8cd69f005ae1480804351789b59de5af06276de65633d319", evt.PubKey.Hex())
|
||||
require.Equal(t, nostr.Timestamp(1720972243), evt.CreatedAt)
|
||||
require.Equal(t, "Yeah, so bizarre, but I guess most people are meant to be serfs.", evt.Content)
|
||||
}
|
||||
|
||||
func TestReqWithFlagsAfter1(t *testing.T) {
|
||||
output := call(t, "nak req nos.lol -i 20a6606ed548fe7107533cf3416ce1aa5e957c315c2a40249e12bd9873dca7da --limit 1")
|
||||
|
||||
var evt nostr.Event
|
||||
err := stdjson.Unmarshal([]byte(output), &evt)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, nostr.Kind(1), evt.Kind)
|
||||
require.Equal(t, "20a6606ed548fe7107533cf3416ce1aa5e957c315c2a40249e12bd9873dca7da", evt.ID.Hex())
|
||||
require.Equal(t, "dd664d5e4016433a8cd69f005ae1480804351789b59de5af06276de65633d319", evt.PubKey.Hex())
|
||||
require.Equal(t, nostr.Timestamp(1720972243), evt.CreatedAt)
|
||||
require.Equal(t, "Yeah, so bizarre, but I guess most people are meant to be serfs.", evt.Content)
|
||||
}
|
||||
|
||||
func TestReqWithFlagsAfter2(t *testing.T) {
|
||||
output := call(t, "nak req -e 893d4c10f1c230240812c6bdf9ad877eed1e29e87029d153820c24680bb183b1 nostr.mom --author 2a7dcf382bcc96a393ada5c975f500393b3f7be6e466bff220aa161ad6b15eb6 --limit 1 -k 7")
|
||||
|
||||
var evt nostr.Event
|
||||
err := stdjson.Unmarshal([]byte(output), &evt)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, nostr.Kind(7), evt.Kind)
|
||||
require.Equal(t, "9b4868b068ea34ae51092807586c4541b3569d9efc23862aea48ef13de275857", evt.ID.Hex())
|
||||
require.Equal(t, "2a7dcf382bcc96a393ada5c975f500393b3f7be6e466bff220aa161ad6b15eb6", evt.PubKey.Hex())
|
||||
require.Equal(t, nostr.Timestamp(1720987327), evt.CreatedAt)
|
||||
require.Equal(t, "❤️", evt.Content)
|
||||
}
|
||||
|
||||
func TestReqWithFlagsAfter3(t *testing.T) {
|
||||
output := call(t, "nak req --limit 1 pyramid.fiatjaf.com -a 3f770d65d3a764a9c5cb503ae123e62ec7598ad035d836e2a810f3877a745b24 -qp 3f770d65d3a764a9c5cb503ae123e62ec7598ad035d836e2a810f3877a745b24 -e 9f3c1121c96edf17d84b9194f74d66d012b28c4e25b3ef190582c76b8546a188")
|
||||
|
||||
var evt nostr.Event
|
||||
err := stdjson.Unmarshal([]byte(output), &evt)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, nostr.Kind(1), evt.Kind)
|
||||
require.Equal(t, "101572c80ebdc963dab8440f6307387a3023b6d90f7e495d6c5ee1ef77045a67", evt.ID.Hex())
|
||||
require.Equal(t, "3f770d65d3a764a9c5cb503ae123e62ec7598ad035d836e2a810f3877a745b24", evt.PubKey.Hex())
|
||||
require.Equal(t, nostr.Timestamp(1720987305), evt.CreatedAt)
|
||||
require.Equal(t, "Nope. I grew up playing in the woods. Never once saw a bear in the woods. If I did, I'd probably shiy my pants, then scream at it like I was a crazy person with my arms above my head to make me seem huge.", evt.Content)
|
||||
}
|
||||
|
||||
func TestNaturalTimestamps(t *testing.T) {
|
||||
output := call(t, "nak event -t plu=pla -e 3f770d65d3a764a9c5cb503ae123e62ec7598ad035d836e2a810f3877a745b24 --ts '2018-May-19T03:37:19' -c nn")
|
||||
|
||||
var evt nostr.Event
|
||||
err := stdjson.Unmarshal([]byte(output), &evt)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, nostr.Kind(1), evt.Kind)
|
||||
require.Equal(t, "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", evt.PubKey.Hex())
|
||||
require.Equal(t, nostr.Timestamp(1526711839), evt.CreatedAt)
|
||||
require.Equal(t, "nn", evt.Content)
|
||||
}
|
||||
119
count.go
@@ -1,24 +1,27 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/urfave/cli/v2"
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip45"
|
||||
"fiatjaf.com/nostr/nip45/hyperloglog"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var count = &cli.Command{
|
||||
Name: "count",
|
||||
Usage: "generates encoded COUNT messages and optionally use them to talk to relays",
|
||||
Description: `outputs a NIP-45 request (the flags are mostly the same as 'nak req').`,
|
||||
Name: "count",
|
||||
Usage: "generates encoded COUNT messages and optionally use them to talk to relays",
|
||||
Description: `outputs a nip45 request (the flags are mostly the same as 'nak req').`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringSliceFlag{
|
||||
&PubKeySliceFlag{
|
||||
Name: "author",
|
||||
Aliases: []string{"a"},
|
||||
Usage: "only accept events from these authors (pubkey as hex)",
|
||||
Usage: "only accept events from these authors",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.IntSliceFlag{
|
||||
@@ -43,13 +46,13 @@ var count = &cli.Command{
|
||||
Usage: "shortcut for --tag p=<value>",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
&NaturalTimeFlag{
|
||||
Name: "since",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "only accept events newer than this (unix timestamp)",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
&NaturalTimeFlag{
|
||||
Name: "until",
|
||||
Aliases: []string{"u"},
|
||||
Usage: "only accept events older than this (unix timestamp)",
|
||||
@@ -63,33 +66,51 @@ var count = &cli.Command{
|
||||
},
|
||||
},
|
||||
ArgsUsage: "[relay...]",
|
||||
Action: func(c *cli.Context) error {
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
biggerUrlSize := 0
|
||||
relayUrls := c.Args().Slice()
|
||||
if len(relayUrls) > 0 {
|
||||
relays := connectToAllRelays(ctx, c, relayUrls, nil, nostr.PoolOptions{})
|
||||
if len(relays) == 0 {
|
||||
log("failed to connect to any of the given relays.\n")
|
||||
os.Exit(3)
|
||||
}
|
||||
relayUrls = make([]string, len(relays))
|
||||
for i, relay := range relays {
|
||||
relayUrls[i] = relay.URL
|
||||
if len(relay.URL) > biggerUrlSize {
|
||||
biggerUrlSize = len(relay.URL)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
filter := nostr.Filter{}
|
||||
|
||||
if authors := c.StringSlice("author"); len(authors) > 0 {
|
||||
if authors := getPubKeySlice(c, "author"); len(authors) > 0 {
|
||||
filter.Authors = authors
|
||||
}
|
||||
if ids := c.StringSlice("id"); len(ids) > 0 {
|
||||
filter.IDs = ids
|
||||
}
|
||||
if kinds := c.IntSlice("kind"); len(kinds) > 0 {
|
||||
if kinds64 := c.IntSlice("kind"); len(kinds64) > 0 {
|
||||
kinds := make([]nostr.Kind, len(kinds64))
|
||||
for i, v := range kinds64 {
|
||||
kinds[i] = nostr.Kind(v)
|
||||
}
|
||||
filter.Kinds = kinds
|
||||
}
|
||||
|
||||
tags := make([][]string, 0, 5)
|
||||
for _, tagFlag := range c.StringSlice("tag") {
|
||||
spl := strings.Split(tagFlag, "=")
|
||||
if len(spl) == 2 && len(spl[0]) == 1 {
|
||||
tags = append(tags, spl)
|
||||
spl := strings.SplitN(tagFlag, "=", 2)
|
||||
if len(spl) == 2 {
|
||||
tags = append(tags, []string{spl[0], decodeTagValue(spl[1])})
|
||||
} else {
|
||||
return fmt.Errorf("invalid --tag '%s'", tagFlag)
|
||||
}
|
||||
}
|
||||
for _, etag := range c.StringSlice("e") {
|
||||
tags = append(tags, []string{"e", etag})
|
||||
tags = append(tags, []string{"e", decodeTagValue(etag)})
|
||||
}
|
||||
for _, ptag := range c.StringSlice("p") {
|
||||
tags = append(tags, []string{"p", ptag})
|
||||
tags = append(tags, []string{"p", decodeTagValue(ptag)})
|
||||
}
|
||||
if len(tags) > 0 {
|
||||
filter.Tags = make(nostr.TagMap)
|
||||
@@ -101,45 +122,55 @@ var count = &cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
if since := c.Int("since"); since != 0 {
|
||||
ts := nostr.Timestamp(since)
|
||||
filter.Since = &ts
|
||||
if c.IsSet("since") {
|
||||
filter.Since = getNaturalDate(c, "since")
|
||||
}
|
||||
if until := c.Int("until"); until != 0 {
|
||||
ts := nostr.Timestamp(until)
|
||||
filter.Until = &ts
|
||||
}
|
||||
if limit := c.Int("limit"); limit != 0 {
|
||||
filter.Limit = limit
|
||||
if c.IsSet("until") {
|
||||
filter.Until = getNaturalDate(c, "until")
|
||||
}
|
||||
|
||||
if limit := c.Int("limit"); limit != 0 {
|
||||
filter.Limit = int(limit)
|
||||
}
|
||||
|
||||
relays := c.Args().Slice()
|
||||
successes := 0
|
||||
failures := make([]error, 0, len(relays))
|
||||
if len(relays) > 0 {
|
||||
for _, relayUrl := range relays {
|
||||
relay, err := nostr.RelayConnect(c.Context, relayUrl)
|
||||
if len(relayUrls) > 0 {
|
||||
var hll *hyperloglog.HyperLogLog
|
||||
if offset := nip45.HyperLogLogEventPubkeyOffsetForFilter(filter); offset != -1 && len(relayUrls) > 1 {
|
||||
hll = hyperloglog.New(offset)
|
||||
}
|
||||
for _, relayUrl := range relayUrls {
|
||||
relay, _ := sys.Pool.EnsureRelay(relayUrl)
|
||||
count, hllRegisters, err := relay.Count(ctx, filter, nostr.SubscriptionOptions{
|
||||
Label: "nak-count",
|
||||
})
|
||||
fmt.Fprintf(os.Stderr, "%s%s: ", strings.Repeat(" ", biggerUrlSize-len(relayUrl)), relayUrl)
|
||||
|
||||
if err != nil {
|
||||
failures = append(failures, err)
|
||||
fmt.Fprintf(os.Stderr, "❌ %s\n", err)
|
||||
continue
|
||||
}
|
||||
count, err := relay.Count(c.Context, nostr.Filters{filter})
|
||||
if err != nil {
|
||||
failures = append(failures, err)
|
||||
continue
|
||||
|
||||
var hasHLLStr string
|
||||
if hll != nil && len(hllRegisters) == 256 {
|
||||
hll.MergeRegisters(hllRegisters)
|
||||
hasHLLStr = " 📋"
|
||||
}
|
||||
fmt.Printf("%s: %d\n", relay.URL, count)
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%d%s\n", count, hasHLLStr)
|
||||
successes++
|
||||
}
|
||||
if successes == 0 {
|
||||
return errors.Join(failures...)
|
||||
return fmt.Errorf("all relays have failed")
|
||||
} else if hll != nil {
|
||||
fmt.Fprintf(os.Stderr, "📋 HyperLogLog sum: %d\n", hll.Count())
|
||||
}
|
||||
} else {
|
||||
// no relays given, will just print the filter
|
||||
var result string
|
||||
j, _ := json.Marshal([]any{"COUNT", "nak", filter})
|
||||
result = string(j)
|
||||
fmt.Println(result)
|
||||
stdout(result)
|
||||
}
|
||||
|
||||
return nil
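For context on the hyperloglog logic added above: when the filter qualifies for NIP-45 HLL counting and more than one relay is queried, the 256 registers returned by each relay are merged so the final figure estimates distinct pubkeys instead of just summing per-relay counts. A rough sketch of that flow, reusing the sys pool and the nip45/hyperloglog calls that appear in this diff (error handling omitted):

func countDistinctAcrossRelays(ctx context.Context, relayUrls []string, filter nostr.Filter) {
	var hll *hyperloglog.HyperLogLog
	if offset := nip45.HyperLogLogEventPubkeyOffsetForFilter(filter); offset != -1 {
		hll = hyperloglog.New(offset)
	}

	for _, url := range relayUrls {
		relay, _ := sys.Pool.EnsureRelay(url)
		count, registers, err := relay.Count(ctx, filter, nostr.SubscriptionOptions{Label: "nak-count"})
		if err != nil {
			continue
		}
		fmt.Fprintf(os.Stderr, "%s: %d\n", url, count)
		if hll != nil && len(registers) == 256 {
			hll.MergeRegisters(registers)
		}
	}

	if hll != nil {
		// deduplicated estimate, usually smaller than the sum of the per-relay counts
		fmt.Fprintf(os.Stderr, "estimated distinct pubkeys: %d\n", hll.Count())
	}
}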
|
||||
|
||||
132
curl.go
Normal file
@@ -0,0 +1,132 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"github.com/urfave/cli/v3"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
var curlFlags []string
|
||||
|
||||
var curl = &cli.Command{
|
||||
Name: "curl",
|
||||
Usage: "calls curl but with a nip98 header",
|
||||
Description: "accepts all flags and arguments exactly as they would be passed to curl.",
|
||||
Flags: defaultKeyFlags,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// cowboy parsing of curl flags to get the data we need for nip98
|
||||
var url string
|
||||
var method string
|
||||
var presumedMethod string
|
||||
|
||||
curlBodyBuildingFlags := []string{
|
||||
"-d",
|
||||
"--data",
|
||||
"--data-binary",
|
||||
"--data-ascii",
|
||||
"--data-raw",
|
||||
"--data-urlencode",
|
||||
"-F",
|
||||
"--form",
|
||||
"--form-string",
|
||||
"--form-escape",
|
||||
"--upload-file",
|
||||
}
|
||||
|
||||
nextIsMethod := false
|
||||
for _, f := range curlFlags {
|
||||
if nextIsMethod {
|
||||
method = f
|
||||
method, _ = strings.CutPrefix(method, `"`)
|
||||
method, _ = strings.CutSuffix(method, `"`)
|
||||
method = strings.ToUpper(method)
|
||||
} else if strings.HasPrefix(f, "https://") || strings.HasPrefix(f, "http://") {
|
||||
url = f
|
||||
} else if f == "--request" || f == "-X" {
|
||||
nextIsMethod = true
|
||||
continue
|
||||
} else if slices.Contains(curlBodyBuildingFlags, f) ||
|
||||
slices.ContainsFunc(curlBodyBuildingFlags, func(s string) bool {
|
||||
return strings.HasPrefix(f, s)
|
||||
}) {
|
||||
presumedMethod = "POST"
|
||||
}
|
||||
nextIsMethod = false
|
||||
}
|
||||
|
||||
if url == "" {
|
||||
return fmt.Errorf("can't create nip98 event: target url is empty")
|
||||
}
|
||||
|
||||
if method == "" {
|
||||
if presumedMethod != "" {
|
||||
method = presumedMethod
|
||||
} else {
|
||||
method = "GET"
|
||||
}
|
||||
}
|
||||
|
||||
// make and sign event
|
||||
evt := nostr.Event{
|
||||
Kind: 27235,
|
||||
CreatedAt: nostr.Now(),
|
||||
Tags: nostr.Tags{
|
||||
{"u", url},
|
||||
{"method", method},
|
||||
},
|
||||
}
|
||||
if err := kr.SignEvent(ctx, &evt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// the first 2 indexes of curlFlags were reserved for this
|
||||
curlFlags[0] = "-H"
|
||||
curlFlags[1] = fmt.Sprintf("Authorization: Nostr %s", base64.StdEncoding.EncodeToString([]byte(evt.String())))
|
||||
|
||||
// call curl
|
||||
cmd := exec.Command("curl", curlFlags...)
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Run()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func realCurl() error {
|
||||
curlFlags = make([]string, 2, max(len(os.Args)-4, 2))
|
||||
keyFlags := make([]string, 0, 5)
|
||||
|
||||
for i := 0; i < len(os.Args[2:]); i++ {
|
||||
arg := os.Args[i+2]
|
||||
if slices.ContainsFunc(defaultKeyFlags, func(f cli.Flag) bool {
|
||||
bareArg, _ := strings.CutPrefix(arg, "-")
|
||||
bareArg, _ = strings.CutPrefix(bareArg, "-")
|
||||
return slices.Contains(f.Names(), bareArg)
|
||||
}) {
|
||||
keyFlags = append(keyFlags, arg)
|
||||
if arg != "--prompt-sec" {
|
||||
i++
|
||||
val := os.Args[i+2]
|
||||
keyFlags = append(keyFlags, val)
|
||||
}
|
||||
} else {
|
||||
curlFlags = append(curlFlags, arg)
|
||||
}
|
||||
}
|
||||
|
||||
return curl.Run(context.Background(), keyFlags)
|
||||
}
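To make the header construction above concrete: curl.go builds a kind 27235 (NIP-98) event carrying the request URL and method as tags, serializes it, and base64-encodes it into the Authorization value passed to curl via -H. A small hypothetical helper written against the same calls curl.go uses (nostr.Event, nostr.Now, SignEvent, base64):

// nip98Header is a hypothetical helper: it builds the value that curl.go passes via -H,
// given any signer exposing the SignEvent method used elsewhere in this diff.
func nip98Header(ctx context.Context, signer interface {
	SignEvent(context.Context, *nostr.Event) error
}, url, method string) (string, error) {
	evt := nostr.Event{
		Kind:      27235,
		CreatedAt: nostr.Now(),
		Tags: nostr.Tags{
			{"u", url},
			{"method", method},
		},
	}
	if err := signer.SignEvent(ctx, &evt); err != nil {
		return "", err
	}
	return "Authorization: Nostr " + base64.StdEncoding.EncodeToString([]byte(evt.String())), nil
}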
|
||||
120
decode.go
@@ -1,15 +1,15 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
stdjson "encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip19"
|
||||
sdk "github.com/nbd-wtf/nostr-sdk"
|
||||
"github.com/urfave/cli/v2"
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip05"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var decode = &cli.Command{
|
||||
@@ -20,6 +20,7 @@ var decode = &cli.Command{
|
||||
nak decode nevent1qqs29yet5tp0qq5xu5qgkeehkzqh5qu46739axzezcxpj4tjlkx9j7gpr4mhxue69uhkummnw3ez6ur4vgh8wetvd3hhyer9wghxuet5sh59ud
|
||||
nak decode nprofile1qqsrhuxx8l9ex335q7he0f09aej04zpazpl0ne2cgukyawd24mayt8gpz4mhxue69uhk2er9dchxummnw3ezumrpdejqz8thwden5te0dehhxarj94c82c3wwajkcmr0wfjx2u3wdejhgqgcwaehxw309aex2mrp0yhxummnw3exzarf9e3k7mgnp0sh5
|
||||
nak decode nsec1jrmyhtjhgd9yqalps8hf9mayvd58852gtz66m7tqpacjedkp6kxq4dyxsr`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "id",
|
||||
@@ -33,79 +34,62 @@ var decode = &cli.Command{
|
||||
},
|
||||
},
|
||||
ArgsUsage: "<npub | nprofile | nip05 | nevent | naddr | nsec>",
|
||||
Action: func(c *cli.Context) error {
|
||||
for input := range getStdinLinesOrFirstArgument(c) {
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for input := range getStdinLinesOrArguments(c.Args()) {
|
||||
if strings.HasPrefix(input, "nostr:") {
|
||||
input = input[6:]
|
||||
}
|
||||
|
||||
var decodeResult DecodeResult
|
||||
if b, err := hex.DecodeString(input); err == nil {
|
||||
if len(b) == 64 {
|
||||
decodeResult.HexResult.PossibleTypes = []string{"sig"}
|
||||
decodeResult.HexResult.Signature = hex.EncodeToString(b)
|
||||
} else if len(b) == 32 {
|
||||
decodeResult.HexResult.PossibleTypes = []string{"pubkey", "private_key", "event_id"}
|
||||
decodeResult.HexResult.ID = hex.EncodeToString(b)
|
||||
decodeResult.HexResult.PrivateKey = hex.EncodeToString(b)
|
||||
decodeResult.HexResult.PublicKey = hex.EncodeToString(b)
|
||||
} else {
|
||||
lineProcessingError(c, "hex string with invalid number of bytes: %d", len(b))
|
||||
_, data, err := nip19.Decode(input)
|
||||
if err == nil {
|
||||
switch v := data.(type) {
|
||||
case nostr.SecretKey:
|
||||
stdout(v.Hex())
|
||||
continue
|
||||
case nostr.PubKey:
|
||||
stdout(v.Hex())
|
||||
continue
|
||||
case [32]byte:
|
||||
stdout(hex.EncodeToString(v[:]))
|
||||
continue
|
||||
case nostr.EventPointer:
|
||||
if c.Bool("id") {
|
||||
stdout(v.ID.Hex())
|
||||
continue
|
||||
}
|
||||
out, _ := stdjson.MarshalIndent(v, "", " ")
|
||||
stdout(string(out))
|
||||
continue
|
||||
case nostr.ProfilePointer:
|
||||
if c.Bool("pubkey") {
|
||||
stdout(v.PublicKey.Hex())
|
||||
continue
|
||||
}
|
||||
out, _ := stdjson.MarshalIndent(v, "", " ")
|
||||
stdout(string(out))
|
||||
continue
|
||||
case nostr.EntityPointer:
|
||||
out, _ := stdjson.MarshalIndent(v, "", " ")
|
||||
stdout(string(out))
|
||||
continue
|
||||
}
|
||||
} else if evp := sdk.InputToEventPointer(input); evp != nil {
|
||||
decodeResult = DecodeResult{EventPointer: evp}
|
||||
} else if pp := sdk.InputToProfile(c.Context, input); pp != nil {
|
||||
decodeResult = DecodeResult{ProfilePointer: pp}
|
||||
} else if prefix, value, err := nip19.Decode(input); err == nil && prefix == "naddr" {
|
||||
ep := value.(nostr.EntityPointer)
|
||||
decodeResult = DecodeResult{EntityPointer: &ep}
|
||||
} else if prefix, value, err := nip19.Decode(input); err == nil && prefix == "nsec" {
|
||||
decodeResult.PrivateKey.PrivateKey = value.(string)
|
||||
decodeResult.PrivateKey.PublicKey, _ = nostr.GetPublicKey(value.(string))
|
||||
} else {
|
||||
lineProcessingError(c, "couldn't decode input '%s': %s", input, err)
|
||||
}
|
||||
|
||||
pp, _ := nip05.QueryIdentifier(ctx, input)
|
||||
if pp != nil {
|
||||
if c.Bool("pubkey") {
|
||||
stdout(pp.PublicKey.Hex())
|
||||
continue
|
||||
}
|
||||
out, _ := stdjson.MarshalIndent(pp, "", " ")
|
||||
stdout(string(out))
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Println(decodeResult.JSON())
|
||||
|
||||
ctx = lineProcessingError(ctx, "couldn't decode input '%s'", input)
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
type DecodeResult struct {
|
||||
*nostr.EventPointer
|
||||
*nostr.ProfilePointer
|
||||
*nostr.EntityPointer
|
||||
HexResult struct {
|
||||
PossibleTypes []string `json:"possible_types"`
|
||||
PublicKey string `json:"pubkey,omitempty"`
|
||||
ID string `json:"event_id,omitempty"`
|
||||
PrivateKey string `json:"private_key,omitempty"`
|
||||
Signature string `json:"sig,omitempty"`
|
||||
}
|
||||
PrivateKey struct {
|
||||
nostr.ProfilePointer
|
||||
PrivateKey string `json:"private_key"`
|
||||
}
|
||||
}
|
||||
|
||||
func (d DecodeResult) JSON() string {
|
||||
var j []byte
|
||||
if d.EventPointer != nil {
|
||||
j, _ = json.MarshalIndent(d.EventPointer, "", " ")
|
||||
} else if d.ProfilePointer != nil {
|
||||
j, _ = json.MarshalIndent(d.ProfilePointer, "", " ")
|
||||
} else if d.EntityPointer != nil {
|
||||
j, _ = json.MarshalIndent(d.EntityPointer, "", " ")
|
||||
} else if len(d.HexResult.PossibleTypes) > 0 {
|
||||
j, _ = json.MarshalIndent(d.HexResult, "", " ")
|
||||
} else if d.PrivateKey.PrivateKey != "" {
|
||||
j, _ = json.MarshalIndent(d.PrivateKey, "", " ")
|
||||
}
|
||||
return string(j)
|
||||
}
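The rewritten decode path above funnels every input through nip19.Decode and switches on the concrete type it returns; here is a compact sketch of that pattern, using the same fiatjaf.com/nostr/nip19 API as this diff and reduced to the hex-printing cases:

// decodeToHex is a hypothetical reduction of the switch above: it turns any of the
// common nip19 codes into the hex form that nak prints by default.
func decodeToHex(input string) (string, error) {
	_, data, err := nip19.Decode(strings.TrimPrefix(input, "nostr:"))
	if err != nil {
		return "", err
	}
	switch v := data.(type) {
	case nostr.SecretKey:
		return v.Hex(), nil
	case nostr.PubKey:
		return v.Hex(), nil
	case nostr.EventPointer:
		return v.ID.Hex(), nil
	case nostr.ProfilePointer:
		return v.PublicKey.Hex(), nil
	default:
		return "", fmt.Errorf("unhandled nip19 type %T", v)
	}
}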
|
||||
|
||||
231
encode.go
@@ -1,10 +1,12 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr/nip19"
|
||||
"github.com/urfave/cli/v2"
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var encode = &cli.Command{
|
||||
@@ -16,53 +18,104 @@ var encode = &cli.Command{
|
||||
nak encode nprofile --relay <relay-url> <pubkey-hex>
|
||||
nak encode nevent <event-id>
|
||||
nak encode nevent --author <pubkey-hex> --relay <relay-url> --relay <other-relay> <event-id>
|
||||
nak encode nsec <privkey-hex>`,
|
||||
Before: func(c *cli.Context) error {
|
||||
if c.Args().Len() < 1 {
|
||||
return fmt.Errorf("expected more than 1 argument.")
|
||||
nak encode nsec <privkey-hex>
|
||||
echo '{"pubkey":"7b225d32d3edb978dba1adfd9440105646babbabbda181ea383f74ba53c3be19","relays":["wss://nada.zero"]}' | nak encode
|
||||
echo '{
|
||||
"id":"7b225d32d3edb978dba1adfd9440105646babbabbda181ea383f74ba53c3be19"
|
||||
"relays":["wss://nada.zero"],
|
||||
"author":"ebb6ff85430705651b311ed51328767078fd790b14f02d22efba68d5513376bc"
|
||||
}' | nak encode`,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringSliceFlag{
|
||||
Name: "relay",
|
||||
Aliases: []string{"r"},
|
||||
Usage: "attach relay hints to naddr code",
|
||||
},
|
||||
},
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
if c.Args().Len() != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
relays := c.StringSlice("relay")
|
||||
if err := normalizeAndValidateRelayURLs(relays); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hasStdin := false
|
||||
for jsonStr := range getJsonsOrBlank() {
|
||||
if jsonStr == "{}" {
|
||||
hasStdin = false
|
||||
continue
|
||||
} else {
|
||||
hasStdin = true
|
||||
}
|
||||
|
||||
var eventPtr nostr.EventPointer
|
||||
if err := json.Unmarshal([]byte(jsonStr), &eventPtr); err == nil && eventPtr.ID != nostr.ZeroID {
|
||||
stdout(nip19.EncodeNevent(eventPtr.ID, appendUnique(relays, eventPtr.Relays...), eventPtr.Author))
|
||||
continue
|
||||
}
|
||||
|
||||
var profilePtr nostr.ProfilePointer
|
||||
if err := json.Unmarshal([]byte(jsonStr), &profilePtr); err == nil && profilePtr.PublicKey != nostr.ZeroPK {
|
||||
stdout(nip19.EncodeNprofile(profilePtr.PublicKey, appendUnique(relays, profilePtr.Relays...)))
|
||||
continue
|
||||
}
|
||||
|
||||
var entityPtr nostr.EntityPointer
|
||||
if err := json.Unmarshal([]byte(jsonStr), &entityPtr); err == nil && entityPtr.PublicKey != nostr.ZeroPK {
|
||||
stdout(nip19.EncodeNaddr(entityPtr.PublicKey, entityPtr.Kind, entityPtr.Identifier, appendUnique(relays, entityPtr.Relays...)))
|
||||
continue
|
||||
}
|
||||
|
||||
ctx = lineProcessingError(ctx, "couldn't decode JSON '%s'", jsonStr)
|
||||
}
|
||||
|
||||
if !hasStdin {
|
||||
return nil
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
Subcommands: []*cli.Command{
|
||||
Commands: []*cli.Command{
|
||||
{
|
||||
Name: "npub",
|
||||
Usage: "encode a hex public key into bech32 'npub' format",
|
||||
Action: func(c *cli.Context) error {
|
||||
for target := range getStdinLinesOrFirstArgument(c) {
|
||||
if err := validate32BytesHex(target); err != nil {
|
||||
lineProcessingError(c, "invalid public key: %s", target, err)
|
||||
Name: "npub",
|
||||
Usage: "encode a hex public key into bech32 'npub' format",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for target := range getStdinLinesOrArguments(c.Args()) {
|
||||
pk, err := nostr.PubKeyFromHexCheap(target)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "invalid public key '%s': %w", target, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if npub, err := nip19.EncodePublicKey(target); err == nil {
|
||||
fmt.Println(npub)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
stdout(nip19.EncodeNpub(pk))
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "nsec",
|
||||
Usage: "encode a hex private key into bech32 'nsec' format",
|
||||
Action: func(c *cli.Context) error {
|
||||
for target := range getStdinLinesOrFirstArgument(c) {
|
||||
if err := validate32BytesHex(target); err != nil {
|
||||
lineProcessingError(c, "invalid private key: %s", target, err)
|
||||
Name: "nsec",
|
||||
Usage: "encode a hex private key into bech32 'nsec' format",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for target := range getStdinLinesOrArguments(c.Args()) {
|
||||
sk, err := nostr.SecretKeyFromHex(target)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "invalid private key '%s': %w", target, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if npub, err := nip19.EncodePrivateKey(target); err == nil {
|
||||
fmt.Println(npub)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
stdout(nip19.EncodeNsec(sk))
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
@@ -76,26 +129,24 @@ var encode = &cli.Command{
|
||||
Usage: "attach relay hints to nprofile code",
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
for target := range getStdinLinesOrFirstArgument(c) {
|
||||
if err := validate32BytesHex(target); err != nil {
|
||||
lineProcessingError(c, "invalid public key: %s", target, err)
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for target := range getStdinLinesOrArguments(c.Args()) {
|
||||
pk, err := nostr.PubKeyFromHexCheap(target)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "invalid public key '%s': %w", target, err)
|
||||
continue
|
||||
}
|
||||
|
||||
relays := c.StringSlice("relay")
|
||||
if err := validateRelayURLs(relays); err != nil {
|
||||
if err := normalizeAndValidateRelayURLs(relays); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if npub, err := nip19.EncodeProfile(target, relays); err == nil {
|
||||
fmt.Println(npub)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
stdout(nip19.EncodeNprofile(pk, relays))
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
@@ -103,49 +154,37 @@ var encode = &cli.Command{
|
||||
Name: "nevent",
|
||||
Usage: "generate event codes with optionally attached relay information",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringSliceFlag{
|
||||
Name: "relay",
|
||||
Aliases: []string{"r"},
|
||||
Usage: "attach relay hints to nevent code",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "author",
|
||||
Usage: "attach an author pubkey as a hint to the nevent code",
|
||||
&PubKeyFlag{
|
||||
Name: "author",
|
||||
Aliases: []string{"a"},
|
||||
Usage: "attach an author pubkey as a hint to the nevent code",
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
for target := range getStdinLinesOrFirstArgument(c) {
|
||||
if err := validate32BytesHex(target); err != nil {
|
||||
lineProcessingError(c, "invalid event id: %s", target, err)
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for target := range getStdinLinesOrArguments(c.Args()) {
|
||||
id, err := parseEventID(target)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "invalid event id: %s", target)
|
||||
continue
|
||||
}
|
||||
|
||||
author := c.String("author")
|
||||
if author != "" {
|
||||
if err := validate32BytesHex(author); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
author := getPubKey(c, "author")
|
||||
relays := c.StringSlice("relay")
|
||||
if err := validateRelayURLs(relays); err != nil {
|
||||
if err := normalizeAndValidateRelayURLs(relays); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if npub, err := nip19.EncodeEvent(target, relays, author); err == nil {
|
||||
fmt.Println(npub)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
stdout(nip19.EncodeNevent(id, relays, author))
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "naddr",
|
||||
Usage: "generate codes for NIP-33 parameterized replaceable events",
|
||||
Usage: "generate codes for addressable events",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "identifier",
|
||||
@@ -153,78 +192,46 @@ var encode = &cli.Command{
|
||||
Usage: "the \"d\" tag identifier of this replaceable event -- can also be read from stdin",
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
&PubKeyFlag{
|
||||
Name: "pubkey",
|
||||
Usage: "pubkey of the naddr author",
|
||||
Aliases: []string{"p"},
|
||||
Aliases: []string{"author", "a", "p"},
|
||||
Required: true,
|
||||
},
|
||||
&cli.Int64Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "kind",
|
||||
Aliases: []string{"k"},
|
||||
Usage: "kind of referred replaceable event",
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "relay",
|
||||
Aliases: []string{"r"},
|
||||
Usage: "attach relay hints to naddr code",
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for d := range getStdinLinesOrBlank() {
|
||||
pubkey := c.String("pubkey")
|
||||
if err := validate32BytesHex(pubkey); err != nil {
|
||||
return err
|
||||
}
|
||||
pubkey := getPubKey(c, "pubkey")
|
||||
|
||||
kind := c.Int("kind")
|
||||
if kind < 30000 || kind >= 40000 {
|
||||
return fmt.Errorf("kind must be between 30000 and 39999, as per NIP-16, got %d", kind)
|
||||
return fmt.Errorf("kind must be between 30000 and 39999, got %d", kind)
|
||||
}
|
||||
|
||||
if d == "" {
|
||||
d = c.String("identifier")
|
||||
if d == "" {
|
||||
lineProcessingError(c, "\"d\" tag identifier can't be empty")
|
||||
ctx = lineProcessingError(ctx, "\"d\" tag identifier can't be empty")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
relays := c.StringSlice("relay")
|
||||
if err := validateRelayURLs(relays); err != nil {
|
||||
if err := normalizeAndValidateRelayURLs(relays); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if npub, err := nip19.EncodeEntity(pubkey, kind, d, relays); err == nil {
|
||||
fmt.Println(npub)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
stdout(nip19.EncodeNaddr(pubkey, nostr.Kind(kind), d, relays))
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "note",
|
||||
Usage: "generate note1 event codes (not recommended)",
|
||||
Action: func(c *cli.Context) error {
|
||||
for target := range getStdinLinesOrFirstArgument(c) {
|
||||
if err := validate32BytesHex(target); err != nil {
|
||||
lineProcessingError(c, "invalid event id: %s", target, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if note, err := nip19.EncodeNote(target); err == nil {
|
||||
fmt.Println(note)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
},
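And the inverse direction: the encode subcommands above all reduce to the nip19.Encode* helpers once the hex inputs are parsed. A small sketch using the same helpers that appear in this diff (parseEventID is nak's own helper from the nevent subcommand; the hex values are the ones already used in cli_test.go, the relay URL is made up):

func encodeExamples() {
	id, _ := parseEventID("36d88cf5fcc449f2390a424907023eda7a74278120eebab8d02797cd92e7e29c")
	author, _ := nostr.PubKeyFromHexCheap("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798")

	// nevent with a relay hint and an author hint
	stdout(nip19.EncodeNevent(id, []string{"wss://relay.example.com"}, author))

	// plain npub for the same author
	stdout(nip19.EncodeNpub(author))
}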
|
||||
|
||||
133
encrypt_decrypt.go
Normal file
@@ -0,0 +1,133 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"fiatjaf.com/nostr/nip04"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var encrypt = &cli.Command{
|
||||
Name: "encrypt",
|
||||
Usage: "encrypts a string with nip44 (or nip04 if specified using a flag) and returns the resulting ciphertext as base64",
|
||||
ArgsUsage: "[plaintext string]",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: append(
|
||||
defaultKeyFlags,
|
||||
&PubKeyFlag{
|
||||
Name: "recipient-pubkey",
|
||||
Aliases: []string{"p", "tgt", "target", "pubkey"},
|
||||
Required: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "nip04",
|
||||
Usage: "use nip04 encryption instead of nip44",
|
||||
},
|
||||
),
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
target := getPubKey(c, "recipient-pubkey")
|
||||
|
||||
plaintext := c.Args().First()
|
||||
|
||||
if c.Bool("nip04") {
|
||||
sec, bunker, err := gatherSecretKeyOrBunkerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if bunker != nil {
|
||||
ciphertext, err := bunker.NIP04Encrypt(ctx, target, plaintext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stdout(ciphertext)
|
||||
} else {
|
||||
ss, err := nip04.ComputeSharedSecret(target, sec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute nip04 shared secret: %w", err)
|
||||
}
|
||||
ciphertext, err := nip04.Encrypt(plaintext, ss)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encrypt as nip04: %w", err)
|
||||
}
|
||||
stdout(ciphertext)
|
||||
}
|
||||
} else {
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := kr.Encrypt(ctx, plaintext, target)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encrypt: %w", err)
|
||||
}
|
||||
stdout(res)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var decrypt = &cli.Command{
|
||||
Name: "decrypt",
|
||||
Usage: "decrypts a base64 nip44 ciphertext (or nip04 if specified using a flag) and returns the resulting plaintext",
|
||||
ArgsUsage: "[ciphertext base64]",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: append(
|
||||
defaultKeyFlags,
|
||||
&PubKeyFlag{
|
||||
Name: "sender-pubkey",
|
||||
Aliases: []string{"p", "src", "source", "pubkey"},
|
||||
Required: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "nip04",
|
||||
Usage: "use nip04 encryption instead of nip44",
|
||||
},
|
||||
),
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
source := getPubKey(c, "sender-pubkey")
|
||||
|
||||
ciphertext := c.Args().First()
|
||||
|
||||
if c.Bool("nip04") {
|
||||
sec, bunker, err := gatherSecretKeyOrBunkerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if bunker != nil {
|
||||
plaintext, err := bunker.NIP04Decrypt(ctx, source, ciphertext)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stdout(plaintext)
|
||||
} else {
|
||||
ss, err := nip04.ComputeSharedSecret(source, sec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute nip04 shared secret: %w", err)
|
||||
}
|
||||
plaintext, err := nip04.Decrypt(ciphertext, ss)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encrypt as nip04: %w", err)
|
||||
}
|
||||
stdout(plaintext)
|
||||
}
|
||||
} else {
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := kr.Decrypt(ctx, ciphertext, source)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encrypt: %w", err)
|
||||
}
|
||||
stdout(res)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
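A self-contained sketch of the nip04 path used by the two commands above, showing that the shared secret is symmetric and the round trip recovers the plaintext (keys '1' and '2' are throwaways; only the nip04 calls from this diff are used):

func nip04RoundTrip() {
	alice, _ := nostr.SecretKeyFromHex("0000000000000000000000000000000000000000000000000000000000000001")
	bob, _ := nostr.SecretKeyFromHex("0000000000000000000000000000000000000000000000000000000000000002")

	// alice encrypts to bob
	ssA, _ := nip04.ComputeSharedSecret(bob.Public(), alice)
	ciphertext, _ := nip04.Encrypt("hello", ssA)

	// bob derives the same shared secret and decrypts
	ssB, _ := nip04.ComputeSharedSecret(alice.Public(), bob)
	plaintext, _ := nip04.Decrypt(ciphertext, ssB)

	fmt.Println(plaintext) // hello
}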
|
||||
430
event.go
@@ -2,21 +2,27 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/keyer"
|
||||
"fiatjaf.com/nostr/nip13"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"github.com/fatih/color"
|
||||
"github.com/mailru/easyjson"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nson"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/exp/slices"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
const CATEGORY_EVENT_FIELDS = "EVENT FIELDS"
|
||||
const (
|
||||
CATEGORY_EVENT_FIELDS = "EVENT FIELDS"
|
||||
CATEGORY_SIGNER = "SIGNER OPTIONS"
|
||||
CATEGORY_EXTRAS = "EXTRAS"
|
||||
)
|
||||
|
||||
var event = &cli.Command{
|
||||
Name: "event",
|
||||
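The import changes above include the urfave/cli v2 to v3 migration: v3 actions receive a context.Context plus the *cli.Command itself instead of a *cli.Context. A minimal sketch of the new shape (command and flag names are illustrative only, not part of nak):

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/urfave/cli/v3"
)

func main() {
	cmd := &cli.Command{
		Name: "hello",
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "name", Value: "world"},
		},
		// v3 signature: context first, then the command carrying the parsed flags
		Action: func(ctx context.Context, c *cli.Command) error {
			fmt.Println("hello", c.String("name"))
			return nil
		},
	}
	if err := cmd.Run(context.Background(), os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}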
@@ -32,30 +38,55 @@ if an event -- or a partial event -- is given on stdin, the flags can be used to
|
||||
example:
|
||||
echo '{"id":"a889df6a387419ff204305f4c2d296ee328c3cd4f8b62f205648a541b4554dfb","pubkey":"c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5","created_at":1698623783,"kind":1,"tags":[],"content":"hello from the nostr army knife","sig":"84876e1ee3e726da84e5d195eb79358b2b3eaa4d9bd38456fde3e8a2af3f1cd4cda23f23fda454869975b3688797d4c66e12f4c51c1b43c6d2997c5e61865661"}' | nak event wss://offchain.pub
|
||||
echo '{"tags": [["t", "spam"]]}' | nak event -c 'this is spam'`,
|
||||
Flags: []cli.Flag{
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: append(defaultKeyFlags,
|
||||
// ~ these args are only for the convoluted musig2 signing process
|
||||
// they will be generally copy-shared-pasted across some manual coordination method between participants
|
||||
&cli.UintFlag{
|
||||
Name: "musig",
|
||||
Usage: "number of signers to use for musig2",
|
||||
Value: 1,
|
||||
DefaultText: "1 -- i.e. do not use musig2 at all",
|
||||
Category: CATEGORY_SIGNER,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "musig-pubkey",
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "sec",
|
||||
Usage: "secret key to sign the event, as hex or nsec",
|
||||
DefaultText: "the key '1'",
|
||||
Value: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
Name: "musig-nonce-secret",
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "musig-nonce",
|
||||
Hidden: true,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "musig-partial",
|
||||
Hidden: true,
|
||||
},
|
||||
// ~~~
|
||||
&cli.UintFlag{
|
||||
Name: "pow",
|
||||
Usage: "nip13 difficulty to target when doing hash work on the event id",
|
||||
Category: CATEGORY_EXTRAS,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "prompt-sec",
|
||||
Usage: "prompt the user to paste a hex or nsec with which to sign the event",
|
||||
Name: "envelope",
|
||||
Usage: "print the event enveloped in a [\"EVENT\", ...] message ready to be sent to a relay",
|
||||
Category: CATEGORY_EXTRAS,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "envelope",
|
||||
Usage: "print the event enveloped in a [\"EVENT\", ...] message ready to be sent to a relay",
|
||||
Name: "auth",
|
||||
Usage: "always perform nip42 \"AUTH\" when facing an \"auth-required: \" rejection and try again",
|
||||
Category: CATEGORY_EXTRAS,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "auth",
|
||||
Usage: "always perform NIP-42 \"AUTH\" when facing an \"auth-required: \" rejection and try again",
|
||||
Name: "nevent",
|
||||
Usage: "print the nevent code (to stderr) after the event is published",
|
||||
Category: CATEGORY_EXTRAS,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "nson",
|
||||
Usage: "encode the event using NSON",
|
||||
},
|
||||
&cli.IntFlag{
|
||||
&cli.UintFlag{
|
||||
Name: "kind",
|
||||
Aliases: []string{"k"},
|
||||
Usage: "event kind",
|
||||
@@ -66,7 +97,7 @@ example:
|
||||
&cli.StringFlag{
|
||||
Name: "content",
|
||||
Aliases: []string{"c"},
|
||||
Usage: "event content",
|
||||
Usage: "event content (if it starts with an '@' will read from a file)",
|
||||
DefaultText: "hello from the nostr army knife",
|
||||
Value: "",
|
||||
Category: CATEGORY_EVENT_FIELDS,
|
||||
@@ -74,7 +105,7 @@ example:
|
||||
&cli.StringSliceFlag{
|
||||
Name: "tag",
|
||||
Aliases: []string{"t"},
|
||||
Usage: "sets a tag field on the event, takes a value like -t e=<id>",
|
||||
Usage: "sets a tag field on the event, takes a value like -t e=<id> or -t sometag=\"value one;value two;value three\"",
|
||||
Category: CATEGORY_EVENT_FIELDS,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
@@ -87,89 +118,124 @@ example:
|
||||
Usage: "shortcut for --tag p=<value>",
|
||||
Category: CATEGORY_EVENT_FIELDS,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
&cli.StringSliceFlag{
|
||||
Name: "d",
|
||||
Usage: "shortcut for --tag d=<value>",
|
||||
Category: CATEGORY_EVENT_FIELDS,
|
||||
},
|
||||
&NaturalTimeFlag{
|
||||
Name: "created-at",
|
||||
Aliases: []string{"time", "ts"},
|
||||
Usage: "unix timestamp value for the created_at field",
|
||||
DefaultText: "now",
|
||||
Value: "",
|
||||
Value: nostr.Now(),
|
||||
Category: CATEGORY_EVENT_FIELDS,
|
||||
},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "confirm",
|
||||
Usage: "ask before publishing the event",
|
||||
Category: CATEGORY_EXTRAS,
|
||||
},
|
||||
),
|
||||
ArgsUsage: "[relay...]",
|
||||
Action: func(c *cli.Context) error {
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
// try to connect to the relays here
|
||||
var relays []*nostr.Relay
|
||||
|
||||
if relayUrls := c.Args().Slice(); len(relayUrls) > 0 {
|
||||
_, relays = connectToAllRelays(c.Context, relayUrls)
|
||||
relays = connectToAllRelays(ctx, c, relayUrls, nil,
|
||||
nostr.PoolOptions{
|
||||
AuthHandler: func(ctx context.Context, authEvent *nostr.Event) error {
|
||||
return authSigner(ctx, c, func(s string, args ...any) {}, authEvent)
|
||||
},
|
||||
},
|
||||
)
|
||||
if len(relays) == 0 {
|
||||
log("failed to connect to any of the given relays.\n")
|
||||
os.Exit(3)
|
||||
}
|
||||
}
|
||||
|
||||
// gather the secret key
|
||||
sec, err := gatherSecretKeyFromArguments(c)
|
||||
kr, sec, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
doAuth := c.Bool("auth")
|
||||
// then process input and generate events:
|
||||
|
||||
// then process input and generate events
|
||||
nextline:
|
||||
for stdinEvent := range getStdinLinesOrBlank() {
|
||||
evt := nostr.Event{
|
||||
Tags: make(nostr.Tags, 0, 3),
|
||||
}
|
||||
// will reuse this
|
||||
var evt nostr.Event
|
||||
|
||||
kindWasSupplied := false
|
||||
// this is called when we have a valid json from stdin
|
||||
handleEvent := func(stdinEvent string) error {
|
||||
evt.Content = ""
|
||||
|
||||
kindWasSupplied := strings.Contains(stdinEvent, `"kind"`)
|
||||
contentWasSupplied := strings.Contains(stdinEvent, `"content"`)
|
||||
mustRehashAndResign := false
|
||||
|
||||
if stdinEvent != "" {
|
||||
if err := easyjson.Unmarshal([]byte(stdinEvent), &evt); err != nil {
|
||||
lineProcessingError(c, "invalid event received from stdin: %s", err)
|
||||
continue
|
||||
}
|
||||
kindWasSupplied = strings.Contains(stdinEvent, `"kind"`)
|
||||
if err := easyjson.Unmarshal([]byte(stdinEvent), &evt); err != nil {
|
||||
return fmt.Errorf("invalid event received from stdin: %s", err)
|
||||
}
|
||||
|
||||
if kind := c.Int("kind"); slices.Contains(c.FlagNames(), "kind") {
|
||||
evt.Kind = kind
|
||||
if kind := c.Uint("kind"); slices.Contains(c.FlagNames(), "kind") {
|
||||
evt.Kind = nostr.Kind(kind)
|
||||
mustRehashAndResign = true
|
||||
} else if !kindWasSupplied {
|
||||
evt.Kind = 1
|
||||
mustRehashAndResign = true
|
||||
}
|
||||
|
||||
if content := c.String("content"); content != "" {
|
||||
evt.Content = content
|
||||
if c.IsSet("content") {
|
||||
content := c.String("content")
|
||||
if strings.HasPrefix(content, "@") {
|
||||
filedata, err := os.ReadFile(content[1:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read file '%s' for content: %w", content[1:], err)
|
||||
}
|
||||
evt.Content = string(filedata)
|
||||
} else {
|
||||
evt.Content = content
|
||||
}
|
||||
mustRehashAndResign = true
|
||||
} else if evt.Content == "" && evt.Kind == 1 {
|
||||
} else if !contentWasSupplied && evt.Content == "" && evt.Kind == 1 {
|
||||
evt.Content = "hello from the nostr army knife"
|
||||
mustRehashAndResign = true
|
||||
}
|
||||
|
||||
tags := make(nostr.Tags, 0, 5)
|
||||
for _, tagFlag := range c.StringSlice("tag") {
|
||||
tagFlags := c.StringSlice("tag")
|
||||
tags := make(nostr.Tags, 0, len(tagFlags)+2)
|
||||
for _, tagFlag := range tagFlags {
|
||||
// tags are in the format key=value
|
||||
spl := strings.Split(tagFlag, "=")
|
||||
if len(spl) == 2 && len(spl[0]) > 0 {
|
||||
tag := nostr.Tag{spl[0]}
|
||||
tagName, tagValue, found := strings.Cut(tagFlag, "=")
|
||||
tag := []string{tagName}
|
||||
if found {
|
||||
// tags may also contain extra elements separated with a ";"
|
||||
spl2 := strings.Split(spl[1], ";")
|
||||
tag = append(tag, spl2...)
|
||||
// ~
|
||||
tags = append(tags, tag)
|
||||
tagValues := strings.Split(tagValue, ";")
|
||||
if len(tagValues) >= 1 {
|
||||
tagValues[0] = decodeTagValue(tagValues[0])
|
||||
}
|
||||
tag = append(tag, tagValues...)
|
||||
}
|
||||
tags = append(tags, tag)
|
||||
}
|
||||
|
||||
for _, etag := range c.StringSlice("e") {
|
||||
decodedEtag := decodeTagValue(etag)
|
||||
if tags.FindWithValue("e", decodedEtag) == nil {
|
||||
tags = append(tags, nostr.Tag{"e", decodedEtag})
|
||||
}
|
||||
}
|
||||
for _, etag := range c.StringSlice("e") {
|
||||
tags = append(tags, []string{"e", etag})
|
||||
mustRehashAndResign = true
|
||||
}
|
||||
for _, ptag := range c.StringSlice("p") {
|
||||
tags = append(tags, []string{"p", ptag})
|
||||
mustRehashAndResign = true
|
||||
decodedPtag := decodeTagValue(ptag)
|
||||
if tags.FindWithValue("p", decodedPtag) == nil {
|
||||
tags = append(tags, nostr.Tag{"p", decodedPtag})
|
||||
}
|
||||
}
|
||||
for _, dtag := range c.StringSlice("d") {
|
||||
decodedDtag := decodeTagValue(dtag)
|
||||
if tags.FindWithValue("d", decodedDtag) == nil {
|
||||
tags = append(tags, nostr.Tag{"d", decodedDtag})
|
||||
}
|
||||
}
|
||||
if len(tags) > 0 {
|
||||
for _, tag := range tags {
|
||||
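The tag handling above cuts each --tag value once on '=' and then splits the remainder on ';' for extra tag elements. A standalone sketch of just that parsing (parseTagFlag is a hypothetical helper; the decodeTagValue step applied to the first element is omitted):

package main

import (
	"fmt"
	"strings"
)

// parseTagFlag turns a "-t name=value one;value two" style argument
// into a tag slice: ["name", "value one", "value two"].
func parseTagFlag(flag string) []string {
	name, value, found := strings.Cut(flag, "=")
	tag := []string{name}
	if found {
		tag = append(tag, strings.Split(value, ";")...)
	}
	return tag
}

func main() {
	fmt.Println(parseTagFlag("t=spam"))                    // [t spam]
	fmt.Println(parseTagFlag("r=https://abc.def;nothing")) // [r https://abc.def nothing]
	fmt.Println(parseTagFlag("expiration"))                // [expiration]
}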
@@ -178,24 +244,61 @@ example:
|
||||
mustRehashAndResign = true
|
||||
}
|
||||
|
||||
if createdAt := c.String("created-at"); createdAt != "" {
|
||||
ts := time.Now()
|
||||
if createdAt != "now" {
|
||||
if v, err := strconv.ParseInt(createdAt, 10, 64); err != nil {
|
||||
return fmt.Errorf("failed to parse timestamp '%s': %w", createdAt, err)
|
||||
} else {
|
||||
ts = time.Unix(v, 0)
|
||||
}
|
||||
}
|
||||
evt.CreatedAt = nostr.Timestamp(ts.Unix())
|
||||
if c.IsSet("created-at") {
|
||||
evt.CreatedAt = getNaturalDate(c, "created-at")
|
||||
mustRehashAndResign = true
|
||||
} else if evt.CreatedAt == 0 {
|
||||
evt.CreatedAt = nostr.Now()
|
||||
mustRehashAndResign = true
|
||||
}
|
||||
|
||||
if evt.Sig == "" || mustRehashAndResign {
|
||||
if err := evt.Sign(sec); err != nil {
|
||||
if c.IsSet("musig") || c.IsSet("sec") || c.IsSet("prompt-sec") {
|
||||
mustRehashAndResign = true
|
||||
}
|
||||
|
||||
if difficulty := c.Uint("pow"); difficulty > 0 {
|
||||
// before doing pow we need the pubkey
|
||||
if numSigners := c.Uint("musig"); numSigners > 1 {
|
||||
pubkeys := c.StringSlice("musig-pubkey")
|
||||
if int(numSigners) != len(pubkeys) {
|
||||
return fmt.Errorf("when doing a pow with musig we must know all signer pubkeys upfront")
|
||||
}
|
||||
evt.PubKey, err = getMusigAggregatedKey(ctx, pubkeys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
evt.PubKey, _ = kr.GetPublicKey(ctx)
|
||||
}
|
||||
|
||||
// try to generate work with this difficulty -- runs forever
|
||||
nonceTag, _ := nip13.DoWork(ctx, evt, int(difficulty))
|
||||
evt.Tags = append(evt.Tags, nonceTag)
|
||||
|
||||
mustRehashAndResign = true
|
||||
}
|
||||
|
||||
if evt.Sig == [64]byte{} || mustRehashAndResign {
|
||||
if numSigners := c.Uint("musig"); numSigners > 1 {
|
||||
// must do musig
|
||||
pubkeys := c.StringSlice("musig-pubkey")
|
||||
secNonce := c.String("musig-nonce-secret")
|
||||
pubNonces := c.StringSlice("musig-nonce")
|
||||
partialSigs := c.StringSlice("musig-partial")
|
||||
signed, err := performMusig(ctx,
|
||||
sec, &evt, int(numSigners), pubkeys, pubNonces, secNonce, partialSigs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("musig error: %w", err)
|
||||
}
|
||||
if !signed {
|
||||
// we haven't finished signing the event, so the users still have to do more steps
|
||||
// instructions for what to do should have been printed by the performMusig() function
|
||||
return nil
|
||||
}
|
||||
} else if err := kr.SignEvent(ctx, &evt); err != nil {
|
||||
if _, isBunker := kr.(keyer.BunkerSigner); isBunker && errors.Is(ctx.Err(), context.DeadlineExceeded) {
|
||||
err = fmt.Errorf("timeout waiting for bunker to respond")
|
||||
}
|
||||
return fmt.Errorf("error signing with provided key: %w", err)
|
||||
}
|
||||
}
|
||||
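The --pow branch above grinds a nonce tag until the event id reaches the requested nip13 difficulty, i.e. the number of leading zero bits in the id. A minimal standard-library sketch of just that difficulty measurement (difficulty is a hypothetical helper; the id below is an arbitrary example with 21 leading zero bits):

package main

import (
	"encoding/hex"
	"fmt"
	"math/bits"
)

// difficulty counts the leading zero bits of a hex event id.
func difficulty(id string) int {
	b, err := hex.DecodeString(id)
	if err != nil {
		return 0
	}
	count := 0
	for _, x := range b {
		if x == 0 {
			count += 8
			continue
		}
		count += bits.LeadingZeros8(x)
		break
	}
	return count
}

func main() {
	fmt.Println(difficulty("000006d8c378af1779d2feebc7603a125d99eca0ccf1085959b307f64e5dd358")) // 21
}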
@@ -205,48 +308,151 @@ example:
|
||||
if c.Bool("envelope") {
|
||||
j, _ := json.Marshal(nostr.EventEnvelope{Event: evt})
|
||||
result = string(j)
|
||||
} else if c.Bool("nson") {
|
||||
result, _ = nson.Marshal(&evt)
|
||||
} else {
|
||||
j, _ := easyjson.Marshal(&evt)
|
||||
result = string(j)
|
||||
}
|
||||
fmt.Println(result)
|
||||
stdout(result)
|
||||
|
||||
// publish to relays
|
||||
if len(relays) > 0 {
|
||||
os.Stdout.Sync()
|
||||
for _, relay := range relays {
|
||||
publish:
|
||||
log("publishing to %s... ", relay.URL)
|
||||
ctx, cancel := context.WithTimeout(c.Context, 10*time.Second)
|
||||
defer cancel()
|
||||
return publishFlow(ctx, c, kr, evt, relays)
|
||||
}
|
||||
|
||||
if err := relay.Publish(ctx, evt); err == nil {
|
||||
// published fine
|
||||
log("success.\n")
|
||||
continue nextline
|
||||
}
|
||||
|
||||
// error publishing
|
||||
if strings.HasPrefix(err.Error(), "msg: auth-required:") && sec != "" && doAuth {
|
||||
// if the relay is requesting auth and we can auth, let's do it
|
||||
pk, _ := nostr.GetPublicKey(sec)
|
||||
log("performing auth as %s... ", pk)
|
||||
if err := relay.Auth(c.Context, func(evt *nostr.Event) error { return evt.Sign(sec) }); err == nil {
|
||||
// try to publish again, but this time don't try to auth again
|
||||
doAuth = false
|
||||
goto publish
|
||||
} else {
|
||||
log("auth error: %s. ", err)
|
||||
}
|
||||
}
|
||||
log("failed: %s\n", err)
|
||||
}
|
||||
for stdinEvent := range getJsonsOrBlank() {
|
||||
if err := handleEvent(stdinEvent); err != nil {
|
||||
ctx = lineProcessingError(ctx, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func publishFlow(ctx context.Context, c *cli.Command, kr nostr.Signer, evt nostr.Event, relays []*nostr.Relay) error {
|
||||
doAuth := c.Bool("auth")
|
||||
|
||||
// publish to relays
|
||||
successRelays := make([]string, 0, len(relays))
|
||||
if len(relays) > 0 {
|
||||
os.Stdout.Sync()
|
||||
|
||||
if c.Bool("confirm") {
|
||||
relaysStr := make([]string, len(relays))
|
||||
for i, r := range relays {
|
||||
relaysStr[i] = strings.ToLower(strings.Split(r.URL, "://")[1])
|
||||
}
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
if !askConfirmation("publish to [ " + strings.Join(relaysStr, " ") + " ]? ") {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if supportsDynamicMultilineMagic() {
|
||||
// overcomplicated multiline rendering magic
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
urls := make([]string, len(relays))
|
||||
lines := make([][][]byte, len(urls))
|
||||
flush := func() {
|
||||
for _, line := range lines {
|
||||
for _, part := range line {
|
||||
os.Stderr.Write(part)
|
||||
}
|
||||
os.Stderr.Write([]byte{'\n'})
|
||||
}
|
||||
}
|
||||
render := func() {
|
||||
clearLines(len(lines))
|
||||
flush()
|
||||
}
|
||||
flush()
|
||||
|
||||
logthis := func(relayUrl, s string, args ...any) {
|
||||
idx := slices.Index(urls, relayUrl)
|
||||
lines[idx] = append(lines[idx], []byte(fmt.Sprintf(s, args...)))
|
||||
render()
|
||||
}
|
||||
colorizethis := func(relayUrl string, colorize func(string, ...any) string) {
|
||||
cleanUrl, _ := strings.CutPrefix(relayUrl, "wss://")
|
||||
idx := slices.Index(urls, relayUrl)
|
||||
lines[idx][0] = []byte(fmt.Sprintf("publishing to %s... ", colorize(cleanUrl)))
|
||||
render()
|
||||
}
|
||||
|
||||
for i, relay := range relays {
|
||||
urls[i] = relay.URL
|
||||
lines[i] = make([][]byte, 1, 3)
|
||||
colorizethis(relay.URL, color.CyanString)
|
||||
}
|
||||
render()
|
||||
|
||||
for res := range sys.Pool.PublishMany(ctx, urls, evt) {
|
||||
if res.Error == nil {
|
||||
colorizethis(res.RelayURL, colors.successf)
|
||||
logthis(res.RelayURL, "success.")
|
||||
successRelays = append(successRelays, res.RelayURL)
|
||||
} else {
|
||||
colorizethis(res.RelayURL, colors.errorf)
|
||||
|
||||
// in this case the lowest-level error is likely to be the most helpful one
|
||||
low := unwrapAll(res.Error)
|
||||
|
||||
// hack for some messages such as from relay.westernbtc.com
|
||||
msg := strings.ReplaceAll(low.Error(), evt.PubKey.Hex(), "author")
|
||||
|
||||
// do not allow the message to overflow the term window
|
||||
msg = clampMessage(msg, 20+len(res.RelayURL))
|
||||
|
||||
logthis(res.RelayURL, msg)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// normal dumb flow
|
||||
for i, relay := range relays {
|
||||
publish:
|
||||
cleanUrl, _ := strings.CutPrefix(relay.URL, "wss://")
|
||||
log("publishing to %s... ", color.CyanString(cleanUrl))
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
if !relay.IsConnected() {
|
||||
if new_, err := sys.Pool.EnsureRelay(relay.URL); err == nil {
|
||||
relays[i] = new_
|
||||
relay = new_
|
||||
}
|
||||
}
|
||||
|
||||
err := relay.Publish(ctx, evt)
|
||||
if err == nil {
|
||||
// published fine
|
||||
log("success.\n")
|
||||
successRelays = append(successRelays, relay.URL)
|
||||
continue // continue to next relay
|
||||
}
|
||||
|
||||
// error publishing
|
||||
if strings.HasPrefix(err.Error(), "msg: auth-required:") && kr != nil && doAuth {
|
||||
// if the relay is requesting auth and we can auth, let's do it
|
||||
pk, _ := kr.GetPublicKey(ctx)
|
||||
npub := nip19.EncodeNpub(pk)
|
||||
log("authenticating as %s... ", color.YellowString("%s…%s", npub[0:7], npub[58:]))
|
||||
if err := relay.Auth(ctx, kr.SignEvent); err == nil {
|
||||
// try to publish again, but this time don't try to auth again
|
||||
doAuth = false
|
||||
goto publish
|
||||
} else {
|
||||
log("auth error: %s. ", err)
|
||||
}
|
||||
}
|
||||
log("failed: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(successRelays) > 0 && c.Bool("nevent") {
|
||||
log(nip19.EncodeNevent(evt.ID, successRelays, evt.PubKey) + "\n")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
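publishFlow's fallback loop above retries a publish exactly once after answering an "auth-required:" rejection with nip42 AUTH. A compressed sketch of that retry shape using the same Relay.Publish/Relay.Auth calls the code relies on (publishWithAuth is a hypothetical helper and the relay URL is a placeholder):

package main

import (
	"context"
	"log"
	"strings"
	"time"

	"github.com/nbd-wtf/go-nostr"
)

func publishWithAuth(ctx context.Context, url string, sk string, evt nostr.Event) error {
	relay, err := nostr.RelayConnect(ctx, url)
	if err != nil {
		return err
	}

	attemptedAuth := false
	for {
		err := relay.Publish(ctx, evt)
		if err == nil {
			return nil // published fine
		}
		// answer a nip42 challenge at most once, then give up with the original error
		if !attemptedAuth && strings.HasPrefix(err.Error(), "msg: auth-required:") {
			attemptedAuth = true
			if authErr := relay.Auth(ctx, func(e *nostr.Event) error { return e.Sign(sk) }); authErr == nil {
				continue
			}
		}
		return err
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	sk := nostr.GeneratePrivateKey()
	evt := nostr.Event{Kind: 1, CreatedAt: nostr.Now(), Content: "hello"}
	evt.Sign(sk)

	if err := publishWithAuth(ctx, "wss://relay.example.com", sk, evt); err != nil {
		log.Println("publish failed:", err)
	}
}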
@@ -1,31 +0,0 @@
|
||||
package main
|
||||
|
||||
func ExampleEventBasic() {
|
||||
app.Run([]string{"nak", "event", "--ts", "1699485669"})
|
||||
// Output:
|
||||
// {"id":"36d88cf5fcc449f2390a424907023eda7a74278120eebab8d02797cd92e7e29c","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1699485669,"kind":1,"tags":[],"content":"hello from the nostr army knife","sig":"68e71a192e8abcf8582a222434ac823ecc50607450ebe8cc4c145eb047794cc382dc3f888ce879d2f404f5ba6085a47601360a0fa2dd4b50d317bd0c6197c2c2"}
|
||||
}
|
||||
|
||||
func ExampleEventComplex() {
|
||||
app.Run([]string{"nak", "event", "--ts", "1699485669", "-k", "11", "-c", "skjdbaskd", "--sec", "17", "-t", "t=spam", "-e", "36d88cf5fcc449f2390a424907023eda7a74278120eebab8d02797cd92e7e29c", "-t", "r=https://abc.def;nothing"})
|
||||
// Output:
|
||||
// {"id":"aec4de6d051a7c2b6ca2d087903d42051a31e07fb742f1240970084822de10a6","pubkey":"2fa2104d6b38d11b0230010559879124e42ab8dfeff5ff29dc9cdadd4ecacc3f","created_at":1699485669,"kind":11,"tags":[["t","spam"],["r","https://abc.def","nothing"],["e","36d88cf5fcc449f2390a424907023eda7a74278120eebab8d02797cd92e7e29c"]],"content":"skjdbaskd","sig":"1165ac7a27d774d351ef19c8e918fb22f4005fcba193976c3d7edba6ef87ead7f14467f376a9e199f8371835368d86a8506f591e382528d00287fb168a7b8f38"}
|
||||
}
|
||||
|
||||
func ExampleReq() {
|
||||
app.Run([]string{"nak", "req", "-k", "1", "-l", "18", "-a", "2fa2104d6b38d11b0230010559879124e42ab8dfeff5ff29dc9cdadd4ecacc3f", "-e", "aec4de6d051a7c2b6ca2d087903d42051a31e07fb742f1240970084822de10a6"})
|
||||
// Output:
|
||||
// ["REQ","nak",{"kinds":[1],"authors":["2fa2104d6b38d11b0230010559879124e42ab8dfeff5ff29dc9cdadd4ecacc3f"],"limit":18,"#e":["aec4de6d051a7c2b6ca2d087903d42051a31e07fb742f1240970084822de10a6"]}]
|
||||
}
|
||||
|
||||
func ExampleEncodeNpub() {
|
||||
app.Run([]string{"nak", "encode", "npub", "a6a67ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179822"})
|
||||
// Output:
|
||||
// npub156n8a7wuhwk9tgrzjh8gwzc8q2dlekedec5djk0js9d3d7qhnq3qjpdq28
|
||||
}
|
||||
|
||||
func ExampleEncodeNprofile() {
|
||||
app.Run([]string{"nak", "encode", "nprofile", "-r", "wss://example.com", "a6a67ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179822"})
|
||||
// Output:
|
||||
// nprofile1qqs2dfn7l8wthtz45p3ftn58pvrs9xlumvkuu2xet8egzkcklqtesgspz9mhxue69uhk27rpd4cxcefwvdhk6fl5jug
|
||||
}
|
||||
139  fetch.go
@@ -1,94 +1,119 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip19"
|
||||
sdk "github.com/nbd-wtf/nostr-sdk"
|
||||
"github.com/urfave/cli/v2"
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip05"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/sdk/hints"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var fetch = &cli.Command{
|
||||
Name: "fetch",
|
||||
Usage: "fetches events related to the given nip19 code from the included relay hints",
|
||||
Usage: "fetches events related to the given nip19 or nip05 code from the included relay hints or the author's outbox relays.",
|
||||
Description: `example usage:
|
||||
nak fetch nevent1qqsxrwm0hd3s3fddh4jc2574z3xzufq6qwuyz2rvv3n087zvym3dpaqprpmhxue69uhhqatzd35kxtnjv4kxz7tfdenju6t0xpnej4
|
||||
echo npub1h8spmtw9m2huyv6v2j2qd5zv956z2zdugl6mgx02f2upffwpm3nqv0j4ps | nak fetch --relay wss://relay.nostr.band`,
|
||||
Flags: []cli.Flag{
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: append(reqFilterFlags,
|
||||
&cli.StringSliceFlag{
|
||||
Name: "relay",
|
||||
Aliases: []string{"r"},
|
||||
Usage: "also use these relays to fetch from",
|
||||
},
|
||||
},
|
||||
ArgsUsage: "[nip19code]",
|
||||
Action: func(c *cli.Context) error {
|
||||
for code := range getStdinLinesOrFirstArgument(c) {
|
||||
),
|
||||
ArgsUsage: "[nip05_or_nip19_code]",
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for code := range getStdinLinesOrArguments(c.Args()) {
|
||||
filter := nostr.Filter{}
|
||||
var authorHint nostr.PubKey
|
||||
relays := c.StringSlice("relay")
|
||||
|
||||
prefix, value, err := nip19.Decode(code)
|
||||
if err != nil {
|
||||
lineProcessingError(c, "failed to decode: %s", err)
|
||||
continue
|
||||
if nip05.IsValidIdentifier(code) {
|
||||
pp, err := nip05.QueryIdentifier(ctx, code)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "failed to fetch nip05: %s", err)
|
||||
continue
|
||||
}
|
||||
authorHint = pp.PublicKey
|
||||
relays = append(relays, pp.Relays...)
|
||||
filter.Authors = append(filter.Authors, pp.PublicKey)
|
||||
} else {
|
||||
prefix, value, err := nip19.Decode(code)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "failed to decode: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := normalizeAndValidateRelayURLs(relays); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch prefix {
|
||||
case "nevent":
|
||||
v := value.(nostr.EventPointer)
|
||||
filter.IDs = append(filter.IDs, v.ID)
|
||||
if v.Author != nostr.ZeroPK {
|
||||
authorHint = v.Author
|
||||
}
|
||||
relays = append(relays, v.Relays...)
|
||||
case "note":
|
||||
filter.IDs = append(filter.IDs, value.([32]byte))
|
||||
case "naddr":
|
||||
v := value.(nostr.EntityPointer)
|
||||
filter.Kinds = []nostr.Kind{v.Kind}
|
||||
filter.Tags = nostr.TagMap{"d": []string{v.Identifier}}
|
||||
filter.Authors = append(filter.Authors, v.PublicKey)
|
||||
authorHint = v.PublicKey
|
||||
relays = append(relays, v.Relays...)
|
||||
case "nprofile":
|
||||
v := value.(nostr.ProfilePointer)
|
||||
filter.Authors = append(filter.Authors, v.PublicKey)
|
||||
authorHint = v.PublicKey
|
||||
relays = append(relays, v.Relays...)
|
||||
case "npub":
|
||||
v := value.(nostr.PubKey)
|
||||
filter.Authors = append(filter.Authors, v)
|
||||
authorHint = v
|
||||
default:
|
||||
return fmt.Errorf("unexpected prefix %s", prefix)
|
||||
}
|
||||
}
|
||||
|
||||
relays := c.StringSlice("relay")
|
||||
if err := validateRelayURLs(relays); err != nil {
|
||||
if authorHint != nostr.ZeroPK {
|
||||
for _, url := range relays {
|
||||
sys.Hints.Save(authorHint, nostr.NormalizeURL(url), hints.LastInHint, nostr.Now())
|
||||
}
|
||||
|
||||
for _, url := range sys.FetchOutboxRelays(ctx, authorHint, 3) {
|
||||
relays = append(relays, url)
|
||||
}
|
||||
}
|
||||
|
||||
if err := applyFlagsToFilter(c, &filter); err != nil {
|
||||
return err
|
||||
}
|
||||
var authorHint string
|
||||
|
||||
switch prefix {
|
||||
case "nevent":
|
||||
v := value.(nostr.EventPointer)
|
||||
filter.IDs = append(filter.IDs, v.ID)
|
||||
if v.Author != "" {
|
||||
authorHint = v.Author
|
||||
}
|
||||
relays = append(relays, v.Relays...)
|
||||
case "naddr":
|
||||
v := value.(nostr.EntityPointer)
|
||||
filter.Tags = nostr.TagMap{"d": []string{v.Identifier}}
|
||||
filter.Kinds = append(filter.Kinds, v.Kind)
|
||||
filter.Authors = append(filter.Authors, v.PublicKey)
|
||||
authorHint = v.PublicKey
|
||||
relays = append(relays, v.Relays...)
|
||||
case "nprofile":
|
||||
v := value.(nostr.ProfilePointer)
|
||||
filter.Authors = append(filter.Authors, v.PublicKey)
|
||||
if len(filter.Authors) > 0 && len(filter.Kinds) == 0 {
|
||||
filter.Kinds = append(filter.Kinds, 0)
|
||||
authorHint = v.PublicKey
|
||||
relays = append(relays, v.Relays...)
|
||||
case "npub":
|
||||
v := value.(string)
|
||||
filter.Authors = append(filter.Authors, v)
|
||||
filter.Kinds = append(filter.Kinds, 0)
|
||||
authorHint = v
|
||||
}
|
||||
|
||||
pool := nostr.NewSimplePool(c.Context)
|
||||
if authorHint != "" {
|
||||
relayList := sdk.FetchRelaysForPubkey(c.Context, pool, authorHint,
|
||||
"wss://purplepag.es", "wss://offchain.pub", "wss://public.relaying.io")
|
||||
for _, relayListItem := range relayList {
|
||||
if relayListItem.Outbox {
|
||||
relays = append(relays, relayListItem.URL)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(relays) == 0 {
|
||||
lineProcessingError(c, "no relay hints found")
|
||||
ctx = lineProcessingError(ctx, "no relay hints found")
|
||||
continue
|
||||
}
|
||||
|
||||
for ie := range pool.SubManyEose(c.Context, relays, nostr.Filters{filter}) {
|
||||
fmt.Println(ie.Event)
|
||||
for ie := range sys.Pool.FetchMany(ctx, relays, filter, nostr.SubscriptionOptions{
|
||||
Label: "nak-fetch",
|
||||
}) {
|
||||
stdout(ie.Event)
|
||||
}
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
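fetch's decoding step above branches on the nip19 prefix to collect ids, authors and relay hints before querying. A small sketch of that branching with the pre-migration github.com/nbd-wtf/go-nostr/nip19 pointer types (filterForCode is a hypothetical helper; the npub is the one from the usage example above; the new fiatjaf.com/nostr package uses typed keys and ids instead of strings):

package main

import (
	"fmt"

	"github.com/nbd-wtf/go-nostr"
	"github.com/nbd-wtf/go-nostr/nip19"
)

func filterForCode(code string) (nostr.Filter, []string, error) {
	var filter nostr.Filter
	var relays []string

	prefix, value, err := nip19.Decode(code)
	if err != nil {
		return filter, nil, err
	}

	switch prefix {
	case "nevent":
		v := value.(nostr.EventPointer)
		filter.IDs = append(filter.IDs, v.ID)
		relays = append(relays, v.Relays...)
	case "note":
		filter.IDs = append(filter.IDs, value.(string))
	case "nprofile":
		v := value.(nostr.ProfilePointer)
		filter.Authors = append(filter.Authors, v.PublicKey)
		relays = append(relays, v.Relays...)
	case "npub":
		filter.Authors = append(filter.Authors, value.(string))
	default:
		return filter, nil, fmt.Errorf("unexpected prefix %s", prefix)
	}
	return filter, relays, nil
}

func main() {
	f, r, err := filterForCode("npub1h8spmtw9m2huyv6v2j2qd5zv956z2zdugl6mgx02f2upffwpm3nqv0j4ps")
	fmt.Println(f, r, err)
}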
95  filter.go (new file)
@@ -0,0 +1,95 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"github.com/mailru/easyjson"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var filter = &cli.Command{
|
||||
Name: "filter",
|
||||
Usage: "applies an event filter to an event to see if it matches.",
|
||||
Description: `
|
||||
example:
|
||||
echo '{"kind": 1, "content": "hello"}' | nak filter -k 1
|
||||
nak filter '{"kind": 1, "content": "hello"}' -k 1
|
||||
nak filter '{"kind": 1, "content": "hello"}' '{"kinds": [1]}' -k 0
|
||||
`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: reqFilterFlags,
|
||||
ArgsUsage: "[event_json] [base_filter_json]",
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
args := c.Args().Slice()
|
||||
|
||||
var baseFilter nostr.Filter
|
||||
var baseEvent nostr.Event
|
||||
|
||||
if len(args) == 2 {
|
||||
// two arguments: first is event, second is base filter
|
||||
if err := easyjson.Unmarshal([]byte(args[0]), &baseEvent); err != nil {
|
||||
return fmt.Errorf("invalid base event: %w", err)
|
||||
}
|
||||
if err := easyjson.Unmarshal([]byte(args[1]), &baseFilter); err != nil {
|
||||
return fmt.Errorf("invalid base filter: %w", err)
|
||||
}
|
||||
} else if len(args) == 1 {
|
||||
if isPiped() {
|
||||
// one argument + stdin: argument is base filter
|
||||
if err := easyjson.Unmarshal([]byte(args[0]), &baseFilter); err != nil {
|
||||
return fmt.Errorf("invalid base filter: %w", err)
|
||||
}
|
||||
} else {
|
||||
// one argument, no stdin: argument is event
|
||||
if err := easyjson.Unmarshal([]byte(args[0]), &baseEvent); err != nil {
|
||||
return fmt.Errorf("invalid base event: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// apply flags to filter
|
||||
if err := applyFlagsToFilter(c, &baseFilter); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if there is no stdin we'll still get an empty object here
|
||||
for evtj := range getJsonsOrBlank() {
|
||||
var evt nostr.Event
|
||||
if err := easyjson.Unmarshal([]byte(evtj), &evt); err != nil {
|
||||
ctx = lineProcessingError(ctx, "invalid event: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// merge that with the base event
|
||||
if evt.ID == nostr.ZeroID {
|
||||
evt.ID = baseEvent.ID
|
||||
}
|
||||
if evt.PubKey == nostr.ZeroPK {
|
||||
evt.PubKey = baseEvent.PubKey
|
||||
}
|
||||
if evt.Sig == [64]byte{} {
|
||||
evt.Sig = baseEvent.Sig
|
||||
}
|
||||
if evt.Content == "" {
|
||||
evt.Content = baseEvent.Content
|
||||
}
|
||||
if len(evt.Tags) == 0 {
|
||||
evt.Tags = baseEvent.Tags
|
||||
}
|
||||
if evt.CreatedAt == 0 {
|
||||
evt.CreatedAt = baseEvent.CreatedAt
|
||||
}
|
||||
|
||||
if baseFilter.Matches(evt) {
|
||||
stdout(evt)
|
||||
} else {
|
||||
logverbose("event %s didn't match %s", evt, baseFilter)
|
||||
}
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
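At its core the filter command above is nostr.Filter.Matches applied to each (possibly merged) event. A tiny sketch of that check with the pre-migration go-nostr types:

package main

import (
	"fmt"

	"github.com/nbd-wtf/go-nostr"
)

func main() {
	filter := nostr.Filter{Kinds: []int{1}}

	evt := nostr.Event{
		Kind:      1,
		CreatedAt: nostr.Now(),
		Content:   "hello",
	}

	// prints true: the kind matches and no other filter field constrains the event
	fmt.Println(filter.Matches(&evt))
}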
175  flags.go (new file)
@@ -0,0 +1,175 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"github.com/markusmobius/go-dateparser"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
type NaturalTimeFlag = cli.FlagBase[nostr.Timestamp, struct{}, naturalTimeValue]
|
||||
|
||||
type naturalTimeValue struct {
|
||||
timestamp *nostr.Timestamp
|
||||
hasBeenSet bool
|
||||
}
|
||||
|
||||
var _ cli.ValueCreator[nostr.Timestamp, struct{}] = naturalTimeValue{}
|
||||
|
||||
func (t naturalTimeValue) Create(val nostr.Timestamp, p *nostr.Timestamp, c struct{}) cli.Value {
|
||||
*p = val
|
||||
return &naturalTimeValue{
|
||||
timestamp: p,
|
||||
}
|
||||
}
|
||||
|
||||
func (t naturalTimeValue) ToString(b nostr.Timestamp) string {
|
||||
ts := b.Time()
|
||||
if ts.IsZero() {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%v", ts)
|
||||
}
|
||||
|
||||
func (t *naturalTimeValue) Set(value string) error {
|
||||
var ts time.Time
|
||||
if n, err := strconv.ParseInt(value, 10, 64); err == nil {
|
||||
// when the input is a raw number, treat it as an exact timestamp
|
||||
ts = time.Unix(n, 0)
|
||||
} else if errors.Is(err, strconv.ErrRange) {
|
||||
// this means a huge number, so we should fail
|
||||
return err
|
||||
} else {
|
||||
// otherwise try to parse it as a human date string in natural language
|
||||
date, err := dateparser.Parse(&dateparser.Configuration{
|
||||
DefaultTimezone: time.Local,
|
||||
CurrentTime: time.Now(),
|
||||
}, value)
|
||||
ts = date.Time
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if t.timestamp != nil {
|
||||
*t.timestamp = nostr.Timestamp(ts.Unix())
|
||||
}
|
||||
|
||||
t.hasBeenSet = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *naturalTimeValue) String() string { return fmt.Sprintf("%#v", t.timestamp) }
|
||||
func (t *naturalTimeValue) Value() *nostr.Timestamp { return t.timestamp }
|
||||
func (t *naturalTimeValue) Get() any { return *t.timestamp }
|
||||
|
||||
func getNaturalDate(cmd *cli.Command, name string) nostr.Timestamp {
|
||||
return cmd.Value(name).(nostr.Timestamp)
|
||||
}
|
||||
|
||||
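The Set method above accepts either a raw unix timestamp or a natural-language date. A simplified standalone sketch of that decision using the same go-dateparser call (parseWhen is a hypothetical helper; the out-of-range integer case is dropped for brevity):

package main

import (
	"fmt"
	"strconv"
	"time"

	"github.com/markusmobius/go-dateparser"
)

// parseWhen treats a bare integer as an exact unix timestamp and sends
// anything else through the natural-language date parser.
func parseWhen(value string) (time.Time, error) {
	if n, err := strconv.ParseInt(value, 10, 64); err == nil {
		return time.Unix(n, 0), nil
	}
	date, err := dateparser.Parse(&dateparser.Configuration{
		DefaultTimezone: time.Local,
		CurrentTime:     time.Now(),
	}, value)
	if err != nil {
		return time.Time{}, err
	}
	return date.Time, nil
}

func main() {
	fmt.Println(parseWhen("1699485669"))
	fmt.Println(parseWhen("2 days ago"))
}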
//
|
||||
//
|
||||
//
|
||||
|
||||
type (
|
||||
PubKeyFlag = cli.FlagBase[nostr.PubKey, struct{}, pubkeyValue]
|
||||
)
|
||||
|
||||
type pubkeyValue struct {
|
||||
pubkey nostr.PubKey
|
||||
hasBeenSet bool
|
||||
}
|
||||
|
||||
var _ cli.ValueCreator[nostr.PubKey, struct{}] = pubkeyValue{}
|
||||
|
||||
func (t pubkeyValue) Create(val nostr.PubKey, p *nostr.PubKey, c struct{}) cli.Value {
|
||||
*p = val
|
||||
return &pubkeyValue{
|
||||
pubkey: val,
|
||||
}
|
||||
}
|
||||
|
||||
func (t pubkeyValue) ToString(b nostr.PubKey) string { return t.pubkey.String() }
|
||||
|
||||
func (t *pubkeyValue) Set(value string) error {
|
||||
pubkey, err := parsePubKey(value)
|
||||
t.pubkey = pubkey
|
||||
t.hasBeenSet = true
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *pubkeyValue) String() string { return fmt.Sprintf("%#v", t.pubkey) }
|
||||
func (t *pubkeyValue) Value() nostr.PubKey { return t.pubkey }
|
||||
func (t *pubkeyValue) Get() any { return t.pubkey }
|
||||
|
||||
func getPubKey(cmd *cli.Command, name string) nostr.PubKey {
|
||||
return cmd.Value(name).(nostr.PubKey)
|
||||
}
|
||||
|
||||
//
|
||||
//
|
||||
//
|
||||
|
||||
type (
|
||||
pubkeySlice = cli.SliceBase[nostr.PubKey, struct{}, pubkeyValue]
|
||||
PubKeySliceFlag = cli.FlagBase[[]nostr.PubKey, struct{}, pubkeySlice]
|
||||
)
|
||||
|
||||
func getPubKeySlice(cmd *cli.Command, name string) []nostr.PubKey {
|
||||
return cmd.Value(name).([]nostr.PubKey)
|
||||
}
|
||||
|
||||
//
|
||||
//
|
||||
//
|
||||
|
||||
type (
|
||||
IDFlag = cli.FlagBase[nostr.ID, struct{}, idValue]
|
||||
)
|
||||
|
||||
type idValue struct {
|
||||
id nostr.ID
|
||||
hasBeenSet bool
|
||||
}
|
||||
|
||||
var _ cli.ValueCreator[nostr.ID, struct{}] = idValue{}
|
||||
|
||||
func (t idValue) Create(val nostr.ID, p *nostr.ID, c struct{}) cli.Value {
|
||||
*p = val
|
||||
return &idValue{
|
||||
id: val,
|
||||
}
|
||||
}
|
||||
func (t idValue) ToString(b nostr.ID) string { return t.id.String() }
|
||||
|
||||
func (t *idValue) Set(value string) error {
|
||||
id, err := parseEventID(value)
|
||||
t.id = id
|
||||
t.hasBeenSet = true
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *idValue) String() string { return fmt.Sprintf("%#v", t.id) }
|
||||
func (t *idValue) Value() nostr.ID { return t.id }
|
||||
func (t *idValue) Get() any { return t.id }
|
||||
|
||||
func getID(cmd *cli.Command, name string) nostr.ID {
|
||||
return cmd.Value(name).(nostr.ID)
|
||||
}
|
||||
|
||||
//
|
||||
//
|
||||
//
|
||||
|
||||
type (
|
||||
idSlice = cli.SliceBase[nostr.ID, struct{}, idValue]
|
||||
IDSliceFlag = cli.FlagBase[[]nostr.ID, struct{}, idSlice]
|
||||
)
|
||||
|
||||
func getIDSlice(cmd *cli.Command, name string) []nostr.ID {
|
||||
return cmd.Value(name).([]nostr.ID)
|
||||
}
|
||||
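These FlagBase aliases make pubkeys and event ids first-class cli/v3 flag types that parse and validate their values up front. A hypothetical usage sketch, assuming the surrounding package's imports and the PubKeyFlag, getPubKey and stdout helpers above are in scope (the command itself is illustrative, not part of nak):

var whois = &cli.Command{
	Name: "whois",
	Flags: []cli.Flag{
		&PubKeyFlag{
			Name:     "pubkey",
			Aliases:  []string{"p"},
			Required: true,
		},
	},
	Action: func(ctx context.Context, c *cli.Command) error {
		// the flag already parsed the hex/npub input into a nostr.PubKey
		pk := getPubKey(c, "pubkey")
		stdout(pk)
		return nil
	},
}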
123  fs.go (new file)
@@ -0,0 +1,123 @@
|
||||
//go:build !windows && !openbsd
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/keyer"
|
||||
"github.com/fatih/color"
|
||||
"github.com/fiatjaf/nak/nostrfs"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var fsCmd = &cli.Command{
|
||||
Name: "fs",
|
||||
Usage: "mount a FUSE filesystem that exposes Nostr events as files.",
|
||||
Description: `(experimental)`,
|
||||
ArgsUsage: "<mountpoint>",
|
||||
Flags: append(defaultKeyFlags,
|
||||
&PubKeyFlag{
|
||||
Name: "pubkey",
|
||||
Usage: "public key from where to to prepopulate directories",
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "auto-publish-notes",
|
||||
Usage: "delay after which new notes will be auto-published, set to -1 to not publish.",
|
||||
Value: time.Second * 30,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "auto-publish-articles",
|
||||
Usage: "delay after which edited articles will be auto-published.",
|
||||
Value: time.Hour * 24 * 365 * 2,
|
||||
DefaultText: "basically infinite",
|
||||
},
|
||||
),
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
mountpoint := c.Args().First()
|
||||
if mountpoint == "" {
|
||||
return fmt.Errorf("must be called with a directory path to serve as the mountpoint as an argument")
|
||||
}
|
||||
|
||||
var kr nostr.User
|
||||
if signer, _, err := gatherKeyerFromArguments(ctx, c); err == nil {
|
||||
kr = signer
|
||||
} else {
|
||||
kr = keyer.NewReadOnlyUser(getPubKey(c, "pubkey"))
|
||||
}
|
||||
|
||||
apnt := c.Duration("auto-publish-notes")
|
||||
if apnt < 0 {
|
||||
apnt = time.Hour * 24 * 365 * 3
|
||||
}
|
||||
apat := c.Duration("auto-publish-articles")
|
||||
if apat < 0 {
|
||||
apat = time.Hour * 24 * 365 * 3
|
||||
}
|
||||
|
||||
root := nostrfs.NewNostrRoot(
|
||||
context.WithValue(
|
||||
context.WithValue(
|
||||
ctx,
|
||||
"log", log,
|
||||
),
|
||||
"logverbose", logverbose,
|
||||
),
|
||||
sys,
|
||||
kr,
|
||||
mountpoint,
|
||||
nostrfs.Options{
|
||||
AutoPublishNotesTimeout: apnt,
|
||||
AutoPublishArticlesTimeout: apat,
|
||||
},
|
||||
)
|
||||
|
||||
// create the server
|
||||
log("- mounting at %s... ", color.HiCyanString(mountpoint))
|
||||
timeout := time.Second * 120
|
||||
server, err := fs.Mount(mountpoint, root, &fs.Options{
|
||||
MountOptions: fuse.MountOptions{
|
||||
Debug: isVerbose,
|
||||
Name: "nak",
|
||||
FsName: "nak",
|
||||
RememberInodes: true,
|
||||
},
|
||||
AttrTimeout: &timeout,
|
||||
EntryTimeout: &timeout,
|
||||
Logger: nostr.DebugLogger,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("mount failed: %w", err)
|
||||
}
|
||||
log("ok.\n")
|
||||
|
||||
// setup signal handling for clean unmount
|
||||
ch := make(chan os.Signal, 1)
|
||||
chErr := make(chan error)
|
||||
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
|
||||
go func() {
|
||||
<-ch
|
||||
log("- unmounting... ")
|
||||
err := server.Unmount()
|
||||
if err != nil {
|
||||
chErr <- fmt.Errorf("unmount failed: %w", err)
|
||||
} else {
|
||||
log("ok\n")
|
||||
chErr <- nil
|
||||
}
|
||||
}()
|
||||
|
||||
// serve the filesystem until unmounted
|
||||
server.Wait()
|
||||
return <-chErr
|
||||
},
|
||||
}
|
||||
20  fs_other.go (new file)
@@ -0,0 +1,20 @@
|
||||
//go:build windows || openbsd
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var fsCmd = &cli.Command{
|
||||
Name: "fs",
|
||||
Usage: "mount a FUSE filesystem that exposes Nostr events as files.",
|
||||
Description: `doesn't work on Windows and OpenBSD.`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
return fmt.Errorf("this doesn't work on Windows and OpenBSD.")
|
||||
},
|
||||
}
|
||||
100  go.mod
@@ -1,40 +1,82 @@
|
||||
module github.com/fiatjaf/nak
|
||||
|
||||
go 1.21
|
||||
|
||||
toolchain go1.21.0
|
||||
go 1.24.1
|
||||
|
||||
require (
|
||||
github.com/bgentry/speakeasy v0.1.0
|
||||
github.com/mailru/easyjson v0.7.7
|
||||
github.com/nbd-wtf/go-nostr v0.27.0
|
||||
github.com/nbd-wtf/nostr-sdk v0.0.5
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
|
||||
fiatjaf.com/lib v0.3.1
|
||||
fiatjaf.com/nostr v0.0.0-20251124002842-de54dd1fa4b8
|
||||
github.com/bep/debounce v1.2.1
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.6
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0
|
||||
github.com/fatih/color v1.16.0
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/liamg/magic v0.0.1
|
||||
github.com/mailru/easyjson v0.9.1
|
||||
github.com/mark3labs/mcp-go v0.8.3
|
||||
github.com/markusmobius/go-dateparser v1.2.3
|
||||
github.com/mattn/go-isatty v0.0.20
|
||||
github.com/mattn/go-tty v0.0.7
|
||||
github.com/mdp/qrterminal/v3 v3.2.1
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/urfave/cli/v3 v3.0.0-beta1
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
|
||||
golang.org/x/sync v0.18.0
|
||||
golang.org/x/term v0.32.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.1 // indirect
|
||||
github.com/FastFilter/xorfilter v0.2.1 // indirect
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 // indirect
|
||||
github.com/andybalholm/brotli v1.1.1 // indirect
|
||||
github.com/bluekeyes/go-gitdiff v0.7.1 // indirect
|
||||
github.com/btcsuite/btcd v0.24.2 // indirect
|
||||
github.com/btcsuite/btcd/btcutil v1.1.5 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chzyer/logex v1.1.10 // indirect
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect
|
||||
github.com/coder/websocket v1.8.14 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect
|
||||
github.com/dgraph-io/ristretto/v2 v2.3.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/fiatjaf/eventstore v0.2.16 // indirect
|
||||
github.com/gobwas/httphead v0.1.0 // indirect
|
||||
github.com/gobwas/pool v0.2.1 // indirect
|
||||
github.com/gobwas/ws v1.3.1 // indirect
|
||||
github.com/golang/glog v1.1.2 // indirect
|
||||
github.com/elliotchance/pie/v2 v2.7.0 // indirect
|
||||
github.com/elnosh/gonuts v0.4.2 // indirect
|
||||
github.com/fasthttp/websocket v1.5.12 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-git/go-git/v5 v5.16.3 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/hablullah/go-hijri v1.0.2 // indirect
|
||||
github.com/hablullah/go-juliandays v1.0.0 // indirect
|
||||
github.com/jalaali/go-jalaali v0.0.0-20210801064154-80525e88d958 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/puzpuzpuz/xsync/v2 v2.5.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/tidwall/gjson v1.17.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/magefile/mage v1.14.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rs/cors v1.11.1 // indirect
|
||||
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 // indirect
|
||||
github.com/templexxx/cpu v0.0.1 // indirect
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
|
||||
github.com/tetratelabs/wazero v1.8.0 // indirect
|
||||
github.com/tidwall/gjson v1.18.0 // indirect
|
||||
github.com/tidwall/match v1.2.0 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
golang.org/x/sys v0.14.0 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/fasthttp v1.59.0 // indirect
|
||||
github.com/wasilibs/go-re2 v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/bbolt v1.4.2 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/sys v0.35.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
rsc.io/qr v0.2.0 // indirect
|
||||
)
|
||||
|
||||
229  go.sum
@@ -1,21 +1,37 @@
|
||||
fiatjaf.com/lib v0.3.1 h1:/oFQwNtFRfV+ukmOCxfBEAuayoLwXp4wu2/fz5iHpwA=
|
||||
fiatjaf.com/lib v0.3.1/go.mod h1:Ycqq3+mJ9jAWu7XjbQI1cVr+OFgnHn79dQR5oTII47g=
|
||||
fiatjaf.com/nostr v0.0.0-20251124002842-de54dd1fa4b8 h1:R16mnlJ3qvVar7G4rzY+Z+mEAf2O6wpHTlRlHAt2Od8=
|
||||
fiatjaf.com/nostr v0.0.0-20251124002842-de54dd1fa4b8/go.mod h1:QEGyTgAjjTFwDx2BJGZiCdmoAcWA/G+sQy7wDqKzSPU=
|
||||
github.com/FastFilter/xorfilter v0.2.1 h1:lbdeLG9BdpquK64ZsleBS8B4xO/QW1IM0gMzF7KaBKc=
|
||||
github.com/FastFilter/xorfilter v0.2.1/go.mod h1:aumvdkhscz6YBZF9ZA/6O4fIoNod4YR50kIVGGZ7l9I=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3/go.mod h1:we0YA5CsBbH5+/NUzC/AlMmxaDtWlXeNsqrwXjTzmzA=
|
||||
github.com/PowerDNS/lmdb-go v1.9.3 h1:AUMY2pZT8WRpkEv39I9Id3MuoHd+NZbTVpNhruVkPTg=
|
||||
github.com/PowerDNS/lmdb-go v1.9.3/go.mod h1:TE0l+EZK8Z1B4dx070ZxkWTlp8RG1mjN0/+FkFRQMtU=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
|
||||
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
|
||||
github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
|
||||
github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
|
||||
github.com/bluekeyes/go-gitdiff v0.7.1 h1:graP4ElLRshr8ecu0UtqfNTCHrtSyZd3DABQm/DWesQ=
|
||||
github.com/bluekeyes/go-gitdiff v0.7.1/go.mod h1:QpfYYO1E0fTVHVZAZKiRjtSGY9823iCdvGXBcEzHGbM=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
|
||||
github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY=
|
||||
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
|
||||
github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY=
|
||||
github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.6 h1:IzlsEr9olcSRKB/n7c4351F3xHKxS2lma+1UFGCYd4E=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.6/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ=
|
||||
github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
|
||||
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 h1:KdUfX2zKommPRa+PD0sWZUyXe9w277ABlgELO7H04IM=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
@@ -25,42 +41,50 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
|
||||
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
|
||||
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
|
||||
github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXRwvOt5zk=
|
||||
github.com/dgraph-io/ristretto/v2 v2.3.0/go.mod h1:gpoRV3VzrEY1a9dWAYV6T1U7YzfgttXdd/ZzL1s9OZM=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/fiatjaf/eventstore v0.2.16 h1:NR64mnyUT5nJR8Sj2AwJTd1Hqs5kKJcCFO21ggUkvWg=
|
||||
github.com/fiatjaf/eventstore v0.2.16/go.mod h1:rUc1KhVufVmC+HUOiuPweGAcvG6lEOQCkRCn2Xn5VRA=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
github.com/elliotchance/pie/v2 v2.7.0 h1:FqoIKg4uj0G/CrLGuMS9ejnFKa92lxE1dEgBD3pShXg=
|
||||
github.com/elliotchance/pie/v2 v2.7.0/go.mod h1:18t0dgGFH006g4eVdDtWfgFZPQEgl10IoEO8YWEq3Og=
|
||||
github.com/elnosh/gonuts v0.4.2 h1:/WubPAWGxTE+okJ0WPvmtEzTzpi04RGxiTHAF1FYU+M=
|
||||
github.com/elnosh/gonuts v0.4.2/go.mod h1:vgZomh4YQk7R3w4ltZc0sHwCmndfHkuX6V4sga/8oNs=
|
||||
github.com/fasthttp/websocket v1.5.12 h1:e4RGPpWW2HTbL3zV0Y/t7g0ub294LkiuXXUuTOUInlE=
|
||||
github.com/fasthttp/websocket v1.5.12/go.mod h1:I+liyL7/4moHojiOgUOIKEWm9EIxHqxZChS+aMFltyg=
|
||||
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.3.1 h1:Qi34dfLMWJbiKaNbDVzM9x27nZBjmkaW6i4+Ku+pGVU=
|
||||
github.com/gobwas/ws v1.3.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-git/go-git/v5 v5.16.3 h1:Z8BtvxZ09bYm/yYNgPKCzgWtaRqDTgIKRgIRHBfU6Z8=
|
||||
github.com/go-git/go-git/v5 v5.16.3/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
@@ -72,19 +96,62 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hablullah/go-hijri v1.0.2 h1:drT/MZpSZJQXo7jftf5fthArShcaMtsal0Zf/dnmp6k=
|
||||
github.com/hablullah/go-hijri v1.0.2/go.mod h1:OS5qyYLDjORXzK4O1adFw9Q5WfhOcMdAKglDkcTxgWQ=
|
||||
github.com/hablullah/go-juliandays v1.0.0 h1:A8YM7wIj16SzlKT0SRJc9CD29iiaUzpBLzh5hr0/5p0=
|
||||
github.com/hablullah/go-juliandays v1.0.0/go.mod h1:0JOYq4oFOuDja+oospuc61YoX+uNEn7Z6uHYTbBzdGc=
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2 h1:SbJP1sUP+n1UF8NXBA14BuojmTez+mDgOk0bC057HQw=
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2/go.mod h1:ugNaD/iv5JYyS1Rcvi57Wz7/vrLQJo10mmketmoef48=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jalaali/go-jalaali v0.0.0-20210801064154-80525e88d958 h1:qxLoi6CAcXVzjfvu+KXIXJOAsQB62LXjsfbOaErsVzE=
|
||||
github.com/jalaali/go-jalaali v0.0.0-20210801064154-80525e88d958/go.mod h1:Wqfu7mjUHj9WDzSSPI5KfBclTTEnLveRUFr/ujWnTgE=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/nbd-wtf/go-nostr v0.27.0 h1:h6JmMMmfNcAORTL2kk/K3+U6Mju6rk/IjcHA/PMeOc8=
|
||||
github.com/nbd-wtf/go-nostr v0.27.0/go.mod h1:bkffJI+x914sPQWum9ZRUn66D7NpDnAoWo1yICvj3/0=
|
||||
github.com/nbd-wtf/nostr-sdk v0.0.5 h1:rec+FcDizDVO0W25PX0lgYMXvP7zNNOgI3Fu9UCm4BY=
|
||||
github.com/nbd-wtf/nostr-sdk v0.0.5/go.mod h1:iJJsikesCGLNFZ9dLqhLPDzdt924EagUmdQxT3w2Lmk=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/liamg/magic v0.0.1 h1:Ru22ElY+sCh6RvRTWjQzKKCxsEco8hE0co8n1qe7TBM=
|
||||
github.com/liamg/magic v0.0.1/go.mod h1:yQkOmZZI52EA+SQ2xyHpVw8fNvTBruF873Y+Vt6S+fk=
|
||||
github.com/magefile/mage v1.14.0 h1:6QDX3g6z1YvJ4olPhT1wksUcSa/V0a1B+pJb73fBjyo=
|
||||
github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
|
||||
github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8=
|
||||
github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/mark3labs/mcp-go v0.8.3 h1:IzlyN8BaP4YwUMUDqxOGJhGdZXEDQiAPX43dNPgnzrg=
|
||||
github.com/mark3labs/mcp-go v0.8.3/go.mod h1:cjMlBU0cv/cj9kjlgmRhoJ5JREdS7YX83xeIG9Ko/jE=
|
||||
github.com/markusmobius/go-dateparser v1.2.3 h1:TvrsIvr5uk+3v6poDjaicnAFJ5IgtFHgLiuMY2Eb7Nw=
|
||||
github.com/markusmobius/go-dateparser v1.2.3/go.mod h1:cMwQRrBUQlK1UI5TIFHEcvpsMbkWrQLXuaPNMFzuYLk=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-tty v0.0.7 h1:KJ486B6qI8+wBO7kQxYgmmEFDaFEE96JMBQ7h400N8Q=
|
||||
github.com/mattn/go-tty v0.0.7/go.mod h1:f2i5ZOvXBU/tCABmLmOfzLz9azMo5wdAaElRNnJKr+k=
|
||||
github.com/mdp/qrterminal/v3 v3.2.1 h1:6+yQjiiOsSuXT5n9/m60E54vdgFsw0zhADHhHLrFet4=
|
||||
github.com/mdp/qrterminal/v3 v3.2.1/go.mod h1:jOTmXvnBsMy5xqLniO0R++Jmjs2sTm9dFSuQ5kpz/SU=
|
||||
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
|
||||
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
@@ -98,40 +165,70 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/puzpuzpuz/xsync/v2 v2.5.1 h1:mVGYAvzDSu52+zaGyNjC+24Xw2bQi3kTr4QJ6N9pIIU=
|
||||
github.com/puzpuzpuz/xsync/v2 v2.5.1/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
|
||||
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 h1:D0vL7YNisV2yqE55+q0lFuGse6U8lxlg7fYTctlT5Gc=
|
||||
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM=
|
||||
github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
|
||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
|
||||
github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g=
|
||||
github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/match v1.2.0 h1:0pt8FlkOwjN2fPt4bIl4BoNxb98gGHN2ObFEDkrfZnM=
|
||||
github.com/tidwall/match v1.2.0/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/urfave/cli/v3 v3.0.0-beta1 h1:6DTaaUarcM0wX7qj5Hcvs+5Dm3dyUTBbEwIWAjcw9Zg=
|
||||
github.com/urfave/cli/v3 v3.0.0-beta1/go.mod h1:FnIeEMYu+ko8zP1F9Ypr3xkZMIDqW3DR92yUtY39q1Y=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.59.0 h1:Qu0qYHfXvPk1mSLNqcFtEk6DpxgA26hy6bmydotDpRI=
|
||||
github.com/valyala/fasthttp v1.59.0/go.mod h1:GTxNb9Bc6r2a9D0TWNSPwDz78UxnTGBViY3xZNEqyYU=
|
||||
github.com/wasilibs/go-re2 v1.3.0 h1:LFhBNzoStM3wMie6rN2slD1cuYH2CGiHpvNL3UtcsMw=
|
||||
github.com/wasilibs/go-re2 v1.3.0/go.mod h1:AafrCXVvGRJJOImMajgJ2M7rVmWyisVK7sFshbxnVrg=
|
||||
github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ=
|
||||
github.com/wasilibs/nottinygc v0.4.0/go.mod h1:oDcIotskuYNMpqMF23l7Z8uzD4TC0WXHK8jetlB3HIo=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
|
||||
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
||||
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -141,13 +238,16 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -158,12 +258,17 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
|
||||
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
|
||||
|
||||
helpers.go (568 lines changed)
@@ -3,81 +3,144 @@ package main
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"iter"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bgentry/speakeasy"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip19"
|
||||
"github.com/urfave/cli/v2"
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip05"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/nip42"
|
||||
"fiatjaf.com/nostr/sdk"
|
||||
"github.com/chzyer/readline"
|
||||
"github.com/fatih/color"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/mattn/go-tty"
|
||||
"github.com/urfave/cli/v3"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
var sys *sdk.System
|
||||
|
||||
var json = jsoniter.ConfigFastest
|
||||
|
||||
const (
|
||||
LINE_PROCESSING_ERROR = iota
|
||||
|
||||
BOLD_ON = "\033[1m"
|
||||
BOLD_OFF = "\033[21m"
|
||||
)
|
||||
|
||||
var log = func(msg string, args ...any) {
|
||||
fmt.Fprintf(os.Stderr, msg, args...)
|
||||
}
|
||||
var (
|
||||
log = func(msg string, args ...any) { fmt.Fprintf(color.Error, msg, args...) }
|
||||
logverbose = func(msg string, args ...any) {} // by default do nothing
|
||||
stdout = func(args ...any) { fmt.Fprintln(color.Output, args...) }
|
||||
)
|
||||
|
||||
func isPiped() bool {
|
||||
stat, _ := os.Stdin.Stat()
|
||||
return stat.Mode()&os.ModeCharDevice == 0
|
||||
}
|
||||
|
||||
func getStdinLinesOrBlank() chan string {
|
||||
multi := make(chan string)
|
||||
if hasStdinLines := writeStdinLinesOrNothing(multi); !hasStdinLines {
|
||||
single := make(chan string, 1)
|
||||
single <- ""
|
||||
close(single)
|
||||
return single
|
||||
} else {
|
||||
return multi
|
||||
func getJsonsOrBlank() iter.Seq[string] {
|
||||
var curr strings.Builder
|
||||
|
||||
var finalJsonErr error
|
||||
return func(yield func(string) bool) {
|
||||
hasStdin := writeStdinLinesOrNothing(func(stdinLine string) bool {
|
||||
// we're looking for an event, but it may span multiple lines, so if json parsing fails
|
||||
// we'll try the next line until we're successful
|
||||
curr.WriteString(stdinLine)
|
||||
stdinEvent := curr.String()
|
||||
|
||||
var dummy any
|
||||
if err := json.Unmarshal([]byte(stdinEvent), &dummy); err != nil {
|
||||
finalJsonErr = err
|
||||
return true
|
||||
}
|
||||
finalJsonErr = nil
|
||||
|
||||
if !yield(stdinEvent) {
|
||||
return false
|
||||
}
|
||||
|
||||
curr.Reset()
|
||||
return true
|
||||
})
|
||||
|
||||
if !hasStdin && !isPiped() {
|
||||
yield("{}")
|
||||
}
|
||||
|
||||
if finalJsonErr != nil {
|
||||
log(color.YellowString("stdin json parse error: %s", finalJsonErr))
|
||||
}
|
||||
}
|
||||
}
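As a rough illustration of the iterator pattern used by getJsonsOrBlank and the other stdin helpers here: a minimal standalone sketch, where readLines is a made-up stand-in for the real helpers rather than code from this repository.

package main

import (
	"bufio"
	"fmt"
	"iter"
	"os"
	"strings"
)

// readLines is a simplified stand-in for the helpers above: it yields one
// trimmed stdin line at a time and stops early if the consumer breaks out.
func readLines() iter.Seq[string] {
	return func(yield func(string) bool) {
		scanner := bufio.NewScanner(os.Stdin)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			if line == "" {
				continue
			}
			if !yield(line) {
				return // the consumer stopped ranging, so stop scanning
			}
		}
	}
}

func main() {
	// range-over-func (Go 1.23+): each yielded line becomes one loop iteration
	for line := range readLines() {
		fmt.Println("got:", line)
	}
}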
|
||||
|
||||
func getStdinLinesOrFirstArgument(c *cli.Context) chan string {
|
||||
func getStdinLinesOrBlank() iter.Seq[string] {
|
||||
return func(yield func(string) bool) {
|
||||
hasStdin := writeStdinLinesOrNothing(func(stdinLine string) bool {
|
||||
if !yield(stdinLine) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
if !hasStdin {
|
||||
yield("")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getStdinLinesOrArguments(args cli.Args) iter.Seq[string] {
|
||||
return getStdinLinesOrArgumentsFromSlice(args.Slice())
|
||||
}
|
||||
|
||||
func getStdinLinesOrArgumentsFromSlice(args []string) iter.Seq[string] {
|
||||
// try the first argument
|
||||
target := c.Args().First()
|
||||
if target != "" {
|
||||
single := make(chan string, 1)
|
||||
single <- target
|
||||
close(single)
|
||||
return single
|
||||
if len(args) > 0 {
|
||||
return slices.Values(args)
|
||||
}
|
||||
|
||||
// try the stdin
|
||||
multi := make(chan string)
|
||||
writeStdinLinesOrNothing(multi)
|
||||
return multi
|
||||
return func(yield func(string) bool) {
|
||||
writeStdinLinesOrNothing(yield)
|
||||
}
|
||||
}
|
||||
|
||||
func writeStdinLinesOrNothing(ch chan string) (hasStdinLines bool) {
|
||||
func writeStdinLinesOrNothing(yield func(string) bool) (hasStdinLines bool) {
|
||||
if isPiped() {
|
||||
// piped
|
||||
go func() {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
ch <- strings.TrimSpace(scanner.Text())
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
scanner.Buffer(make([]byte, 16*1024*1024), 256*1024*1024)
|
||||
hasEmittedAtLeastOne := false
|
||||
for scanner.Scan() {
|
||||
if !yield(strings.TrimSpace(scanner.Text())) {
|
||||
return
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
return true
|
||||
hasEmittedAtLeastOne = true
|
||||
}
|
||||
return hasEmittedAtLeastOne
|
||||
} else {
|
||||
// not piped
|
||||
return false
|
||||
}
|
||||
}
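The scanner.Buffer call above raises bufio.Scanner's default 64 KiB line limit so very large single-line events are not rejected. A small self-contained sketch of the same adjustment, using the same sizes as above:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// a single "line" far larger than bufio.Scanner's default 64KiB token limit
	huge := strings.Repeat("x", 1<<20) + "\n"

	scanner := bufio.NewScanner(strings.NewReader(huge))
	// without this call Scan() stops with bufio.ErrTooLong;
	// 16MiB is the initial buffer, 256MiB the maximum token size
	scanner.Buffer(make([]byte, 16*1024*1024), 256*1024*1024)

	for scanner.Scan() {
		fmt.Println("line length:", len(scanner.Text()))
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}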
|
||||
|
||||
func validateRelayURLs(wsurls []string) error {
|
||||
for _, wsurl := range wsurls {
|
||||
func normalizeAndValidateRelayURLs(wsurls []string) error {
|
||||
for i, wsurl := range wsurls {
|
||||
wsurl = nostr.NormalizeURL(wsurl)
|
||||
wsurls[i] = wsurl
|
||||
|
||||
u, err := url.Parse(wsurl)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid relay url '%s': %s", wsurl, err)
|
||||
@@ -95,76 +158,395 @@ func validateRelayURLs(wsurls []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func validate32BytesHex(target string) error {
|
||||
if _, err := hex.DecodeString(target); err != nil {
|
||||
return fmt.Errorf("target '%s' is not valid hex: %s", target, err)
|
||||
}
|
||||
if len(target) != 64 {
|
||||
return fmt.Errorf("expected '%s' to be 64 characters (32 bytes), got %d", target, len(target))
|
||||
}
|
||||
if strings.ToLower(target) != target {
|
||||
return fmt.Errorf("expected target to be all lowercase hex. try again with '%s'", strings.ToLower(target))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func connectToAllRelays(
|
||||
ctx context.Context,
|
||||
c *cli.Command,
|
||||
relayUrls []string,
|
||||
opts ...nostr.PoolOption,
|
||||
) (*nostr.SimplePool, []*nostr.Relay) {
|
||||
relays := make([]*nostr.Relay, 0, len(relayUrls))
|
||||
pool := nostr.NewSimplePool(ctx, opts...)
|
||||
preAuthSigner func(ctx context.Context, c *cli.Command, log func(s string, args ...any), authEvent *nostr.Event) (err error), // if this exists we will force preauth
|
||||
opts nostr.PoolOptions,
|
||||
) []*nostr.Relay {
|
||||
// first pass to check if these are valid relay URLs
|
||||
for _, url := range relayUrls {
|
||||
log("connecting to %s... ", url)
|
||||
if relay, err := pool.EnsureRelay(url); err == nil {
|
||||
relays = append(relays, relay)
|
||||
log("ok.\n")
|
||||
} else {
|
||||
log(err.Error() + "\n")
|
||||
if !nostr.IsValidRelayURL(nostr.NormalizeURL(url)) {
|
||||
log("invalid relay URL: %s\n", url)
|
||||
os.Exit(4)
|
||||
}
|
||||
}
|
||||
return pool, relays
|
||||
|
||||
opts.EventMiddleware = sys.TrackEventHints
|
||||
opts.PenaltyBox = true
|
||||
opts.RelayOptions = nostr.RelayOptions{
|
||||
RequestHeader: http.Header{textproto.CanonicalMIMEHeaderKey("user-agent"): {"nak/s"}},
|
||||
}
|
||||
sys.Pool = nostr.NewPool(opts)
|
||||
|
||||
relays := make([]*nostr.Relay, 0, len(relayUrls))
|
||||
|
||||
if supportsDynamicMultilineMagic() {
|
||||
// overcomplicated multiline rendering magic
|
||||
lines := make([][][]byte, len(relayUrls))
|
||||
flush := func() {
|
||||
for _, line := range lines {
|
||||
for _, part := range line {
|
||||
os.Stderr.Write(part)
|
||||
}
|
||||
os.Stderr.Write([]byte{'\n'})
|
||||
}
|
||||
}
|
||||
render := func() {
|
||||
clearLines(len(lines))
|
||||
flush()
|
||||
}
|
||||
flush()
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(relayUrls))
|
||||
for i, url := range relayUrls {
|
||||
lines[i] = make([][]byte, 1, 2)
|
||||
logthis := func(s string, args ...any) {
|
||||
lines[i] = append(lines[i], []byte(fmt.Sprintf(s, args...)))
|
||||
render()
|
||||
}
|
||||
colorizepreamble := func(c func(string, ...any) string) {
|
||||
lines[i][0] = []byte(fmt.Sprintf("%s... ", c(url)))
|
||||
}
|
||||
colorizepreamble(color.CyanString)
|
||||
|
||||
go func() {
|
||||
relay := connectToSingleRelay(ctx, c, url, preAuthSigner, colorizepreamble, logthis)
|
||||
if relay != nil {
|
||||
relays = append(relays, relay)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
} else {
|
||||
// simple flow
|
||||
for _, url := range relayUrls {
|
||||
log("connecting to %s... ", color.CyanString(url))
|
||||
relay := connectToSingleRelay(ctx, c, url, preAuthSigner, nil, log)
|
||||
if relay != nil {
|
||||
relays = append(relays, relay)
|
||||
}
|
||||
log("\n")
|
||||
}
|
||||
}
|
||||
|
||||
return relays
|
||||
}
|
||||
|
||||
func lineProcessingError(c *cli.Context, msg string, args ...any) {
|
||||
c.Context = context.WithValue(c.Context, LINE_PROCESSING_ERROR, true)
|
||||
func connectToSingleRelay(
|
||||
ctx context.Context,
|
||||
c *cli.Command,
|
||||
url string,
|
||||
preAuthSigner func(ctx context.Context, c *cli.Command, log func(s string, args ...any), authEvent *nostr.Event) (err error),
|
||||
colorizepreamble func(c func(string, ...any) string),
|
||||
logthis func(s string, args ...any),
|
||||
) *nostr.Relay {
|
||||
if relay, err := sys.Pool.EnsureRelay(url); err == nil {
|
||||
if preAuthSigner != nil {
|
||||
if colorizepreamble != nil {
|
||||
colorizepreamble(color.YellowString)
|
||||
}
|
||||
logthis("waiting for auth challenge... ")
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
|
||||
for range 5 {
|
||||
if err := relay.Auth(ctx, func(ctx context.Context, authEvent *nostr.Event) error {
|
||||
challengeTag := authEvent.Tags.Find("challenge")
|
||||
if challengeTag[1] == "" {
|
||||
return fmt.Errorf("auth not received yet *****") // what a giant hack
|
||||
}
|
||||
return preAuthSigner(ctx, c, logthis, authEvent)
|
||||
}); err == nil {
|
||||
// auth succeeded
|
||||
goto preauthSuccess
|
||||
} else {
|
||||
// auth failed
|
||||
if strings.HasSuffix(err.Error(), "auth not received yet *****") {
|
||||
// it failed because we didn't receive the challenge yet, so keep waiting
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
} else {
|
||||
// it failed for some other reason, so skip this relay
|
||||
if colorizepreamble != nil {
|
||||
colorizepreamble(colors.errorf)
|
||||
}
|
||||
logthis(err.Error())
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if colorizepreamble != nil {
|
||||
colorizepreamble(colors.errorf)
|
||||
}
|
||||
logthis("failed to get an AUTH challenge in enough time.")
|
||||
return nil
|
||||
}
|
||||
|
||||
preauthSuccess:
|
||||
if colorizepreamble != nil {
|
||||
colorizepreamble(colors.successf)
|
||||
}
|
||||
logthis("ok.")
|
||||
return relay
|
||||
} else {
|
||||
if colorizepreamble != nil {
|
||||
colorizepreamble(colors.errorf)
|
||||
}
|
||||
|
||||
// if we're here it means we failed to connect; the error may be huge,
// but we're likely only interested in the lowest-level cause (though we leave some space)
|
||||
logthis(clampError(err, len(url)+12))
|
||||
return nil
|
||||
}
|
||||
}
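A hypothetical call site for the two functions above, assuming the surrounding package and the new signature shown in this diff; the relay URL is a placeholder. Passing authSigner, defined further down, as the preAuthSigner forces NIP-42 auth right after connecting.

// sketch only: connect and force pre-auth on every relay
relays := connectToAllRelays(ctx, c, []string{"wss://relay.example.com"}, authSigner, nostr.PoolOptions{})
log("connected to %d relays\n", len(relays))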
|
||||
|
||||
func clearLines(lineCount int) {
|
||||
for i := 0; i < lineCount; i++ {
|
||||
os.Stderr.Write([]byte("\033[0A\033[2K\r"))
|
||||
}
|
||||
}
|
||||
|
||||
func supportsDynamicMultilineMagic() bool {
|
||||
if runtime.GOOS == "windows" {
|
||||
return false
|
||||
}
|
||||
if !term.IsTerminal(0) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
return false
|
||||
}
|
||||
|
||||
width, _, err := term.GetSize(int(os.Stderr.Fd()))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if width < 110 {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func authSigner(ctx context.Context, c *cli.Command, log func(s string, args ...any), authEvent *nostr.Event) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cleanUrl, _ := strings.CutPrefix(nip42.GetRelayURLFromAuthEvent(*authEvent), "wss://")
|
||||
log("%s auth failed: %s", colors.errorf(cleanUrl), err)
|
||||
}
|
||||
}()
|
||||
|
||||
if !c.Bool("auth") && !c.Bool("force-pre-auth") {
|
||||
return fmt.Errorf("auth required, but --auth flag not given")
|
||||
}
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pk, _ := kr.GetPublicKey(ctx)
|
||||
npub := nip19.EncodeNpub(pk)
|
||||
log("authenticating as %s... ", color.YellowString("%s…%s", npub[0:7], npub[58:]))
|
||||
|
||||
return kr.SignEvent(ctx, authEvent)
|
||||
}
|
||||
|
||||
func lineProcessingError(ctx context.Context, msg string, args ...any) context.Context {
|
||||
log(msg+"\n", args...)
|
||||
return context.WithValue(ctx, LINE_PROCESSING_ERROR, true)
|
||||
}
|
||||
|
||||
func exitIfLineProcessingError(c *cli.Context) {
|
||||
if val := c.Context.Value(LINE_PROCESSING_ERROR); val != nil && val.(bool) {
|
||||
func exitIfLineProcessingError(ctx context.Context) {
|
||||
if val := ctx.Value(LINE_PROCESSING_ERROR); val != nil && val.(bool) {
|
||||
os.Exit(123)
|
||||
}
|
||||
}
|
||||
|
||||
func gatherSecretKeyFromArguments(c *cli.Context) (string, error) {
|
||||
sec := c.String("sec")
|
||||
if c.Bool("prompt-sec") {
|
||||
if isPiped() {
|
||||
return "", fmt.Errorf("can't prompt for a secret key when processing data from a pipe, try again without --prompt-sec")
|
||||
}
|
||||
var err error
|
||||
sec, err = speakeasy.FAsk(os.Stderr, "type your secret key as nsec or hex: ")
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get secret key: %w", err)
|
||||
}
|
||||
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
func randString(n int) string {
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = letterBytes[rand.Intn(len(letterBytes))]
|
||||
}
|
||||
if strings.HasPrefix(sec, "nsec1") {
|
||||
_, hex, err := nip19.Decode(sec)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid nsec: %w", err)
|
||||
}
|
||||
sec = hex.(string)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func unwrapAll(err error) error {
|
||||
low := err
|
||||
for n := low; n != nil; n = errors.Unwrap(low) {
|
||||
low = n
|
||||
}
|
||||
if len(sec) > 64 {
|
||||
return "", fmt.Errorf("invalid secret key: too large")
|
||||
}
|
||||
sec = strings.Repeat("0", 64-len(sec)) + sec // left-pad
|
||||
if err := validate32BytesHex(sec); err != nil {
|
||||
return "", fmt.Errorf("invalid secret key")
|
||||
return low
|
||||
}
|
||||
|
||||
func clampMessage(msg string, prefixAlreadyPrinted int) string {
|
||||
termSize, _, _ := term.GetSize(int(os.Stderr.Fd()))
|
||||
|
||||
prf := "expected handshake response status code 101 but got "
|
||||
if len(msg) > len(prf) && msg[0:len(prf)] == prf {
|
||||
msg = "status " + msg[len(prf):]
|
||||
}
|
||||
|
||||
return sec, nil
|
||||
if len(msg) > termSize-prefixAlreadyPrinted && prefixAlreadyPrinted+1 < termSize {
|
||||
msg = msg[0:termSize-prefixAlreadyPrinted-1] + "…"
|
||||
}
|
||||
|
||||
return msg
|
||||
}
|
||||
|
||||
func clampError(err error, prefixAlreadyPrinted int) string {
|
||||
termSize, _, _ := term.GetSize(0)
|
||||
msg := err.Error()
|
||||
if len(msg) > termSize-prefixAlreadyPrinted {
|
||||
err = unwrapAll(err)
|
||||
msg = clampMessage(err.Error(), prefixAlreadyPrinted)
|
||||
}
|
||||
return msg
|
||||
}
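unwrapAll walks the errors.Unwrap chain down to the innermost error, which clampError then truncates to the terminal width. A standalone sketch of the unwrapping part, with rootCause as a made-up name for the same idea:

package main

import (
	"errors"
	"fmt"
)

// rootCause mirrors unwrapAll above: follow errors.Unwrap to the innermost error
func rootCause(err error) error {
	low := err
	for n := low; n != nil; n = errors.Unwrap(low) {
		low = n
	}
	return low
}

func main() {
	inner := errors.New("connection refused")
	wrapped := fmt.Errorf("dialing relay: %w", fmt.Errorf("websocket handshake: %w", inner))

	fmt.Println(wrapped)            // the full wrapped chain
	fmt.Println(rootCause(wrapped)) // just "connection refused"
}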
|
||||
|
||||
func appendUnique[A comparable](list []A, newEls ...A) []A {
|
||||
ex:
|
||||
for _, newEl := range newEls {
|
||||
for _, el := range list {
|
||||
if el == newEl {
|
||||
continue ex
|
||||
}
|
||||
}
|
||||
list = append(list, newEl)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
func askConfirmation(msg string) bool {
|
||||
if isPiped() {
|
||||
tty, err := tty.Open()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer tty.Close()
|
||||
|
||||
log(color.YellowString(msg))
|
||||
answer, err := tty.ReadString()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// print newline after password input
|
||||
fmt.Fprintln(os.Stderr)
|
||||
|
||||
answer = strings.TrimSpace(string(answer))
|
||||
return answer == "y" || answer == "yes"
|
||||
} else {
|
||||
config := &readline.Config{
|
||||
Stdout: color.Error,
|
||||
Prompt: color.YellowString(msg),
|
||||
InterruptPrompt: "^C",
|
||||
DisableAutoSaveHistory: true,
|
||||
EnableMask: false,
|
||||
MaskRune: '*',
|
||||
}
|
||||
|
||||
rl, err := readline.NewEx(config)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
answer, err := rl.Readline()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
answer = strings.ToLower(strings.TrimSpace(answer))
|
||||
return answer == "y" || answer == "yes"
|
||||
}
|
||||
}
|
||||
|
||||
func parsePubKey(value string) (nostr.PubKey, error) {
|
||||
// try nip05 first
|
||||
if nip05.IsValidIdentifier(value) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
|
||||
pp, err := nip05.QueryIdentifier(ctx, value)
|
||||
cancel()
|
||||
if err == nil {
|
||||
return pp.PublicKey, nil
|
||||
}
|
||||
// if nip05 fails, fall through to try as pubkey
|
||||
}
|
||||
|
||||
pk, err := nostr.PubKeyFromHex(value)
|
||||
if err == nil {
|
||||
return pk, nil
|
||||
}
|
||||
|
||||
if prefix, decoded, err := nip19.Decode(value); err == nil {
|
||||
switch prefix {
|
||||
case "npub":
|
||||
if pk, ok := decoded.(nostr.PubKey); ok {
|
||||
return pk, nil
|
||||
}
|
||||
case "nprofile":
|
||||
if profile, ok := decoded.(nostr.ProfilePointer); ok {
|
||||
return profile.PublicKey, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nostr.PubKey{}, fmt.Errorf("invalid pubkey (\"%s\"): expected hex, npub, or nprofile", value)
|
||||
}
|
||||
|
||||
func parseEventID(value string) (nostr.ID, error) {
|
||||
id, err := nostr.IDFromHex(value)
|
||||
if err == nil {
|
||||
return id, nil
|
||||
}
|
||||
|
||||
if prefix, decoded, err := nip19.Decode(value); err == nil {
|
||||
switch prefix {
|
||||
case "note":
|
||||
if id, ok := decoded.(nostr.ID); ok {
|
||||
return id, nil
|
||||
}
|
||||
case "nevent":
|
||||
if event, ok := decoded.(nostr.EventPointer); ok {
|
||||
return event.ID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nostr.ID{}, fmt.Errorf("invalid event id (\"%s\"): expected hex, note, or nevent", value)
|
||||
}
|
||||
|
||||
func decodeTagValue(value string) string {
|
||||
if strings.HasPrefix(value, "npub1") || strings.HasPrefix(value, "nevent1") || strings.HasPrefix(value, "note1") || strings.HasPrefix(value, "nprofile1") || strings.HasPrefix(value, "naddr1") {
|
||||
if ptr, err := nip19.ToPointer(value); err == nil {
|
||||
return ptr.AsTagReference()
|
||||
}
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
var colors = struct {
|
||||
reset func(...any) (int, error)
|
||||
italic func(...any) string
|
||||
italicf func(string, ...any) string
|
||||
bold func(...any) string
|
||||
boldf func(string, ...any) string
|
||||
error func(...any) string
|
||||
errorf func(string, ...any) string
|
||||
success func(...any) string
|
||||
successf func(string, ...any) string
|
||||
}{
|
||||
color.New(color.Reset).Print,
|
||||
color.New(color.Italic).Sprint,
|
||||
color.New(color.Italic).Sprintf,
|
||||
color.New(color.Bold).Sprint,
|
||||
color.New(color.Bold).Sprintf,
|
||||
color.New(color.Bold, color.FgHiRed).Sprint,
|
||||
color.New(color.Bold, color.FgHiRed).Sprintf,
|
||||
color.New(color.Bold, color.FgHiGreen).Sprint,
|
||||
color.New(color.Bold, color.FgHiGreen).Sprintf,
|
||||
}
|
||||
|
||||
helpers_key.go (new file, 194 lines)
@@ -0,0 +1,194 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/keyer"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/nip46"
|
||||
"fiatjaf.com/nostr/nip49"
|
||||
"github.com/chzyer/readline"
|
||||
"github.com/fatih/color"
|
||||
"github.com/mattn/go-tty"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var defaultKey = nostr.KeyOne.Hex()
|
||||
|
||||
var defaultKeyFlags = []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "sec",
|
||||
Usage: "secret key to sign the event, as nsec, ncryptsec or hex, or a bunker URL",
|
||||
DefaultText: "the key '01'",
|
||||
Category: CATEGORY_SIGNER,
|
||||
Sources: cli.EnvVars("NOSTR_SECRET_KEY"),
|
||||
Value: defaultKey,
|
||||
HideDefault: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "prompt-sec",
|
||||
Usage: "prompt the user to paste a hex or nsec with which to sign the event",
|
||||
Category: CATEGORY_SIGNER,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "connect-as",
|
||||
Usage: "private key to use when communicating with nip46 bunkers",
|
||||
DefaultText: "a random key",
|
||||
Category: CATEGORY_SIGNER,
|
||||
Sources: cli.EnvVars("NOSTR_CLIENT_KEY"),
|
||||
},
|
||||
}
|
||||
|
||||
func gatherKeyerFromArguments(ctx context.Context, c *cli.Command) (nostr.Keyer, nostr.SecretKey, error) {
|
||||
key, bunker, err := gatherSecretKeyOrBunkerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return nil, nostr.SecretKey{}, err
|
||||
}
|
||||
|
||||
var kr nostr.Keyer
|
||||
if bunker != nil {
|
||||
kr = keyer.NewBunkerSignerFromBunkerClient(bunker)
|
||||
} else {
|
||||
kr = keyer.NewPlainKeySigner(key)
|
||||
}
|
||||
|
||||
return kr, key, nil
|
||||
}
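A hypothetical command action showing how the keyer returned by gatherKeyerFromArguments is typically used, assuming this package's helpers (stdout, json) and the event fields used elsewhere in the codebase; signExampleAction is not part of the diff.

// sketch only: a command action that signs and prints a short note
func signExampleAction(ctx context.Context, c *cli.Command) error {
	kr, _, err := gatherKeyerFromArguments(ctx, c)
	if err != nil {
		return err
	}

	evt := nostr.Event{
		Kind:      1,
		Content:   "hello from nak",
		CreatedAt: nostr.Now(),
	}

	// the same call works whether kr wraps a plain key or a remote nip46 bunker
	if err := kr.SignEvent(ctx, &evt); err != nil {
		return err
	}

	b, _ := json.Marshal(evt)
	stdout(string(b))
	return nil
}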
|
||||
|
||||
func gatherSecretKeyOrBunkerFromArguments(ctx context.Context, c *cli.Command) (nostr.SecretKey, *nip46.BunkerClient, error) {
|
||||
sec := c.String("sec")
|
||||
if strings.HasPrefix(sec, "bunker://") {
|
||||
// it's a bunker
|
||||
bunkerURL := sec
|
||||
clientKeyHex := c.String("connect-as")
|
||||
var clientKey nostr.SecretKey
|
||||
|
||||
if clientKeyHex != "" {
|
||||
var err error
|
||||
clientKey, err = nostr.SecretKeyFromHex(clientKeyHex)
|
||||
if err != nil {
|
||||
return nostr.SecretKey{}, nil, fmt.Errorf("bunker client key '%s' is invalid: %w", clientKeyHex, err)
|
||||
}
|
||||
} else {
|
||||
clientKey = nostr.Generate()
|
||||
}
|
||||
|
||||
logverbose("[nip46]: connecting to %s with client key %s\n", bunkerURL, clientKey.Hex())
|
||||
|
||||
bunker, err := nip46.ConnectBunker(ctx, clientKey, bunkerURL, nil, func(s string) {
|
||||
log(color.CyanString("[nip46]: open the following URL: %s"), s)
|
||||
})
|
||||
if err != nil {
|
||||
return nostr.SecretKey{}, nil, fmt.Errorf("failed to connect to %s: %w", bunkerURL, err)
|
||||
}
|
||||
|
||||
return nostr.SecretKey{}, bunker, err
|
||||
}
|
||||
|
||||
if c.Bool("prompt-sec") {
|
||||
var err error
|
||||
sec, err = askPassword("type your secret key as ncryptsec, nsec or hex: ", nil)
|
||||
if err != nil {
|
||||
return nostr.SecretKey{}, nil, fmt.Errorf("failed to get secret key: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if strings.HasPrefix(sec, "ncryptsec1") {
|
||||
sk, err := promptDecrypt(sec)
|
||||
if err != nil {
|
||||
return nostr.SecretKey{}, nil, fmt.Errorf("failed to decrypt: %w", err)
|
||||
}
|
||||
return sk, nil, nil
|
||||
}
|
||||
|
||||
if prefix, ski, err := nip19.Decode(sec); err == nil && prefix == "nsec" {
|
||||
return ski.(nostr.SecretKey), nil, nil
|
||||
}
|
||||
|
||||
sk, err := nostr.SecretKeyFromHex(sec)
|
||||
if err != nil {
|
||||
return nostr.SecretKey{}, nil, fmt.Errorf("invalid secret key: %w", err)
|
||||
}
|
||||
|
||||
return sk, nil, nil
|
||||
}
|
||||
|
||||
func promptDecrypt(ncryptsec string) (nostr.SecretKey, error) {
|
||||
for i := 1; i < 4; i++ {
|
||||
var attemptStr string
|
||||
if i > 1 {
|
||||
attemptStr = fmt.Sprintf(" [%d/3]", i)
|
||||
}
|
||||
password, err := askPassword("type the password to decrypt your secret key"+attemptStr+": ", nil)
|
||||
if err != nil {
|
||||
return nostr.SecretKey{}, err
|
||||
}
|
||||
sec, err := nip49.Decrypt(ncryptsec, password)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
return sec, nil
|
||||
}
|
||||
return nostr.SecretKey{}, fmt.Errorf("couldn't decrypt private key")
|
||||
}
|
||||
|
||||
func askPassword(msg string, shouldAskAgain func(answer string) bool) (string, error) {
|
||||
if isPiped() {
|
||||
// use TTY method when stdin is piped
|
||||
tty, err := tty.Open()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("can't prompt for a secret key when processing data from a pipe on this system (failed to open /dev/tty: %w), try again without --prompt-sec or provide the key via --sec or NOSTR_SECRET_KEY environment variable", err)
|
||||
}
|
||||
defer tty.Close()
|
||||
for {
|
||||
// print the prompt to stderr so it's visible to the user
|
||||
log(color.YellowString(msg))
|
||||
|
||||
// read password from TTY with masking
|
||||
password, err := tty.ReadPassword()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// print newline after password input
|
||||
fmt.Fprintln(os.Stderr)
|
||||
|
||||
answer := strings.TrimSpace(string(password))
|
||||
if shouldAskAgain != nil && shouldAskAgain(answer) {
|
||||
continue
|
||||
}
|
||||
return answer, nil
|
||||
}
|
||||
} else {
|
||||
// use normal readline method when stdin is not piped
|
||||
config := &readline.Config{
|
||||
Stdout: os.Stderr,
|
||||
Prompt: color.YellowString(msg),
|
||||
InterruptPrompt: "^C",
|
||||
DisableAutoSaveHistory: true,
|
||||
EnableMask: true,
|
||||
MaskRune: '*',
|
||||
}
|
||||
|
||||
rl, err := readline.NewEx(config)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for {
|
||||
answer, err := rl.Readline()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
answer = strings.TrimSpace(answer)
|
||||
if shouldAskAgain != nil && shouldAskAgain(answer) {
|
||||
continue
|
||||
}
|
||||
return answer, err
|
||||
}
|
||||
}
|
||||
}
|
||||
justfile (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
test:
|
||||
#!/usr/bin/env fish
|
||||
for test in (go test -list .)
|
||||
go test -run=$test -v
|
||||
end
|
||||
key.go (new file, 295 lines)
@@ -0,0 +1,295 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/nip49"
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/btcsuite/btcd/btcec/v2/schnorr/musig2"
|
||||
"github.com/decred/dcrd/dcrec/secp256k1/v4"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var key = &cli.Command{
|
||||
Name: "key",
|
||||
Usage: "operations on secret keys: generate, derive, encrypt, decrypt",
|
||||
Description: ``,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Commands: []*cli.Command{
|
||||
generate,
|
||||
public,
|
||||
encryptKey,
|
||||
decryptKey,
|
||||
combine,
|
||||
},
|
||||
}
|
||||
|
||||
var generate = &cli.Command{
|
||||
Name: "generate",
|
||||
Usage: "generates a secret key",
|
||||
Description: ``,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
sec := nostr.Generate()
|
||||
stdout(sec.Hex())
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var public = &cli.Command{
|
||||
Name: "public",
|
||||
Usage: "computes a public key from a secret key",
|
||||
Description: ``,
|
||||
ArgsUsage: "[secret]",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "with-parity",
|
||||
Usage: "output 33 bytes instead of 32, the first one being either '02' or '03', a prefix indicating whether this pubkey is even or odd.",
|
||||
},
|
||||
},
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for sk := range getSecretKeysFromStdinLinesOrSlice(ctx, c, c.Args().Slice()) {
|
||||
_, pk := btcec.PrivKeyFromBytes(sk[:])
|
||||
|
||||
if c.Bool("with-parity") {
|
||||
stdout(hex.EncodeToString(pk.SerializeCompressed()))
|
||||
} else {
|
||||
stdout(hex.EncodeToString(pk.SerializeCompressed()[1:]))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var encryptKey = &cli.Command{
|
||||
Name: "encrypt",
|
||||
Usage: "encrypts a secret key and prints an ncryptsec code",
|
||||
Description: `uses the nip49 standard.`,
|
||||
ArgsUsage: "<secret> <password>",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "logn",
|
||||
Usage: "the bigger the number the harder it will be to bruteforce the password",
|
||||
Value: 16,
|
||||
DefaultText: "16",
|
||||
},
|
||||
},
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
keys := make([]string, 0, 1)
|
||||
var password string
|
||||
switch c.Args().Len() {
|
||||
case 1:
|
||||
password = c.Args().Get(0)
|
||||
case 2:
|
||||
keys = append(keys, c.Args().Get(0))
|
||||
password = c.Args().Get(1)
|
||||
}
|
||||
if password == "" {
|
||||
return fmt.Errorf("no password given")
|
||||
}
|
||||
for sec := range getSecretKeysFromStdinLinesOrSlice(ctx, c, keys) {
|
||||
ncryptsec, err := nip49.Encrypt(sec, password, uint8(c.Int("logn")), 0x02)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "failed to encrypt: %s", err)
|
||||
continue
|
||||
}
|
||||
stdout(ncryptsec)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var decryptKey = &cli.Command{
|
||||
Name: "decrypt",
|
||||
Usage: "takes an ncrypsec and a password and decrypts it into an nsec",
|
||||
Description: `uses the nip49 standard.`,
|
||||
ArgsUsage: "<ncryptsec-code> <password>",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
var ncryptsec string
|
||||
var password string
|
||||
switch c.Args().Len() {
|
||||
case 2:
|
||||
ncryptsec = c.Args().Get(0)
|
||||
password = c.Args().Get(1)
|
||||
if password == "" {
|
||||
return fmt.Errorf("no password given")
|
||||
}
|
||||
sk, err := nip49.Decrypt(ncryptsec, password)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decrypt: %s", err)
|
||||
}
|
||||
stdout(sk.Hex())
|
||||
return nil
|
||||
case 1:
|
||||
if arg := c.Args().Get(0); strings.HasPrefix(arg, "ncryptsec1") {
|
||||
ncryptsec = arg
|
||||
if sk, err := promptDecrypt(ncryptsec); err != nil {
|
||||
return err
|
||||
} else {
|
||||
stdout(sk.Hex())
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
password = c.Args().Get(0)
|
||||
for ncryptsec := range getStdinLinesOrArgumentsFromSlice([]string{ncryptsec}) {
|
||||
sk, err := nip49.Decrypt(ncryptsec, password)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "failed to decrypt: %s", err)
|
||||
continue
|
||||
}
|
||||
stdout(sk.Hex())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("invalid number of arguments")
|
||||
}
|
||||
},
|
||||
}
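A rough round-trip sketch using the same nip49 calls as the encrypt and decrypt commands above; it is standalone, the password is made up, and the exact parameter types follow what the calls in this diff suggest.

package main

import (
	"fmt"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/nip49"
)

func main() {
	sec := nostr.Generate()

	// log_n=16 matches the default of `nak key encrypt`; 0x02 is the same
	// key-security byte passed in the command above
	ncryptsec, err := nip49.Encrypt(sec, "correct horse battery staple", 16, 0x02)
	if err != nil {
		panic(err)
	}
	fmt.Println(ncryptsec)

	// decrypting with the same password yields the original key back
	decrypted, err := nip49.Decrypt(ncryptsec, "correct horse battery staple")
	if err != nil {
		panic(err)
	}
	fmt.Println(decrypted.Hex() == sec.Hex()) // true
}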
|
||||
|
||||
var combine = &cli.Command{
|
||||
Name: "combine",
|
||||
Usage: "combines two or more pubkeys using musig2",
|
||||
Description: `The public keys must have 33 bytes (66 characters hex), with the 02 or 03 prefix. It is common in Nostr to drop that first byte, so you'll have to derive the public keys again from the private keys in order to get it back.
|
||||
|
||||
However, if the intent is to check if two existing Nostr pubkeys match a given combined pubkey, then it might be sufficient to calculate the combined key for all the possible combinations of pubkeys in the input.`,
|
||||
ArgsUsage: "[pubkey...]",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
type Combination struct {
|
||||
Variants []string `json:"input_variants"`
|
||||
Output struct {
|
||||
XOnly string `json:"x_only"`
|
||||
Variant string `json:"variant"`
|
||||
} `json:"combined_key"`
|
||||
}
|
||||
|
||||
type Result struct {
|
||||
Keys []string `json:"keys"`
|
||||
Combinations []Combination `json:"combinations"`
|
||||
}
|
||||
|
||||
result := Result{}
|
||||
|
||||
result.Keys = c.Args().Slice()
|
||||
keyGroups := make([][]*btcec.PublicKey, 0, len(result.Keys))
|
||||
|
||||
for i, keyhex := range result.Keys {
|
||||
keyb, err := hex.DecodeString(keyhex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing key %s: %w", keyhex, err)
|
||||
}
|
||||
|
||||
if len(keyb) == 32 /* we'll use both the 02 and the 03 prefix versions */ {
|
||||
group := make([]*btcec.PublicKey, 2)
|
||||
for i, prefix := range []byte{0x02, 0x03} {
|
||||
pubk, err := btcec.ParsePubKey(append([]byte{prefix}, keyb...))
|
||||
if err != nil {
|
||||
log("error parsing key %s: %s", keyhex, err)
|
||||
continue
|
||||
}
|
||||
group[i] = pubk
|
||||
}
|
||||
keyGroups = append(keyGroups, group)
|
||||
} else /* assume it's 33 */ {
|
||||
pubk, err := btcec.ParsePubKey(keyb)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing key %s: %w", keyhex, err)
|
||||
}
|
||||
keyGroups = append(keyGroups, []*btcec.PublicKey{pubk})
|
||||
|
||||
// remove the leading byte from the output just so it is all uniform
|
||||
result.Keys[i] = result.Keys[i][2:]
|
||||
}
|
||||
}
|
||||
|
||||
result.Combinations = make([]Combination, 0, 16)
|
||||
|
||||
var fn func(prepend int, curr []int)
|
||||
fn = func(prepend int, curr []int) {
|
||||
curr = append([]int{prepend}, curr...)
|
||||
if len(curr) == len(keyGroups) {
|
||||
combi := Combination{
|
||||
Variants: make([]string, len(keyGroups)),
|
||||
}
|
||||
|
||||
combining := make([]*btcec.PublicKey, len(keyGroups))
|
||||
for g, altKeys := range keyGroups {
|
||||
altKey := altKeys[curr[g]]
|
||||
variant := secp256k1.PubKeyFormatCompressedEven
|
||||
if altKey.Y().Bit(0) == 1 {
|
||||
variant = secp256k1.PubKeyFormatCompressedOdd
|
||||
}
|
||||
combi.Variants[g] = hex.EncodeToString([]byte{variant})
|
||||
combining[g] = altKey
|
||||
}
|
||||
|
||||
agg, _, _, err := musig2.AggregateKeys(combining, true)
|
||||
if err != nil {
|
||||
log("error aggregating: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
serialized := agg.FinalKey.SerializeCompressed()
|
||||
combi.Output.XOnly = hex.EncodeToString(serialized[1:])
|
||||
combi.Output.Variant = hex.EncodeToString(serialized[0:1])
|
||||
result.Combinations = append(result.Combinations, combi)
|
||||
return
|
||||
}
|
||||
|
||||
fn(0, curr)
|
||||
if len(keyGroups[len(keyGroups)-len(curr)-1]) > 1 {
|
||||
fn(1, curr)
|
||||
}
|
||||
}
|
||||
|
||||
fn(0, nil)
|
||||
if len(keyGroups[len(keyGroups)-1]) > 1 {
|
||||
fn(1, nil)
|
||||
}
|
||||
|
||||
res, _ := json.MarshalIndent(result, "", " ")
|
||||
stdout(string(res))
|
||||
|
||||
return nil
|
||||
},
|
||||
}
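The combine description above requires 33-byte (66-hex-character) pubkeys carrying the 02/03 parity prefix. This standalone sketch shows how such a key can be re-derived from a secret key, mirroring what `nak key public --with-parity` does with the same btcec calls; it is an illustration, not code from the diff.

package main

import (
	"encoding/hex"
	"fmt"

	"fiatjaf.com/nostr"
	"github.com/btcsuite/btcd/btcec/v2"
)

func main() {
	sk := nostr.Generate()

	_, pk := btcec.PrivKeyFromBytes(sk[:])
	compressed := pk.SerializeCompressed()

	fmt.Println("with parity prefix:", hex.EncodeToString(compressed))     // 33 bytes, starts with 02 or 03
	fmt.Println("x-only (nostr):    ", hex.EncodeToString(compressed[1:])) // 32 bytes, what nostr uses
}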
|
||||
|
||||
func getSecretKeysFromStdinLinesOrSlice(ctx context.Context, _ *cli.Command, keys []string) chan nostr.SecretKey {
|
||||
ch := make(chan nostr.SecretKey)
|
||||
go func() {
|
||||
for sec := range getStdinLinesOrArgumentsFromSlice(keys) {
|
||||
if sec == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
var sk nostr.SecretKey
|
||||
if strings.HasPrefix(sec, "nsec1") {
|
||||
_, data, err := nip19.Decode(sec)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "invalid nsec code: %s", err)
|
||||
continue
|
||||
}
|
||||
sk = data.(nostr.SecretKey)
|
||||
} else {
// not an nsec, so it must be plain hex
var err error
sk, err = nostr.SecretKeyFromHex(sec)
if err != nil {
ctx = lineProcessingError(ctx, "invalid hex key: %s", err)
continue
}
}
|
||||
|
||||
ch <- sk
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
main.go (129 lines changed)
@@ -1,44 +1,143 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/urfave/cli/v2"
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/sdk"
|
||||
"github.com/fatih/color"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var app = &cli.App{
|
||||
Name: "nak",
|
||||
Usage: "the nostr army knife command-line tool",
|
||||
var (
|
||||
version string = "debug"
|
||||
isVerbose bool = false
|
||||
)
|
||||
|
||||
var app = &cli.Command{
|
||||
Name: "nak",
|
||||
Suggest: true,
|
||||
UseShortOptionHandling: true,
|
||||
Usage: "the nostr army knife command-line tool",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Commands: []*cli.Command{
|
||||
req,
|
||||
count,
|
||||
fetch,
|
||||
event,
|
||||
req,
|
||||
filter,
|
||||
fetch,
|
||||
count,
|
||||
decode,
|
||||
encode,
|
||||
key,
|
||||
verify,
|
||||
relay,
|
||||
nsecbunker,
|
||||
admin,
|
||||
bunker,
|
||||
serve,
|
||||
blossomCmd,
|
||||
encrypt,
|
||||
decrypt,
|
||||
outbox,
|
||||
wallet,
|
||||
mcpServer,
|
||||
curl,
|
||||
fsCmd,
|
||||
publish,
|
||||
git,
|
||||
},
|
||||
Version: version,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "config-path",
|
||||
Hidden: true,
|
||||
Value: (func() string {
|
||||
if home, err := os.UserHomeDir(); err == nil {
|
||||
return filepath.Join(home, ".config/nak")
|
||||
} else {
|
||||
return filepath.Join("/dev/null")
|
||||
}
|
||||
})(),
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "silent",
|
||||
Usage: "do not print logs and info messages to stderr",
|
||||
Aliases: []string{"s"},
|
||||
Action: func(ctx *cli.Context, b bool) error {
|
||||
if b {
|
||||
Name: "quiet",
|
||||
Usage: "do not print logs and info messages to stderr, use -qq to also not print anything to stdout",
|
||||
Aliases: []string{"q"},
|
||||
Action: func(ctx context.Context, c *cli.Command, b bool) error {
|
||||
q := c.Count("quiet")
|
||||
if q >= 1 {
|
||||
log = func(msg string, args ...any) {}
|
||||
if q >= 2 {
|
||||
stdout = func(_ ...any) {}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "verbose",
|
||||
Usage: "print more stuff than normally",
|
||||
Aliases: []string{"v"},
|
||||
Action: func(ctx context.Context, c *cli.Command, b bool) error {
|
||||
v := c.Count("verbose")
|
||||
if v >= 1 {
|
||||
logverbose = log
|
||||
isVerbose = true
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
Before: func(ctx context.Context, c *cli.Command) (context.Context, error) {
|
||||
sys = sdk.NewSystem()
|
||||
|
||||
if err := initializeOutboxHintsDB(c, sys); err != nil {
|
||||
return ctx, fmt.Errorf("failed to initialize outbox hints: %w", err)
|
||||
}
|
||||
|
||||
sys.Pool = nostr.NewPool(nostr.PoolOptions{
|
||||
AuthorKindQueryMiddleware: sys.TrackQueryAttempts,
|
||||
EventMiddleware: sys.TrackEventHints,
|
||||
RelayOptions: nostr.RelayOptions{
|
||||
RequestHeader: http.Header{textproto.CanonicalMIMEHeaderKey("user-agent"): {"nak/b"}},
|
||||
},
|
||||
})
|
||||
|
||||
return ctx, nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
cli.VersionFlag = &cli.BoolFlag{
|
||||
Name: "version",
|
||||
Usage: "prints the version",
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fmt.Println(err)
|
||||
defer colors.reset()
|
||||
|
||||
// a megahack to enable this curl command proxy
|
||||
if len(os.Args) > 2 && os.Args[1] == "curl" {
|
||||
if err := realCurl(); err != nil {
|
||||
if err != nil {
|
||||
log(color.YellowString(err.Error()) + "\n")
|
||||
}
|
||||
colors.reset()
|
||||
os.Exit(1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := app.Run(context.Background(), os.Args); err != nil {
|
||||
if err != nil {
|
||||
log("%s\n", color.RedString(err.Error()))
|
||||
}
|
||||
colors.reset()
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
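The hunk above migrates the app from urfave/cli v2 to v3. A minimal standalone sketch (not from this repository) of the v3 shape it moves to, for readers unfamiliar with the context-first signatures:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/urfave/cli/v3"
)

func main() {
	cmd := &cli.Command{
		Name:  "demo",
		Usage: "illustrates the cli/v3 API used above",
		Action: func(ctx context.Context, c *cli.Command) error {
			// v3 Actions receive a context.Context plus the *cli.Command,
			// instead of v2's single *cli.Context
			fmt.Println("hello from cli/v3")
			return nil
		},
	}
	if err := cmd.Run(context.Background(), os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}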
mcp.go (new file, 268 lines)
@@ -0,0 +1,268 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/sdk"
|
||||
"github.com/mark3labs/mcp-go/mcp"
|
||||
"github.com/mark3labs/mcp-go/server"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var mcpServer = &cli.Command{
|
||||
Name: "mcp",
|
||||
Usage: "pander to the AI gods",
|
||||
Description: ``,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: append(
|
||||
defaultKeyFlags,
|
||||
),
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
s := server.NewMCPServer(
|
||||
"nak",
|
||||
version,
|
||||
)
|
||||
|
||||
keyer, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.AddTool(mcp.NewTool("publish_note",
|
||||
mcp.WithDescription("Publish a short note event to Nostr with the given text content"),
|
||||
mcp.WithString("content", mcp.Description("Arbitrary string to be published"), mcp.Required()),
|
||||
mcp.WithString("relay", mcp.Description("Relay to publish the note to")),
|
||||
mcp.WithString("mention", mcp.Description("Nostr user's public key to be mentioned")),
|
||||
), func(ctx context.Context, r mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
content := required[string](r, "content")
|
||||
mention, _ := optional[string](r, "mention")
|
||||
relay, _ := optional[string](r, "relay")
|
||||
|
||||
var relays []string
|
||||
|
||||
evt := nostr.Event{
|
||||
Kind: 1,
|
||||
Tags: nostr.Tags{{"client", "goose/nak"}},
|
||||
Content: content,
|
||||
CreatedAt: nostr.Now(),
|
||||
}
|
||||
|
||||
if mention != "" {
|
||||
pk, err := nostr.PubKeyFromHex(mention)
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError("the given mention isn't a valid public key, it must be 32 bytes hex, like the ones returned by search_profile. Got error: " + err.Error()), nil
|
||||
}
|
||||
|
||||
evt.Tags = append(evt.Tags, nostr.Tag{"p", pk.Hex()})
|
||||
// their inbox relays
|
||||
relays = sys.FetchInboxRelays(ctx, pk, 3)
|
||||
}
|
||||
|
||||
if err := keyer.SignEvent(ctx, &evt); err != nil {
|
||||
return mcp.NewToolResultError("it was impossible to sign the event, so we can't proceed to publishwith publishing it."), nil
|
||||
}
|
||||
|
||||
// our write relays
|
||||
relays = append(relays, sys.FetchOutboxRelays(ctx, evt.PubKey, 3)...)
|
||||
|
||||
if len(relays) == 0 {
|
||||
relays = []string{"nos.lol", "relay.damus.io"}
|
||||
}
|
||||
|
||||
// extra relay specified
|
||||
if relay != "" {
|
||||
relays = append(relays, relay)
|
||||
}
|
||||
|
||||
result := strings.Builder{}
|
||||
result.WriteString(
|
||||
fmt.Sprintf("the event we generated has id '%s', kind '%d' and is signed by pubkey '%s'. ",
|
||||
evt.ID,
|
||||
evt.Kind,
|
||||
evt.PubKey,
|
||||
),
|
||||
)
|
||||
|
||||
for res := range sys.Pool.PublishMany(ctx, relays, evt) {
|
||||
if res.Error != nil {
|
||||
result.WriteString(
|
||||
fmt.Sprintf("there was an error publishing the event to the relay %s. ",
|
||||
res.RelayURL),
|
||||
)
|
||||
} else {
|
||||
result.WriteString(
|
||||
fmt.Sprintf("the event was successfully published to the relay %s. ",
|
||||
res.RelayURL),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return mcp.NewToolResultText(result.String()), nil
|
||||
})
|
||||
|
||||
s.AddTool(mcp.NewTool("resolve_nostr_uri",
|
||||
mcp.WithDescription("Resolve URIs prefixed with nostr:, including nostr:nevent1..., nostr:npub1..., nostr:nprofile1... and nostr:naddr1..."),
|
||||
mcp.WithString("uri", mcp.Description("URI to be resolved"), mcp.Required()),
|
||||
), func(ctx context.Context, r mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
uri := required[string](r, "uri")
|
||||
if strings.HasPrefix(uri, "nostr:") {
|
||||
uri = uri[6:]
|
||||
}
|
||||
|
||||
prefix, data, err := nip19.Decode(uri)
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError("this Nostr uri is invalid"), nil
|
||||
}
|
||||
|
||||
switch prefix {
|
||||
case "npub":
|
||||
pm := sys.FetchProfileMetadata(ctx, data.(nostr.PubKey))
|
||||
return mcp.NewToolResultText(
|
||||
fmt.Sprintf("this is a Nostr profile named '%s', their public key is '%s'",
|
||||
pm.ShortName(), pm.PubKey),
|
||||
), nil
|
||||
case "nprofile":
|
||||
pm, _ := sys.FetchProfileFromInput(ctx, uri)
|
||||
return mcp.NewToolResultText(
|
||||
fmt.Sprintf("this is a Nostr profile named '%s', their public key is '%s'",
|
||||
pm.ShortName(), pm.PubKey),
|
||||
), nil
|
||||
case "nevent":
|
||||
event, _, err := sys.FetchSpecificEventFromInput(ctx, uri, sdk.FetchSpecificEventParameters{
|
||||
WithRelays: false,
|
||||
})
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError("Couldn't find this event anywhere"), nil
|
||||
}
|
||||
|
||||
return mcp.NewToolResultText(
|
||||
fmt.Sprintf("this is a Nostr event: %s", event),
|
||||
), nil
|
||||
case "naddr":
|
||||
return mcp.NewToolResultError("For now we can't handle this kind of Nostr uri"), nil
|
||||
default:
|
||||
return mcp.NewToolResultError("We don't know how to handle this Nostr uri"), nil
|
||||
}
|
||||
})
|
||||
|
||||
s.AddTool(mcp.NewTool("search_profile",
|
||||
mcp.WithDescription("Search for the public key of a Nostr user given their name"),
|
||||
mcp.WithString("name", mcp.Description("Name to be searched"), mcp.Required()),
|
||||
mcp.WithNumber("limit", mcp.Description("How many results to return")),
|
||||
), func(ctx context.Context, r mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
name := required[string](r, "name")
|
||||
limit, _ := optional[float64](r, "limit")
|
||||
|
||||
filter := nostr.Filter{Search: name, Kinds: []nostr.Kind{0}}
|
||||
if limit > 0 {
|
||||
filter.Limit = int(limit)
|
||||
}
|
||||
|
||||
res := strings.Builder{}
|
||||
res.WriteString("Search results: ")
|
||||
l := 0
|
||||
for result := range sys.Pool.FetchMany(ctx, []string{"relay.nostr.band", "nostr.wine"}, filter, nostr.SubscriptionOptions{
|
||||
Label: "nak-mcp-search",
|
||||
}) {
|
||||
l++
|
||||
pm, _ := sdk.ParseMetadata(result.Event)
|
||||
res.WriteString(fmt.Sprintf("\n\nResult %d\nUser name: \"%s\"\nPublic key: \"%s\"\nDescription: \"%s\"\n",
|
||||
l, pm.ShortName(), pm.PubKey.Hex(), pm.About))
|
||||
|
||||
if l >= int(limit) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if l == 0 {
|
||||
return mcp.NewToolResultError("Couldn't find anyone with that name."), nil
|
||||
}
|
||||
return mcp.NewToolResultText(res.String()), nil
|
||||
})
|
||||
|
||||
s.AddTool(mcp.NewTool("get_outbox_relay_for_pubkey",
|
||||
mcp.WithDescription("Get the best relay from where to read notes from a specific Nostr user"),
|
||||
mcp.WithString("pubkey", mcp.Description("Public key of Nostr user we want to know the relay from where to read"), mcp.Required()),
|
||||
), func(ctx context.Context, r mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
pubkey, err := nostr.PubKeyFromHex(required[string](r, "pubkey"))
|
||||
if err != nil {
|
||||
return mcp.NewToolResultError("the pubkey given isn't a valid public key, it must be 32 bytes hex, like the ones returned by search_profile. Got error: " + err.Error()), nil
|
||||
}
|
||||
|
||||
res := sys.FetchOutboxRelays(ctx, pubkey, 1)
|
||||
return mcp.NewToolResultText(res[0]), nil
|
||||
})
|
||||
|
||||
s.AddTool(mcp.NewTool("read_events_from_relay",
|
||||
mcp.WithDescription("Makes a REQ query to one relay using the specified parameters, this can be used to fetch notes from a profile"),
|
||||
mcp.WithString("relay", mcp.Description("relay URL to send the query to"), mcp.Required()),
|
||||
mcp.WithNumber("kind", mcp.Description("event kind number to include in the 'kinds' field"), mcp.Required()),
|
||||
mcp.WithNumber("limit", mcp.Description("maximum number of events to query"), mcp.Required()),
|
||||
mcp.WithString("pubkey", mcp.Description("pubkey to include in the 'authors' field, if this is not given we will read any events from this relay")),
|
||||
), func(ctx context.Context, r mcp.CallToolRequest) (*mcp.CallToolResult, error) {
|
||||
relay := required[string](r, "relay")
|
||||
kind := int(required[float64](r, "kind"))
|
||||
limit := int(required[float64](r, "limit"))
|
||||
pubkey, hasPubKey := optional[string](r, "pubkey")
|
||||
|
||||
filter := nostr.Filter{
|
||||
Limit: limit,
|
||||
Kinds: []nostr.Kind{nostr.Kind(kind)},
|
||||
}
|
||||
|
||||
if hasPubKey {
|
||||
if pk, err := nostr.PubKeyFromHex(pubkey); err != nil {
|
||||
return mcp.NewToolResultError("the pubkey given isn't a valid public key, it must be 32 bytes hex, like the ones returned by search_profile. Got error: " + err.Error()), nil
|
||||
} else {
|
||||
filter.Authors = append(filter.Authors, pk)
|
||||
}
|
||||
}
|
||||
|
||||
events := sys.Pool.FetchMany(ctx, []string{relay}, filter, nostr.SubscriptionOptions{
|
||||
Label: "nak-mcp-profile-events",
|
||||
})
|
||||
|
||||
result := strings.Builder{}
|
||||
for ie := range events {
|
||||
result.WriteString("author public key: ")
|
||||
result.WriteString(ie.PubKey.Hex())
|
||||
result.WriteString("content: '")
|
||||
result.WriteString(ie.Content)
|
||||
result.WriteString("'")
|
||||
result.WriteString("\n---\n")
|
||||
}
|
||||
|
||||
return mcp.NewToolResultText(result.String()), nil
|
||||
})
|
||||
|
||||
return server.ServeStdio(s)
|
||||
},
|
||||
}
|
||||
|
||||
func required[T comparable](r mcp.CallToolRequest, p string) T {
|
||||
var zero T
|
||||
if _, ok := r.Params.Arguments[p]; !ok {
|
||||
return zero
|
||||
}
|
||||
if _, ok := r.Params.Arguments[p].(T); !ok {
|
||||
return zero
|
||||
}
|
||||
if r.Params.Arguments[p].(T) == zero {
|
||||
return zero
|
||||
}
|
||||
return r.Params.Arguments[p].(T)
|
||||
}
|
||||
|
||||
func optional[T any](r mcp.CallToolRequest, p string) (T, bool) {
|
||||
var zero T
|
||||
if _, ok := r.Params.Arguments[p]; !ok {
|
||||
return zero, false
|
||||
}
|
||||
if _, ok := r.Params.Arguments[p].(T); !ok {
|
||||
return zero, false
|
||||
}
|
||||
return r.Params.Arguments[p].(T), true
|
||||
}
|
||||
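A small hypothetical fragment showing how the two generic argument helpers above are intended to be combined inside a tool handler (the "limit" default is an assumption for the example, not taken from the diff):

func exampleArguments(r mcp.CallToolRequest) (content string, limit int) {
	content = required[string](r, "content") // zero value if the argument is missing or of the wrong type
	l, ok := optional[float64](r, "limit")   // also reports whether the argument was present
	if !ok {
		l = 10 // assumed default
	}
	return content, int(l)
}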
musig2.go (new file, 347 lines)
@@ -0,0 +1,347 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/btcsuite/btcd/btcec/v2/schnorr/musig2"
|
||||
)
|
||||
|
||||
func getMusigAggregatedKey(_ context.Context, keys []string) (nostr.PubKey, error) {
|
||||
knownSigners := make([]*btcec.PublicKey, len(keys))
|
||||
for i, spk := range keys {
|
||||
bpk, err := hex.DecodeString(spk)
|
||||
if err != nil {
|
||||
return nostr.ZeroPK, fmt.Errorf("'%s' is invalid hex: %w", spk, err)
|
||||
}
|
||||
if len(bpk) == 32 {
|
||||
return nostr.ZeroPK, fmt.Errorf("'%s' is missing the leading parity byte", spk)
|
||||
}
|
||||
pk, err := btcec.ParsePubKey(bpk)
|
||||
if err != nil {
|
||||
return nostr.ZeroPK, fmt.Errorf("'%s' is not a valid pubkey: %w", spk, err)
|
||||
}
|
||||
knownSigners[i] = pk
|
||||
}
|
||||
|
||||
aggpk, _, _, err := musig2.AggregateKeys(knownSigners, true)
|
||||
if err != nil {
|
||||
return nostr.ZeroPK, fmt.Errorf("aggregation failed: %w", err)
|
||||
}
|
||||
|
||||
return nostr.PubKey(aggpk.FinalKey.SerializeCompressed()[1:]), nil
|
||||
}
|
||||
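A minimal sketch of calling the aggregation helper above with two compressed (33-byte, parity-prefixed) public keys; the keys shown are well-known BIP-340 test-vector x-coordinates with a parity byte prepended, used here only for illustration:

func exampleAggregate() {
	agg, err := getMusigAggregatedKey(context.Background(), []string{
		"02f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9",
		"03dff1d77f2a671c5f36183726db2341be58feae1da2deced843240f7b502ba659",
	})
	if err != nil {
		fmt.Println("aggregation failed:", err)
		return
	}
	// the result is the x-only (32-byte) aggregated key, usable as a nostr pubkey
	fmt.Println(agg.Hex())
}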
|
||||
func performMusig(
|
||||
_ context.Context,
|
||||
sec nostr.SecretKey,
|
||||
evt *nostr.Event,
|
||||
numSigners int,
|
||||
keys []string,
|
||||
nonces []string,
|
||||
secNonce string,
|
||||
partialSigs []string,
|
||||
) (signed bool, err error) {
|
||||
// preprocess data received
|
||||
seck, pubk := btcec.PrivKeyFromBytes(sec[:])
|
||||
|
||||
knownSigners := make([]*btcec.PublicKey, 0, numSigners)
|
||||
includesUs := false
|
||||
for _, hexpub := range keys {
|
||||
bpub, err := hex.DecodeString(hexpub)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
spub, err := btcec.ParsePubKey(bpub)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
knownSigners = append(knownSigners, spub)
|
||||
|
||||
if spub.IsEqual(pubk) {
|
||||
includesUs = true
|
||||
}
|
||||
}
|
||||
if !includesUs {
|
||||
knownSigners = append(knownSigners, pubk)
|
||||
}
|
||||
|
||||
knownNonces := make([][66]byte, 0, numSigners)
|
||||
for _, hexnonce := range nonces {
|
||||
bnonce, err := hex.DecodeString(hexnonce)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(bnonce) != 66 {
|
||||
return false, fmt.Errorf("nonce is not 66 bytes: %s", hexnonce)
|
||||
}
|
||||
var b66nonce [66]byte
|
||||
copy(b66nonce[:], bnonce)
|
||||
knownNonces = append(knownNonces, b66nonce)
|
||||
}
|
||||
|
||||
knownPartialSigs := make([]*musig2.PartialSignature, 0, numSigners)
|
||||
for _, hexps := range partialSigs {
|
||||
bps, err := hex.DecodeString(hexps)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
var ps musig2.PartialSignature
|
||||
if err := ps.Decode(bytes.NewBuffer(bps)); err != nil {
|
||||
return false, fmt.Errorf("invalid partial signature %s: %w", hexps, err)
|
||||
}
|
||||
knownPartialSigs = append(knownPartialSigs, &ps)
|
||||
}
|
||||
|
||||
// create the context
|
||||
var mctx *musig2.Context
|
||||
if len(knownSigners) < numSigners {
|
||||
// we don't know all the signers yet
|
||||
mctx, err = musig2.NewContext(seck, true,
|
||||
musig2.WithNumSigners(numSigners),
|
||||
musig2.WithEarlyNonceGen(),
|
||||
)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create signing context with %d unknown signers: %w",
|
||||
numSigners, err)
|
||||
}
|
||||
} else {
|
||||
// we know all the signers
|
||||
mctx, err = musig2.NewContext(seck, true,
|
||||
musig2.WithKnownSigners(knownSigners),
|
||||
)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create signing context with %d known signers: %w",
|
||||
len(knownSigners), err)
|
||||
}
|
||||
}
|
||||
|
||||
// nonce generation phase -- for sharing
|
||||
if len(knownSigners) < numSigners {
|
||||
// if we don't have all the signers we just generate a nonce and yield it to the next people
|
||||
nonce, err := mctx.EarlySessionNonce()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
log("the following code should be saved secretly until the next step an included with --musig-nonce-secret:\n")
|
||||
log("%s\n\n", base64.StdEncoding.EncodeToString(nonce.SecNonce[:]))
|
||||
|
||||
knownNonces = append(knownNonces, nonce.PubNonce)
|
||||
printPublicCommandForNextPeer(evt, numSigners, knownSigners, knownNonces, nil, false)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// if we got here we have all the pubkeys, so we can print the combined key
|
||||
if comb, err := mctx.CombinedKey(); err != nil {
|
||||
return false, fmt.Errorf("failed to combine keys (after %d signers): %w", len(knownSigners), err)
|
||||
} else {
|
||||
evt.PubKey = nostr.PubKey(comb.SerializeCompressed()[1:])
|
||||
evt.ID = evt.GetID()
|
||||
log("combined key: %x\n\n", comb.SerializeCompressed())
|
||||
}
|
||||
|
||||
// we have all the signers, which means we must also have all the nonces
|
||||
var session *musig2.Session
|
||||
if len(keys) == numSigners-1 {
|
||||
// if we were the last to include our key, that means we have to include our nonce here too
|
||||
// i.e. we didn't input our own pub nonce in the parameters
|
||||
session, err = mctx.NewSession()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create session as the last peer to include our key: %w", err)
|
||||
}
|
||||
knownNonces = append(knownNonces, session.PublicNonce())
|
||||
} else {
|
||||
// otherwise we have included our own nonce in the parameters (from copypasting) but must
|
||||
// also include the secret nonce that wasn't shared with peers
|
||||
if secNonce == "" {
|
||||
return false, fmt.Errorf("missing --musig-nonce-secret value")
|
||||
}
|
||||
secNonceB, err := base64.StdEncoding.DecodeString(secNonce)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("invalid --musig-nonce-secret: %w", err)
|
||||
}
|
||||
var secNonce97 [97]byte
|
||||
copy(secNonce97[:], secNonceB)
|
||||
session, err = mctx.NewSession(musig2.WithPreGeneratedNonce(&musig2.Nonces{
|
||||
SecNonce: secNonce97,
|
||||
PubNonce: secNonceToPubNonce(secNonce97),
|
||||
}))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create signing session with secret nonce: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var noncesOk bool
|
||||
for _, b66nonce := range knownNonces {
|
||||
if b66nonce == session.PublicNonce() {
|
||||
// don't add our own nonce
|
||||
continue
|
||||
}
|
||||
|
||||
noncesOk, err = session.RegisterPubNonce(b66nonce)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to register nonce: %w", err)
|
||||
}
|
||||
}
|
||||
if !noncesOk {
|
||||
return false, fmt.Errorf("we've registered all the nonces we had but at least one is missing, this shouldn't happen")
|
||||
}
|
||||
|
||||
// signing phase
|
||||
// we always have to sign, so let's do this
|
||||
partialSig, err := session.Sign(evt.GetID()) // this will already include our sig in the bundle
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to produce partial signature: %w", err)
|
||||
}
|
||||
|
||||
if len(knownPartialSigs)+1 < len(knownSigners) {
|
||||
// still missing some signatures
|
||||
knownPartialSigs = append(knownPartialSigs, partialSig) // we include ours here just so it's printed
|
||||
printPublicCommandForNextPeer(evt, numSigners, knownSigners, knownNonces, knownPartialSigs, true)
|
||||
return false, nil
|
||||
} else {
|
||||
// we have all signatures
|
||||
for _, ps := range knownPartialSigs {
|
||||
_, err = session.CombineSig(ps)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to combine partial signature: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// we have the signature
|
||||
evt.Sig = [64]byte(session.FinalSig().Serialize())
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func printPublicCommandForNextPeer(
|
||||
evt *nostr.Event,
|
||||
numSigners int,
|
||||
knownSigners []*btcec.PublicKey,
|
||||
knownNonces [][66]byte,
|
||||
knownPartialSigs []*musig2.PartialSignature,
|
||||
includeNonceSecret bool,
|
||||
) {
|
||||
maybeNonceSecret := ""
|
||||
if includeNonceSecret {
|
||||
maybeNonceSecret = " --musig-nonce-secret '<insert-nonce-secret>'"
|
||||
}
|
||||
|
||||
log("the next signer and they should call this on their side:\nnak event --sec <insert-secret-key> --musig %d %s%s%s%s%s\n",
|
||||
numSigners,
|
||||
eventToCliArgs(evt),
|
||||
signersToCliArgs(knownSigners),
|
||||
noncesToCliArgs(knownNonces),
|
||||
partialSigsToCliArgs(knownPartialSigs),
|
||||
maybeNonceSecret,
|
||||
)
|
||||
}
|
||||
|
||||
func eventToCliArgs(evt *nostr.Event) string {
|
||||
b := strings.Builder{}
|
||||
b.Grow(100)
|
||||
|
||||
b.WriteString("-k ")
|
||||
b.WriteString(strconv.Itoa(int(evt.Kind)))
|
||||
|
||||
b.WriteString(" -ts ")
|
||||
b.WriteString(strconv.FormatInt(int64(evt.CreatedAt), 10))
|
||||
|
||||
b.WriteString(" -c '")
|
||||
b.WriteString(evt.Content)
|
||||
b.WriteString("'")
|
||||
|
||||
for _, tag := range evt.Tags {
|
||||
b.WriteString(" -t '")
|
||||
b.WriteString(tag[0])
|
||||
if len(tag) > 1 {
|
||||
b.WriteString("=")
|
||||
b.WriteString(tag[1])
|
||||
if len(tag) > 2 {
|
||||
for _, item := range tag[2:] {
|
||||
b.WriteString(";")
|
||||
b.WriteString(item)
|
||||
}
|
||||
}
|
||||
}
|
||||
b.WriteString("'")
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func signersToCliArgs(knownSigners []*btcec.PublicKey) string {
|
||||
b := strings.Builder{}
|
||||
b.Grow(len(knownSigners) * (16 + 66))
|
||||
|
||||
for _, signerPub := range knownSigners {
|
||||
b.WriteString(" --musig-pubkey ")
|
||||
b.WriteString(hex.EncodeToString(signerPub.SerializeCompressed()))
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func noncesToCliArgs(knownNonces [][66]byte) string {
|
||||
b := strings.Builder{}
|
||||
b.Grow(len(knownNonces) * (15 + 132))
|
||||
|
||||
for _, nonce := range knownNonces {
|
||||
b.WriteString(" --musig-nonce ")
|
||||
b.WriteString(hex.EncodeToString(nonce[:]))
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func partialSigsToCliArgs(knownPartialSigs []*musig2.PartialSignature) string {
|
||||
b := strings.Builder{}
|
||||
b.Grow(len(knownPartialSigs) * (17 + 64))
|
||||
|
||||
for _, partialSig := range knownPartialSigs {
|
||||
b.WriteString(" --musig-partial ")
|
||||
w := &bytes.Buffer{}
|
||||
partialSig.Encode(w)
|
||||
b.Write([]byte(hex.EncodeToString(w.Bytes())))
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// this function is copied from btcec because it's not exported for some reason
|
||||
func secNonceToPubNonce(secNonce [musig2.SecNonceSize]byte) [musig2.PubNonceSize]byte {
|
||||
var k1Mod, k2Mod btcec.ModNScalar
|
||||
k1Mod.SetByteSlice(secNonce[:btcec.PrivKeyBytesLen])
|
||||
k2Mod.SetByteSlice(secNonce[btcec.PrivKeyBytesLen:])
|
||||
|
||||
var r1, r2 btcec.JacobianPoint
|
||||
btcec.ScalarBaseMultNonConst(&k1Mod, &r1)
|
||||
btcec.ScalarBaseMultNonConst(&k2Mod, &r2)
|
||||
|
||||
// Next, we'll convert the key in jacobian format to a normal public
|
||||
// key expressed in affine coordinates.
|
||||
r1.ToAffine()
|
||||
r2.ToAffine()
|
||||
r1Pub := btcec.NewPublicKey(&r1.X, &r1.Y)
|
||||
r2Pub := btcec.NewPublicKey(&r2.X, &r2.Y)
|
||||
|
||||
var pubNonce [musig2.PubNonceSize]byte
|
||||
|
||||
// The public nonces are serialized as: R1 || R2, where both keys are
|
||||
// serialized in compressed format.
|
||||
copy(pubNonce[:], r1Pub.SerializeCompressed())
|
||||
copy(
|
||||
pubNonce[btcec.PubKeyBytesLenCompressed:],
|
||||
r2Pub.SerializeCompressed(),
|
||||
)
|
||||
|
||||
return pubNonce
|
||||
}
|
||||
nostrfs/asyncfile.go (new file, 56 lines)
@@ -0,0 +1,56 @@
package nostrfs

import (
	"context"
	"sync/atomic"
	"syscall"

	"github.com/hanwen/go-fuse/v2/fs"
	"github.com/hanwen/go-fuse/v2/fuse"
	"fiatjaf.com/nostr"
)

type AsyncFile struct {
	fs.Inode
	ctx     context.Context
	fetched atomic.Bool
	data    []byte
	ts      nostr.Timestamp
	load    func() ([]byte, nostr.Timestamp)
}

var (
	_ = (fs.NodeOpener)((*AsyncFile)(nil))
	_ = (fs.NodeGetattrer)((*AsyncFile)(nil))
)

func (af *AsyncFile) Getattr(ctx context.Context, f fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
	if af.fetched.CompareAndSwap(false, true) {
		af.data, af.ts = af.load()
	}

	out.Size = uint64(len(af.data))
	out.Mtime = uint64(af.ts)
	return fs.OK
}

func (af *AsyncFile) Open(ctx context.Context, flags uint32) (fs.FileHandle, uint32, syscall.Errno) {
	if af.fetched.CompareAndSwap(false, true) {
		af.data, af.ts = af.load()
	}

	return nil, fuse.FOPEN_KEEP_CACHE, 0
}

func (af *AsyncFile) Read(
	ctx context.Context,
	f fs.FileHandle,
	dest []byte,
	off int64,
) (fuse.ReadResult, syscall.Errno) {
	end := int(off) + len(dest)
	if end > len(af.data) {
		end = len(af.data)
	}
	return fuse.ReadResultData(af.data[off:end]), 0
}
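A hypothetical construction of the lazy file above: the load callback runs at most once, on the first Getattr or Open, thanks to the CompareAndSwap guard.

func exampleAsyncFile(ctx context.Context) *AsyncFile {
	return &AsyncFile{
		ctx: ctx,
		load: func() ([]byte, nostr.Timestamp) {
			// any expensive fetch goes here; it is deferred until first access
			return []byte("hello"), nostr.Now()
		},
	}
}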
nostrfs/deterministicfile.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package nostrfs

import (
	"context"
	"syscall"
	"unsafe"

	"github.com/hanwen/go-fuse/v2/fs"
	"github.com/hanwen/go-fuse/v2/fuse"
)

type DeterministicFile struct {
	fs.Inode
	get func() (ctime, mtime uint64, data string)
}

var (
	_ = (fs.NodeOpener)((*DeterministicFile)(nil))
	_ = (fs.NodeReader)((*DeterministicFile)(nil))
	_ = (fs.NodeGetattrer)((*DeterministicFile)(nil))
)

func (r *NostrRoot) NewDeterministicFile(get func() (ctime, mtime uint64, data string)) *DeterministicFile {
	return &DeterministicFile{
		get: get,
	}
}

func (f *DeterministicFile) Open(ctx context.Context, flags uint32) (fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
	return nil, fuse.FOPEN_KEEP_CACHE, fs.OK
}

func (f *DeterministicFile) Getattr(ctx context.Context, fh fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
	var content string
	out.Mode = 0444
	out.Ctime, out.Mtime, content = f.get()
	out.Size = uint64(len(content))
	return fs.OK
}

func (f *DeterministicFile) Read(ctx context.Context, fh fs.FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
	_, _, content := f.get()
	data := unsafe.Slice(unsafe.StringData(content), len(content))

	end := int(off) + len(dest)
	if end > len(data) {
		end = len(data)
	}
	return fuse.ReadResultData(data[off:end]), fs.OK
}
nostrfs/entitydir.go (new file, 408 lines)
@@ -0,0 +1,408 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"fiatjaf.com/lib/debouncer"
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/nip27"
|
||||
"fiatjaf.com/nostr/nip73"
|
||||
"fiatjaf.com/nostr/nip92"
|
||||
sdk "fiatjaf.com/nostr/sdk"
|
||||
"github.com/fatih/color"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type EntityDir struct {
|
||||
fs.Inode
|
||||
root *NostrRoot
|
||||
|
||||
publisher *debouncer.Debouncer
|
||||
event *nostr.Event
|
||||
updating struct {
|
||||
title string
|
||||
content string
|
||||
publishedAt uint64
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
_ = (fs.NodeOnAdder)((*EntityDir)(nil))
|
||||
_ = (fs.NodeGetattrer)((*EntityDir)(nil))
|
||||
_ = (fs.NodeSetattrer)((*EntityDir)(nil))
|
||||
_ = (fs.NodeCreater)((*EntityDir)(nil))
|
||||
_ = (fs.NodeUnlinker)((*EntityDir)(nil))
|
||||
)
|
||||
|
||||
func (e *EntityDir) Getattr(_ context.Context, f fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
out.Ctime = uint64(e.event.CreatedAt)
|
||||
if e.updating.publishedAt != 0 {
|
||||
out.Mtime = e.updating.publishedAt
|
||||
} else {
|
||||
out.Mtime = e.PublishedAt()
|
||||
}
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (e *EntityDir) Create(
|
||||
_ context.Context,
|
||||
name string,
|
||||
flags uint32,
|
||||
mode uint32,
|
||||
out *fuse.EntryOut,
|
||||
) (node *fs.Inode, fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
|
||||
if name == "publish" && e.publisher.IsRunning() {
|
||||
// this causes the publish process to be triggered faster
|
||||
log := e.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
log("publishing now!\n")
|
||||
e.publisher.Flush()
|
||||
return nil, nil, 0, syscall.ENOTDIR
|
||||
}
|
||||
|
||||
return nil, nil, 0, syscall.ENOTSUP
|
||||
}
|
||||
|
||||
func (e *EntityDir) Unlink(ctx context.Context, name string) syscall.Errno {
|
||||
switch name {
|
||||
case "content" + kindToExtension(e.event.Kind):
|
||||
e.updating.content = e.event.Content
|
||||
return syscall.ENOTDIR
|
||||
case "title":
|
||||
e.updating.title = e.Title()
|
||||
return syscall.ENOTDIR
|
||||
default:
|
||||
return syscall.EINTR
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EntityDir) Setattr(_ context.Context, _ fs.FileHandle, in *fuse.SetAttrIn, _ *fuse.AttrOut) syscall.Errno {
|
||||
e.updating.publishedAt = in.Mtime
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (e *EntityDir) OnAdd(_ context.Context) {
|
||||
log := e.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
|
||||
e.AddChild("@author", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(e.root.wd + "/" + nip19.EncodeNpub(e.event.PubKey)),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
|
||||
e.AddChild("event.json", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&DeterministicFile{
|
||||
get: func() (ctime uint64, mtime uint64, data string) {
|
||||
eventj, _ := json.MarshalIndent(e.event, "", " ")
|
||||
return uint64(e.event.CreatedAt),
|
||||
uint64(e.event.CreatedAt),
|
||||
unsafe.String(unsafe.SliceData(eventj), len(eventj))
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
e.AddChild("identifier", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(e.event.Tags.GetD()),
|
||||
Attr: fuse.Attr{
|
||||
Mode: 0444,
|
||||
Ctime: uint64(e.event.CreatedAt),
|
||||
Mtime: uint64(e.event.CreatedAt),
|
||||
Size: uint64(len(e.event.Tags.GetD())),
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
if e.root.signer == nil || e.root.rootPubKey != e.event.PubKey {
|
||||
// read-only
|
||||
e.AddChild("title", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&DeterministicFile{
|
||||
get: func() (ctime uint64, mtime uint64, data string) {
|
||||
return uint64(e.event.CreatedAt), e.PublishedAt(), e.Title()
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
e.AddChild("content."+kindToExtension(e.event.Kind), e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&DeterministicFile{
|
||||
get: func() (ctime uint64, mtime uint64, data string) {
|
||||
return uint64(e.event.CreatedAt), e.PublishedAt(), e.event.Content
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
} else {
|
||||
// writeable
|
||||
e.updating.title = e.Title()
|
||||
e.updating.publishedAt = e.PublishedAt()
|
||||
e.updating.content = e.event.Content
|
||||
|
||||
e.AddChild("title", e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
e.root.NewWriteableFile(e.updating.title, uint64(e.event.CreatedAt), e.updating.publishedAt, func(s string) {
|
||||
log("title updated")
|
||||
e.updating.title = strings.TrimSpace(s)
|
||||
e.handleWrite()
|
||||
}),
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
e.AddChild("content."+kindToExtension(e.event.Kind), e.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
e.root.NewWriteableFile(e.updating.content, uint64(e.event.CreatedAt), e.updating.publishedAt, func(s string) {
|
||||
log("content updated")
|
||||
e.updating.content = strings.TrimSpace(s)
|
||||
e.handleWrite()
|
||||
}),
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
}
|
||||
|
||||
var refsdir *fs.Inode
|
||||
i := 0
|
||||
for ref := range nip27.Parse(e.event.Content) {
|
||||
if _, isExternal := ref.Pointer.(nip73.ExternalPointer); isExternal {
|
||||
continue
|
||||
}
|
||||
i++
|
||||
|
||||
if refsdir == nil {
|
||||
refsdir = e.NewPersistentInode(e.root.ctx, &fs.Inode{}, fs.StableAttr{Mode: syscall.S_IFDIR})
|
||||
e.root.AddChild("references", refsdir, true)
|
||||
}
|
||||
refsdir.AddChild(fmt.Sprintf("ref_%02d", i), refsdir.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(e.root.wd + "/" + nip19.EncodePointer(ref.Pointer)),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
|
||||
var imagesdir *fs.Inode
|
||||
addImage := func(url string) {
|
||||
if imagesdir == nil {
|
||||
in := &fs.Inode{}
|
||||
imagesdir = e.NewPersistentInode(e.root.ctx, in, fs.StableAttr{Mode: syscall.S_IFDIR})
|
||||
e.AddChild("images", imagesdir, true)
|
||||
}
|
||||
imagesdir.AddChild(filepath.Base(url), imagesdir.NewPersistentInode(
|
||||
e.root.ctx,
|
||||
&AsyncFile{
|
||||
ctx: e.root.ctx,
|
||||
load: func() ([]byte, nostr.Timestamp) {
|
||||
ctx, cancel := context.WithTimeout(e.root.ctx, time.Second*20)
|
||||
defer cancel()
|
||||
r, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
if err != nil {
|
||||
log("failed to load image %s: %s\n", url, err)
|
||||
return nil, 0
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
log("failed to load image %s: %s\n", url, err)
|
||||
return nil, 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode >= 300 {
|
||||
log("failed to load image %s: %s\n", url, err)
|
||||
return nil, 0
|
||||
}
|
||||
w := &bytes.Buffer{}
|
||||
io.Copy(w, resp.Body)
|
||||
return w.Bytes(), 0
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
}
|
||||
|
||||
images := nip92.ParseTags(e.event.Tags)
|
||||
for _, imeta := range images {
|
||||
if imeta.URL == "" {
|
||||
continue
|
||||
}
|
||||
addImage(imeta.URL)
|
||||
}
|
||||
|
||||
if tag := e.event.Tags.Find("image"); tag != nil {
|
||||
addImage(tag[1])
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EntityDir) IsNew() bool {
|
||||
return e.event.CreatedAt == 0
|
||||
}
|
||||
|
||||
func (e *EntityDir) PublishedAt() uint64 {
|
||||
if tag := e.event.Tags.Find("published_at"); tag != nil {
|
||||
publishedAt, _ := strconv.ParseUint(tag[1], 10, 64)
|
||||
return publishedAt
|
||||
}
|
||||
return uint64(e.event.CreatedAt)
|
||||
}
|
||||
|
||||
func (e *EntityDir) Title() string {
|
||||
if tag := e.event.Tags.Find("title"); tag != nil {
|
||||
return tag[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (e *EntityDir) handleWrite() {
|
||||
log := e.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
logverbose := e.root.ctx.Value("logverbose").(func(msg string, args ...any))
|
||||
|
||||
if e.root.opts.AutoPublishArticlesTimeout.Hours() < 24*365 {
|
||||
if e.publisher.IsRunning() {
|
||||
log(", timer reset")
|
||||
}
|
||||
log(", publishing the ")
|
||||
if e.IsNew() {
|
||||
log("new")
|
||||
} else {
|
||||
log("updated")
|
||||
}
|
||||
log(" event in %d seconds...\n", int(e.root.opts.AutoPublishArticlesTimeout.Seconds()))
|
||||
} else {
|
||||
log(".\n")
|
||||
}
|
||||
if !e.publisher.IsRunning() {
|
||||
log("- `touch publish` to publish immediately\n")
|
||||
log("- `rm title content." + kindToExtension(e.event.Kind) + "` to erase and cancel the edits\n")
|
||||
}
|
||||
|
||||
e.publisher.Call(func() {
|
||||
if e.Title() == e.updating.title && e.event.Content == e.updating.content {
|
||||
log("not modified, publish canceled.\n")
|
||||
return
|
||||
}
|
||||
|
||||
evt := nostr.Event{
|
||||
Kind: e.event.Kind,
|
||||
Content: e.updating.content,
|
||||
Tags: make(nostr.Tags, len(e.event.Tags)),
|
||||
CreatedAt: nostr.Now(),
|
||||
}
|
||||
copy(evt.Tags, e.event.Tags) // copy tags because that's the rule
|
||||
if e.updating.title != "" {
|
||||
if titleTag := evt.Tags.Find("title"); titleTag != nil {
|
||||
titleTag[1] = e.updating.title
|
||||
} else {
|
||||
evt.Tags = append(evt.Tags, nostr.Tag{"title", e.updating.title})
|
||||
}
|
||||
}
|
||||
|
||||
// "published_at" tag
|
||||
publishedAtStr := strconv.FormatUint(e.updating.publishedAt, 10)
|
||||
if publishedAtStr != "0" {
|
||||
if publishedAtTag := evt.Tags.Find("published_at"); publishedAtTag != nil {
|
||||
publishedAtTag[1] = publishedAtStr
|
||||
} else {
|
||||
evt.Tags = append(evt.Tags, nostr.Tag{"published_at", publishedAtStr})
|
||||
}
|
||||
}
|
||||
|
||||
// add "p" tags from people mentioned and "q" tags from events mentioned
|
||||
for ref := range nip27.Parse(evt.Content) {
|
||||
if _, isExternal := ref.Pointer.(nip73.ExternalPointer); isExternal {
|
||||
continue
|
||||
}
|
||||
|
||||
tag := ref.Pointer.AsTag()
|
||||
key := tag[0]
|
||||
val := tag[1]
|
||||
if key == "e" || key == "a" {
|
||||
key = "q"
|
||||
}
|
||||
if existing := evt.Tags.FindWithValue(key, val); existing == nil {
|
||||
evt.Tags = append(evt.Tags, tag)
|
||||
}
|
||||
}
|
||||
|
||||
// sign and publish
|
||||
if err := e.root.signer.SignEvent(e.root.ctx, &evt); err != nil {
|
||||
log("failed to sign: '%s'.\n", err)
|
||||
return
|
||||
}
|
||||
logverbose("%s\n", evt)
|
||||
|
||||
relays := e.root.sys.FetchWriteRelays(e.root.ctx, e.root.rootPubKey)
|
||||
if len(relays) == 0 {
|
||||
relays = e.root.sys.FetchOutboxRelays(e.root.ctx, e.root.rootPubKey, 6)
|
||||
}
|
||||
|
||||
log("publishing to %d relays... ", len(relays))
|
||||
success := false
|
||||
first := true
|
||||
for res := range e.root.sys.Pool.PublishMany(e.root.ctx, relays, evt) {
|
||||
cleanUrl, _ := strings.CutPrefix(res.RelayURL, "wss://")
|
||||
if !first {
|
||||
log(", ")
|
||||
}
|
||||
first = false
|
||||
|
||||
if res.Error != nil {
|
||||
log("%s: %s", color.RedString(cleanUrl), res.Error)
|
||||
} else {
|
||||
success = true
|
||||
log("%s: ok", color.GreenString(cleanUrl))
|
||||
}
|
||||
}
|
||||
log("\n")
|
||||
|
||||
if success {
|
||||
e.event = &evt
|
||||
log("event updated locally.\n")
|
||||
e.updating.publishedAt = uint64(evt.CreatedAt) // set this so subsequent edits get the correct value
|
||||
} else {
|
||||
log("failed.\n")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (r *NostrRoot) FetchAndCreateEntityDir(
|
||||
parent fs.InodeEmbedder,
|
||||
extension string,
|
||||
pointer nostr.EntityPointer,
|
||||
) (*fs.Inode, error) {
|
||||
event, _, err := r.sys.FetchSpecificEvent(r.ctx, pointer, sdk.FetchSpecificEventParameters{
|
||||
WithRelays: false,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch: %w", err)
|
||||
}
|
||||
|
||||
return r.CreateEntityDir(parent, event), nil
|
||||
}
|
||||
|
||||
func (r *NostrRoot) CreateEntityDir(
|
||||
parent fs.InodeEmbedder,
|
||||
event *nostr.Event,
|
||||
) *fs.Inode {
|
||||
return parent.EmbeddedInode().NewPersistentInode(
|
||||
r.ctx,
|
||||
&EntityDir{root: r, event: event, publisher: debouncer.New(r.opts.AutoPublishArticlesTimeout)},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
)
|
||||
}
|
||||
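The directory above leans on fiatjaf.com/lib/debouncer for delayed publishing; a stripped-down sketch of that pattern as it is used here (durations and messages are illustrative, not taken from the diff):

func exampleDebounce() {
	d := debouncer.New(30 * time.Second)

	// each Call schedules (or reschedules) the function to run after the delay,
	// so repeated edits keep pushing the publish forward
	d.Call(func() { fmt.Println("publishing edited event") })
	d.Call(func() { fmt.Println("publishing edited event") })

	// `touch publish` maps to Flush: run the pending function right away
	if d.IsRunning() {
		d.Flush()
	}
}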
nostrfs/eventdir.go (new file, 241 lines)
@@ -0,0 +1,241 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip10"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/nip22"
|
||||
"fiatjaf.com/nostr/nip27"
|
||||
"fiatjaf.com/nostr/nip73"
|
||||
"fiatjaf.com/nostr/nip92"
|
||||
sdk "fiatjaf.com/nostr/sdk"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type EventDir struct {
|
||||
fs.Inode
|
||||
ctx context.Context
|
||||
wd string
|
||||
evt *nostr.Event
|
||||
}
|
||||
|
||||
var _ = (fs.NodeGetattrer)((*EventDir)(nil))
|
||||
|
||||
func (e *EventDir) Getattr(_ context.Context, f fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
out.Mtime = uint64(e.evt.CreatedAt)
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (r *NostrRoot) FetchAndCreateEventDir(
|
||||
parent fs.InodeEmbedder,
|
||||
pointer nostr.EventPointer,
|
||||
) (*fs.Inode, error) {
|
||||
event, _, err := r.sys.FetchSpecificEvent(r.ctx, pointer, sdk.FetchSpecificEventParameters{
|
||||
WithRelays: false,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch: %w", err)
|
||||
}
|
||||
|
||||
return r.CreateEventDir(parent, event), nil
|
||||
}
|
||||
|
||||
func (r *NostrRoot) CreateEventDir(
|
||||
parent fs.InodeEmbedder,
|
||||
event *nostr.Event,
|
||||
) *fs.Inode {
|
||||
h := parent.EmbeddedInode().NewPersistentInode(
|
||||
r.ctx,
|
||||
&EventDir{ctx: r.ctx, wd: r.wd, evt: event},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR, Ino: binary.BigEndian.Uint64(event.ID[8:16])},
|
||||
)
|
||||
|
||||
h.AddChild("@author", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nip19.EncodeNpub(event.PubKey)),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
|
||||
eventj, _ := json.MarshalIndent(event, "", " ")
|
||||
h.AddChild("event.json", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: eventj,
|
||||
Attr: fuse.Attr{
|
||||
Mode: 0444,
|
||||
Ctime: uint64(event.CreatedAt),
|
||||
Mtime: uint64(event.CreatedAt),
|
||||
Size: uint64(len(event.Content)),
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
h.AddChild("id", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(event.ID.Hex()),
|
||||
Attr: fuse.Attr{
|
||||
Mode: 0444,
|
||||
Ctime: uint64(event.CreatedAt),
|
||||
Mtime: uint64(event.CreatedAt),
|
||||
Size: uint64(64),
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
h.AddChild("content.txt", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(event.Content),
|
||||
Attr: fuse.Attr{
|
||||
Mode: 0444,
|
||||
Ctime: uint64(event.CreatedAt),
|
||||
Mtime: uint64(event.CreatedAt),
|
||||
Size: uint64(len(event.Content)),
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
var refsdir *fs.Inode
|
||||
i := 0
|
||||
for ref := range nip27.Parse(event.Content) {
|
||||
if _, isExternal := ref.Pointer.(nip73.ExternalPointer); isExternal {
|
||||
continue
|
||||
}
|
||||
i++
|
||||
|
||||
if refsdir == nil {
|
||||
refsdir = h.NewPersistentInode(r.ctx, &fs.Inode{}, fs.StableAttr{Mode: syscall.S_IFDIR})
|
||||
h.AddChild("references", refsdir, true)
|
||||
}
|
||||
refsdir.AddChild(fmt.Sprintf("ref_%02d", i), refsdir.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nip19.EncodePointer(ref.Pointer)),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
|
||||
var imagesdir *fs.Inode
|
||||
images := nip92.ParseTags(event.Tags)
|
||||
for _, imeta := range images {
|
||||
if imeta.URL == "" {
|
||||
continue
|
||||
}
|
||||
if imagesdir == nil {
|
||||
in := &fs.Inode{}
|
||||
imagesdir = h.NewPersistentInode(r.ctx, in, fs.StableAttr{Mode: syscall.S_IFDIR})
|
||||
h.AddChild("images", imagesdir, true)
|
||||
}
|
||||
imagesdir.AddChild(filepath.Base(imeta.URL), imagesdir.NewPersistentInode(
|
||||
r.ctx,
|
||||
&AsyncFile{
|
||||
ctx: r.ctx,
|
||||
load: func() ([]byte, nostr.Timestamp) {
|
||||
ctx, cancel := context.WithTimeout(r.ctx, time.Second*20)
|
||||
defer cancel()
|
||||
r, err := http.NewRequestWithContext(ctx, "GET", imeta.URL, nil)
|
||||
if err != nil {
|
||||
return nil, 0
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
return nil, 0
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode >= 300 {
|
||||
return nil, 0
|
||||
}
|
||||
w := &bytes.Buffer{}
|
||||
io.Copy(w, resp.Body)
|
||||
return w.Bytes(), 0
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
}
|
||||
|
||||
if event.Kind == 1 {
|
||||
if pointer := nip10.GetThreadRoot(event.Tags); pointer != nil {
|
||||
nevent := nip19.EncodePointer(pointer)
|
||||
h.AddChild("@root", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nevent),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
if pointer := nip10.GetImmediateParent(event.Tags); pointer != nil {
|
||||
nevent := nip19.EncodePointer(pointer)
|
||||
h.AddChild("@parent", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nevent),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
} else if event.Kind == 1111 {
|
||||
if pointer := nip22.GetThreadRoot(event.Tags); pointer != nil {
|
||||
if xp, ok := pointer.(nip73.ExternalPointer); ok {
|
||||
h.AddChild("@root", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(`<!doctype html><meta http-equiv="refresh" content="0; url=` + xp.Thing + `" />`),
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
} else {
|
||||
nevent := nip19.EncodePointer(pointer)
|
||||
h.AddChild("@parent", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nevent),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
}
|
||||
if pointer := nip22.GetImmediateParent(event.Tags); pointer != nil {
|
||||
if xp, ok := pointer.(nip73.ExternalPointer); ok {
|
||||
h.AddChild("@parent", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: []byte(`<!doctype html><meta http-equiv="refresh" content="0; url=` + xp.Thing + `" />`),
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
} else {
|
||||
nevent := nip19.EncodePointer(pointer)
|
||||
h.AddChild("@parent", h.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{
|
||||
Data: []byte(r.wd + "/" + nevent),
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
nostrfs/helpers.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package nostrfs

import (
	"fiatjaf.com/nostr"
)

func kindToExtension(kind nostr.Kind) string {
	switch kind {
	case 30023:
		return "md"
	case 30818:
		return "adoc"
	default:
		return "txt"
	}
}
nostrfs/npubdir.go (new file, 261 lines)
@@ -0,0 +1,261 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"github.com/fatih/color"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
"github.com/liamg/magic"
|
||||
)
|
||||
|
||||
type NpubDir struct {
|
||||
fs.Inode
|
||||
root *NostrRoot
|
||||
pointer nostr.ProfilePointer
|
||||
fetched atomic.Bool
|
||||
}
|
||||
|
||||
var _ = (fs.NodeOnAdder)((*NpubDir)(nil))
|
||||
|
||||
func (r *NostrRoot) CreateNpubDir(
|
||||
parent fs.InodeEmbedder,
|
||||
pointer nostr.ProfilePointer,
|
||||
signer nostr.Signer,
|
||||
) *fs.Inode {
|
||||
npubdir := &NpubDir{root: r, pointer: pointer}
|
||||
return parent.EmbeddedInode().NewPersistentInode(
|
||||
r.ctx,
|
||||
npubdir,
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR, Ino: binary.BigEndian.Uint64(pointer.PublicKey[8:16])},
|
||||
)
|
||||
}
|
||||
|
||||
func (h *NpubDir) OnAdd(_ context.Context) {
|
||||
log := h.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
|
||||
relays := h.root.sys.FetchOutboxRelays(h.root.ctx, h.pointer.PublicKey, 2)
|
||||
log("- adding folder for %s with relays %s\n",
|
||||
color.HiYellowString(nip19.EncodePointer(h.pointer)), color.HiGreenString("%v", relays))
|
||||
|
||||
h.AddChild("pubkey", h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&fs.MemRegularFile{Data: []byte(h.pointer.PublicKey.Hex() + "\n"), Attr: fuse.Attr{Mode: 0444}},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
|
||||
go func() {
|
||||
pm := h.root.sys.FetchProfileMetadata(h.root.ctx, h.pointer.PublicKey)
|
||||
if pm.Event == nil {
|
||||
return
|
||||
}
|
||||
|
||||
metadataj, _ := json.MarshalIndent(pm, "", " ")
|
||||
h.AddChild(
|
||||
"metadata.json",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: metadataj,
|
||||
Attr: fuse.Attr{
|
||||
Mtime: uint64(pm.Event.CreatedAt),
|
||||
Mode: 0444,
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
),
|
||||
true,
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(h.root.ctx, time.Second*20)
|
||||
defer cancel()
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", pm.Picture, nil)
|
||||
if err == nil {
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < 300 {
|
||||
b := &bytes.Buffer{}
|
||||
io.Copy(b, resp.Body)
|
||||
|
||||
ext := "png"
|
||||
if ft, err := magic.Lookup(b.Bytes()); err == nil {
|
||||
ext = ft.Extension
|
||||
}
|
||||
|
||||
h.AddChild("picture."+ext, h.NewPersistentInode(
|
||||
ctx,
|
||||
&fs.MemRegularFile{
|
||||
Data: b.Bytes(),
|
||||
Attr: fuse.Attr{
|
||||
Mtime: uint64(pm.Event.CreatedAt),
|
||||
Mode: 0444,
|
||||
},
|
||||
},
|
||||
fs.StableAttr{},
|
||||
), true)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if h.GetChild("notes") == nil {
|
||||
h.AddChild(
|
||||
"notes",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{1},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: true,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
createable: true,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("comments") == nil {
|
||||
h.AddChild(
|
||||
"comments",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{1111},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: true,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("photos") == nil {
|
||||
h.AddChild(
|
||||
"photos",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{20},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: true,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("videos") == nil {
|
||||
h.AddChild(
|
||||
"videos",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{21, 22},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: false,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("highlights") == nil {
|
||||
h.AddChild(
|
||||
"highlights",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{9802},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: false,
|
||||
relays: relays,
|
||||
replaceable: false,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("articles") == nil {
|
||||
h.AddChild(
|
||||
"articles",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{30023},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: false,
|
||||
relays: relays,
|
||||
replaceable: true,
|
||||
createable: true,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
if h.GetChild("wiki") == nil {
|
||||
h.AddChild(
|
||||
"wiki",
|
||||
h.NewPersistentInode(
|
||||
h.root.ctx,
|
||||
&ViewDir{
|
||||
root: h.root,
|
||||
filter: nostr.Filter{
|
||||
Kinds: []nostr.Kind{30818},
|
||||
Authors: []nostr.PubKey{h.pointer.PublicKey},
|
||||
},
|
||||
paginate: false,
|
||||
relays: relays,
|
||||
replaceable: true,
|
||||
createable: true,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
),
|
||||
true,
|
||||
)
|
||||
}
|
||||
}
|
||||
nostrfs/root.go (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip05"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/sdk"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
AutoPublishNotesTimeout time.Duration
|
||||
AutoPublishArticlesTimeout time.Duration
|
||||
}
|
||||
|
||||
type NostrRoot struct {
|
||||
fs.Inode
|
||||
|
||||
ctx context.Context
|
||||
wd string
|
||||
sys *sdk.System
|
||||
rootPubKey nostr.PubKey
|
||||
signer nostr.Signer
|
||||
|
||||
opts Options
|
||||
}
|
||||
|
||||
var _ = (fs.NodeOnAdder)((*NostrRoot)(nil))
|
||||
|
||||
func NewNostrRoot(ctx context.Context, sys *sdk.System, user nostr.User, mountpoint string, o Options) *NostrRoot {
|
||||
pubkey, _ := user.GetPublicKey(ctx)
|
||||
abs, _ := filepath.Abs(mountpoint)
|
||||
|
||||
var signer nostr.Signer
|
||||
if user != nil {
|
||||
signer, _ = user.(nostr.Signer)
|
||||
}
|
||||
|
||||
return &NostrRoot{
|
||||
ctx: ctx,
|
||||
sys: sys,
|
||||
rootPubKey: pubkey,
|
||||
signer: signer,
|
||||
wd: abs,
|
||||
|
||||
opts: o,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *NostrRoot) OnAdd(_ context.Context) {
|
||||
if r.rootPubKey == nostr.ZeroPK {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
|
||||
// add our contacts
|
||||
fl := r.sys.FetchFollowList(r.ctx, r.rootPubKey)
|
||||
for _, f := range fl.Items {
|
||||
pointer := nostr.ProfilePointer{PublicKey: f.Pubkey, Relays: []string{f.Relay}}
|
||||
r.AddChild(
|
||||
nip19.EncodeNpub(f.Pubkey),
|
||||
r.CreateNpubDir(r, pointer, nil),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
// add ourselves
|
||||
npub := nip19.EncodeNpub(r.rootPubKey)
|
||||
if r.GetChild(npub) == nil {
|
||||
pointer := nostr.ProfilePointer{PublicKey: r.rootPubKey}
|
||||
|
||||
r.AddChild(
|
||||
npub,
|
||||
r.CreateNpubDir(r, pointer, r.signer),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
// add a link to ourselves
|
||||
r.AddChild("@me", r.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{Data: []byte(r.wd + "/" + npub)},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), true)
|
||||
}()
|
||||
}
|
||||
|
||||
func (r *NostrRoot) Lookup(_ context.Context, name string, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
|
||||
out.SetEntryTimeout(time.Minute * 5)
|
||||
|
||||
child := r.GetChild(name)
|
||||
if child != nil {
|
||||
return child, fs.OK
|
||||
}
|
||||
|
||||
if pp, err := nip05.QueryIdentifier(r.ctx, name); err == nil {
|
||||
return r.NewPersistentInode(
|
||||
r.ctx,
|
||||
&fs.MemSymlink{Data: []byte(r.wd + "/" + nip19.EncodePointer(*pp))},
|
||||
fs.StableAttr{Mode: syscall.S_IFLNK},
|
||||
), fs.OK
|
||||
}
|
||||
|
||||
pointer, err := nip19.ToPointer(name)
|
||||
if err != nil {
|
||||
return nil, syscall.ENOENT
|
||||
}
|
||||
|
||||
switch p := pointer.(type) {
|
||||
case nostr.ProfilePointer:
|
||||
npubdir := r.CreateNpubDir(r, p, nil)
|
||||
return npubdir, fs.OK
|
||||
case nostr.EventPointer:
|
||||
eventdir, err := r.FetchAndCreateEventDir(r, p)
|
||||
if err != nil {
|
||||
return nil, syscall.ENOENT
|
||||
}
|
||||
return eventdir, fs.OK
|
||||
default:
|
||||
return nil, syscall.ENOENT
|
||||
}
|
||||
}
|
||||
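For orientation, here is a hedged sketch of how a NostrRoot like the one above is typically mounted with go-fuse. The nostrfs import path and the Options value are assumptions for illustration, not code from this commit.

// hypothetical caller, not part of this commit
package main

import (
	"context"
	"time"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/sdk"
	"github.com/hanwen/go-fuse/v2/fs"

	nostrfs "github.com/fiatjaf/nak/nostrfs" // assumed import path
)

func mountNostrFS(ctx context.Context, sys *sdk.System, user nostr.User, mountpoint string) error {
	root := nostrfs.NewNostrRoot(ctx, sys, user, mountpoint, nostrfs.Options{
		AutoPublishNotesTimeout: 5 * time.Minute, // arbitrary value, for illustration only
	})

	// fs.Mount serves the inode tree rooted at NostrRoot until it is unmounted.
	server, err := fs.Mount(mountpoint, root, &fs.Options{})
	if err != nil {
		return err
	}
	server.Wait()
	return nil
}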
nostrfs/viewdir.go (new file, 267 lines)
@@ -0,0 +1,267 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
|
||||
"fiatjaf.com/lib/debouncer"
|
||||
"fiatjaf.com/nostr"
|
||||
"github.com/fatih/color"
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type ViewDir struct {
|
||||
fs.Inode
|
||||
root *NostrRoot
|
||||
fetched atomic.Bool
|
||||
filter nostr.Filter
|
||||
paginate bool
|
||||
relays []string
|
||||
replaceable bool
|
||||
createable bool
|
||||
publisher *debouncer.Debouncer
|
||||
publishing struct {
|
||||
note string
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
_ = (fs.NodeOpendirer)((*ViewDir)(nil))
|
||||
_ = (fs.NodeGetattrer)((*ViewDir)(nil))
|
||||
_ = (fs.NodeMkdirer)((*ViewDir)(nil))
|
||||
_ = (fs.NodeSetattrer)((*ViewDir)(nil))
|
||||
_ = (fs.NodeCreater)((*ViewDir)(nil))
|
||||
_ = (fs.NodeUnlinker)((*ViewDir)(nil))
|
||||
)
|
||||
|
||||
func (f *ViewDir) Setattr(_ context.Context, _ fs.FileHandle, _ *fuse.SetAttrIn, _ *fuse.AttrOut) syscall.Errno {
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (n *ViewDir) Create(
|
||||
_ context.Context,
|
||||
name string,
|
||||
flags uint32,
|
||||
mode uint32,
|
||||
out *fuse.EntryOut,
|
||||
) (node *fs.Inode, fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
|
||||
if !n.createable || n.root.rootPubKey != n.filter.Authors[0] {
|
||||
return nil, nil, 0, syscall.EPERM
|
||||
}
|
||||
if n.publisher == nil {
|
||||
n.publisher = debouncer.New(n.root.opts.AutoPublishNotesTimeout)
|
||||
}
|
||||
if n.filter.Kinds[0] != 1 {
|
||||
return nil, nil, 0, syscall.ENOTSUP
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "new":
|
||||
log := n.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
|
||||
if n.publisher.IsRunning() {
|
||||
log("pending note updated, timer reset.")
|
||||
} else {
|
||||
log("new note detected")
|
||||
if n.root.opts.AutoPublishNotesTimeout.Hours() < 24*365 {
|
||||
log(", publishing it in %d seconds...\n", int(n.root.opts.AutoPublishNotesTimeout.Seconds()))
|
||||
} else {
|
||||
log(".\n")
|
||||
}
|
||||
log("- `touch publish` to publish immediately\n")
|
||||
log("- `rm new` to erase and cancel the publication.\n")
|
||||
}
|
||||
|
||||
n.publisher.Call(n.publishNote)
|
||||
|
||||
first := true
|
||||
|
||||
return n.NewPersistentInode(
|
||||
n.root.ctx,
|
||||
n.root.NewWriteableFile(n.publishing.note, uint64(nostr.Now()), uint64(nostr.Now()), func(s string) {
|
||||
if !first {
|
||||
log("pending note updated, timer reset.\n")
|
||||
}
|
||||
first = false
|
||||
n.publishing.note = strings.TrimSpace(s)
|
||||
n.publisher.Call(n.publishNote)
|
||||
}),
|
||||
fs.StableAttr{},
|
||||
), nil, 0, fs.OK
|
||||
case "publish":
|
||||
if n.publisher.IsRunning() {
|
||||
// this causes the publish process to be triggered faster
|
||||
log := n.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
log("publishing now!\n")
|
||||
n.publisher.Flush()
|
||||
return nil, nil, 0, syscall.ENOTDIR
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil, 0, syscall.ENOTSUP
|
||||
}
|
||||
|
||||
func (n *ViewDir) Unlink(ctx context.Context, name string) syscall.Errno {
|
||||
if !n.createable || n.root.rootPubKey != n.filter.Authors[0] {
|
||||
return syscall.EPERM
|
||||
}
|
||||
if n.publisher == nil {
|
||||
n.publisher = debouncer.New(n.root.opts.AutoPublishNotesTimeout)
|
||||
}
|
||||
if n.filter.Kinds[0] != 1 {
|
||||
return syscall.ENOTSUP
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "new":
|
||||
log := n.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
log("publishing canceled.\n")
|
||||
n.publisher.Stop()
|
||||
n.publishing.note = ""
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
return syscall.ENOTSUP
|
||||
}
|
||||
|
||||
func (n *ViewDir) publishNote() {
|
||||
log := n.root.ctx.Value("log").(func(msg string, args ...any))
|
||||
|
||||
log("publishing note...\n")
|
||||
evt := nostr.Event{
|
||||
Kind: 1,
|
||||
CreatedAt: nostr.Now(),
|
||||
Content: n.publishing.note,
|
||||
Tags: make(nostr.Tags, 0, 2),
|
||||
}
|
||||
|
||||
// our write relays
|
||||
relays := n.root.sys.FetchWriteRelays(n.root.ctx, n.root.rootPubKey)
|
||||
if len(relays) == 0 {
|
||||
relays = n.root.sys.FetchOutboxRelays(n.root.ctx, n.root.rootPubKey, 6)
|
||||
}
|
||||
|
||||
// massage and extract tags from raw text
|
||||
targetRelays := n.root.sys.PrepareNoteEvent(n.root.ctx, &evt)
|
||||
relays = nostr.AppendUnique(relays, targetRelays...)
|
||||
|
||||
// sign and publish
|
||||
if err := n.root.signer.SignEvent(n.root.ctx, &evt); err != nil {
|
||||
log("failed to sign: %s\n", err)
|
||||
return
|
||||
}
|
||||
log(evt.String() + "\n")
|
||||
|
||||
log("publishing to %d relays... ", len(relays))
|
||||
success := false
|
||||
first := true
|
||||
for res := range n.root.sys.Pool.PublishMany(n.root.ctx, relays, evt) {
|
||||
cleanUrl, _ := strings.CutPrefix(res.RelayURL, "wss://")
|
||||
if !first {
|
||||
log(", ")
|
||||
}
|
||||
first = false
|
||||
|
||||
if res.Error != nil {
|
||||
log("%s: %s", color.RedString(cleanUrl), res.Error)
|
||||
} else {
|
||||
success = true
|
||||
log("%s: ok", color.GreenString(cleanUrl))
|
||||
}
|
||||
}
|
||||
log("\n")
|
||||
|
||||
if success {
|
||||
n.RmChild("new")
|
||||
n.AddChild(evt.ID.Hex(), n.root.CreateEventDir(n, &evt), true)
|
||||
log("event published as %s and updated locally.\n", color.BlueString(evt.ID.Hex()))
|
||||
}
|
||||
}
|
||||
|
||||
func (n *ViewDir) Getattr(_ context.Context, f fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
now := nostr.Now()
|
||||
if n.filter.Until != 0 {
|
||||
now = n.filter.Until
|
||||
}
|
||||
aMonthAgo := now - 30*24*60*60
|
||||
out.Mtime = uint64(aMonthAgo)
|
||||
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (n *ViewDir) Opendir(ctx context.Context) syscall.Errno {
|
||||
if n.fetched.CompareAndSwap(true, true) {
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
if n.paginate {
|
||||
now := nostr.Now()
|
||||
if n.filter.Until != 0 {
|
||||
now = n.filter.Until
|
||||
}
|
||||
aMonthAgo := now - 30*24*60*60
|
||||
n.filter.Since = aMonthAgo
|
||||
|
||||
filter := n.filter
|
||||
filter.Until = aMonthAgo
|
||||
|
||||
n.AddChild("@previous", n.NewPersistentInode(
|
||||
n.root.ctx,
|
||||
&ViewDir{
|
||||
root: n.root,
|
||||
filter: filter,
|
||||
relays: n.relays,
|
||||
replaceable: n.replaceable,
|
||||
},
|
||||
fs.StableAttr{Mode: syscall.S_IFDIR},
|
||||
), true)
|
||||
}
|
||||
|
||||
if n.replaceable {
|
||||
for rkey, evt := range n.root.sys.Pool.FetchManyReplaceable(n.root.ctx, n.relays, n.filter, nostr.SubscriptionOptions{
|
||||
Label: "nakfs",
|
||||
}).Range {
|
||||
name := rkey.D
|
||||
if name == "" {
|
||||
name = "_"
|
||||
}
|
||||
if n.GetChild(name) == nil {
|
||||
n.AddChild(name, n.root.CreateEntityDir(n, &evt), true)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for ie := range n.root.sys.Pool.FetchMany(n.root.ctx, n.relays, n.filter,
|
||||
nostr.SubscriptionOptions{
|
||||
Label: "nakfs",
|
||||
}) {
|
||||
if n.GetChild(ie.Event.ID.Hex()) == nil {
|
||||
n.AddChild(ie.Event.ID.Hex(), n.root.CreateEventDir(n, &ie.Event), true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (n *ViewDir) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
|
||||
if !n.createable || n.root.signer == nil || n.root.rootPubKey != n.filter.Authors[0] {
|
||||
return nil, syscall.ENOTSUP
|
||||
}
|
||||
|
||||
if n.replaceable {
|
||||
// create a template event that can later be modified and published as new
|
||||
return n.root.CreateEntityDir(n, &nostr.Event{
|
||||
PubKey: n.root.rootPubKey,
|
||||
CreatedAt: 0,
|
||||
Kind: n.filter.Kinds[0],
|
||||
Tags: nostr.Tags{
|
||||
nostr.Tag{"d", name},
|
||||
},
|
||||
}), fs.OK
|
||||
}
|
||||
|
||||
return nil, syscall.ENOTSUP
|
||||
}
|
||||
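The create/unlink flow above revolves around the debouncer from fiatjaf.com/lib/debouncer. A minimal standalone sketch of that pattern (the 3-second timeout is arbitrary):

package main

import (
	"fmt"
	"time"

	"fiatjaf.com/lib/debouncer"
)

func main() {
	publish := func() { fmt.Println("publishing now") }

	d := debouncer.New(3 * time.Second)

	d.Call(publish) // schedules publish after the timeout
	d.Call(publish) // called again before it fires: the timer resets

	fmt.Println("pending:", d.IsRunning()) // true while a call is waiting

	d.Flush() // run the pending call right away (what `touch publish` does above)
	// d.Stop() would cancel it instead (what `rm new` does above)

	time.Sleep(100 * time.Millisecond) // give the flushed call a moment to run
}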
nostrfs/writeablefile.go (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
package nostrfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/hanwen/go-fuse/v2/fs"
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
type WriteableFile struct {
|
||||
fs.Inode
|
||||
root *NostrRoot
|
||||
mu sync.Mutex
|
||||
data []byte
|
||||
attr fuse.Attr
|
||||
onWrite func(string)
|
||||
}
|
||||
|
||||
var (
|
||||
_ = (fs.NodeOpener)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeReader)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeWriter)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeGetattrer)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeSetattrer)((*WriteableFile)(nil))
|
||||
_ = (fs.NodeFlusher)((*WriteableFile)(nil))
|
||||
)
|
||||
|
||||
func (r *NostrRoot) NewWriteableFile(data string, ctime, mtime uint64, onWrite func(string)) *WriteableFile {
|
||||
return &WriteableFile{
|
||||
root: r,
|
||||
data: []byte(data),
|
||||
attr: fuse.Attr{
|
||||
Mode: 0666,
|
||||
Ctime: ctime,
|
||||
Mtime: mtime,
|
||||
Size: uint64(len(data)),
|
||||
},
|
||||
onWrite: onWrite,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Open(ctx context.Context, flags uint32) (fh fs.FileHandle, fuseFlags uint32, errno syscall.Errno) {
|
||||
return nil, fuse.FOPEN_KEEP_CACHE, fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Write(ctx context.Context, fh fs.FileHandle, data []byte, off int64) (uint32, syscall.Errno) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
offset := int(off)
|
||||
end := offset + len(data)
|
||||
if len(f.data) < end {
|
||||
newData := make([]byte, offset+len(data))
|
||||
copy(newData, f.data)
|
||||
f.data = newData
|
||||
}
|
||||
copy(f.data[offset:], data)
|
||||
f.data = f.data[0:end]
|
||||
|
||||
f.onWrite(string(f.data))
|
||||
return uint32(len(data)), fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Getattr(ctx context.Context, fh fs.FileHandle, out *fuse.AttrOut) syscall.Errno {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
out.Attr = f.attr
|
||||
out.Attr.Size = uint64(len(f.data))
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Setattr(_ context.Context, _ fs.FileHandle, in *fuse.SetAttrIn, _ *fuse.AttrOut) syscall.Errno {
|
||||
f.attr.Mtime = in.Mtime
|
||||
f.attr.Atime = in.Atime
|
||||
f.attr.Ctime = in.Ctime
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Flush(ctx context.Context, fh fs.FileHandle) syscall.Errno {
|
||||
return fs.OK
|
||||
}
|
||||
|
||||
func (f *WriteableFile) Read(ctx context.Context, fh fs.FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
end := int(off) + len(dest)
|
||||
if end > len(f.data) {
|
||||
end = len(f.data)
|
||||
}
|
||||
return fuse.ReadResultData(f.data[off:end]), fs.OK
|
||||
}
|
||||
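The Write method above grows the buffer to fit the incoming range and then truncates it to the end of that range. The same logic in a self-contained form, for reference:

package main

import "fmt"

// writeAt mirrors the buffer handling used by WriteableFile.Write:
// grow the buffer if the write extends past its end, copy the data in,
// then truncate the buffer to the end of the written range.
func writeAt(buf, data []byte, off int) []byte {
	end := off + len(data)
	if len(buf) < end {
		grown := make([]byte, end)
		copy(grown, buf)
		buf = grown
	}
	copy(buf[off:], data)
	return buf[:end]
}

func main() {
	b := writeAt(nil, []byte("hello world"), 0)
	b = writeAt(b, []byte("HELLO"), 0) // overwrite at offset 0, truncates to 5 bytes
	fmt.Println(string(b))             // prints "HELLO"
}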
nsecbunker.go (deleted, 123 lines)
@@ -1,123 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/bgentry/speakeasy"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip19"
|
||||
"github.com/nbd-wtf/go-nostr/nip46"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var nsecbunker = &cli.Command{
|
||||
Name: "nsecbunker",
|
||||
Usage: "starts a NIP-46 signer daemon with the given --sec key",
|
||||
ArgsUsage: "[relay...]",
|
||||
Description: ``,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "sec",
|
||||
Usage: "secret key to sign the event, as hex or nsec",
|
||||
DefaultText: "the key '1'",
|
||||
Value: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "prompt-sec",
|
||||
Usage: "prompt the user to paste a hex or nsec with which to sign the event",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "yes",
|
||||
Aliases: []string{"y"},
|
||||
Usage: "always respond to any NIP-46 requests from anyone",
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
// try to connect to the relays here
|
||||
qs := url.Values{}
|
||||
relayURLs := make([]string, 0, c.Args().Len())
|
||||
if relayUrls := c.Args().Slice(); len(relayUrls) > 0 {
|
||||
_, relays := connectToAllRelays(c.Context, relayUrls)
|
||||
if len(relays) == 0 {
|
||||
log("failed to connect to any of the given relays.\n")
|
||||
os.Exit(3)
|
||||
}
|
||||
for _, relay := range relays {
|
||||
relayURLs = append(relayURLs, relay.URL)
|
||||
qs.Add("relay", relay.URL)
|
||||
}
|
||||
}
|
||||
if len(relayURLs) == 0 {
|
||||
return fmt.Errorf("not connected to any relays: please specify at least one")
|
||||
}
|
||||
|
||||
// gather the secret key
|
||||
sec, err := gatherSecretKeyFromArguments(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pubkey, err := nostr.GetPublicKey(sec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
npub, _ := nip19.EncodePublicKey(pubkey)
|
||||
log("listening at %s%v%s:\n %spubkey:%s %s\n %snpub:%s %s\n %sconnection code:%s %s\n %sbunker:%s %s\n\n",
|
||||
BOLD_ON, relayURLs, BOLD_OFF,
|
||||
BOLD_ON, BOLD_OFF, pubkey,
|
||||
BOLD_ON, BOLD_OFF, npub,
|
||||
BOLD_ON, BOLD_OFF, fmt.Sprintf("%s#secret?%s", npub, qs.Encode()),
|
||||
BOLD_ON, BOLD_OFF, fmt.Sprintf("bunker://%s?%s", pubkey, qs.Encode()),
|
||||
)
|
||||
|
||||
alwaysYes := c.Bool("yes")
|
||||
|
||||
// subscribe to relays
|
||||
pool := nostr.NewSimplePool(c.Context)
|
||||
events := pool.SubMany(c.Context, relayURLs, nostr.Filters{
|
||||
{
|
||||
Kinds: []int{24133},
|
||||
Tags: nostr.TagMap{"p": []string{pubkey}},
|
||||
},
|
||||
})
|
||||
|
||||
signer := nip46.NewSigner(sec)
|
||||
for ie := range events {
|
||||
req, resp, eventResponse, harmless, err := signer.HandleRequest(ie.Event)
|
||||
if err != nil {
|
||||
log("< failed to handle request from %s: %w", ie.Event.PubKey, err)
|
||||
continue
|
||||
}
|
||||
|
||||
jreq, _ := json.MarshalIndent(req, " ", " ")
|
||||
log("- got request from '%s': %s\n", ie.Event.PubKey, string(jreq))
|
||||
jresp, _ := json.MarshalIndent(resp, " ", " ")
|
||||
log("~ responding with %s\n", string(jresp))
|
||||
|
||||
if alwaysYes || harmless || askUserIfWeCanRespond() {
|
||||
if err := ie.Relay.Publish(c.Context, eventResponse); err == nil {
|
||||
log("* sent response!\n")
|
||||
} else {
|
||||
log("* failed to send response: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func askUserIfWeCanRespond() bool {
|
||||
answer, err := speakeasy.FAsk(os.Stderr,
|
||||
fmt.Sprintf("proceed? y/n"))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if answer == "y" || answer == "yes" {
|
||||
return true
|
||||
}
|
||||
|
||||
return askUserIfWeCanRespond()
|
||||
}
|
||||
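For reference, the connection strings printed by this removed command were plain URL compositions. A standalone sketch of the same formatting (the key and relay are placeholders):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	pubkey := "c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5" // placeholder
	npub := "npub1..."                                                          // placeholder

	qs := url.Values{}
	qs.Add("relay", "wss://relay.example.com") // placeholder relay

	// same layout as the removed nsecbunker output above
	fmt.Printf("connection code: %s#secret?%s\n", npub, qs.Encode())
	fmt.Printf("bunker: bunker://%s?%s\n", pubkey, qs.Encode())
}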
outbox.go (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"fiatjaf.com/nostr/sdk"
|
||||
"fiatjaf.com/nostr/sdk/hints/bbolth"
|
||||
"github.com/fatih/color"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var (
|
||||
hintsFilePath string
|
||||
hintsFileExists bool
|
||||
)
|
||||
|
||||
func initializeOutboxHintsDB(c *cli.Command, sys *sdk.System) error {
|
||||
configPath := c.String("config-path")
|
||||
if configPath != "" {
|
||||
hintsFilePath = filepath.Join(configPath, "outbox/hints.db")
|
||||
}
|
||||
if hintsFilePath != "" {
|
||||
if _, err := os.Stat(hintsFilePath); err == nil {
|
||||
hintsFileExists = true
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if hintsFileExists && hintsFilePath != "" {
|
||||
hintsdb, err := bbolth.NewBoltHints(hintsFilePath)
|
||||
if err == nil {
|
||||
sys.Hints = hintsdb
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var outbox = &cli.Command{
|
||||
Name: "outbox",
|
||||
Usage: "manage outbox relay hints database",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Commands: []*cli.Command{
|
||||
{
|
||||
Name: "init",
|
||||
Usage: "initialize the outbox hints database",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
if hintsFileExists {
|
||||
return nil
|
||||
}
|
||||
if hintsFilePath == "" {
|
||||
return fmt.Errorf("couldn't find a place to store the hints, pass --config-path to fix.")
|
||||
}
|
||||
|
||||
os.MkdirAll(filepath.Dir(hintsFilePath), 0755)
|
||||
_, err := bbolth.NewBoltHints(hintsFilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create bolt hints db at '%s': %w", hintsFilePath, err)
|
||||
}
|
||||
|
||||
log("initialized hints database at %s\n", hintsFilePath)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "list",
|
||||
Usage: "list outbox relays for a given pubkey",
|
||||
ArgsUsage: "<pubkey>",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
if !hintsFileExists {
|
||||
log(color.YellowString("running with temporary fragile data.\n"))
|
||||
log(color.YellowString("call `nak outbox init` to setup persistence.\n"))
|
||||
}
|
||||
|
||||
if c.Args().Len() != 1 {
|
||||
return fmt.Errorf("expected exactly one argument (pubkey)")
|
||||
}
|
||||
|
||||
pk, err := parsePubKey(c.Args().First())
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid public key '%s': %w", c.Args().First(), err)
|
||||
}
|
||||
|
||||
for _, relay := range sys.FetchOutboxRelays(ctx, pk, 6) {
|
||||
stdout(relay)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
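A hedged sketch of what `outbox init` enables: attaching a bolt-backed hints store to the SDK before asking it for outbox relays. sdk.NewSystem and the file path are assumptions for illustration.

package main

import (
	"context"
	"fmt"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/sdk"
	"fiatjaf.com/nostr/sdk/hints/bbolth"
)

func main() {
	ctx := context.Background()

	// sdk.NewSystem() is assumed to be the usual constructor; what matters is
	// that a persistent hints store can be attached before any outbox queries.
	sys := sdk.NewSystem()

	if hintsdb, err := bbolth.NewBoltHints("/tmp/hints.db"); err == nil {
		sys.Hints = hintsdb
	}

	var pk nostr.PubKey // fill in a real public key
	for _, relay := range sys.FetchOutboxRelays(ctx, pk, 6) {
		fmt.Println(relay)
	}
}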
publish.go (new file, 181 lines)
@@ -0,0 +1,181 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/nip19"
|
||||
"fiatjaf.com/nostr/sdk"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var publish = &cli.Command{
|
||||
Name: "publish",
|
||||
Usage: "publishes a note with content from stdin",
|
||||
Description: `reads content from stdin and publishes it as a note, optionally as a reply to another note.
|
||||
|
||||
example:
|
||||
echo "hello world" | nak publish
|
||||
echo "I agree!" | nak publish --reply nevent1...
|
||||
echo "tagged post" | nak publish -t t=mytag -t e=someeventid`,
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: append(defaultKeyFlags,
|
||||
&cli.StringFlag{
|
||||
Name: "reply",
|
||||
Usage: "event id, naddr1 or nevent1 code to reply to",
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "tag",
|
||||
Aliases: []string{"t"},
|
||||
Usage: "sets a tag field on the event, takes a value like -t e=<id> or -t sometag=\"value one;value two;value three\"",
|
||||
},
|
||||
&NaturalTimeFlag{
|
||||
Name: "created-at",
|
||||
Aliases: []string{"time", "ts"},
|
||||
Usage: "unix timestamp value for the created_at field",
|
||||
DefaultText: "now",
|
||||
Value: nostr.Now(),
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "auth",
|
||||
Usage: "always perform nip42 \"AUTH\" when facing an \"auth-required: \" rejection and try again",
|
||||
Category: CATEGORY_EXTRAS,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "nevent",
|
||||
Usage: "print the nevent code (to stderr) after the event is published",
|
||||
Category: CATEGORY_EXTRAS,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "confirm",
|
||||
Usage: "ask before publishing the event",
|
||||
Category: CATEGORY_EXTRAS,
|
||||
},
|
||||
),
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
content, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read from stdin: %w", err)
|
||||
}
|
||||
|
||||
evt := nostr.Event{
|
||||
Kind: 1,
|
||||
Content: strings.TrimSpace(string(content)),
|
||||
Tags: make(nostr.Tags, 0, 4),
|
||||
CreatedAt: nostr.Now(),
|
||||
}
|
||||
|
||||
// handle timestamp flag
|
||||
if c.IsSet("created-at") {
|
||||
evt.CreatedAt = getNaturalDate(c, "created-at")
|
||||
}
|
||||
|
||||
// handle reply flag
|
||||
var replyRelays []string
|
||||
if replyTo := c.String("reply"); replyTo != "" {
|
||||
var replyEvent *nostr.Event
|
||||
|
||||
// try to decode as nevent or naddr first
|
||||
if strings.HasPrefix(replyTo, "nevent1") || strings.HasPrefix(replyTo, "naddr1") {
|
||||
_, value, err := nip19.Decode(replyTo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid reply target: %w", err)
|
||||
}
|
||||
|
||||
switch pointer := value.(type) {
|
||||
case nostr.EventPointer:
|
||||
replyEvent, _, err = sys.FetchSpecificEvent(ctx, pointer, sdk.FetchSpecificEventParameters{})
|
||||
case nostr.EntityPointer:
|
||||
replyEvent, _, err = sys.FetchSpecificEvent(ctx, pointer, sdk.FetchSpecificEventParameters{})
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch reply target event: %w", err)
|
||||
}
|
||||
} else {
|
||||
// try as raw event ID
|
||||
id, err := nostr.IDFromHex(replyTo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid event id: %w", err)
|
||||
}
|
||||
replyEvent, _, err = sys.FetchSpecificEvent(ctx, nostr.EventPointer{ID: id}, sdk.FetchSpecificEventParameters{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch reply target event: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if replyEvent.Kind != 1 {
|
||||
evt.Kind = 1111
|
||||
}
|
||||
|
||||
// add reply tags
|
||||
evt.Tags = append(evt.Tags,
|
||||
nostr.Tag{"e", replyEvent.ID.Hex(), "", "reply"},
|
||||
nostr.Tag{"p", replyEvent.PubKey.Hex()},
|
||||
)
|
||||
|
||||
replyRelays = sys.FetchInboxRelays(ctx, replyEvent.PubKey, 3)
|
||||
}
|
||||
|
||||
// handle other tags -- copied from event.go
|
||||
tagFlags := c.StringSlice("tag")
|
||||
for _, tagFlag := range tagFlags {
|
||||
// tags are in the format key=value
|
||||
tagName, tagValue, found := strings.Cut(tagFlag, "=")
|
||||
tag := []string{tagName}
|
||||
if found {
|
||||
// tags may also contain extra elements separated with a ";"
|
||||
tagValues := strings.Split(tagValue, ";")
|
||||
tag = append(tag, tagValues...)
|
||||
}
|
||||
evt.Tags = append(evt.Tags, tag)
|
||||
}
|
||||
|
||||
// process the content
|
||||
targetRelays := sys.PrepareNoteEvent(ctx, &evt)
|
||||
|
||||
// connect to all the relays (like event.go)
|
||||
kr, _, err := gatherKeyerFromArguments(ctx, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pk, err := kr.GetPublicKey(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get our public key: %w", err)
|
||||
}
|
||||
|
||||
relayUrls := sys.FetchWriteRelays(ctx, pk)
|
||||
relayUrls = nostr.AppendUnique(relayUrls, targetRelays...)
|
||||
relayUrls = nostr.AppendUnique(relayUrls, replyRelays...)
|
||||
relayUrls = nostr.AppendUnique(relayUrls, c.Args().Slice()...)
|
||||
relays := connectToAllRelays(ctx, c, relayUrls, nil,
|
||||
nostr.PoolOptions{
|
||||
AuthHandler: func(ctx context.Context, authEvent *nostr.Event) error {
|
||||
return authSigner(ctx, c, func(s string, args ...any) {}, authEvent)
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
if len(relays) == 0 {
|
||||
if len(relayUrls) == 0 {
|
||||
return fmt.Errorf("no relays to publish this note to.")
|
||||
} else {
|
||||
return fmt.Errorf("failed to connect to any of [ %v ].", relayUrls)
|
||||
}
|
||||
}
|
||||
|
||||
// sign the event
|
||||
if err := kr.SignEvent(ctx, &evt); err != nil {
|
||||
return fmt.Errorf("error signing event: %w", err)
|
||||
}
|
||||
|
||||
// print
|
||||
stdout(evt.String())
|
||||
|
||||
// publish (like event.go)
|
||||
return publishFlow(ctx, c, kr, evt, relays)
|
||||
},
|
||||
}
|
||||
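The --tag handling above follows the key=value convention with ";"-separated extra elements. The same split in a self-contained form:

package main

import (
	"fmt"
	"strings"
)

// parseTagFlag turns "-t" values like `e=<id>` or `t=one;two;three`
// into nostr-style tag slices, the same way the loop above does.
func parseTagFlag(flag string) []string {
	name, value, found := strings.Cut(flag, "=")
	tag := []string{name}
	if found {
		tag = append(tag, strings.Split(value, ";")...)
	}
	return tag
}

func main() {
	fmt.Println(parseTagFlag("t=one;two;three")) // [t one two three]
	fmt.Println(parseTagFlag("expiration"))      // [expiration]
}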
relay.go (41 changed lines)
@@ -1,37 +1,36 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr/nip11"
|
||||
"github.com/urfave/cli/v2"
|
||||
"fiatjaf.com/nostr/nip11"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var relay = &cli.Command{
|
||||
Name: "relay",
|
||||
Usage: "gets the relay information document for the given relay, as JSON",
|
||||
Description: `example:
|
||||
nak relay nostr.wine`,
|
||||
ArgsUsage: "<relay-url>",
|
||||
Action: func(c *cli.Context) error {
|
||||
url := c.Args().First()
|
||||
if url == "" {
|
||||
return fmt.Errorf("specify the <relay-url>")
|
||||
}
|
||||
Description: `
|
||||
nak relay nostr.wine
|
||||
`,
|
||||
ArgsUsage: "<relay-url>",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for url := range getStdinLinesOrArguments(c.Args()) {
|
||||
if url == "" {
|
||||
return fmt.Errorf("specify the <relay-url>")
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(url, "wss://") && !strings.HasPrefix(url, "ws://") {
|
||||
url = "wss://" + url
|
||||
}
|
||||
info, err := nip11.Fetch(ctx, url)
|
||||
if err != nil {
|
||||
ctx = lineProcessingError(ctx, "failed to fetch '%s' information document: %w", url, err)
|
||||
continue
|
||||
}
|
||||
|
||||
info, err := nip11.Fetch(c.Context, url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch '%s' information document: %w", url, err)
|
||||
pretty, _ := json.MarshalIndent(info, "", " ")
|
||||
stdout(string(pretty))
|
||||
}
|
||||
|
||||
pretty, _ := json.MarshalIndent(info, "", " ")
|
||||
fmt.Println(string(pretty))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
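A minimal standalone use of the nip11 fetch shown above, against a single relay:

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"fiatjaf.com/nostr/nip11"
)

func main() {
	// fetch the NIP-11 relay information document and print it as indented JSON
	info, err := nip11.Fetch(context.Background(), "wss://nostr.wine")
	if err != nil {
		fmt.Println("failed:", err)
		return
	}
	pretty, _ := json.MarshalIndent(info, "", "  ")
	fmt.Println(string(pretty))
}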
req.go (565 changed lines)
@@ -1,22 +1,36 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/eventstore"
|
||||
"fiatjaf.com/nostr/eventstore/slicestore"
|
||||
"fiatjaf.com/nostr/eventstore/wrappers"
|
||||
"fiatjaf.com/nostr/nip42"
|
||||
"fiatjaf.com/nostr/nip77"
|
||||
"github.com/fatih/color"
|
||||
"github.com/mailru/easyjson"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/urfave/cli/v2"
|
||||
"github.com/urfave/cli/v3"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const CATEGORY_FILTER_ATTRIBUTES = "FILTER ATTRIBUTES"
|
||||
const (
|
||||
CATEGORY_FILTER_ATTRIBUTES = "FILTER ATTRIBUTES"
|
||||
// CATEGORY_SIGNER = "SIGNER OPTIONS" -- defined at event.go as the same (yes, I know)
|
||||
)
|
||||
|
||||
var req = &cli.Command{
|
||||
Name: "req",
|
||||
Usage: "generates encoded REQ messages and optionally use them to talk to relays",
|
||||
Description: `outputs a NIP-01 Nostr filter. when a relay is not given, will print the filter, otherwise will connect to the given relay and send the filter.
|
||||
Description: `outputs a nip01 Nostr filter. when a relay is not given, will print the filter, otherwise will connect to the given relay and send the filter.
|
||||
|
||||
example:
|
||||
nak req -k 1 -l 15 wss://nostr.wine wss://nostr-pub.wellorder.net
|
||||
@@ -26,106 +40,110 @@ it can also take a filter from stdin, optionally modify it with flags and send i
|
||||
|
||||
example:
|
||||
echo '{"kinds": [1], "#t": ["test"]}' | nak req -l 5 -k 4549 --tag t=spam wss://nostr-pub.wellorder.net`,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringSliceFlag{
|
||||
Name: "author",
|
||||
Aliases: []string{"a"},
|
||||
Usage: "only accept events from these authors (pubkey as hex)",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "id",
|
||||
Aliases: []string{"i"},
|
||||
Usage: "only accept events with these ids (hex)",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.IntSliceFlag{
|
||||
Name: "kind",
|
||||
Aliases: []string{"k"},
|
||||
Usage: "only accept events with these kind numbers",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "tag",
|
||||
Aliases: []string{"t"},
|
||||
Usage: "takes a tag like -t e=<id>, only accept events with these tags",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "e",
|
||||
Usage: "shortcut for --tag e=<value>",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "p",
|
||||
Usage: "shortcut for --tag p=<value>",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "since",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "only accept events newer than this (unix timestamp)",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "until",
|
||||
Aliases: []string{"u"},
|
||||
Usage: "only accept events older than this (unix timestamp)",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.IntFlag{
|
||||
Name: "limit",
|
||||
Aliases: []string{"l"},
|
||||
Usage: "only accept up to this number of events",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "search",
|
||||
Usage: "a NIP-50 search query, use it only with relays that explicitly support it",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "stream",
|
||||
Usage: "keep the subscription open, printing all events as they are returned",
|
||||
DefaultText: "false, will close on EOSE",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "bare",
|
||||
Usage: "when printing the filter, print just the filter, not enveloped in a [\"REQ\", ...] array",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "auth",
|
||||
Usage: "always perform NIP-42 \"AUTH\" when facing an \"auth-required: \" rejection and try again",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "sec",
|
||||
Usage: "secret key to sign the AUTH challenge, as hex or nsec",
|
||||
DefaultText: "the key '1'",
|
||||
Value: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "prompt-sec",
|
||||
Usage: "prompt the user to paste a hex or nsec with which to sign the AUTH challenge",
|
||||
},
|
||||
},
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: append(defaultKeyFlags,
|
||||
append(reqFilterFlags,
|
||||
&cli.StringFlag{
|
||||
Name: "only-missing",
|
||||
Usage: "use nip77 negentropy to only fetch events that aren't present in the given jsonl file",
|
||||
TakesFile: true,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "ids-only",
|
||||
Usage: "use nip77 to fetch just a list of ids",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "stream",
|
||||
Usage: "keep the subscription open, printing all events as they are returned",
|
||||
DefaultText: "false, will close on EOSE",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "outbox",
|
||||
Usage: "use outbox relays from specified public keys",
|
||||
DefaultText: "false, will only use manually-specified relays",
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "outbox-relays-per-pubkey",
|
||||
Aliases: []string{"n"},
|
||||
Usage: "number of outbox relays to use for each pubkey",
|
||||
Value: 3,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "paginate",
|
||||
Usage: "make multiple REQs to the relay decreasing the value of 'until' until 'limit' or 'since' conditions are met",
|
||||
DefaultText: "false",
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "paginate-interval",
|
||||
Usage: "time between queries when using --paginate",
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "paginate-global-limit",
|
||||
Usage: "global limit at which --paginate should stop",
|
||||
DefaultText: "uses the value given by --limit/-l or infinite",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "bare",
|
||||
Usage: "when printing the filter, print just the filter, not enveloped in a [\"REQ\", ...] array",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "auth",
|
||||
Usage: "always perform nip42 \"AUTH\" when facing an \"auth-required: \" rejection and try again",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "force-pre-auth",
|
||||
Aliases: []string{"fpa"},
|
||||
Usage: "after connecting, for a nip42 \"AUTH\" message to be received, act on it and only then send the \"REQ\"",
|
||||
Category: CATEGORY_SIGNER,
|
||||
},
|
||||
)...,
|
||||
),
|
||||
ArgsUsage: "[relay...]",
|
||||
Action: func(c *cli.Context) error {
|
||||
var pool *nostr.SimplePool
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
negentropy := c.Bool("ids-only") || c.IsSet("only-missing")
|
||||
if negentropy {
|
||||
if c.Bool("paginate") || c.Bool("stream") || c.Bool("outbox") {
|
||||
return fmt.Errorf("negentropy is incompatible with --stream, --outbox or --paginate")
|
||||
}
|
||||
}
|
||||
|
||||
if c.Bool("paginate") && c.Bool("stream") {
|
||||
return fmt.Errorf("incompatible flags --paginate and --stream")
|
||||
}
|
||||
|
||||
if c.Bool("paginate") && c.Bool("outbox") {
|
||||
return fmt.Errorf("incompatible flags --paginate and --outbox")
|
||||
}
|
||||
|
||||
relayUrls := c.Args().Slice()
|
||||
if len(relayUrls) > 0 {
|
||||
var relays []*nostr.Relay
|
||||
pool, relays = connectToAllRelays(c.Context, relayUrls, nostr.WithAuthHandler(func(evt *nostr.Event) error {
|
||||
if !c.Bool("auth") {
|
||||
return fmt.Errorf("auth not authorized")
|
||||
}
|
||||
sec, err := gatherSecretKeyFromArguments(c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pk, _ := nostr.GetPublicKey(sec)
|
||||
log("performing auth as %s...\n", pk)
|
||||
return evt.Sign(sec)
|
||||
}))
|
||||
if len(relayUrls) > 0 && !negentropy {
|
||||
// this is used both for the normal AUTH (after "auth-required:" is received) or forced pre-auth
|
||||
// connect to all relays we expect to use in this call in parallel
|
||||
forcePreAuthSigner := authSigner
|
||||
if !c.Bool("force-pre-auth") {
|
||||
forcePreAuthSigner = nil
|
||||
}
|
||||
relays := connectToAllRelays(
|
||||
ctx,
|
||||
c,
|
||||
relayUrls,
|
||||
forcePreAuthSigner,
|
||||
nostr.PoolOptions{
|
||||
AuthHandler: func(ctx context.Context, authEvent *nostr.Event) error {
|
||||
return authSigner(ctx, c, func(s string, args ...any) {
|
||||
if strings.HasPrefix(s, "authenticating as") {
|
||||
cleanUrl, _ := strings.CutPrefix(
|
||||
nip42.GetRelayURLFromAuthEvent(*authEvent),
|
||||
"wss://",
|
||||
)
|
||||
s = "authenticating to " + color.CyanString(cleanUrl) + " as" + s[len("authenticating as"):]
|
||||
}
|
||||
log(s+"\n", args...)
|
||||
}, authEvent)
|
||||
},
|
||||
})
|
||||
|
||||
// stop here already if all connections failed
|
||||
if len(relays) == 0 {
|
||||
log("failed to connect to any of the given relays.\n")
|
||||
os.Exit(3)
|
||||
@@ -136,73 +154,161 @@ example:
|
||||
}
|
||||
}
|
||||
|
||||
for stdinFilter := range getStdinLinesOrBlank() {
|
||||
// go line by line from stdin or run once with input from flags
|
||||
for stdinFilter := range getJsonsOrBlank() {
|
||||
filter := nostr.Filter{}
|
||||
if stdinFilter != "" {
|
||||
if err := easyjson.Unmarshal([]byte(stdinFilter), &filter); err != nil {
|
||||
lineProcessingError(c, "invalid filter '%s' received from stdin: %s", stdinFilter, err)
|
||||
ctx = lineProcessingError(ctx, "invalid filter '%s' received from stdin: %s", stdinFilter, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if authors := c.StringSlice("author"); len(authors) > 0 {
|
||||
filter.Authors = append(filter.Authors, authors...)
|
||||
if err := applyFlagsToFilter(c, &filter); err != nil {
|
||||
return err
|
||||
}
|
||||
if ids := c.StringSlice("id"); len(ids) > 0 {
|
||||
filter.IDs = append(filter.IDs, ids...)
|
||||
}
|
||||
if kinds := c.IntSlice("kind"); len(kinds) > 0 {
|
||||
filter.Kinds = append(filter.Kinds, kinds...)
|
||||
}
|
||||
if search := c.String("search"); search != "" {
|
||||
filter.Search = search
|
||||
}
|
||||
tags := make([][]string, 0, 5)
|
||||
for _, tagFlag := range c.StringSlice("tag") {
|
||||
spl := strings.Split(tagFlag, "=")
|
||||
if len(spl) == 2 && len(spl[0]) == 1 {
|
||||
tags = append(tags, spl)
|
||||
|
||||
if len(relayUrls) > 0 || c.Bool("outbox") {
|
||||
if negentropy {
|
||||
store := &slicestore.SliceStore{}
|
||||
store.Init()
|
||||
|
||||
if syncFile := c.String("only-missing"); syncFile != "" {
|
||||
file, err := os.Open(syncFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open sync file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
scanner := bufio.NewScanner(file)
|
||||
scanner.Buffer(make([]byte, 16*1024*1024), 256*1024*1024)
|
||||
for scanner.Scan() {
|
||||
var evt nostr.Event
|
||||
if err := easyjson.Unmarshal([]byte(scanner.Text()), &evt); err != nil {
|
||||
continue
|
||||
}
|
||||
if err := store.SaveEvent(evt); err != nil || err == eventstore.ErrDupEvent {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return fmt.Errorf("failed to read sync file: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
target := PrintingQuerierPublisher{
|
||||
QuerierPublisher: wrappers.StorePublisher{Store: store, MaxLimit: math.MaxInt},
|
||||
}
|
||||
|
||||
var source nostr.Querier = nil
|
||||
if c.IsSet("only-missing") {
|
||||
source = target
|
||||
}
|
||||
|
||||
handle := nip77.SyncEventsFromIDs
|
||||
|
||||
if c.Bool("ids-only") {
|
||||
seen := make(map[nostr.ID]struct{}, max(500, filter.Limit))
|
||||
handle = func(ctx context.Context, dir nip77.Direction) {
|
||||
for id := range dir.Items {
|
||||
if _, ok := seen[id]; ok {
|
||||
continue
|
||||
}
|
||||
seen[id] = struct{}{}
|
||||
stdout(id.Hex())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, url := range relayUrls {
|
||||
err := nip77.NegentropySync(ctx, url, filter, source, target, handle)
|
||||
if err != nil {
|
||||
log("negentropy sync from %s failed: %s", url, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("invalid --tag '%s'", tagFlag)
|
||||
}
|
||||
}
|
||||
for _, etag := range c.StringSlice("e") {
|
||||
tags = append(tags, []string{"e", etag})
|
||||
}
|
||||
for _, ptag := range c.StringSlice("p") {
|
||||
tags = append(tags, []string{"p", ptag})
|
||||
}
|
||||
var results chan nostr.RelayEvent
|
||||
opts := nostr.SubscriptionOptions{
|
||||
Label: "nak-req",
|
||||
}
|
||||
|
||||
if len(tags) > 0 && filter.Tags == nil {
|
||||
filter.Tags = make(nostr.TagMap)
|
||||
}
|
||||
if c.Bool("paginate") {
|
||||
paginator := sys.Pool.PaginatorWithInterval(c.Duration("paginate-interval"))
|
||||
results = paginator(ctx, relayUrls, filter, opts)
|
||||
} else if c.Bool("outbox") {
|
||||
defs := make([]nostr.DirectedFilter, 0, len(filter.Authors)*2)
|
||||
|
||||
for _, tag := range tags {
|
||||
if _, ok := filter.Tags[tag[0]]; !ok {
|
||||
filter.Tags[tag[0]] = make([]string, 0, 3)
|
||||
}
|
||||
filter.Tags[tag[0]] = append(filter.Tags[tag[0]], tag[1])
|
||||
}
|
||||
// hardcoded relays, if any
|
||||
for _, relayUrl := range relayUrls {
|
||||
defs = append(defs, nostr.DirectedFilter{
|
||||
Filter: filter,
|
||||
Relay: relayUrl,
|
||||
})
|
||||
}
|
||||
|
||||
if since := c.Int("since"); since != 0 {
|
||||
ts := nostr.Timestamp(since)
|
||||
filter.Since = &ts
|
||||
}
|
||||
if until := c.Int("until"); until != 0 {
|
||||
ts := nostr.Timestamp(until)
|
||||
filter.Until = &ts
|
||||
}
|
||||
if limit := c.Int("limit"); limit != 0 {
|
||||
filter.Limit = limit
|
||||
}
|
||||
// relays for each pubkey
|
||||
errg := errgroup.Group{}
|
||||
errg.SetLimit(16)
|
||||
mu := sync.Mutex{}
|
||||
for _, pubkey := range filter.Authors {
|
||||
errg.Go(func() error {
|
||||
n := int(c.Uint("outbox-relays-per-pubkey"))
|
||||
for _, url := range sys.FetchOutboxRelays(ctx, pubkey, n) {
|
||||
if slices.Contains(relayUrls, url) {
|
||||
// already hardcoded, ignore
|
||||
continue
|
||||
}
|
||||
if !nostr.IsValidRelayURL(url) {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(relayUrls) > 0 {
|
||||
fn := pool.SubManyEose
|
||||
if c.Bool("stream") {
|
||||
fn = pool.SubMany
|
||||
}
|
||||
for ie := range fn(c.Context, relayUrls, nostr.Filters{filter}) {
|
||||
fmt.Println(ie.Event)
|
||||
matchUrl := func(def nostr.DirectedFilter) bool { return def.Relay == url }
|
||||
idx := slices.IndexFunc(defs, matchUrl)
|
||||
if idx == -1 {
|
||||
// new relay, add it
|
||||
mu.Lock()
|
||||
// check again after locking to prevent races
|
||||
idx = slices.IndexFunc(defs, matchUrl)
|
||||
if idx == -1 {
|
||||
// then add it
|
||||
filter := filter.Clone()
|
||||
filter.Authors = []nostr.PubKey{pubkey}
|
||||
defs = append(defs, nostr.DirectedFilter{
|
||||
Filter: filter,
|
||||
Relay: url,
|
||||
})
|
||||
mu.Unlock()
|
||||
continue // done with this relay url
|
||||
}
|
||||
|
||||
// otherwise we'll just use the idx
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// existing relay, add this pubkey
|
||||
defs[idx].Authors = append(defs[idx].Authors, pubkey)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
errg.Wait()
|
||||
|
||||
if c.Bool("stream") {
|
||||
results = sys.Pool.BatchedSubscribeMany(ctx, defs, opts)
|
||||
} else {
|
||||
results = sys.Pool.BatchedQueryMany(ctx, defs, opts)
|
||||
}
|
||||
} else {
|
||||
if c.Bool("stream") {
|
||||
results = sys.Pool.SubscribeMany(ctx, relayUrls, filter, opts)
|
||||
} else {
|
||||
results = sys.Pool.FetchMany(ctx, relayUrls, filter, opts)
|
||||
}
|
||||
}
|
||||
|
||||
for ie := range results {
|
||||
stdout(ie.Event)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// no relays given, will just print the filter
|
||||
@@ -210,15 +316,154 @@ example:
|
||||
if c.Bool("bare") {
|
||||
result = filter.String()
|
||||
} else {
|
||||
j, _ := json.Marshal(nostr.ReqEnvelope{SubscriptionID: "nak", Filters: nostr.Filters{filter}})
|
||||
j, _ := json.Marshal(nostr.ReqEnvelope{SubscriptionID: "nak", Filters: []nostr.Filter{filter}})
|
||||
result = string(j)
|
||||
}
|
||||
|
||||
fmt.Println(result)
|
||||
stdout(result)
|
||||
}
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var reqFilterFlags = []cli.Flag{
|
||||
&PubKeySliceFlag{
|
||||
Name: "author",
|
||||
Aliases: []string{"a"},
|
||||
Usage: "only accept events from these authors",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&IDSliceFlag{
|
||||
Name: "id",
|
||||
Aliases: []string{"i"},
|
||||
Usage: "only accept events with these ids",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.IntSliceFlag{
|
||||
Name: "kind",
|
||||
Aliases: []string{"k"},
|
||||
Usage: "only accept events with these kind numbers",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "tag",
|
||||
Aliases: []string{"t"},
|
||||
Usage: "takes a tag like -t e=<id>, only accept events with these tags",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "e",
|
||||
Usage: "shortcut for --tag e=<value>",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "p",
|
||||
Usage: "shortcut for --tag p=<value>",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "d",
|
||||
Usage: "shortcut for --tag d=<value>",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&NaturalTimeFlag{
|
||||
Name: "since",
|
||||
Aliases: []string{"s"},
|
||||
Usage: "only accept events newer than this (unix timestamp)",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&NaturalTimeFlag{
|
||||
Name: "until",
|
||||
Aliases: []string{"u"},
|
||||
Usage: "only accept events older than this (unix timestamp)",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "limit",
|
||||
Aliases: []string{"l"},
|
||||
Usage: "only accept up to this number of events",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "search",
|
||||
Usage: "a nip50 search query, use it only with relays that explicitly support it",
|
||||
Category: CATEGORY_FILTER_ATTRIBUTES,
|
||||
},
|
||||
}
|
||||
|
||||
func applyFlagsToFilter(c *cli.Command, filter *nostr.Filter) error {
|
||||
if authors := getPubKeySlice(c, "author"); len(authors) > 0 {
|
||||
filter.Authors = append(filter.Authors, authors...)
|
||||
}
|
||||
if ids := getIDSlice(c, "id"); len(ids) > 0 {
|
||||
filter.IDs = append(filter.IDs, ids...)
|
||||
}
|
||||
for _, kind64 := range c.IntSlice("kind") {
|
||||
filter.Kinds = append(filter.Kinds, nostr.Kind(kind64))
|
||||
}
|
||||
if search := c.String("search"); search != "" {
|
||||
filter.Search = search
|
||||
}
|
||||
tags := make([][]string, 0, 5)
|
||||
for _, tagFlag := range c.StringSlice("tag") {
|
||||
spl := strings.SplitN(tagFlag, "=", 2)
|
||||
if len(spl) == 2 {
|
||||
tags = append(tags, []string{spl[0], decodeTagValue(spl[1])})
|
||||
} else {
|
||||
return fmt.Errorf("invalid --tag '%s'", tagFlag)
|
||||
}
|
||||
}
|
||||
for _, etag := range c.StringSlice("e") {
|
||||
tags = append(tags, []string{"e", decodeTagValue(etag)})
|
||||
}
|
||||
for _, ptag := range c.StringSlice("p") {
|
||||
tags = append(tags, []string{"p", decodeTagValue(ptag)})
|
||||
}
|
||||
for _, dtag := range c.StringSlice("d") {
|
||||
tags = append(tags, []string{"d", decodeTagValue(dtag)})
|
||||
}
|
||||
|
||||
if len(tags) > 0 && filter.Tags == nil {
|
||||
filter.Tags = make(nostr.TagMap)
|
||||
}
|
||||
|
||||
for _, tag := range tags {
|
||||
if _, ok := filter.Tags[tag[0]]; !ok {
|
||||
filter.Tags[tag[0]] = make([]string, 0, 3)
|
||||
}
|
||||
filter.Tags[tag[0]] = append(filter.Tags[tag[0]], tag[1])
|
||||
}
|
||||
|
||||
if c.IsSet("since") {
|
||||
filter.Since = getNaturalDate(c, "since")
|
||||
}
|
||||
if c.IsSet("until") {
|
||||
filter.Until = getNaturalDate(c, "until")
|
||||
}
|
||||
|
||||
if limit := c.Uint("limit"); limit != 0 {
|
||||
filter.Limit = int(limit)
|
||||
} else if c.IsSet("limit") {
|
||||
filter.LimitZero = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type PrintingQuerierPublisher struct {
|
||||
nostr.QuerierPublisher
|
||||
}
|
||||
|
||||
func (p PrintingQuerierPublisher) Publish(ctx context.Context, evt nostr.Event) error {
|
||||
if err := p.QuerierPublisher.Publish(ctx, evt); err == nil {
|
||||
stdout(evt)
|
||||
return nil
|
||||
} else if err == eventstore.ErrDupEvent {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
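When no relay is given, the command above just prints the filter, optionally wrapped in a REQ envelope. A self-contained sketch of that encoding:

package main

import (
	"encoding/json"
	"fmt"

	"fiatjaf.com/nostr"
)

func main() {
	filter := nostr.Filter{
		Kinds: []nostr.Kind{1},
		Limit: 15,
	}

	// same envelope construction used by `nak req` when printing the filter
	j, _ := json.Marshal(nostr.ReqEnvelope{
		SubscriptionID: "nak",
		Filters:        []nostr.Filter{filter},
	})
	fmt.Println(string(j)) // prints something like ["REQ","nak",{"kinds":[1],"limit":15}]
}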
serve.go (new file, 248 lines)
@@ -0,0 +1,248 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/eventstore/slicestore"
|
||||
"fiatjaf.com/nostr/khatru"
|
||||
"fiatjaf.com/nostr/khatru/blossom"
|
||||
"fiatjaf.com/nostr/khatru/grasp"
|
||||
"github.com/bep/debounce"
|
||||
"github.com/fatih/color"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var serve = &cli.Command{
|
||||
Name: "serve",
|
||||
Usage: "starts an in-memory relay for testing purposes",
|
||||
DisableSliceFlagSeparator: true,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "hostname",
|
||||
Usage: "hostname where to listen for connections",
|
||||
Value: "localhost",
|
||||
},
|
||||
&cli.UintFlag{
|
||||
Name: "port",
|
||||
Usage: "port where to listen for connections",
|
||||
Value: 10547,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "events",
|
||||
Usage: "file containing the initial batch of events that will be served by the relay as newline-separated JSON (jsonl)",
|
||||
DefaultText: "the relay will start empty",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "negentropy",
|
||||
Usage: "enable negentropy syncing",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "grasp",
|
||||
Usage: "enable grasp server",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "blossom",
|
||||
Usage: "enable blossom server",
|
||||
},
|
||||
},
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
db := &slicestore.SliceStore{}
|
||||
|
||||
var blobStore *xsync.MapOf[string, []byte]
|
||||
var repoDir string
|
||||
|
||||
var scanner *bufio.Scanner
|
||||
if path := c.String("events"); path != "" {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to file at '%s': %w", path, err)
|
||||
}
|
||||
scanner = bufio.NewScanner(f)
|
||||
} else if isPiped() {
|
||||
scanner = bufio.NewScanner(os.Stdin)
|
||||
}
|
||||
|
||||
if scanner != nil {
|
||||
scanner.Buffer(make([]byte, 16*1024*1024), 256*1024*1024)
|
||||
i := 0
|
||||
for scanner.Scan() {
|
||||
var evt nostr.Event
|
||||
if err := json.Unmarshal(scanner.Bytes(), &evt); err != nil {
|
||||
return fmt.Errorf("invalid event received at line %d: %s (`%s`)", i, err, scanner.Text())
|
||||
}
|
||||
db.SaveEvent(evt)
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
rl := khatru.NewRelay()
|
||||
|
||||
rl.Info.Name = "nak serve"
|
||||
rl.Info.Description = "a local relay for testing, debugging and development."
|
||||
rl.Info.Software = "https://github.com/fiatjaf/nak"
|
||||
rl.Info.Version = version
|
||||
|
||||
rl.UseEventstore(db, 500)
|
||||
|
||||
if c.Bool("negentropy") {
|
||||
rl.Negentropy = true
|
||||
}
|
||||
|
||||
started := make(chan bool)
|
||||
exited := make(chan error)
|
||||
|
||||
hostname := c.String("hostname")
|
||||
port := int(c.Uint("port"))
|
||||
|
||||
var printStatus func()
|
||||
|
||||
if c.Bool("blossom") {
|
||||
bs := blossom.New(rl, fmt.Sprintf("http://%s:%d", hostname, port))
|
||||
bs.Store = blossom.NewMemoryBlobIndex()
|
||||
|
||||
blobStore = xsync.NewMapOf[string, []byte]()
|
||||
bs.StoreBlob = func(ctx context.Context, sha256 string, ext string, body []byte) error {
|
||||
blobStore.Store(sha256+ext, body)
|
||||
log(" got %s %s\n", color.GreenString("blob stored"), sha256+ext)
|
||||
printStatus()
|
||||
return nil
|
||||
}
|
||||
bs.LoadBlob = func(ctx context.Context, sha256 string, ext string) (io.ReadSeeker, *url.URL, error) {
|
||||
if body, ok := blobStore.Load(sha256 + ext); ok {
|
||||
log(" got %s %s\n", color.BlueString("blob downloaded"), sha256+ext)
|
||||
printStatus()
|
||||
return bytes.NewReader(body), nil, nil
|
||||
}
|
||||
return nil, nil, nil
|
||||
}
|
||||
bs.DeleteBlob = func(ctx context.Context, sha256 string, ext string) error {
|
||||
blobStore.Delete(sha256 + ext)
|
||||
log(" got %s %s\n", color.RedString("blob deleted"), sha256+ext)
|
||||
printStatus()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if c.Bool("grasp") {
|
||||
var err error
|
||||
repoDir, err = os.MkdirTemp("", "nak-serve-grasp-repos-")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create grasp repos directory: %w", err)
|
||||
}
|
||||
g := grasp.New(rl, repoDir)
|
||||
g.OnRead = func(ctx context.Context, pubkey nostr.PubKey, repo string) (reject bool, reason string) {
|
||||
log(" got %s %s %s\n", color.CyanString("git read"), pubkey.Hex(), repo)
|
||||
printStatus()
|
||||
return false, ""
|
||||
}
|
||||
g.OnWrite = func(ctx context.Context, pubkey nostr.PubKey, repo string) (reject bool, reason string) {
|
||||
log(" got %s %s %s\n", color.YellowString("git write"), pubkey.Hex(), repo)
|
||||
printStatus()
|
||||
return false, ""
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := rl.Start(hostname, port, started)
|
||||
exited <- err
|
||||
}()
|
||||
|
||||
// relay logging
|
||||
rl.OnRequest = func(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
|
||||
negentropy := ""
|
||||
if khatru.IsNegentropySession(ctx) {
|
||||
negentropy = color.HiBlueString("negentropy ")
|
||||
}
|
||||
|
||||
log(" got %s%s %v\n", negentropy, color.HiYellowString("request"), colors.italic(filter))
|
||||
printStatus()
|
||||
return false, ""
|
||||
}
|
||||
|
||||
rl.OnCount = func(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
|
||||
log(" got %s %v\n", color.HiCyanString("count request"), colors.italic(filter))
|
||||
printStatus()
|
||||
return false, ""
|
||||
}
|
||||
|
||||
rl.OnEvent = func(ctx context.Context, event nostr.Event) (reject bool, msg string) {
|
||||
log(" got %s %v\n", color.BlueString("event"), colors.italic(event))
|
||||
printStatus()
|
||||
return false, ""
|
||||
}
|
||||
|
||||
totalConnections := atomic.Int32{}
|
||||
rl.OnConnect = func(ctx context.Context) {
|
||||
totalConnections.Add(1)
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
totalConnections.Add(-1)
|
||||
}()
|
||||
}
|
||||
|
||||
d := debounce.New(time.Second * 2)
|
||||
printStatus = func() {
|
||||
d(func() {
|
||||
totalEvents, err := db.CountEvents(nostr.Filter{})
|
||||
if err != nil {
|
||||
log("failed to count: %s\n", err)
|
||||
}
|
||||
subs := rl.GetListeningFilters()
|
||||
|
||||
blossomMsg := ""
|
||||
if c.Bool("blossom") {
|
||||
blobsStored := blobStore.Size()
|
||||
blossomMsg = fmt.Sprintf("blobs: %s, ",
|
||||
color.HiMagentaString("%d", blobsStored),
|
||||
)
|
||||
}
|
||||
|
||||
graspMsg := ""
|
||||
if c.Bool("grasp") {
|
||||
gitAnnounced := 0
|
||||
gitStored := 0
|
||||
for evt := range db.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{nostr.Kind(30617)}}, 500) {
|
||||
gitAnnounced++
|
||||
identifier := evt.Tags.GetD()
|
||||
if info, err := os.Stat(filepath.Join(repoDir, identifier)); err == nil && info.IsDir() {
|
||||
gitStored++
|
||||
}
|
||||
}
|
||||
graspMsg = fmt.Sprintf("git announced: %s, git stored: %s, ",
|
||||
color.HiMagentaString("%d", gitAnnounced),
|
||||
color.HiMagentaString("%d", gitStored),
|
||||
)
|
||||
}
|
||||
|
||||
log(" %s events: %s, %s%sconnections: %s, subscriptions: %s\n",
|
||||
color.HiMagentaString("•"),
|
||||
color.HiMagentaString("%d", totalEvents),
|
||||
blossomMsg,
|
||||
graspMsg,
|
||||
color.HiMagentaString("%d", totalConnections.Load()),
|
||||
color.HiMagentaString("%d", len(subs)),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
<-started
|
||||
log("%s relay running at %s", color.HiRedString(">"), colors.boldf("ws://%s:%d", hostname, port))
|
||||
if c.Bool("grasp") {
|
||||
log(" (grasp repos at %s)", repoDir)
|
||||
}
|
||||
log("\n")
|
||||
|
||||
return <-exited
|
||||
},
|
||||
}
|
||||
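A stripped-down version of the relay bootstrap above, using only the calls that appear in serve.go (name, hostname and port are arbitrary):

package main

import (
	"fmt"

	"fiatjaf.com/nostr/eventstore/slicestore"
	"fiatjaf.com/nostr/khatru"
)

func main() {
	db := &slicestore.SliceStore{}

	rl := khatru.NewRelay()
	rl.Info.Name = "tiny test relay"
	rl.UseEventstore(db, 500)

	started := make(chan bool)
	go func() {
		<-started
		fmt.Println("relay running at ws://localhost:10547")
	}()

	// Start blocks until the relay exits
	if err := rl.Start("localhost", 10547, started); err != nil {
		fmt.Println("relay exited:", err)
	}
}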
verify.go (39 changed lines)
@@ -1,41 +1,52 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"context"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/urfave/cli/v2"
|
||||
"fiatjaf.com/nostr"
|
||||
"github.com/urfave/cli/v3"
|
||||
)
|
||||
|
||||
var verify = &cli.Command{
|
||||
Name: "verify",
|
||||
Usage: "checks the hash and signature of an event given through stdin",
|
||||
Usage: "checks the hash and signature of an event given through stdin or as the first argument",
|
||||
Description: `example:
|
||||
echo '{"id":"a889df6a387419ff204305f4c2d296ee328c3cd4f8b62f205648a541b4554dfb","pubkey":"c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5","created_at":1698623783,"kind":1,"tags":[],"content":"hello from the nostr army knife","sig":"84876e1ee3e726da84e5d195eb79358b2b3eaa4d9bd38456fde3e8a2af3f1cd4cda23f23fda454869975b3688797d4c66e12f4c51c1b43c6d2997c5e61865661"}' | nak verify
|
||||
|
||||
it outputs nothing if the verification is successful.`,
|
||||
Action: func(c *cli.Context) error {
|
||||
for stdinEvent := range getStdinLinesOrBlank() {
|
||||
DisableSliceFlagSeparator: true,
|
||||
Action: func(ctx context.Context, c *cli.Command) error {
|
||||
for stdinEvent := range getJsonsOrBlank() {
|
||||
evt := nostr.Event{}
|
||||
if stdinEvent != "" {
|
||||
if err := json.Unmarshal([]byte(stdinEvent), &evt); err != nil {
|
||||
lineProcessingError(c, "invalid event: %s", err)
|
||||
if stdinEvent == "" {
|
||||
stdinEvent = c.Args().First()
|
||||
if stdinEvent == "" {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if evt.GetID() != evt.ID {
|
||||
lineProcessingError(c, "invalid .id, expected %s, got %s", evt.GetID(), evt.ID)
|
||||
if err := json.Unmarshal([]byte(stdinEvent), &evt); err != nil {
|
||||
ctx = lineProcessingError(ctx, "invalid event: %s", err)
|
||||
logverbose("<>: invalid event.\n", evt.ID.Hex())
|
||||
continue
|
||||
}
|
||||
|
||||
if ok, err := evt.CheckSignature(); !ok {
|
||||
lineProcessingError(c, "invalid signature: %s", err)
|
||||
if evt.GetID() != evt.ID {
|
||||
ctx = lineProcessingError(ctx, "invalid .id, expected %s, got %s", evt.GetID(), evt.ID)
|
||||
logverbose("%s: invalid id.\n", evt.ID.Hex())
|
||||
continue
|
||||
}
|
||||
|
||||
if !evt.VerifySignature() {
|
||||
ctx = lineProcessingError(ctx, "invalid signature")
|
||||
logverbose("%s: invalid signature.\n", evt.ID.Hex())
|
||||
continue
|
||||
}
|
||||
|
||||
logverbose("%s: valid.\n", evt.ID.Hex())
|
||||
}
|
||||
|
||||
exitIfLineProcessingError(c)
|
||||
exitIfLineProcessingError(ctx)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
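The id and signature checks above map directly onto the library calls. A minimal standalone sketch (the event JSON is a placeholder):

package main

import (
	"encoding/json"
	"fmt"

	"fiatjaf.com/nostr"
)

func main() {
	raw := `{"id":"...","pubkey":"...","created_at":0,"kind":1,"tags":[],"content":"","sig":"..."}` // placeholder

	var evt nostr.Event
	if err := json.Unmarshal([]byte(raw), &evt); err != nil {
		fmt.Println("invalid event:", err)
		return
	}
	if evt.GetID() != evt.ID {
		fmt.Println("id mismatch")
		return
	}
	if !evt.VerifySignature() {
		fmt.Println("bad signature")
		return
	}
	fmt.Println("valid")
}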
wallet.go (new file, 542 lines)
@@ -0,0 +1,542 @@
|
||||
package main

import (
	"context"
	"fmt"
	"slices"
	"strconv"
	"strings"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/nip60"
	"fiatjaf.com/nostr/nip61"
	"fiatjaf.com/nostr/sdk"
	"github.com/urfave/cli/v3"
)

func prepareWallet(ctx context.Context, c *cli.Command) (*nip60.Wallet, func(), error) {
	kr, _, err := gatherKeyerFromArguments(ctx, c)
	if err != nil {
		return nil, nil, err
	}

	pk, err := kr.GetPublicKey(ctx)
	if err != nil {
		return nil, nil, err
	}

	relays := sys.FetchOutboxRelays(ctx, pk, 3)
	w := nip60.LoadWallet(ctx, kr, sys.Pool, relays, nip60.WalletOptions{})
	if w == nil {
		return nil, nil, fmt.Errorf("error loading wallet")
	}

	w.Processed = func(evt nostr.Event, err error) {
		if err == nil {
			logverbose("processed event %s\n", evt)

			if c.Bool("stream") {
				// after EOSE log updates and the new balance
				select {
				case <-w.Stable:
					switch evt.Kind {
					case 5:
						log("- token deleted\n")
					case 7375:
						log("- token added\n")
					default:
						return
					}

					log(" balance: %d\n", w.Balance())
				default:
				}
			}
		} else {
			log("error processing event %s: %s\n", evt, err)
		}
	}

	w.PublishUpdate = func(event nostr.Event, deleted, received, change *nip60.Token, isHistory bool) {
		desc := "wallet"
		if received != nil {
			mint, _ := strings.CutPrefix(received.Mint, "https://")
			desc = fmt.Sprintf("received from %s with %d proofs totalling %d",
				mint, len(received.Proofs), received.Proofs.Amount())
		} else if change != nil {
			mint, _ := strings.CutPrefix(change.Mint, "https://")
			desc = fmt.Sprintf("change from %s with %d proofs totalling %d",
				mint, len(change.Proofs), change.Proofs.Amount())
		} else if deleted != nil {
			mint, _ := strings.CutPrefix(deleted.Mint, "https://")
			desc = fmt.Sprintf("deleting a used token from %s with %d proofs totalling %d",
				mint, len(deleted.Proofs), deleted.Proofs.Amount())
		} else if isHistory {
			desc = "history entry"
		}

		log("- saving kind:%d event (%s)... ", event.Kind, desc)
		first := true
		for res := range sys.Pool.PublishMany(ctx, relays, event) {
			cleanUrl, _ := strings.CutPrefix(res.RelayURL, "wss://")
			if !first {
				log(", ")
			}
			first = false

			if res.Error != nil {
				log("%s: %s", colors.errorf(cleanUrl), res.Error)
			} else {
				log("%s: ok", colors.successf(cleanUrl))
			}
		}
		log("\n")
	}

	<-w.Stable

	return w, func() {
		w.Close()
	}, nil
}

var wallet = &cli.Command{
	Name: "wallet",
	Usage: "displays the current wallet balance",
	Description: "all wallet data is stored on Nostr relays, signed and encrypted with the given key, and reloaded again from relays on every call.\n\nthe same data can be accessed by other compatible nip60 clients.",
	DisableSliceFlagSeparator: true,
	Flags: append(defaultKeyFlags,
		&cli.BoolFlag{
			Name: "stream",
			Usage: "keep listening for wallet-related events and logging them",
		},
	),
	Action: func(ctx context.Context, c *cli.Command) error {
		w, closew, err := prepareWallet(ctx, c)
		if err != nil {
			return err
		}

		log("balance: ")
		stdout(w.Balance())

		if c.Bool("stream") {
			<-ctx.Done() // this will hang forever
		}

		closew()
		return nil
	},
	Commands: []*cli.Command{
		{
			Name: "mints",
			Usage: "lists, adds or removes default mints from the wallet",
			DisableSliceFlagSeparator: true,
			Action: func(ctx context.Context, c *cli.Command) error {
				w, closew, err := prepareWallet(ctx, c)
				if err != nil {
					return err
				}

				for _, url := range w.Mints {
					stdout(strings.Split(url, "://")[1])
				}

				closew()
				return nil
			},
			Commands: []*cli.Command{
				{
					Name: "add",
					DisableSliceFlagSeparator: true,
					ArgsUsage: "<mint>...",
					Action: func(ctx context.Context, c *cli.Command) error {
						w, closew, err := prepareWallet(ctx, c)
						if err != nil {
							return err
						}

						if err := w.AddMint(ctx, c.Args().Slice()...); err != nil {
							return err
						}

						closew()
						return nil
					},
				},
				{
					Name: "remove",
					DisableSliceFlagSeparator: true,
					ArgsUsage: "<mint>...",
					Action: func(ctx context.Context, c *cli.Command) error {
						w, closew, err := prepareWallet(ctx, c)
						if err != nil {
							return err
						}

						if err := w.RemoveMint(ctx, c.Args().Slice()...); err != nil {
							return err
						}

						closew()
						return nil
					},
				},
			},
		},
		{
			Name: "tokens",
			Usage: "lists existing tokens with their mints and aggregated amounts",
			DisableSliceFlagSeparator: true,
			Action: func(ctx context.Context, c *cli.Command) error {
				w, closew, err := prepareWallet(ctx, c)
				if err != nil {
					return err
				}

				for _, token := range w.Tokens {
					stdout(token.ID(), token.Proofs.Amount(), strings.Split(token.Mint, "://")[1])
				}

				closew()
				return nil
			},
			Commands: []*cli.Command{
				{
					Name: "drop",
					Usage: "deletes a token from the wallet",
					DisableSliceFlagSeparator: true,
					ArgsUsage: "<id>...",
					Action: func(ctx context.Context, c *cli.Command) error {
						ids := c.Args().Slice()
						if len(ids) == 0 {
							return fmt.Errorf("no token ids specified")
						}

						w, closew, err := prepareWallet(ctx, c)
						if err != nil {
							return err
						}

						for _, token := range w.Tokens {
							if slices.Contains(ids, token.ID()) {
								w.DropToken(ctx, token.ID())
								log("dropped %s %d %s\n", token.ID(), token.Proofs.Amount(), strings.Split(token.Mint, "://")[1])
							}
						}

						closew()
						return nil
					},
				},
			},
		},
		{
			Name: "receive",
			Usage: "takes a cashu token string as an argument and adds it to the wallet",
			ArgsUsage: "<token>",
			DisableSliceFlagSeparator: true,
			Flags: []cli.Flag{
				&cli.StringSliceFlag{
					Name: "mint",
					Usage: "mint to swap the token into",
				},
			},
			Action: func(ctx context.Context, c *cli.Command) error {
				args := c.Args().Slice()
				if len(args) != 1 {
					return fmt.Errorf("must be called as `nak wallet receive <token>`")
				}

				w, closew, err := prepareWallet(ctx, c)
				if err != nil {
					return err
				}

				proofs, mint, err := nip60.GetProofsAndMint(args[0])
				if err != nil {
					return err
				}

				if err := w.Receive(ctx, proofs, mint, nip60.ReceiveOptions{
					IntoMint: c.StringSlice("mint"),
				}); err != nil {
					return err
				}

				closew()
				return nil
			},
		},
		{
			Name: "send",
			Usage: "prints a cashu token with the given amount for sending to someone else",
			ArgsUsage: "<amount>",
			DisableSliceFlagSeparator: true,
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name: "mint",
					Usage: "send from a specific mint",
				},
			},
			Action: func(ctx context.Context, c *cli.Command) error {
				args := c.Args().Slice()
				if len(args) != 1 {
					return fmt.Errorf("must be called as `nak wallet send <amount>`")
				}
				amount, err := strconv.ParseUint(args[0], 10, 64)
				if err != nil {
					return fmt.Errorf("amount '%s' is invalid", args[0])
				}

				w, closew, err := prepareWallet(ctx, c)
				if err != nil {
					return err
				}

				var sourceMint string
				if mint := c.String("mint"); mint != "" {
					sourceMint = "http" + nostr.NormalizeURL(mint)[2:]
				}
				proofs, mint, err := w.SendInternal(ctx, amount, nip60.SendOptions{
					SpecificSourceMint: sourceMint,
				})
				if err != nil {
					return err
				}

				stdout(nip60.MakeTokenString(proofs, mint))

				closew()
				return nil
			},
		},
		{
			Name: "pay",
			Usage: "pays a bolt11 lightning invoice and outputs the preimage",
			ArgsUsage: "<invoice>",
			DisableSliceFlagSeparator: true,
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name: "mint",
					Usage: "pay from a specific mint",
				},
			},
			Action: func(ctx context.Context, c *cli.Command) error {
				args := c.Args().Slice()
				if len(args) != 1 {
					return fmt.Errorf("must be called as `nak wallet pay <invoice>`")
				}

				w, closew, err := prepareWallet(ctx, c)
				if err != nil {
					return err
				}

				var sourceMint string
				if mint := c.String("mint"); mint != "" {
					sourceMint = "http" + nostr.NormalizeURL(mint)[2:]
				}

				preimage, err := w.PayBolt11(ctx, args[0], nip60.PayOptions{
					FromMint: sourceMint,
				})
				if err != nil {
					return err
				}

				stdout(preimage)

				closew()
				return nil
			},
		},
		{
			Name: "nutzap",
			Usage: "sends a nip61 nutzap to one or more Nostr profiles and/or events",
			ArgsUsage: "<amount> <target>",
			Description: "<amount> is in satoshis, <target> can be an npub, nprofile, nevent or hex pubkey.",
			DisableSliceFlagSeparator: true,
			Flags: []cli.Flag{
				&cli.StringFlag{
					Name: "mint",
					Usage: "send from a specific mint",
				},
				&cli.StringFlag{
					Name: "message",
					Usage: "attach a message to the nutzap",
				},
			},
			Action: func(ctx context.Context, c *cli.Command) error {
				args := c.Args().Slice()
				if len(args) < 2 {
					return fmt.Errorf("must be called as `nak wallet nutzap <amount> <target>...`")
				}

				w, closew, err := prepareWallet(ctx, c)
				if err != nil {
					return err
				}

				amount, err := strconv.ParseInt(c.Args().First(), 10, 64)
				if err != nil {
					return fmt.Errorf("invalid amount '%s': %w", c.Args().First(), err)
				}

				target := args[1]
				var pm sdk.ProfileMetadata

				var evt *nostr.Event
				var eventId nostr.ID

				if strings.HasPrefix(target, "nevent1") {
					evt, _, err = sys.FetchSpecificEventFromInput(ctx, target, sdk.FetchSpecificEventParameters{
						WithRelays: false,
					})
					if err != nil {
						return err
					}
					eventId = evt.ID
					pm = sys.FetchProfileMetadata(ctx, evt.PubKey)
				} else {
					pm, err = sys.FetchProfileFromInput(ctx, target)
					if err != nil {
						return err
					}
				}

				log("sending %d sat to '%s' (%s)", amount, pm.ShortName(), pm.Npub())

				var sourceMint string
				if mint := c.String("mint"); mint != "" {
					sourceMint = "http" + nostr.NormalizeURL(mint)[2:]
				}

				kr, _, _ := gatherKeyerFromArguments(ctx, c)
				results, err := nip61.SendNutzap(
					ctx,
					kr,
					w,
					sys.Pool,
					uint64(amount),
					pm.PubKey,
					sys.FetchWriteRelays(ctx, pm.PubKey),
					nip61.NutzapOptions{
						Message: c.String("message"),
						SendToRelays: sys.FetchInboxRelays(ctx, pm.PubKey, 3),
						EventID: eventId,
						SpecificSourceMint: sourceMint,
					},
				)
				if err != nil {
					return err
				}

				log("- publishing nutzap... ")
				first := true
				for res := range results {
					cleanUrl, _ := strings.CutPrefix(res.RelayURL, "wss://")
					if !first {
						log(", ")
					}
					first = false
					if res.Error != nil {
						log("%s: %s", colors.errorf(cleanUrl), res.Error)
					} else {
						log("%s: ok", colors.successf(cleanUrl))
					}
				}

				closew()
				return nil
			},
			Commands: []*cli.Command{
				{
					Name: "setup",
					Usage: "sets up your wallet private key and kind:10019 event for receiving nutzaps",
					DisableSliceFlagSeparator: true,
					Flags: []cli.Flag{
						&cli.StringSliceFlag{
							Name: "mint",
							Usage: "mints to receive nutzaps in",
						},
						&cli.StringFlag{
							Name: "private-key",
							Usage: "private key used for receiving nutzaps",
						},
						&cli.BoolFlag{
							Name: "force",
							Aliases: []string{"f"},
							Usage: "forces replacement of private-key",
						},
					},
					Action: func(ctx context.Context, c *cli.Command) error {
						w, closew, err := prepareWallet(ctx, c)
						if err != nil {
							return err
						}

						if w.PrivateKey == nil {
							if sk := c.String("private-key"); sk != "" {
								if err := w.SetPrivateKey(ctx, sk); err != nil {
									return err
								}
							} else {
								return fmt.Errorf("missing --private-key")
							}
						} else if sk := c.String("private-key"); sk != "" && !c.Bool("force") {
							return fmt.Errorf("refusing to replace existing private key, use the --force flag")
						}

						kr, _, _ := gatherKeyerFromArguments(ctx, c)
						pk, _ := kr.GetPublicKey(ctx)
						relays := sys.FetchWriteRelays(ctx, pk)

						info := nip61.Info{}
						ie := sys.Pool.QuerySingle(ctx, relays, nostr.Filter{
							Kinds: []nostr.Kind{10019},
							Authors: []nostr.PubKey{pk},
							Limit: 1,
						}, nostr.SubscriptionOptions{})
						if ie != nil {
							info.ParseEvent(ie.Event)
						}

						if mints := c.StringSlice("mints"); len(mints) == 0 && len(info.Mints) == 0 {
							info.Mints = w.Mints
						}
						if len(info.Mints) == 0 {
							return fmt.Errorf("missing --mint")
						}

						evt := nostr.Event{}
						if err := info.ToEvent(ctx, kr, &evt); err != nil {
							return err
						}

						stdout(evt)
						log("- saving kind:10019 event... ")
						first := true
						for res := range sys.Pool.PublishMany(ctx, relays, evt) {
							cleanUrl, _ := strings.CutPrefix(res.RelayURL, "wss://")

							if !first {
								log(", ")
							}
							first = false

							if res.Error != nil {
								log("%s: %s", colors.errorf(cleanUrl), res.Error)
							} else {
								log("%s: ok", colors.successf(cleanUrl))
							}
						}

						closew()
						return nil
					},
				},
			},
		},
	},
}
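Since wallet.go defines a whole subcommand tree, here is a rough cheat-sheet of how these commands are meant to be invoked, derived only from the Names, ArgsUsage and Flags above; the key flags such as --sec come from defaultKeyFlags defined elsewhere and are assumed here, not shown in this file.

$ nak wallet --sec <key>                        # prints the balance
$ nak wallet mints                              # lists the default mints
$ nak wallet mints add <mint-url>
$ nak wallet tokens                             # lists tokens: id, amount, mint
$ nak wallet receive <cashu-token> [--mint <mint-url>]
$ nak wallet send <amount> [--mint <mint-url>]  # prints a cashu token
$ nak wallet pay <bolt11-invoice>
$ nak wallet nutzap <amount> <target> [--message <text>]
$ nak wallet nutzap setup --mint <mint-url> --private-key <key>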
zapstore.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
repository: https://github.com/fiatjaf/nak
assets:
  - nak-v\d+\.\d+\.\d+-darwin-arm64
  - nak-v\d+\.\d+\.\d+-linux-amd64
  - nak-v\d+\.\d+\.\d+-linux-arm64
remote_metadata:
  - github
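The asset entries above appear to be regular expressions matched against release file names: for example, a hypothetical release asset named nak-v0.2.3-linux-amd64 would match the second pattern, while a darwin-amd64 build would not match any of them.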