2020117-agent 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/exec-processor.d.ts +19 -0
- package/dist/adapters/exec-processor.js +75 -0
- package/dist/adapters/http-processor.d.ts +18 -0
- package/dist/adapters/http-processor.js +102 -0
- package/dist/adapters/none-processor.d.ts +13 -0
- package/dist/adapters/none-processor.js +18 -0
- package/dist/adapters/ollama-processor.d.ts +15 -0
- package/dist/adapters/ollama-processor.js +29 -0
- package/dist/adapters/ollama.d.ts +30 -0
- package/dist/adapters/ollama.js +86 -0
- package/dist/agent.d.ts +17 -0
- package/dist/agent.js +663 -0
- package/dist/api.d.ts +78 -0
- package/dist/api.js +267 -0
- package/dist/cashu.d.ts +41 -0
- package/dist/cashu.js +95 -0
- package/dist/customer.d.ts +18 -0
- package/dist/customer.js +200 -0
- package/dist/pipeline.d.ts +16 -0
- package/dist/pipeline.js +227 -0
- package/dist/processor.d.ts +25 -0
- package/dist/processor.js +35 -0
- package/dist/provider.d.ts +17 -0
- package/dist/provider.js +238 -0
- package/dist/swarm.d.ts +68 -0
- package/dist/swarm.js +122 -0
- package/package.json +41 -0
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
#!/usr/bin/env node
/**
 * Pipeline — chain multiple DVM providers in sequence
 *
 * Usage:
 *   BUDGET_SATS=100 TARGET_LANG=Chinese npm run pipeline "Write a short poem about the moon"
 *
 * Flow:
 *   Phase 1: Connect to Provider A (text generation, kind 5100)
 *            → stream output with micro-payments → collect full text
 *   Phase 2: Connect to Provider B (translation, kind 5302)
 *            → feed Phase 1 output as input → stream translated output
 *
 * Each phase independently mints tokens, negotiates price, and pays per chunk.
 *
 * (Declaration stub only — the runnable implementation is in pipeline.js,
 * which exports nothing; hence the empty export.)
 */
export {};
|
package/dist/pipeline.js
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* Pipeline — chain multiple DVM providers in sequence
|
|
4
|
+
*
|
|
5
|
+
* Usage:
|
|
6
|
+
* BUDGET_SATS=100 TARGET_LANG=Chinese npm run pipeline "Write a short poem about the moon"
|
|
7
|
+
*
|
|
8
|
+
* Flow:
|
|
9
|
+
* Phase 1: Connect to Provider A (text generation, kind 5100)
|
|
10
|
+
* → stream output with micro-payments → collect full text
|
|
11
|
+
* Phase 2: Connect to Provider B (translation, kind 5302)
|
|
12
|
+
* → feed Phase 1 output as input → stream translated output
|
|
13
|
+
*
|
|
14
|
+
* Each phase independently mints tokens, negotiates price, and pays per chunk.
|
|
15
|
+
*/
|
|
16
|
+
import { SwarmNode, topicFromKind } from './swarm.js';
|
|
17
|
+
import { mintTokens, splitTokens } from './cashu.js';
|
|
18
|
+
import { randomBytes } from 'crypto';
|
|
19
|
+
import { getOnlineProviders } from './api.js';
|
|
20
|
+
// --- Tunables, all overridable via environment variables ---
// Note: `Number(...) || fallback` means an unset, non-numeric, or zero value
// all fall back to the default.
const BUDGET_SATS = Number(process.env.BUDGET_SATS) || 100; // total sats across both phases
const MAX_SATS_PER_CHUNK = Number(process.env.MAX_SATS_PER_CHUNK) || 5; // reject offers priced above this
const GEN_KIND = Number(process.env.GEN_KIND) || 5100; // DVM kind for Phase 1 (text generation)
const TRANS_KIND = Number(process.env.TRANS_KIND) || 5302; // DVM kind for Phase 2 (translation)
const TARGET_LANG = process.env.TARGET_LANG || 'Chinese'; // language Phase 2 translates into
|
|
25
|
+
/**
 * Best-effort directory lookup: print the providers currently online on the
 * platform for a given service kind. Purely informational — any platform
 * error is swallowed and reported as a single notice line.
 */
async function showAvailableProviders(kind, label) {
    try {
        const agents = await getOnlineProviders(kind);
        if (agents.length === 0) {
            console.log(`[${label}] No providers online on platform for kind ${kind}`);
            return;
        }
        console.log(`[${label}] ${agents.length} provider(s) online on platform for kind ${kind}:`);
        for (const agent of agents) {
            // Optional fields are rendered only when present.
            const capacity = agent.capacity !== undefined ? `, capacity: ${agent.capacity}` : '';
            const pricing = agent.pricing ? `, pricing: ${JSON.stringify(agent.pricing)}` : '';
            console.log(`[${label}] - ${agent.username || agent.user_id} (${agent.status}${capacity}${pricing})`);
        }
    }
    catch {
        console.log(`[${label}] Could not query platform`);
    }
}
|
|
44
|
+
/**
 * Run a single pipeline step: mint → connect → offer → split → pay → collect output → destroy
 *
 * opts: { kind, prompt, budgetSats, label, maxSatsPerChunk }
 * Resolves with the full streamed output text; rejects on mint/split failure,
 * no peer found, unacceptable offer, provider error, or 120 s of silence.
 * The SwarmNode is destroyed on every exit path via cleanup().
 */
async function runStep(opts) {
    const { kind, prompt, budgetSats, label, maxSatsPerChunk } = opts;
    // Random per-step job id used to correlate all wire messages.
    const jobId = randomBytes(8).toString('hex');
    console.log(`[${label}] Job ${jobId}: "${prompt.slice(0, 60)}..."`);
    console.log(`[${label}] Budget: ${budgetSats} sats, max price: ${maxSatsPerChunk} sat/chunk`);
    // --- Mint tokens: one big token for the whole step's budget ---
    console.log(`[${label}] Minting ${budgetSats} sats...`);
    let bigToken;
    try {
        const minted = await mintTokens(budgetSats);
        bigToken = minted.token;
        console.log(`[${label}] Token ready: ${bigToken.slice(0, 40)}...`);
    }
    catch (e) {
        throw new Error(`[${label}] Mint failed: ${e.message}`);
    }
    // --- Connect to provider via the kind-specific swarm topic ---
    const node = new SwarmNode();
    const topic = topicFromKind(kind);
    console.log(`[${label}] Looking for providers (kind ${kind})...`);
    await node.connect(topic);
    let peer;
    try {
        peer = await node.waitForPeer(30000);
    }
    catch {
        await node.destroy();
        throw new Error(`[${label}] No provider found within 30s`);
    }
    console.log(`[${label}] Connected to provider: ${peer.peerId.slice(0, 12)}...`);
    // --- Streaming payment loop: driven entirely by incoming messages ---
    return new Promise((resolve, reject) => {
        let microTokens = []; // filled after the offer, when we know the cycle size
        let tokenIndex = 0; // next micro-token to spend
        let output = ''; // accumulated chunk data
        // Shared teardown: cancel the watchdog and close the swarm node.
        // (`timer` is declared below; cleanup only runs from async handlers,
        // so the reference is valid by the time it is called.)
        const cleanup = async () => {
            clearTimeout(timer);
            await node.destroy();
        };
        node.on('message', async (msg) => {
            switch (msg.type) {
                case 'offer': {
                    // Provider quoted a price; validate before committing funds.
                    const spc = msg.sats_per_chunk ?? 0;
                    const cpp = msg.chunks_per_payment ?? 0;
                    const satsPerPayment = spc * cpp;
                    console.log(`[${label}] Offer: ${spc} sat/chunk, ${cpp} chunks/payment (${satsPerPayment} sats/cycle)`);
                    if (spc > maxSatsPerChunk) {
                        node.send(peer.socket, { type: 'stop', id: jobId });
                        await cleanup();
                        reject(new Error(`[${label}] Price too high: ${spc} sat/chunk > max ${maxSatsPerChunk}`));
                        return;
                    }
                    if (satsPerPayment <= 0) {
                        await cleanup();
                        reject(new Error(`[${label}] Invalid offer: sats_per_payment = 0`));
                        return;
                    }
                    // Break the big token into per-cycle micro-tokens.
                    console.log(`[${label}] Splitting ${budgetSats} sats into ${satsPerPayment}-sat micro-tokens...`);
                    try {
                        microTokens = await splitTokens(bigToken, satsPerPayment);
                        console.log(`[${label}] Ready: ${microTokens.length} micro-tokens`);
                    }
                    catch (e) {
                        await cleanup();
                        reject(new Error(`[${label}] Split failed: ${e.message}`));
                        return;
                    }
                    if (microTokens.length === 0) {
                        await cleanup();
                        reject(new Error(`[${label}] Budget too small for payment cycle`));
                        return;
                    }
                    // First payment unlocks generation on the provider side.
                    sendNextPayment();
                    break;
                }
                case 'payment_ack':
                    console.log(`[${label}] Payment confirmed: ${msg.amount} sats`);
                    break;
                case 'accepted':
                    console.log(`[${label}] Job accepted, streaming...\n`);
                    break;
                case 'chunk':
                    // Echo to stdout live and accumulate for the caller.
                    if (msg.data) {
                        process.stdout.write(msg.data);
                        output += msg.data;
                    }
                    break;
                case 'pay_required':
                    // Provider paused; pay again if budget remains, else stop.
                    console.log(`\n[${label}] Payment required (earned: ${msg.earned} sats, next: ${msg.next} sats)`);
                    if (tokenIndex < microTokens.length) {
                        sendNextPayment();
                    }
                    else {
                        console.log(`[${label}] Budget exhausted, sending stop`);
                        node.send(peer.socket, { type: 'stop', id: jobId });
                    }
                    break;
                case 'result':
                    // Prefer locally-accumulated chunks; fall back to the
                    // provider's final output field.
                    console.log(`\n[${label}] Done (${(msg.output || '').length} chars, ${msg.total_sats ?? '?'} sats)`);
                    await cleanup();
                    resolve(output || msg.output || '');
                    break;
                case 'error':
                    await cleanup();
                    reject(new Error(`[${label}] Provider error: ${msg.message}`));
                    break;
            }
        });
        // Spend the next micro-token (no-op once the budget is exhausted).
        function sendNextPayment() {
            if (tokenIndex >= microTokens.length)
                return;
            const token = microTokens[tokenIndex++];
            console.log(`[${label}] Sending payment ${tokenIndex}/${microTokens.length}`);
            node.send(peer.socket, { type: 'payment', id: jobId, token });
        }
        // Send request — kicks off the offer/payment handshake above.
        node.send(peer.socket, {
            type: 'request',
            id: jobId,
            kind,
            input: prompt,
            budget: budgetSats,
        });
        // Timeout safety: hard 120 s cap on the whole step.
        const timer = setTimeout(async () => {
            await cleanup();
            reject(new Error(`[${label}] Timeout after 120s`));
        }, 120000);
    });
}
|
|
177
|
+
/**
 * Entry point: read the prompt from argv, split the total budget 60/40
 * between generation and translation, run both phases sequentially, then
 * print a summary. Exits non-zero when no prompt is given or when Phase 1
 * produces no output.
 */
async function main() {
    const prompt = process.argv.slice(2).join(' ');
    if (!prompt) {
        console.error('Usage: BUDGET_SATS=100 TARGET_LANG=Chinese npm run pipeline "your prompt here"');
        process.exit(1);
    }
    console.log('='.repeat(60));
    console.log(`Pipeline: generate (kind ${GEN_KIND}) → translate to ${TARGET_LANG} (kind ${TRANS_KIND})`);
    console.log(`Total budget: ${BUDGET_SATS} sats`);
    console.log('='.repeat(60));
    // 60% (rounded up) to generation, the remainder to translation.
    const genBudget = Math.ceil(BUDGET_SATS * 0.6);
    const transBudget = BUDGET_SATS - genBudget;
    // Phase 1: Text Generation
    console.log(`\n${'─'.repeat(60)}`);
    console.log(`Phase 1: Text Generation (budget: ${genBudget} sats)`);
    console.log('─'.repeat(60));
    await showAvailableProviders(GEN_KIND, 'gen');
    const generated = await runStep({
        kind: GEN_KIND,
        prompt,
        budgetSats: genBudget,
        label: 'gen',
        maxSatsPerChunk: MAX_SATS_PER_CHUNK,
    });
    if (!generated.trim()) {
        console.error('\n[pipeline] Phase 1 produced no output, aborting');
        process.exit(1);
    }
    // Phase 2: Translation — Phase 1 output becomes the new prompt.
    console.log(`\n${'─'.repeat(60)}`);
    console.log(`Phase 2: Translation to ${TARGET_LANG} (budget: ${transBudget} sats)`);
    console.log('─'.repeat(60));
    await showAvailableProviders(TRANS_KIND, 'trans');
    const translated = await runStep({
        kind: TRANS_KIND,
        prompt: `Translate the following text to ${TARGET_LANG}:\n\n${generated}`,
        budgetSats: transBudget,
        label: 'trans',
        maxSatsPerChunk: MAX_SATS_PER_CHUNK,
    });
    // Summary
    console.log(`\n${'='.repeat(60)}`);
    console.log('Pipeline complete!');
    console.log('='.repeat(60));
    console.log(`\nGenerated (${generated.length} chars):\n${generated}`);
    console.log(`\nTranslated (${translated.length} chars):\n${translated}`);
}
|
|
224
|
+
// Entry: run the pipeline; any unhandled rejection is fatal.
main().catch(function (err) {
    const reason = err.message || err;
    console.error('[pipeline] Fatal:', reason);
    process.exit(1);
});
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
 * Processor interface — abstraction over the compute backend.
 *
 * agent.ts talks only to Processor; the actual backend is selected at
 * startup via the PROCESSOR env var:
 *
 *   PROCESSOR=ollama (default) — local Ollama
 *   PROCESSOR=none             — pass-through, no model needed
 *   PROCESSOR=exec:./cmd       — stdin/stdout child process
 *   PROCESSOR=http://url       — remote HTTP endpoint
 */
export interface Processor {
    /** Human-readable name for logs (e.g. "ollama:llama3.2", "none") */
    readonly name: string;
    /** Startup check — may throw to abort launch */
    verify(): Promise<void>;
    /** Non-streaming generation: full response for a prompt */
    generate(prompt: string): Promise<string>;
    /** Streaming generation — yields chunks as they arrive */
    generateStream(prompt: string): AsyncGenerator<string>;
}
/**
 * Factory — reads the PROCESSOR env var and returns the appropriate backend.
 * Async because the selected adapter module is loaded on demand.
 */
export declare function createProcessor(): Promise<Processor>;
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Processor interface — abstraction over the compute backend.
|
|
3
|
+
*
|
|
4
|
+
* agent.ts talks only to Processor; the actual backend is selected at
|
|
5
|
+
* startup via the PROCESSOR env var:
|
|
6
|
+
*
|
|
7
|
+
* PROCESSOR=ollama (default) — local Ollama
|
|
8
|
+
* PROCESSOR=none — pass-through, no model needed
|
|
9
|
+
* PROCESSOR=exec:./cmd — stdin/stdout child process
|
|
10
|
+
* PROCESSOR=http://url — remote HTTP endpoint
|
|
11
|
+
*/
|
|
12
|
+
/**
|
|
13
|
+
* Factory — reads PROCESSOR env var and returns the appropriate backend.
|
|
14
|
+
*/
|
|
15
|
+
export async function createProcessor() {
|
|
16
|
+
const spec = process.env.PROCESSOR || 'ollama';
|
|
17
|
+
if (spec === 'none') {
|
|
18
|
+
const { NoneProcessor } = await import('./adapters/none-processor.js');
|
|
19
|
+
return new NoneProcessor();
|
|
20
|
+
}
|
|
21
|
+
if (spec === 'ollama') {
|
|
22
|
+
const { OllamaProcessor } = await import('./adapters/ollama-processor.js');
|
|
23
|
+
return new OllamaProcessor();
|
|
24
|
+
}
|
|
25
|
+
if (spec.startsWith('exec:')) {
|
|
26
|
+
const cmd = spec.slice('exec:'.length);
|
|
27
|
+
const { ExecProcessor } = await import('./adapters/exec-processor.js');
|
|
28
|
+
return new ExecProcessor(cmd);
|
|
29
|
+
}
|
|
30
|
+
if (spec.startsWith('http://') || spec.startsWith('https://')) {
|
|
31
|
+
const { HttpProcessor } = await import('./adapters/http-processor.js');
|
|
32
|
+
return new HttpProcessor(spec);
|
|
33
|
+
}
|
|
34
|
+
throw new Error(`Unknown PROCESSOR value: "${spec}". Use: none | ollama | exec:<cmd> | http(s)://<url>`);
|
|
35
|
+
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
#!/usr/bin/env node
/**
 * Provider daemon — streaming payment mode
 *
 * Usage:
 *   SATS_PER_CHUNK=1 CHUNKS_PER_PAYMENT=10 npx tsx src/provider.ts
 *
 * Flow:
 *   1. Join Hyperswarm topic for kind 5100 (text generation)
 *   2. Wait for customer connections
 *   3. Receive request → send offer (price quote)
 *   4. Receive first payment → peek to verify, start generating
 *   5. Stream N chunks per payment cycle
 *   6. Pause and send pay_required → wait for next payment
 *   7. On completion or stop → send result, batch claim all tokens
 *
 * (Declaration stub only — the daemon implementation lives in provider.js,
 * which exports nothing; hence the empty export.)
 */
export {};
|
package/dist/provider.js
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* Provider daemon — streaming payment mode
|
|
4
|
+
*
|
|
5
|
+
* Usage:
|
|
6
|
+
* SATS_PER_CHUNK=1 CHUNKS_PER_PAYMENT=10 npx tsx src/provider.ts
|
|
7
|
+
*
|
|
8
|
+
* Flow:
|
|
9
|
+
* 1. Join Hyperswarm topic for kind 5100 (text generation)
|
|
10
|
+
* 2. Wait for customer connections
|
|
11
|
+
* 3. Receive request → send offer (price quote)
|
|
12
|
+
* 4. Receive first payment → peek to verify, start generating
|
|
13
|
+
* 5. Stream N chunks per payment cycle
|
|
14
|
+
* 6. Pause and send pay_required → wait for next payment
|
|
15
|
+
* 7. On completion or stop → send result, batch claim all tokens
|
|
16
|
+
*/
|
|
17
|
+
import { SwarmNode, topicFromKind } from './swarm.js';
|
|
18
|
+
import { receiveToken, peekToken } from './cashu.js';
|
|
19
|
+
import { generateStream, listModels } from './adapters/ollama.js';
|
|
20
|
+
import { hasApiKey, registerService, startHeartbeatLoop } from './api.js';
|
|
21
|
+
// --- Tunables, all overridable via environment variables ---
// Note: `Number(...) || fallback` means an unset, non-numeric, or zero value
// all fall back to the default.
const KIND = Number(process.env.DVM_KIND) || 5100; // service kind this provider serves
const SATS_PER_CHUNK = Number(process.env.SATS_PER_CHUNK) || 1; // quoted price per streamed chunk
const CHUNKS_PER_PAYMENT = Number(process.env.CHUNKS_PER_PAYMENT) || 10; // chunks unlocked per payment cycle
const PAYMENT_TIMEOUT = Number(process.env.PAYMENT_TIMEOUT) || 30000; // ms to wait for a payment before aborting
const MODEL = process.env.OLLAMA_MODEL || 'llama3.2'; // Ollama model used for generation
// Active jobs keyed by job id (the `id` field of incoming messages).
const jobs = new Map();
|
|
27
|
+
/**
 * Daemon entry point: verify the Ollama backend, optionally register on the
 * platform, join the swarm topic for KIND, and serve jobs until SIGINT.
 * All per-job state lives in the module-level `jobs` map; the message
 * handler below mutates it and wakes waiting generation loops.
 */
async function main() {
    // Verify Ollama is running and the configured model is pulled.
    console.log(`[provider] Checking Ollama (model: ${MODEL})...`);
    try {
        const models = await listModels();
        // startsWith allows tag-suffixed names (e.g. "llama3.2:latest").
        if (!models.some(m => m.startsWith(MODEL))) {
            console.warn(`[provider] Model "${MODEL}" not found. Available: ${models.join(', ')}`);
            console.warn(`[provider] Run: ollama pull ${MODEL}`);
            process.exit(1);
        }
        console.log(`[provider] Ollama OK — model "${MODEL}" available`);
    }
    catch (e) {
        console.error(`[provider] Ollama not reachable: ${e.message}`);
        console.error(`[provider] Make sure Ollama is running: ollama serve`);
        process.exit(1);
    }
    // --- Platform registration (optional; daemon also works P2P-only) ---
    let stopHeartbeat = null;
    if (hasApiKey()) {
        console.log('[provider] Registering on platform...');
        await registerService({ kind: KIND, satsPerChunk: SATS_PER_CHUNK, chunksPerPayment: CHUNKS_PER_PAYMENT, model: MODEL });
        stopHeartbeat = startHeartbeatLoop();
    }
    else {
        console.log('[provider] No API key — P2P-only mode');
    }
    const satsPerPayment = SATS_PER_CHUNK * CHUNKS_PER_PAYMENT;
    const node = new SwarmNode();
    const topic = topicFromKind(KIND);
    console.log(`[provider] Streaming payment: ${SATS_PER_CHUNK} sat/chunk, ${CHUNKS_PER_PAYMENT} chunks/payment (${satsPerPayment} sats/cycle)`);
    console.log(`[provider] Joining topic for kind ${KIND}`);
    await node.listen(topic);
    console.log(`[provider] Listening for customers...\n`);
    // Single dispatcher for all three customer message types.
    node.on('message', async (msg, socket, peerId) => {
        const tag = peerId.slice(0, 8);
        if (msg.type === 'request') {
            console.log(`[provider] Job ${msg.id} from ${tag}: "${(msg.input || '').slice(0, 60)}..."`);
            if (msg.budget !== undefined) {
                console.log(`[provider] Customer budget: ${msg.budget} sats`);
            }
            // Initialize job state
            const job = {
                socket,
                credit: 0, // chunks currently prepaid
                tokens: [], // raw tokens to claim after the job
                totalEarned: 0, // running sats total (from peeked amounts)
                stopped: false, // set by the 'stop' handler
                paymentResolve: null, // wakes waitForPayment when a payment lands
            };
            jobs.set(msg.id, job);
            // Send offer (price quote) before any work happens.
            node.send(socket, {
                type: 'offer',
                id: msg.id,
                sats_per_chunk: SATS_PER_CHUNK,
                chunks_per_payment: CHUNKS_PER_PAYMENT,
            });
            console.log(`[provider] Sent offer: ${SATS_PER_CHUNK} sat/chunk, ${CHUNKS_PER_PAYMENT} chunks/payment`);
            // Wait for first payment before starting generation.
            const paid = await waitForPayment(job, msg.id, node);
            if (!paid) {
                console.log(`[provider] Job ${msg.id}: no initial payment, aborting`);
                jobs.delete(msg.id);
                return;
            }
            // Start generating
            node.send(socket, { type: 'accepted', id: msg.id });
            await runGeneration(node, job, msg);
        }
        if (msg.type === 'payment') {
            const job = jobs.get(msg.id);
            if (!job)
                return;
            if (!msg.token) {
                node.send(socket, { type: 'error', id: msg.id, message: 'Payment missing token' });
                return;
            }
            try {
                // Peek (don't claim yet) to read the token's amount; actual
                // redemption happens in batchClaim after the job ends.
                const peek = peekToken(msg.token);
                const chunksUnlocked = Math.floor(peek.amount / SATS_PER_CHUNK);
                job.credit += chunksUnlocked;
                job.totalEarned += peek.amount;
                job.tokens.push(msg.token);
                console.log(`[provider] Payment for ${msg.id}: ${peek.amount} sats → +${chunksUnlocked} chunks (credit: ${job.credit}, total: ${job.totalEarned} sats)`);
                node.send(socket, { type: 'payment_ack', id: msg.id, amount: peek.amount });
                // Wake up generation loop if it's waiting
                if (job.paymentResolve) {
                    job.paymentResolve();
                    job.paymentResolve = null;
                }
            }
            catch (e) {
                node.send(socket, { type: 'error', id: msg.id, message: `Payment failed: ${e.message}` });
            }
        }
        if (msg.type === 'stop') {
            const job = jobs.get(msg.id);
            if (!job)
                return;
            console.log(`[provider] Job ${msg.id}: customer requested stop`);
            job.stopped = true;
            // Wake up generation loop if waiting for payment
            if (job.paymentResolve) {
                job.paymentResolve();
                job.paymentResolve = null;
            }
        }
    });
    // Graceful shutdown: stop heartbeats and leave the swarm cleanly.
    process.on('SIGINT', async () => {
        console.log('\n[provider] Shutting down...');
        if (stopHeartbeat)
            stopHeartbeat();
        await node.destroy();
        process.exit(0);
    });
}
|
|
145
|
+
/**
 * Wait until the job receives a payment (the 'payment' handler in main()
 * invokes job.paymentResolve) or PAYMENT_TIMEOUT elapses.
 * Resolves true on payment, false on timeout; on timeout the customer is
 * notified with an 'error' message. Never rejects.
 */
function waitForPayment(job, jobId, node) {
    return new Promise((resolve) => {
        // Give up after PAYMENT_TIMEOUT ms and tell the customer why.
        const timer = setTimeout(() => {
            job.paymentResolve = null;
            console.log(`[provider] Job ${jobId}: payment timeout (${PAYMENT_TIMEOUT}ms)`);
            node.send(job.socket, {
                type: 'error',
                id: jobId,
                message: `Payment timeout after ${PAYMENT_TIMEOUT}ms`,
            });
            resolve(false);
        }, PAYMENT_TIMEOUT);
        // The 'payment' and 'stop' handlers call this to wake us up.
        job.paymentResolve = () => {
            clearTimeout(timer);
            resolve(true);
        };
        // If the job already has credit, resolve immediately — this covers a
        // payment that landed before this waiter was registered (e.g. between
        // pay_required being sent and this call).
        if (job.credit > 0) {
            clearTimeout(timer);
            job.paymentResolve = null;
            resolve(true);
        }
    });
}
|
|
169
|
+
/**
 * Stream model output to the customer, pausing for payment whenever the
 * prepaid chunk credit runs out. Ends on completion, customer stop, payment
 * timeout, or generation error — in every case a 'result' message carrying
 * whatever was produced is sent, accumulated tokens are batch-claimed, and
 * the job is removed from the `jobs` map.
 */
async function runGeneration(node, job, msg) {
    const jobId = msg.id;
    let fullOutput = '';
    try {
        for await (const chunk of generateStream({ model: MODEL, prompt: msg.input || '' })) {
            // Check for stop (set asynchronously by the 'stop' handler).
            if (job.stopped) {
                console.log(`[provider] Job ${jobId}: stopped by customer`);
                break;
            }
            // Check credit — out of prepaid chunks means pause and bill.
            if (job.credit <= 0) {
                // Ask for more payment
                const nextAmount = SATS_PER_CHUNK * CHUNKS_PER_PAYMENT;
                node.send(job.socket, {
                    type: 'pay_required',
                    id: jobId,
                    earned: job.totalEarned,
                    next: nextAmount,
                });
                console.log(`[provider] Job ${jobId}: pay_required (earned: ${job.totalEarned}, next: ${nextAmount})`);
                // Wait for payment or timeout
                const paid = await waitForPayment(job, jobId, node);
                if (!paid || job.stopped) {
                    console.log(`[provider] Job ${jobId}: ending (paid=${paid}, stopped=${job.stopped})`);
                    break;
                }
            }
            // Send chunk — each chunk delivered consumes one unit of credit.
            fullOutput += chunk;
            node.send(job.socket, { type: 'chunk', id: jobId, data: chunk });
            job.credit--;
        }
    }
    catch (e) {
        console.error(`[provider] Job ${jobId} generation error: ${e.message}`);
        node.send(job.socket, { type: 'error', id: jobId, message: e.message });
    }
    // Send result — partial output is still delivered after stop/timeout/error.
    node.send(job.socket, {
        type: 'result',
        id: jobId,
        output: fullOutput,
        total_sats: job.totalEarned,
    });
    console.log(`[provider] Job ${jobId} completed (${fullOutput.length} chars, ${job.totalEarned} sats earned)`);
    // Batch claim all accumulated tokens (redemption deferred until now).
    await batchClaim(job.tokens, jobId);
    jobs.delete(jobId);
}
|
|
219
|
+
async function batchClaim(tokens, jobId) {
|
|
220
|
+
if (tokens.length === 0)
|
|
221
|
+
return;
|
|
222
|
+
console.log(`[provider] Job ${jobId}: claiming ${tokens.length} tokens...`);
|
|
223
|
+
let totalClaimed = 0;
|
|
224
|
+
for (let i = 0; i < tokens.length; i++) {
|
|
225
|
+
try {
|
|
226
|
+
const received = await receiveToken(tokens[i]);
|
|
227
|
+
totalClaimed += received.amount;
|
|
228
|
+
}
|
|
229
|
+
catch (e) {
|
|
230
|
+
console.warn(`[provider] Job ${jobId}: claim ${i + 1}/${tokens.length} failed: ${e.message}`);
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
console.log(`[provider] Job ${jobId}: claimed ${totalClaimed} sats total`);
|
|
234
|
+
}
|
|
235
|
+
// Top-level bootstrap: any unhandled failure is fatal.
main().catch(function (err) {
    console.error('[provider] Fatal:', err);
    process.exit(1);
});
|
package/dist/swarm.d.ts
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Hyperswarm P2P helper — discover peers and establish encrypted connections
|
|
3
|
+
*
|
|
4
|
+
* Provider: joins a topic (hash of service kind) and listens for connections
|
|
5
|
+
* Customer: joins the same topic to find the provider
|
|
6
|
+
*
|
|
7
|
+
* Wire protocol (newline-delimited JSON) — streaming payment:
|
|
8
|
+
* → { type: "request", id, kind, input, budget } customer sends job with budget
|
|
9
|
+
* ← { type: "offer", id, sats_per_chunk, chunks_per_payment } provider quotes price
|
|
10
|
+
* → { type: "payment", id, token } customer sends micro-token
|
|
11
|
+
* ← { type: "payment_ack", id, amount } provider confirms + accepts
|
|
12
|
+
* ← { type: "accepted", id } provider starts generating
|
|
13
|
+
* ← { type: "chunk", id, data } streaming output (N chunks)
|
|
14
|
+
* ← { type: "pay_required", id, earned, next } provider pauses for payment
|
|
15
|
+
* → { type: "payment", id, token } customer sends next micro-token
|
|
16
|
+
* ← { type: "payment_ack", id, amount } provider confirms
|
|
17
|
+
* ← { type: "chunk", id, data } more chunks...
|
|
18
|
+
* ← { type: "result", id, output, total_sats } final result
|
|
19
|
+
* → { type: "stop", id } customer stops early
|
|
20
|
+
* ← { type: "error", id, message } error
|
|
21
|
+
*/
|
|
22
|
+
import Hyperswarm from 'hyperswarm';
|
|
23
|
+
import { EventEmitter } from 'events';
|
|
24
|
+
/**
 * One newline-delimited JSON frame of the wire protocol (see the protocol
 * summary in this file's header). Optional fields are populated depending
 * on the frame's `type`.
 */
export interface SwarmMessage {
    /** Frame discriminator — which step of the payment/streaming flow this is. */
    type: 'request' | 'accepted' | 'chunk' | 'result' | 'error' | 'payment' | 'payment_ack' | 'offer' | 'pay_required' | 'stop';
    /** Job identifier shared by every frame of a single job. */
    id: string;
    /** Service kind requested ('request'). */
    kind?: number;
    /** Job input text ('request'). */
    input?: string;
    /** Full final output ('result'). */
    output?: string;
    /** One streamed output fragment ('chunk'). */
    data?: string;
    /** Payment micro-token ('payment'). */
    token?: string;
    /** Sats confirmed by the provider ('payment_ack'). */
    amount?: number;
    /** Human-readable error text ('error'). */
    message?: string;
    /** Quoted price per chunk ('offer'). */
    sats_per_chunk?: number;
    /** Chunks unlocked by each payment ('offer'). */
    chunks_per_payment?: number;
    /** Customer's total budget in sats ('request'). */
    budget?: number;
    /** Sats earned so far ('pay_required'). */
    earned?: number;
    /** Sats expected for the next cycle ('pay_required'). */
    next?: number;
    /** Total sats paid over the whole job ('result'). */
    total_sats?: number;
}
|
|
41
|
+
/**
 * Create a deterministic topic hash from a service kind number.
 * All providers of kind 5100 join the same topic so customers can find them.
 *
 * @param kind - DVM service kind (e.g. 5100 for text generation)
 * @returns 32-byte topic buffer suitable for Hyperswarm join/connect
 */
export declare function topicFromKind(kind: number): Buffer;
|
|
46
|
+
/**
 * Thin wrapper around Hyperswarm that handles JSON message framing.
 * Emits 'message' events with the decoded SwarmMessage plus the originating
 * socket/peer (see provider.js / pipeline.js for the handler signatures).
 */
export declare class SwarmNode extends EventEmitter {
    /** Underlying Hyperswarm instance. */
    swarm: Hyperswarm;
    /** Live peer connections — presumably keyed by peer id; verify in swarm.js. */
    connections: Map<string, any>;
    /** Per-connection receive buffers for newline-delimited JSON framing. */
    private buffers;
    constructor();
    /** Send a JSON message to a specific peer */
    send(socket: any, msg: SwarmMessage): void;
    /** Broadcast a JSON message to all connected peers */
    broadcast(msg: SwarmMessage): void;
    /** Join a topic as server (provider) */
    listen(topic: Buffer): Promise<void>;
    /** Join a topic as client (customer) */
    connect(topic: Buffer): Promise<void>;
    /** Wait for the first peer connection (with timeout) */
    waitForPeer(timeoutMs?: number): Promise<{
        socket: any;
        peerId: string;
    }>;
    /** Tear down all connections and leave the swarm. */
    destroy(): Promise<void>;
}
|