2020117-agent 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/exec-processor.d.ts +19 -0
- package/dist/adapters/exec-processor.js +75 -0
- package/dist/adapters/http-processor.d.ts +18 -0
- package/dist/adapters/http-processor.js +102 -0
- package/dist/adapters/none-processor.d.ts +13 -0
- package/dist/adapters/none-processor.js +18 -0
- package/dist/adapters/ollama-processor.d.ts +15 -0
- package/dist/adapters/ollama-processor.js +29 -0
- package/dist/adapters/ollama.d.ts +30 -0
- package/dist/adapters/ollama.js +86 -0
- package/dist/agent.d.ts +17 -0
- package/dist/agent.js +663 -0
- package/dist/api.d.ts +78 -0
- package/dist/api.js +267 -0
- package/dist/cashu.d.ts +41 -0
- package/dist/cashu.js +95 -0
- package/dist/customer.d.ts +18 -0
- package/dist/customer.js +200 -0
- package/dist/pipeline.d.ts +16 -0
- package/dist/pipeline.js +227 -0
- package/dist/processor.d.ts +25 -0
- package/dist/processor.js +35 -0
- package/dist/provider.d.ts +17 -0
- package/dist/provider.js +238 -0
- package/dist/swarm.d.ts +68 -0
- package/dist/swarm.js +122 -0
- package/package.json +41 -0
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/**
 * Exec processor — delegates to an external command via stdin/stdout.
 *
 * PROCESSOR=exec:./my-model.sh
 *
 * - verify(): checks the command file exists and is executable
 * - generate(): spawns process, writes prompt to stdin, reads full stdout
 * - generateStream(): same but yields stdout line-by-line
 */
import type { Processor } from '../processor.js';
export declare class ExecProcessor implements Processor {
    /** Executable path: first whitespace-separated token of the spec. */
    private cmd;
    /** Fixed arguments: remaining whitespace-separated tokens of the spec. */
    private args;
    /** @param cmdSpec command line such as "./my-model.sh --flag", split on whitespace (no shell quoting) */
    constructor(cmdSpec: string);
    /** Log-friendly identifier, e.g. "exec:./my-model.sh". */
    get name(): string;
    /** Rejects when the command is missing or lacks the execute bit. */
    verify(): Promise<void>;
    /** Runs the command once with the prompt on stdin; resolves with full stdout. */
    generate(prompt: string): Promise<string>;
    /** Runs the command; yields stdout line-by-line as it arrives. */
    generateStream(prompt: string): AsyncGenerator<string>;
}
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Exec processor — delegates to an external command via stdin/stdout.
|
|
3
|
+
*
|
|
4
|
+
* PROCESSOR=exec:./my-model.sh
|
|
5
|
+
*
|
|
6
|
+
* - verify(): checks the command file exists and is executable
|
|
7
|
+
* - generate(): spawns process, writes prompt to stdin, reads full stdout
|
|
8
|
+
* - generateStream(): same but yields stdout line-by-line
|
|
9
|
+
*/
|
|
10
|
+
import { spawn } from 'child_process';
|
|
11
|
+
import { access, constants } from 'fs/promises';
|
|
12
|
+
export class ExecProcessor {
|
|
13
|
+
cmd;
|
|
14
|
+
args;
|
|
15
|
+
constructor(cmdSpec) {
|
|
16
|
+
const parts = cmdSpec.split(/\s+/);
|
|
17
|
+
this.cmd = parts[0];
|
|
18
|
+
this.args = parts.slice(1);
|
|
19
|
+
}
|
|
20
|
+
get name() {
|
|
21
|
+
return `exec:${this.cmd}`;
|
|
22
|
+
}
|
|
23
|
+
async verify() {
|
|
24
|
+
try {
|
|
25
|
+
await access(this.cmd, constants.X_OK);
|
|
26
|
+
}
|
|
27
|
+
catch {
|
|
28
|
+
throw new Error(`Exec processor: "${this.cmd}" is not executable or does not exist`);
|
|
29
|
+
}
|
|
30
|
+
}
|
|
31
|
+
generate(prompt) {
|
|
32
|
+
return new Promise((resolve, reject) => {
|
|
33
|
+
const child = spawn(this.cmd, this.args, { stdio: ['pipe', 'pipe', 'pipe'] });
|
|
34
|
+
const chunks = [];
|
|
35
|
+
let stderr = '';
|
|
36
|
+
child.stdout.on('data', (data) => chunks.push(data));
|
|
37
|
+
child.stderr.on('data', (data) => { stderr += data.toString(); });
|
|
38
|
+
child.on('error', (err) => reject(new Error(`Exec spawn error: ${err.message}`)));
|
|
39
|
+
child.on('close', (code) => {
|
|
40
|
+
if (code !== 0) {
|
|
41
|
+
reject(new Error(`Exec process exited with code ${code}: ${stderr}`));
|
|
42
|
+
}
|
|
43
|
+
else {
|
|
44
|
+
resolve(Buffer.concat(chunks).toString('utf-8'));
|
|
45
|
+
}
|
|
46
|
+
});
|
|
47
|
+
child.stdin.write(prompt);
|
|
48
|
+
child.stdin.end();
|
|
49
|
+
});
|
|
50
|
+
}
|
|
51
|
+
async *generateStream(prompt) {
|
|
52
|
+
const child = spawn(this.cmd, this.args, { stdio: ['pipe', 'pipe', 'pipe'] });
|
|
53
|
+
child.stdin.write(prompt);
|
|
54
|
+
child.stdin.end();
|
|
55
|
+
// Yield stdout line-by-line
|
|
56
|
+
let buffer = '';
|
|
57
|
+
try {
|
|
58
|
+
for await (const data of child.stdout) {
|
|
59
|
+
buffer += data.toString();
|
|
60
|
+
const lines = buffer.split('\n');
|
|
61
|
+
buffer = lines.pop();
|
|
62
|
+
for (const line of lines) {
|
|
63
|
+
yield line + '\n';
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
// Flush remaining
|
|
67
|
+
if (buffer.length > 0) {
|
|
68
|
+
yield buffer;
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
finally {
|
|
72
|
+
child.kill();
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
 * HTTP processor — delegates to a remote HTTP endpoint.
 *
 * PROCESSOR=http://localhost:8080/generate
 *
 * - verify(): HEAD request to check endpoint is reachable
 * - generate(): POST JSON { prompt }, reads result/data/output field
 * - generateStream(): POST with Accept: application/x-ndjson, yields lines
 */
import type { Processor } from '../processor.js';
export declare class HttpProcessor implements Processor {
    /** Endpoint URL that receives POST { prompt } requests. */
    private url;
    constructor(url: string);
    /** Log-friendly identifier, e.g. "http:http://localhost:8080/generate". */
    get name(): string;
    /** Rejects when the endpoint is unreachable; any HTTP status except a non-405 error is accepted. */
    verify(): Promise<void>;
    /** POSTs { prompt }; resolves with the response's result/data/output/text/response field. */
    generate(prompt: string): Promise<string>;
    /** POSTs { prompt }; yields NDJSON chunks (chunk/data/text/token/response field) as they arrive. */
    generateStream(prompt: string): AsyncGenerator<string>;
}
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* HTTP processor — delegates to a remote HTTP endpoint.
|
|
3
|
+
*
|
|
4
|
+
* PROCESSOR=http://localhost:8080/generate
|
|
5
|
+
*
|
|
6
|
+
* - verify(): HEAD request to check endpoint is reachable
|
|
7
|
+
* - generate(): POST JSON { prompt }, reads result/data/output field
|
|
8
|
+
* - generateStream(): POST with Accept: application/x-ndjson, yields lines
|
|
9
|
+
*/
|
|
10
|
+
export class HttpProcessor {
    /** Endpoint URL that receives POST { prompt } requests. */
    url;
    /** @param {string} url full endpoint URL, e.g. "http://localhost:8080/generate" */
    constructor(url) {
        this.url = url;
    }
    /** Log-friendly identifier, e.g. "http:http://localhost:8080/generate". */
    get name() {
        return `http:${this.url}`;
    }
    /**
     * HEAD the endpoint to confirm it is reachable.
     * 405 is tolerated (endpoint exists but does not support HEAD).
     * @throws {Error} when the endpoint is unreachable or rejects the probe.
     */
    async verify() {
        try {
            const res = await fetch(this.url, { method: 'HEAD' });
            // Accept any response — we just need to know it's reachable
            if (!res.ok && res.status !== 405) {
                throw new Error(`HTTP ${res.status}`);
            }
        }
        catch (e) {
            // e is unknown — a thrown non-Error would make `e.message` undefined.
            const reason = e instanceof Error ? e.message : String(e);
            throw new Error(`HTTP processor: endpoint not reachable at ${this.url}: ${reason}`);
        }
    }
    /**
     * POST { prompt } as JSON; resolve with the text payload found under one
     * of the common field names (result/data/output/text/response).
     * @throws {Error} on non-2xx status or when no known field is present.
     */
    async generate(prompt) {
        const res = await fetch(this.url, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ prompt }),
        });
        if (!res.ok) {
            const text = await res.text();
            throw new Error(`HTTP processor error ${res.status}: ${text}`);
        }
        const data = await res.json();
        // Try common field names
        const output = data.result ?? data.data ?? data.output ?? data.text ?? data.response;
        if (output === undefined) {
            throw new Error(`HTTP processor: response has no result/data/output/text/response field`);
        }
        return String(output);
    }
    /**
     * POST { prompt } requesting NDJSON; yield the chunk/data/text/token/response
     * field of each JSON line (raw line when it is not JSON). Cancels the body
     * reader if the consumer stops iterating early.
     * @throws {Error} on non-2xx status or missing response body.
     */
    async *generateStream(prompt) {
        const res = await fetch(this.url, {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json',
                'Accept': 'application/x-ndjson',
            },
            body: JSON.stringify({ prompt }),
        });
        if (!res.ok) {
            const text = await res.text();
            throw new Error(`HTTP processor stream error ${res.status}: ${text}`);
        }
        const reader = res.body?.getReader();
        if (!reader)
            throw new Error('HTTP processor: no response body');
        const decoder = new TextDecoder();
        let buffer = '';
        try {
            while (true) {
                const { done, value } = await reader.read();
                if (done)
                    break;
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                buffer = lines.pop();
                for (const line of lines) {
                    if (!line.trim())
                        continue;
                    try {
                        const obj = JSON.parse(line);
                        const chunk = obj.chunk ?? obj.data ?? obj.text ?? obj.token ?? obj.response;
                        if (chunk !== undefined) {
                            yield String(chunk);
                        }
                    }
                    catch {
                        // Not JSON — yield raw line
                        yield line;
                    }
                }
            }
            // Flush bytes still buffered in the decoder (a multi-byte character
            // split across the final read was previously dropped).
            buffer += decoder.decode();
            // Flush a final line that arrived without a trailing newline.
            if (buffer.trim()) {
                try {
                    const obj = JSON.parse(buffer);
                    const chunk = obj.chunk ?? obj.data ?? obj.text ?? obj.token ?? obj.response;
                    if (chunk !== undefined)
                        yield String(chunk);
                }
                catch {
                    yield buffer;
                }
            }
        }
        finally {
            // Release the HTTP connection if iteration ends early; cancel()
            // after a clean end-of-stream is a harmless no-op.
            reader.cancel().catch(() => { });
        }
    }
}
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
/**
 * None processor — pure pass-through, no local model needed.
 *
 * Use case: broker agents that receive tasks and delegate to sub-providers.
 * generate() returns the prompt as-is so the pipeline can forward it.
 */
import type { Processor } from '../processor.js';
export declare class NoneProcessor implements Processor {
    readonly name = "none";
    /** No-op: a pass-through has nothing to check. */
    verify(): Promise<void>;
    /** Resolves with the prompt unchanged. */
    generate(prompt: string): Promise<string>;
    /** Yields the prompt exactly once, unchanged. */
    generateStream(prompt: string): AsyncGenerator<string>;
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* None processor — pure pass-through, no local model needed.
|
|
3
|
+
*
|
|
4
|
+
* Use case: broker agents that receive tasks and delegate to sub-providers.
|
|
5
|
+
* generate() returns the prompt as-is so the pipeline can forward it.
|
|
6
|
+
*/
|
|
7
|
+
export class NoneProcessor {
|
|
8
|
+
name = 'none';
|
|
9
|
+
async verify() {
|
|
10
|
+
// No-op — nothing to check
|
|
11
|
+
}
|
|
12
|
+
async generate(prompt) {
|
|
13
|
+
return prompt;
|
|
14
|
+
}
|
|
15
|
+
async *generateStream(prompt) {
|
|
16
|
+
yield prompt;
|
|
17
|
+
}
|
|
18
|
+
}
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
/**
 * Ollama processor — wraps existing ollama.ts into the Processor interface.
 *
 * Reads OLLAMA_MODEL env var (default "llama3.2").
 * Zero behavior change from the previous hard-coded path in agent.ts.
 */
import type { Processor } from '../processor.js';
export declare class OllamaProcessor implements Processor {
    /** Model name from OLLAMA_MODEL, defaulting to "llama3.2". */
    private model;
    constructor();
    /** Log-friendly identifier, e.g. "ollama:llama3.2". */
    get name(): string;
    /** Rejects when no available Ollama model name starts with the configured model. */
    verify(): Promise<void>;
    /** Non-streaming completion via the local Ollama server. */
    generate(prompt: string): Promise<string>;
    /** Streaming completion; yields tokens as they arrive. */
    generateStream(prompt: string): AsyncGenerator<string>;
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Ollama processor — wraps existing ollama.ts into the Processor interface.
|
|
3
|
+
*
|
|
4
|
+
* Reads OLLAMA_MODEL env var (default "llama3.2").
|
|
5
|
+
* Zero behavior change from the previous hard-coded path in agent.ts.
|
|
6
|
+
*/
|
|
7
|
+
import { generate, generateStream, listModels } from './ollama.js';
|
|
8
|
+
export class OllamaProcessor {
    /** Model name from OLLAMA_MODEL, defaulting to "llama3.2". */
    model;
    constructor() {
        this.model = process.env.OLLAMA_MODEL || 'llama3.2';
    }
    /** Log-friendly identifier, e.g. "ollama:llama3.2". */
    get name() {
        return `ollama:${this.model}`;
    }
    /**
     * Confirm the configured model is available locally; the prefix match
     * lets "llama3.2" match a tagged name like "llama3.2:latest".
     * @throws {Error} listing available models and the pull command to run.
     */
    async verify() {
        const available = await listModels();
        const found = available.some((candidate) => candidate.startsWith(this.model));
        if (!found) {
            throw new Error(`Model "${this.model}" not found. Available: ${available.join(', ')}\n` +
                `Run: ollama pull ${this.model}`);
        }
    }
    /** Non-streaming completion via the ollama.js adapter. */
    async generate(prompt) {
        const request = { model: this.model, prompt };
        return generate(request);
    }
    /** Streaming completion; relays tokens from the ollama.js adapter. */
    async *generateStream(prompt) {
        for await (const token of generateStream({ model: this.model, prompt })) {
            yield token;
        }
    }
}
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
/**
 * Ollama adapter — call local LLM via Ollama HTTP API
 *
 * Ollama runs on localhost:11434 by default.
 * Supports streaming for real-time token delivery over Hyperswarm.
 */
export interface OllamaGenerateOptions {
    /** Model name; defaults to "llama3.2" when omitted. */
    model?: string;
    /** User prompt to complete. */
    prompt: string;
    /** Optional system prompt. */
    system?: string;
    /** Sampling temperature; defaults to 0.7. */
    temperature?: number;
    /** Maximum tokens to generate (Ollama "num_predict"); defaults to 2048. */
    max_tokens?: number;
}
/** One NDJSON chunk from Ollama's streaming /api/generate endpoint. */
export interface OllamaChunk {
    model: string;
    response: string;
    done: boolean;
}
/**
 * Non-streaming generate — returns complete response
 */
export declare function generate(opts: OllamaGenerateOptions): Promise<string>;
/**
 * Streaming generate — yields tokens as they arrive
 */
export declare function generateStream(opts: OllamaGenerateOptions): AsyncGenerator<string>;
/**
 * Check if Ollama is running and list available models
 */
export declare function listModels(): Promise<string[]>;
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Ollama adapter — call local LLM via Ollama HTTP API
|
|
3
|
+
*
|
|
4
|
+
* Ollama runs on localhost:11434 by default.
|
|
5
|
+
* Supports streaming for real-time token delivery over Hyperswarm.
|
|
6
|
+
*/
|
|
7
|
+
// Base URL of the local Ollama server; override via the OLLAMA_BASE_URL env var.
const OLLAMA_BASE = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
|
|
8
|
+
/**
|
|
9
|
+
* Non-streaming generate — returns complete response
|
|
10
|
+
*/
|
|
11
|
+
/**
 * Non-streaming generate — POSTs to /api/generate with stream:false and
 * resolves with the complete response text.
 * @throws {Error} on a non-2xx status, with the response body included.
 */
export async function generate(opts) {
    const payload = {
        model: opts.model || 'llama3.2',
        prompt: opts.prompt,
        system: opts.system,
        stream: false,
        options: {
            temperature: opts.temperature ?? 0.7,
            num_predict: opts.max_tokens ?? 2048,
        },
    };
    const res = await fetch(`${OLLAMA_BASE}/api/generate`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload),
    });
    if (res.ok) {
        const data = await res.json();
        return data.response;
    }
    const text = await res.text();
    throw new Error(`Ollama error ${res.status}: ${text}`);
}
|
|
33
|
+
/**
|
|
34
|
+
* Streaming generate — yields tokens as they arrive
|
|
35
|
+
*/
|
|
36
|
+
/**
 * Streaming generate — POSTs to /api/generate with stream:true and yields the
 * "response" field of each NDJSON chunk as it arrives.
 * @throws {Error} on a non-2xx status or a missing response body.
 */
export async function* generateStream(opts) {
    const res = await fetch(`${OLLAMA_BASE}/api/generate`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            model: opts.model || 'llama3.2',
            prompt: opts.prompt,
            system: opts.system,
            stream: true,
            options: {
                temperature: opts.temperature ?? 0.7,
                num_predict: opts.max_tokens ?? 2048,
            },
        }),
    });
    if (!res.ok) {
        const text = await res.text();
        throw new Error(`Ollama error ${res.status}: ${text}`);
    }
    const reader = res.body?.getReader();
    if (!reader)
        throw new Error('No response body');
    const decoder = new TextDecoder();
    let buffer = '';
    while (true) {
        const { done, value } = await reader.read();
        if (done)
            break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop();
        for (const line of lines) {
            if (!line.trim())
                continue;
            const chunk = JSON.parse(line);
            if (chunk.response) {
                yield chunk.response;
            }
        }
    }
    // Fix: flush the decoder and any final line that arrived without a
    // trailing newline — previously that last chunk was silently dropped
    // (the exec/http processors both flush their buffers; this one did not).
    buffer += decoder.decode();
    if (buffer.trim()) {
        try {
            const chunk = JSON.parse(buffer);
            if (chunk.response) {
                yield chunk.response;
            }
        }
        catch {
            // Truncated/partial trailing JSON — nothing usable to yield.
        }
    }
}
|
|
77
|
+
/**
|
|
78
|
+
* Check if Ollama is running and list available models
|
|
79
|
+
*/
|
|
80
|
+
export async function listModels() {
|
|
81
|
+
const res = await fetch(`${OLLAMA_BASE}/api/tags`);
|
|
82
|
+
if (!res.ok)
|
|
83
|
+
throw new Error(`Ollama not reachable at ${OLLAMA_BASE}`);
|
|
84
|
+
const data = await res.json();
|
|
85
|
+
return (data.models || []).map((m) => m.name);
|
|
86
|
+
}
|
package/dist/agent.d.ts
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
#!/usr/bin/env node
/**
 * Unified Agent Runtime — runs as a long-lived daemon that handles both:
 * 1. Async platform tasks (inbox polling → accept → Ollama → submit result)
 * 2. Real-time P2P streaming (Hyperswarm + Cashu micro-payments)
 *
 * Both channels share a single capacity counter so the agent never overloads.
 *
 * Usage:
 *   AGENT=translator DVM_KIND=5302 OLLAMA_MODEL=qwen2.5:0.5b npm run agent
 *   AGENT=my-agent DVM_KIND=5100 MAX_JOBS=5 npm run agent
 *   DVM_KIND=5100 npm run agent              # no API key → P2P-only mode
 *   AGENT=broker DVM_KIND=5302 PROCESSOR=none SUB_KIND=5100 npm run agent
 *   AGENT=custom DVM_KIND=5100 PROCESSOR=exec:./my-model.sh npm run agent
 *   AGENT=remote DVM_KIND=5100 PROCESSOR=http://localhost:8080 npm run agent
 */
// No exported symbols: `export {}` only marks this declaration file as a module.
export {};
|