agentnetes 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +1018 -0
- package/package.json +35 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,1018 @@
|
|
|
1
|
+
#!/usr/bin/env node
"use strict";
|
|
4
|
+
// esbuild-generated CommonJS interop helpers (do not edit by hand).
var __create = Object.create;
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __getProtoOf = Object.getPrototypeOf;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Copies enumerable own properties of `from` onto `to` as live getters,
// skipping `except` and any key `to` already defines; returns `to`.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wraps a CommonJS module so it can be consumed like an ES module with a
// "default" export.
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
  // If the importer is in node compatibility mode or this is not an ESM
  // file that has been converted to a CommonJS file using a Babel-
  // compatible transform (i.e. "__esModule" has not been set), then set
  // "default" to the CommonJS "module.exports" for node compatibility.
  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
  mod
));
|
|
26
|
+
|
|
27
|
+
// src/index.ts
|
|
28
|
+
var import_child_process4 = require("child_process");
|
|
29
|
+
|
|
30
|
+
// src/commands/run.ts
|
|
31
|
+
var import_fs2 = require("fs");
|
|
32
|
+
var import_path2 = require("path");
|
|
33
|
+
var import_picocolors2 = __toESM(require("picocolors"));
|
|
34
|
+
|
|
35
|
+
// ../../lib/vrlm/runtime.ts
|
|
36
|
+
var import_ai2 = require("ai");
|
|
37
|
+
var import_zod2 = require("zod");
|
|
38
|
+
|
|
39
|
+
// ../../lib/gateway.ts
|
|
40
|
+
var import_gateway = require("@ai-sdk/gateway");
|
|
41
|
+
var import_google = require("@ai-sdk/google");
|
|
42
|
+
// Chooses the model provider for this process: a custom AI gateway when
// AI_GATEWAY_BASE_URL is set, otherwise the Google Generative AI provider
// using whichever API-key env var is present.
function makeProvider() {
  const gatewayBase = process.env.AI_GATEWAY_BASE_URL;
  if (gatewayBase) {
    return (0, import_gateway.createGatewayProvider)({ baseURL: gatewayBase });
  }
  const apiKey = process.env.GOOGLE_API_KEY ?? process.env.GOOGLE_GENERATIVE_AI_API_KEY;
  return (0, import_google.createGoogleGenerativeAI)({ apiKey });
}
|
|
50
|
+
// Single provider instance shared by every model lookup below.
var provider = makeProvider();
// Resolves a "vendor/model" id against the active provider. Gateway
// deployments accept the fully-qualified id; the direct Google provider
// wants the bare model id, so the vendor prefix is dropped there.
function gateway(model) {
  if (process.env.AI_GATEWAY_BASE_URL) {
    return provider(model);
  }
  const slashIdx = model.indexOf("/");
  const modelId = slashIdx === -1 ? model : model.slice(slashIdx + 1);
  return provider(modelId);
}
|
|
58
|
+
|
|
59
|
+
// ../../lib/vrlm/tools.ts
|
|
60
|
+
var import_ai = require("ai");
|
|
61
|
+
var import_zod = require("zod");
|
|
62
|
+
// Builds the two tools every worker agent gets: a grep-backed code search
// and a free-form shell executor, both running inside the given sandbox.
function createAgentTools(sandbox) {
  // Read-only pattern search; returns matching file paths (capped at 40).
  const search = (0, import_ai.tool)({
    description: "Search for patterns across the codebase. Returns matching file paths. Use this to locate where interfaces, types, functions, and patterns are defined.",
    inputSchema: import_zod.z.object({
      pattern: import_zod.z.string().describe("The regex or literal pattern to search for"),
      path: import_zod.z.string().optional().describe("Directory to search in (default: current dir)"),
      fileGlob: import_zod.z.string().optional().describe('Glob for file types, e.g. "*.ts" or "*.json"')
    }),
    execute: async ({ pattern, path = ".", fileGlob }) => {
      // JSON.stringify-quote every model-supplied value; interpolating
      // path/fileGlob raw (as before) let a crafted value inject arbitrary
      // shell syntax into the grep command.
      const include = fileGlob ? `--include=${JSON.stringify(fileGlob)}` : "";
      const cmd = `grep -r ${JSON.stringify(pattern)} ${JSON.stringify(path)} ${include} -l 2>/dev/null | head -40 || true`;
      const result = await sandbox.runCommand("bash", ["-c", cmd]);
      const out = (await result.stdout()).trim();
      return out || "(no matches)";
    }
  });
  // Arbitrary shell execution; output is merged (2>&1) and capped at 200 lines.
  const execute = (0, import_ai.tool)({
    description: "Execute any shell command in the isolated sandbox. Use to read files (cat), list directories (ls/find), run tests (vitest), compile (tsc), write files (tee), install packages, etc.",
    inputSchema: import_zod.z.object({
      command: import_zod.z.string().describe("The shell command to execute")
    }),
    execute: async ({ command: command2 }) => {
      // Running an arbitrary command is this tool's contract, so no quoting
      // here; isolation is the sandbox's job.
      const cmd = `(${command2}) 2>&1 | head -200`;
      const result = await sandbox.runCommand("bash", ["-c", cmd]);
      const output = (await result.stdout()).trim();
      return {
        output: output || "(no output)",
        exitCode: result.exitCode
      };
    }
  });
  return { search, execute };
}
|
|
95
|
+
|
|
96
|
+
// ../../lib/vrlm/local-sandbox.ts
|
|
97
|
+
var import_child_process = require("child_process");
|
|
98
|
+
var import_fs = require("fs");
|
|
99
|
+
var import_os = require("os");
|
|
100
|
+
var import_path = require("path");
|
|
101
|
+
// Sandbox implementation that runs commands directly on the host inside a
// temporary clone directory. Matches the duck-typed interface used by the
// other sandboxes: runCommand, readFile, stop.
var LocalSandbox = class {
  id = "local";
  dir;
  constructor(dir) {
    this.dir = dir;
  }
  async runCommand(shell, args2) {
    // Callers pass ("bash", ["-c", script]). The previous implementation
    // joined the args into the string "-c <script>" and ran it through
    // execSync's default shell, which fails with "-c: command not found".
    // execFileSync invokes the requested shell with its argv intact (and
    // finally honors the `shell` parameter, which was ignored before).
    try {
      const output = (0, import_child_process.execFileSync)(shell, args2, {
        cwd: this.dir,
        encoding: "utf-8",
        timeout: 6e4,
        stdio: ["ignore", "pipe", "pipe"]
      });
      return { stdout: async () => output ?? "", exitCode: 0 };
    } catch (err) {
      // Surface whatever the child produced; fall back to the error message.
      const out = (err.stdout ?? "") + (err.stderr ?? "") || (err.message ?? "");
      return { stdout: async () => out, exitCode: err.status ?? 1 };
    }
  }
  async readFile({ path }) {
    // NOTE(review): relative paths resolve against process.cwd(), not
    // this.dir — confirm callers always pass absolute paths here.
    try {
      const content = (0, import_fs.readFileSync)(path);
      // Expose the single Buffer as an async iterable, mirroring the
      // streaming readFile shape of the hosted sandboxes.
      async function* gen() {
        yield content;
      }
      return gen();
    } catch {
      return null;
    }
  }
  async stop() {
    // Best-effort removal of the temporary checkout.
    try {
      (0, import_fs.rmSync)(this.dir, { recursive: true, force: true });
    } catch {
    }
  }
};
|
|
140
|
+
// Clones the repo into a fresh temp directory and wraps it in a LocalSandbox
// (which owns and later removes the directory).
async function createLocalSandbox(repoUrl) {
  const dir = (0, import_fs.mkdtempSync)((0, import_path.join)((0, import_os.tmpdir)(), "agentnetes-"));
  // JSON.stringify quotes repoUrl so a crafted URL cannot inject shell
  // syntax into the clone command.
  (0, import_child_process.execSync)(`git clone --depth 1 ${JSON.stringify(repoUrl)} .`, {
    cwd: dir,
    timeout: 12e4,
    stdio: "ignore"
  });
  return new LocalSandbox(dir);
}
|
|
149
|
+
|
|
150
|
+
// ../../lib/vrlm/docker-sandbox.ts
|
|
151
|
+
var import_child_process2 = require("child_process");
|
|
152
|
+
// Image used for throwaway worker containers.
var DOCKER_IMAGE = "node:20-alpine";
// Sandbox implementation backed by `docker exec` against a long-lived
// container (created by createDockerSandbox, repo cloned at /workspace).
var DockerSandbox = class {
  id;
  containerId;
  constructor(containerId) {
    this.containerId = containerId;
    this.id = `docker-${containerId.slice(0, 12)}`;
  }
  async runCommand(_shell, args2) {
    // Callers pass ("bash", ["-c", script]). Joining the args verbatim (as
    // before) produced the string "-c <script>", which `sh -c` then tried to
    // run as a command named "-c". Strip the flag and forward only the script.
    const cmd = args2[0] === "-c" ? args2.slice(1).join(" ") : args2.join(" ");
    try {
      const output = (0, import_child_process2.execSync)(
        // -w /workspace: run relative commands inside the cloned repo rather
        // than the container root.
        `docker exec -w /workspace ${this.containerId} sh -c ${JSON.stringify(cmd)}`,
        { encoding: "utf-8", timeout: 6e4 }
      );
      return { stdout: async () => output ?? "", exitCode: 0 };
    } catch (err) {
      // Surface captured child output; fall back to the error message.
      const out = (err.stdout ?? "") + (err.stderr ?? "") || (err.message ?? "");
      return { stdout: async () => out, exitCode: err.status ?? 1 };
    }
  }
  async readFile({ path }) {
    try {
      // JSON.stringify quotes the path so spaces/metacharacters cannot break
      // out of the host shell command; -w resolves relative paths in the repo.
      const content = (0, import_child_process2.execSync)(`docker exec -w /workspace ${this.containerId} cat ${JSON.stringify(path)}`, {
        timeout: 1e4
      });
      // Single-Buffer async iterable, matching the other sandboxes' shape.
      async function* gen() {
        yield content;
      }
      return gen();
    } catch {
      return null;
    }
  }
  async stop() {
    // Best-effort teardown; --rm on the container makes this idempotent.
    try {
      (0, import_child_process2.execSync)(`docker rm -f ${this.containerId}`, { stdio: "ignore" });
    } catch {
    }
  }
};
|
|
193
|
+
// Starts an idle container, installs git, clones the repo to /workspace and
// returns a DockerSandbox over it. The container self-expires after an hour
// (sleep 3600) and --rm cleans it up on removal.
async function createDockerSandbox(repoUrl) {
  const containerId = (0, import_child_process2.execSync)(
    `docker run -d --rm ${DOCKER_IMAGE} sh -c "sleep 3600"`,
    { encoding: "utf-8" }
  ).trim();
  (0, import_child_process2.execSync)(
    `docker exec ${containerId} sh -c "apk add --no-cache git 2>/dev/null"`,
    { timeout: 3e4, stdio: "ignore" }
  );
  // JSON.stringify quotes repoUrl so it cannot inject into the host shell.
  (0, import_child_process2.execSync)(
    `docker exec ${containerId} git clone --depth 1 ${JSON.stringify(repoUrl)} /workspace`,
    { timeout: 12e4, stdio: "ignore" }
  );
  return new DockerSandbox(containerId);
}
|
|
208
|
+
|
|
209
|
+
// ../../lib/vrlm/sandbox-manager.ts
|
|
210
|
+
// Picks the sandbox backend. An explicit SANDBOX_PROVIDER (case-insensitive)
// wins when it names a known provider; otherwise the choice is inferred from
// which provider credentials are present, defaulting to docker.
function detectProvider() {
  const explicit = process.env.SANDBOX_PROVIDER?.toLowerCase();
  const recognized = ["vercel", "e2b", "daytona", "docker", "local"];
  if (explicit !== undefined && recognized.includes(explicit)) {
    return explicit;
  }
  if (process.env.VERCEL_TOKEN || process.env.VERCEL) return "vercel";
  if (process.env.E2B_API_KEY) return "e2b";
  if (process.env.DAYTONA_API_KEY) return "daytona";
  return "docker";
}
|
|
222
|
+
// Creates the sandbox a worker agent will run in, according to the detected
// provider. e2b/daytona are advertised but not bundled, so they warn and
// fall through to the local sandbox.
async function createWorkerSandbox(repoUrl, snapshotId) {
  const provider2 = detectProvider();
  switch (provider2) {
    case "vercel": {
      const { Sandbox: Sandbox2 } = await import("@vercel/sandbox");
      const timeout = 10 * 60 * 1e3;
      // Prefer restoring a prepared snapshot over a fresh shallow clone.
      const source = snapshotId ? { type: "snapshot", snapshotId } : { type: "git", url: repoUrl, depth: 1 };
      return Sandbox2.create({ source, timeout });
    }
    case "e2b":
      console.warn('[sandbox] e2b: install the "e2b" package to use this provider. Falling back to local.');
      break;
    case "daytona":
      console.warn('[sandbox] daytona: install the "@daytonaio/sdk" package to use this provider. Falling back to local.');
      break;
    case "docker":
      return createDockerSandbox(repoUrl);
  }
  return createLocalSandbox(repoUrl);
}
|
|
242
|
+
// Lists files under `dir` matching the given extensions (first 30 found) and
// reads each one back through the sandbox, returning {path, content} pairs.
// Entries whose readFile yields null are skipped.
async function collectNewFiles(sandbox, dir, extensions = ["ts", "js", "json", "md"]) {
  // Build: find <dir> \( -name "*.ts" -o -name "*.js" ... \) -type f
  const nameClauses = extensions.map((ext) => `-name "*.${ext}"`).join(" -o ");
  const findCmd = `find ${dir} \\( ${nameClauses} \\) -type f 2>/dev/null | head -30`;
  const listing = await sandbox.runCommand("bash", ["-c", findCmd]);
  const paths = (await listing.stdout()).trim().split("\n").filter(Boolean);
  const collected = [];
  for (const filePath of paths) {
    const stream = await sandbox.readFile({ path: filePath });
    if (!stream) continue;
    // Accumulate the async-iterable chunks into a single UTF-8 string.
    const chunks = [];
    for await (const chunk of stream) {
      chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
    }
    collected.push({ path: filePath, content: Buffer.concat(chunks).toString("utf-8") });
  }
  return collected;
}
|
|
259
|
+
// Best-effort sandbox teardown: a failing stop() (sync or async) must never
// surface to the caller.
async function stopSandbox(sandbox) {
  await Promise.resolve().then(() => sandbox.stop()).catch(() => {});
}
|
|
265
|
+
|
|
266
|
+
// ../../lib/vrlm/prompts.ts
|
|
267
|
+
var PLANNER_SYSTEM_PROMPT = `
|
|
268
|
+
You are the Tech Lead of Agentnetes, a self-organizing AI agency.
|
|
269
|
+
|
|
270
|
+
Your job is to decompose a software engineering goal into a team of specialist agents.
|
|
271
|
+
Each agent will run concurrently in its own isolated sandbox with access to the codebase.
|
|
272
|
+
Each worker agent will explore the repo themselves before completing their task.
|
|
273
|
+
|
|
274
|
+
Rules:
|
|
275
|
+
- Invent roles that fit the task AND the likely shape of the repo. Do NOT use generic names like "Agent 1".
|
|
276
|
+
- Each agent needs a clear, scoped goal it can complete independently.
|
|
277
|
+
- Keep the team small: 3-5 agents is usually right. Never more than 6.
|
|
278
|
+
- Roles must be complementary, not overlapping.
|
|
279
|
+
- The first agent should always be an explorer or scout that maps the codebase structure.
|
|
280
|
+
- Include a test or verification agent if the task involves writing code.
|
|
281
|
+
- Choose an appropriate emoji icon for each role.
|
|
282
|
+
- Tailor roles to what kind of repo this is likely to be based on the goal text.
|
|
283
|
+
|
|
284
|
+
Common role patterns (adapt to the task):
|
|
285
|
+
- Architecture Scout: maps the repo structure, finds relevant files and patterns
|
|
286
|
+
- [Domain] Engineer: implements the core feature
|
|
287
|
+
- Test Engineer: writes tests and verifies correctness
|
|
288
|
+
- Package/Config Engineer: handles package.json, tsconfig, exports, build config
|
|
289
|
+
- Security Scanner: finds vulnerabilities, unsafe patterns
|
|
290
|
+
- Documentation Writer: writes docs, README, changelogs
|
|
291
|
+
|
|
292
|
+
Respond ONLY with valid JSON in exactly this format, no markdown fences:
|
|
293
|
+
{
|
|
294
|
+
"workers": [
|
|
295
|
+
{
|
|
296
|
+
"role": "Architecture Scout",
|
|
297
|
+
"goal": "Explore the repo structure. Run: find . -maxdepth 3 -type f | head -60 to map the repo. Read package.json or equivalent to detect the stack. Document relevant files and patterns for the goal.",
|
|
298
|
+
"icon": "\u{1F50D}"
|
|
299
|
+
}
|
|
300
|
+
]
|
|
301
|
+
}
|
|
302
|
+
`.trim();
|
|
303
|
+
// Builds the user-turn prompt for the planner model from the raw goal text.
function buildPlannerPrompt(goal) {
  return `Goal: ${goal}

Decompose this into a specialist agent team. Return JSON only, no markdown fences.`;
}
|
|
308
|
+
// Builds the system instructions for a worker agent from its task (role +
// goal) and the findings already reported by sibling agents (appended as a
// trailing section only when non-empty).
function buildWorkerPrompt(task, findings) {
  const findingsSection = findings.length > 0 ? `

Findings from other agents:
${findings.map((f) => `- ${f}`).join("\n")}` : "";
  return `
You are ${task.role} in the Agentnetes swarm.

Your goal: ${task.goal}

You have two tools:
- search(pattern, path?, fileGlob?): grep the codebase for patterns, returns matching file paths
- execute(command): run any shell command in your sandbox (cat, ls, find, tsc, vitest, etc.)

The codebase is available in your sandbox. Start by exploring to understand the structure, then complete your goal.

Strategy:
1. First, explore the repo: run find . -maxdepth 3 -type f | head -60 to map the layout
2. Read package.json or equivalent config to detect the language, framework and test runner
3. Use search() to find relevant files and patterns related to your goal
4. Use execute() to read files (cat), run build or test commands (tsc, vitest, pytest, etc.)
5. When writing code, use execute() with heredoc or tee to write files
6. Run verification commands to check your work
7. Fix any errors before completing

Be methodical. Explore before implementing. Verify after implementing.
If you encounter errors, diagnose and fix them.
When done, provide a clear summary of everything you accomplished.${findingsSection}
`.trim();
}
|
|
338
|
+
// Builds the final synthesis prompt: the original goal plus each worker's
// summary, asking the planner model for a concise markdown wrap-up.
function buildSynthesisPrompt(goal, workerSummaries) {
  return `
You are the Tech Lead summarizing the results of your agent team.

Original goal: ${goal}

Agent results:
${workerSummaries.map((s, i) => `Agent ${i + 1}:
${s}`).join("\n\n")}

Write a concise markdown summary covering:
1. What was accomplished (with specific files and test results if available)
2. Any issues encountered and how they were resolved
3. What to do next (if anything remains)

Be specific. Mention actual file names, test counts, error counts.
`.trim();
}
|
|
356
|
+
|
|
357
|
+
// ../../lib/a2a.ts
|
|
358
|
+
// URL-safe slug for a role name: lowercase, whitespace runs become single
// hyphens, and anything outside [a-z0-9-] is dropped.
function slugify(role) {
  const lowered = role.toLowerCase();
  const hyphenated = lowered.replace(/\s+/g, "-");
  return hyphenated.replace(/[^a-z0-9-]/g, "");
}
|
|
361
|
+
// Returns a static list of A2A skill descriptors for a worker, chosen by
// keyword-matching the lowercased role name. Branch order matters: the first
// match wins, so e.g. "Security Engineer" hits the engineer branch before
// the security branch.
function skillsForRole(role) {
  const r = role.toLowerCase();
  // Exploration / architecture roles.
  if (r.includes("scout") || r.includes("explorer") || r.includes("architect")) {
    return [
      {
        id: "explore-codebase",
        name: "Explore Codebase",
        description: "Scans a repository and documents architecture patterns, interface contracts, and file conventions",
        tags: ["codebase", "architecture", "typescript", "monorepo"],
        examples: [
          "Explore the provider package structure in vercel/ai",
          "Document interface contracts for LanguageModelV1",
          "Find all test patterns across packages"
        ]
      },
      {
        id: "map-dependencies",
        name: "Map Dependencies",
        description: "Traces dependency graphs across a monorepo and identifies shared utilities",
        tags: ["dependencies", "monorepo", "pnpm"],
        examples: ["Map all workspace dependencies in vercel/ai", "Find all packages that depend on @ai-sdk/provider"]
      }
    ];
  }
  // Implementation roles.
  if (r.includes("provider") || r.includes("implement") || r.includes("engineer")) {
    return [
      {
        id: "implement-provider",
        name: "Implement Provider",
        description: "Creates a new AI SDK provider package following existing patterns \u2014 provider class, chat model, streaming",
        tags: ["typescript", "ai-sdk", "provider", "streaming"],
        examples: [
          "Add @ai-sdk/deepseek with streaming and reasoning tokens",
          "Implement a new OpenAI-compatible provider",
          "Add tool-calling support to an existing provider"
        ]
      },
      {
        id: "implement-feature",
        name: "Implement Feature",
        description: "Builds a software feature in an existing codebase following discovered conventions",
        tags: ["typescript", "implementation", "feature"],
        examples: ["Add a /research command to a CLI tool", "Implement a new API endpoint following existing patterns"]
      }
    ];
  }
  // Testing / verification roles.
  if (r.includes("test") || r.includes("verif") || r.includes("qa")) {
    return [
      {
        id: "write-tests",
        name: "Write Tests",
        description: "Writes comprehensive tests following the codebase test conventions \u2014 unit, integration, streaming",
        tags: ["vitest", "testing", "typescript", "coverage"],
        examples: [
          "Write tests for a new provider package",
          "Add streaming tests for a chat model",
          "Write integration tests for a new API endpoint"
        ]
      },
      {
        id: "run-verification",
        name: "Run Verification",
        description: "Runs tests, type checks, and linting to verify correctness. Reports failures with context.",
        tags: ["typescript", "vitest", "tsc", "verification"],
        examples: ["Verify TypeScript compilation passes", "Run all tests and report failures"]
      }
    ];
  }
  // Packaging / configuration roles.
  if (r.includes("package") || r.includes("config") || r.includes("integrat")) {
    return [
      {
        id: "setup-package",
        name: "Setup Package",
        description: "Configures package.json, tsconfig.json, exports, and monorepo wiring for a new package",
        tags: ["monorepo", "pnpm", "typescript", "package"],
        examples: [
          "Set up a new @ai-sdk/* package from scratch",
          "Configure ESM + CJS dual exports",
          "Wire a new package into pnpm workspace"
        ]
      }
    ];
  }
  // Security / audit roles.
  if (r.includes("security") || r.includes("scanner") || r.includes("auth") || r.includes("risk")) {
    return [
      {
        id: "security-audit",
        name: "Security Audit",
        description: "Analyzes code for security vulnerabilities \u2014 auth flows, input validation, dependency CVEs",
        tags: ["security", "audit", "vulnerabilities", "owasp"],
        examples: [
          "Audit authentication flows for session fixation",
          "Scan dependencies for known CVEs",
          "Find XSS vulnerabilities in user input handling"
        ]
      }
    ];
  }
  // Fallback for roles no branch recognized.
  return [
    {
      id: "general-task",
      name: "General Engineering Task",
      description: "Performs software engineering tasks \u2014 exploration, implementation, verification",
      tags: ["typescript", "engineering"],
      examples: ["Implement a requested feature following existing patterns"]
    }
  ];
}
|
|
469
|
+
// Publishes a worker task as an A2A-style agent card, keyed by the slugified
// role and carrying the role-derived skill list.
function buildA2ACard(task) {
  const roleSlug = slugify(task.role);
  const capabilities = {
    streaming: true,
    pushNotifications: false,
    stateTransitionHistory: true
  };
  const ioModes = ["text/plain", "application/json"];
  return {
    name: task.role,
    description: task.goal,
    url: `https://agentnetes.vercel.app/agents/${roleSlug}`,
    version: "1.0.0",
    provider: { organization: "Agentnetes" },
    capabilities,
    authentication: { schemes: ["bearer"] },
    defaultInputModes: [...ioModes],
    defaultOutputModes: [...ioModes],
    skills: skillsForRole(task.role)
  };
}
|
488
|
+
|
|
489
|
+
// ../../lib/vrlm/runtime.ts
|
|
490
|
+
// Zod schema for the planner model's JSON response: a list of worker specs.
// `icon` falls back to a robot emoji when the model omits it.
var WorkerPlanSchema = import_zod2.z.object({
  workers: import_zod2.z.array(
    import_zod2.z.object({
      role: import_zod2.z.string(),
      goal: import_zod2.z.string(),
      icon: import_zod2.z.string().default("\u{1F916}")
    })
  )
});
|
|
499
|
+
function uid() {
|
|
500
|
+
return crypto.randomUUID();
|
|
501
|
+
}
|
|
502
|
+
// Maps a filename's extension to a syntax-highlighting language id.
// The extension is lowercased first (previously "NOTES.MD" fell through to
// "text"); unknown or missing extensions still return "text".
function detectLanguage(filename) {
  const ext = (filename.split(".").pop() ?? "").toLowerCase();
  const map = {
    ts: "typescript",
    tsx: "tsx",
    js: "javascript",
    jsx: "jsx",
    json: "json",
    md: "markdown",
    sh: "bash",
    py: "python"
  };
  return map[ext] ?? "text";
}
|
|
516
|
+
// Orchestrates one full run: plan a worker team, execute the workers in
// parallel (each in its own sandbox), then synthesize their summaries.
// All progress is reported through the injected event emitter.
var VrlmRuntime = class {
  emitter;
  config;
  constructor(emitter, config) {
    this.emitter = emitter;
    this.config = config;
  }
  // Top-level entry point. Emits a root "Tech Lead" task, fans out workers,
  // then a "synthesis" and a "done" event; any error becomes an "error"
  // event. The emitter is always closed at the end.
  async run(goal) {
    // repoSnapshotId is optional (Vercel snapshot restore); repoUrl is used
    // by every other sandbox provider.
    const snapshotId = this.config.repoSnapshotId;
    const repoUrl = this.config.repoUrl;
    try {
      const rootTask = this.makeTask({
        role: "Tech Lead",
        goal: `Plan and coordinate: ${goal}`,
        icon: "\u{1F9E0}",
        depth: 0,
        parentId: null
      });
      this.emitTaskCreated(rootTask);
      this.emitTaskUpdate(rootTask.id, "running", "Planning agent team...");
      const plan = await this.runPlanner(goal);
      this.emitTaskUpdate(rootTask.id, "running", `Spawning ${plan.workers.length} agents`);
      // Cap the planner's team at maxWorkers.
      const workerTasks = plan.workers.slice(0, this.config.maxWorkers).map(
        (w) => this.makeTask({
          role: w.role,
          goal: w.goal,
          icon: w.icon,
          depth: 1,
          parentId: rootTask.id
        })
      );
      for (const task of workerTasks) {
        rootTask.children.push(task.id);
        this.emitTaskCreated(task);
      }
      // allSettled: one failed worker must not abort the others.
      const workerResults = await Promise.allSettled(
        workerTasks.map((task) => this.runWorker(task, repoUrl, snapshotId))
      );
      const summaries = [];
      for (let i = 0; i < workerResults.length; i++) {
        const result = workerResults[i];
        if (result.status === "fulfilled") {
          summaries.push(`${workerTasks[i].role}: ${result.value}`);
        } else {
          summaries.push(`${workerTasks[i].role}: FAILED - ${result.reason}`);
        }
      }
      this.emitTaskUpdate(rootTask.id, "running", "Synthesizing results...");
      const synthesis = await this.runSynthesis(goal, summaries);
      this.emitter.emit({
        type: "synthesis",
        taskId: rootTask.id,
        data: { content: synthesis }
      });
      this.emitTaskCompleted(rootTask.id, [], []);
      this.emitter.emit({
        type: "done",
        data: { content: synthesis }
      });
    } catch (err) {
      this.emitter.emit({
        type: "error",
        data: { message: String(err) }
      });
    } finally {
      this.emitter.close();
    }
  }
  // ── Planner ─────────────────────────────────────────────────────────────────
  // Asks the planner model for a worker team, strips any markdown code
  // fences from the response, and validates it against WorkerPlanSchema.
  // Throws (propagating to run()'s catch) on invalid JSON.
  async runPlanner(goal) {
    const { text } = await (0, import_ai2.generateText)({
      model: gateway(this.config.plannerModel),
      system: PLANNER_SYSTEM_PROMPT,
      prompt: buildPlannerPrompt(goal),
      maxOutputTokens: 2e3
    });
    const clean = text.replace(/^```(?:json)?\n?/m, "").replace(/\n?```$/m, "").trim();
    const raw = JSON.parse(clean);
    return WorkerPlanSchema.parse(raw);
  }
  // ── Worker ──────────────────────────────────────────────────────────────────
  // Runs one worker: provision a sandbox, drive a tool-loop agent over it,
  // stream findings/terminal events, collect produced files as artifacts,
  // and return the agent's final text.
  // NOTE(review): if agent.stream() or the stream loop throws, the sandbox
  // is never stopped (stopSandbox only runs on the success path) — possible
  // sandbox/container leak; confirm and consider a try/finally.
  async runWorker(task, repoUrl, snapshotId) {
    let sandbox = await createWorkerSandbox(repoUrl, snapshotId);
    task.sandboxId = sandbox.id ?? "local";
    this.emitTaskUpdate(task.id, "running", "Sandbox ready");
    this.emitter.emit({
      type: "task-updated",
      taskId: task.id,
      data: {
        a2aCard: buildA2ACard(task),
        status: "running",
        statusText: "Sandbox ready"
      }
    });
    const tools = createAgentTools(sandbox);
    const findings = [];
    const terminalLines = [];
    const agent = new import_ai2.ToolLoopAgent({
      model: gateway(this.config.workerModel),
      tools,
      stopWhen: (0, import_ai2.stepCountIs)(this.config.maxStepsPerAgent),
      instructions: buildWorkerPrompt(task, findings),
      onStepFinish: async ({ text, toolCalls, toolResults }) => {
        // Treat the first few lines of each step's text as "findings".
        if (text?.trim()) {
          const lines = text.trim().split("\n").filter(Boolean);
          for (const line of lines.slice(0, 3)) {
            findings.push(line);
            this.emitter.emit({
              type: "finding",
              taskId: task.id,
              data: { text: line }
            });
          }
        }
        // Mirror each tool call (and a short result preview) as terminal lines.
        for (let i = 0; i < (toolCalls?.length ?? 0); i++) {
          const call = toolCalls[i];
          const result2 = toolResults?.[i];
          const input = call.input ?? {};
          let cmdLine = "";
          if (call.toolName === "search") {
            cmdLine = `$ grep -r "${input.pattern}" ${input.path ?? "."} ${input.fileGlob ? `--include="${input.fileGlob}"` : ""}`;
          } else {
            cmdLine = `$ ${input.command ?? ""}`;
          }
          terminalLines.push(cmdLine);
          this.emitter.emit({
            type: "terminal",
            taskId: task.id,
            data: { line: cmdLine }
          });
          if (result2?.result) {
            const output = typeof result2.result === "string" ? result2.result : result2.result.output ?? JSON.stringify(result2.result);
            const preview = String(output).split("\n").slice(0, 8).join("\n");
            if (preview.trim()) {
              this.emitter.emit({
                type: "terminal",
                taskId: task.id,
                data: { line: preview, dim: true }
              });
            }
          }
        }
        this.emitTaskUpdate(task.id, "running", `Step ${terminalLines.length} done`);
      }
    });
    // Drain the stream; per-step work happens in onStepFinish above.
    const result = await agent.stream({ prompt: task.goal });
    for await (const _ of result.fullStream) {
    }
    const finalText = await result.text;
    const artifacts = [];
    try {
      // NOTE(review): "/vercel/sandbox" is the Vercel mount path, but this is
      // also used for docker/local sandboxes (repo at /workspace or a temp
      // dir) — artifacts are likely never found there; confirm.
      const newFiles = await collectNewFiles(sandbox, "/vercel/sandbox", ["ts", "tsx", "js", "json", "md"]);
      for (const file of newFiles) {
        const filename = file.path.split("/").pop() ?? file.path;
        const artifact = {
          filename,
          content: file.content,
          language: detectLanguage(filename)
        };
        artifacts.push(artifact);
        this.emitter.emit({
          type: "artifact",
          taskId: task.id,
          data: { artifact }
        });
      }
    } catch {
    }
    this.emitTaskCompleted(task.id, findings, artifacts);
    await stopSandbox(sandbox);
    return finalText || findings.join("\n") || "No output";
  }
  // ── Synthesis ────────────────────────────────────────────────────────────────
  // Single planner-model call that turns the worker summaries into the final
  // markdown report.
  async runSynthesis(goal, summaries) {
    const { text } = await (0, import_ai2.generateText)({
      model: gateway(this.config.plannerModel),
      prompt: buildSynthesisPrompt(goal, summaries),
      maxOutputTokens: 1500
    });
    return text;
  }
  // ── Helpers ──────────────────────────────────────────────────────────────────
  // Creates a task record; depth 0 (the root) is attributed to the planner
  // model, everything else to the worker model.
  makeTask(params) {
    return {
      id: uid(),
      parentId: params.parentId,
      role: params.role,
      goal: params.goal,
      icon: params.icon,
      status: "pending",
      statusText: "Waiting",
      findings: [],
      artifacts: [],
      children: [],
      sandboxId: null,
      model: params.depth === 0 ? this.config.plannerModel : this.config.workerModel,
      createdAt: Date.now(),
      completedAt: null,
      depth: params.depth
    };
  }
  emitTaskCreated(task) {
    this.emitter.emit({ type: "task-created", taskId: task.id, data: { task } });
  }
  emitTaskUpdate(id, status, statusText) {
    this.emitter.emit({ type: "task-updated", taskId: id, data: { status, statusText } });
  }
  emitTaskCompleted(id, findings, artifacts) {
    this.emitter.emit({
      type: "task-completed",
      taskId: id,
      data: { findings, artifacts }
    });
  }
};
|
|
731
|
+
|
|
732
|
+
// ../../lib/vrlm/events.ts
|
|
733
|
+
/**
 * Minimal replaying event bus.
 * Every emitted event is retained in `queue`, so a handler registered late
 * is immediately replayed the full history before receiving live events.
 * `close()` only flips a status flag queryable via `isClosed()`; it does not
 * stop emission or detach handlers.
 */
var VrlmEventEmitter = class {
  handlers = [];
  queue = [];
  closed = false;
  /** Subscribe a handler; replays all previously emitted events to it first. */
  on(handler) {
    this.handlers.push(handler);
    this.queue.forEach((event) => handler(event));
  }
  /** Record the event for future subscribers, then fan out to current ones. */
  emit(event) {
    this.queue.push(event);
    this.handlers.forEach((handler) => handler(event));
  }
  /** Mark the emitter as closed (status flag only). */
  close() {
    this.closed = true;
  }
  /** @returns {boolean} whether close() has been called. */
  isClosed() {
    return this.closed;
  }
};
|
|
756
|
+
|
|
757
|
+
// ../../lib/vrlm/types.ts
|
|
758
|
+
// Baseline runtime configuration. Callers layer run-specific values on top
// (e.g. the run command overrides repoUrl and the two model fields).
var DEFAULT_CONFIG = {
  maxDepth: 3,
  // deepest allowed task nesting (root task is depth 0)
  maxWorkers: 6,
  // upper bound on concurrent worker agents
  maxTotalTasks: 15,
  // hard cap on tasks created in a single run
  maxStepsPerAgent: 20,
  // per-agent step budget
  plannerModel: "google/gemini-2.5-pro",
  // model for the planner (depth-0) agent
  workerModel: "google/gemini-2.5-flash",
  // model for worker (depth > 0) agents
  repoUrl: ""
  // filled in at run time from the git remote
};
|
|
767
|
+
|
|
768
|
+
// src/renderer.ts
|
|
769
|
+
var import_picocolors = __toESM(require("picocolors"));
|
|
770
|
+
/**
 * Renders runtime events as colorized terminal output.
 * Keeps only one piece of state: a taskId -> role map, so later events
 * (updates, findings, completions) can be labeled with the task's role.
 */
var TerminalRenderer = class {
  taskRoles = /* @__PURE__ */ new Map();
  // taskId -> role
  /**
   * Dispatch one event to the matching console output.
   * Event types without a case fall through silently (no default branch).
   */
  render(event) {
    switch (event.type) {
      case "task-created": {
        const task = event.data.task;
        this.taskRoles.set(task.id, task.role);
        if (task.depth === 0) {
          // Root task: bold headline (brain icon fallback) plus dimmed goal.
          console.log("");
          console.log(import_picocolors.default.bold(import_picocolors.default.white(` ${task.icon ?? "\u{1F9E0}"} ${task.role}`)));
          console.log(import_picocolors.default.dim(` ${task.goal}`));
        } else {
          // Child task: single cyan line (robot icon fallback).
          console.log(import_picocolors.default.dim(` + `) + import_picocolors.default.cyan(`${task.icon ?? "\u{1F916}"} ${task.role}`));
        }
        break;
      }
      case "task-updated": {
        // Updates for unknown tasks (empty role) or without statusText are
        // dropped silently.
        const role = this.taskRoles.get(event.taskId ?? "") ?? "";
        const status = event.data.statusText;
        if (status && role) {
          console.log(import_picocolors.default.dim(` [${role}] `) + import_picocolors.default.white(status));
        }
        break;
      }
      case "terminal": {
        // Raw terminal/sandbox output; `dim` flag picks muted vs. blue style.
        const line = event.data.line;
        const dim = event.data.dim;
        if (dim) {
          console.log(import_picocolors.default.dim(` ${line}`));
        } else {
          console.log(import_picocolors.default.blue(` ${line}`));
        }
        break;
      }
      case "finding": {
        const text = event.data.text;
        const role = this.taskRoles.get(event.taskId ?? "") ?? "";
        console.log(import_picocolors.default.yellow(` [${role}] `) + text);
        break;
      }
      case "artifact": {
        const artifact = event.data.artifact;
        console.log(import_picocolors.default.green(` + ${artifact.filename}`) + import_picocolors.default.dim(` (${artifact.language})`));
        break;
      }
      case "collaboration": {
        console.log(import_picocolors.default.magenta(` ~ Collaboration: `) + (event.data.message ?? ""));
        break;
      }
      case "task-completed": {
        const role = this.taskRoles.get(event.taskId ?? "") ?? "";
        console.log(import_picocolors.default.green(` ok `) + role);
        break;
      }
      case "synthesis": {
        // Final cross-task summary content, printed under a header.
        console.log("");
        console.log(import_picocolors.default.bold(import_picocolors.default.white("Summary")));
        console.log(import_picocolors.default.dim("------"));
        console.log(event.data.content);
        break;
      }
      case "done": {
        console.log("");
        console.log(import_picocolors.default.bold(import_picocolors.default.green("Done.")));
        break;
      }
      case "error": {
        // Errors go to stderr, everything else to stdout.
        console.error(import_picocolors.default.red("Error: ") + event.data.message);
        break;
      }
    }
  }
};
|
|
844
|
+
|
|
845
|
+
// src/commands/run.ts
|
|
846
|
+
/**
 * Best-effort lookup of a pre-warmed sandbox snapshot id for this repo.
 * Reads .agentnetes/snapshot.json from the current working directory and
 * returns its snapshotId only when the stored repoUrl matches; returns
 * undefined when the file is absent, unreadable, malformed, or for a
 * different repo.
 */
function loadSnapshotId(repoUrl) {
  const snapshotFile = (0, import_path2.join)(process.cwd(), ".agentnetes", "snapshot.json");
  if (!(0, import_fs2.existsSync)(snapshotFile)) return void 0;
  let data;
  try {
    data = JSON.parse((0, import_fs2.readFileSync)(snapshotFile, "utf-8"));
  } catch {
    // Corrupt or unreadable file: fall back to a cold start.
    return void 0;
  }
  return data.repoUrl === repoUrl ? data.snapshotId : void 0;
}
|
|
856
|
+
/**
 * Entry point for the "run" command: print the banner, assemble the runtime
 * configuration (env vars PLANNER_MODEL / WORKER_MODEL override the model
 * defaults), wire the event emitter to the terminal renderer, and execute
 * the goal.
 */
async function run({ goal, repoUrl }) {
  const pc = import_picocolors2.default;
  const snapshotId = loadSnapshotId(repoUrl);
  console.log("");
  console.log(pc.bold(pc.white("Agentnetes")));
  console.log(pc.dim("Zero to a self-organizing AI agency. On demand."));
  console.log("");
  console.log(pc.dim("Repo: ") + repoUrl);
  console.log(pc.dim("Goal: ") + pc.white(goal));
  if (snapshotId) {
    console.log(pc.dim("Snapshot:") + pc.green(" pre-warmed (fast start)"));
  } else {
    console.log(pc.dim("Sandbox: ") + pc.yellow('cloning from git (run "agentnetes snapshot create" to speed this up)'));
  }
  console.log("");
  const config = {
    ...DEFAULT_CONFIG,
    repoUrl,
    repoSnapshotId: snapshotId,
    plannerModel: process.env.PLANNER_MODEL ?? DEFAULT_CONFIG.plannerModel,
    workerModel: process.env.WORKER_MODEL ?? DEFAULT_CONFIG.workerModel
  };
  const emitter = new VrlmEventEmitter();
  const renderer = new TerminalRenderer();
  emitter.on((event) => renderer.render(event));
  const runtime = new VrlmRuntime(emitter, config);
  await runtime.run(goal);
}
|
|
883
|
+
|
|
884
|
+
// src/commands/snapshot.ts
|
|
885
|
+
var import_child_process3 = require("child_process");
|
|
886
|
+
var import_fs3 = require("fs");
|
|
887
|
+
var import_path3 = require("path");
|
|
888
|
+
var import_picocolors3 = __toESM(require("picocolors"));
|
|
889
|
+
var import_sandbox = require("@vercel/sandbox");
|
|
890
|
+
/**
 * "snapshot create" command: spin up a sandbox from the current repo's
 * "origin" remote, install dependencies (pnpm/yarn/npm detected from the
 * lockfile), snapshot the sandbox, and persist the snapshot id to
 * .agentnetes/snapshot.json so later runs can start warm. Also ensures
 * .agentnetes/ is gitignored. Exits with code 1 when no git remote exists.
 */
async function snapshotCreate() {
  let repoUrl;
  try {
    repoUrl = (0, import_child_process3.execSync)("git remote get-url origin", { encoding: "utf-8" }).trim();
    if (repoUrl.startsWith("git@github.com:")) {
      // Convert an SSH remote to HTTPS. Rewrite only the host prefix here;
      // the trailing ".git" is stripped below. (Chaining .replace(".git", "")
      // removed the FIRST ".git" anywhere in the URL, mangling repos named
      // like "user.github.io".)
      repoUrl = repoUrl.replace("git@github.com:", "https://github.com/");
    }
    if (repoUrl.endsWith(".git")) repoUrl = repoUrl.slice(0, -4);
  } catch {
    console.error('Error: not inside a git repository with a remote "origin".');
    process.exit(1);
    return;
  }
  console.log("");
  console.log(import_picocolors3.default.bold("Creating sandbox snapshot"));
  console.log(import_picocolors3.default.dim("Repo: ") + repoUrl);
  console.log(import_picocolors3.default.dim("This takes 3-8 minutes depending on repo size."));
  console.log("");
  // Shallow clone into a fresh sandbox; generous timeout for large installs.
  const sandbox = await import_sandbox.Sandbox.create({
    source: { type: "git", url: repoUrl, depth: 1 },
    timeout: 15 * 60 * 1e3
  });
  console.log(import_picocolors3.default.dim("Sandbox created. Installing dependencies..."));
  // Detect the package manager from the lockfile present in the repo root.
  const detectCmd = await sandbox.runCommand("bash", [
    "-c",
    "if [ -f pnpm-lock.yaml ]; then echo pnpm; elif [ -f yarn.lock ]; then echo yarn; else echo npm; fi"
  ]);
  const pkgManager = (await detectCmd.stdout()).trim();
  const installCmd = pkgManager === "pnpm" ? "npm i -g pnpm && pnpm install --frozen-lockfile" : pkgManager === "yarn" ? "yarn install --frozen-lockfile" : "npm install";
  console.log(import_picocolors3.default.dim(`Running ${pkgManager} install...`));
  // Only the install tail is echoed to keep the terminal readable.
  const install = await sandbox.runCommand("bash", ["-c", `${installCmd} 2>&1 | tail -10`]);
  console.log(import_picocolors3.default.dim(await install.stdout()));
  console.log(import_picocolors3.default.dim("Taking snapshot..."));
  const snap = await sandbox.snapshot();
  const dir = (0, import_path3.join)(process.cwd(), ".agentnetes");
  if (!(0, import_fs3.existsSync)(dir)) (0, import_fs3.mkdirSync)(dir, { recursive: true });
  (0, import_fs3.writeFileSync)((0, import_path3.join)(dir, "snapshot.json"), JSON.stringify({
    snapshotId: snap.snapshotId,
    repoUrl,
    createdAt: (/* @__PURE__ */ new Date()).toISOString()
  }, null, 2));
  // Keep the cache directory out of version control (best effort: only if a
  // .gitignore already exists and doesn't mention it yet).
  const gitignorePath = (0, import_path3.join)(process.cwd(), ".gitignore");
  if ((0, import_fs3.existsSync)(gitignorePath)) {
    const content = (0, import_fs3.readFileSync)(gitignorePath, "utf-8");
    if (!content.includes(".agentnetes/")) {
      (0, import_fs3.appendFileSync)(gitignorePath, "\n.agentnetes/\n");
    }
  }
  console.log("");
  console.log(import_picocolors3.default.bold(import_picocolors3.default.green("Snapshot created!")));
  console.log(import_picocolors3.default.dim("ID: ") + snap.snapshotId);
  console.log(import_picocolors3.default.dim("Saved to .agentnetes/snapshot.json"));
  console.log("");
  console.log("Future runs will use this snapshot automatically (near-instant start).");
  await sandbox.stop();
}
|
|
946
|
+
/**
 * "snapshot list" command: fetch and print the account's sandbox snapshots
 * from the Vercel API. Requires VERCEL_TOKEN; exits with code 1 when the
 * token is missing or the API call fails.
 */
async function snapshotList() {
  console.log("");
  console.log(import_picocolors3.default.bold("Available snapshots"));
  console.log("");
  const token = process.env.VERCEL_TOKEN;
  if (!token) {
    console.error(import_picocolors3.default.red("Error: VERCEL_TOKEN environment variable is not set."));
    process.exit(1);
    return;
  }
  const resp = await fetch("https://api.vercel.com/v1/sandbox/snapshots", {
    headers: { Authorization: `Bearer ${token}` }
  });
  if (!resp.ok) {
    console.error(import_picocolors3.default.red(`Error fetching snapshots: ${resp.status} ${resp.statusText}`));
    process.exit(1);
    return;
  }
  const data = await resp.json();
  // Guard against a response without a snapshots array — iterating
  // `data.snapshots` directly would throw a TypeError.
  const snapshots = data.snapshots ?? [];
  if (snapshots.length === 0) {
    console.log(import_picocolors3.default.dim("No snapshots found."));
    return;
  }
  for (const snap of snapshots) {
    console.log(import_picocolors3.default.cyan(snap.id));
    console.log(import_picocolors3.default.dim(`  Created: ${new Date(snap.createdAt).toLocaleString()}`));
    console.log(import_picocolors3.default.dim(`  Size: ${Math.round(snap.sizeBytes / 1024 / 1024)} MB`));
    console.log("");
  }
}
|
|
972
|
+
|
|
973
|
+
// src/index.ts
|
|
974
|
+
// CLI argument vector: everything after the node binary and script path.
var args = process.argv.slice(2);
// First positional argument selects the subcommand ("run", "snapshot", ...).
var command = args[0];
|
|
976
|
+
/**
 * CLI dispatcher. Routes to:
 *   run "goal"        — run agents against the current git repo's "origin"
 *   snapshot create   — pre-warm a sandbox snapshot
 *   snapshot list     — list available snapshots
 * Any other invocation prints usage. Exits with code 1 on missing goal or
 * missing git remote.
 */
async function main() {
  if (command === "run") {
    const goal = args[1];
    if (!goal) {
      console.error('Usage: agentnetes run "your goal"');
      process.exit(1);
    }
    let repoUrl;
    try {
      repoUrl = (0, import_child_process4.execSync)("git remote get-url origin", { encoding: "utf-8" }).trim();
      if (repoUrl.startsWith("git@github.com:")) {
        // Convert an SSH remote to HTTPS. Rewrite only the host prefix; the
        // trailing ".git" is stripped below. (Chaining .replace(".git", "")
        // removed the FIRST ".git" anywhere in the URL, mangling repos named
        // like "user.github.io".)
        repoUrl = repoUrl.replace("git@github.com:", "https://github.com/");
      }
      if (repoUrl.endsWith(".git")) {
        repoUrl = repoUrl.slice(0, -4);
      }
    } catch {
      console.error('Error: not inside a git repository or no remote "origin" found.');
      console.error("Run this command from the root of a git repository.");
      process.exit(1);
    }
    await run({ goal, repoUrl });
  } else if (command === "snapshot" && args[1] === "create") {
    await snapshotCreate();
  } else if (command === "snapshot" && args[1] === "list") {
    await snapshotList();
  } else {
    console.log("agentnetes - zero to a self-organizing AI agency. On demand.");
    console.log("");
    console.log("Usage:");
    console.log('  agentnetes run "goal"        Run agents on the current git repo');
    console.log("  agentnetes snapshot create   Pre-warm a sandbox snapshot for faster runs");
    console.log("  agentnetes snapshot list     List available snapshots");
    console.log("");
    console.log("Environment variables:");
    console.log("  AI_GATEWAY_BASE_URL   Vercel AI Gateway endpoint");
    console.log("  VERCEL_TOKEN          Vercel API token (for sandbox)");
  }
}
|
|
1015
|
+
// Top-level entry point: surface any unhandled rejection and exit non-zero
// so callers/shell scripts can detect failure. `err.message ?? err` also
// copes with non-Error throw values (e.g. plain strings).
main().catch((err) => {
  console.error("Fatal:", err.message ?? err);
  process.exit(1);
});
|
package/package.json
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "agentnetes",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Zero to a self-organizing AI agency. On demand.",
|
|
5
|
+
"keywords": ["ai", "agents", "cli", "gemini", "vercel", "autonomous"],
|
|
6
|
+
"license": "MIT",
|
|
7
|
+
"repository": {
|
|
8
|
+
"type": "git",
|
|
9
|
+
"url": "https://github.com/Shashikant86/agentnetes"
|
|
10
|
+
},
|
|
11
|
+
"homepage": "https://shashikant86.github.io/agentnetes",
|
|
12
|
+
"bin": {
|
|
13
|
+
"agentnetes": "./dist/index.js"
|
|
14
|
+
},
|
|
15
|
+
"files": ["dist"],
|
|
16
|
+
"scripts": {
|
|
17
|
+
"build": "tsup",
|
|
18
|
+
"dev": "tsx src/index.ts",
|
|
19
|
+
"prepublishOnly": "npm run build"
|
|
20
|
+
},
|
|
21
|
+
"dependencies": {
|
|
22
|
+
"@ai-sdk/gateway": "beta",
|
|
23
|
+
"@ai-sdk/google": "^3.0.52",
|
|
24
|
+
"@vercel/sandbox": "latest",
|
|
25
|
+
"ai": "beta",
|
|
26
|
+
"picocolors": "^1.1.1",
|
|
27
|
+
"zod": "latest"
|
|
28
|
+
},
|
|
29
|
+
"devDependencies": {
|
|
30
|
+
"@types/node": "latest",
|
|
31
|
+
"tsup": "^8.0.0",
|
|
32
|
+
"tsx": "latest",
|
|
33
|
+
"typescript": "latest"
|
|
34
|
+
}
|
|
35
|
+
}
|