@nodes/agent 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,27 @@
1
+ # @nodes/agent
2
+
3
+ Autonomous AI agent runtime for Nodes platform.
4
+
5
+ **Status: WIP — API may change.**
6
+
7
+ ## Install
8
+
9
+ ```bash
10
+ npm install @nodes/agent
11
+ ```
12
+
13
+ ## Usage
14
+
15
+ ```bash
16
+ NODES_URL=https://nodes.ws \
17
+ NODES_API_KEY=... \
18
+ NODES_AGENT=org/my-agent \
19
+ GATEWAY_API_KEY=... \
20
+ nodes
21
+ ```
22
+
23
+ ### Modes
24
+
25
+ - `nodes` — Interactive chat
26
+ - `nodes -p "prompt"` — Run a single prompt and exit (one-shot)
27
+ - `nodes --serve` — Expose local tools as MCP server
package/dist/cli.d.ts ADDED
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env node
2
+ export {};
package/dist/cli.js ADDED
@@ -0,0 +1,107 @@
1
+ #!/usr/bin/env node
2
+ import { parseArgs } from 'node:util';
3
+ import { createRequire } from 'module';
4
+ import { NodesClient } from '@nodes/sdk';
5
+ import { runOnce, interactive } from './core/loop.js';
6
+ import { interactiveTUI } from './core/tui-chat.js';
7
+ import { serve } from './core/serve.js';
8
+ const require = createRequire(import.meta.url);
9
+ const pkg = require('../package.json');
10
+ const { values } = parseArgs({
11
+ options: {
12
+ prompt: { type: 'string', short: 'p' },
13
+ node: { type: 'string', short: 'n' },
14
+ serve: { type: 'boolean' },
15
+ simple: { type: 'boolean' },
16
+ help: { type: 'boolean', short: 'h' },
17
+ version: { type: 'boolean', short: 'v' },
18
+ },
19
+ strict: false,
20
+ });
21
+ if (values.help) {
22
+ console.log(`
23
+ nodes — Autonomous AI agent for Nodes
24
+
25
+ Usage:
26
+ nodes Start in interactive mode (new chat)
27
+ nodes -n <nodeId> Resume interactive chat on existing node
28
+ nodes -p "prompt" Run a single prompt and exit
29
+ nodes -p "prompt" -n <id> Run prompt in existing chat and exit
30
+ nodes --serve Start MCP server (exposes local tools)
31
+ nodes --simple Use simple readline UI (no TUI)
32
+ nodes --help Show this help
33
+
34
+ Environment:
35
+ NODES_URL Nodes instance URL (default: https://nodes.ws)
36
+ NODES_API_KEY API key for authentication
37
+ NODES_AGENT Agent slug (e.g. "nodes/my-agent") or user ID
38
+ PORT MCP server port (default: 8788, --serve only)
39
+ `);
40
+ process.exit(0);
41
+ }
42
+ if (values.version) {
43
+ console.log(`nodes ${pkg.version}`);
44
+ process.exit(0);
45
+ }
46
+ // --serve doesn't require Nodes credentials (purely local MCP server)
47
+ if (values.serve) {
48
+ serve().catch((err) => {
49
+ console.error(err.message);
50
+ process.exit(1);
51
+ });
52
+ }
53
+ else {
54
+ // All other modes require Nodes connection
55
+ const url = process.env.NODES_URL || 'https://nodes.ws';
56
+ const apiKey = process.env.NODES_API_KEY || '';
57
+ const agent = process.env.NODES_AGENT || '';
58
+ if (!apiKey) {
59
+ console.error('Error: NODES_API_KEY is required.');
60
+ process.exit(1);
61
+ }
62
+ if (!agent) {
63
+ console.error('Error: NODES_AGENT is required (agent slug or user ID).');
64
+ process.exit(1);
65
+ }
66
+ // Extract userId from user-scoped token (nodes_<userId>_<secret>)
67
+ const tokenMatch = apiKey.match(/^nodes_([a-f0-9]{24})_[a-f0-9]{32}$/);
68
+ const userId = tokenMatch?.[1];
69
+ const prompt = values.prompt;
70
+ const nodeId = values.node;
71
+ const nodes = new NodesClient({ url, apiKey, agent });
72
+ const config = { nodes, agent, userId, prompt, nodeId };
73
+ if (prompt) {
74
+ // One-shot mode
75
+ runOnce(config)
76
+ .then((result) => {
77
+ console.log(JSON.stringify(result, null, 2));
78
+ process.exit(0);
79
+ })
80
+ .catch((err) => {
81
+ console.error(err.message);
82
+ process.exit(1);
83
+ })
84
+ .finally(() => nodes.disconnect());
85
+ }
86
+ else {
87
+ if (values.simple) {
88
+ // Simple readline mode
89
+ process.on('SIGINT', async () => {
90
+ console.log('\nGoodbye.');
91
+ await nodes.disconnect();
92
+ process.exit(0);
93
+ });
94
+ interactive(config).catch((err) => {
95
+ console.error(err.message);
96
+ process.exit(1);
97
+ });
98
+ }
99
+ else {
100
+ // TUI mode (default) — handles its own SIGINT
101
+ interactiveTUI(config).catch((err) => {
102
+ console.error(err.message);
103
+ process.exit(1);
104
+ });
105
+ }
106
+ }
107
+ }
@@ -0,0 +1,70 @@
1
+ import { type UIMessage, type LanguageModel } from 'ai';
2
+ import { NodesClient } from '@nodes/sdk';
3
+ export interface AgentConfig {
4
+ nodes: NodesClient;
5
+ /** Agent slug (e.g. "nodes/my-agent") or user ID */
6
+ agent: string;
7
+ model?: string;
8
+ /** Human user ID — extracted from user-scoped API key (nodes_<userId>_<secret>) */
9
+ userId?: string;
10
+ /** One-shot prompt — run and exit */
11
+ prompt?: string;
12
+ /** Existing chat node ID to resume */
13
+ nodeId?: string;
14
+ }
15
+ export interface AgentEnv {
16
+ modelId: string;
17
+ model: LanguageModel;
18
+ provider: string;
19
+ allTools: Record<string, any>;
20
+ systemPrompt: string;
21
+ chatsNodeId?: string;
22
+ agentName: string;
23
+ }
24
+ export declare function loadAgentEnv(config: AgentConfig): Promise<AgentEnv>;
25
+ export interface TurnOptions {
26
+ prompt?: string;
27
+ messages?: Array<{
28
+ role: 'user' | 'assistant';
29
+ content: string;
30
+ }>;
31
+ abortSignal?: AbortSignal;
32
+ onTextDelta?: (delta: string) => void;
33
+ onReasoningDelta?: (delta: string) => void;
34
+ onToolCallStart?: (toolName: string, args: unknown) => void;
35
+ onToolCall?: (toolName: string, args: unknown, result: unknown) => void;
36
+ }
37
+ interface UsageMetadata {
38
+ provider?: string;
39
+ model?: string;
40
+ totalUsage?: {
41
+ inputTokens?: number;
42
+ outputTokens?: number;
43
+ totalTokens?: number;
44
+ reasoningTokens?: number;
45
+ cachedInputTokens?: number;
46
+ };
47
+ timings?: {
48
+ totalMs?: number;
49
+ };
50
+ }
51
+ interface TurnResult {
52
+ text: string;
53
+ toolCalls: number;
54
+ parts: UIMessage['parts'];
55
+ metadata: UsageMetadata;
56
+ }
57
+ export declare function runTurn(env: AgentEnv, options: TurnOptions): Promise<TurnResult>;
58
+ export declare function runOnce(config: AgentConfig): Promise<{
59
+ ok: boolean;
60
+ error: string;
61
+ chatNodeId?: undefined;
62
+ toolCalls?: undefined;
63
+ } | {
64
+ ok: boolean;
65
+ chatNodeId: string;
66
+ toolCalls: number;
67
+ error?: undefined;
68
+ }>;
69
+ export declare function interactive(config: AgentConfig): Promise<void>;
70
+ export {};
@@ -0,0 +1,253 @@
1
+ import { streamText, hasToolCall, stepCountIs, readUIMessageStream } from 'ai';
2
+ import { localTools } from './tools.js';
3
+ import { loadRemoteTools } from './remote-tools.js';
4
+ import { resolveModel } from './providers.js';
5
+ const DEFAULT_MAX_STEPS = 50;
6
+ const MAX_STEPS = Number(process.env.MAX_STEPS) || DEFAULT_MAX_STEPS;
7
+ const stopWhen = [hasToolCall('ai_end'), hasToolCall('ai_buttons'), stepCountIs(MAX_STEPS)];
/**
 * Resolve everything a turn needs from the agent's record on Nodes:
 * the provider-resolved model, the merged tool set (local + remote),
 * the system prompt, and the agent's chats node (if initialized).
 */
export async function loadAgentEnv(config) {
    const info = await config.nodes.ai.readAgent(config.agent, { include: ['systemPrompt'] });
    const provider = info.ai?.provider || 'gateway';
    const modelId = config.model || info.ai?.model || 'google/gemini-2.5-flash';
    // Remote tools are fetched from Nodes; locals always win on name clash? No —
    // remote entries spread last, so a remote tool shadows a same-named local one.
    const remote = await loadRemoteTools(config.nodes, info.id);
    return {
        modelId,
        model: resolveModel(provider, modelId),
        provider,
        allTools: { ...localTools, ...remote },
        systemPrompt: info.systemPrompt || 'You are a helpful AI agent.',
        chatsNodeId: info.ai?.chatsNode,
        agentName: info.name,
    };
}
// Run one model turn: stream the response, forwarding text/reasoning deltas
// and tool-call events to the callbacks in `options`, and return the final
// text, part list, tool-call count, and usage metadata.
export async function runTurn(env, options) {
    const startMs = Date.now();
    const result = streamText({
        model: env.model,
        system: env.systemPrompt,
        tools: env.allTools,
        stopWhen,
        ...(options.abortSignal ? { abortSignal: options.abortSignal } : {}),
        // Either a full message history or a single prompt — never both.
        ...(options.messages
            ? { messages: options.messages }
            : { prompt: options.prompt || '' }),
    });
    // Use native AI SDK UIMessage stream — same format the Nodes UI uses
    const uiStream = readUIMessageStream({ stream: result.toUIMessageStream() });
    let message;
    let prevTextLen = 0;
    let prevReasoningLen = 0;
    const seenToolStarts = new Set();
    const seenToolResults = new Set();
    // Each iteration yields a full snapshot of the message so far; deltas are
    // recovered by diffing concatenated text lengths against the previous
    // snapshot rather than consuming raw delta events.
    for await (const snapshot of uiStream) {
        message = snapshot;
        // Stream text deltas
        const textParts = snapshot.parts.filter(p => p.type === 'text');
        const currentText = textParts.map(p => p.text).join('');
        if (currentText.length > prevTextLen) {
            options.onTextDelta?.(currentText.slice(prevTextLen));
            prevTextLen = currentText.length;
        }
        // Stream reasoning deltas (only computed when a listener is attached)
        if (options.onReasoningDelta) {
            const reasoningParts = snapshot.parts.filter(p => p.type === 'reasoning');
            const currentReasoning = reasoningParts.map(p => p.text).join('');
            if (currentReasoning.length > prevReasoningLen) {
                options.onReasoningDelta(currentReasoning.slice(prevReasoningLen));
                prevReasoningLen = currentReasoning.length;
            }
        }
        // Detect tool starts and completions; toolCallId de-dupes across
        // snapshots so each callback fires at most once per tool invocation.
        for (const part of snapshot.parts) {
            const p = part;
            if (!p.type.startsWith('tool-') || !p.toolCallId)
                continue;
            // Tool START (any state before output-available)
            // NOTE(review): a tool that first appears already in the
            // 'output-available' state never fires onToolCallStart — confirm
            // this is intentional for very fast tools.
            if (!seenToolStarts.has(p.toolCallId) && p.state !== 'output-available') {
                seenToolStarts.add(p.toolCallId);
                options.onToolCallStart?.(p.type.replace('tool-', ''), p.input);
            }
            // Tool COMPLETE
            if (p.state === 'output-available' && !seenToolResults.has(p.toolCallId)) {
                seenToolResults.add(p.toolCallId);
                options.onToolCall?.(p.type.replace('tool-', ''), p.input, p.output);
            }
        }
    }
    const totalMs = Date.now() - startMs;
    const usage = await result.totalUsage;
    const parts = message?.parts || [];
    const fullText = parts
        .filter(p => p.type === 'text')
        .map(p => p.text)
        .join('');
    const toolCalls = parts.filter(p => p.type.startsWith('tool-')).length;
    return {
        text: fullText,
        toolCalls,
        parts,
        metadata: {
            provider: env.provider,
            model: env.modelId,
            totalUsage: {
                inputTokens: usage.inputTokens,
                outputTokens: usage.outputTokens,
                totalTokens: usage.totalTokens,
                // assumes the installed 'ai' version nests these under
                // *TokenDetails — TODO confirm against its usage type
                reasoningTokens: usage.outputTokenDetails?.reasoningTokens,
                cachedInputTokens: usage.inputTokenDetails?.cacheReadTokens,
            },
            timings: { totalMs },
        },
    };
}
// --- One-shot mode (-p "prompt") ---
/**
 * Run a single prompt against the agent, persisting both the user and
 * assistant messages to Nodes, then return a summary result object.
 * Known failures resolve with { ok: false, error } rather than throwing.
 */
export async function runOnce(config) {
    const { nodes, prompt } = config;
    if (!prompt) {
        console.error('Error: prompt is required for one-shot mode (-p "prompt").');
        return { ok: false, error: 'No prompt provided' };
    }
    console.log('Loading agent config...');
    const env = await loadAgentEnv(config);
    if (!env.chatsNodeId) {
        console.error('No chatsNode on agent. Open agent in UI first to initialize.');
        return { ok: false, error: 'No chatsNode found on agent AI side' };
    }
    console.log(`Agent: "${env.agentName}" | Model: ${env.modelId} | Tools: ${Object.keys(env.allTools).length}`);
    // Create or reuse chat + write user message
    const userResult = await nodes.chat.write({
        ...(config.nodeId ? { nodeId: config.nodeId } : { parentId: env.chatsNodeId }),
        role: 'user',
        text: prompt,
        ...(config.userId ? { createdBy: config.userId } : {}),
    });
    if (!userResult?.chatNodeId) {
        console.error('Failed to create chat:', userResult?.error);
        return { ok: false, error: userResult?.error || 'Failed to create chat' };
    }
    console.log(`Chat: ${userResult.chatNodeId}`);
    // Load history if resuming existing chat — best-effort; a failed read
    // only logs a warning and the turn proceeds with just the new prompt.
    const messages = [];
    if (config.nodeId) {
        try {
            const history = await nodes.chat.read(config.nodeId);
            if (history?.docs) {
                for (const doc of history.docs) {
                    const msg = doc;
                    const role = msg.sides?.message?.role;
                    // Flatten text parts into one string; non-text parts
                    // (tool calls etc.) are dropped from replayed history.
                    const text = msg.sides?.message?.parts?.filter(p => p.type === 'text').map(p => p.text || '').join('') || '';
                    if (role && (role === 'user' || role === 'assistant') && text) {
                        messages.push({ role, content: text });
                    }
                }
                console.log(`Loaded ${messages.length} messages from history`);
            }
        }
        catch (err) {
            console.warn('Could not load chat history:', err.message);
        }
    }
    // Add current prompt
    messages.push({ role: 'user', content: prompt });
    // Run turn, echoing model text to stdout as it streams
    const turn = await runTurn(env, {
        messages,
        onTextDelta: (delta) => process.stdout.write(delta),
        onToolCall: (name) => console.log(`\n [tool: ${name}]`),
    });
    if (turn.text)
        process.stdout.write('\n');
    console.log(`\nDone. ${turn.text.length} chars, ${turn.toolCalls} tool calls.`);
    // Save assistant message to Nodes (with usage metadata for billing)
    await nodes.chat.write({
        nodeId: userResult.chatNodeId,
        role: 'assistant',
        text: turn.text,
        parts: turn.parts,
        _metadata: turn.metadata,
    });
    return { ok: true, chatNodeId: userResult.chatNodeId, toolCalls: turn.toolCalls };
}
// --- Interactive mode ---
/**
 * Simple readline REPL: read a line, sync it to Nodes (creating the chat on
 * the first message), run a turn with the full in-memory transcript, stream
 * the reply, and persist the assistant message. Loops until interrupted
 * (SIGINT handling is the caller's responsibility — see cli.js).
 */
export async function interactive(config) {
    const { nodes } = config;
    const readline = await import('readline');
    console.log('Loading agent config...');
    const env = await loadAgentEnv(config);
    if (!env.chatsNodeId) {
        console.error('No chatsNode on agent. Open agent in UI first to initialize.');
        return;
    }
    console.log(`Agent: "${env.agentName}" | Model: ${env.modelId} | Tools: ${Object.keys(env.allTools).length}`);
    const messages = [];
    let chatNodeId = config.nodeId;
    // Resume existing chat — load history (best-effort; warn and continue)
    if (chatNodeId) {
        try {
            const history = await nodes.chat.read(chatNodeId);
            if (history?.docs) {
                for (const doc of history.docs) {
                    const msg = doc;
                    const role = msg.sides?.message?.role;
                    // Only text parts are replayed into the model context
                    const text = msg.sides?.message?.parts?.filter(p => p.type === 'text').map(p => p.text || '').join('') || '';
                    if (role && (role === 'user' || role === 'assistant') && text) {
                        messages.push({ role, content: text });
                    }
                }
                console.log(`Resumed chat ${chatNodeId} (${messages.length} messages)`);
            }
        }
        catch (err) {
            console.warn('Could not load chat history:', err.message);
        }
    }
    console.log('Type your message. Ctrl+C to exit.\n');
    const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
    const ask = () => new Promise((resolve) => {
        rl.question('> ', (answer) => resolve(answer));
    });
    while (true) {
        const input = await ask();
        if (!input.trim())
            continue;
        // Sync user message to Nodes (creates chat on first message)
        const userResult = await nodes.chat.write({
            ...(chatNodeId ? { nodeId: chatNodeId } : { parentId: env.chatsNodeId }),
            role: 'user',
            text: input,
            ...(config.userId ? { createdBy: config.userId } : {}),
        });
        if (!userResult?.chatNodeId) {
            // Sync failure is non-fatal: the turn still runs locally
            console.error('Failed to sync user message:', userResult?.error);
        }
        else {
            chatNodeId = userResult.chatNodeId;
        }
        messages.push({ role: 'user', content: input });
        const turn = await runTurn(env, {
            messages,
            onTextDelta: (delta) => process.stdout.write(delta),
            onToolCall: (name, _args, result) => {
                // Inline 120-char preview of each tool result
                const preview = JSON.stringify(result).slice(0, 120);
                console.log(`\n [tool: ${name}] ${preview}${JSON.stringify(result).length > 120 ? '...' : ''}`);
            },
        });
        if (turn.text)
            process.stdout.write('\n\n');
        // Sync assistant message to Nodes (with usage metadata for billing);
        // skipped when the chat was never created (user sync failed above).
        if (chatNodeId) {
            await nodes.chat.write({
                nodeId: chatNodeId,
                role: 'assistant',
                text: turn.text,
                parts: turn.parts,
                _metadata: turn.metadata,
            });
        }
        messages.push({ role: 'assistant', content: turn.text || '(tool calls only)' });
    }
}
@@ -0,0 +1,2 @@
1
+ import type { LanguageModel } from 'ai';
2
+ export declare function resolveModel(provider: string, modelId: string): LanguageModel;
@@ -0,0 +1,17 @@
1
+ import { createGateway } from '@ai-sdk/gateway';
2
+ import { createOpenAI } from '@ai-sdk/openai';
3
+ import { createAnthropic } from '@ai-sdk/anthropic';
4
+ import { createGoogleGenerativeAI } from '@ai-sdk/google';
/**
 * Map a provider name to an AI SDK LanguageModel instance for `modelId`.
 * API keys are read from the environment, one variable per provider.
 * Unknown provider names fall back to the AI Gateway.
 */
export function resolveModel(provider, modelId) {
    if (provider === 'openai') {
        return createOpenAI({ apiKey: process.env.OPENAI_API_KEY }).languageModel(modelId);
    }
    if (provider === 'anthropic') {
        return createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }).languageModel(modelId);
    }
    if (provider === 'google') {
        return createGoogleGenerativeAI({ apiKey: process.env.GOOGLE_API_KEY }).languageModel(modelId);
    }
    // 'gateway' and anything unrecognized
    return createGateway({ apiKey: process.env.GATEWAY_API_KEY }).languageModel(modelId);
}
@@ -0,0 +1,6 @@
1
+ import type { NodesClient } from '@nodes/sdk';
2
+ /**
3
+ * Fetch agent's Nodes tools and convert to AI SDK tool format.
4
+ * Each tool's execute calls back to Nodes via MCP.
5
+ */
6
+ export declare function loadRemoteTools(nodes: NodesClient, agentId: string): Promise<Record<string, import("ai").Tool<never, never>>>;
@@ -0,0 +1,22 @@
1
+ import { tool, jsonSchema } from 'ai';
/**
 * Fetch agent's Nodes tools and convert to AI SDK tool format.
 * Each tool's execute calls back to Nodes via MCP.
 */
export async function loadRemoteTools(nodes, agentId) {
    const { tools: defs } = await nodes.agents.tools(agentId);
    const converted = {};
    for (const { name, description, inputSchema } of defs) {
        // Tools without a declared schema get an empty object schema.
        const schema = inputSchema || { type: 'object', properties: {} };
        // @ts-expect-error AI SDK tool() overload resolution
        converted[name] = tool({
            description,
            parameters: jsonSchema(schema),
            // @ts-expect-error AI SDK tool() overload resolution
            execute: (args) => nodes.callTool(name, args),
        });
    }
    return converted;
}
@@ -0,0 +1,5 @@
1
+ import { IncomingMessage, ServerResponse } from 'node:http';
2
+ export interface ServeOptions {
3
+ port?: number;
4
+ }
5
+ export declare function serve(options?: ServeOptions): Promise<import("http").Server<typeof IncomingMessage, typeof ServerResponse>>;
@@ -0,0 +1,113 @@
1
+ import { createRequire } from 'module';
2
+ import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
3
+ import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
4
+ import { createServer } from 'node:http';
5
+ import { z } from 'zod';
6
+ import { execBash, execReadFile, execWriteFile } from './tools.js';
7
+ const require = createRequire(import.meta.url);
8
+ const pkg = require('../../package.json');
/**
 * Start a local, stateless MCP server over Streamable HTTP exposing the
 * bash / read_file / write_file tools. Listens on options.port, $PORT, or
 * 8788, and returns the underlying node http.Server.
 *
 * Auth: when AGENT_API_KEY is set, /mcp requires `Authorization: Bearer
 * <key>`; otherwise the server is open (a warning is logged at startup).
 */
export async function serve(options = {}) {
    const port = options.port || Number(process.env.PORT) || 8788;
    // Create MCP server
    const mcp = new McpServer({
        name: 'nodes-agent',
        version: pkg.version,
    });
    // --- Local tools (shared logic from tools.ts) ---
    mcp.tool('bash', 'Execute a bash command and return stdout/stderr', {
        command: z.string().describe('The bash command to execute'),
        cwd: z.string().optional().describe('Working directory'),
    }, async ({ command, cwd }) => {
        const result = await execBash(command, cwd);
        return { content: [{ type: 'text', text: JSON.stringify(result) }] };
    });
    mcp.tool('read_file', 'Read a file from the filesystem', {
        path: z.string().describe('Absolute or relative file path'),
    }, async ({ path }) => {
        const result = execReadFile(path);
        const isError = 'error' in result;
        // On success return the raw file content; on failure the error JSON.
        return { content: [{ type: 'text', text: isError ? JSON.stringify(result) : result.content }], isError };
    });
    mcp.tool('write_file', 'Write content to a file (creates directories if needed)', {
        path: z.string().describe('Absolute or relative file path'),
        content: z.string().describe('File content to write'),
    }, async ({ path, content }) => {
        const result = execWriteFile(path, content);
        const isError = 'error' in result;
        return { content: [{ type: 'text', text: JSON.stringify(result) }], isError };
    });
    // --- Transport ---
    const transport = new StreamableHTTPServerTransport({
        sessionIdGenerator: undefined, // stateless
    });
    await mcp.connect(transport);
    // --- Auth ---
    const apiKey = process.env.AGENT_API_KEY || '';
    if (!apiKey) {
        console.warn('[serve] WARNING: AGENT_API_KEY not set — server is open to anyone!');
    }
    // True when the request may proceed: either no key is configured, or the
    // Bearer token matches AGENT_API_KEY exactly.
    function checkAuth(req) {
        if (!apiKey)
            return true; // no key configured = open
        const authHeader = req.headers.authorization;
        if (!authHeader?.startsWith('Bearer '))
            return false;
        return authHeader.slice(7) === apiKey;
    }
    // --- HTTP server ---
    const httpServer = createServer(async (req, res) => {
        // CORS for browser-based MCP clients
        res.setHeader('Access-Control-Allow-Origin', '*');
        res.setHeader('Access-Control-Allow-Methods', 'GET, POST, DELETE, OPTIONS');
        res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, mcp-session-id');
        if (req.method === 'OPTIONS') {
            res.writeHead(204);
            res.end();
            return;
        }
        // Route /mcp to the transport
        const url = new URL(req.url || '/', `http://localhost:${port}`);
        if (url.pathname === '/mcp') {
            if (!checkAuth(req)) {
                res.writeHead(401, { 'Content-Type': 'application/json' });
                res.end(JSON.stringify({ error: 'Unauthorized' }));
                return;
            }
            // Parse JSON body for POST — the transport expects a parsed body
            if (req.method === 'POST') {
                const chunks = [];
                for await (const chunk of req)
                    chunks.push(chunk);
                let body;
                try {
                    body = JSON.parse(Buffer.concat(chunks).toString());
                }
                catch {
                    res.writeHead(400, { 'Content-Type': 'application/json' });
                    res.end(JSON.stringify({ error: 'Invalid JSON' }));
                    return;
                }
                await transport.handleRequest(req, res, body);
            }
            else {
                await transport.handleRequest(req, res);
            }
            return;
        }
        // Health check
        if (url.pathname === '/health') {
            res.writeHead(200, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify({ status: 'ok', name: 'nodes-agent', version: pkg.version }));
            return;
        }
        res.writeHead(404);
        res.end('Not found');
    });
    httpServer.listen(port, () => {
        console.log(`[serve] Nodes Agent MCP server listening on http://0.0.0.0:${port}/mcp`);
        console.log(`[serve] Health check: http://localhost:${port}/health`);
        console.log(`[serve] Tools: bash, read_file, write_file`);
        console.log(`[serve] Auth: ${apiKey ? 'Bearer token required' : 'OPEN (no AGENT_API_KEY set)'}`);
    });
    return httpServer;
}
@@ -0,0 +1,48 @@
1
+ export declare function execBash(command: string, cwd?: string): Promise<{
2
+ stdout: string;
3
+ stderr?: string;
4
+ exitCode: number;
5
+ }>;
6
+ export declare function execReadFile(path: string): {
7
+ content: string;
8
+ error?: undefined;
9
+ } | {
10
+ error: string;
11
+ content?: undefined;
12
+ };
13
+ export declare function execWriteFile(path: string, content: string): {
14
+ success: boolean;
15
+ error?: undefined;
16
+ } | {
17
+ error: string;
18
+ success?: undefined;
19
+ };
20
+ export declare const localTools: {
21
+ bash: import("ai").Tool<{
22
+ command: string;
23
+ cwd?: string;
24
+ }, {
25
+ stdout: string;
26
+ stderr?: string;
27
+ exitCode: number;
28
+ }>;
29
+ read_file: import("ai").Tool<{
30
+ path: string;
31
+ }, {
32
+ content: string;
33
+ error?: undefined;
34
+ } | {
35
+ error: string;
36
+ content?: undefined;
37
+ }>;
38
+ write_file: import("ai").Tool<{
39
+ path: string;
40
+ content: string;
41
+ }, {
42
+ success: boolean;
43
+ error?: undefined;
44
+ } | {
45
+ error: string;
46
+ success?: undefined;
47
+ }>;
48
+ };
@@ -0,0 +1,81 @@
1
+ import { tool } from 'ai';
2
+ import { z } from 'zod';
3
+ import { exec } from 'child_process';
4
+ import { readFileSync, writeFileSync, mkdirSync } from 'fs';
5
+ import { dirname } from 'path';
// Cap on tool output returned to the model (characters).
const MAX_OUTPUT = 30_000;
/** Clamp `output` to MAX_OUTPUT chars, appending a note of how much was cut. */
function truncate(output) {
    if (output.length > MAX_OUTPUT) {
        const removed = output.length - MAX_OUTPUT;
        return `${output.slice(0, MAX_OUTPUT)}\n\n[truncated: ${removed} characters removed]`;
    }
    return output;
}
// --- Shared execution logic ---
/**
 * Run a shell command via child_process.exec and resolve (never reject)
 * with { stdout, stderr?, exitCode }. Output is clamped by truncate(),
 * execution is capped at 60s and 1MB of buffered output.
 */
export function execBash(command, cwd) {
    return new Promise((resolve) => {
        const opts = {
            cwd: cwd || process.cwd(),
            encoding: 'utf-8',
            timeout: 60_000,
            maxBuffer: 1024 * 1024,
        };
        exec(command, opts, (error, stdout, stderr) => {
            if (!error) {
                resolve({ stdout: truncate(stdout), exitCode: 0 });
                return;
            }
            // error.code may be a string (e.g. spawn errors) — normalize to 1.
            resolve({
                stdout: truncate(stdout || ''),
                stderr: truncate(stderr || ''),
                exitCode: typeof error.code === 'number' ? error.code : 1,
            });
        });
    });
}
/** Read a UTF-8 file; returns { content } on success or { error } on failure — never throws. */
export function execReadFile(path) {
    try {
        return { content: readFileSync(path, 'utf-8') };
    }
    catch (err) {
        return { error: err.message };
    }
}
/**
 * Write `content` to `path` (UTF-8), creating parent directories as needed.
 * Returns { success: true } on success or { error } on failure — never throws.
 */
export function execWriteFile(path, content) {
    let outcome;
    try {
        mkdirSync(dirname(path), { recursive: true });
        writeFileSync(path, content, 'utf-8');
        outcome = { success: true };
    }
    catch (err) {
        outcome = { error: err.message };
    }
    return outcome;
}
// --- AI SDK tool wrappers ---
// The tools the model can call locally; each is a thin wrapper over the
// shared exec* helpers above so the MCP server (serve.js) reuses the logic.
export const localTools = {
    bash: tool({
        description: 'Execute a bash command and return stdout/stderr',
        // @ts-expect-error AI SDK tool() overload resolution
        execute: async ({ command, cwd }) => execBash(command, cwd),
        parameters: z.object({
            command: z.string().describe('The bash command to execute'),
            cwd: z.string().optional().describe('Working directory'),
        }),
    }),
    read_file: tool({
        description: 'Read a file from the filesystem',
        // @ts-expect-error AI SDK tool() overload resolution
        execute: async ({ path }) => execReadFile(path),
        parameters: z.object({
            path: z.string().describe('Absolute or relative file path'),
        }),
    }),
    write_file: tool({
        description: 'Write content to a file (creates directories if needed)',
        // @ts-expect-error AI SDK tool() overload resolution
        execute: async ({ path, content }) => execWriteFile(path, content),
        parameters: z.object({
            path: z.string().describe('Absolute or relative file path'),
            content: z.string().describe('File content to write'),
        }),
    }),
};
@@ -0,0 +1,2 @@
1
+ import { type AgentConfig } from './loop.js';
2
+ export declare function interactiveTUI(config: AgentConfig): Promise<void>;
@@ -0,0 +1,324 @@
1
+ import { createRequire } from 'module';
2
+ import { ProcessTerminal, TUI, Container, Editor, Markdown, Loader, Text, Spacer, Box, matchesKey } from '@mariozechner/pi-tui';
3
+ import pc from 'picocolors';
4
+ import { loadAgentEnv, runTurn } from './loop.js';
5
+ const require = createRequire(import.meta.url);
6
+ const pkg = require('../../package.json');
7
// --- Background color helpers (24-bit ANSI) ---
/**
 * Build a formatter that wraps text in a truecolor background escape.
 * @param hex Color as '#rrggbb'.
 * @returns Function that surrounds its argument with SGR 48;2 (set bg) / 49 (reset bg).
 */
function bgHex(hex) {
    // Parse the three two-digit hex channels at offsets 1, 3, 5.
    const [r, g, b] = [1, 3, 5].map((i) => parseInt(hex.slice(i, i + 2), 16));
    return (text) => `\x1b[48;2;${r};${g};${b}m${text}\x1b[49m`;
}
14
// Background tints for tool-call boxes and user messages (dark, low-contrast).
const toolSuccessBg = bgHex('#283228'); // greenish: tool finished without error
const toolErrorBg = bgHex('#3c2828'); // reddish: result had `error` or nonzero `exitCode`
const toolPendingBg = bgHex('#282832'); // bluish: tool call still in flight
const userMsgBg = bgHex('#343541'); // neutral slate behind user messages
// --- Editor theme ---
// Styling hooks for the pi-tui Editor's select/autocomplete list.
const selectListTheme = {
    selectedPrefix: pc.cyan,
    selectedText: pc.bold,
    description: pc.dim,
    scrollInfo: pc.dim,
    noMatch: pc.dim,
};
// Editor chrome: dim border plus the select-list theme above.
const editorTheme = {
    borderColor: pc.dim,
    selectList: selectListTheme,
};
30
/**
 * One-line summary of a tool invocation.
 * bash/read_file/write_file get dedicated layouts; any other tool falls back
 * to its name followed by a `key: value` dump (string values capped at 60 chars).
 */
function toolHeader(name, args) {
    const a = (args && typeof args === 'object' ? args : {});
    switch (name) {
        case 'bash':
            return pc.bold(`$ ${String(a.command || '...')}`);
        case 'read_file':
            return pc.bold('read ') + pc.cyan(String(a.path || '...'));
        case 'write_file':
            return pc.bold('write ') + pc.cyan(String(a.path || '...'));
    }
    const pairs = [];
    for (const [key, value] of Object.entries(a)) {
        const shown = typeof value === 'string'
            ? (value.length > 60 ? value.slice(0, 60) + '...' : value)
            : JSON.stringify(value);
        pairs.push(`${pc.dim(key)}: ${shown}`);
    }
    const rendered = pairs.join(' ');
    return pc.bold(name) + (rendered ? ' ' + rendered : '');
}
46
/**
 * Body text shown under a completed tool call, or null when nothing is worth showing.
 * - bash: first 5 lines of combined stdout+stderr (dimmed), with a "... more lines" suffix.
 * - read_file / write_file: suppressed entirely.
 * - anything else: dimmed JSON preview capped at 200 chars (empty/`{}` suppressed).
 */
function toolOutput(name, args, result) {
    const r = (result && typeof result === 'object' ? result : {});
    if (name === 'read_file' || name === 'write_file')
        return null;
    if (name === 'bash') {
        const stdout = String(r.stdout || '');
        const stderr = String(r.stderr || '');
        const combined = (stderr ? `${stdout}\n${stderr}` : stdout).trim();
        if (combined === '')
            return null;
        const lines = combined.split('\n');
        const shown = lines.slice(0, 5).map((line) => pc.dim(line)).join('\n');
        const hidden = lines.length - 5;
        return hidden > 0 ? shown + pc.dim(`\n... (${hidden} more lines)`) : shown;
    }
    const serialized = JSON.stringify(result);
    if (!serialized || serialized === '{}')
        return null;
    const capped = serialized.length > 200 ? serialized.slice(0, 200) + '...' : serialized;
    return pc.dim(capped);
}
67
/**
 * Repaint a tool-call box: pick the background from its state (pending /
 * error / success), then rebuild its children — header line always, plus the
 * tool output when `expanded` and a result has arrived.
 */
function rerenderToolBox(entry, expanded) {
    const { box, name, args, result } = entry;
    const r = (result && typeof result === 'object' ? result : {});
    let bg;
    if (result == null) {
        bg = toolPendingBg;
    }
    else if ('error' in r || ('exitCode' in r && r.exitCode !== 0)) {
        bg = toolErrorBg;
    }
    else {
        bg = toolSuccessBg;
    }
    box.setBgFn(bg);
    // Drop all existing children before rebuilding from scratch.
    while (box.children.length > 0)
        box.removeChild(box.children[0]);
    box.addChild(new Text(toolHeader(name, args), 0, 0));
    if (expanded && result != null) {
        const output = toolOutput(name, args, result);
        if (output)
            box.addChild(new Text('\n' + output, 0, 0));
    }
}
82
// --- Markdown theme using picocolors ---
// Maps pi-tui Markdown style slots to picocolors formatters; passed to every
// Markdown component rendered in the chat.
const theme = {
    heading: pc.bold,
    link: pc.cyan,
    linkUrl: pc.dim,
    code: pc.yellow,
    codeBlock: pc.dim,
    codeBlockBorder: pc.dim,
    quote: pc.italic,
    quoteBorder: pc.dim,
    hr: pc.dim,
    listBullet: pc.cyan,
    bold: pc.bold,
    italic: pc.italic,
    strikethrough: pc.strikethrough,
    underline: pc.underline,
};
99
+ async function loadChatHistory(nodes, chatNodeId) {
100
+ const messages = [];
101
+ const history = await nodes.chat.read(chatNodeId);
102
+ if (history?.docs) {
103
+ for (const doc of history.docs) {
104
+ const msg = doc;
105
+ const role = msg.sides?.message?.role;
106
+ const text = msg.sides?.message?.parts?.filter(p => p.type === 'text').map(p => p.text || '').join('') || '';
107
+ if (role && (role === 'user' || role === 'assistant') && text) {
108
+ messages.push({ role, content: text });
109
+ }
110
+ }
111
+ }
112
+ return messages;
113
+ }
114
// --- TUI interactive mode ---
/**
 * Full-screen interactive chat session built on pi-tui.
 *
 * Layout: banner (name/version), status line, keybinding hints, a scrolling
 * chat container, and an editor at the bottom. Streams model output
 * (reasoning deltas, text deltas, tool calls) into the chat as it arrives via
 * `runTurn` callbacks, and syncs user/assistant messages to the Nodes backend
 * with `nodes.chat.write`.
 *
 * Key bindings: ctrl+c aborts an in-flight turn (exits when idle);
 * tab toggles expanded output on every tool box.
 *
 * @param config Agent config; `config.nodeId`, when set, resumes an existing chat node.
 */
export async function interactiveTUI(config) {
    const { nodes } = config;
    // Start TUI immediately — show banner while loading
    const terminal = new ProcessTerminal();
    const tui = new TUI(terminal);
    const version = new Text(pc.bold(pc.cyan('nodes')) + pc.dim(` v${pkg.version}`), 1, 0);
    const info = new Text(pc.dim('loading...'), 1, 0);
    const keybindings = new Text(pc.dim('ctrl+c') + pc.gray(' exit/stop') + ' ' +
        pc.dim('tab') + pc.gray(' expand tools'), 1, 0);
    const chat = new Container();
    const editor = new Editor(tui, editorTheme);
    tui.addChild(version);
    tui.addChild(info);
    tui.addChild(keybindings);
    tui.addChild(new Spacer(1));
    tui.addChild(chat);
    tui.addChild(editor);
    tui.setFocus(editor);
    // State
    let abortController; // set only while a turn is streaming; doubles as "busy" flag
    let toolsExpanded = false; // global expand/collapse state toggled by tab
    const toolEntries = []; // every tool box ever rendered, for bulk re-render on tab
    // Tear down the UI and process: stop rendering, best-effort disconnect, exit 0.
    const cleanup = () => {
        tui.stop();
        nodes.disconnect().catch(() => { });
        process.exit(0);
    };
    tui.addInputListener((data) => {
        // Ctrl+C: abort if processing, exit otherwise
        if (data === '\x03' || matchesKey(data, 'ctrl+c')) {
            const ac = abortController;
            if (ac) {
                ac.abort();
                return { consume: true };
            }
            cleanup();
            return { consume: true };
        }
        // Tab: toggle tool expand/collapse
        if (matchesKey(data, 'tab')) {
            toolsExpanded = !toolsExpanded;
            for (const entry of toolEntries) {
                rerenderToolBox(entry, toolsExpanded);
            }
            tui.requestRender();
            return { consume: true };
        }
        // Not handled — let the focused component (editor) process it.
        return undefined;
    });
    tui.start();
    // Load config in background
    const env = await loadAgentEnv(config);
    if (!env.chatsNodeId) {
        // Without a chats container node there is nowhere to persist messages.
        info.setText(pc.red('No chatsNode on agent. Open agent in UI first to initialize.'));
        tui.requestRender();
        return;
    }
    info.setText(pc.dim(`${env.agentName} · ${env.modelId} · ${Object.keys(env.allTools).length} tools`));
    tui.requestRender();
    // State
    const messages = []; // conversation history passed to runTurn each turn
    let chatNodeId = config.nodeId; // undefined until first write creates a chat node
    // Resume existing chat
    if (chatNodeId) {
        try {
            const loaded = await loadChatHistory(nodes, chatNodeId);
            messages.push(...loaded);
            // Replay history into the chat view; user messages get a tinted background.
            for (const msg of loaded) {
                chat.addChild(new Spacer(1));
                if (msg.role === 'user') {
                    chat.addChild(new Markdown(msg.content, 1, 1, theme, { bgColor: userMsgBg }));
                }
                else {
                    chat.addChild(new Markdown(msg.content, 0, 0, theme));
                }
            }
            tui.requestRender();
        }
        catch {
            // ignore — resume is best-effort; start with an empty view on failure
        }
    }
    // Handle submit
    editor.onSubmit = (text) => {
        handleSubmit(text).catch((err) => {
            // NOTE(review): the loader created inside handleSubmit is not removed on
            // this error path, so a failed turn can leave a spinner behind — confirm.
            chat.addChild(new Text(pc.red(`Error: ${err.message}`), 0, 0));
            abortController = undefined;
            tui.requestRender();
        });
    };
    /**
     * Run one chat turn: echo the user message, persist it, stream the model
     * response (reasoning/text/tool calls) into the chat, then persist the
     * assistant reply. Ignores empty input and re-entrant submits.
     */
    async function handleSubmit(text) {
        if (!text.trim() || abortController)
            return;
        editor.addToHistory(text);
        editor.setText('');
        // Show user message
        chat.addChild(new Spacer(1));
        chat.addChild(new Markdown(text, 1, 1, theme, { bgColor: userMsgBg }));
        chat.addChild(new Spacer(1));
        // Show loader
        const loader = new Loader(tui, pc.cyan, pc.dim, 'Thinking...');
        chat.addChild(loader);
        tui.requestRender();
        // Sync user message to Nodes
        // First message of a new chat writes under the chats container (parentId);
        // later messages append to the existing chat node (nodeId).
        const userResult = await nodes.chat.write({
            ...(chatNodeId ? { nodeId: chatNodeId } : { parentId: env.chatsNodeId }),
            role: 'user',
            text,
            ...(config.userId ? { createdBy: config.userId } : {}),
        });
        if (userResult?.chatNodeId)
            chatNodeId = userResult.chatNodeId;
        messages.push({ role: 'user', content: text });
        // Stream response
        const md = new Markdown('', 0, 0, theme); // assistant text, grows with deltas
        const reasoningText = new Text('', 1, 0); // dimmed reasoning stream, shown above loader
        let accumulated = '';
        let accumulatedReasoning = '';
        let reasoningAdded = false;
        const turnToolEntries = []; // tool boxes created during this turn only
        const controller = new AbortController();
        abortController = controller;
        // Remove the loader from the chat if it is still mounted.
        function removeLoader() {
            if (chat.children.includes(loader)) {
                loader.stop();
                chat.removeChild(loader);
            }
        }
        // Insert a component above the loader (preferred) or above the assistant
        // markdown, so tool boxes appear before the in-progress output.
        function insertBeforeLoaderOrMd(component) {
            const loaderIdx = chat.children.indexOf(loader);
            const mdIdx = chat.children.indexOf(md);
            const insertBefore = loaderIdx !== -1 ? loaderIdx : mdIdx !== -1 ? mdIdx : -1;
            if (insertBefore !== -1) {
                chat.children.splice(insertBefore, 0, component);
            }
            else {
                chat.addChild(component);
            }
        }
        const turn = await runTurn(env, {
            messages,
            abortSignal: controller.signal,
            onReasoningDelta: (delta) => {
                accumulatedReasoning += delta;
                reasoningText.setText(pc.italic(pc.gray(accumulatedReasoning)));
                if (!reasoningAdded) {
                    // First reasoning chunk: mount the reasoning text, keep loader below it.
                    reasoningAdded = true;
                    removeLoader();
                    chat.addChild(reasoningText);
                    chat.addChild(loader);
                }
                tui.requestRender();
            },
            onTextDelta: (delta) => {
                accumulated += delta;
                if (!chat.children.includes(md)) {
                    // First text chunk replaces the loader with the markdown component.
                    removeLoader();
                    chat.addChild(md);
                }
                md.setText(accumulated);
                tui.requestRender();
            },
            onToolCallStart: (name, args) => {
                const box = new Box(1, 0, toolPendingBg);
                box.addChild(new Text(toolHeader(name, args), 0, 0));
                const entry = { name, args, box };
                turnToolEntries.push(entry);
                toolEntries.push(entry);
                insertBeforeLoaderOrMd(box);
                tui.requestRender();
            },
            onToolCall: (_name, args, result) => {
                // Find matching pending entry (first without result)
                // NOTE(review): `!e.result` treats a falsy result (0, '', false) as
                // still-pending; assumes tool results are always objects — confirm.
                const entry = turnToolEntries.find(e => !e.result);
                if (entry) {
                    entry.args = args;
                    entry.result = result;
                    rerenderToolBox(entry, toolsExpanded);
                }
                // Re-show loader after tool completes (more tools may follow)
                if (!chat.children.includes(loader) && !chat.children.includes(md)) {
                    chat.addChild(loader);
                }
                tui.requestRender();
            },
        });
        // Ensure loader is cleaned up
        removeLoader();
        if (accumulated && !chat.children.includes(md))
            chat.addChild(md);
        // Show aborted indicator
        if (controller.signal.aborted) {
            chat.addChild(new Text(pc.dim('(aborted)'), 1, 0));
        }
        chat.addChild(new Spacer(1));
        // Sync assistant message to Nodes
        if (chatNodeId && turn.text) {
            await nodes.chat.write({
                nodeId: chatNodeId,
                role: 'assistant',
                text: turn.text,
                parts: turn.parts,
                _metadata: turn.metadata,
            });
        }
        messages.push({ role: 'assistant', content: turn.text || '(tool calls only)' });
        abortController = undefined;
        tui.requestRender();
    }
}
@@ -0,0 +1,12 @@
1
/** Curated subset of picocolors formatters used for CLI output. */
export declare const c: {
    dim: import("picocolors/types").Formatter;
    bold: import("picocolors/types").Formatter;
    cyan: import("picocolors/types").Formatter;
    green: import("picocolors/types").Formatter;
    yellow: import("picocolors/types").Formatter;
    red: import("picocolors/types").Formatter;
    magenta: import("picocolors/types").Formatter;
};
/** Start an animated spinner on stderr; returned handle stops and clears it. */
export declare function spinner(message: string): {
    stop(finalMessage?: string): void;
};
@@ -0,0 +1,27 @@
1
+ import pc from 'picocolors';
2
// --- Colors ---
// Curated subset of picocolors formatters re-exported under a short alias.
export const c = {
    dim: pc.dim,
    bold: pc.bold,
    cyan: pc.cyan,
    green: pc.green,
    yellow: pc.yellow,
    red: pc.red,
    magenta: pc.magenta,
};
12
// --- Spinner ---
// Braille frames cycled every 80ms for the stderr spinner.
const FRAMES = ['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];
/**
 * Show an animated spinner with `message` on stderr.
 * @returns Handle whose `stop(finalMessage?)` cancels the animation, clears
 *          the line, and optionally prints a final message.
 */
export function spinner(message) {
    let frame = 0;
    const timer = setInterval(() => {
        const glyph = FRAMES[frame % FRAMES.length];
        frame += 1;
        process.stderr.write(`\r${c.cyan(glyph)} ${c.dim(message)}`);
    }, 80);
    return {
        stop(finalMessage) {
            clearInterval(timer);
            process.stderr.write('\r\x1b[2K'); // return carriage + erase entire line
            if (finalMessage)
                console.error(finalMessage);
        },
    };
}
@@ -0,0 +1,3 @@
1
+ export { runOnce, interactive } from './core/loop.js';
2
+ export { localTools } from './core/tools.js';
3
+ export type { AgentConfig } from './core/loop.js';
package/dist/index.js ADDED
@@ -0,0 +1,2 @@
1
+ export { runOnce, interactive } from './core/loop.js';
2
+ export { localTools } from './core/tools.js';
package/package.json ADDED
@@ -0,0 +1,41 @@
1
+ {
2
+ "name": "@nodes/agent",
3
+ "version": "0.0.1",
4
+ "description": "Autonomous AI agent runtime for Nodes",
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "types": "dist/index.d.ts",
8
+ "bin": {
9
+ "nodes": "dist/cli.js"
10
+ },
11
+ "files": [
12
+ "dist"
13
+ ],
14
+ "dependencies": {
15
+ "@ai-sdk/anthropic": "3.0.47",
16
+ "@ai-sdk/gateway": "3.0.46",
17
+ "@ai-sdk/google": "3.0.31",
18
+ "@ai-sdk/openai": "3.0.33",
19
+ "@mariozechner/pi-tui": "^0.55.0",
20
+ "@modelcontextprotocol/sdk": "1.23.0",
21
+ "ai": "6.0.86",
22
+ "picocolors": "^1.1.1",
23
+ "zod": "3.25.76",
24
+ "@nodes/sdk": "0.0.1"
25
+ },
26
+ "devDependencies": {
27
+ "@types/node": "^24.0.0",
28
+ "tsx": "^4.19.0",
29
+ "typescript": "^5.6.0",
30
+ "vitest": "^4.0.18"
31
+ },
32
+ "scripts": {
33
+ "build": "tsc",
34
+ "dev": "tsx --env-file=.env src/cli.ts",
35
+ "prompt": "tsx --env-file=.env src/cli.ts -p",
36
+ "serve": "tsx --env-file=.env src/cli.ts --serve",
37
+ "test": "vitest run",
38
+ "test:watch": "vitest",
39
+ "test:integration": "vitest run src/tests/run-once.test.ts"
40
+ }
41
+ }