@nodes/agent 0.0.2 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,2 +1,10 @@
  import type { LanguageModel } from 'ai';
- export declare function resolveModel(provider: string, modelId: string): LanguageModel;
+ /**
+  * Resolve a language model from provider + model ID.
+  *
+  * Supports two formats:
+  * 1. Explicit provider: resolveModel('minimax', 'MiniMax-M2.7')
+  * 2. Slash format: resolveModel('minimax/MiniMax-M2.7')
+  *    (provider extracted from prefix, rest is model ID)
+  */
+ export declare function resolveModel(provider: string, modelId?: string): LanguageModel;
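A quick usage sketch of the widened signature. The import specifier here is an assumption for illustration; the diff doesn't show the package's export map:

```ts
// Illustrative only: the import path is assumed, not confirmed by this diff.
import { resolveModel } from '@nodes/agent';

const a = resolveModel('minimax', 'MiniMax-M2.7'); // explicit provider + model ID
const b = resolveModel('minimax/MiniMax-M2.7');    // slash format: provider parsed from the prefix
```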
@@ -2,7 +2,25 @@ import { createGateway } from '@ai-sdk/gateway';
  import { createOpenAI } from '@ai-sdk/openai';
  import { createAnthropic } from '@ai-sdk/anthropic';
  import { createGoogleGenerativeAI } from '@ai-sdk/google';
+ import { createMinimax } from 'vercel-minimax-ai-provider';
+ /**
+  * Resolve a language model from provider + model ID.
+  *
+  * Supports two formats:
+  * 1. Explicit provider: resolveModel('minimax', 'MiniMax-M2.7')
+  * 2. Slash format: resolveModel('minimax/MiniMax-M2.7')
+  *    (provider extracted from prefix, rest is model ID)
+  */
  export function resolveModel(provider, modelId) {
+     // Support "provider/model" format — split on first slash
+     if (!modelId && provider.includes('/')) {
+         const idx = provider.indexOf('/');
+         modelId = provider.slice(idx + 1);
+         provider = provider.slice(0, idx);
+     }
+     if (!modelId) {
+         throw new Error(`No model ID provided for provider "${provider}"`);
+     }
      switch (provider) {
          case 'openai':
              return createOpenAI({ apiKey: process.env.OPENAI_API_KEY }).languageModel(modelId);
@@ -10,8 +28,10 @@ export function resolveModel(provider, modelId) {
              return createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }).languageModel(modelId);
          case 'google':
              return createGoogleGenerativeAI({ apiKey: process.env.GOOGLE_API_KEY }).languageModel(modelId);
+         case 'minimax':
+             return createMinimax({ apiKey: process.env.MINIMAX_API_KEY }).languageModel(modelId);
          case 'gateway':
          default:
-             return createGateway({ apiKey: process.env.GATEWAY_API_KEY }).languageModel(modelId);
+             return createGateway({ apiKey: process.env.GATEWAY_API_KEY }).languageModel(modelId.includes('/') ? modelId : `${provider}/${modelId}`);
      }
  }
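Note the changed gateway fallback: unrecognized providers now fall through to createGateway, and the provider name is prepended when the model ID isn't already slash-qualified. A sketch of the resulting gateway model IDs, using a hypothetical 'groq' provider that this diff does not itself add:

```ts
// 'groq' is a hypothetical provider name, shown only to illustrate the fallback.
resolveModel('groq', 'llama-3.3-70b'); // no matching case: gateway gets 'groq/llama-3.3-70b'
resolveModel('groq/llama-3.3-70b');    // slash format: gateway gets 'groq/llama-3.3-70b' as-is
```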
@@ -5,7 +5,9 @@ import { createServer } from 'node:http';
  import { z } from 'zod';
  import { execBash, execReadFile, execWriteFile } from './tools.js';
  import { createCLIStream, isCLIProvider } from './cli-stream.js';
- import { JsonToSseTransformStream, readUIMessageStream } from 'ai';
+ import { resolveModel } from './providers.js';
+ import { localTools } from './tools.js';
+ import { JsonToSseTransformStream, readUIMessageStream, streamText, hasToolCall, stepCountIs } from 'ai';
  const require = createRequire(import.meta.url);
  const pkg = require('../../package.json');
  export async function serve(options = {}) {
@@ -128,27 +130,118 @@ export async function serve(options = {}) {
      res.end(JSON.stringify({ error: 'Missing prompt or messages' }));
      return;
  }
- const provider = (body.provider || 'claude-cli');
- if (!isCLIProvider(provider)) {
-     res.writeHead(400, { 'Content-Type': 'application/json' });
-     res.end(JSON.stringify({ error: `Unsupported CLI provider: ${provider}` }));
-     return;
- }
+ const rawProvider = (body.provider || 'claude-cli');
  const startTime = Date.now();
- const shouldStream = body.stream !== false;
- console.log(`[chat] → Received: provider=${provider}, model=${body.model || 'default'}, stream=${shouldStream}, prompt=${prompt.length} chars`);
- // Create CLI stream (bypass permissions for headless webhook mode)
- console.log(`[chat] Spawning CLI stream...`);
- const cliStream = createCLIStream({
-     provider: provider,
-     prompt,
-     systemPrompt: body.systemPrompt,
-     model: body.model,
-     bypassPermissions: true,
- });
- if (shouldStream) {
-     // SSE streaming response
-     const sseStream = cliStream.pipeThrough(new JsonToSseTransformStream());
+ const streamMode = body.stream === false ? 'off' : body.stream === 'raw' ? 'raw' : 'sse';
+ console.log(`[chat] → Received: provider=${rawProvider}, model=${body.model || 'default'}, stream=${streamMode}, prompt=${prompt.length} chars`);
+ // Resolve execution stream: CLI binary or API provider
+ let execStream;
+ if (isCLIProvider(rawProvider)) {
+     // CLI providers: spawn local binary
+     console.log(`[chat] Spawning CLI stream...`);
+     execStream = createCLIStream({
+         provider: rawProvider,
+         prompt,
+         systemPrompt: body.systemPrompt,
+         model: body.model,
+         bypassPermissions: true,
+     });
+ }
+ else {
+     // API providers (including nodes-cli): resolve model from body.model (e.g. "minimax/MiniMax-M2.7")
+     const modelId = body.model || rawProvider;
+     console.log(`[chat] Resolving API model: ${modelId}`);
+     try {
+         const model = resolveModel(modelId);
+         const startAt = Date.now();
+         let firstTokenAt;
+         const result = streamText({
+             model,
+             system: body.systemPrompt,
+             tools: localTools,
+             stopWhen: [hasToolCall('ai_end'), stepCountIs(50)],
+             ...(body.messages?.length
+                 ? { messages: body.messages }
+                 : { prompt }),
+         });
+         // Parse provider/model from the slash format for metadata
+         const providerName = modelId.includes('/') ? modelId.split('/')[0] : rawProvider;
+         const modelName = modelId.includes('/') ? modelId.split('/').slice(1).join('/') : modelId;
+         // Embed real metadata into the stream — same pattern as handlerV2's createMessageMetadata
+         const messageMetadata = ({ part }) => {
+             if (!firstTokenAt && part && (part.type === 'text-delta' || part.type === 'reasoning-delta')) {
+                 firstTokenAt = Date.now();
+                 return { timings: { ttfbMs: firstTokenAt - startAt } };
+             }
+             if (part.type === 'start') {
+                 return { provider: providerName, model: modelName };
+             }
+             if (part.type === 'finish') {
+                 const totalMs = Date.now() - startAt;
+                 const outputPhaseMs = firstTokenAt ? Date.now() - firstTokenAt : undefined;
+                 const usageRaw = (part.totalUsage ?? {});
+                 const outputTokenDetails = usageRaw.outputTokenDetails;
+                 const inputTokenDetails = usageRaw.inputTokenDetails;
+                 const outputTokens = typeof usageRaw.outputTokens === 'number' ? usageRaw.outputTokens : 0;
+                 const reasoningTokens = outputTokenDetails?.reasoningTokens ?? (typeof usageRaw.reasoningTokens === 'number' ? usageRaw.reasoningTokens : 0);
+                 const cachedInputTokens = inputTokenDetails?.cacheReadTokens;
+                 const tokensPerSec = (outputTokens + reasoningTokens) > 0 && outputPhaseMs
+                     ? (outputTokens + reasoningTokens) / (outputPhaseMs / 1000) : undefined;
+                 return {
+                     totalUsage: {
+                         ...usageRaw,
+                         reasoningTokens: reasoningTokens || undefined,
+                         cachedInputTokens: cachedInputTokens || undefined,
+                     },
+                     timings: { ttfbMs: firstTokenAt ? firstTokenAt - startAt : undefined, totalMs, outputPhaseMs, tokensPerSec },
+                 };
+             }
+             return undefined;
+         };
+         execStream = result.toUIMessageStream({ messageMetadata, sendReasoning: true });
+     }
+     catch (err) {
+         console.error('[chat] ✗ Model resolution failed:', err);
+         res.writeHead(400, { 'Content-Type': 'application/json' });
+         res.end(JSON.stringify({ error: err instanceof Error ? err.message : 'Failed to resolve model' }));
+         return;
+     }
+ }
+ if (streamMode === 'raw') {
+     // Raw UIMessageStream chunks — for server-to-server (Nodes web)
+     res.writeHead(200, {
+         'Content-Type': 'application/x-ndjson',
+         'Cache-Control': 'no-cache',
+         Connection: 'keep-alive',
+         'X-Accel-Buffering': 'no',
+     });
+     res.flushHeaders();
+     console.log(`[chat] Streaming raw...`);
+     let chunkCount = 0;
+     const reader = execStream.getReader();
+     try {
+         while (true) {
+             const { done, value } = await reader.read();
+             if (done)
+                 break;
+             chunkCount++;
+             // Each chunk is a JSON object from the UIMessageStream — write as NDJSON
+             const line = typeof value === 'string' ? value : JSON.stringify(value);
+             res.write(line + '\n');
+         }
+     }
+     catch (err) {
+         console.error('[chat] ✗ Stream error:', err);
+     }
+     finally {
+         const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
+         console.log(`[chat] ← Done (raw): ${chunkCount} chunks in ${elapsed}s`);
+         res.end();
+     }
+ }
+ else if (streamMode === 'sse') {
+     // SSE streaming response — for browsers/external clients
+     const sseStream = execStream.pipeThrough(new JsonToSseTransformStream());
      res.writeHead(200, {
          'Content-Type': 'text/event-stream',
          'Cache-Control': 'no-cache',
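In the new stream: 'raw' mode, each NDJSON line is one UIMessageStream chunk. A minimal consumer sketch, assuming the server is on localhost:3000 and the chat route is POST /chat (neither the port nor the route path appears in this diff):

```ts
// Minimal NDJSON consumer for stream: 'raw'. URL and route are assumptions.
const res = await fetch('http://localhost:3000/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ provider: 'minimax', model: 'minimax/MiniMax-M2.7', prompt: 'Hello', stream: 'raw' }),
});
const reader = res.body!.getReader();
const decoder = new TextDecoder();
let buffered = '';
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  buffered += decoder.decode(value, { stream: true });
  const lines = buffered.split('\n');
  buffered = lines.pop() ?? ''; // keep any trailing partial line for the next read
  for (const line of lines) {
    if (line.trim()) console.log(JSON.parse(line)); // one UIMessageStream chunk per line
  }
}
```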
@@ -157,7 +250,7 @@ export async function serve(options = {}) {
          'X-Vercel-AI-UI-Message-Stream': 'v1',
      });
      res.flushHeaders();
-     console.log(`[chat] Streaming response...`);
+     console.log(`[chat] Streaming SSE...`);
      let chunkCount = 0;
      const reader = sseStream.getReader();
      try {
@@ -177,15 +270,15 @@ export async function serve(options = {}) {
      }
      finally {
          const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
-         console.log(`[chat] ← Done: ${chunkCount} chunks in ${elapsed}s`);
+         console.log(`[chat] ← Done (SSE): ${chunkCount} chunks in ${elapsed}s`);
          res.end();
      }
  }
  else {
-     // Non-stream: consume CLI stream, return JSON with final UIMessage
+     // Non-stream: consume stream, return JSON with final UIMessage
      try {
          let lastAssistant;
-         for await (const snapshot of readUIMessageStream({ stream: cliStream })) {
+         for await (const snapshot of readUIMessageStream({ stream: execStream })) {
              if (snapshot.role === 'assistant')
                  lastAssistant = snapshot;
          }
@@ -198,7 +291,7 @@ export async function serve(options = {}) {
      catch (err) {
          console.error('[chat] ✗ Error:', err);
          res.writeHead(500, { 'Content-Type': 'application/json' });
-         res.end(JSON.stringify({ error: err instanceof Error ? err.message : 'CLI execution failed' }));
+         res.end(JSON.stringify({ error: err instanceof Error ? err.message : 'Execution failed' }));
      }
  }
  return;
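For API providers, the messageMetadata callback above attaches metadata to the UI message stream; clients receive it roughly in this shape (reconstructed from the code, not a type the package exports):

```ts
// Reconstructed from the messageMetadata callback; illustrative, not shipped by the package.
type ChatMessageMetadata = {
  provider?: string; // emitted on the 'start' part
  model?: string;    // emitted on the 'start' part
  totalUsage?: {
    outputTokens?: number;
    reasoningTokens?: number;   // outputTokenDetails.reasoningTokens, else usage.reasoningTokens
    cachedInputTokens?: number; // inputTokenDetails.cacheReadTokens
    [key: string]: unknown;     // the raw usage object is spread through
  };
  timings?: {
    ttfbMs?: number;        // first text/reasoning delta minus request start
    totalMs?: number;       // finish minus request start
    outputPhaseMs?: number; // finish minus first token
    tokensPerSec?: number;  // (outputTokens + reasoningTokens) / (outputPhaseMs / 1000)
  };
};
```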
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@nodes/agent",
-     "version": "0.0.2",
+     "version": "0.0.4",
      "description": "Autonomous AI agent runtime for Nodes",
      "type": "module",
      "main": "dist/index.js",
@@ -20,6 +20,7 @@
      "@modelcontextprotocol/sdk": "1.23.0",
      "ai": "6.0.86",
      "picocolors": "^1.1.1",
+     "vercel-minimax-ai-provider": "0.0.2",
      "zod": "3.25.76",
      "@nodes/sdk": "0.0.1"
  },