@omarestrella/ai-sdk-agent-sdk 1.0.0-beta.1 → 1.0.0-beta.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/dist/index.d.mts +41 -0
  2. package/dist/index.d.mts.map +1 -0
  3. package/dist/index.mjs +641 -0
  4. package/dist/index.mjs.map +1 -0
  5. package/package.json +9 -8
  6. package/dist/src/index.d.ts +0 -4
  7. package/dist/src/index.d.ts.map +0 -1
  8. package/dist/src/index.js +0 -6
  9. package/dist/src/index.js.map +0 -1
  10. package/dist/src/json.d.ts +0 -6
  11. package/dist/src/json.d.ts.map +0 -1
  12. package/dist/src/json.js +0 -29
  13. package/dist/src/json.js.map +0 -1
  14. package/dist/src/language-model.d.ts +0 -23
  15. package/dist/src/language-model.d.ts.map +0 -1
  16. package/dist/src/language-model.js +0 -440
  17. package/dist/src/language-model.js.map +0 -1
  18. package/dist/src/logger.d.ts +0 -15
  19. package/dist/src/logger.d.ts.map +0 -1
  20. package/dist/src/logger.js +0 -142
  21. package/dist/src/logger.js.map +0 -1
  22. package/dist/src/messages.d.ts +0 -14
  23. package/dist/src/messages.d.ts.map +0 -1
  24. package/dist/src/messages.js +0 -92
  25. package/dist/src/messages.js.map +0 -1
  26. package/dist/src/provider.d.ts +0 -15
  27. package/dist/src/provider.d.ts.map +0 -1
  28. package/dist/src/provider.js +0 -19
  29. package/dist/src/provider.js.map +0 -1
  30. package/dist/src/tools.d.ts +0 -21
  31. package/dist/src/tools.d.ts.map +0 -1
  32. package/dist/src/tools.js +0 -82
  33. package/dist/src/tools.js.map +0 -1
  34. package/dist/test/messages.test.d.ts +0 -2
  35. package/dist/test/messages.test.d.ts.map +0 -1
  36. package/dist/test/messages.test.js +0 -173
  37. package/dist/test/messages.test.js.map +0 -1
  38. package/dist/test/tools.test.d.ts +0 -2
  39. package/dist/test/tools.test.d.ts.map +0 -1
  40. package/dist/test/tools.test.js +0 -175
  41. package/dist/test/tools.test.js.map +0 -1
package/dist/index.d.mts ADDED
@@ -0,0 +1,41 @@
+ import { LanguageModelV2 } from "@ai-sdk/provider";
+
+ //#region src/provider.d.ts
+ interface ClaudeAgentProviderSettings {
+ name?: string;
+ /**
+ * Working directory for the Agent SDK.
+ * @default process.cwd()
+ */
+ cwd?: string;
+ }
+ interface ClaudeAgentProvider {
+ (modelId: string): LanguageModelV2;
+ languageModel(modelId: string): LanguageModelV2;
+ }
+ declare function createClaudeAgent(options?: ClaudeAgentProviderSettings): ClaudeAgentProvider;
+ //#endregion
+ //#region src/language-model.d.ts
+ interface ClaudeAgentLanguageModelConfig {
+ provider: string;
+ cwd?: string;
+ }
+ type DoGenerateOptions = Parameters<LanguageModelV2["doGenerate"]>[0];
+ type DoGenerateResult = Awaited<ReturnType<LanguageModelV2["doGenerate"]>>;
+ type DoStreamOptions = Parameters<LanguageModelV2["doStream"]>[0];
+ type DoStreamResult = Awaited<ReturnType<LanguageModelV2["doStream"]>>;
+ declare class ClaudeAgentLanguageModel implements LanguageModelV2 {
+ readonly specificationVersion: "v2";
+ readonly modelId: string;
+ readonly defaultObjectGenerationMode: undefined;
+ private readonly config;
+ constructor(modelId: string, config: ClaudeAgentLanguageModelConfig);
+ get provider(): string;
+ get supportedUrls(): Record<string, RegExp[]>;
+ private buildQueryOptions;
+ doGenerate(options: DoGenerateOptions): Promise<DoGenerateResult>;
+ doStream(options: DoStreamOptions): Promise<DoStreamResult>;
+ }
+ //#endregion
+ export { ClaudeAgentLanguageModel, type ClaudeAgentProvider, type ClaudeAgentProviderSettings, createClaudeAgent as create, createClaudeAgent };
+ //# sourceMappingURL=index.d.mts.map
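For orientation, a minimal sketch of consuming this declared surface (assuming the package is installed next to the AI SDK; the model id below is only a placeholder):

import { createClaudeAgent } from "@omarestrella/ai-sdk-agent-sdk";

// The provider is callable and also exposes languageModel(); each form returns a LanguageModelV2.
const claudeAgent = createClaudeAgent({ name: "claude-agent", cwd: process.cwd() });
const model = claudeAgent("claude-sonnet-4-5");
const sameModel = claudeAgent.languageModel("claude-sonnet-4-5");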
package/dist/index.d.mts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.mts","names":[],"sources":["../src/provider.ts","../src/language-model.ts"],"mappings":";;;UAQiB,2BAAA;EACf,IAAA;EADe;;;;EAOf,GAAA;AAAA;AAAA,UAGe,mBAAA;EAAA,CACd,OAAA,WAAkB,eAAA;EACnB,aAAA,CAAc,OAAA,WAAkB,eAAA;AAAA;AAAA,iBAGlB,iBAAA,CACd,OAAA,GAAS,2BAAA,GACR,mBAAA;;;UCuBc,8BAAA;EACf,QAAA;EACA,GAAA;AAAA;AAAA,KAGG,iBAAA,GAAoB,UAAA,CAAW,eAAA;AAAA,KAC/B,gBAAA,GAAmB,OAAA,CAAQ,UAAA,CAAW,eAAA;AAAA,KACtC,eAAA,GAAkB,UAAA,CAAW,eAAA;AAAA,KAC7B,cAAA,GAAiB,OAAA,CAAQ,UAAA,CAAW,eAAA;AAAA,cAO5B,wBAAA,YAAoC,eAAA;EAAA,SACtC,oBAAA;EAAA,SACA,OAAA;EAAA,SACA,2BAAA;EAAA,iBAEQ,MAAA;cAEL,OAAA,UAAiB,MAAA,EAAQ,8BAAA;EAAA,IAKjC,QAAA,CAAA;EAAA,IAIA,aAAA,CAAA,GAAiB,MAAA,SAAe,MAAA;EAAA,QAI5B,iBAAA;EAmCF,UAAA,CAAW,OAAA,EAAS,iBAAA,GAAoB,OAAA,CAAQ,gBAAA;EA0GhD,QAAA,CAAS,OAAA,EAAS,eAAA,GAAkB,OAAA,CAAQ,cAAA;AAAA"}
package/dist/index.mjs ADDED
@@ -0,0 +1,641 @@
+ import { createSdkMcpServer, query, tool } from "@anthropic-ai/claude-agent-sdk";
+ import { createWriteStream, existsSync, mkdirSync } from "fs";
+ import { homedir } from "os";
+ import { join } from "path";
+ import * as z from "zod";
+
+ //#region src/logger.ts
+ /**
+ * This is a weird file. We want to log to a file, but only if the consola package
+ * we are using is available.
+ */
+ const LOG_LEVEL = process.env.LOG_LEVEL || "debug";
+ const LOG_DIR = process.env.LOG_DIR || join(homedir(), ".cache", "ai-sdk-claude-agent");
+ const LOG_FILE = process.env.LOG_FILE || "ai-sdk-claude-agent.log";
+ if (!existsSync(LOG_DIR)) try {
+ mkdirSync(LOG_DIR, { recursive: true });
+ } catch {}
+ const LOG_FILE_PATH = join(LOG_DIR, LOG_FILE);
+ let logStream = null;
+ function getLogStream() {
+ if (!logStream) logStream = createWriteStream(LOG_FILE_PATH, { flags: "a" });
+ return logStream;
+ }
+ let consolaInstance = null;
+ let consolaLoadAttempted = false;
+ /**
+ * Dynamically loads consola if available.
+ * This is an optional peer dependency - if not installed, logging is a no-op.
+ */
+ async function loadConsola() {
+ if (consolaLoadAttempted) return consolaInstance;
+ consolaLoadAttempted = true;
+ try {
+ const { createConsola } = await import("consola");
+ const reporters = [];
+ if (LOG_FILE) reporters.push({ log: (logObj) => {
+ const levelName = {
+ 0: "ERROR",
+ 1: "WARN",
+ 2: "LOG",
+ 3: "INFO",
+ 4: "DEBUG",
+ 5: "TRACE"
+ }[logObj.level] || "LOG";
+ const message = logObj.args.map((arg) => typeof arg === "object" ? safeJsonStringify(arg) : String(arg)).join(" ");
+ const line = safeJsonStringify({
+ timestamp: logObj.date.toISOString(),
+ level: levelName,
+ message
+ }) + "\n";
+ getLogStream().write(line);
+ } });
+ consolaInstance = createConsola({
+ level: LOG_LEVEL === "trace" ? 5 : LOG_LEVEL === "debug" ? 4 : LOG_LEVEL === "info" ? 3 : LOG_LEVEL === "warn" ? 1 : 0,
+ reporters
+ });
+ consolaInstance.info("Logger initialized with consola", {
+ level: LOG_LEVEL,
+ file: LOG_FILE_PATH
+ });
+ return consolaInstance;
+ } catch {
+ return null;
+ }
+ }
+ const consolaPromise = loadConsola();
+ /**
+ * Logger interface that wraps consola if available, otherwise no-op.
+ */
+ const logger = {
+ debug(message, ...args) {
+ if (consolaInstance) consolaInstance.debug(message, ...args);
+ else consolaPromise.then((c) => c?.debug(message, ...args));
+ },
+ info(message, ...args) {
+ if (consolaInstance) consolaInstance.info(message, ...args);
+ else consolaPromise.then((c) => c?.info(message, ...args));
+ },
+ warn(message, ...args) {
+ if (consolaInstance) consolaInstance.warn(message, ...args);
+ else consolaPromise.then((c) => c?.warn(message, ...args));
+ },
+ error(message, ...args) {
+ if (consolaInstance) consolaInstance.error(message, ...args);
+ else consolaPromise.then((c) => c?.error(message, ...args));
+ },
+ log(message, ...args) {
+ this.info(message, ...args);
+ }
+ };
+
+ //#endregion
+ //#region src/json.ts
+ /**
+ * Safely serializes a value to JSON, handling circular references
+ * by replacing them with `[Circular]`.
+ */
+ function safeJsonStringify(value, space) {
+ const seen = /* @__PURE__ */ new WeakSet();
+ try {
+ return JSON.stringify(value, (key, val) => {
+ if (val === null || typeof val !== "object") return val;
+ if (seen.has(val)) return "[Circular]";
+ seen.add(val);
+ return val;
+ }, space);
+ } catch (e) {
+ const err = e;
+ logger.error("Cannot stringify JSON", {
+ error: err.message,
+ stack: err.stack
+ });
+ return "{}";
+ }
+ }
+
+ //#endregion
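A small illustration of the circular-reference handling above (hypothetical object, shown only for the replacement behaviour):

const node = { id: 1 };
node.self = node;
// JSON.stringify(node) would throw "Converting circular structure to JSON";
// safeJsonStringify substitutes the marker instead:
safeJsonStringify(node); // '{"id":1,"self":"[Circular]"}'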
+ //#region src/messages.ts
+ /**
+ * Converts an AI SDK LanguageModelV2 prompt (array of system/user/assistant/tool messages)
+ * into a system prompt string and a user prompt string for the Claude Agent SDK's query().
+ *
+ * Since we use maxTurns: 1, the Agent SDK makes a single LLM call. We serialize the full
+ * conversation history into the prompt so the LLM has context from prior turns.
+ */
+ function convertMessages(messages) {
+ logger.debug("Converting messages:", { count: messages.length });
+ const systemParts = [];
+ const conversationParts = [];
+ for (const message of messages) {
+ logger.debug("Processing message:", { role: message.role });
+ switch (message.role) {
+ case "system":
+ systemParts.push(message.content);
+ break;
+ case "user": {
+ const parts = [];
+ logger.debug("Processing user message parts:", { count: message.content.length });
+ for (const part of message.content) switch (part.type) {
+ case "text":
+ parts.push(part.text);
+ break;
+ case "file":
+ parts.push(`[File: ${part.filename ?? part.mediaType}]`);
+ break;
+ }
+ if (parts.length > 0) conversationParts.push(`[user]\n${parts.join("\n")}`);
+ break;
+ }
+ case "assistant": {
+ const parts = [];
+ logger.debug("Processing assistant message parts:", { count: message.content.length });
+ for (const part of message.content) switch (part.type) {
+ case "text":
+ parts.push(part.text);
+ break;
+ case "tool-call":
+ parts.push(`[tool_call: ${part.toolName}(${safeJsonStringify(part.input)})]`);
+ break;
+ case "reasoning":
+ parts.push(`[thinking]\n${part.text}\n[/thinking]`);
+ break;
+ }
+ if (parts.length > 0) conversationParts.push(`[assistant]\n${parts.join("\n")}`);
+ break;
+ }
+ case "tool": {
+ const parts = [];
+ for (const part of message.content) {
+ const output = part.output;
+ let outputText;
+ if (Array.isArray(output)) outputText = output.map((o) => {
+ if (o.type === "text") return o.text;
+ return `[${o.type}]`;
+ }).join("\n");
+ else outputText = typeof output === "string" ? output : safeJsonStringify(output);
+ parts.push(`[tool_result: ${part.toolName} (id: ${part.toolCallId})]\n${outputText}`);
+ }
+ if (parts.length > 0) conversationParts.push(parts.join("\n"));
+ break;
+ }
+ }
+ }
+ return {
+ systemPrompt: systemParts.join("\n\n"),
+ prompt: conversationParts.join("\n\n")
+ };
+ }
+
+ //#endregion
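To make the serialization format concrete, here is a rough sketch of what convertMessages yields for a short history (message shapes abbreviated to the fields the converter actually reads):

const { systemPrompt, prompt } = convertMessages([
  { role: "system", content: "You are terse." },
  { role: "user", content: [{ type: "text", text: "What is 2 + 2?" }] },
  { role: "assistant", content: [{ type: "tool-call", toolName: "calc", input: { expr: "2+2" } }] },
  { role: "tool", content: [{ type: "tool-result", toolCallId: "t1", toolName: "calc", output: "4" }] }
]);
// systemPrompt: "You are terse."
// prompt:
//   [user]
//   What is 2 + 2?
//
//   [assistant]
//   [tool_call: calc({"expr":"2+2"})]
//
//   [tool_result: calc (id: t1)]
//   4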
+ //#region src/tools.ts
+ /**
+ * The name of the MCP server that hosts AI SDK tools.
+ * This is used to identify tools when the Agent SDK returns them
+ * in the format: mcp__{SERVER_NAME}__{tool_name}
+ */
+ const AI_SDK_MCP_SERVER_NAME = "ai-sdk-tools";
+ /**
+ * Extracts Zod schema from AI SDK tool inputSchema using Zod 4's native
+ * JSON Schema conversion.
+ */
+ function extractZodSchema(tool) {
+ const inputSchema = tool.inputSchema;
+ if (!inputSchema || typeof inputSchema !== "object") return {};
+ try {
+ const zodSchema = z.fromJSONSchema(inputSchema);
+ if (zodSchema instanceof z.ZodObject) return zodSchema.shape;
+ return { value: zodSchema };
+ } catch (error) {
+ logger.error("Failed to convert JSON Schema to Zod:", {
+ tool: tool.name,
+ error
+ });
+ return {};
+ }
+ }
+ /**
+ * Converts AI SDK function tool definitions into an in-process Agent SDK MCP server.
+ *
+ * Each AI SDK tool becomes an MCP tool with proper parameter validation.
+ * Since we use maxTurns: 1, the Agent SDK will report tool_use blocks in the
+ * assistant message but won't execute them. The AI SDK caller handles actual
+ * tool execution.
+ */
+ function convertTools(tools) {
+ if (!tools || tools.length === 0) return void 0;
+ logger.debug("Converting tools:", {
+ count: tools.length,
+ tools: tools.map((t) => t.name)
+ });
+ const mcpTools = tools.map((aiTool) => {
+ const zodSchema = extractZodSchema(aiTool);
+ logger.debug("Creating tool:", {
+ name: aiTool.name,
+ schemaKeys: Object.keys(zodSchema)
+ });
+ return tool(aiTool.name, aiTool.description ?? "", zodSchema, async () => {
+ return { content: [{
+ type: "text",
+ text: safeJsonStringify({
+ _deferred: true,
+ message: "Tool execution deferred to AI SDK caller"
+ })
+ }] };
+ });
+ });
+ logger.info("Created MCP server with", mcpTools.length, "tools");
+ const mcpServer = createSdkMcpServer({
+ name: AI_SDK_MCP_SERVER_NAME,
+ tools: mcpTools
+ });
+ const allowedTools = tools.map((t) => `mcp__${AI_SDK_MCP_SERVER_NAME}__${t.name}`);
+ logger.debug("Allowed tools:", allowedTools);
+ return {
+ mcpServer,
+ allowedTools
+ };
+ }
+
+ //#endregion
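In practice this means an AI SDK tool named, say, get_weather is surfaced to the Agent SDK as mcp__ai-sdk-tools__get_weather and is never executed in-process; a sketch with an abbreviated tool definition:

const converted = convertTools([{
  type: "function",
  name: "get_weather",
  description: "Look up the weather for a city",
  inputSchema: { type: "object", properties: { city: { type: "string" } } }
}]);
// converted.allowedTools -> ["mcp__ai-sdk-tools__get_weather"]
// Invoking the MCP tool only returns the deferred placeholder
// { _deferred: true, message: "Tool execution deferred to AI SDK caller" };
// the real tool runs on the AI SDK side after the tool-call part is emitted.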
+ //#region src/language-model.ts
+ /**
+ * Strips the MCP prefix from tool names returned by the Agent SDK.
+ * The Agent SDK returns tools in format: mcp__{serverName}__{toolName}
+ * The AI SDK expects just the original tool name.
+ */
+ function stripMcpPrefix(toolName) {
+ const prefix = `mcp__${AI_SDK_MCP_SERVER_NAME}__`;
+ if (toolName.startsWith(prefix)) return toolName.slice(prefix.length);
+ return toolName;
+ }
+ function mapFinishReason(stopReason, hasToolCalls) {
+ if (hasToolCalls) return "tool-calls";
+ switch (stopReason) {
+ case "end_turn": return "stop";
+ case "max_tokens": return "length";
+ case "stop_sequence": return "stop";
+ case "tool_use": return "tool-calls";
+ default: return "unknown";
+ }
+ }
+ let idCounter = 0;
+ function generateId() {
+ return `agent-${Date.now()}-${++idCounter}`;
+ }
+ var ClaudeAgentLanguageModel = class {
+ specificationVersion = "v2";
+ modelId;
+ defaultObjectGenerationMode = void 0;
+ config;
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ }
+ get provider() {
+ return this.config.provider;
+ }
+ get supportedUrls() {
+ return {};
+ }
+ buildQueryOptions(options) {
+ const { systemPrompt, prompt } = convertMessages(options.prompt);
+ const convertedTools = convertTools(options.tools);
+ const abortController = new AbortController();
+ if (options.abortSignal) options.abortSignal.addEventListener("abort", () => {
+ abortController.abort();
+ });
+ const queryOptions = {
+ model: this.modelId,
+ maxTurns: 1,
+ permissionMode: "bypassPermissions",
+ allowDangerouslySkipPermissions: true,
+ abortController,
+ tools: [],
+ allowedTools: [`mcp__${AI_SDK_MCP_SERVER_NAME}__*`],
+ ...this.config.cwd ? { cwd: this.config.cwd } : {}
+ };
+ if (systemPrompt) queryOptions.systemPrompt = systemPrompt;
+ if (convertedTools?.mcpServer) queryOptions.mcpServers = { [AI_SDK_MCP_SERVER_NAME]: convertedTools.mcpServer };
+ return {
+ prompt,
+ queryOptions
+ };
+ }
+ async doGenerate(options) {
+ const warnings = [];
+ const { prompt, queryOptions } = this.buildQueryOptions(options);
+ const generator = query({
+ prompt,
+ options: queryOptions
+ });
+ const content = [];
+ let usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
+ };
+ let finishReason = "unknown";
+ let hasToolCalls = false;
+ const seenMessageIds = /* @__PURE__ */ new Set();
+ for await (const message of generator) {
+ if (message.type === "assistant") {
+ const apiMessage = message.message;
+ const messageId = message.uuid;
+ if (Array.isArray(apiMessage.content)) {
+ for (const block of apiMessage.content) if (block.type === "text") content.push({
+ type: "text",
+ text: block.text
+ });
+ else if (block.type === "tool_use") {
+ hasToolCalls = true;
+ const originalToolName = stripMcpPrefix(block.name);
+ content.push({
+ type: "tool-call",
+ toolCallId: block.id,
+ toolName: originalToolName,
+ input: typeof block.input === "string" ? block.input : safeJsonStringify(block.input)
+ });
+ } else if (block.type === "thinking") content.push({
+ type: "reasoning",
+ text: block.thinking ?? ""
+ });
+ if (apiMessage.usage && messageId && !seenMessageIds.has(messageId)) {
+ seenMessageIds.add(messageId);
+ usage = {
+ inputTokens: apiMessage.usage.input_tokens,
+ outputTokens: apiMessage.usage.output_tokens,
+ totalTokens: (apiMessage.usage.input_tokens ?? 0) + (apiMessage.usage.output_tokens ?? 0)
+ };
+ logger.debug("Usage reported in doGenerate", {
+ messageId,
+ inputTokens: usage.inputTokens,
+ outputTokens: usage.outputTokens,
+ totalTokens: usage.totalTokens
+ });
+ }
+ finishReason = mapFinishReason(apiMessage.stop_reason, hasToolCalls);
+ }
+ }
+ if (message.type === "result") {
+ if (message.usage) {
+ usage = {
+ inputTokens: message.usage.input_tokens ?? usage.inputTokens,
+ outputTokens: message.usage.output_tokens ?? usage.outputTokens,
+ totalTokens: usage.totalTokens
+ };
+ logger.debug("Final usage from result message", {
+ inputTokens: usage.inputTokens,
+ outputTokens: usage.outputTokens
+ });
+ }
+ }
+ }
+ if (usage.inputTokens !== void 0 && usage.outputTokens !== void 0) usage.totalTokens = usage.inputTokens + usage.outputTokens;
+ return {
+ content,
+ finishReason,
+ usage,
+ warnings,
+ request: { body: queryOptions },
+ response: { headers: void 0 }
+ };
+ }
+ async doStream(options) {
+ const warnings = [];
+ const { prompt, queryOptions } = this.buildQueryOptions(options);
+ queryOptions.includePartialMessages = true;
+ const generator = query({
+ prompt,
+ options: queryOptions
+ });
+ let hasToolCalls = false;
+ return {
+ stream: new ReadableStream({ async start(controller) {
+ controller.enqueue({
+ type: "stream-start",
+ warnings
+ });
+ let finishReason = "unknown";
+ let usage = {
+ inputTokens: void 0,
+ outputTokens: void 0,
+ totalTokens: void 0
+ };
+ let activeTextId = null;
+ let activeReasoningId = null;
+ const toolCalls = /* @__PURE__ */ new Map();
+ const seenMessageIds = /* @__PURE__ */ new Set();
+ try {
+ for await (const message of generator) if (message.type === "stream_event") {
+ const event = message.event;
+ if (!event || !event.type) continue;
+ switch (event.type) {
+ case "message_start": {
+ const msg = event.message;
+ if (msg) {
+ controller.enqueue({
+ type: "response-metadata",
+ id: msg.id,
+ timestamp: /* @__PURE__ */ new Date(),
+ modelId: msg.model
+ });
+ if (msg.usage) {
+ usage.inputTokens = msg.usage.input_tokens;
+ logger.debug("Initial usage reported in doStream (message_start)", { inputTokens: usage.inputTokens });
+ }
+ }
+ break;
+ }
+ case "content_block_start": {
+ const block = event.content_block;
+ const index = event.index;
+ if (block?.type === "text") {
+ activeTextId = generateId();
+ controller.enqueue({
+ type: "text-start",
+ id: activeTextId
+ });
+ } else if (block?.type === "tool_use") {
+ hasToolCalls = true;
+ const id = block.id ?? generateId();
+ toolCalls.set(index, {
+ toolCallId: id,
+ toolName: block.name,
+ argsText: ""
+ });
+ controller.enqueue({
+ type: "tool-input-start",
+ id,
+ toolName: block.name
+ });
+ } else if (block?.type === "thinking") {
+ activeReasoningId = generateId();
+ controller.enqueue({
+ type: "reasoning-start",
+ id: activeReasoningId
+ });
+ }
+ break;
+ }
+ case "content_block_delta": {
+ const delta = event.delta;
+ const index = event.index;
+ if (delta?.type === "text_delta") {
+ if (!activeTextId) {
+ activeTextId = generateId();
+ controller.enqueue({
+ type: "text-start",
+ id: activeTextId
+ });
+ }
+ controller.enqueue({
+ type: "text-delta",
+ id: activeTextId,
+ delta: delta.text
+ });
+ } else if (delta?.type === "input_json_delta") {
+ const tc = toolCalls.get(index);
+ if (tc) {
+ tc.argsText += delta.partial_json;
+ controller.enqueue({
+ type: "tool-input-delta",
+ id: tc.toolCallId,
+ delta: delta.partial_json
+ });
+ }
+ } else if (delta?.type === "thinking_delta") {
+ if (!activeReasoningId) {
+ activeReasoningId = generateId();
+ controller.enqueue({
+ type: "reasoning-start",
+ id: activeReasoningId
+ });
+ }
+ controller.enqueue({
+ type: "reasoning-delta",
+ id: activeReasoningId,
+ delta: delta.thinking
+ });
+ }
+ break;
+ }
+ case "content_block_stop": {
+ const index = event.index;
+ const tc = toolCalls.get(index);
+ if (tc) {
+ const originalToolName = stripMcpPrefix(tc.toolName);
+ controller.enqueue({
+ type: "tool-input-end",
+ id: tc.toolCallId
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: tc.toolCallId,
+ toolName: originalToolName,
+ input: tc.argsText
+ });
+ toolCalls.delete(index);
+ } else if (activeTextId) {
+ controller.enqueue({
+ type: "text-end",
+ id: activeTextId
+ });
+ activeTextId = null;
+ } else if (activeReasoningId) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: activeReasoningId
+ });
+ activeReasoningId = null;
+ }
+ break;
+ }
+ case "message_delta":
+ if (event.usage) {
+ usage.outputTokens = event.usage.output_tokens;
+ if (usage.inputTokens !== void 0) usage.totalTokens = usage.inputTokens + (event.usage.output_tokens ?? 0);
+ logger.debug("Usage delta reported in doStream (message_delta)", {
+ outputTokens: usage.outputTokens,
+ totalTokens: usage.totalTokens
+ });
+ }
+ finishReason = mapFinishReason(event.delta?.stop_reason, hasToolCalls);
+ break;
+ case "message_stop": break;
+ }
+ } else if (message.type === "assistant") {
+ const apiMessage = message.message;
+ const messageId = message.uuid;
+ if (Array.isArray(apiMessage?.content)) {
+ for (const block of apiMessage.content) if (block.type === "tool_use") hasToolCalls = true;
+ }
+ if (apiMessage?.usage && messageId && !seenMessageIds.has(messageId)) {
+ seenMessageIds.add(messageId);
+ logger.debug("Assistant message usage (already tracked from streaming)", {
+ messageId,
+ inputTokens: apiMessage.usage.input_tokens,
+ outputTokens: apiMessage.usage.output_tokens
+ });
+ }
+ if (apiMessage?.stop_reason) finishReason = mapFinishReason(apiMessage.stop_reason, hasToolCalls);
+ } else if (message.type === "result") {
+ const result = message;
+ if (result.usage) {
+ usage.inputTokens = result.usage.input_tokens ?? usage.inputTokens;
+ usage.outputTokens = result.usage.output_tokens ?? usage.outputTokens;
+ logger.debug("Final usage from result message", {
+ inputTokens: usage.inputTokens,
+ outputTokens: usage.outputTokens
+ });
+ }
+ }
+ } catch (error) {
+ controller.enqueue({
+ type: "error",
+ error
+ });
+ }
+ if (activeTextId) controller.enqueue({
+ type: "text-end",
+ id: activeTextId
+ });
+ if (activeReasoningId) controller.enqueue({
+ type: "reasoning-end",
+ id: activeReasoningId
+ });
+ if (usage.inputTokens !== void 0 && usage.outputTokens !== void 0) usage.totalTokens = usage.inputTokens + usage.outputTokens;
+ controller.enqueue({
+ type: "finish",
+ finishReason,
+ usage
+ });
+ controller.close();
+ } }),
+ request: { body: queryOptions },
+ response: { headers: void 0 }
+ };
+ }
+ };
+
+ //#endregion
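doStream emits standard LanguageModelV2 stream parts (stream-start, response-metadata, text/reasoning/tool-input deltas, tool-call, finish). Driving it directly is normally the AI SDK's job, but a consumption sketch on a recent Node.js runtime (options trimmed to the fields the implementation reads; model id a placeholder) could look like:

const model = new ClaudeAgentLanguageModel("claude-sonnet-4-5", { provider: "claude-agent" });
const { stream } = await model.doStream({
  prompt: [{ role: "user", content: [{ type: "text", text: "Hi" }] }]
});
for await (const part of stream) {
  if (part.type === "text-delta") process.stdout.write(part.delta);
  else if (part.type === "tool-call") console.log("tool requested:", part.toolName, part.input);
  else if (part.type === "finish") console.log("\nfinish:", part.finishReason, part.usage);
}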
+ //#region src/provider.ts
+ function createClaudeAgent(options = {}) {
+ const config = {
+ provider: options.name ?? "claude-agent",
+ cwd: options.cwd
+ };
+ logger.debug("Creating agent with:", safeJsonStringify(options));
+ const createLanguageModel = (modelId) => {
+ return new ClaudeAgentLanguageModel(modelId, config);
+ };
+ const provider = function(modelId) {
+ return createLanguageModel(modelId);
+ };
+ provider.languageModel = createLanguageModel;
+ return provider;
+ }
+
+ //#endregion
+ export { ClaudeAgentLanguageModel, createClaudeAgent as create, createClaudeAgent };
+ //# sourceMappingURL=index.mjs.map
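Taken together, the rebuilt single-file bundle plugs into the AI SDK as a provider. A minimal end-to-end sketch, assuming an AI SDK version that accepts LanguageModelV2 models and that @anthropic-ai/claude-agent-sdk is installed (model id is a placeholder):

import { generateText, streamText } from "ai";
import { createClaudeAgent } from "@omarestrella/ai-sdk-agent-sdk";

const claudeAgent = createClaudeAgent({ cwd: process.cwd() });

// One-shot generation goes through doGenerate().
const { text } = await generateText({
  model: claudeAgent("claude-sonnet-4-5"),
  prompt: "Summarize this repository in one sentence."
});

// Streaming goes through doStream(); partial Agent SDK messages arrive as text deltas.
const result = streamText({
  model: claudeAgent("claude-sonnet-4-5"),
  prompt: "Write a haiku about build tooling."
});
for await (const delta of result.textStream) process.stdout.write(delta);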