@omarestrella/ai-sdk-agent-sdk 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/LICENSE +21 -0
  2. package/README.md +91 -0
  3. package/dist/src/index.d.ts +4 -0
  4. package/dist/src/index.d.ts.map +1 -0
  5. package/dist/src/index.js +6 -0
  6. package/dist/src/index.js.map +1 -0
  7. package/dist/src/json.d.ts +6 -0
  8. package/dist/src/json.d.ts.map +1 -0
  9. package/dist/src/json.js +29 -0
  10. package/dist/src/json.js.map +1 -0
  11. package/dist/src/language-model.d.ts +23 -0
  12. package/dist/src/language-model.d.ts.map +1 -0
  13. package/dist/src/language-model.js +440 -0
  14. package/dist/src/language-model.js.map +1 -0
  15. package/dist/src/logger.d.ts +15 -0
  16. package/dist/src/logger.d.ts.map +1 -0
  17. package/dist/src/logger.js +142 -0
  18. package/dist/src/logger.js.map +1 -0
  19. package/dist/src/messages.d.ts +14 -0
  20. package/dist/src/messages.d.ts.map +1 -0
  21. package/dist/src/messages.js +92 -0
  22. package/dist/src/messages.js.map +1 -0
  23. package/dist/src/provider.d.ts +15 -0
  24. package/dist/src/provider.d.ts.map +1 -0
  25. package/dist/src/provider.js +19 -0
  26. package/dist/src/provider.js.map +1 -0
  27. package/dist/src/tools.d.ts +21 -0
  28. package/dist/src/tools.d.ts.map +1 -0
  29. package/dist/src/tools.js +82 -0
  30. package/dist/src/tools.js.map +1 -0
  31. package/dist/test/messages.test.d.ts +2 -0
  32. package/dist/test/messages.test.d.ts.map +1 -0
  33. package/dist/test/messages.test.js +173 -0
  34. package/dist/test/messages.test.js.map +1 -0
  35. package/dist/test/tools.test.d.ts +2 -0
  36. package/dist/test/tools.test.d.ts.map +1 -0
  37. package/dist/test/tools.test.js +175 -0
  38. package/dist/test/tools.test.js.map +1 -0
  39. package/package.json +70 -0
  40. package/src/index.ts +11 -0
  41. package/src/json.ts +38 -0
  42. package/src/language-model.ts +526 -0
  43. package/src/logger.ts +171 -0
  44. package/src/messages.ts +102 -0
  45. package/src/provider.ts +45 -0
  46. package/src/tools.ts +112 -0
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Omar Estrella
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,91 @@
+ # ai-sdk-claude-agent
+
+ [AI SDK](https://sdk.vercel.ai/) provider that wraps the [Claude Agent SDK](https://platform.claude.com/docs/en/agent-sdk/overview), exposing it as a standard `LanguageModelV2`.
+
+ ## Install
+
+ ```bash
+ bun install ai-sdk-claude-agent @anthropic-ai/claude-agent-sdk
+ ```
+
+ ## Usage
+
+ ```ts
+ import { createClaudeAgent } from "ai-sdk-claude-agent";
+ import { generateText, streamText } from "ai";
+
+ const provider = createClaudeAgent();
+
+ // Generate text
+ const { text } = await generateText({
+   model: provider("claude-sonnet-4-5-20250929"),
+   prompt: "Explain how async generators work in JavaScript.",
+ });
+
+ // Stream text
+ const result = streamText({
+   model: provider("claude-sonnet-4-5-20250929"),
+   prompt: "Write a haiku about TypeScript.",
+ });
+
+ for await (const chunk of result.textStream) {
+   process.stdout.write(chunk);
+ }
+ ```
+
+ ## How it works
+
+ - Calls the Agent SDK's `query()` with `maxTurns: 1` so the AI SDK controls the agentic loop
+ - All built-in Agent SDK tools are disabled (`allowedTools: []`)
+ - AI SDK tool definitions are passed through as in-process MCP tools via `createSdkMcpServer()`
+ - Streaming uses `includePartialMessages: true` to get raw Anthropic events, mapped to `LanguageModelV2StreamPart`
+
+ ## Provider options
+
+ ```ts
+ const provider = createClaudeAgent({
+   name: "my-agent", // provider display name (default: "claude-agent")
+   cwd: "/path/to/dir", // working directory for the agent
+ });
+ ```
+
+ ## Available models
+
+ | Model             | ID                           |
+ | ----------------- | ---------------------------- |
+ | Claude Sonnet 4.5 | `claude-sonnet-4-5-20250929` |
+ | Claude Opus 4.5   | `claude-opus-4-5-20251101`   |
+ | Claude Haiku 4.5  | `claude-haiku-4-5-20251001`  |
+
+ Any model ID supported by the Agent SDK can be used.
+
+ ## Using with opencode
+
+ The Claude Agent SDK requires either a Claude account (signed in through Claude Code) or an Anthropic API key.
+
+ Install the [Claude Code CLI](https://code.claude.com/docs/en/setup) before you start.
+
+ Add to your `opencode.json`:
+
+ ```json
+ {
+   "provider": {
+     "claude-agent": {
+       "npm": "ai-sdk-claude-agent",
+       "name": "Claude Agent SDK",
+       "models": {
+         "claude-sonnet-4-5-20250929": {
+           "name": "Claude Sonnet 4.5 (Agent SDK)",
+           "tool_call": true,
+           "reasoning": true,
+           "limit": { "context": 200000, "output": 64000 }
+         }
+       }
+     }
+   }
+ }
+ ```
+
+ ## License
+
+ MIT
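The README's "How it works" notes say AI SDK tool definitions are forwarded to the agent as in-process MCP tools and reported back under their original names. As a rough illustration from the caller's side, here is a minimal sketch assuming AI SDK v5-style tool definitions; the `weather` tool, its schema, and its result shape are invented for the example.

```ts
import { createClaudeAgent } from "ai-sdk-claude-agent";
import { generateText, tool } from "ai";
import { z } from "zod";

const provider = createClaudeAgent();

// Hypothetical tool: the provider is expected to expose it to the agent as an
// in-process MCP tool and strip the mcp__ prefix from the tool calls it reports.
const weather = tool({
  description: "Look up the current temperature for a city",
  inputSchema: z.object({ city: z.string() }),
  execute: async ({ city }) => ({ city, temperatureC: 21 }),
});

const { text, toolCalls } = await generateText({
  model: provider("claude-sonnet-4-5-20250929"),
  tools: { weather },
  prompt: "What is the weather like in Lisbon?",
});

console.log(toolCalls); // tool calls surface under their original names, e.g. "weather"
console.log(text);
```

Nothing here is specific to this provider beyond `createClaudeAgent()`; the point is that ordinary AI SDK tooling is expected to work unchanged.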
package/dist/src/index.d.ts ADDED
@@ -0,0 +1,4 @@
+ export { createClaudeAgent, type ClaudeAgentProvider, type ClaudeAgentProviderSettings, } from "./provider";
+ export { ClaudeAgentLanguageModel } from "./language-model";
+ export { createClaudeAgent as create } from "./provider";
+ //# sourceMappingURL=index.d.ts.map
package/dist/src/index.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,iBAAiB,EACjB,KAAK,mBAAmB,EACxB,KAAK,2BAA2B,GACjC,MAAM,YAAY,CAAC;AAEpB,OAAO,EAAE,wBAAwB,EAAE,MAAM,kBAAkB,CAAC;AAI5D,OAAO,EAAE,iBAAiB,IAAI,MAAM,EAAE,MAAM,YAAY,CAAC"}
package/dist/src/index.js ADDED
@@ -0,0 +1,6 @@
+ export { createClaudeAgent, } from "./provider";
+ export { ClaudeAgentLanguageModel } from "./language-model";
+ // `create` alias for compatibility with opencode's dynamic provider loader,
+ // which looks for a `create` function export from npm packages.
+ export { createClaudeAgent as create } from "./provider";
+ //# sourceMappingURL=index.js.map
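The comment in `dist/src/index.js` explains that `create` exists purely as an alias so opencode's dynamic provider loader can find a `create` export. Since it is a re-export of the same binding, both names should resolve to the same factory; a small sketch:

```ts
import { create, createClaudeAgent } from "ai-sdk-claude-agent";

// Re-exported alias: both names refer to the same factory function.
console.log(create === createClaudeAgent); // true

// opencode calls `create(...)`; direct consumers typically use `createClaudeAgent(...)`.
const provider = create({ cwd: process.cwd() });
const model = provider("claude-sonnet-4-5-20250929");
console.log(model.provider, model.modelId);
```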
package/dist/src/index.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EACL,iBAAiB,GAGlB,MAAM,YAAY,CAAC;AAEpB,OAAO,EAAE,wBAAwB,EAAE,MAAM,kBAAkB,CAAC;AAE5D,4EAA4E;AAC5E,gEAAgE;AAChE,OAAO,EAAE,iBAAiB,IAAI,MAAM,EAAE,MAAM,YAAY,CAAC"}
package/dist/src/json.d.ts ADDED
@@ -0,0 +1,6 @@
+ /**
+  * Safely serializes a value to JSON, handling circular references
+  * by replacing them with `[Circular]`.
+  */
+ export declare function safeJsonStringify(value: unknown, space?: string | number): string;
+ //# sourceMappingURL=json.d.ts.map
package/dist/src/json.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"json.d.ts","sourceRoot":"","sources":["../../src/json.ts"],"names":[],"mappings":"AAEA;;;GAGG;AACH,wBAAgB,iBAAiB,CAC/B,KAAK,EAAE,OAAO,EACd,KAAK,CAAC,EAAE,MAAM,GAAG,MAAM,GACtB,MAAM,CA4BR"}
package/dist/src/json.js ADDED
@@ -0,0 +1,29 @@
+ import { logger } from "./logger";
+ /**
+  * Safely serializes a value to JSON, handling circular references
+  * by replacing them with `[Circular]`.
+  */
+ export function safeJsonStringify(value, space) {
+     const seen = new WeakSet();
+     try {
+         return JSON.stringify(value, (key, val) => {
+             if (val === null || typeof val !== "object") {
+                 return val;
+             }
+             if (seen.has(val)) {
+                 return "[Circular]";
+             }
+             seen.add(val);
+             return val;
+         }, space);
+     }
+     catch (e) {
+         const err = e;
+         logger.error("Cannot stringify JSON", {
+             error: err.message,
+             stack: err.stack,
+         });
+         return "{}";
+     }
+ }
+ //# sourceMappingURL=json.js.map
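For reference, a short usage sketch of `safeJsonStringify` based on the behavior above: circular references are replaced with the string `[Circular]`, and any other serialization failure is logged and falls back to `"{}"`. The relative import path is illustrative only.

```ts
import { safeJsonStringify } from "./json"; // illustrative path

interface Node {
  name: string;
  self?: Node;
}

const node: Node = { name: "root" };
node.self = node; // introduce a cycle

// JSON.stringify(node) would throw; the safe variant substitutes a marker.
console.log(safeJsonStringify(node, 2));
// {
//   "name": "root",
//   "self": "[Circular]"
// }
```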
package/dist/src/json.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"json.js","sourceRoot":"","sources":["../../src/json.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AAElC;;;GAGG;AACH,MAAM,UAAU,iBAAiB,CAC/B,KAAc,EACd,KAAuB;IAEvB,MAAM,IAAI,GAAG,IAAI,OAAO,EAAE,CAAC;IAE3B,IAAI,CAAC;QACH,OAAO,IAAI,CAAC,SAAS,CACnB,KAAK,EACL,CAAC,GAAG,EAAE,GAAG,EAAE,EAAE;YACX,IAAI,GAAG,KAAK,IAAI,IAAI,OAAO,GAAG,KAAK,QAAQ,EAAE,CAAC;gBAC5C,OAAO,GAAG,CAAC;YACb,CAAC;YAED,IAAI,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,EAAE,CAAC;gBAClB,OAAO,YAAY,CAAC;YACtB,CAAC;YAED,IAAI,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;YACd,OAAO,GAAG,CAAC;QACb,CAAC,EACD,KAAK,CACN,CAAC;IACJ,CAAC;IAAC,OAAO,CAAC,EAAE,CAAC;QACX,MAAM,GAAG,GAAG,CAAU,CAAC;QACvB,MAAM,CAAC,KAAK,CAAC,uBAAuB,EAAE;YACpC,KAAK,EAAE,GAAG,CAAC,OAAO;YAClB,KAAK,EAAE,GAAG,CAAC,KAAK;SACjB,CAAC,CAAC;QACH,OAAO,IAAI,CAAC;IACd,CAAC;AACH,CAAC"}
package/dist/src/language-model.d.ts ADDED
@@ -0,0 +1,23 @@
+ import type { LanguageModelV2 } from "@ai-sdk/provider";
+ export interface ClaudeAgentLanguageModelConfig {
+     provider: string;
+     cwd?: string;
+ }
+ type DoGenerateOptions = Parameters<LanguageModelV2["doGenerate"]>[0];
+ type DoGenerateResult = Awaited<ReturnType<LanguageModelV2["doGenerate"]>>;
+ type DoStreamOptions = Parameters<LanguageModelV2["doStream"]>[0];
+ type DoStreamResult = Awaited<ReturnType<LanguageModelV2["doStream"]>>;
+ export declare class ClaudeAgentLanguageModel implements LanguageModelV2 {
+     readonly specificationVersion: "v2";
+     readonly modelId: string;
+     readonly defaultObjectGenerationMode: undefined;
+     private readonly config;
+     constructor(modelId: string, config: ClaudeAgentLanguageModelConfig);
+     get provider(): string;
+     get supportedUrls(): Record<string, RegExp[]>;
+     private buildQueryOptions;
+     doGenerate(options: DoGenerateOptions): Promise<DoGenerateResult>;
+     doStream(options: DoStreamOptions): Promise<DoStreamResult>;
+ }
+ export {};
+ //# sourceMappingURL=language-model.d.ts.map
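The declaration above is the surface a direct consumer programs against: a `LanguageModelV2` whose constructor takes a model ID plus a `ClaudeAgentLanguageModelConfig`. The `createClaudeAgent()` factory normally does this wiring, but a hedged sketch of constructing the class by hand, using the package's public re-export, might look like:

```ts
import { ClaudeAgentLanguageModel } from "ai-sdk-claude-agent";
import { generateText } from "ai";

// `provider` is the name reported back to the AI SDK; `cwd` is optional,
// matching ClaudeAgentLanguageModelConfig above.
const model = new ClaudeAgentLanguageModel("claude-sonnet-4-5-20250929", {
  provider: "claude-agent",
  cwd: "/path/to/project",
});

const { text } = await generateText({
  model,
  prompt: "Summarize what a LanguageModelV2 implementation has to provide.",
});
console.log(text);
```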
package/dist/src/language-model.d.ts.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"file":"language-model.d.ts","sourceRoot":"","sources":["../../src/language-model.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACV,eAAe,EAOhB,MAAM,kBAAkB,CAAC;AAwC1B,MAAM,WAAW,8BAA8B;IAC7C,QAAQ,EAAE,MAAM,CAAC;IACjB,GAAG,CAAC,EAAE,MAAM,CAAC;CACd;AAED,KAAK,iBAAiB,GAAG,UAAU,CAAC,eAAe,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AACtE,KAAK,gBAAgB,GAAG,OAAO,CAAC,UAAU,CAAC,eAAe,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC;AAC3E,KAAK,eAAe,GAAG,UAAU,CAAC,eAAe,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;AAClE,KAAK,cAAc,GAAG,OAAO,CAAC,UAAU,CAAC,eAAe,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC;AAOvE,qBAAa,wBAAyB,YAAW,eAAe;IAC9D,QAAQ,CAAC,oBAAoB,EAAG,IAAI,CAAU;IAC9C,QAAQ,CAAC,OAAO,EAAE,MAAM,CAAC;IACzB,QAAQ,CAAC,2BAA2B,YAAa;IAEjD,OAAO,CAAC,QAAQ,CAAC,MAAM,CAAiC;gBAE5C,OAAO,EAAE,MAAM,EAAE,MAAM,EAAE,8BAA8B;IAKnE,IAAI,QAAQ,IAAI,MAAM,CAErB;IAED,IAAI,aAAa,IAAI,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAE5C;IAED,OAAO,CAAC,iBAAiB;IAmCnB,UAAU,CAAC,OAAO,EAAE,iBAAiB,GAAG,OAAO,CAAC,gBAAgB,CAAC;IA0GjE,QAAQ,CAAC,OAAO,EAAE,eAAe,GAAG,OAAO,CAAC,cAAc,CAAC;CA6SlE"}
package/dist/src/language-model.js ADDED
@@ -0,0 +1,440 @@
+ import { query } from "@anthropic-ai/claude-agent-sdk";
+ import { safeJsonStringify } from "./json";
+ import { convertMessages } from "./messages";
+ import { AI_SDK_MCP_SERVER_NAME, convertTools } from "./tools";
+ import { logger } from "./logger";
+ /**
+  * Strips the MCP prefix from tool names returned by the Agent SDK.
+  * The Agent SDK returns tools in format: mcp__{serverName}__{toolName}
+  * The AI SDK expects just the original tool name.
+  */
+ function stripMcpPrefix(toolName) {
+     const prefix = `mcp__${AI_SDK_MCP_SERVER_NAME}__`;
+     if (toolName.startsWith(prefix)) {
+         return toolName.slice(prefix.length);
+     }
+     return toolName;
+ }
+ function mapFinishReason(stopReason, hasToolCalls) {
+     if (hasToolCalls)
+         return "tool-calls";
+     switch (stopReason) {
+         case "end_turn":
+             return "stop";
+         case "max_tokens":
+             return "length";
+         case "stop_sequence":
+             return "stop";
+         case "tool_use":
+             return "tool-calls";
+         default:
+             return "unknown";
+     }
+ }
+ let idCounter = 0;
+ function generateId() {
+     return `agent-${Date.now()}-${++idCounter}`;
+ }
+ export class ClaudeAgentLanguageModel {
+     specificationVersion = "v2";
+     modelId;
+     defaultObjectGenerationMode = undefined;
+     config;
+     constructor(modelId, config) {
+         this.modelId = modelId;
+         this.config = config;
+     }
+     get provider() {
+         return this.config.provider;
+     }
+     get supportedUrls() {
+         return {};
+     }
+     buildQueryOptions(options) {
+         const { systemPrompt, prompt } = convertMessages(options.prompt);
+         const convertedTools = convertTools(options.tools);
+         const abortController = new AbortController();
+         if (options.abortSignal) {
+             options.abortSignal.addEventListener("abort", () => {
+                 abortController.abort();
+             });
+         }
+         const queryOptions = {
+             model: this.modelId,
+             maxTurns: 1,
+             permissionMode: "bypassPermissions",
+             allowDangerouslySkipPermissions: true,
+             abortController,
+             tools: [],
+             allowedTools: [`mcp__${AI_SDK_MCP_SERVER_NAME}__*`],
+             ...(this.config.cwd ? { cwd: this.config.cwd } : {}),
+         };
+         if (systemPrompt) {
+             queryOptions.systemPrompt = systemPrompt;
+         }
+         if (convertedTools?.mcpServer) {
+             queryOptions.mcpServers = {
+                 [AI_SDK_MCP_SERVER_NAME]: convertedTools.mcpServer,
+             };
+         }
+         return { prompt, queryOptions };
+     }
+     async doGenerate(options) {
+         const warnings = [];
+         const { prompt, queryOptions } = this.buildQueryOptions(options);
+         const generator = query({
+             prompt,
+             options: queryOptions,
+         });
+         const content = [];
+         let usage = {
+             inputTokens: undefined,
+             outputTokens: undefined,
+             totalTokens: undefined,
+         };
+         let finishReason = "unknown";
+         let hasToolCalls = false;
+         // Track message UUIDs to avoid counting usage multiple times
+         // Per SDK docs: all messages with same ID have identical usage
+         const seenMessageIds = new Set();
+         for await (const message of generator) {
+             if (message.type === "assistant") {
+                 const apiMessage = message.message;
+                 const messageId = message.uuid;
+                 if (Array.isArray(apiMessage.content)) {
+                     for (const block of apiMessage.content) {
+                         if (block.type === "text") {
+                             content.push({ type: "text", text: block.text });
+                         }
+                         else if (block.type === "tool_use") {
+                             hasToolCalls = true;
+                             const originalToolName = stripMcpPrefix(block.name);
+                             content.push({
+                                 type: "tool-call",
+                                 toolCallId: block.id,
+                                 toolName: originalToolName,
+                                 input: typeof block.input === "string"
+                                     ? block.input
+                                     : safeJsonStringify(block.input),
+                             });
+                         }
+                         else if (block.type === "thinking") {
+                             content.push({
+                                 type: "reasoning",
+                                 text: block.thinking ?? "",
+                             });
+                         }
+                     }
+                     // Only record usage once per unique message ID
+                     if (apiMessage.usage && messageId && !seenMessageIds.has(messageId)) {
+                         seenMessageIds.add(messageId);
+                         usage = {
+                             inputTokens: apiMessage.usage.input_tokens,
+                             outputTokens: apiMessage.usage.output_tokens,
+                             totalTokens: (apiMessage.usage.input_tokens ?? 0) +
+                                 (apiMessage.usage.output_tokens ?? 0),
+                         };
+                         logger.debug("Usage reported in doGenerate", {
+                             messageId,
+                             inputTokens: usage.inputTokens,
+                             outputTokens: usage.outputTokens,
+                             totalTokens: usage.totalTokens,
+                         });
+                     }
+                     finishReason = mapFinishReason(apiMessage.stop_reason, hasToolCalls);
+                 }
+             }
+             if (message.type === "result") {
+                 // Result message contains cumulative usage from all steps
+                 if (message.usage) {
+                     usage = {
+                         inputTokens: message.usage.input_tokens ?? usage.inputTokens,
+                         outputTokens: message.usage.output_tokens ?? usage.outputTokens,
+                         totalTokens: usage.totalTokens,
+                     };
+                     logger.debug("Final usage from result message", {
+                         inputTokens: usage.inputTokens,
+                         outputTokens: usage.outputTokens,
+                     });
+                 }
+             }
+         }
+         // Calculate total tokens if we have both input and output
+         if (usage.inputTokens !== undefined && usage.outputTokens !== undefined) {
+             usage.totalTokens = usage.inputTokens + usage.outputTokens;
+         }
+         return {
+             content,
+             finishReason,
+             usage,
+             warnings,
+             request: { body: queryOptions },
+             response: {
+                 headers: undefined,
+             },
+         };
+     }
+     async doStream(options) {
+         const warnings = [];
+         const { prompt, queryOptions } = this.buildQueryOptions(options);
+         // Enable partial messages to get raw Anthropic streaming events
+         queryOptions.includePartialMessages = true;
+         const generator = query({
+             prompt,
+             options: queryOptions,
+         });
+         let hasToolCalls = false;
+         const stream = new ReadableStream({
+             async start(controller) {
+                 controller.enqueue({ type: "stream-start", warnings });
+                 let finishReason = "unknown";
+                 let usage = {
+                     inputTokens: undefined,
+                     outputTokens: undefined,
+                     totalTokens: undefined,
+                 };
+                 // Track active text block for start/delta/end lifecycle
+                 let activeTextId = null;
+                 // Track active reasoning block
+                 let activeReasoningId = null;
+                 // Track tool calls being streamed (keyed by content block index)
+                 const toolCalls = new Map();
+                 // Track message UUIDs to avoid counting usage multiple times
+                 // Per SDK docs: all messages with same ID have identical usage
+                 const seenMessageIds = new Set();
+                 try {
+                     for await (const message of generator) {
+                         if (message.type === "stream_event") {
+                             const event = message.event;
+                             if (!event || !event.type)
+                                 continue;
+                             switch (event.type) {
+                                 case "message_start": {
+                                     const msg = event.message;
+                                     if (msg) {
+                                         controller.enqueue({
+                                             type: "response-metadata",
+                                             id: msg.id,
+                                             timestamp: new Date(),
+                                             modelId: msg.model,
+                                         });
+                                         if (msg.usage) {
+                                             usage.inputTokens = msg.usage.input_tokens;
+                                             logger.debug("Initial usage reported in doStream (message_start)", {
+                                                 inputTokens: usage.inputTokens,
+                                             });
+                                         }
+                                     }
+                                     break;
+                                 }
+                                 case "content_block_start": {
+                                     const block = event.content_block;
+                                     const index = event.index;
+                                     if (block?.type === "text") {
+                                         activeTextId = generateId();
+                                         controller.enqueue({
+                                             type: "text-start",
+                                             id: activeTextId,
+                                         });
+                                     }
+                                     else if (block?.type === "tool_use") {
+                                         hasToolCalls = true;
+                                         const id = block.id ?? generateId();
+                                         toolCalls.set(index, {
+                                             toolCallId: id,
+                                             toolName: block.name,
+                                             argsText: "",
+                                         });
+                                         controller.enqueue({
+                                             type: "tool-input-start",
+                                             id,
+                                             toolName: block.name,
+                                         });
+                                     }
+                                     else if (block?.type === "thinking") {
+                                         activeReasoningId = generateId();
+                                         controller.enqueue({
+                                             type: "reasoning-start",
+                                             id: activeReasoningId,
+                                         });
+                                     }
+                                     break;
+                                 }
+                                 case "content_block_delta": {
+                                     const delta = event.delta;
+                                     const index = event.index;
+                                     if (delta?.type === "text_delta") {
+                                         if (!activeTextId) {
+                                             activeTextId = generateId();
+                                             controller.enqueue({
+                                                 type: "text-start",
+                                                 id: activeTextId,
+                                             });
+                                         }
+                                         controller.enqueue({
+                                             type: "text-delta",
+                                             id: activeTextId,
+                                             delta: delta.text,
+                                         });
+                                     }
+                                     else if (delta?.type === "input_json_delta") {
+                                         const tc = toolCalls.get(index);
+                                         if (tc) {
+                                             tc.argsText += delta.partial_json;
+                                             controller.enqueue({
+                                                 type: "tool-input-delta",
+                                                 id: tc.toolCallId,
+                                                 delta: delta.partial_json,
+                                             });
+                                         }
+                                     }
+                                     else if (delta?.type === "thinking_delta") {
+                                         if (!activeReasoningId) {
+                                             activeReasoningId = generateId();
+                                             controller.enqueue({
+                                                 type: "reasoning-start",
+                                                 id: activeReasoningId,
+                                             });
+                                         }
+                                         controller.enqueue({
+                                             type: "reasoning-delta",
+                                             id: activeReasoningId,
+                                             delta: delta.thinking,
+                                         });
+                                     }
+                                     break;
+                                 }
+                                 case "content_block_stop": {
+                                     const index = event.index;
+                                     const tc = toolCalls.get(index);
+                                     if (tc) {
+                                         const originalToolName = stripMcpPrefix(tc.toolName);
+                                         // End the tool input stream
+                                         controller.enqueue({
+                                             type: "tool-input-end",
+                                             id: tc.toolCallId,
+                                         });
+                                         // Emit the complete tool call
+                                         controller.enqueue({
+                                             type: "tool-call",
+                                             toolCallId: tc.toolCallId,
+                                             toolName: originalToolName,
+                                             input: tc.argsText,
+                                         });
+                                         toolCalls.delete(index);
+                                     }
+                                     else if (activeTextId) {
+                                         controller.enqueue({
+                                             type: "text-end",
+                                             id: activeTextId,
+                                         });
+                                         activeTextId = null;
+                                     }
+                                     else if (activeReasoningId) {
+                                         controller.enqueue({
+                                             type: "reasoning-end",
+                                             id: activeReasoningId,
+                                         });
+                                         activeReasoningId = null;
+                                     }
+                                     break;
+                                 }
+                                 case "message_delta": {
+                                     if (event.usage) {
+                                         usage.outputTokens = event.usage.output_tokens;
+                                         if (usage.inputTokens !== undefined) {
+                                             usage.totalTokens =
+                                                 usage.inputTokens + (event.usage.output_tokens ?? 0);
+                                         }
+                                         logger.debug("Usage delta reported in doStream (message_delta)", {
+                                             outputTokens: usage.outputTokens,
+                                             totalTokens: usage.totalTokens,
+                                         });
+                                     }
+                                     finishReason = mapFinishReason(event.delta?.stop_reason, hasToolCalls);
+                                     break;
+                                 }
+                                 case "message_stop": {
+                                     // Final streaming event
+                                     break;
+                                 }
+                             }
+                         }
+                         else if (message.type === "assistant") {
+                             // Full assistant message — only update finish reason, not usage
+                             // Usage is tracked from streaming events (message_start, message_delta)
+                             // Per SDK docs: assistant messages share usage with streaming events
+                             const apiMessage = message.message;
+                             const messageId = message.uuid;
+                             if (Array.isArray(apiMessage?.content)) {
+                                 for (const block of apiMessage.content) {
+                                     if (block.type === "tool_use") {
+                                         hasToolCalls = true;
+                                     }
+                                 }
+                             }
+                             // Don't overwrite usage from streaming events - they are more accurate
+                             // and already tracked. Only log if this is a new message ID.
+                             if (apiMessage?.usage &&
+                                 messageId &&
+                                 !seenMessageIds.has(messageId)) {
+                                 seenMessageIds.add(messageId);
+                                 logger.debug("Assistant message usage (already tracked from streaming)", {
+                                     messageId,
+                                     inputTokens: apiMessage.usage.input_tokens,
+                                     outputTokens: apiMessage.usage.output_tokens,
+                                 });
+                             }
+                             if (apiMessage?.stop_reason) {
+                                 finishReason = mapFinishReason(apiMessage.stop_reason, hasToolCalls);
+                             }
+                         }
+                         else if (message.type === "result") {
+                             // Final result with cumulative usage from all steps
+                             const result = message;
+                             if (result.usage) {
+                                 usage.inputTokens =
+                                     result.usage.input_tokens ?? usage.inputTokens;
+                                 usage.outputTokens =
+                                     result.usage.output_tokens ?? usage.outputTokens;
+                                 logger.debug("Final usage from result message", {
+                                     inputTokens: usage.inputTokens,
+                                     outputTokens: usage.outputTokens,
+                                 });
+                             }
+                         }
+                     }
+                 }
+                 catch (error) {
+                     controller.enqueue({ type: "error", error });
+                 }
+                 // Close any dangling blocks
+                 if (activeTextId) {
+                     controller.enqueue({ type: "text-end", id: activeTextId });
+                 }
+                 if (activeReasoningId) {
+                     controller.enqueue({ type: "reasoning-end", id: activeReasoningId });
+                 }
+                 // Calculate total tokens if not already done
+                 if (usage.inputTokens !== undefined &&
+                     usage.outputTokens !== undefined) {
+                     usage.totalTokens = usage.inputTokens + usage.outputTokens;
+                 }
+                 controller.enqueue({
+                     type: "finish",
+                     finishReason,
+                     usage,
+                 });
+                 controller.close();
+             },
+         });
+         return {
+             stream,
+             request: { body: queryOptions },
+             response: {
+                 headers: undefined,
+             },
+         };
+     }
+ }
+ //# sourceMappingURL=language-model.js.map
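`doStream` above maps raw Anthropic stream events onto `LanguageModelV2StreamPart`s: `stream-start`, `response-metadata`, `text-start`/`text-delta`/`text-end`, `reasoning-*`, `tool-input-*` followed by a final `tool-call`, and a closing `finish` with usage. Callers normally consume this through `streamText`, but a minimal sketch of reading the low-level parts directly, with the call options abbreviated and cast purely for illustration, could look like:

```ts
import { ClaudeAgentLanguageModel } from "ai-sdk-claude-agent";

const model = new ClaudeAgentLanguageModel("claude-sonnet-4-5-20250929", {
  provider: "claude-agent",
});

// Only `prompt` is shown; the remaining LanguageModelV2 call options are
// omitted and the cast just keeps the sketch short.
const { stream } = await model.doStream({
  prompt: [{ role: "user", content: [{ type: "text", text: "Say hello." }] }],
} as Parameters<typeof model.doStream>[0]);

const reader = stream.getReader();
while (true) {
  const result = await reader.read();
  if (result.done) break;
  const part = result.value;
  if (part.type === "text-delta") {
    process.stdout.write(part.delta); // incremental text from text_delta events
  } else if (part.type === "tool-call") {
    console.log("\ntool call:", part.toolName, part.input);
  } else if (part.type === "finish") {
    console.log("\nfinish:", part.finishReason, part.usage);
  }
}
```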