@oh-my-pi/pi-ai 0.1.0

package/src/models.ts ADDED
@@ -0,0 +1,68 @@
+ import { MODELS } from "./models.generated";
+ import type { Api, KnownProvider, Model, Usage } from "./types";
+
+ const modelRegistry: Map<string, Map<string, Model<Api>>> = new Map();
+
+ // Initialize registry from MODELS on module load
+ for (const [provider, models] of Object.entries(MODELS)) {
+   const providerModels = new Map<string, Model<Api>>();
+   for (const [id, model] of Object.entries(models)) {
+     providerModels.set(id, model as Model<Api>);
+   }
+   modelRegistry.set(provider, providerModels);
+ }
+
+ type ModelApi<
+   TProvider extends KnownProvider,
+   TModelId extends keyof (typeof MODELS)[TProvider],
+ > = (typeof MODELS)[TProvider][TModelId] extends { api: infer TApi } ? (TApi extends Api ? TApi : never) : never;
+
+ export function getModel<TProvider extends KnownProvider, TModelId extends keyof (typeof MODELS)[TProvider]>(
+   provider: TProvider,
+   modelId: TModelId,
+ ): Model<ModelApi<TProvider, TModelId>> {
+   return modelRegistry.get(provider)?.get(modelId as string) as Model<ModelApi<TProvider, TModelId>>;
+ }
+
+ export function getProviders(): KnownProvider[] {
+   return Array.from(modelRegistry.keys()) as KnownProvider[];
+ }
+
+ export function getModels<TProvider extends KnownProvider>(
+   provider: TProvider,
+ ): Model<ModelApi<TProvider, keyof (typeof MODELS)[TProvider]>>[] {
+   const models = modelRegistry.get(provider);
+   return models ? (Array.from(models.values()) as Model<ModelApi<TProvider, keyof (typeof MODELS)[TProvider]>>[]) : [];
+ }
+
+ export function calculateCost<TApi extends Api>(model: Model<TApi>, usage: Usage): Usage["cost"] {
+   usage.cost.input = (model.cost.input / 1000000) * usage.input;
+   usage.cost.output = (model.cost.output / 1000000) * usage.output;
+   usage.cost.cacheRead = (model.cost.cacheRead / 1000000) * usage.cacheRead;
+   usage.cost.cacheWrite = (model.cost.cacheWrite / 1000000) * usage.cacheWrite;
+   usage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite;
+   return usage.cost;
+ }
+
+ /** Models that support xhigh thinking level */
+ const XHIGH_MODELS = new Set(["gpt-5.1-codex-max", "gpt-5.2", "gpt-5.2-codex"]);
+
+ /**
+  * Check if a model supports xhigh thinking level.
+  * Currently only certain OpenAI models support this.
+  */
+ export function supportsXhigh<TApi extends Api>(model: Model<TApi>): boolean {
+   return XHIGH_MODELS.has(model.id);
+ }
+
+ /**
+  * Check if two models are equal by comparing both their id and provider.
+  * Returns false if either model is null or undefined.
+  */
+ export function modelsAreEqual<TApi extends Api>(
+   a: Model<TApi> | null | undefined,
+   b: Model<TApi> | null | undefined,
+ ): boolean {
+   if (!a || !b) return false;
+   return a.id === b.id && a.provider === b.provider;
+ }
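
For orientation, a minimal usage sketch of the helpers above. This is an editor's example, not part of the package diff: the root import path assumes the package re-exports models.ts, and the provider/model IDs are hypothetical placeholders for keys in the generated MODELS table.

import { calculateCost, getModel, getModels, supportsXhigh } from "@oh-my-pi/pi-ai";

// Hypothetical IDs; real keys come from models.generated.
const model = getModel("anthropic", "claude-sonnet-4" as any);
console.log(getModels("anthropic").length, supportsXhigh(model));

// model.cost.* are prices per million tokens; calculateCost scales raw
// token counts by 1e-6 and writes the results into usage.cost in place.
const usage = {
  input: 1200,
  output: 350,
  cacheRead: 0,
  cacheWrite: 0,
  totalTokens: 1550,
  cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
};
calculateCost(model, usage); // usage.cost.total now holds the summed cost
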
@@ -0,0 +1,587 @@
+ import Anthropic from "@anthropic-ai/sdk";
+ import type {
+   ContentBlockParam,
+   MessageCreateParamsStreaming,
+   MessageParam,
+ } from "@anthropic-ai/sdk/resources/messages.js";
+ import { calculateCost } from "../models";
+ import { getEnvApiKey } from "../stream";
+ import type {
+   Api,
+   AssistantMessage,
+   Context,
+   ImageContent,
+   Message,
+   Model,
+   StopReason,
+   StreamFunction,
+   StreamOptions,
+   TextContent,
+   ThinkingContent,
+   Tool,
+   ToolCall,
+   ToolResultMessage,
+ } from "../types";
+ import { AssistantMessageEventStream } from "../utils/event-stream";
+ import { parseStreamingJson } from "../utils/json-parse";
+ import { sanitizeSurrogates } from "../utils/sanitize-unicode";
+
+ import { transformMessages } from "./transorm-messages";
+
+ /**
+  * Convert content blocks to Anthropic API format
+  */
+ function convertContentBlocks(content: (TextContent | ImageContent)[]):
+   | string
+   | Array<
+       | { type: "text"; text: string }
+       | {
+           type: "image";
+           source: {
+             type: "base64";
+             media_type: "image/jpeg" | "image/png" | "image/gif" | "image/webp";
+             data: string;
+           };
+         }
+     > {
+   // If only text blocks, return as concatenated string for simplicity
+   const hasImages = content.some((c) => c.type === "image");
+   if (!hasImages) {
+     return sanitizeSurrogates(content.map((c) => (c as TextContent).text).join("\n"));
+   }
+
+   // If we have images, convert to content block array
+   const blocks = content.map((block) => {
+     if (block.type === "text") {
+       return {
+         type: "text" as const,
+         text: sanitizeSurrogates(block.text),
+       };
+     }
+     return {
+       type: "image" as const,
+       source: {
+         type: "base64" as const,
+         media_type: block.mimeType as "image/jpeg" | "image/png" | "image/gif" | "image/webp",
+         data: block.data,
+       },
+     };
+   });
+
+   // If only images (no text), add placeholder text block
+   const hasText = blocks.some((b) => b.type === "text");
+   if (!hasText) {
+     blocks.unshift({
+       type: "text" as const,
+       text: "(see attached image)",
+     });
+   }
+
+   return blocks;
+ }
+
+ export interface AnthropicOptions extends StreamOptions {
+   thinkingEnabled?: boolean;
+   thinkingBudgetTokens?: number;
+   interleavedThinking?: boolean;
+   toolChoice?: "auto" | "any" | "none" | { type: "tool"; name: string };
+ }
+
+ export const streamAnthropic: StreamFunction<"anthropic-messages"> = (
+   model: Model<"anthropic-messages">,
+   context: Context,
+   options?: AnthropicOptions,
+ ): AssistantMessageEventStream => {
+   const stream = new AssistantMessageEventStream();
+
+   (async () => {
+     const output: AssistantMessage = {
+       role: "assistant",
+       content: [],
+       api: "anthropic-messages" as Api,
+       provider: model.provider,
+       model: model.id,
+       usage: {
+         input: 0,
+         output: 0,
+         cacheRead: 0,
+         cacheWrite: 0,
+         totalTokens: 0,
+         cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+       },
+       stopReason: "stop",
+       timestamp: Date.now(),
+     };
+
+     try {
+       const apiKey = options?.apiKey ?? getEnvApiKey(model.provider) ?? "";
+       const { client, isOAuthToken } = createClient(model, apiKey, options?.interleavedThinking ?? true);
+       const params = buildParams(model, context, isOAuthToken, options);
+       const anthropicStream = client.messages.stream({ ...params, stream: true }, { signal: options?.signal });
+       stream.push({ type: "start", partial: output });
+
+       type Block = (ThinkingContent | TextContent | (ToolCall & { partialJson: string })) & { index: number };
+       const blocks = output.content as Block[];
+
+       for await (const event of anthropicStream) {
+         if (event.type === "message_start") {
+           // Capture initial token usage from message_start event
+           // This ensures we have input token counts even if the stream is aborted early
+           output.usage.input = event.message.usage.input_tokens || 0;
+           output.usage.output = event.message.usage.output_tokens || 0;
+           output.usage.cacheRead = event.message.usage.cache_read_input_tokens || 0;
+           output.usage.cacheWrite = event.message.usage.cache_creation_input_tokens || 0;
+           // Anthropic doesn't provide total_tokens, compute from components
+           output.usage.totalTokens =
+             output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite;
+           calculateCost(model, output.usage);
+         } else if (event.type === "content_block_start") {
+           if (event.content_block.type === "text") {
+             const block: Block = {
+               type: "text",
+               text: "",
+               index: event.index,
+             };
+             output.content.push(block);
+             stream.push({ type: "text_start", contentIndex: output.content.length - 1, partial: output });
+           } else if (event.content_block.type === "thinking") {
+             const block: Block = {
+               type: "thinking",
+               thinking: "",
+               thinkingSignature: "",
+               index: event.index,
+             };
+             output.content.push(block);
+             stream.push({ type: "thinking_start", contentIndex: output.content.length - 1, partial: output });
+           } else if (event.content_block.type === "tool_use") {
+             const block: Block = {
+               type: "toolCall",
+               id: event.content_block.id,
+               name: event.content_block.name,
+               arguments: event.content_block.input as Record<string, any>,
+               partialJson: "",
+               index: event.index,
+             };
+             output.content.push(block);
+             stream.push({ type: "toolcall_start", contentIndex: output.content.length - 1, partial: output });
+           }
+         } else if (event.type === "content_block_delta") {
+           if (event.delta.type === "text_delta") {
+             const index = blocks.findIndex((b) => b.index === event.index);
+             const block = blocks[index];
+             if (block && block.type === "text") {
+               block.text += event.delta.text;
+               stream.push({
+                 type: "text_delta",
+                 contentIndex: index,
+                 delta: event.delta.text,
+                 partial: output,
+               });
+             }
+           } else if (event.delta.type === "thinking_delta") {
+             const index = blocks.findIndex((b) => b.index === event.index);
+             const block = blocks[index];
+             if (block && block.type === "thinking") {
+               block.thinking += event.delta.thinking;
+               stream.push({
+                 type: "thinking_delta",
+                 contentIndex: index,
+                 delta: event.delta.thinking,
+                 partial: output,
+               });
+             }
+           } else if (event.delta.type === "input_json_delta") {
+             const index = blocks.findIndex((b) => b.index === event.index);
+             const block = blocks[index];
+             if (block && block.type === "toolCall") {
+               block.partialJson += event.delta.partial_json;
+               block.arguments = parseStreamingJson(block.partialJson);
+               stream.push({
+                 type: "toolcall_delta",
+                 contentIndex: index,
+                 delta: event.delta.partial_json,
+                 partial: output,
+               });
+             }
+           } else if (event.delta.type === "signature_delta") {
+             const index = blocks.findIndex((b) => b.index === event.index);
+             const block = blocks[index];
+             if (block && block.type === "thinking") {
+               block.thinkingSignature = block.thinkingSignature || "";
+               block.thinkingSignature += event.delta.signature;
+             }
+           }
+         } else if (event.type === "content_block_stop") {
+           const index = blocks.findIndex((b) => b.index === event.index);
+           const block = blocks[index];
+           if (block) {
+             delete (block as any).index;
+             if (block.type === "text") {
+               stream.push({
+                 type: "text_end",
+                 contentIndex: index,
+                 content: block.text,
+                 partial: output,
+               });
+             } else if (block.type === "thinking") {
+               stream.push({
+                 type: "thinking_end",
+                 contentIndex: index,
+                 content: block.thinking,
+                 partial: output,
+               });
+             } else if (block.type === "toolCall") {
+               block.arguments = parseStreamingJson(block.partialJson);
+               delete (block as any).partialJson;
+               stream.push({
+                 type: "toolcall_end",
+                 contentIndex: index,
+                 toolCall: block,
+                 partial: output,
+               });
+             }
+           }
+         } else if (event.type === "message_delta") {
+           if (event.delta.stop_reason) {
+             output.stopReason = mapStopReason(event.delta.stop_reason);
+           }
+           output.usage.input = event.usage.input_tokens || 0;
+           output.usage.output = event.usage.output_tokens || 0;
+           output.usage.cacheRead = event.usage.cache_read_input_tokens || 0;
+           output.usage.cacheWrite = event.usage.cache_creation_input_tokens || 0;
+           // Anthropic doesn't provide total_tokens, compute from components
+           output.usage.totalTokens =
+             output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite;
+           calculateCost(model, output.usage);
+         }
+       }
+
+       if (options?.signal?.aborted) {
+         throw new Error("Request was aborted");
+       }
+
+       if (output.stopReason === "aborted" || output.stopReason === "error") {
+         throw new Error("An unknown error occurred");
+       }
+
+       stream.push({ type: "done", reason: output.stopReason, message: output });
+       stream.end();
+     } catch (error) {
+       for (const block of output.content) delete (block as any).index;
+       output.stopReason = options?.signal?.aborted ? "aborted" : "error";
+       output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
+       stream.push({ type: "error", reason: output.stopReason, error: output });
+       stream.end();
+     }
+   })();
+
+   return stream;
+ };
+
+ function createClient(
+   model: Model<"anthropic-messages">,
+   apiKey: string,
+   interleavedThinking: boolean,
+ ): { client: Anthropic; isOAuthToken: boolean } {
+   const betaFeatures = ["fine-grained-tool-streaming-2025-05-14"];
+   if (interleavedThinking) {
+     betaFeatures.push("interleaved-thinking-2025-05-14");
+   }
+
+   if (apiKey.includes("sk-ant-oat")) {
+     const defaultHeaders = {
+       accept: "application/json",
+       "anthropic-dangerous-direct-browser-access": "true",
+       "anthropic-beta": `oauth-2025-04-20,${betaFeatures.join(",")}`,
+       ...(model.headers || {}),
+     };
+
+     const client = new Anthropic({
+       apiKey: null,
+       authToken: apiKey,
+       baseURL: model.baseUrl,
+       defaultHeaders,
+       dangerouslyAllowBrowser: true,
+     });
+
+     return { client, isOAuthToken: true };
+   } else {
+     const defaultHeaders = {
+       accept: "application/json",
+       "anthropic-dangerous-direct-browser-access": "true",
+       "anthropic-beta": betaFeatures.join(","),
+       ...(model.headers || {}),
+     };
+
+     const client = new Anthropic({
+       apiKey,
+       baseURL: model.baseUrl,
+       dangerouslyAllowBrowser: true,
+       defaultHeaders,
+     });
+
+     return { client, isOAuthToken: false };
+   }
+ }
+
+ function buildParams(
+   model: Model<"anthropic-messages">,
+   context: Context,
+   isOAuthToken: boolean,
+   options?: AnthropicOptions,
+ ): MessageCreateParamsStreaming {
+   const params: MessageCreateParamsStreaming = {
+     model: model.id,
+     messages: convertMessages(context.messages, model),
+     // Default to a third of model.maxTokens; "| 0" truncates to an integer
+     max_tokens: options?.maxTokens || (model.maxTokens / 3) | 0,
+     stream: true,
+   };
+
+   // For OAuth tokens, we MUST include Claude Code identity
+   if (isOAuthToken) {
+     params.system = [
+       {
+         type: "text",
+         text: "You are Claude Code, Anthropic's official CLI for Claude.",
+         cache_control: {
+           type: "ephemeral",
+         },
+       },
+     ];
+     if (context.systemPrompt) {
+       params.system.push({
+         type: "text",
+         text: sanitizeSurrogates(context.systemPrompt),
+         cache_control: {
+           type: "ephemeral",
+         },
+       });
+     }
+   } else if (context.systemPrompt) {
+     // Add cache control to system prompt for non-OAuth tokens
+     params.system = [
+       {
+         type: "text",
+         text: sanitizeSurrogates(context.systemPrompt),
+         cache_control: {
+           type: "ephemeral",
+         },
+       },
+     ];
+   }
+
+   if (options?.temperature !== undefined) {
+     params.temperature = options.temperature;
+   }
+
+   if (context.tools) {
+     params.tools = convertTools(context.tools);
+   }
+
+   if (options?.thinkingEnabled && model.reasoning) {
+     params.thinking = {
+       type: "enabled",
+       budget_tokens: options.thinkingBudgetTokens || 1024,
+     };
+   }
+
+   if (options?.toolChoice) {
+     if (typeof options.toolChoice === "string") {
+       params.tool_choice = { type: options.toolChoice };
+     } else {
+       params.tool_choice = options.toolChoice;
+     }
+   }
+
+   return params;
+ }
+
+ // Sanitize tool call IDs to match Anthropic's required pattern: ^[a-zA-Z0-9_-]+$
+ function sanitizeToolCallId(id: string): string {
+   // Replace any character that isn't alphanumeric, underscore, or hyphen with underscore
+   return id.replace(/[^a-zA-Z0-9_-]/g, "_");
+ }
+
+ function convertMessages(messages: Message[], model: Model<"anthropic-messages">): MessageParam[] {
+   const params: MessageParam[] = [];
+
+   // Transform messages for cross-provider compatibility
+   const transformedMessages = transformMessages(messages, model);
+
+   for (let i = 0; i < transformedMessages.length; i++) {
+     const msg = transformedMessages[i];
+
+     if (msg.role === "user") {
+       if (typeof msg.content === "string") {
+         if (msg.content.trim().length > 0) {
+           params.push({
+             role: "user",
+             content: sanitizeSurrogates(msg.content),
+           });
+         }
+       } else {
+         const blocks: ContentBlockParam[] = msg.content.map((item) => {
+           if (item.type === "text") {
+             return {
+               type: "text",
+               text: sanitizeSurrogates(item.text),
+             };
+           } else {
+             return {
+               type: "image",
+               source: {
+                 type: "base64",
+                 media_type: item.mimeType as "image/jpeg" | "image/png" | "image/gif" | "image/webp",
+                 data: item.data,
+               },
+             };
+           }
+         });
+         let filteredBlocks = !model?.input.includes("image") ? blocks.filter((b) => b.type !== "image") : blocks;
+         filteredBlocks = filteredBlocks.filter((b) => {
+           if (b.type === "text") {
+             return b.text.trim().length > 0;
+           }
+           return true;
+         });
+         if (filteredBlocks.length === 0) continue;
+         params.push({
+           role: "user",
+           content: filteredBlocks,
+         });
+       }
+     } else if (msg.role === "assistant") {
+       const blocks: ContentBlockParam[] = [];
+
+       for (const block of msg.content) {
+         if (block.type === "text") {
+           if (block.text.trim().length === 0) continue;
+           blocks.push({
+             type: "text",
+             text: sanitizeSurrogates(block.text),
+           });
+         } else if (block.type === "thinking") {
+           if (block.thinking.trim().length === 0) continue;
+           // If thinking signature is missing/empty (e.g., from aborted stream),
+           // convert to plain text block without <thinking> tags to avoid API rejection
+           // and prevent Claude from mimicking the tags in responses
+           if (!block.thinkingSignature || block.thinkingSignature.trim().length === 0) {
+             blocks.push({
+               type: "text",
+               text: sanitizeSurrogates(block.thinking),
+             });
+           } else {
+             blocks.push({
+               type: "thinking",
+               thinking: sanitizeSurrogates(block.thinking),
+               signature: block.thinkingSignature,
+             });
+           }
+         } else if (block.type === "toolCall") {
+           blocks.push({
+             type: "tool_use",
+             id: sanitizeToolCallId(block.id),
+             name: block.name,
+             input: block.arguments,
+           });
+         }
+       }
+       if (blocks.length === 0) continue;
+       params.push({
+         role: "assistant",
+         content: blocks,
+       });
+     } else if (msg.role === "toolResult") {
+       // Collect all consecutive toolResult messages, needed for z.ai Anthropic endpoint
+       const toolResults: ContentBlockParam[] = [];
+
+       // Add the current tool result
+       toolResults.push({
+         type: "tool_result",
+         tool_use_id: sanitizeToolCallId(msg.toolCallId),
+         content: convertContentBlocks(msg.content),
+         is_error: msg.isError,
+       });
+
+       // Look ahead for consecutive toolResult messages
+       let j = i + 1;
+       while (j < transformedMessages.length && transformedMessages[j].role === "toolResult") {
+         const nextMsg = transformedMessages[j] as ToolResultMessage; // We know it's a toolResult
+         toolResults.push({
+           type: "tool_result",
+           tool_use_id: sanitizeToolCallId(nextMsg.toolCallId),
+           content: convertContentBlocks(nextMsg.content),
+           is_error: nextMsg.isError,
+         });
+         j++;
+       }
+
+       // Skip the messages we've already processed
+       i = j - 1;
+
+       // Add a single user message with all tool results
+       params.push({
+         role: "user",
+         content: toolResults,
+       });
+     }
+   }
+
+   // Add cache_control to the last user message to cache conversation history
+   if (params.length > 0) {
+     const lastMessage = params[params.length - 1];
+     if (lastMessage.role === "user") {
+       // Add cache control to the last content block
+       if (Array.isArray(lastMessage.content)) {
+         const lastBlock = lastMessage.content[lastMessage.content.length - 1];
+         if (
+           lastBlock &&
+           (lastBlock.type === "text" || lastBlock.type === "image" || lastBlock.type === "tool_result")
+         ) {
+           (lastBlock as any).cache_control = { type: "ephemeral" };
+         }
+       }
+     }
+   }
+
+   return params;
+ }
+
+ function convertTools(tools: Tool[]): Anthropic.Messages.Tool[] {
+   if (!tools) return [];
+
+   return tools.map((tool) => {
+     const jsonSchema = tool.parameters as any; // TypeBox already generates JSON Schema
+
+     return {
+       name: tool.name,
+       description: tool.description,
+       input_schema: {
+         type: "object" as const,
+         properties: jsonSchema.properties || {},
+         required: jsonSchema.required || [],
+       },
+     };
+   });
+ }
+
+ function mapStopReason(reason: Anthropic.Messages.StopReason): StopReason {
+   switch (reason) {
+     case "end_turn":
+       return "stop";
+     case "max_tokens":
+       return "length";
+     case "tool_use":
+       return "toolUse";
+     case "refusal":
+       return "error";
+     case "pause_turn": // Stop is good enough -> resubmit
+       return "stop";
+     case "stop_sequence":
+       return "stop"; // We don't supply stop sequences, so this should never happen
+     default: {
+       const _exhaustive: never = reason;
+       throw new Error(`Unhandled stop reason: ${_exhaustive}`);
+     }
+   }
+ }
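
To close, a consumption sketch for streamAnthropic. This too is an editor's example rather than package code: it assumes AssistantMessageEventStream supports async iteration (its push/end protocol suggests an event stream, but utils/event-stream is not shown in this diff), that both functions are re-exported from the package root, and that an API key is supplied via options.apiKey or found by the getEnvApiKey fallback. The model ID is a hypothetical placeholder.

import { getModel, streamAnthropic } from "@oh-my-pi/pi-ai";

const model = getModel("anthropic", "claude-sonnet-4" as any); // hypothetical key
const stream = streamAnthropic(model, {
  systemPrompt: "You are a helpful assistant.",
  messages: [{ role: "user", content: "Hello!" }],
});

for await (const event of stream) {
  if (event.type === "text_delta") {
    process.stdout.write(event.delta); // incremental assistant text
  } else if (event.type === "done") {
    console.log("\nstop reason:", event.reason, "tokens:", event.message.usage.totalTokens);
  } else if (event.type === "error") {
    console.error("stream failed:", event.error.errorMessage);
  }
}
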