@mariozechner/pi-ai 0.5.27 → 0.5.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/README.md +355 -275
  2. package/dist/generate.d.ts +22 -0
  3. package/dist/generate.d.ts.map +1 -0
  4. package/dist/generate.js +204 -0
  5. package/dist/generate.js.map +1 -0
  6. package/dist/index.d.ts +7 -8
  7. package/dist/index.d.ts.map +1 -1
  8. package/dist/index.js +7 -12
  9. package/dist/index.js.map +1 -1
  10. package/dist/models.d.ts +10 -71
  11. package/dist/models.d.ts.map +1 -1
  12. package/dist/models.generated.d.ts +3056 -2659
  13. package/dist/models.generated.d.ts.map +1 -1
  14. package/dist/models.generated.js +3063 -2663
  15. package/dist/models.generated.js.map +1 -1
  16. package/dist/models.js +17 -59
  17. package/dist/models.js.map +1 -1
  18. package/dist/providers/anthropic.d.ts +5 -18
  19. package/dist/providers/anthropic.d.ts.map +1 -1
  20. package/dist/providers/anthropic.js +249 -227
  21. package/dist/providers/anthropic.js.map +1 -1
  22. package/dist/providers/google.d.ts +3 -14
  23. package/dist/providers/google.d.ts.map +1 -1
  24. package/dist/providers/google.js +215 -220
  25. package/dist/providers/google.js.map +1 -1
  26. package/dist/providers/openai-completions.d.ts +4 -14
  27. package/dist/providers/openai-completions.d.ts.map +1 -1
  28. package/dist/providers/openai-completions.js +247 -215
  29. package/dist/providers/openai-completions.js.map +1 -1
  30. package/dist/providers/openai-responses.d.ts +6 -13
  31. package/dist/providers/openai-responses.d.ts.map +1 -1
  32. package/dist/providers/openai-responses.js +242 -244
  33. package/dist/providers/openai-responses.js.map +1 -1
  34. package/dist/providers/utils.d.ts +2 -14
  35. package/dist/providers/utils.d.ts.map +1 -1
  36. package/dist/providers/utils.js +2 -15
  37. package/dist/providers/utils.js.map +1 -1
  38. package/dist/types.d.ts +39 -16
  39. package/dist/types.d.ts.map +1 -1
  40. package/dist/types.js +1 -0
  41. package/dist/types.js.map +1 -1
  42. package/package.json +1 -1
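
All hunks below are from package/dist/providers/openai-completions.js (entry 28 in the list). The headline change: the OpenAICompletionsLLM class with its callback-based generate(request, options) is replaced by a streamOpenAICompletions(model, context, options) factory that returns a QueuedGenerateStream immediately and pushes events into it ("start", "text_start", "text_delta", "text_end", "thinking_*", "toolCall", "done", "error"), each event now carrying the accumulating assistant message as partial. A minimal consumer sketch follows; the import path, the model record, and the assumption that QueuedGenerateStream is async-iterable (it is defined in dist/generate.js, which is not shown in this diff) are all hypothetical:

// Sketch only. Assumptions: the import path, the model fields (the ones the
// provider reads: id, provider, api, baseUrl, reasoning, input), and that
// QueuedGenerateStream supports for-await iteration.
import { streamOpenAICompletions } from "@mariozechner/pi-ai/dist/providers/openai-completions.js";

const model = {
    id: "gpt-4o-mini",
    provider: "openai",
    api: "openai-completions",
    baseUrl: "https://api.openai.com/v1",
    reasoning: false,
    input: ["text", "image"],
};
const stream = streamOpenAICompletions(model, { messages: [{ role: "user", content: "Hi" }] }, {});
for await (const event of stream) {
    switch (event.type) {
        case "text_delta":
            process.stdout.write(event.delta); // incremental assistant text
            break;
        case "toolCall":
            console.log(event.toolCall); // arguments already JSON.parse'd
            break;
        case "done":
            console.log(event.reason, event.message.usage); // final message, usage + cost
            break;
        case "error":
            console.error(event.error); // stream.end() follows the error event
            break;
    }
}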
@@ -1,32 +1,16 @@
  import OpenAI from "openai";
+ import { QueuedGenerateStream } from "../generate.js";
  import { calculateCost } from "../models.js";
  import { transformMessages } from "./utils.js";
- export class OpenAICompletionsLLM {
-     client;
-     modelInfo;
-     constructor(model, apiKey) {
-         if (!apiKey) {
-             if (!process.env.OPENAI_API_KEY) {
-                 throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.");
-             }
-             apiKey = process.env.OPENAI_API_KEY;
-         }
-         this.client = new OpenAI({ apiKey, baseURL: model.baseUrl, dangerouslyAllowBrowser: true });
-         this.modelInfo = model;
-     }
-     getModel() {
-         return this.modelInfo;
-     }
-     getApi() {
-         return "openai-completions";
-     }
-     async generate(request, options) {
+ export const streamOpenAICompletions = (model, context, options) => {
+     const stream = new QueuedGenerateStream();
+     (async () => {
          const output = {
              role: "assistant",
              content: [],
-             api: this.getApi(),
-             provider: this.modelInfo.provider,
-             model: this.modelInfo.id,
+             api: model.api,
+             provider: model.provider,
+             model: model.id,
              usage: {
                  input: 0,
                  output: 0,
@@ -37,40 +21,12 @@ export class OpenAICompletionsLLM {
              stopReason: "stop",
          };
          try {
-             const messages = this.convertMessages(request.messages, request.systemPrompt);
-             const params = {
-                 model: this.modelInfo.id,
-                 messages,
-                 stream: true,
-                 stream_options: { include_usage: true },
-             };
-             // Cerebras/xAI don't like the "store" field
-             if (!this.modelInfo.baseUrl?.includes("cerebras.ai") && !this.modelInfo.baseUrl?.includes("api.x.ai")) {
-                 params.store = false;
-             }
-             if (options?.maxTokens) {
-                 params.max_completion_tokens = options?.maxTokens;
-             }
-             if (options?.temperature !== undefined) {
-                 params.temperature = options?.temperature;
-             }
-             if (request.tools) {
-                 params.tools = this.convertTools(request.tools);
-             }
-             if (options?.toolChoice) {
-                 params.tool_choice = options.toolChoice;
-             }
-             if (options?.reasoningEffort &&
-                 this.modelInfo.reasoning &&
-                 !this.modelInfo.id.toLowerCase().includes("grok")) {
-                 params.reasoning_effort = options.reasoningEffort;
-             }
-             const stream = await this.client.chat.completions.create(params, {
-                 signal: options?.signal,
-             });
-             options?.onEvent?.({ type: "start", model: this.modelInfo.id, provider: this.modelInfo.provider });
+             const client = createClient(model, options?.apiKey);
+             const params = buildParams(model, context, options);
+             const openaiStream = await client.chat.completions.create(params, { signal: options?.signal });
+             stream.push({ type: "start", partial: output });
              let currentBlock = null;
-             for await (const chunk of stream) {
+             for await (const chunk of openaiStream) {
                  if (chunk.usage) {
                      output.usage = {
                          input: chunk.usage.prompt_tokens || 0,
@@ -86,128 +42,158 @@ export class OpenAICompletionsLLM {
                          total: 0,
                      },
                  };
-                 calculateCost(this.modelInfo, output.usage);
+                 calculateCost(model, output.usage);
              }
              const choice = chunk.choices[0];
              if (!choice)
                  continue;
-             // Capture finish reason
              if (choice.finish_reason) {
-                 output.stopReason = this.mapStopReason(choice.finish_reason);
+                 output.stopReason = mapStopReason(choice.finish_reason);
              }
              if (choice.delta) {
-                 // Handle text content
                  if (choice.delta.content !== null &&
                      choice.delta.content !== undefined &&
                      choice.delta.content.length > 0) {
-                     // Check if we need to switch to text block
                      if (!currentBlock || currentBlock.type !== "text") {
-                         // Save current block if exists
                          if (currentBlock) {
                              if (currentBlock.type === "thinking") {
-                                 options?.onEvent?.({ type: "thinking_end", content: currentBlock.thinking });
+                                 stream.push({
+                                     type: "thinking_end",
+                                     content: currentBlock.thinking,
+                                     partial: output,
+                                 });
                              }
                              else if (currentBlock.type === "toolCall") {
                                  currentBlock.arguments = JSON.parse(currentBlock.partialArgs || "{}");
                                  delete currentBlock.partialArgs;
-                                 options?.onEvent?.({ type: "toolCall", toolCall: currentBlock });
+                                 stream.push({
+                                     type: "toolCall",
+                                     toolCall: currentBlock,
+                                     partial: output,
+                                 });
                              }
                          }
-                         // Start new text block
                          currentBlock = { type: "text", text: "" };
                          output.content.push(currentBlock);
-                         options?.onEvent?.({ type: "text_start" });
+                         stream.push({ type: "text_start", partial: output });
                      }
-                     // Append to text block
                      if (currentBlock.type === "text") {
                          currentBlock.text += choice.delta.content;
-                         options?.onEvent?.({
+                         stream.push({
                              type: "text_delta",
-                             content: currentBlock.text,
                              delta: choice.delta.content,
+                             partial: output,
                          });
                      }
                  }
-                 // Handle reasoning_content field
+                 // Some endpoints return reasoning in reasoning_content (llama.cpp)
                  if (choice.delta.reasoning_content !== null &&
                      choice.delta.reasoning_content !== undefined &&
                      choice.delta.reasoning_content.length > 0) {
-                     // Check if we need to switch to thinking block
                      if (!currentBlock || currentBlock.type !== "thinking") {
-                         // Save current block if exists
                          if (currentBlock) {
                              if (currentBlock.type === "text") {
-                                 options?.onEvent?.({ type: "text_end", content: currentBlock.text });
+                                 stream.push({
+                                     type: "text_end",
+                                     content: currentBlock.text,
+                                     partial: output,
+                                 });
                              }
                              else if (currentBlock.type === "toolCall") {
                                  currentBlock.arguments = JSON.parse(currentBlock.partialArgs || "{}");
                                  delete currentBlock.partialArgs;
-                                 options?.onEvent?.({ type: "toolCall", toolCall: currentBlock });
+                                 stream.push({
+                                     type: "toolCall",
+                                     toolCall: currentBlock,
+                                     partial: output,
+                                 });
                              }
                          }
-                         // Start new thinking block
-                         currentBlock = { type: "thinking", thinking: "", thinkingSignature: "reasoning_content" };
+                         currentBlock = {
+                             type: "thinking",
+                             thinking: "",
+                             thinkingSignature: "reasoning_content",
+                         };
                          output.content.push(currentBlock);
-                         options?.onEvent?.({ type: "thinking_start" });
+                         stream.push({ type: "thinking_start", partial: output });
                      }
-                     // Append to thinking block
                      if (currentBlock.type === "thinking") {
                          const delta = choice.delta.reasoning_content;
                          currentBlock.thinking += delta;
-                         options?.onEvent?.({ type: "thinking_delta", content: currentBlock.thinking, delta });
+                         stream.push({
+                             type: "thinking_delta",
+                             delta,
+                             partial: output,
+                         });
                      }
                  }
-                 // Handle reasoning field
+                 // Some endpoints return reasoning in reasoning (ollama, xAI, ...)
                  if (choice.delta.reasoning !== null &&
                      choice.delta.reasoning !== undefined &&
                      choice.delta.reasoning.length > 0) {
-                     // Check if we need to switch to thinking block
                      if (!currentBlock || currentBlock.type !== "thinking") {
-                         // Save current block if exists
                          if (currentBlock) {
                              if (currentBlock.type === "text") {
-                                 options?.onEvent?.({ type: "text_end", content: currentBlock.text });
+                                 stream.push({
+                                     type: "text_end",
+                                     content: currentBlock.text,
+                                     partial: output,
+                                 });
                              }
                              else if (currentBlock.type === "toolCall") {
                                  currentBlock.arguments = JSON.parse(currentBlock.partialArgs || "{}");
                                  delete currentBlock.partialArgs;
-                                 options?.onEvent?.({ type: "toolCall", toolCall: currentBlock });
+                                 stream.push({
+                                     type: "toolCall",
+                                     toolCall: currentBlock,
+                                     partial: output,
+                                 });
                              }
                          }
-                         // Start new thinking block
-                         currentBlock = { type: "thinking", thinking: "", thinkingSignature: "reasoning" };
+                         currentBlock = {
+                             type: "thinking",
+                             thinking: "",
+                             thinkingSignature: "reasoning",
+                         };
                          output.content.push(currentBlock);
-                         options?.onEvent?.({ type: "thinking_start" });
+                         stream.push({ type: "thinking_start", partial: output });
                      }
-                     // Append to thinking block
                      if (currentBlock.type === "thinking") {
                          const delta = choice.delta.reasoning;
                          currentBlock.thinking += delta;
-                         options?.onEvent?.({ type: "thinking_delta", content: currentBlock.thinking, delta });
+                         stream.push({ type: "thinking_delta", delta, partial: output });
                      }
                  }
-                 // Handle tool calls
                  if (choice?.delta?.tool_calls) {
                      for (const toolCall of choice.delta.tool_calls) {
-                         // Check if we need a new tool call block
                          if (!currentBlock ||
                              currentBlock.type !== "toolCall" ||
                              (toolCall.id && currentBlock.id !== toolCall.id)) {
-                             // Save current block if exists
                              if (currentBlock) {
                                  if (currentBlock.type === "text") {
-                                     options?.onEvent?.({ type: "text_end", content: currentBlock.text });
+                                     stream.push({
+                                         type: "text_end",
+                                         content: currentBlock.text,
+                                         partial: output,
+                                     });
                                  }
                                  else if (currentBlock.type === "thinking") {
-                                     options?.onEvent?.({ type: "thinking_end", content: currentBlock.thinking });
+                                     stream.push({
+                                         type: "thinking_end",
+                                         content: currentBlock.thinking,
+                                         partial: output,
+                                     });
                                  }
                                  else if (currentBlock.type === "toolCall") {
                                      currentBlock.arguments = JSON.parse(currentBlock.partialArgs || "{}");
                                      delete currentBlock.partialArgs;
-                                     options?.onEvent?.({ type: "toolCall", toolCall: currentBlock });
+                                     stream.push({
+                                         type: "toolCall",
+                                         toolCall: currentBlock,
+                                         partial: output,
+                                     });
                                  }
                              }
-                             // Start new tool call block
                              currentBlock = {
                                  type: "toolCall",
                                  id: toolCall.id || "",
@@ -217,7 +203,6 @@ export class OpenAICompletionsLLM {
                              };
                              output.content.push(currentBlock);
                          }
-                         // Accumulate tool call data
                          if (currentBlock.type === "toolCall") {
                              if (toolCall.id)
                                  currentBlock.id = toolCall.id;
@@ -231,151 +216,198 @@ export class OpenAICompletionsLLM {
                      }
                  }
              }
-             // Save final block if exists
              if (currentBlock) {
                  if (currentBlock.type === "text") {
-                     options?.onEvent?.({ type: "text_end", content: currentBlock.text });
+                     stream.push({
+                         type: "text_end",
+                         content: currentBlock.text,
+                         partial: output,
+                     });
                  }
                  else if (currentBlock.type === "thinking") {
-                     options?.onEvent?.({ type: "thinking_end", content: currentBlock.thinking });
+                     stream.push({
+                         type: "thinking_end",
+                         content: currentBlock.thinking,
+                         partial: output,
+                     });
                  }
                  else if (currentBlock.type === "toolCall") {
                      currentBlock.arguments = JSON.parse(currentBlock.partialArgs || "{}");
                      delete currentBlock.partialArgs;
-                     options?.onEvent?.({ type: "toolCall", toolCall: currentBlock });
+                     stream.push({
+                         type: "toolCall",
+                         toolCall: currentBlock,
+                         partial: output,
+                     });
                  }
              }
              if (options?.signal?.aborted) {
                  throw new Error("Request was aborted");
              }
-             options?.onEvent?.({ type: "done", reason: output.stopReason, message: output });
+             stream.push({ type: "done", reason: output.stopReason, message: output });
+             stream.end();
              return output;
          }
          catch (error) {
-             // Update output with error information
              output.stopReason = "error";
              output.error = error instanceof Error ? error.message : String(error);
-             options?.onEvent?.({ type: "error", error: output.error });
-             return output;
+             stream.push({ type: "error", error: output.error, partial: output });
+             stream.end();
          }
-     }
-     convertMessages(messages, systemPrompt) {
-         const params = [];
-         // Transform messages for cross-provider compatibility
-         const transformedMessages = transformMessages(messages, this.modelInfo, this.getApi());
-         // Add system prompt if provided
-         if (systemPrompt) {
-             // Cerebras/xAI don't like the "developer" role
-             const useDeveloperRole = this.modelInfo.reasoning &&
-                 !this.modelInfo.baseUrl?.includes("cerebras.ai") &&
-                 !this.modelInfo.baseUrl?.includes("api.x.ai");
-             const role = useDeveloperRole ? "developer" : "system";
-             params.push({ role: role, content: systemPrompt });
+     })();
+     return stream;
+ };
+ function createClient(model, apiKey) {
+     if (!apiKey) {
+         if (!process.env.OPENAI_API_KEY) {
+             throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.");
          }
-         // Convert messages
-         for (const msg of transformedMessages) {
-             if (msg.role === "user") {
-                 // Handle both string and array content
-                 if (typeof msg.content === "string") {
-                     params.push({
-                         role: "user",
-                         content: msg.content,
-                     });
-                 }
-                 else {
-                     // Convert array content to OpenAI format
-                     const content = msg.content.map((item) => {
-                         if (item.type === "text") {
-                             return {
-                                 type: "text",
-                                 text: item.text,
-                             };
-                         }
-                         else {
-                             // Image content - OpenAI uses data URLs
-                             return {
-                                 type: "image_url",
-                                 image_url: {
-                                     url: `data:${item.mimeType};base64,${item.data}`,
-                                 },
-                             };
-                         }
-                     });
-                     const filteredContent = !this.modelInfo?.input.includes("image")
-                         ? content.filter((c) => c.type !== "image_url")
-                         : content;
-                     params.push({
-                         role: "user",
-                         content: filteredContent,
-                     });
-                 }
+         apiKey = process.env.OPENAI_API_KEY;
+     }
+     return new OpenAI({ apiKey, baseURL: model.baseUrl, dangerouslyAllowBrowser: true });
+ }
+ function buildParams(model, context, options) {
+     const messages = convertMessages(model, context);
+     const params = {
+         model: model.id,
+         messages,
+         stream: true,
+         stream_options: { include_usage: true },
+     };
+     // Cerebras/xAI don't like the "store" field
+     if (!model.baseUrl.includes("cerebras.ai") && !model.baseUrl.includes("api.x.ai")) {
+         params.store = false;
+     }
+     if (options?.maxTokens) {
+         params.max_completion_tokens = options?.maxTokens;
+     }
+     if (options?.temperature !== undefined) {
+         params.temperature = options?.temperature;
+     }
+     if (context.tools) {
+         params.tools = convertTools(context.tools);
+     }
+     if (options?.toolChoice) {
+         params.tool_choice = options.toolChoice;
+     }
+     // Grok models don't like reasoning_effort
+     if (options?.reasoningEffort && model.reasoning && !model.id.toLowerCase().includes("grok")) {
+         params.reasoning_effort = options.reasoningEffort;
+     }
+     return params;
+ }
+ function convertMessages(model, context) {
+     const params = [];
+     const transformedMessages = transformMessages(context.messages, model);
+     if (context.systemPrompt) {
+         // Cerebras/xAI don't like the "developer" role
+         const useDeveloperRole = model.reasoning && !model.baseUrl.includes("cerebras.ai") && !model.baseUrl.includes("api.x.ai");
+         const role = useDeveloperRole ? "developer" : "system";
+         params.push({ role: role, content: context.systemPrompt });
+     }
+     for (const msg of transformedMessages) {
+         if (msg.role === "user") {
+             if (typeof msg.content === "string") {
+                 params.push({
+                     role: "user",
+                     content: msg.content,
+                 });
              }
-             else if (msg.role === "assistant") {
-                 const assistantMsg = {
-                     role: "assistant",
-                     content: null,
-                 };
-                 // Build content from blocks
-                 const textBlocks = msg.content.filter((b) => b.type === "text");
-                 if (textBlocks.length > 0) {
-                     assistantMsg.content = textBlocks.map((b) => b.text).join("");
-                 }
-                 // Handle thinking blocks for llama.cpp server + gpt-oss
-                 const thinkingBlocks = msg.content.filter((b) => b.type === "thinking");
-                 if (thinkingBlocks.length > 0) {
-                     // Use the signature from the first thinking block if available
-                     const signature = thinkingBlocks[0].thinkingSignature;
-                     if (signature && signature.length > 0) {
-                         assistantMsg[signature] = thinkingBlocks.map((b) => b.thinking).join("");
+             else {
+                 const content = msg.content.map((item) => {
+                     if (item.type === "text") {
+                         return {
+                             type: "text",
+                             text: item.text,
+                         };
                      }
-                 }
-                 // Handle tool calls
-                 const toolCalls = msg.content.filter((b) => b.type === "toolCall");
-                 if (toolCalls.length > 0) {
-                     assistantMsg.tool_calls = toolCalls.map((tc) => ({
-                         id: tc.id,
-                         type: "function",
-                         function: {
-                             name: tc.name,
-                             arguments: JSON.stringify(tc.arguments),
-                         },
-                     }));
-                 }
-                 params.push(assistantMsg);
-             }
-             else if (msg.role === "toolResult") {
+                     else {
+                         return {
+                             type: "image_url",
+                             image_url: {
+                                 url: `data:${item.mimeType};base64,${item.data}`,
+                             },
+                         };
+                     }
+                 });
+                 const filteredContent = !model.input.includes("image")
+                     ? content.filter((c) => c.type !== "image_url")
+                     : content;
+                 if (filteredContent.length === 0)
+                     continue;
                  params.push({
-                     role: "tool",
-                     content: msg.content,
-                     tool_call_id: msg.toolCallId,
+                     role: "user",
+                     content: filteredContent,
                  });
              }
          }
-         return params;
-     }
-     convertTools(tools) {
-         return tools.map((tool) => ({
-             type: "function",
-             function: {
-                 name: tool.name,
-                 description: tool.description,
-                 parameters: tool.parameters,
-             },
-         }));
+         else if (msg.role === "assistant") {
+             const assistantMsg = {
+                 role: "assistant",
+                 content: null,
+             };
+             const textBlocks = msg.content.filter((b) => b.type === "text");
+             if (textBlocks.length > 0) {
+                 assistantMsg.content = textBlocks.map((b) => b.text).join("");
+             }
+             // Handle thinking blocks for llama.cpp server + gpt-oss
+             const thinkingBlocks = msg.content.filter((b) => b.type === "thinking");
+             if (thinkingBlocks.length > 0) {
+                 // Use the signature from the first thinking block if available
+                 const signature = thinkingBlocks[0].thinkingSignature;
+                 if (signature && signature.length > 0) {
+                     assistantMsg[signature] = thinkingBlocks.map((b) => b.thinking).join("");
+                 }
+             }
+             const toolCalls = msg.content.filter((b) => b.type === "toolCall");
+             if (toolCalls.length > 0) {
+                 assistantMsg.tool_calls = toolCalls.map((tc) => ({
+                     id: tc.id,
+                     type: "function",
+                     function: {
+                         name: tc.name,
+                         arguments: JSON.stringify(tc.arguments),
+                     },
+                 }));
+             }
+             params.push(assistantMsg);
+         }
+         else if (msg.role === "toolResult") {
+             params.push({
+                 role: "tool",
+                 content: msg.content,
+                 tool_call_id: msg.toolCallId,
+             });
+         }
      }
-     mapStopReason(reason) {
-         switch (reason) {
-             case "stop":
-                 return "stop";
-             case "length":
-                 return "length";
-             case "function_call":
-             case "tool_calls":
-                 return "toolUse";
-             case "content_filter":
-                 return "safety";
-             default:
-                 return "stop";
+     return params;
+ }
+ function convertTools(tools) {
+     return tools.map((tool) => ({
+         type: "function",
+         function: {
+             name: tool.name,
+             description: tool.description,
+             parameters: tool.parameters,
+         },
+     }));
+ }
+ function mapStopReason(reason) {
+     if (reason === null)
+         return "stop";
+     switch (reason) {
+         case "stop":
+             return "stop";
+         case "length":
+             return "length";
+         case "function_call":
+         case "tool_calls":
+             return "toolUse";
+         case "content_filter":
+             return "safety";
+         default: {
+             const _exhaustive = reason;
+             throw new Error(`Unhandled stop reason: ${_exhaustive}`);
          }
      }
  }
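
One behavioral note from the last hunk: mapStopReason no longer maps unknown finish reasons to "stop". null is handled up front, and the default branch throws; the compiled const _exhaustive = reason; is the shape tsc emits for a never-typed exhaustiveness check. A minimal sketch of what the TypeScript source presumably looks like; the union members are an assumption inferred from the cases handled:

type OpenAIFinishReason = "stop" | "length" | "function_call" | "tool_calls" | "content_filter";
type StopReason = "stop" | "length" | "toolUse" | "safety";

function mapStopReason(reason: OpenAIFinishReason | null): StopReason {
    if (reason === null)
        return "stop";
    switch (reason) {
        case "stop":
            return "stop";
        case "length":
            return "length";
        case "function_call":
        case "tool_calls":
            return "toolUse";
        case "content_filter":
            return "safety";
        default: {
            // Compile-time guard: adding a member to OpenAIFinishReason
            // without a matching case turns this assignment into a type error.
            const _exhaustive: never = reason;
            throw new Error(`Unhandled stop reason: ${_exhaustive}`);
        }
    }
}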