@mariozechner/pi-ai 0.5.27 → 0.5.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/README.md +355 -275
  2. package/dist/generate.d.ts +22 -0
  3. package/dist/generate.d.ts.map +1 -0
  4. package/dist/generate.js +204 -0
  5. package/dist/generate.js.map +1 -0
  6. package/dist/index.d.ts +7 -8
  7. package/dist/index.d.ts.map +1 -1
  8. package/dist/index.js +7 -12
  9. package/dist/index.js.map +1 -1
  10. package/dist/models.d.ts +10 -71
  11. package/dist/models.d.ts.map +1 -1
  12. package/dist/models.generated.d.ts +3056 -2659
  13. package/dist/models.generated.d.ts.map +1 -1
  14. package/dist/models.generated.js +3063 -2663
  15. package/dist/models.generated.js.map +1 -1
  16. package/dist/models.js +17 -59
  17. package/dist/models.js.map +1 -1
  18. package/dist/providers/anthropic.d.ts +5 -18
  19. package/dist/providers/anthropic.d.ts.map +1 -1
  20. package/dist/providers/anthropic.js +249 -227
  21. package/dist/providers/anthropic.js.map +1 -1
  22. package/dist/providers/google.d.ts +3 -14
  23. package/dist/providers/google.d.ts.map +1 -1
  24. package/dist/providers/google.js +215 -220
  25. package/dist/providers/google.js.map +1 -1
  26. package/dist/providers/openai-completions.d.ts +4 -14
  27. package/dist/providers/openai-completions.d.ts.map +1 -1
  28. package/dist/providers/openai-completions.js +247 -215
  29. package/dist/providers/openai-completions.js.map +1 -1
  30. package/dist/providers/openai-responses.d.ts +6 -13
  31. package/dist/providers/openai-responses.d.ts.map +1 -1
  32. package/dist/providers/openai-responses.js +242 -244
  33. package/dist/providers/openai-responses.js.map +1 -1
  34. package/dist/providers/utils.d.ts +2 -14
  35. package/dist/providers/utils.d.ts.map +1 -1
  36. package/dist/providers/utils.js +2 -15
  37. package/dist/providers/utils.js.map +1 -1
  38. package/dist/types.d.ts +39 -16
  39. package/dist/types.d.ts.map +1 -1
  40. package/dist/types.js +1 -0
  41. package/dist/types.js.map +1 -1
  42. package/package.json +1 -1
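
The diff below covers package/dist/providers/openai-responses.js, the most instructive of the provider rewrites in this release: the class-based OpenAIResponsesLLM, whose generate() method reported progress through an options.onEvent callback and returned the finished message, is replaced by a standalone streamOpenAIResponses(model, context, options) function. The new function returns a QueuedGenerateStream immediately, feeds it from a detached async IIFE, attaches a `partial` snapshot of the accumulating assistant message to every event, and splits request construction into createClient, buildParams, convertMessages, convertTools, and mapStopReason helpers. A minimal consumption sketch follows, under stated assumptions: QueuedGenerateStream (defined in dist/generate.js, which this diff lists but does not show) is taken to be async-iterable, and the import path, getModel helper, and model id are illustrative only.

// Hypothetical consumer of the new streaming API; getModel() and the
// import path are assumptions, not shown in this diff.
import { getModel, streamOpenAIResponses } from "@mariozechner/pi-ai";

const model = getModel("openai", "gpt-5-mini");
const stream = streamOpenAIResponses(
    model,
    { systemPrompt: "Be terse.", messages: [{ role: "user", content: "Hi!" }] },
    { apiKey: process.env.OPENAI_API_KEY },
);
for await (const event of stream) {
    // Event names and payloads (type, delta, error, message) match those
    // pushed by the provider code in the diff below.
    if (event.type === "text_delta") process.stdout.write(event.delta);
    else if (event.type === "error") console.error(event.error);
    else if (event.type === "done") console.log("\n", event.message.usage);
}
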
@@ -1,32 +1,20 @@
  import OpenAI from "openai";
+ import { QueuedGenerateStream } from "../generate.js";
  import { calculateCost } from "../models.js";
  import { transformMessages } from "./utils.js";
- export class OpenAIResponsesLLM {
-     client;
-     modelInfo;
-     constructor(model, apiKey) {
-         if (!apiKey) {
-             if (!process.env.OPENAI_API_KEY) {
-                 throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.");
-             }
-             apiKey = process.env.OPENAI_API_KEY;
-         }
-         this.client = new OpenAI({ apiKey, baseURL: model.baseUrl, dangerouslyAllowBrowser: true });
-         this.modelInfo = model;
-     }
-     getModel() {
-         return this.modelInfo;
-     }
-     getApi() {
-         return "openai-responses";
-     }
-     async generate(request, options) {
+ /**
+  * Generate function for OpenAI Responses API
+  */
+ export const streamOpenAIResponses = (model, context, options) => {
+     const stream = new QueuedGenerateStream();
+     // Start async processing
+     (async () => {
          const output = {
              role: "assistant",
              content: [],
-             api: this.getApi(),
-             provider: this.modelInfo.provider,
-             model: this.modelInfo.id,
+             api: "openai-responses",
+             provider: model.provider,
+             model: model.id,
              usage: {
                  input: 0,
                  output: 0,
@@ -37,68 +25,28 @@ export class OpenAIResponsesLLM {
              stopReason: "stop",
          };
          try {
-             const input = this.convertToInput(request.messages, request.systemPrompt);
-             const params = {
-                 model: this.modelInfo.id,
-                 input,
-                 stream: true,
-             };
-             if (options?.maxTokens) {
-                 params.max_output_tokens = options?.maxTokens;
-             }
-             if (options?.temperature !== undefined) {
-                 params.temperature = options?.temperature;
-             }
-             if (request.tools) {
-                 params.tools = this.convertTools(request.tools);
-             }
-             // Add reasoning options for models that support it
-             if (this.modelInfo?.reasoning) {
-                 if (options?.reasoningEffort || options?.reasoningSummary) {
-                     params.reasoning = {
-                         effort: options?.reasoningEffort || "medium",
-                         summary: options?.reasoningSummary || "auto",
-                     };
-                     params.include = ["reasoning.encrypted_content"];
-                 }
-                 else {
-                     params.reasoning = {
-                         effort: this.modelInfo.name.startsWith("gpt-5") ? "minimal" : null,
-                         summary: null,
-                     };
-                     if (this.modelInfo.name.startsWith("gpt-5")) {
-                         // Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7
-                         input.push({
-                             role: "developer",
-                             content: [
-                                 {
-                                     type: "input_text",
-                                     text: "# Juice: 0 !important",
-                                 },
-                             ],
-                         });
-                     }
-                 }
-             }
-             const stream = await this.client.responses.create(params, {
-                 signal: options?.signal,
-             });
-             options?.onEvent?.({ type: "start", model: this.modelInfo.id, provider: this.modelInfo.provider });
-             const outputItems = [];
+             // Create OpenAI client
+             const client = createClient(model, options?.apiKey);
+             const params = buildParams(model, context, options);
+             const openaiStream = await client.responses.create(params, { signal: options?.signal });
+             stream.push({ type: "start", partial: output });
              let currentItem = null;
-             for await (const event of stream) {
+             let currentBlock = null;
+             for await (const event of openaiStream) {
                  // Handle output item start
                  if (event.type === "response.output_item.added") {
                      const item = event.item;
                      if (item.type === "reasoning") {
-                         options?.onEvent?.({ type: "thinking_start" });
-                         outputItems.push(item);
                          currentItem = item;
+                         currentBlock = { type: "thinking", thinking: "" };
+                         output.content.push(currentBlock);
+                         stream.push({ type: "thinking_start", partial: output });
                      }
                      else if (item.type === "message") {
-                         options?.onEvent?.({ type: "text_start" });
-                         outputItems.push(item);
                          currentItem = item;
+                         currentBlock = { type: "text", text: "" };
+                         output.content.push(currentBlock);
+                         stream.push({ type: "text_start", partial: output });
                      }
                  }
                  // Handle reasoning summary deltas
@@ -109,30 +57,38 @@ export class OpenAIResponsesLLM {
                      }
                  }
                  else if (event.type === "response.reasoning_summary_text.delta") {
-                     if (currentItem && currentItem.type === "reasoning") {
+                     if (currentItem &&
+                         currentItem.type === "reasoning" &&
+                         currentBlock &&
+                         currentBlock.type === "thinking") {
                          currentItem.summary = currentItem.summary || [];
                          const lastPart = currentItem.summary[currentItem.summary.length - 1];
                          if (lastPart) {
+                             currentBlock.thinking += event.delta;
                              lastPart.text += event.delta;
-                             options?.onEvent?.({
+                             stream.push({
                                  type: "thinking_delta",
-                                 content: currentItem.summary.map((s) => s.text).join("\n\n"),
                                  delta: event.delta,
+                                 partial: output,
                              });
                          }
                      }
                  }
                  // Add a new line between summary parts (hack...)
                  else if (event.type === "response.reasoning_summary_part.done") {
-                     if (currentItem && currentItem.type === "reasoning") {
+                     if (currentItem &&
+                         currentItem.type === "reasoning" &&
+                         currentBlock &&
+                         currentBlock.type === "thinking") {
                          currentItem.summary = currentItem.summary || [];
                          const lastPart = currentItem.summary[currentItem.summary.length - 1];
                          if (lastPart) {
+                             currentBlock.thinking += "\n\n";
                              lastPart.text += "\n\n";
-                             options?.onEvent?.({
+                             stream.push({
                                  type: "thinking_delta",
-                                 content: currentItem.summary.map((s) => s.text).join("\n\n"),
                                  delta: "\n\n",
+                                 partial: output,
                              });
                          }
                      }
@@ -145,31 +101,29 @@ export class OpenAIResponsesLLM {
                      }
                  }
                  else if (event.type === "response.output_text.delta") {
-                     if (currentItem && currentItem.type === "message") {
+                     if (currentItem && currentItem.type === "message" && currentBlock && currentBlock.type === "text") {
                          const lastPart = currentItem.content[currentItem.content.length - 1];
                          if (lastPart && lastPart.type === "output_text") {
+                             currentBlock.text += event.delta;
                              lastPart.text += event.delta;
-                             options?.onEvent?.({
+                             stream.push({
                                  type: "text_delta",
-                                 content: currentItem.content
-                                     .map((c) => (c.type === "output_text" ? c.text : c.refusal))
-                                     .join(""),
                                  delta: event.delta,
+                                 partial: output,
                              });
                          }
                      }
                  }
                  else if (event.type === "response.refusal.delta") {
-                     if (currentItem && currentItem.type === "message") {
+                     if (currentItem && currentItem.type === "message" && currentBlock && currentBlock.type === "text") {
                          const lastPart = currentItem.content[currentItem.content.length - 1];
                          if (lastPart && lastPart.type === "refusal") {
+                             currentBlock.text += event.delta;
                              lastPart.refusal += event.delta;
-                             options?.onEvent?.({
+                             stream.push({
                                  type: "text_delta",
-                                 content: currentItem.content
-                                     .map((c) => (c.type === "output_text" ? c.text : c.refusal))
-                                     .join(""),
                                  delta: event.delta,
+                                 partial: output,
                              });
                          }
                      }
@@ -177,15 +131,25 @@ export class OpenAIResponsesLLM {
                  // Handle output item completion
                  else if (event.type === "response.output_item.done") {
                      const item = event.item;
-                     if (item.type === "reasoning") {
-                         outputItems[outputItems.length - 1] = item; // Update with final item
-                         const thinkingContent = item.summary?.map((s) => s.text).join("\n\n") || "";
-                         options?.onEvent?.({ type: "thinking_end", content: thinkingContent });
+                     if (item.type === "reasoning" && currentBlock && currentBlock.type === "thinking") {
+                         currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
+                         currentBlock.thinkingSignature = JSON.stringify(item);
+                         stream.push({
+                             type: "thinking_end",
+                             content: currentBlock.thinking,
+                             partial: output,
+                         });
+                         currentBlock = null;
                      }
-                     else if (item.type === "message") {
-                         outputItems[outputItems.length - 1] = item; // Update with final item
-                         const textContent = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
-                         options?.onEvent?.({ type: "text_end", content: textContent });
+                     else if (item.type === "message" && currentBlock && currentBlock.type === "text") {
+                         currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
+                         currentBlock.textSignature = item.id;
+                         stream.push({
+                             type: "text_end",
+                             content: currentBlock.text,
+                             partial: output,
+                         });
+                         currentBlock = null;
                      }
                      else if (item.type === "function_call") {
                          const toolCall = {
@@ -194,8 +158,8 @@ export class OpenAIResponsesLLM {
                              name: item.name,
                              arguments: JSON.parse(item.arguments),
                          };
-                         options?.onEvent?.({ type: "toolCall", toolCall });
-                         outputItems.push(item);
+                         output.content.push(toolCall);
+                         stream.push({ type: "toolCall", toolCall, partial: output });
                      }
                  }
                  // Handle completion
@@ -210,10 +174,10 @@ export class OpenAIResponsesLLM {
                          cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
                      };
                  }
-                 calculateCost(this.modelInfo, output.usage);
+                 calculateCost(model, output.usage);
                  // Map status to stop reason
-                 output.stopReason = this.mapStopReason(response?.status);
-                 if (outputItems.some((b) => b.type === "function_call") && output.stopReason === "stop") {
+                 output.stopReason = mapStopReason(response?.status);
+                 if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
                      output.stopReason = "toolUse";
                  }
              }
@@ -221,171 +185,205 @@ export class OpenAIResponsesLLM {
              else if (event.type === "error") {
                  output.stopReason = "error";
                  output.error = `Code ${event.code}: ${event.message}` || "Unknown error";
-                 options?.onEvent?.({ type: "error", error: output.error });
+                 stream.push({ type: "error", error: output.error, partial: output });
+                 stream.end();
                  return output;
              }
              else if (event.type === "response.failed") {
                  output.stopReason = "error";
                  output.error = "Unknown error";
-                 options?.onEvent?.({ type: "error", error: output.error });
+                 stream.push({ type: "error", error: output.error, partial: output });
+                 stream.end();
                  return output;
              }
          }
-         // Convert output items to blocks
-         for (const item of outputItems) {
-             if (item.type === "reasoning") {
-                 output.content.push({
-                     type: "thinking",
-                     thinking: item.summary?.map((s) => s.text).join("\n\n") || "",
-                     thinkingSignature: JSON.stringify(item), // Full item for resubmission
-                 });
-             }
-             else if (item.type === "message") {
-                 output.content.push({
-                     type: "text",
-                     text: item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join(""),
-                     textSignature: item.id, // ID for resubmission
-                 });
-             }
-             else if (item.type === "function_call") {
-                 output.content.push({
-                     type: "toolCall",
-                     id: item.call_id + "|" + item.id,
-                     name: item.name,
-                     arguments: JSON.parse(item.arguments),
-                 });
-             }
-         }
          if (options?.signal?.aborted) {
              throw new Error("Request was aborted");
          }
-         options?.onEvent?.({ type: "done", reason: output.stopReason, message: output });
-         return output;
+         stream.push({ type: "done", reason: output.stopReason, message: output });
+         stream.end();
      }
      catch (error) {
          output.stopReason = "error";
          output.error = error instanceof Error ? error.message : JSON.stringify(error);
-         options?.onEvent?.({ type: "error", error: output.error });
-         return output;
+         stream.push({ type: "error", error: output.error, partial: output });
+         stream.end();
      }
+     })();
+     return stream;
+ };
+ function createClient(model, apiKey) {
+     if (!apiKey) {
+         if (!process.env.OPENAI_API_KEY) {
+             throw new Error("OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.");
+         }
+         apiKey = process.env.OPENAI_API_KEY;
      }
-     convertToInput(messages, systemPrompt) {
-         const input = [];
-         // Transform messages for cross-provider compatibility
-         const transformedMessages = transformMessages(messages, this.modelInfo, this.getApi());
-         // Add system prompt if provided
-         if (systemPrompt) {
-             const role = this.modelInfo?.reasoning ? "developer" : "system";
-             input.push({
-                 role,
-                 content: systemPrompt,
-             });
+     return new OpenAI({ apiKey, baseURL: model.baseUrl, dangerouslyAllowBrowser: true });
+ }
+ function buildParams(model, context, options) {
+     const messages = convertMessages(model, context);
+     const params = {
+         model: model.id,
+         input: messages,
+         stream: true,
+     };
+     if (options?.maxTokens) {
+         params.max_output_tokens = options?.maxTokens;
+     }
+     if (options?.temperature !== undefined) {
+         params.temperature = options?.temperature;
+     }
+     if (context.tools) {
+         params.tools = convertTools(context.tools);
+     }
+     if (model.reasoning) {
+         if (options?.reasoningEffort || options?.reasoningSummary) {
+             params.reasoning = {
+                 effort: options?.reasoningEffort || "medium",
+                 summary: options?.reasoningSummary || "auto",
+             };
+             params.include = ["reasoning.encrypted_content"];
          }
-         // Convert messages
-         for (const msg of transformedMessages) {
-             if (msg.role === "user") {
-                 // Handle both string and array content
-                 if (typeof msg.content === "string") {
-                     input.push({
-                         role: "user",
-                         content: [{ type: "input_text", text: msg.content }],
-                     });
-                 }
-                 else {
-                     // Convert array content to OpenAI Responses format
-                     const content = msg.content.map((item) => {
-                         if (item.type === "text") {
-                             return {
-                                 type: "input_text",
-                                 text: item.text,
-                             };
-                         }
-                         else {
-                             // Image content - OpenAI Responses uses data URLs
-                             return {
-                                 type: "input_image",
-                                 detail: "auto",
-                                 image_url: `data:${item.mimeType};base64,${item.data}`,
-                             };
-                         }
-                     });
-                     const filteredContent = !this.modelInfo?.input.includes("image")
-                         ? content.filter((c) => c.type !== "input_image")
-                         : content;
-                     input.push({
-                         role: "user",
-                         content: filteredContent,
-                     });
-                 }
+         else {
+             params.reasoning = {
+                 effort: model.name.startsWith("gpt-5") ? "minimal" : null,
+                 summary: null,
+             };
+             if (model.name.startsWith("gpt-5")) {
+                 // Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7
+                 messages.push({
+                     role: "developer",
+                     content: [
+                         {
+                             type: "input_text",
+                             text: "# Juice: 0 !important",
+                         },
+                     ],
+                 });
              }
-             else if (msg.role === "assistant") {
-                 // Process content blocks in order
-                 const output = [];
-                 for (const block of msg.content) {
-                     // Do not submit thinking blocks if the completion had an error (i.e. abort)
-                     if (block.type === "thinking" && msg.stopReason !== "error") {
-                         // Push the full reasoning item(s) from signature
-                         if (block.thinkingSignature) {
-                             const reasoningItem = JSON.parse(block.thinkingSignature);
-                             output.push(reasoningItem);
-                         }
+         }
+     }
+     return params;
+ }
+ function convertMessages(model, context) {
+     const messages = [];
+     const transformedMessages = transformMessages(context.messages, model);
+     if (context.systemPrompt) {
+         const role = model.reasoning ? "developer" : "system";
+         messages.push({
+             role,
+             content: context.systemPrompt,
+         });
+     }
+     for (const msg of transformedMessages) {
+         if (msg.role === "user") {
+             if (typeof msg.content === "string") {
+                 messages.push({
+                     role: "user",
+                     content: [{ type: "input_text", text: msg.content }],
+                 });
+             }
+             else {
+                 const content = msg.content.map((item) => {
+                     if (item.type === "text") {
+                         return {
+                             type: "input_text",
+                             text: item.text,
+                         };
                      }
-                     else if (block.type === "text") {
-                         const textBlock = block;
-                         output.push({
-                             type: "message",
-                             role: "assistant",
-                             content: [{ type: "output_text", text: textBlock.text, annotations: [] }],
-                             status: "completed",
-                             id: textBlock.textSignature || "msg_" + Math.random().toString(36).substring(2, 15),
-                         });
-                         // Do not submit thinking blocks if the completion had an error (i.e. abort)
+                     else {
+                         return {
+                             type: "input_image",
+                             detail: "auto",
+                             image_url: `data:${item.mimeType};base64,${item.data}`,
+                         };
                      }
-                     else if (block.type === "toolCall" && msg.stopReason !== "error") {
-                         const toolCall = block;
-                         output.push({
-                             type: "function_call",
-                             id: toolCall.id.split("|")[1], // Extract original ID
-                             call_id: toolCall.id.split("|")[0], // Extract call session ID
-                             name: toolCall.name,
-                             arguments: JSON.stringify(toolCall.arguments),
-                         });
+                 });
+                 const filteredContent = !model.input.includes("image")
+                     ? content.filter((c) => c.type !== "input_image")
+                     : content;
+                 if (filteredContent.length === 0)
+                     continue;
+                 messages.push({
+                     role: "user",
+                     content: filteredContent,
+                 });
+             }
+         }
+         else if (msg.role === "assistant") {
+             const output = [];
+             for (const block of msg.content) {
+                 // Do not submit thinking blocks if the completion had an error (i.e. abort)
+                 if (block.type === "thinking" && msg.stopReason !== "error") {
+                     if (block.thinkingSignature) {
+                         const reasoningItem = JSON.parse(block.thinkingSignature);
+                         output.push(reasoningItem);
                      }
                  }
-                 // Add all output items to input
-                 input.push(...output);
-             }
-             else if (msg.role === "toolResult") {
-                 // Tool results are sent as function_call_output
-                 input.push({
-                     type: "function_call_output",
-                     call_id: msg.toolCallId.split("|")[0], // Extract call session ID
-                     output: msg.content,
-                 });
+                 else if (block.type === "text") {
+                     const textBlock = block;
+                     output.push({
+                         type: "message",
+                         role: "assistant",
+                         content: [{ type: "output_text", text: textBlock.text, annotations: [] }],
+                         status: "completed",
+                         id: textBlock.textSignature || "msg_" + Math.random().toString(36).substring(2, 15),
+                     });
+                     // Do not submit toolcall blocks if the completion had an error (i.e. abort)
+                 }
+                 else if (block.type === "toolCall" && msg.stopReason !== "error") {
+                     const toolCall = block;
+                     output.push({
+                         type: "function_call",
+                         id: toolCall.id.split("|")[1],
+                         call_id: toolCall.id.split("|")[0],
+                         name: toolCall.name,
+                         arguments: JSON.stringify(toolCall.arguments),
+                     });
+                 }
              }
+             if (output.length === 0)
+                 continue;
+             messages.push(...output);
+         }
+         else if (msg.role === "toolResult") {
+             messages.push({
+                 type: "function_call_output",
+                 call_id: msg.toolCallId.split("|")[0],
+                 output: msg.content,
+             });
          }
-         return input;
-     }
-     convertTools(tools) {
-         return tools.map((tool) => ({
-             type: "function",
-             name: tool.name,
-             description: tool.description,
-             parameters: tool.parameters,
-             strict: null,
-         }));
      }
-     mapStopReason(status) {
-         switch (status) {
-             case "completed":
-                 return "stop";
-             case "incomplete":
-                 return "length";
-             case "failed":
-             case "cancelled":
-                 return "error";
-             default:
-                 return "stop";
+     return messages;
+ }
+ function convertTools(tools) {
+     return tools.map((tool) => ({
+         type: "function",
+         name: tool.name,
+         description: tool.description,
+         parameters: tool.parameters,
+         strict: null,
+     }));
+ }
+ function mapStopReason(status) {
+     if (!status)
+         return "stop";
+     switch (status) {
+         case "completed":
+             return "stop";
+         case "incomplete":
+             return "length";
+         case "failed":
+         case "cancelled":
+             return "error";
+         // These two are wonky ...
+         case "in_progress":
+         case "queued":
+             return "stop";
+         default: {
+             const _exhaustive = status;
+             throw new Error(`Unhandled stop reason: ${_exhaustive}`);
          }
      }
  }
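
The producer side above follows a simple push/end contract: stream.push(event) for every progress event, then stream.end() exactly once, on both the success and error paths, with the final done or error event carrying the finished message. The real QueuedGenerateStream ships in package/dist/generate.js (+204 lines), which this diff lists but does not display, so the sketch below is an assumed shape only: a minimal single-consumer async queue, deliberately named AsyncEventQueue so as not to imply it matches the actual implementation.

// Illustrative stand-in for QueuedGenerateStream; the real class in
// dist/generate.js is not shown in this diff and may differ.
class AsyncEventQueue<T> implements AsyncIterable<T> {
    private buffer: T[] = [];
    private waiters: ((result: IteratorResult<T>) => void)[] = [];
    private closed = false;
    // Called by the producer for each event (cf. stream.push(...) above).
    push(event: T): void {
        const waiter = this.waiters.shift();
        if (waiter) waiter({ value: event, done: false });
        else this.buffer.push(event);
    }
    // Called once when generation finishes or fails (cf. stream.end() above).
    end(): void {
        this.closed = true;
        for (const waiter of this.waiters.splice(0)) {
            waiter({ value: undefined, done: true });
        }
    }
    [Symbol.asyncIterator](): AsyncIterator<T> {
        return {
            next: (): Promise<IteratorResult<T>> => {
                // Drain buffered events first, then report completion,
                // otherwise park the consumer until the next push/end.
                if (this.buffer.length > 0) {
                    return Promise.resolve({ value: this.buffer.shift() as T, done: false });
                }
                if (this.closed) {
                    return Promise.resolve({ value: undefined, done: true });
                }
                return new Promise((resolve) => this.waiters.push(resolve));
            },
        };
    }
}

An unbounded buffer like this keeps push() non-blocking, which matches how the provider code pushes events from its async IIFE without awaiting the consumer.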