@proteinjs/conversation 2.5.0 → 2.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/CHANGELOG.md +29 -0
  2. package/dist/index.d.ts +2 -1
  3. package/dist/index.d.ts.map +1 -1
  4. package/dist/index.js +2 -0
  5. package/dist/index.js.map +1 -1
  6. package/dist/src/Conversation.d.ts.map +1 -1
  7. package/dist/src/Conversation.js +12 -16
  8. package/dist/src/Conversation.js.map +1 -1
  9. package/dist/src/OpenAi.js +3 -3
  10. package/dist/src/OpenAi.js.map +1 -1
  11. package/dist/src/OpenAiResponses.d.ts +158 -0
  12. package/dist/src/OpenAiResponses.d.ts.map +1 -0
  13. package/dist/src/OpenAiResponses.js +1621 -0
  14. package/dist/src/OpenAiResponses.js.map +1 -0
  15. package/dist/src/OpenAiStreamProcessor.js +4 -4
  16. package/dist/src/OpenAiStreamProcessor.js.map +1 -1
  17. package/dist/src/UsageData.d.ts +39 -4
  18. package/dist/src/UsageData.d.ts.map +1 -1
  19. package/dist/src/UsageData.js +302 -11
  20. package/dist/src/UsageData.js.map +1 -1
  21. package/dist/src/fs/conversation_fs/ConversationFsModule.d.ts.map +1 -1
  22. package/dist/src/fs/conversation_fs/ConversationFsModule.js +1 -0
  23. package/dist/src/fs/conversation_fs/ConversationFsModule.js.map +1 -1
  24. package/dist/src/fs/conversation_fs/FsFunctions.d.ts +26 -0
  25. package/dist/src/fs/conversation_fs/FsFunctions.d.ts.map +1 -1
  26. package/dist/src/fs/conversation_fs/FsFunctions.js +68 -27
  27. package/dist/src/fs/conversation_fs/FsFunctions.js.map +1 -1
  28. package/index.ts +2 -1
  29. package/package.json +4 -4
  30. package/src/Conversation.ts +14 -17
  31. package/src/OpenAi.ts +3 -3
  32. package/src/OpenAiResponses.ts +1869 -0
  33. package/src/OpenAiStreamProcessor.ts +3 -3
  34. package/src/UsageData.ts +376 -13
  35. package/src/fs/conversation_fs/ConversationFsModule.ts +2 -0
  36. package/src/fs/conversation_fs/FsFunctions.ts +32 -2
@@ -0,0 +1,1869 @@
1
+ import { OpenAI as OpenAIApi } from 'openai';
2
+ import type { ChatCompletionMessageParam } from 'openai/resources/chat';
3
+ import { Logger, LogLevel } from '@proteinjs/logger';
4
+ import type { ConversationModule } from './ConversationModule';
5
+ import type { Function } from './Function';
6
+ import { UsageData, UsageDataAccumulator } from './UsageData';
7
+ import { ChatCompletionMessageParamFactory } from './ChatCompletionMessageParamFactory';
8
+ import type { GenerateResponseReturn, ToolInvocationProgressEvent, ToolInvocationResult } from './OpenAi';
9
+ import { TiktokenModel } from 'tiktoken';
10
+
11
/**
 * Default model used when no model is supplied per call.
 * NOTE(review): the `as TiktokenModel` cast is unchecked — confirm 'gpt-5.2' is a
 * model id tiktoken recognizes, otherwise token accounting may be off.
 */
export const DEFAULT_RESPONSES_MODEL = 'gpt-5.2' as TiktokenModel;

/** Default cap on custom function tool calls executed within a single generate call. */
export const DEFAULT_MAX_TOOL_CALLS = 50;

/** Default hard cap for background-mode polling duration (ms): 1 hour. */
export const DEFAULT_MAX_BACKGROUND_WAIT_MS = 60 * 60 * 1000;

/** Best-effort timeout for cancel calls (avoid hanging abort/timeout paths). */
const DEFAULT_CANCEL_TIMEOUT_MS = 10_000;
19
+
20
/**
 * Responses API service tier.
 * See: Responses API `service_tier` request param and response field.
 * The `(string & {})` arm keeps editor autocomplete for the known tiers while still
 * accepting any future tier string the API may introduce.
 */
export type OpenAiServiceTier = 'auto' | 'default' | 'flex' | 'priority' | (string & {});
25
+
26
/** Construction-time options for {@link OpenAiResponses}. */
export type OpenAiResponsesParams = {
  /** Conversation modules contributing system messages and callable functions. */
  modules?: ConversationModule[];
  /** If provided, only these functions will be exposed to the model. */
  allowedFunctionNames?: string[];
  logLevel?: LogLevel;

  /** Default model when none is provided per call. */
  defaultModel?: TiktokenModel;

  /** Default cap for tool calls (per call). */
  maxToolCalls?: number;

  /** Default hard cap for background-mode polling duration (ms). Default: 1 hour. */
  maxBackgroundWaitMs?: number;
};
41
+
42
/** Per-call options for {@link OpenAiResponses.generateText}. */
export type GenerateTextParams = {
  /** Conversation so far; plain string entries are accepted alongside full message params. */
  messages: (string | ChatCompletionMessageParam)[];
  /** Model override; falls back to the instance default when omitted. */
  model?: TiktokenModel;

  abortSignal?: AbortSignal;

  /** Hard cap for background-mode polling duration (ms). Default: 1 hour. */
  maxBackgroundWaitMs?: number;

  /** Sampling & limits */
  temperature?: number;
  topP?: number;
  maxTokens?: number;

  /** Optional realtime hook for tool-call lifecycle (started/finished). */
  onToolInvocation?: (evt: ToolInvocationProgressEvent) => void;

  /** Usage callback */
  onUsageData?: (usageData: UsageData) => Promise<void>;

  /** Per-call override for reasoning effort (reasoning models only). */
  reasoningEffort?: OpenAIApi.Chat.Completions.ChatCompletionReasoningEffort;

  /** Hard cap for custom function tool calls executed by this wrapper. */
  maxToolCalls?: number;

  /** If true, run using Responses API background mode (polling). */
  backgroundMode?: boolean;

  /** Optional Responses API service tier override (per-request). */
  serviceTier?: OpenAiServiceTier;
};
74
+
75
/**
 * Per-call options for {@link OpenAiResponses.generateObject}.
 * Mirrors {@link GenerateTextParams} plus a required output `schema`.
 */
export type ResponsesGenerateObjectParams<S> = {
  /** Conversation so far; plain string entries are accepted alongside full message params. */
  messages: (string | ChatCompletionMessageParam)[];
  model?: TiktokenModel;

  abortSignal?: AbortSignal;

  /** Hard cap for background-mode polling duration (ms). Default: 1 hour. */
  maxBackgroundWaitMs?: number;

  /** Zod schema or JSON Schema */
  schema: S;

  /** Sampling & limits */
  temperature?: number;
  topP?: number;
  maxTokens?: number;

  /** Optional realtime hook for tool-call lifecycle (started/finished). */
  onToolInvocation?: (evt: ToolInvocationProgressEvent) => void;

  /** Usage callback */
  onUsageData?: (usageData: UsageData) => Promise<void>;

  /** Per-call override for reasoning effort (reasoning models only). */
  reasoningEffort?: OpenAIApi.Chat.Completions.ChatCompletionReasoningEffort;

  /** Hard cap for custom function tool calls executed by this wrapper. */
  maxToolCalls?: number;

  /** If true, run using Responses API background mode (polling). */
  backgroundMode?: boolean;

  /** Optional Responses API service tier override (per-request). */
  serviceTier?: OpenAiServiceTier;
};
110
+
111
+ /**
112
+ * OpenAI Responses API wrapper (tool-loop + usage tracking + ConversationModules).
113
+ * - Uses Responses API directly
114
+ * - Supports custom function tools (tool calling loop)
115
+ * - Supports structured outputs (JSON schema / Zod)
116
+ * - Tracks usage + tool calls using existing types
117
+ * - Supports background mode (polling)
118
+ * - Supports ConversationModules (system messages + tool registration)
119
+ */
120
+ export class OpenAiResponses {
121
  private readonly client: OpenAIApi;
  private readonly logger: Logger;

  private readonly modules: ConversationModule[];
  private readonly allowedFunctionNames?: string[];
  private readonly defaultModel: TiktokenModel;
  private readonly defaultMaxToolCalls: number;
  private readonly defaultMaxBackgroundWaitMs: number;

  // Module processing is lazy and memoized; the promise dedupes concurrent callers.
  private modulesProcessed = false;
  private processingModulesPromise: Promise<void> | null = null;

  // Presumably populated from `modules` when they are processed (ensureModulesProcessed
  // is called by the generate methods but defined outside this chunk) — TODO confirm.
  private systemMessages: string[] = [];
  private functions: Function[] = [];

  constructor(opts: OpenAiResponsesParams = {}) {
    // NOTE(review): default-constructed client — presumably the OpenAI SDK picks up
    // credentials from its environment defaults; confirm before deploying elsewhere.
    this.client = new OpenAIApi();
    this.logger = new Logger({ name: 'OpenAiResponses', logLevel: opts.logLevel });

    this.modules = opts.modules ?? [];
    this.allowedFunctionNames = opts.allowedFunctionNames;

    this.defaultModel = opts.defaultModel ?? DEFAULT_RESPONSES_MODEL;
    this.defaultMaxToolCalls = typeof opts.maxToolCalls === 'number' ? opts.maxToolCalls : DEFAULT_MAX_TOOL_CALLS;

    // Only accept a positive finite override; otherwise fall back to the 1h default.
    this.defaultMaxBackgroundWaitMs =
      typeof opts.maxBackgroundWaitMs === 'number' &&
      Number.isFinite(opts.maxBackgroundWaitMs) &&
      opts.maxBackgroundWaitMs > 0
        ? Math.floor(opts.maxBackgroundWaitMs)
        : DEFAULT_MAX_BACKGROUND_WAIT_MS;
  }
153
+
154
+ /** Plain text generation (supports tool calling). */
155
+ async generateText(args: GenerateTextParams): Promise<GenerateResponseReturn> {
156
+ await this.ensureModulesProcessed();
157
+
158
+ const model = this.resolveModel(args.model);
159
+ const backgroundMode = this.resolveBackgroundMode({
160
+ requested: args.backgroundMode,
161
+ model,
162
+ reasoningEffort: args.reasoningEffort,
163
+ });
164
+
165
+ const maxToolCalls = typeof args.maxToolCalls === 'number' ? args.maxToolCalls : this.defaultMaxToolCalls;
166
+ const maxBackgroundWaitMs = this.resolveMaxBackgroundWaitMs(args.maxBackgroundWaitMs);
167
+
168
+ const result = await this.run({
169
+ model,
170
+ messages: args.messages,
171
+ temperature: args.temperature,
172
+ topP: args.topP,
173
+ maxTokens: args.maxTokens,
174
+ abortSignal: args.abortSignal,
175
+ onToolInvocation: args.onToolInvocation,
176
+ reasoningEffort: args.reasoningEffort,
177
+ maxToolCalls,
178
+ backgroundMode,
179
+ maxBackgroundWaitMs,
180
+ textFormat: undefined,
181
+ serviceTier: args.serviceTier,
182
+ });
183
+
184
+ if (args.onUsageData) {
185
+ await args.onUsageData(result.usagedata);
186
+ }
187
+
188
+ return result;
189
+ }
190
+
191
+ /** Back-compat alias for callers that use `generateResponse`. */
192
+ async generateResponse(args: GenerateTextParams): Promise<GenerateResponseReturn> {
193
+ return this.generateText(args);
194
+ }
195
+
196
+ /** Structured object generation (supports tool calling). */
197
+ async generateObject<T>(args: ResponsesGenerateObjectParams<unknown>): Promise<{ object: T; usageData: UsageData }> {
198
+ await this.ensureModulesProcessed();
199
+
200
+ const model = this.resolveModel(args.model);
201
+ const backgroundMode = this.resolveBackgroundMode({
202
+ requested: args.backgroundMode,
203
+ model,
204
+ reasoningEffort: args.reasoningEffort,
205
+ });
206
+
207
+ const maxToolCalls = typeof args.maxToolCalls === 'number' ? args.maxToolCalls : this.defaultMaxToolCalls;
208
+ const maxBackgroundWaitMs = this.resolveMaxBackgroundWaitMs(args.maxBackgroundWaitMs);
209
+ const textFormat = this.buildTextFormat(args.schema);
210
+
211
+ const result = await this.run({
212
+ model,
213
+ messages: args.messages,
214
+ temperature: args.temperature,
215
+ topP: args.topP,
216
+ maxTokens: args.maxTokens,
217
+ abortSignal: args.abortSignal,
218
+ onToolInvocation: args.onToolInvocation,
219
+ reasoningEffort: args.reasoningEffort,
220
+ maxToolCalls,
221
+ backgroundMode,
222
+ maxBackgroundWaitMs,
223
+ textFormat,
224
+ serviceTier: args.serviceTier,
225
+ });
226
+
227
+ const object = this.parseAndValidateStructuredOutput<T>(result.message, args.schema, {
228
+ model,
229
+ maxOutputTokens: args.maxTokens,
230
+ requestedServiceTier: args.serviceTier,
231
+ serviceTier: result.serviceTier,
232
+ });
233
+
234
+ const outcome = {
235
+ object,
236
+ usageData: result.usagedata,
237
+ };
238
+
239
+ if (args.onUsageData) {
240
+ await args.onUsageData(outcome.usageData);
241
+ }
242
+
243
+ return outcome;
244
+ }
245
+
246
  // -----------------------------------------
  // Core runner (tool loop)
  // -----------------------------------------

  /**
   * Shared tool-calling loop behind generateText/generateObject.
   * Repeatedly creates a response; when the model requests function tools, executes
   * them locally and feeds the outputs back (chained via `previous_response_id`)
   * until the model returns plain assistant output, or a limit/error stops the loop.
   */
  private async run(args: {
    model: TiktokenModel;
    messages: (string | ChatCompletionMessageParam)[];

    temperature?: number;
    topP?: number;
    maxTokens?: number;

    abortSignal?: AbortSignal;
    onToolInvocation?: (evt: ToolInvocationProgressEvent) => void;

    reasoningEffort?: OpenAIApi.Chat.Completions.ChatCompletionReasoningEffort;

    maxToolCalls: number;
    backgroundMode: boolean;
    maxBackgroundWaitMs: number;

    textFormat?: unknown;

    serviceTier?: OpenAiServiceTier;
  }): Promise<GenerateResponseReturn & { serviceTier?: OpenAiServiceTier }> {
    // UsageDataAccumulator is typed around TiktokenModel; keep accumulator model stable,
    // and (optionally) report the actual model via upstream telemetry if you later choose to.
    const usage = new UsageDataAccumulator({ model: args.model });
    const toolInvocations: ToolInvocationResult[] = [];

    const tools = this.buildResponseTools(this.functions);

    const { instructions, input } = this.buildInstructionsAndInput(args.messages);

    let toolCallsExecuted = 0;
    let previousResponseId: string | undefined;
    let nextInput: unknown = input;

    for (;;) {
      const response = await this.createResponseAndMaybeWait({
        model: args.model,
        // Always pass instructions; they are not carried over with previous_response_id.
        instructions,
        input: nextInput,
        previousResponseId,
        tools,
        temperature: args.temperature,
        topP: args.topP,
        maxTokens: args.maxTokens,
        reasoningEffort: args.reasoningEffort,
        textFormat: args.textFormat,
        backgroundMode: args.backgroundMode,
        maxBackgroundWaitMs: args.maxBackgroundWaitMs,
        abortSignal: args.abortSignal,
        serviceTier: args.serviceTier,
      });

      // Accumulate token/cost accounting for every round trip, even ones that
      // only produce tool calls.
      this.addUsageFromResponse(response, usage, { requestedServiceTier: args.serviceTier });

      // For structured outputs we should not attempt to parse incomplete/failed/cancelled responses.
      // For plain-text generation, we allow "incomplete" to pass through (partial output),
      // but still fail on other non-completed statuses.
      this.throwIfResponseUnusable(response as any, {
        allowIncomplete: !args.textFormat,
        model: args.model,
        maxOutputTokens: args.maxTokens,
        requestedServiceTier: args.serviceTier,
      });

      const functionCalls = this.extractFunctionCalls(response);
      if (functionCalls.length < 1) {
        // No tool calls requested: this is the final answer.
        const message = this.extractAssistantText(response);
        if (!message) {
          throw new Error(`Response was empty`);
        }
        return {
          message,
          usagedata: usage.usageData,
          toolInvocations,
          serviceTier: response.service_tier ? response.service_tier : undefined,
        };
      }

      // Enforce the cap BEFORE executing, so a runaway model cannot exceed it.
      if (toolCallsExecuted + functionCalls.length > args.maxToolCalls) {
        throw new Error(`Max tool calls (${args.maxToolCalls}) reached. Stopping execution.`);
      }

      if (!response.id) {
        throw new Error(`Responses API did not return an id for a tool-calling response.`);
      }

      const toolOutputs = await this.executeFunctionCalls({
        calls: functionCalls,
        functions: this.functions,
        usage,
        toolInvocations,
        onToolInvocation: args.onToolInvocation,
      });

      toolCallsExecuted += functionCalls.length;

      // Next iteration sends only the tool outputs, chained to this response.
      previousResponseId = response.id;
      nextInput = toolOutputs;

      this.logger.debug({
        message: `Tool loop continuing`,
        obj: { toolCallsExecuted, lastToolCallCount: functionCalls.length, responseId: previousResponseId },
      });
    }
  }
356
+
357
  /**
   * Throw a rich {@link OpenAiResponsesError} when a response is not in a usable
   * terminal state. `completed` (or a missing status) always passes; `incomplete`
   * passes only when `allowIncomplete` is set (plain-text callers accept partial
   * output). The error message and `details` capture ids, service tiers, usage
   * counters, and output tails so truncation/quota failures are diagnosable from
   * logs alone.
   */
  private throwIfResponseUnusable(
    response: any,
    opts: {
      allowIncomplete: boolean;
      model?: string;
      maxOutputTokens?: number;
      requestedServiceTier?: OpenAiServiceTier;
    }
  ): void {
    const statusRaw = typeof response?.status === 'string' ? String(response.status) : '';
    const status = statusRaw.toLowerCase();

    if (!status || status === 'completed') {
      return;
    }

    if (status === 'incomplete' && opts.allowIncomplete) {
      return;
    }

    const id = typeof response?.id === 'string' ? response.id : '';
    const reason = response?.incomplete_details?.reason;
    const apiErr = response?.error;

    const serviceTier =
      typeof response?.service_tier === 'string' && response.service_tier.trim() ? response.service_tier.trim() : '';

    // Capture both the convenience `output_text` and whatever assistant text we can
    // extract from the structured output, so the error shows any partial content.
    const directOutputText = typeof response?.output_text === 'string' ? response.output_text : '';
    const assistantText = this.extractAssistantText(response as any);

    const outTextLen = directOutputText ? directOutputText.length : 0;
    const assistantLen = assistantText ? assistantText.length : 0;

    const usage = response?.usage;
    const inputTokens = typeof usage?.input_tokens === 'number' ? usage.input_tokens : undefined;
    const outputTokens = typeof usage?.output_tokens === 'number' ? usage.output_tokens : undefined;
    const totalTokens =
      typeof usage?.total_tokens === 'number'
        ? usage.total_tokens
        : typeof inputTokens === 'number' && typeof outputTokens === 'number'
          ? inputTokens + outputTokens
          : undefined;

    let msg = `Responses API returned status="${status}"`;
    if (id) {
      msg += ` (id=${id})`;
    }
    msg += `.`;

    // undefined values are deliberate: they keep keys stable while letting JSON
    // serialization drop absent fields.
    const details: Record<string, unknown> = {
      response_id: id || undefined,
      status,
      model: typeof opts.model === 'string' && opts.model.trim() ? opts.model : undefined,
      max_output_tokens: typeof opts.maxOutputTokens === 'number' ? opts.maxOutputTokens : undefined,

      requested_service_tier:
        typeof opts.requestedServiceTier === 'string' && opts.requestedServiceTier.trim()
          ? opts.requestedServiceTier.trim()
          : undefined,
      service_tier: serviceTier || undefined,

      incomplete_reason: typeof reason === 'string' && reason.trim() ? reason : undefined,
      api_error: apiErr ?? undefined,

      usage_input_tokens: inputTokens,
      usage_output_tokens: outputTokens,
      usage_total_tokens: totalTokens,

      output_text_len: outTextLen || undefined,
      output_text_tail: outTextLen > 0 ? truncateTail(directOutputText, 400) : undefined,

      assistant_text_len: assistantLen || undefined,
      assistant_text_tail: assistantLen > 0 ? truncateTail(assistantText, 400) : undefined,
    };

    // Mirror the most useful details into the single-line message for log grepping.
    const extra: string[] = [];
    if (details.model) {
      extra.push(`model=${details.model}`);
    }
    if (typeof details.max_output_tokens === 'number') {
      extra.push(`max_output_tokens=${details.max_output_tokens}`);
    }
    if (typeof details.requested_service_tier === 'string') {
      extra.push(`requested_service_tier=${details.requested_service_tier}`);
    }
    if (typeof details.service_tier === 'string') {
      extra.push(`service_tier=${details.service_tier}`);
    }
    if (details.incomplete_reason) {
      extra.push(`reason=${details.incomplete_reason}`);
    }
    if (typeof details.output_text_len === 'number') {
      extra.push(`output_text_len=${details.output_text_len}`);
    }
    if (typeof details.assistant_text_len === 'number') {
      extra.push(`assistant_text_len=${details.assistant_text_len}`);
    }

    if (extra.length > 0) {
      msg += ` ${extra.join(' ')}.`;
    }

    throw new OpenAiResponsesError({
      code: 'RESPONSE_STATUS',
      message: msg,
      details,
    });
  }
465
+
466
  /**
   * Normalize any error thrown by the OpenAI SDK into an {@link OpenAiResponsesError}
   * with a single log-friendly message line plus structured `details` (HTTP status,
   * request id, polling context, service tiers). `retryable` is derived from the
   * HTTP status via `isRetryableHttpStatus`.
   */
  private toOpenAiApiError(
    error: unknown,
    meta: {
      operation: 'responses.create' | 'responses.retrieve' | 'responses.cancel';
      model?: string;
      reasoningEffort?: OpenAIApi.Chat.Completions.ChatCompletionReasoningEffort;
      backgroundMode?: boolean;
      responseId?: string;
      previousResponseId?: string;
      pollAttempt?: number;
      aborted?: boolean;
      waitedMs?: number;
      maxWaitMs?: number;
      lastStatus?: string;
      requestedServiceTier?: OpenAiServiceTier;
      serviceTier?: string;
    }
  ): OpenAiResponsesError {
    const status = extractHttpStatus(error);
    const requestId = extractRequestId(error);
    const retryable = isRetryableHttpStatus(status);

    const errMsg = error instanceof Error ? error.message : String(error ?? '');
    const errName = error instanceof Error ? error.name : undefined;

    // Caller may already know it aborted; otherwise detect from the error shape.
    const aborted = meta.aborted === true || isAbortError(error);

    let msg = `OpenAI ${meta.operation} failed.`;
    const extra: string[] = [];

    if (aborted) {
      extra.push(`aborted=true`);
    }
    if (typeof status === 'number') {
      extra.push(`status=${status}`);
    }
    if (requestId) {
      extra.push(`requestId=${requestId}`);
    }
    if (meta.responseId) {
      extra.push(`responseId=${meta.responseId}`);
    }
    if (meta.backgroundMode) {
      extra.push(`background=true`);
    }
    if (typeof meta.pollAttempt === 'number') {
      extra.push(`pollAttempt=${meta.pollAttempt}`);
    }
    if (typeof meta.waitedMs === 'number') {
      extra.push(`waitedMs=${meta.waitedMs}`);
    }
    if (typeof meta.maxWaitMs === 'number') {
      extra.push(`maxWaitMs=${meta.maxWaitMs}`);
    }
    if (typeof meta.lastStatus === 'string' && meta.lastStatus.trim()) {
      extra.push(`lastStatus=${meta.lastStatus.trim()}`);
    }
    if (typeof meta.model === 'string' && meta.model.trim()) {
      extra.push(`model=${meta.model.trim()}`);
    }
    if (meta.reasoningEffort) {
      extra.push(`reasoningEffort=${meta.reasoningEffort}`);
    }
    if (typeof meta.requestedServiceTier === 'string' && meta.requestedServiceTier.trim()) {
      extra.push(`requested_service_tier=${meta.requestedServiceTier.trim()}`);
    }
    if (typeof meta.serviceTier === 'string' && meta.serviceTier.trim()) {
      extra.push(`service_tier=${meta.serviceTier.trim()}`);
    }

    if (extra.length > 0) {
      msg += ` ${extra.join(' ')}.`;
    }
    if (errMsg) {
      // JSON.stringify quotes/escapes the upstream message so the line stays parseable.
      msg += ` error=${JSON.stringify(errMsg)}.`;
    }

    const details: Record<string, unknown> = {
      operation: meta.operation,
      status: typeof status === 'number' ? status : undefined,
      request_id: requestId,
      response_id: meta.responseId,
      previous_response_id: meta.previousResponseId,
      background: meta.backgroundMode ? true : undefined,
      poll_attempt: meta.pollAttempt,
      waited_ms: meta.waitedMs,
      max_wait_ms: meta.maxWaitMs,
      last_status: typeof meta.lastStatus === 'string' && meta.lastStatus.trim() ? meta.lastStatus.trim() : undefined,
      model: typeof meta.model === 'string' && meta.model.trim() ? meta.model.trim() : undefined,
      reasoning_effort: meta.reasoningEffort,
      requested_service_tier:
        typeof meta.requestedServiceTier === 'string' && meta.requestedServiceTier.trim()
          ? meta.requestedServiceTier.trim()
          : undefined,
      service_tier:
        typeof meta.serviceTier === 'string' && meta.serviceTier.trim() ? meta.serviceTier.trim() : undefined,
      error_name: errName,
      aborted: aborted ? true : undefined,
    };

    return new OpenAiResponsesError({
      code: 'OPENAI_API',
      message: msg,
      details,
      cause: error,
      retryable,
    });
  }
574
+
575
+ private resolveMaxBackgroundWaitMs(ms?: number): number {
576
+ const n =
577
+ typeof ms === 'number' && Number.isFinite(ms) && ms > 0 ? Math.floor(ms) : this.defaultMaxBackgroundWaitMs;
578
+ // Ensure we never return a non-positive number even if misconfigured elsewhere.
579
+ return n > 0 ? n : DEFAULT_MAX_BACKGROUND_WAIT_MS;
580
+ }
581
+
582
+ private async cancelResponseBestEffort(
583
+ responseId: string
584
+ ): Promise<
585
+ | { attempted: false }
586
+ | { attempted: true; ok: true }
587
+ | { attempted: true; ok: false; error?: Record<string, unknown> }
588
+ > {
589
+ if (!responseId) {
590
+ return { attempted: false };
591
+ }
592
+
593
+ try {
594
+ const resp = await this.client.responses.cancel(responseId);
595
+
596
+ // Docs show cancelled as the post-cancel status.
597
+ if (resp?.status === 'cancelled') {
598
+ return { attempted: true, ok: true };
599
+ }
600
+
601
+ return {
602
+ attempted: true,
603
+ ok: false,
604
+ error: {
605
+ message: 'Cancel did not return status=cancelled',
606
+ status: resp?.status,
607
+ },
608
+ };
609
+ } catch (e: unknown) {
610
+ return { attempted: true, ok: false, error: safeErrorSummary(e) };
611
+ }
612
+ }
613
+
614
  /**
   * Issue one Responses API create call and, in background mode, poll it to a
   * terminal state. The request body is assembled from only the options that were
   * actually provided, so we never send explicit `undefined` fields to the API.
   */
  private async createResponseAndMaybeWait(args: {
    model: string;
    instructions?: string;
    input: unknown;
    previousResponseId?: string;

    tools: Array<{ type: 'function'; name: string; description?: string; parameters?: unknown; strict?: boolean }>;
    temperature?: number;
    topP?: number;
    maxTokens?: number;
    reasoningEffort?: OpenAIApi.Chat.Completions.ChatCompletionReasoningEffort;

    textFormat?: unknown;

    backgroundMode: boolean;
    maxBackgroundWaitMs: number;
    abortSignal?: AbortSignal;

    serviceTier?: OpenAiServiceTier;
  }): Promise<OpenAIApi.Responses.Response> {
    const body: Record<string, unknown> = {
      model: args.model,
      input: args.input,
    };

    if (args.instructions) {
      body.instructions = args.instructions;
    }

    if (args.previousResponseId) {
      body.previous_response_id = args.previousResponseId;
    }

    if (args.tools.length > 0) {
      body.tools = args.tools;
    }

    if (typeof args.temperature === 'number') {
      body.temperature = args.temperature;
    }
    if (typeof args.topP === 'number') {
      body.top_p = args.topP;
    }
    if (typeof args.maxTokens === 'number') {
      body.max_output_tokens = args.maxTokens;
    }
    if (args.reasoningEffort) {
      body.reasoning = { effort: args.reasoningEffort };
    }
    if (args.textFormat) {
      body.text = { format: args.textFormat };
    }

    if (typeof args.serviceTier === 'string' && args.serviceTier.trim()) {
      body.service_tier = args.serviceTier.trim();
    }

    if (args.backgroundMode) {
      // Background responses are stored so they can be retrieved by id while polling.
      body.background = true;
      body.store = true;
    }

    let created: OpenAIApi.Responses.Response;
    try {
      created = await this.client.responses.create(
        body as never,
        args.abortSignal ? { signal: args.abortSignal } : undefined
      );
    } catch (error: unknown) {
      throw this.toOpenAiApiError(error, {
        operation: 'responses.create',
        model: args.model,
        reasoningEffort: args.reasoningEffort,
        backgroundMode: args.backgroundMode,
        previousResponseId: args.previousResponseId,
        aborted: args.abortSignal?.aborted ? true : undefined,
        requestedServiceTier: args.serviceTier,
      });
    }

    if (!args.backgroundMode) {
      return created;
    }

    // Defensive: without an id we cannot poll; return whatever the create call gave us.
    if (!created?.id) {
      return created;
    }

    return await this.waitForCompletion(created.id, args.abortSignal, {
      model: args.model,
      reasoningEffort: args.reasoningEffort,
      maxWaitMs: this.resolveMaxBackgroundWaitMs(args.maxBackgroundWaitMs),
      requestedServiceTier: args.serviceTier,
    });
  }
709
+
710
  /**
   * Poll a background response until it reaches a terminal status
   * (completed/failed/incomplete/cancelled), honoring the abort signal and a hard
   * wall-clock cap. On abort or timeout a best-effort server-side cancel is
   * attempted before throwing, so work does not keep running unobserved.
   */
  private async waitForCompletion(
    responseId: string,
    abortSignal?: AbortSignal,
    ctx?: {
      model?: string;
      reasoningEffort?: OpenAIApi.Chat.Completions.ChatCompletionReasoningEffort;
      maxWaitMs?: number;
      requestedServiceTier?: OpenAiServiceTier;
    }
  ): Promise<OpenAIApi.Responses.Response> {
    this.logger.debug({ message: 'Waiting for completion', obj: { responseId } });
    const maxWaitMs = this.resolveMaxBackgroundWaitMs(ctx?.maxWaitMs);

    const startedAtMs = Date.now();

    const delayMs = 1000;
    let pollAttempt = 0;

    let lastStatus = '';
    let cancelAttempted = false;

    const warnEveryMs = 10 * 60 * 1000;
    let nextWarnAtMs = warnEveryMs;

    // Build and throw the terminal polling error (abort or timeout), attempting a
    // one-shot server-side cancel first. Declared as Promise<never>: it always throws.
    const throwPollingStop = async (args: { kind: 'aborted' | 'timeout'; cause?: unknown }): Promise<never> => {
      const waitedMs = Date.now() - startedAtMs;

      // Best-effort cancellation to stop server-side work when we're done waiting.
      let cancel: Awaited<ReturnType<OpenAiResponses['cancelResponseBestEffort']>> | undefined = undefined;
      if (!cancelAttempted) {
        cancelAttempted = true;
        cancel = await this.cancelResponseBestEffort(responseId);
      }

      const baseDetails: Record<string, unknown> = {
        operation: 'responses.retrieve',
        response_id: responseId,
        background: true,
        poll_attempt: pollAttempt,
        waited_ms: waitedMs,
        max_wait_ms: maxWaitMs,
        last_status: lastStatus || undefined,
        model: typeof ctx?.model === 'string' && ctx.model.trim() ? ctx.model.trim() : undefined,
        reasoning_effort: ctx?.reasoningEffort,
        requested_service_tier:
          typeof ctx?.requestedServiceTier === 'string' && ctx.requestedServiceTier.trim()
            ? ctx.requestedServiceTier.trim()
            : undefined,
        aborted: args.kind === 'aborted' ? true : undefined,
        timeout: args.kind === 'timeout' ? true : undefined,
        cancel_attempted: cancel?.attempted ? true : undefined,
        cancel_ok: cancel && cancel.attempted && 'ok' in cancel ? (cancel as any).ok : undefined,
        cancel_timed_out: cancel && cancel.attempted && (cancel as any).timedOut ? true : undefined,
        cancel_error: cancel && cancel.attempted && (cancel as any).error ? (cancel as any).error : undefined,
      };

      if (args.cause) {
        baseDetails.polling_cause = safeErrorSummary(args.cause);
      }

      const msg =
        args.kind === 'timeout'
          ? `Background response exceeded max wait (maxWaitMs=${maxWaitMs}) while polling (id=${responseId}).`
          : `Background polling aborted (id=${responseId}).`;

      throw new OpenAiResponsesError({
        code: 'OPENAI_API',
        message: msg,
        details: baseDetails,
        cause: args.cause,
      });
    };

    for (;;) {
      const waitedMs = Date.now() - startedAtMs;

      // Abort wins immediately.
      if (abortSignal?.aborted) {
        await throwPollingStop({ kind: 'aborted' });
      }

      // Max wait cap (1h default) to prevent runaway polling.
      if (waitedMs >= maxWaitMs) {
        await throwPollingStop({ kind: 'timeout' });
      }

      // Warn every 10 minutes elapsed (best-effort; may log slightly after the boundary).
      if (waitedMs >= nextWarnAtMs) {
        nextWarnAtMs += warnEveryMs;

        this.logger.warn({
          message: `Background polling still in progress`,
          obj: {
            responseId,
            status: lastStatus || undefined,
            waitedMs,
            pollAttempt,
            model: typeof ctx?.model === 'string' && ctx.model.trim() ? ctx.model.trim() : undefined,
            reasoningEffort: ctx?.reasoningEffort,
            serviceTier:
              typeof ctx?.requestedServiceTier === 'string' && ctx.requestedServiceTier.trim()
                ? ctx.requestedServiceTier.trim()
                : undefined,
          },
        });
      }

      pollAttempt += 1;

      let resp: OpenAIApi.Responses.Response;
      try {
        resp = await this.client.responses.retrieve(
          responseId,
          undefined,
          abortSignal ? { signal: abortSignal } : undefined
        );
      } catch (error: unknown) {
        // If the request was aborted mid-flight, treat it as an abort and still attempt cancellation.
        if (abortSignal?.aborted || isAbortError(error)) {
          await throwPollingStop({ kind: 'aborted', cause: error });
        }

        throw this.toOpenAiApiError(error, {
          operation: 'responses.retrieve',
          model: ctx?.model,
          reasoningEffort: ctx?.reasoningEffort,
          backgroundMode: true,
          responseId,
          pollAttempt,
          waitedMs,
          maxWaitMs,
          lastStatus,
          requestedServiceTier: ctx?.requestedServiceTier,
        });
      }

      const status = typeof resp?.status === 'string' ? resp.status : '';
      lastStatus = status;

      // Terminal states
      if (status === 'completed' || status === 'failed' || status === 'incomplete' || status === 'cancelled') {
        return resp;
      }

      this.logger.debug({ message: `Polling response`, obj: { responseId, status, delayMs, pollAttempt, waitedMs } });

      // Sleep but wake early if aborted, so abort latency is low.
      await sleepWithAbort(delayMs, abortSignal);
    }
  }
860
+
861
+ // -----------------------------------------
862
+ // Tool calls
863
+ // -----------------------------------------
864
+
865
+ private buildResponseTools(
866
+ functions: Function[]
867
+ ): Array<{ type: 'function'; name: string; description?: string; parameters?: unknown; strict?: boolean }> {
868
+ const tools: Array<{
869
+ type: 'function';
870
+ name: string;
871
+ description?: string;
872
+ parameters?: unknown;
873
+ strict?: boolean;
874
+ }> = [];
875
+
876
+ if (!functions || functions.length < 1) {
877
+ return tools;
878
+ }
879
+
880
+ for (const f of functions) {
881
+ const def = f.definition;
882
+ if (!def?.name) {
883
+ continue;
884
+ }
885
+
886
+ tools.push({
887
+ type: 'function',
888
+ name: def.name,
889
+ description: def.description,
890
+ parameters: def.parameters,
891
+ // strict: true,
892
+ });
893
+ }
894
+
895
+ return tools;
896
+ }
897
+
898
+ private extractFunctionCalls(response: { output?: unknown[] }): Array<{
899
+ type: 'function_call';
900
+ call_id: string;
901
+ name: string;
902
+ arguments: string;
903
+ }> {
904
+ const out = Array.isArray(response.output) ? response.output : [];
905
+ const calls: Array<{ type: 'function_call'; call_id: string; name: string; arguments: string }> = [];
906
+
907
+ for (const item of out) {
908
+ if (!item || typeof item !== 'object') {
909
+ continue;
910
+ }
911
+ const rec = item as Record<string, unknown>;
912
+ if (rec.type !== 'function_call') {
913
+ continue;
914
+ }
915
+
916
+ const call_id = typeof rec.call_id === 'string' ? rec.call_id : '';
917
+ const name = typeof rec.name === 'string' ? rec.name : '';
918
+ const args = typeof rec.arguments === 'string' ? rec.arguments : '';
919
+
920
+ if (!call_id || !name) {
921
+ continue;
922
+ }
923
+
924
+ calls.push({ type: 'function_call', call_id, name, arguments: args });
925
+ }
926
+
927
+ return calls;
928
+ }
929
+
930
+ private async executeFunctionCalls(args: {
931
+ calls: Array<{ type: 'function_call'; call_id: string; name: string; arguments: string }>;
932
+ functions: Function[];
933
+ usage: UsageDataAccumulator;
934
+ toolInvocations: ToolInvocationResult[];
935
+ onToolInvocation?: (evt: ToolInvocationProgressEvent) => void;
936
+ }): Promise<Array<{ type: 'function_call_output'; call_id: string; output: string }>> {
937
+ const outputs: Array<{ type: 'function_call_output'; call_id: string; output: string }> = [];
938
+
939
+ for (const call of args.calls) {
940
+ outputs.push(
941
+ await this.executeFunctionCall({
942
+ call,
943
+ functions: args.functions,
944
+ usage: args.usage,
945
+ toolInvocations: args.toolInvocations,
946
+ onToolInvocation: args.onToolInvocation,
947
+ })
948
+ );
949
+ }
950
+
951
+ return outputs;
952
+ }
953
+
954
  /**
   * Executes a single assistant function call and returns the
   * function_call_output item to send back to the model.
   *
   * Lifecycle: emits a 'started' progress event, then exactly one 'finished'
   * event (success, unknown-function, or failure), always appending a
   * ToolInvocationResult record to args.toolInvocations. An unknown function
   * produces an error payload for the model; a throwing function re-throws
   * after recording the failure.
   */
  private async executeFunctionCall(args: {
    call: { call_id: string; name: string; arguments: string };
    functions: Function[];
    usage: UsageDataAccumulator;
    toolInvocations: ToolInvocationResult[];
    onToolInvocation?: (evt: ToolInvocationProgressEvent) => void;
  }): Promise<{ type: 'function_call_output'; call_id: string; output: string }> {
    const callId = args.call.call_id;
    const rawName = args.call.name;
    // Last dotted segment, so "module.fn" can match a tool registered as "fn".
    const shortName = rawName.split('.').pop() ?? rawName;

    // Prefer an exact name match; fall back to matching on the short name.
    const functionToCall =
      args.functions.find((fx) => fx.definition.name === rawName) ??
      args.functions.find((fx) => (fx.definition.name.split('.').pop() ?? fx.definition.name) === shortName);

    const startedAt = new Date();

    // Parsed purely for progress/record reporting: on bad JSON we keep the
    // raw string so the invocation record shows what the model actually sent.
    let parsedArgs: unknown;
    try {
      parsedArgs = JSON.parse(args.call.arguments ?? '{}');
    } catch {
      parsedArgs = args.call.arguments;
    }

    args.onToolInvocation?.({
      type: 'started',
      id: callId,
      name: functionToCall?.definition?.name ?? shortName,
      startedAt,
      input: parsedArgs,
    });

    if (!functionToCall) {
      // Unknown function: record the failure and tell the model, but do not throw.
      const finishedAt = new Date();
      const rec: ToolInvocationResult = {
        id: callId,
        name: shortName,
        startedAt,
        finishedAt,
        input: parsedArgs,
        ok: false,
        error: { message: `Assistant attempted to call nonexistent function` },
      };
      args.toolInvocations.push(rec);
      args.onToolInvocation?.({ type: 'finished', result: rec });

      return {
        type: 'function_call_output',
        call_id: callId,
        output: JSON.stringify({ error: rec.error?.message, functionName: shortName }),
      };
    }

    try {
      // Re-parse for execution with a different fallback: unparseable
      // arguments become {} so the tool still gets an object to work with.
      let argsObj: unknown;
      try {
        argsObj = JSON.parse(args.call.arguments ?? '{}');
      } catch {
        argsObj = {};
      }

      args.usage.recordToolCall(functionToCall.definition.name);

      const returnObject = await functionToCall.call(argsObj);
      const finishedAt = new Date();

      const rec: ToolInvocationResult = {
        id: callId,
        name: functionToCall.definition.name,
        startedAt,
        finishedAt,
        input: argsObj,
        ok: true,
        data: returnObject,
      };
      args.toolInvocations.push(rec);
      args.onToolInvocation?.({ type: 'finished', result: rec });

      const output = await this.formatToolReturn(returnObject);

      return {
        type: 'function_call_output',
        call_id: callId,
        output,
      };
    } catch (error: unknown) {
      const finishedAt = new Date();

      const errMessage = error instanceof Error ? error.message : String(error);
      const errStack = error instanceof Error ? error.stack : undefined;

      // Record the failure (with the report-oriented parsedArgs) before rethrowing.
      const rec: ToolInvocationResult = {
        id: callId,
        name: functionToCall.definition.name,
        startedAt,
        finishedAt,
        input: parsedArgs,
        ok: false,
        error: { message: errMessage, stack: errStack },
      };
      args.toolInvocations.push(rec);
      args.onToolInvocation?.({ type: 'finished', result: rec });

      throw error;
    }
  }
1060
+
1061
+ private async formatToolReturn(returnObject: unknown): Promise<string> {
1062
+ if (typeof returnObject === 'undefined') {
1063
+ return JSON.stringify({ result: 'Function with no return value executed successfully' });
1064
+ }
1065
+
1066
+ if (returnObject instanceof ChatCompletionMessageParamFactory) {
1067
+ const messageParams = await returnObject.create();
1068
+ const normalized = (messageParams ?? [])
1069
+ .map((m) => ({
1070
+ role: m.role,
1071
+ content: this.extractTextContent(m.content),
1072
+ }))
1073
+ .filter((m) => typeof m.content === 'string' && m.content.trim().length > 0);
1074
+
1075
+ return JSON.stringify({ messages: normalized });
1076
+ }
1077
+
1078
+ return JSON.stringify(returnObject);
1079
+ }
1080
+
1081
+ // -----------------------------------------
1082
+ // Usage + text extraction
1083
+ // -----------------------------------------
1084
+
1085
+ private addUsageFromResponse(
1086
+ response: OpenAIApi.Responses.Response,
1087
+ usage: UsageDataAccumulator,
1088
+ ctx?: { requestedServiceTier?: OpenAiServiceTier }
1089
+ ): void {
1090
+ if (!response.usage) {
1091
+ return;
1092
+ }
1093
+
1094
+ usage.addTokenUsage(
1095
+ {
1096
+ inputTokens: response.usage.input_tokens,
1097
+ cachedInputTokens: response.usage.input_tokens_details.cached_tokens,
1098
+ outputTokens: response.usage.output_tokens,
1099
+ reasoningTokens: response.usage.output_tokens_details.reasoning_tokens,
1100
+ totalTokens: response.usage.total_tokens,
1101
+ },
1102
+ { serviceTier: response.service_tier ?? ctx?.requestedServiceTier }
1103
+ );
1104
+ }
1105
+
1106
+ private extractAssistantText(response: { output_text?: string; output?: unknown[] }): string {
1107
+ const out = Array.isArray(response.output) ? response.output : [];
1108
+
1109
+ let lastJoined = '';
1110
+
1111
+ for (const item of out) {
1112
+ if (!item || typeof item !== 'object') {
1113
+ continue;
1114
+ }
1115
+ const rec = item as Record<string, unknown>;
1116
+ if (rec.type !== 'message') {
1117
+ continue;
1118
+ }
1119
+ if (rec.role !== 'assistant') {
1120
+ continue;
1121
+ }
1122
+
1123
+ const contentRaw = rec.content;
1124
+ if (!Array.isArray(contentRaw)) {
1125
+ continue;
1126
+ }
1127
+
1128
+ const pieces: string[] = [];
1129
+ for (const c of contentRaw) {
1130
+ if (!c || typeof c !== 'object') {
1131
+ continue;
1132
+ }
1133
+ const part = c as Record<string, unknown>;
1134
+ if (part.type !== 'output_text') {
1135
+ continue;
1136
+ }
1137
+ const t = part.text;
1138
+ if (typeof t === 'string' && t.trim()) {
1139
+ pieces.push(t);
1140
+ }
1141
+ }
1142
+
1143
+ const joined = pieces.join('\n').trim();
1144
+ if (joined) {
1145
+ lastJoined = joined;
1146
+ }
1147
+ }
1148
+
1149
+ if (lastJoined) {
1150
+ return lastJoined;
1151
+ }
1152
+
1153
+ const direct = typeof response.output_text === 'string' ? response.output_text.trim() : '';
1154
+ if (direct) {
1155
+ return direct;
1156
+ }
1157
+
1158
+ return '';
1159
+ }
1160
+
1161
+ // -----------------------------------------
1162
+ // Structured outputs (JSON schema / Zod)
1163
+ // -----------------------------------------
1164
+
1165
+ private buildTextFormat(schema: unknown): unknown {
1166
+ if (this.isZodSchema(schema)) {
1167
+ // Prefer the official helper when schema is Zod.
1168
+ // eslint-disable-next-line @typescript-eslint/no-var-requires
1169
+ const mod = require('openai/helpers/zod');
1170
+ return mod.zodTextFormat(schema, 'output');
1171
+ }
1172
+
1173
+ return {
1174
+ type: 'json_schema',
1175
+ name: 'output',
1176
+ strict: true,
1177
+ schema: this.strictifyJsonSchema(schema),
1178
+ };
1179
+ }
1180
+
1181
+ private parseAndValidateStructuredOutput<T>(
1182
+ text: string,
1183
+ schema: unknown,
1184
+ ctx?: { model?: string; maxOutputTokens?: number; requestedServiceTier?: OpenAiServiceTier; serviceTier?: string }
1185
+ ): T {
1186
+ const parsed = this.parseJson(text, ctx);
1187
+
1188
+ if (this.isZodSchema(schema)) {
1189
+ const res = schema.safeParse(parsed);
1190
+ if (!res?.success) {
1191
+ throw new Error(`Structured output failed schema validation`);
1192
+ }
1193
+ return res.data as T;
1194
+ }
1195
+
1196
+ return parsed as T;
1197
+ }
1198
+
1199
+ private isZodSchema(schema: unknown): schema is { safeParse: (input: unknown) => { success: boolean; data?: any } } {
1200
+ if (!schema || (typeof schema !== 'object' && typeof schema !== 'function')) {
1201
+ return false;
1202
+ }
1203
+ return typeof (schema as any).safeParse === 'function';
1204
+ }
1205
+
1206
+ private parseJson(
1207
+ text: string,
1208
+ ctx?: { model?: string; maxOutputTokens?: number; requestedServiceTier?: OpenAiServiceTier; serviceTier?: string }
1209
+ ): any {
1210
+ const cleaned = String(text ?? '')
1211
+ .trim()
1212
+ .replace(/^```(?:json)?/i, '')
1213
+ .replace(/```$/i, '')
1214
+ .trim();
1215
+
1216
+ try {
1217
+ return JSON.parse(cleaned);
1218
+ } catch (err1: unknown) {
1219
+ const firstErrMsg = err1 instanceof Error ? err1.message : String(err1);
1220
+
1221
+ const s = cleaned;
1222
+ const firstObj = s.indexOf('{');
1223
+ const firstArr = s.indexOf('[');
1224
+ const start = firstObj === -1 ? firstArr : firstArr === -1 ? firstObj : Math.min(firstObj, firstArr);
1225
+
1226
+ const lastObj = s.lastIndexOf('}');
1227
+ const lastArr = s.lastIndexOf(']');
1228
+ const end = Math.max(lastObj, lastArr);
1229
+
1230
+ if (start >= 0 && end > start) {
1231
+ const candidate = s.slice(start, end + 1);
1232
+ try {
1233
+ return JSON.parse(candidate);
1234
+ } catch (err2: unknown) {
1235
+ const secondErrMsg = err2 instanceof Error ? err2.message : String(err2);
1236
+
1237
+ const pos2rel = extractJsonParsePosition(secondErrMsg);
1238
+ const pos2 = typeof pos2rel === 'number' ? start + pos2rel : undefined;
1239
+
1240
+ const pos1 = extractJsonParsePosition(firstErrMsg);
1241
+ const pos = typeof pos2 === 'number' ? pos2 : pos1;
1242
+
1243
+ const lc = extractJsonParseLineCol(secondErrMsg) ?? extractJsonParseLineCol(firstErrMsg);
1244
+
1245
+ const details: Record<string, unknown> = {
1246
+ model: typeof ctx?.model === 'string' && ctx.model.trim() ? ctx.model : undefined,
1247
+ max_output_tokens: typeof ctx?.maxOutputTokens === 'number' ? ctx.maxOutputTokens : undefined,
1248
+
1249
+ requested_service_tier:
1250
+ typeof ctx?.requestedServiceTier === 'string' && String(ctx.requestedServiceTier).trim()
1251
+ ? String(ctx.requestedServiceTier).trim()
1252
+ : undefined,
1253
+ service_tier:
1254
+ typeof ctx?.serviceTier === 'string' && ctx.serviceTier.trim() ? ctx.serviceTier.trim() : undefined,
1255
+
1256
+ cleaned_len: s.length,
1257
+ cleaned_head: truncateHead(s, 250),
1258
+ cleaned_tail: truncateTail(s, 500),
1259
+
1260
+ json_start: start,
1261
+ json_end: end,
1262
+ json_candidate_len: candidate.length,
1263
+
1264
+ first_error: firstErrMsg,
1265
+ second_error: secondErrMsg,
1266
+
1267
+ error_pos: typeof pos === 'number' ? pos : undefined,
1268
+ error_line: lc?.line,
1269
+ error_column: lc?.column,
1270
+ error_context: typeof pos === 'number' ? snippetAround(s, pos, 160) : undefined,
1271
+ };
1272
+
1273
+ const msg =
1274
+ `Failed to parse model output as JSON. ` +
1275
+ `cleaned_len=${s.length} json_start=${start} json_end=${end}. ` +
1276
+ `first_error=${JSON.stringify(firstErrMsg)} second_error=${JSON.stringify(secondErrMsg)}.`;
1277
+
1278
+ throw new OpenAiResponsesError({
1279
+ code: 'JSON_PARSE',
1280
+ message: msg,
1281
+ details,
1282
+ cause: err2,
1283
+ });
1284
+ }
1285
+ }
1286
+
1287
+ const pos = extractJsonParsePosition(firstErrMsg);
1288
+ const lc = extractJsonParseLineCol(firstErrMsg);
1289
+
1290
+ const details: Record<string, unknown> = {
1291
+ model: typeof ctx?.model === 'string' && ctx.model.trim() ? ctx.model : undefined,
1292
+ max_output_tokens: typeof ctx?.maxOutputTokens === 'number' ? ctx.maxOutputTokens : undefined,
1293
+
1294
+ requested_service_tier:
1295
+ typeof ctx?.requestedServiceTier === 'string' && String(ctx.requestedServiceTier).trim()
1296
+ ? String(ctx.requestedServiceTier).trim()
1297
+ : undefined,
1298
+ service_tier:
1299
+ typeof ctx?.serviceTier === 'string' && ctx.serviceTier.trim() ? ctx.serviceTier.trim() : undefined,
1300
+
1301
+ cleaned_len: s.length,
1302
+ cleaned_head: truncateHead(s, 250),
1303
+ cleaned_tail: truncateTail(s, 500),
1304
+
1305
+ json_start: start >= 0 ? start : undefined,
1306
+ json_end: end >= 0 ? end : undefined,
1307
+
1308
+ first_error: firstErrMsg,
1309
+
1310
+ error_pos: typeof pos === 'number' ? pos : undefined,
1311
+ error_line: lc?.line,
1312
+ error_column: lc?.column,
1313
+ error_context: typeof pos === 'number' ? snippetAround(s, pos, 160) : undefined,
1314
+ };
1315
+
1316
+ const msg =
1317
+ `Failed to parse model output as JSON. ` +
1318
+ `cleaned_len=${s.length}. ` +
1319
+ `error=${JSON.stringify(firstErrMsg)}.`;
1320
+
1321
+ throw new OpenAiResponsesError({
1322
+ code: 'JSON_PARSE',
1323
+ message: msg,
1324
+ details,
1325
+ cause: err1,
1326
+ });
1327
+ }
1328
+ }
1329
+
1330
+ /**
1331
+ * Strictifies a plain JSON Schema for OpenAI Structured Outputs (strict mode):
1332
+ * - Ensures every object has `additionalProperties: false`
1333
+ * - Ensures every object has a `required` array that includes **all** keys in `properties`
1334
+ * - Adds missing `type: "object"` / `type: "array"` where implied by keywords
1335
+ */
1336
+ private strictifyJsonSchema(schema: unknown): any {
1337
+ const root = JSON.parse(JSON.stringify(schema ?? {}));
1338
+
1339
+ const visit = (node: any) => {
1340
+ if (!node || typeof node !== 'object') {
1341
+ return;
1342
+ }
1343
+
1344
+ if (!node.type) {
1345
+ if (node.properties || node.additionalProperties || node.patternProperties) {
1346
+ node.type = 'object';
1347
+ } else if (node.items || node.prefixItems) {
1348
+ node.type = 'array';
1349
+ }
1350
+ }
1351
+
1352
+ const types = Array.isArray(node.type) ? node.type : node.type ? [node.type] : [];
1353
+
1354
+ if (types.includes('object')) {
1355
+ if (node.additionalProperties !== false) {
1356
+ node.additionalProperties = false;
1357
+ }
1358
+
1359
+ if (node.properties && typeof node.properties === 'object') {
1360
+ const propKeys = Object.keys(node.properties);
1361
+ const currentReq: string[] = Array.isArray(node.required) ? node.required.slice() : [];
1362
+ node.required = Array.from(new Set([...currentReq, ...propKeys]));
1363
+
1364
+ for (const k of propKeys) {
1365
+ visit(node.properties[k]);
1366
+ }
1367
+ }
1368
+
1369
+ if (node.patternProperties && typeof node.patternProperties === 'object') {
1370
+ for (const k of Object.keys(node.patternProperties)) {
1371
+ visit(node.patternProperties[k]);
1372
+ }
1373
+ }
1374
+
1375
+ for (const defsKey of ['$defs', 'definitions']) {
1376
+ if (node[defsKey] && typeof node[defsKey] === 'object') {
1377
+ for (const key of Object.keys(node[defsKey])) {
1378
+ visit(node[defsKey][key]);
1379
+ }
1380
+ }
1381
+ }
1382
+ }
1383
+
1384
+ if (types.includes('array')) {
1385
+ if (node.items) {
1386
+ if (Array.isArray(node.items)) {
1387
+ node.items.forEach(visit);
1388
+ } else {
1389
+ visit(node.items);
1390
+ }
1391
+ }
1392
+ if (Array.isArray(node.prefixItems)) {
1393
+ node.prefixItems.forEach(visit);
1394
+ }
1395
+ }
1396
+
1397
+ for (const k of ['oneOf', 'anyOf', 'allOf']) {
1398
+ if (Array.isArray(node[k])) {
1399
+ node[k].forEach(visit);
1400
+ }
1401
+ }
1402
+
1403
+ if (node.not) {
1404
+ visit(node.not);
1405
+ }
1406
+ };
1407
+
1408
+ visit(root);
1409
+ return root;
1410
+ }
1411
+
1412
+ // -----------------------------------------
1413
+ // Messages + modules
1414
+ // -----------------------------------------
1415
+
1416
+ private buildInstructionsAndInput(messages: (string | ChatCompletionMessageParam)[]): {
1417
+ instructions?: string;
1418
+ input: Array<{ role: 'user' | 'assistant'; content: string }>;
1419
+ } {
1420
+ const instructionsParts: string[] = [];
1421
+ instructionsParts.push(...this.systemMessages);
1422
+
1423
+ const input: Array<{ role: 'user' | 'assistant'; content: string }> = [];
1424
+
1425
+ for (const m of messages) {
1426
+ const msg: ChatCompletionMessageParam =
1427
+ typeof m === 'string' ? ({ role: 'user', content: m } as ChatCompletionMessageParam) : m;
1428
+
1429
+ if (msg.role === 'system') {
1430
+ const c = this.extractTextContent(msg.content).trim();
1431
+ if (c) {
1432
+ instructionsParts.push(c);
1433
+ }
1434
+ continue;
1435
+ }
1436
+
1437
+ if (msg.role === 'tool') {
1438
+ continue;
1439
+ }
1440
+
1441
+ const role: 'user' | 'assistant' = msg.role === 'assistant' ? 'assistant' : 'user';
1442
+ const content = this.extractTextContent(msg.content).trim();
1443
+ if (!content) {
1444
+ continue;
1445
+ }
1446
+
1447
+ input.push({ role, content });
1448
+ }
1449
+
1450
+ const instructions =
1451
+ instructionsParts.map((s) => String(s ?? '').trim()).filter(Boolean).length > 0
1452
+ ? instructionsParts
1453
+ .map((s) => String(s ?? '').trim())
1454
+ .filter(Boolean)
1455
+ .join('\n\n')
1456
+ : undefined;
1457
+
1458
+ return { instructions, input };
1459
+ }
1460
+
1461
+ private extractTextContent(content: ChatCompletionMessageParam['content']): string {
1462
+ if (typeof content === 'string') {
1463
+ return content;
1464
+ }
1465
+ if (!content) {
1466
+ return '';
1467
+ }
1468
+ if (Array.isArray(content)) {
1469
+ return content
1470
+ .map((p: any) => {
1471
+ if (typeof p === 'string') {
1472
+ return p;
1473
+ }
1474
+ if (p?.type === 'text' && typeof p?.text === 'string') {
1475
+ return p.text;
1476
+ }
1477
+ return '';
1478
+ })
1479
+ .join('\n');
1480
+ }
1481
+ return '';
1482
+ }
1483
+
1484
+ private async ensureModulesProcessed(): Promise<void> {
1485
+ if (this.modulesProcessed) {
1486
+ return;
1487
+ }
1488
+ if (this.processingModulesPromise) {
1489
+ return this.processingModulesPromise;
1490
+ }
1491
+
1492
+ this.processingModulesPromise = this.processModules();
1493
+ try {
1494
+ await this.processingModulesPromise;
1495
+ this.modulesProcessed = true;
1496
+ } catch (error: unknown) {
1497
+ this.processingModulesPromise = null;
1498
+ throw error;
1499
+ }
1500
+ }
1501
+
1502
+ private async processModules(): Promise<void> {
1503
+ if (!this.modules || this.modules.length < 1) {
1504
+ return;
1505
+ }
1506
+
1507
+ for (const module of this.modules) {
1508
+ const moduleName = module.getName();
1509
+
1510
+ const rawSystem = await Promise.resolve(module.getSystemMessages());
1511
+ const sysArr = Array.isArray(rawSystem) ? rawSystem : rawSystem ? [rawSystem] : [];
1512
+ const trimmed = sysArr.map((s) => String(s ?? '').trim()).filter(Boolean);
1513
+
1514
+ if (trimmed.length > 0) {
1515
+ const formatted = trimmed.join('. ');
1516
+ this.systemMessages.push(`The following are instructions from the ${moduleName} module:\n${formatted}`);
1517
+ }
1518
+
1519
+ const moduleFunctions = module.getFunctions();
1520
+ const filtered = this.filterFunctions(moduleFunctions);
1521
+ this.functions.push(...filtered);
1522
+
1523
+ const fnInstructions = this.buildFunctionInstructionsMessage(moduleName, filtered);
1524
+ if (fnInstructions) {
1525
+ this.systemMessages.push(fnInstructions);
1526
+ }
1527
+ }
1528
+ }
1529
+
1530
+ private filterFunctions(functions: Function[]): Function[] {
1531
+ if (!this.allowedFunctionNames || this.allowedFunctionNames.length < 1) {
1532
+ return functions;
1533
+ }
1534
+
1535
+ const allow = new Set(this.allowedFunctionNames.map((n) => String(n).trim()).filter(Boolean));
1536
+ return functions.filter((f) => {
1537
+ const name = String(f.definition?.name ?? '').trim();
1538
+ if (!name) {
1539
+ return false;
1540
+ }
1541
+ const short = name.split('.').pop() ?? name;
1542
+ return allow.has(name) || allow.has(short);
1543
+ });
1544
+ }
1545
+
1546
+ private buildFunctionInstructionsMessage(moduleName: string, functions: Function[]): string | null {
1547
+ let msg = `The following are instructions from functions in the ${moduleName} module:`;
1548
+ let added = false;
1549
+
1550
+ for (const f of functions) {
1551
+ const name = String(f.definition?.name ?? '').trim();
1552
+ const instructions = f.instructions;
1553
+ if (!name || !instructions || instructions.length < 1) {
1554
+ continue;
1555
+ }
1556
+
1557
+ const paragraph = instructions
1558
+ .map((s) => String(s ?? '').trim())
1559
+ .filter(Boolean)
1560
+ .join('. ');
1561
+ if (!paragraph) {
1562
+ continue;
1563
+ }
1564
+
1565
+ added = true;
1566
+ msg += ` ${name}: ${paragraph}.`;
1567
+ }
1568
+
1569
+ return added ? msg : null;
1570
+ }
1571
+
1572
+ // -----------------------------------------
1573
+ // Model/background defaults
1574
+ // -----------------------------------------
1575
+
1576
+ private resolveModel(model?: TiktokenModel): TiktokenModel {
1577
+ return model ?? this.defaultModel;
1578
+ }
1579
+
1580
+ private resolveBackgroundMode(args: {
1581
+ requested?: boolean;
1582
+ model: string;
1583
+ reasoningEffort?: OpenAIApi.Chat.Completions.ChatCompletionReasoningEffort;
1584
+ }): boolean {
1585
+ if (typeof args.requested === 'boolean') {
1586
+ return args.requested;
1587
+ }
1588
+ if (this.isProModel(args.model)) {
1589
+ return true;
1590
+ }
1591
+ if (this.isHighReasoningEffort(args.reasoningEffort)) {
1592
+ return true;
1593
+ }
1594
+ return false;
1595
+ }
1596
+
1597
+ private isProModel(model: string): boolean {
1598
+ const m = String(model ?? '').toLowerCase();
1599
+ return /(^|[-_.])pro($|[-_.])/.test(m);
1600
+ }
1601
+
1602
+ private isHighReasoningEffort(effort?: OpenAIApi.Chat.Completions.ChatCompletionReasoningEffort): boolean {
1603
+ const v = String(effort ?? '').toLowerCase();
1604
+ return v === 'high' || v === 'xhigh';
1605
+ }
1606
+ }
1607
+
1608
+ export type OpenAiResponsesErrorCode = 'OPENAI_API' | 'RESPONSE_STATUS' | 'JSON_PARSE';
1609
+
1610
+ export class OpenAiResponsesError extends Error {
1611
+ public readonly code: OpenAiResponsesErrorCode;
1612
+ public readonly details: Record<string, unknown>;
1613
+ public readonly cause?: unknown;
1614
+ public readonly retryable: boolean;
1615
+
1616
+ constructor(args: {
1617
+ code: OpenAiResponsesErrorCode;
1618
+ message: string;
1619
+ details?: Record<string, unknown>;
1620
+ cause?: unknown;
1621
+ retryable?: boolean;
1622
+ }) {
1623
+ super(args.message);
1624
+ this.name = 'OpenAiResponsesError';
1625
+ this.code = args.code;
1626
+ this.details = args.details ?? {};
1627
+ this.cause = args.cause;
1628
+ this.retryable = typeof args.retryable === 'boolean' ? args.retryable : true;
1629
+ Object.setPrototypeOf(this, new.target.prototype);
1630
+ }
1631
+ }
1632
+
1633
+ function truncateHead(text: string, max: number): string {
1634
+ const s = String(text ?? '');
1635
+ if (max <= 0) {
1636
+ return '';
1637
+ }
1638
+ if (s.length <= max) {
1639
+ return s;
1640
+ }
1641
+ return s.slice(0, max) + '...';
1642
+ }
1643
+
1644
+ function truncateTail(text: string, max: number): string {
1645
+ const s = String(text ?? '');
1646
+ if (max <= 0) {
1647
+ return '';
1648
+ }
1649
+ if (s.length <= max) {
1650
+ return s;
1651
+ }
1652
+ return '...' + s.slice(s.length - max);
1653
+ }
1654
+
1655
+ function extractJsonParsePosition(errMsg: string): number | undefined {
1656
+ const m = String(errMsg ?? '').match(/at position\s+(\d+)/i);
1657
+ if (!m) {
1658
+ return undefined;
1659
+ }
1660
+ const n = Number(m[1]);
1661
+ return Number.isFinite(n) ? n : undefined;
1662
+ }
1663
+
1664
+ function extractJsonParseLineCol(errMsg: string): { line?: number; column?: number } | undefined {
1665
+ const m = String(errMsg ?? '').match(/line\s+(\d+)\s+column\s+(\d+)/i);
1666
+ if (!m) {
1667
+ return undefined;
1668
+ }
1669
+ const line = Number(m[1]);
1670
+ const column = Number(m[2]);
1671
+ return {
1672
+ line: Number.isFinite(line) ? line : undefined,
1673
+ column: Number.isFinite(column) ? column : undefined,
1674
+ };
1675
+ }
1676
+
1677
+ function snippetAround(text: string, pos: number, radius: number): string {
1678
+ const s = String(text ?? '');
1679
+ const p = Math.max(0, Math.min(s.length, Number.isFinite(pos) ? pos : 0));
1680
+ const r = Math.max(0, radius);
1681
+
1682
+ const start = Math.max(0, p - r);
1683
+ const end = Math.min(s.length, p + r);
1684
+
1685
+ const before = s.slice(start, p);
1686
+ const after = s.slice(p, end);
1687
+
1688
+ const left = start > 0 ? '...' : '';
1689
+ const right = end < s.length ? '...' : '';
1690
+
1691
+ return `${left}${before}<<HERE>>${after}${right}`;
1692
+ }
1693
+
1694
+ function sleep(ms: number): Promise<void> {
1695
+ return new Promise((resolve) => setTimeout(resolve, ms));
1696
+ }
1697
+
1698
+ /**
1699
+ * Sleep, but wake early if the signal is aborted.
1700
+ * (We do not throw here; the caller should check `signal.aborted` and act.)
1701
+ */
1702
+ function sleepWithAbort(ms: number, signal?: AbortSignal): Promise<void> {
1703
+ if (!signal) {
1704
+ return sleep(ms);
1705
+ }
1706
+ if (signal.aborted) {
1707
+ return Promise.resolve();
1708
+ }
1709
+
1710
+ return new Promise((resolve) => {
1711
+ const t = setTimeout(() => {
1712
+ cleanup();
1713
+ resolve();
1714
+ }, ms);
1715
+
1716
+ const onAbort = () => {
1717
+ cleanup();
1718
+ resolve();
1719
+ };
1720
+
1721
+ const cleanup = () => {
1722
+ try {
1723
+ clearTimeout(t);
1724
+ } catch {
1725
+ // ignore
1726
+ }
1727
+ try {
1728
+ signal.removeEventListener?.('abort', onAbort as any);
1729
+ } catch {
1730
+ // ignore
1731
+ }
1732
+ };
1733
+
1734
+ try {
1735
+ signal.addEventListener?.('abort', onAbort as any, { once: true });
1736
+ } catch {
1737
+ // If addEventListener isn't available, fall back to plain sleep.
1738
+ }
1739
+ });
1740
+ }
1741
+
1742
+ function extractHttpStatus(error: unknown): number | undefined {
1743
+ if (!error || typeof error !== 'object') {
1744
+ return undefined;
1745
+ }
1746
+ const rec = error as Record<string, unknown>;
1747
+ const status = rec.status;
1748
+ if (typeof status === 'number' && Number.isFinite(status)) {
1749
+ return status;
1750
+ }
1751
+ const statusCode = rec.statusCode;
1752
+ if (typeof statusCode === 'number' && Number.isFinite(statusCode)) {
1753
+ return statusCode;
1754
+ }
1755
+ return undefined;
1756
+ }
1757
+
1758
+ function extractRequestId(error: unknown): string | undefined {
1759
+ if (!error || typeof error !== 'object') {
1760
+ return undefined;
1761
+ }
1762
+ const rec = error as Record<string, unknown>;
1763
+
1764
+ const direct = rec.request_id ?? rec.requestId;
1765
+ if (typeof direct === 'string' && direct.trim()) {
1766
+ return direct.trim();
1767
+ }
1768
+
1769
+ const headers = rec.headers as any;
1770
+ if (!headers) {
1771
+ return undefined;
1772
+ }
1773
+
1774
+ if (typeof headers.get === 'function') {
1775
+ const v = headers.get('x-request-id');
1776
+ return typeof v === 'string' && v.trim() ? v.trim() : undefined;
1777
+ }
1778
+
1779
+ if (typeof headers === 'object' && !Array.isArray(headers)) {
1780
+ for (const k of Object.keys(headers)) {
1781
+ if (String(k).toLowerCase() !== 'x-request-id') {
1782
+ continue;
1783
+ }
1784
+ const v = (headers as any)[k];
1785
+ return typeof v === 'string' && v.trim() ? v.trim() : undefined;
1786
+ }
1787
+ }
1788
+
1789
+ return undefined;
1790
+ }
1791
+
1792
+ function isRetryableHttpStatus(status: number | undefined): boolean {
1793
+ if (typeof status !== 'number') {
1794
+ return true;
1795
+ }
1796
+ if (status === 408 || status === 409 || status === 429) {
1797
+ return true;
1798
+ }
1799
+ if (status >= 500) {
1800
+ return true;
1801
+ }
1802
+ return false;
1803
+ }
1804
+
1805
+ function isAbortError(error: unknown): boolean {
1806
+ if (!error) {
1807
+ return false;
1808
+ }
1809
+
1810
+ // Most fetch implementations:
1811
+ // - error.name === 'AbortError'
1812
+ // - or error.code === 'ABORT_ERR'
1813
+ if (error instanceof Error) {
1814
+ const name = String(error.name ?? '').toLowerCase();
1815
+ if (name === 'aborterror') {
1816
+ return true;
1817
+ }
1818
+ const msg = String(error.message ?? '').toLowerCase();
1819
+ // Keep this conservative; don't treat every "abort" substring as abort.
1820
+ if (msg === 'aborted' || msg === 'request aborted') {
1821
+ return true;
1822
+ }
1823
+ }
1824
+
1825
+ if (typeof error === 'object') {
1826
+ const rec = error as Record<string, unknown>;
1827
+ const code = rec.code;
1828
+ if (typeof code === 'string' && code.toUpperCase() === 'ABORT_ERR') {
1829
+ return true;
1830
+ }
1831
+ }
1832
+
1833
+ return false;
1834
+ }
1835
+
1836
+ function safeErrorSummary(error: unknown): Record<string, unknown> {
1837
+ if (!error) {
1838
+ return { message: 'Unknown error' };
1839
+ }
1840
+
1841
+ const status = extractHttpStatus(error);
1842
+ const requestId = extractRequestId(error);
1843
+
1844
+ if (error instanceof OpenAiResponsesError) {
1845
+ return {
1846
+ name: error.name,
1847
+ message: error.message,
1848
+ code: error.code,
1849
+ details: error.details,
1850
+ status: typeof status === 'number' ? status : undefined,
1851
+ request_id: requestId,
1852
+ };
1853
+ }
1854
+
1855
+ if (error instanceof Error) {
1856
+ return {
1857
+ name: error.name,
1858
+ message: error.message,
1859
+ status: typeof status === 'number' ? status : undefined,
1860
+ request_id: requestId,
1861
+ };
1862
+ }
1863
+
1864
+ return {
1865
+ message: String(error),
1866
+ status: typeof status === 'number' ? status : undefined,
1867
+ request_id: requestId,
1868
+ };
1869
+ }