@proteinjs/conversation 2.6.0 → 2.7.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/CHANGELOG.md +18 -0
  2. package/dist/index.d.ts +1 -1
  3. package/dist/index.d.ts.map +1 -1
  4. package/dist/index.js +1 -0
  5. package/dist/index.js.map +1 -1
  6. package/dist/src/Conversation.d.ts.map +1 -1
  7. package/dist/src/Conversation.js +12 -16
  8. package/dist/src/Conversation.js.map +1 -1
  9. package/dist/src/OpenAi.js +3 -3
  10. package/dist/src/OpenAi.js.map +1 -1
  11. package/dist/src/OpenAiResponses.d.ts +41 -4
  12. package/dist/src/OpenAiResponses.d.ts.map +1 -1
  13. package/dist/src/OpenAiResponses.js +757 -77
  14. package/dist/src/OpenAiResponses.js.map +1 -1
  15. package/dist/src/OpenAiStreamProcessor.js +4 -4
  16. package/dist/src/OpenAiStreamProcessor.js.map +1 -1
  17. package/dist/src/UsageData.d.ts +39 -4
  18. package/dist/src/UsageData.d.ts.map +1 -1
  19. package/dist/src/UsageData.js +302 -11
  20. package/dist/src/UsageData.js.map +1 -1
  21. package/dist/src/fs/conversation_fs/ConversationFsModule.d.ts.map +1 -1
  22. package/dist/src/fs/conversation_fs/ConversationFsModule.js +1 -0
  23. package/dist/src/fs/conversation_fs/ConversationFsModule.js.map +1 -1
  24. package/dist/src/fs/conversation_fs/FsFunctions.d.ts +26 -0
  25. package/dist/src/fs/conversation_fs/FsFunctions.d.ts.map +1 -1
  26. package/dist/src/fs/conversation_fs/FsFunctions.js +68 -27
  27. package/dist/src/fs/conversation_fs/FsFunctions.js.map +1 -1
  28. package/index.ts +1 -1
  29. package/package.json +5 -6
  30. package/src/Conversation.ts +14 -17
  31. package/src/OpenAi.ts +3 -3
  32. package/src/OpenAiResponses.ts +905 -112
  33. package/src/OpenAiStreamProcessor.ts +3 -3
  34. package/src/UsageData.ts +376 -13
  35. package/src/fs/conversation_fs/ConversationFsModule.ts +2 -0
  36. package/src/fs/conversation_fs/FsFunctions.ts +32 -2
  37. package/LICENSE +0 -21
@@ -1,4 +1,19 @@
1
1
  "use strict";
2
+ var __extends = (this && this.__extends) || (function () {
3
+ var extendStatics = function (d, b) {
4
+ extendStatics = Object.setPrototypeOf ||
5
+ ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
6
+ function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
7
+ return extendStatics(d, b);
8
+ };
9
+ return function (d, b) {
10
+ if (typeof b !== "function" && b !== null)
11
+ throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
12
+ extendStatics(d, b);
13
+ function __() { this.constructor = d; }
14
+ d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
15
+ };
16
+ })();
2
17
  var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
3
18
  function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
4
19
  return new (P || (P = Promise))(function (resolve, reject) {
@@ -45,14 +60,17 @@ var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
45
60
  return to.concat(ar || Array.prototype.slice.call(from));
46
61
  };
47
62
  Object.defineProperty(exports, "__esModule", { value: true });
48
- exports.OpenAiResponses = exports.DEFAULT_MAX_TOOL_CALLS = exports.DEFAULT_RESPONSES_MODEL = void 0;
63
+ exports.OpenAiResponsesError = exports.OpenAiResponses = exports.DEFAULT_MAX_BACKGROUND_WAIT_MS = exports.DEFAULT_MAX_TOOL_CALLS = exports.DEFAULT_RESPONSES_MODEL = void 0;
49
64
  var openai_1 = require("openai");
50
65
  var logger_1 = require("@proteinjs/logger");
51
66
  var UsageData_1 = require("./UsageData");
52
67
  var ChatCompletionMessageParamFactory_1 = require("./ChatCompletionMessageParamFactory");
53
- var OpenAi_1 = require("./OpenAi");
54
68
  exports.DEFAULT_RESPONSES_MODEL = 'gpt-5.2';
55
69
  exports.DEFAULT_MAX_TOOL_CALLS = 50;
70
+ /** Default hard cap for background-mode polling duration (ms): 1 hour. */
71
+ exports.DEFAULT_MAX_BACKGROUND_WAIT_MS = 60 * 60 * 1000;
72
+ /** Best-effort timeout for cancel calls (avoid hanging abort/timeout paths). */
73
+ var DEFAULT_CANCEL_TIMEOUT_MS = 10000;
56
74
  /**
57
75
  * OpenAI Responses API wrapper (tool-loop + usage tracking + ConversationModules).
58
76
  * - Uses Responses API directly
@@ -74,13 +92,19 @@ var OpenAiResponses = /** @class */ (function () {
74
92
  this.logger = new logger_1.Logger({ name: 'OpenAiResponses', logLevel: opts.logLevel });
75
93
  this.modules = (_a = opts.modules) !== null && _a !== void 0 ? _a : [];
76
94
  this.allowedFunctionNames = opts.allowedFunctionNames;
77
- this.defaultModel = ((_b = opts.defaultModel) !== null && _b !== void 0 ? _b : exports.DEFAULT_RESPONSES_MODEL).trim();
95
+ this.defaultModel = (_b = opts.defaultModel) !== null && _b !== void 0 ? _b : exports.DEFAULT_RESPONSES_MODEL;
78
96
  this.defaultMaxToolCalls = typeof opts.maxToolCalls === 'number' ? opts.maxToolCalls : exports.DEFAULT_MAX_TOOL_CALLS;
97
+ this.defaultMaxBackgroundWaitMs =
98
+ typeof opts.maxBackgroundWaitMs === 'number' &&
99
+ Number.isFinite(opts.maxBackgroundWaitMs) &&
100
+ opts.maxBackgroundWaitMs > 0
101
+ ? Math.floor(opts.maxBackgroundWaitMs)
102
+ : exports.DEFAULT_MAX_BACKGROUND_WAIT_MS;
79
103
  }
80
104
  /** Plain text generation (supports tool calling). */
81
105
  OpenAiResponses.prototype.generateText = function (args) {
82
106
  return __awaiter(this, void 0, void 0, function () {
83
- var model, backgroundMode, maxToolCalls, result;
107
+ var model, backgroundMode, maxToolCalls, maxBackgroundWaitMs, result;
84
108
  return __generator(this, function (_a) {
85
109
  switch (_a.label) {
86
110
  case 0: return [4 /*yield*/, this.ensureModulesProcessed()];
@@ -93,6 +117,7 @@ var OpenAiResponses = /** @class */ (function () {
93
117
  reasoningEffort: args.reasoningEffort,
94
118
  });
95
119
  maxToolCalls = typeof args.maxToolCalls === 'number' ? args.maxToolCalls : this.defaultMaxToolCalls;
120
+ maxBackgroundWaitMs = this.resolveMaxBackgroundWaitMs(args.maxBackgroundWaitMs);
96
121
  return [4 /*yield*/, this.run({
97
122
  model: model,
98
123
  messages: args.messages,
@@ -104,7 +129,9 @@ var OpenAiResponses = /** @class */ (function () {
104
129
  reasoningEffort: args.reasoningEffort,
105
130
  maxToolCalls: maxToolCalls,
106
131
  backgroundMode: backgroundMode,
132
+ maxBackgroundWaitMs: maxBackgroundWaitMs,
107
133
  textFormat: undefined,
134
+ serviceTier: args.serviceTier,
108
135
  })];
109
136
  case 2:
110
137
  result = _a.sent();
@@ -129,7 +156,7 @@ var OpenAiResponses = /** @class */ (function () {
129
156
  /** Structured object generation (supports tool calling). */
130
157
  OpenAiResponses.prototype.generateObject = function (args) {
131
158
  return __awaiter(this, void 0, void 0, function () {
132
- var model, backgroundMode, maxToolCalls, textFormat, result, object, outcome;
159
+ var model, backgroundMode, maxToolCalls, maxBackgroundWaitMs, textFormat, result, object, outcome;
133
160
  return __generator(this, function (_a) {
134
161
  switch (_a.label) {
135
162
  case 0: return [4 /*yield*/, this.ensureModulesProcessed()];
@@ -142,6 +169,7 @@ var OpenAiResponses = /** @class */ (function () {
142
169
  reasoningEffort: args.reasoningEffort,
143
170
  });
144
171
  maxToolCalls = typeof args.maxToolCalls === 'number' ? args.maxToolCalls : this.defaultMaxToolCalls;
172
+ maxBackgroundWaitMs = this.resolveMaxBackgroundWaitMs(args.maxBackgroundWaitMs);
145
173
  textFormat = this.buildTextFormat(args.schema);
146
174
  return [4 /*yield*/, this.run({
147
175
  model: model,
@@ -154,11 +182,18 @@ var OpenAiResponses = /** @class */ (function () {
154
182
  reasoningEffort: args.reasoningEffort,
155
183
  maxToolCalls: maxToolCalls,
156
184
  backgroundMode: backgroundMode,
185
+ maxBackgroundWaitMs: maxBackgroundWaitMs,
157
186
  textFormat: textFormat,
187
+ serviceTier: args.serviceTier,
158
188
  })];
159
189
  case 2:
160
190
  result = _a.sent();
161
- object = this.parseAndValidateStructuredOutput(result.message, args.schema);
191
+ object = this.parseAndValidateStructuredOutput(result.message, args.schema, {
192
+ model: model,
193
+ maxOutputTokens: args.maxTokens,
194
+ requestedServiceTier: args.serviceTier,
195
+ serviceTier: result.serviceTier,
196
+ });
162
197
  outcome = {
163
198
  object: object,
164
199
  usageData: result.usagedata,
@@ -182,7 +217,7 @@ var OpenAiResponses = /** @class */ (function () {
182
217
  return __generator(this, function (_b) {
183
218
  switch (_b.label) {
184
219
  case 0:
185
- usage = new UsageData_1.UsageDataAccumulator({ model: OpenAi_1.DEFAULT_MODEL });
220
+ usage = new UsageData_1.UsageDataAccumulator({ model: args.model });
186
221
  toolInvocations = [];
187
222
  tools = this.buildResponseTools(this.functions);
188
223
  _a = this.buildInstructionsAndInput(args.messages), instructions = _a.instructions, input = _a.input;
@@ -191,7 +226,8 @@ var OpenAiResponses = /** @class */ (function () {
191
226
  _b.label = 1;
192
227
  case 1: return [4 /*yield*/, this.createResponseAndMaybeWait({
193
228
  model: args.model,
194
- instructions: previousResponseId ? undefined : instructions,
229
+ // Always pass instructions; they are not carried over with previous_response_id.
230
+ instructions: instructions,
195
231
  input: nextInput,
196
232
  previousResponseId: previousResponseId,
197
233
  tools: tools,
@@ -201,18 +237,34 @@ var OpenAiResponses = /** @class */ (function () {
201
237
  reasoningEffort: args.reasoningEffort,
202
238
  textFormat: args.textFormat,
203
239
  backgroundMode: args.backgroundMode,
240
+ maxBackgroundWaitMs: args.maxBackgroundWaitMs,
204
241
  abortSignal: args.abortSignal,
242
+ serviceTier: args.serviceTier,
205
243
  })];
206
244
  case 2:
207
245
  response = _b.sent();
208
- this.addUsageFromResponse(response, usage);
246
+ this.addUsageFromResponse(response, usage, { requestedServiceTier: args.serviceTier });
247
+ // For structured outputs we should not attempt to parse incomplete/failed/cancelled responses.
248
+ // For plain-text generation, we allow "incomplete" to pass through (partial output),
249
+ // but still fail on other non-completed statuses.
250
+ this.throwIfResponseUnusable(response, {
251
+ allowIncomplete: !args.textFormat,
252
+ model: args.model,
253
+ maxOutputTokens: args.maxTokens,
254
+ requestedServiceTier: args.serviceTier,
255
+ });
209
256
  functionCalls = this.extractFunctionCalls(response);
210
257
  if (functionCalls.length < 1) {
211
258
  message = this.extractAssistantText(response);
212
259
  if (!message) {
213
260
  throw new Error("Response was empty");
214
261
  }
215
- return [2 /*return*/, { message: message, usagedata: usage.usageData, toolInvocations: toolInvocations }];
262
+ return [2 /*return*/, {
263
+ message: message,
264
+ usagedata: usage.usageData,
265
+ toolInvocations: toolInvocations,
266
+ serviceTier: response.service_tier ? response.service_tier : undefined,
267
+ }];
216
268
  }
217
269
  if (toolCallsExecuted + functionCalls.length > args.maxToolCalls) {
218
270
  throw new Error("Max tool calls (".concat(args.maxToolCalls, ") reached. Stopping execution."));
@@ -243,11 +295,215 @@ var OpenAiResponses = /** @class */ (function () {
243
295
  });
244
296
  });
245
297
  };
246
- OpenAiResponses.prototype.createResponseAndMaybeWait = function (args) {
298
+ OpenAiResponses.prototype.throwIfResponseUnusable = function (response, opts) {
299
+ var _a;
300
+ var statusRaw = typeof (response === null || response === void 0 ? void 0 : response.status) === 'string' ? String(response.status) : '';
301
+ var status = statusRaw.toLowerCase();
302
+ if (!status || status === 'completed') {
303
+ return;
304
+ }
305
+ if (status === 'incomplete' && opts.allowIncomplete) {
306
+ return;
307
+ }
308
+ var id = typeof (response === null || response === void 0 ? void 0 : response.id) === 'string' ? response.id : '';
309
+ var reason = (_a = response === null || response === void 0 ? void 0 : response.incomplete_details) === null || _a === void 0 ? void 0 : _a.reason;
310
+ var apiErr = response === null || response === void 0 ? void 0 : response.error;
311
+ var serviceTier = typeof (response === null || response === void 0 ? void 0 : response.service_tier) === 'string' && response.service_tier.trim() ? response.service_tier.trim() : '';
312
+ var directOutputText = typeof (response === null || response === void 0 ? void 0 : response.output_text) === 'string' ? response.output_text : '';
313
+ var assistantText = this.extractAssistantText(response);
314
+ var outTextLen = directOutputText ? directOutputText.length : 0;
315
+ var assistantLen = assistantText ? assistantText.length : 0;
316
+ var usage = response === null || response === void 0 ? void 0 : response.usage;
317
+ var inputTokens = typeof (usage === null || usage === void 0 ? void 0 : usage.input_tokens) === 'number' ? usage.input_tokens : undefined;
318
+ var outputTokens = typeof (usage === null || usage === void 0 ? void 0 : usage.output_tokens) === 'number' ? usage.output_tokens : undefined;
319
+ var totalTokens = typeof (usage === null || usage === void 0 ? void 0 : usage.total_tokens) === 'number'
320
+ ? usage.total_tokens
321
+ : typeof inputTokens === 'number' && typeof outputTokens === 'number'
322
+ ? inputTokens + outputTokens
323
+ : undefined;
324
+ var msg = "Responses API returned status=\"".concat(status, "\"");
325
+ if (id) {
326
+ msg += " (id=".concat(id, ")");
327
+ }
328
+ msg += ".";
329
+ var details = {
330
+ response_id: id || undefined,
331
+ status: status,
332
+ model: typeof opts.model === 'string' && opts.model.trim() ? opts.model : undefined,
333
+ max_output_tokens: typeof opts.maxOutputTokens === 'number' ? opts.maxOutputTokens : undefined,
334
+ requested_service_tier: typeof opts.requestedServiceTier === 'string' && opts.requestedServiceTier.trim()
335
+ ? opts.requestedServiceTier.trim()
336
+ : undefined,
337
+ service_tier: serviceTier || undefined,
338
+ incomplete_reason: typeof reason === 'string' && reason.trim() ? reason : undefined,
339
+ api_error: apiErr !== null && apiErr !== void 0 ? apiErr : undefined,
340
+ usage_input_tokens: inputTokens,
341
+ usage_output_tokens: outputTokens,
342
+ usage_total_tokens: totalTokens,
343
+ output_text_len: outTextLen || undefined,
344
+ output_text_tail: outTextLen > 0 ? truncateTail(directOutputText, 400) : undefined,
345
+ assistant_text_len: assistantLen || undefined,
346
+ assistant_text_tail: assistantLen > 0 ? truncateTail(assistantText, 400) : undefined,
347
+ };
348
+ var extra = [];
349
+ if (details.model) {
350
+ extra.push("model=".concat(details.model));
351
+ }
352
+ if (typeof details.max_output_tokens === 'number') {
353
+ extra.push("max_output_tokens=".concat(details.max_output_tokens));
354
+ }
355
+ if (typeof details.requested_service_tier === 'string') {
356
+ extra.push("requested_service_tier=".concat(details.requested_service_tier));
357
+ }
358
+ if (typeof details.service_tier === 'string') {
359
+ extra.push("service_tier=".concat(details.service_tier));
360
+ }
361
+ if (details.incomplete_reason) {
362
+ extra.push("reason=".concat(details.incomplete_reason));
363
+ }
364
+ if (typeof details.output_text_len === 'number') {
365
+ extra.push("output_text_len=".concat(details.output_text_len));
366
+ }
367
+ if (typeof details.assistant_text_len === 'number') {
368
+ extra.push("assistant_text_len=".concat(details.assistant_text_len));
369
+ }
370
+ if (extra.length > 0) {
371
+ msg += " ".concat(extra.join(' '), ".");
372
+ }
373
+ throw new OpenAiResponsesError({
374
+ code: 'RESPONSE_STATUS',
375
+ message: msg,
376
+ details: details,
377
+ });
378
+ };
379
+ OpenAiResponses.prototype.toOpenAiApiError = function (error, meta) {
380
+ var status = extractHttpStatus(error);
381
+ var requestId = extractRequestId(error);
382
+ var retryable = isRetryableHttpStatus(status);
383
+ var errMsg = error instanceof Error ? error.message : String(error !== null && error !== void 0 ? error : '');
384
+ var errName = error instanceof Error ? error.name : undefined;
385
+ var aborted = meta.aborted === true || isAbortError(error);
386
+ var msg = "OpenAI ".concat(meta.operation, " failed.");
387
+ var extra = [];
388
+ if (aborted) {
389
+ extra.push("aborted=true");
390
+ }
391
+ if (typeof status === 'number') {
392
+ extra.push("status=".concat(status));
393
+ }
394
+ if (requestId) {
395
+ extra.push("requestId=".concat(requestId));
396
+ }
397
+ if (meta.responseId) {
398
+ extra.push("responseId=".concat(meta.responseId));
399
+ }
400
+ if (meta.backgroundMode) {
401
+ extra.push("background=true");
402
+ }
403
+ if (typeof meta.pollAttempt === 'number') {
404
+ extra.push("pollAttempt=".concat(meta.pollAttempt));
405
+ }
406
+ if (typeof meta.waitedMs === 'number') {
407
+ extra.push("waitedMs=".concat(meta.waitedMs));
408
+ }
409
+ if (typeof meta.maxWaitMs === 'number') {
410
+ extra.push("maxWaitMs=".concat(meta.maxWaitMs));
411
+ }
412
+ if (typeof meta.lastStatus === 'string' && meta.lastStatus.trim()) {
413
+ extra.push("lastStatus=".concat(meta.lastStatus.trim()));
414
+ }
415
+ if (typeof meta.model === 'string' && meta.model.trim()) {
416
+ extra.push("model=".concat(meta.model.trim()));
417
+ }
418
+ if (meta.reasoningEffort) {
419
+ extra.push("reasoningEffort=".concat(meta.reasoningEffort));
420
+ }
421
+ if (typeof meta.requestedServiceTier === 'string' && meta.requestedServiceTier.trim()) {
422
+ extra.push("requested_service_tier=".concat(meta.requestedServiceTier.trim()));
423
+ }
424
+ if (typeof meta.serviceTier === 'string' && meta.serviceTier.trim()) {
425
+ extra.push("service_tier=".concat(meta.serviceTier.trim()));
426
+ }
427
+ if (extra.length > 0) {
428
+ msg += " ".concat(extra.join(' '), ".");
429
+ }
430
+ if (errMsg) {
431
+ msg += " error=".concat(JSON.stringify(errMsg), ".");
432
+ }
433
+ var details = {
434
+ operation: meta.operation,
435
+ status: typeof status === 'number' ? status : undefined,
436
+ request_id: requestId,
437
+ response_id: meta.responseId,
438
+ previous_response_id: meta.previousResponseId,
439
+ background: meta.backgroundMode ? true : undefined,
440
+ poll_attempt: meta.pollAttempt,
441
+ waited_ms: meta.waitedMs,
442
+ max_wait_ms: meta.maxWaitMs,
443
+ last_status: typeof meta.lastStatus === 'string' && meta.lastStatus.trim() ? meta.lastStatus.trim() : undefined,
444
+ model: typeof meta.model === 'string' && meta.model.trim() ? meta.model.trim() : undefined,
445
+ reasoning_effort: meta.reasoningEffort,
446
+ requested_service_tier: typeof meta.requestedServiceTier === 'string' && meta.requestedServiceTier.trim()
447
+ ? meta.requestedServiceTier.trim()
448
+ : undefined,
449
+ service_tier: typeof meta.serviceTier === 'string' && meta.serviceTier.trim() ? meta.serviceTier.trim() : undefined,
450
+ error_name: errName,
451
+ aborted: aborted ? true : undefined,
452
+ };
453
+ return new OpenAiResponsesError({
454
+ code: 'OPENAI_API',
455
+ message: msg,
456
+ details: details,
457
+ cause: error,
458
+ retryable: retryable,
459
+ });
460
+ };
461
+ OpenAiResponses.prototype.resolveMaxBackgroundWaitMs = function (ms) {
462
+ var n = typeof ms === 'number' && Number.isFinite(ms) && ms > 0 ? Math.floor(ms) : this.defaultMaxBackgroundWaitMs;
463
+ // Ensure we never return a non-positive number even if misconfigured elsewhere.
464
+ return n > 0 ? n : exports.DEFAULT_MAX_BACKGROUND_WAIT_MS;
465
+ };
466
+ OpenAiResponses.prototype.cancelResponseBestEffort = function (responseId) {
247
467
  return __awaiter(this, void 0, void 0, function () {
248
- var body, created;
468
+ var resp, e_1;
249
469
  return __generator(this, function (_a) {
250
470
  switch (_a.label) {
471
+ case 0:
472
+ if (!responseId) {
473
+ return [2 /*return*/, { attempted: false }];
474
+ }
475
+ _a.label = 1;
476
+ case 1:
477
+ _a.trys.push([1, 3, , 4]);
478
+ return [4 /*yield*/, this.client.responses.cancel(responseId)];
479
+ case 2:
480
+ resp = _a.sent();
481
+ // Docs show cancelled as the post-cancel status.
482
+ if ((resp === null || resp === void 0 ? void 0 : resp.status) === 'cancelled') {
483
+ return [2 /*return*/, { attempted: true, ok: true }];
484
+ }
485
+ return [2 /*return*/, {
486
+ attempted: true,
487
+ ok: false,
488
+ error: {
489
+ message: 'Cancel did not return status=cancelled',
490
+ status: resp === null || resp === void 0 ? void 0 : resp.status,
491
+ },
492
+ }];
493
+ case 3:
494
+ e_1 = _a.sent();
495
+ return [2 /*return*/, { attempted: true, ok: false, error: safeErrorSummary(e_1) }];
496
+ case 4: return [2 /*return*/];
497
+ }
498
+ });
499
+ });
500
+ };
501
+ OpenAiResponses.prototype.createResponseAndMaybeWait = function (args) {
502
+ var _a;
503
+ return __awaiter(this, void 0, void 0, function () {
504
+ var body, created, error_1;
505
+ return __generator(this, function (_b) {
506
+ switch (_b.label) {
251
507
  case 0:
252
508
  body = {
253
509
  model: args.model,
@@ -277,52 +533,191 @@ var OpenAiResponses = /** @class */ (function () {
277
533
  if (args.textFormat) {
278
534
  body.text = { format: args.textFormat };
279
535
  }
536
+ if (typeof args.serviceTier === 'string' && args.serviceTier.trim()) {
537
+ body.service_tier = args.serviceTier.trim();
538
+ }
280
539
  if (args.backgroundMode) {
281
540
  body.background = true;
282
541
  body.store = true;
283
542
  }
284
- return [4 /*yield*/, this.client.responses.create(body, args.abortSignal ? { signal: args.abortSignal } : undefined)];
543
+ _b.label = 1;
285
544
  case 1:
286
- created = _a.sent();
545
+ _b.trys.push([1, 3, , 4]);
546
+ return [4 /*yield*/, this.client.responses.create(body, args.abortSignal ? { signal: args.abortSignal } : undefined)];
547
+ case 2:
548
+ created = _b.sent();
549
+ return [3 /*break*/, 4];
550
+ case 3:
551
+ error_1 = _b.sent();
552
+ throw this.toOpenAiApiError(error_1, {
553
+ operation: 'responses.create',
554
+ model: args.model,
555
+ reasoningEffort: args.reasoningEffort,
556
+ backgroundMode: args.backgroundMode,
557
+ previousResponseId: args.previousResponseId,
558
+ aborted: ((_a = args.abortSignal) === null || _a === void 0 ? void 0 : _a.aborted) ? true : undefined,
559
+ requestedServiceTier: args.serviceTier,
560
+ });
561
+ case 4:
287
562
  if (!args.backgroundMode) {
288
563
  return [2 /*return*/, created];
289
564
  }
290
565
  if (!(created === null || created === void 0 ? void 0 : created.id)) {
291
566
  return [2 /*return*/, created];
292
567
  }
293
- return [4 /*yield*/, this.waitForCompletion(created.id, args.abortSignal)];
294
- case 2: return [2 /*return*/, _a.sent()];
568
+ return [4 /*yield*/, this.waitForCompletion(created.id, args.abortSignal, {
569
+ model: args.model,
570
+ reasoningEffort: args.reasoningEffort,
571
+ maxWaitMs: this.resolveMaxBackgroundWaitMs(args.maxBackgroundWaitMs),
572
+ requestedServiceTier: args.serviceTier,
573
+ })];
574
+ case 5: return [2 /*return*/, _b.sent()];
295
575
  }
296
576
  });
297
577
  });
298
578
  };
299
- OpenAiResponses.prototype.waitForCompletion = function (responseId, abortSignal) {
579
+ OpenAiResponses.prototype.waitForCompletion = function (responseId, abortSignal, ctx) {
300
580
  return __awaiter(this, void 0, void 0, function () {
301
- var delayMs, resp, status_1;
581
+ var maxWaitMs, startedAtMs, delayMs, pollAttempt, lastStatus, cancelAttempted, warnEveryMs, nextWarnAtMs, throwPollingStop, waitedMs, resp, error_2, status_1;
582
+ var _this = this;
302
583
  return __generator(this, function (_a) {
303
584
  switch (_a.label) {
304
585
  case 0:
305
- delayMs = 500;
586
+ this.logger.debug({ message: 'Waiting for completion', obj: { responseId: responseId } });
587
+ maxWaitMs = this.resolveMaxBackgroundWaitMs(ctx === null || ctx === void 0 ? void 0 : ctx.maxWaitMs);
588
+ startedAtMs = Date.now();
589
+ delayMs = 1000;
590
+ pollAttempt = 0;
591
+ lastStatus = '';
592
+ cancelAttempted = false;
593
+ warnEveryMs = 10 * 60 * 1000;
594
+ nextWarnAtMs = warnEveryMs;
595
+ throwPollingStop = function (args) { return __awaiter(_this, void 0, void 0, function () {
596
+ var waitedMs, cancel, baseDetails, msg;
597
+ return __generator(this, function (_a) {
598
+ switch (_a.label) {
599
+ case 0:
600
+ waitedMs = Date.now() - startedAtMs;
601
+ cancel = undefined;
602
+ if (!!cancelAttempted) return [3 /*break*/, 2];
603
+ cancelAttempted = true;
604
+ return [4 /*yield*/, this.cancelResponseBestEffort(responseId)];
605
+ case 1:
606
+ cancel = _a.sent();
607
+ _a.label = 2;
608
+ case 2:
609
+ baseDetails = {
610
+ operation: 'responses.retrieve',
611
+ response_id: responseId,
612
+ background: true,
613
+ poll_attempt: pollAttempt,
614
+ waited_ms: waitedMs,
615
+ max_wait_ms: maxWaitMs,
616
+ last_status: lastStatus || undefined,
617
+ model: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.model) === 'string' && ctx.model.trim() ? ctx.model.trim() : undefined,
618
+ reasoning_effort: ctx === null || ctx === void 0 ? void 0 : ctx.reasoningEffort,
619
+ requested_service_tier: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.requestedServiceTier) === 'string' && ctx.requestedServiceTier.trim()
620
+ ? ctx.requestedServiceTier.trim()
621
+ : undefined,
622
+ aborted: args.kind === 'aborted' ? true : undefined,
623
+ timeout: args.kind === 'timeout' ? true : undefined,
624
+ cancel_attempted: (cancel === null || cancel === void 0 ? void 0 : cancel.attempted) ? true : undefined,
625
+ cancel_ok: cancel && cancel.attempted && 'ok' in cancel ? cancel.ok : undefined,
626
+ cancel_timed_out: cancel && cancel.attempted && cancel.timedOut ? true : undefined,
627
+ cancel_error: cancel && cancel.attempted && cancel.error ? cancel.error : undefined,
628
+ };
629
+ if (args.cause) {
630
+ baseDetails.polling_cause = safeErrorSummary(args.cause);
631
+ }
632
+ msg = args.kind === 'timeout'
633
+ ? "Background response exceeded max wait (maxWaitMs=".concat(maxWaitMs, ") while polling (id=").concat(responseId, ").")
634
+ : "Background polling aborted (id=".concat(responseId, ").");
635
+ throw new OpenAiResponsesError({
636
+ code: 'OPENAI_API',
637
+ message: msg,
638
+ details: baseDetails,
639
+ cause: args.cause,
640
+ });
641
+ }
642
+ });
643
+ }); };
306
644
  _a.label = 1;
307
645
  case 1:
308
- if (abortSignal === null || abortSignal === void 0 ? void 0 : abortSignal.aborted) {
309
- throw new Error("Request aborted");
646
+ waitedMs = Date.now() - startedAtMs;
647
+ if (!(abortSignal === null || abortSignal === void 0 ? void 0 : abortSignal.aborted)) return [3 /*break*/, 3];
648
+ return [4 /*yield*/, throwPollingStop({ kind: 'aborted' })];
649
+ case 2:
650
+ _a.sent();
651
+ _a.label = 3;
652
+ case 3:
653
+ if (!(waitedMs >= maxWaitMs)) return [3 /*break*/, 5];
654
+ return [4 /*yield*/, throwPollingStop({ kind: 'timeout' })];
655
+ case 4:
656
+ _a.sent();
657
+ _a.label = 5;
658
+ case 5:
659
+ // Warn every 10 minutes elapsed (best-effort; may log slightly after the boundary).
660
+ if (waitedMs >= nextWarnAtMs) {
661
+ nextWarnAtMs += warnEveryMs;
662
+ this.logger.warn({
663
+ message: "Background polling still in progress",
664
+ obj: {
665
+ responseId: responseId,
666
+ status: lastStatus || undefined,
667
+ waitedMs: waitedMs,
668
+ pollAttempt: pollAttempt,
669
+ model: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.model) === 'string' && ctx.model.trim() ? ctx.model.trim() : undefined,
670
+ reasoningEffort: ctx === null || ctx === void 0 ? void 0 : ctx.reasoningEffort,
671
+ serviceTier: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.requestedServiceTier) === 'string' && ctx.requestedServiceTier.trim()
672
+ ? ctx.requestedServiceTier.trim()
673
+ : undefined,
674
+ },
675
+ });
310
676
  }
677
+ pollAttempt += 1;
678
+ resp = void 0;
679
+ _a.label = 6;
680
+ case 6:
681
+ _a.trys.push([6, 8, , 11]);
311
682
  return [4 /*yield*/, this.client.responses.retrieve(responseId, undefined, abortSignal ? { signal: abortSignal } : undefined)];
312
- case 2:
683
+ case 7:
313
684
  resp = _a.sent();
314
- status_1 = typeof (resp === null || resp === void 0 ? void 0 : resp.status) === 'string' ? String(resp.status).toLowerCase() : '';
315
- if (status_1 === 'completed' || status_1 === 'failed' || status_1 === 'cancelled' || status_1 === 'incomplete') {
685
+ return [3 /*break*/, 11];
686
+ case 8:
687
+ error_2 = _a.sent();
688
+ if (!((abortSignal === null || abortSignal === void 0 ? void 0 : abortSignal.aborted) || isAbortError(error_2))) return [3 /*break*/, 10];
689
+ return [4 /*yield*/, throwPollingStop({ kind: 'aborted', cause: error_2 })];
690
+ case 9:
691
+ _a.sent();
692
+ _a.label = 10;
693
+ case 10: throw this.toOpenAiApiError(error_2, {
694
+ operation: 'responses.retrieve',
695
+ model: ctx === null || ctx === void 0 ? void 0 : ctx.model,
696
+ reasoningEffort: ctx === null || ctx === void 0 ? void 0 : ctx.reasoningEffort,
697
+ backgroundMode: true,
698
+ responseId: responseId,
699
+ pollAttempt: pollAttempt,
700
+ waitedMs: waitedMs,
701
+ maxWaitMs: maxWaitMs,
702
+ lastStatus: lastStatus,
703
+ requestedServiceTier: ctx === null || ctx === void 0 ? void 0 : ctx.requestedServiceTier,
704
+ });
705
+ case 11:
706
+ status_1 = typeof (resp === null || resp === void 0 ? void 0 : resp.status) === 'string' ? resp.status : '';
707
+ lastStatus = status_1;
708
+ // Terminal states
709
+ if (status_1 === 'completed' || status_1 === 'failed' || status_1 === 'incomplete' || status_1 === 'cancelled') {
316
710
  return [2 /*return*/, resp];
317
711
  }
318
- this.logger.debug({ message: "Polling response", obj: { responseId: responseId, status: status_1, delayMs: delayMs } });
319
- return [4 /*yield*/, sleep(delayMs)];
320
- case 3:
712
+ this.logger.debug({ message: "Polling response", obj: { responseId: responseId, status: status_1, delayMs: delayMs, pollAttempt: pollAttempt, waitedMs: waitedMs } });
713
+ // Sleep but wake early if aborted, so abort latency is low.
714
+ return [4 /*yield*/, sleepWithAbort(delayMs, abortSignal)];
715
+ case 12:
716
+ // Sleep but wake early if aborted, so abort latency is low.
321
717
  _a.sent();
322
- delayMs = Math.min(5000, Math.floor(delayMs * 1.5));
323
- _a.label = 4;
324
- case 4: return [3 /*break*/, 1];
325
- case 5: return [2 /*return*/];
718
+ _a.label = 13;
719
+ case 13: return [3 /*break*/, 1];
720
+ case 14: return [2 /*return*/];
326
721
  }
327
722
  });
328
723
  });
@@ -407,7 +802,7 @@ var OpenAiResponses = /** @class */ (function () {
407
802
  OpenAiResponses.prototype.executeFunctionCall = function (args) {
408
803
  var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k, _l;
409
804
  return __awaiter(this, void 0, void 0, function () {
410
- var callId, rawName, shortName, functionToCall, startedAt, parsedArgs, finishedAt, rec, argsObj, returnObject, finishedAt, rec, output, error_1, finishedAt, errMessage, errStack, rec;
805
+ var callId, rawName, shortName, functionToCall, startedAt, parsedArgs, finishedAt, rec, argsObj, returnObject, finishedAt, rec, output, error_3, finishedAt, errMessage, errStack, rec;
411
806
  return __generator(this, function (_m) {
412
807
  switch (_m.label) {
413
808
  case 0:
@@ -483,10 +878,10 @@ var OpenAiResponses = /** @class */ (function () {
483
878
  output: output,
484
879
  }];
485
880
  case 4:
486
- error_1 = _m.sent();
881
+ error_3 = _m.sent();
487
882
  finishedAt = new Date();
488
- errMessage = error_1 instanceof Error ? error_1.message : String(error_1);
489
- errStack = error_1 instanceof Error ? error_1.stack : undefined;
883
+ errMessage = error_3 instanceof Error ? error_3.message : String(error_3);
884
+ errStack = error_3 instanceof Error ? error_3.stack : undefined;
490
885
  rec = {
491
886
  id: callId,
492
887
  name: functionToCall.definition.name,
@@ -498,7 +893,7 @@ var OpenAiResponses = /** @class */ (function () {
498
893
  };
499
894
  args.toolInvocations.push(rec);
500
895
  (_l = args.onToolInvocation) === null || _l === void 0 ? void 0 : _l.call(args, { type: 'finished', result: rec });
501
- throw error_1;
896
+ throw error_3;
502
897
  case 5: return [2 /*return*/];
503
898
  }
504
899
  });
@@ -533,41 +928,22 @@ var OpenAiResponses = /** @class */ (function () {
533
928
  // -----------------------------------------
534
929
  // Usage + text extraction
535
930
  // -----------------------------------------
536
/**
 * Record token usage from a Responses API response onto the UsageData
 * accumulator, tagging it with the effective service tier.
 *
 * @param response - Responses API payload; may lack a `usage` object.
 * @param usage - Accumulator exposing `addTokenUsage(tokens, meta)`.
 * @param ctx - Optional request context; `ctx.requestedServiceTier` is used
 *   when the response does not report a `service_tier`.
 */
OpenAiResponses.prototype.addUsageFromResponse = function (response, usage, ctx) {
    var _a, _b, _c, _d, _e;
    // No usage payload (e.g. incomplete/errored responses) -> nothing to record.
    if (!response.usage) {
        return;
    }
    // `input_tokens_details` / `output_tokens_details` are not guaranteed to be
    // present on the usage object; default the detail counters to 0 instead of
    // throwing on a missing details object.
    var cachedInputTokens = (_b = (_a = response.usage.input_tokens_details) === null || _a === void 0 ? void 0 : _a.cached_tokens) !== null && _b !== void 0 ? _b : 0;
    var reasoningTokens = (_d = (_c = response.usage.output_tokens_details) === null || _c === void 0 ? void 0 : _c.reasoning_tokens) !== null && _d !== void 0 ? _d : 0;
    usage.addTokenUsage({
        inputTokens: response.usage.input_tokens,
        cachedInputTokens: cachedInputTokens,
        outputTokens: response.usage.output_tokens,
        reasoningTokens: reasoningTokens,
        totalTokens: response.usage.total_tokens,
    }, { serviceTier: (_e = response.service_tier) !== null && _e !== void 0 ? _e : ctx === null || ctx === void 0 ? void 0 : ctx.requestedServiceTier });
};
565
944
  OpenAiResponses.prototype.extractAssistantText = function (response) {
566
- var direct = typeof response.output_text === 'string' ? response.output_text.trim() : '';
567
- if (direct) {
568
- return direct;
569
- }
570
945
  var out = Array.isArray(response.output) ? response.output : [];
946
+ var lastJoined = '';
571
947
  for (var _i = 0, out_2 = out; _i < out_2.length; _i++) {
572
948
  var item = out_2[_i];
573
949
  if (!item || typeof item !== 'object') {
@@ -601,9 +977,16 @@ var OpenAiResponses = /** @class */ (function () {
601
977
  }
602
978
  var joined = pieces.join('\n').trim();
603
979
  if (joined) {
604
- return joined;
980
+ lastJoined = joined;
605
981
  }
606
982
  }
983
+ if (lastJoined) {
984
+ return lastJoined;
985
+ }
986
+ var direct = typeof response.output_text === 'string' ? response.output_text.trim() : '';
987
+ if (direct) {
988
+ return direct;
989
+ }
607
990
  return '';
608
991
  };
609
992
  // -----------------------------------------
@@ -623,8 +1006,8 @@ var OpenAiResponses = /** @class */ (function () {
623
1006
  schema: this.strictifyJsonSchema(schema),
624
1007
  };
625
1008
  };
626
- OpenAiResponses.prototype.parseAndValidateStructuredOutput = function (text, schema) {
627
- var parsed = this.parseJson(text);
1009
+ OpenAiResponses.prototype.parseAndValidateStructuredOutput = function (text, schema, ctx) {
1010
+ var parsed = this.parseJson(text, ctx);
628
1011
  if (this.isZodSchema(schema)) {
629
1012
  var res = schema.safeParse(parsed);
630
1013
  if (!(res === null || res === void 0 ? void 0 : res.success)) {
@@ -640,7 +1023,8 @@ var OpenAiResponses = /** @class */ (function () {
640
1023
  }
641
1024
  return typeof schema.safeParse === 'function';
642
1025
  };
643
- OpenAiResponses.prototype.parseJson = function (text) {
1026
+ OpenAiResponses.prototype.parseJson = function (text, ctx) {
1027
+ var _a;
644
1028
  var cleaned = String(text !== null && text !== void 0 ? text : '')
645
1029
  .trim()
646
1030
  .replace(/^```(?:json)?/i, '')
@@ -649,7 +1033,8 @@ var OpenAiResponses = /** @class */ (function () {
649
1033
  try {
650
1034
  return JSON.parse(cleaned);
651
1035
  }
652
- catch (_a) {
1036
+ catch (err1) {
1037
+ var firstErrMsg = err1 instanceof Error ? err1.message : String(err1);
653
1038
  var s = cleaned;
654
1039
  var firstObj = s.indexOf('{');
655
1040
  var firstArr = s.indexOf('[');
@@ -658,9 +1043,77 @@ var OpenAiResponses = /** @class */ (function () {
658
1043
  var lastArr = s.lastIndexOf(']');
659
1044
  var end = Math.max(lastObj, lastArr);
660
1045
  if (start >= 0 && end > start) {
661
- return JSON.parse(s.slice(start, end + 1));
1046
+ var candidate = s.slice(start, end + 1);
1047
+ try {
1048
+ return JSON.parse(candidate);
1049
+ }
1050
+ catch (err2) {
1051
+ var secondErrMsg = err2 instanceof Error ? err2.message : String(err2);
1052
+ var pos2rel = extractJsonParsePosition(secondErrMsg);
1053
+ var pos2 = typeof pos2rel === 'number' ? start + pos2rel : undefined;
1054
+ var pos1 = extractJsonParsePosition(firstErrMsg);
1055
+ var pos_1 = typeof pos2 === 'number' ? pos2 : pos1;
1056
+ var lc_1 = (_a = extractJsonParseLineCol(secondErrMsg)) !== null && _a !== void 0 ? _a : extractJsonParseLineCol(firstErrMsg);
1057
+ var details_1 = {
1058
+ model: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.model) === 'string' && ctx.model.trim() ? ctx.model : undefined,
1059
+ max_output_tokens: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.maxOutputTokens) === 'number' ? ctx.maxOutputTokens : undefined,
1060
+ requested_service_tier: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.requestedServiceTier) === 'string' && String(ctx.requestedServiceTier).trim()
1061
+ ? String(ctx.requestedServiceTier).trim()
1062
+ : undefined,
1063
+ service_tier: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.serviceTier) === 'string' && ctx.serviceTier.trim() ? ctx.serviceTier.trim() : undefined,
1064
+ cleaned_len: s.length,
1065
+ cleaned_head: truncateHead(s, 250),
1066
+ cleaned_tail: truncateTail(s, 500),
1067
+ json_start: start,
1068
+ json_end: end,
1069
+ json_candidate_len: candidate.length,
1070
+ first_error: firstErrMsg,
1071
+ second_error: secondErrMsg,
1072
+ error_pos: typeof pos_1 === 'number' ? pos_1 : undefined,
1073
+ error_line: lc_1 === null || lc_1 === void 0 ? void 0 : lc_1.line,
1074
+ error_column: lc_1 === null || lc_1 === void 0 ? void 0 : lc_1.column,
1075
+ error_context: typeof pos_1 === 'number' ? snippetAround(s, pos_1, 160) : undefined,
1076
+ };
1077
+ var msg_1 = "Failed to parse model output as JSON. " +
1078
+ "cleaned_len=".concat(s.length, " json_start=").concat(start, " json_end=").concat(end, ". ") +
1079
+ "first_error=".concat(JSON.stringify(firstErrMsg), " second_error=").concat(JSON.stringify(secondErrMsg), ".");
1080
+ throw new OpenAiResponsesError({
1081
+ code: 'JSON_PARSE',
1082
+ message: msg_1,
1083
+ details: details_1,
1084
+ cause: err2,
1085
+ });
1086
+ }
662
1087
  }
663
- throw new Error("Failed to parse model output as JSON");
1088
+ var pos = extractJsonParsePosition(firstErrMsg);
1089
+ var lc = extractJsonParseLineCol(firstErrMsg);
1090
+ var details = {
1091
+ model: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.model) === 'string' && ctx.model.trim() ? ctx.model : undefined,
1092
+ max_output_tokens: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.maxOutputTokens) === 'number' ? ctx.maxOutputTokens : undefined,
1093
+ requested_service_tier: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.requestedServiceTier) === 'string' && String(ctx.requestedServiceTier).trim()
1094
+ ? String(ctx.requestedServiceTier).trim()
1095
+ : undefined,
1096
+ service_tier: typeof (ctx === null || ctx === void 0 ? void 0 : ctx.serviceTier) === 'string' && ctx.serviceTier.trim() ? ctx.serviceTier.trim() : undefined,
1097
+ cleaned_len: s.length,
1098
+ cleaned_head: truncateHead(s, 250),
1099
+ cleaned_tail: truncateTail(s, 500),
1100
+ json_start: start >= 0 ? start : undefined,
1101
+ json_end: end >= 0 ? end : undefined,
1102
+ first_error: firstErrMsg,
1103
+ error_pos: typeof pos === 'number' ? pos : undefined,
1104
+ error_line: lc === null || lc === void 0 ? void 0 : lc.line,
1105
+ error_column: lc === null || lc === void 0 ? void 0 : lc.column,
1106
+ error_context: typeof pos === 'number' ? snippetAround(s, pos, 160) : undefined,
1107
+ };
1108
+ var msg = "Failed to parse model output as JSON. " +
1109
+ "cleaned_len=".concat(s.length, ". ") +
1110
+ "error=".concat(JSON.stringify(firstErrMsg), ".");
1111
+ throw new OpenAiResponsesError({
1112
+ code: 'JSON_PARSE',
1113
+ message: msg,
1114
+ details: details,
1115
+ cause: err1,
1116
+ });
664
1117
  }
665
1118
  };
666
1119
  /**
@@ -798,7 +1251,7 @@ var OpenAiResponses = /** @class */ (function () {
798
1251
  };
799
1252
  OpenAiResponses.prototype.ensureModulesProcessed = function () {
800
1253
  return __awaiter(this, void 0, void 0, function () {
801
- var error_2;
1254
+ var error_4;
802
1255
  return __generator(this, function (_a) {
803
1256
  switch (_a.label) {
804
1257
  case 0:
@@ -818,9 +1271,9 @@ var OpenAiResponses = /** @class */ (function () {
818
1271
  this.modulesProcessed = true;
819
1272
  return [3 /*break*/, 4];
820
1273
  case 3:
821
- error_2 = _a.sent();
1274
+ error_4 = _a.sent();
822
1275
  this.processingModulesPromise = null;
823
- throw error_2;
1276
+ throw error_4;
824
1277
  case 4: return [2 /*return*/];
825
1278
  }
826
1279
  });
@@ -909,8 +1362,7 @@ var OpenAiResponses = /** @class */ (function () {
909
1362
  // Model/background defaults
910
1363
  // -----------------------------------------
911
1364
/**
 * Resolve the model to use for a request.
 *
 * Prefers the explicitly requested model; falls back to the configured
 * instance default only when none was provided (nullish check, so an
 * empty string passes through unchanged).
 */
OpenAiResponses.prototype.resolveModel = function (model) {
    if (model === null || model === undefined) {
        return this.defaultModel;
    }
    return model;
};
915
1367
  OpenAiResponses.prototype.resolveBackgroundMode = function (args) {
916
1368
  if (typeof args.requested === 'boolean') {
@@ -935,7 +1387,235 @@ var OpenAiResponses = /** @class */ (function () {
935
1387
  return OpenAiResponses;
936
1388
  }());
937
1389
  exports.OpenAiResponses = OpenAiResponses;
1390
/**
 * Structured error for the Responses integration.
 *
 * Carries a machine-readable `code`, a `details` bag for logging, the
 * originating `cause`, and a `retryable` hint (defaults to true).
 */
var OpenAiResponsesError = /** @class */ (function (_super) {
    __extends(OpenAiResponsesError, _super);
    function OpenAiResponsesError(args) {
        var _newTarget = this.constructor;
        var _this = _super.call(this, args.message) || this;
        _this.name = 'OpenAiResponsesError';
        _this.code = args.code;
        _this.details = args.details === null || args.details === undefined ? {} : args.details;
        _this.cause = args.cause;
        _this.retryable = typeof args.retryable === 'boolean' ? args.retryable : true;
        // Restore the prototype chain: required when an ES5 target subclasses
        // a built-in like Error, so `instanceof` keeps working.
        Object.setPrototypeOf(_this, _newTarget.prototype);
        return _this;
    }
    return OpenAiResponsesError;
}(Error));
exports.OpenAiResponsesError = OpenAiResponsesError;
1407
/**
 * Keep at most `max` leading characters of `text`, appending '...' when
 * anything was dropped. Nullish input is treated as the empty string, and a
 * non-positive `max` yields ''.
 */
function truncateHead(text, max) {
    var str = text === null || text === undefined ? '' : String(text);
    if (max <= 0) {
        return '';
    }
    return str.length > max ? str.slice(0, max) + '...' : str;
}
1417
/**
 * Keep at most `max` trailing characters of `text`, prefixing '...' when
 * anything was dropped. Nullish input is treated as the empty string, and a
 * non-positive `max` yields ''.
 */
function truncateTail(text, max) {
    var str = text === null || text === undefined ? '' : String(text);
    if (max <= 0) {
        return '';
    }
    return str.length > max ? '...' + str.slice(str.length - max) : str;
}
1427
/**
 * Pull the character offset out of a V8-style JSON.parse error message
 * ("... at position 123"). Returns undefined when no finite offset is found.
 */
function extractJsonParsePosition(errMsg) {
    var text = errMsg === null || errMsg === undefined ? '' : String(errMsg);
    var match = /at position\s+(\d+)/i.exec(text);
    if (match === null) {
        return undefined;
    }
    var offset = Number(match[1]);
    if (!Number.isFinite(offset)) {
        return undefined;
    }
    return offset;
}
1435
/**
 * Pull "line N column M" coordinates out of a JSON.parse error message.
 * Returns undefined when the message has no such coordinates; otherwise an
 * object whose `line`/`column` are numbers (or undefined if non-finite).
 */
function extractJsonParseLineCol(errMsg) {
    var text = errMsg === null || errMsg === undefined ? '' : String(errMsg);
    var match = /line\s+(\d+)\s+column\s+(\d+)/i.exec(text);
    if (match === null) {
        return undefined;
    }
    var lineNum = Number(match[1]);
    var colNum = Number(match[2]);
    return {
        line: Number.isFinite(lineNum) ? lineNum : undefined,
        column: Number.isFinite(colNum) ? colNum : undefined,
    };
}
1447
/**
 * Build a context snippet around offset `pos` in `text`, marking the spot
 * with "<<HERE>>" and adding '...' on any side that was trimmed. Non-finite
 * `pos` is treated as 0; `pos` is clamped into [0, text.length].
 */
function snippetAround(text, pos, radius) {
    var str = text === null || text === undefined ? '' : String(text);
    var center = Number.isFinite(pos) ? pos : 0;
    center = Math.min(str.length, Math.max(0, center));
    var span = Math.max(0, radius);
    var from = Math.max(0, center - span);
    var to = Math.min(str.length, center + span);
    var prefix = from > 0 ? '...' : '';
    var suffix = to < str.length ? '...' : '';
    return prefix + str.slice(from, center) + '<<HERE>>' + str.slice(center, to) + suffix;
}
938
1459
/**
 * Resolve after `ms` milliseconds — a thin Promise wrapper around setTimeout.
 */
function sleep(ms) {
    return new Promise(function (resolve) {
        setTimeout(resolve, ms);
    });
}
1462
/**
 * Sleep for `ms` milliseconds, but wake early if `signal` is aborted.
 *
 * Never rejects — on abort it simply resolves sooner; the caller is expected
 * to check `signal.aborted` afterwards and act on it.
 */
function sleepWithAbort(ms, signal) {
    if (!signal) {
        return sleep(ms);
    }
    if (signal.aborted) {
        return Promise.resolve();
    }
    return new Promise(function (resolve) {
        var timer = setTimeout(finish, ms);
        // Idempotent teardown: stop the timer, drop the abort listener, resolve.
        function finish() {
            try {
                clearTimeout(timer);
            }
            catch (_a) {
                // ignore
            }
            try {
                if (typeof signal.removeEventListener === 'function') {
                    signal.removeEventListener('abort', finish);
                }
            }
            catch (_b) {
                // ignore
            }
            resolve();
        }
        try {
            if (typeof signal.addEventListener === 'function') {
                signal.addEventListener('abort', finish, { once: true });
            }
        }
        catch (_c) {
            // If the signal does not support listeners, degrade to a plain sleep:
            // the timeout above still resolves the promise.
        }
    });
}
1506
/**
 * Best-effort extraction of an HTTP status code from an SDK error object.
 * Checks `status` first, then `statusCode`; only finite numbers count.
 * Returns undefined for non-objects or when neither field qualifies.
 */
function extractHttpStatus(error) {
    if (!error || typeof error !== 'object') {
        return undefined;
    }
    var candidates = [error.status, error.statusCode];
    for (var i = 0; i < candidates.length; i++) {
        var value = candidates[i];
        if (typeof value === 'number' && Number.isFinite(value)) {
            return value;
        }
    }
    return undefined;
}
1521
/**
 * Best-effort extraction of the provider request id from an error object.
 *
 * Order of preference: a `request_id`/`requestId` property, then an
 * `x-request-id` header — supporting both Fetch-style Headers (with `.get`)
 * and plain header maps (case-insensitive key match). Values are trimmed;
 * blank strings count as absent.
 */
function extractRequestId(error) {
    if (!error || typeof error !== 'object') {
        return undefined;
    }
    var rec = error;
    var direct = rec.request_id !== null && rec.request_id !== undefined ? rec.request_id : rec.requestId;
    if (typeof direct === 'string' && direct.trim()) {
        return direct.trim();
    }
    var headers = rec.headers;
    if (!headers) {
        return undefined;
    }
    if (typeof headers.get === 'function') {
        var fromGetter = headers.get('x-request-id');
        if (typeof fromGetter === 'string' && fromGetter.trim()) {
            return fromGetter.trim();
        }
        return undefined;
    }
    if (typeof headers === 'object' && !Array.isArray(headers)) {
        var keys = Object.keys(headers);
        for (var i = 0; i < keys.length; i++) {
            if (String(keys[i]).toLowerCase() !== 'x-request-id') {
                continue;
            }
            // Deliberately decide on the first matching key, even when its
            // value is not a usable string (single-winner header semantics).
            var value = headers[keys[i]];
            if (typeof value === 'string' && value.trim()) {
                return value.trim();
            }
            return undefined;
        }
    }
    return undefined;
}
1551
/**
 * Decide whether an HTTP status is worth retrying.
 *
 * Unknown (non-numeric) statuses are treated as retryable; otherwise
 * 408 (timeout), 409 (conflict), 429 (rate limit) and every 5xx qualify.
 */
function isRetryableHttpStatus(status) {
    if (typeof status !== 'number') {
        return true;
    }
    switch (status) {
        case 408:
        case 409:
        case 429:
            return true;
        default:
            return status >= 500;
    }
}
1563
/**
 * Detect request-cancellation errors across fetch implementations:
 * - Error with `name === 'AbortError'` (case-insensitive)
 * - Error whose whole message is 'aborted' / 'request aborted'
 * - any object with `code === 'ABORT_ERR'` (case-insensitive)
 */
function isAbortError(error) {
    if (!error) {
        return false;
    }
    if (error instanceof Error) {
        var errName = String(error.name === null || error.name === undefined ? '' : error.name).toLowerCase();
        if (errName === 'aborterror') {
            return true;
        }
        var message = String(error.message === null || error.message === undefined ? '' : error.message).toLowerCase();
        // Exact matches only — a stray "abort" substring is not treated as
        // cancellation, to stay conservative.
        if (message === 'aborted' || message === 'request aborted') {
            return true;
        }
    }
    if (typeof error === 'object') {
        var code = error.code;
        if (typeof code === 'string' && code.toUpperCase() === 'ABORT_ERR') {
            return true;
        }
    }
    return false;
}
1591
/**
 * Build a JSON-safe summary of an arbitrary thrown value for logging.
 *
 * Always includes a `message`; adds HTTP `status` and `request_id` when
 * discoverable, and the structured `code`/`details` for
 * OpenAiResponsesError instances. Falsy inputs become a generic summary.
 */
function safeErrorSummary(error) {
    if (!error) {
        return { message: 'Unknown error' };
    }
    var status = extractHttpStatus(error);
    var requestId = extractRequestId(error);
    var statusField = typeof status === 'number' ? status : undefined;
    if (error instanceof OpenAiResponsesError) {
        return {
            name: error.name,
            message: error.message,
            code: error.code,
            details: error.details,
            status: statusField,
            request_id: requestId,
        };
    }
    if (error instanceof Error) {
        return {
            name: error.name,
            message: error.message,
            status: statusField,
            request_id: requestId,
        };
    }
    return {
        message: String(error),
        status: statusField,
        request_id: requestId,
    };
}
941
1621
  //# sourceMappingURL=OpenAiResponses.js.map