@theia/ai-openai 1.67.0-next.56 → 1.67.0-next.59

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (53)
  1. package/lib/browser/openai-frontend-application-contribution.d.ts +18 -0
  2. package/lib/browser/openai-frontend-application-contribution.d.ts.map +1 -0
  3. package/lib/browser/openai-frontend-application-contribution.js +163 -0
  4. package/lib/browser/openai-frontend-application-contribution.js.map +1 -0
  5. package/lib/browser/openai-frontend-module.d.ts +4 -0
  6. package/lib/browser/openai-frontend-module.d.ts.map +1 -0
  7. package/lib/browser/openai-frontend-module.js +33 -0
  8. package/lib/browser/openai-frontend-module.js.map +1 -0
  9. package/lib/common/index.d.ts +2 -0
  10. package/lib/common/index.d.ts.map +1 -0
  11. package/lib/common/index.js +20 -0
  12. package/lib/common/index.js.map +1 -0
  13. package/lib/common/openai-language-models-manager.d.ts +63 -0
  14. package/lib/common/openai-language-models-manager.d.ts.map +1 -0
  15. package/lib/common/openai-language-models-manager.js +22 -0
  16. package/lib/common/openai-language-models-manager.js.map +1 -0
  17. package/lib/common/openai-preferences.d.ts +7 -0
  18. package/lib/common/openai-preferences.d.ts.map +1 -0
  19. package/lib/common/openai-preferences.js +141 -0
  20. package/lib/common/openai-preferences.js.map +1 -0
  21. package/lib/node/openai-backend-module.d.ts +5 -0
  22. package/lib/node/openai-backend-module.d.ts.map +1 -0
  23. package/lib/node/openai-backend-module.js +40 -0
  24. package/lib/node/openai-backend-module.js.map +1 -0
  25. package/lib/node/openai-language-model.d.ts +80 -0
  26. package/lib/node/openai-language-model.d.ts.map +1 -0
  27. package/lib/node/openai-language-model.js +324 -0
  28. package/lib/node/openai-language-model.js.map +1 -0
  29. package/lib/node/openai-language-models-manager-impl.d.ts +22 -0
  30. package/lib/node/openai-language-models-manager-impl.d.ts.map +1 -0
  31. package/lib/node/openai-language-models-manager-impl.js +162 -0
  32. package/lib/node/openai-language-models-manager-impl.js.map +1 -0
  33. package/lib/node/openai-model-utils.spec.d.ts +2 -0
  34. package/lib/node/openai-model-utils.spec.d.ts.map +1 -0
  35. package/lib/node/openai-model-utils.spec.js +467 -0
  36. package/lib/node/openai-model-utils.spec.js.map +1 -0
  37. package/lib/node/openai-request-api-context.d.ts +4 -0
  38. package/lib/node/openai-request-api-context.d.ts.map +1 -0
  39. package/lib/node/openai-request-api-context.js +18 -0
  40. package/lib/node/openai-request-api-context.js.map +1 -0
  41. package/lib/node/openai-response-api-utils.d.ts +45 -0
  42. package/lib/node/openai-response-api-utils.d.ts.map +1 -0
  43. package/lib/node/openai-response-api-utils.js +724 -0
  44. package/lib/node/openai-response-api-utils.js.map +1 -0
  45. package/lib/node/openai-streaming-iterator.d.ts +24 -0
  46. package/lib/node/openai-streaming-iterator.d.ts.map +1 -0
  47. package/lib/node/openai-streaming-iterator.js +176 -0
  48. package/lib/node/openai-streaming-iterator.js.map +1 -0
  49. package/lib/node/openai-streaming-iterator.spec.d.ts +2 -0
  50. package/lib/node/openai-streaming-iterator.spec.d.ts.map +1 -0
  51. package/lib/node/openai-streaming-iterator.spec.js +207 -0
  52. package/lib/node/openai-streaming-iterator.spec.js.map +1 -0
  53. package/package.json +6 -6
package/lib/node/openai-response-api-utils.js
@@ -0,0 +1,724 @@
+ "use strict";
+ // *****************************************************************************
+ // Copyright (C) 2025 EclipseSource GmbH.
+ //
+ // This program and the accompanying materials are made available under the
+ // terms of the Eclipse Public License v. 2.0 which is available at
+ // http://www.eclipse.org/legal/epl-2.0.
+ //
+ // This Source Code may also be made available under the following Secondary
+ // Licenses when the conditions for such availability set forth in the Eclipse
+ // Public License v. 2.0 are satisfied: GNU General Public License, version 2
+ // with the GNU Classpath Exception which is available at
+ // https://www.gnu.org/software/classpath/license.html.
+ //
+ // SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
+ // *****************************************************************************
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.recursiveStrictJSONSchema = exports.processSystemMessages = exports.OpenAiResponseApiUtils = void 0;
+ const tslib_1 = require("tslib");
+ const ai_core_1 = require("@theia/ai-core");
+ const core_1 = require("@theia/core");
+ const promise_util_1 = require("@theia/core/lib/common/promise-util");
+ const inversify_1 = require("@theia/core/shared/inversify");
+ /**
+  * Utility class for handling OpenAI Response API requests and tool calling cycles.
+  *
+  * This class encapsulates the complexity of the Response API's multi-turn conversation
+  * patterns for tool calling, keeping the main language model class clean and focused.
+  */
+ let OpenAiResponseApiUtils = class OpenAiResponseApiUtils {
+     /**
+      * Handles Response API requests with proper tool calling cycles.
+      * Works for both streaming and non-streaming cases.
+      */
+     async handleRequest(openai, request, settings, model, modelUtils, developerMessageSettings, runnerOptions, modelId, isStreaming, tokenUsageService, cancellationToken) {
+         if (cancellationToken === null || cancellationToken === void 0 ? void 0 : cancellationToken.isCancellationRequested) {
+             return { text: '' };
+         }
+         const { instructions, input } = this.processMessages(request.messages, developerMessageSettings, model);
+         const tools = this.convertToolsForResponseApi(request.tools);
+         // If no tools are provided, use simple response handling
+         if (!tools || tools.length === 0) {
+             if (isStreaming) {
+                 const stream = openai.responses.stream({
+                     model: model,
+                     instructions,
+                     input,
+                     ...settings
+                 });
+                 return { stream: this.createSimpleResponseApiStreamIterator(stream, request.requestId, modelId, tokenUsageService, cancellationToken) };
+             }
+             else {
+                 const response = await openai.responses.create({
+                     model: model,
+                     instructions,
+                     input,
+                     ...settings
+                 });
+                 // Record token usage if available
+                 if (tokenUsageService && response.usage) {
+                     await tokenUsageService.recordTokenUsage(modelId, {
+                         inputTokens: response.usage.input_tokens,
+                         outputTokens: response.usage.output_tokens,
+                         requestId: request.requestId
+                     });
+                 }
+                 return { text: response.output_text || '' };
+             }
+         }
+         // Handle tool calling with multi-turn conversation using the unified iterator
+         const iterator = new ResponseApiToolCallIterator(openai, request, settings, model, modelUtils, developerMessageSettings, runnerOptions, modelId, this, isStreaming, tokenUsageService, cancellationToken);
+         return { stream: iterator };
+     }
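
Aside: a minimal sketch of driving handleRequest directly, outside Theia's dependency injection. Everything except the class itself is an assumption for illustration (the openai SDK client setup, the model ids, the empty settings, the 'system' developer-message setting, and the placeholder runner options); in the package this wiring is done by the surrounding language model implementation.

```js
// Editorial sketch, not part of the package contents above.
const OpenAI = require('openai');
const { OpenAiResponseApiUtils } = require('@theia/ai-openai/lib/node/openai-response-api-utils');

async function demo() {
    const utils = new OpenAiResponseApiUtils();
    const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
    const request = {
        requestId: 'demo-1',
        messages: [{ actor: 'user', type: 'text', text: 'Say hello.' }]
        // no tools -> the simple (non-iterator) path above is taken
    };
    const result = await utils.handleRequest(
        openai, request, /* settings */ {}, 'gpt-4o-mini',
        /* modelUtils */ undefined, /* developerMessageSettings */ 'system',
        /* runnerOptions */ { maxChatCompletions: 100 }, 'openai/gpt-4o-mini',
        /* isStreaming */ false, /* tokenUsageService */ undefined,
        /* cancellationToken */ undefined);
    console.log(result.text); // non-streaming with no tools returns { text }
}
demo();
```
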
+     /**
+      * Converts ToolRequest objects to the format expected by the Response API.
+      */
+     convertToolsForResponseApi(tools) {
+         if (!tools || tools.length === 0) {
+             return undefined;
+         }
+         const converted = tools.map(tool => ({
+             type: 'function',
+             name: tool.name,
+             description: tool.description || '',
+             // The Response API is very strict re: JSON schema: all properties must be listed as required,
+             // and additional properties must be disallowed.
+             // https://platform.openai.com/docs/guides/function-calling#strict-mode
+             parameters: this.recursiveStrictToolCallParameters(tool.parameters),
+             strict: true
+         }));
+         console.debug(`Converted ${tools.length} tools for Response API:`, converted.map(t => t.name));
+         return converted;
+     }
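
Aside: a hedged example of what this conversion produces. The 'getWeather' tool is invented; the ToolRequest shape follows @theia/ai-core, and the require path follows the file list above.

```js
// Editorial sketch, not part of the package contents above.
const { OpenAiResponseApiUtils } = require('@theia/ai-openai/lib/node/openai-response-api-utils');

const [converted] = new OpenAiResponseApiUtils().convertToolsForResponseApi([{
    name: 'getWeather',
    description: 'Look up the current weather',
    parameters: { type: 'object', properties: { city: { type: 'string' } } },
    handler: async () => 'sunny'
}]);
// converted => {
//   type: 'function', name: 'getWeather', description: 'Look up the current weather',
//   parameters: { type: 'object', properties: { city: { type: 'string' } },
//                 additionalProperties: false, required: ['city'] },
//   strict: true
// }
```
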
+     recursiveStrictToolCallParameters(schema) {
+         return recursiveStrictJSONSchema(schema);
+     }
+     createSimpleResponseApiStreamIterator(stream, requestId, modelId, tokenUsageService, cancellationToken) {
+         return {
+             async *[Symbol.asyncIterator]() {
+                 var _a;
+                 try {
+                     for await (const event of stream) {
+                         if (cancellationToken === null || cancellationToken === void 0 ? void 0 : cancellationToken.isCancellationRequested) {
+                             break;
+                         }
+                         if (event.type === 'response.output_text.delta') {
+                             yield {
+                                 content: event.delta
+                             };
+                         }
+                         else if (event.type === 'response.completed') {
+                             if (tokenUsageService && ((_a = event.response) === null || _a === void 0 ? void 0 : _a.usage)) {
+                                 await tokenUsageService.recordTokenUsage(modelId, {
+                                     inputTokens: event.response.usage.input_tokens,
+                                     outputTokens: event.response.usage.output_tokens,
+                                     requestId
+                                 });
+                             }
+                         }
+                         else if (event.type === 'error') {
+                             console.error('Response API error:', event.message);
+                             throw new Error(`Response API error: ${event.message}`);
+                         }
+                     }
+                 }
+                 catch (error) {
+                     console.error('Error in Response API stream:', error);
+                     throw error;
+                 }
+             }
+         };
+     }
+     /**
+      * Processes the provided list of messages by applying system message adjustments and converting
+      * them directly to the format expected by the OpenAI Response API.
+      *
+      * This method converts messages directly without going through ChatCompletionMessageParam types.
+      *
+      * @param messages the list of messages to process.
+      * @param developerMessageSettings how system and developer messages are handled during processing.
+      * @param model the OpenAI model identifier. Currently not used, but allows subclasses to implement model-specific behavior.
+      * @returns an object containing instructions and input formatted for the Response API.
+      */
+     processMessages(messages, developerMessageSettings, model) {
+         const processed = this.processSystemMessages(messages, developerMessageSettings)
+             .filter(m => m.type !== 'thinking');
+         // Extract system/developer messages for instructions
+         const systemMessages = processed.filter((m) => m.type === 'text' && m.actor === 'system');
+         const instructions = systemMessages.length > 0
+             ? systemMessages.map(m => m.text).join('\n')
+             : undefined;
+         // Convert non-system messages to Response API input items
+         const nonSystemMessages = processed.filter(m => m.actor !== 'system');
+         const input = [];
+         for (const message of nonSystemMessages) {
+             if (ai_core_1.LanguageModelMessage.isTextMessage(message)) {
+                 if (message.actor === 'ai') {
+                     // Assistant messages use ResponseOutputMessage format
+                     input.push({
+                         id: `msg_${Date.now()}_${Math.random().toString(36).substring(2, 11)}`,
+                         type: 'message',
+                         role: 'assistant',
+                         status: 'completed',
+                         content: [{
+                                 type: 'output_text',
+                                 text: message.text,
+                                 annotations: []
+                             }]
+                     });
+                 }
+                 else {
+                     // User messages use input format
+                     input.push({
+                         type: 'message',
+                         role: 'user',
+                         content: [{
+                                 type: 'input_text',
+                                 text: message.text
+                             }]
+                     });
+                 }
+             }
+             else if (ai_core_1.LanguageModelMessage.isToolUseMessage(message)) {
+                 input.push({
+                     type: 'function_call',
+                     call_id: message.id,
+                     name: message.name,
+                     arguments: JSON.stringify(message.input)
+                 });
+             }
+             else if (ai_core_1.LanguageModelMessage.isToolResultMessage(message)) {
+                 const content = typeof message.content === 'string' ? message.content : JSON.stringify(message.content);
+                 input.push({
+                     type: 'function_call_output',
+                     call_id: message.tool_use_id,
+                     output: content
+                 });
+             }
+             else if (ai_core_1.LanguageModelMessage.isImageMessage(message)) {
+                 input.push({
+                     type: 'message',
+                     role: 'user',
+                     content: [{
+                             type: 'input_image',
+                             detail: 'auto',
+                             image_url: ai_core_1.ImageContent.isBase64(message.image) ?
+                                 `data:${message.image.mimeType};base64,${message.image.base64data}` :
+                                 message.image.url
+                         }]
+                 });
+             }
+             else if (ai_core_1.LanguageModelMessage.isThinkingMessage(message)) {
+                 // Pass
+             }
+             else {
+                 (0, core_1.unreachable)(message);
+             }
+         }
+         return { instructions, input };
+     }
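
Aside: a small hedged illustration of the resulting split. The message shapes follow @theia/ai-core's LanguageModelMessage; 'system' is assumed here to be one of the developerMessageSettings values that processSystemMessages (see the end of the file) passes through unchanged, so the system message is extracted as instructions.

```js
// Editorial sketch, not part of the package contents above.
const { OpenAiResponseApiUtils } = require('@theia/ai-openai/lib/node/openai-response-api-utils');

const { instructions, input } = new OpenAiResponseApiUtils().processMessages([
    { actor: 'system', type: 'text', text: 'You are concise.' },
    { actor: 'user', type: 'text', text: 'Hi!' },
    { actor: 'ai', type: 'text', text: 'Hello!' }
], 'system', 'gpt-4o-mini');
// instructions => 'You are concise.'
// input => [
//   { type: 'message', role: 'user', content: [{ type: 'input_text', text: 'Hi!' }] },
//   { id: 'msg_...', type: 'message', role: 'assistant', status: 'completed',
//     content: [{ type: 'output_text', text: 'Hello!', annotations: [] }] }
// ]
```
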
+     processSystemMessages(messages, developerMessageSettings) {
+         return processSystemMessages(messages, developerMessageSettings);
+     }
+ };
+ exports.OpenAiResponseApiUtils = OpenAiResponseApiUtils;
+ exports.OpenAiResponseApiUtils = OpenAiResponseApiUtils = tslib_1.__decorate([
+     (0, inversify_1.injectable)()
+ ], OpenAiResponseApiUtils);
+ /**
+  * Iterator for handling Response API streaming with tool calls.
+  * Based on the pattern from openai-streaming-iterator.ts but adapted for Response API.
+  */
+ class ResponseApiToolCallIterator {
+     constructor(openai, request, settings, model, modelUtils, developerMessageSettings, runnerOptions, modelId, utils, isStreaming, tokenUsageService, cancellationToken) {
+         this.openai = openai;
+         this.request = request;
+         this.settings = settings;
+         this.model = model;
+         this.modelUtils = modelUtils;
+         this.developerMessageSettings = developerMessageSettings;
+         this.runnerOptions = runnerOptions;
+         this.modelId = modelId;
+         this.utils = utils;
+         this.isStreaming = isStreaming;
+         this.tokenUsageService = tokenUsageService;
+         this.cancellationToken = cancellationToken;
+         this.requestQueue = new Array();
+         this.messageCache = new Array();
+         this.done = false;
+         this.terminalError = undefined;
+         this.currentToolCalls = new Map();
+         this.totalInputTokens = 0;
+         this.totalOutputTokens = 0;
+         this.iteration = 0;
+         this.currentResponseText = '';
+         const { instructions, input } = utils.processMessages(request.messages, developerMessageSettings, model);
+         this.instructions = instructions;
+         this.currentInput = input;
+         this.tools = utils.convertToolsForResponseApi(request.tools);
+         this.maxIterations = runnerOptions.maxChatCompletions || 100;
+         // Start the first iteration
+         this.startIteration();
+     }
+     [Symbol.asyncIterator]() {
+         return this;
+     }
+     async next() {
+         if (this.messageCache.length && this.requestQueue.length) {
+             throw new Error('Assertion error: cache and queue should not both be populated.');
+         }
+         // Deliver all the messages we got, even if we've since terminated.
+         if (this.messageCache.length) {
+             return {
+                 done: false,
+                 value: this.messageCache.shift()
+             };
+         }
+         else if (this.terminalError) {
+             throw this.terminalError;
+         }
+         else if (this.done) {
+             return {
+                 done: true,
+                 value: undefined
+             };
+         }
+         else {
+             const deferred = new promise_util_1.Deferred();
+             this.requestQueue.push(deferred);
+             return deferred.promise;
+         }
+     }
+     async startIteration() {
+         var _a;
+         try {
+             while (this.iteration < this.maxIterations && !((_a = this.cancellationToken) === null || _a === void 0 ? void 0 : _a.isCancellationRequested)) {
+                 console.debug(`Starting Response API iteration ${this.iteration} with ${this.currentInput.length} input messages`);
+                 await this.processStream();
+                 // Check if we have tool calls that need execution
+                 if (this.currentToolCalls.size === 0) {
+                     // No tool calls, we're done
+                     this.finalize();
+                     return;
+                 }
+                 // Execute all tool calls
+                 await this.executeToolCalls();
+                 // Prepare for next iteration
+                 this.prepareNextIteration();
+                 this.iteration++;
+             }
+             // Max iterations reached
+             this.finalize();
+         }
+         catch (error) {
+             this.terminalError = error instanceof Error ? error : new Error(String(error));
+             this.finalize();
+         }
+     }
+     async processStream() {
+         var _a;
+         this.currentToolCalls.clear();
+         this.currentResponseText = '';
+         if (this.isStreaming) {
+             // Use streaming API
+             const stream = this.openai.responses.stream({
+                 model: this.model,
+                 instructions: this.instructions,
+                 input: this.currentInput,
+                 tools: this.tools,
+                 ...this.settings
+             });
+             for await (const event of stream) {
+                 if ((_a = this.cancellationToken) === null || _a === void 0 ? void 0 : _a.isCancellationRequested) {
+                     break;
+                 }
+                 await this.handleStreamEvent(event);
+             }
+         }
+         else {
+             // Use non-streaming API but yield results incrementally
+             await this.processNonStreamingResponse();
+         }
+     }
+     async processNonStreamingResponse() {
+         var _a;
+         const response = await this.openai.responses.create({
+             model: this.model,
+             instructions: this.instructions,
+             input: this.currentInput,
+             tools: this.tools,
+             ...this.settings
+         });
+         // Record token usage
+         if (response.usage) {
+             this.totalInputTokens += response.usage.input_tokens;
+             this.totalOutputTokens += response.usage.output_tokens;
+         }
+         // First, yield any text content from the response
+         this.currentResponseText = response.output_text || '';
+         if (this.currentResponseText) {
+             this.handleIncoming({ content: this.currentResponseText });
+         }
+         // Find function calls in the response
+         const functionCalls = ((_a = response.output) === null || _a === void 0 ? void 0 : _a.filter((item) => item.type === 'function_call')) || [];
+         // Process each function call
+         for (const functionCall of functionCalls) {
+             if (functionCall.id && functionCall.name) {
+                 const toolCall = {
+                     id: functionCall.id,
+                     call_id: functionCall.call_id || functionCall.id,
+                     name: functionCall.name,
+                     arguments: functionCall.arguments || '',
+                     executed: false
+                 };
+                 this.currentToolCalls.set(functionCall.id, toolCall);
+                 // Yield the tool call initiation
+                 this.handleIncoming({
+                     tool_calls: [{
+                             id: functionCall.id,
+                             finished: false,
+                             function: {
+                                 name: functionCall.name,
+                                 arguments: functionCall.arguments || ''
+                             }
+                         }]
+                 });
+             }
+         }
+     }
+     async handleStreamEvent(event) {
+         var _a, _b, _c;
+         switch (event.type) {
+             case 'response.output_text.delta':
+                 this.currentResponseText += event.delta;
+                 this.handleIncoming({ content: event.delta });
+                 break;
+             case 'response.output_item.added':
+                 if (((_a = event.item) === null || _a === void 0 ? void 0 : _a.type) === 'function_call') {
+                     this.handleFunctionCallAdded(event.item);
+                 }
+                 break;
+             case 'response.function_call_arguments.delta':
+                 this.handleFunctionCallArgsDelta(event);
+                 break;
+             case 'response.function_call_arguments.done':
+                 await this.handleFunctionCallArgsDone(event);
+                 break;
+             case 'response.output_item.done':
+                 if (((_b = event.item) === null || _b === void 0 ? void 0 : _b.type) === 'function_call') {
+                     this.handleFunctionCallDone(event.item);
+                 }
+                 break;
+             case 'response.completed':
+                 if ((_c = event.response) === null || _c === void 0 ? void 0 : _c.usage) {
+                     this.totalInputTokens += event.response.usage.input_tokens;
+                     this.totalOutputTokens += event.response.usage.output_tokens;
+                 }
+                 break;
+             case 'error':
+                 console.error('Response API error:', event.message);
+                 throw new Error(`Response API error: ${event.message}`);
+         }
+     }
+     handleFunctionCallAdded(functionCall) {
+         if (functionCall.id && functionCall.call_id) {
+             console.debug(`Function call added: ${functionCall.name} with id ${functionCall.id} and call_id ${functionCall.call_id}`);
+             const toolCall = {
+                 id: functionCall.id,
+                 call_id: functionCall.call_id,
+                 name: functionCall.name || '',
+                 arguments: functionCall.arguments || '',
+                 executed: false
+             };
+             this.currentToolCalls.set(functionCall.id, toolCall);
+             this.handleIncoming({
+                 tool_calls: [{
+                         id: functionCall.id,
+                         finished: false,
+                         function: {
+                             name: functionCall.name || '',
+                             arguments: functionCall.arguments || ''
+                         }
+                     }]
+             });
+         }
+     }
+     handleFunctionCallArgsDelta(event) {
+         const toolCall = this.currentToolCalls.get(event.item_id);
+         if (toolCall) {
+             toolCall.arguments += event.delta;
+             if (event.delta) {
+                 this.handleIncoming({
+                     tool_calls: [{
+                             id: event.item_id,
+                             function: {
+                                 arguments: event.delta
+                             }
+                         }]
+                 });
+             }
+         }
+     }
+     async handleFunctionCallArgsDone(event) {
+         let toolCall = this.currentToolCalls.get(event.item_id);
+         if (!toolCall) {
+             // Create if we didn't see the added event
+             toolCall = {
+                 id: event.item_id,
+                 name: event.name || '',
+                 arguments: event.arguments || '',
+                 executed: false
+             };
+             this.currentToolCalls.set(event.item_id, toolCall);
+             this.handleIncoming({
+                 tool_calls: [{
+                         id: event.item_id,
+                         finished: false,
+                         function: {
+                             name: event.name || '',
+                             arguments: event.arguments || ''
+                         }
+                     }]
+             });
+         }
+         else {
+             // Update with final values
+             toolCall.name = event.name || toolCall.name;
+             toolCall.arguments = event.arguments || toolCall.arguments;
+         }
+     }
+     handleFunctionCallDone(functionCall) {
+         if (!functionCall.id) {
+             console.warn('Unexpected absence of ID for call ID', functionCall.call_id);
+             return;
+         }
+         const toolCall = this.currentToolCalls.get(functionCall.id);
+         if (toolCall && !toolCall.call_id && functionCall.call_id) {
+             toolCall.call_id = functionCall.call_id;
+         }
+     }
+     async executeToolCalls() {
+         var _a;
+         for (const [itemId, toolCall] of this.currentToolCalls) {
+             if (toolCall.executed) {
+                 continue;
+             }
+             const tool = (_a = this.request.tools) === null || _a === void 0 ? void 0 : _a.find(t => t.name === toolCall.name);
+             if (tool) {
+                 try {
+                     const result = await tool.handler(toolCall.arguments);
+                     toolCall.result = result;
+                     // Yield the tool call completion
+                     this.handleIncoming({
+                         tool_calls: [{
+                                 id: itemId,
+                                 finished: true,
+                                 function: {
+                                     name: toolCall.name,
+                                     arguments: toolCall.arguments
+                                 },
+                                 result
+                             }]
+                     });
+                 }
+                 catch (error) {
+                     console.error(`Error executing tool ${toolCall.name}:`, error);
+                     toolCall.error = error instanceof Error ? error : new Error(String(error));
+                     const errorResult = {
+                         type: 'error',
+                         data: error instanceof Error ? error.message : String(error)
+                     };
+                     // Yield the tool call error
+                     this.handleIncoming({
+                         tool_calls: [{
+                                 id: itemId,
+                                 finished: true,
+                                 function: {
+                                     name: toolCall.name,
+                                     arguments: toolCall.arguments
+                                 },
+                                 result: errorResult
+                             }]
+                     });
+                 }
+             }
+             else {
+                 console.warn(`Tool ${toolCall.name} not found in request tools`);
+                 toolCall.error = new Error(`Tool ${toolCall.name} not found`);
+                 const errorResult = {
+                     type: 'error',
+                     data: `Tool ${toolCall.name} not found`
+                 };
+                 // Yield the tool call error
+                 this.handleIncoming({
+                     tool_calls: [{
+                             id: itemId,
+                             finished: true,
+                             function: {
+                                 name: toolCall.name,
+                                 arguments: toolCall.arguments
+                             },
+                             result: errorResult
+                         }]
+                 });
+             }
+             toolCall.executed = true;
+         }
+     }
+     prepareNextIteration() {
+         // Add assistant response with the actual text that was streamed
+         const assistantMessage = {
+             role: 'assistant',
+             content: this.currentResponseText
+         };
+         // Add the function calls that were made by the assistant
+         const functionCalls = [];
+         for (const [itemId, toolCall] of this.currentToolCalls) {
+             functionCalls.push({
+                 type: 'function_call',
+                 call_id: toolCall.call_id || itemId,
+                 name: toolCall.name,
+                 arguments: toolCall.arguments
+             });
+         }
+         // Add tool results
+         const toolResults = [];
+         for (const [itemId, toolCall] of this.currentToolCalls) {
+             const callId = toolCall.call_id || itemId;
+             if (toolCall.result !== undefined) {
+                 const resultContent = typeof toolCall.result === 'string' ? toolCall.result : JSON.stringify(toolCall.result);
+                 toolResults.push({
+                     type: 'function_call_output',
+                     call_id: callId,
+                     output: resultContent
+                 });
+             }
+             else if (toolCall.error) {
+                 toolResults.push({
+                     type: 'function_call_output',
+                     call_id: callId,
+                     output: `Error: ${toolCall.error.message}`
+                 });
+             }
+         }
+         this.currentInput = [...this.currentInput, assistantMessage, ...functionCalls, ...toolResults];
+     }
+     handleIncoming(message) {
+         if (this.messageCache.length && this.requestQueue.length) {
+             throw new Error('Assertion error: cache and queue should not both be populated.');
+         }
+         if (this.requestQueue.length) {
+             this.requestQueue.shift().resolve({
+                 done: false,
+                 value: message
+             });
+         }
+         else {
+             this.messageCache.push(message);
+         }
+     }
+     async finalize() {
+         this.done = true;
+         // Record final token usage
+         if (this.tokenUsageService && (this.totalInputTokens > 0 || this.totalOutputTokens > 0)) {
+             try {
+                 await this.tokenUsageService.recordTokenUsage(this.modelId, {
+                     inputTokens: this.totalInputTokens,
+                     outputTokens: this.totalOutputTokens,
+                     requestId: this.request.requestId
+                 });
+             }
+             catch (error) {
+                 console.error('Error recording token usage:', error);
+             }
+         }
+         // Resolve any outstanding requests
+         if (this.terminalError) {
+             this.requestQueue.forEach(request => request.reject(this.terminalError));
+         }
+         else {
+             this.requestQueue.forEach(request => request.resolve({ done: true, value: undefined }));
+         }
+         this.requestQueue.length = 0;
+     }
+ }
+ function processSystemMessages(messages, developerMessageSettings) {
+     if (developerMessageSettings === 'skip') {
+         return messages.filter(message => message.actor !== 'system');
+     }
+     else if (developerMessageSettings === 'mergeWithFollowingUserMessage') {
+         const updated = messages.slice();
+         for (let i = updated.length - 1; i >= 0; i--) {
+             if (updated[i].actor === 'system') {
+                 const systemMessage = updated[i];
+                 if (i + 1 < updated.length && updated[i + 1].actor === 'user') {
+                     // Merge system message with the next user message
+                     const userMessage = updated[i + 1];
+                     updated[i + 1] = {
+                         ...updated[i + 1],
+                         text: systemMessage.text + '\n' + userMessage.text
+                     };
+                     updated.splice(i, 1);
+                 }
+                 else {
+                     // The message directly after is not a user message (or none exists), so create a new user message right after
+                     updated.splice(i + 1, 0, { actor: 'user', type: 'text', text: systemMessage.text });
+                     updated.splice(i, 1);
+                 }
+             }
+         }
+         return updated;
+     }
+     return messages;
+ }
+ exports.processSystemMessages = processSystemMessages;
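
Aside: with the 'mergeWithFollowingUserMessage' setting, a system message is folded into the user message that follows it. A minimal sketch (the messages are invented):

```js
// Editorial sketch, not part of the package contents above.
const { processSystemMessages } = require('@theia/ai-openai/lib/node/openai-response-api-utils');

const merged = processSystemMessages([
    { actor: 'system', type: 'text', text: 'Answer in French.' },
    { actor: 'user', type: 'text', text: 'What time is it?' }
], 'mergeWithFollowingUserMessage');
// merged => [{ actor: 'user', type: 'text', text: 'Answer in French.\nWhat time is it?' }]
```
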
+ function recursiveStrictJSONSchema(schema) {
+     if (typeof schema === 'boolean') {
+         return schema;
+     }
+     let result = undefined;
+     if (schema.properties) {
+         result !== null && result !== void 0 ? result : (result = { ...schema });
+         result.additionalProperties = false;
+         result.required = Object.keys(schema.properties);
+         result.properties = Object.fromEntries(Object.entries(schema.properties).map(([key, props]) => [key, recursiveStrictJSONSchema(props)]));
+     }
+     if (schema.items) {
+         result !== null && result !== void 0 ? result : (result = { ...schema });
+         result.items = Array.isArray(schema.items)
+             ? schema.items.map(recursiveStrictJSONSchema)
+             : recursiveStrictJSONSchema(schema.items);
+     }
+     if (schema.oneOf) {
+         result !== null && result !== void 0 ? result : (result = { ...schema });
+         result.oneOf = schema.oneOf.map(recursiveStrictJSONSchema);
+     }
+     if (schema.anyOf) {
+         result !== null && result !== void 0 ? result : (result = { ...schema });
+         result.anyOf = schema.anyOf.map(recursiveStrictJSONSchema);
+     }
+     if (schema.allOf) {
+         result !== null && result !== void 0 ? result : (result = { ...schema });
+         result.allOf = schema.allOf.map(recursiveStrictJSONSchema);
+     }
+     if (schema.if) {
+         result !== null && result !== void 0 ? result : (result = { ...schema });
+         result.if = recursiveStrictJSONSchema(schema.if);
+     }
+     if (schema.then) {
+         result !== null && result !== void 0 ? result : (result = { ...schema });
+         result.then = recursiveStrictJSONSchema(schema.then);
+     }
+     if (schema.else) {
+         result !== null && result !== void 0 ? result : (result = { ...schema });
+         result.else = recursiveStrictJSONSchema(schema.else);
+     }
+     if (schema.not) {
+         result !== null && result !== void 0 ? result : (result = { ...schema });
+         result.not = recursiveStrictJSONSchema(schema.not);
+     }
+     return result !== null && result !== void 0 ? result : schema;
+ }
+ exports.recursiveStrictJSONSchema = recursiveStrictJSONSchema;
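
Aside: the recursion matters for nested schemas, since strict mode must hold at every level, not just the top. A minimal sketch (the schema is invented):

```js
// Editorial sketch, not part of the package contents above: nested object and
// array schemas are tightened at every level.
const { recursiveStrictJSONSchema } = require('@theia/ai-openai/lib/node/openai-response-api-utils');

const strict = recursiveStrictJSONSchema({
    type: 'object',
    properties: {
        tags: { type: 'array', items: { type: 'object', properties: { label: { type: 'string' } } } }
    }
});
// strict.required                                    => ['tags']
// strict.additionalProperties                        => false
// strict.properties.tags.items.required              => ['label']
// strict.properties.tags.items.additionalProperties  => false
console.log(JSON.stringify(strict, undefined, 2));
```
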
+ //# sourceMappingURL=openai-response-api-utils.js.map