@theia/ai-openai 1.66.0-next.73 → 1.66.0-next.80

This diff shows the changes between two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (42)
  1. package/lib/browser/openai-frontend-application-contribution.d.ts.map +1 -1
  2. package/lib/browser/openai-frontend-application-contribution.js +11 -4
  3. package/lib/browser/openai-frontend-application-contribution.js.map +1 -1
  4. package/lib/common/openai-language-models-manager.d.ts +6 -0
  5. package/lib/common/openai-language-models-manager.d.ts.map +1 -1
  6. package/lib/common/openai-preferences.d.ts +1 -0
  7. package/lib/common/openai-preferences.d.ts.map +1 -1
  8. package/lib/common/openai-preferences.js +17 -1
  9. package/lib/common/openai-preferences.js.map +1 -1
  10. package/lib/node/openai-backend-module.d.ts.map +1 -1
  11. package/lib/node/openai-backend-module.js +2 -0
  12. package/lib/node/openai-backend-module.js.map +1 -1
  13. package/lib/node/openai-language-model.d.ts +8 -2
  14. package/lib/node/openai-language-model.d.ts.map +1 -1
  15. package/lib/node/openai-language-model.js +43 -42
  16. package/lib/node/openai-language-model.js.map +1 -1
  17. package/lib/node/openai-language-models-manager-impl.d.ts +2 -0
  18. package/lib/node/openai-language-models-manager-impl.d.ts.map +1 -1
  19. package/lib/node/openai-language-models-manager-impl.js +9 -2
  20. package/lib/node/openai-language-models-manager-impl.js.map +1 -1
  21. package/lib/node/openai-model-utils.spec.d.ts +1 -3
  22. package/lib/node/openai-model-utils.spec.d.ts.map +1 -1
  23. package/lib/node/openai-model-utils.spec.js +250 -23
  24. package/lib/node/openai-model-utils.spec.js.map +1 -1
  25. package/lib/node/openai-request-api-context.d.ts +4 -0
  26. package/lib/node/openai-request-api-context.d.ts.map +1 -0
  27. package/lib/node/openai-request-api-context.js +18 -0
  28. package/lib/node/openai-request-api-context.js.map +1 -0
  29. package/lib/node/openai-response-api-utils.d.ts +42 -0
  30. package/lib/node/openai-response-api-utils.d.ts.map +1 -0
  31. package/lib/node/openai-response-api-utils.js +677 -0
  32. package/lib/node/openai-response-api-utils.js.map +1 -0
  33. package/package.json +6 -6
  34. package/src/browser/openai-frontend-application-contribution.ts +10 -4
  35. package/src/common/openai-language-models-manager.ts +6 -0
  36. package/src/common/openai-preferences.ts +18 -0
  37. package/src/node/openai-backend-module.ts +2 -0
  38. package/src/node/openai-language-model.ts +59 -42
  39. package/src/node/openai-language-models-manager-impl.ts +8 -1
  40. package/src/node/openai-model-utils.spec.ts +257 -22
  41. package/src/node/openai-request-api-context.ts +23 -0
  42. package/src/node/openai-response-api-utils.ts +801 -0
package/lib/node/openai-response-api-utils.js
@@ -0,0 +1,677 @@
+ "use strict";
+ // *****************************************************************************
+ // Copyright (C) 2025 EclipseSource GmbH.
+ //
+ // This program and the accompanying materials are made available under the
+ // terms of the Eclipse Public License v. 2.0 which is available at
+ // http://www.eclipse.org/legal/epl-2.0.
+ //
+ // This Source Code may also be made available under the following Secondary
+ // Licenses when the conditions for such availability set forth in the Eclipse
+ // Public License v. 2.0 are satisfied: GNU General Public License, version 2
+ // with the GNU Classpath Exception which is available at
+ // https://www.gnu.org/software/classpath/license.html.
+ //
+ // SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
+ // *****************************************************************************
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.processSystemMessages = exports.OpenAiResponseApiUtils = void 0;
+ const tslib_1 = require("tslib");
+ const ai_core_1 = require("@theia/ai-core");
+ const core_1 = require("@theia/core");
+ const promise_util_1 = require("@theia/core/lib/common/promise-util");
+ const inversify_1 = require("@theia/core/shared/inversify");
+ /**
+  * Utility class for handling OpenAI Response API requests and tool calling cycles.
+  *
+  * This class encapsulates the complexity of the Response API's multi-turn conversation
+  * patterns for tool calling, keeping the main language model class clean and focused.
+  */
+ let OpenAiResponseApiUtils = class OpenAiResponseApiUtils {
+     /**
+      * Handles Response API requests with proper tool calling cycles.
+      * Works for both streaming and non-streaming cases.
+      */
+     async handleRequest(openai, request, settings, model, modelUtils, developerMessageSettings, runnerOptions, modelId, isStreaming, tokenUsageService, cancellationToken) {
+         if (cancellationToken === null || cancellationToken === void 0 ? void 0 : cancellationToken.isCancellationRequested) {
+             return { text: '' };
+         }
+         const { instructions, input } = this.processMessages(request.messages, developerMessageSettings, model);
+         const tools = this.convertToolsForResponseApi(request.tools);
+         // If no tools are provided, use simple response handling
+         if (!tools || tools.length === 0) {
+             if (isStreaming) {
+                 const stream = openai.responses.stream({
+                     model: model,
+                     instructions,
+                     input,
+                     ...settings
+                 });
+                 return { stream: this.createSimpleResponseApiStreamIterator(stream, request.requestId, modelId, tokenUsageService, cancellationToken) };
+             }
+             else {
+                 const response = await openai.responses.create({
+                     model: model,
+                     instructions,
+                     input,
+                     ...settings
+                 });
+                 // Record token usage if available
+                 if (tokenUsageService && response.usage) {
+                     await tokenUsageService.recordTokenUsage(modelId, {
+                         inputTokens: response.usage.input_tokens,
+                         outputTokens: response.usage.output_tokens,
+                         requestId: request.requestId
+                     });
+                 }
+                 return { text: response.output_text || '' };
+             }
+         }
+         // Handle tool calling with multi-turn conversation using the unified iterator
+         const iterator = new ResponseApiToolCallIterator(openai, request, settings, model, modelUtils, developerMessageSettings, runnerOptions, modelId, this, isStreaming, tokenUsageService, cancellationToken);
+         return { stream: iterator };
+     }
+     /**
+      * Converts ToolRequest objects to the format expected by the Response API.
+      */
+     convertToolsForResponseApi(tools) {
+         if (!tools || tools.length === 0) {
+             return undefined;
+         }
+         const converted = tools.map(tool => ({
+             type: 'function',
+             name: tool.name,
+             description: tool.description || '',
+             // The Response API is very strict re: JSON schema: all properties must be listed as required,
+             // and additional properties must be disallowed.
+             // https://platform.openai.com/docs/guides/function-calling#strict-mode
+             parameters: {
+                 ...tool.parameters,
+                 additionalProperties: false,
+                 required: tool.parameters.properties ? Object.keys(tool.parameters.properties) : []
+             },
+             strict: true
+         }));
+         console.debug(`Converted ${tools.length} tools for Response API:`, converted.map(t => t.name));
+         return converted;
+     }
+     createSimpleResponseApiStreamIterator(stream, requestId, modelId, tokenUsageService, cancellationToken) {
+         return {
+             async *[Symbol.asyncIterator]() {
+                 var _a;
+                 try {
+                     for await (const event of stream) {
+                         if (cancellationToken === null || cancellationToken === void 0 ? void 0 : cancellationToken.isCancellationRequested) {
+                             break;
+                         }
+                         if (event.type === 'response.output_text.delta') {
+                             yield {
+                                 content: event.delta
+                             };
+                         }
+                         else if (event.type === 'response.completed') {
+                             if (tokenUsageService && ((_a = event.response) === null || _a === void 0 ? void 0 : _a.usage)) {
+                                 await tokenUsageService.recordTokenUsage(modelId, {
+                                     inputTokens: event.response.usage.input_tokens,
+                                     outputTokens: event.response.usage.output_tokens,
+                                     requestId
+                                 });
+                             }
+                         }
+                         else if (event.type === 'error') {
+                             console.error('Response API error:', event.message);
+                             throw new Error(`Response API error: ${event.message}`);
+                         }
+                     }
+                 }
+                 catch (error) {
+                     console.error('Error in Response API stream:', error);
+                     throw error;
+                 }
+             }
+         };
+     }
+     /**
+      * Processes the provided list of messages by applying system message adjustments and converting
+      * them directly to the format expected by the OpenAI Response API.
+      *
+      * This method converts messages directly without going through ChatCompletionMessageParam types.
+      *
+      * @param messages the list of messages to process.
+      * @param developerMessageSettings how system and developer messages are handled during processing.
+      * @param model the OpenAI model identifier. Currently not used, but allows subclasses to implement model-specific behavior.
+      * @returns an object containing instructions and input formatted for the Response API.
+      */
+     processMessages(messages, developerMessageSettings, model) {
+         const processed = this.processSystemMessages(messages, developerMessageSettings)
+             .filter(m => m.type !== 'thinking');
+         // Extract system/developer messages for instructions
+         const systemMessages = processed.filter((m) => m.type === 'text' && m.actor === 'system');
+         const instructions = systemMessages.length > 0
+             ? systemMessages.map(m => m.text).join('\n')
+             : undefined;
+         // Convert non-system messages to Response API input items
+         const nonSystemMessages = processed.filter(m => m.actor !== 'system');
+         const input = [];
+         for (const message of nonSystemMessages) {
+             if (ai_core_1.LanguageModelMessage.isTextMessage(message)) {
+                 if (message.actor === 'ai') {
+                     // Assistant messages use ResponseOutputMessage format
+                     input.push({
+                         id: `msg_${Date.now()}_${Math.random().toString(36).substring(2, 11)}`,
+                         type: 'message',
+                         role: 'assistant',
+                         status: 'completed',
+                         content: [{
+                             type: 'output_text',
+                             text: message.text,
+                             annotations: []
+                         }]
+                     });
+                 }
+                 else {
+                     // User messages use input format
+                     input.push({
+                         type: 'message',
+                         role: 'user',
+                         content: [{
+                             type: 'input_text',
+                             text: message.text
+                         }]
+                     });
+                 }
+             }
+             else if (ai_core_1.LanguageModelMessage.isToolUseMessage(message)) {
+                 input.push({
+                     type: 'function_call',
+                     call_id: message.id,
+                     name: message.name,
+                     arguments: JSON.stringify(message.input)
+                 });
+             }
+             else if (ai_core_1.LanguageModelMessage.isToolResultMessage(message)) {
+                 const content = typeof message.content === 'string' ? message.content : JSON.stringify(message.content);
+                 input.push({
+                     type: 'function_call_output',
+                     call_id: message.tool_use_id,
+                     output: content
+                 });
+             }
+             else if (ai_core_1.LanguageModelMessage.isImageMessage(message)) {
+                 input.push({
+                     type: 'message',
+                     role: 'user',
+                     content: [{
+                         type: 'input_image',
+                         detail: 'auto',
+                         image_url: ai_core_1.ImageContent.isBase64(message.image) ?
+                             `data:${message.image.mimeType};base64,${message.image.base64data}` :
+                             message.image.url
+                     }]
+                 });
+             }
+             else if (ai_core_1.LanguageModelMessage.isThinkingMessage(message)) {
+                 // Pass
+             }
+             else {
+                 (0, core_1.unreachable)(message);
+             }
+         }
+         return { instructions, input };
+     }
+     processSystemMessages(messages, developerMessageSettings) {
+         return processSystemMessages(messages, developerMessageSettings);
+     }
+ };
+ exports.OpenAiResponseApiUtils = OpenAiResponseApiUtils;
+ exports.OpenAiResponseApiUtils = OpenAiResponseApiUtils = tslib_1.__decorate([
+     (0, inversify_1.injectable)()
+ ], OpenAiResponseApiUtils);
+ /**
+  * Iterator for handling Response API streaming with tool calls.
+  * Based on the pattern from openai-streaming-iterator.ts but adapted for Response API.
+  */
+ class ResponseApiToolCallIterator {
+     constructor(openai, request, settings, model, modelUtils, developerMessageSettings, runnerOptions, modelId, utils, isStreaming, tokenUsageService, cancellationToken) {
+         this.openai = openai;
+         this.request = request;
+         this.settings = settings;
+         this.model = model;
+         this.modelUtils = modelUtils;
+         this.developerMessageSettings = developerMessageSettings;
+         this.runnerOptions = runnerOptions;
+         this.modelId = modelId;
+         this.utils = utils;
+         this.isStreaming = isStreaming;
+         this.tokenUsageService = tokenUsageService;
+         this.cancellationToken = cancellationToken;
+         this.requestQueue = new Array();
+         this.messageCache = new Array();
+         this.done = false;
+         this.terminalError = undefined;
+         this.currentToolCalls = new Map();
+         this.totalInputTokens = 0;
+         this.totalOutputTokens = 0;
+         this.iteration = 0;
+         this.currentResponseText = '';
+         const { instructions, input } = utils.processMessages(request.messages, developerMessageSettings, model);
+         this.instructions = instructions;
+         this.currentInput = input;
+         this.tools = utils.convertToolsForResponseApi(request.tools);
+         this.maxIterations = runnerOptions.maxChatCompletions || 100;
+         // Start the first iteration
+         this.startIteration();
+     }
+     [Symbol.asyncIterator]() {
+         return this;
+     }
+     async next() {
+         if (this.messageCache.length && this.requestQueue.length) {
+             throw new Error('Assertion error: cache and queue should not both be populated.');
+         }
+         // Deliver all the messages we got, even if we've since terminated.
+         if (this.messageCache.length) {
+             return {
+                 done: false,
+                 value: this.messageCache.shift()
+             };
+         }
+         else if (this.terminalError) {
+             throw this.terminalError;
+         }
+         else if (this.done) {
+             return {
+                 done: true,
+                 value: undefined
+             };
+         }
+         else {
+             const deferred = new promise_util_1.Deferred();
+             this.requestQueue.push(deferred);
+             return deferred.promise;
+         }
+     }
+     async startIteration() {
+         var _a;
+         try {
+             while (this.iteration < this.maxIterations && !((_a = this.cancellationToken) === null || _a === void 0 ? void 0 : _a.isCancellationRequested)) {
+                 console.debug(`Starting Response API iteration ${this.iteration} with ${this.currentInput.length} input messages`);
+                 await this.processStream();
+                 // Check if we have tool calls that need execution
+                 if (this.currentToolCalls.size === 0) {
+                     // No tool calls, we're done
+                     this.finalize();
+                     return;
+                 }
+                 // Execute all tool calls
+                 await this.executeToolCalls();
+                 // Prepare for next iteration
+                 this.prepareNextIteration();
+                 this.iteration++;
+             }
+             // Max iterations reached
+             this.finalize();
+         }
+         catch (error) {
+             this.terminalError = error instanceof Error ? error : new Error(String(error));
+             this.finalize();
+         }
+     }
+     async processStream() {
+         var _a;
+         this.currentToolCalls.clear();
+         this.currentResponseText = '';
+         if (this.isStreaming) {
+             // Use streaming API
+             const stream = this.openai.responses.stream({
+                 model: this.model,
+                 instructions: this.instructions,
+                 input: this.currentInput,
+                 tools: this.tools,
+                 ...this.settings
+             });
+             for await (const event of stream) {
+                 if ((_a = this.cancellationToken) === null || _a === void 0 ? void 0 : _a.isCancellationRequested) {
+                     break;
+                 }
+                 await this.handleStreamEvent(event);
+             }
+         }
+         else {
+             // Use non-streaming API but yield results incrementally
+             await this.processNonStreamingResponse();
+         }
+     }
+     async processNonStreamingResponse() {
+         var _a;
+         const response = await this.openai.responses.create({
+             model: this.model,
+             instructions: this.instructions,
+             input: this.currentInput,
+             tools: this.tools,
+             ...this.settings
+         });
+         // Record token usage
+         if (response.usage) {
+             this.totalInputTokens += response.usage.input_tokens;
+             this.totalOutputTokens += response.usage.output_tokens;
+         }
+         // First, yield any text content from the response
+         this.currentResponseText = response.output_text || '';
+         if (this.currentResponseText) {
+             this.handleIncoming({ content: this.currentResponseText });
+         }
+         // Find function calls in the response
+         const functionCalls = ((_a = response.output) === null || _a === void 0 ? void 0 : _a.filter((item) => item.type === 'function_call')) || [];
+         // Process each function call
+         for (const functionCall of functionCalls) {
+             if (functionCall.id && functionCall.name) {
+                 const toolCall = {
+                     id: functionCall.id,
+                     call_id: functionCall.call_id || functionCall.id,
+                     name: functionCall.name,
+                     arguments: functionCall.arguments || '',
+                     executed: false
+                 };
+                 this.currentToolCalls.set(functionCall.id, toolCall);
+                 // Yield the tool call initiation
+                 this.handleIncoming({
+                     tool_calls: [{
+                         id: functionCall.id,
+                         finished: false,
+                         function: {
+                             name: functionCall.name,
+                             arguments: functionCall.arguments || ''
+                         }
+                     }]
+                 });
+             }
+         }
+     }
+     async handleStreamEvent(event) {
+         var _a, _b, _c;
+         switch (event.type) {
+             case 'response.output_text.delta':
+                 this.currentResponseText += event.delta;
+                 this.handleIncoming({ content: event.delta });
+                 break;
+             case 'response.output_item.added':
+                 if (((_a = event.item) === null || _a === void 0 ? void 0 : _a.type) === 'function_call') {
+                     this.handleFunctionCallAdded(event.item);
+                 }
+                 break;
+             case 'response.function_call_arguments.delta':
+                 this.handleFunctionCallArgsDelta(event);
+                 break;
+             case 'response.function_call_arguments.done':
+                 await this.handleFunctionCallArgsDone(event);
+                 break;
+             case 'response.output_item.done':
+                 if (((_b = event.item) === null || _b === void 0 ? void 0 : _b.type) === 'function_call') {
+                     this.handleFunctionCallDone(event.item);
+                 }
+                 break;
+             case 'response.completed':
+                 if ((_c = event.response) === null || _c === void 0 ? void 0 : _c.usage) {
+                     this.totalInputTokens += event.response.usage.input_tokens;
+                     this.totalOutputTokens += event.response.usage.output_tokens;
+                 }
+                 break;
+             case 'error':
+                 console.error('Response API error:', event.message);
+                 throw new Error(`Response API error: ${event.message}`);
+         }
+     }
+     handleFunctionCallAdded(functionCall) {
+         if (functionCall.id && functionCall.call_id) {
+             console.debug(`Function call added: ${functionCall.name} with id ${functionCall.id} and call_id ${functionCall.call_id}`);
+             const toolCall = {
+                 id: functionCall.id,
+                 call_id: functionCall.call_id,
+                 name: functionCall.name || '',
+                 arguments: functionCall.arguments || '',
+                 executed: false
+             };
+             this.currentToolCalls.set(functionCall.id, toolCall);
+             this.handleIncoming({
+                 tool_calls: [{
+                     id: functionCall.id,
+                     finished: false,
+                     function: {
+                         name: functionCall.name || '',
+                         arguments: functionCall.arguments || ''
+                     }
+                 }]
+             });
+         }
+     }
+     handleFunctionCallArgsDelta(event) {
+         const toolCall = this.currentToolCalls.get(event.item_id);
+         if (toolCall) {
+             toolCall.arguments += event.delta;
+             if (event.delta) {
+                 this.handleIncoming({
+                     tool_calls: [{
+                         id: event.item_id,
+                         function: {
+                             arguments: event.delta
+                         }
+                     }]
+                 });
+             }
+         }
+     }
+     async handleFunctionCallArgsDone(event) {
+         let toolCall = this.currentToolCalls.get(event.item_id);
+         if (!toolCall) {
+             // Create if we didn't see the added event
+             toolCall = {
+                 id: event.item_id,
+                 name: event.name || '',
+                 arguments: event.arguments || '',
+                 executed: false
+             };
+             this.currentToolCalls.set(event.item_id, toolCall);
+             this.handleIncoming({
+                 tool_calls: [{
+                     id: event.item_id,
+                     finished: false,
+                     function: {
+                         name: event.name || '',
+                         arguments: event.arguments || ''
+                     }
+                 }]
+             });
+         }
+         else {
+             // Update with final values
+             toolCall.name = event.name || toolCall.name;
+             toolCall.arguments = event.arguments || toolCall.arguments;
+         }
+     }
+     handleFunctionCallDone(functionCall) {
+         if (!functionCall.id) {
+             console.warn('Unexpected absence of ID for call ID', functionCall.call_id);
+             return;
+         }
+         const toolCall = this.currentToolCalls.get(functionCall.id);
+         if (toolCall && !toolCall.call_id && functionCall.call_id) {
+             toolCall.call_id = functionCall.call_id;
+         }
+     }
+     async executeToolCalls() {
+         var _a;
+         for (const [itemId, toolCall] of this.currentToolCalls) {
+             if (toolCall.executed) {
+                 continue;
+             }
+             const tool = (_a = this.request.tools) === null || _a === void 0 ? void 0 : _a.find(t => t.name === toolCall.name);
+             if (tool) {
+                 try {
+                     const result = await tool.handler(toolCall.arguments);
+                     toolCall.result = result;
+                     // Yield the tool call completion
+                     this.handleIncoming({
+                         tool_calls: [{
+                             id: itemId,
+                             finished: true,
+                             function: {
+                                 name: toolCall.name,
+                                 arguments: toolCall.arguments
+                             },
+                             result
+                         }]
+                     });
+                 }
+                 catch (error) {
+                     console.error(`Error executing tool ${toolCall.name}:`, error);
+                     toolCall.error = error instanceof Error ? error : new Error(String(error));
+                     const errorResult = {
+                         type: 'error',
+                         data: error instanceof Error ? error.message : String(error)
+                     };
+                     // Yield the tool call error
+                     this.handleIncoming({
+                         tool_calls: [{
+                             id: itemId,
+                             finished: true,
+                             function: {
+                                 name: toolCall.name,
+                                 arguments: toolCall.arguments
+                             },
+                             result: errorResult
+                         }]
+                     });
+                 }
+             }
+             else {
+                 console.warn(`Tool ${toolCall.name} not found in request tools`);
+                 toolCall.error = new Error(`Tool ${toolCall.name} not found`);
+                 const errorResult = {
+                     type: 'error',
+                     data: `Tool ${toolCall.name} not found`
+                 };
+                 // Yield the tool call error
+                 this.handleIncoming({
+                     tool_calls: [{
+                         id: itemId,
+                         finished: true,
+                         function: {
+                             name: toolCall.name,
+                             arguments: toolCall.arguments
+                         },
+                         result: errorResult
+                     }]
+                 });
+             }
+             toolCall.executed = true;
+         }
+     }
+     prepareNextIteration() {
+         // Add assistant response with the actual text that was streamed
+         const assistantMessage = {
+             role: 'assistant',
+             content: this.currentResponseText
+         };
+         // Add the function calls that were made by the assistant
+         const functionCalls = [];
+         for (const [itemId, toolCall] of this.currentToolCalls) {
+             functionCalls.push({
+                 type: 'function_call',
+                 call_id: toolCall.call_id || itemId,
+                 name: toolCall.name,
+                 arguments: toolCall.arguments
+             });
+         }
+         // Add tool results
+         const toolResults = [];
+         for (const [itemId, toolCall] of this.currentToolCalls) {
+             const callId = toolCall.call_id || itemId;
+             if (toolCall.result !== undefined) {
+                 const resultContent = typeof toolCall.result === 'string' ? toolCall.result : JSON.stringify(toolCall.result);
+                 toolResults.push({
+                     type: 'function_call_output',
+                     call_id: callId,
+                     output: resultContent
+                 });
+             }
+             else if (toolCall.error) {
+                 toolResults.push({
+                     type: 'function_call_output',
+                     call_id: callId,
+                     output: `Error: ${toolCall.error.message}`
+                 });
+             }
+         }
+         this.currentInput = [...this.currentInput, assistantMessage, ...functionCalls, ...toolResults];
+     }
+     handleIncoming(message) {
+         if (this.messageCache.length && this.requestQueue.length) {
+             throw new Error('Assertion error: cache and queue should not both be populated.');
+         }
+         if (this.requestQueue.length) {
+             this.requestQueue.shift().resolve({
+                 done: false,
+                 value: message
+             });
+         }
+         else {
+             this.messageCache.push(message);
+         }
+     }
+     async finalize() {
+         this.done = true;
+         // Record final token usage
+         if (this.tokenUsageService && (this.totalInputTokens > 0 || this.totalOutputTokens > 0)) {
+             try {
+                 await this.tokenUsageService.recordTokenUsage(this.modelId, {
+                     inputTokens: this.totalInputTokens,
+                     outputTokens: this.totalOutputTokens,
+                     requestId: this.request.requestId
+                 });
+             }
+             catch (error) {
+                 console.error('Error recording token usage:', error);
+             }
+         }
+         // Resolve any outstanding requests
+         if (this.terminalError) {
+             this.requestQueue.forEach(request => request.reject(this.terminalError));
+         }
+         else {
+             this.requestQueue.forEach(request => request.resolve({ done: true, value: undefined }));
+         }
+         this.requestQueue.length = 0;
+     }
+ }
+ function processSystemMessages(messages, developerMessageSettings) {
+     if (developerMessageSettings === 'skip') {
+         return messages.filter(message => message.actor !== 'system');
+     }
+     else if (developerMessageSettings === 'mergeWithFollowingUserMessage') {
+         const updated = messages.slice();
+         for (let i = updated.length - 1; i >= 0; i--) {
+             if (updated[i].actor === 'system') {
+                 const systemMessage = updated[i];
+                 if (i + 1 < updated.length && updated[i + 1].actor === 'user') {
+                     // Merge system message with the next user message
+                     const userMessage = updated[i + 1];
+                     updated[i + 1] = {
+                         ...updated[i + 1],
+                         text: systemMessage.text + '\n' + userMessage.text
+                     };
+                     updated.splice(i, 1);
+                 }
+                 else {
+                     // The message directly after is not a user message (or none exists), so create a new user message right after
+                     updated.splice(i + 1, 0, { actor: 'user', type: 'text', text: systemMessage.text });
+                     updated.splice(i, 1);
+                 }
+             }
+         }
+         return updated;
+     }
+     return messages;
+ }
+ exports.processSystemMessages = processSystemMessages;
+ //# sourceMappingURL=openai-response-api-utils.js.map
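
For orientation, the tool-calling cycle that ResponseApiToolCallIterator implements above reduces to: send the input with tools, collect the function_call items from the response output, run each tool's handler, append matching function_call_output items to the input, and repeat until the model stops requesting tools or maxIterations (defaulting to 100, as in the constructor) is reached. Below is a minimal non-streaming sketch of that loop, assuming the official openai SDK; runToolCallCycle and handlers are illustrative names, not part of this package.

    import OpenAI from 'openai';

    // Illustrative driver for the Responses API tool-calling cycle
    // (a sketch, not the actual @theia/ai-openai API surface).
    async function runToolCallCycle(
        openai: OpenAI,
        model: string,
        input: OpenAI.Responses.ResponseInputItem[],
        tools: OpenAI.Responses.Tool[],
        handlers: Map<string, (args: string) => Promise<unknown>>, // tool handlers keyed by name
        maxIterations = 100
    ): Promise<string> {
        for (let i = 0; i < maxIterations; i++) {
            const response = await openai.responses.create({ model, input, tools });
            // The Responses API reports requested tool calls as 'function_call' output items.
            const calls = response.output.filter(item => item.type === 'function_call');
            if (calls.length === 0) {
                return response.output_text; // no pending tool calls: the turn is complete
            }
            for (const call of calls) {
                // Echo each call and its result back so the model can continue next turn.
                input.push(call);
                const handler = handlers.get(call.name);
                const output = handler
                    ? JSON.stringify(await handler(call.arguments))
                    : `Error: Tool ${call.name} not found`; // mirrors the fallback in executeToolCalls
                input.push({ type: 'function_call_output', call_id: call.call_id, output });
            }
        }
        return ''; // give up after maxIterations, as the iterator above does
    }

The shipped iterator additionally streams text and argument deltas as they arrive, replays the assistant's streamed text as an input message between turns, and records accumulated token usage through tokenUsageService when the cycle finalizes.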