@just-every/ensemble 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127) hide show
  1. package/README.md +245 -0
  2. package/dist/cost_tracker.d.ts +2 -0
  3. package/dist/cost_tracker.d.ts.map +1 -0
  4. package/dist/cost_tracker.js +2 -0
  5. package/dist/cost_tracker.js.map +1 -0
  6. package/dist/errors.d.ts +55 -0
  7. package/dist/errors.d.ts.map +1 -0
  8. package/dist/errors.js +134 -0
  9. package/dist/errors.js.map +1 -0
  10. package/dist/external_models.d.ts +10 -0
  11. package/dist/external_models.d.ts.map +1 -0
  12. package/dist/external_models.js +36 -0
  13. package/dist/external_models.js.map +1 -0
  14. package/dist/index.d.ts +31 -0
  15. package/dist/index.d.ts.map +1 -0
  16. package/dist/index.js +47 -0
  17. package/dist/index.js.map +1 -0
  18. package/dist/model_data.d.ts +63 -0
  19. package/dist/model_data.d.ts.map +1 -0
  20. package/dist/model_data.js +1070 -0
  21. package/dist/model_data.js.map +1 -0
  22. package/dist/model_providers/base_provider.d.ts +37 -0
  23. package/dist/model_providers/base_provider.d.ts.map +1 -0
  24. package/dist/model_providers/base_provider.js +146 -0
  25. package/dist/model_providers/base_provider.js.map +1 -0
  26. package/dist/model_providers/claude.d.ts +11 -0
  27. package/dist/model_providers/claude.d.ts.map +1 -0
  28. package/dist/model_providers/claude.js +788 -0
  29. package/dist/model_providers/claude.js.map +1 -0
  30. package/dist/model_providers/deepseek.d.ts +8 -0
  31. package/dist/model_providers/deepseek.d.ts.map +1 -0
  32. package/dist/model_providers/deepseek.js +136 -0
  33. package/dist/model_providers/deepseek.js.map +1 -0
  34. package/dist/model_providers/gemini.d.ts +11 -0
  35. package/dist/model_providers/gemini.d.ts.map +1 -0
  36. package/dist/model_providers/gemini.js +711 -0
  37. package/dist/model_providers/gemini.js.map +1 -0
  38. package/dist/model_providers/grok.d.ts +8 -0
  39. package/dist/model_providers/grok.d.ts.map +1 -0
  40. package/dist/model_providers/grok.js +22 -0
  41. package/dist/model_providers/grok.js.map +1 -0
  42. package/dist/model_providers/model_provider.d.ts +11 -0
  43. package/dist/model_providers/model_provider.d.ts.map +1 -0
  44. package/dist/model_providers/model_provider.js +170 -0
  45. package/dist/model_providers/model_provider.js.map +1 -0
  46. package/dist/model_providers/openai.d.ts +13 -0
  47. package/dist/model_providers/openai.d.ts.map +1 -0
  48. package/dist/model_providers/openai.js +822 -0
  49. package/dist/model_providers/openai.js.map +1 -0
  50. package/dist/model_providers/openai_chat.d.ts +14 -0
  51. package/dist/model_providers/openai_chat.d.ts.map +1 -0
  52. package/dist/model_providers/openai_chat.js +719 -0
  53. package/dist/model_providers/openai_chat.js.map +1 -0
  54. package/dist/model_providers/openrouter.d.ts +6 -0
  55. package/dist/model_providers/openrouter.d.ts.map +1 -0
  56. package/dist/model_providers/openrouter.js +18 -0
  57. package/dist/model_providers/openrouter.js.map +1 -0
  58. package/dist/model_providers/refactored_openai.d.ts +22 -0
  59. package/dist/model_providers/refactored_openai.d.ts.map +1 -0
  60. package/dist/model_providers/refactored_openai.js +310 -0
  61. package/dist/model_providers/refactored_openai.js.map +1 -0
  62. package/dist/model_providers/test_provider.d.ts +27 -0
  63. package/dist/model_providers/test_provider.d.ts.map +1 -0
  64. package/dist/model_providers/test_provider.js +185 -0
  65. package/dist/model_providers/test_provider.js.map +1 -0
  66. package/dist/tsconfig.tsbuildinfo +1 -0
  67. package/dist/types/api_types.d.ts +249 -0
  68. package/dist/types/api_types.d.ts.map +1 -0
  69. package/dist/types/api_types.js +2 -0
  70. package/dist/types/api_types.js.map +1 -0
  71. package/dist/types/extended_types.d.ts +43 -0
  72. package/dist/types/extended_types.d.ts.map +1 -0
  73. package/dist/types/extended_types.js +2 -0
  74. package/dist/types/extended_types.js.map +1 -0
  75. package/dist/types.d.ts +301 -0
  76. package/dist/types.d.ts.map +1 -0
  77. package/dist/types.js +2 -0
  78. package/dist/types.js.map +1 -0
  79. package/dist/utils/async_queue.d.ts +14 -0
  80. package/dist/utils/async_queue.d.ts.map +1 -0
  81. package/dist/utils/async_queue.js +68 -0
  82. package/dist/utils/async_queue.js.map +1 -0
  83. package/dist/utils/cache.d.ts +60 -0
  84. package/dist/utils/cache.d.ts.map +1 -0
  85. package/dist/utils/cache.js +205 -0
  86. package/dist/utils/cache.js.map +1 -0
  87. package/dist/utils/communication.d.ts +3 -0
  88. package/dist/utils/communication.d.ts.map +1 -0
  89. package/dist/utils/communication.js +8 -0
  90. package/dist/utils/communication.js.map +1 -0
  91. package/dist/utils/cost_tracker.d.ts +26 -0
  92. package/dist/utils/cost_tracker.d.ts.map +1 -0
  93. package/dist/utils/cost_tracker.js +177 -0
  94. package/dist/utils/cost_tracker.js.map +1 -0
  95. package/dist/utils/delta_buffer.d.ts +14 -0
  96. package/dist/utils/delta_buffer.d.ts.map +1 -0
  97. package/dist/utils/delta_buffer.js +60 -0
  98. package/dist/utils/delta_buffer.js.map +1 -0
  99. package/dist/utils/image_to_text.d.ts +3 -0
  100. package/dist/utils/image_to_text.d.ts.map +1 -0
  101. package/dist/utils/image_to_text.js +81 -0
  102. package/dist/utils/image_to_text.js.map +1 -0
  103. package/dist/utils/image_utils.d.ts +18 -0
  104. package/dist/utils/image_utils.d.ts.map +1 -0
  105. package/dist/utils/image_utils.js +132 -0
  106. package/dist/utils/image_utils.js.map +1 -0
  107. package/dist/utils/llm_logger.d.ts +8 -0
  108. package/dist/utils/llm_logger.d.ts.map +1 -0
  109. package/dist/utils/llm_logger.js +24 -0
  110. package/dist/utils/llm_logger.js.map +1 -0
  111. package/dist/utils/quota_tracker.d.ts +22 -0
  112. package/dist/utils/quota_tracker.d.ts.map +1 -0
  113. package/dist/utils/quota_tracker.js +338 -0
  114. package/dist/utils/quota_tracker.js.map +1 -0
  115. package/dist/utils/stream_converter.d.ts +19 -0
  116. package/dist/utils/stream_converter.d.ts.map +1 -0
  117. package/dist/utils/stream_converter.js +172 -0
  118. package/dist/utils/stream_converter.js.map +1 -0
  119. package/dist/validation.d.ts +1789 -0
  120. package/dist/validation.d.ts.map +1 -0
  121. package/dist/validation.js +289 -0
  122. package/dist/validation.js.map +1 -0
  123. package/dist/vitest.config.d.ts +3 -0
  124. package/dist/vitest.config.d.ts.map +1 -0
  125. package/dist/vitest.config.js +34 -0
  126. package/dist/vitest.config.js.map +1 -0
  127. package/package.json +86 -0
@@ -0,0 +1,822 @@
1
+ import OpenAI, { toFile } from 'openai';
2
+ import { costTracker } from '@just-every/ensemble/cost_tracker';
3
+ import { log_llm_request, log_llm_response, log_llm_error, } from '../utils/llm_logger.js';
4
+ import { isPaused } from '../utils/communication.js';
5
+ import { extractBase64Image, resizeAndSplitForOpenAI, } from '../utils/image_utils.js';
6
+ import { bufferDelta, flushBufferedDeltas, } from '../utils/delta_buffer.js';
7
// Fixed viewport dimensions advertised to the computer-use tool
// (see convertToOpenAITools, which attaches these for 'computer-use-preview').
const BROWSER_WIDTH = 1024;
const BROWSER_HEIGHT = 1536;
9
/**
 * Create a fresh citation tracker used to deduplicate and number URL
 * citations encountered while streaming a response.
 *
 * @returns {{citations: Map<string, object>}} tracker whose `citations`
 *   map is keyed by URL; Map insertion order provides citation numbering.
 */
function createCitationTracker() {
    const citations = new Map();
    return { citations };
}
14
/**
 * Register a citation (first occurrence wins) and return its inline
 * reference marker, e.g. " [1]". Numbering follows the tracker Map's
 * insertion order, so repeat URLs reuse their original number.
 *
 * @param {{citations: Map<string, object>}} tracker - accumulator from createCitationTracker()
 * @param {{url: string, title: string}} citation - citation to record
 * @returns {string} marker of the form " [n]"
 */
function formatCitation(tracker, citation) {
    const { citations } = tracker;
    if (!citations.has(citation.url)) {
        citations.set(citation.url, citation);
    }
    const index = [...citations.keys()].indexOf(citation.url);
    return ` [${index + 1}]`;
}
20
/**
 * Render the accumulated citations as a numbered "References" footnote
 * section appended after two blank lines, or an empty string when no
 * citations were recorded.
 *
 * @param {{citations: Map<string, object>}} tracker - accumulator from createCitationTracker()
 * @returns {string} footnote block, or '' when the tracker is empty
 */
function generateFootnotes(tracker) {
    if (tracker.citations.size === 0) {
        return '';
    }
    const lines = [];
    let index = 0;
    for (const citation of tracker.citations.values()) {
        index += 1;
        lines.push(`[${index}] ${citation.title} – ${citation.url}`);
    }
    return `\n\nReferences:\n${lines.join('\n')}`;
}
28
/**
 * Deep-copy a JSON Schema and normalize it into the subset OpenAI accepts
 * for strict structured outputs / function parameters:
 *  - strips the non-standard `optional` flag,
 *  - rewrites `oneOf` to `anyOf`,
 *  - removes validation keywords OpenAI rejects (minimum, pattern, format, ...),
 *  - forces `additionalProperties: false` on object schemas and marks every
 *    declared property as required.
 *
 * When `originalProperties` (the pre-copy top-level `properties`) is given,
 * the top-level `required` list is recomputed from it, honouring the
 * original `optional` flags instead of the all-required default.
 *
 * The input schema is never mutated; a JSON round-trip clone is edited in
 * place (note: this silently drops undefined values / functions, which is
 * acceptable for JSON Schema data).
 *
 * @param {object} schema - JSON Schema to normalize
 * @param {object} [originalProperties] - original top-level properties map,
 *   used to restore optionality at the top level
 * @returns {object} a new, OpenAI-compatible schema
 */
function processSchemaForOpenAI(schema, originalProperties) {
    const processedSchema = JSON.parse(JSON.stringify(schema));
    // Keywords OpenAI's strict schema validator rejects; hoisted so the
    // list is built once rather than on every recursive visit.
    const unsupportedKeywords = [
        'minimum',
        'maximum',
        'minItems',
        'maxItems',
        'minLength',
        'maxLength',
        'pattern',
        'format',
        'multipleOf',
        'patternProperties',
        'unevaluatedProperties',
        'propertyNames',
        'minProperties',
        'maxProperties',
        'unevaluatedItems',
        'contains',
        'minContains',
        'maxContains',
        'uniqueItems',
        'default',
    ];
    // `node` (renamed from `schema` to avoid shadowing the outer parameter)
    // is mutated in place as we recurse.
    const processSchemaRecursively = (node) => {
        if (!node || typeof node !== 'object')
            return;
        // Non-standard marker used by this package; OpenAI rejects it.
        if (node.optional === true) {
            delete node.optional;
        }
        // OpenAI supports anyOf but not oneOf.
        if (Array.isArray(node.oneOf)) {
            node.anyOf = node.oneOf;
            delete node.oneOf;
        }
        unsupportedKeywords.forEach(keyword => {
            if (node[keyword] !== undefined) {
                delete node[keyword];
            }
        });
        // Treat a typeless schema with `properties` as an object schema.
        const isObject = node.type === 'object' ||
            (node.type === undefined && node.properties !== undefined);
        // Recurse into union variants before the object fix-ups below.
        for (const key of ['anyOf', 'allOf']) {
            if (Array.isArray(node[key])) {
                node[key].forEach((variantSchema) => processSchemaRecursively(variantSchema));
            }
        }
        if (isObject && node.properties) {
            for (const propName in node.properties) {
                processSchemaRecursively(node.properties[propName]);
            }
        }
        if (node.type === 'array' && node.items !== undefined) {
            if (Array.isArray(node.items)) {
                node.items.forEach((itemSchema) => processSchemaRecursively(itemSchema));
            }
            else if (typeof node.items === 'object') {
                processSchemaRecursively(node.items);
            }
        }
        if (isObject) {
            // Strict mode: closed objects, every declared property required.
            node.additionalProperties = false;
            if (node.properties) {
                const currentRequired = Object.keys(node.properties);
                if (currentRequired.length > 0) {
                    node.required = currentRequired;
                }
                else {
                    delete node.required;
                }
            }
            else {
                delete node.required;
            }
        }
    };
    processSchemaRecursively(processedSchema);
    // Restore top-level optionality from the caller-supplied original
    // properties (the recursion above marked everything required).
    if (originalProperties) {
        const topLevelRequired = [];
        for (const propName in originalProperties) {
            if (!originalProperties[propName].optional) {
                topLevelRequired.push(propName);
            }
        }
        if (topLevelRequired.length > 0) {
            processedSchema.required = topLevelRequired;
        }
        else {
            delete processedSchema.required;
        }
    }
    if (processedSchema.properties &&
        processedSchema.additionalProperties === undefined) {
        processedSchema.additionalProperties = false;
    }
    return processedSchema;
}
124
/**
 * Translate the package's tool definitions into OpenAI Responses API tool
 * entries and attach them to `requestParams` (which is mutated and also
 * returned for chaining).
 *
 * Special cases:
 *  - 'openai_web_search' becomes a web_search_preview tool and removes any
 *    reasoning config from the request.
 *  - the 'computer-use-preview' model additionally gets a computer-use tool
 *    sized to BROWSER_WIDTH x BROWSER_HEIGHT.
 *
 * @param {object} requestParams - request object being built (mutated)
 * @param {Array<object>} tools - tool definitions from the agent
 * @returns {object} the same requestParams, with tools and truncation set
 */
function convertToOpenAITools(requestParams, tools) {
    const converted = [];
    for (const tool of tools) {
        const fn = tool.definition.function;
        if (fn.name === 'openai_web_search') {
            // Web search is incompatible with an explicit reasoning config.
            delete requestParams.reasoning;
            converted.push({
                type: 'web_search_preview',
                search_context_size: 'high',
            });
            continue;
        }
        // Keep the pre-normalization properties so optional flags survive.
        const originalToolProperties = fn.parameters.properties;
        converted.push({
            type: 'function',
            name: fn.name,
            description: fn.description,
            parameters: processSchemaForOpenAI(fn.parameters, originalToolProperties),
            strict: true,
        });
    }
    requestParams.tools = converted;
    if (requestParams.model === 'computer-use-preview') {
        requestParams.tools.push({
            type: 'computer_use_preview',
            display_width: BROWSER_WIDTH,
            display_height: BROWSER_HEIGHT,
            environment: 'browser',
        });
    }
    requestParams.truncation = 'auto';
    return requestParams;
}
154
/**
 * Append user messages carrying the given images to the Responses API
 * input list. Each image is resized and split for OpenAI when possible;
 * if processing fails for an image, the raw image data is attached instead
 * and the error is logged.
 *
 * @param {Array<object>} input - Responses API input list (mutated and returned)
 * @param {Record<string, string>} images - map of image id to image data
 * @param {string} source - human-readable origin used in the intro text
 * @returns {Promise<Array<object>>} the same input array
 */
async function addImagesToInput(input, images, source) {
    for (const [image_id, imageData] of Object.entries(images)) {
        try {
            const segments = await resizeAndSplitForOpenAI(imageData);
            const intro = segments.length === 1
                ? `This is [image #${image_id}] from the ${source}`
                : `This is [image #${image_id}] from the ${source} (split into ${segments.length} parts, each up to 768px high)`;
            const content = [{ type: 'input_text', text: intro }];
            for (const segment of segments) {
                content.push({
                    type: 'input_image',
                    image_url: segment,
                    detail: 'high',
                });
            }
            input.push({ type: 'message', role: 'user', content });
        }
        catch (error) {
            // Best-effort fallback: log and send the unprocessed image.
            console.error(`Error processing image ${image_id}:`, error);
            input.push({
                type: 'message',
                role: 'user',
                content: [
                    {
                        type: 'input_text',
                        text: `This is [image #${image_id}] from the ${source} (raw image)`,
                    },
                    {
                        type: 'input_image',
                        image_url: imageData,
                        detail: 'high',
                    },
                ],
            });
        }
    }
    return input;
}
205
/**
 * Provider for OpenAI models: embeddings, image generation/editing, and
 * streamed Responses-API completions. Usage/costs are reported through the
 * imported `costTracker`, and requests/responses/errors are recorded via
 * the llm_logger helpers.
 *
 * NOTE(review): `costTracker` is imported from '@just-every/ensemble/cost_tracker'
 * — a self-referential package specifier, unlike the relative '../utils/...'
 * imports used elsewhere in this file. Verify it resolves for consumers.
 */
export class OpenAIProvider {
    // Underlying OpenAI SDK client.
    client;
    /**
     * @param {string} [apiKey] - API key; falls back to OPENAI_API_KEY.
     */
    constructor(apiKey) {
        this.client = new OpenAI({
            apiKey: apiKey || process.env.OPENAI_API_KEY,
        });
        // NOTE(review): `new` never yields a falsy value, so this guard is
        // effectively unreachable; a missing key surfaces later at call time.
        if (!this.client) {
            throw new Error('Failed to initialize OpenAI client. Make sure OPENAI_API_KEY is set.');
        }
    }
    /**
     * Create embedding(s) for a string or array of strings.
     *
     * @param {string} modelId - embedding model id
     * @param {string|string[]} input - text(s) to embed
     * @param {{dimensions?: number}} [opts] - defaults to 3072 dimensions
     * @returns {Promise<number[]|number[][]>} one vector for a single input,
     *   an array of vectors when input is an array with more than one entry
     * @throws rethrows any API error after logging it
     */
    async createEmbedding(modelId, input, opts) {
        try {
            const options = {
                model: modelId,
                input: input,
                encoding_format: 'float',
            };
            options.dimensions = opts?.dimensions || 3072;
            console.log(`[OpenAI] Generating embedding with model ${modelId}`);
            const response = await this.client.embeddings.create(options);
            // Fall back to a rough chars/4 token estimate when the API
            // omits usage data.
            const inputTokens = response.usage?.prompt_tokens ||
                (typeof input === 'string'
                    ? Math.ceil(input.length / 4)
                    : input.reduce((sum, text) => sum + Math.ceil(text.length / 4), 0));
            costTracker.addUsage({
                model: modelId,
                input_tokens: inputTokens,
                output_tokens: 0,
                metadata: {
                    dimensions: response.data[0]?.embedding.length ||
                        opts?.dimensions ||
                        1536,
                },
            });
            if (Array.isArray(input) && input.length > 1) {
                return response.data.map(item => item.embedding);
            }
            else {
                return response.data[0].embedding;
            }
        }
        catch (error) {
            console.error('[OpenAI] Error generating embedding:', error);
            throw error;
        }
    }
    /**
     * Generate images (images.generate) or edit source images (images.edit
     * when `source_images` is provided). Source images may be URLs, data
     * URLs, or bare base64 strings.
     *
     * @returns {Promise<string[]>} array of `data:image/png;base64,...` URLs
     * @throws when the API returns no image data, or on any API error
     */
    async generateImage(prompt, model = 'gpt-image-1', background = 'auto', quality = 'auto', size = 'auto', source_images, number_of_images = 1) {
        try {
            console.log(`[OpenAI] Generating ${number_of_images} image(s) with model ${model}, prompt: "${prompt.substring(0, 100)}${prompt.length > 100 ? '...' : ''}"`);
            let response;
            if (source_images) {
                console.log('[OpenAI] Using images.edit with source_images');
                const imageArray = Array.isArray(source_images)
                    ? source_images
                    : [source_images];
                const imageFiles = [];
                // Convert each source (URL or base64) into an SDK file object.
                for (const sourceImg of imageArray) {
                    let imageFile;
                    if (sourceImg.startsWith('http://') ||
                        sourceImg.startsWith('https://')) {
                        const imageResponse = await fetch(sourceImg);
                        const imageBuffer = await imageResponse.arrayBuffer();
                        imageFile = await toFile(new Uint8Array(imageBuffer), `image_${imageFiles.length}.png`, { type: 'image/png' });
                    }
                    else {
                        let base64Data = sourceImg;
                        // Strip a data-URL prefix if present.
                        if (sourceImg.startsWith('data:')) {
                            base64Data = sourceImg.split(',')[1];
                        }
                        const binaryData = Buffer.from(base64Data, 'base64');
                        imageFile = await toFile(new Uint8Array(binaryData), `image_${imageFiles.length}.png`, { type: 'image/png' });
                    }
                    imageFiles.push(imageFile);
                }
                response = await this.client.images.edit({
                    model,
                    prompt,
                    image: imageFiles,
                    n: number_of_images,
                    quality,
                    size,
                });
            }
            else {
                response = await this.client.images.generate({
                    model,
                    prompt,
                    n: number_of_images,
                    background,
                    quality,
                    size,
                    moderation: 'low',
                    output_format: 'png',
                });
            }
            // Image models bill per image, not per token.
            if (response.data && response.data.length > 0) {
                costTracker.addUsage({
                    model,
                    image_count: response.data.length,
                });
            }
            const imageDataUrls = response.data.map(item => {
                const imageData = item?.b64_json;
                if (!imageData) {
                    throw new Error('No image data returned from OpenAI');
                }
                return `data:image/png;base64,${imageData}`;
            });
            if (imageDataUrls.length === 0) {
                throw new Error('No images returned from OpenAI');
            }
            return imageDataUrls;
        }
        catch (error) {
            console.error('[OpenAI] Error generating image:', error);
            throw error;
        }
    }
    /**
     * Stream a completion via the OpenAI Responses API.
     *
     * Converts the package's message history into Responses-API input items
     * (rebuilding reasoning summaries, stripping foreign-model ids,
     * extracting inline base64 images), builds the request (temperature/
     * top_p, reasoning effort from an explicit `-low/-medium/-high` model
     * suffix or an `o*` model default, tool_choice, json_schema, tools),
     * then translates stream events into this package's event objects:
     * message_delta / message_complete (with thinking_content for reasoning
     * summaries), tool_start, and error.
     *
     * @param {string} model - model id, optionally with an effort suffix
     * @param {Array<object>} messages - conversation history
     * @param {object} [agent] - supplies getTools(), modelSettings, agent_id
     * @yields provider-neutral stream event objects
     */
    async *createResponseStream(model, messages, agent) {
        const tools = agent
            ? await agent.getTools()
            : [];
        const settings = agent?.modelSettings;
        let requestId;
        try {
            let input = [];
            // --- Convert message history into Responses API input items ---
            for (const messageFull of messages) {
                // Work on a copy; transport-only fields are dropped.
                let message = { ...messageFull };
                const originalModel = message
                    .model;
                delete message.timestamp;
                delete message.model;
                if (message.type === 'thinking') {
                    // Only replay reasoning items to the same o-series model
                    // that produced them; otherwise degrade to plain text.
                    if (model.startsWith('o') &&
                        message.thinking_id &&
                        model === originalModel) {
                        console.log(`[OpenAI] Processing thinking message with ID: ${message.thinking_id}`, message);
                        // thinking_id format: "<rs_...>-<summaryIndex>"
                        const match = message.thinking_id.match(/^(rs_[A-Za-z0-9]+)-(\d)$/);
                        if (match) {
                            const reasoningId = match[1];
                            const summaryIndex = parseInt(match[2], 10);
                            const summaryText = typeof message.content === 'string'
                                ? message.content
                                : JSON.stringify(message.content);
                            const summaryEntry = {
                                type: 'summary_text',
                                text: summaryText,
                            };
                            // Merge summaries belonging to one reasoning item.
                            const existingIndex = input.findIndex((item) => item.type === 'reasoning' &&
                                item.id === reasoningId);
                            if (existingIndex !== -1) {
                                const existingItem = input[existingIndex];
                                if (!existingItem.summary) {
                                    existingItem.summary = [];
                                }
                                existingItem.summary[summaryIndex] =
                                    summaryEntry;
                                input[existingIndex] = existingItem;
                            }
                            else {
                                const newItem = {
                                    type: 'reasoning',
                                    id: reasoningId,
                                    summary: [],
                                };
                                newItem.summary[summaryIndex] = summaryEntry;
                                input.push(newItem);
                            }
                            continue;
                        }
                    }
                    // Fallback: surface the thinking text as a user message.
                    input.push({
                        type: 'message',
                        role: 'user',
                        content: 'Thinking: ' + message.content,
                        status: message.status || 'completed',
                    });
                    continue;
                }
                if (message.type === 'function_call') {
                    // Drop ids that did not originate from this model's
                    // Responses API (must be fc_* from the same model).
                    if (message.id &&
                        (!message.id.startsWith('fc_') ||
                            model !== originalModel)) {
                        const { id, ...rest } = message;
                        message = rest;
                    }
                    message.status = message.status || 'completed';
                    input.push(message);
                    continue;
                }
                if (message.type === 'function_call_output') {
                    // `name` and `id` are not valid on Responses API outputs.
                    const { name, id, ...messageToAdd } = message;
                    if (typeof message.output === 'string') {
                        // Inline base64 images are lifted out into separate
                        // image messages.
                        const extracted = extractBase64Image(message.output);
                        if (extracted.found) {
                            input.push({
                                ...messageToAdd,
                                output: extracted.replaceContent,
                            });
                            input = await addImagesToInput(input, extracted.images, `function call output of ${message.name}`);
                        }
                        else {
                            input.push(messageToAdd);
                        }
                    }
                    else {
                        input.push(messageToAdd);
                    }
                    continue;
                }
                if ((message.type ?? 'message') === 'message' &&
                    'content' in message) {
                    // Same id policy as function calls, but for msg_* ids.
                    if ('id' in message &&
                        message.id &&
                        (!message.id.startsWith('msg_') ||
                            model !== originalModel)) {
                        const { id, ...rest } = message;
                        message = rest;
                        console.log(`[OpenAI] Removed message ID: ${id} model: ${model} originalModel: ${originalModel}`);
                    }
                    if (typeof message.content === 'string') {
                        const extracted = extractBase64Image(message.content);
                        if (extracted.found) {
                            input.push({
                                ...message,
                                type: 'message',
                                content: extracted.replaceContent,
                            });
                            input = await addImagesToInput(input, extracted.images, `${message.role} message`);
                        }
                        else {
                            // Ensure an explicit type for bare role/content
                            // messages.
                            if (message.type === undefined &&
                                'role' in message &&
                                'content' in message) {
                                input.push({ ...message, type: 'message' });
                            }
                            else {
                                input.push(message);
                            }
                        }
                    }
                    else {
                        if (message.type === undefined &&
                            'role' in message &&
                            'content' in message) {
                            input.push({ ...message, type: 'message' });
                        }
                        else {
                            input.push(message);
                        }
                    }
                    continue;
                }
            }
            // --- Build the request parameters ---
            let requestParams = {
                model,
                stream: true,
                user: 'magi',
                input,
            };
            // o3-family models reject sampling parameters.
            if (!model.startsWith('o3-')) {
                if (settings?.temperature !== undefined) {
                    requestParams.temperature = settings.temperature;
                }
                if (settings?.top_p !== undefined) {
                    requestParams.top_p = settings.top_p;
                }
            }
            // A "-low"/"-medium"/"-high" model suffix selects reasoning
            // effort and is stripped from the model id sent to the API.
            const REASONING_EFFORT_CONFIGS = [
                'low',
                'medium',
                'high',
            ];
            let hasEffortSuffix = false;
            for (const effort of REASONING_EFFORT_CONFIGS) {
                const suffix = `-${effort}`;
                if (model.endsWith(suffix)) {
                    hasEffortSuffix = true;
                    requestParams.reasoning = {
                        effort: effort,
                        summary: 'auto',
                    };
                    model = model.slice(0, -suffix.length);
                    requestParams.model = model;
                    break;
                }
            }
            // o-series models default to high effort when no suffix given.
            if (model.startsWith('o') && !hasEffortSuffix) {
                requestParams.reasoning = {
                    effort: 'high',
                    summary: 'auto',
                };
            }
            if (settings?.tool_choice) {
                // Responses API flattens {type, function:{name}} into
                // {type, name}.
                if (typeof settings.tool_choice === 'object' &&
                    settings.tool_choice?.type === 'function' &&
                    settings.tool_choice?.function?.name) {
                    requestParams.tool_choice = {
                        type: settings.tool_choice.type,
                        name: settings.tool_choice.function.name,
                    };
                }
                else if (typeof settings.tool_choice === 'string') {
                    requestParams.tool_choice = settings.tool_choice;
                }
            }
            if (settings?.json_schema?.schema) {
                const { schema, ...wrapperWithoutSchema } = settings.json_schema;
                requestParams.text = {
                    format: {
                        ...wrapperWithoutSchema,
                        schema: processSchemaForOpenAI(schema),
                    },
                };
            }
            if (tools && tools.length > 0) {
                requestParams = convertToOpenAITools(requestParams, tools);
            }
            // NOTE(review): `agent` may be undefined here (see the `tools`
            // default above); `agent.agent_id` would then throw and be
            // reported via the outer catch. Confirm callers always pass an
            // agent.
            requestId = log_llm_request(agent.agent_id, 'openai', model, requestParams);
            const stream = await this.client.responses.create(requestParams);
            // Per-item streaming state:
            //   messagePositions    - next `order` for each text item
            //   reasoningPositions  - next `order` for each reasoning summary
            //   reasoningAggregates - accumulated reasoning summary text
            //   deltaBuffers        - coalescing buffers for text deltas
            //   toolCallStates      - partially-received function calls
            //   events              - raw event log for log_llm_response
            const messagePositions = new Map();
            const reasoningPositions = new Map();
            const reasoningAggregates = new Map();
            const deltaBuffers = new Map();
            const citationTracker = createCitationTracker();
            const toolCallStates = new Map();
            const events = [];
            try {
                // --- Translate stream events into package events ---
                for await (const event of stream) {
                    events.push(event);
                    if (isPaused()) {
                        console.log(`[OpenAI] System paused during stream for model ${model}. Aborting processing.`);
                        yield {
                            type: 'message_delta',
                            content: '\n⏸️ Stream paused by user.',
                            message_id: 'pause-notification-stream',
                            order: 999,
                        };
                        break;
                    }
                    if (event.type === 'response.in_progress') {
                        // No-op: progress heartbeat.
                    }
                    else if (event.type === 'response.completed' &&
                        event.response?.usage) {
                        costTracker.addUsage({
                            model,
                            input_tokens: event.response.usage.input_tokens || 0,
                            output_tokens: event.response.usage.output_tokens || 0,
                            cached_tokens: event.response.usage.input_tokens_details
                                ?.cached_tokens || 0,
                            metadata: {
                                reasoning_tokens: event.response.usage.output_tokens_details
                                    ?.reasoning_tokens || 0,
                            },
                        });
                    }
                    else if (event.type === 'response.failed' &&
                        event.response?.error) {
                        const errorInfo = event.response.error;
                        log_llm_error(requestId, errorInfo);
                        console.error(`Response ${event.response.id} failed: [${errorInfo.code}] ${errorInfo.message}`);
                        yield {
                            type: 'error',
                            error: `OpenAI response failed: [${errorInfo.code}] ${errorInfo.message}`,
                        };
                    }
                    else if (event.type === 'response.incomplete' &&
                        event.response?.incomplete_details) {
                        const reason = event.response.incomplete_details.reason;
                        log_llm_error(requestId, 'OpenAI response incomplete: ' + reason);
                        console.warn(`Response ${event.response.id} incomplete: ${reason}`);
                        yield {
                            type: 'error',
                            error: 'OpenAI response incomplete: ' + reason,
                        };
                    }
                    else if (event.type === 'response.output_item.added' &&
                        event.item) {
                        // Begin tracking a new function call's arguments.
                        if (event.item.type === 'function_call') {
                            if (!toolCallStates.has(event.item.id)) {
                                toolCallStates.set(event.item.id, {
                                    id: event.item.id,
                                    call_id: event.item.call_id,
                                    type: 'function',
                                    function: {
                                        name: event.item.name || '',
                                        arguments: '',
                                    },
                                });
                            }
                            else {
                                console.warn(`Received output_item.added for already tracked function call ID: ${event.item.id}`);
                            }
                        }
                    }
                    else if (event.type === 'response.output_item.done' &&
                        event.item) {
                        // Emit a placeholder completion for empty reasoning
                        // items so downstream consumers see closure.
                        if (event.item.type === 'reasoning' &&
                            !event.item.summary.length) {
                            yield {
                                type: 'message_complete',
                                content: '',
                                message_id: event.item.id + '-0',
                                thinking_content: '{empty}',
                            };
                        }
                    }
                    else if (event.type === 'response.content_part.added' &&
                        event.part) {
                        // No-op.
                    }
                    else if (event.type === 'response.content_part.done' &&
                        event.part) {
                        // No-op.
                    }
                    else if (event.type === 'response.output_text.delta' &&
                        event.delta) {
                        // Buffer small deltas; yield coalesced chunks in order.
                        const itemId = event.item_id;
                        let position = messagePositions.get(itemId) ?? 0;
                        for (const ev of bufferDelta(deltaBuffers, itemId, event.delta, content => ({
                            type: 'message_delta',
                            content,
                            message_id: itemId,
                            order: position++,
                        }))) {
                            yield ev;
                        }
                        messagePositions.set(itemId, position);
                    }
                    else if (event.type ===
                        'response.output_text.annotation.added' &&
                        event.annotation) {
                        const eventData = event;
                        // URL citations become inline " [n]" markers.
                        if (eventData.annotation?.type === 'url_citation' &&
                            eventData.annotation.url) {
                            const marker = formatCitation(citationTracker, {
                                title: eventData.annotation.title ||
                                    eventData.annotation.url,
                                url: eventData.annotation.url,
                            });
                            let position = messagePositions.get(eventData.item_id) ?? 0;
                            yield {
                                type: 'message_delta',
                                content: marker,
                                message_id: eventData.item_id,
                                order: position++,
                            };
                            messagePositions.set(eventData.item_id, position);
                        }
                        else {
                            console.log('Annotation added:', eventData.annotation);
                        }
                    }
                    else if (event.type === 'response.output_text.done' &&
                        event.text !== undefined) {
                        const itemId = event.item_id;
                        let finalText = event.text;
                        // Append the references section when citations exist.
                        if (citationTracker.citations.size > 0) {
                            const footnotes = generateFootnotes(citationTracker);
                            finalText += footnotes;
                        }
                        yield {
                            type: 'message_complete',
                            content: finalText,
                            message_id: itemId,
                        };
                        messagePositions.delete(itemId);
                    }
                    else if (event.type === 'response.refusal.delta' &&
                        event.delta) {
                        console.log(`Refusal delta for item ${event.item_id}: ${event.delta}`);
                    }
                    else if (event.type === 'response.refusal.done' &&
                        event.refusal) {
                        log_llm_error(requestId, 'OpenAI refusal error: ' + event.refusal);
                        console.log(`Refusal done for item ${event.item_id}: ${event.refusal}`);
                        yield {
                            type: 'error',
                            error: 'OpenAI refusal error: ' + event.refusal,
                        };
                    }
                    else if (event.type ===
                        'response.function_call_arguments.delta' &&
                        event.delta) {
                        // Accumulate argument JSON for a tracked call.
                        const currentCall = toolCallStates.get(event.item_id);
                        if (currentCall) {
                            currentCall.function.arguments += event.delta;
                        }
                        else {
                            console.warn(`Received function_call_arguments.delta for unknown item_id: ${event.item_id}`);
                        }
                    }
                    else if (event.type ===
                        'response.function_call_arguments.done' &&
                        event.arguments !== undefined) {
                        const currentCall = toolCallStates.get(event.item_id);
                        if (currentCall) {
                            // Final arguments replace the accumulated deltas.
                            currentCall.function.arguments = event.arguments;
                            yield {
                                type: 'tool_start',
                                tool_calls: [currentCall],
                            };
                            toolCallStates.delete(event.item_id);
                        }
                        else {
                            console.warn(`Received function_call_arguments.done for unknown or already yielded item_id: ${event.item_id}`);
                        }
                    }
                    else if (event.type === 'response.file_search_call.in_progress') {
                        console.log(`File search in progress for item ${event.item_id}...`);
                    }
                    else if (event.type === 'response.file_search_call.searching') {
                        console.log(`File search searching for item ${event.item_id}...`);
                    }
                    else if (event.type === 'response.file_search_call.completed') {
                        console.log(`File search completed for item ${event.item_id}.`);
                    }
                    else if (event.type === 'response.web_search_call.in_progress') {
                        console.log(`Web search in progress for item ${event.item_id}...`);
                    }
                    else if (event.type === 'response.web_search_call.searching') {
                        console.log(`Web search searching for item ${event.item_id}...`);
                    }
                    else if (event.type === 'response.web_search_call.completed') {
                        console.log(`Web search completed for item ${event.item_id}.`);
                    }
                    else if (event.type === 'response.reasoning_summary_part.added') {
                        console.log(`Reasoning summary part added for item ${event.item_id}, index ${event.summary_index}`);
                    }
                    else if (event.type === 'response.reasoning_summary_part.done') {
                        console.log(`Reasoning summary part done for item ${event.item_id}, index ${event.summary_index}`);
                    }
                    else if (event.type ===
                        'response.reasoning_summary_text.delta' &&
                        event.delta) {
                        // Reasoning summaries stream as thinking_content,
                        // keyed by "<item_id>-<summary_index>".
                        const itemId = event.item_id + '-' + event.summary_index;
                        if (!reasoningPositions.has(itemId)) {
                            reasoningPositions.set(itemId, 0);
                            reasoningAggregates.set(itemId, '');
                        }
                        const position = reasoningPositions.get(itemId);
                        reasoningAggregates.set(itemId, reasoningAggregates.get(itemId) + event.delta);
                        yield {
                            type: 'message_delta',
                            content: '',
                            message_id: itemId,
                            thinking_content: event.delta,
                            order: position,
                        };
                        reasoningPositions.set(itemId, position + 1);
                    }
                    else if (event.type === 'response.reasoning_summary_text.done' &&
                        event.text !== undefined) {
                        const itemId = event.item_id + '-' + event.summary_index;
                        const aggregatedThinking = reasoningAggregates.get(itemId) ?? event.text;
                        yield {
                            type: 'message_complete',
                            content: '',
                            message_id: itemId,
                            thinking_content: aggregatedThinking,
                        };
                        reasoningPositions.delete(itemId);
                        reasoningAggregates.delete(itemId);
                    }
                    else if (event.type === 'error' && event.message) {
                        log_llm_error(requestId, event);
                        console.error(`API Stream Error (${model}): [${event.code || 'N/A'}] ${event.message}`);
                        yield {
                            type: 'error',
                            error: `OpenAI API error (${model}): [${event.code || 'N/A'}] ${event.message}`,
                        };
                    }
                }
            }
            catch (streamError) {
                log_llm_error(requestId, streamError);
                console.error('Error processing response stream:', streamError);
                yield {
                    type: 'error',
                    error: `OpenAI stream request error (${model}): ${streamError}`,
                };
            }
            finally {
                // Flush any function calls the stream never finalized.
                if (toolCallStates.size > 0) {
                    console.warn(`Stream ended with ${toolCallStates.size} incomplete tool call(s).`);
                    for (const [, toolCall] of toolCallStates.entries()) {
                        if (toolCall.function.name) {
                            yield {
                                type: 'tool_start',
                                tool_calls: [toolCall],
                            };
                        }
                    }
                    toolCallStates.clear();
                }
                // Flush any text still sitting in the delta buffers.
                for (const ev of flushBufferedDeltas(deltaBuffers, (id, content) => ({
                    type: 'message_delta',
                    content,
                    message_id: id,
                    order: messagePositions.get(id) ?? 0,
                }))) {
                    yield ev;
                }
                messagePositions.clear();
                log_llm_response(requestId, events);
            }
        }
        catch (error) {
            log_llm_error(requestId, error);
            console.error('Error in OpenAI streaming response:', error);
            yield {
                type: 'error',
                error: 'OpenAI streaming error: ' +
                    (error instanceof Error ? error.stack : String(error)),
            };
        }
    }
}
821
// Shared default provider instance; reads OPENAI_API_KEY from the environment.
export const openaiProvider = new OpenAIProvider();
822
+ //# sourceMappingURL=openai.js.map