@smythos/sre 1.5.36 → 1.5.39

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74):
  1. package/dist/index.js +64 -46
  2. package/dist/index.js.map +1 -1
  3. package/dist/types/Components/DataSourceLookup.class.d.ts +1 -0
  4. package/dist/types/Components/ECMASandbox.class.d.ts +14 -0
  5. package/dist/types/Components/index.d.ts +2 -0
  6. package/dist/types/Core/ConnectorsService.d.ts +2 -1
  7. package/dist/types/helpers/ECMASandbox.helper.d.ts +3 -0
  8. package/dist/types/helpers/Log.helper.d.ts +1 -1
  9. package/dist/types/index.d.ts +4 -1
  10. package/dist/types/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.d.ts +19 -0
  11. package/dist/types/subsystems/LLMManager/LLM.helper.d.ts +21 -10
  12. package/dist/types/subsystems/LLMManager/LLM.service/LLMConnector.d.ts +5 -5
  13. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.d.ts +2 -3
  14. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.d.ts +2 -3
  15. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Echo.class.d.ts +2 -3
  16. package/dist/types/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.d.ts +2 -3
  17. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Groq.class.d.ts +2 -3
  18. package/dist/types/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.d.ts +3 -4
  19. package/dist/types/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.d.ts +15 -7
  20. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +95 -0
  21. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.d.ts +87 -0
  22. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.d.ts +85 -0
  23. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.d.ts +49 -0
  24. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.d.ts +146 -0
  25. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.d.ts +10 -0
  26. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.d.ts +4 -0
  27. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/types.d.ts +38 -0
  28. package/dist/types/subsystems/LLMManager/LLM.service/connectors/xAI.class.d.ts +1 -2
  29. package/dist/types/subsystems/Security/Vault.service/connectors/JSONFileVault.class.d.ts +5 -0
  30. package/dist/types/types/LLM.types.d.ts +82 -37
  31. package/dist/types/utils/data.utils.d.ts +2 -1
  32. package/package.json +4 -3
  33. package/src/Components/APICall/APICall.class.ts +2 -1
  34. package/src/Components/Component.class.ts +1 -1
  35. package/src/Components/DataSourceLookup.class.ts +29 -10
  36. package/src/Components/ECMASandbox.class.ts +71 -0
  37. package/src/Components/GenAILLM.class.ts +1 -1
  38. package/src/Components/ServerlessCode.class.ts +2 -1
  39. package/src/Components/index.ts +2 -0
  40. package/src/Core/ConnectorsService.ts +3 -3
  41. package/src/helpers/ECMASandbox.helper.ts +54 -0
  42. package/src/helpers/Log.helper.ts +57 -17
  43. package/src/index.ts +188 -185
  44. package/src/index.ts.bak +188 -185
  45. package/src/subsystems/AgentManager/Agent.class.ts +11 -6
  46. package/src/subsystems/AgentManager/AgentRuntime.class.ts +13 -13
  47. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -0
  48. package/src/subsystems/ComputeManager/Code.service/index.ts +2 -0
  49. package/src/subsystems/LLMManager/LLM.helper.ts +57 -27
  50. package/src/subsystems/LLMManager/LLM.inference.ts +4 -0
  51. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +123 -28
  52. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +13 -14
  53. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +2 -7
  54. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +2 -6
  55. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +8 -14
  56. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +2 -7
  57. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +2 -7
  58. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +121 -9
  59. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +455 -0
  60. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +528 -0
  61. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -0
  62. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -0
  63. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +853 -0
  64. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +37 -0
  65. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -0
  66. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +37 -0
  67. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +0 -5
  68. package/src/subsystems/LLMManager/LLM.service/index.ts +1 -1
  69. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +18 -0
  70. package/src/subsystems/MemoryManager/RuntimeContext.ts +33 -16
  71. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +68 -42
  72. package/src/types/LLM.types.ts +91 -43
  73. package/src/utils/data.utils.ts +3 -2
  74. package/src/subsystems/LLMManager/LLM.service/connectors/OpenAI.class.ts +0 -848
@@ -0,0 +1,528 @@
1
+ import EventEmitter from 'events';
2
+ import OpenAI from 'openai';
3
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
4
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
5
+ import {
6
+ TLLMParams,
7
+ TLLMPreparedParams,
8
+ ILLMRequestContext,
9
+ ToolData,
10
+ TLLMMessageRole,
11
+ APIKeySource,
12
+ TLLMEvent,
13
+ OpenAIToolDefinition,
14
+ LegacyToolDefinition,
15
+ } from '@sre/types/LLM.types';
16
+ import { OpenAIApiInterface, ToolConfig } from './OpenAIApiInterface';
17
+ import { HandlerDependencies } from '../types';
18
+ import { JSON_RESPONSE_INSTRUCTION, SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
19
+ import {
20
+ MODELS_WITHOUT_PRESENCE_PENALTY_SUPPORT,
21
+ MODELS_WITHOUT_TEMPERATURE_SUPPORT,
22
+ MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT,
23
+ MODELS_WITHOUT_JSON_RESPONSE_SUPPORT,
24
+ } from './constants';
25
+
26
+ // File size limits in bytes
27
+ const MAX_IMAGE_SIZE = 20 * 1024 * 1024; // 20MB
28
+ const MAX_DOCUMENT_SIZE = 25 * 1024 * 1024; // 25MB
29
+
30
+ /**
31
+ * OpenAI Chat Completions API interface implementation
32
+ * Handles all Chat Completions API-specific logic including:
33
+ * - Stream creation and handling
34
+ * - Request body preparation
35
+ * - Tool and message transformations
36
+ * - File attachment handling
37
+ */
38
+ export class ChatCompletionsApiInterface extends OpenAIApiInterface {
39
+ private deps: HandlerDependencies;
40
+ private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.image;
41
+ private validDocumentMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.document;
42
+
43
+ constructor(context: ILLMRequestContext, deps: HandlerDependencies) {
44
+ super(context);
45
+ this.deps = deps;
46
+ }
47
+
48
+ public async createRequest(body: OpenAI.ChatCompletionCreateParams, context: ILLMRequestContext): Promise<OpenAI.ChatCompletion> {
49
+ const openai = await this.deps.getClient(context);
50
+ return await openai.chat.completions.create({
51
+ ...body,
52
+ stream: false,
53
+ });
54
+ }
55
+
56
+ public async createStream(
57
+ body: OpenAI.ChatCompletionCreateParams,
58
+ context: ILLMRequestContext
59
+ ): Promise<AsyncIterable<OpenAI.ChatCompletionChunk>> {
60
+ const openai = await this.deps.getClient(context);
61
+ return await openai.chat.completions.create({
62
+ ...body,
63
+ stream: true,
64
+ stream_options: { include_usage: true },
65
+ });
66
+ }
67
+
68
+ public handleStream(stream: AsyncIterable<OpenAI.ChatCompletionChunk>, context: ILLMRequestContext): EventEmitter {
69
+ const emitter = new EventEmitter();
70
+ const usage_data: OpenAI.Completions.CompletionUsage[] = [];
71
+ const reportedUsage: any[] = [];
72
+ let finishReason = 'stop';
73
+
74
+ // Process stream asynchronously while returning emitter immediately
75
+ (async () => {
76
+ let finalToolsData: ToolData[] = [];
77
+
78
+ try {
79
+ // Step 1: Process the stream
80
+ const streamResult = await this.processStream(stream, emitter, usage_data);
81
+ finalToolsData = streamResult.toolsData;
82
+ finishReason = streamResult.finishReason;
83
+
84
+ // Step 2: Report usage statistics
85
+ this.reportUsageStatistics(usage_data, context, reportedUsage);
86
+
87
+ // Step 3: Emit final events
88
+ this.emitFinalEvents(emitter, finalToolsData, reportedUsage, finishReason);
89
+ } catch (error) {
90
+ emitter.emit('error', error);
91
+ }
92
+ })();
93
+
94
+ return emitter;
95
+ }
96
+
97
+ public async prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.ChatCompletionCreateParams> {
98
+ let messages = await this.prepareMessages(params);
99
+
100
+ // Convert system messages for models that don't support them
101
+ if (MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT.includes(params.modelEntryName)) {
102
+ messages = this.convertSystemMessagesToUserMessages(messages);
103
+ }
104
+
105
+ // Handle JSON response format
106
+ if (params.responseFormat === 'json') {
107
+ const supportsSystemMessages = !MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT.includes(params.modelEntryName);
108
+
109
+ if (supportsSystemMessages) {
110
+ // For models that support system messages
111
+ if (messages?.[0]?.role === TLLMMessageRole.System) {
112
+ messages[0] = { ...messages[0], content: messages[0].content + JSON_RESPONSE_INSTRUCTION };
113
+ } else {
114
+ messages.unshift({ role: TLLMMessageRole.System, content: JSON_RESPONSE_INSTRUCTION });
115
+ }
116
+ } else {
117
+ // For models that don't support system messages, prepend to first user message
118
+ const firstUserMessageIndex = messages.findIndex((msg) => msg.role === TLLMMessageRole.User);
119
+ if (firstUserMessageIndex !== -1) {
120
+ const userMessage = messages[firstUserMessageIndex];
121
+ const content = typeof userMessage.content === 'string' ? userMessage.content : '';
122
+ messages[firstUserMessageIndex] = {
123
+ ...userMessage,
124
+ content: JSON_RESPONSE_INSTRUCTION + '\n\n' + content,
125
+ };
126
+ } else {
127
+ // If no user message exists, create one with the instruction
128
+ messages.push({ role: TLLMMessageRole.User, content: JSON_RESPONSE_INSTRUCTION });
129
+ }
130
+ }
131
+
132
+ params.responseFormat = { type: 'json_object' };
133
+ }
134
+
135
+ const body: OpenAI.ChatCompletionCreateParams = {
136
+ model: params.model as string,
137
+ messages,
138
+ };
139
+
140
+ // Handle max tokens
141
+ if (params?.maxTokens !== undefined) {
142
+ body.max_completion_tokens = params.maxTokens;
143
+ }
144
+
145
+ // Handle temperature
146
+ if (params?.temperature !== undefined && !MODELS_WITHOUT_TEMPERATURE_SUPPORT.includes(params.modelEntryName)) {
147
+ body.temperature = params.temperature;
148
+ }
149
+
150
+ // Handle topP
151
+ if (params?.topP !== undefined) {
152
+ body.top_p = params.topP;
153
+ }
154
+
155
+ // Handle frequency penalty
156
+ if (params?.frequencyPenalty !== undefined) {
157
+ body.frequency_penalty = params.frequencyPenalty;
158
+ }
159
+
160
+ // Handle presence penalty
161
+ if (params?.presencePenalty !== undefined && !MODELS_WITHOUT_PRESENCE_PENALTY_SUPPORT.includes(params.modelEntryName)) {
162
+ body.presence_penalty = params.presencePenalty;
163
+ }
164
+
165
+ // Handle response format
166
+ if (params?.responseFormat?.type && !MODELS_WITHOUT_JSON_RESPONSE_SUPPORT.includes(params.modelEntryName)) {
167
+ body.response_format = params.responseFormat;
168
+ }
169
+
170
+ // Handle stop sequences
171
+ if (params?.stopSequences?.length) {
172
+ body.stop = params.stopSequences;
173
+ }
174
+
175
+ // Handle tools configuration
176
+ if (params?.toolsConfig?.tools && params?.toolsConfig?.tools?.length > 0) {
177
+ body.tools = params?.toolsConfig?.tools as OpenAI.ChatCompletionTool[];
178
+ body.tool_choice = params?.toolsConfig?.tool_choice;
179
+ }
180
+
181
+ return body;
182
+ }
183
+
184
+ /**
185
+ * Type guard to check if a tool is an OpenAI tool definition
186
+ */
187
+ private isOpenAIToolDefinition(tool: OpenAIToolDefinition | LegacyToolDefinition): tool is OpenAIToolDefinition {
188
+ return 'parameters' in tool;
189
+ }
190
+
191
+ /**
192
+ * Transform OpenAI tool definitions to ChatCompletionTool format
193
+ */
194
+ public transformToolsConfig(config: ToolConfig): OpenAI.ChatCompletionTool[] {
195
+ return config.toolDefinitions.map((tool) => {
196
+ // Handle OpenAI tool definition format
197
+ if (this.isOpenAIToolDefinition(tool)) {
198
+ return {
199
+ type: 'function',
200
+ function: {
201
+ name: tool.name,
202
+ description: tool.description,
203
+ parameters: tool.parameters,
204
+ },
205
+ };
206
+ }
207
+
208
+ // Handle legacy format for backward compatibility
209
+ return {
210
+ type: 'function',
211
+ function: {
212
+ name: tool.name,
213
+ description: tool.description,
214
+ parameters: {
215
+ type: 'object',
216
+ properties: tool.properties || {},
217
+ required: tool.requiredFields || [],
218
+ },
219
+ },
220
+ };
221
+ });
222
+ }
223
+
224
+ public async handleFileAttachments(
225
+ files: BinaryInput[],
226
+ agentId: string,
227
+ messages: OpenAI.ChatCompletionMessageParam[]
228
+ ): Promise<OpenAI.ChatCompletionMessageParam[]> {
229
+ if (files.length === 0) return messages;
230
+
231
+ const uploadedFiles = await this.uploadFiles(files, agentId);
232
+ const validImageFiles = this.getValidImageFiles(uploadedFiles);
233
+ const validDocumentFiles = this.getValidDocumentFiles(uploadedFiles);
234
+
235
+ // Process images and documents with Chat Completions specific formatting
236
+ const imageData = await this.processImageData(validImageFiles, agentId);
237
+ const documentData = await this.processDocumentData(validDocumentFiles, agentId);
238
+
239
+ // For Chat Completions, we modify the last user message
240
+ const messagesCopy = [...messages];
241
+ const userMessage =
242
+ Array.isArray(messagesCopy) && messagesCopy.length > 0 ? messagesCopy[messagesCopy.length - 1] : { role: 'user', content: '' };
243
+ const prompt = userMessage?.content && typeof userMessage.content === 'string' ? userMessage.content : '';
244
+
245
+ const promptData = [{ type: 'text', text: prompt || '' }, ...imageData, ...documentData];
246
+
247
+ // Replace the last message or add a new one if array was empty
248
+ if (messagesCopy.length > 0) {
249
+ messagesCopy[messagesCopy.length - 1] = { role: 'user', content: promptData };
250
+ } else {
251
+ messagesCopy.push({ role: 'user', content: promptData });
252
+ }
253
+
254
+ return messagesCopy;
255
+ }
256
+
257
+ /**
258
+ * Process the chat completions API stream format
259
+ */
260
+ private async processStream(
261
+ stream: AsyncIterable<OpenAI.ChatCompletionChunk>,
262
+ emitter: EventEmitter,
263
+ usage_data: OpenAI.Completions.CompletionUsage[]
264
+ ): Promise<{ toolsData: ToolData[]; finishReason: string }> {
265
+ let toolsData: ToolData[] = [];
266
+ let finishReason = 'stop';
267
+
268
+ for await (const part of stream) {
269
+ const delta = part.choices[0]?.delta;
270
+ const usage = part.usage;
271
+
272
+ // Collect usage statistics
273
+ if (usage) {
274
+ usage_data.push(usage);
275
+ }
276
+
277
+ // Emit data event for delta
278
+ emitter.emit('data', delta);
279
+
280
+ // Handle content deltas
281
+ if (!delta?.tool_calls && delta?.content) {
282
+ emitter.emit('content', delta?.content, delta?.role);
283
+ }
284
+
285
+ // Handle tool calls
286
+ if (delta?.tool_calls) {
287
+ const toolCall = delta?.tool_calls?.[0];
288
+ const index = toolCall?.index;
289
+
290
+ if (!toolsData[index]) {
291
+ toolsData[index] = {
292
+ index: index || 0,
293
+ id: '',
294
+ type: 'function',
295
+ name: '',
296
+ arguments: '',
297
+ role: 'tool',
298
+ };
299
+ }
300
+
301
+ if (toolCall?.function?.name) {
302
+ toolsData[index].name = toolCall.function.name;
303
+ }
304
+ if (toolCall?.function?.arguments) {
305
+ toolsData[index].arguments += toolCall.function.arguments;
306
+ }
307
+ if (toolCall?.id) {
308
+ toolsData[index].id = toolCall.id;
309
+ }
310
+ }
311
+
312
+ // Handle finish reason
313
+ if (part.choices[0]?.finish_reason) {
314
+ finishReason = part.choices[0].finish_reason;
315
+ }
316
+ }
317
+
318
+ return { toolsData: this.extractToolCalls(toolsData), finishReason };
319
+ }
320
+
321
+ /**
322
+ * Extract and format tool calls from the accumulated data
323
+ */
324
+ private extractToolCalls(toolsData: ToolData[]): ToolData[] {
325
+ return toolsData.map((tool) => ({
326
+ index: tool.index,
327
+ name: tool.name,
328
+ arguments: tool.arguments,
329
+ id: tool.id,
330
+ type: tool.type,
331
+ role: tool.role,
332
+ }));
333
+ }
334
+
335
+ /**
336
+ * Report usage statistics
337
+ */
338
+ private reportUsageStatistics(usage_data: OpenAI.Completions.CompletionUsage[], context: ILLMRequestContext, reportedUsage: any[]): void {
339
+ // Report normal usage
340
+ usage_data.forEach((usage) => {
341
+ const reported = this.deps.reportUsage(usage, this.buildUsageContext(context));
342
+ reportedUsage.push(reported);
343
+ });
344
+ }
345
+
346
+ /**
347
+ * Emit final events
348
+ */
349
+ private emitFinalEvents(emitter: EventEmitter, toolsData: ToolData[], reportedUsage: any[], finishReason: string): void {
350
+ // Emit tool info event if tools were called
351
+ if (toolsData.length > 0) {
352
+ emitter.emit(TLLMEvent.ToolInfo, toolsData);
353
+ }
354
+
355
+ // Emit interrupted event if finishReason is not 'stop'
356
+ if (finishReason !== 'stop') {
357
+ emitter.emit('interrupted', finishReason);
358
+ }
359
+
360
+ // Emit end event with setImmediate to ensure proper event ordering
361
+ setImmediate(() => {
362
+ emitter.emit('end', toolsData, reportedUsage, finishReason);
363
+ });
364
+ }
365
+
366
+ /**
367
+ * Build usage context parameters from request context
368
+ */
369
+ private buildUsageContext(context: ILLMRequestContext) {
370
+ return {
371
+ modelEntryName: context.modelEntryName,
372
+ keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
373
+ agentId: context.agentId,
374
+ teamId: context.teamId,
375
+ };
376
+ }
377
+
378
+ /**
379
+ * Get valid image files based on supported MIME types
380
+ */
381
+ private getValidImageFiles(files: BinaryInput[]): BinaryInput[] {
382
+ return files.filter((file) => this.validImageMimeTypes.includes(file?.mimetype));
383
+ }
384
+
385
+ /**
386
+ * Get valid document files based on supported MIME types
387
+ */
388
+ private getValidDocumentFiles(files: BinaryInput[]): BinaryInput[] {
389
+ return files.filter((file) => this.validDocumentMimeTypes.includes(file?.mimetype));
390
+ }
391
+
392
+ /**
393
+ * Upload files to storage
394
+ */
395
+ private async uploadFiles(files: BinaryInput[], agentId: string): Promise<BinaryInput[]> {
396
+ const promises = files.map((file) => {
397
+ const binaryInput = BinaryInput.from(file);
398
+ return binaryInput.upload(AccessCandidate.agent(agentId)).then(() => binaryInput);
399
+ });
400
+
401
+ return Promise.all(promises);
402
+ }
403
+
404
+ /**
405
+ * Process image files with Chat Completions specific formatting
406
+ */
407
+ private async processImageData(files: BinaryInput[], agentId: string): Promise<any[]> {
408
+ if (files.length === 0) return [];
409
+
410
+ const imageData = [];
411
+ for (const file of files) {
412
+ await this.validateFileSize(file, MAX_IMAGE_SIZE, 'Image', agentId);
413
+
414
+ const bufferData = await file.readData(AccessCandidate.agent(agentId));
415
+ const base64Data = bufferData.toString('base64');
416
+ const url = `data:${file.mimetype};base64,${base64Data}`;
417
+
418
+ imageData.push({
419
+ type: 'image_url',
420
+ image_url: { url },
421
+ });
422
+ }
423
+
424
+ return imageData;
425
+ }
426
+
427
+ /**
428
+ * Process document files with Chat Completions specific formatting
429
+ */
430
+ private async processDocumentData(files: BinaryInput[], agentId: string): Promise<any[]> {
431
+ if (files.length === 0) return [];
432
+
433
+ const documentData = [];
434
+ for (const file of files) {
435
+ await this.validateFileSize(file, MAX_DOCUMENT_SIZE, 'Document', agentId);
436
+
437
+ const bufferData = await file.readData(AccessCandidate.agent(agentId));
438
+ const base64Data = bufferData.toString('base64');
439
+ const fileData = `data:${file.mimetype};base64,${base64Data}`;
440
+ const filename = await file.getName();
441
+
442
+ documentData.push({
443
+ type: 'file',
444
+ file: {
445
+ file_data: fileData,
446
+ filename,
447
+ },
448
+ });
449
+ }
450
+
451
+ return documentData;
452
+ }
453
+
454
+ /**
455
+ * Validate file size before processing
456
+ */
457
+ private async validateFileSize(file: BinaryInput, maxSize: number, fileType: string, agentId: string): Promise<void> {
458
+ await file.ready();
459
+ const fileInfo = await file.getJsonData(AccessCandidate.agent(agentId));
460
+ if (fileInfo.size > maxSize) {
461
+ throw new Error(`${fileType} file size (${fileInfo.size} bytes) exceeds maximum allowed size of ${maxSize} bytes`);
462
+ }
463
+ }
464
+
465
+ getInterfaceName(): string {
466
+ return 'chat.completions';
467
+ }
468
+
469
+ validateParameters(params: TLLMParams): boolean {
470
+ // Basic validation for Chat Completions parameters
471
+ return !!params.model && Array.isArray(params.messages);
472
+ }
473
+
474
+ /**
475
+ * Convert system messages to user messages for models that don't support system messages
476
+ */
477
+ private convertSystemMessagesToUserMessages(messages: OpenAI.ChatCompletionMessageParam[]): OpenAI.ChatCompletionMessageParam[] {
478
+ const convertedMessages: OpenAI.ChatCompletionMessageParam[] = [];
479
+ const systemMessages: string[] = [];
480
+
481
+ // Extract system messages and collect other messages
482
+ for (const message of messages) {
483
+ if (message.role === TLLMMessageRole.System) {
484
+ const content = typeof message.content === 'string' ? message.content : '';
485
+ if (content.trim()) {
486
+ systemMessages.push(content);
487
+ }
488
+ } else {
489
+ convertedMessages.push(message);
490
+ }
491
+ }
492
+
493
+ // If we have system messages, prepend them to the first user message
494
+ if (systemMessages.length > 0) {
495
+ const systemContent = systemMessages.join('\n\n');
496
+ const firstUserMessageIndex = convertedMessages.findIndex((msg) => msg.role === TLLMMessageRole.User);
497
+
498
+ if (firstUserMessageIndex !== -1) {
499
+ const userMessage = convertedMessages[firstUserMessageIndex];
500
+ const existingContent = typeof userMessage.content === 'string' ? userMessage.content : '';
501
+ convertedMessages[firstUserMessageIndex] = {
502
+ ...userMessage,
503
+ content: systemContent + '\n\n' + existingContent,
504
+ };
505
+ } else {
506
+ // If no user message exists, create one with the system content
507
+ convertedMessages.unshift({ role: TLLMMessageRole.User, content: systemContent });
508
+ }
509
+ }
510
+
511
+ return convertedMessages;
512
+ }
513
+
514
+ /**
515
+ * Prepare messages for Chat Completions API
516
+ */
517
+ private async prepareMessages(params: TLLMParams): Promise<OpenAI.ChatCompletionMessageParam[]> {
518
+ const messages = params?.messages || [];
519
+ const files: BinaryInput[] = params?.files || [];
520
+
521
+ // Handle files if present
522
+ if (files.length > 0) {
523
+ return await this.handleFileAttachments(files, params.agentId, [...messages]);
524
+ }
525
+
526
+ return messages;
527
+ }
528
+ }
@@ -0,0 +1,100 @@
1
+ import EventEmitter from 'events';
2
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
3
+ import { TLLMParams, ILLMRequestContext, TLLMToolChoice, OpenAIToolDefinition, LegacyToolDefinition, LLMModelInfo } from '@sre/types/LLM.types';
4
+ import { HandlerDependencies } from '../types';
5
+
6
+ /**
7
+ * OpenAI-specific tool configuration interface
8
+ * Only deals with OpenAI tool definitions for clean separation
9
+ */
10
+ export interface ToolConfig {
11
+ type?: string;
12
+ toolDefinitions: (OpenAIToolDefinition | LegacyToolDefinition)[];
13
+ toolChoice?: TLLMToolChoice;
14
+ modelInfo?: LLMModelInfo | null;
15
+ }
16
+
17
+ /**
18
+ * Abstract base class for OpenAI API interfaces
19
+ * Defines the contract that all OpenAI API implementations must follow
20
+ *
21
+ * This follows the Strategy pattern - each API interface (responses, chat.completions)
22
+ * implements this interface with its own specific behavior
23
+ */
24
+ export abstract class OpenAIApiInterface {
25
+ protected context: ILLMRequestContext;
26
+
27
+ constructor(context: ILLMRequestContext) {
28
+ this.context = context;
29
+ }
30
+
31
+ /**
32
+ * Create a regular (non-streaming) request for this API interface
33
+ * @param body - The request body prepared for this API
34
+ * @param context - The request context
35
+ */
36
+ abstract createRequest(body: any, context: ILLMRequestContext): Promise<any>;
37
+
38
+ /**
39
+ * Create a stream for this API interface
40
+ * @param body - The request body prepared for this API
41
+ * @param context - The request context
42
+ */
43
+ abstract createStream(body: any, context: ILLMRequestContext): Promise<any>;
44
+
45
+ /**
46
+ * Handle the stream response from this API interface
47
+ * @param stream - The stream returned from createStream
48
+ * @param context - The request context
49
+ */
50
+ abstract handleStream(stream: any, context: ILLMRequestContext): EventEmitter;
51
+
52
+ /**
53
+ * Prepare the request body for this API interface
54
+ * @param params - The LLM parameters
55
+ */
56
+ abstract prepareRequestBody(params: TLLMParams): Promise<any>;
57
+
58
+ /**
59
+ * Transform tools configuration for this API interface
60
+ * @param config - The tool configuration
61
+ */
62
+ abstract transformToolsConfig(config: ToolConfig): any[];
63
+
64
+ /**
65
+ * Handle file attachments for this API interface
66
+ * @param files - The files to attach
67
+ * @param agentId - The agent ID
68
+ * @param messages - The messages to attach files to
69
+ */
70
+ abstract handleFileAttachments(files: BinaryInput[], agentId: string, messages: any[]): Promise<any[]>;
71
+
72
+ /**
73
+ * Get the API interface name
74
+ */
75
+ abstract getInterfaceName(): string;
76
+
77
+ /**
78
+ * Validate if this interface supports the given parameters
79
+ * @param params - The parameters to validate
80
+ */
81
+ abstract validateParameters(params: TLLMParams): boolean;
82
+ }
83
+
84
+ /**
85
+ * Factory interface for creating OpenAI API interfaces
86
+ */
87
+ export interface OpenAIApiInterfaceFactory {
88
+ /**
89
+ * Create an API interface instance for the specified type
90
+ * @param interfaceType - The type of interface to create
91
+ * @param context - The context for the interface
92
+ * @param deps - The handler dependencies for the interface
93
+ */
94
+ createInterface(interfaceType: string, context: ILLMRequestContext, deps: HandlerDependencies): OpenAIApiInterface;
95
+
96
+ /**
97
+ * Get supported interface types
98
+ */
99
+ getSupportedInterfaces(): string[];
100
+ }