@smythos/sre 1.5.50 → 1.5.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (225)
  1. package/CHANGELOG +98 -98
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/index.js +3 -3
  5. package/dist/index.js.map +1 -1
  6. package/dist/types/Components/APICall/AccessTokenManager.d.ts +3 -2
  7. package/dist/types/Components/APICall/OAuth.helper.d.ts +3 -2
  8. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +6 -1
  9. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +39 -0
  10. package/package.json +1 -1
  11. package/src/Components/APICall/APICall.class.ts +156 -156
  12. package/src/Components/APICall/AccessTokenManager.ts +166 -130
  13. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  14. package/src/Components/APICall/OAuth.helper.ts +446 -294
  15. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  16. package/src/Components/APICall/parseData.ts +167 -167
  17. package/src/Components/APICall/parseHeaders.ts +41 -41
  18. package/src/Components/APICall/parseProxy.ts +68 -68
  19. package/src/Components/APICall/parseUrl.ts +91 -91
  20. package/src/Components/APIEndpoint.class.ts +234 -234
  21. package/src/Components/APIOutput.class.ts +58 -58
  22. package/src/Components/AgentPlugin.class.ts +102 -102
  23. package/src/Components/Async.class.ts +155 -155
  24. package/src/Components/Await.class.ts +90 -90
  25. package/src/Components/Classifier.class.ts +158 -158
  26. package/src/Components/Component.class.ts +132 -132
  27. package/src/Components/ComponentHost.class.ts +38 -38
  28. package/src/Components/DataSourceCleaner.class.ts +92 -92
  29. package/src/Components/DataSourceIndexer.class.ts +181 -181
  30. package/src/Components/DataSourceLookup.class.ts +161 -161
  31. package/src/Components/ECMASandbox.class.ts +71 -71
  32. package/src/Components/FEncDec.class.ts +29 -29
  33. package/src/Components/FHash.class.ts +33 -33
  34. package/src/Components/FSign.class.ts +80 -80
  35. package/src/Components/FSleep.class.ts +25 -25
  36. package/src/Components/FTimestamp.class.ts +25 -25
  37. package/src/Components/FileStore.class.ts +78 -78
  38. package/src/Components/ForEach.class.ts +97 -97
  39. package/src/Components/GPTPlugin.class.ts +70 -70
  40. package/src/Components/GenAILLM.class.ts +586 -586
  41. package/src/Components/HuggingFace.class.ts +314 -314
  42. package/src/Components/Image/imageSettings.config.ts +70 -70
  43. package/src/Components/ImageGenerator.class.ts +502 -502
  44. package/src/Components/JSONFilter.class.ts +54 -54
  45. package/src/Components/LLMAssistant.class.ts +213 -213
  46. package/src/Components/LogicAND.class.ts +28 -28
  47. package/src/Components/LogicAtLeast.class.ts +85 -85
  48. package/src/Components/LogicAtMost.class.ts +86 -86
  49. package/src/Components/LogicOR.class.ts +29 -29
  50. package/src/Components/LogicXOR.class.ts +34 -34
  51. package/src/Components/MCPClient.class.ts +138 -138
  52. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  53. package/src/Components/MemoryReadKeyVal.class.ts +66 -66
  54. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  55. package/src/Components/MemoryWriteObject.class.ts +97 -97
  56. package/src/Components/MultimodalLLM.class.ts +128 -128
  57. package/src/Components/OpenAPI.class.ts +72 -72
  58. package/src/Components/PromptGenerator.class.ts +122 -122
  59. package/src/Components/ScrapflyWebScrape.class.ts +159 -159
  60. package/src/Components/ServerlessCode.class.ts +123 -123
  61. package/src/Components/TavilyWebSearch.class.ts +98 -98
  62. package/src/Components/VisionLLM.class.ts +104 -104
  63. package/src/Components/ZapierAction.class.ts +127 -127
  64. package/src/Components/index.ts +97 -97
  65. package/src/Core/AgentProcess.helper.ts +240 -240
  66. package/src/Core/Connector.class.ts +123 -123
  67. package/src/Core/ConnectorsService.ts +197 -197
  68. package/src/Core/DummyConnector.ts +49 -49
  69. package/src/Core/HookService.ts +105 -105
  70. package/src/Core/SmythRuntime.class.ts +235 -235
  71. package/src/Core/SystemEvents.ts +16 -16
  72. package/src/Core/boot.ts +56 -56
  73. package/src/config.ts +15 -15
  74. package/src/constants.ts +126 -126
  75. package/src/data/hugging-face.params.json +579 -579
  76. package/src/helpers/AWSLambdaCode.helper.ts +590 -587
  77. package/src/helpers/BinaryInput.helper.ts +331 -331
  78. package/src/helpers/Conversation.helper.ts +1119 -1119
  79. package/src/helpers/ECMASandbox.helper.ts +54 -54
  80. package/src/helpers/JsonContent.helper.ts +97 -97
  81. package/src/helpers/LocalCache.helper.ts +97 -97
  82. package/src/helpers/Log.helper.ts +274 -274
  83. package/src/helpers/OpenApiParser.helper.ts +150 -150
  84. package/src/helpers/S3Cache.helper.ts +147 -147
  85. package/src/helpers/SmythURI.helper.ts +5 -5
  86. package/src/helpers/Sysconfig.helper.ts +77 -77
  87. package/src/helpers/TemplateString.helper.ts +243 -243
  88. package/src/helpers/TypeChecker.helper.ts +329 -329
  89. package/src/index.ts +3 -3
  90. package/src/index.ts.bak +3 -3
  91. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  92. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  93. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  94. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  95. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
  96. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  97. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  98. package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -297
  99. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  100. package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
  101. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  102. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  103. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  104. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  105. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  106. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  107. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  108. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  109. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  110. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
  111. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  112. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  113. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  114. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  115. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  116. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  117. package/src/subsystems/IO/Log.service/index.ts +13 -13
  118. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  119. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  120. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  121. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  122. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  123. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  124. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  125. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  126. package/src/subsystems/IO/Router.service/index.ts +11 -11
  127. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
  128. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  129. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  130. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  131. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  132. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  133. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
  134. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
  135. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
  136. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  137. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  138. package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
  139. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  140. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  141. package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
  142. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
  143. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  144. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
  145. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
  146. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
  147. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
  148. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
  149. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
  150. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
  151. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -488
  152. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  153. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  154. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  155. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  156. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  157. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  158. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  159. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  160. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
  161. package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
  162. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
  163. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
  164. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  165. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  166. package/src/subsystems/LLMManager/models.ts +2540 -2540
  167. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  168. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  169. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  170. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
  171. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  172. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  173. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  174. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  175. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  176. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  177. package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
  178. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  179. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  180. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  181. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  182. package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
  183. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  184. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
  185. package/src/subsystems/Security/Account.service/index.ts +14 -14
  186. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  187. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  188. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  189. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  190. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  191. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  192. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  193. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  194. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  195. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  196. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  197. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  198. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  199. package/src/types/ACL.types.ts +104 -104
  200. package/src/types/AWS.types.ts +10 -10
  201. package/src/types/Agent.types.ts +61 -61
  202. package/src/types/AgentLogger.types.ts +17 -17
  203. package/src/types/Cache.types.ts +1 -1
  204. package/src/types/Common.types.ts +2 -2
  205. package/src/types/LLM.types.ts +496 -496
  206. package/src/types/Redis.types.ts +8 -8
  207. package/src/types/SRE.types.ts +64 -64
  208. package/src/types/Security.types.ts +14 -14
  209. package/src/types/Storage.types.ts +5 -5
  210. package/src/types/VectorDB.types.ts +86 -86
  211. package/src/utils/base64.utils.ts +275 -275
  212. package/src/utils/cli.utils.ts +68 -68
  213. package/src/utils/data.utils.ts +322 -322
  214. package/src/utils/date-time.utils.ts +22 -22
  215. package/src/utils/general.utils.ts +238 -238
  216. package/src/utils/index.ts +12 -12
  217. package/src/utils/lazy-client.ts +261 -261
  218. package/src/utils/numbers.utils.ts +13 -13
  219. package/src/utils/oauth.utils.ts +35 -35
  220. package/src/utils/string.utils.ts +414 -414
  221. package/src/utils/url.utils.ts +19 -19
  222. package/src/utils/validation.utils.ts +74 -74
  223. package/dist/bundle-analysis-lazy.html +0 -4949
  224. package/dist/bundle-analysis.html +0 -4949
  225. package/dist/types/utils/package-manager.utils.d.ts +0 -26
@@ -1,524 +1,524 @@
import EventEmitter from 'events';
import OpenAI from 'openai';
import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
import { TLLMParams, TLLMPreparedParams, ILLMRequestContext, ToolData, TLLMMessageRole, APIKeySource, TLLMEvent } from '@sre/types/LLM.types';
import { OpenAIApiInterface, ToolConfig } from './OpenAIApiInterface';
import { HandlerDependencies } from '../types';
import { JSON_RESPONSE_INSTRUCTION, SUPPORTED_MIME_TYPES_MAP, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
import { MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT, MODELS_WITHOUT_JSON_RESPONSE_SUPPORT, O3_AND_O4_MODELS } from './constants';

import { isValidOpenAIReasoningEffort } from './utils';

// File size limits in bytes
const MAX_IMAGE_SIZE = 20 * 1024 * 1024; // 20MB
const MAX_DOCUMENT_SIZE = 25 * 1024 * 1024; // 25MB

/**
 * OpenAI Chat Completions API interface implementation
 * Handles all Chat Completions API-specific logic including:
 * - Stream creation and handling
 * - Request body preparation
 * - Tool and message transformations
 * - File attachment handling
 */
export class ChatCompletionsApiInterface extends OpenAIApiInterface {
    private deps: HandlerDependencies;
    private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.image;
    private validDocumentMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.document;

    constructor(context: ILLMRequestContext, deps: HandlerDependencies) {
        super(context);
        this.deps = deps;
    }

    public async createRequest(body: OpenAI.ChatCompletionCreateParams, context: ILLMRequestContext): Promise<OpenAI.ChatCompletion> {
        const openai = await this.deps.getClient(context);
        return await openai.chat.completions.create({
            ...body,
            stream: false,
        });
    }

    public async createStream(
        body: OpenAI.ChatCompletionCreateParams,
        context: ILLMRequestContext
    ): Promise<AsyncIterable<OpenAI.ChatCompletionChunk>> {
        const openai = await this.deps.getClient(context);
        return await openai.chat.completions.create({
            ...body,
            stream: true,
            stream_options: { include_usage: true },
        });
    }

    public handleStream(stream: AsyncIterable<OpenAI.ChatCompletionChunk>, context: ILLMRequestContext): EventEmitter {
        const emitter = new EventEmitter();

        // Process stream asynchronously while returning emitter immediately
        (async () => {
            let finalToolsData: ToolData[] = [];

            try {
                // Step 1: Process the stream
                const streamResult = await this.processStream(stream, emitter);
                finalToolsData = streamResult.toolsData;

                const finishReason = streamResult.finishReason || 'stop';
                const usageData = streamResult.usageData;

                // Step 2: Report usage statistics
                const reportedUsage = this.reportUsageStatistics(usageData, context);

                // Step 3: Emit final events
                this.emitFinalEvents(emitter, finalToolsData, reportedUsage, finishReason);
            } catch (error) {
                emitter.emit('error', error);
            }
        })();

        return emitter;
    }

    public async prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.ChatCompletionCreateParams> {
        let messages = await this.prepareMessages(params);

        // Convert system messages for models that don't support them
        if (MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT.includes(params.modelEntryName)) {
            messages = this.convertSystemMessagesToUserMessages(messages);
        }

        // Handle JSON response format
        if (params.responseFormat === 'json') {
            const supportsSystemMessages = !MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT.includes(params.modelEntryName);

            if (supportsSystemMessages) {
                // For models that support system messages
                if (messages?.[0]?.role === TLLMMessageRole.System) {
                    messages[0] = { ...messages[0], content: messages[0].content + JSON_RESPONSE_INSTRUCTION };
                } else {
                    messages.unshift({ role: TLLMMessageRole.System, content: JSON_RESPONSE_INSTRUCTION });
                }
            } else {
                // For models that don't support system messages, prepend to first user message
                const firstUserMessageIndex = messages.findIndex((msg) => msg.role === TLLMMessageRole.User);
                if (firstUserMessageIndex !== -1) {
                    const userMessage = messages[firstUserMessageIndex];
                    const content = typeof userMessage.content === 'string' ? userMessage.content : '';
                    messages[firstUserMessageIndex] = {
                        ...userMessage,
                        content: JSON_RESPONSE_INSTRUCTION + '\n\n' + content,
                    };
                } else {
                    // If no user message exists, create one with the instruction
                    messages.push({ role: TLLMMessageRole.User, content: JSON_RESPONSE_INSTRUCTION });
                }
            }

            params.responseFormat = { type: 'json_object' };
        }

        const body: OpenAI.ChatCompletionCreateParams = {
            model: params.model as string,
            messages,
        };

        // Handle max tokens
        if (params?.maxTokens !== undefined) {
            body.max_completion_tokens = params.maxTokens;
        }

        // Handle temperature
        const modelName = params.modelEntryName?.replace(BUILT_IN_MODEL_PREFIX, '');
        if (params?.temperature !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
            body.temperature = params.temperature;
        }

        // Handle topP
        if (params?.topP !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
            body.top_p = params.topP;
        }

        // Handle frequency penalty
        if (params?.frequencyPenalty !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
            body.frequency_penalty = params.frequencyPenalty;
        }

        // Handle presence penalty
        if (params?.presencePenalty !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
            body.presence_penalty = params.presencePenalty;
        }

        // Handle response format
        if (params?.responseFormat?.type && !MODELS_WITHOUT_JSON_RESPONSE_SUPPORT.includes(modelName)) {
            body.response_format = params.responseFormat;
        }

        // Handle stop sequences
        if (params?.stopSequences?.length && !O3_AND_O4_MODELS.includes(modelName)) {
            body.stop = params.stopSequences;
        }

        // #region GPT 5 specific fields
        const isGPT5ReasoningModels = params.modelEntryName?.includes('gpt-5') && params?.capabilities?.reasoning;
        if (isGPT5ReasoningModels && params?.verbosity) {
            body.verbosity = params.verbosity;
        }

        // We need to validate the `reasoningEffort` parameter for OpenAI models, since models like `qwen/qwen3-32b` and `deepseek-r1-distill-llama-70b` (available via Groq) also support this parameter but use different values, such as `none` and `default`. These values are valid in our system but not specifically for OpenAI.
        if (isGPT5ReasoningModels && isValidOpenAIReasoningEffort(params.reasoningEffort)) {
            body.reasoning_effort = params.reasoningEffort;
        }
        // #endregion GPT 5 specific fields

        // Handle tools configuration
        if (params?.toolsConfig?.tools && params?.toolsConfig?.tools?.length > 0) {
            body.tools = params?.toolsConfig?.tools as OpenAI.ChatCompletionTool[];
            body.tool_choice = params?.toolsConfig?.tool_choice;
        }

        return body;
    }

    /**
     * Transform OpenAI tool definitions to ChatCompletionTool format
     */
    public transformToolsConfig(config: ToolConfig): OpenAI.ChatCompletionTool[] {
        return config.toolDefinitions.map((tool) => {
            // Handle OpenAI tool definition format
            if ('parameters' in tool) {
                return {
                    type: 'function',
                    function: {
                        name: tool.name,
                        description: tool.description,
                        parameters: tool.parameters,
                    },
                };
            }

            // Handle legacy format for backward compatibility
            return {
                type: 'function',
                function: {
                    name: tool.name,
                    description: tool.description,
                    parameters: {
                        type: 'object',
                        properties: tool.properties || {},
                        required: tool.requiredFields || [],
                    },
                },
            };
        });
    }

    public async handleFileAttachments(
        files: BinaryInput[],
        agentId: string,
        messages: OpenAI.ChatCompletionMessageParam[]
    ): Promise<OpenAI.ChatCompletionMessageParam[]> {
        if (files.length === 0) return messages;

        const uploadedFiles = await this.uploadFiles(files, agentId);
        const validImageFiles = this.getValidImageFiles(uploadedFiles);
        const validDocumentFiles = this.getValidDocumentFiles(uploadedFiles);

        // Process images and documents with Chat Completions specific formatting
        const imageData = await this.processImageData(validImageFiles, agentId);
        const documentData = await this.processDocumentData(validDocumentFiles, agentId);

        // For Chat Completions, we modify the last user message
        const messagesCopy = [...messages];
        const userMessage =
            Array.isArray(messagesCopy) && messagesCopy.length > 0 ? messagesCopy[messagesCopy.length - 1] : { role: 'user', content: '' };
        const prompt = userMessage?.content && typeof userMessage.content === 'string' ? userMessage.content : '';

        const promptData = [{ type: 'text', text: prompt || '' }, ...imageData, ...documentData];

        // Replace the last message or add a new one if array was empty
        if (messagesCopy.length > 0) {
            messagesCopy[messagesCopy.length - 1] = { role: 'user', content: promptData };
        } else {
            messagesCopy.push({ role: 'user', content: promptData });
        }

        return messagesCopy;
    }

    /**
     * Process the chat completions API stream format
     */
    private async processStream(
        stream: AsyncIterable<OpenAI.ChatCompletionChunk>,
        emitter: EventEmitter
    ): Promise<{ toolsData: ToolData[]; finishReason: string; usageData: any[] }> {
        let toolsData: ToolData[] = [];
        let finishReason = 'stop';
        const usageData = [];

        for await (const part of stream) {
            const delta = part.choices[0]?.delta;
            const usage = part.usage;

            // Collect usage statistics
            if (usage) {
                usageData.push(usage);
            }

            // Emit data event for delta
            emitter.emit('data', delta);

            // Handle content deltas
            if (!delta?.tool_calls && delta?.content) {
                emitter.emit('content', delta?.content, delta?.role);
            }

            // Handle tool calls
            if (delta?.tool_calls) {
                const toolCall = delta?.tool_calls?.[0];
                const index = toolCall?.index;

                if (!toolsData[index]) {
                    toolsData[index] = {
                        index: index || 0,
                        id: '',
                        type: 'function',
                        name: '',
                        arguments: '',
                        role: 'tool',
                    };
                }

                if (toolCall?.function?.name) {
                    toolsData[index].name = toolCall.function.name;
                }
                if (toolCall?.function?.arguments) {
                    toolsData[index].arguments += toolCall.function.arguments;
                }
                if (toolCall?.id) {
                    toolsData[index].id = toolCall.id;
                }
            }

            // Handle finish reason
            if (part.choices[0]?.finish_reason) {
                finishReason = part.choices[0].finish_reason;
            }
        }

        return { toolsData: this.extractToolCalls(toolsData), finishReason, usageData };
    }

    /**
     * Extract and format tool calls from the accumulated data
     */
    private extractToolCalls(toolsData: ToolData[]): ToolData[] {
        return toolsData.map((tool) => ({
            index: tool.index,
            name: tool.name,
            arguments: tool.arguments,
            id: tool.id,
            type: tool.type,
            role: tool.role,
        }));
    }

    /**
     * Report usage statistics
     */
    private reportUsageStatistics(usage_data: OpenAI.Completions.CompletionUsage[], context: ILLMRequestContext): any[] {
        const reportedUsage: any[] = [];

        // Report normal usage
        usage_data.forEach((usage) => {
            const reported = this.deps.reportUsage(usage, this.buildUsageContext(context));
            reportedUsage.push(reported);
        });

        return reportedUsage;
    }

    /**
     * Emit final events
     */
    private emitFinalEvents(emitter: EventEmitter, toolsData: ToolData[], reportedUsage: any[], finishReason: string): void {
        // Emit tool info event if tools were called
        if (toolsData.length > 0) {
            emitter.emit(TLLMEvent.ToolInfo, toolsData);
        }

        // Emit interrupted event if finishReason is not 'stop'
        if (finishReason !== 'stop') {
            emitter.emit('interrupted', finishReason);
        }

        // Emit end event with setImmediate to ensure proper event ordering
        setImmediate(() => {
            emitter.emit('end', toolsData, reportedUsage, finishReason);
        });
    }

    /**
     * Build usage context parameters from request context
     */
    private buildUsageContext(context: ILLMRequestContext) {
        return {
            modelEntryName: context.modelEntryName,
            keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
            agentId: context.agentId,
            teamId: context.teamId,
        };
    }

    /**
     * Get valid image files based on supported MIME types
     */
    private getValidImageFiles(files: BinaryInput[]): BinaryInput[] {
        return files.filter((file) => this.validImageMimeTypes.includes(file?.mimetype));
    }

    /**
     * Get valid document files based on supported MIME types
     */
    private getValidDocumentFiles(files: BinaryInput[]): BinaryInput[] {
        return files.filter((file) => this.validDocumentMimeTypes.includes(file?.mimetype));
    }

    /**
     * Upload files to storage
     */
    private async uploadFiles(files: BinaryInput[], agentId: string): Promise<BinaryInput[]> {
        const promises = files.map((file) => {
            const binaryInput = BinaryInput.from(file);
            return binaryInput.upload(AccessCandidate.agent(agentId)).then(() => binaryInput);
        });

        return Promise.all(promises);
    }

    /**
     * Process image files with Chat Completions specific formatting
     */
    private async processImageData(files: BinaryInput[], agentId: string): Promise<any[]> {
        if (files.length === 0) return [];

        const imageData = [];
        for (const file of files) {
            await this.validateFileSize(file, MAX_IMAGE_SIZE, 'Image', agentId);

            const bufferData = await file.readData(AccessCandidate.agent(agentId));
            const base64Data = bufferData.toString('base64');
            const url = `data:${file.mimetype};base64,${base64Data}`;

            imageData.push({
                type: 'image_url',
                image_url: { url },
            });
        }

        return imageData;
    }

    /**
     * Process document files with Chat Completions specific formatting
     */
    private async processDocumentData(files: BinaryInput[], agentId: string): Promise<any[]> {
        if (files.length === 0) return [];

        const documentData = [];
        for (const file of files) {
            await this.validateFileSize(file, MAX_DOCUMENT_SIZE, 'Document', agentId);

            const bufferData = await file.readData(AccessCandidate.agent(agentId));
            const base64Data = bufferData.toString('base64');
            const fileData = `data:${file.mimetype};base64,${base64Data}`;
            const filename = await file.getName();

            documentData.push({
                type: 'file',
                file: {
                    file_data: fileData,
                    filename,
                },
            });
        }

        return documentData;
    }

    /**
     * Validate file size before processing
     */
    private async validateFileSize(file: BinaryInput, maxSize: number, fileType: string, agentId: string): Promise<void> {
        await file.ready();
        const fileInfo = await file.getJsonData(AccessCandidate.agent(agentId));
        if (fileInfo.size > maxSize) {
            throw new Error(`${fileType} file size (${fileInfo.size} bytes) exceeds maximum allowed size of ${maxSize} bytes`);
        }
    }

    getInterfaceName(): string {
        return 'chat.completions';
    }

    validateParameters(params: TLLMParams): boolean {
        // Basic validation for Chat Completions parameters
        return !!params.model && Array.isArray(params.messages);
    }

    /**
     * Convert system messages to user messages for models that don't support system messages
     */
    private convertSystemMessagesToUserMessages(messages: OpenAI.ChatCompletionMessageParam[]): OpenAI.ChatCompletionMessageParam[] {
        const convertedMessages: OpenAI.ChatCompletionMessageParam[] = [];
        const systemMessages: string[] = [];

        // Extract system messages and collect other messages
        for (const message of messages) {
            if (message.role === TLLMMessageRole.System) {
                const content = typeof message.content === 'string' ? message.content : '';
                if (content.trim()) {
                    systemMessages.push(content);
                }
            } else {
                convertedMessages.push(message);
            }
        }

        // If we have system messages, prepend them to the first user message
        if (systemMessages.length > 0) {
            const systemContent = systemMessages.join('\n\n');
            const firstUserMessageIndex = convertedMessages.findIndex((msg) => msg.role === TLLMMessageRole.User);

            if (firstUserMessageIndex !== -1) {
                const userMessage = convertedMessages[firstUserMessageIndex];
                const existingContent = typeof userMessage.content === 'string' ? userMessage.content : '';
                convertedMessages[firstUserMessageIndex] = {
                    ...userMessage,
                    content: systemContent + '\n\n' + existingContent,
                };
            } else {
                // If no user message exists, create one with the system content
                convertedMessages.unshift({ role: TLLMMessageRole.User, content: systemContent });
            }
        }

        return convertedMessages;
    }

    /**
     * Prepare messages for Chat Completions API
     */
    private async prepareMessages(params: TLLMParams): Promise<OpenAI.ChatCompletionMessageParam[]> {
        const messages = params?.messages || [];
        const files: BinaryInput[] = params?.files || [];

        // Handle files if present
        if (files.length > 0) {
            return await this.handleFileAttachments(files, params.agentId, [...messages]);
        }

        return messages;
    }
}
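
For orientation: handleStream returns a Node.js EventEmitter immediately and drains the stream in the background, so callers subscribe to events rather than awaiting a result. Below is a minimal, hypothetical consumer sketch; the iface, body, and context values are assumed to be set up elsewhere, and only the event names and payload order come from the implementation above.

// Hypothetical consumer of the emitter returned by handleStream().
const stream = await iface.createStream(body, context);
const emitter = iface.handleStream(stream, context);

let text = '';
emitter.on('content', (chunk: string, role?: string) => {
    text += chunk; // incremental assistant output
});
emitter.on('interrupted', (finishReason: string) => {
    // fired for any finish_reason other than 'stop', e.g. 'length' or 'tool_calls'
    console.warn('generation stopped early:', finishReason);
});
emitter.on('error', (err: unknown) => {
    console.error('stream failed:', err);
});
emitter.on('end', (toolsData: unknown[], reportedUsage: unknown[], finishReason: string) => {
    // 'end' is emitted via setImmediate, so it always fires after the
    // toolInfo/'interrupted' events for the same response
    console.log({ text, toolCalls: toolsData.length, finishReason });
});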
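
Similarly, to make the two tool-definition shapes accepted by transformToolsConfig concrete, here is an invented example; the get_weather and get_time tools and the loose cast are illustrative only, not part of the package.

// Hypothetical input: one modern definition (has `parameters`) and one
// legacy definition (`properties` + `requiredFields`). Both map to the
// same OpenAI.ChatCompletionTool shape.
const tools = iface.transformToolsConfig({
    toolDefinitions: [
        {
            name: 'get_weather',
            description: 'Look up current weather',
            parameters: { type: 'object', properties: { city: { type: 'string' } }, required: ['city'] },
        },
        {
            name: 'get_time',
            description: 'Look up the current time',
            properties: { timezone: { type: 'string' } },
            requiredFields: ['timezone'],
        },
    ],
} as any);
// tools[0].function.parameters passes through unchanged; for tools[1] the
// legacy branch synthesizes a { type: 'object', properties, required } schema.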