@smythos/sre 1.6.8 → 1.6.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (239)
  1. package/CHANGELOG +111 -111
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/bundle-analysis-lazy.html +4949 -0
  5. package/dist/bundle-analysis.html +4949 -0
  6. package/dist/index.js +2 -2
  7. package/dist/index.js.map +1 -1
  8. package/dist/types/Components/Triggers/Gmail.trigger.d.ts +58 -0
  9. package/dist/types/Components/Triggers/GmailTrigger.class.d.ts +44 -0
  10. package/dist/types/Components/Triggers/Trigger.class.d.ts +21 -0
  11. package/dist/types/Components/Triggers/WhatsApp.trigger.d.ts +22 -0
  12. package/dist/types/helpers/AIPerformanceAnalyzer.helper.d.ts +45 -0
  13. package/dist/types/helpers/AIPerformanceCollector.helper.d.ts +111 -0
  14. package/dist/types/subsystems/IO/Storage.service/connectors/AzureBlobStorage.class.d.ts +211 -0
  15. package/dist/types/subsystems/IO/VectorDB.service/connectors/WeaviateVectorDB.class.d.ts +187 -0
  16. package/dist/types/subsystems/PerformanceManager/Performance.service/PerformanceConnector.d.ts +102 -0
  17. package/dist/types/subsystems/PerformanceManager/Performance.service/connectors/LocalPerformanceConnector.class.d.ts +100 -0
  18. package/dist/types/subsystems/PerformanceManager/Performance.service/index.d.ts +22 -0
  19. package/dist/types/subsystems/Security/Credentials/Credentials.class.d.ts +2 -0
  20. package/dist/types/subsystems/Security/Credentials/ManagedOAuth2Credentials.class.d.ts +18 -0
  21. package/dist/types/subsystems/Security/Credentials/OAuth2Credentials.class.d.ts +14 -0
  22. package/dist/types/types/Performance.types.d.ts +468 -0
  23. package/dist/types/utils/package-manager.utils.d.ts +26 -0
  24. package/package.json +1 -1
  25. package/src/Components/APICall/APICall.class.ts +161 -161
  26. package/src/Components/APICall/AccessTokenManager.ts +166 -166
  27. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  28. package/src/Components/APICall/OAuth.helper.ts +447 -447
  29. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  30. package/src/Components/APICall/parseData.ts +167 -167
  31. package/src/Components/APICall/parseHeaders.ts +41 -41
  32. package/src/Components/APICall/parseProxy.ts +68 -68
  33. package/src/Components/APICall/parseUrl.ts +91 -91
  34. package/src/Components/APIEndpoint.class.ts +234 -234
  35. package/src/Components/APIOutput.class.ts +58 -58
  36. package/src/Components/AgentPlugin.class.ts +102 -102
  37. package/src/Components/Async.class.ts +155 -155
  38. package/src/Components/Await.class.ts +90 -90
  39. package/src/Components/Classifier.class.ts +158 -158
  40. package/src/Components/Component.class.ts +147 -147
  41. package/src/Components/ComponentHost.class.ts +38 -38
  42. package/src/Components/DataSourceCleaner.class.ts +92 -92
  43. package/src/Components/DataSourceIndexer.class.ts +181 -181
  44. package/src/Components/DataSourceLookup.class.ts +161 -161
  45. package/src/Components/ECMASandbox.class.ts +72 -72
  46. package/src/Components/FEncDec.class.ts +29 -29
  47. package/src/Components/FHash.class.ts +33 -33
  48. package/src/Components/FSign.class.ts +80 -80
  49. package/src/Components/FSleep.class.ts +25 -25
  50. package/src/Components/FTimestamp.class.ts +66 -66
  51. package/src/Components/FileStore.class.ts +78 -78
  52. package/src/Components/ForEach.class.ts +97 -97
  53. package/src/Components/GPTPlugin.class.ts +70 -70
  54. package/src/Components/GenAILLM.class.ts +586 -586
  55. package/src/Components/HuggingFace.class.ts +313 -313
  56. package/src/Components/Image/imageSettings.config.ts +70 -70
  57. package/src/Components/ImageGenerator.class.ts +483 -483
  58. package/src/Components/JSONFilter.class.ts +54 -54
  59. package/src/Components/LLMAssistant.class.ts +213 -213
  60. package/src/Components/LogicAND.class.ts +28 -28
  61. package/src/Components/LogicAtLeast.class.ts +85 -85
  62. package/src/Components/LogicAtMost.class.ts +86 -86
  63. package/src/Components/LogicOR.class.ts +29 -29
  64. package/src/Components/LogicXOR.class.ts +34 -34
  65. package/src/Components/MCPClient.class.ts +137 -137
  66. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  67. package/src/Components/MemoryReadKeyVal.class.ts +67 -67
  68. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  69. package/src/Components/MemoryWriteObject.class.ts +97 -97
  70. package/src/Components/MultimodalLLM.class.ts +128 -128
  71. package/src/Components/OpenAPI.class.ts +72 -72
  72. package/src/Components/PromptGenerator.class.ts +122 -122
  73. package/src/Components/ScrapflyWebScrape.class.ts +183 -183
  74. package/src/Components/ServerlessCode.class.ts +123 -123
  75. package/src/Components/TavilyWebSearch.class.ts +103 -103
  76. package/src/Components/VisionLLM.class.ts +104 -104
  77. package/src/Components/ZapierAction.class.ts +127 -127
  78. package/src/Components/index.ts +97 -97
  79. package/src/Core/AgentProcess.helper.ts +240 -240
  80. package/src/Core/Connector.class.ts +123 -123
  81. package/src/Core/ConnectorsService.ts +197 -197
  82. package/src/Core/DummyConnector.ts +49 -49
  83. package/src/Core/HookService.ts +105 -105
  84. package/src/Core/SmythRuntime.class.ts +241 -241
  85. package/src/Core/SystemEvents.ts +16 -16
  86. package/src/Core/boot.ts +56 -56
  87. package/src/config.ts +15 -15
  88. package/src/constants.ts +126 -126
  89. package/src/data/hugging-face.params.json +579 -579
  90. package/src/helpers/AWSLambdaCode.helper.ts +624 -624
  91. package/src/helpers/BinaryInput.helper.ts +331 -331
  92. package/src/helpers/Conversation.helper.ts +1157 -1157
  93. package/src/helpers/ECMASandbox.helper.ts +64 -64
  94. package/src/helpers/JsonContent.helper.ts +97 -97
  95. package/src/helpers/LocalCache.helper.ts +97 -97
  96. package/src/helpers/Log.helper.ts +274 -274
  97. package/src/helpers/OpenApiParser.helper.ts +150 -150
  98. package/src/helpers/S3Cache.helper.ts +147 -147
  99. package/src/helpers/SmythURI.helper.ts +5 -5
  100. package/src/helpers/Sysconfig.helper.ts +95 -95
  101. package/src/helpers/TemplateString.helper.ts +243 -243
  102. package/src/helpers/TypeChecker.helper.ts +329 -329
  103. package/src/index.ts +3 -3
  104. package/src/index.ts.bak +3 -3
  105. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  106. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  107. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  108. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  109. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +145 -145
  110. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  111. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  112. package/src/subsystems/AgentManager/AgentLogger.class.ts +301 -301
  113. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  114. package/src/subsystems/AgentManager/AgentRuntime.class.ts +557 -557
  115. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  116. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  117. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  118. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  119. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  120. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  121. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  122. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  123. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  124. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +171 -171
  125. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  126. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  127. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  128. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  129. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  130. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  131. package/src/subsystems/IO/Log.service/index.ts +13 -13
  132. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  133. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  134. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  135. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  136. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  137. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  138. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  139. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  140. package/src/subsystems/IO/Router.service/index.ts +11 -11
  141. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +488 -488
  142. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  143. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  144. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  145. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  146. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  147. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +465 -465
  148. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +387 -387
  149. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +408 -408
  150. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  151. package/src/subsystems/IO/VectorDB.service/embed/GoogleEmbedding.ts +118 -118
  152. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  153. package/src/subsystems/IO/VectorDB.service/embed/index.ts +26 -26
  154. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  155. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  156. package/src/subsystems/LLMManager/LLM.inference.ts +345 -345
  157. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +492 -492
  158. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  159. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +666 -666
  160. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +407 -407
  161. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +92 -92
  162. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +983 -983
  163. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +319 -319
  164. package/src/subsystems/LLMManager/LLM.service/connectors/Ollama.class.ts +361 -361
  165. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +257 -257
  166. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +430 -430
  167. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +503 -503
  168. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  169. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  170. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  171. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  172. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  173. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  174. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  175. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  176. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +478 -478
  177. package/src/subsystems/LLMManager/LLM.service/index.ts +47 -47
  178. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +303 -303
  179. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +280 -271
  180. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  181. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  182. package/src/subsystems/LLMManager/models.ts +2540 -2540
  183. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  184. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  185. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  186. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +214 -214
  187. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  188. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  189. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  190. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  191. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  192. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  193. package/src/subsystems/MemoryManager/RuntimeContext.ts +277 -277
  194. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  195. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  196. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  197. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  198. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  199. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +170 -170
  200. package/src/subsystems/Security/Account.service/connectors/MySQLAccount.class.ts +76 -76
  201. package/src/subsystems/Security/Account.service/index.ts +14 -14
  202. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  203. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  204. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  205. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  206. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  207. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  208. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  209. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  210. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  211. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  212. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  213. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  214. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  215. package/src/types/ACL.types.ts +104 -104
  216. package/src/types/AWS.types.ts +10 -10
  217. package/src/types/Agent.types.ts +61 -61
  218. package/src/types/AgentLogger.types.ts +17 -17
  219. package/src/types/Cache.types.ts +1 -1
  220. package/src/types/Common.types.ts +2 -2
  221. package/src/types/LLM.types.ts +520 -520
  222. package/src/types/Redis.types.ts +8 -8
  223. package/src/types/SRE.types.ts +64 -64
  224. package/src/types/Security.types.ts +14 -14
  225. package/src/types/Storage.types.ts +5 -5
  226. package/src/types/VectorDB.types.ts +86 -86
  227. package/src/utils/base64.utils.ts +275 -275
  228. package/src/utils/cli.utils.ts +68 -68
  229. package/src/utils/data.utils.ts +322 -322
  230. package/src/utils/date-time.utils.ts +22 -22
  231. package/src/utils/general.utils.ts +238 -238
  232. package/src/utils/index.ts +12 -12
  233. package/src/utils/lazy-client.ts +261 -261
  234. package/src/utils/numbers.utils.ts +13 -13
  235. package/src/utils/oauth.utils.ts +35 -35
  236. package/src/utils/string.utils.ts +414 -414
  237. package/src/utils/url.utils.ts +19 -19
  238. package/src/utils/validation.utils.ts +74 -74
  239. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
@@ -1,524 +1,524 @@
1
- import EventEmitter from 'events';
2
- import OpenAI from 'openai';
3
- import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
4
- import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
5
- import { TLLMParams, TLLMPreparedParams, ILLMRequestContext, ToolData, TLLMMessageRole, APIKeySource, TLLMEvent } from '@sre/types/LLM.types';
6
- import { OpenAIApiInterface, ToolConfig } from './OpenAIApiInterface';
7
- import { HandlerDependencies } from '../types';
8
- import { JSON_RESPONSE_INSTRUCTION, SUPPORTED_MIME_TYPES_MAP, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
9
- import { MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT, MODELS_WITHOUT_JSON_RESPONSE_SUPPORT, O3_AND_O4_MODELS } from './constants';
10
-
11
- import { isValidOpenAIReasoningEffort } from './utils';
12
-
13
- // File size limits in bytes
14
- const MAX_IMAGE_SIZE = 20 * 1024 * 1024; // 20MB
15
- const MAX_DOCUMENT_SIZE = 25 * 1024 * 1024; // 25MB
16
-
17
- /**
18
- * OpenAI Chat Completions API interface implementation
19
- * Handles all Chat Completions API-specific logic including:
20
- * - Stream creation and handling
21
- * - Request body preparation
22
- * - Tool and message transformations
23
- * - File attachment handling
24
- */
25
- export class ChatCompletionsApiInterface extends OpenAIApiInterface {
26
- private deps: HandlerDependencies;
27
- private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.image;
28
- private validDocumentMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.document;
29
-
30
- constructor(context: ILLMRequestContext, deps: HandlerDependencies) {
31
- super(context);
32
- this.deps = deps;
33
- }
34
-
35
- public async createRequest(body: OpenAI.ChatCompletionCreateParams, context: ILLMRequestContext): Promise<OpenAI.ChatCompletion> {
36
- const openai = await this.deps.getClient(context);
37
- return await openai.chat.completions.create({
38
- ...body,
39
- stream: false,
40
- });
41
- }
42
-
43
- public async createStream(
44
- body: OpenAI.ChatCompletionCreateParams,
45
- context: ILLMRequestContext
46
- ): Promise<AsyncIterable<OpenAI.ChatCompletionChunk>> {
47
- const openai = await this.deps.getClient(context);
48
- return await openai.chat.completions.create({
49
- ...body,
50
- stream: true,
51
- stream_options: { include_usage: true },
52
- });
53
- }
54
-
55
- public handleStream(stream: AsyncIterable<OpenAI.ChatCompletionChunk>, context: ILLMRequestContext): EventEmitter {
56
- const emitter = new EventEmitter();
57
-
58
- // Process stream asynchronously while returning emitter immediately
59
- (async () => {
60
- let finalToolsData: ToolData[] = [];
61
-
62
- try {
63
- // Step 1: Process the stream
64
- const streamResult = await this.processStream(stream, emitter);
65
- finalToolsData = streamResult.toolsData;
66
-
67
- const finishReason = streamResult.finishReason || 'stop';
68
- const usageData = streamResult.usageData;
69
-
70
- // Step 2: Report usage statistics
71
- const reportedUsage = this.reportUsageStatistics(usageData, context);
72
-
73
- // Step 3: Emit final events
74
- this.emitFinalEvents(emitter, finalToolsData, reportedUsage, finishReason);
75
- } catch (error) {
76
- emitter.emit('error', error);
77
- }
78
- })();
79
-
80
- return emitter;
81
- }
82
-
83
- public async prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.ChatCompletionCreateParams> {
84
- let messages = await this.prepareMessages(params);
85
-
86
- // Convert system messages for models that don't support them
87
- if (MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT.includes(params.modelEntryName)) {
88
- messages = this.convertSystemMessagesToUserMessages(messages);
89
- }
90
-
91
- // Handle JSON response format
92
- if (params.responseFormat === 'json') {
93
- const supportsSystemMessages = !MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT.includes(params.modelEntryName);
94
-
95
- if (supportsSystemMessages) {
96
- // For models that support system messages
97
- if (messages?.[0]?.role === TLLMMessageRole.System) {
98
- messages[0] = { ...messages[0], content: messages[0].content + JSON_RESPONSE_INSTRUCTION };
99
- } else {
100
- messages.unshift({ role: TLLMMessageRole.System, content: JSON_RESPONSE_INSTRUCTION });
101
- }
102
- } else {
103
- // For models that don't support system messages, prepend to first user message
104
- const firstUserMessageIndex = messages.findIndex((msg) => msg.role === TLLMMessageRole.User);
105
- if (firstUserMessageIndex !== -1) {
106
- const userMessage = messages[firstUserMessageIndex];
107
- const content = typeof userMessage.content === 'string' ? userMessage.content : '';
108
- messages[firstUserMessageIndex] = {
109
- ...userMessage,
110
- content: JSON_RESPONSE_INSTRUCTION + '\n\n' + content,
111
- };
112
- } else {
113
- // If no user message exists, create one with the instruction
114
- messages.push({ role: TLLMMessageRole.User, content: JSON_RESPONSE_INSTRUCTION });
115
- }
116
- }
117
-
118
- params.responseFormat = { type: 'json_object' };
119
- }
120
-
121
- const body: OpenAI.ChatCompletionCreateParams = {
122
- model: params.model as string,
123
- messages,
124
- };
125
-
126
- // Handle max tokens
127
- if (params?.maxTokens !== undefined) {
128
- body.max_completion_tokens = params.maxTokens;
129
- }
130
-
131
- // Handle temperature
132
- const modelName = params.modelEntryName?.replace(BUILT_IN_MODEL_PREFIX, '');
133
- if (params?.temperature !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
134
- body.temperature = params.temperature;
135
- }
136
-
137
- // Handle topP
138
- if (params?.topP !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
139
- body.top_p = params.topP;
140
- }
141
-
142
- // Handle frequency penalty
143
- if (params?.frequencyPenalty !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
144
- body.frequency_penalty = params.frequencyPenalty;
145
- }
146
-
147
- // Handle presence penalty
148
- if (params?.presencePenalty !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
149
- body.presence_penalty = params.presencePenalty;
150
- }
151
-
152
- // Handle response format
153
- if (params?.responseFormat?.type && !MODELS_WITHOUT_JSON_RESPONSE_SUPPORT.includes(modelName)) {
154
- body.response_format = params.responseFormat;
155
- }
156
-
157
- // Handle stop sequences
158
- if (params?.stopSequences?.length && !O3_AND_O4_MODELS.includes(modelName)) {
159
- body.stop = params.stopSequences;
160
- }
161
-
162
- // #region GPT 5 specific fields
163
- const isGPT5ReasoningModels = params.modelEntryName?.includes('gpt-5') && params?.capabilities?.reasoning;
164
- if (isGPT5ReasoningModels && params?.verbosity) {
165
- body.verbosity = params.verbosity;
166
- }
167
-
168
- // We need to validate the `reasoningEffort` parameter for OpenAI models, since models like `qwen/qwen3-32b` and `deepseek-r1-distill-llama-70b` (available via Groq) also support this parameter but use different values, such as `none` and `default`. These values are valid in our system but not specifically for OpenAI.
169
- if (isGPT5ReasoningModels && isValidOpenAIReasoningEffort(params.reasoningEffort)) {
170
- body.reasoning_effort = params.reasoningEffort;
171
- }
172
- // #endregion GPT 5 specific fields
173
-
174
- // Handle tools configuration
175
- if (params?.toolsConfig?.tools && params?.toolsConfig?.tools?.length > 0) {
176
- body.tools = params?.toolsConfig?.tools as OpenAI.ChatCompletionTool[];
177
- body.tool_choice = params?.toolsConfig?.tool_choice;
178
- }
179
-
180
- return body;
181
- }
182
-
183
- /**
184
- * Transform OpenAI tool definitions to ChatCompletionTool format
185
- */
186
- public transformToolsConfig(config: ToolConfig): OpenAI.ChatCompletionTool[] {
187
- return config.toolDefinitions.map((tool) => {
188
- // Handle OpenAI tool definition format
189
- if ('parameters' in tool) {
190
- return {
191
- type: 'function' as const,
192
- function: {
193
- name: tool.name,
194
- description: tool.description,
195
- parameters: tool.parameters,
196
- },
197
- };
198
- }
199
-
200
- // Handle legacy format for backward compatibility
201
- return {
202
- type: 'function' as const,
203
- function: {
204
- name: tool.name,
205
- description: tool.description,
206
- parameters: {
207
- type: 'object',
208
- properties: tool.properties || {},
209
- required: tool.requiredFields || [],
210
- },
211
- },
212
- };
213
- });
214
- }
215
-
216
- public async handleFileAttachments(
217
- files: BinaryInput[],
218
- agentId: string,
219
- messages: OpenAI.ChatCompletionMessageParam[]
220
- ): Promise<OpenAI.ChatCompletionMessageParam[]> {
221
- if (files.length === 0) return messages;
222
-
223
- const uploadedFiles = await this.uploadFiles(files, agentId);
224
- const validImageFiles = this.getValidImageFiles(uploadedFiles);
225
- const validDocumentFiles = this.getValidDocumentFiles(uploadedFiles);
226
-
227
- // Process images and documents with Chat Completions specific formatting
228
- const imageData = await this.processImageData(validImageFiles, agentId);
229
- const documentData = await this.processDocumentData(validDocumentFiles, agentId);
230
-
231
- // For Chat Completions, we modify the last user message
232
- const messagesCopy = [...messages];
233
- const userMessage =
234
- Array.isArray(messagesCopy) && messagesCopy.length > 0 ? messagesCopy[messagesCopy.length - 1] : { role: 'user', content: '' };
235
- const prompt = userMessage?.content && typeof userMessage.content === 'string' ? userMessage.content : '';
236
-
237
- const promptData = [{ type: 'text', text: prompt || '' }, ...imageData, ...documentData];
238
-
239
- // Replace the last message or add a new one if array was empty
240
- if (messagesCopy.length > 0) {
241
- messagesCopy[messagesCopy.length - 1] = { role: 'user', content: promptData };
242
- } else {
243
- messagesCopy.push({ role: 'user', content: promptData });
244
- }
245
-
246
- return messagesCopy;
247
- }
248
-
249
- /**
250
- * Process the chat completions API stream format
251
- */
252
- private async processStream(
253
- stream: AsyncIterable<OpenAI.ChatCompletionChunk>,
254
- emitter: EventEmitter
255
- ): Promise<{ toolsData: ToolData[]; finishReason: string; usageData: any[] }> {
256
- let toolsData: ToolData[] = [];
257
- let finishReason = 'stop';
258
- const usageData = [];
259
-
260
- for await (const part of stream) {
261
- const delta = part.choices[0]?.delta;
262
- const usage = part.usage;
263
-
264
- // Collect usage statistics
265
- if (usage) {
266
- usageData.push(usage);
267
- }
268
-
269
- // Emit data event for delta
270
- emitter.emit('data', delta);
271
-
272
- // Handle content deltas
273
- if (!delta?.tool_calls && delta?.content) {
274
- emitter.emit('content', delta?.content, delta?.role);
275
- }
276
-
277
- // Handle tool calls
278
- if (delta?.tool_calls) {
279
- const toolCall = delta?.tool_calls?.[0];
280
- const index = toolCall?.index;
281
-
282
- if (!toolsData[index]) {
283
- toolsData[index] = {
284
- index: index || 0,
285
- id: '',
286
- type: 'function',
287
- name: '',
288
- arguments: '',
289
- role: 'tool',
290
- };
291
- }
292
-
293
- if (toolCall?.function?.name) {
294
- toolsData[index].name = toolCall.function.name;
295
- }
296
- if (toolCall?.function?.arguments) {
297
- toolsData[index].arguments += toolCall.function.arguments;
298
- }
299
- if (toolCall?.id) {
300
- toolsData[index].id = toolCall.id;
301
- }
302
- }
303
-
304
- // Handle finish reason
305
- if (part.choices[0]?.finish_reason) {
306
- finishReason = part.choices[0].finish_reason;
307
- }
308
- }
309
-
310
- return { toolsData: this.extractToolCalls(toolsData), finishReason, usageData };
311
- }
312
-
313
- /**
314
- * Extract and format tool calls from the accumulated data
315
- */
316
- private extractToolCalls(toolsData: ToolData[]): ToolData[] {
317
- return toolsData.map((tool) => ({
318
- index: tool.index,
319
- name: tool.name,
320
- arguments: tool.arguments,
321
- id: tool.id,
322
- type: tool.type,
323
- role: tool.role,
324
- }));
325
- }
326
-
327
- /**
328
- * Report usage statistics
329
- */
330
- private reportUsageStatistics(usage_data: OpenAI.Completions.CompletionUsage[], context: ILLMRequestContext): any[] {
331
- const reportedUsage: any[] = [];
332
-
333
- // Report normal usage
334
- usage_data.forEach((usage) => {
335
- const reported = this.deps.reportUsage(usage, this.buildUsageContext(context));
336
- reportedUsage.push(reported);
337
- });
338
-
339
- return reportedUsage;
340
- }
341
-
342
- /**
343
- * Emit final events
344
- */
345
- private emitFinalEvents(emitter: EventEmitter, toolsData: ToolData[], reportedUsage: any[], finishReason: string): void {
346
- // Emit tool info event if tools were called
347
- if (toolsData.length > 0) {
348
- emitter.emit(TLLMEvent.ToolInfo, toolsData);
349
- }
350
-
351
- // Emit interrupted event if finishReason is not 'stop'
352
- if (finishReason !== 'stop') {
353
- emitter.emit('interrupted', finishReason);
354
- }
355
-
356
- // Emit end event with setImmediate to ensure proper event ordering
357
- setImmediate(() => {
358
- emitter.emit('end', toolsData, reportedUsage, finishReason);
359
- });
360
- }
361
-
362
- /**
363
- * Build usage context parameters from request context
364
- */
365
- private buildUsageContext(context: ILLMRequestContext) {
366
- return {
367
- modelEntryName: context.modelEntryName,
368
- keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
369
- agentId: context.agentId,
370
- teamId: context.teamId,
371
- };
372
- }
373
-
374
- /**
375
- * Get valid image files based on supported MIME types
376
- */
377
- private getValidImageFiles(files: BinaryInput[]): BinaryInput[] {
378
- return files.filter((file) => this.validImageMimeTypes.includes(file?.mimetype));
379
- }
380
-
381
- /**
382
- * Get valid document files based on supported MIME types
383
- */
384
- private getValidDocumentFiles(files: BinaryInput[]): BinaryInput[] {
385
- return files.filter((file) => this.validDocumentMimeTypes.includes(file?.mimetype));
386
- }
387
-
388
- /**
389
- * Upload files to storage
390
- */
391
- private async uploadFiles(files: BinaryInput[], agentId: string): Promise<BinaryInput[]> {
392
- const promises = files.map((file) => {
393
- const binaryInput = BinaryInput.from(file);
394
- return binaryInput.upload(AccessCandidate.agent(agentId)).then(() => binaryInput);
395
- });
396
-
397
- return Promise.all(promises);
398
- }
399
-
400
- /**
401
- * Process image files with Chat Completions specific formatting
402
- */
403
- private async processImageData(files: BinaryInput[], agentId: string): Promise<any[]> {
404
- if (files.length === 0) return [];
405
-
406
- const imageData = [];
407
- for (const file of files) {
408
- await this.validateFileSize(file, MAX_IMAGE_SIZE, 'Image', agentId);
409
-
410
- const bufferData = await file.readData(AccessCandidate.agent(agentId));
411
- const base64Data = bufferData.toString('base64');
412
- const url = `data:${file.mimetype};base64,${base64Data}`;
413
-
414
- imageData.push({
415
- type: 'image_url',
416
- image_url: { url },
417
- });
418
- }
419
-
420
- return imageData;
421
- }
422
-
423
- /**
424
- * Process document files with Chat Completions specific formatting
425
- */
426
- private async processDocumentData(files: BinaryInput[], agentId: string): Promise<any[]> {
427
- if (files.length === 0) return [];
428
-
429
- const documentData = [];
430
- for (const file of files) {
431
- await this.validateFileSize(file, MAX_DOCUMENT_SIZE, 'Document', agentId);
432
-
433
- const bufferData = await file.readData(AccessCandidate.agent(agentId));
434
- const base64Data = bufferData.toString('base64');
435
- const fileData = `data:${file.mimetype};base64,${base64Data}`;
436
- const filename = await file.getName();
437
-
438
- documentData.push({
439
- type: 'file',
440
- file: {
441
- file_data: fileData,
442
- filename,
443
- },
444
- });
445
- }
446
-
447
- return documentData;
448
- }
449
-
450
- /**
451
- * Validate file size before processing
452
- */
453
- private async validateFileSize(file: BinaryInput, maxSize: number, fileType: string, agentId: string): Promise<void> {
454
- await file.ready();
455
- const fileInfo = await file.getJsonData(AccessCandidate.agent(agentId));
456
- if (fileInfo.size > maxSize) {
457
- throw new Error(`${fileType} file size (${fileInfo.size} bytes) exceeds maximum allowed size of ${maxSize} bytes`);
458
- }
459
- }
460
-
461
- getInterfaceName(): string {
462
- return 'chat.completions';
463
- }
464
-
465
- validateParameters(params: TLLMParams): boolean {
466
- // Basic validation for Chat Completions parameters
467
- return !!params.model && Array.isArray(params.messages);
468
- }
469
-
470
- /**
471
- * Convert system messages to user messages for models that don't support system messages
472
- */
473
- private convertSystemMessagesToUserMessages(messages: OpenAI.ChatCompletionMessageParam[]): OpenAI.ChatCompletionMessageParam[] {
474
- const convertedMessages: OpenAI.ChatCompletionMessageParam[] = [];
475
- const systemMessages: string[] = [];
476
-
477
- // Extract system messages and collect other messages
478
- for (const message of messages) {
479
- if (message.role === TLLMMessageRole.System) {
480
- const content = typeof message.content === 'string' ? message.content : '';
481
- if (content.trim()) {
482
- systemMessages.push(content);
483
- }
484
- } else {
485
- convertedMessages.push(message);
486
- }
487
- }
488
-
489
- // If we have system messages, prepend them to the first user message
490
- if (systemMessages.length > 0) {
491
- const systemContent = systemMessages.join('\n\n');
492
- const firstUserMessageIndex = convertedMessages.findIndex((msg) => msg.role === TLLMMessageRole.User);
493
-
494
- if (firstUserMessageIndex !== -1) {
495
- const userMessage = convertedMessages[firstUserMessageIndex];
496
- const existingContent = typeof userMessage.content === 'string' ? userMessage.content : '';
497
- convertedMessages[firstUserMessageIndex] = {
498
- ...userMessage,
499
- content: systemContent + '\n\n' + existingContent,
500
- };
501
- } else {
502
- // If no user message exists, create one with the system content
503
- convertedMessages.unshift({ role: TLLMMessageRole.User, content: systemContent });
504
- }
505
- }
506
-
507
- return convertedMessages;
508
- }
509
-
510
- /**
511
- * Prepare messages for Chat Completions API
512
- */
513
- private async prepareMessages(params: TLLMParams): Promise<OpenAI.ChatCompletionMessageParam[]> {
514
- const messages = params?.messages || [];
515
- const files: BinaryInput[] = params?.files || [];
516
-
517
- // Handle files if present
518
- if (files.length > 0) {
519
- return await this.handleFileAttachments(files, params.agentId, [...messages]);
520
- }
521
-
522
- return messages;
523
- }
524
- }
1
+ import EventEmitter from 'events';
2
+ import OpenAI from 'openai';
3
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
4
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
5
+ import { TLLMParams, TLLMPreparedParams, ILLMRequestContext, ToolData, TLLMMessageRole, APIKeySource, TLLMEvent } from '@sre/types/LLM.types';
6
+ import { OpenAIApiInterface, ToolConfig } from './OpenAIApiInterface';
7
+ import { HandlerDependencies } from '../types';
8
+ import { JSON_RESPONSE_INSTRUCTION, SUPPORTED_MIME_TYPES_MAP, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
9
+ import { MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT, MODELS_WITHOUT_JSON_RESPONSE_SUPPORT, O3_AND_O4_MODELS } from './constants';
10
+
11
+ import { isValidOpenAIReasoningEffort } from './utils';
12
+
13
// File size limits in bytes for inline attachments, enforced by validateFileSize()
// before the file is read and base64-encoded into the request payload.
const MAX_IMAGE_SIZE = 20 * 1024 * 1024; // 20MB
const MAX_DOCUMENT_SIZE = 25 * 1024 * 1024; // 25MB
16
+
17
+ /**
18
+ * OpenAI Chat Completions API interface implementation
19
+ * Handles all Chat Completions API-specific logic including:
20
+ * - Stream creation and handling
21
+ * - Request body preparation
22
+ * - Tool and message transformations
23
+ * - File attachment handling
24
+ */
25
+ export class ChatCompletionsApiInterface extends OpenAIApiInterface {
26
+ private deps: HandlerDependencies;
27
+ private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.image;
28
+ private validDocumentMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.document;
29
+
30
+ constructor(context: ILLMRequestContext, deps: HandlerDependencies) {
31
+ super(context);
32
+ this.deps = deps;
33
+ }
34
+
35
+ public async createRequest(body: OpenAI.ChatCompletionCreateParams, context: ILLMRequestContext): Promise<OpenAI.ChatCompletion> {
36
+ const openai = await this.deps.getClient(context);
37
+ return await openai.chat.completions.create({
38
+ ...body,
39
+ stream: false,
40
+ });
41
+ }
42
+
43
+ public async createStream(
44
+ body: OpenAI.ChatCompletionCreateParams,
45
+ context: ILLMRequestContext
46
+ ): Promise<AsyncIterable<OpenAI.ChatCompletionChunk>> {
47
+ const openai = await this.deps.getClient(context);
48
+ return await openai.chat.completions.create({
49
+ ...body,
50
+ stream: true,
51
+ stream_options: { include_usage: true },
52
+ });
53
+ }
54
+
55
+ public handleStream(stream: AsyncIterable<OpenAI.ChatCompletionChunk>, context: ILLMRequestContext): EventEmitter {
56
+ const emitter = new EventEmitter();
57
+
58
+ // Process stream asynchronously while returning emitter immediately
59
+ (async () => {
60
+ let finalToolsData: ToolData[] = [];
61
+
62
+ try {
63
+ // Step 1: Process the stream
64
+ const streamResult = await this.processStream(stream, emitter);
65
+ finalToolsData = streamResult.toolsData;
66
+
67
+ const finishReason = streamResult.finishReason || 'stop';
68
+ const usageData = streamResult.usageData;
69
+
70
+ // Step 2: Report usage statistics
71
+ const reportedUsage = this.reportUsageStatistics(usageData, context);
72
+
73
+ // Step 3: Emit final events
74
+ this.emitFinalEvents(emitter, finalToolsData, reportedUsage, finishReason);
75
+ } catch (error) {
76
+ emitter.emit('error', error);
77
+ }
78
+ })();
79
+
80
+ return emitter;
81
+ }
82
+
83
+ public async prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.ChatCompletionCreateParams> {
84
+ let messages = await this.prepareMessages(params);
85
+
86
+ // Convert system messages for models that don't support them
87
+ if (MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT.includes(params.modelEntryName)) {
88
+ messages = this.convertSystemMessagesToUserMessages(messages);
89
+ }
90
+
91
+ // Handle JSON response format
92
+ if (params.responseFormat === 'json') {
93
+ const supportsSystemMessages = !MODELS_WITHOUT_SYSTEM_MESSAGE_SUPPORT.includes(params.modelEntryName);
94
+
95
+ if (supportsSystemMessages) {
96
+ // For models that support system messages
97
+ if (messages?.[0]?.role === TLLMMessageRole.System) {
98
+ messages[0] = { ...messages[0], content: messages[0].content + JSON_RESPONSE_INSTRUCTION };
99
+ } else {
100
+ messages.unshift({ role: TLLMMessageRole.System, content: JSON_RESPONSE_INSTRUCTION });
101
+ }
102
+ } else {
103
+ // For models that don't support system messages, prepend to first user message
104
+ const firstUserMessageIndex = messages.findIndex((msg) => msg.role === TLLMMessageRole.User);
105
+ if (firstUserMessageIndex !== -1) {
106
+ const userMessage = messages[firstUserMessageIndex];
107
+ const content = typeof userMessage.content === 'string' ? userMessage.content : '';
108
+ messages[firstUserMessageIndex] = {
109
+ ...userMessage,
110
+ content: JSON_RESPONSE_INSTRUCTION + '\n\n' + content,
111
+ };
112
+ } else {
113
+ // If no user message exists, create one with the instruction
114
+ messages.push({ role: TLLMMessageRole.User, content: JSON_RESPONSE_INSTRUCTION });
115
+ }
116
+ }
117
+
118
+ params.responseFormat = { type: 'json_object' };
119
+ }
120
+
121
+ const body: OpenAI.ChatCompletionCreateParams = {
122
+ model: params.model as string,
123
+ messages,
124
+ };
125
+
126
+ // Handle max tokens
127
+ if (params?.maxTokens !== undefined) {
128
+ body.max_completion_tokens = params.maxTokens;
129
+ }
130
+
131
+ // Handle temperature
132
+ const modelName = params.modelEntryName?.replace(BUILT_IN_MODEL_PREFIX, '');
133
+ if (params?.temperature !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
134
+ body.temperature = params.temperature;
135
+ }
136
+
137
+ // Handle topP
138
+ if (params?.topP !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
139
+ body.top_p = params.topP;
140
+ }
141
+
142
+ // Handle frequency penalty
143
+ if (params?.frequencyPenalty !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
144
+ body.frequency_penalty = params.frequencyPenalty;
145
+ }
146
+
147
+ // Handle presence penalty
148
+ if (params?.presencePenalty !== undefined && !O3_AND_O4_MODELS.includes(modelName)) {
149
+ body.presence_penalty = params.presencePenalty;
150
+ }
151
+
152
+ // Handle response format
153
+ if (params?.responseFormat?.type && !MODELS_WITHOUT_JSON_RESPONSE_SUPPORT.includes(modelName)) {
154
+ body.response_format = params.responseFormat;
155
+ }
156
+
157
+ // Handle stop sequences
158
+ if (params?.stopSequences?.length && !O3_AND_O4_MODELS.includes(modelName)) {
159
+ body.stop = params.stopSequences;
160
+ }
161
+
162
+ // #region GPT 5 specific fields
163
+ const isGPT5ReasoningModels = params.modelEntryName?.includes('gpt-5') && params?.capabilities?.reasoning;
164
+ if (isGPT5ReasoningModels && params?.verbosity) {
165
+ body.verbosity = params.verbosity;
166
+ }
167
+
168
+ // We need to validate the `reasoningEffort` parameter for OpenAI models, since models like `qwen/qwen3-32b` and `deepseek-r1-distill-llama-70b` (available via Groq) also support this parameter but use different values, such as `none` and `default`. These values are valid in our system but not specifically for OpenAI.
169
+ if (isGPT5ReasoningModels && isValidOpenAIReasoningEffort(params.reasoningEffort)) {
170
+ body.reasoning_effort = params.reasoningEffort;
171
+ }
172
+ // #endregion GPT 5 specific fields
173
+
174
+ // Handle tools configuration
175
+ if (params?.toolsConfig?.tools && params?.toolsConfig?.tools?.length > 0) {
176
+ body.tools = params?.toolsConfig?.tools as OpenAI.ChatCompletionTool[];
177
+ body.tool_choice = params?.toolsConfig?.tool_choice;
178
+ }
179
+
180
+ return body;
181
+ }
182
+
183
+ /**
184
+ * Transform OpenAI tool definitions to ChatCompletionTool format
185
+ */
186
+ public transformToolsConfig(config: ToolConfig): OpenAI.ChatCompletionTool[] {
187
+ return config.toolDefinitions.map((tool) => {
188
+ // Handle OpenAI tool definition format
189
+ if ('parameters' in tool) {
190
+ return {
191
+ type: 'function' as const,
192
+ function: {
193
+ name: tool.name,
194
+ description: tool.description,
195
+ parameters: tool.parameters,
196
+ },
197
+ };
198
+ }
199
+
200
+ // Handle legacy format for backward compatibility
201
+ return {
202
+ type: 'function' as const,
203
+ function: {
204
+ name: tool.name,
205
+ description: tool.description,
206
+ parameters: {
207
+ type: 'object',
208
+ properties: tool.properties || {},
209
+ required: tool.requiredFields || [],
210
+ },
211
+ },
212
+ };
213
+ });
214
+ }
215
+
216
+ public async handleFileAttachments(
217
+ files: BinaryInput[],
218
+ agentId: string,
219
+ messages: OpenAI.ChatCompletionMessageParam[]
220
+ ): Promise<OpenAI.ChatCompletionMessageParam[]> {
221
+ if (files.length === 0) return messages;
222
+
223
+ const uploadedFiles = await this.uploadFiles(files, agentId);
224
+ const validImageFiles = this.getValidImageFiles(uploadedFiles);
225
+ const validDocumentFiles = this.getValidDocumentFiles(uploadedFiles);
226
+
227
+ // Process images and documents with Chat Completions specific formatting
228
+ const imageData = await this.processImageData(validImageFiles, agentId);
229
+ const documentData = await this.processDocumentData(validDocumentFiles, agentId);
230
+
231
+ // For Chat Completions, we modify the last user message
232
+ const messagesCopy = [...messages];
233
+ const userMessage =
234
+ Array.isArray(messagesCopy) && messagesCopy.length > 0 ? messagesCopy[messagesCopy.length - 1] : { role: 'user', content: '' };
235
+ const prompt = userMessage?.content && typeof userMessage.content === 'string' ? userMessage.content : '';
236
+
237
+ const promptData = [{ type: 'text', text: prompt || '' }, ...imageData, ...documentData];
238
+
239
+ // Replace the last message or add a new one if array was empty
240
+ if (messagesCopy.length > 0) {
241
+ messagesCopy[messagesCopy.length - 1] = { role: 'user', content: promptData };
242
+ } else {
243
+ messagesCopy.push({ role: 'user', content: promptData });
244
+ }
245
+
246
+ return messagesCopy;
247
+ }
248
+
249
+ /**
250
+ * Process the chat completions API stream format
251
+ */
252
+ private async processStream(
253
+ stream: AsyncIterable<OpenAI.ChatCompletionChunk>,
254
+ emitter: EventEmitter
255
+ ): Promise<{ toolsData: ToolData[]; finishReason: string; usageData: any[] }> {
256
+ let toolsData: ToolData[] = [];
257
+ let finishReason = 'stop';
258
+ const usageData = [];
259
+
260
+ for await (const part of stream) {
261
+ const delta = part.choices[0]?.delta;
262
+ const usage = part.usage;
263
+
264
+ // Collect usage statistics
265
+ if (usage) {
266
+ usageData.push(usage);
267
+ }
268
+
269
+ // Emit data event for delta
270
+ emitter.emit('data', delta);
271
+
272
+ // Handle content deltas
273
+ if (!delta?.tool_calls && delta?.content) {
274
+ emitter.emit('content', delta?.content, delta?.role);
275
+ }
276
+
277
+ // Handle tool calls
278
+ if (delta?.tool_calls) {
279
+ const toolCall = delta?.tool_calls?.[0];
280
+ const index = toolCall?.index;
281
+
282
+ if (!toolsData[index]) {
283
+ toolsData[index] = {
284
+ index: index || 0,
285
+ id: '',
286
+ type: 'function',
287
+ name: '',
288
+ arguments: '',
289
+ role: 'tool',
290
+ };
291
+ }
292
+
293
+ if (toolCall?.function?.name) {
294
+ toolsData[index].name = toolCall.function.name;
295
+ }
296
+ if (toolCall?.function?.arguments) {
297
+ toolsData[index].arguments += toolCall.function.arguments;
298
+ }
299
+ if (toolCall?.id) {
300
+ toolsData[index].id = toolCall.id;
301
+ }
302
+ }
303
+
304
+ // Handle finish reason
305
+ if (part.choices[0]?.finish_reason) {
306
+ finishReason = part.choices[0].finish_reason;
307
+ }
308
+ }
309
+
310
+ return { toolsData: this.extractToolCalls(toolsData), finishReason, usageData };
311
+ }
312
+
313
+ /**
314
+ * Extract and format tool calls from the accumulated data
315
+ */
316
+ private extractToolCalls(toolsData: ToolData[]): ToolData[] {
317
+ return toolsData.map((tool) => ({
318
+ index: tool.index,
319
+ name: tool.name,
320
+ arguments: tool.arguments,
321
+ id: tool.id,
322
+ type: tool.type,
323
+ role: tool.role,
324
+ }));
325
+ }
326
+
327
+ /**
328
+ * Report usage statistics
329
+ */
330
+ private reportUsageStatistics(usage_data: OpenAI.Completions.CompletionUsage[], context: ILLMRequestContext): any[] {
331
+ const reportedUsage: any[] = [];
332
+
333
+ // Report normal usage
334
+ usage_data.forEach((usage) => {
335
+ const reported = this.deps.reportUsage(usage, this.buildUsageContext(context));
336
+ reportedUsage.push(reported);
337
+ });
338
+
339
+ return reportedUsage;
340
+ }
341
+
342
+ /**
343
+ * Emit final events
344
+ */
345
+ private emitFinalEvents(emitter: EventEmitter, toolsData: ToolData[], reportedUsage: any[], finishReason: string): void {
346
+ // Emit tool info event if tools were called
347
+ if (toolsData.length > 0) {
348
+ emitter.emit(TLLMEvent.ToolInfo, toolsData);
349
+ }
350
+
351
+ // Emit interrupted event if finishReason is not 'stop'
352
+ if (finishReason !== 'stop') {
353
+ emitter.emit('interrupted', finishReason);
354
+ }
355
+
356
+ // Emit end event with setImmediate to ensure proper event ordering
357
+ setImmediate(() => {
358
+ emitter.emit('end', toolsData, reportedUsage, finishReason);
359
+ });
360
+ }
361
+
362
+ /**
363
+ * Build usage context parameters from request context
364
+ */
365
+ private buildUsageContext(context: ILLMRequestContext) {
366
+ return {
367
+ modelEntryName: context.modelEntryName,
368
+ keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
369
+ agentId: context.agentId,
370
+ teamId: context.teamId,
371
+ };
372
+ }
373
+
374
+ /**
375
+ * Get valid image files based on supported MIME types
376
+ */
377
+ private getValidImageFiles(files: BinaryInput[]): BinaryInput[] {
378
+ return files.filter((file) => this.validImageMimeTypes.includes(file?.mimetype));
379
+ }
380
+
381
+ /**
382
+ * Get valid document files based on supported MIME types
383
+ */
384
+ private getValidDocumentFiles(files: BinaryInput[]): BinaryInput[] {
385
+ return files.filter((file) => this.validDocumentMimeTypes.includes(file?.mimetype));
386
+ }
387
+
388
+ /**
389
+ * Upload files to storage
390
+ */
391
+ private async uploadFiles(files: BinaryInput[], agentId: string): Promise<BinaryInput[]> {
392
+ const promises = files.map((file) => {
393
+ const binaryInput = BinaryInput.from(file);
394
+ return binaryInput.upload(AccessCandidate.agent(agentId)).then(() => binaryInput);
395
+ });
396
+
397
+ return Promise.all(promises);
398
+ }
399
+
400
+ /**
401
+ * Process image files with Chat Completions specific formatting
402
+ */
403
+ private async processImageData(files: BinaryInput[], agentId: string): Promise<any[]> {
404
+ if (files.length === 0) return [];
405
+
406
+ const imageData = [];
407
+ for (const file of files) {
408
+ await this.validateFileSize(file, MAX_IMAGE_SIZE, 'Image', agentId);
409
+
410
+ const bufferData = await file.readData(AccessCandidate.agent(agentId));
411
+ const base64Data = bufferData.toString('base64');
412
+ const url = `data:${file.mimetype};base64,${base64Data}`;
413
+
414
+ imageData.push({
415
+ type: 'image_url',
416
+ image_url: { url },
417
+ });
418
+ }
419
+
420
+ return imageData;
421
+ }
422
+
423
+ /**
424
+ * Process document files with Chat Completions specific formatting
425
+ */
426
+ private async processDocumentData(files: BinaryInput[], agentId: string): Promise<any[]> {
427
+ if (files.length === 0) return [];
428
+
429
+ const documentData = [];
430
+ for (const file of files) {
431
+ await this.validateFileSize(file, MAX_DOCUMENT_SIZE, 'Document', agentId);
432
+
433
+ const bufferData = await file.readData(AccessCandidate.agent(agentId));
434
+ const base64Data = bufferData.toString('base64');
435
+ const fileData = `data:${file.mimetype};base64,${base64Data}`;
436
+ const filename = await file.getName();
437
+
438
+ documentData.push({
439
+ type: 'file',
440
+ file: {
441
+ file_data: fileData,
442
+ filename,
443
+ },
444
+ });
445
+ }
446
+
447
+ return documentData;
448
+ }
449
+
450
+ /**
451
+ * Validate file size before processing
452
+ */
453
+ private async validateFileSize(file: BinaryInput, maxSize: number, fileType: string, agentId: string): Promise<void> {
454
+ await file.ready();
455
+ const fileInfo = await file.getJsonData(AccessCandidate.agent(agentId));
456
+ if (fileInfo.size > maxSize) {
457
+ throw new Error(`${fileType} file size (${fileInfo.size} bytes) exceeds maximum allowed size of ${maxSize} bytes`);
458
+ }
459
+ }
460
+
461
+ getInterfaceName(): string {
462
+ return 'chat.completions';
463
+ }
464
+
465
+ validateParameters(params: TLLMParams): boolean {
466
+ // Basic validation for Chat Completions parameters
467
+ return !!params.model && Array.isArray(params.messages);
468
+ }
469
+
470
+ /**
471
+ * Convert system messages to user messages for models that don't support system messages
472
+ */
473
+ private convertSystemMessagesToUserMessages(messages: OpenAI.ChatCompletionMessageParam[]): OpenAI.ChatCompletionMessageParam[] {
474
+ const convertedMessages: OpenAI.ChatCompletionMessageParam[] = [];
475
+ const systemMessages: string[] = [];
476
+
477
+ // Extract system messages and collect other messages
478
+ for (const message of messages) {
479
+ if (message.role === TLLMMessageRole.System) {
480
+ const content = typeof message.content === 'string' ? message.content : '';
481
+ if (content.trim()) {
482
+ systemMessages.push(content);
483
+ }
484
+ } else {
485
+ convertedMessages.push(message);
486
+ }
487
+ }
488
+
489
+ // If we have system messages, prepend them to the first user message
490
+ if (systemMessages.length > 0) {
491
+ const systemContent = systemMessages.join('\n\n');
492
+ const firstUserMessageIndex = convertedMessages.findIndex((msg) => msg.role === TLLMMessageRole.User);
493
+
494
+ if (firstUserMessageIndex !== -1) {
495
+ const userMessage = convertedMessages[firstUserMessageIndex];
496
+ const existingContent = typeof userMessage.content === 'string' ? userMessage.content : '';
497
+ convertedMessages[firstUserMessageIndex] = {
498
+ ...userMessage,
499
+ content: systemContent + '\n\n' + existingContent,
500
+ };
501
+ } else {
502
+ // If no user message exists, create one with the system content
503
+ convertedMessages.unshift({ role: TLLMMessageRole.User, content: systemContent });
504
+ }
505
+ }
506
+
507
+ return convertedMessages;
508
+ }
509
+
510
+ /**
511
+ * Prepare messages for Chat Completions API
512
+ */
513
+ private async prepareMessages(params: TLLMParams): Promise<OpenAI.ChatCompletionMessageParam[]> {
514
+ const messages = params?.messages || [];
515
+ const files: BinaryInput[] = params?.files || [];
516
+
517
+ // Handle files if present
518
+ if (files.length > 0) {
519
+ return await this.handleFileAttachments(files, params.agentId, [...messages]);
520
+ }
521
+
522
+ return messages;
523
+ }
524
+ }