@smythos/sre 1.6.8 → 1.6.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (239)
  1. package/CHANGELOG +111 -111
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/bundle-analysis-lazy.html +4949 -0
  5. package/dist/bundle-analysis.html +4949 -0
  6. package/dist/index.js +2 -2
  7. package/dist/index.js.map +1 -1
  8. package/dist/types/Components/Triggers/Gmail.trigger.d.ts +58 -0
  9. package/dist/types/Components/Triggers/GmailTrigger.class.d.ts +44 -0
  10. package/dist/types/Components/Triggers/Trigger.class.d.ts +21 -0
  11. package/dist/types/Components/Triggers/WhatsApp.trigger.d.ts +22 -0
  12. package/dist/types/helpers/AIPerformanceAnalyzer.helper.d.ts +45 -0
  13. package/dist/types/helpers/AIPerformanceCollector.helper.d.ts +111 -0
  14. package/dist/types/subsystems/IO/Storage.service/connectors/AzureBlobStorage.class.d.ts +211 -0
  15. package/dist/types/subsystems/IO/VectorDB.service/connectors/WeaviateVectorDB.class.d.ts +187 -0
  16. package/dist/types/subsystems/PerformanceManager/Performance.service/PerformanceConnector.d.ts +102 -0
  17. package/dist/types/subsystems/PerformanceManager/Performance.service/connectors/LocalPerformanceConnector.class.d.ts +100 -0
  18. package/dist/types/subsystems/PerformanceManager/Performance.service/index.d.ts +22 -0
  19. package/dist/types/subsystems/Security/Credentials/Credentials.class.d.ts +2 -0
  20. package/dist/types/subsystems/Security/Credentials/ManagedOAuth2Credentials.class.d.ts +18 -0
  21. package/dist/types/subsystems/Security/Credentials/OAuth2Credentials.class.d.ts +14 -0
  22. package/dist/types/types/Performance.types.d.ts +468 -0
  23. package/dist/types/utils/package-manager.utils.d.ts +26 -0
  24. package/package.json +1 -1
  25. package/src/Components/APICall/APICall.class.ts +161 -161
  26. package/src/Components/APICall/AccessTokenManager.ts +166 -166
  27. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  28. package/src/Components/APICall/OAuth.helper.ts +447 -447
  29. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  30. package/src/Components/APICall/parseData.ts +167 -167
  31. package/src/Components/APICall/parseHeaders.ts +41 -41
  32. package/src/Components/APICall/parseProxy.ts +68 -68
  33. package/src/Components/APICall/parseUrl.ts +91 -91
  34. package/src/Components/APIEndpoint.class.ts +234 -234
  35. package/src/Components/APIOutput.class.ts +58 -58
  36. package/src/Components/AgentPlugin.class.ts +102 -102
  37. package/src/Components/Async.class.ts +155 -155
  38. package/src/Components/Await.class.ts +90 -90
  39. package/src/Components/Classifier.class.ts +158 -158
  40. package/src/Components/Component.class.ts +147 -147
  41. package/src/Components/ComponentHost.class.ts +38 -38
  42. package/src/Components/DataSourceCleaner.class.ts +92 -92
  43. package/src/Components/DataSourceIndexer.class.ts +181 -181
  44. package/src/Components/DataSourceLookup.class.ts +161 -161
  45. package/src/Components/ECMASandbox.class.ts +72 -72
  46. package/src/Components/FEncDec.class.ts +29 -29
  47. package/src/Components/FHash.class.ts +33 -33
  48. package/src/Components/FSign.class.ts +80 -80
  49. package/src/Components/FSleep.class.ts +25 -25
  50. package/src/Components/FTimestamp.class.ts +66 -66
  51. package/src/Components/FileStore.class.ts +78 -78
  52. package/src/Components/ForEach.class.ts +97 -97
  53. package/src/Components/GPTPlugin.class.ts +70 -70
  54. package/src/Components/GenAILLM.class.ts +586 -586
  55. package/src/Components/HuggingFace.class.ts +313 -313
  56. package/src/Components/Image/imageSettings.config.ts +70 -70
  57. package/src/Components/ImageGenerator.class.ts +483 -483
  58. package/src/Components/JSONFilter.class.ts +54 -54
  59. package/src/Components/LLMAssistant.class.ts +213 -213
  60. package/src/Components/LogicAND.class.ts +28 -28
  61. package/src/Components/LogicAtLeast.class.ts +85 -85
  62. package/src/Components/LogicAtMost.class.ts +86 -86
  63. package/src/Components/LogicOR.class.ts +29 -29
  64. package/src/Components/LogicXOR.class.ts +34 -34
  65. package/src/Components/MCPClient.class.ts +137 -137
  66. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  67. package/src/Components/MemoryReadKeyVal.class.ts +67 -67
  68. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  69. package/src/Components/MemoryWriteObject.class.ts +97 -97
  70. package/src/Components/MultimodalLLM.class.ts +128 -128
  71. package/src/Components/OpenAPI.class.ts +72 -72
  72. package/src/Components/PromptGenerator.class.ts +122 -122
  73. package/src/Components/ScrapflyWebScrape.class.ts +183 -183
  74. package/src/Components/ServerlessCode.class.ts +123 -123
  75. package/src/Components/TavilyWebSearch.class.ts +103 -103
  76. package/src/Components/VisionLLM.class.ts +104 -104
  77. package/src/Components/ZapierAction.class.ts +127 -127
  78. package/src/Components/index.ts +97 -97
  79. package/src/Core/AgentProcess.helper.ts +240 -240
  80. package/src/Core/Connector.class.ts +123 -123
  81. package/src/Core/ConnectorsService.ts +197 -197
  82. package/src/Core/DummyConnector.ts +49 -49
  83. package/src/Core/HookService.ts +105 -105
  84. package/src/Core/SmythRuntime.class.ts +241 -241
  85. package/src/Core/SystemEvents.ts +16 -16
  86. package/src/Core/boot.ts +56 -56
  87. package/src/config.ts +15 -15
  88. package/src/constants.ts +126 -126
  89. package/src/data/hugging-face.params.json +579 -579
  90. package/src/helpers/AWSLambdaCode.helper.ts +624 -624
  91. package/src/helpers/BinaryInput.helper.ts +331 -331
  92. package/src/helpers/Conversation.helper.ts +1157 -1157
  93. package/src/helpers/ECMASandbox.helper.ts +64 -64
  94. package/src/helpers/JsonContent.helper.ts +97 -97
  95. package/src/helpers/LocalCache.helper.ts +97 -97
  96. package/src/helpers/Log.helper.ts +274 -274
  97. package/src/helpers/OpenApiParser.helper.ts +150 -150
  98. package/src/helpers/S3Cache.helper.ts +147 -147
  99. package/src/helpers/SmythURI.helper.ts +5 -5
  100. package/src/helpers/Sysconfig.helper.ts +95 -95
  101. package/src/helpers/TemplateString.helper.ts +243 -243
  102. package/src/helpers/TypeChecker.helper.ts +329 -329
  103. package/src/index.ts +3 -3
  104. package/src/index.ts.bak +3 -3
  105. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  106. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  107. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  108. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  109. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +145 -145
  110. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  111. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  112. package/src/subsystems/AgentManager/AgentLogger.class.ts +301 -301
  113. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  114. package/src/subsystems/AgentManager/AgentRuntime.class.ts +557 -557
  115. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  116. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  117. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  118. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  119. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  120. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  121. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  122. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  123. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  124. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +171 -171
  125. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  126. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  127. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  128. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  129. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  130. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  131. package/src/subsystems/IO/Log.service/index.ts +13 -13
  132. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  133. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  134. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  135. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  136. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  137. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  138. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  139. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  140. package/src/subsystems/IO/Router.service/index.ts +11 -11
  141. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +488 -488
  142. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  143. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  144. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  145. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  146. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  147. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +465 -465
  148. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +387 -387
  149. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +408 -408
  150. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  151. package/src/subsystems/IO/VectorDB.service/embed/GoogleEmbedding.ts +118 -118
  152. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  153. package/src/subsystems/IO/VectorDB.service/embed/index.ts +26 -26
  154. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  155. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  156. package/src/subsystems/LLMManager/LLM.inference.ts +345 -345
  157. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +492 -492
  158. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  159. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +666 -666
  160. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +407 -407
  161. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +92 -92
  162. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +983 -983
  163. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +319 -319
  164. package/src/subsystems/LLMManager/LLM.service/connectors/Ollama.class.ts +361 -361
  165. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +257 -257
  166. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +430 -430
  167. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +503 -503
  168. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  169. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  170. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  171. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  172. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  173. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  174. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  175. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  176. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +478 -478
  177. package/src/subsystems/LLMManager/LLM.service/index.ts +47 -47
  178. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +303 -303
  179. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +280 -271
  180. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  181. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  182. package/src/subsystems/LLMManager/models.ts +2540 -2540
  183. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  184. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  185. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  186. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +214 -214
  187. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  188. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  189. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  190. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  191. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  192. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  193. package/src/subsystems/MemoryManager/RuntimeContext.ts +277 -277
  194. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  195. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  196. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  197. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  198. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  199. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +170 -170
  200. package/src/subsystems/Security/Account.service/connectors/MySQLAccount.class.ts +76 -76
  201. package/src/subsystems/Security/Account.service/index.ts +14 -14
  202. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  203. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  204. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  205. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  206. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  207. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  208. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  209. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  210. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  211. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  212. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  213. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  214. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  215. package/src/types/ACL.types.ts +104 -104
  216. package/src/types/AWS.types.ts +10 -10
  217. package/src/types/Agent.types.ts +61 -61
  218. package/src/types/AgentLogger.types.ts +17 -17
  219. package/src/types/Cache.types.ts +1 -1
  220. package/src/types/Common.types.ts +2 -2
  221. package/src/types/LLM.types.ts +520 -520
  222. package/src/types/Redis.types.ts +8 -8
  223. package/src/types/SRE.types.ts +64 -64
  224. package/src/types/Security.types.ts +14 -14
  225. package/src/types/Storage.types.ts +5 -5
  226. package/src/types/VectorDB.types.ts +86 -86
  227. package/src/utils/base64.utils.ts +275 -275
  228. package/src/utils/cli.utils.ts +68 -68
  229. package/src/utils/data.utils.ts +322 -322
  230. package/src/utils/date-time.utils.ts +22 -22
  231. package/src/utils/general.utils.ts +238 -238
  232. package/src/utils/index.ts +12 -12
  233. package/src/utils/lazy-client.ts +261 -261
  234. package/src/utils/numbers.utils.ts +13 -13
  235. package/src/utils/oauth.utils.ts +35 -35
  236. package/src/utils/string.utils.ts +414 -414
  237. package/src/utils/url.utils.ts +19 -19
  238. package/src/utils/validation.utils.ts +74 -74
  239. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
@@ -1,503 +1,503 @@
+ import EventEmitter from 'events';
+ import OpenAI from 'openai';
+ import { toFile } from 'openai';
+ import { encodeChat } from 'gpt-tokenizer';
+
+ import { BUILT_IN_MODEL_PREFIX } from '@sre/constants';
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
+ import { AccessRequest } from '@sre/Security/AccessControl/AccessRequest.class';
+ import { LLMHelper } from '@sre/LLMManager/LLM.helper';
+
+ import {
+     TLLMParams,
+     ToolData,
+     TLLMMessageBlock,
+     TLLMToolResultMessageBlock,
+     TLLMMessageRole,
+     APIKeySource,
+     ILLMRequestFuncParams,
+     TOpenAIRequestBody,
+     TLLMChatResponse,
+     ILLMRequestContext,
+     BasicCredentials,
+     TLLMPreparedParams,
+ } from '@sre/types/LLM.types';
+
+ import { LLMConnector } from '../../LLMConnector';
+ import { SystemEvents } from '@sre/Core/SystemEvents';
+ import { ConnectorService } from '@sre/Core/ConnectorsService';
+ import { HandlerDependencies, TToolType } from './types';
+ import { OpenAIApiInterfaceFactory, OpenAIApiInterface } from './apiInterfaces';
+ import { Logger } from '@sre/helpers/Log.helper';
+
+ const logger = Logger('OpenAIConnector');
+
+ export class OpenAIConnector extends LLMConnector {
+     public name = 'LLM:OpenAI';
+
+     private interfaceFactory: OpenAIApiInterfaceFactory;
+
+     constructor() {
+         super();
+
+         this.interfaceFactory = new OpenAIApiInterfaceFactory();
+     }
+
+     /**
+      * Get the appropriate API interface for the given interface type and context
+      */
+     private getApiInterface(interfaceType: string, context: ILLMRequestContext): OpenAIApiInterface {
+         const deps: HandlerDependencies = {
+             getClient: (context) => this.getClient(context),
+             reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
+         };
+
+         return this.interfaceFactory.createInterface(interfaceType, context, deps);
+     }
+
+     /**
+      * Determine the appropriate interface type based on context and capabilities
+      */
+     private getInterfaceType(context: ILLMRequestContext): string {
+         // Start with model-specified interface or default
+         let responseInterface = this.interfaceFactory.getInterfaceTypeFromModelInfo(context.modelInfo);
+
+         // Auto-switch to Responses API when web search is enabled
+         if (context.toolsInfo?.openai?.webSearch?.enabled === true) {
+             responseInterface = 'responses';
+         }
+
+         return responseInterface;
+     }
+
+     protected async getClient(params: ILLMRequestContext): Promise<OpenAI> {
+         const apiKey = (params.credentials as BasicCredentials)?.apiKey;
+         const baseURL = params?.modelInfo?.baseURL;
+
+         if (!apiKey) throw new Error('Please provide an API key for OpenAI');
+
+         const openai = new OpenAI({ baseURL, apiKey });
+
+         return openai;
+     }
+
+     protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
+         try {
+             logger.debug(`request ${this.name}`, acRequest.candidate);
+             const _body = body as OpenAI.ChatCompletionCreateParams;
+
+             // #region Validate token limit
+             const messages = _body?.messages || [];
+             const lastMessage = messages[messages.length - 1];
+             const promptTokens = await this.computePromptTokens(messages, context);
+
+             await this.validateTokenLimit({
+                 acRequest,
+                 promptTokens,
+                 context,
+                 maxTokens: _body.max_completion_tokens,
+             });
+             // #endregion Validate token limit
+
+             const responseInterface = this.getInterfaceType(context);
+             const apiInterface = this.getApiInterface(responseInterface, context);
+
+             const result = await apiInterface.createRequest(body, context);
+
+             const message = result?.choices?.[0]?.message || { content: result?.output_text };
+             const finishReason = result?.choices?.[0]?.finish_reason || result?.incomplete_details || 'stop';
+
+             let toolsData: ToolData[] = [];
+             let useTool = false;
+
+             if (finishReason === 'tool_calls') {
+                 toolsData =
+                     message?.tool_calls?.map((tool, index) => ({
+                         index,
+                         id: tool?.id,
+                         type: tool?.type,
+                         name: tool?.function?.name,
+                         arguments: tool?.function?.arguments,
+                         role: 'tool',
+                     })) || [];
+
+                 useTool = true;
+             }
+
+             const usage = result?.usage;
+             this.reportUsage(usage, {
+                 modelEntryName: context.modelEntryName,
+                 keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
+                 agentId: context.agentId,
+                 teamId: context.teamId,
+             });
+
+             return {
+                 content: message?.content ?? '',
+                 finishReason,
+                 useTool,
+                 toolsData,
+                 message,
+                 usage,
+             };
+         } catch (error) {
+             logger.error(`request ${this.name}`, error, acRequest.candidate);
+             throw error;
+         }
+     }
+
+     protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
+         try {
+             logger.debug(`streamRequest ${this.name}`, acRequest.candidate);
+             // #region Validate token limit
+             const messages = body?.messages || body?.input || [];
+             const lastMessage = messages[messages.length - 1];
+             const promptTokens = await this.computePromptTokens(messages, context);
+
+             await this.validateTokenLimit({
+                 acRequest,
+                 promptTokens,
+                 context,
+                 maxTokens: body.max_completion_tokens,
+             });
+             // #endregion Validate token limit
+
+             const responseInterface = this.getInterfaceType(context);
+             const apiInterface = this.getApiInterface(responseInterface, context);
+
+             const stream = await apiInterface.createStream(body, context);
+
+             const emitter = apiInterface.handleStream(stream, context);
+
+             return emitter;
+         } catch (error) {
+             logger.error(`streamRequest ${this.name}`, error, acRequest.candidate);
+             throw error;
+         }
+     }
+
+     // #region Image Generation, will be moved to a different subsystem
+     protected async imageGenRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
+         const openai = await this.getClient(context);
+         const response = await openai.images.generate(body as OpenAI.Images.ImageGenerateParams);
+
+         return response as OpenAI.ImagesResponse;
+     }
+
+     protected async imageEditRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
+         const _body = body as OpenAI.Images.ImageEditParams;
+
+         const openai = await this.getClient(context);
+         const response = await openai.images.edit(_body);
+
+         return response as OpenAI.ImagesResponse;
+     }
+     // #endregion
+
+     public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto', modelInfo = null }) {
+         let tools = [];
+
+         if (toolDefinitions && toolDefinitions.length > 0) {
+             const interfaceType = modelInfo?.interface || 'chat.completions';
+
+             const tempContext: ILLMRequestContext = {
+                 modelEntryName: '',
+                 agentId: '',
+                 teamId: '',
+                 isUserKey: false,
+                 modelInfo,
+                 credentials: null,
+             } as ILLMRequestContext;
+
+             const deps: HandlerDependencies = {
+                 getClient: (context) => this.getClient(context),
+                 reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
+             };
+
+             const apiInterface = this.interfaceFactory.createInterface(interfaceType, tempContext, deps);
+
+             // Transform tools using the interface
+             tools = apiInterface.transformToolsConfig({
+                 type,
+                 toolDefinitions,
+                 toolChoice: toolChoice as OpenAI.ChatCompletionToolChoiceOption,
+                 modelInfo,
+             });
+         }
+
+         return tools?.length > 0 ? { tools, tool_choice: toolChoice || 'auto' } : {};
+     }
+
+     public transformToolMessageBlocks({
+         messageBlock,
+         toolsData,
+     }: {
+         messageBlock: TLLMMessageBlock;
+         toolsData: ToolData[];
+     }): TLLMToolResultMessageBlock[] {
+         const messageBlocks: TLLMToolResultMessageBlock[] = [];
+
+         if (messageBlock) {
+             const transformedMessageBlock = {
+                 ...messageBlock,
+                 content: typeof messageBlock.content === 'object' ? JSON.stringify(messageBlock.content) : messageBlock.content,
+             };
+             if (transformedMessageBlock.tool_calls) {
+                 for (let toolCall of transformedMessageBlock.tool_calls) {
+                     toolCall.function.arguments =
+                         typeof toolCall.function.arguments === 'object' ? JSON.stringify(toolCall.function.arguments) : toolCall.function.arguments;
+                 }
+             }
+             messageBlocks.push(transformedMessageBlock);
+         }
+
+         const transformedToolsData = toolsData.map((toolData) => ({
+             tool_call_id: toolData.id,
+             role: TLLMMessageRole.Tool, // toolData.role as TLLMMessageRole, //should always be 'tool' for OpenAI
+             name: toolData.name,
+             content: typeof toolData.result === 'string' ? toolData.result : JSON.stringify(toolData.result), // Ensure content is a string
+         }));
+
+         return [...messageBlocks, ...transformedToolsData];
+     }
+
+     public getConsistentMessages(messages) {
+         const _messages = LLMHelper.removeDuplicateUserMessages(messages);
+
+         return _messages.map((message) => {
+             const _message = { ...message };
+             let textContent = '';
+
+             if (message?.parts) {
+                 textContent = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
+             } else if (Array.isArray(message?.content)) {
+                 textContent = message.content.map((textBlock) => textBlock?.text || '').join(' ');
+             } else if (message?.content) {
+                 textContent = message.content;
+             }
+
+             _message.content = textContent;
+
+             return _message;
+         });
+     }
+
+     private async validateTokenLimit({
+         acRequest,
+         maxTokens,
+         promptTokens,
+         context,
+     }: {
+         acRequest: AccessRequest;
+         maxTokens: number;
+         promptTokens: number;
+         context: ILLMRequestContext;
+     }): Promise<void> {
+         const provider = await this.getProvider(acRequest, context.modelEntryName);
+
+         await provider.validateTokensLimit({
+             model: context.modelInfo,
+             promptTokens,
+             completionTokens: maxTokens,
+             hasAPIKey: context.isUserKey,
+         });
+     }
+
+     private async getProvider(acRequest: AccessRequest, modelEntryName: string) {
+         const modelsProviderConnector = ConnectorService.getModelsProviderConnector();
+         const modelsProvider = modelsProviderConnector.requester(acRequest.candidate as AccessCandidate);
+
+         return modelsProvider;
+     }
+
+     /**
+      * Safely compute prompt token count across different interfaces (Chat Completions, Responses)
+      * - Normalizes message content to strings for encodeChat
+      * - Handles vision prompts when files are present
+      * - Never throws; defaults to 0 on failure
+      */
+     private async computePromptTokens(messages: any[], context: ILLMRequestContext): Promise<number> {
+         try {
+             if (context?.hasFiles) {
+                 const lastMessage = messages?.[messages?.length - 1] || {};
+                 const lastContent = lastMessage?.content ?? '';
+                 return await LLMHelper.countVisionPromptTokens(lastContent || '');
+             }
+
+             const normalized = (messages || [])
+                 .map((m) => {
+                     if (!m || !m.role) return null;
+                     let content = '';
+                     if (Array.isArray(m.content)) {
+                         content = m.content.map((b) => (typeof b?.text === 'string' ? b.text : '')).join(' ');
+                     } else if (typeof m.content === 'string') {
+                         content = m.content;
+                     } else if (m.content !== undefined && m.content !== null) {
+                         try {
+                             content = JSON.stringify(m.content);
+                         } catch (_) {
+                             content = '';
+                         }
+                     }
+                     return { role: m.role, content };
+                 })
+                 .filter(Boolean);
+
+             return encodeChat(normalized as any, 'gpt-4')?.length || 0;
+         } catch (_) {
+             return 0;
+         }
+     }
+
+     /**
+      * Prepare request body for OpenAI Responses API
+      * Uses MessageTransformer and ToolsTransformer for clean interface transformations
+      */
+
+     private async prepareImageGenerationBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageGenerateParams> {
+         const { model, size, quality, n, responseFormat, style } = params;
+
+         const body: OpenAI.Images.ImageGenerateParams = {
+             prompt: params.prompt,
+             model: model as string,
+             size: size as OpenAI.Images.ImageGenerateParams['size'],
+             n: n || 1,
+         };
+
+         if (quality) {
+             body.quality = quality;
+         }
+
+         if (style) {
+             body.style = style;
+         }
+
+         return body;
+     }
+
+     private async prepareImageEditBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageEditParams> {
+         const { model, size, n, responseFormat } = params;
+
+         const body: OpenAI.Images.ImageEditParams = {
+             prompt: params.prompt,
+             model: model as string,
+             size: size as OpenAI.Images.ImageEditParams['size'],
+             n: n || 1,
+             image: null,
+         };
+
+         const files: BinaryInput[] = params?.files || [];
+
+         if (files.length > 0) {
+             const images = await Promise.all(
+                 files.map(
+                     async (file) =>
+                         await toFile(await file.getReadStream(), await file.getName(), {
+                             type: file.mimetype,
+                         })
+                 )
+             );
+
+             // Assign only the first image file as required by the OpenAI image-edit endpoint
+             body.image = images[0];
+         }
+
+         return body;
+     }
+
+     protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TOpenAIRequestBody> {
+         // Handle special capabilities first (these override interface type)
+         if (params.capabilities?.imageGeneration === true) {
+             const capabilityType = params?.files?.length > 0 ? 'image-edit' : 'image-generation';
+             return this.prepareRequestBody(params, capabilityType);
+         }
+
+         // Create a minimal context to use the same interface selection logic
+         const minimalContext: ILLMRequestContext = {
+             modelInfo: params.modelInfo,
+             toolsInfo: params.toolsInfo,
+         } as ILLMRequestContext;
+
+         const responseInterface = this.getInterfaceType(minimalContext);
+
+         // Use interface-specific preparation
+         return this.prepareRequestBody(params, responseInterface);
+     }
+
+     private async prepareRequestBody(params: TLLMPreparedParams, preparationType: string): Promise<TOpenAIRequestBody> {
+         // Create a minimal context for body preparation - the interface may need access to model info
+         const minimalContext: ILLMRequestContext = {
+             modelInfo: params.modelInfo,
+             modelEntryName: params.modelEntryName,
+             agentId: params.agentId,
+             teamId: params.teamId,
+             isUserKey: params.isUserKey,
+             credentials: params.credentials,
+             hasFiles: params.files && params.files.length > 0,
+             toolsInfo: params.toolsInfo,
+         };
+
+         const preparers = {
+             'chat.completions': async () => {
+                 const apiInterface = this.getApiInterface('chat.completions', minimalContext);
+                 return apiInterface.prepareRequestBody(params);
+             },
+             responses: async () => {
+                 const apiInterface = this.getApiInterface('responses', minimalContext);
+                 return apiInterface.prepareRequestBody(params);
+             },
+             'image-generation': () => this.prepareImageGenerationBody(params),
+             'image-edit': () => this.prepareImageEditBody(params),
+             // Future interfaces can be added here
+         };
+
+         const preparer = preparers[preparationType];
+         if (!preparer) {
+             throw new Error(`Unsupported preparation type: ${preparationType}`);
+         }
+
+         return preparer();
+     }
+
+     protected reportUsage(
+         usage: OpenAI.Completions.CompletionUsage & {
+             input_tokens?: number;
+             output_tokens?: number;
+             input_tokens_details?: { cached_tokens?: number };
+             prompt_tokens_details?: { cached_tokens?: number };
+             cost?: number; // for web search tool
+         },
+         metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
+     ) {
+         // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
+         const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
+
+         const inputTokens = usage?.input_tokens || usage?.prompt_tokens - (usage?.prompt_tokens_details?.cached_tokens || 0); // Returned by the search tool
+
+         const outputTokens =
+             usage?.output_tokens || // Returned by the search tool
+             usage?.completion_tokens ||
+             0;
+
+         const cachedInputTokens =
+             usage?.input_tokens_details?.cached_tokens || // Returned by the search tool
+             usage?.prompt_tokens_details?.cached_tokens ||
+             0;
+
+         const usageData = {
+             sourceId: `llm:${modelName}`,
+             input_tokens: inputTokens,
+             output_tokens: outputTokens,
+             input_tokens_cache_write: 0,
+             input_tokens_cache_read: cachedInputTokens,
+             cost: usage?.cost || 0, // for web search tool
+             keySource: metadata.keySource,
+             agentId: metadata.agentId,
+             teamId: metadata.teamId,
+         };
+         SystemEvents.emit('USAGE:LLM', usageData);
+
+         return usageData;
+     }
+ }
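
For reference, the usage-normalization arithmetic in reportUsage above can be read in isolation as the standalone sketch below. It is illustrative only and not part of the published package: the RawUsage type and normalizeUsage name are invented here, and a small `?? 0` guard is added that the original line omits.

// Sketch: how OpenAIConnector.reportUsage merges usage fields from the
// Chat Completions API (prompt_tokens/completion_tokens) and the
// Responses API (input_tokens/output_tokens), mirroring the logic above.
type RawUsage = {
    input_tokens?: number;                               // Responses API
    output_tokens?: number;                              // Responses API
    input_tokens_details?: { cached_tokens?: number };   // Responses API
    prompt_tokens?: number;                              // Chat Completions API
    completion_tokens?: number;                          // Chat Completions API
    prompt_tokens_details?: { cached_tokens?: number };  // Chat Completions API
};

function normalizeUsage(usage: RawUsage) {
    // Prefer Responses API fields; otherwise fall back to Chat Completions fields,
    // excluding cached prompt tokens from the billable input count.
    const inputTokens =
        usage?.input_tokens ||
        (usage?.prompt_tokens ?? 0) - (usage?.prompt_tokens_details?.cached_tokens || 0);
    const outputTokens = usage?.output_tokens || usage?.completion_tokens || 0;
    const cachedInputTokens =
        usage?.input_tokens_details?.cached_tokens ||
        usage?.prompt_tokens_details?.cached_tokens ||
        0;
    return { inputTokens, outputTokens, cachedInputTokens };
}

// Example: a Chat Completions payload with 1,000 prompt tokens, 200 of them cached:
// normalizeUsage({ prompt_tokens: 1000, completion_tokens: 150, prompt_tokens_details: { cached_tokens: 200 } })
// -> { inputTokens: 800, outputTokens: 150, cachedInputTokens: 200 }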