@smythos/sre 1.5.44 → 1.5.45

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
Files changed (227)
  1. package/CHANGELOG +90 -90
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/index.js +3 -3
  5. package/dist/index.js.map +1 -1
  6. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +13 -1
  7. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.d.ts +46 -27
  8. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.d.ts +4 -2
  9. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/types.d.ts +0 -4
  10. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +39 -0
  11. package/dist/types/types/LLM.types.d.ts +1 -0
  12. package/package.json +1 -1
  13. package/src/Components/APICall/APICall.class.ts +156 -156
  14. package/src/Components/APICall/AccessTokenManager.ts +130 -130
  15. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  16. package/src/Components/APICall/OAuth.helper.ts +294 -294
  17. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  18. package/src/Components/APICall/parseData.ts +167 -167
  19. package/src/Components/APICall/parseHeaders.ts +41 -41
  20. package/src/Components/APICall/parseProxy.ts +68 -68
  21. package/src/Components/APICall/parseUrl.ts +91 -91
  22. package/src/Components/APIEndpoint.class.ts +234 -234
  23. package/src/Components/APIOutput.class.ts +58 -58
  24. package/src/Components/AgentPlugin.class.ts +102 -102
  25. package/src/Components/Async.class.ts +155 -155
  26. package/src/Components/Await.class.ts +90 -90
  27. package/src/Components/Classifier.class.ts +158 -158
  28. package/src/Components/Component.class.ts +132 -132
  29. package/src/Components/ComponentHost.class.ts +38 -38
  30. package/src/Components/DataSourceCleaner.class.ts +92 -92
  31. package/src/Components/DataSourceIndexer.class.ts +181 -181
  32. package/src/Components/DataSourceLookup.class.ts +161 -161
  33. package/src/Components/ECMASandbox.class.ts +71 -71
  34. package/src/Components/FEncDec.class.ts +29 -29
  35. package/src/Components/FHash.class.ts +33 -33
  36. package/src/Components/FSign.class.ts +80 -80
  37. package/src/Components/FSleep.class.ts +25 -25
  38. package/src/Components/FTimestamp.class.ts +25 -25
  39. package/src/Components/FileStore.class.ts +78 -78
  40. package/src/Components/ForEach.class.ts +97 -97
  41. package/src/Components/GPTPlugin.class.ts +70 -70
  42. package/src/Components/GenAILLM.class.ts +586 -586
  43. package/src/Components/HuggingFace.class.ts +314 -314
  44. package/src/Components/Image/imageSettings.config.ts +70 -70
  45. package/src/Components/ImageGenerator.class.ts +502 -502
  46. package/src/Components/JSONFilter.class.ts +54 -54
  47. package/src/Components/LLMAssistant.class.ts +213 -213
  48. package/src/Components/LogicAND.class.ts +28 -28
  49. package/src/Components/LogicAtLeast.class.ts +85 -85
  50. package/src/Components/LogicAtMost.class.ts +86 -86
  51. package/src/Components/LogicOR.class.ts +29 -29
  52. package/src/Components/LogicXOR.class.ts +34 -34
  53. package/src/Components/MCPClient.class.ts +112 -112
  54. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  55. package/src/Components/MemoryReadKeyVal.class.ts +66 -66
  56. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  57. package/src/Components/MemoryWriteObject.class.ts +97 -97
  58. package/src/Components/MultimodalLLM.class.ts +128 -128
  59. package/src/Components/OpenAPI.class.ts +72 -72
  60. package/src/Components/PromptGenerator.class.ts +122 -122
  61. package/src/Components/ScrapflyWebScrape.class.ts +159 -159
  62. package/src/Components/ServerlessCode.class.ts +123 -123
  63. package/src/Components/TavilyWebSearch.class.ts +98 -98
  64. package/src/Components/VisionLLM.class.ts +104 -104
  65. package/src/Components/ZapierAction.class.ts +127 -127
  66. package/src/Components/index.ts +97 -97
  67. package/src/Core/AgentProcess.helper.ts +240 -240
  68. package/src/Core/Connector.class.ts +123 -123
  69. package/src/Core/ConnectorsService.ts +197 -197
  70. package/src/Core/DummyConnector.ts +49 -49
  71. package/src/Core/HookService.ts +105 -105
  72. package/src/Core/SmythRuntime.class.ts +235 -235
  73. package/src/Core/SystemEvents.ts +16 -16
  74. package/src/Core/boot.ts +56 -56
  75. package/src/config.ts +15 -15
  76. package/src/constants.ts +126 -126
  77. package/src/data/hugging-face.params.json +579 -579
  78. package/src/helpers/AWSLambdaCode.helper.ts +587 -587
  79. package/src/helpers/BinaryInput.helper.ts +331 -331
  80. package/src/helpers/Conversation.helper.ts +1119 -1119
  81. package/src/helpers/ECMASandbox.helper.ts +54 -54
  82. package/src/helpers/JsonContent.helper.ts +97 -97
  83. package/src/helpers/LocalCache.helper.ts +97 -97
  84. package/src/helpers/Log.helper.ts +274 -274
  85. package/src/helpers/OpenApiParser.helper.ts +150 -150
  86. package/src/helpers/S3Cache.helper.ts +147 -147
  87. package/src/helpers/SmythURI.helper.ts +5 -5
  88. package/src/helpers/Sysconfig.helper.ts +77 -77
  89. package/src/helpers/TemplateString.helper.ts +243 -243
  90. package/src/helpers/TypeChecker.helper.ts +329 -329
  91. package/src/index.ts +196 -196
  92. package/src/index.ts.bak +196 -196
  93. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  94. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  95. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  96. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  97. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
  98. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  99. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  100. package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -297
  101. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  102. package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
  103. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  104. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  105. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  106. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  107. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  108. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  109. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  110. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  111. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  112. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
  113. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  114. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  115. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  116. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  117. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  118. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  119. package/src/subsystems/IO/Log.service/index.ts +13 -13
  120. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  121. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  122. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  123. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  124. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  125. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  126. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  127. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  128. package/src/subsystems/IO/Router.service/index.ts +11 -11
  129. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
  130. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  131. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  132. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  133. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  134. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  135. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
  136. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
  137. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
  138. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  139. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  140. package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
  141. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  142. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  143. package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
  144. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
  145. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  146. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
  147. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
  148. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
  149. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
  150. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
  151. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
  152. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
  153. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -455
  154. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +528 -528
  155. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  156. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  157. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1168 -862
  158. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -37
  159. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  160. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  161. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -37
  162. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
  163. package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
  164. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
  165. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
  166. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  167. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  168. package/src/subsystems/LLMManager/models.ts +2540 -2540
  169. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  170. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  171. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  172. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
  173. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  174. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  175. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  176. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  177. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  178. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  179. package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
  180. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  181. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  182. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  183. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  184. package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
  185. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  186. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
  187. package/src/subsystems/Security/Account.service/index.ts +14 -14
  188. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  189. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  190. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  191. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  192. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  193. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  194. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  195. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  196. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  197. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  198. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  199. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  200. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  201. package/src/types/ACL.types.ts +104 -104
  202. package/src/types/AWS.types.ts +10 -10
  203. package/src/types/Agent.types.ts +61 -61
  204. package/src/types/AgentLogger.types.ts +17 -17
  205. package/src/types/Cache.types.ts +1 -1
  206. package/src/types/Common.types.ts +2 -2
  207. package/src/types/LLM.types.ts +496 -495
  208. package/src/types/Redis.types.ts +8 -8
  209. package/src/types/SRE.types.ts +64 -64
  210. package/src/types/Security.types.ts +14 -14
  211. package/src/types/Storage.types.ts +5 -5
  212. package/src/types/VectorDB.types.ts +86 -86
  213. package/src/utils/base64.utils.ts +275 -275
  214. package/src/utils/cli.utils.ts +68 -68
  215. package/src/utils/data.utils.ts +322 -322
  216. package/src/utils/date-time.utils.ts +22 -22
  217. package/src/utils/general.utils.ts +238 -238
  218. package/src/utils/index.ts +12 -12
  219. package/src/utils/lazy-client.ts +261 -261
  220. package/src/utils/numbers.utils.ts +13 -13
  221. package/src/utils/oauth.utils.ts +35 -35
  222. package/src/utils/string.utils.ts +414 -414
  223. package/src/utils/url.utils.ts +19 -19
  224. package/src/utils/validation.utils.ts +74 -74
  225. package/dist/bundle-analysis-lazy.html +0 -4949
  226. package/dist/bundle-analysis.html +0 -4949
  227. package/dist/types/utils/package-manager.utils.d.ts +0 -26
@@ -1,455 +1,488 @@
- import EventEmitter from 'events';
- import OpenAI from 'openai';
- import { toFile } from 'openai';
- import { encodeChat } from 'gpt-tokenizer';
-
- import { BUILT_IN_MODEL_PREFIX } from '@sre/constants';
- import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
- import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
- import { AccessRequest } from '@sre/Security/AccessControl/AccessRequest.class';
- import { LLMHelper } from '@sre/LLMManager/LLM.helper';
-
- import {
-     TLLMParams,
-     ToolData,
-     TLLMMessageBlock,
-     TLLMToolResultMessageBlock,
-     TLLMMessageRole,
-     APIKeySource,
-     ILLMRequestFuncParams,
-     TOpenAIRequestBody,
-     TLLMChatResponse,
-     ILLMRequestContext,
-     BasicCredentials,
-     TLLMPreparedParams,
- } from '@sre/types/LLM.types';
-
- import { LLMConnector } from '../../LLMConnector';
- import { SystemEvents } from '@sre/Core/SystemEvents';
- import { ConnectorService } from '@sre/Core/ConnectorsService';
- import { HandlerDependencies, TToolType } from './types';
- import { OpenAIApiInterfaceFactory, OpenAIApiInterface } from './apiInterfaces';
-
- export class OpenAIConnector extends LLMConnector {
-     public name = 'LLM:OpenAI';
-
-     private interfaceFactory: OpenAIApiInterfaceFactory;
-
-     constructor() {
-         super();
-
-         this.interfaceFactory = new OpenAIApiInterfaceFactory();
-     }
-
-     /**
-      * Get the appropriate API interface for the given interface type and context
-      */
-     private getApiInterface(interfaceType: string, context: ILLMRequestContext): OpenAIApiInterface {
-         const deps: HandlerDependencies = {
-             getClient: (context) => this.getClient(context),
-             reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
-         };
-
-         return this.interfaceFactory.createInterface(interfaceType, context, deps);
-     }
-
-     /**
-      * Determine the appropriate interface type based on context and capabilities
-      */
-     private getInterfaceType(context: ILLMRequestContext): string {
-         // Start with model-specified interface or default
-         let responseInterface = this.interfaceFactory.getInterfaceTypeFromModelInfo(context.modelInfo);
-
-         // Auto-switch to Responses API when web search is enabled
-         if (context.toolsInfo?.openai?.webSearch?.enabled === true) {
-             responseInterface = 'responses';
-         }
-
-         return responseInterface;
-     }
-
-     protected async getClient(params: ILLMRequestContext): Promise<OpenAI> {
-         const apiKey = (params.credentials as BasicCredentials)?.apiKey;
-         const baseURL = params?.modelInfo?.baseURL;
-
-         if (!apiKey) throw new Error('Please provide an API key for OpenAI');
-
-         const openai = new OpenAI({ baseURL, apiKey });
-
-         return openai;
-     }
-
-     protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
-         const _body = body as OpenAI.ChatCompletionCreateParams;
-
-         // #region Validate token limit
-         const messages = _body?.messages || [];
-         const lastMessage = messages[messages.length - 1];
-
-         const promptTokens = context?.hasFiles
-             ? await LLMHelper.countVisionPromptTokens(lastMessage?.content)
-             : encodeChat(messages as any, 'gpt-4')?.length;
-
-         await this.validateTokenLimit({
-             acRequest,
-             promptTokens,
-             context,
-             maxTokens: _body.max_completion_tokens,
-         });
-         // #endregion Validate token limit
-
-         const responseInterface = this.getInterfaceType(context);
-         const apiInterface = this.getApiInterface(responseInterface, context);
-
-         const result = await apiInterface.createRequest(body, context);
-
-         const message = result?.choices?.[0]?.message;
-         const finishReason = result?.choices?.[0]?.finish_reason;
-
-         let toolsData: ToolData[] = [];
-         let useTool = false;
-
-         if (finishReason === 'tool_calls') {
-             toolsData =
-                 message?.tool_calls?.map((tool, index) => ({
-                     index,
-                     id: tool?.id,
-                     type: tool?.type,
-                     name: tool?.function?.name,
-                     arguments: tool?.function?.arguments,
-                     role: 'tool',
-                 })) || [];
-
-             useTool = true;
-         }
-
-         const usage = result?.usage;
-         this.reportUsage(usage, {
-             modelEntryName: context.modelEntryName,
-             keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
-             agentId: context.agentId,
-             teamId: context.teamId,
-         });
-
-         return {
-             content: message?.content ?? '',
-             finishReason,
-             useTool,
-             toolsData,
-             message,
-             usage,
-         };
-     }
-
-     protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
-         // #region Validate token limit
-         const messages = body?.messages || body?.input || [];
-         const lastMessage = messages[messages.length - 1];
-
-         const promptTokens = context?.hasFiles
-             ? await LLMHelper.countVisionPromptTokens(lastMessage?.content)
-             : encodeChat(messages as any, 'gpt-4')?.length;
-
-         await this.validateTokenLimit({
-             acRequest,
-             promptTokens,
-             context,
-             maxTokens: body.max_completion_tokens,
-         });
-         // #endregion Validate token limit
-
-         const responseInterface = this.getInterfaceType(context);
-         const apiInterface = this.getApiInterface(responseInterface, context);
-
-         const stream = await apiInterface.createStream(body, context);
-
-         const emitter = apiInterface.handleStream(stream, context);
-
-         return emitter;
-     }
-
-     // #region Image Generation, will be moved to a different subsystem
-     protected async imageGenRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
-         const openai = await this.getClient(context);
-         const response = await openai.images.generate(body as OpenAI.Images.ImageGenerateParams);
-
-         return response as OpenAI.ImagesResponse;
-     }
-
-     protected async imageEditRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
-         const _body = body as OpenAI.Images.ImageEditParams;
-
-         const openai = await this.getClient(context);
-         const response = await openai.images.edit(_body);
-
-         return response as OpenAI.ImagesResponse;
-     }
-     // #endregion
-
-     public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto', modelInfo = null }) {
-         let tools = [];
-
-         if (toolDefinitions && toolDefinitions.length > 0) {
-             const interfaceType = modelInfo?.interface || 'chat.completions';
-
-             const tempContext: ILLMRequestContext = {
-                 modelEntryName: '',
-                 agentId: '',
-                 teamId: '',
-                 isUserKey: false,
-                 modelInfo,
-                 credentials: null,
-             } as ILLMRequestContext;
-
-             const deps: HandlerDependencies = {
-                 getClient: (context) => this.getClient(context),
-                 reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
-             };
-
-             const apiInterface = this.interfaceFactory.createInterface(interfaceType, tempContext, deps);
-
-             // Transform tools using the interface
-             tools = apiInterface.transformToolsConfig({
-                 type,
-                 toolDefinitions,
-                 toolChoice: toolChoice as OpenAI.ChatCompletionToolChoiceOption,
-                 modelInfo,
-             });
-         }
-
-         return tools?.length > 0 ? { tools, tool_choice: toolChoice || 'auto' } : {};
-     }
-
-     public transformToolMessageBlocks({
-         messageBlock,
-         toolsData,
-     }: {
-         messageBlock: TLLMMessageBlock;
-         toolsData: ToolData[];
-     }): TLLMToolResultMessageBlock[] {
-         const messageBlocks: TLLMToolResultMessageBlock[] = [];
-
-         if (messageBlock) {
-             const transformedMessageBlock = {
-                 ...messageBlock,
-                 content: typeof messageBlock.content === 'object' ? JSON.stringify(messageBlock.content) : messageBlock.content,
-             };
-             if (transformedMessageBlock.tool_calls) {
-                 for (let toolCall of transformedMessageBlock.tool_calls) {
-                     toolCall.function.arguments =
-                         typeof toolCall.function.arguments === 'object' ? JSON.stringify(toolCall.function.arguments) : toolCall.function.arguments;
-                 }
-             }
-             messageBlocks.push(transformedMessageBlock);
-         }
-
-         const transformedToolsData = toolsData.map((toolData) => ({
-             tool_call_id: toolData.id,
-             role: TLLMMessageRole.Tool, // toolData.role as TLLMMessageRole, //should always be 'tool' for OpenAI
-             name: toolData.name,
-             content: typeof toolData.result === 'string' ? toolData.result : JSON.stringify(toolData.result), // Ensure content is a string
-         }));
-
-         return [...messageBlocks, ...transformedToolsData];
-     }
-
-     public getConsistentMessages(messages) {
-         const _messages = LLMHelper.removeDuplicateUserMessages(messages);
-
-         return _messages.map((message) => {
-             const _message = { ...message };
-             let textContent = '';
-
-             if (message?.parts) {
-                 textContent = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
-             } else if (Array.isArray(message?.content)) {
-                 textContent = message.content.map((textBlock) => textBlock?.text || '').join(' ');
-             } else if (message?.content) {
-                 textContent = message.content;
-             }
-
-             _message.content = textContent;
-
-             return _message;
-         });
-     }
-
-     private async validateTokenLimit({
-         acRequest,
-         maxTokens,
-         promptTokens,
-         context,
-     }: {
-         acRequest: AccessRequest;
-         maxTokens: number;
-         promptTokens: number;
-         context: ILLMRequestContext;
-     }): Promise<void> {
-         const provider = await this.getProvider(acRequest, context.modelEntryName);
-
-         await provider.validateTokensLimit({
-             model: context.modelInfo,
-             promptTokens,
-             completionTokens: maxTokens,
-             hasAPIKey: context.isUserKey,
-         });
-     }
-
-     private async getProvider(acRequest: AccessRequest, modelEntryName: string) {
-         const modelsProviderConnector = ConnectorService.getModelsProviderConnector();
-         const modelsProvider = modelsProviderConnector.requester(acRequest.candidate as AccessCandidate);
-
-         return modelsProvider;
-     }
-
-     /**
-      * Prepare request body for OpenAI Responses API
-      * Uses MessageTransformer and ToolsTransformer for clean interface transformations
-      */
-
-     private async prepareImageGenerationBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageGenerateParams> {
-         const { model, size, quality, n, responseFormat, style } = params;
-
-         const body: OpenAI.Images.ImageGenerateParams = {
-             prompt: params.prompt,
-             model: model as string,
-             size: size as OpenAI.Images.ImageGenerateParams['size'],
-             n: n || 1,
-         };
-
-         if (quality) {
-             body.quality = quality;
-         }
-
-         if (style) {
-             body.style = style;
-         }
-
-         return body;
-     }
-
-     private async prepareImageEditBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageEditParams> {
-         const { model, size, n, responseFormat } = params;
-
-         const body: OpenAI.Images.ImageEditParams = {
-             prompt: params.prompt,
-             model: model as string,
-             size: size as OpenAI.Images.ImageEditParams['size'],
-             n: n || 1,
-             image: null,
-         };
-
-         const files: BinaryInput[] = params?.files || [];
-
-         if (files.length > 0) {
-             const images = await Promise.all(
-                 files.map(
-                     async (file) =>
-                         await toFile(await file.getReadStream(), await file.getName(), {
-                             type: file.mimetype,
-                         })
-                 )
-             );
-
-             // Assign only the first image file as required by the OpenAI image-edit endpoint
-             body.image = images[0];
-         }
-
-         return body;
-     }
-
-     protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TOpenAIRequestBody> {
-         // Handle special capabilities first (these override interface type)
-         if (params.capabilities?.imageGeneration === true) {
-             const capabilityType = params?.files?.length > 0 ? 'image-edit' : 'image-generation';
-             return this.prepareRequestBody(params, capabilityType);
-         }
-
-         // Create a minimal context to use the same interface selection logic
-         const minimalContext: ILLMRequestContext = {
-             modelInfo: params.modelInfo,
-             toolsInfo: params.toolsInfo,
-         } as ILLMRequestContext;
-
-         const responseInterface = this.getInterfaceType(minimalContext);
-
-         // Use interface-specific preparation
-         return this.prepareRequestBody(params, responseInterface);
-     }
-
-     private async prepareRequestBody(params: TLLMPreparedParams, preparationType: string): Promise<TOpenAIRequestBody> {
-         // Create a minimal context for body preparation - the interface may need access to model info
-         const minimalContext: ILLMRequestContext = {
-             modelInfo: params.modelInfo,
-             modelEntryName: params.modelEntryName,
-             agentId: params.agentId,
-             teamId: params.teamId,
-             isUserKey: params.isUserKey,
-             credentials: params.credentials,
-             hasFiles: params.files && params.files.length > 0,
-             toolsInfo: params.toolsInfo,
-         };
-
-         const preparers = {
-             'chat.completions': async () => {
-                 const apiInterface = this.getApiInterface('chat.completions', minimalContext);
-                 return apiInterface.prepareRequestBody(params);
-             },
-             responses: async () => {
-                 const apiInterface = this.getApiInterface('responses', minimalContext);
-                 return apiInterface.prepareRequestBody(params);
-             },
-             'image-generation': () => this.prepareImageGenerationBody(params),
-             'image-edit': () => this.prepareImageEditBody(params),
-             // Future interfaces can be added here
-         };
-
-         const preparer = preparers[preparationType];
-         if (!preparer) {
-             throw new Error(`Unsupported preparation type: ${preparationType}`);
-         }
-
-         return preparer();
-     }
-
-     protected reportUsage(
-         usage: OpenAI.Completions.CompletionUsage & {
-             input_tokens?: number;
-             output_tokens?: number;
-             input_tokens_details?: { cached_tokens?: number };
-             prompt_tokens_details?: { cached_tokens?: number };
-             cost?: number; // for web search tool
-         },
-         metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
-     ) {
-         // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
-         const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
-
-         const inputTokens = usage?.input_tokens || usage?.prompt_tokens - (usage?.prompt_tokens_details?.cached_tokens || 0); // Returned by the search tool
-
-         const outputTokens =
-             usage?.output_tokens || // Returned by the search tool
-             usage?.completion_tokens ||
-             0;
-
-         const cachedInputTokens =
-             usage?.input_tokens_details?.cached_tokens || // Returned by the search tool
-             usage?.prompt_tokens_details?.cached_tokens ||
-             0;
-
-         const usageData = {
-             sourceId: `llm:${modelName}`,
-             input_tokens: inputTokens,
-             output_tokens: outputTokens,
-             input_tokens_cache_write: 0,
-             input_tokens_cache_read: cachedInputTokens,
-             cost: usage?.cost || 0, // for web search tool
-             keySource: metadata.keySource,
-             agentId: metadata.agentId,
-             teamId: metadata.teamId,
-         };
-         SystemEvents.emit('USAGE:LLM', usageData);
-
-         return usageData;
-     }
- }
+ import EventEmitter from 'events';
+ import OpenAI from 'openai';
+ import { toFile } from 'openai';
+ import { encodeChat } from 'gpt-tokenizer';
+
+ import { BUILT_IN_MODEL_PREFIX } from '@sre/constants';
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
+ import { AccessRequest } from '@sre/Security/AccessControl/AccessRequest.class';
+ import { LLMHelper } from '@sre/LLMManager/LLM.helper';
+
+ import {
+     TLLMParams,
+     ToolData,
+     TLLMMessageBlock,
+     TLLMToolResultMessageBlock,
+     TLLMMessageRole,
+     APIKeySource,
+     ILLMRequestFuncParams,
+     TOpenAIRequestBody,
+     TLLMChatResponse,
+     ILLMRequestContext,
+     BasicCredentials,
+     TLLMPreparedParams,
+ } from '@sre/types/LLM.types';
+
+ import { LLMConnector } from '../../LLMConnector';
+ import { SystemEvents } from '@sre/Core/SystemEvents';
+ import { ConnectorService } from '@sre/Core/ConnectorsService';
+ import { HandlerDependencies, TToolType } from './types';
+ import { OpenAIApiInterfaceFactory, OpenAIApiInterface } from './apiInterfaces';
+
+ export class OpenAIConnector extends LLMConnector {
+     public name = 'LLM:OpenAI';
+
+     private interfaceFactory: OpenAIApiInterfaceFactory;
+
+     constructor() {
+         super();
+
+         this.interfaceFactory = new OpenAIApiInterfaceFactory();
+     }
+
+     /**
+      * Get the appropriate API interface for the given interface type and context
+      */
+     private getApiInterface(interfaceType: string, context: ILLMRequestContext): OpenAIApiInterface {
+         const deps: HandlerDependencies = {
+             getClient: (context) => this.getClient(context),
+             reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
+         };
+
+         return this.interfaceFactory.createInterface(interfaceType, context, deps);
+     }
+
+     /**
+      * Determine the appropriate interface type based on context and capabilities
+      */
+     private getInterfaceType(context: ILLMRequestContext): string {
+         // Start with model-specified interface or default
+         let responseInterface = this.interfaceFactory.getInterfaceTypeFromModelInfo(context.modelInfo);
+
+         // Auto-switch to Responses API when web search is enabled
+         if (context.toolsInfo?.openai?.webSearch?.enabled === true) {
+             responseInterface = 'responses';
+         }
+
+         return responseInterface;
+     }
+
+     protected async getClient(params: ILLMRequestContext): Promise<OpenAI> {
+         const apiKey = (params.credentials as BasicCredentials)?.apiKey;
+         const baseURL = params?.modelInfo?.baseURL;
+
+         if (!apiKey) throw new Error('Please provide an API key for OpenAI');
+
+         const openai = new OpenAI({ baseURL, apiKey });
+
+         return openai;
+     }
+
+     protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
+         const _body = body as OpenAI.ChatCompletionCreateParams;
+
+         // #region Validate token limit
+         const messages = _body?.messages || [];
+         const lastMessage = messages[messages.length - 1];
+         const promptTokens = await this.computePromptTokens(messages, context);
+
+         await this.validateTokenLimit({
+             acRequest,
+             promptTokens,
+             context,
+             maxTokens: _body.max_completion_tokens,
+         });
+         // #endregion Validate token limit
+
+         const responseInterface = this.getInterfaceType(context);
+         const apiInterface = this.getApiInterface(responseInterface, context);
+
+         const result = await apiInterface.createRequest(body, context);
+
+         const message = result?.choices?.[0]?.message;
+         const finishReason = result?.choices?.[0]?.finish_reason;
+
+         let toolsData: ToolData[] = [];
+         let useTool = false;
+
+         if (finishReason === 'tool_calls') {
+             toolsData =
+                 message?.tool_calls?.map((tool, index) => ({
+                     index,
+                     id: tool?.id,
+                     type: tool?.type,
+                     name: tool?.function?.name,
+                     arguments: tool?.function?.arguments,
+                     role: 'tool',
+                 })) || [];
+
+             useTool = true;
+         }
+
+         const usage = result?.usage;
+         this.reportUsage(usage, {
+             modelEntryName: context.modelEntryName,
+             keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
+             agentId: context.agentId,
+             teamId: context.teamId,
+         });
+
+         return {
+             content: message?.content ?? '',
+             finishReason,
+             useTool,
+             toolsData,
+             message,
+             usage,
+         };
+     }
+
+     protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
+         // #region Validate token limit
+         const messages = body?.messages || body?.input || [];
+         const lastMessage = messages[messages.length - 1];
+         const promptTokens = await this.computePromptTokens(messages, context);
+
+         await this.validateTokenLimit({
+             acRequest,
+             promptTokens,
+             context,
+             maxTokens: body.max_completion_tokens,
+         });
+         // #endregion Validate token limit
+
+         const responseInterface = this.getInterfaceType(context);
+         const apiInterface = this.getApiInterface(responseInterface, context);
+
+         const stream = await apiInterface.createStream(body, context);
+
+         const emitter = apiInterface.handleStream(stream, context);
+
+         return emitter;
+     }
+
+     // #region Image Generation, will be moved to a different subsystem
+     protected async imageGenRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
+         const openai = await this.getClient(context);
+         const response = await openai.images.generate(body as OpenAI.Images.ImageGenerateParams);
+
+         return response as OpenAI.ImagesResponse;
+     }
+
+     protected async imageEditRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
+         const _body = body as OpenAI.Images.ImageEditParams;
+
+         const openai = await this.getClient(context);
+         const response = await openai.images.edit(_body);
+
+         return response as OpenAI.ImagesResponse;
+     }
+     // #endregion
+
+     public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto', modelInfo = null }) {
+         let tools = [];
+
+         if (toolDefinitions && toolDefinitions.length > 0) {
+             const interfaceType = modelInfo?.interface || 'chat.completions';
+
+             const tempContext: ILLMRequestContext = {
+                 modelEntryName: '',
+                 agentId: '',
+                 teamId: '',
+                 isUserKey: false,
+                 modelInfo,
+                 credentials: null,
+             } as ILLMRequestContext;
+
+             const deps: HandlerDependencies = {
+                 getClient: (context) => this.getClient(context),
+                 reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
+             };
+
+             const apiInterface = this.interfaceFactory.createInterface(interfaceType, tempContext, deps);
+
+             // Transform tools using the interface
+             tools = apiInterface.transformToolsConfig({
+                 type,
+                 toolDefinitions,
+                 toolChoice: toolChoice as OpenAI.ChatCompletionToolChoiceOption,
+                 modelInfo,
+             });
+         }
+
+         return tools?.length > 0 ? { tools, tool_choice: toolChoice || 'auto' } : {};
+     }
+
+     public transformToolMessageBlocks({
+         messageBlock,
+         toolsData,
+     }: {
+         messageBlock: TLLMMessageBlock;
+         toolsData: ToolData[];
+     }): TLLMToolResultMessageBlock[] {
+         const messageBlocks: TLLMToolResultMessageBlock[] = [];
+
+         if (messageBlock) {
+             const transformedMessageBlock = {
+                 ...messageBlock,
+                 content: typeof messageBlock.content === 'object' ? JSON.stringify(messageBlock.content) : messageBlock.content,
+             };
+             if (transformedMessageBlock.tool_calls) {
+                 for (let toolCall of transformedMessageBlock.tool_calls) {
+                     toolCall.function.arguments =
+                         typeof toolCall.function.arguments === 'object' ? JSON.stringify(toolCall.function.arguments) : toolCall.function.arguments;
+                 }
+             }
+             messageBlocks.push(transformedMessageBlock);
+         }
+
+         const transformedToolsData = toolsData.map((toolData) => ({
+             tool_call_id: toolData.id,
+             role: TLLMMessageRole.Tool, // toolData.role as TLLMMessageRole, //should always be 'tool' for OpenAI
+             name: toolData.name,
+             content: typeof toolData.result === 'string' ? toolData.result : JSON.stringify(toolData.result), // Ensure content is a string
+         }));
+
+         return [...messageBlocks, ...transformedToolsData];
+     }
+
+     public getConsistentMessages(messages) {
+         const _messages = LLMHelper.removeDuplicateUserMessages(messages);
+
+         return _messages.map((message) => {
+             const _message = { ...message };
+             let textContent = '';
+
+             if (message?.parts) {
+                 textContent = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
+             } else if (Array.isArray(message?.content)) {
+                 textContent = message.content.map((textBlock) => textBlock?.text || '').join(' ');
+             } else if (message?.content) {
+                 textContent = message.content;
+             }
+
+             _message.content = textContent;
+
+             return _message;
+         });
+     }
+
+     private async validateTokenLimit({
+         acRequest,
+         maxTokens,
+         promptTokens,
+         context,
+     }: {
+         acRequest: AccessRequest;
+         maxTokens: number;
+         promptTokens: number;
+         context: ILLMRequestContext;
+     }): Promise<void> {
+         const provider = await this.getProvider(acRequest, context.modelEntryName);
+
+         await provider.validateTokensLimit({
+             model: context.modelInfo,
+             promptTokens,
+             completionTokens: maxTokens,
+             hasAPIKey: context.isUserKey,
+         });
+     }
+
+     private async getProvider(acRequest: AccessRequest, modelEntryName: string) {
+         const modelsProviderConnector = ConnectorService.getModelsProviderConnector();
+         const modelsProvider = modelsProviderConnector.requester(acRequest.candidate as AccessCandidate);
+
+         return modelsProvider;
+     }
+
+     /**
+      * Safely compute prompt token count across different interfaces (Chat Completions, Responses)
+      * - Normalizes message content to strings for encodeChat
+      * - Handles vision prompts when files are present
+      * - Never throws; defaults to 0 on failure
+      */
+     private async computePromptTokens(messages: any[], context: ILLMRequestContext): Promise<number> {
+         try {
+             if (context?.hasFiles) {
+                 const lastMessage = messages?.[messages?.length - 1] || {};
+                 const lastContent = lastMessage?.content ?? '';
+                 return await LLMHelper.countVisionPromptTokens(lastContent || '');
+             }
+
+             const normalized = (messages || [])
+                 .map((m) => {
+                     if (!m || !m.role) return null;
+                     let content = '';
+                     if (Array.isArray(m.content)) {
+                         content = m.content.map((b) => (typeof b?.text === 'string' ? b.text : '')).join(' ');
+                     } else if (typeof m.content === 'string') {
+                         content = m.content;
+                     } else if (m.content !== undefined && m.content !== null) {
+                         try {
+                             content = JSON.stringify(m.content);
+                         } catch (_) {
+                             content = '';
+                         }
+                     }
+                     return { role: m.role, content };
+                 })
+                 .filter(Boolean);
+
+             return encodeChat(normalized as any, 'gpt-4')?.length || 0;
+         } catch (_) {
+             return 0;
+         }
+     }
+
+     /**
+      * Prepare request body for OpenAI Responses API
+      * Uses MessageTransformer and ToolsTransformer for clean interface transformations
+      */
+
+     private async prepareImageGenerationBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageGenerateParams> {
+         const { model, size, quality, n, responseFormat, style } = params;
+
+         const body: OpenAI.Images.ImageGenerateParams = {
+             prompt: params.prompt,
+             model: model as string,
+             size: size as OpenAI.Images.ImageGenerateParams['size'],
+             n: n || 1,
+         };
+
+         if (quality) {
+             body.quality = quality;
+         }
+
+         if (style) {
+             body.style = style;
+         }
+
+         return body;
+     }
+
+     private async prepareImageEditBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageEditParams> {
+         const { model, size, n, responseFormat } = params;
+
+         const body: OpenAI.Images.ImageEditParams = {
+             prompt: params.prompt,
+             model: model as string,
+             size: size as OpenAI.Images.ImageEditParams['size'],
+             n: n || 1,
+             image: null,
+         };
+
+         const files: BinaryInput[] = params?.files || [];
+
+         if (files.length > 0) {
+             const images = await Promise.all(
+                 files.map(
+                     async (file) =>
+                         await toFile(await file.getReadStream(), await file.getName(), {
+                             type: file.mimetype,
+                         })
+                 )
+             );
+
+             // Assign only the first image file as required by the OpenAI image-edit endpoint
+             body.image = images[0];
+         }
+
+         return body;
+     }
+
+     protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TOpenAIRequestBody> {
+         // Handle special capabilities first (these override interface type)
+         if (params.capabilities?.imageGeneration === true) {
+             const capabilityType = params?.files?.length > 0 ? 'image-edit' : 'image-generation';
+             return this.prepareRequestBody(params, capabilityType);
+         }
+
+         // Create a minimal context to use the same interface selection logic
+         const minimalContext: ILLMRequestContext = {
+             modelInfo: params.modelInfo,
+             toolsInfo: params.toolsInfo,
+         } as ILLMRequestContext;
+
+         const responseInterface = this.getInterfaceType(minimalContext);
+
+         // Use interface-specific preparation
+         return this.prepareRequestBody(params, responseInterface);
+     }
+
+     private async prepareRequestBody(params: TLLMPreparedParams, preparationType: string): Promise<TOpenAIRequestBody> {
+         // Create a minimal context for body preparation - the interface may need access to model info
+         const minimalContext: ILLMRequestContext = {
+             modelInfo: params.modelInfo,
+             modelEntryName: params.modelEntryName,
+             agentId: params.agentId,
+             teamId: params.teamId,
+             isUserKey: params.isUserKey,
+             credentials: params.credentials,
+             hasFiles: params.files && params.files.length > 0,
+             toolsInfo: params.toolsInfo,
+         };
+
+         const preparers = {
+             'chat.completions': async () => {
+                 const apiInterface = this.getApiInterface('chat.completions', minimalContext);
+                 return apiInterface.prepareRequestBody(params);
+             },
+             responses: async () => {
+                 const apiInterface = this.getApiInterface('responses', minimalContext);
+                 return apiInterface.prepareRequestBody(params);
+             },
+             'image-generation': () => this.prepareImageGenerationBody(params),
+             'image-edit': () => this.prepareImageEditBody(params),
+             // Future interfaces can be added here
+         };
+
+         const preparer = preparers[preparationType];
+         if (!preparer) {
+             throw new Error(`Unsupported preparation type: ${preparationType}`);
+         }
+
+         return preparer();
+     }
+
+     protected reportUsage(
+         usage: OpenAI.Completions.CompletionUsage & {
+             input_tokens?: number;
+             output_tokens?: number;
+             input_tokens_details?: { cached_tokens?: number };
+             prompt_tokens_details?: { cached_tokens?: number };
+             cost?: number; // for web search tool
+         },
+         metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
+     ) {
+         // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
+         const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
+
+         const inputTokens = usage?.input_tokens || usage?.prompt_tokens - (usage?.prompt_tokens_details?.cached_tokens || 0); // Returned by the search tool
+
+         const outputTokens =
+             usage?.output_tokens || // Returned by the search tool
+             usage?.completion_tokens ||
+             0;
+
+         const cachedInputTokens =
+             usage?.input_tokens_details?.cached_tokens || // Returned by the search tool
+             usage?.prompt_tokens_details?.cached_tokens ||
+             0;
+
+         const usageData = {
+             sourceId: `llm:${modelName}`,
+             input_tokens: inputTokens,
+             output_tokens: outputTokens,
+             input_tokens_cache_write: 0,
+             input_tokens_cache_read: cachedInputTokens,
+             cost: usage?.cost || 0, // for web search tool
+             keySource: metadata.keySource,
+             agentId: metadata.agentId,
+             teamId: metadata.teamId,
+         };
+         SystemEvents.emit('USAGE:LLM', usageData);
+
+         return usageData;
+     }
+ }
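
The substantive change in this file is the new computePromptTokens helper: instead of calling encodeChat directly on raw message blocks (which can carry array-of-parts content in Responses API histories), both request and streamRequest now delegate to a method that normalizes every message's content to a plain string and never throws. Below is a minimal standalone sketch of that normalization, using the same encodeChat export from gpt-tokenizer that the connector imports; the countPromptTokens name, the LooseMessage type, and the sample messages are illustrative, not part of the package.

import { encodeChat } from 'gpt-tokenizer';

type LooseMessage = { role?: string; content?: unknown };

// Sketch of the normalization performed by the new computePromptTokens helper:
// array-of-parts content is flattened to its text blocks, other object content
// is JSON-stringified, and any failure degrades to a count of 0 rather than
// throwing from a pre-flight token estimate.
function countPromptTokens(messages: LooseMessage[]): number {
    try {
        const normalized = (messages || [])
            .map((m) => {
                if (!m || !m.role) return null;
                let content = '';
                if (Array.isArray(m.content)) {
                    content = m.content.map((b: any) => (typeof b?.text === 'string' ? b.text : '')).join(' ');
                } else if (typeof m.content === 'string') {
                    content = m.content;
                } else if (m.content !== undefined && m.content !== null) {
                    try {
                        content = JSON.stringify(m.content);
                    } catch {
                        content = '';
                    }
                }
                return { role: m.role, content };
            })
            .filter(Boolean);

        return encodeChat(normalized as any, 'gpt-4')?.length || 0;
    } catch {
        return 0;
    }
}

// Example: a mixed history with Chat Completions-style string content and
// Responses-style content parts tokenizes without throwing.
countPromptTokens([
    { role: 'user', content: 'Hello' },
    { role: 'user', content: [{ type: 'input_text', text: 'What changed in 1.5.45?' }] },
]);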