@smythos/sre 1.5.53 → 1.5.54

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (223)
  1. package/CHANGELOG +98 -98
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/bundle-analysis-lazy.html +4949 -0
  5. package/dist/bundle-analysis.html +4949 -0
  6. package/dist/index.js +3 -3
  7. package/dist/index.js.map +1 -1
  8. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +1 -6
  9. package/dist/types/utils/package-manager.utils.d.ts +26 -0
  10. package/package.json +1 -1
  11. package/src/Components/APICall/APICall.class.ts +157 -157
  12. package/src/Components/APICall/AccessTokenManager.ts +166 -166
  13. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  14. package/src/Components/APICall/OAuth.helper.ts +447 -447
  15. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  16. package/src/Components/APICall/parseData.ts +167 -167
  17. package/src/Components/APICall/parseHeaders.ts +41 -41
  18. package/src/Components/APICall/parseProxy.ts +68 -68
  19. package/src/Components/APICall/parseUrl.ts +91 -91
  20. package/src/Components/APIEndpoint.class.ts +234 -234
  21. package/src/Components/APIOutput.class.ts +58 -58
  22. package/src/Components/AgentPlugin.class.ts +102 -102
  23. package/src/Components/Async.class.ts +155 -155
  24. package/src/Components/Await.class.ts +90 -90
  25. package/src/Components/Classifier.class.ts +158 -158
  26. package/src/Components/Component.class.ts +132 -132
  27. package/src/Components/ComponentHost.class.ts +38 -38
  28. package/src/Components/DataSourceCleaner.class.ts +92 -92
  29. package/src/Components/DataSourceIndexer.class.ts +181 -181
  30. package/src/Components/DataSourceLookup.class.ts +161 -161
  31. package/src/Components/ECMASandbox.class.ts +71 -71
  32. package/src/Components/FEncDec.class.ts +29 -29
  33. package/src/Components/FHash.class.ts +33 -33
  34. package/src/Components/FSign.class.ts +80 -80
  35. package/src/Components/FSleep.class.ts +25 -25
  36. package/src/Components/FTimestamp.class.ts +25 -25
  37. package/src/Components/FileStore.class.ts +78 -78
  38. package/src/Components/ForEach.class.ts +97 -97
  39. package/src/Components/GPTPlugin.class.ts +70 -70
  40. package/src/Components/GenAILLM.class.ts +586 -586
  41. package/src/Components/HuggingFace.class.ts +314 -314
  42. package/src/Components/Image/imageSettings.config.ts +70 -70
  43. package/src/Components/ImageGenerator.class.ts +502 -502
  44. package/src/Components/JSONFilter.class.ts +54 -54
  45. package/src/Components/LLMAssistant.class.ts +213 -213
  46. package/src/Components/LogicAND.class.ts +28 -28
  47. package/src/Components/LogicAtLeast.class.ts +85 -85
  48. package/src/Components/LogicAtMost.class.ts +86 -86
  49. package/src/Components/LogicOR.class.ts +29 -29
  50. package/src/Components/LogicXOR.class.ts +34 -34
  51. package/src/Components/MCPClient.class.ts +138 -138
  52. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  53. package/src/Components/MemoryReadKeyVal.class.ts +66 -66
  54. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  55. package/src/Components/MemoryWriteObject.class.ts +97 -97
  56. package/src/Components/MultimodalLLM.class.ts +128 -128
  57. package/src/Components/OpenAPI.class.ts +72 -72
  58. package/src/Components/PromptGenerator.class.ts +122 -122
  59. package/src/Components/ScrapflyWebScrape.class.ts +159 -159
  60. package/src/Components/ServerlessCode.class.ts +123 -123
  61. package/src/Components/TavilyWebSearch.class.ts +98 -98
  62. package/src/Components/VisionLLM.class.ts +104 -104
  63. package/src/Components/ZapierAction.class.ts +127 -127
  64. package/src/Components/index.ts +97 -97
  65. package/src/Core/AgentProcess.helper.ts +240 -240
  66. package/src/Core/Connector.class.ts +123 -123
  67. package/src/Core/ConnectorsService.ts +197 -197
  68. package/src/Core/DummyConnector.ts +49 -49
  69. package/src/Core/HookService.ts +105 -105
  70. package/src/Core/SmythRuntime.class.ts +235 -235
  71. package/src/Core/SystemEvents.ts +16 -16
  72. package/src/Core/boot.ts +56 -56
  73. package/src/config.ts +15 -15
  74. package/src/constants.ts +126 -126
  75. package/src/data/hugging-face.params.json +579 -579
  76. package/src/helpers/AWSLambdaCode.helper.ts +590 -590
  77. package/src/helpers/BinaryInput.helper.ts +331 -331
  78. package/src/helpers/Conversation.helper.ts +1119 -1119
  79. package/src/helpers/ECMASandbox.helper.ts +54 -54
  80. package/src/helpers/JsonContent.helper.ts +97 -97
  81. package/src/helpers/LocalCache.helper.ts +97 -97
  82. package/src/helpers/Log.helper.ts +274 -274
  83. package/src/helpers/OpenApiParser.helper.ts +150 -150
  84. package/src/helpers/S3Cache.helper.ts +147 -147
  85. package/src/helpers/SmythURI.helper.ts +5 -5
  86. package/src/helpers/Sysconfig.helper.ts +77 -77
  87. package/src/helpers/TemplateString.helper.ts +243 -243
  88. package/src/helpers/TypeChecker.helper.ts +329 -329
  89. package/src/index.ts +3 -3
  90. package/src/index.ts.bak +3 -3
  91. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  92. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  93. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  94. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  95. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
  96. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  97. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  98. package/src/subsystems/AgentManager/AgentLogger.class.ts +301 -297
  99. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  100. package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
  101. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  102. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  103. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  104. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  105. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  106. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  107. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  108. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  109. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  110. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
  111. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  112. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  113. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  114. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  115. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  116. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  117. package/src/subsystems/IO/Log.service/index.ts +13 -13
  118. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  119. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  120. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  121. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  122. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  123. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  124. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  125. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  126. package/src/subsystems/IO/Router.service/index.ts +11 -11
  127. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
  128. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  129. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  130. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  131. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  132. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  133. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
  134. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
  135. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
  136. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  137. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  138. package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
  139. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  140. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  141. package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
  142. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
  143. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  144. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
  145. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
  146. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
  147. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
  148. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
  149. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
  150. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
  151. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -488
  152. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  153. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  154. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  155. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  156. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  157. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  158. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  159. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  160. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
  161. package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
  162. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
  163. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
  164. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  165. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  166. package/src/subsystems/LLMManager/models.ts +2540 -2540
  167. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  168. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  169. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  170. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
  171. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  172. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  173. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  174. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  175. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  176. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  177. package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
  178. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  179. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  180. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  181. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  182. package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
  183. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  184. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
  185. package/src/subsystems/Security/Account.service/index.ts +14 -14
  186. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  187. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  188. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  189. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  190. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  191. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  192. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  193. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  194. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  195. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  196. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  197. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  198. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  199. package/src/types/ACL.types.ts +104 -104
  200. package/src/types/AWS.types.ts +10 -10
  201. package/src/types/Agent.types.ts +61 -61
  202. package/src/types/AgentLogger.types.ts +17 -17
  203. package/src/types/Cache.types.ts +1 -1
  204. package/src/types/Common.types.ts +2 -2
  205. package/src/types/LLM.types.ts +496 -496
  206. package/src/types/Redis.types.ts +8 -8
  207. package/src/types/SRE.types.ts +64 -64
  208. package/src/types/Security.types.ts +14 -14
  209. package/src/types/Storage.types.ts +5 -5
  210. package/src/types/VectorDB.types.ts +86 -86
  211. package/src/utils/base64.utils.ts +275 -275
  212. package/src/utils/cli.utils.ts +68 -68
  213. package/src/utils/data.utils.ts +322 -322
  214. package/src/utils/date-time.utils.ts +22 -22
  215. package/src/utils/general.utils.ts +238 -238
  216. package/src/utils/index.ts +12 -12
  217. package/src/utils/lazy-client.ts +261 -261
  218. package/src/utils/numbers.utils.ts +13 -13
  219. package/src/utils/oauth.utils.ts +35 -35
  220. package/src/utils/string.utils.ts +414 -414
  221. package/src/utils/url.utils.ts +19 -19
  222. package/src/utils/validation.utils.ts +74 -74
  223. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts
@@ -1,488 +1,488 @@
import EventEmitter from 'events';
import OpenAI from 'openai';
import { toFile } from 'openai';
import { encodeChat } from 'gpt-tokenizer';

import { BUILT_IN_MODEL_PREFIX } from '@sre/constants';
import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
import { AccessRequest } from '@sre/Security/AccessControl/AccessRequest.class';
import { LLMHelper } from '@sre/LLMManager/LLM.helper';

import {
    TLLMParams,
    ToolData,
    TLLMMessageBlock,
    TLLMToolResultMessageBlock,
    TLLMMessageRole,
    APIKeySource,
    ILLMRequestFuncParams,
    TOpenAIRequestBody,
    TLLMChatResponse,
    ILLMRequestContext,
    BasicCredentials,
    TLLMPreparedParams,
} from '@sre/types/LLM.types';

import { LLMConnector } from '../../LLMConnector';
import { SystemEvents } from '@sre/Core/SystemEvents';
import { ConnectorService } from '@sre/Core/ConnectorsService';
import { HandlerDependencies, TToolType } from './types';
import { OpenAIApiInterfaceFactory, OpenAIApiInterface } from './apiInterfaces';

export class OpenAIConnector extends LLMConnector {
    public name = 'LLM:OpenAI';

    private interfaceFactory: OpenAIApiInterfaceFactory;

    constructor() {
        super();

        this.interfaceFactory = new OpenAIApiInterfaceFactory();
    }

    /**
     * Get the appropriate API interface for the given interface type and context
     */
    private getApiInterface(interfaceType: string, context: ILLMRequestContext): OpenAIApiInterface {
        const deps: HandlerDependencies = {
            getClient: (context) => this.getClient(context),
            reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
        };

        return this.interfaceFactory.createInterface(interfaceType, context, deps);
    }

    /**
     * Determine the appropriate interface type based on context and capabilities
     */
    private getInterfaceType(context: ILLMRequestContext): string {
        // Start with model-specified interface or default
        let responseInterface = this.interfaceFactory.getInterfaceTypeFromModelInfo(context.modelInfo);

        // Auto-switch to Responses API when web search is enabled
        if (context.toolsInfo?.openai?.webSearch?.enabled === true) {
            responseInterface = 'responses';
        }

        return responseInterface;
    }

    protected async getClient(params: ILLMRequestContext): Promise<OpenAI> {
        const apiKey = (params.credentials as BasicCredentials)?.apiKey;
        const baseURL = params?.modelInfo?.baseURL;

        if (!apiKey) throw new Error('Please provide an API key for OpenAI');

        const openai = new OpenAI({ baseURL, apiKey });

        return openai;
    }

    protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
        const _body = body as OpenAI.ChatCompletionCreateParams;

        // #region Validate token limit
        const messages = _body?.messages || [];
        const lastMessage = messages[messages.length - 1];
        const promptTokens = await this.computePromptTokens(messages, context);

        await this.validateTokenLimit({
            acRequest,
            promptTokens,
            context,
            maxTokens: _body.max_completion_tokens,
        });
        // #endregion Validate token limit

        const responseInterface = this.getInterfaceType(context);
        const apiInterface = this.getApiInterface(responseInterface, context);

        const result = await apiInterface.createRequest(body, context);

        const message = result?.choices?.[0]?.message;
        const finishReason = result?.choices?.[0]?.finish_reason;

        let toolsData: ToolData[] = [];
        let useTool = false;

        if (finishReason === 'tool_calls') {
            toolsData =
                message?.tool_calls?.map((tool, index) => ({
                    index,
                    id: tool?.id,
                    type: tool?.type,
                    name: tool?.function?.name,
                    arguments: tool?.function?.arguments,
                    role: 'tool',
                })) || [];

            useTool = true;
        }

        const usage = result?.usage;
        this.reportUsage(usage, {
            modelEntryName: context.modelEntryName,
            keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
            agentId: context.agentId,
            teamId: context.teamId,
        });

        return {
            content: message?.content ?? '',
            finishReason,
            useTool,
            toolsData,
            message,
            usage,
        };
    }

    protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
        // #region Validate token limit
        const messages = body?.messages || body?.input || [];
        const lastMessage = messages[messages.length - 1];
        const promptTokens = await this.computePromptTokens(messages, context);

        await this.validateTokenLimit({
            acRequest,
            promptTokens,
            context,
            maxTokens: body.max_completion_tokens,
        });
        // #endregion Validate token limit

        const responseInterface = this.getInterfaceType(context);
        const apiInterface = this.getApiInterface(responseInterface, context);

        const stream = await apiInterface.createStream(body, context);

        const emitter = apiInterface.handleStream(stream, context);

        return emitter;
    }

    // #region Image Generation, will be moved to a different subsystem
    protected async imageGenRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
        const openai = await this.getClient(context);
        const response = await openai.images.generate(body as OpenAI.Images.ImageGenerateParams);

        return response as OpenAI.ImagesResponse;
    }

    protected async imageEditRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<OpenAI.ImagesResponse> {
        const _body = body as OpenAI.Images.ImageEditParams;

        const openai = await this.getClient(context);
        const response = await openai.images.edit(_body);

        return response as OpenAI.ImagesResponse;
    }
    // #endregion

    public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto', modelInfo = null }) {
        let tools = [];

        if (toolDefinitions && toolDefinitions.length > 0) {
            const interfaceType = modelInfo?.interface || 'chat.completions';

            const tempContext: ILLMRequestContext = {
                modelEntryName: '',
                agentId: '',
                teamId: '',
                isUserKey: false,
                modelInfo,
                credentials: null,
            } as ILLMRequestContext;

            const deps: HandlerDependencies = {
                getClient: (context) => this.getClient(context),
                reportUsage: (usage, metadata) => this.reportUsage(usage, metadata),
            };

            const apiInterface = this.interfaceFactory.createInterface(interfaceType, tempContext, deps);

            // Transform tools using the interface
            tools = apiInterface.transformToolsConfig({
                type,
                toolDefinitions,
                toolChoice: toolChoice as OpenAI.ChatCompletionToolChoiceOption,
                modelInfo,
            });
        }

        return tools?.length > 0 ? { tools, tool_choice: toolChoice || 'auto' } : {};
    }

    public transformToolMessageBlocks({
        messageBlock,
        toolsData,
    }: {
        messageBlock: TLLMMessageBlock;
        toolsData: ToolData[];
    }): TLLMToolResultMessageBlock[] {
        const messageBlocks: TLLMToolResultMessageBlock[] = [];

        if (messageBlock) {
            const transformedMessageBlock = {
                ...messageBlock,
                content: typeof messageBlock.content === 'object' ? JSON.stringify(messageBlock.content) : messageBlock.content,
            };
            if (transformedMessageBlock.tool_calls) {
                for (let toolCall of transformedMessageBlock.tool_calls) {
                    toolCall.function.arguments =
                        typeof toolCall.function.arguments === 'object' ? JSON.stringify(toolCall.function.arguments) : toolCall.function.arguments;
                }
            }
            messageBlocks.push(transformedMessageBlock);
        }

        const transformedToolsData = toolsData.map((toolData) => ({
            tool_call_id: toolData.id,
            role: TLLMMessageRole.Tool, // toolData.role as TLLMMessageRole, //should always be 'tool' for OpenAI
            name: toolData.name,
            content: typeof toolData.result === 'string' ? toolData.result : JSON.stringify(toolData.result), // Ensure content is a string
        }));

        return [...messageBlocks, ...transformedToolsData];
    }

    public getConsistentMessages(messages) {
        const _messages = LLMHelper.removeDuplicateUserMessages(messages);

        return _messages.map((message) => {
            const _message = { ...message };
            let textContent = '';

            if (message?.parts) {
                textContent = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
            } else if (Array.isArray(message?.content)) {
                textContent = message.content.map((textBlock) => textBlock?.text || '').join(' ');
            } else if (message?.content) {
                textContent = message.content;
            }

            _message.content = textContent;

            return _message;
        });
    }

    private async validateTokenLimit({
        acRequest,
        maxTokens,
        promptTokens,
        context,
    }: {
        acRequest: AccessRequest;
        maxTokens: number;
        promptTokens: number;
        context: ILLMRequestContext;
    }): Promise<void> {
        const provider = await this.getProvider(acRequest, context.modelEntryName);

        await provider.validateTokensLimit({
            model: context.modelInfo,
            promptTokens,
            completionTokens: maxTokens,
            hasAPIKey: context.isUserKey,
        });
    }

    private async getProvider(acRequest: AccessRequest, modelEntryName: string) {
        const modelsProviderConnector = ConnectorService.getModelsProviderConnector();
        const modelsProvider = modelsProviderConnector.requester(acRequest.candidate as AccessCandidate);

        return modelsProvider;
    }

    /**
     * Safely compute prompt token count across different interfaces (Chat Completions, Responses)
     * - Normalizes message content to strings for encodeChat
     * - Handles vision prompts when files are present
     * - Never throws; defaults to 0 on failure
     */
    private async computePromptTokens(messages: any[], context: ILLMRequestContext): Promise<number> {
        try {
            if (context?.hasFiles) {
                const lastMessage = messages?.[messages?.length - 1] || {};
                const lastContent = lastMessage?.content ?? '';
                return await LLMHelper.countVisionPromptTokens(lastContent || '');
            }

            const normalized = (messages || [])
                .map((m) => {
                    if (!m || !m.role) return null;
                    let content = '';
                    if (Array.isArray(m.content)) {
                        content = m.content.map((b) => (typeof b?.text === 'string' ? b.text : '')).join(' ');
                    } else if (typeof m.content === 'string') {
                        content = m.content;
                    } else if (m.content !== undefined && m.content !== null) {
                        try {
                            content = JSON.stringify(m.content);
                        } catch (_) {
                            content = '';
                        }
                    }
                    return { role: m.role, content };
                })
                .filter(Boolean);

            return encodeChat(normalized as any, 'gpt-4')?.length || 0;
        } catch (_) {
            return 0;
        }
    }

    /**
     * Prepare request body for OpenAI Responses API
     * Uses MessageTransformer and ToolsTransformer for clean interface transformations
     */

    private async prepareImageGenerationBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageGenerateParams> {
        const { model, size, quality, n, responseFormat, style } = params;

        const body: OpenAI.Images.ImageGenerateParams = {
            prompt: params.prompt,
            model: model as string,
            size: size as OpenAI.Images.ImageGenerateParams['size'],
            n: n || 1,
        };

        if (quality) {
            body.quality = quality;
        }

        if (style) {
            body.style = style;
        }

        return body;
    }

    private async prepareImageEditBody(params: TLLMPreparedParams): Promise<OpenAI.Images.ImageEditParams> {
        const { model, size, n, responseFormat } = params;

        const body: OpenAI.Images.ImageEditParams = {
            prompt: params.prompt,
            model: model as string,
            size: size as OpenAI.Images.ImageEditParams['size'],
            n: n || 1,
            image: null,
        };

        const files: BinaryInput[] = params?.files || [];

        if (files.length > 0) {
            const images = await Promise.all(
                files.map(
                    async (file) =>
                        await toFile(await file.getReadStream(), await file.getName(), {
                            type: file.mimetype,
                        })
                )
            );

            // Assign only the first image file as required by the OpenAI image-edit endpoint
            body.image = images[0];
        }

        return body;
    }

    protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TOpenAIRequestBody> {
        // Handle special capabilities first (these override interface type)
        if (params.capabilities?.imageGeneration === true) {
            const capabilityType = params?.files?.length > 0 ? 'image-edit' : 'image-generation';
            return this.prepareRequestBody(params, capabilityType);
        }

        // Create a minimal context to use the same interface selection logic
        const minimalContext: ILLMRequestContext = {
            modelInfo: params.modelInfo,
            toolsInfo: params.toolsInfo,
        } as ILLMRequestContext;

        const responseInterface = this.getInterfaceType(minimalContext);

        // Use interface-specific preparation
        return this.prepareRequestBody(params, responseInterface);
    }

    private async prepareRequestBody(params: TLLMPreparedParams, preparationType: string): Promise<TOpenAIRequestBody> {
        // Create a minimal context for body preparation - the interface may need access to model info
        const minimalContext: ILLMRequestContext = {
            modelInfo: params.modelInfo,
            modelEntryName: params.modelEntryName,
            agentId: params.agentId,
            teamId: params.teamId,
            isUserKey: params.isUserKey,
            credentials: params.credentials,
            hasFiles: params.files && params.files.length > 0,
            toolsInfo: params.toolsInfo,
        };

        const preparers = {
            'chat.completions': async () => {
                const apiInterface = this.getApiInterface('chat.completions', minimalContext);
                return apiInterface.prepareRequestBody(params);
            },
            responses: async () => {
                const apiInterface = this.getApiInterface('responses', minimalContext);
                return apiInterface.prepareRequestBody(params);
            },
            'image-generation': () => this.prepareImageGenerationBody(params),
            'image-edit': () => this.prepareImageEditBody(params),
            // Future interfaces can be added here
        };

        const preparer = preparers[preparationType];
        if (!preparer) {
            throw new Error(`Unsupported preparation type: ${preparationType}`);
        }

        return preparer();
    }

    protected reportUsage(
        usage: OpenAI.Completions.CompletionUsage & {
            input_tokens?: number;
            output_tokens?: number;
            input_tokens_details?: { cached_tokens?: number };
            prompt_tokens_details?: { cached_tokens?: number };
            cost?: number; // for web search tool
        },
        metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
    ) {
        // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
        const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');

        const inputTokens = usage?.input_tokens || usage?.prompt_tokens - (usage?.prompt_tokens_details?.cached_tokens || 0); // Returned by the search tool

        const outputTokens =
            usage?.output_tokens || // Returned by the search tool
            usage?.completion_tokens ||
            0;

        const cachedInputTokens =
            usage?.input_tokens_details?.cached_tokens || // Returned by the search tool
            usage?.prompt_tokens_details?.cached_tokens ||
            0;

        const usageData = {
            sourceId: `llm:${modelName}`,
            input_tokens: inputTokens,
            output_tokens: outputTokens,
            input_tokens_cache_write: 0,
            input_tokens_cache_read: cachedInputTokens,
            cost: usage?.cost || 0, // for web search tool
            keySource: metadata.keySource,
            agentId: metadata.agentId,
            teamId: metadata.teamId,
        };
        SystemEvents.emit('USAGE:LLM', usageData);

        return usageData;
    }
}
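Two pieces of the connector shown above are worth calling out for anyone reading this diff as an integrator: the automatic switch between the Chat Completions and Responses interfaces, and the normalization of the two APIs' differently shaped usage objects before the USAGE:LLM event is emitted. The TypeScript sketch below restates that logic in standalone form. It is a loose illustration only; the names UsageLike, pickInterface, and normalizeUsage are invented for this example and are not part of the @smythos/sre public API.

// Illustrative sketch only; mirrors the logic in OpenAIConnector above, not its exact code.
type UsageLike = {
    // Chat Completions shape
    prompt_tokens?: number;
    completion_tokens?: number;
    prompt_tokens_details?: { cached_tokens?: number };
    // Responses API / web-search shape
    input_tokens?: number;
    output_tokens?: number;
    input_tokens_details?: { cached_tokens?: number };
    cost?: number;
};

// Mirrors getInterfaceType: the model entry's interface is used by default, but
// enabling the OpenAI web-search tool forces the Responses API.
function pickInterface(modelInterface: string | undefined, webSearchEnabled: boolean): string {
    return webSearchEnabled ? 'responses' : modelInterface || 'chat.completions';
}

// Mirrors the accounting in reportUsage: Responses-style fields take precedence when
// present, and cached prompt tokens are deducted from billable input tokens.
function normalizeUsage(usage: UsageLike) {
    const cached = usage.input_tokens_details?.cached_tokens ?? usage.prompt_tokens_details?.cached_tokens ?? 0;
    const inputTokens = usage.input_tokens ?? Math.max((usage.prompt_tokens ?? 0) - cached, 0);
    const outputTokens = usage.output_tokens ?? usage.completion_tokens ?? 0;
    return { input_tokens: inputTokens, output_tokens: outputTokens, input_tokens_cache_read: cached, cost: usage.cost ?? 0 };
}

// Example: a Chat Completions usage object with 1,200 prompt tokens, 400 of them served from cache.
console.log(pickInterface('chat.completions', true)); // 'responses'
console.log(normalizeUsage({ prompt_tokens: 1200, completion_tokens: 250, prompt_tokens_details: { cached_tokens: 400 } }));
// { input_tokens: 800, output_tokens: 250, input_tokens_cache_read: 400, cost: 0 }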