@smythos/sre 1.5.50 → 1.5.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (225)
  1. package/CHANGELOG +98 -98
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/index.js +3 -3
  5. package/dist/index.js.map +1 -1
  6. package/dist/types/Components/APICall/AccessTokenManager.d.ts +3 -2
  7. package/dist/types/Components/APICall/OAuth.helper.d.ts +3 -2
  8. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +6 -1
  9. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +39 -0
  10. package/package.json +1 -1
  11. package/src/Components/APICall/APICall.class.ts +156 -156
  12. package/src/Components/APICall/AccessTokenManager.ts +166 -130
  13. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  14. package/src/Components/APICall/OAuth.helper.ts +446 -294
  15. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  16. package/src/Components/APICall/parseData.ts +167 -167
  17. package/src/Components/APICall/parseHeaders.ts +41 -41
  18. package/src/Components/APICall/parseProxy.ts +68 -68
  19. package/src/Components/APICall/parseUrl.ts +91 -91
  20. package/src/Components/APIEndpoint.class.ts +234 -234
  21. package/src/Components/APIOutput.class.ts +58 -58
  22. package/src/Components/AgentPlugin.class.ts +102 -102
  23. package/src/Components/Async.class.ts +155 -155
  24. package/src/Components/Await.class.ts +90 -90
  25. package/src/Components/Classifier.class.ts +158 -158
  26. package/src/Components/Component.class.ts +132 -132
  27. package/src/Components/ComponentHost.class.ts +38 -38
  28. package/src/Components/DataSourceCleaner.class.ts +92 -92
  29. package/src/Components/DataSourceIndexer.class.ts +181 -181
  30. package/src/Components/DataSourceLookup.class.ts +161 -161
  31. package/src/Components/ECMASandbox.class.ts +71 -71
  32. package/src/Components/FEncDec.class.ts +29 -29
  33. package/src/Components/FHash.class.ts +33 -33
  34. package/src/Components/FSign.class.ts +80 -80
  35. package/src/Components/FSleep.class.ts +25 -25
  36. package/src/Components/FTimestamp.class.ts +25 -25
  37. package/src/Components/FileStore.class.ts +78 -78
  38. package/src/Components/ForEach.class.ts +97 -97
  39. package/src/Components/GPTPlugin.class.ts +70 -70
  40. package/src/Components/GenAILLM.class.ts +586 -586
  41. package/src/Components/HuggingFace.class.ts +314 -314
  42. package/src/Components/Image/imageSettings.config.ts +70 -70
  43. package/src/Components/ImageGenerator.class.ts +502 -502
  44. package/src/Components/JSONFilter.class.ts +54 -54
  45. package/src/Components/LLMAssistant.class.ts +213 -213
  46. package/src/Components/LogicAND.class.ts +28 -28
  47. package/src/Components/LogicAtLeast.class.ts +85 -85
  48. package/src/Components/LogicAtMost.class.ts +86 -86
  49. package/src/Components/LogicOR.class.ts +29 -29
  50. package/src/Components/LogicXOR.class.ts +34 -34
  51. package/src/Components/MCPClient.class.ts +138 -138
  52. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  53. package/src/Components/MemoryReadKeyVal.class.ts +66 -66
  54. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  55. package/src/Components/MemoryWriteObject.class.ts +97 -97
  56. package/src/Components/MultimodalLLM.class.ts +128 -128
  57. package/src/Components/OpenAPI.class.ts +72 -72
  58. package/src/Components/PromptGenerator.class.ts +122 -122
  59. package/src/Components/ScrapflyWebScrape.class.ts +159 -159
  60. package/src/Components/ServerlessCode.class.ts +123 -123
  61. package/src/Components/TavilyWebSearch.class.ts +98 -98
  62. package/src/Components/VisionLLM.class.ts +104 -104
  63. package/src/Components/ZapierAction.class.ts +127 -127
  64. package/src/Components/index.ts +97 -97
  65. package/src/Core/AgentProcess.helper.ts +240 -240
  66. package/src/Core/Connector.class.ts +123 -123
  67. package/src/Core/ConnectorsService.ts +197 -197
  68. package/src/Core/DummyConnector.ts +49 -49
  69. package/src/Core/HookService.ts +105 -105
  70. package/src/Core/SmythRuntime.class.ts +235 -235
  71. package/src/Core/SystemEvents.ts +16 -16
  72. package/src/Core/boot.ts +56 -56
  73. package/src/config.ts +15 -15
  74. package/src/constants.ts +126 -126
  75. package/src/data/hugging-face.params.json +579 -579
  76. package/src/helpers/AWSLambdaCode.helper.ts +590 -587
  77. package/src/helpers/BinaryInput.helper.ts +331 -331
  78. package/src/helpers/Conversation.helper.ts +1119 -1119
  79. package/src/helpers/ECMASandbox.helper.ts +54 -54
  80. package/src/helpers/JsonContent.helper.ts +97 -97
  81. package/src/helpers/LocalCache.helper.ts +97 -97
  82. package/src/helpers/Log.helper.ts +274 -274
  83. package/src/helpers/OpenApiParser.helper.ts +150 -150
  84. package/src/helpers/S3Cache.helper.ts +147 -147
  85. package/src/helpers/SmythURI.helper.ts +5 -5
  86. package/src/helpers/Sysconfig.helper.ts +77 -77
  87. package/src/helpers/TemplateString.helper.ts +243 -243
  88. package/src/helpers/TypeChecker.helper.ts +329 -329
  89. package/src/index.ts +3 -3
  90. package/src/index.ts.bak +3 -3
  91. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  92. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  93. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  94. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  95. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
  96. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  97. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  98. package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -297
  99. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  100. package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
  101. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  102. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  103. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  104. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  105. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  106. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  107. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  108. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  109. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  110. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
  111. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  112. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  113. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  114. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  115. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  116. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  117. package/src/subsystems/IO/Log.service/index.ts +13 -13
  118. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  119. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  120. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  121. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  122. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  123. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  124. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  125. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  126. package/src/subsystems/IO/Router.service/index.ts +11 -11
  127. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
  128. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  129. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  130. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  131. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  132. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  133. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
  134. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
  135. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
  136. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  137. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  138. package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
  139. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  140. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  141. package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
  142. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
  143. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  144. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
  145. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
  146. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
  147. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
  148. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
  149. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
  150. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
  151. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -488
  152. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  153. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  154. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  155. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  156. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  157. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  158. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  159. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  160. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
  161. package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
  162. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
  163. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
  164. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  165. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  166. package/src/subsystems/LLMManager/models.ts +2540 -2540
  167. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  168. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  169. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  170. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
  171. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  172. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  173. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  174. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  175. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  176. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  177. package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
  178. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  179. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  180. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  181. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  182. package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
  183. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  184. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
  185. package/src/subsystems/Security/Account.service/index.ts +14 -14
  186. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  187. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  188. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  189. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  190. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  191. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  192. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  193. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  194. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  195. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  196. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  197. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  198. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  199. package/src/types/ACL.types.ts +104 -104
  200. package/src/types/AWS.types.ts +10 -10
  201. package/src/types/Agent.types.ts +61 -61
  202. package/src/types/AgentLogger.types.ts +17 -17
  203. package/src/types/Cache.types.ts +1 -1
  204. package/src/types/Common.types.ts +2 -2
  205. package/src/types/LLM.types.ts +496 -496
  206. package/src/types/Redis.types.ts +8 -8
  207. package/src/types/SRE.types.ts +64 -64
  208. package/src/types/Security.types.ts +14 -14
  209. package/src/types/Storage.types.ts +5 -5
  210. package/src/types/VectorDB.types.ts +86 -86
  211. package/src/utils/base64.utils.ts +275 -275
  212. package/src/utils/cli.utils.ts +68 -68
  213. package/src/utils/data.utils.ts +322 -322
  214. package/src/utils/date-time.utils.ts +22 -22
  215. package/src/utils/general.utils.ts +238 -238
  216. package/src/utils/index.ts +12 -12
  217. package/src/utils/lazy-client.ts +261 -261
  218. package/src/utils/numbers.utils.ts +13 -13
  219. package/src/utils/oauth.utils.ts +35 -35
  220. package/src/utils/string.utils.ts +414 -414
  221. package/src/utils/url.utils.ts +19 -19
  222. package/src/utils/validation.utils.ts +74 -74
  223. package/dist/bundle-analysis-lazy.html +0 -4949
  224. package/dist/bundle-analysis.html +0 -4949
  225. package/dist/types/utils/package-manager.utils.d.ts +0 -26
@@ -1,423 +1,423 @@
-import { VertexAI, type GenerationConfig, type UsageMetadata } from '@google-cloud/vertexai';
-import EventEmitter from 'events';
-
-import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
-import {
-    TCustomLLMModel,
-    APIKeySource,
-    TVertexAISettings,
-    ILLMRequestFuncParams,
-    TGoogleAIRequestBody,
-    ILLMRequestContext,
-    TLLMPreparedParams,
-    TLLMMessageBlock,
-    ToolData,
-    TLLMToolResultMessageBlock,
-    TLLMMessageRole,
-    TLLMChatResponse,
-    TLLMEvent,
-} from '@sre/types/LLM.types';
-import { LLMHelper } from '@sre/LLMManager/LLM.helper';
-import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
-import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
-
-import { LLMConnector } from '../LLMConnector';
-import { SystemEvents } from '@sre/Core/SystemEvents';
-
-//TODO: [AHMED/FORHAD]: test the usage reporting for VertexAI because by the time we were implementing the feature of usage reporting
-// we had no access to VertexAI so we assumed it is working (potential bug)
-
-export class VertexAIConnector extends LLMConnector {
-    public name = 'LLM:VertexAI';
-
-    private async getClient(params: ILLMRequestContext): Promise<VertexAI> {
-        const credentials = params.credentials as any;
-        const modelInfo = params.modelInfo as TCustomLLMModel;
-        const projectId = (modelInfo?.settings as TVertexAISettings)?.projectId;
-        const region = modelInfo?.settings?.region;
-
-        return new VertexAI({
-            project: projectId,
-            location: region,
-            apiEndpoint: (modelInfo?.settings as TVertexAISettings)?.apiEndpoint,
-            googleAuthOptions: {
-                credentials: credentials as any,
-            },
-        });
-    }
-
-    protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
-        try {
-            const vertexAI = await this.getClient(context);
-
-            // Separate contents from model configuration
-            const contents = body.contents;
-            delete body.contents;
-
-            // VertexAI expects contents in a specific format: {contents: [...]}
-            const requestParam = { contents };
-
-            const model = vertexAI.getGenerativeModel(body);
-
-            const result = await model.generateContent(requestParam);
-            const response = await result.response;
-
-            const content = response.candidates?.[0]?.content?.parts?.[0]?.text || '';
-            const finishReason = response.candidates?.[0]?.finishReason || 'stop';
-            const usage = response.usageMetadata;
-
-            let toolsData: ToolData[] = [];
-            let useTool = false;
-
-            // Check for function calls in the response
-            const functionCalls = response.candidates?.[0]?.content?.parts?.filter((part) => part.functionCall);
-            if (functionCalls && functionCalls.length > 0) {
-                functionCalls.forEach((call, index) => {
-                    toolsData.push({
-                        index,
-                        id: call.functionCall?.name + '_' + index, // VertexAI doesn't provide IDs like Anthropic
-                        type: 'function',
-                        name: call.functionCall?.name,
-                        arguments: call.functionCall?.args,
-                        role: TLLMMessageRole.Assistant,
-                    });
-                });
-                useTool = true;
-            }
-
-            if (usage) {
-                this.reportUsage(usage, {
-                    modelEntryName: context.modelEntryName,
-                    keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
-                    agentId: context.agentId,
-                    teamId: context.teamId,
-                });
-            }
-
-            return {
-                content,
-                finishReason,
-                toolsData,
-                useTool,
-            };
-        } catch (error) {
-            throw error;
-        }
-    }
-
-    protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
-        const emitter = new EventEmitter();
-
-        setTimeout(async () => {
-            try {
-                const vertexAI = await this.getClient(context);
-
-                // Separate contents from model configuration
-                const contents = body.contents;
-                delete body.contents;
-
-                const vertexModel = vertexAI.getGenerativeModel(body);
-
-                // VertexAI expects contents in a specific format: {contents: [...]}
-                const requestParam = { contents };
-
-                const streamResult = await vertexModel.generateContentStream(requestParam);
-
-                let toolsData: ToolData[] = [];
-                let usageData: any[] = [];
-
-                for await (const chunk of streamResult.stream) {
-                    const chunkText = chunk.candidates?.[0]?.content?.parts?.[0]?.text || '';
-                    if (chunkText) {
-                        emitter.emit('content', chunkText);
-                    }
-                }
-
-                const aggregatedResponse = await streamResult.response;
-
-                // Check for function calls in the final response (like Anthropic does)
-                const functionCalls = aggregatedResponse.candidates?.[0]?.content?.parts?.filter((part) => part.functionCall);
-                if (functionCalls && functionCalls.length > 0) {
-                    functionCalls.forEach((call, index) => {
-                        toolsData.push({
-                            index,
-                            id: call.functionCall?.name + '_' + index,
-                            type: 'function',
-                            name: call.functionCall?.name,
-                            arguments: call.functionCall?.args,
-                            role: TLLMMessageRole.Assistant,
-                        });
-                    });
-
-                    emitter.emit(TLLMEvent.ToolInfo, toolsData);
-                }
-
-                const usage = aggregatedResponse.usageMetadata;
-
-                if (usage) {
-                    const reportedUsage = this.reportUsage(usage, {
-                        modelEntryName: context.modelEntryName,
-                        keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
-                        agentId: context.agentId,
-                        teamId: context.teamId,
-                    });
-                    usageData.push(reportedUsage);
-                }
-
-                const finishReason = (aggregatedResponse.candidates?.[0]?.finishReason || 'stop').toLowerCase();
-
-                if (finishReason !== 'stop') {
-                    emitter.emit('interrupted', finishReason);
-                }
-
-                setTimeout(() => {
-                    emitter.emit('end', toolsData, usageData, finishReason);
-                }, 100);
-            } catch (error) {
-                emitter.emit('error', error);
-            }
-        }, 100);
-
-        return emitter;
-    }
-
-    protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TGoogleAIRequestBody> {
-        const model = params?.model;
-        const { messages, systemMessage } = await this.prepareMessages(params);
-
-        let body: any = {
-            model: model as string,
-            contents: messages, // This will be separated in the request methods
-        };
-
-        const responseFormat = params?.responseFormat || '';
-        let systemInstruction = systemMessage || '';
-
-        if (responseFormat === 'json') {
-            systemInstruction += (systemInstruction ? '\n\n' : '') + JSON_RESPONSE_INSTRUCTION;
-        }
-
-        const config: GenerationConfig = {};
-
-        if (params.maxTokens !== undefined) config.maxOutputTokens = params.maxTokens;
-        if (params.temperature !== undefined) config.temperature = params.temperature;
-        if (params.topP !== undefined) config.topP = params.topP;
-        if (params.topK !== undefined) config.topK = params.topK;
-        if (params.stopSequences?.length) config.stopSequences = params.stopSequences;
-
-        if (systemInstruction) {
-            body.systemInstruction = {
-                role: 'system',
-                parts: [{ text: systemInstruction }],
-            };
-        }
-
-        if (Object.keys(config).length > 0) {
-            body.generationConfig = config;
-        }
-
-        // Handle tools configuration
-        if (params?.toolsConfig?.tools && params?.toolsConfig?.tools.length > 0) {
-            body.tools = this.formatToolsForVertexAI(params.toolsConfig.tools);
-        }
-
-        return body;
-    }
-
-    protected reportUsage(usage: UsageMetadata, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
-        // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
-        const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
-
-        const usageData = {
-            sourceId: `llm:${modelName}`,
-            input_tokens: usage.promptTokenCount || 0,
-            output_tokens: usage.candidatesTokenCount || 0,
-            input_tokens_cache_read: usage.cachedContentTokenCount || 0,
-            input_tokens_cache_write: 0,
-            keySource: metadata.keySource,
-            agentId: metadata.agentId,
-            teamId: metadata.teamId,
-        };
-        SystemEvents.emit('USAGE:LLM', usageData);
-
-        return usageData;
-    }
-
-    private async prepareMessages(params: TLLMPreparedParams) {
-        const messages = params?.messages || [];
-        const files: BinaryInput[] = params?.files || [];
-
-        let processedMessages = [...messages];
-
-        // Handle system messages - VertexAI uses systemInstruction separately
-        const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(processedMessages);
-        processedMessages = otherMessages;
-
-        // Handle files if present
-        if (files?.length > 0) {
-            const fileData = await this.processFiles(files, params.agentId);
-
-            // Add file data to the last user message
-            const userMessage = processedMessages.pop();
-            if (userMessage) {
-                const content = [{ text: userMessage.content as string }, ...fileData];
-                processedMessages.push({
-                    role: userMessage.role,
-                    parts: content,
-                });
-            }
-        }
-
-        // Convert messages to VertexAI format
-        let vertexAIMessages = this.convertMessagesToVertexAIFormat(processedMessages);
-
-        // Ensure we have at least one message with content
-        if (!vertexAIMessages || vertexAIMessages.length === 0) {
-            vertexAIMessages = [
-                {
-                    role: 'user',
-                    parts: [{ text: 'Hello' }],
-                },
-            ];
-        }
-
-        return {
-            messages: vertexAIMessages,
-            systemMessage: (systemMessage as any)?.content || '',
-        };
-    }
-
-    private async processFiles(files: BinaryInput[], agentId: string) {
-        const fileData = [];
-
-        for (const file of files) {
-            const bufferData = await file.readData(AccessCandidate.agent(agentId));
-            const base64Data = bufferData.toString('base64');
-
-            fileData.push({
-                inlineData: {
-                    data: base64Data,
-                    mimeType: file.mimetype,
-                },
-            });
-        }
-
-        return fileData;
-    }
-
-    private convertMessagesToVertexAIFormat(messages: TLLMMessageBlock[]) {
-        return messages
-            .filter((message) => message && (message.content || message.parts))
-            .map((message) => {
-                let parts;
-
-                if (typeof message.content === 'string') {
-                    parts = message.content.trim() ? [{ text: message.content.trim() }] : [{ text: 'Continue' }];
-                } else if (message.parts && Array.isArray(message.parts)) {
-                    parts = message.parts;
-                } else if (message.content) {
-                    parts = [{ text: String(message.content) || 'Continue' }];
-                } else {
-                    parts = [{ text: 'Continue' }];
-                }
-
-                return {
-                    role: message.role === TLLMMessageRole.Assistant ? 'model' : 'user',
-                    parts,
-                };
-            });
-    }
-
-    private formatToolsForVertexAI(tools: any[]) {
-        return [
-            {
-                functionDeclarations: tools.map((tool) => ({
-                    name: tool.name,
-                    description: tool.description || '',
-                    parameters: {
-                        type: 'object',
-                        properties: tool.properties || {},
-                        required: tool.requiredFields || [],
-                    },
-                })),
-            },
-        ];
-    }
-
-    public formatToolsConfig({ toolDefinitions, toolChoice = 'auto' }) {
-        const tools = toolDefinitions.map((tool) => {
-            const { name, description, properties, requiredFields } = tool;
-
-            return {
-                name,
-                description,
-                properties,
-                requiredFields,
-            };
-        });
-
-        return {
-            tools,
-            toolChoice: {
-                type: toolChoice,
-            },
-        };
-    }
-
-    public transformToolMessageBlocks({
-        messageBlock,
-        toolsData,
-    }: {
-        messageBlock: TLLMMessageBlock;
-        toolsData: ToolData[];
-    }): TLLMToolResultMessageBlock[] {
-        const messageBlocks: TLLMToolResultMessageBlock[] = [];
-
-        if (messageBlock) {
-            const parts = [];
-
-            if (typeof messageBlock.content === 'string') {
-                parts.push({ text: messageBlock.content });
-            } else if (Array.isArray(messageBlock.content)) {
-                parts.push(...messageBlock.content);
-            }
-
-            if (messageBlock.tool_calls) {
-                const functionCalls = messageBlock.tool_calls.map((toolCall: any) => ({
-                    functionCall: {
-                        name: toolCall?.function?.name,
-                        args:
-                            typeof toolCall?.function?.arguments === 'string'
-                                ? JSON.parse(toolCall.function.arguments)
-                                : toolCall?.function?.arguments || {},
-                    },
-                }));
-                parts.push(...functionCalls);
-            }
-
-            messageBlocks.push({
-                role: messageBlock.role,
-                parts,
-            });
-        }
-
-        // Transform tool results
-        const toolResults = toolsData.map((toolData) => ({
-            role: TLLMMessageRole.User,
-            parts: [
-                {
-                    functionResponse: {
-                        name: toolData.name,
-                        response: {
-                            name: toolData.name,
-                            content: toolData.result,
-                        },
-                    },
-                },
-            ],
-        }));
-
-        messageBlocks.push(...toolResults);
-        return messageBlocks;
-    }
-}
+import { VertexAI, type GenerationConfig, type UsageMetadata } from '@google-cloud/vertexai';
+import EventEmitter from 'events';
+
+import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
+import {
+    TCustomLLMModel,
+    APIKeySource,
+    TVertexAISettings,
+    ILLMRequestFuncParams,
+    TGoogleAIRequestBody,
+    ILLMRequestContext,
+    TLLMPreparedParams,
+    TLLMMessageBlock,
+    ToolData,
+    TLLMToolResultMessageBlock,
+    TLLMMessageRole,
+    TLLMChatResponse,
+    TLLMEvent,
+} from '@sre/types/LLM.types';
+import { LLMHelper } from '@sre/LLMManager/LLM.helper';
+import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
+import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
+
+import { LLMConnector } from '../LLMConnector';
+import { SystemEvents } from '@sre/Core/SystemEvents';
+
+//TODO: [AHMED/FORHAD]: test the usage reporting for VertexAI because by the time we were implementing the feature of usage reporting
+// we had no access to VertexAI so we assumed it is working (potential bug)
+
+export class VertexAIConnector extends LLMConnector {
+    public name = 'LLM:VertexAI';
+
+    private async getClient(params: ILLMRequestContext): Promise<VertexAI> {
+        const credentials = params.credentials as any;
+        const modelInfo = params.modelInfo as TCustomLLMModel;
+        const projectId = (modelInfo?.settings as TVertexAISettings)?.projectId;
+        const region = modelInfo?.settings?.region;
+
+        return new VertexAI({
+            project: projectId,
+            location: region,
+            apiEndpoint: (modelInfo?.settings as TVertexAISettings)?.apiEndpoint,
+            googleAuthOptions: {
+                credentials: credentials as any,
+            },
+        });
+    }
+
+    protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
+        try {
+            const vertexAI = await this.getClient(context);
+
+            // Separate contents from model configuration
+            const contents = body.contents;
+            delete body.contents;
+
+            // VertexAI expects contents in a specific format: {contents: [...]}
+            const requestParam = { contents };
+
+            const model = vertexAI.getGenerativeModel(body);
+
+            const result = await model.generateContent(requestParam);
+            const response = await result.response;
+
+            const content = response.candidates?.[0]?.content?.parts?.[0]?.text || '';
+            const finishReason = response.candidates?.[0]?.finishReason || 'stop';
+            const usage = response.usageMetadata;
+
+            let toolsData: ToolData[] = [];
+            let useTool = false;
+
+            // Check for function calls in the response
+            const functionCalls = response.candidates?.[0]?.content?.parts?.filter((part) => part.functionCall);
+            if (functionCalls && functionCalls.length > 0) {
+                functionCalls.forEach((call, index) => {
+                    toolsData.push({
+                        index,
+                        id: call.functionCall?.name + '_' + index, // VertexAI doesn't provide IDs like Anthropic
+                        type: 'function',
+                        name: call.functionCall?.name,
+                        arguments: call.functionCall?.args,
+                        role: TLLMMessageRole.Assistant,
+                    });
+                });
+                useTool = true;
+            }
+
+            if (usage) {
+                this.reportUsage(usage, {
+                    modelEntryName: context.modelEntryName,
+                    keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
+                    agentId: context.agentId,
+                    teamId: context.teamId,
+                });
+            }
+
+            return {
+                content,
+                finishReason,
+                toolsData,
+                useTool,
+            };
+        } catch (error) {
+            throw error;
+        }
+    }
+
+    protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
+        const emitter = new EventEmitter();
+
+        setTimeout(async () => {
+            try {
+                const vertexAI = await this.getClient(context);
+
+                // Separate contents from model configuration
+                const contents = body.contents;
+                delete body.contents;
+
+                const vertexModel = vertexAI.getGenerativeModel(body);
+
+                // VertexAI expects contents in a specific format: {contents: [...]}
+                const requestParam = { contents };
+
+                const streamResult = await vertexModel.generateContentStream(requestParam);
+
+                let toolsData: ToolData[] = [];
+                let usageData: any[] = [];
+
+                for await (const chunk of streamResult.stream) {
+                    const chunkText = chunk.candidates?.[0]?.content?.parts?.[0]?.text || '';
+                    if (chunkText) {
+                        emitter.emit('content', chunkText);
+                    }
+                }
+
+                const aggregatedResponse = await streamResult.response;
+
+                // Check for function calls in the final response (like Anthropic does)
+                const functionCalls = aggregatedResponse.candidates?.[0]?.content?.parts?.filter((part) => part.functionCall);
+                if (functionCalls && functionCalls.length > 0) {
+                    functionCalls.forEach((call, index) => {
+                        toolsData.push({
+                            index,
+                            id: call.functionCall?.name + '_' + index,
+                            type: 'function',
+                            name: call.functionCall?.name,
+                            arguments: call.functionCall?.args,
+                            role: TLLMMessageRole.Assistant,
+                        });
+                    });
+
+                    emitter.emit(TLLMEvent.ToolInfo, toolsData);
+                }
+
+                const usage = aggregatedResponse.usageMetadata;
+
+                if (usage) {
+                    const reportedUsage = this.reportUsage(usage, {
+                        modelEntryName: context.modelEntryName,
+                        keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
+                        agentId: context.agentId,
+                        teamId: context.teamId,
+                    });
+                    usageData.push(reportedUsage);
+                }
+
+                const finishReason = (aggregatedResponse.candidates?.[0]?.finishReason || 'stop').toLowerCase();
+
+                if (finishReason !== 'stop') {
+                    emitter.emit('interrupted', finishReason);
+                }
+
+                setTimeout(() => {
+                    emitter.emit('end', toolsData, usageData, finishReason);
+                }, 100);
+            } catch (error) {
+                emitter.emit('error', error);
+            }
+        }, 100);
+
+        return emitter;
+    }
+
+    protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TGoogleAIRequestBody> {
+        const model = params?.model;
+        const { messages, systemMessage } = await this.prepareMessages(params);
+
+        let body: any = {
+            model: model as string,
+            contents: messages, // This will be separated in the request methods
+        };
+
+        const responseFormat = params?.responseFormat || '';
+        let systemInstruction = systemMessage || '';
+
+        if (responseFormat === 'json') {
+            systemInstruction += (systemInstruction ? '\n\n' : '') + JSON_RESPONSE_INSTRUCTION;
+        }
+
+        const config: GenerationConfig = {};
+
+        if (params.maxTokens !== undefined) config.maxOutputTokens = params.maxTokens;
+        if (params.temperature !== undefined) config.temperature = params.temperature;
+        if (params.topP !== undefined) config.topP = params.topP;
+        if (params.topK !== undefined) config.topK = params.topK;
+        if (params.stopSequences?.length) config.stopSequences = params.stopSequences;
+
+        if (systemInstruction) {
+            body.systemInstruction = {
+                role: 'system',
+                parts: [{ text: systemInstruction }],
+            };
+        }
+
+        if (Object.keys(config).length > 0) {
+            body.generationConfig = config;
+        }
+
+        // Handle tools configuration
+        if (params?.toolsConfig?.tools && params?.toolsConfig?.tools.length > 0) {
+            body.tools = this.formatToolsForVertexAI(params.toolsConfig.tools);
+        }
+
+        return body;
+    }
+
+    protected reportUsage(usage: UsageMetadata, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
+        // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
+        const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
+
+        const usageData = {
+            sourceId: `llm:${modelName}`,
+            input_tokens: usage.promptTokenCount || 0,
+            output_tokens: usage.candidatesTokenCount || 0,
+            input_tokens_cache_read: usage.cachedContentTokenCount || 0,
+            input_tokens_cache_write: 0,
+            keySource: metadata.keySource,
+            agentId: metadata.agentId,
+            teamId: metadata.teamId,
+        };
+        SystemEvents.emit('USAGE:LLM', usageData);
+
+        return usageData;
+    }
+
+    private async prepareMessages(params: TLLMPreparedParams) {
+        const messages = params?.messages || [];
+        const files: BinaryInput[] = params?.files || [];
+
+        let processedMessages = [...messages];
+
+        // Handle system messages - VertexAI uses systemInstruction separately
+        const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(processedMessages);
+        processedMessages = otherMessages;
+
+        // Handle files if present
+        if (files?.length > 0) {
+            const fileData = await this.processFiles(files, params.agentId);
+
+            // Add file data to the last user message
+            const userMessage = processedMessages.pop();
+            if (userMessage) {
+                const content = [{ text: userMessage.content as string }, ...fileData];
+                processedMessages.push({
+                    role: userMessage.role,
+                    parts: content,
+                });
+            }
+        }
+
+        // Convert messages to VertexAI format
+        let vertexAIMessages = this.convertMessagesToVertexAIFormat(processedMessages);
+
+        // Ensure we have at least one message with content
+        if (!vertexAIMessages || vertexAIMessages.length === 0) {
+            vertexAIMessages = [
+                {
+                    role: 'user',
+                    parts: [{ text: 'Hello' }],
+                },
+            ];
+        }
+
+        return {
+            messages: vertexAIMessages,
+            systemMessage: (systemMessage as any)?.content || '',
+        };
+    }
+
+    private async processFiles(files: BinaryInput[], agentId: string) {
+        const fileData = [];
+
+        for (const file of files) {
+            const bufferData = await file.readData(AccessCandidate.agent(agentId));
+            const base64Data = bufferData.toString('base64');
+
+            fileData.push({
+                inlineData: {
+                    data: base64Data,
+                    mimeType: file.mimetype,
+                },
+            });
+        }
+
+        return fileData;
+    }
+
+    private convertMessagesToVertexAIFormat(messages: TLLMMessageBlock[]) {
+        return messages
+            .filter((message) => message && (message.content || message.parts))
+            .map((message) => {
+                let parts;
+
+                if (typeof message.content === 'string') {
+                    parts = message.content.trim() ? [{ text: message.content.trim() }] : [{ text: 'Continue' }];
+                } else if (message.parts && Array.isArray(message.parts)) {
+                    parts = message.parts;
+                } else if (message.content) {
+                    parts = [{ text: String(message.content) || 'Continue' }];
+                } else {
+                    parts = [{ text: 'Continue' }];
+                }
+
+                return {
+                    role: message.role === TLLMMessageRole.Assistant ? 'model' : 'user',
+                    parts,
+                };
+            });
+    }
+
+    private formatToolsForVertexAI(tools: any[]) {
+        return [
+            {
+                functionDeclarations: tools.map((tool) => ({
+                    name: tool.name,
+                    description: tool.description || '',
+                    parameters: {
+                        type: 'object',
+                        properties: tool.properties || {},
+                        required: tool.requiredFields || [],
+                    },
+                })),
+            },
+        ];
+    }
+
+    public formatToolsConfig({ toolDefinitions, toolChoice = 'auto' }) {
+        const tools = toolDefinitions.map((tool) => {
+            const { name, description, properties, requiredFields } = tool;
+
+            return {
+                name,
+                description,
+                properties,
+                requiredFields,
+            };
+        });
+
+        return {
+            tools,
+            toolChoice: {
+                type: toolChoice,
+            },
+        };
+    }
+
+    public transformToolMessageBlocks({
+        messageBlock,
+        toolsData,
+    }: {
+        messageBlock: TLLMMessageBlock;
+        toolsData: ToolData[];
+    }): TLLMToolResultMessageBlock[] {
+        const messageBlocks: TLLMToolResultMessageBlock[] = [];
+
+        if (messageBlock) {
+            const parts = [];
+
+            if (typeof messageBlock.content === 'string') {
+                parts.push({ text: messageBlock.content });
+            } else if (Array.isArray(messageBlock.content)) {
+                parts.push(...messageBlock.content);
+            }
+
+            if (messageBlock.tool_calls) {
+                const functionCalls = messageBlock.tool_calls.map((toolCall: any) => ({
+                    functionCall: {
+                        name: toolCall?.function?.name,
+                        args:
+                            typeof toolCall?.function?.arguments === 'string'
+                                ? JSON.parse(toolCall.function.arguments)
+                                : toolCall?.function?.arguments || {},
+                    },
+                }));
+                parts.push(...functionCalls);
+            }
+
+            messageBlocks.push({
+                role: messageBlock.role,
+                parts,
+            });
+        }
+
+        // Transform tool results
+        const toolResults = toolsData.map((toolData) => ({
+            role: TLLMMessageRole.User,
+            parts: [
+                {
+                    functionResponse: {
+                        name: toolData.name,
+                        response: {
+                            name: toolData.name,
+                            content: toolData.result,
+                        },
+                    },
+                },
+            ],
+        }));
+
+        messageBlocks.push(...toolResults);
+        return messageBlocks;
+    }
+}
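
A note on the streaming contract above: streamRequest returns its EventEmitter synchronously and defers the actual Vertex AI call behind a setTimeout, so a caller can attach listeners before the first event fires. The sketch below shows a consumer wired to the events this connector emits ('content', TLLMEvent.ToolInfo, 'interrupted', 'end', and 'error'). It is a minimal illustration, not part of the package: the `emitter` and `TLLMEvent` declarations are hypothetical stand-ins for values a real caller would obtain from the SRE runtime.

import EventEmitter from 'events';

// Assumed stand-ins: `emitter` is the return value of
// VertexAIConnector.streamRequest(...), and TLLMEvent.ToolInfo mirrors
// the enum member used in the connector above.
declare const emitter: EventEmitter;
declare const TLLMEvent: { ToolInfo: string };

let text = '';

// 'content' delivers incremental text chunks as they stream in.
emitter.on('content', (chunk: string) => {
    text += chunk;
});

// ToolInfo fires after streaming completes, if the aggregated response
// contained functionCall parts; ids are synthesized as `${name}_${index}`
// because Vertex AI does not return tool-call ids.
emitter.on(TLLMEvent.ToolInfo, (toolsData) => {
    console.log('tool calls requested:', toolsData);
});

// 'interrupted' fires when the lowercased finishReason is anything other
// than 'stop' (e.g. 'max_tokens', 'safety').
emitter.on('interrupted', (reason: string) => {
    console.warn('generation stopped early:', reason);
});

// 'end' arrives roughly 100 ms after the stream finishes, carrying the
// collected tool calls, the usage records reported via SystemEvents, and
// the finish reason.
emitter.on('end', (toolsData, usageData, finishReason) => {
    console.log({ text, toolsData, usageData, finishReason });
});

emitter.on('error', (err) => console.error('stream failed:', err));

Because the connector emits 'end' on a second 100 ms setTimeout after the stream completes, 'end' is the last signal delivered and is the one a consumer should treat as final.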