@smythos/sre 1.6.8 → 1.6.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (239) hide show
  1. package/CHANGELOG +111 -111
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/bundle-analysis-lazy.html +4949 -0
  5. package/dist/bundle-analysis.html +4949 -0
  6. package/dist/index.js +2 -2
  7. package/dist/index.js.map +1 -1
  8. package/dist/types/Components/Triggers/Gmail.trigger.d.ts +58 -0
  9. package/dist/types/Components/Triggers/GmailTrigger.class.d.ts +44 -0
  10. package/dist/types/Components/Triggers/Trigger.class.d.ts +21 -0
  11. package/dist/types/Components/Triggers/WhatsApp.trigger.d.ts +22 -0
  12. package/dist/types/helpers/AIPerformanceAnalyzer.helper.d.ts +45 -0
  13. package/dist/types/helpers/AIPerformanceCollector.helper.d.ts +111 -0
  14. package/dist/types/subsystems/IO/Storage.service/connectors/AzureBlobStorage.class.d.ts +211 -0
  15. package/dist/types/subsystems/IO/VectorDB.service/connectors/WeaviateVectorDB.class.d.ts +187 -0
  16. package/dist/types/subsystems/PerformanceManager/Performance.service/PerformanceConnector.d.ts +102 -0
  17. package/dist/types/subsystems/PerformanceManager/Performance.service/connectors/LocalPerformanceConnector.class.d.ts +100 -0
  18. package/dist/types/subsystems/PerformanceManager/Performance.service/index.d.ts +22 -0
  19. package/dist/types/subsystems/Security/Credentials/Credentials.class.d.ts +2 -0
  20. package/dist/types/subsystems/Security/Credentials/ManagedOAuth2Credentials.class.d.ts +18 -0
  21. package/dist/types/subsystems/Security/Credentials/OAuth2Credentials.class.d.ts +14 -0
  22. package/dist/types/types/Performance.types.d.ts +468 -0
  23. package/dist/types/utils/package-manager.utils.d.ts +26 -0
  24. package/package.json +1 -1
  25. package/src/Components/APICall/APICall.class.ts +161 -161
  26. package/src/Components/APICall/AccessTokenManager.ts +166 -166
  27. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  28. package/src/Components/APICall/OAuth.helper.ts +447 -447
  29. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  30. package/src/Components/APICall/parseData.ts +167 -167
  31. package/src/Components/APICall/parseHeaders.ts +41 -41
  32. package/src/Components/APICall/parseProxy.ts +68 -68
  33. package/src/Components/APICall/parseUrl.ts +91 -91
  34. package/src/Components/APIEndpoint.class.ts +234 -234
  35. package/src/Components/APIOutput.class.ts +58 -58
  36. package/src/Components/AgentPlugin.class.ts +102 -102
  37. package/src/Components/Async.class.ts +155 -155
  38. package/src/Components/Await.class.ts +90 -90
  39. package/src/Components/Classifier.class.ts +158 -158
  40. package/src/Components/Component.class.ts +147 -147
  41. package/src/Components/ComponentHost.class.ts +38 -38
  42. package/src/Components/DataSourceCleaner.class.ts +92 -92
  43. package/src/Components/DataSourceIndexer.class.ts +181 -181
  44. package/src/Components/DataSourceLookup.class.ts +161 -161
  45. package/src/Components/ECMASandbox.class.ts +72 -72
  46. package/src/Components/FEncDec.class.ts +29 -29
  47. package/src/Components/FHash.class.ts +33 -33
  48. package/src/Components/FSign.class.ts +80 -80
  49. package/src/Components/FSleep.class.ts +25 -25
  50. package/src/Components/FTimestamp.class.ts +66 -66
  51. package/src/Components/FileStore.class.ts +78 -78
  52. package/src/Components/ForEach.class.ts +97 -97
  53. package/src/Components/GPTPlugin.class.ts +70 -70
  54. package/src/Components/GenAILLM.class.ts +586 -586
  55. package/src/Components/HuggingFace.class.ts +313 -313
  56. package/src/Components/Image/imageSettings.config.ts +70 -70
  57. package/src/Components/ImageGenerator.class.ts +483 -483
  58. package/src/Components/JSONFilter.class.ts +54 -54
  59. package/src/Components/LLMAssistant.class.ts +213 -213
  60. package/src/Components/LogicAND.class.ts +28 -28
  61. package/src/Components/LogicAtLeast.class.ts +85 -85
  62. package/src/Components/LogicAtMost.class.ts +86 -86
  63. package/src/Components/LogicOR.class.ts +29 -29
  64. package/src/Components/LogicXOR.class.ts +34 -34
  65. package/src/Components/MCPClient.class.ts +137 -137
  66. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  67. package/src/Components/MemoryReadKeyVal.class.ts +67 -67
  68. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  69. package/src/Components/MemoryWriteObject.class.ts +97 -97
  70. package/src/Components/MultimodalLLM.class.ts +128 -128
  71. package/src/Components/OpenAPI.class.ts +72 -72
  72. package/src/Components/PromptGenerator.class.ts +122 -122
  73. package/src/Components/ScrapflyWebScrape.class.ts +183 -183
  74. package/src/Components/ServerlessCode.class.ts +123 -123
  75. package/src/Components/TavilyWebSearch.class.ts +103 -103
  76. package/src/Components/VisionLLM.class.ts +104 -104
  77. package/src/Components/ZapierAction.class.ts +127 -127
  78. package/src/Components/index.ts +97 -97
  79. package/src/Core/AgentProcess.helper.ts +240 -240
  80. package/src/Core/Connector.class.ts +123 -123
  81. package/src/Core/ConnectorsService.ts +197 -197
  82. package/src/Core/DummyConnector.ts +49 -49
  83. package/src/Core/HookService.ts +105 -105
  84. package/src/Core/SmythRuntime.class.ts +241 -241
  85. package/src/Core/SystemEvents.ts +16 -16
  86. package/src/Core/boot.ts +56 -56
  87. package/src/config.ts +15 -15
  88. package/src/constants.ts +126 -126
  89. package/src/data/hugging-face.params.json +579 -579
  90. package/src/helpers/AWSLambdaCode.helper.ts +624 -624
  91. package/src/helpers/BinaryInput.helper.ts +331 -331
  92. package/src/helpers/Conversation.helper.ts +1157 -1157
  93. package/src/helpers/ECMASandbox.helper.ts +64 -64
  94. package/src/helpers/JsonContent.helper.ts +97 -97
  95. package/src/helpers/LocalCache.helper.ts +97 -97
  96. package/src/helpers/Log.helper.ts +274 -274
  97. package/src/helpers/OpenApiParser.helper.ts +150 -150
  98. package/src/helpers/S3Cache.helper.ts +147 -147
  99. package/src/helpers/SmythURI.helper.ts +5 -5
  100. package/src/helpers/Sysconfig.helper.ts +95 -95
  101. package/src/helpers/TemplateString.helper.ts +243 -243
  102. package/src/helpers/TypeChecker.helper.ts +329 -329
  103. package/src/index.ts +3 -3
  104. package/src/index.ts.bak +3 -3
  105. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  106. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  107. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  108. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  109. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +145 -145
  110. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  111. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  112. package/src/subsystems/AgentManager/AgentLogger.class.ts +301 -301
  113. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  114. package/src/subsystems/AgentManager/AgentRuntime.class.ts +557 -557
  115. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  116. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  117. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  118. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  119. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  120. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  121. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  122. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  123. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  124. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +171 -171
  125. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  126. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  127. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  128. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  129. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  130. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  131. package/src/subsystems/IO/Log.service/index.ts +13 -13
  132. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  133. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  134. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  135. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  136. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  137. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  138. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  139. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  140. package/src/subsystems/IO/Router.service/index.ts +11 -11
  141. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +488 -488
  142. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  143. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  144. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  145. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  146. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  147. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +465 -465
  148. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +387 -387
  149. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +408 -408
  150. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  151. package/src/subsystems/IO/VectorDB.service/embed/GoogleEmbedding.ts +118 -118
  152. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  153. package/src/subsystems/IO/VectorDB.service/embed/index.ts +26 -26
  154. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  155. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  156. package/src/subsystems/LLMManager/LLM.inference.ts +345 -345
  157. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +492 -492
  158. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  159. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +666 -666
  160. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +407 -407
  161. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +92 -92
  162. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +983 -983
  163. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +319 -319
  164. package/src/subsystems/LLMManager/LLM.service/connectors/Ollama.class.ts +361 -361
  165. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +257 -257
  166. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +430 -430
  167. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +503 -503
  168. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  169. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  170. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  171. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  172. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  173. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  174. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  175. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  176. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +478 -478
  177. package/src/subsystems/LLMManager/LLM.service/index.ts +47 -47
  178. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +303 -303
  179. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +280 -271
  180. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  181. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  182. package/src/subsystems/LLMManager/models.ts +2540 -2540
  183. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  184. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  185. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  186. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +214 -214
  187. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  188. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  189. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  190. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  191. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  192. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  193. package/src/subsystems/MemoryManager/RuntimeContext.ts +277 -277
  194. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  195. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  196. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  197. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  198. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  199. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +170 -170
  200. package/src/subsystems/Security/Account.service/connectors/MySQLAccount.class.ts +76 -76
  201. package/src/subsystems/Security/Account.service/index.ts +14 -14
  202. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  203. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  204. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  205. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  206. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  207. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  208. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  209. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  210. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  211. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  212. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  213. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  214. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  215. package/src/types/ACL.types.ts +104 -104
  216. package/src/types/AWS.types.ts +10 -10
  217. package/src/types/Agent.types.ts +61 -61
  218. package/src/types/AgentLogger.types.ts +17 -17
  219. package/src/types/Cache.types.ts +1 -1
  220. package/src/types/Common.types.ts +2 -2
  221. package/src/types/LLM.types.ts +520 -520
  222. package/src/types/Redis.types.ts +8 -8
  223. package/src/types/SRE.types.ts +64 -64
  224. package/src/types/Security.types.ts +14 -14
  225. package/src/types/Storage.types.ts +5 -5
  226. package/src/types/VectorDB.types.ts +86 -86
  227. package/src/utils/base64.utils.ts +275 -275
  228. package/src/utils/cli.utils.ts +68 -68
  229. package/src/utils/data.utils.ts +322 -322
  230. package/src/utils/date-time.utils.ts +22 -22
  231. package/src/utils/general.utils.ts +238 -238
  232. package/src/utils/index.ts +12 -12
  233. package/src/utils/lazy-client.ts +261 -261
  234. package/src/utils/numbers.utils.ts +13 -13
  235. package/src/utils/oauth.utils.ts +35 -35
  236. package/src/utils/string.utils.ts +414 -414
  237. package/src/utils/url.utils.ts +19 -19
  238. package/src/utils/validation.utils.ts +74 -74
  239. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
@@ -1,430 +1,430 @@
1
- import { VertexAI, type GenerationConfig, type UsageMetadata } from '@google-cloud/vertexai';
2
- import EventEmitter from 'events';
3
-
4
- import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
5
- import {
6
- TCustomLLMModel,
7
- APIKeySource,
8
- TVertexAISettings,
9
- ILLMRequestFuncParams,
10
- TGoogleAIRequestBody,
11
- ILLMRequestContext,
12
- TLLMPreparedParams,
13
- TLLMMessageBlock,
14
- ToolData,
15
- TLLMToolResultMessageBlock,
16
- TLLMMessageRole,
17
- TLLMChatResponse,
18
- TLLMEvent,
19
- } from '@sre/types/LLM.types';
20
- import { LLMHelper } from '@sre/LLMManager/LLM.helper';
21
- import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
22
- import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
23
-
24
- import { LLMConnector } from '../LLMConnector';
25
- import { SystemEvents } from '@sre/Core/SystemEvents';
26
- import { Logger } from '@sre/helpers/Log.helper';
27
-
28
- const logger = Logger('VertexAIConnector');
29
-
30
- //TODO: [AHMED/FORHAD]: test the usage reporting for VertexAI because by the time we were implementing the feature of usage reporting
31
- // we had no access to VertexAI so we assumed it is working (potential bug)
32
-
33
- export class VertexAIConnector extends LLMConnector {
34
- public name = 'LLM:VertexAI';
35
-
36
- private async getClient(params: ILLMRequestContext): Promise<VertexAI> {
37
- const credentials = params.credentials as any;
38
- const modelInfo = params.modelInfo as TCustomLLMModel;
39
- const projectId = (modelInfo?.settings as TVertexAISettings)?.projectId;
40
- const region = modelInfo?.settings?.region;
41
-
42
- return new VertexAI({
43
- project: projectId,
44
- location: region,
45
- apiEndpoint: (modelInfo?.settings as TVertexAISettings)?.apiEndpoint,
46
- googleAuthOptions: {
47
- credentials: credentials as any,
48
- },
49
- });
50
- }
51
-
52
- protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
53
- try {
54
- logger.debug(`request ${this.name}`, acRequest.candidate);
55
- const vertexAI = await this.getClient(context);
56
-
57
- // Separate contents from model configuration
58
- const contents = body.contents;
59
- delete body.contents;
60
-
61
- // VertexAI expects contents in a specific format: {contents: [...]}
62
- const requestParam = { contents };
63
-
64
- const model = vertexAI.getGenerativeModel(body);
65
-
66
- const result = await model.generateContent(requestParam);
67
- const response = await result.response;
68
-
69
- const content = response.candidates?.[0]?.content?.parts?.[0]?.text || '';
70
- const finishReason = response.candidates?.[0]?.finishReason || 'stop';
71
- const usage = response.usageMetadata;
72
-
73
- let toolsData: ToolData[] = [];
74
- let useTool = false;
75
-
76
- // Check for function calls in the response
77
- const functionCalls = response.candidates?.[0]?.content?.parts?.filter((part) => part.functionCall);
78
- if (functionCalls && functionCalls.length > 0) {
79
- functionCalls.forEach((call, index) => {
80
- toolsData.push({
81
- index,
82
- id: call.functionCall?.name + '_' + index, // VertexAI doesn't provide IDs like Anthropic
83
- type: 'function',
84
- name: call.functionCall?.name,
85
- arguments: call.functionCall?.args,
86
- role: TLLMMessageRole.Assistant,
87
- });
88
- });
89
- useTool = true;
90
- }
91
-
92
- if (usage) {
93
- this.reportUsage(usage, {
94
- modelEntryName: context.modelEntryName,
95
- keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
96
- agentId: context.agentId,
97
- teamId: context.teamId,
98
- });
99
- }
100
-
101
- return {
102
- content,
103
- finishReason,
104
- toolsData,
105
- useTool,
106
- };
107
- } catch (error) {
108
- logger.error(`request ${this.name}`, error, acRequest.candidate);
109
- throw error;
110
- }
111
- }
112
-
113
- protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
114
- const emitter = new EventEmitter();
115
-
116
- setTimeout(async () => {
117
- try {
118
- logger.debug(`streamRequest ${this.name}`, acRequest.candidate);
119
- const vertexAI = await this.getClient(context);
120
-
121
- // Separate contents from model configuration
122
- const contents = body.contents;
123
- delete body.contents;
124
-
125
- const vertexModel = vertexAI.getGenerativeModel(body);
126
-
127
- // VertexAI expects contents in a specific format: {contents: [...]}
128
- const requestParam = { contents };
129
-
130
- const streamResult = await vertexModel.generateContentStream(requestParam);
131
-
132
- let toolsData: ToolData[] = [];
133
- let usageData: any[] = [];
134
-
135
- for await (const chunk of streamResult.stream) {
136
- const chunkText = chunk.candidates?.[0]?.content?.parts?.[0]?.text || '';
137
- if (chunkText) {
138
- emitter.emit('content', chunkText);
139
- }
140
- }
141
-
142
- const aggregatedResponse = await streamResult.response;
143
-
144
- // Check for function calls in the final response (like Anthropic does)
145
- const functionCalls = aggregatedResponse.candidates?.[0]?.content?.parts?.filter((part) => part.functionCall);
146
- if (functionCalls && functionCalls.length > 0) {
147
- functionCalls.forEach((call, index) => {
148
- toolsData.push({
149
- index,
150
- id: call.functionCall?.name + '_' + index,
151
- type: 'function',
152
- name: call.functionCall?.name,
153
- arguments: call.functionCall?.args,
154
- role: TLLMMessageRole.Assistant,
155
- });
156
- });
157
-
158
- emitter.emit(TLLMEvent.ToolInfo, toolsData);
159
- }
160
-
161
- const usage = aggregatedResponse.usageMetadata;
162
-
163
- if (usage) {
164
- const reportedUsage = this.reportUsage(usage, {
165
- modelEntryName: context.modelEntryName,
166
- keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
167
- agentId: context.agentId,
168
- teamId: context.teamId,
169
- });
170
- usageData.push(reportedUsage);
171
- }
172
-
173
- const finishReason = (aggregatedResponse.candidates?.[0]?.finishReason || 'stop').toLowerCase();
174
-
175
- if (finishReason !== 'stop') {
176
- emitter.emit('interrupted', finishReason);
177
- }
178
-
179
- setTimeout(() => {
180
- emitter.emit('end', toolsData, usageData, finishReason);
181
- }, 100);
182
- } catch (error) {
183
- logger.error(`streamRequest ${this.name}`, error, acRequest.candidate);
184
- emitter.emit('error', error);
185
- }
186
- }, 100);
187
-
188
- return emitter;
189
- }
190
-
191
- protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TGoogleAIRequestBody> {
192
- const model = params?.model;
193
- const { messages, systemMessage } = await this.prepareMessages(params);
194
-
195
- let body: any = {
196
- model: model as string,
197
- contents: messages, // This will be separated in the request methods
198
- };
199
-
200
- const responseFormat = params?.responseFormat || '';
201
- let systemInstruction = systemMessage || '';
202
-
203
- if (responseFormat === 'json') {
204
- systemInstruction += (systemInstruction ? '\n\n' : '') + JSON_RESPONSE_INSTRUCTION;
205
- }
206
-
207
- const config: GenerationConfig = {};
208
-
209
- if (params.maxTokens !== undefined) config.maxOutputTokens = params.maxTokens;
210
- if (params.temperature !== undefined) config.temperature = params.temperature;
211
- if (params.topP !== undefined) config.topP = params.topP;
212
- if (params.topK !== undefined) config.topK = params.topK;
213
- if (params.stopSequences?.length) config.stopSequences = params.stopSequences;
214
-
215
- if (systemInstruction) {
216
- body.systemInstruction = {
217
- role: 'system',
218
- parts: [{ text: systemInstruction }],
219
- };
220
- }
221
-
222
- if (Object.keys(config).length > 0) {
223
- body.generationConfig = config;
224
- }
225
-
226
- // Handle tools configuration
227
- if (params?.toolsConfig?.tools && params?.toolsConfig?.tools.length > 0) {
228
- body.tools = this.formatToolsForVertexAI(params.toolsConfig.tools);
229
- }
230
-
231
- return body;
232
- }
233
-
234
- protected reportUsage(usage: UsageMetadata, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
235
- // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
236
- const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
237
-
238
- const usageData = {
239
- sourceId: `llm:${modelName}`,
240
- input_tokens: usage.promptTokenCount || 0,
241
- output_tokens: usage.candidatesTokenCount || 0,
242
- input_tokens_cache_read: usage.cachedContentTokenCount || 0,
243
- input_tokens_cache_write: 0,
244
- keySource: metadata.keySource,
245
- agentId: metadata.agentId,
246
- teamId: metadata.teamId,
247
- };
248
- SystemEvents.emit('USAGE:LLM', usageData);
249
-
250
- return usageData;
251
- }
252
-
253
- private async prepareMessages(params: TLLMPreparedParams) {
254
- const messages = params?.messages || [];
255
- const files: BinaryInput[] = params?.files || [];
256
-
257
- let processedMessages = [...messages];
258
-
259
- // Handle system messages - VertexAI uses systemInstruction separately
260
- const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(processedMessages);
261
- processedMessages = otherMessages;
262
-
263
- // Handle files if present
264
- if (files?.length > 0) {
265
- const fileData = await this.processFiles(files, params.agentId);
266
-
267
- // Add file data to the last user message
268
- const userMessage = processedMessages.pop();
269
- if (userMessage) {
270
- const content = [{ text: userMessage.content as string }, ...fileData];
271
- processedMessages.push({
272
- role: userMessage.role,
273
- parts: content,
274
- });
275
- }
276
- }
277
-
278
- // Convert messages to VertexAI format
279
- let vertexAIMessages = this.convertMessagesToVertexAIFormat(processedMessages);
280
-
281
- // Ensure we have at least one message with content
282
- if (!vertexAIMessages || vertexAIMessages.length === 0) {
283
- vertexAIMessages = [
284
- {
285
- role: 'user',
286
- parts: [{ text: 'Hello' }],
287
- },
288
- ];
289
- }
290
-
291
- return {
292
- messages: vertexAIMessages,
293
- systemMessage: (systemMessage as any)?.content || '',
294
- };
295
- }
296
-
297
- private async processFiles(files: BinaryInput[], agentId: string) {
298
- const fileData = [];
299
-
300
- for (const file of files) {
301
- const bufferData = await file.readData(AccessCandidate.agent(agentId));
302
- const base64Data = bufferData.toString('base64');
303
-
304
- fileData.push({
305
- inlineData: {
306
- data: base64Data,
307
- mimeType: file.mimetype,
308
- },
309
- });
310
- }
311
-
312
- return fileData;
313
- }
314
-
315
- private convertMessagesToVertexAIFormat(messages: TLLMMessageBlock[]) {
316
- return messages
317
- .filter((message) => message && (message.content || message.parts))
318
- .map((message) => {
319
- let parts;
320
-
321
- if (typeof message.content === 'string') {
322
- parts = message.content.trim() ? [{ text: message.content.trim() }] : [{ text: 'Continue' }];
323
- } else if (message.parts && Array.isArray(message.parts)) {
324
- parts = message.parts;
325
- } else if (message.content) {
326
- parts = [{ text: String(message.content) || 'Continue' }];
327
- } else {
328
- parts = [{ text: 'Continue' }];
329
- }
330
-
331
- return {
332
- role: message.role === TLLMMessageRole.Assistant ? 'model' : 'user',
333
- parts,
334
- };
335
- });
336
- }
337
-
338
- private formatToolsForVertexAI(tools: any[]) {
339
- return [
340
- {
341
- functionDeclarations: tools.map((tool) => ({
342
- name: tool.name,
343
- description: tool.description || '',
344
- parameters: {
345
- type: 'object',
346
- properties: tool.properties || {},
347
- required: tool.requiredFields || [],
348
- },
349
- })),
350
- },
351
- ];
352
- }
353
-
354
- public formatToolsConfig({ toolDefinitions, toolChoice = 'auto' }) {
355
- const tools = toolDefinitions.map((tool) => {
356
- const { name, description, properties, requiredFields } = tool;
357
-
358
- return {
359
- name,
360
- description,
361
- properties,
362
- requiredFields,
363
- };
364
- });
365
-
366
- return {
367
- tools,
368
- toolChoice: {
369
- type: toolChoice,
370
- },
371
- };
372
- }
373
-
374
- public transformToolMessageBlocks({
375
- messageBlock,
376
- toolsData,
377
- }: {
378
- messageBlock: TLLMMessageBlock;
379
- toolsData: ToolData[];
380
- }): TLLMToolResultMessageBlock[] {
381
- const messageBlocks: TLLMToolResultMessageBlock[] = [];
382
-
383
- if (messageBlock) {
384
- const parts = [];
385
-
386
- if (typeof messageBlock.content === 'string') {
387
- parts.push({ text: messageBlock.content });
388
- } else if (Array.isArray(messageBlock.content)) {
389
- parts.push(...messageBlock.content);
390
- }
391
-
392
- if (messageBlock.tool_calls) {
393
- const functionCalls = messageBlock.tool_calls.map((toolCall: any) => ({
394
- functionCall: {
395
- name: toolCall?.function?.name,
396
- args:
397
- typeof toolCall?.function?.arguments === 'string'
398
- ? JSON.parse(toolCall.function.arguments)
399
- : toolCall?.function?.arguments || {},
400
- },
401
- }));
402
- parts.push(...functionCalls);
403
- }
404
-
405
- messageBlocks.push({
406
- role: messageBlock.role,
407
- parts,
408
- });
409
- }
410
-
411
- // Transform tool results
412
- const toolResults = toolsData.map((toolData) => ({
413
- role: TLLMMessageRole.User,
414
- parts: [
415
- {
416
- functionResponse: {
417
- name: toolData.name,
418
- response: {
419
- name: toolData.name,
420
- content: toolData.result,
421
- },
422
- },
423
- },
424
- ],
425
- }));
426
-
427
- messageBlocks.push(...toolResults);
428
- return messageBlocks;
429
- }
430
- }
1
+ import { VertexAI, type GenerationConfig, type UsageMetadata } from '@google-cloud/vertexai';
2
+ import EventEmitter from 'events';
3
+
4
+ import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
5
+ import {
6
+ TCustomLLMModel,
7
+ APIKeySource,
8
+ TVertexAISettings,
9
+ ILLMRequestFuncParams,
10
+ TGoogleAIRequestBody,
11
+ ILLMRequestContext,
12
+ TLLMPreparedParams,
13
+ TLLMMessageBlock,
14
+ ToolData,
15
+ TLLMToolResultMessageBlock,
16
+ TLLMMessageRole,
17
+ TLLMChatResponse,
18
+ TLLMEvent,
19
+ } from '@sre/types/LLM.types';
20
+ import { LLMHelper } from '@sre/LLMManager/LLM.helper';
21
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
22
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
23
+
24
+ import { LLMConnector } from '../LLMConnector';
25
+ import { SystemEvents } from '@sre/Core/SystemEvents';
26
+ import { Logger } from '@sre/helpers/Log.helper';
27
+
28
// Module-scoped logger tagged with this connector's name; used by request/streamRequest below.
const logger = Logger('VertexAIConnector');
29
+
30
// TODO: [AHMED/FORHAD]: verify usage reporting for VertexAI. At the time this
// feature was implemented we had no access to VertexAI, so the reporting is
// assumed to work but has never been exercised (potential bug).
32
+
33
/**
 * LLM connector for Google Cloud Vertex AI (via `@google-cloud/vertexai`).
 *
 * Translates the SRE's provider-agnostic request/message shapes into the
 * Vertex AI `generateContent` / `generateContentStream` API, maps function
 * calls back into `ToolData`, and reports token usage through SystemEvents.
 */
export class VertexAIConnector extends LLMConnector {
    public name = 'LLM:VertexAI';

    /**
     * Builds a VertexAI SDK client from the per-request context.
     *
     * Project id, region and (optionally) a custom API endpoint are read from
     * the custom model's settings; auth credentials come from the request
     * context and are passed through to the SDK's googleAuthOptions.
     */
    private async getClient(params: ILLMRequestContext): Promise<VertexAI> {
        const credentials = params.credentials as any;
        const modelInfo = params.modelInfo as TCustomLLMModel;
        const projectId = (modelInfo?.settings as TVertexAISettings)?.projectId;
        const region = modelInfo?.settings?.region;

        return new VertexAI({
            project: projectId,
            location: region,
            apiEndpoint: (modelInfo?.settings as TVertexAISettings)?.apiEndpoint,
            googleAuthOptions: {
                credentials: credentials as any,
            },
        });
    }

    /**
     * Single-shot (non-streaming) completion request.
     *
     * Extracts the first candidate's text and finish reason, converts any
     * function calls into `ToolData` entries, and reports token usage when
     * the response carries usageMetadata. Errors are logged and rethrown.
     */
    protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
        try {
            logger.debug(`request ${this.name}`, acRequest.candidate);
            const vertexAI = await this.getClient(context);

            // Separate contents from model configuration.
            // NOTE(review): this mutates the caller-supplied `body` in place —
            // confirm callers never reuse the same body object.
            const contents = body.contents;
            delete body.contents;

            // VertexAI expects contents in a specific format: {contents: [...]}
            const requestParam = { contents };

            // After the delete above, `body` holds only model-level config
            // (model name, systemInstruction, generationConfig, tools).
            const model = vertexAI.getGenerativeModel(body);

            const result = await model.generateContent(requestParam);
            const response = await result.response;

            // Only the first candidate's first text part is surfaced.
            const content = response.candidates?.[0]?.content?.parts?.[0]?.text || '';
            // NOTE(review): finishReason is returned as-is here, while
            // streamRequest lowercases it — confirm callers tolerate both casings.
            const finishReason = response.candidates?.[0]?.finishReason || 'stop';
            const usage = response.usageMetadata;

            let toolsData: ToolData[] = [];
            let useTool = false;

            // Check for function calls in the response
            const functionCalls = response.candidates?.[0]?.content?.parts?.filter((part) => part.functionCall);
            if (functionCalls && functionCalls.length > 0) {
                functionCalls.forEach((call, index) => {
                    toolsData.push({
                        index,
                        id: call.functionCall?.name + '_' + index, // VertexAI doesn't provide IDs like Anthropic
                        type: 'function',
                        name: call.functionCall?.name,
                        arguments: call.functionCall?.args,
                        role: TLLMMessageRole.Assistant,
                    });
                });
                useTool = true;
            }

            if (usage) {
                this.reportUsage(usage, {
                    modelEntryName: context.modelEntryName,
                    keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
                    agentId: context.agentId,
                    teamId: context.teamId,
                });
            }

            return {
                content,
                finishReason,
                toolsData,
                useTool,
            };
        } catch (error) {
            logger.error(`request ${this.name}`, error, acRequest.candidate);
            throw error;
        }
    }

    /**
     * Streaming completion request.
     *
     * Returns an EventEmitter immediately; the actual request runs inside a
     * deferred callback (the outer setTimeout gives callers a chance to attach
     * listeners before any event fires).
     *
     * Emitted events:
     *  - 'content'            — incremental text chunks
     *  - TLLMEvent.ToolInfo   — tool calls found in the aggregated response
     *  - 'interrupted'        — finish reason other than 'stop' (lowercased)
     *  - 'end'                — (toolsData, usageData, finishReason)
     *  - 'error'              — any failure during the stream
     */
    protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
        const emitter = new EventEmitter();

        setTimeout(async () => {
            try {
                logger.debug(`streamRequest ${this.name}`, acRequest.candidate);
                const vertexAI = await this.getClient(context);

                // Separate contents from model configuration.
                // NOTE(review): mutates the caller-supplied `body` (see request()).
                const contents = body.contents;
                delete body.contents;

                const vertexModel = vertexAI.getGenerativeModel(body);

                // VertexAI expects contents in a specific format: {contents: [...]}
                const requestParam = { contents };

                const streamResult = await vertexModel.generateContentStream(requestParam);

                let toolsData: ToolData[] = [];
                let usageData: any[] = [];

                // Relay text chunks as they arrive; empty chunks are skipped.
                for await (const chunk of streamResult.stream) {
                    const chunkText = chunk.candidates?.[0]?.content?.parts?.[0]?.text || '';
                    if (chunkText) {
                        emitter.emit('content', chunkText);
                    }
                }

                const aggregatedResponse = await streamResult.response;

                // Check for function calls in the final response (like Anthropic does)
                const functionCalls = aggregatedResponse.candidates?.[0]?.content?.parts?.filter((part) => part.functionCall);
                if (functionCalls && functionCalls.length > 0) {
                    functionCalls.forEach((call, index) => {
                        toolsData.push({
                            index,
                            // VertexAI provides no tool-call ids; synthesize one from name + index.
                            id: call.functionCall?.name + '_' + index,
                            type: 'function',
                            name: call.functionCall?.name,
                            arguments: call.functionCall?.args,
                            role: TLLMMessageRole.Assistant,
                        });
                    });

                    emitter.emit(TLLMEvent.ToolInfo, toolsData);
                }

                const usage = aggregatedResponse.usageMetadata;

                if (usage) {
                    const reportedUsage = this.reportUsage(usage, {
                        modelEntryName: context.modelEntryName,
                        keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
                        agentId: context.agentId,
                        teamId: context.teamId,
                    });
                    usageData.push(reportedUsage);
                }

                const finishReason = (aggregatedResponse.candidates?.[0]?.finishReason || 'stop').toLowerCase();

                if (finishReason !== 'stop') {
                    emitter.emit('interrupted', finishReason);
                }

                // Delay 'end' slightly — presumably so listeners can process the
                // trailing 'content'/ToolInfo events first. TODO confirm this is
                // still needed and not masking an ordering bug.
                setTimeout(() => {
                    emitter.emit('end', toolsData, usageData, finishReason);
                }, 100);
            } catch (error) {
                logger.error(`streamRequest ${this.name}`, error, acRequest.candidate);
                emitter.emit('error', error);
            }
        }, 100);

        return emitter;
    }

    /**
     * Builds the Vertex AI request body from provider-agnostic prepared params.
     *
     * Produces `{ model, contents }` plus optional `systemInstruction`,
     * `generationConfig` and `tools`. When responseFormat === 'json', the
     * JSON instruction is appended to the system message instead of using a
     * native response-mime-type setting.
     */
    protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TGoogleAIRequestBody> {
        const model = params?.model;
        const { messages, systemMessage } = await this.prepareMessages(params);

        let body: any = {
            model: model as string,
            contents: messages, // This will be separated in the request methods
        };

        const responseFormat = params?.responseFormat || '';
        let systemInstruction = systemMessage || '';

        if (responseFormat === 'json') {
            systemInstruction += (systemInstruction ? '\n\n' : '') + JSON_RESPONSE_INSTRUCTION;
        }

        // Only explicitly-set sampling params are forwarded; unset ones fall
        // back to Vertex AI defaults.
        const config: GenerationConfig = {};

        if (params.maxTokens !== undefined) config.maxOutputTokens = params.maxTokens;
        if (params.temperature !== undefined) config.temperature = params.temperature;
        if (params.topP !== undefined) config.topP = params.topP;
        if (params.topK !== undefined) config.topK = params.topK;
        if (params.stopSequences?.length) config.stopSequences = params.stopSequences;

        if (systemInstruction) {
            body.systemInstruction = {
                role: 'system',
                parts: [{ text: systemInstruction }],
            };
        }

        if (Object.keys(config).length > 0) {
            body.generationConfig = config;
        }

        // Handle tools configuration
        if (params?.toolsConfig?.tools && params?.toolsConfig?.tools.length > 0) {
            body.tools = this.formatToolsForVertexAI(params.toolsConfig.tools);
        }

        return body;
    }

    /**
     * Converts Vertex AI usage metadata into the internal usage shape and
     * emits it as a 'USAGE:LLM' system event. Returns the emitted payload.
     * Cache-write tokens are always 0 (Vertex AI reports only cached reads here).
     */
    protected reportUsage(usage: UsageMetadata, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
        // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
        const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');

        const usageData = {
            sourceId: `llm:${modelName}`,
            input_tokens: usage.promptTokenCount || 0,
            output_tokens: usage.candidatesTokenCount || 0,
            input_tokens_cache_read: usage.cachedContentTokenCount || 0,
            input_tokens_cache_write: 0,
            keySource: metadata.keySource,
            agentId: metadata.agentId,
            teamId: metadata.teamId,
        };
        SystemEvents.emit('USAGE:LLM', usageData);

        return usageData;
    }

    /**
     * Prepares the message list for Vertex AI:
     *  - splits out the system message (sent separately as systemInstruction),
     *  - attaches any binary files as inlineData parts on the last message,
     *  - converts roles/parts to the Vertex AI content format,
     *  - guarantees at least one non-empty user message.
     */
    private async prepareMessages(params: TLLMPreparedParams) {
        const messages = params?.messages || [];
        const files: BinaryInput[] = params?.files || [];

        let processedMessages = [...messages];

        // Handle system messages - VertexAI uses systemInstruction separately
        const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(processedMessages);
        processedMessages = otherMessages;

        // Handle files if present
        if (files?.length > 0) {
            const fileData = await this.processFiles(files, params.agentId);

            // Add file data to the last user message
            // NOTE(review): this assumes the last message is the user message —
            // confirm against callers.
            const userMessage = processedMessages.pop();
            if (userMessage) {
                const content = [{ text: userMessage.content as string }, ...fileData];
                processedMessages.push({
                    role: userMessage.role,
                    parts: content,
                });
            }
        }

        // Convert messages to VertexAI format
        let vertexAIMessages = this.convertMessagesToVertexAIFormat(processedMessages);

        // Ensure we have at least one message with content
        if (!vertexAIMessages || vertexAIMessages.length === 0) {
            vertexAIMessages = [
                {
                    role: 'user',
                    parts: [{ text: 'Hello' }],
                },
            ];
        }

        return {
            messages: vertexAIMessages,
            systemMessage: (systemMessage as any)?.content || '',
        };
    }

    /**
     * Reads each binary file (access-checked against the agent) and converts
     * it into a Vertex AI inlineData part (base64 payload + mime type).
     */
    private async processFiles(files: BinaryInput[], agentId: string) {
        const fileData = [];

        for (const file of files) {
            const bufferData = await file.readData(AccessCandidate.agent(agentId));
            const base64Data = bufferData.toString('base64');

            fileData.push({
                inlineData: {
                    data: base64Data,
                    mimeType: file.mimetype,
                },
            });
        }

        return fileData;
    }

    /**
     * Maps generic message blocks to Vertex AI contents.
     *
     * Roles: Assistant → 'model'; everything else → 'user' (Vertex AI accepts
     * only these two in `contents`). Messages with no usable content get a
     * 'Continue' placeholder part so the API never receives an empty turn.
     */
    private convertMessagesToVertexAIFormat(messages: TLLMMessageBlock[]) {
        return messages
            .filter((message) => message && (message.content || message.parts))
            .map((message) => {
                let parts;

                if (typeof message.content === 'string') {
                    parts = message.content.trim() ? [{ text: message.content.trim() }] : [{ text: 'Continue' }];
                } else if (message.parts && Array.isArray(message.parts)) {
                    parts = message.parts;
                } else if (message.content) {
                    parts = [{ text: String(message.content) || 'Continue' }];
                } else {
                    parts = [{ text: 'Continue' }];
                }

                return {
                    role: message.role === TLLMMessageRole.Assistant ? 'model' : 'user',
                    parts,
                };
            });
    }

    /**
     * Wraps tool definitions in Vertex AI's `functionDeclarations` envelope,
     * building a JSON-schema-style parameters object from properties/requiredFields.
     */
    private formatToolsForVertexAI(tools: any[]) {
        return [
            {
                functionDeclarations: tools.map((tool) => ({
                    name: tool.name,
                    description: tool.description || '',
                    parameters: {
                        type: 'object',
                        properties: tool.properties || {},
                        required: tool.requiredFields || [],
                    },
                })),
            },
        ];
    }

    /**
     * Normalizes tool definitions into the internal toolsConfig shape.
     * NOTE(review): reqBodyAdapter() only reads `toolsConfig.tools`; the
     * `toolChoice` value returned here does not appear to reach the Vertex AI
     * request body — confirm whether it is consumed elsewhere.
     */
    public formatToolsConfig({ toolDefinitions, toolChoice = 'auto' }) {
        const tools = toolDefinitions.map((tool) => {
            const { name, description, properties, requiredFields } = tool;

            return {
                name,
                description,
                properties,
                requiredFields,
            };
        });

        return {
            tools,
            toolChoice: {
                type: toolChoice,
            },
        };
    }

    /**
     * Converts an assistant message block plus tool results into Vertex AI
     * message blocks: the assistant turn (text + functionCall parts) followed
     * by one user-role functionResponse block per tool result.
     */
    public transformToolMessageBlocks({
        messageBlock,
        toolsData,
    }: {
        messageBlock: TLLMMessageBlock;
        toolsData: ToolData[];
    }): TLLMToolResultMessageBlock[] {
        const messageBlocks: TLLMToolResultMessageBlock[] = [];

        if (messageBlock) {
            const parts = [];

            if (typeof messageBlock.content === 'string') {
                parts.push({ text: messageBlock.content });
            } else if (Array.isArray(messageBlock.content)) {
                parts.push(...messageBlock.content);
            }

            if (messageBlock.tool_calls) {
                const functionCalls = messageBlock.tool_calls.map((toolCall: any) => ({
                    functionCall: {
                        name: toolCall?.function?.name,
                        args:
                            // NOTE(review): JSON.parse will throw on malformed
                            // argument strings — confirm upstream guarantees valid JSON.
                            typeof toolCall?.function?.arguments === 'string'
                                ? JSON.parse(toolCall.function.arguments)
                                : toolCall?.function?.arguments || {},
                    },
                }));
                parts.push(...functionCalls);
            }

            messageBlocks.push({
                role: messageBlock.role,
                parts,
            });
        }

        // Transform tool results
        const toolResults = toolsData.map((toolData) => ({
            role: TLLMMessageRole.User,
            parts: [
                {
                    functionResponse: {
                        name: toolData.name,
                        response: {
                            name: toolData.name,
                            content: toolData.result,
                        },
                    },
                },
            ],
        }));

        messageBlocks.push(...toolResults);
        return messageBlocks;
    }
}