@smythos/sre 1.6.8 → 1.6.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (239)
  1. package/CHANGELOG +111 -111
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/bundle-analysis-lazy.html +4949 -0
  5. package/dist/bundle-analysis.html +4949 -0
  6. package/dist/index.js +2 -2
  7. package/dist/index.js.map +1 -1
  8. package/dist/types/Components/Triggers/Gmail.trigger.d.ts +58 -0
  9. package/dist/types/Components/Triggers/GmailTrigger.class.d.ts +44 -0
  10. package/dist/types/Components/Triggers/Trigger.class.d.ts +21 -0
  11. package/dist/types/Components/Triggers/WhatsApp.trigger.d.ts +22 -0
  12. package/dist/types/helpers/AIPerformanceAnalyzer.helper.d.ts +45 -0
  13. package/dist/types/helpers/AIPerformanceCollector.helper.d.ts +111 -0
  14. package/dist/types/subsystems/IO/Storage.service/connectors/AzureBlobStorage.class.d.ts +211 -0
  15. package/dist/types/subsystems/IO/VectorDB.service/connectors/WeaviateVectorDB.class.d.ts +187 -0
  16. package/dist/types/subsystems/PerformanceManager/Performance.service/PerformanceConnector.d.ts +102 -0
  17. package/dist/types/subsystems/PerformanceManager/Performance.service/connectors/LocalPerformanceConnector.class.d.ts +100 -0
  18. package/dist/types/subsystems/PerformanceManager/Performance.service/index.d.ts +22 -0
  19. package/dist/types/subsystems/Security/Credentials/Credentials.class.d.ts +2 -0
  20. package/dist/types/subsystems/Security/Credentials/ManagedOAuth2Credentials.class.d.ts +18 -0
  21. package/dist/types/subsystems/Security/Credentials/OAuth2Credentials.class.d.ts +14 -0
  22. package/dist/types/types/Performance.types.d.ts +468 -0
  23. package/dist/types/utils/package-manager.utils.d.ts +26 -0
  24. package/package.json +1 -1
  25. package/src/Components/APICall/APICall.class.ts +161 -161
  26. package/src/Components/APICall/AccessTokenManager.ts +166 -166
  27. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  28. package/src/Components/APICall/OAuth.helper.ts +447 -447
  29. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  30. package/src/Components/APICall/parseData.ts +167 -167
  31. package/src/Components/APICall/parseHeaders.ts +41 -41
  32. package/src/Components/APICall/parseProxy.ts +68 -68
  33. package/src/Components/APICall/parseUrl.ts +91 -91
  34. package/src/Components/APIEndpoint.class.ts +234 -234
  35. package/src/Components/APIOutput.class.ts +58 -58
  36. package/src/Components/AgentPlugin.class.ts +102 -102
  37. package/src/Components/Async.class.ts +155 -155
  38. package/src/Components/Await.class.ts +90 -90
  39. package/src/Components/Classifier.class.ts +158 -158
  40. package/src/Components/Component.class.ts +147 -147
  41. package/src/Components/ComponentHost.class.ts +38 -38
  42. package/src/Components/DataSourceCleaner.class.ts +92 -92
  43. package/src/Components/DataSourceIndexer.class.ts +181 -181
  44. package/src/Components/DataSourceLookup.class.ts +161 -161
  45. package/src/Components/ECMASandbox.class.ts +72 -72
  46. package/src/Components/FEncDec.class.ts +29 -29
  47. package/src/Components/FHash.class.ts +33 -33
  48. package/src/Components/FSign.class.ts +80 -80
  49. package/src/Components/FSleep.class.ts +25 -25
  50. package/src/Components/FTimestamp.class.ts +66 -66
  51. package/src/Components/FileStore.class.ts +78 -78
  52. package/src/Components/ForEach.class.ts +97 -97
  53. package/src/Components/GPTPlugin.class.ts +70 -70
  54. package/src/Components/GenAILLM.class.ts +586 -586
  55. package/src/Components/HuggingFace.class.ts +313 -313
  56. package/src/Components/Image/imageSettings.config.ts +70 -70
  57. package/src/Components/ImageGenerator.class.ts +483 -483
  58. package/src/Components/JSONFilter.class.ts +54 -54
  59. package/src/Components/LLMAssistant.class.ts +213 -213
  60. package/src/Components/LogicAND.class.ts +28 -28
  61. package/src/Components/LogicAtLeast.class.ts +85 -85
  62. package/src/Components/LogicAtMost.class.ts +86 -86
  63. package/src/Components/LogicOR.class.ts +29 -29
  64. package/src/Components/LogicXOR.class.ts +34 -34
  65. package/src/Components/MCPClient.class.ts +137 -137
  66. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  67. package/src/Components/MemoryReadKeyVal.class.ts +67 -67
  68. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  69. package/src/Components/MemoryWriteObject.class.ts +97 -97
  70. package/src/Components/MultimodalLLM.class.ts +128 -128
  71. package/src/Components/OpenAPI.class.ts +72 -72
  72. package/src/Components/PromptGenerator.class.ts +122 -122
  73. package/src/Components/ScrapflyWebScrape.class.ts +183 -183
  74. package/src/Components/ServerlessCode.class.ts +123 -123
  75. package/src/Components/TavilyWebSearch.class.ts +103 -103
  76. package/src/Components/VisionLLM.class.ts +104 -104
  77. package/src/Components/ZapierAction.class.ts +127 -127
  78. package/src/Components/index.ts +97 -97
  79. package/src/Core/AgentProcess.helper.ts +240 -240
  80. package/src/Core/Connector.class.ts +123 -123
  81. package/src/Core/ConnectorsService.ts +197 -197
  82. package/src/Core/DummyConnector.ts +49 -49
  83. package/src/Core/HookService.ts +105 -105
  84. package/src/Core/SmythRuntime.class.ts +241 -241
  85. package/src/Core/SystemEvents.ts +16 -16
  86. package/src/Core/boot.ts +56 -56
  87. package/src/config.ts +15 -15
  88. package/src/constants.ts +126 -126
  89. package/src/data/hugging-face.params.json +579 -579
  90. package/src/helpers/AWSLambdaCode.helper.ts +624 -624
  91. package/src/helpers/BinaryInput.helper.ts +331 -331
  92. package/src/helpers/Conversation.helper.ts +1157 -1157
  93. package/src/helpers/ECMASandbox.helper.ts +64 -64
  94. package/src/helpers/JsonContent.helper.ts +97 -97
  95. package/src/helpers/LocalCache.helper.ts +97 -97
  96. package/src/helpers/Log.helper.ts +274 -274
  97. package/src/helpers/OpenApiParser.helper.ts +150 -150
  98. package/src/helpers/S3Cache.helper.ts +147 -147
  99. package/src/helpers/SmythURI.helper.ts +5 -5
  100. package/src/helpers/Sysconfig.helper.ts +95 -95
  101. package/src/helpers/TemplateString.helper.ts +243 -243
  102. package/src/helpers/TypeChecker.helper.ts +329 -329
  103. package/src/index.ts +3 -3
  104. package/src/index.ts.bak +3 -3
  105. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  106. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  107. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  108. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  109. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +145 -145
  110. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  111. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  112. package/src/subsystems/AgentManager/AgentLogger.class.ts +301 -301
  113. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  114. package/src/subsystems/AgentManager/AgentRuntime.class.ts +557 -557
  115. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  116. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  117. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  118. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  119. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  120. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  121. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  122. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  123. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  124. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +171 -171
  125. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  126. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  127. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  128. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  129. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  130. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  131. package/src/subsystems/IO/Log.service/index.ts +13 -13
  132. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  133. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  134. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  135. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  136. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  137. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  138. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  139. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  140. package/src/subsystems/IO/Router.service/index.ts +11 -11
  141. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +488 -488
  142. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  143. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  144. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  145. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  146. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  147. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +465 -465
  148. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +387 -387
  149. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +408 -408
  150. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  151. package/src/subsystems/IO/VectorDB.service/embed/GoogleEmbedding.ts +118 -118
  152. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  153. package/src/subsystems/IO/VectorDB.service/embed/index.ts +26 -26
  154. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  155. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  156. package/src/subsystems/LLMManager/LLM.inference.ts +345 -345
  157. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +492 -492
  158. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  159. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +666 -666
  160. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +407 -407
  161. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +92 -92
  162. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +983 -983
  163. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +319 -319
  164. package/src/subsystems/LLMManager/LLM.service/connectors/Ollama.class.ts +361 -361
  165. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +257 -257
  166. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +430 -430
  167. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +503 -503
  168. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  169. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  170. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  171. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  172. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  173. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  174. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  175. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  176. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +478 -478
  177. package/src/subsystems/LLMManager/LLM.service/index.ts +47 -47
  178. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +303 -303
  179. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +280 -271
  180. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  181. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  182. package/src/subsystems/LLMManager/models.ts +2540 -2540
  183. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  184. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  185. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  186. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +214 -214
  187. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  188. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  189. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  190. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  191. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  192. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  193. package/src/subsystems/MemoryManager/RuntimeContext.ts +277 -277
  194. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  195. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  196. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  197. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  198. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  199. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +170 -170
  200. package/src/subsystems/Security/Account.service/connectors/MySQLAccount.class.ts +76 -76
  201. package/src/subsystems/Security/Account.service/index.ts +14 -14
  202. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  203. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  204. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  205. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  206. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  207. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  208. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  209. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  210. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  211. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  212. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  213. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  214. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  215. package/src/types/ACL.types.ts +104 -104
  216. package/src/types/AWS.types.ts +10 -10
  217. package/src/types/Agent.types.ts +61 -61
  218. package/src/types/AgentLogger.types.ts +17 -17
  219. package/src/types/Cache.types.ts +1 -1
  220. package/src/types/Common.types.ts +2 -2
  221. package/src/types/LLM.types.ts +520 -520
  222. package/src/types/Redis.types.ts +8 -8
  223. package/src/types/SRE.types.ts +64 -64
  224. package/src/types/Security.types.ts +14 -14
  225. package/src/types/Storage.types.ts +5 -5
  226. package/src/types/VectorDB.types.ts +86 -86
  227. package/src/utils/base64.utils.ts +275 -275
  228. package/src/utils/cli.utils.ts +68 -68
  229. package/src/utils/data.utils.ts +322 -322
  230. package/src/utils/date-time.utils.ts +22 -22
  231. package/src/utils/general.utils.ts +238 -238
  232. package/src/utils/index.ts +12 -12
  233. package/src/utils/lazy-client.ts +261 -261
  234. package/src/utils/numbers.utils.ts +13 -13
  235. package/src/utils/oauth.utils.ts +35 -35
  236. package/src/utils/string.utils.ts +414 -414
  237. package/src/utils/url.utils.ts +19 -19
  238. package/src/utils/validation.utils.ts +74 -74
  239. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
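The expanded hunk below appears to correspond to entry 156 above, package/src/subsystems/LLMManager/LLM.inference.ts (+345 -345): it defines the LLMInference class. Its removed and re-added sides are textually identical, which suggests a whitespace- or line-ending-level change rather than a change to the code itself.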
@@ -1,345 +1,345 @@
- import _ from 'lodash';
- import { type OpenAI } from 'openai';
- import { encodeChat } from 'gpt-tokenizer';
- import { ChatMessage } from 'gpt-tokenizer/esm/GptEncoding';
- import { ConnectorService } from '@sre/Core/ConnectorsService';
- import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
- import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
- import { LLMConnector } from './LLM.service/LLMConnector';
- import { EventEmitter } from 'events';
- import { GenerateImageConfig, TLLMMessageRole, TLLMModel, TLLMChatResponse } from '@sre/types/LLM.types';
- import { IModelsProviderRequest, ModelsProviderConnector } from './ModelsProvider.service/ModelsProviderConnector';
- import { Logger } from '@sre/helpers/Log.helper';
- import { IAgent } from '@sre/types/Agent.types';
- import { isAgent } from '@sre/AgentManager/Agent.helper';
- import { TLLMParams } from '@sre/types/LLM.types';
-
- const console = Logger('LLMInference');
-
- type TPromptParams = { query?: string; contextWindow?: any[]; files?: any[]; params: TLLMParams };
-
- export class LLMInference {
-     private model: string | TLLMModel;
-     private llmConnector: LLMConnector;
-     private modelProviderReq: IModelsProviderRequest;
-     public teamId?: string;
-
-     public static async getInstance(model: string | TLLMModel, candidate: AccessCandidate) {
-         const modelsProvider: ModelsProviderConnector = ConnectorService.getModelsProviderConnector();
-         if (!modelsProvider.valid) {
-             throw new Error(`Model provider Not available, cannot create LLM instance`);
-         }
-         const accountConnector = ConnectorService.getAccountConnector();
-         const teamId = await accountConnector.requester(candidate).getTeam();
-
-         const llmInference = new LLMInference();
-         llmInference.teamId = teamId;
-
-         llmInference.modelProviderReq = modelsProvider.requester(candidate);
-
-         const llmProvider = await llmInference.modelProviderReq.getProvider(model);
-         if (llmProvider) {
-             llmInference.llmConnector = ConnectorService.getLLMConnector(llmProvider);
-         }
-
-         if (!llmInference.llmConnector) {
-             console.error(`Model ${model} unavailable for team ${teamId}`);
-         }
-
-         llmInference.model = model;
-
-         return llmInference;
-     }
-
-     public static user(candidate: AccessCandidate): any {}
-
-     public get connector(): LLMConnector {
-         return this.llmConnector;
-     }
-
-     public async prompt({ query, contextWindow, files, params }: TPromptParams) {
-         let messages = contextWindow || [];
-
-         if (query) {
-             const content = this.llmConnector.enhancePrompt(query, params);
-             messages.push({ role: TLLMMessageRole.User, content });
-         }
-
-         if (!params.model) params.model = this.model;
-         params.messages = messages;
-         params.files = files;
-
-         try {
-             let response: TLLMChatResponse = await this.llmConnector.requester(AccessCandidate.agent(params.agentId)).request(params);
-
-             const result = this.llmConnector.postProcess(response?.content);
-             if (result.error) {
-                 // If the model stopped before completing the response, this is usually due to output token limit reached.
-                 if (response.finishReason !== 'stop') {
-                     throw new Error('The model stopped before completing the response, this is usually due to output token limit reached.');
-                 }
-
-                 // If the model stopped due to other reasons, throw the error
-                 throw new Error(result.error);
-             }
-             return result;
-         } catch (error: any) {
-             console.error('Error in chatRequest: ', error);
-
-             throw error;
-         }
-     }
-
-     public async promptStream({ query, contextWindow, files, params }: TPromptParams) {
-         let messages = contextWindow || [];
-
-         if (query) {
-             const content = this.llmConnector.enhancePrompt(query, params);
-             messages.push({ role: TLLMMessageRole.User, content });
-         }
-
-         if (!params.model) params.model = this.model;
-         params.messages = messages;
-         params.files = files;
-
-         try {
-             return await this.llmConnector.user(AccessCandidate.agent(params.agentId)).streamRequest(params);
-         } catch (error) {
-             console.error('Error in streamRequest:', error);
-
-             const dummyEmitter = new EventEmitter();
-             process.nextTick(() => {
-                 dummyEmitter.emit('error', error);
-                 dummyEmitter.emit('end');
-             });
-             return dummyEmitter;
-         }
-     }
-
-     public async imageGenRequest({ query, files, params }: TPromptParams) {
-         params.prompt = query;
-         return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageGenRequest(params);
-     }
-
-     public async imageEditRequest({ query, files, params }: TPromptParams) {
-         params.prompt = query;
-         params.files = files;
-         return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageEditRequest(params);
-     }
-
-     //@deprecated
-     public async streamRequest(params: any, agent: string | IAgent) {
-         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
-         try {
-             if (!params.messages || !params.messages?.length) {
-                 throw new Error('Input messages are required.');
-             }
-
-             const model = params.model || this.model;
-
-             return await this.llmConnector.user(AccessCandidate.agent(agentId)).streamRequest({ ...params, model });
-         } catch (error) {
-             console.error('Error in streamRequest:', error);
-
-             const dummyEmitter = new EventEmitter();
-             process.nextTick(() => {
-                 dummyEmitter.emit('error', error);
-                 dummyEmitter.emit('end');
-             });
-             return dummyEmitter;
-         }
-     }
-
-     //@deprecated
-     public async multimodalStreamRequest(params: any, fileSources, agent: string | IAgent) {
-         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
-
-         const promises = [];
-         const _fileSources = [];
-
-         // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
-         for (let file of fileSources) {
-             const binaryInput = BinaryInput.from(file);
-             _fileSources.push(binaryInput);
-             promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
-         }
-
-         await Promise.all(promises);
-
-         params.fileSources = _fileSources;
-
-         try {
-             //FIXME we need to update the connector multimediaStreamRequest in order to ignore prompt param if not provided
-             const userMessage = Array.isArray(params.messages) ? params.messages.pop() : {};
-             const prompt = userMessage?.content || '';
-             const model = params.model || this.model;
-
-             return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
-         } catch (error: any) {
-             console.error('Error in multimodalRequest: ', error);
-
-             throw error;
-         }
-     }
-
-     //@deprecated
-     public async multimodalStreamRequestLegacy(prompt, files: string[], config: any = {}, agent: string | IAgent) {
-         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
-
-         const promises = [];
-         const _files = [];
-
-         // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
-         for (let file of files) {
-             const binaryInput = BinaryInput.from(file);
-             _files.push(binaryInput);
-             promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
-         }
-
-         await Promise.all(promises);
-
-         const params = config.data;
-
-         params.files = _files;
-
-         try {
-             prompt = this.llmConnector.enhancePrompt(prompt, config);
-             const model = params.model || this.model;
-
-             return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
-         } catch (error: any) {
-             console.error('Error in multimodalRequest: ', error);
-
-             throw error;
-         }
-     }
-
-     //Not needed
-     // public getConsistentMessages(messages: TLLMMessageBlock[]) {
-     //     if (!messages?.length) {
-     //         throw new Error('Input messages are required.');
-     //     }
-
-     //     try {
-     //         return this.llmConnector.getConsistentMessages(messages);
-     //     } catch (error) {
-     //         console.warn('Something went wrong in getConsistentMessages: ', error);
-
-     //         return messages; // if something went wrong then we return the original messages
-     //     }
-     // }
-
-     /**
-      * Get the context window for the given messages
-      * @param _messages - The messages to get the context window for (the messages are in smythos generic format)
-      * @param maxTokens - The maximum number of tokens to use for the context window
-      * @param maxOutputTokens - The maximum number of tokens to use for the output
-      * @returns The context window for the given messages
-      */
-     public async getContextWindow(systemPrompt: string, _messages: any[], maxTokens: number, maxOutputTokens: number = 1024): Promise<any[]> {
-         //TODO: handle non key accounts (limit tokens)
-         // const maxModelContext = this._llmHelper?.modelInfo?.keyOptions?.tokens || this._llmHelper?.modelInfo?.tokens || 256;
-
-         //#region get max model context
-
-         const modelInfo = await this.modelProviderReq.getModelInfo(this.model, true);
-         let maxModelContext = modelInfo?.tokens;
-         let maxModelOutputTokens = modelInfo?.completionTokens || modelInfo?.tokens;
-         // const isStandardLLM = LLMRegistry.isStandardLLM(this.model);
-
-         // if (isStandardLLM) {
-         //     maxModelContext = LLMRegistry.getMaxContextTokens(this.model, true); // we just provide true for hasAPIKey to get the original max context
-         // } else {
-         //     const team = AccessCandidate.team(this.teamId);
-         //     const customLLMRegistry = await CustomLLMRegistry.getInstance(team);
-         //     maxModelContext = customLLMRegistry.getMaxContextTokens(this.model);
-         //     maxModelOutputTokens = customLLMRegistry.getMaxCompletionTokens(this.model);
-         // }
-         //#endregion get max model context
-
-         let maxInputContext = Math.min(maxTokens, maxModelContext);
-         let maxOutputContext = Math.min(maxOutputTokens, maxModelOutputTokens || 0);
-
-         if (maxInputContext + maxOutputContext > maxModelContext) {
-             maxInputContext -= maxInputContext + maxOutputContext - maxModelContext;
-         }
-
-         if (maxInputContext <= 0) {
-             console.warn('Max input context is 0, returning empty context window, This usually indicates a wrong model configuration');
-         }
-
-         console.debug(
-             `Context Window Configuration: Max Input Tokens: ${maxInputContext}, Max Output Tokens: ${maxOutputContext}, Max Model Tokens: ${maxModelContext}`
-         );
-         const systemMessage = { role: 'system', content: systemPrompt };
-
-         let smythContextWindow = [];
-
-         //loop through messages from last to first and use encodeChat to calculate token lengths
-         //we will use fake chatMessages to calculate the token lengths, these are not used by the LLM, but just for token counting
-         let tokensCount = encodeChat([systemMessage as ChatMessage], 'gpt-4o').length;
-         for (let i = _messages?.length - 1; i >= 0; i--) {
-             const curMessage = _messages[i];
-             if (curMessage.role === 'system') continue;
-
-             tokensCount = 0;
-             if (curMessage?.content) {
-                 // tokensCount += encodeChat([{ role: 'user', content: curMessage.content } as ChatMessage], 'gpt-4o').length;
-                 tokensCount += countTokens(curMessage.content);
-             }
-
-             if (curMessage?.messageBlock?.content) {
-                 // tokensCount += encodeChat([{ role: 'user', content: curMessage.messageBlock.content } as ChatMessage], 'gpt-4o').length;
-                 tokensCount += countTokens(curMessage.messageBlock.content);
-             }
-             if (curMessage.toolsData) {
-                 for (let tool of curMessage.toolsData) {
-                     // tokensCount += encodeChat([{ role: 'user', content: tool.result } as ChatMessage], 'gpt-4o').length;
-                     tokensCount += countTokens(tool.result);
-                 }
-             }
-
-             //did the last message exceed the context window ?
-             if (tokensCount > maxInputContext) {
-                 break;
-             }
-
-             smythContextWindow.unshift(curMessage);
-         }
-         smythContextWindow.unshift(systemMessage);
-
-         let modelContextWindow = [];
-         //now transform the messages to the model format
-         for (let message of smythContextWindow) {
-             if (message.role && message.content) {
-                 modelContextWindow.push({ role: message.role, content: message.content });
-             }
-
-             if (message.messageBlock && message.toolsData) {
-                 const internal_message = this.connector.transformToolMessageBlocks({
-                     messageBlock: message?.messageBlock,
-                     toolsData: message?.toolsData,
-                 });
-
-                 modelContextWindow.push(...internal_message);
-             }
-         }
-
-         modelContextWindow = this.connector.getConsistentMessages(modelContextWindow);
-
-         return modelContextWindow;
-     }
- }
-
- function countTokens(content: any, model: 'gpt-4o' | 'gpt-4o-mini' = 'gpt-4o') {
-     try {
-         // Content must be stringified since some providers like Anthropic use object content
-         const _stringifiedContent = typeof content === 'string' ? content : JSON.stringify(content);
-
-         const tokens = encodeChat([{ role: 'user', content: _stringifiedContent } as ChatMessage], model);
-         return tokens.length;
-     } catch (error) {
-         console.warn('Error in countTokens: ', error);
-         return 0;
-     }
- }
+ import _ from 'lodash';
+ import { type OpenAI } from 'openai';
+ import { encodeChat } from 'gpt-tokenizer';
+ import { ChatMessage } from 'gpt-tokenizer/esm/GptEncoding';
+ import { ConnectorService } from '@sre/Core/ConnectorsService';
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
+ import { LLMConnector } from './LLM.service/LLMConnector';
+ import { EventEmitter } from 'events';
+ import { GenerateImageConfig, TLLMMessageRole, TLLMModel, TLLMChatResponse } from '@sre/types/LLM.types';
+ import { IModelsProviderRequest, ModelsProviderConnector } from './ModelsProvider.service/ModelsProviderConnector';
+ import { Logger } from '@sre/helpers/Log.helper';
+ import { IAgent } from '@sre/types/Agent.types';
+ import { isAgent } from '@sre/AgentManager/Agent.helper';
+ import { TLLMParams } from '@sre/types/LLM.types';
+
+ const console = Logger('LLMInference');
+
+ type TPromptParams = { query?: string; contextWindow?: any[]; files?: any[]; params: TLLMParams };
+
+ export class LLMInference {
+     private model: string | TLLMModel;
+     private llmConnector: LLMConnector;
+     private modelProviderReq: IModelsProviderRequest;
+     public teamId?: string;
+
+     public static async getInstance(model: string | TLLMModel, candidate: AccessCandidate) {
+         const modelsProvider: ModelsProviderConnector = ConnectorService.getModelsProviderConnector();
+         if (!modelsProvider.valid) {
+             throw new Error(`Model provider Not available, cannot create LLM instance`);
+         }
+         const accountConnector = ConnectorService.getAccountConnector();
+         const teamId = await accountConnector.requester(candidate).getTeam();
+
+         const llmInference = new LLMInference();
+         llmInference.teamId = teamId;
+
+         llmInference.modelProviderReq = modelsProvider.requester(candidate);
+
+         const llmProvider = await llmInference.modelProviderReq.getProvider(model);
+         if (llmProvider) {
+             llmInference.llmConnector = ConnectorService.getLLMConnector(llmProvider);
+         }
+
+         if (!llmInference.llmConnector) {
+             console.error(`Model ${model} unavailable for team ${teamId}`);
+         }
+
+         llmInference.model = model;
+
+         return llmInference;
+     }
+
+     public static user(candidate: AccessCandidate): any {}
+
+     public get connector(): LLMConnector {
+         return this.llmConnector;
+     }
+
+     public async prompt({ query, contextWindow, files, params }: TPromptParams) {
+         let messages = contextWindow || [];
+
+         if (query) {
+             const content = this.llmConnector.enhancePrompt(query, params);
+             messages.push({ role: TLLMMessageRole.User, content });
+         }
+
+         if (!params.model) params.model = this.model;
+         params.messages = messages;
+         params.files = files;
+
+         try {
+             let response: TLLMChatResponse = await this.llmConnector.requester(AccessCandidate.agent(params.agentId)).request(params);
+
+             const result = this.llmConnector.postProcess(response?.content);
+             if (result.error) {
+                 // If the model stopped before completing the response, this is usually due to output token limit reached.
+                 if (response.finishReason !== 'stop') {
+                     throw new Error('The model stopped before completing the response, this is usually due to output token limit reached.');
+                 }
+
+                 // If the model stopped due to other reasons, throw the error
+                 throw new Error(result.error);
+             }
+             return result;
+         } catch (error: any) {
+             console.error('Error in chatRequest: ', error);
+
+             throw error;
+         }
+     }
+
+     public async promptStream({ query, contextWindow, files, params }: TPromptParams) {
+         let messages = contextWindow || [];
+
+         if (query) {
+             const content = this.llmConnector.enhancePrompt(query, params);
+             messages.push({ role: TLLMMessageRole.User, content });
+         }
+
+         if (!params.model) params.model = this.model;
+         params.messages = messages;
+         params.files = files;
+
+         try {
+             return await this.llmConnector.user(AccessCandidate.agent(params.agentId)).streamRequest(params);
+         } catch (error) {
+             console.error('Error in streamRequest:', error);
+
+             const dummyEmitter = new EventEmitter();
+             process.nextTick(() => {
+                 dummyEmitter.emit('error', error);
+                 dummyEmitter.emit('end');
+             });
+             return dummyEmitter;
+         }
+     }
+
+     public async imageGenRequest({ query, files, params }: TPromptParams) {
+         params.prompt = query;
+         return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageGenRequest(params);
+     }
+
+     public async imageEditRequest({ query, files, params }: TPromptParams) {
+         params.prompt = query;
+         params.files = files;
+         return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageEditRequest(params);
+     }
+
+     //@deprecated
+     public async streamRequest(params: any, agent: string | IAgent) {
+         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
+         try {
+             if (!params.messages || !params.messages?.length) {
+                 throw new Error('Input messages are required.');
+             }
+
+             const model = params.model || this.model;
+
+             return await this.llmConnector.user(AccessCandidate.agent(agentId)).streamRequest({ ...params, model });
+         } catch (error) {
+             console.error('Error in streamRequest:', error);
+
+             const dummyEmitter = new EventEmitter();
+             process.nextTick(() => {
+                 dummyEmitter.emit('error', error);
+                 dummyEmitter.emit('end');
+             });
+             return dummyEmitter;
+         }
+     }
+
+     //@deprecated
+     public async multimodalStreamRequest(params: any, fileSources, agent: string | IAgent) {
+         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
+
+         const promises = [];
+         const _fileSources = [];
+
+         // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
+         for (let file of fileSources) {
+             const binaryInput = BinaryInput.from(file);
+             _fileSources.push(binaryInput);
+             promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
+         }
+
+         await Promise.all(promises);
+
+         params.fileSources = _fileSources;
+
+         try {
+             //FIXME we need to update the connector multimediaStreamRequest in order to ignore prompt param if not provided
+             const userMessage = Array.isArray(params.messages) ? params.messages.pop() : {};
+             const prompt = userMessage?.content || '';
+             const model = params.model || this.model;
+
+             return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
+         } catch (error: any) {
+             console.error('Error in multimodalRequest: ', error);
+
+             throw error;
+         }
+     }
+
+     //@deprecated
+     public async multimodalStreamRequestLegacy(prompt, files: string[], config: any = {}, agent: string | IAgent) {
+         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
+
+         const promises = [];
+         const _files = [];
+
+         // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
+         for (let file of files) {
+             const binaryInput = BinaryInput.from(file);
+             _files.push(binaryInput);
+             promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
+         }
+
+         await Promise.all(promises);
+
+         const params = config.data;
+
+         params.files = _files;
+
+         try {
+             prompt = this.llmConnector.enhancePrompt(prompt, config);
+             const model = params.model || this.model;
+
+             return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
+         } catch (error: any) {
+             console.error('Error in multimodalRequest: ', error);
+
+             throw error;
+         }
+     }
+
+     //Not needed
+     // public getConsistentMessages(messages: TLLMMessageBlock[]) {
+     //     if (!messages?.length) {
+     //         throw new Error('Input messages are required.');
+     //     }
+
+     //     try {
+     //         return this.llmConnector.getConsistentMessages(messages);
+     //     } catch (error) {
+     //         console.warn('Something went wrong in getConsistentMessages: ', error);
+
+     //         return messages; // if something went wrong then we return the original messages
+     //     }
+     // }
+
+     /**
+      * Get the context window for the given messages
+      * @param _messages - The messages to get the context window for (the messages are in smythos generic format)
+      * @param maxTokens - The maximum number of tokens to use for the context window
+      * @param maxOutputTokens - The maximum number of tokens to use for the output
+      * @returns The context window for the given messages
+      */
+     public async getContextWindow(systemPrompt: string, _messages: any[], maxTokens: number, maxOutputTokens: number = 1024): Promise<any[]> {
+         //TODO: handle non key accounts (limit tokens)
+         // const maxModelContext = this._llmHelper?.modelInfo?.keyOptions?.tokens || this._llmHelper?.modelInfo?.tokens || 256;
+
+         //#region get max model context
+
+         const modelInfo = await this.modelProviderReq.getModelInfo(this.model, true);
+         let maxModelContext = modelInfo?.tokens;
+         let maxModelOutputTokens = modelInfo?.completionTokens || modelInfo?.tokens;
+         // const isStandardLLM = LLMRegistry.isStandardLLM(this.model);
+
+         // if (isStandardLLM) {
+         //     maxModelContext = LLMRegistry.getMaxContextTokens(this.model, true); // we just provide true for hasAPIKey to get the original max context
+         // } else {
+         //     const team = AccessCandidate.team(this.teamId);
+         //     const customLLMRegistry = await CustomLLMRegistry.getInstance(team);
+         //     maxModelContext = customLLMRegistry.getMaxContextTokens(this.model);
+         //     maxModelOutputTokens = customLLMRegistry.getMaxCompletionTokens(this.model);
+         // }
+         //#endregion get max model context
+
+         let maxInputContext = Math.min(maxTokens, maxModelContext);
+         let maxOutputContext = Math.min(maxOutputTokens, maxModelOutputTokens || 0);
+
+         if (maxInputContext + maxOutputContext > maxModelContext) {
+             maxInputContext -= maxInputContext + maxOutputContext - maxModelContext;
+         }
+
+         if (maxInputContext <= 0) {
+             console.warn('Max input context is 0, returning empty context window, This usually indicates a wrong model configuration');
+         }
+
+         console.debug(
+             `Context Window Configuration: Max Input Tokens: ${maxInputContext}, Max Output Tokens: ${maxOutputContext}, Max Model Tokens: ${maxModelContext}`
+         );
+         const systemMessage = { role: 'system', content: systemPrompt };
+
+         let smythContextWindow = [];
+
+         //loop through messages from last to first and use encodeChat to calculate token lengths
+         //we will use fake chatMessages to calculate the token lengths, these are not used by the LLM, but just for token counting
+         let tokensCount = encodeChat([systemMessage as ChatMessage], 'gpt-4o').length;
+         for (let i = _messages?.length - 1; i >= 0; i--) {
+             const curMessage = _messages[i];
+             if (curMessage.role === 'system') continue;
+
+             tokensCount = 0;
+             if (curMessage?.content) {
+                 // tokensCount += encodeChat([{ role: 'user', content: curMessage.content } as ChatMessage], 'gpt-4o').length;
+                 tokensCount += countTokens(curMessage.content);
+             }
+
+             if (curMessage?.messageBlock?.content) {
+                 // tokensCount += encodeChat([{ role: 'user', content: curMessage.messageBlock.content } as ChatMessage], 'gpt-4o').length;
+                 tokensCount += countTokens(curMessage.messageBlock.content);
+             }
+             if (curMessage.toolsData) {
+                 for (let tool of curMessage.toolsData) {
+                     // tokensCount += encodeChat([{ role: 'user', content: tool.result } as ChatMessage], 'gpt-4o').length;
+                     tokensCount += countTokens(tool.result);
+                 }
+             }
+
+             //did the last message exceed the context window ?
+             if (tokensCount > maxInputContext) {
+                 break;
+             }
+
+             smythContextWindow.unshift(curMessage);
+         }
+         smythContextWindow.unshift(systemMessage);
+
+         let modelContextWindow = [];
+         //now transform the messages to the model format
+         for (let message of smythContextWindow) {
+             if (message.role && message.content) {
+                 modelContextWindow.push({ role: message.role, content: message.content });
+             }
+
+             if (message.messageBlock && message.toolsData) {
+                 const internal_message = this.connector.transformToolMessageBlocks({
+                     messageBlock: message?.messageBlock,
+                     toolsData: message?.toolsData,
+                 });
+
+                 modelContextWindow.push(...internal_message);
+             }
+         }
+
+         modelContextWindow = this.connector.getConsistentMessages(modelContextWindow);
+
+         return modelContextWindow;
+     }
+ }
+
+ function countTokens(content: any, model: 'gpt-4o' | 'gpt-4o-mini' = 'gpt-4o') {
+     try {
+         // Content must be stringified since some providers like Anthropic use object content
+         const _stringifiedContent = typeof content === 'string' ? content : JSON.stringify(content);
+
+         const tokens = encodeChat([{ role: 'user', content: _stringifiedContent } as ChatMessage], model);
+         return tokens.length;
+     } catch (error) {
+         console.warn('Error in countTokens: ', error);
+         return 0;
+     }
+ }
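For orientation, here is a minimal sketch of how the LLMInference class above might be driven. LLMInference.getInstance, prompt, and AccessCandidate.agent are taken from the diff; the import paths, the model id 'gpt-4o', and the agent id are illustrative assumptions, and params is trimmed to the fields the diffed code actually reads.

// Hypothetical usage sketch; not part of the package diff.
import { LLMInference } from '@sre/LLMManager/LLM.inference'; // assumed alias path
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';

async function ask(agentId: string, question: string) {
    // getInstance resolves the candidate's team, asks the models provider which
    // LLM provider serves the model, then binds the matching LLM connector.
    const llm = await LLMInference.getInstance('gpt-4o', AccessCandidate.agent(agentId));

    // prompt() appends the query as a user message, defaults params.model to the
    // instance's model, and post-processes the connector's response.
    return llm.prompt({
        query: question,
        params: { agentId, model: 'gpt-4o' } as any, // other TLLMParams fields omitted
    });
}

The token budgeting in getContextWindow clamps the caller's input budget so that input plus output tokens never exceed the model window. A standalone restatement of that arithmetic, with made-up numbers for a hypothetical 128k-token model:

// Worked example of the clamp in getContextWindow; all values are illustrative.
const maxModelContext = 128_000; // modelInfo?.tokens
const maxModelOutputTokens = 16_384; // modelInfo?.completionTokens
const maxTokens = 120_000; // caller's requested input budget
const maxOutputTokens = 16_384; // caller's requested output budget

let maxInputContext = Math.min(maxTokens, maxModelContext); // 120000
let maxOutputContext = Math.min(maxOutputTokens, maxModelOutputTokens || 0); // 16384

// 120000 + 16384 = 136384 exceeds 128000, so the input side gives back the
// 8384-token overflow: maxInputContext becomes 111616 and the two budgets
// together then fit the model window exactly.
if (maxInputContext + maxOutputContext > maxModelContext) {
    maxInputContext -= maxInputContext + maxOutputContext - maxModelContext;
}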