@smythos/sre 1.5.53 → 1.5.54

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (223)
  1. package/CHANGELOG +98 -98
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/bundle-analysis-lazy.html +4949 -0
  5. package/dist/bundle-analysis.html +4949 -0
  6. package/dist/index.js +3 -3
  7. package/dist/index.js.map +1 -1
  8. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +1 -6
  9. package/dist/types/utils/package-manager.utils.d.ts +26 -0
  10. package/package.json +1 -1
  11. package/src/Components/APICall/APICall.class.ts +157 -157
  12. package/src/Components/APICall/AccessTokenManager.ts +166 -166
  13. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  14. package/src/Components/APICall/OAuth.helper.ts +447 -447
  15. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  16. package/src/Components/APICall/parseData.ts +167 -167
  17. package/src/Components/APICall/parseHeaders.ts +41 -41
  18. package/src/Components/APICall/parseProxy.ts +68 -68
  19. package/src/Components/APICall/parseUrl.ts +91 -91
  20. package/src/Components/APIEndpoint.class.ts +234 -234
  21. package/src/Components/APIOutput.class.ts +58 -58
  22. package/src/Components/AgentPlugin.class.ts +102 -102
  23. package/src/Components/Async.class.ts +155 -155
  24. package/src/Components/Await.class.ts +90 -90
  25. package/src/Components/Classifier.class.ts +158 -158
  26. package/src/Components/Component.class.ts +132 -132
  27. package/src/Components/ComponentHost.class.ts +38 -38
  28. package/src/Components/DataSourceCleaner.class.ts +92 -92
  29. package/src/Components/DataSourceIndexer.class.ts +181 -181
  30. package/src/Components/DataSourceLookup.class.ts +161 -161
  31. package/src/Components/ECMASandbox.class.ts +71 -71
  32. package/src/Components/FEncDec.class.ts +29 -29
  33. package/src/Components/FHash.class.ts +33 -33
  34. package/src/Components/FSign.class.ts +80 -80
  35. package/src/Components/FSleep.class.ts +25 -25
  36. package/src/Components/FTimestamp.class.ts +25 -25
  37. package/src/Components/FileStore.class.ts +78 -78
  38. package/src/Components/ForEach.class.ts +97 -97
  39. package/src/Components/GPTPlugin.class.ts +70 -70
  40. package/src/Components/GenAILLM.class.ts +586 -586
  41. package/src/Components/HuggingFace.class.ts +314 -314
  42. package/src/Components/Image/imageSettings.config.ts +70 -70
  43. package/src/Components/ImageGenerator.class.ts +502 -502
  44. package/src/Components/JSONFilter.class.ts +54 -54
  45. package/src/Components/LLMAssistant.class.ts +213 -213
  46. package/src/Components/LogicAND.class.ts +28 -28
  47. package/src/Components/LogicAtLeast.class.ts +85 -85
  48. package/src/Components/LogicAtMost.class.ts +86 -86
  49. package/src/Components/LogicOR.class.ts +29 -29
  50. package/src/Components/LogicXOR.class.ts +34 -34
  51. package/src/Components/MCPClient.class.ts +138 -138
  52. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  53. package/src/Components/MemoryReadKeyVal.class.ts +66 -66
  54. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  55. package/src/Components/MemoryWriteObject.class.ts +97 -97
  56. package/src/Components/MultimodalLLM.class.ts +128 -128
  57. package/src/Components/OpenAPI.class.ts +72 -72
  58. package/src/Components/PromptGenerator.class.ts +122 -122
  59. package/src/Components/ScrapflyWebScrape.class.ts +159 -159
  60. package/src/Components/ServerlessCode.class.ts +123 -123
  61. package/src/Components/TavilyWebSearch.class.ts +98 -98
  62. package/src/Components/VisionLLM.class.ts +104 -104
  63. package/src/Components/ZapierAction.class.ts +127 -127
  64. package/src/Components/index.ts +97 -97
  65. package/src/Core/AgentProcess.helper.ts +240 -240
  66. package/src/Core/Connector.class.ts +123 -123
  67. package/src/Core/ConnectorsService.ts +197 -197
  68. package/src/Core/DummyConnector.ts +49 -49
  69. package/src/Core/HookService.ts +105 -105
  70. package/src/Core/SmythRuntime.class.ts +235 -235
  71. package/src/Core/SystemEvents.ts +16 -16
  72. package/src/Core/boot.ts +56 -56
  73. package/src/config.ts +15 -15
  74. package/src/constants.ts +126 -126
  75. package/src/data/hugging-face.params.json +579 -579
  76. package/src/helpers/AWSLambdaCode.helper.ts +590 -590
  77. package/src/helpers/BinaryInput.helper.ts +331 -331
  78. package/src/helpers/Conversation.helper.ts +1119 -1119
  79. package/src/helpers/ECMASandbox.helper.ts +54 -54
  80. package/src/helpers/JsonContent.helper.ts +97 -97
  81. package/src/helpers/LocalCache.helper.ts +97 -97
  82. package/src/helpers/Log.helper.ts +274 -274
  83. package/src/helpers/OpenApiParser.helper.ts +150 -150
  84. package/src/helpers/S3Cache.helper.ts +147 -147
  85. package/src/helpers/SmythURI.helper.ts +5 -5
  86. package/src/helpers/Sysconfig.helper.ts +77 -77
  87. package/src/helpers/TemplateString.helper.ts +243 -243
  88. package/src/helpers/TypeChecker.helper.ts +329 -329
  89. package/src/index.ts +3 -3
  90. package/src/index.ts.bak +3 -3
  91. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  92. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  93. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  94. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  95. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
  96. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  97. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  98. package/src/subsystems/AgentManager/AgentLogger.class.ts +301 -297
  99. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  100. package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
  101. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  102. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  103. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  104. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  105. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  106. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  107. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  108. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  109. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  110. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
  111. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  112. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  113. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  114. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  115. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  116. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  117. package/src/subsystems/IO/Log.service/index.ts +13 -13
  118. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  119. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  120. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  121. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  122. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  123. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  124. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  125. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  126. package/src/subsystems/IO/Router.service/index.ts +11 -11
  127. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
  128. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  129. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  130. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  131. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  132. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  133. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
  134. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
  135. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
  136. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  137. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  138. package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
  139. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  140. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  141. package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
  142. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
  143. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  144. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
  145. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
  146. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
  147. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
  148. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
  149. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
  150. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
  151. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -488
  152. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  153. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  154. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  155. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  156. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  157. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  158. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  159. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  160. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
  161. package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
  162. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
  163. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
  164. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  165. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  166. package/src/subsystems/LLMManager/models.ts +2540 -2540
  167. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  168. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  169. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  170. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
  171. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  172. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  173. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  174. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  175. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  176. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  177. package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
  178. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  179. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  180. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  181. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  182. package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
  183. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  184. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
  185. package/src/subsystems/Security/Account.service/index.ts +14 -14
  186. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  187. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  188. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  189. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  190. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  191. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  192. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  193. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  194. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  195. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  196. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  197. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  198. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  199. package/src/types/ACL.types.ts +104 -104
  200. package/src/types/AWS.types.ts +10 -10
  201. package/src/types/Agent.types.ts +61 -61
  202. package/src/types/AgentLogger.types.ts +17 -17
  203. package/src/types/Cache.types.ts +1 -1
  204. package/src/types/Common.types.ts +2 -2
  205. package/src/types/LLM.types.ts +496 -496
  206. package/src/types/Redis.types.ts +8 -8
  207. package/src/types/SRE.types.ts +64 -64
  208. package/src/types/Security.types.ts +14 -14
  209. package/src/types/Storage.types.ts +5 -5
  210. package/src/types/VectorDB.types.ts +86 -86
  211. package/src/utils/base64.utils.ts +275 -275
  212. package/src/utils/cli.utils.ts +68 -68
  213. package/src/utils/data.utils.ts +322 -322
  214. package/src/utils/date-time.utils.ts +22 -22
  215. package/src/utils/general.utils.ts +238 -238
  216. package/src/utils/index.ts +12 -12
  217. package/src/utils/lazy-client.ts +261 -261
  218. package/src/utils/numbers.utils.ts +13 -13
  219. package/src/utils/oauth.utils.ts +35 -35
  220. package/src/utils/string.utils.ts +414 -414
  221. package/src/utils/url.utils.ts +19 -19
  222. package/src/utils/validation.utils.ts +74 -74
  223. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
package/src/subsystems/LLMManager/LLM.inference.ts
@@ -1,339 +1,339 @@
- import _ from 'lodash';
- import { type OpenAI } from 'openai';
- import { encodeChat } from 'gpt-tokenizer';
- import { ChatMessage } from 'gpt-tokenizer/esm/GptEncoding';
- import { ConnectorService } from '@sre/Core/ConnectorsService';
- import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
- import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
- import { LLMConnector } from './LLM.service/LLMConnector';
- import { EventEmitter } from 'events';
- import { GenerateImageConfig, TLLMMessageRole, TLLMModel, TLLMChatResponse } from '@sre/types/LLM.types';
- import { IModelsProviderRequest, ModelsProviderConnector } from './ModelsProvider.service/ModelsProviderConnector';
- import { Logger } from '@sre/helpers/Log.helper';
- import { IAgent } from '@sre/types/Agent.types';
- import { isAgent } from '@sre/AgentManager/Agent.helper';
- import { TLLMParams } from '@sre/types/LLM.types';
-
- const console = Logger('LLMInference');
-
- type TPromptParams = { query?: string; contextWindow?: any[]; files?: any[]; params: TLLMParams };
-
- export class LLMInference {
-     private model: string | TLLMModel;
-     private llmConnector: LLMConnector;
-     private modelProviderReq: IModelsProviderRequest;
-     public teamId?: string;
-
-     public static async getInstance(model: string | TLLMModel, candidate: AccessCandidate) {
-         const modelsProvider: ModelsProviderConnector = ConnectorService.getModelsProviderConnector();
-         if (!modelsProvider.valid) {
-             throw new Error(`Model provider Not available, cannot create LLM instance`);
-         }
-         const accountConnector = ConnectorService.getAccountConnector();
-         const teamId = await accountConnector.requester(candidate).getTeam();
-
-         const llmInference = new LLMInference();
-         llmInference.teamId = teamId;
-
-         llmInference.modelProviderReq = modelsProvider.requester(candidate);
-
-         const llmProvider = await llmInference.modelProviderReq.getProvider(model);
-         if (llmProvider) {
-             llmInference.llmConnector = ConnectorService.getLLMConnector(llmProvider);
-         }
-
-         if (!llmInference.llmConnector) {
-             console.error(`Model ${model} unavailable for team ${teamId}`);
-         }
-
-         llmInference.model = model;
-
-         return llmInference;
-     }
-
-     public static user(candidate: AccessCandidate): any {}
-
-     public get connector(): LLMConnector {
-         return this.llmConnector;
-     }
-
-     public async prompt({ query, contextWindow, files, params }: TPromptParams) {
-         let messages = contextWindow || [];
-
-         if (query) {
-             const content = this.llmConnector.enhancePrompt(query, params);
-             messages.push({ role: TLLMMessageRole.User, content });
-         }
-
-         if (!params.model) params.model = this.model;
-         params.messages = messages;
-         params.files = files;
-
-         try {
-             let response: TLLMChatResponse = await this.llmConnector.requester(AccessCandidate.agent(params.agentId)).request(params);
-
-             const result = this.llmConnector.postProcess(response?.content);
-             if (result.error) {
-                 // If the model stopped before completing the response, this is usually due to output token limit reached.
-                 if (response.finishReason !== 'stop') {
-                     throw new Error('The model stopped before completing the response, this is usually due to output token limit reached.');
-                 }
-
-                 // If the model stopped due to other reasons, throw the error
-                 throw new Error(result.error);
-             }
-             return result;
-         } catch (error: any) {
-             console.error('Error in chatRequest: ', error);
-
-             throw error;
-         }
-     }
-
-     public async promptStream({ query, contextWindow, files, params }: TPromptParams) {
-         let messages = contextWindow || [];
-
-         if (query) {
-             const content = this.llmConnector.enhancePrompt(query, params);
-             messages.push({ role: TLLMMessageRole.User, content });
-         }
-
-         if (!params.model) params.model = this.model;
-         params.messages = messages;
-         params.files = files;
-
-         try {
-             return await this.llmConnector.user(AccessCandidate.agent(params.agentId)).streamRequest(params);
-         } catch (error) {
-             console.error('Error in streamRequest:', error);
-
-             const dummyEmitter = new EventEmitter();
-             process.nextTick(() => {
-                 dummyEmitter.emit('error', error);
-                 dummyEmitter.emit('end');
-             });
-             return dummyEmitter;
-         }
-     }
-
-     public async imageGenRequest({ query, files, params }: TPromptParams) {
-         params.prompt = query;
-         return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageGenRequest(params);
-     }
-
-     public async imageEditRequest({ query, files, params }: TPromptParams) {
-         params.prompt = query;
-         params.files = files;
-         return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageEditRequest(params);
-     }
-
-     public async streamRequest(params: any, agent: string | IAgent) {
-         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
-         try {
-             if (!params.messages || !params.messages?.length) {
-                 throw new Error('Input messages are required.');
-             }
-
-             const model = params.model || this.model;
-
-             return await this.llmConnector.user(AccessCandidate.agent(agentId)).streamRequest({ ...params, model });
-         } catch (error) {
-             console.error('Error in streamRequest:', error);
-
-             const dummyEmitter = new EventEmitter();
-             process.nextTick(() => {
-                 dummyEmitter.emit('error', error);
-                 dummyEmitter.emit('end');
-             });
-             return dummyEmitter;
-         }
-     }
-
-     public async multimodalStreamRequest(params: any, fileSources, agent: string | IAgent) {
-         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
-
-         const promises = [];
-         const _fileSources = [];
-
-         // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
-         for (let file of fileSources) {
-             const binaryInput = BinaryInput.from(file);
-             _fileSources.push(binaryInput);
-             promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
-         }
-
-         await Promise.all(promises);
-
-         params.fileSources = _fileSources;
-
-         try {
-             //FIXME we need to update the connector multimediaStreamRequest in order to ignore prompt param if not provided
-             const userMessage = Array.isArray(params.messages) ? params.messages.pop() : {};
-             const prompt = userMessage?.content || '';
-             const model = params.model || this.model;
-
-             return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
-         } catch (error: any) {
-             console.error('Error in multimodalRequest: ', error);
-
-             throw error;
-         }
-     }
-
-     public async multimodalStreamRequestLegacy(prompt, files: string[], config: any = {}, agent: string | IAgent) {
-         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
-
-         const promises = [];
-         const _files = [];
-
-         // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
-         for (let file of files) {
-             const binaryInput = BinaryInput.from(file);
-             _files.push(binaryInput);
-             promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
-         }
-
-         await Promise.all(promises);
-
-         const params = config.data;
-
-         params.files = _files;
-
-         try {
-             prompt = this.llmConnector.enhancePrompt(prompt, config);
-             const model = params.model || this.model;
-
-             return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
-         } catch (error: any) {
-             console.error('Error in multimodalRequest: ', error);
-
-             throw error;
-         }
-     }
-
-     //Not needed
-     // public getConsistentMessages(messages: TLLMMessageBlock[]) {
-     //     if (!messages?.length) {
-     //         throw new Error('Input messages are required.');
-     //     }
-
-     //     try {
-     //         return this.llmConnector.getConsistentMessages(messages);
-     //     } catch (error) {
-     //         console.warn('Something went wrong in getConsistentMessages: ', error);
-
-     //         return messages; // if something went wrong then we return the original messages
-     //     }
-     // }
-
-     /**
-      * Get the context window for the given messages
-      * @param _messages - The messages to get the context window for (the messages are in smythos generic format)
-      * @param maxTokens - The maximum number of tokens to use for the context window
-      * @param maxOutputTokens - The maximum number of tokens to use for the output
-      * @returns The context window for the given messages
-      */
-     public async getContextWindow(systemPrompt: string, _messages: any[], maxTokens: number, maxOutputTokens: number = 1024): Promise<any[]> {
-         //TODO: handle non key accounts (limit tokens)
-         // const maxModelContext = this._llmHelper?.modelInfo?.keyOptions?.tokens || this._llmHelper?.modelInfo?.tokens || 256;
-
-         //#region get max model context
-
-         const modelInfo = await this.modelProviderReq.getModelInfo(this.model, true);
-         let maxModelContext = modelInfo?.tokens;
-         let maxModelOutputTokens = modelInfo?.completionTokens || modelInfo?.tokens;
-         // const isStandardLLM = LLMRegistry.isStandardLLM(this.model);
-
-         // if (isStandardLLM) {
-         //     maxModelContext = LLMRegistry.getMaxContextTokens(this.model, true); // we just provide true for hasAPIKey to get the original max context
-         // } else {
-         //     const team = AccessCandidate.team(this.teamId);
-         //     const customLLMRegistry = await CustomLLMRegistry.getInstance(team);
-         //     maxModelContext = customLLMRegistry.getMaxContextTokens(this.model);
-         //     maxModelOutputTokens = customLLMRegistry.getMaxCompletionTokens(this.model);
-         // }
-         //#endregion get max model context
-
-         let maxInputContext = Math.min(maxTokens, maxModelContext);
-         let maxOutputContext = Math.min(maxOutputTokens, maxModelOutputTokens || 0);
-
-         if (maxInputContext + maxOutputContext > maxModelContext) {
-             maxInputContext -= maxInputContext + maxOutputContext - maxModelContext;
-         }
-
-         if (maxInputContext <= 0) {
-             console.warn('Max input context is 0, returning empty context window, This usually indicates a wrong model configuration');
-         }
-
-         const systemMessage = { role: 'system', content: systemPrompt };
-
-         let smythContextWindow = [];
-
-         //loop through messages from last to first and use encodeChat to calculate token lengths
-         //we will use fake chatMessages to calculate the token lengths, these are not used by the LLM, but just for token counting
-         let tokensCount = encodeChat([systemMessage as ChatMessage], 'gpt-4o').length;
-         for (let i = _messages?.length - 1; i >= 0; i--) {
-             const curMessage = _messages[i];
-             if (curMessage.role === 'system') continue;
-
-             tokensCount = 0;
-             if (curMessage?.content) {
-                 // tokensCount += encodeChat([{ role: 'user', content: curMessage.content } as ChatMessage], 'gpt-4o').length;
-                 tokensCount += countTokens(curMessage.content);
-             }
-
-             if (curMessage?.messageBlock?.content) {
-                 // tokensCount += encodeChat([{ role: 'user', content: curMessage.messageBlock.content } as ChatMessage], 'gpt-4o').length;
-                 tokensCount += countTokens(curMessage.messageBlock.content);
-             }
-             if (curMessage.toolsData) {
-                 for (let tool of curMessage.toolsData) {
-                     // tokensCount += encodeChat([{ role: 'user', content: tool.result } as ChatMessage], 'gpt-4o').length;
-                     tokensCount += countTokens(tool.result);
-                 }
-             }
-
-             //did the last message exceed the context window ?
-             if (tokensCount > maxInputContext) {
-                 break;
-             }
-
-             smythContextWindow.unshift(curMessage);
-         }
-         smythContextWindow.unshift(systemMessage);
-
-         let modelContextWindow = [];
-         //now transform the messages to the model format
-         for (let message of smythContextWindow) {
-             if (message.role && message.content) {
-                 modelContextWindow.push({ role: message.role, content: message.content });
-             }
-
-             if (message.messageBlock && message.toolsData) {
-                 const internal_message = this.connector.transformToolMessageBlocks({
-                     messageBlock: message?.messageBlock,
-                     toolsData: message?.toolsData,
-                 });
-
-                 modelContextWindow.push(...internal_message);
-             }
-         }
-
-         modelContextWindow = this.connector.getConsistentMessages(modelContextWindow);
-
-         return modelContextWindow;
-     }
- }
-
- function countTokens(content: any, model: 'gpt-4o' | 'gpt-4o-mini' = 'gpt-4o') {
-     try {
-         // Content must be stringified since some providers like Anthropic use object content
-         const _stringifiedContent = typeof content === 'string' ? content : JSON.stringify(content);
-
-         const tokens = encodeChat([{ role: 'user', content: _stringifiedContent } as ChatMessage], model);
-         return tokens.length;
-     } catch (error) {
-         console.warn('Error in countTokens: ', error);
-         return 0;
-     }
- }
+ import _ from 'lodash';
+ import { type OpenAI } from 'openai';
+ import { encodeChat } from 'gpt-tokenizer';
+ import { ChatMessage } from 'gpt-tokenizer/esm/GptEncoding';
+ import { ConnectorService } from '@sre/Core/ConnectorsService';
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
+ import { LLMConnector } from './LLM.service/LLMConnector';
+ import { EventEmitter } from 'events';
+ import { GenerateImageConfig, TLLMMessageRole, TLLMModel, TLLMChatResponse } from '@sre/types/LLM.types';
+ import { IModelsProviderRequest, ModelsProviderConnector } from './ModelsProvider.service/ModelsProviderConnector';
+ import { Logger } from '@sre/helpers/Log.helper';
+ import { IAgent } from '@sre/types/Agent.types';
+ import { isAgent } from '@sre/AgentManager/Agent.helper';
+ import { TLLMParams } from '@sre/types/LLM.types';
+
+ const console = Logger('LLMInference');
+
+ type TPromptParams = { query?: string; contextWindow?: any[]; files?: any[]; params: TLLMParams };
+
+ export class LLMInference {
+     private model: string | TLLMModel;
+     private llmConnector: LLMConnector;
+     private modelProviderReq: IModelsProviderRequest;
+     public teamId?: string;
+
+     public static async getInstance(model: string | TLLMModel, candidate: AccessCandidate) {
+         const modelsProvider: ModelsProviderConnector = ConnectorService.getModelsProviderConnector();
+         if (!modelsProvider.valid) {
+             throw new Error(`Model provider Not available, cannot create LLM instance`);
+         }
+         const accountConnector = ConnectorService.getAccountConnector();
+         const teamId = await accountConnector.requester(candidate).getTeam();
+
+         const llmInference = new LLMInference();
+         llmInference.teamId = teamId;
+
+         llmInference.modelProviderReq = modelsProvider.requester(candidate);
+
+         const llmProvider = await llmInference.modelProviderReq.getProvider(model);
+         if (llmProvider) {
+             llmInference.llmConnector = ConnectorService.getLLMConnector(llmProvider);
+         }
+
+         if (!llmInference.llmConnector) {
+             console.error(`Model ${model} unavailable for team ${teamId}`);
+         }
+
+         llmInference.model = model;
+
+         return llmInference;
+     }
+
+     public static user(candidate: AccessCandidate): any {}
+
+     public get connector(): LLMConnector {
+         return this.llmConnector;
+     }
+
+     public async prompt({ query, contextWindow, files, params }: TPromptParams) {
+         let messages = contextWindow || [];
+
+         if (query) {
+             const content = this.llmConnector.enhancePrompt(query, params);
+             messages.push({ role: TLLMMessageRole.User, content });
+         }
+
+         if (!params.model) params.model = this.model;
+         params.messages = messages;
+         params.files = files;
+
+         try {
+             let response: TLLMChatResponse = await this.llmConnector.requester(AccessCandidate.agent(params.agentId)).request(params);
+
+             const result = this.llmConnector.postProcess(response?.content);
+             if (result.error) {
+                 // If the model stopped before completing the response, this is usually due to output token limit reached.
+                 if (response.finishReason !== 'stop') {
+                     throw new Error('The model stopped before completing the response, this is usually due to output token limit reached.');
+                 }
+
+                 // If the model stopped due to other reasons, throw the error
+                 throw new Error(result.error);
+             }
+             return result;
+         } catch (error: any) {
+             console.error('Error in chatRequest: ', error);
+
+             throw error;
+         }
+     }
+
+     public async promptStream({ query, contextWindow, files, params }: TPromptParams) {
+         let messages = contextWindow || [];
+
+         if (query) {
+             const content = this.llmConnector.enhancePrompt(query, params);
+             messages.push({ role: TLLMMessageRole.User, content });
+         }
+
+         if (!params.model) params.model = this.model;
+         params.messages = messages;
+         params.files = files;
+
+         try {
+             return await this.llmConnector.user(AccessCandidate.agent(params.agentId)).streamRequest(params);
+         } catch (error) {
+             console.error('Error in streamRequest:', error);
+
+             const dummyEmitter = new EventEmitter();
+             process.nextTick(() => {
+                 dummyEmitter.emit('error', error);
+                 dummyEmitter.emit('end');
+             });
+             return dummyEmitter;
+         }
+     }
+
+     public async imageGenRequest({ query, files, params }: TPromptParams) {
+         params.prompt = query;
+         return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageGenRequest(params);
+     }
+
+     public async imageEditRequest({ query, files, params }: TPromptParams) {
+         params.prompt = query;
+         params.files = files;
+         return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageEditRequest(params);
+     }
+
+     public async streamRequest(params: any, agent: string | IAgent) {
+         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
+         try {
+             if (!params.messages || !params.messages?.length) {
+                 throw new Error('Input messages are required.');
+             }
+
+             const model = params.model || this.model;
+
+             return await this.llmConnector.user(AccessCandidate.agent(agentId)).streamRequest({ ...params, model });
+         } catch (error) {
+             console.error('Error in streamRequest:', error);
+
+             const dummyEmitter = new EventEmitter();
+             process.nextTick(() => {
+                 dummyEmitter.emit('error', error);
+                 dummyEmitter.emit('end');
+             });
+             return dummyEmitter;
+         }
+     }
+
+     public async multimodalStreamRequest(params: any, fileSources, agent: string | IAgent) {
+         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
+
+         const promises = [];
+         const _fileSources = [];
+
+         // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
+         for (let file of fileSources) {
+             const binaryInput = BinaryInput.from(file);
+             _fileSources.push(binaryInput);
+             promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
+         }
+
+         await Promise.all(promises);
+
+         params.fileSources = _fileSources;
+
+         try {
+             //FIXME we need to update the connector multimediaStreamRequest in order to ignore prompt param if not provided
+             const userMessage = Array.isArray(params.messages) ? params.messages.pop() : {};
+             const prompt = userMessage?.content || '';
+             const model = params.model || this.model;
+
+             return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
+         } catch (error: any) {
+             console.error('Error in multimodalRequest: ', error);
+
+             throw error;
+         }
+     }
+
+     public async multimodalStreamRequestLegacy(prompt, files: string[], config: any = {}, agent: string | IAgent) {
+         const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
+
+         const promises = [];
+         const _files = [];
+
+         // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
+         for (let file of files) {
+             const binaryInput = BinaryInput.from(file);
+             _files.push(binaryInput);
+             promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
+         }
+
+         await Promise.all(promises);
+
+         const params = config.data;
+
+         params.files = _files;
+
+         try {
+             prompt = this.llmConnector.enhancePrompt(prompt, config);
+             const model = params.model || this.model;
+
+             return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
+         } catch (error: any) {
+             console.error('Error in multimodalRequest: ', error);
+
+             throw error;
+         }
+     }
+
+     //Not needed
+     // public getConsistentMessages(messages: TLLMMessageBlock[]) {
+     //     if (!messages?.length) {
+     //         throw new Error('Input messages are required.');
+     //     }
+
+     //     try {
+     //         return this.llmConnector.getConsistentMessages(messages);
+     //     } catch (error) {
+     //         console.warn('Something went wrong in getConsistentMessages: ', error);
+
+     //         return messages; // if something went wrong then we return the original messages
+     //     }
+     // }
+
+     /**
+      * Get the context window for the given messages
+      * @param _messages - The messages to get the context window for (the messages are in smythos generic format)
+      * @param maxTokens - The maximum number of tokens to use for the context window
+      * @param maxOutputTokens - The maximum number of tokens to use for the output
+      * @returns The context window for the given messages
+      */
+     public async getContextWindow(systemPrompt: string, _messages: any[], maxTokens: number, maxOutputTokens: number = 1024): Promise<any[]> {
+         //TODO: handle non key accounts (limit tokens)
+         // const maxModelContext = this._llmHelper?.modelInfo?.keyOptions?.tokens || this._llmHelper?.modelInfo?.tokens || 256;
+
+         //#region get max model context
+
+         const modelInfo = await this.modelProviderReq.getModelInfo(this.model, true);
+         let maxModelContext = modelInfo?.tokens;
+         let maxModelOutputTokens = modelInfo?.completionTokens || modelInfo?.tokens;
+         // const isStandardLLM = LLMRegistry.isStandardLLM(this.model);
+
+         // if (isStandardLLM) {
+         //     maxModelContext = LLMRegistry.getMaxContextTokens(this.model, true); // we just provide true for hasAPIKey to get the original max context
+         // } else {
+         //     const team = AccessCandidate.team(this.teamId);
+         //     const customLLMRegistry = await CustomLLMRegistry.getInstance(team);
+         //     maxModelContext = customLLMRegistry.getMaxContextTokens(this.model);
+         //     maxModelOutputTokens = customLLMRegistry.getMaxCompletionTokens(this.model);
+         // }
+         //#endregion get max model context
+
+         let maxInputContext = Math.min(maxTokens, maxModelContext);
+         let maxOutputContext = Math.min(maxOutputTokens, maxModelOutputTokens || 0);
+
+         if (maxInputContext + maxOutputContext > maxModelContext) {
+             maxInputContext -= maxInputContext + maxOutputContext - maxModelContext;
+         }
+
+         if (maxInputContext <= 0) {
+             console.warn('Max input context is 0, returning empty context window, This usually indicates a wrong model configuration');
+         }
+
+         const systemMessage = { role: 'system', content: systemPrompt };
+
+         let smythContextWindow = [];
+
+         //loop through messages from last to first and use encodeChat to calculate token lengths
+         //we will use fake chatMessages to calculate the token lengths, these are not used by the LLM, but just for token counting
+         let tokensCount = encodeChat([systemMessage as ChatMessage], 'gpt-4o').length;
+         for (let i = _messages?.length - 1; i >= 0; i--) {
+             const curMessage = _messages[i];
+             if (curMessage.role === 'system') continue;
+
+             tokensCount = 0;
+             if (curMessage?.content) {
+                 // tokensCount += encodeChat([{ role: 'user', content: curMessage.content } as ChatMessage], 'gpt-4o').length;
+                 tokensCount += countTokens(curMessage.content);
+             }
+
+             if (curMessage?.messageBlock?.content) {
+                 // tokensCount += encodeChat([{ role: 'user', content: curMessage.messageBlock.content } as ChatMessage], 'gpt-4o').length;
+                 tokensCount += countTokens(curMessage.messageBlock.content);
+             }
+             if (curMessage.toolsData) {
+                 for (let tool of curMessage.toolsData) {
+                     // tokensCount += encodeChat([{ role: 'user', content: tool.result } as ChatMessage], 'gpt-4o').length;
+                     tokensCount += countTokens(tool.result);
+                 }
+             }
+
+             //did the last message exceed the context window ?
+             if (tokensCount > maxInputContext) {
+                 break;
+             }
+
+             smythContextWindow.unshift(curMessage);
+         }
+         smythContextWindow.unshift(systemMessage);
+
+         let modelContextWindow = [];
+         //now transform the messages to the model format
+         for (let message of smythContextWindow) {
+             if (message.role && message.content) {
+                 modelContextWindow.push({ role: message.role, content: message.content });
+             }
+
+             if (message.messageBlock && message.toolsData) {
+                 const internal_message = this.connector.transformToolMessageBlocks({
+                     messageBlock: message?.messageBlock,
+                     toolsData: message?.toolsData,
+                 });
+
+                 modelContextWindow.push(...internal_message);
+             }
+         }
+
+         modelContextWindow = this.connector.getConsistentMessages(modelContextWindow);
+
+         return modelContextWindow;
+     }
+ }
+
+ function countTokens(content: any, model: 'gpt-4o' | 'gpt-4o-mini' = 'gpt-4o') {
+     try {
+         // Content must be stringified since some providers like Anthropic use object content
+         const _stringifiedContent = typeof content === 'string' ? content : JSON.stringify(content);
+
+         const tokens = encodeChat([{ role: 'user', content: _stringifiedContent } as ChatMessage], model);
+         return tokens.length;
+     } catch (error) {
+         console.warn('Error in countTokens: ', error);
+         return 0;
+     }
+ }
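
For orientation, below is a minimal consumer sketch (not part of the published package) showing how the LLMInference class in this file might be driven. The import specifiers, the 'gpt-4o' model id, and the partial TLLMParams object are illustrative assumptions; only getInstance(), AccessCandidate.agent(), and prompt() are taken from the source above.

// Hypothetical consumer of LLMInference; import paths are assumptions.
// Inside the package the class lives at src/subsystems/LLMManager/LLM.inference.ts
// and uses the '@sre/...' path aliases seen in the diff.
import { LLMInference } from '@smythos/sre'; // assumed export path
import { AccessCandidate } from '@smythos/sre'; // assumed export path

async function ask(agentId: string, question: string): Promise<any> {
    // getInstance() resolves the caller's team and the provider connector for
    // the requested model; per the source above it logs an error (but does not
    // throw) when no connector is available for that team.
    const llm = await LLMInference.getInstance('gpt-4o', AccessCandidate.agent(agentId));

    // prompt() appends the query as a user message, fills params.model with the
    // instance model when unset, and post-processes the connector response.
    return llm.prompt({
        query: question,
        params: { agentId } as any, // TLLMParams; only the field used here is shown
    });
}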