@smythos/sre 1.5.53 → 1.5.54
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG +98 -98
- package/LICENSE +18 -18
- package/README.md +135 -135
- package/dist/bundle-analysis-lazy.html +4949 -0
- package/dist/bundle-analysis.html +4949 -0
- package/dist/index.js +3 -3
- package/dist/index.js.map +1 -1
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +1 -6
- package/dist/types/utils/package-manager.utils.d.ts +26 -0
- package/package.json +1 -1
- package/src/Components/APICall/APICall.class.ts +157 -157
- package/src/Components/APICall/AccessTokenManager.ts +166 -166
- package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
- package/src/Components/APICall/OAuth.helper.ts +447 -447
- package/src/Components/APICall/mimeTypeCategories.ts +46 -46
- package/src/Components/APICall/parseData.ts +167 -167
- package/src/Components/APICall/parseHeaders.ts +41 -41
- package/src/Components/APICall/parseProxy.ts +68 -68
- package/src/Components/APICall/parseUrl.ts +91 -91
- package/src/Components/APIEndpoint.class.ts +234 -234
- package/src/Components/APIOutput.class.ts +58 -58
- package/src/Components/AgentPlugin.class.ts +102 -102
- package/src/Components/Async.class.ts +155 -155
- package/src/Components/Await.class.ts +90 -90
- package/src/Components/Classifier.class.ts +158 -158
- package/src/Components/Component.class.ts +132 -132
- package/src/Components/ComponentHost.class.ts +38 -38
- package/src/Components/DataSourceCleaner.class.ts +92 -92
- package/src/Components/DataSourceIndexer.class.ts +181 -181
- package/src/Components/DataSourceLookup.class.ts +161 -161
- package/src/Components/ECMASandbox.class.ts +71 -71
- package/src/Components/FEncDec.class.ts +29 -29
- package/src/Components/FHash.class.ts +33 -33
- package/src/Components/FSign.class.ts +80 -80
- package/src/Components/FSleep.class.ts +25 -25
- package/src/Components/FTimestamp.class.ts +25 -25
- package/src/Components/FileStore.class.ts +78 -78
- package/src/Components/ForEach.class.ts +97 -97
- package/src/Components/GPTPlugin.class.ts +70 -70
- package/src/Components/GenAILLM.class.ts +586 -586
- package/src/Components/HuggingFace.class.ts +314 -314
- package/src/Components/Image/imageSettings.config.ts +70 -70
- package/src/Components/ImageGenerator.class.ts +502 -502
- package/src/Components/JSONFilter.class.ts +54 -54
- package/src/Components/LLMAssistant.class.ts +213 -213
- package/src/Components/LogicAND.class.ts +28 -28
- package/src/Components/LogicAtLeast.class.ts +85 -85
- package/src/Components/LogicAtMost.class.ts +86 -86
- package/src/Components/LogicOR.class.ts +29 -29
- package/src/Components/LogicXOR.class.ts +34 -34
- package/src/Components/MCPClient.class.ts +138 -138
- package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
- package/src/Components/MemoryReadKeyVal.class.ts +66 -66
- package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
- package/src/Components/MemoryWriteObject.class.ts +97 -97
- package/src/Components/MultimodalLLM.class.ts +128 -128
- package/src/Components/OpenAPI.class.ts +72 -72
- package/src/Components/PromptGenerator.class.ts +122 -122
- package/src/Components/ScrapflyWebScrape.class.ts +159 -159
- package/src/Components/ServerlessCode.class.ts +123 -123
- package/src/Components/TavilyWebSearch.class.ts +98 -98
- package/src/Components/VisionLLM.class.ts +104 -104
- package/src/Components/ZapierAction.class.ts +127 -127
- package/src/Components/index.ts +97 -97
- package/src/Core/AgentProcess.helper.ts +240 -240
- package/src/Core/Connector.class.ts +123 -123
- package/src/Core/ConnectorsService.ts +197 -197
- package/src/Core/DummyConnector.ts +49 -49
- package/src/Core/HookService.ts +105 -105
- package/src/Core/SmythRuntime.class.ts +235 -235
- package/src/Core/SystemEvents.ts +16 -16
- package/src/Core/boot.ts +56 -56
- package/src/config.ts +15 -15
- package/src/constants.ts +126 -126
- package/src/data/hugging-face.params.json +579 -579
- package/src/helpers/AWSLambdaCode.helper.ts +590 -590
- package/src/helpers/BinaryInput.helper.ts +331 -331
- package/src/helpers/Conversation.helper.ts +1119 -1119
- package/src/helpers/ECMASandbox.helper.ts +54 -54
- package/src/helpers/JsonContent.helper.ts +97 -97
- package/src/helpers/LocalCache.helper.ts +97 -97
- package/src/helpers/Log.helper.ts +274 -274
- package/src/helpers/OpenApiParser.helper.ts +150 -150
- package/src/helpers/S3Cache.helper.ts +147 -147
- package/src/helpers/SmythURI.helper.ts +5 -5
- package/src/helpers/Sysconfig.helper.ts +77 -77
- package/src/helpers/TemplateString.helper.ts +243 -243
- package/src/helpers/TypeChecker.helper.ts +329 -329
- package/src/index.ts +3 -3
- package/src/index.ts.bak +3 -3
- package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
- package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
- package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
- package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
- package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
- package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
- package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
- package/src/subsystems/AgentManager/AgentLogger.class.ts +301 -297
- package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
- package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
- package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
- package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
- package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
- package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
- package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
- package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
- package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
- package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
- package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
- package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
- package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
- package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
- package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
- package/src/subsystems/IO/CLI.service/index.ts +9 -9
- package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
- package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
- package/src/subsystems/IO/Log.service/index.ts +13 -13
- package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
- package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
- package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
- package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
- package/src/subsystems/IO/NKV.service/index.ts +14 -14
- package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
- package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
- package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
- package/src/subsystems/IO/Router.service/index.ts +11 -11
- package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
- package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
- package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
- package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
- package/src/subsystems/IO/Storage.service/index.ts +13 -13
- package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
- package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
- package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
- package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
- package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
- package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
- package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
- package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
- package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
- package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
- package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
- package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
- package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
- package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
- package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
- package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
- package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
- package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
- package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -488
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
- package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
- package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
- package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
- package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
- package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
- package/src/subsystems/LLMManager/custom-models.ts +854 -854
- package/src/subsystems/LLMManager/models.ts +2540 -2540
- package/src/subsystems/LLMManager/paramMappings.ts +69 -69
- package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
- package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
- package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
- package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
- package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
- package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
- package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
- package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
- package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
- package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
- package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
- package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
- package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
- package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
- package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
- package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
- package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
- package/src/subsystems/Security/Account.service/index.ts +14 -14
- package/src/subsystems/Security/Credentials.helper.ts +62 -62
- package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
- package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
- package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
- package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
- package/src/subsystems/Security/SecureConnector.class.ts +110 -110
- package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
- package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
- package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
- package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
- package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
- package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
- package/src/subsystems/Security/Vault.service/index.ts +12 -12
- package/src/types/ACL.types.ts +104 -104
- package/src/types/AWS.types.ts +10 -10
- package/src/types/Agent.types.ts +61 -61
- package/src/types/AgentLogger.types.ts +17 -17
- package/src/types/Cache.types.ts +1 -1
- package/src/types/Common.types.ts +2 -2
- package/src/types/LLM.types.ts +496 -496
- package/src/types/Redis.types.ts +8 -8
- package/src/types/SRE.types.ts +64 -64
- package/src/types/Security.types.ts +14 -14
- package/src/types/Storage.types.ts +5 -5
- package/src/types/VectorDB.types.ts +86 -86
- package/src/utils/base64.utils.ts +275 -275
- package/src/utils/cli.utils.ts +68 -68
- package/src/utils/data.utils.ts +322 -322
- package/src/utils/date-time.utils.ts +22 -22
- package/src/utils/general.utils.ts +238 -238
- package/src/utils/index.ts +12 -12
- package/src/utils/lazy-client.ts +261 -261
- package/src/utils/numbers.utils.ts +13 -13
- package/src/utils/oauth.utils.ts +35 -35
- package/src/utils/string.utils.ts +414 -414
- package/src/utils/url.utils.ts +19 -19
- package/src/utils/validation.utils.ts +74 -74
- package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
@@ -1,339 +1,339 @@
-import _ from 'lodash';
-import { type OpenAI } from 'openai';
-import { encodeChat } from 'gpt-tokenizer';
-import { ChatMessage } from 'gpt-tokenizer/esm/GptEncoding';
-import { ConnectorService } from '@sre/Core/ConnectorsService';
-import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
-import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
-import { LLMConnector } from './LLM.service/LLMConnector';
-import { EventEmitter } from 'events';
-import { GenerateImageConfig, TLLMMessageRole, TLLMModel, TLLMChatResponse } from '@sre/types/LLM.types';
-import { IModelsProviderRequest, ModelsProviderConnector } from './ModelsProvider.service/ModelsProviderConnector';
-import { Logger } from '@sre/helpers/Log.helper';
-import { IAgent } from '@sre/types/Agent.types';
-import { isAgent } from '@sre/AgentManager/Agent.helper';
-import { TLLMParams } from '@sre/types/LLM.types';
-
-const console = Logger('LLMInference');
-
-type TPromptParams = { query?: string; contextWindow?: any[]; files?: any[]; params: TLLMParams };
-
-export class LLMInference {
-    private model: string | TLLMModel;
-    private llmConnector: LLMConnector;
-    private modelProviderReq: IModelsProviderRequest;
-    public teamId?: string;
-
-    public static async getInstance(model: string | TLLMModel, candidate: AccessCandidate) {
-        const modelsProvider: ModelsProviderConnector = ConnectorService.getModelsProviderConnector();
-        if (!modelsProvider.valid) {
-            throw new Error(`Model provider Not available, cannot create LLM instance`);
-        }
-        const accountConnector = ConnectorService.getAccountConnector();
-        const teamId = await accountConnector.requester(candidate).getTeam();
-
-        const llmInference = new LLMInference();
-        llmInference.teamId = teamId;
-
-        llmInference.modelProviderReq = modelsProvider.requester(candidate);
-
-        const llmProvider = await llmInference.modelProviderReq.getProvider(model);
-        if (llmProvider) {
-            llmInference.llmConnector = ConnectorService.getLLMConnector(llmProvider);
-        }
-
-        if (!llmInference.llmConnector) {
-            console.error(`Model ${model} unavailable for team ${teamId}`);
-        }
-
-        llmInference.model = model;
-
-        return llmInference;
-    }
-
-    public static user(candidate: AccessCandidate): any {}
-
-    public get connector(): LLMConnector {
-        return this.llmConnector;
-    }
-
-    public async prompt({ query, contextWindow, files, params }: TPromptParams) {
-        let messages = contextWindow || [];
-
-        if (query) {
-            const content = this.llmConnector.enhancePrompt(query, params);
-            messages.push({ role: TLLMMessageRole.User, content });
-        }
-
-        if (!params.model) params.model = this.model;
-        params.messages = messages;
-        params.files = files;
-
-        try {
-            let response: TLLMChatResponse = await this.llmConnector.requester(AccessCandidate.agent(params.agentId)).request(params);
-
-            const result = this.llmConnector.postProcess(response?.content);
-            if (result.error) {
-                // If the model stopped before completing the response, this is usually due to output token limit reached.
-                if (response.finishReason !== 'stop') {
-                    throw new Error('The model stopped before completing the response, this is usually due to output token limit reached.');
-                }
-
-                // If the model stopped due to other reasons, throw the error
-                throw new Error(result.error);
-            }
-            return result;
-        } catch (error: any) {
-            console.error('Error in chatRequest: ', error);
-
-            throw error;
-        }
-    }
-
-    public async promptStream({ query, contextWindow, files, params }: TPromptParams) {
-        let messages = contextWindow || [];
-
-        if (query) {
-            const content = this.llmConnector.enhancePrompt(query, params);
-            messages.push({ role: TLLMMessageRole.User, content });
-        }
-
-        if (!params.model) params.model = this.model;
-        params.messages = messages;
-        params.files = files;
-
-        try {
-            return await this.llmConnector.user(AccessCandidate.agent(params.agentId)).streamRequest(params);
-        } catch (error) {
-            console.error('Error in streamRequest:', error);
-
-            const dummyEmitter = new EventEmitter();
-            process.nextTick(() => {
-                dummyEmitter.emit('error', error);
-                dummyEmitter.emit('end');
-            });
-            return dummyEmitter;
-        }
-    }
-
-    public async imageGenRequest({ query, files, params }: TPromptParams) {
-        params.prompt = query;
-        return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageGenRequest(params);
-    }
-
-    public async imageEditRequest({ query, files, params }: TPromptParams) {
-        params.prompt = query;
-        params.files = files;
-        return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageEditRequest(params);
-    }
-
-    public async streamRequest(params: any, agent: string | IAgent) {
-        const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
-        try {
-            if (!params.messages || !params.messages?.length) {
-                throw new Error('Input messages are required.');
-            }
-
-            const model = params.model || this.model;
-
-            return await this.llmConnector.user(AccessCandidate.agent(agentId)).streamRequest({ ...params, model });
-        } catch (error) {
-            console.error('Error in streamRequest:', error);
-
-            const dummyEmitter = new EventEmitter();
-            process.nextTick(() => {
-                dummyEmitter.emit('error', error);
-                dummyEmitter.emit('end');
-            });
-            return dummyEmitter;
-        }
-    }
-
-    public async multimodalStreamRequest(params: any, fileSources, agent: string | IAgent) {
-        const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
-
-        const promises = [];
-        const _fileSources = [];
-
-        // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
-        for (let file of fileSources) {
-            const binaryInput = BinaryInput.from(file);
-            _fileSources.push(binaryInput);
-            promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
-        }
-
-        await Promise.all(promises);
-
-        params.fileSources = _fileSources;
-
-        try {
-            //FIXME we need to update the connector multimediaStreamRequest in order to ignore prompt param if not provided
-            const userMessage = Array.isArray(params.messages) ? params.messages.pop() : {};
-            const prompt = userMessage?.content || '';
-            const model = params.model || this.model;
-
-            return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
-        } catch (error: any) {
-            console.error('Error in multimodalRequest: ', error);
-
-            throw error;
-        }
-    }
-
-    public async multimodalStreamRequestLegacy(prompt, files: string[], config: any = {}, agent: string | IAgent) {
-        const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
-
-        const promises = [];
-        const _files = [];
-
-        // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
-        for (let file of files) {
-            const binaryInput = BinaryInput.from(file);
-            _files.push(binaryInput);
-            promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
-        }
-
-        await Promise.all(promises);
-
-        const params = config.data;
-
-        params.files = _files;
-
-        try {
-            prompt = this.llmConnector.enhancePrompt(prompt, config);
-            const model = params.model || this.model;
-
-            return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
-        } catch (error: any) {
-            console.error('Error in multimodalRequest: ', error);
-
-            throw error;
-        }
-    }
-
-    //Not needed
-    // public getConsistentMessages(messages: TLLMMessageBlock[]) {
-    //     if (!messages?.length) {
-    //         throw new Error('Input messages are required.');
-    //     }
-
-    //     try {
-    //         return this.llmConnector.getConsistentMessages(messages);
-    //     } catch (error) {
-    //         console.warn('Something went wrong in getConsistentMessages: ', error);
-
-    //         return messages; // if something went wrong then we return the original messages
-    //     }
-    // }
-
-    /**
-     * Get the context window for the given messages
-     * @param _messages - The messages to get the context window for (the messages are in smythos generic format)
-     * @param maxTokens - The maximum number of tokens to use for the context window
-     * @param maxOutputTokens - The maximum number of tokens to use for the output
-     * @returns The context window for the given messages
-     */
-    public async getContextWindow(systemPrompt: string, _messages: any[], maxTokens: number, maxOutputTokens: number = 1024): Promise<any[]> {
-        //TODO: handle non key accounts (limit tokens)
-        // const maxModelContext = this._llmHelper?.modelInfo?.keyOptions?.tokens || this._llmHelper?.modelInfo?.tokens || 256;
-
-        //#region get max model context
-
-        const modelInfo = await this.modelProviderReq.getModelInfo(this.model, true);
-        let maxModelContext = modelInfo?.tokens;
-        let maxModelOutputTokens = modelInfo?.completionTokens || modelInfo?.tokens;
-        // const isStandardLLM = LLMRegistry.isStandardLLM(this.model);
-
-        // if (isStandardLLM) {
-        //     maxModelContext = LLMRegistry.getMaxContextTokens(this.model, true); // we just provide true for hasAPIKey to get the original max context
-        // } else {
-        //     const team = AccessCandidate.team(this.teamId);
-        //     const customLLMRegistry = await CustomLLMRegistry.getInstance(team);
-        //     maxModelContext = customLLMRegistry.getMaxContextTokens(this.model);
-        //     maxModelOutputTokens = customLLMRegistry.getMaxCompletionTokens(this.model);
-        // }
-        //#endregion get max model context
-
-        let maxInputContext = Math.min(maxTokens, maxModelContext);
-        let maxOutputContext = Math.min(maxOutputTokens, maxModelOutputTokens || 0);
-
-        if (maxInputContext + maxOutputContext > maxModelContext) {
-            maxInputContext -= maxInputContext + maxOutputContext - maxModelContext;
-        }
-
-        if (maxInputContext <= 0) {
-            console.warn('Max input context is 0, returning empty context window, This usually indicates a wrong model configuration');
-        }
-
-        const systemMessage = { role: 'system', content: systemPrompt };
-
-        let smythContextWindow = [];
-
-        //loop through messages from last to first and use encodeChat to calculate token lengths
-        //we will use fake chatMessages to calculate the token lengths, these are not used by the LLM, but just for token counting
-        let tokensCount = encodeChat([systemMessage as ChatMessage], 'gpt-4o').length;
-        for (let i = _messages?.length - 1; i >= 0; i--) {
-            const curMessage = _messages[i];
-            if (curMessage.role === 'system') continue;
-
-            tokensCount = 0;
-            if (curMessage?.content) {
-                // tokensCount += encodeChat([{ role: 'user', content: curMessage.content } as ChatMessage], 'gpt-4o').length;
-                tokensCount += countTokens(curMessage.content);
-            }
-
-            if (curMessage?.messageBlock?.content) {
-                // tokensCount += encodeChat([{ role: 'user', content: curMessage.messageBlock.content } as ChatMessage], 'gpt-4o').length;
-                tokensCount += countTokens(curMessage.messageBlock.content);
-            }
-            if (curMessage.toolsData) {
-                for (let tool of curMessage.toolsData) {
-                    // tokensCount += encodeChat([{ role: 'user', content: tool.result } as ChatMessage], 'gpt-4o').length;
-                    tokensCount += countTokens(tool.result);
-                }
-            }
-
-            //did the last message exceed the context window ?
-            if (tokensCount > maxInputContext) {
-                break;
-            }
-
-            smythContextWindow.unshift(curMessage);
-        }
-        smythContextWindow.unshift(systemMessage);
-
-        let modelContextWindow = [];
-        //now transform the messages to the model format
-        for (let message of smythContextWindow) {
-            if (message.role && message.content) {
-                modelContextWindow.push({ role: message.role, content: message.content });
-            }
-
-            if (message.messageBlock && message.toolsData) {
-                const internal_message = this.connector.transformToolMessageBlocks({
-                    messageBlock: message?.messageBlock,
-                    toolsData: message?.toolsData,
-                });
-
-                modelContextWindow.push(...internal_message);
-            }
-        }
-
-        modelContextWindow = this.connector.getConsistentMessages(modelContextWindow);
-
-        return modelContextWindow;
-    }
-}
-
-function countTokens(content: any, model: 'gpt-4o' | 'gpt-4o-mini' = 'gpt-4o') {
-    try {
-        // Content must be stringified since some providers like Anthropic use object content
-        const _stringifiedContent = typeof content === 'string' ? content : JSON.stringify(content);
-
-        const tokens = encodeChat([{ role: 'user', content: _stringifiedContent } as ChatMessage], model);
-        return tokens.length;
-    } catch (error) {
-        console.warn('Error in countTokens: ', error);
-        return 0;
-    }
-}
+import _ from 'lodash';
+import { type OpenAI } from 'openai';
+import { encodeChat } from 'gpt-tokenizer';
+import { ChatMessage } from 'gpt-tokenizer/esm/GptEncoding';
+import { ConnectorService } from '@sre/Core/ConnectorsService';
+import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
+import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
+import { LLMConnector } from './LLM.service/LLMConnector';
+import { EventEmitter } from 'events';
+import { GenerateImageConfig, TLLMMessageRole, TLLMModel, TLLMChatResponse } from '@sre/types/LLM.types';
+import { IModelsProviderRequest, ModelsProviderConnector } from './ModelsProvider.service/ModelsProviderConnector';
+import { Logger } from '@sre/helpers/Log.helper';
+import { IAgent } from '@sre/types/Agent.types';
+import { isAgent } from '@sre/AgentManager/Agent.helper';
+import { TLLMParams } from '@sre/types/LLM.types';
+
+const console = Logger('LLMInference');
+
+type TPromptParams = { query?: string; contextWindow?: any[]; files?: any[]; params: TLLMParams };
+
+export class LLMInference {
+    private model: string | TLLMModel;
+    private llmConnector: LLMConnector;
+    private modelProviderReq: IModelsProviderRequest;
+    public teamId?: string;
+
+    public static async getInstance(model: string | TLLMModel, candidate: AccessCandidate) {
+        const modelsProvider: ModelsProviderConnector = ConnectorService.getModelsProviderConnector();
+        if (!modelsProvider.valid) {
+            throw new Error(`Model provider Not available, cannot create LLM instance`);
+        }
+        const accountConnector = ConnectorService.getAccountConnector();
+        const teamId = await accountConnector.requester(candidate).getTeam();
+
+        const llmInference = new LLMInference();
+        llmInference.teamId = teamId;
+
+        llmInference.modelProviderReq = modelsProvider.requester(candidate);
+
+        const llmProvider = await llmInference.modelProviderReq.getProvider(model);
+        if (llmProvider) {
+            llmInference.llmConnector = ConnectorService.getLLMConnector(llmProvider);
+        }
+
+        if (!llmInference.llmConnector) {
+            console.error(`Model ${model} unavailable for team ${teamId}`);
+        }
+
+        llmInference.model = model;
+
+        return llmInference;
+    }
+
+    public static user(candidate: AccessCandidate): any {}
+
+    public get connector(): LLMConnector {
+        return this.llmConnector;
+    }
+
+    public async prompt({ query, contextWindow, files, params }: TPromptParams) {
+        let messages = contextWindow || [];
+
+        if (query) {
+            const content = this.llmConnector.enhancePrompt(query, params);
+            messages.push({ role: TLLMMessageRole.User, content });
+        }
+
+        if (!params.model) params.model = this.model;
+        params.messages = messages;
+        params.files = files;
+
+        try {
+            let response: TLLMChatResponse = await this.llmConnector.requester(AccessCandidate.agent(params.agentId)).request(params);
+
+            const result = this.llmConnector.postProcess(response?.content);
+            if (result.error) {
+                // If the model stopped before completing the response, this is usually due to output token limit reached.
+                if (response.finishReason !== 'stop') {
+                    throw new Error('The model stopped before completing the response, this is usually due to output token limit reached.');
+                }
+
+                // If the model stopped due to other reasons, throw the error
+                throw new Error(result.error);
+            }
+            return result;
+        } catch (error: any) {
+            console.error('Error in chatRequest: ', error);
+
+            throw error;
+        }
+    }
+
+    public async promptStream({ query, contextWindow, files, params }: TPromptParams) {
+        let messages = contextWindow || [];
+
+        if (query) {
+            const content = this.llmConnector.enhancePrompt(query, params);
+            messages.push({ role: TLLMMessageRole.User, content });
+        }
+
+        if (!params.model) params.model = this.model;
+        params.messages = messages;
+        params.files = files;
+
+        try {
+            return await this.llmConnector.user(AccessCandidate.agent(params.agentId)).streamRequest(params);
+        } catch (error) {
+            console.error('Error in streamRequest:', error);
+
+            const dummyEmitter = new EventEmitter();
+            process.nextTick(() => {
+                dummyEmitter.emit('error', error);
+                dummyEmitter.emit('end');
+            });
+            return dummyEmitter;
+        }
+    }
+
+    public async imageGenRequest({ query, files, params }: TPromptParams) {
+        params.prompt = query;
+        return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageGenRequest(params);
+    }
+
+    public async imageEditRequest({ query, files, params }: TPromptParams) {
+        params.prompt = query;
+        params.files = files;
+        return this.llmConnector.user(AccessCandidate.agent(params.agentId)).imageEditRequest(params);
+    }
+
+    public async streamRequest(params: any, agent: string | IAgent) {
+        const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
+        try {
+            if (!params.messages || !params.messages?.length) {
+                throw new Error('Input messages are required.');
+            }
+
+            const model = params.model || this.model;
+
+            return await this.llmConnector.user(AccessCandidate.agent(agentId)).streamRequest({ ...params, model });
+        } catch (error) {
+            console.error('Error in streamRequest:', error);
+
+            const dummyEmitter = new EventEmitter();
+            process.nextTick(() => {
+                dummyEmitter.emit('error', error);
+                dummyEmitter.emit('end');
+            });
+            return dummyEmitter;
+        }
+    }
+
+    public async multimodalStreamRequest(params: any, fileSources, agent: string | IAgent) {
+        const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
+
+        const promises = [];
+        const _fileSources = [];
+
+        // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
+        for (let file of fileSources) {
+            const binaryInput = BinaryInput.from(file);
+            _fileSources.push(binaryInput);
+            promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
+        }
+
+        await Promise.all(promises);
+
+        params.fileSources = _fileSources;
+
+        try {
+            //FIXME we need to update the connector multimediaStreamRequest in order to ignore prompt param if not provided
+            const userMessage = Array.isArray(params.messages) ? params.messages.pop() : {};
+            const prompt = userMessage?.content || '';
+            const model = params.model || this.model;
+
+            return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
+        } catch (error: any) {
+            console.error('Error in multimodalRequest: ', error);
+
+            throw error;
+        }
+    }
+
+    public async multimodalStreamRequestLegacy(prompt, files: string[], config: any = {}, agent: string | IAgent) {
+        const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
+
+        const promises = [];
+        const _files = [];
+
+        // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
+        for (let file of files) {
+            const binaryInput = BinaryInput.from(file);
+            _files.push(binaryInput);
+            promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
+        }
+
+        await Promise.all(promises);
+
+        const params = config.data;
+
+        params.files = _files;
+
+        try {
+            prompt = this.llmConnector.enhancePrompt(prompt, config);
+            const model = params.model || this.model;
+
+            return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
+        } catch (error: any) {
+            console.error('Error in multimodalRequest: ', error);
+
+            throw error;
+        }
+    }
+
+    //Not needed
+    // public getConsistentMessages(messages: TLLMMessageBlock[]) {
+    //     if (!messages?.length) {
+    //         throw new Error('Input messages are required.');
+    //     }
+
+    //     try {
+    //         return this.llmConnector.getConsistentMessages(messages);
+    //     } catch (error) {
+    //         console.warn('Something went wrong in getConsistentMessages: ', error);
+
+    //         return messages; // if something went wrong then we return the original messages
+    //     }
+    // }
+
+    /**
+     * Get the context window for the given messages
+     * @param _messages - The messages to get the context window for (the messages are in smythos generic format)
+     * @param maxTokens - The maximum number of tokens to use for the context window
+     * @param maxOutputTokens - The maximum number of tokens to use for the output
+     * @returns The context window for the given messages
+     */
+    public async getContextWindow(systemPrompt: string, _messages: any[], maxTokens: number, maxOutputTokens: number = 1024): Promise<any[]> {
+        //TODO: handle non key accounts (limit tokens)
+        // const maxModelContext = this._llmHelper?.modelInfo?.keyOptions?.tokens || this._llmHelper?.modelInfo?.tokens || 256;
+
+        //#region get max model context
+
+        const modelInfo = await this.modelProviderReq.getModelInfo(this.model, true);
+        let maxModelContext = modelInfo?.tokens;
+        let maxModelOutputTokens = modelInfo?.completionTokens || modelInfo?.tokens;
+        // const isStandardLLM = LLMRegistry.isStandardLLM(this.model);
+
+        // if (isStandardLLM) {
+        //     maxModelContext = LLMRegistry.getMaxContextTokens(this.model, true); // we just provide true for hasAPIKey to get the original max context
+        // } else {
+        //     const team = AccessCandidate.team(this.teamId);
+        //     const customLLMRegistry = await CustomLLMRegistry.getInstance(team);
+        //     maxModelContext = customLLMRegistry.getMaxContextTokens(this.model);
+        //     maxModelOutputTokens = customLLMRegistry.getMaxCompletionTokens(this.model);
+        // }
+        //#endregion get max model context
+
+        let maxInputContext = Math.min(maxTokens, maxModelContext);
+        let maxOutputContext = Math.min(maxOutputTokens, maxModelOutputTokens || 0);
+
+        if (maxInputContext + maxOutputContext > maxModelContext) {
+            maxInputContext -= maxInputContext + maxOutputContext - maxModelContext;
+        }
+
+        if (maxInputContext <= 0) {
+            console.warn('Max input context is 0, returning empty context window, This usually indicates a wrong model configuration');
+        }
+
+        const systemMessage = { role: 'system', content: systemPrompt };
+
+        let smythContextWindow = [];
+
+        //loop through messages from last to first and use encodeChat to calculate token lengths
+        //we will use fake chatMessages to calculate the token lengths, these are not used by the LLM, but just for token counting
+        let tokensCount = encodeChat([systemMessage as ChatMessage], 'gpt-4o').length;
+        for (let i = _messages?.length - 1; i >= 0; i--) {
+            const curMessage = _messages[i];
+            if (curMessage.role === 'system') continue;
+
+            tokensCount = 0;
+            if (curMessage?.content) {
+                // tokensCount += encodeChat([{ role: 'user', content: curMessage.content } as ChatMessage], 'gpt-4o').length;
+                tokensCount += countTokens(curMessage.content);
+            }
+
+            if (curMessage?.messageBlock?.content) {
+                // tokensCount += encodeChat([{ role: 'user', content: curMessage.messageBlock.content } as ChatMessage], 'gpt-4o').length;
+                tokensCount += countTokens(curMessage.messageBlock.content);
+            }
+            if (curMessage.toolsData) {
+                for (let tool of curMessage.toolsData) {
+                    // tokensCount += encodeChat([{ role: 'user', content: tool.result } as ChatMessage], 'gpt-4o').length;
+                    tokensCount += countTokens(tool.result);
+                }
+            }
+
+            //did the last message exceed the context window ?
+            if (tokensCount > maxInputContext) {
+                break;
+            }
+
+            smythContextWindow.unshift(curMessage);
+        }
+        smythContextWindow.unshift(systemMessage);
+
+        let modelContextWindow = [];
+        //now transform the messages to the model format
+        for (let message of smythContextWindow) {
+            if (message.role && message.content) {
+                modelContextWindow.push({ role: message.role, content: message.content });
+            }
+
+            if (message.messageBlock && message.toolsData) {
+                const internal_message = this.connector.transformToolMessageBlocks({
+                    messageBlock: message?.messageBlock,
+                    toolsData: message?.toolsData,
+                });
+
+                modelContextWindow.push(...internal_message);
+            }
+        }
+
+        modelContextWindow = this.connector.getConsistentMessages(modelContextWindow);
+
+        return modelContextWindow;
+    }
+}
+
+function countTokens(content: any, model: 'gpt-4o' | 'gpt-4o-mini' = 'gpt-4o') {
+    try {
+        // Content must be stringified since some providers like Anthropic use object content
+        const _stringifiedContent = typeof content === 'string' ? content : JSON.stringify(content);
+
+        const tokens = encodeChat([{ role: 'user', content: _stringifiedContent } as ChatMessage], model);
+        return tokens.length;
+    } catch (error) {
+        console.warn('Error in countTokens: ', error);
+        return 0;
+    }
+}
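For orientation, below is a minimal usage sketch of the `LLMInference` class shown in the diff above. It is not part of the published package content: the import paths, model id, and agent id are assumptions for illustration, and the full `TLLMParams` shape is not visible in this file.

```ts
// Hypothetical usage sketch (not from the package): resolve an LLMInference
// instance for an agent and run a single prompt through it.
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
import { LLMInference } from '@sre/LLMManager/LLM.inference'; // assumed path alias

async function summarize(agentId: string) {
    // AccessCandidate.agent() is used the same way inside the class itself.
    const candidate = AccessCandidate.agent(agentId);

    // getInstance() resolves the models provider, the team id, and the LLM connector.
    const llm = await LLMInference.getInstance('gpt-4o', candidate); // placeholder model id

    // prompt() appends the query to the context window and forwards
    // model/messages/files/agentId to the resolved connector.
    return llm.prompt({
        query: 'Summarize the latest release notes.',
        params: { agentId } as any, // TLLMParams shape is only partially shown in this diff
    });
}
```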