@smythos/sre 1.5.0 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG +62 -0
- package/LICENSE +18 -0
- package/package.json +127 -115
- package/src/Components/APICall/APICall.class.ts +155 -0
- package/src/Components/APICall/AccessTokenManager.ts +130 -0
- package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -0
- package/src/Components/APICall/OAuth.helper.ts +294 -0
- package/src/Components/APICall/mimeTypeCategories.ts +46 -0
- package/src/Components/APICall/parseData.ts +167 -0
- package/src/Components/APICall/parseHeaders.ts +41 -0
- package/src/Components/APICall/parseProxy.ts +68 -0
- package/src/Components/APICall/parseUrl.ts +91 -0
- package/src/Components/APIEndpoint.class.ts +234 -0
- package/src/Components/APIOutput.class.ts +58 -0
- package/src/Components/AgentPlugin.class.ts +102 -0
- package/src/Components/Async.class.ts +155 -0
- package/src/Components/Await.class.ts +90 -0
- package/src/Components/Classifier.class.ts +158 -0
- package/src/Components/Component.class.ts +94 -0
- package/src/Components/ComponentHost.class.ts +38 -0
- package/src/Components/DataSourceCleaner.class.ts +92 -0
- package/src/Components/DataSourceIndexer.class.ts +181 -0
- package/src/Components/DataSourceLookup.class.ts +141 -0
- package/src/Components/FEncDec.class.ts +29 -0
- package/src/Components/FHash.class.ts +33 -0
- package/src/Components/FSign.class.ts +80 -0
- package/src/Components/FSleep.class.ts +25 -0
- package/src/Components/FTimestamp.class.ts +25 -0
- package/src/Components/FileStore.class.ts +75 -0
- package/src/Components/ForEach.class.ts +97 -0
- package/src/Components/GPTPlugin.class.ts +70 -0
- package/src/Components/GenAILLM.class.ts +395 -0
- package/src/Components/HuggingFace.class.ts +314 -0
- package/src/Components/Image/imageSettings.config.ts +70 -0
- package/src/Components/ImageGenerator.class.ts +407 -0
- package/src/Components/JSONFilter.class.ts +54 -0
- package/src/Components/LLMAssistant.class.ts +213 -0
- package/src/Components/LogicAND.class.ts +28 -0
- package/src/Components/LogicAtLeast.class.ts +85 -0
- package/src/Components/LogicAtMost.class.ts +86 -0
- package/src/Components/LogicOR.class.ts +29 -0
- package/src/Components/LogicXOR.class.ts +34 -0
- package/src/Components/MCPClient.class.ts +112 -0
- package/src/Components/PromptGenerator.class.ts +122 -0
- package/src/Components/ScrapflyWebScrape.class.ts +159 -0
- package/src/Components/TavilyWebSearch.class.ts +98 -0
- package/src/Components/index.ts +77 -0
- package/src/Core/AgentProcess.helper.ts +240 -0
- package/src/Core/Connector.class.ts +123 -0
- package/src/Core/ConnectorsService.ts +192 -0
- package/src/Core/DummyConnector.ts +49 -0
- package/src/Core/HookService.ts +105 -0
- package/src/Core/SmythRuntime.class.ts +292 -0
- package/src/Core/SystemEvents.ts +15 -0
- package/src/Core/boot.ts +55 -0
- package/src/config.ts +15 -0
- package/src/constants.ts +125 -0
- package/src/data/hugging-face.params.json +580 -0
- package/src/helpers/BinaryInput.helper.ts +324 -0
- package/src/helpers/Conversation.helper.ts +1094 -0
- package/src/helpers/JsonContent.helper.ts +97 -0
- package/src/helpers/LocalCache.helper.ts +97 -0
- package/src/helpers/Log.helper.ts +234 -0
- package/src/helpers/OpenApiParser.helper.ts +150 -0
- package/src/helpers/S3Cache.helper.ts +129 -0
- package/src/helpers/SmythURI.helper.ts +5 -0
- package/src/helpers/TemplateString.helper.ts +243 -0
- package/src/helpers/TypeChecker.helper.ts +329 -0
- package/src/index.ts +179 -0
- package/src/index.ts.bak +179 -0
- package/src/subsystems/AgentManager/Agent.class.ts +1108 -0
- package/src/subsystems/AgentManager/Agent.helper.ts +3 -0
- package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -0
- package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -0
- package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -0
- package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -0
- package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -0
- package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -0
- package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -0
- package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -0
- package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +59 -0
- package/src/subsystems/AgentManager/Component.service/index.ts +11 -0
- package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -0
- package/src/subsystems/AgentManager/ForkedAgent.class.ts +153 -0
- package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -0
- package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +99 -0
- package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +63 -0
- package/src/subsystems/ComputeManager/Code.service/index.ts +11 -0
- package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -0
- package/src/subsystems/IO/CLI.service/index.ts +9 -0
- package/src/subsystems/IO/Log.service/LogConnector.ts +32 -0
- package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -0
- package/src/subsystems/IO/Log.service/index.ts +13 -0
- package/src/subsystems/IO/NKV.service/NKVConnector.ts +41 -0
- package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -0
- package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -0
- package/src/subsystems/IO/NKV.service/index.ts +12 -0
- package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -0
- package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -0
- package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -0
- package/src/subsystems/IO/Router.service/index.ts +11 -0
- package/src/subsystems/IO/Storage.service/SmythFS.class.ts +472 -0
- package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -0
- package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +305 -0
- package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +418 -0
- package/src/subsystems/IO/Storage.service/index.ts +13 -0
- package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -0
- package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +450 -0
- package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +373 -0
- package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +420 -0
- package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +106 -0
- package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -0
- package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -0
- package/src/subsystems/IO/VectorDB.service/index.ts +14 -0
- package/src/subsystems/LLMManager/LLM.helper.ts +221 -0
- package/src/subsystems/LLMManager/LLM.inference.ts +335 -0
- package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +374 -0
- package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +145 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +632 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +405 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +81 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +689 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +257 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/OpenAI.class.ts +848 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +255 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +193 -0
- package/src/subsystems/LLMManager/LLM.service/index.ts +43 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +281 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.ts +229 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -0
- package/src/subsystems/LLMManager/custom-models.ts +854 -0
- package/src/subsystems/LLMManager/models.ts +2539 -0
- package/src/subsystems/LLMManager/paramMappings.ts +69 -0
- package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -0
- package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -0
- package/src/subsystems/MemoryManager/LLMCache.ts +72 -0
- package/src/subsystems/MemoryManager/LLMContext.ts +125 -0
- package/src/subsystems/MemoryManager/RuntimeContext.ts +249 -0
- package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -0
- package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +76 -0
- package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -0
- package/src/subsystems/Security/Account.service/AccountConnector.ts +41 -0
- package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -0
- package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -0
- package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -0
- package/src/subsystems/Security/Account.service/index.ts +14 -0
- package/src/subsystems/Security/Credentials.helper.ts +62 -0
- package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +34 -0
- package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +57 -0
- package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -0
- package/src/subsystems/Security/ManagedVault.service/index.ts +12 -0
- package/src/subsystems/Security/SecureConnector.class.ts +110 -0
- package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -0
- package/src/subsystems/Security/Vault.service/VaultConnector.ts +26 -0
- package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -0
- package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +166 -0
- package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -0
- package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -0
- package/src/subsystems/Security/Vault.service/index.ts +12 -0
- package/src/types/ACL.types.ts +104 -0
- package/src/types/AWS.types.ts +9 -0
- package/src/types/Agent.types.ts +61 -0
- package/src/types/AgentLogger.types.ts +17 -0
- package/src/types/Cache.types.ts +1 -0
- package/src/types/Common.types.ts +3 -0
- package/src/types/LLM.types.ts +419 -0
- package/src/types/Redis.types.ts +8 -0
- package/src/types/SRE.types.ts +64 -0
- package/src/types/Security.types.ts +18 -0
- package/src/types/Storage.types.ts +5 -0
- package/src/types/VectorDB.types.ts +78 -0
- package/src/utils/base64.utils.ts +275 -0
- package/src/utils/cli.utils.ts +68 -0
- package/src/utils/data.utils.ts +263 -0
- package/src/utils/date-time.utils.ts +22 -0
- package/src/utils/general.utils.ts +238 -0
- package/src/utils/index.ts +12 -0
- package/src/utils/numbers.utils.ts +13 -0
- package/src/utils/oauth.utils.ts +35 -0
- package/src/utils/string.utils.ts +414 -0
- package/src/utils/url.utils.ts +19 -0
- package/src/utils/validation.utils.ts +74 -0
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
import { type TLLMMessageBlock, TLLMMessageRole } from '@sre/types/LLM.types';
|
|
2
|
+
|
|
3
|
+
import axios from 'axios';
|
|
4
|
+
import imageSize from 'image-size';
|
|
5
|
+
import { encode } from 'gpt-tokenizer';
|
|
6
|
+
import { isBase64FileUrl, isUrl } from '@sre/utils';
|
|
7
|
+
|
|
8
|
+
export class LLMHelper {
|
|
9
|
+
/**
|
|
10
|
+
* Checks if the given array of messages contains a system message.
|
|
11
|
+
*
|
|
12
|
+
* @param {any} messages - The array of messages to check.
|
|
13
|
+
* @returns {boolean} True if a system message is found, false otherwise.
|
|
14
|
+
*
|
|
15
|
+
* @example
|
|
16
|
+
* const messages = [
|
|
17
|
+
* { role: 'user', content: 'Hello' },
|
|
18
|
+
* { role: 'system', content: 'You are a helpful assistant' }
|
|
19
|
+
* ];
|
|
20
|
+
* const hasSystem = LLMHelper.hasSystemMessage(messages);
|
|
21
|
+
* console.log(hasSystem); // true
|
|
22
|
+
*/
|
|
23
|
+
public static hasSystemMessage(messages: any): boolean {
|
|
24
|
+
if (!Array.isArray(messages)) return false;
|
|
25
|
+
return messages?.some((message) => message.role === 'system');
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
/**
|
|
29
|
+
* Separates system messages from other messages in an array of LLM message blocks.
|
|
30
|
+
*
|
|
31
|
+
* @param {TLLMMessageBlock[]} messages - The array of message blocks to process.
|
|
32
|
+
* @returns {Object} An object containing the system message (if any) and an array of other messages.
|
|
33
|
+
* @property {TLLMMessageBlock | {}} systemMessage - The first system message found, or an empty object if none.
|
|
34
|
+
* @property {TLLMMessageBlock[]} otherMessages - An array of all non-system messages.
|
|
35
|
+
*
|
|
36
|
+
* @example
|
|
37
|
+
* const messages = [
|
|
38
|
+
* { role: 'system', content: 'You are a helpful assistant' },
|
|
39
|
+
* { role: 'user', content: 'Hello' },
|
|
40
|
+
* { role: 'assistant', content: 'Hi there!' }
|
|
41
|
+
* ];
|
|
42
|
+
* const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);
|
|
43
|
+
* console.log(systemMessage); // { role: 'system', content: 'You are a helpful assistant' }
|
|
44
|
+
* console.log(otherMessages); // [{ role: 'user', content: 'Hello' }, { role: 'assistant', content: 'Hi there!' }]
|
|
45
|
+
*/
|
|
46
|
+
public static separateSystemMessages(messages: TLLMMessageBlock[]): {
|
|
47
|
+
systemMessage: TLLMMessageBlock | {};
|
|
48
|
+
otherMessages: TLLMMessageBlock[];
|
|
49
|
+
} {
|
|
50
|
+
const systemMessage = messages.find((message) => message.role === 'system') || {};
|
|
51
|
+
const otherMessages = messages.filter((message) => message.role !== 'system');
|
|
52
|
+
|
|
53
|
+
return { systemMessage, otherMessages };
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
/**
|
|
57
|
+
* Counts the total number of tokens in a vision prompt, including both text and image tokens.
|
|
58
|
+
*
|
|
59
|
+
* @param {any} prompt - The vision prompt object containing text and image items.
|
|
60
|
+
* @returns {Promise<number>} A promise that resolves to the total number of tokens in the prompt.
|
|
61
|
+
*
|
|
62
|
+
* @description
|
|
63
|
+
* This method processes a vision prompt by:
|
|
64
|
+
* 1. Counting tokens in the text portion of the prompt.
|
|
65
|
+
* 2. Calculating tokens for each image in the prompt based on its dimensions.
|
|
66
|
+
* 3. Summing up text and image tokens to get the total token count.
|
|
67
|
+
*
|
|
68
|
+
* @example
|
|
69
|
+
* const prompt = [
|
|
70
|
+
* { type: 'text', text: 'Describe this image:' },
|
|
71
|
+
* { type: 'image_url', image_url: { url: 'https://example.com/image.jpg' } }
|
|
72
|
+
* ];
|
|
73
|
+
* const tokenCount = await countVisionPromptTokens(prompt);
|
|
74
|
+
* console.log(tokenCount); // e.g., 150
|
|
75
|
+
*/
|
|
76
|
+
public static async countVisionPromptTokens(prompt: any): Promise<number> {
|
|
77
|
+
let tokens = 0;
|
|
78
|
+
|
|
79
|
+
const textObj = prompt?.filter((item) => item.type === 'text');
|
|
80
|
+
const textTokens = encode(textObj?.[0]?.text).length;
|
|
81
|
+
|
|
82
|
+
const images = prompt?.filter((item) => item.type === 'image_url');
|
|
83
|
+
let imageTokens = 0;
|
|
84
|
+
|
|
85
|
+
for (const image of images) {
|
|
86
|
+
const imageUrl = image?.image_url?.url;
|
|
87
|
+
const { width, height } = await this.getImageDimensions(imageUrl);
|
|
88
|
+
const tokens = this.countImageTokens(width, height);
|
|
89
|
+
imageTokens += tokens;
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
tokens = textTokens + imageTokens;
|
|
93
|
+
return tokens;
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
/**
|
|
97
|
+
* Retrieves the dimensions (width and height) of an image from a given URL or base64 encoded string.
|
|
98
|
+
*
|
|
99
|
+
* @param {string} imageUrl - The URL or base64 encoded string of the image.
|
|
100
|
+
* @returns {Promise<{ width: number; height: number }>} A promise that resolves to an object containing the width and height of the image.
|
|
101
|
+
* @throws {Error} If the provided imageUrl is invalid or if there's an error retrieving the image dimensions.
|
|
102
|
+
*
|
|
103
|
+
* @example
|
|
104
|
+
* // Using a URL
|
|
105
|
+
* const dimensions = await getImageDimensions('https://example.com/image.jpg');
|
|
106
|
+
* console.log(dimensions); // { width: 800, height: 600 }
|
|
107
|
+
*
|
|
108
|
+
* @example
|
|
109
|
+
* // Using a base64 encoded string
|
|
110
|
+
* const dimensions = await getImageDimensions('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAACklEQVR4nGMAAQAABQABDQottAAAAABJRU5ErkJggg==');
|
|
111
|
+
* console.log(dimensions); // { width: 1, height: 1 }
|
|
112
|
+
*/
|
|
113
|
+
public static async getImageDimensions(imageUrl: string): Promise<{ width: number; height: number }> {
|
|
114
|
+
try {
|
|
115
|
+
let buffer: Buffer;
|
|
116
|
+
|
|
117
|
+
if (isBase64FileUrl(imageUrl)) {
|
|
118
|
+
const base64Data = imageUrl.replace(/^data:image\/\w+;base64,/, '');
|
|
119
|
+
buffer = Buffer.from(base64Data, 'base64');
|
|
120
|
+
} else if (isUrl(imageUrl)) {
|
|
121
|
+
const response = await axios.get(imageUrl, { responseType: 'arraybuffer' });
|
|
122
|
+
buffer = Buffer.from(response.data);
|
|
123
|
+
} else {
|
|
124
|
+
throw new Error('Please provide a valid image url!');
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
const dimensions = imageSize(buffer);
|
|
128
|
+
|
|
129
|
+
return {
|
|
130
|
+
width: dimensions?.width || 0,
|
|
131
|
+
height: dimensions?.height || 0,
|
|
132
|
+
};
|
|
133
|
+
} catch (error) {
|
|
134
|
+
console.error('Error getting image dimensions', error);
|
|
135
|
+
throw new Error('Please provide a valid image url!');
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
/**
|
|
140
|
+
* Calculates the number of tokens required to process an image based on its dimensions and detail mode.
|
|
141
|
+
*
|
|
142
|
+
* @param {number} width - The width of the image in pixels.
|
|
143
|
+
* @param {number} height - The height of the image in pixels.
|
|
144
|
+
* @param {string} detailMode - The detail mode for processing the image. Defaults to 'auto'.
|
|
145
|
+
* @returns {number} The number of tokens required to process the image.
|
|
146
|
+
*
|
|
147
|
+
* @description
|
|
148
|
+
* This method estimates the token count for image processing based on the image dimensions and detail mode.
|
|
149
|
+
* It uses a tiling approach to calculate the token count, scaling the image if necessary.
|
|
150
|
+
*
|
|
151
|
+
* - If detailMode is 'low', it returns a fixed token count of 85.
|
|
152
|
+
* - For other modes, it calculates based on the image dimensions:
|
|
153
|
+
* - Scales down images larger than 2048 pixels in any dimension.
|
|
154
|
+
* - Adjusts the scaled dimension to fit within a 768x1024 aspect ratio.
|
|
155
|
+
* - Calculates the number of 512x512 tiles needed to cover the image.
|
|
156
|
+
* - Returns the total token count based on the number of tiles.
|
|
157
|
+
*
|
|
158
|
+
* @example
|
|
159
|
+
* const tokenCount = countImageTokens(1024, 768);
|
|
160
|
+
* console.log(tokenCount); // Outputs the calculated token count
|
|
161
|
+
*/
|
|
162
|
+
public static countImageTokens(width: number, height: number, detailMode: string = 'auto'): number {
|
|
163
|
+
if (detailMode === 'low') return 85;
|
|
164
|
+
|
|
165
|
+
const maxDimension = Math.max(width, height);
|
|
166
|
+
const minDimension = Math.min(width, height);
|
|
167
|
+
let scaledMinDimension = minDimension;
|
|
168
|
+
|
|
169
|
+
if (maxDimension > 2048) {
|
|
170
|
+
scaledMinDimension = (2048 / maxDimension) * minDimension;
|
|
171
|
+
}
|
|
172
|
+
scaledMinDimension = Math.floor((768 / 1024) * scaledMinDimension);
|
|
173
|
+
|
|
174
|
+
let tileSize = 512;
|
|
175
|
+
let tiles = Math.ceil(scaledMinDimension / tileSize);
|
|
176
|
+
|
|
177
|
+
if (minDimension !== scaledMinDimension) {
|
|
178
|
+
tiles *= Math.ceil((scaledMinDimension * (maxDimension / minDimension)) / tileSize);
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
return tiles * 170 + 85;
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
/**
|
|
185
|
+
* Removes duplicate user messages from the beginning and end of the messages array.
|
|
186
|
+
*
|
|
187
|
+
* This method checks if there are two consecutive user messages at the start or end of the array
|
|
188
|
+
*
|
|
189
|
+
* @param {Array<{ role: string; content: string }>} messages - The array of message objects to process.
|
|
190
|
+
*
|
|
191
|
+
* @example
|
|
192
|
+
* const messages = [
|
|
193
|
+
* { role: 'user', content: 'Hello' },
|
|
194
|
+
* { role: 'user', content: 'Hello' },
|
|
195
|
+
* { role: 'assistant', content: 'Hi there!' }
|
|
196
|
+
* ];
|
|
197
|
+
* LLMHelper.removeDuplicateUserMessages(messages);
|
|
198
|
+
* console.log(messages); // [{ role: 'user', content: 'Hello' }, { role: 'assistant', content: 'Hi there!' }]
|
|
199
|
+
*
|
|
200
|
+
* @returns {TLLMMessageBlock[]} The modified array of message objects.
|
|
201
|
+
*/
|
|
202
|
+
public static removeDuplicateUserMessages(messages: TLLMMessageBlock[]): TLLMMessageBlock[] {
|
|
203
|
+
const _messages = JSON.parse(JSON.stringify(messages));
|
|
204
|
+
|
|
205
|
+
// Check for two user messages at the beginning
|
|
206
|
+
if (_messages.length > 1 && _messages[0].role === TLLMMessageRole.User && _messages[1].role === TLLMMessageRole.User) {
|
|
207
|
+
_messages.shift(); // Remove the first user message
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
// Check for two user messages at the end
|
|
211
|
+
if (
|
|
212
|
+
_messages.length > 1 &&
|
|
213
|
+
_messages[_messages.length - 1].role === TLLMMessageRole.User &&
|
|
214
|
+
_messages[_messages.length - 2].role === TLLMMessageRole.User
|
|
215
|
+
) {
|
|
216
|
+
_messages.pop(); // Remove the last user message
|
|
217
|
+
}
|
|
218
|
+
|
|
219
|
+
return _messages;
|
|
220
|
+
}
|
|
221
|
+
}
|
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
import _ from 'lodash';
|
|
2
|
+
import { type OpenAI } from 'openai';
|
|
3
|
+
import { encodeChat } from 'gpt-tokenizer';
|
|
4
|
+
import { ChatMessage } from 'gpt-tokenizer/esm/GptEncoding';
|
|
5
|
+
import { ConnectorService } from '@sre/Core/ConnectorsService';
|
|
6
|
+
import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
|
|
7
|
+
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
|
|
8
|
+
import { LLMConnector } from './LLM.service/LLMConnector';
|
|
9
|
+
import { EventEmitter } from 'events';
|
|
10
|
+
import { GenerateImageConfig, TLLMMessageRole, TLLMModel, TLLMChatResponse } from '@sre/types/LLM.types';
|
|
11
|
+
import { IModelsProviderRequest, ModelsProviderConnector } from './ModelsProvider.service/ModelsProviderConnector';
|
|
12
|
+
import { Logger } from '@sre/helpers/Log.helper';
|
|
13
|
+
import { IAgent } from '@sre/types/Agent.types';
|
|
14
|
+
import { isAgent } from '@sre/AgentManager/Agent.helper';
|
|
15
|
+
import { TLLMParams } from '@sre/types/LLM.types';
|
|
16
|
+
|
|
17
|
+
const console = Logger('LLMInference');
|
|
18
|
+
|
|
19
|
+
type TPromptParams = { query?: string; contextWindow?: any[]; files?: any[]; params: TLLMParams };
|
|
20
|
+
|
|
21
|
+
export class LLMInference {
|
|
22
|
+
private model: string | TLLMModel;
|
|
23
|
+
private llmConnector: LLMConnector;
|
|
24
|
+
private modelProviderReq: IModelsProviderRequest;
|
|
25
|
+
public teamId?: string;
|
|
26
|
+
|
|
27
|
+
/**
 * Builds an LLMInference bound to the given model and access candidate.
 *
 * Resolves the models-provider and account connectors, captures the candidate's team id,
 * and looks up the LLM connector for the model's provider. When no connector can be
 * resolved the instance is still returned (with an error logged) — callers should check
 * `connector` before use.
 *
 * @param model - Model id or model descriptor to bind to.
 * @param candidate - Access candidate the requests will be scoped to.
 * @throws Error when the models provider connector is not available.
 */
public static async getInstance(model: string | TLLMModel, candidate: AccessCandidate) {
    const modelsProvider: ModelsProviderConnector = ConnectorService.getModelsProviderConnector();
    if (!modelsProvider.valid) {
        throw new Error(`Model provider Not available, cannot create LLM instance`);
    }

    const inference = new LLMInference();
    inference.teamId = await ConnectorService.getAccountConnector().requester(candidate).getTeam();
    inference.modelProviderReq = modelsProvider.requester(candidate);
    inference.model = model;

    const provider = await inference.modelProviderReq.getProvider(model);
    if (provider) {
        inference.llmConnector = ConnectorService.getLLMConnector(provider);
    }

    // Deliberately non-fatal: the instance is returned even without a connector.
    if (!inference.llmConnector) {
        console.error(`Model ${model} unavailable for team ${inference.teamId}`);
    }

    return inference;
}
|
|
53
|
+
|
|
54
|
+
// NOTE(review): unimplemented stub — always returns undefined. Presumably intended to
// mirror the connector-style `user(candidate)` accessor; confirm intent before relying on it.
public static user(candidate: AccessCandidate): any {}
|
|
55
|
+
|
|
56
|
+
/**
 * The LLM connector resolved for this instance's model.
 * May be undefined when getInstance() could not resolve a provider (it logs an error
 * in that case rather than throwing).
 */
public get connector(): LLMConnector {
    return this.llmConnector;
}
|
|
59
|
+
|
|
60
|
+
/**
 * Runs a (non-streaming) chat request against the bound LLM connector.
 *
 * Builds the message list from `contextWindow` plus an optional enhanced `query`,
 * fills in defaults on `params` (model, messages, files), and post-processes the
 * connector's response.
 *
 * @param query - Optional user query appended (after enhancePrompt) as a user message.
 * @param contextWindow - Prior messages; copied, never mutated.
 * @param files - Optional file attachments, forwarded on `params.files`.
 * @param params - Request parameters; `params.agentId` scopes access, `params.model`
 *                 defaults to this instance's model. NOTE: `params` itself is mutated.
 * @returns The post-processed result from the connector.
 * @throws Error when post-processing reports an error, or when the model stopped early
 *         (finishReason !== 'stop', usually the output token limit).
 */
public async prompt({ query, contextWindow, files, params }: TPromptParams) {
    // FIX: copy instead of pushing into the caller's contextWindow array.
    const messages = [...(contextWindow || [])];

    if (query) {
        const content = this.llmConnector.enhancePrompt(query, params);
        messages.push({ role: TLLMMessageRole.User, content });
    }

    if (!params.model) params.model = this.model;
    params.messages = messages;
    params.files = files;

    try {
        const response: TLLMChatResponse = await this.llmConnector.requester(AccessCandidate.agent(params.agentId)).request(params);

        const result = this.llmConnector.postProcess(response?.content);
        if (result.error) {
            // If the model stopped before completing the response, this is usually due to output token limit reached.
            if (response.finishReason !== 'stop') {
                throw new Error('The model stopped before completing the response, this is usually due to output token limit reached.');
            }

            // If the model stopped due to other reasons, throw the error
            throw new Error(result.error);
        }
        return result;
    } catch (error: any) {
        // FIX: the original log said 'Error in chatRequest', which is misleading here.
        console.error('Error in prompt request: ', error);

        throw error;
    }
}
|
|
92
|
+
|
|
93
|
+
/**
 * Runs a streaming chat request against the bound LLM connector.
 *
 * Builds the message list from `contextWindow` plus an optional enhanced `query` and
 * delegates to the connector's streamRequest. On failure, instead of throwing, returns
 * an EventEmitter that emits 'error' then 'end' on the next tick so stream consumers
 * get a uniform interface.
 *
 * @param query - Optional user query appended (after enhancePrompt) as a user message.
 * @param contextWindow - Prior messages; copied, never mutated.
 * @param files - Optional file attachments, forwarded on `params.files`.
 * @param params - Request parameters; `params.agentId` scopes access, `params.model`
 *                 defaults to this instance's model. NOTE: `params` itself is mutated.
 * @returns The connector's stream emitter, or a fallback emitter on error.
 */
public async promptStream({ query, contextWindow, files, params }: TPromptParams) {
    // FIX: copy instead of pushing into the caller's contextWindow array.
    const messages = [...(contextWindow || [])];

    if (query) {
        const content = this.llmConnector.enhancePrompt(query, params);
        messages.push({ role: TLLMMessageRole.User, content });
    }

    if (!params.model) params.model = this.model;
    params.messages = messages;
    params.files = files;

    try {
        return await this.llmConnector.user(AccessCandidate.agent(params.agentId)).streamRequest(params);
    } catch (error) {
        console.error('Error in streamRequest:', error);

        // Surface the failure through the same emitter interface callers expect.
        const dummyEmitter = new EventEmitter();
        process.nextTick(() => {
            dummyEmitter.emit('error', error);
            dummyEmitter.emit('end');
        });
        return dummyEmitter;
    }
}
|
|
118
|
+
|
|
119
|
+
/**
 * Issues an image-generation request through the bound LLM connector.
 * Uses `query` as the generation prompt; `files` is accepted but unused here.
 * NOTE: mutates `params` (sets `params.prompt`).
 */
public async imageGenRequest({ query, files, params }: TPromptParams) {
    params.prompt = query;
    const requester = this.llmConnector.user(AccessCandidate.agent(params.agentId));
    return requester.imageGenRequest(params);
}
|
|
123
|
+
|
|
124
|
+
/**
 * Issues an image-edit request through the bound LLM connector.
 * Uses `query` as the edit prompt and forwards `files` as the images to edit.
 * NOTE: mutates `params` (sets `params.prompt` and `params.files`).
 */
public async imageEditRequest({ query, files, params }: TPromptParams) {
    params.prompt = query;
    params.files = files;
    const requester = this.llmConnector.user(AccessCandidate.agent(params.agentId));
    return requester.imageEditRequest(params);
}
|
|
129
|
+
|
|
130
|
+
/**
 * Streams a raw LLM request with a caller-provided message list.
 *
 * Validates that `params.messages` is non-empty, defaults the model to this instance's
 * model, and delegates to the connector scoped to the given agent. On failure, returns
 * an EventEmitter that emits 'error' then 'end' on the next tick instead of throwing.
 *
 * @param params - Raw request parameters; must include a non-empty `messages` array.
 * @param agent - Agent id, or an IAgent whose id will be used.
 * @returns The connector's stream emitter, or a fallback emitter on error.
 */
public async streamRequest(params: any, agent: string | IAgent) {
    const agentId = isAgent(agent) ? (agent as IAgent).id : agent;
    try {
        // Equivalent to the original `!params.messages || !params.messages?.length` guard.
        if (!params.messages?.length) {
            throw new Error('Input messages are required.');
        }

        const model = params.model || this.model;
        const requester = this.llmConnector.user(AccessCandidate.agent(agentId));

        return await requester.streamRequest({ ...params, model });
    } catch (error) {
        console.error('Error in streamRequest:', error);

        // Surface the failure through the emitter interface stream consumers expect.
        const fallbackEmitter = new EventEmitter();
        process.nextTick(() => {
            fallbackEmitter.emit('error', error);
            fallbackEmitter.emit('end');
        });
        return fallbackEmitter;
    }
}
|
|
151
|
+
|
|
152
|
+
/**
 * Streams a multimodal request: uploads each file source for the agent, then delegates
 * to the connector's multimodalStreamRequest with the last user message as the prompt.
 *
 * Side effects on `params`: sets `params.fileSources` to the uploaded BinaryInputs, and
 * pops the last element of `params.messages` (consumed as the prompt).
 *
 * @param params - Request parameters; `params.messages`' last entry is used as the prompt.
 * @param fileSources - File sources convertible via BinaryInput.from().
 * @param agent - Agent id, or an IAgent whose id will be used.
 * @throws Rethrows any connector error after logging it.
 */
public async multimodalStreamRequest(params: any, fileSources, agent: string | IAgent) {
    const agentId = isAgent(agent) ? (agent as IAgent).id : agent;

    const promises = [];
    const _fileSources = [];

    // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
    for (let file of fileSources) {
        const binaryInput = BinaryInput.from(file);
        _fileSources.push(binaryInput);
        // Uploads run concurrently; awaited together below.
        promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
    }

    await Promise.all(promises);

    params.fileSources = _fileSources;

    try {
        //FIXME we need to update the connector multimediaStreamRequest in order to ignore prompt param if not provided
        // NOTE: pop() mutates params.messages — the last (presumably user) message is removed
        // and its content sent as the prompt.
        const userMessage = Array.isArray(params.messages) ? params.messages.pop() : {};
        const prompt = userMessage?.content || '';
        const model = params.model || this.model;

        return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
    } catch (error: any) {
        console.error('Error in multimodalRequest: ', error);

        throw error;
    }
}
|
|
182
|
+
|
|
183
|
+
/**
 * Legacy multimodal streaming entry point: uploads each file for the agent, then delegates
 * to the connector's multimodalStreamRequest with an enhanced prompt.
 *
 * @param prompt - The prompt text; passed through the connector's enhancePrompt first.
 * @param files - File references convertible via BinaryInput.from().
 * @param config - Legacy config object; `config.data` carries the request params.
 *                 NOTE(review): `config.data` is read without a guard — a config without
 *                 `data` throws on `params.files = _files`. Confirm callers always set it.
 * @param agent - Agent id, or an IAgent whose id will be used.
 * @throws Rethrows any connector error after logging it.
 */
public async multimodalStreamRequestLegacy(prompt, files: string[], config: any = {}, agent: string | IAgent) {
    const agentId = isAgent(agent) ? (agent as IAgent).id : agent;

    const promises = [];
    const _files = [];

    // TODO [Forhad]: For models from Google AI, we currently store files twice — once here and once in the GoogleAIConnector. We need to optimize this process.
    for (let file of files) {
        const binaryInput = BinaryInput.from(file);
        _files.push(binaryInput);
        // Uploads run concurrently; awaited together below.
        promises.push(binaryInput.upload(AccessCandidate.agent(agentId)));
    }

    await Promise.all(promises);

    const params = config.data;

    params.files = _files;

    try {
        prompt = this.llmConnector.enhancePrompt(prompt, config);
        const model = params.model || this.model;

        return await this.llmConnector.user(AccessCandidate.agent(agentId)).multimodalStreamRequest(prompt, { ...params, model });
    } catch (error: any) {
        console.error('Error in multimodalRequest: ', error);

        throw error;
    }
}
|
|
213
|
+
|
|
214
|
+
//Not needed
|
|
215
|
+
// public getConsistentMessages(messages: TLLMMessageBlock[]) {
|
|
216
|
+
// if (!messages?.length) {
|
|
217
|
+
// throw new Error('Input messages are required.');
|
|
218
|
+
// }
|
|
219
|
+
|
|
220
|
+
// try {
|
|
221
|
+
// return this.llmConnector.getConsistentMessages(messages);
|
|
222
|
+
// } catch (error) {
|
|
223
|
+
// console.warn('Something went wrong in getConsistentMessages: ', error);
|
|
224
|
+
|
|
225
|
+
// return messages; // if something went wrong then we return the original messages
|
|
226
|
+
// }
|
|
227
|
+
// }
|
|
228
|
+
|
|
229
|
+
    /**
     * Get the context window for the given messages.
     *
     * Builds a trimmed message list that fits within the model's context budget:
     * fetches the model's max context/completion tokens, caps them with the caller's
     * limits, walks the messages from newest to oldest adding those that fit, then
     * transforms the result into the connector's model-specific format.
     *
     * @param systemPrompt - System prompt prepended (as a `system` role message) to the window
     * @param _messages - The messages to get the context window for (in smythos generic format)
     * @param maxTokens - The maximum number of tokens to use for the context window
     * @param maxOutputTokens - The maximum number of tokens to use for the output (default 1024)
     * @returns The context window messages, transformed to the model format
     */
    public async getContextWindow(systemPrompt: string, _messages: any[], maxTokens: number, maxOutputTokens: number = 1024): Promise<any[]> {
        //TODO: handle non key accounts (limit tokens)
        // const maxModelContext = this._llmHelper?.modelInfo?.keyOptions?.tokens || this._llmHelper?.modelInfo?.tokens || 256;

        //#region get max model context

        const modelInfo = await this.modelProviderReq.getModelInfo(this.model, true);
        // NOTE(review): if modelInfo is missing, maxModelContext is undefined and the
        // Math.min/overflow arithmetic below produces NaN — confirm getModelInfo always resolves a model.
        let maxModelContext = modelInfo?.tokens;
        let maxModelOutputTokens = modelInfo?.completionTokens || modelInfo?.tokens;
        // const isStandardLLM = LLMRegistry.isStandardLLM(this.model);

        // if (isStandardLLM) {
        //     maxModelContext = LLMRegistry.getMaxContextTokens(this.model, true); // we just provide true for hasAPIKey to get the original max context
        // } else {
        //     const team = AccessCandidate.team(this.teamId);
        //     const customLLMRegistry = await CustomLLMRegistry.getInstance(team);
        //     maxModelContext = customLLMRegistry.getMaxContextTokens(this.model);
        //     maxModelOutputTokens = customLLMRegistry.getMaxCompletionTokens(this.model);
        // }
        //#endregion get max model context

        // Cap the caller's requested budgets by what the model actually supports.
        let maxInputContext = Math.min(maxTokens, maxModelContext);
        let maxOutputContext = Math.min(maxOutputTokens, maxModelOutputTokens || 0);

        // Shrink the input budget so input + reserved output never exceeds the model context.
        if (maxInputContext + maxOutputContext > maxModelContext) {
            maxInputContext -= maxInputContext + maxOutputContext - maxModelContext;
        }

        const systemMessage = { role: 'system', content: systemPrompt };

        let smythContextWindow = [];

        //loop through messages from last to first and use encodeChat to calculate token lengths
        //we will use fake chatMessages to calculate the token lengths, these are not used by the LLM, but just for token counting
        let tokensCount = encodeChat([systemMessage as ChatMessage], 'gpt-4o').length;
        for (let i = _messages?.length - 1; i >= 0; i--) {
            const curMessage = _messages[i];
            // System messages are excluded here; the systemPrompt is added separately below.
            if (curMessage.role === 'system') continue;

            // NOTE(review): tokensCount is reset to 0 each iteration, so the budget check below
            // is per-message, not cumulative, and the system-message count seeded above is
            // discarded on the first iteration — confirm this is intended (a cumulative check
            // would instead accumulate across iterations).
            tokensCount = 0;
            if (curMessage?.content) {
                // tokensCount += encodeChat([{ role: 'user', content: curMessage.content } as ChatMessage], 'gpt-4o').length;
                tokensCount += countTokens(curMessage.content);
            }

            if (curMessage?.messageBlock?.content) {
                // tokensCount += encodeChat([{ role: 'user', content: curMessage.messageBlock.content } as ChatMessage], 'gpt-4o').length;
                tokensCount += countTokens(curMessage.messageBlock.content);
            }
            if (curMessage.toolsData) {
                // Tool results also consume context; count each result separately.
                for (let tool of curMessage.toolsData) {
                    // tokensCount += encodeChat([{ role: 'user', content: tool.result } as ChatMessage], 'gpt-4o').length;
                    tokensCount += countTokens(tool.result);
                }
            }

            //did the last message exceed the context window ?
            if (tokensCount > maxInputContext) {
                break;
            }

            // Prepend so the window stays in chronological order while we walk backwards.
            smythContextWindow.unshift(curMessage);
        }
        smythContextWindow.unshift(systemMessage);

        let modelContextWindow = [];
        //now transform the messages to the model format
        for (let message of smythContextWindow) {
            if (message.role && message.content) {
                modelContextWindow.push({ role: message.role, content: message.content });
            }

            // Tool-call messages expand into one or more model-specific message blocks.
            if (message.messageBlock && message.toolsData) {
                const internal_message = this.connector.transformToolMessageBlocks({
                    messageBlock: message?.messageBlock,
                    toolsData: message?.toolsData,
                });

                modelContextWindow.push(...internal_message);
            }
        }

        // Let the connector normalize role ordering/content for its provider.
        modelContextWindow = this.connector.getConsistentMessages(modelContextWindow);

        return modelContextWindow;
    }
|
|
322
|
+
}
|
|
323
|
+
|
|
324
|
+
function countTokens(content: any, model: 'gpt-4o' | 'gpt-4o-mini' = 'gpt-4o') {
|
|
325
|
+
try {
|
|
326
|
+
// Content must be stringified since some providers like Anthropic use object content
|
|
327
|
+
const _stringifiedContent = typeof content === 'string' ? content : JSON.stringify(content);
|
|
328
|
+
|
|
329
|
+
const tokens = encodeChat([{ role: 'user', content: _stringifiedContent } as ChatMessage], model);
|
|
330
|
+
return tokens.length;
|
|
331
|
+
} catch (error) {
|
|
332
|
+
console.warn('Error in countTokens: ', error);
|
|
333
|
+
return 0;
|
|
334
|
+
}
|
|
335
|
+
}
|