@smythos/sre 1.5.46 → 1.5.50
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG +98 -90
- package/LICENSE +18 -18
- package/README.md +135 -135
- package/dist/bundle-analysis-lazy.html +4949 -0
- package/dist/bundle-analysis.html +4949 -0
- package/dist/index.js +3 -3
- package/dist/index.js.map +1 -1
- package/dist/types/Components/MCPClient.class.d.ts +1 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +1 -6
- package/dist/types/utils/package-manager.utils.d.ts +26 -0
- package/package.json +1 -1
- package/src/Components/APICall/APICall.class.ts +156 -156
- package/src/Components/APICall/AccessTokenManager.ts +130 -130
- package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
- package/src/Components/APICall/OAuth.helper.ts +294 -294
- package/src/Components/APICall/mimeTypeCategories.ts +46 -46
- package/src/Components/APICall/parseData.ts +167 -167
- package/src/Components/APICall/parseHeaders.ts +41 -41
- package/src/Components/APICall/parseProxy.ts +68 -68
- package/src/Components/APICall/parseUrl.ts +91 -91
- package/src/Components/APIEndpoint.class.ts +234 -234
- package/src/Components/APIOutput.class.ts +58 -58
- package/src/Components/AgentPlugin.class.ts +102 -102
- package/src/Components/Async.class.ts +155 -155
- package/src/Components/Await.class.ts +90 -90
- package/src/Components/Classifier.class.ts +158 -158
- package/src/Components/Component.class.ts +132 -132
- package/src/Components/ComponentHost.class.ts +38 -38
- package/src/Components/DataSourceCleaner.class.ts +92 -92
- package/src/Components/DataSourceIndexer.class.ts +181 -181
- package/src/Components/DataSourceLookup.class.ts +161 -161
- package/src/Components/ECMASandbox.class.ts +71 -71
- package/src/Components/FEncDec.class.ts +29 -29
- package/src/Components/FHash.class.ts +33 -33
- package/src/Components/FSign.class.ts +80 -80
- package/src/Components/FSleep.class.ts +25 -25
- package/src/Components/FTimestamp.class.ts +25 -25
- package/src/Components/FileStore.class.ts +78 -78
- package/src/Components/ForEach.class.ts +97 -97
- package/src/Components/GPTPlugin.class.ts +70 -70
- package/src/Components/GenAILLM.class.ts +586 -586
- package/src/Components/HuggingFace.class.ts +314 -314
- package/src/Components/Image/imageSettings.config.ts +70 -70
- package/src/Components/ImageGenerator.class.ts +502 -502
- package/src/Components/JSONFilter.class.ts +54 -54
- package/src/Components/LLMAssistant.class.ts +213 -213
- package/src/Components/LogicAND.class.ts +28 -28
- package/src/Components/LogicAtLeast.class.ts +85 -85
- package/src/Components/LogicAtMost.class.ts +86 -86
- package/src/Components/LogicOR.class.ts +29 -29
- package/src/Components/LogicXOR.class.ts +34 -34
- package/src/Components/MCPClient.class.ts +138 -112
- package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
- package/src/Components/MemoryReadKeyVal.class.ts +66 -66
- package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
- package/src/Components/MemoryWriteObject.class.ts +97 -97
- package/src/Components/MultimodalLLM.class.ts +128 -128
- package/src/Components/OpenAPI.class.ts +72 -72
- package/src/Components/PromptGenerator.class.ts +122 -122
- package/src/Components/ScrapflyWebScrape.class.ts +159 -159
- package/src/Components/ServerlessCode.class.ts +123 -123
- package/src/Components/TavilyWebSearch.class.ts +98 -98
- package/src/Components/VisionLLM.class.ts +104 -104
- package/src/Components/ZapierAction.class.ts +127 -127
- package/src/Components/index.ts +97 -97
- package/src/Core/AgentProcess.helper.ts +240 -240
- package/src/Core/Connector.class.ts +123 -123
- package/src/Core/ConnectorsService.ts +197 -197
- package/src/Core/DummyConnector.ts +49 -49
- package/src/Core/HookService.ts +105 -105
- package/src/Core/SmythRuntime.class.ts +235 -235
- package/src/Core/SystemEvents.ts +16 -16
- package/src/Core/boot.ts +56 -56
- package/src/config.ts +15 -15
- package/src/constants.ts +126 -126
- package/src/data/hugging-face.params.json +579 -579
- package/src/helpers/AWSLambdaCode.helper.ts +587 -587
- package/src/helpers/BinaryInput.helper.ts +331 -331
- package/src/helpers/Conversation.helper.ts +1119 -1119
- package/src/helpers/ECMASandbox.helper.ts +54 -54
- package/src/helpers/JsonContent.helper.ts +97 -97
- package/src/helpers/LocalCache.helper.ts +97 -97
- package/src/helpers/Log.helper.ts +274 -274
- package/src/helpers/OpenApiParser.helper.ts +150 -150
- package/src/helpers/S3Cache.helper.ts +147 -147
- package/src/helpers/SmythURI.helper.ts +5 -5
- package/src/helpers/Sysconfig.helper.ts +77 -77
- package/src/helpers/TemplateString.helper.ts +243 -243
- package/src/helpers/TypeChecker.helper.ts +329 -329
- package/src/index.ts +3 -3
- package/src/index.ts.bak +3 -3
- package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
- package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
- package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
- package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
- package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
- package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
- package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
- package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -297
- package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
- package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
- package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
- package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
- package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
- package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
- package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
- package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
- package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
- package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
- package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
- package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
- package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
- package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
- package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
- package/src/subsystems/IO/CLI.service/index.ts +9 -9
- package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
- package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
- package/src/subsystems/IO/Log.service/index.ts +13 -13
- package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
- package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
- package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
- package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
- package/src/subsystems/IO/NKV.service/index.ts +14 -14
- package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
- package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
- package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
- package/src/subsystems/IO/Router.service/index.ts +11 -11
- package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
- package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
- package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
- package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
- package/src/subsystems/IO/Storage.service/index.ts +13 -13
- package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
- package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
- package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
- package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
- package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
- package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
- package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
- package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
- package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
- package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
- package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
- package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
- package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
- package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
- package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
- package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
- package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
- package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
- package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -488
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
- package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
- package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
- package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
- package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
- package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
- package/src/subsystems/LLMManager/custom-models.ts +854 -854
- package/src/subsystems/LLMManager/models.ts +2540 -2540
- package/src/subsystems/LLMManager/paramMappings.ts +69 -69
- package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
- package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
- package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
- package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
- package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
- package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
- package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
- package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
- package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
- package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
- package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
- package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
- package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
- package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
- package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
- package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
- package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
- package/src/subsystems/Security/Account.service/index.ts +14 -14
- package/src/subsystems/Security/Credentials.helper.ts +62 -62
- package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
- package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
- package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
- package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
- package/src/subsystems/Security/SecureConnector.class.ts +110 -110
- package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
- package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
- package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
- package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
- package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
- package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
- package/src/subsystems/Security/Vault.service/index.ts +12 -12
- package/src/types/ACL.types.ts +104 -104
- package/src/types/AWS.types.ts +10 -10
- package/src/types/Agent.types.ts +61 -61
- package/src/types/AgentLogger.types.ts +17 -17
- package/src/types/Cache.types.ts +1 -1
- package/src/types/Common.types.ts +2 -2
- package/src/types/LLM.types.ts +496 -496
- package/src/types/Redis.types.ts +8 -8
- package/src/types/SRE.types.ts +64 -64
- package/src/types/Security.types.ts +14 -14
- package/src/types/Storage.types.ts +5 -5
- package/src/types/VectorDB.types.ts +86 -86
- package/src/utils/base64.utils.ts +275 -275
- package/src/utils/cli.utils.ts +68 -68
- package/src/utils/data.utils.ts +322 -322
- package/src/utils/date-time.utils.ts +22 -22
- package/src/utils/general.utils.ts +238 -238
- package/src/utils/index.ts +12 -12
- package/src/utils/lazy-client.ts +261 -261
- package/src/utils/numbers.utils.ts +13 -13
- package/src/utils/oauth.utils.ts +35 -35
- package/src/utils/string.utils.ts +414 -414
- package/src/utils/url.utils.ts +19 -19
- package/src/utils/validation.utils.ts +74 -74
- package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
|
@@ -1,586 +1,586 @@
|
|
|
1
|
-
import Joi from 'joi';
|
|
2
|
-
import { IAgent as Agent } from '@sre/types/Agent.types';
|
|
3
|
-
import { LLMInference } from '@sre/LLMManager/LLM.inference';
|
|
4
|
-
import { TemplateString } from '@sre/helpers/TemplateString.helper';
|
|
5
|
-
import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
|
|
6
|
-
import { getMimeType } from '@sre/utils/data.utils';
|
|
7
|
-
import { Component } from './Component.class';
|
|
8
|
-
import { formatDataForDebug } from '@sre/utils/data.utils';
|
|
9
|
-
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
|
|
10
|
-
|
|
11
|
-
//TODO : better handling of context window exceeding max length
|
|
12
|
-
|
|
13
|
-
export class GenAILLM extends Component {
|
|
14
|
-
protected schema = {
|
|
15
|
-
name: 'GenAILLM',
|
|
16
|
-
description: 'Use this component to generate a responses from an LLM',
|
|
17
|
-
settings: {
|
|
18
|
-
model: {
|
|
19
|
-
type: 'string',
|
|
20
|
-
max: 200,
|
|
21
|
-
required: true,
|
|
22
|
-
},
|
|
23
|
-
prompt: {
|
|
24
|
-
type: 'string',
|
|
25
|
-
max: 8_000_000,
|
|
26
|
-
label: 'Prompt',
|
|
27
|
-
},
|
|
28
|
-
temperature: {
|
|
29
|
-
type: 'number',
|
|
30
|
-
min: 0,
|
|
31
|
-
max: 5,
|
|
32
|
-
label: 'Temperature',
|
|
33
|
-
},
|
|
34
|
-
maxTokens: {
|
|
35
|
-
type: 'number',
|
|
36
|
-
min: 1,
|
|
37
|
-
label: 'Maximum Tokens',
|
|
38
|
-
},
|
|
39
|
-
stopSequences: {
|
|
40
|
-
type: 'string',
|
|
41
|
-
max: 400,
|
|
42
|
-
label: 'Stop Sequences',
|
|
43
|
-
allowEmpty: true,
|
|
44
|
-
},
|
|
45
|
-
topP: {
|
|
46
|
-
type: 'number',
|
|
47
|
-
min: 0,
|
|
48
|
-
max: 1,
|
|
49
|
-
label: 'Top P',
|
|
50
|
-
},
|
|
51
|
-
topK: {
|
|
52
|
-
type: 'number',
|
|
53
|
-
min: 0,
|
|
54
|
-
max: 500,
|
|
55
|
-
label: 'Top K',
|
|
56
|
-
},
|
|
57
|
-
frequencyPenalty: {
|
|
58
|
-
type: 'number',
|
|
59
|
-
min: 0,
|
|
60
|
-
max: 2,
|
|
61
|
-
label: 'Frequency Penalty',
|
|
62
|
-
},
|
|
63
|
-
presencePenalty: {
|
|
64
|
-
type: 'number',
|
|
65
|
-
min: 0,
|
|
66
|
-
max: 2,
|
|
67
|
-
label: 'Presence Penalty',
|
|
68
|
-
},
|
|
69
|
-
responseFormat: {
|
|
70
|
-
type: 'string',
|
|
71
|
-
valid: ['json', 'text'],
|
|
72
|
-
label: 'Response Format',
|
|
73
|
-
},
|
|
74
|
-
passthrough: {
|
|
75
|
-
type: 'boolean',
|
|
76
|
-
description: 'If true, the LLM response will be returned as is by the agent',
|
|
77
|
-
label: 'Passthrough',
|
|
78
|
-
},
|
|
79
|
-
useSystemPrompt: {
|
|
80
|
-
type: 'boolean',
|
|
81
|
-
description: 'If true, the component will use parent agent system prompt',
|
|
82
|
-
label: 'Use System Prompt',
|
|
83
|
-
},
|
|
84
|
-
useContextWindow: {
|
|
85
|
-
type: 'boolean',
|
|
86
|
-
description: 'If true, the component will use parent agent context window',
|
|
87
|
-
label: 'Use Context Window',
|
|
88
|
-
},
|
|
89
|
-
maxContextWindowLength: {
|
|
90
|
-
type: 'number',
|
|
91
|
-
min: 0,
|
|
92
|
-
description: 'The maximum number of messages to use from this component context window (if useContextWindow is true)',
|
|
93
|
-
label: 'Maximum Context Window Length',
|
|
94
|
-
},
|
|
95
|
-
|
|
96
|
-
// #region Web Search
|
|
97
|
-
useWebSearch: {
|
|
98
|
-
type: 'boolean',
|
|
99
|
-
description: 'If true, the component will use web search for additional context',
|
|
100
|
-
label: 'Use Web Search',
|
|
101
|
-
},
|
|
102
|
-
webSearchContextSize: {
|
|
103
|
-
type: 'string',
|
|
104
|
-
valid: ['high', 'medium', 'low'],
|
|
105
|
-
label: 'Web Search Context Size',
|
|
106
|
-
},
|
|
107
|
-
webSearchCity: {
|
|
108
|
-
type: 'string',
|
|
109
|
-
max: 100,
|
|
110
|
-
label: 'Web Search City',
|
|
111
|
-
allowEmpty: true,
|
|
112
|
-
},
|
|
113
|
-
webSearchCountry: {
|
|
114
|
-
type: 'string',
|
|
115
|
-
max: 2,
|
|
116
|
-
label: 'Web Search Country',
|
|
117
|
-
allowEmpty: true,
|
|
118
|
-
},
|
|
119
|
-
webSearchRegion: {
|
|
120
|
-
type: 'string',
|
|
121
|
-
max: 100,
|
|
122
|
-
label: 'Web Search Region',
|
|
123
|
-
allowEmpty: true,
|
|
124
|
-
},
|
|
125
|
-
webSearchTimezone: {
|
|
126
|
-
type: 'string',
|
|
127
|
-
max: 100,
|
|
128
|
-
label: 'Web Search Timezone',
|
|
129
|
-
allowEmpty: true,
|
|
130
|
-
},
|
|
131
|
-
// #endregion
|
|
132
|
-
|
|
133
|
-
// #region xAI Search
|
|
134
|
-
useSearch: {
|
|
135
|
-
type: 'boolean',
|
|
136
|
-
description: 'If true, the component will use xAI live search capabilities',
|
|
137
|
-
label: 'Use Search',
|
|
138
|
-
allowEmpty: true,
|
|
139
|
-
},
|
|
140
|
-
searchMode: {
|
|
141
|
-
type: 'string',
|
|
142
|
-
valid: ['auto', 'on', 'off'],
|
|
143
|
-
label: 'Search Mode',
|
|
144
|
-
allowEmpty: true,
|
|
145
|
-
},
|
|
146
|
-
returnCitations: {
|
|
147
|
-
type: 'boolean',
|
|
148
|
-
description: 'If true, include citations and sources in the response',
|
|
149
|
-
label: 'Return Citations',
|
|
150
|
-
allowEmpty: true,
|
|
151
|
-
},
|
|
152
|
-
maxSearchResults: {
|
|
153
|
-
type: 'number',
|
|
154
|
-
min: 1,
|
|
155
|
-
max: 50,
|
|
156
|
-
label: 'Max Search Results',
|
|
157
|
-
allowEmpty: true,
|
|
158
|
-
},
|
|
159
|
-
searchDataSources: {
|
|
160
|
-
type: 'array',
|
|
161
|
-
max: 4,
|
|
162
|
-
label: 'Search Data Sources',
|
|
163
|
-
allowEmpty: true,
|
|
164
|
-
},
|
|
165
|
-
searchCountry: {
|
|
166
|
-
type: 'string',
|
|
167
|
-
max: 2,
|
|
168
|
-
label: 'Search Country',
|
|
169
|
-
allowEmpty: true,
|
|
170
|
-
},
|
|
171
|
-
excludedWebsites: {
|
|
172
|
-
type: 'string',
|
|
173
|
-
max: 10000,
|
|
174
|
-
label: 'Excluded Websites',
|
|
175
|
-
allowEmpty: true,
|
|
176
|
-
},
|
|
177
|
-
allowedWebsites: {
|
|
178
|
-
type: 'string',
|
|
179
|
-
max: 10000,
|
|
180
|
-
label: 'Allowed Websites',
|
|
181
|
-
allowEmpty: true,
|
|
182
|
-
},
|
|
183
|
-
includedXHandles: {
|
|
184
|
-
type: 'string',
|
|
185
|
-
max: 1000,
|
|
186
|
-
label: 'Included X Handles',
|
|
187
|
-
allowEmpty: true,
|
|
188
|
-
},
|
|
189
|
-
excludedXHandles: {
|
|
190
|
-
type: 'string',
|
|
191
|
-
max: 1000,
|
|
192
|
-
label: 'Excluded X Handles',
|
|
193
|
-
allowEmpty: true,
|
|
194
|
-
},
|
|
195
|
-
postFavoriteCount: {
|
|
196
|
-
type: 'number',
|
|
197
|
-
min: 0,
|
|
198
|
-
max: 1000000000,
|
|
199
|
-
label: 'Post Favorite Count',
|
|
200
|
-
allowEmpty: true,
|
|
201
|
-
},
|
|
202
|
-
postViewCount: {
|
|
203
|
-
type: 'number',
|
|
204
|
-
min: 0,
|
|
205
|
-
max: 1000000000,
|
|
206
|
-
label: 'Post View Count',
|
|
207
|
-
allowEmpty: true,
|
|
208
|
-
},
|
|
209
|
-
link: {
|
|
210
|
-
type: 'string',
|
|
211
|
-
max: 5000,
|
|
212
|
-
label: 'RSS Link',
|
|
213
|
-
allowEmpty: true,
|
|
214
|
-
},
|
|
215
|
-
safeSearch: {
|
|
216
|
-
type: 'boolean',
|
|
217
|
-
description: 'If true, enable safe search filtering',
|
|
218
|
-
label: 'Safe Search',
|
|
219
|
-
allowEmpty: true,
|
|
220
|
-
},
|
|
221
|
-
fromDate: {
|
|
222
|
-
type: 'string',
|
|
223
|
-
max: 10,
|
|
224
|
-
label: 'From Date',
|
|
225
|
-
allowEmpty: true,
|
|
226
|
-
},
|
|
227
|
-
toDate: {
|
|
228
|
-
type: 'string',
|
|
229
|
-
max: 10,
|
|
230
|
-
label: 'To Date',
|
|
231
|
-
allowEmpty: true,
|
|
232
|
-
},
|
|
233
|
-
// #endregion
|
|
234
|
-
|
|
235
|
-
// #region Reasoning
|
|
236
|
-
useReasoning: {
|
|
237
|
-
type: 'boolean',
|
|
238
|
-
description: 'If true, the component will use reasoning capabilities for complex problem-solving',
|
|
239
|
-
label: 'Use Reasoning',
|
|
240
|
-
},
|
|
241
|
-
verbosity: {
|
|
242
|
-
type: 'string',
|
|
243
|
-
valid: ['low', 'medium', 'high'],
|
|
244
|
-
label: 'Verbosity',
|
|
245
|
-
allowEmpty: true,
|
|
246
|
-
},
|
|
247
|
-
reasoningEffort: {
|
|
248
|
-
type: 'string',
|
|
249
|
-
valid: ['none', 'default', 'low', 'medium', 'high'],
|
|
250
|
-
description: 'Controls the level of effort the model will put into reasoning',
|
|
251
|
-
label: 'Reasoning Effort',
|
|
252
|
-
},
|
|
253
|
-
maxThinkingTokens: {
|
|
254
|
-
type: 'number',
|
|
255
|
-
min: 1,
|
|
256
|
-
label: 'Maximum Thinking Tokens',
|
|
257
|
-
},
|
|
258
|
-
// #endregion
|
|
259
|
-
},
|
|
260
|
-
inputs: {
|
|
261
|
-
Input: {
|
|
262
|
-
type: 'Any',
|
|
263
|
-
description: 'An input that you can pass to the LLM',
|
|
264
|
-
},
|
|
265
|
-
Attachment: {
|
|
266
|
-
type: 'Binary',
|
|
267
|
-
description: 'An attachment that you can pass to the LLM',
|
|
268
|
-
optional: true,
|
|
269
|
-
},
|
|
270
|
-
},
|
|
271
|
-
outputs: {
|
|
272
|
-
Reply: {
|
|
273
|
-
default: true,
|
|
274
|
-
},
|
|
275
|
-
},
|
|
276
|
-
};
|
|
277
|
-
protected configSchema = Joi.object({
|
|
278
|
-
model: Joi.string().max(200).required(),
|
|
279
|
-
prompt: Joi.string().required().max(8_000_000).label('Prompt'), // 2M tokens is around 8M characters
|
|
280
|
-
temperature: Joi.number().min(0).max(5).label('Temperature'), // max temperature is 2 for OpenAI and togetherAI but 5 for cohere
|
|
281
|
-
maxTokens: Joi.number().min(1).label('Maximum Tokens'),
|
|
282
|
-
stopSequences: Joi.string().allow('').max(400).label('Stop Sequences'),
|
|
283
|
-
topP: Joi.number().min(0).max(1).label('Top P'),
|
|
284
|
-
topK: Joi.number().min(0).max(500).label('Top K'), // max top_k is 100 for togetherAI but 500 for cohere
|
|
285
|
-
frequencyPenalty: Joi.number().min(0).max(2).label('Frequency Penalty'),
|
|
286
|
-
presencePenalty: Joi.number().min(0).max(2).label('Presence Penalty'),
|
|
287
|
-
responseFormat: Joi.string().valid('json', 'text').allow('').optional().label('Response Format'),
|
|
288
|
-
passthrough: Joi.boolean().optional().label('Passthrough'),
|
|
289
|
-
useSystemPrompt: Joi.boolean().optional().label('Use System Prompt'),
|
|
290
|
-
useContextWindow: Joi.boolean().optional().label('Use Context Window'),
|
|
291
|
-
maxContextWindowLength: Joi.number().optional().min(0).label('Maximum Context Window Length'),
|
|
292
|
-
verbosity: Joi.string().valid('low', 'medium', 'high').optional().allow('').allow(null).label('Verbosity'),
|
|
293
|
-
|
|
294
|
-
// #region Web Search
|
|
295
|
-
useWebSearch: Joi.boolean().optional().label('Use Web Search'),
|
|
296
|
-
webSearchContextSize: Joi.string().valid('high', 'medium', 'low').optional().label('Web Search Context Size'),
|
|
297
|
-
webSearchCity: Joi.string().max(100).optional().allow('').label('Web Search City'),
|
|
298
|
-
webSearchCountry: Joi.string().max(2).optional().allow('').label('Web Search Country'),
|
|
299
|
-
webSearchRegion: Joi.string().max(100).optional().allow('').label('Web Search Region'),
|
|
300
|
-
webSearchTimezone: Joi.string().max(100).optional().allow('').label('Web Search Timezone'),
|
|
301
|
-
// #endregion
|
|
302
|
-
|
|
303
|
-
// #region xAI Search
|
|
304
|
-
useSearch: Joi.boolean().optional().allow('').label('Use Search'),
|
|
305
|
-
searchMode: Joi.string().valid('auto', 'on', 'off').optional().allow('').label('Search Mode'),
|
|
306
|
-
returnCitations: Joi.boolean().optional().allow('').label('Return Citations'),
|
|
307
|
-
maxSearchResults: Joi.number().min(1).max(100).optional().allow('').label('Max Search Results'),
|
|
308
|
-
searchDataSources: Joi.array().items(Joi.string().valid('web', 'x', 'news', 'rss')).max(4).optional().allow('').label('Search Data Sources'),
|
|
309
|
-
searchCountry: Joi.string().length(2).optional().allow('').label('Search Country'),
|
|
310
|
-
excludedWebsites: Joi.string().max(10000).optional().allow('').label('Excluded Websites'),
|
|
311
|
-
allowedWebsites: Joi.string().max(10000).optional().allow('').label('Allowed Websites'),
|
|
312
|
-
includedXHandles: Joi.string().max(1000).optional().allow('').label('Included X Handles'),
|
|
313
|
-
excludedXHandles: Joi.string().max(1000).optional().allow('').label('Excluded X Handles'),
|
|
314
|
-
postFavoriteCount: Joi.number().min(0).max(1000000000).optional().allow('').label('Post Favorite Count'),
|
|
315
|
-
postViewCount: Joi.number().min(0).max(1000000000).optional().allow('').label('Post View Count'),
|
|
316
|
-
rssLinks: Joi.string().max(10000).optional().allow('').label('RSS Link'),
|
|
317
|
-
safeSearch: Joi.boolean().optional().allow('').label('Safe Search'),
|
|
318
|
-
fromDate: Joi.string()
|
|
319
|
-
.pattern(/^\d{4}-\d{2}-\d{2}$/)
|
|
320
|
-
.optional()
|
|
321
|
-
.allow('')
|
|
322
|
-
.label('From Date'),
|
|
323
|
-
toDate: Joi.string()
|
|
324
|
-
.pattern(/^\d{4}-\d{2}-\d{2}$/)
|
|
325
|
-
.optional()
|
|
326
|
-
.allow('')
|
|
327
|
-
.label('To Date'),
|
|
328
|
-
// #endregion
|
|
329
|
-
|
|
330
|
-
// #region Reasoning
|
|
331
|
-
useReasoning: Joi.boolean().optional().label('Use Reasoning'),
|
|
332
|
-
reasoningEffort: Joi.string().valid('none', 'default', 'minimal', 'low', 'medium', 'high').optional().allow('').label('Reasoning Effort'),
|
|
333
|
-
maxThinkingTokens: Joi.number().min(1).optional().label('Maximum Thinking Tokens'),
|
|
334
|
-
// #endregion
|
|
335
|
-
});
|
|
336
|
-
constructor() {
|
|
337
|
-
super();
|
|
338
|
-
}
|
|
339
|
-
init() {}
|
|
340
|
-
async process(input, config, agent: Agent) {
|
|
341
|
-
await super.process(input, config, agent);
|
|
342
|
-
|
|
343
|
-
//let debugLog = agent.agentRuntime?.debug ? [] : undefined;
|
|
344
|
-
const logger = this.createComponentLogger(agent, config);
|
|
345
|
-
|
|
346
|
-
try {
|
|
347
|
-
logger.debug(`=== GenAILLM Log ===`);
|
|
348
|
-
let teamId = agent?.teamId;
|
|
349
|
-
|
|
350
|
-
const passThrough: boolean = config.data.passthrough || false;
|
|
351
|
-
const useContextWindow: boolean = config.data.useContextWindow || false;
|
|
352
|
-
const useSystemPrompt: boolean = config.data.useSystemPrompt || false;
|
|
353
|
-
const useWebSearch: boolean = config.data.useWebSearch || false;
|
|
354
|
-
const maxTokens: number = parseInt(config.data.maxTokens) || 1024;
|
|
355
|
-
const maxContextWindowLength: number = parseInt(config.data.maxContextWindowLength) || 1024;
|
|
356
|
-
const model: string = config.data.model || 'echo';
|
|
357
|
-
const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));
|
|
358
|
-
|
|
359
|
-
// if the llm is undefined, then it means we removed the model from our system
|
|
360
|
-
if (!llmInference.connector) {
|
|
361
|
-
return {
|
|
362
|
-
_error: `The model '${model}' is not available. Please try a different one.`,
|
|
363
|
-
_debug: logger.output,
|
|
364
|
-
};
|
|
365
|
-
}
|
|
366
|
-
|
|
367
|
-
//const team = AccessCandidate.team(teamId);
|
|
368
|
-
//const llmRegistry = isStandardLLM ? LLMRegistry : await CustomLLMRegistry.getInstance(team);
|
|
369
|
-
const modelId = await agent.modelsProvider.getModelId(model);
|
|
370
|
-
|
|
371
|
-
logger.debug(` Model : ${modelId || model}`);
|
|
372
|
-
|
|
373
|
-
let prompt: any = TemplateString(config.data.prompt).parse(input).result;
|
|
374
|
-
|
|
375
|
-
let files: any[] = parseFiles(input, config);
|
|
376
|
-
let isMultimodalRequest = false;
|
|
377
|
-
const provider = await agent.modelsProvider.getProvider(model);
|
|
378
|
-
const isEcho = provider === 'Echo';
|
|
379
|
-
|
|
380
|
-
// Ignore files for Echo model
|
|
381
|
-
if (files?.length > 0 && !isEcho) {
|
|
382
|
-
// TODO: simplify the valid files checking logic
|
|
383
|
-
const supportedFileTypes = SUPPORTED_MIME_TYPES_MAP?.[provider] || {};
|
|
384
|
-
const modelInfo = await agent.modelsProvider.getModelInfo(model);
|
|
385
|
-
const features = modelInfo?.features || [];
|
|
386
|
-
const fileTypes = new Set(); // Set to avoid duplicates
|
|
387
|
-
|
|
388
|
-
const validFiles = await Promise.all(
|
|
389
|
-
files.map(async (file) => {
|
|
390
|
-
const mimeType = file?.mimetype || (await getMimeType(file));
|
|
391
|
-
const [requestFeature = ''] =
|
|
392
|
-
Object.entries(supportedFileTypes).find(([key, value]) => (value as string[]).includes(mimeType)) || [];
|
|
393
|
-
|
|
394
|
-
if (mimeType) {
|
|
395
|
-
fileTypes.add(mimeType);
|
|
396
|
-
}
|
|
397
|
-
|
|
398
|
-
return features?.includes(requestFeature) ? file : null;
|
|
399
|
-
})
|
|
400
|
-
);
|
|
401
|
-
|
|
402
|
-
files = validFiles.filter(Boolean);
|
|
403
|
-
|
|
404
|
-
if (files.length === 0) {
|
|
405
|
-
return {
|
|
406
|
-
_error: `Model does not support ${fileTypes?.size > 0 ? Array.from(fileTypes).join(', ') : 'File(s)'}`,
|
|
407
|
-
_debug: logger.output,
|
|
408
|
-
};
|
|
409
|
-
}
|
|
410
|
-
|
|
411
|
-
isMultimodalRequest = true;
|
|
412
|
-
}
|
|
413
|
-
|
|
414
|
-
logger.debug(` Prompt\n`, prompt, '\n');
|
|
415
|
-
|
|
416
|
-
if (!isEcho) {
|
|
417
|
-
logger.debug(' Files\n', await Promise.all(files.map((file) => formatDataForDebug(file, AccessCandidate.agent(agent.id)))));
|
|
418
|
-
}
|
|
419
|
-
|
|
420
|
-
// default to json response format
|
|
421
|
-
const hasCustomOutputs = config?.outputs?.some((output) => !output.default);
|
|
422
|
-
config.data.responseFormat = config.data?.responseFormat || (hasCustomOutputs ? 'json' : '');
|
|
423
|
-
|
|
424
|
-
// request to LLM
|
|
425
|
-
let response: any;
|
|
426
|
-
|
|
427
|
-
const _prompt = llmInference.connector.enhancePrompt(prompt, config);
|
|
428
|
-
let messages = [];
|
|
429
|
-
|
|
430
|
-
let systemPrompt = '';
|
|
431
|
-
if (useSystemPrompt) {
|
|
432
|
-
//first we try to grab the system prompt from llmCache (in case of a ConversationHelper implementing dynamic system prompt)
|
|
433
|
-
const cachedPrompt = await agent.agentRuntime.llmCache.get('systemPrompt', 'text');
|
|
434
|
-
//if not found, we can read the system prompt from agent data.
|
|
435
|
-
systemPrompt = cachedPrompt || agent.data?.behavior || '';
|
|
436
|
-
|
|
437
|
-
if (systemPrompt) {
|
|
438
|
-
logger.debug(' Using Agent System Prompt\n', systemPrompt);
|
|
439
|
-
}
|
|
440
|
-
if (systemPrompt) {
|
|
441
|
-
messages = [{ role: 'system', content: systemPrompt }];
|
|
442
|
-
}
|
|
443
|
-
}
|
|
444
|
-
|
|
445
|
-
if (useContextWindow) {
|
|
446
|
-
const cachedMessages = await agent.agentRuntime.llmCache.get('messages', 'json');
|
|
447
|
-
try {
|
|
448
|
-
const messagesJSON = typeof cachedMessages === 'string' ? JSON.parse(cachedMessages) : cachedMessages;
|
|
449
|
-
//const contextWindow = messagesJSON.filter((message) => message.role !== 'user');
|
|
450
|
-
|
|
451
|
-
const convMessages = await llmInference.getContextWindow(systemPrompt, messagesJSON, maxContextWindowLength, maxTokens);
|
|
452
|
-
|
|
453
|
-
if (convMessages.length > 0) {
|
|
454
|
-
logger.debug(` Using Agent Context Window : ${convMessages.length - 1} messages will be used`);
|
|
455
|
-
}
|
|
456
|
-
|
|
457
|
-
messages = [...convMessages];
|
|
458
|
-
//messages.push(...contextWindowJSON);
|
|
459
|
-
} catch (error) {
|
|
460
|
-
logger.warn('Error on parsing context window: ', error);
|
|
461
|
-
console.warn(cachedMessages);
|
|
462
|
-
}
|
|
463
|
-
}
|
|
464
|
-
|
|
465
|
-
if (messages[messages.length - 1]?.role == 'user') {
|
|
466
|
-
messages[messages.length - 1].content = _prompt;
|
|
467
|
-
} else {
|
|
468
|
-
messages.push({ role: 'user', content: _prompt });
|
|
469
|
-
}
|
|
470
|
-
let finishReason = 'stop';
|
|
471
|
-
const contentPromise = new Promise(async (resolve, reject) => {
|
|
472
|
-
let _content = '';
|
|
473
|
-
let eventEmitter;
|
|
474
|
-
|
|
475
|
-
eventEmitter = await llmInference
|
|
476
|
-
.promptStream({
|
|
477
|
-
contextWindow: messages,
|
|
478
|
-
files,
|
|
479
|
-
params: {
|
|
480
|
-
...config.data,
|
|
481
|
-
agentId: agent.id,
|
|
482
|
-
},
|
|
483
|
-
})
|
|
484
|
-
.catch((error) => {
|
|
485
|
-
console.error('Error on promptStream: ', error);
|
|
486
|
-
reject(error);
|
|
487
|
-
});
|
|
488
|
-
|
|
489
|
-
eventEmitter.on('content', (content) => {
|
|
490
|
-
if (passThrough) {
|
|
491
|
-
if (typeof agent.callback === 'function') {
|
|
492
|
-
agent.callback({ content });
|
|
493
|
-
}
|
|
494
|
-
agent.sse.send('llm/passthrough/content', content.replace(/\n/g, '\\n'));
|
|
495
|
-
}
|
|
496
|
-
_content += content;
|
|
497
|
-
});
|
|
498
|
-
|
|
499
|
-
eventEmitter.on('thinking', (thinking) => {
|
|
500
|
-
if (passThrough) {
|
|
501
|
-
if (typeof agent.callback === 'function') {
|
|
502
|
-
agent.callback({ thinking });
|
|
503
|
-
}
|
|
504
|
-
agent.sse.send('llm/passthrough/thinking', thinking.replace(/\n/g, '\\n'));
|
|
505
|
-
}
|
|
506
|
-
});
|
|
507
|
-
eventEmitter.on('end', () => {
|
|
508
|
-
if (passThrough) {
|
|
509
|
-
if (typeof agent.callback === 'function') {
|
|
510
|
-
agent.callback({ content: '\n' });
|
|
511
|
-
}
|
|
512
|
-
agent.sse.send('llm/passthrough/content', '\\n');
|
|
513
|
-
}
|
|
514
|
-
resolve(_content);
|
|
515
|
-
});
|
|
516
|
-
eventEmitter.on('interrupted', (reason) => {
|
|
517
|
-
finishReason = reason || 'stop';
|
|
518
|
-
});
|
|
519
|
-
|
|
520
|
-
eventEmitter.on('error', (error) => {
|
|
521
|
-
reject(error);
|
|
522
|
-
});
|
|
523
|
-
});
|
|
524
|
-
response = await contentPromise.catch((error) => {
|
|
525
|
-
return { error: error.message || error };
|
|
526
|
-
});
|
|
527
|
-
// // If the model stopped before completing the response, this is usually due to output token limit reached.
|
|
528
|
-
if (finishReason !== 'stop') {
|
|
529
|
-
return {
|
|
530
|
-
Reply: response,
|
|
531
|
-
_error: 'The model stopped before completing the response, this is usually due to output token limit reached.',
|
|
532
|
-
_debug: logger.output,
|
|
533
|
-
};
|
|
534
|
-
}
|
|
535
|
-
|
|
536
|
-
// in case we have the response but it's empty string, undefined or null
|
|
537
|
-
if (!response) {
|
|
538
|
-
return { _error: ' LLM Error = Empty Response!', _debug: logger.output };
|
|
539
|
-
}
|
|
540
|
-
|
|
541
|
-
if (response?.error) {
|
|
542
|
-
const error = response?.error + ' ' + (response?.details || '');
|
|
543
|
-
logger.error(` LLM Error=`, error);
|
|
544
|
-
|
|
545
|
-
return { Output: response?.data, _error: error, _debug: logger.output };
|
|
546
|
-
}
|
|
547
|
-
|
|
548
|
-
const Reply = llmInference.connector.postProcess(response);
|
|
549
|
-
if (Reply.error) {
|
|
550
|
-
logger.error(` LLM Error=`, Reply.error);
|
|
551
|
-
return { _error: Reply.error, _debug: logger.output };
|
|
552
|
-
}
|
|
553
|
-
|
|
554
|
-
logger.debug(' Reply \n', Reply);
|
|
555
|
-
|
|
556
|
-
const result = { Reply };
|
|
557
|
-
|
|
558
|
-
result['_debug'] = logger.output;
|
|
559
|
-
|
|
560
|
-
return result;
|
|
561
|
-
} catch (error) {
|
|
562
|
-
return { _error: error.message, _debug: logger.output };
|
|
563
|
-
}
|
|
564
|
-
}
|
|
565
|
-
}
|
|
566
|
-
|
|
567
|
-
/**
 * Extracts media file values from the component's runtime input.
 *
 * Walks the config's declared inputs, keeps only media-typed ones
 * (Image/Audio/Video/Binary), resolves each corresponding input value
 * through TemplateString (array values are resolved element-wise), and
 * drops falsy results.
 *
 * @param input  - runtime input object keyed by input name
 * @param config - component config; `config.inputs` lists declared inputs with `name`/`type`
 * @returns array of resolved file values; [] when no media inputs are declared
 */
function parseFiles(input: any, config: any) {
    const mediaTypes = ['Image', 'Audio', 'Video', 'Binary'];

    // Parse media inputs from config
    const inputFiles =
        config.inputs
            ?.filter((_input) => mediaTypes.includes(_input.type))
            ?.flatMap((_input) => {
                const value = input[_input.name];

                // flatMap flattens one level, so both branches end up as flat entries
                if (Array.isArray(value)) {
                    return value.map((item) => TemplateString(item).parseRaw(input).result);
                } else {
                    return TemplateString(value).parseRaw(input).result;
                }
            })
            // drop empty/undefined resolutions so callers only see real file values
            ?.filter((file) => file) || [];

    return inputFiles;
}
|
|
1
|
+
import Joi from 'joi';
|
|
2
|
+
import { IAgent as Agent } from '@sre/types/Agent.types';
|
|
3
|
+
import { LLMInference } from '@sre/LLMManager/LLM.inference';
|
|
4
|
+
import { TemplateString } from '@sre/helpers/TemplateString.helper';
|
|
5
|
+
import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
|
|
6
|
+
import { getMimeType } from '@sre/utils/data.utils';
|
|
7
|
+
import { Component } from './Component.class';
|
|
8
|
+
import { formatDataForDebug } from '@sre/utils/data.utils';
|
|
9
|
+
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
|
|
10
|
+
|
|
11
|
+
//TODO : better handling of context window exceeding max length
|
|
12
|
+
|
|
13
|
+
/**
 * GenAILLM component: runs a single LLM generation step inside an agent flow.
 *
 * `schema` describes the component's UI-facing settings/inputs/outputs;
 * `configSchema` is the Joi validation applied to the runtime config.
 * The actual generation happens in `process()`.
 */
export class GenAILLM extends Component {
    // UI/metadata schema. NOTE(review): several entries drift from the Joi
    // configSchema below — confirm which side is authoritative:
    //   - maxSearchResults: max 50 here vs max(100) in Joi
    //   - reasoningEffort: Joi additionally allows 'minimal'
    //   - RSS link: keyed 'link' here but 'rssLinks' in Joi
    // Also: description reads "generate a responses" (grammar typo in a
    // user-facing string — left untouched here, fix separately).
    protected schema = {
        name: 'GenAILLM',
        description: 'Use this component to generate a responses from an LLM',
        settings: {
            model: {
                type: 'string',
                max: 200,
                required: true,
            },
            prompt: {
                type: 'string',
                max: 8_000_000,
                label: 'Prompt',
            },
            temperature: {
                type: 'number',
                min: 0,
                max: 5,
                label: 'Temperature',
            },
            maxTokens: {
                type: 'number',
                min: 1,
                label: 'Maximum Tokens',
            },
            stopSequences: {
                type: 'string',
                max: 400,
                label: 'Stop Sequences',
                allowEmpty: true,
            },
            topP: {
                type: 'number',
                min: 0,
                max: 1,
                label: 'Top P',
            },
            topK: {
                type: 'number',
                min: 0,
                max: 500,
                label: 'Top K',
            },
            frequencyPenalty: {
                type: 'number',
                min: 0,
                max: 2,
                label: 'Frequency Penalty',
            },
            presencePenalty: {
                type: 'number',
                min: 0,
                max: 2,
                label: 'Presence Penalty',
            },
            responseFormat: {
                type: 'string',
                valid: ['json', 'text'],
                label: 'Response Format',
            },
            passthrough: {
                type: 'boolean',
                description: 'If true, the LLM response will be returned as is by the agent',
                label: 'Passthrough',
            },
            useSystemPrompt: {
                type: 'boolean',
                description: 'If true, the component will use parent agent system prompt',
                label: 'Use System Prompt',
            },
            useContextWindow: {
                type: 'boolean',
                description: 'If true, the component will use parent agent context window',
                label: 'Use Context Window',
            },
            maxContextWindowLength: {
                type: 'number',
                min: 0,
                description: 'The maximum number of messages to use from this component context window (if useContextWindow is true)',
                label: 'Maximum Context Window Length',
            },

            // #region Web Search
            useWebSearch: {
                type: 'boolean',
                description: 'If true, the component will use web search for additional context',
                label: 'Use Web Search',
            },
            webSearchContextSize: {
                type: 'string',
                valid: ['high', 'medium', 'low'],
                label: 'Web Search Context Size',
            },
            webSearchCity: {
                type: 'string',
                max: 100,
                label: 'Web Search City',
                allowEmpty: true,
            },
            webSearchCountry: {
                type: 'string',
                max: 2,
                label: 'Web Search Country',
                allowEmpty: true,
            },
            webSearchRegion: {
                type: 'string',
                max: 100,
                label: 'Web Search Region',
                allowEmpty: true,
            },
            webSearchTimezone: {
                type: 'string',
                max: 100,
                label: 'Web Search Timezone',
                allowEmpty: true,
            },
            // #endregion

            // #region xAI Search
            useSearch: {
                type: 'boolean',
                description: 'If true, the component will use xAI live search capabilities',
                label: 'Use Search',
                allowEmpty: true,
            },
            searchMode: {
                type: 'string',
                valid: ['auto', 'on', 'off'],
                label: 'Search Mode',
                allowEmpty: true,
            },
            returnCitations: {
                type: 'boolean',
                description: 'If true, include citations and sources in the response',
                label: 'Return Citations',
                allowEmpty: true,
            },
            maxSearchResults: {
                type: 'number',
                min: 1,
                max: 50, // NOTE(review): Joi side allows up to 100 — confirm intended cap
                label: 'Max Search Results',
                allowEmpty: true,
            },
            searchDataSources: {
                type: 'array',
                max: 4,
                label: 'Search Data Sources',
                allowEmpty: true,
            },
            searchCountry: {
                type: 'string',
                max: 2,
                label: 'Search Country',
                allowEmpty: true,
            },
            excludedWebsites: {
                type: 'string',
                max: 10000,
                label: 'Excluded Websites',
                allowEmpty: true,
            },
            allowedWebsites: {
                type: 'string',
                max: 10000,
                label: 'Allowed Websites',
                allowEmpty: true,
            },
            includedXHandles: {
                type: 'string',
                max: 1000,
                label: 'Included X Handles',
                allowEmpty: true,
            },
            excludedXHandles: {
                type: 'string',
                max: 1000,
                label: 'Excluded X Handles',
                allowEmpty: true,
            },
            postFavoriteCount: {
                type: 'number',
                min: 0,
                max: 1000000000,
                label: 'Post Favorite Count',
                allowEmpty: true,
            },
            postViewCount: {
                type: 'number',
                min: 0,
                max: 1000000000,
                label: 'Post View Count',
                allowEmpty: true,
            },
            // NOTE(review): Joi validates this field under the key 'rssLinks' (max 10000);
            // here it is 'link' (max 5000) — verify which key the runtime actually reads.
            link: {
                type: 'string',
                max: 5000,
                label: 'RSS Link',
                allowEmpty: true,
            },
            safeSearch: {
                type: 'boolean',
                description: 'If true, enable safe search filtering',
                label: 'Safe Search',
                allowEmpty: true,
            },
            fromDate: {
                type: 'string',
                max: 10, // YYYY-MM-DD; Joi enforces the pattern below
                label: 'From Date',
                allowEmpty: true,
            },
            toDate: {
                type: 'string',
                max: 10,
                label: 'To Date',
                allowEmpty: true,
            },
            // #endregion

            // #region Reasoning
            useReasoning: {
                type: 'boolean',
                description: 'If true, the component will use reasoning capabilities for complex problem-solving',
                label: 'Use Reasoning',
            },
            verbosity: {
                type: 'string',
                valid: ['low', 'medium', 'high'],
                label: 'Verbosity',
                allowEmpty: true,
            },
            reasoningEffort: {
                type: 'string',
                // NOTE(review): Joi additionally accepts 'minimal' — confirm whether the UI should offer it
                valid: ['none', 'default', 'low', 'medium', 'high'],
                description: 'Controls the level of effort the model will put into reasoning',
                label: 'Reasoning Effort',
            },
            maxThinkingTokens: {
                type: 'number',
                min: 1,
                label: 'Maximum Thinking Tokens',
            },
            // #endregion
        },
        inputs: {
            Input: {
                type: 'Any',
                description: 'An input that you can pass to the LLM',
            },
            Attachment: {
                type: 'Binary',
                description: 'An attachment that you can pass to the LLM',
                optional: true,
            },
        },
        outputs: {
            Reply: {
                default: true,
            },
        },
    };
    // Runtime validation for config.data (see process()).
    protected configSchema = Joi.object({
        model: Joi.string().max(200).required(),
        prompt: Joi.string().required().max(8_000_000).label('Prompt'), // 2M tokens is around 8M characters
        temperature: Joi.number().min(0).max(5).label('Temperature'), // max temperature is 2 for OpenAI and togetherAI but 5 for cohere
        maxTokens: Joi.number().min(1).label('Maximum Tokens'),
        stopSequences: Joi.string().allow('').max(400).label('Stop Sequences'),
        topP: Joi.number().min(0).max(1).label('Top P'),
        topK: Joi.number().min(0).max(500).label('Top K'), // max top_k is 100 for togetherAI but 500 for cohere
        frequencyPenalty: Joi.number().min(0).max(2).label('Frequency Penalty'),
        presencePenalty: Joi.number().min(0).max(2).label('Presence Penalty'),
        responseFormat: Joi.string().valid('json', 'text').allow('').optional().label('Response Format'),
        passthrough: Joi.boolean().optional().label('Passthrough'),
        useSystemPrompt: Joi.boolean().optional().label('Use System Prompt'),
        useContextWindow: Joi.boolean().optional().label('Use Context Window'),
        maxContextWindowLength: Joi.number().optional().min(0).label('Maximum Context Window Length'),
        verbosity: Joi.string().valid('low', 'medium', 'high').optional().allow('').allow(null).label('Verbosity'),

        // #region Web Search
        useWebSearch: Joi.boolean().optional().label('Use Web Search'),
        webSearchContextSize: Joi.string().valid('high', 'medium', 'low').optional().label('Web Search Context Size'),
        webSearchCity: Joi.string().max(100).optional().allow('').label('Web Search City'),
        webSearchCountry: Joi.string().max(2).optional().allow('').label('Web Search Country'),
        webSearchRegion: Joi.string().max(100).optional().allow('').label('Web Search Region'),
        webSearchTimezone: Joi.string().max(100).optional().allow('').label('Web Search Timezone'),
        // #endregion

        // #region xAI Search
        useSearch: Joi.boolean().optional().allow('').label('Use Search'),
        searchMode: Joi.string().valid('auto', 'on', 'off').optional().allow('').label('Search Mode'),
        returnCitations: Joi.boolean().optional().allow('').label('Return Citations'),
        maxSearchResults: Joi.number().min(1).max(100).optional().allow('').label('Max Search Results'), // NOTE(review): UI schema caps at 50
        searchDataSources: Joi.array().items(Joi.string().valid('web', 'x', 'news', 'rss')).max(4).optional().allow('').label('Search Data Sources'),
        searchCountry: Joi.string().length(2).optional().allow('').label('Search Country'),
        excludedWebsites: Joi.string().max(10000).optional().allow('').label('Excluded Websites'),
        allowedWebsites: Joi.string().max(10000).optional().allow('').label('Allowed Websites'),
        includedXHandles: Joi.string().max(1000).optional().allow('').label('Included X Handles'),
        excludedXHandles: Joi.string().max(1000).optional().allow('').label('Excluded X Handles'),
        postFavoriteCount: Joi.number().min(0).max(1000000000).optional().allow('').label('Post Favorite Count'),
        postViewCount: Joi.number().min(0).max(1000000000).optional().allow('').label('Post View Count'),
        rssLinks: Joi.string().max(10000).optional().allow('').label('RSS Link'), // NOTE(review): UI schema uses key 'link' (max 5000)
        safeSearch: Joi.boolean().optional().allow('').label('Safe Search'),
        fromDate: Joi.string()
            .pattern(/^\d{4}-\d{2}-\d{2}$/)
            .optional()
            .allow('')
            .label('From Date'),
        toDate: Joi.string()
            .pattern(/^\d{4}-\d{2}-\d{2}$/)
            .optional()
            .allow('')
            .label('To Date'),
        // #endregion

        // #region Reasoning
        useReasoning: Joi.boolean().optional().label('Use Reasoning'),
        reasoningEffort: Joi.string().valid('none', 'default', 'minimal', 'low', 'medium', 'high').optional().allow('').label('Reasoning Effort'),
        maxThinkingTokens: Joi.number().min(1).optional().label('Maximum Thinking Tokens'),
        // #endregion
    });
    constructor() {
        super();
    }
    // No per-instance initialization needed; lifecycle hook kept for the Component contract.
    init() {}
|
|
340
|
+
    /**
     * Executes one LLM generation for this component.
     *
     * Flow: validate/resolve the model → resolve prompt template and media files →
     * (optionally) filter files against the model's supported features → build the
     * message list from the agent's cached system prompt / context window →
     * stream the completion, optionally passing chunks through to the agent's
     * callback and SSE channel → post-process and return `{ Reply, _debug }`.
     *
     * Errors never throw to the caller: every failure path returns an object with
     * `_error` (and `_debug` with the component log).
     *
     * @param input  - resolved runtime inputs keyed by input name
     * @param config - component config; tunables live under `config.data`
     * @param agent  - owning agent (provides models provider, runtime cache, SSE, callback)
     */
    async process(input, config, agent: Agent) {
        await super.process(input, config, agent);

        //let debugLog = agent.agentRuntime?.debug ? [] : undefined;
        const logger = this.createComponentLogger(agent, config);

        try {
            logger.debug(`=== GenAILLM Log ===`);
            // NOTE(review): teamId is captured but unused in this method (see the
            // commented-out CustomLLMRegistry code below) — candidate for removal.
            let teamId = agent?.teamId;

            const passThrough: boolean = config.data.passthrough || false;
            const useContextWindow: boolean = config.data.useContextWindow || false;
            const useSystemPrompt: boolean = config.data.useSystemPrompt || false;
            // NOTE(review): useWebSearch is not referenced below; web-search params
            // reach the connector via the `...config.data` spread in promptStream.
            const useWebSearch: boolean = config.data.useWebSearch || false;
            // parseInt without an explicit radix; values come from validated config, falls back to 1024
            const maxTokens: number = parseInt(config.data.maxTokens) || 1024;
            const maxContextWindowLength: number = parseInt(config.data.maxContextWindowLength) || 1024;
            const model: string = config.data.model || 'echo';
            const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));

            // if the llm is undefined, then it means we removed the model from our system
            if (!llmInference.connector) {
                return {
                    _error: `The model '${model}' is not available. Please try a different one.`,
                    _debug: logger.output,
                };
            }

            //const team = AccessCandidate.team(teamId);
            //const llmRegistry = isStandardLLM ? LLMRegistry : await CustomLLMRegistry.getInstance(team);
            const modelId = await agent.modelsProvider.getModelId(model);

            logger.debug(` Model : ${modelId || model}`);

            // Resolve {{template}} expressions in the prompt against the runtime input
            let prompt: any = TemplateString(config.data.prompt).parse(input).result;

            let files: any[] = parseFiles(input, config);
            let isMultimodalRequest = false;
            const provider = await agent.modelsProvider.getProvider(model);
            const isEcho = provider === 'Echo';

            // Ignore files for Echo model
            if (files?.length > 0 && !isEcho) {
                // TODO: simplify the valid files checking logic
                const supportedFileTypes = SUPPORTED_MIME_TYPES_MAP?.[provider] || {};
                const modelInfo = await agent.modelsProvider.getModelInfo(model);
                const features = modelInfo?.features || [];
                const fileTypes = new Set(); // Set to avoid duplicates

                // Keep only files whose mime type maps to a feature this model supports
                const validFiles = await Promise.all(
                    files.map(async (file) => {
                        const mimeType = file?.mimetype || (await getMimeType(file));
                        // feature key (e.g. provider capability) owning this mime type, '' if none
                        const [requestFeature = ''] =
                            Object.entries(supportedFileTypes).find(([key, value]) => (value as string[]).includes(mimeType)) || [];

                        if (mimeType) {
                            fileTypes.add(mimeType);
                        }

                        return features?.includes(requestFeature) ? file : null;
                    })
                );

                files = validFiles.filter(Boolean);

                // All attachments rejected → fail with the list of offending mime types
                if (files.length === 0) {
                    return {
                        _error: `Model does not support ${fileTypes?.size > 0 ? Array.from(fileTypes).join(', ') : 'File(s)'}`,
                        _debug: logger.output,
                    };
                }

                isMultimodalRequest = true;
            }

            logger.debug(` Prompt\n`, prompt, '\n');

            if (!isEcho) {
                logger.debug(' Files\n', await Promise.all(files.map((file) => formatDataForDebug(file, AccessCandidate.agent(agent.id)))));
            }

            // default to json response format
            const hasCustomOutputs = config?.outputs?.some((output) => !output.default);
            config.data.responseFormat = config.data?.responseFormat || (hasCustomOutputs ? 'json' : '');

            // request to LLM
            let response: any;

            const _prompt = llmInference.connector.enhancePrompt(prompt, config);
            let messages = [];

            let systemPrompt = '';
            if (useSystemPrompt) {
                //first we try to grab the system prompt from llmCache (in case of a ConversationHelper implementing dynamic system prompt)
                const cachedPrompt = await agent.agentRuntime.llmCache.get('systemPrompt', 'text');
                //if not found, we can read the system prompt from agent data.
                systemPrompt = cachedPrompt || agent.data?.behavior || '';

                if (systemPrompt) {
                    logger.debug(' Using Agent System Prompt\n', systemPrompt);
                }
                if (systemPrompt) {
                    messages = [{ role: 'system', content: systemPrompt }];
                }
            }

            if (useContextWindow) {
                const cachedMessages = await agent.agentRuntime.llmCache.get('messages', 'json');
                try {
                    const messagesJSON = typeof cachedMessages === 'string' ? JSON.parse(cachedMessages) : cachedMessages;
                    //const contextWindow = messagesJSON.filter((message) => message.role !== 'user');

                    // getContextWindow trims/reshapes cached history to fit the length/token budgets.
                    // NOTE: this REPLACES `messages`, including any system message set above —
                    // presumably getContextWindow re-embeds systemPrompt; confirm.
                    const convMessages = await llmInference.getContextWindow(systemPrompt, messagesJSON, maxContextWindowLength, maxTokens);

                    if (convMessages.length > 0) {
                        logger.debug(` Using Agent Context Window : ${convMessages.length - 1} messages will be used`);
                    }

                    messages = [...convMessages];
                    //messages.push(...contextWindowJSON);
                } catch (error) {
                    // best-effort: a corrupt cached window is logged and ignored
                    logger.warn('Error on parsing context window: ', error);
                    console.warn(cachedMessages);
                }
            }

            // Ensure the enhanced prompt is the final user message (replace a trailing
            // user turn rather than appending a second one)
            if (messages[messages.length - 1]?.role == 'user') {
                messages[messages.length - 1].content = _prompt;
            } else {
                messages.push({ role: 'user', content: _prompt });
            }
            let finishReason = 'stop';
            // NOTE(review): async Promise executor. If promptStream rejects, the inner
            // .catch() calls reject() but execution continues with eventEmitter === undefined,
            // so `eventEmitter.on(...)` throws inside the executor (swallowed after the
            // first reject, but worth restructuring).
            const contentPromise = new Promise(async (resolve, reject) => {
                let _content = '';
                let eventEmitter;

                eventEmitter = await llmInference
                    .promptStream({
                        contextWindow: messages,
                        files,
                        params: {
                            ...config.data,
                            agentId: agent.id,
                        },
                    })
                    .catch((error) => {
                        console.error('Error on promptStream: ', error);
                        reject(error);
                    });

                // Accumulate streamed chunks; optionally mirror them to the agent callback/SSE
                eventEmitter.on('content', (content) => {
                    if (passThrough) {
                        if (typeof agent.callback === 'function') {
                            agent.callback({ content });
                        }
                        // newlines are escaped so the SSE payload stays single-line
                        agent.sse.send('llm/passthrough/content', content.replace(/\n/g, '\\n'));
                    }
                    _content += content;
                });

                // Reasoning/thinking chunks are forwarded but NOT accumulated into the reply
                eventEmitter.on('thinking', (thinking) => {
                    if (passThrough) {
                        if (typeof agent.callback === 'function') {
                            agent.callback({ thinking });
                        }
                        agent.sse.send('llm/passthrough/thinking', thinking.replace(/\n/g, '\\n'));
                    }
                });
                eventEmitter.on('end', () => {
                    if (passThrough) {
                        if (typeof agent.callback === 'function') {
                            agent.callback({ content: '\n' });
                        }
                        agent.sse.send('llm/passthrough/content', '\\n');
                    }
                    resolve(_content);
                });
                // Records why the stream stopped early (e.g. token limit); checked below
                eventEmitter.on('interrupted', (reason) => {
                    finishReason = reason || 'stop';
                });

                eventEmitter.on('error', (error) => {
                    reject(error);
                });
            });
            response = await contentPromise.catch((error) => {
                return { error: error.message || error };
            });
            // // If the model stopped before completing the response, this is usually due to output token limit reached.
            if (finishReason !== 'stop') {
                // Partial content is still returned as Reply alongside the error
                return {
                    Reply: response,
                    _error: 'The model stopped before completing the response, this is usually due to output token limit reached.',
                    _debug: logger.output,
                };
            }

            // in case we have the response but it's empty string, undefined or null
            if (!response) {
                return { _error: ' LLM Error = Empty Response!', _debug: logger.output };
            }

            if (response?.error) {
                const error = response?.error + ' ' + (response?.details || '');
                logger.error(` LLM Error=`, error);

                return { Output: response?.data, _error: error, _debug: logger.output };
            }

            // Connector-specific post-processing (e.g. JSON parsing per responseFormat)
            const Reply = llmInference.connector.postProcess(response);
            if (Reply.error) {
                logger.error(` LLM Error=`, Reply.error);
                return { _error: Reply.error, _debug: logger.output };
            }

            logger.debug(' Reply \n', Reply);

            const result = { Reply };

            result['_debug'] = logger.output;

            return result;
        } catch (error) {
            // Last-resort guard: surface any unexpected failure as a component error
            return { _error: error.message, _debug: logger.output };
        }
    }
}
|
|
566
|
+
|
|
567
|
+
function parseFiles(input: any, config: any) {
|
|
568
|
+
const mediaTypes = ['Image', 'Audio', 'Video', 'Binary'];
|
|
569
|
+
|
|
570
|
+
// Parse media inputs from config
|
|
571
|
+
const inputFiles =
|
|
572
|
+
config.inputs
|
|
573
|
+
?.filter((_input) => mediaTypes.includes(_input.type))
|
|
574
|
+
?.flatMap((_input) => {
|
|
575
|
+
const value = input[_input.name];
|
|
576
|
+
|
|
577
|
+
if (Array.isArray(value)) {
|
|
578
|
+
return value.map((item) => TemplateString(item).parseRaw(input).result);
|
|
579
|
+
} else {
|
|
580
|
+
return TemplateString(value).parseRaw(input).result;
|
|
581
|
+
}
|
|
582
|
+
})
|
|
583
|
+
?.filter((file) => file) || [];
|
|
584
|
+
|
|
585
|
+
return inputFiles;
|
|
586
|
+
}
|