@smythos/sre 1.5.0 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG +62 -0
- package/LICENSE +18 -0
- package/package.json +127 -115
- package/src/Components/APICall/APICall.class.ts +155 -0
- package/src/Components/APICall/AccessTokenManager.ts +130 -0
- package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -0
- package/src/Components/APICall/OAuth.helper.ts +294 -0
- package/src/Components/APICall/mimeTypeCategories.ts +46 -0
- package/src/Components/APICall/parseData.ts +167 -0
- package/src/Components/APICall/parseHeaders.ts +41 -0
- package/src/Components/APICall/parseProxy.ts +68 -0
- package/src/Components/APICall/parseUrl.ts +91 -0
- package/src/Components/APIEndpoint.class.ts +234 -0
- package/src/Components/APIOutput.class.ts +58 -0
- package/src/Components/AgentPlugin.class.ts +102 -0
- package/src/Components/Async.class.ts +155 -0
- package/src/Components/Await.class.ts +90 -0
- package/src/Components/Classifier.class.ts +158 -0
- package/src/Components/Component.class.ts +94 -0
- package/src/Components/ComponentHost.class.ts +38 -0
- package/src/Components/DataSourceCleaner.class.ts +92 -0
- package/src/Components/DataSourceIndexer.class.ts +181 -0
- package/src/Components/DataSourceLookup.class.ts +141 -0
- package/src/Components/FEncDec.class.ts +29 -0
- package/src/Components/FHash.class.ts +33 -0
- package/src/Components/FSign.class.ts +80 -0
- package/src/Components/FSleep.class.ts +25 -0
- package/src/Components/FTimestamp.class.ts +25 -0
- package/src/Components/FileStore.class.ts +75 -0
- package/src/Components/ForEach.class.ts +97 -0
- package/src/Components/GPTPlugin.class.ts +70 -0
- package/src/Components/GenAILLM.class.ts +395 -0
- package/src/Components/HuggingFace.class.ts +314 -0
- package/src/Components/Image/imageSettings.config.ts +70 -0
- package/src/Components/ImageGenerator.class.ts +407 -0
- package/src/Components/JSONFilter.class.ts +54 -0
- package/src/Components/LLMAssistant.class.ts +213 -0
- package/src/Components/LogicAND.class.ts +28 -0
- package/src/Components/LogicAtLeast.class.ts +85 -0
- package/src/Components/LogicAtMost.class.ts +86 -0
- package/src/Components/LogicOR.class.ts +29 -0
- package/src/Components/LogicXOR.class.ts +34 -0
- package/src/Components/MCPClient.class.ts +112 -0
- package/src/Components/PromptGenerator.class.ts +122 -0
- package/src/Components/ScrapflyWebScrape.class.ts +159 -0
- package/src/Components/TavilyWebSearch.class.ts +98 -0
- package/src/Components/index.ts +77 -0
- package/src/Core/AgentProcess.helper.ts +240 -0
- package/src/Core/Connector.class.ts +123 -0
- package/src/Core/ConnectorsService.ts +192 -0
- package/src/Core/DummyConnector.ts +49 -0
- package/src/Core/HookService.ts +105 -0
- package/src/Core/SmythRuntime.class.ts +292 -0
- package/src/Core/SystemEvents.ts +15 -0
- package/src/Core/boot.ts +55 -0
- package/src/config.ts +15 -0
- package/src/constants.ts +125 -0
- package/src/data/hugging-face.params.json +580 -0
- package/src/helpers/BinaryInput.helper.ts +324 -0
- package/src/helpers/Conversation.helper.ts +1094 -0
- package/src/helpers/JsonContent.helper.ts +97 -0
- package/src/helpers/LocalCache.helper.ts +97 -0
- package/src/helpers/Log.helper.ts +234 -0
- package/src/helpers/OpenApiParser.helper.ts +150 -0
- package/src/helpers/S3Cache.helper.ts +129 -0
- package/src/helpers/SmythURI.helper.ts +5 -0
- package/src/helpers/TemplateString.helper.ts +243 -0
- package/src/helpers/TypeChecker.helper.ts +329 -0
- package/src/index.ts +179 -0
- package/src/index.ts.bak +179 -0
- package/src/subsystems/AgentManager/Agent.class.ts +1108 -0
- package/src/subsystems/AgentManager/Agent.helper.ts +3 -0
- package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -0
- package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -0
- package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -0
- package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -0
- package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -0
- package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -0
- package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -0
- package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -0
- package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +59 -0
- package/src/subsystems/AgentManager/Component.service/index.ts +11 -0
- package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -0
- package/src/subsystems/AgentManager/ForkedAgent.class.ts +153 -0
- package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -0
- package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +99 -0
- package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +63 -0
- package/src/subsystems/ComputeManager/Code.service/index.ts +11 -0
- package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -0
- package/src/subsystems/IO/CLI.service/index.ts +9 -0
- package/src/subsystems/IO/Log.service/LogConnector.ts +32 -0
- package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -0
- package/src/subsystems/IO/Log.service/index.ts +13 -0
- package/src/subsystems/IO/NKV.service/NKVConnector.ts +41 -0
- package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -0
- package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -0
- package/src/subsystems/IO/NKV.service/index.ts +12 -0
- package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -0
- package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -0
- package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -0
- package/src/subsystems/IO/Router.service/index.ts +11 -0
- package/src/subsystems/IO/Storage.service/SmythFS.class.ts +472 -0
- package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -0
- package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +305 -0
- package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +418 -0
- package/src/subsystems/IO/Storage.service/index.ts +13 -0
- package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -0
- package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +450 -0
- package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +373 -0
- package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +420 -0
- package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +106 -0
- package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -0
- package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -0
- package/src/subsystems/IO/VectorDB.service/index.ts +14 -0
- package/src/subsystems/LLMManager/LLM.helper.ts +221 -0
- package/src/subsystems/LLMManager/LLM.inference.ts +335 -0
- package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +374 -0
- package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +145 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +632 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +405 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +81 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +689 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +257 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/OpenAI.class.ts +848 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +255 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +193 -0
- package/src/subsystems/LLMManager/LLM.service/index.ts +43 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +281 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.ts +229 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -0
- package/src/subsystems/LLMManager/custom-models.ts +854 -0
- package/src/subsystems/LLMManager/models.ts +2539 -0
- package/src/subsystems/LLMManager/paramMappings.ts +69 -0
- package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -0
- package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -0
- package/src/subsystems/MemoryManager/LLMCache.ts +72 -0
- package/src/subsystems/MemoryManager/LLMContext.ts +125 -0
- package/src/subsystems/MemoryManager/RuntimeContext.ts +249 -0
- package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -0
- package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +76 -0
- package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -0
- package/src/subsystems/Security/Account.service/AccountConnector.ts +41 -0
- package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -0
- package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -0
- package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -0
- package/src/subsystems/Security/Account.service/index.ts +14 -0
- package/src/subsystems/Security/Credentials.helper.ts +62 -0
- package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +34 -0
- package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +57 -0
- package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -0
- package/src/subsystems/Security/ManagedVault.service/index.ts +12 -0
- package/src/subsystems/Security/SecureConnector.class.ts +110 -0
- package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -0
- package/src/subsystems/Security/Vault.service/VaultConnector.ts +26 -0
- package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -0
- package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +166 -0
- package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -0
- package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -0
- package/src/subsystems/Security/Vault.service/index.ts +12 -0
- package/src/types/ACL.types.ts +104 -0
- package/src/types/AWS.types.ts +9 -0
- package/src/types/Agent.types.ts +61 -0
- package/src/types/AgentLogger.types.ts +17 -0
- package/src/types/Cache.types.ts +1 -0
- package/src/types/Common.types.ts +3 -0
- package/src/types/LLM.types.ts +419 -0
- package/src/types/Redis.types.ts +8 -0
- package/src/types/SRE.types.ts +64 -0
- package/src/types/Security.types.ts +18 -0
- package/src/types/Storage.types.ts +5 -0
- package/src/types/VectorDB.types.ts +78 -0
- package/src/utils/base64.utils.ts +275 -0
- package/src/utils/cli.utils.ts +68 -0
- package/src/utils/data.utils.ts +263 -0
- package/src/utils/date-time.utils.ts +22 -0
- package/src/utils/general.utils.ts +238 -0
- package/src/utils/index.ts +12 -0
- package/src/utils/numbers.utils.ts +13 -0
- package/src/utils/oauth.utils.ts +35 -0
- package/src/utils/string.utils.ts +414 -0
- package/src/utils/url.utils.ts +19 -0
- package/src/utils/validation.utils.ts +74 -0
package/src/Components/ImageGenerator.class.ts
@@ -0,0 +1,407 @@
+//TODO: this component need to be fully refactored to use the same approach as GenAI LLM
+
+import { IRequestImage, Runware } from '@runware/sdk-js';
+import { OpenAI } from 'openai';
+
+import { TemplateString } from '@sre/helpers/TemplateString.helper';
+import { LLMInference } from '@sre/LLMManager/LLM.inference';
+import { IAgent as Agent } from '@sre/types/Agent.types';
+import { APIKeySource, GenerateImageConfig } from '@sre/types/LLM.types';
+import Joi from 'joi';
+import { Component } from './Component.class';
+
+import { SystemEvents } from '@sre/Core/SystemEvents';
+import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
+
+import appConfig from '@sre/config';
+import { BUILT_IN_MODEL_PREFIX, SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
+import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
+import { normalizeImageInput } from '@sre/utils/data.utils';
+import { ImageSettingsConfig } from './Image/imageSettings.config';
+import { getCredentials } from '../subsystems/Security/Credentials.helper';
+
+enum DALL_E_MODELS {
+    DALL_E_2 = 'dall-e-2',
+    DALL_E_3 = 'dall-e-3',
+}
+
+const IMAGE_GEN_COST_MAP = {
+    [DALL_E_MODELS.DALL_E_3]: {
+        standard: {
+            '1024x1024': 0.04,
+            '1024x1792': 0.08,
+            '1792x1024': 0.08,
+        },
+        hd: {
+            '1024x1024': 0.08,
+            '1024x1792': 0.12,
+            '1792x1024': 0.12,
+        },
+    },
+    [DALL_E_MODELS.DALL_E_2]: {
+        '256x256': 0.016,
+        '512x512': 0.018,
+        '1024x1024': 0.02,
+    },
+};
+
+export class ImageGenerator extends Component {
+    protected configSchema = Joi.object({
+        model: Joi.string().max(100).required(),
+        prompt: Joi.string().optional().min(2).max(2000).label('Prompt'),
+
+        // #region OpenAI (DALL·E)
+        sizeDalle2: Joi.string().valid('256x256', '512x512', '1024x1024').optional(),
+        sizeDalle3: Joi.string().valid('1024x1024', '1792x1024', '1024x1792').optional(),
+        quality: Joi.string().valid('standard', 'hd', 'auto', 'high', 'medium', 'low').allow('').optional(),
+        style: Joi.string().valid('vivid', 'natural').optional(),
+        isRawInputPrompt: Joi.boolean().strict().optional(),
+        // #endregion
+
+        // #region Runware
+        negativePrompt: Joi.string().optional().allow('').min(2).max(2000).label('Negative Prompt'),
+        width: Joi.number().min(128).max(2048).multiple(64).optional().messages({
+            'number.multiple': '{{#label}} must be divisible by 64 (eg: 128...512, 576, 640...2048). Provided value: {{#value}}',
+        }),
+        height: Joi.number().min(128).max(2048).multiple(64).optional().messages({
+            'number.multiple': '{{#label}} must be divisible by 64 (eg: 128...512, 576, 640...2048). Provided value: {{#value}}',
+        }),
+        outputFormat: Joi.string().valid('PNG', 'JPEG', 'WEBP', 'auto', 'jpeg', 'png', 'webp').optional(),
+        strength: ImageSettingsConfig.strength,
+        // #endregion
+
+        // #region GPT model
+        size: Joi.string().optional().allow('').max(100).label('Size'),
+        // #endregion
+    });
+    constructor() {
+        super();
+    }
+    init() {}
+    async process(input, config, agent: Agent) {
+        await super.process(input, config, agent);
+
+        const logger = this.createComponentLogger(agent, config);
+
+        logger.debug(`=== Image Generator Log ===`);
+
+        let model = config?.data?.model;
+
+        if (!model) {
+            return { _error: 'Model Not Found: ', _debug: logger.output };
+        }
+
+        logger.debug(`Model: ${model}`);
+
+        let prompt = config.data?.prompt || input?.Prompt;
+        prompt = typeof prompt === 'string' ? prompt : JSON.stringify(prompt);
+        prompt = TemplateString(prompt).parse(input).result;
+
+        if (!prompt) {
+            return { _error: 'Please provide a prompt or Image', _debug: logger.output };
+        }
+
+        logger.debug(`Prompt: \n`, prompt);
+
+        const modelFamily = await getModelFamily(model, agent);
+
+        if (typeof imageGenerator[modelFamily] !== 'function') {
+            return { _error: `The model '${model}' is not available. Please try a different one.`, _debug: logger.output };
+        }
+
+        try {
+            const { output } = await imageGenerator[modelFamily]({ model, config, input, logger, agent, prompt });
+
+            logger.debug(`Output: `, output);
+
+            return { Output: output, _debug: logger.output };
+        } catch (error: any) {
+            return { _error: `Generating Image(s)\n${error?.message || JSON.stringify(error)}`, _debug: logger.output };
+        }
+    }
+}
+
+// TODO: Create a separate service for image generation, similar to LLM.service.
+
+// TODO: Hopefully we will have the proper type with new OpenAI SDK, then we can use their type
+type TokenUsage = OpenAI.Completions.CompletionUsage & {
+    prompt_tokens_details?: { cached_tokens?: number };
+    input_tokens_details: { image_tokens?: number; text_tokens?: number };
+    output_tokens: number;
+};
+
+enum MODEL_FAMILY {
+    GPT = 'gpt',
+    RUNWARE = 'runware',
+    DALL_E = 'dall-e',
+}
+
+const imageGenerator = {
+    [MODEL_FAMILY.GPT]: async ({ model, prompt, config, logger, agent, input }) => {
+        let args: GenerateImageConfig & { files?: BinaryInput[] } = {
+            model,
+            size: config?.data?.size || 'auto',
+            quality: config?.data?.quality || 'auto',
+        };
+
+        try {
+            const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));
+
+            // if the llm is undefined, then it means we removed the model from our system
+            if (!llmInference.connector) {
+                return {
+                    _error: `The model '${model}' is not available. Please try a different one.`,
+                    _debug: logger.output,
+                };
+            }
+
+            const provider = await agent.modelsProvider.getProvider(model);
+
+            const files: any[] = parseFiles(input, config);
+            const validFiles = files.filter((file) => imageGenerator.isValidImageFile(provider, file.mimetype));
+
+            if (files.length > 0 && validFiles.length === 0) {
+                throw new Error('Supported image file types are: ' + SUPPORTED_MIME_TYPES_MAP[provider]?.imageGen?.join(', '));
+            }
+
+            let response;
+
+            if (validFiles.length > 0) {
+                response = await llmInference.imageEditRequest({ query: prompt, files: validFiles, params: { ...args, agentId: agent.id } });
+            } else {
+                response = await llmInference.imageGenRequest({ query: prompt, params: { ...args, agentId: agent.id } });
+            }
+
+            if (response?.usage) {
+                imageGenerator.reportTokenUsage(response.usage, {
+                    modelEntryName: model,
+                    keySource: model.startsWith(BUILT_IN_MODEL_PREFIX) ? APIKeySource.Smyth : APIKeySource.User,
+                    agentId: agent.id,
+                    teamId: agent.teamId,
+                });
+            }
+
+            let output = response?.data?.[0]?.b64_json;
+
+            const binaryInput = BinaryInput.from(output);
+            const agentId = typeof agent == 'object' && agent.id ? agent.id : agent;
+            const smythFile = await binaryInput.getJsonData(AccessCandidate.agent(agentId));
+
+            return { output: smythFile };
+        } catch (error: any) {
+            throw new Error(`OpenAI Image Generation Error: ${error?.message || JSON.stringify(error)}`);
+        }
+    },
+    [MODEL_FAMILY.DALL_E]: async ({ model, prompt, config, logger, agent, input }) => {
+        let _finalPrompt = prompt;
+
+        const files: any[] = parseFiles(input, config);
+
+        if (files.length > 0) {
+            throw new Error('OpenAI Image Generation Error: DALL-E models do not support image editing or variations. Please use a different model.');
+        }
+
+        const responseFormat = config?.data?.responseFormat || 'url';
+
+        let args: GenerateImageConfig & { responseFormat: 'url' | 'b64_json' } = {
+            responseFormat,
+            model,
+        };
+
+        let cost = 0;
+
+        if (model === DALL_E_MODELS.DALL_E_3) {
+            const size = config?.data?.sizeDalle3 || '1024x1024';
+            const quality = config?.data?.quality || 'standard';
+            const style = config?.data?.style || 'vivid';
+            args.size = size;
+            args.quality = quality;
+            args.style = style;
+
+            const isRawInputPrompt = config?.data?.isRawInputPrompt || false;
+
+            if (isRawInputPrompt) {
+                _finalPrompt = `I NEED to test how the tool works with extremely simple prompts. DO NOT add any detail, just use it AS-IS: ${prompt}`;
+            }
+
+            cost = IMAGE_GEN_COST_MAP[model][quality][size];
+        } else if (model === DALL_E_MODELS.DALL_E_2) {
+            const size = config?.data?.sizeDalle2 || '256x256';
+            const numberOfImages = parseInt(config?.data?.numberOfImages) || 1;
+            args.size = size;
+            args.n = numberOfImages;
+
+            cost = IMAGE_GEN_COST_MAP[model][size];
+        }
+
+        const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));
+
+        // if the llm is undefined, then it means we removed the model from our system
+        if (!llmInference.connector) {
+            return {
+                _error: `The model '${model}' is not available. Please try a different one.`,
+                _debug: logger.output,
+            };
+        }
+
+        const response: any = await llmInference.imageGenRequest({ query: _finalPrompt, params: { ...args, agentId: agent.id } });
+
+        let output = response?.data?.[0]?.[responseFormat];
+        const revised_prompt = response?.data?.[0]?.revised_prompt;
+
+        if (revised_prompt && prompt !== revised_prompt) {
+            logger.debug(`Revised Prompt:\n${revised_prompt}`);
+        }
+
+        imageGenerator.reportUsage({ cost }, { modelEntryName: model, keySource: APIKeySource.Smyth, agentId: agent.id, teamId: agent.teamId });
+
+        return { output };
+    },
+    [MODEL_FAMILY.RUNWARE]: async ({ model, prompt, config, agent, input }) => {
+        // Initialize Runware client
+        const teamId = agent.teamId;
+        const apiKey = (await getCredentials(AccessCandidate.team(teamId), 'runware')) as string;
+
+        const runware = new Runware({ apiKey });
+        await runware.ensureConnection();
+
+        const negativePrompt = config?.data?.negativePrompt || '';
+
+        const files: any[] = parseFiles(input, config);
+        let seedImage = Array.isArray(files) ? files[0] : files;
+        seedImage = await normalizeImageInput(seedImage);
+
+        const modelId = await agent.modelsProvider.getModelId(model);
+        const imageRequestArgs: IRequestImage = {
+            model: modelId,
+            positivePrompt: prompt,
+            width: +config?.data?.width || 1024,
+            height: +config?.data?.height || 1024,
+            numberResults: 1, // For Image Generation we only need 1 image
+            outputType: 'URL', // For Image Generation we only need the URL
+            outputFormat: config?.data?.outputFormat || 'JPEG',
+            includeCost: true,
+        };
+
+        if (seedImage) {
+            imageRequestArgs.seedImage = seedImage;
+            imageRequestArgs.strength = +config?.data?.strength || 0.5;
+        }
+
+        // If a negative prompt is provided, add it to the request args
+        if (negativePrompt) {
+            imageRequestArgs.negativePrompt = negativePrompt;
+        }
+
+        try {
+            const response = await runware.requestImages(imageRequestArgs);
+
+            // Get first image from response array
+            const firstImage = response[0];
+
+            // Map response to match expected format
+            let output = firstImage.imageURL;
+
+            imageGenerator.reportUsage(
+                { cost: firstImage.cost },
+                { modelEntryName: model, keySource: APIKeySource.Smyth, agentId: agent.id, teamId: agent.teamId }
+            );
+
+            return { output };
+        } catch (error: any) {
+            throw new Error(`Runware Image Generation Error: ${error?.message || JSON.stringify(error)}`);
+        } finally {
+            // Clean up connection
+            await runware.disconnect();
+        }
+    },
+    reportTokenUsage(usage: TokenUsage, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
+        // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
+        const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
+
+        const usageData = {
+            sourceId: `api:imagegen.${modelName}`,
+            keySource: metadata.keySource,
+
+            input_tokens_txt: usage?.input_tokens_details?.text_tokens || 0,
+            input_tokens_img: usage?.input_tokens_details?.image_tokens || 0,
+            output_tokens: usage?.output_tokens,
+            input_tokens_cache_read: usage?.prompt_tokens_details?.cached_tokens || 0,
+
+            agentId: metadata.agentId,
+            teamId: metadata.teamId,
+        };
+        SystemEvents.emit('USAGE:API', usageData);
+
+        return usageData;
+    },
+    reportUsage(usage: { cost: number }, metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }) {
+        const usageData = {
+            sourceId: `api:imagegen.smyth`,
+            keySource: metadata.keySource,
+
+            cost: usage?.cost,
+
+            agentId: metadata.agentId,
+            teamId: metadata.teamId,
+        };
+        SystemEvents.emit('USAGE:API', usageData);
+
+        return usageData;
+    },
+    isValidImageFile(provider: string, mimetype: string) {
+        return SUPPORTED_MIME_TYPES_MAP[provider]?.imageGen?.includes(mimetype);
+    },
+};
+
+enum PROVIDERS {
+    OPENAI = 'OpenAI',
+    RUNWARE = 'Runware',
+}
+
+/**
+ * Gets the model family from a model identifier
+ * @param model The model identifier
+ * @returns The model family or null if not recognized
+ */
+async function getModelFamily(model: string, agent: Agent): Promise<string | null> {
+    if (await isGPTModel(model)) return MODEL_FAMILY.GPT;
+    if (await isRunwareModel(model, agent)) return MODEL_FAMILY.RUNWARE;
+    if (await isDallEModel(model)) return MODEL_FAMILY.DALL_E;
+
+    return null;
+}
+
+function isGPTModel(model: string) {
+    return model?.replace(BUILT_IN_MODEL_PREFIX, '')?.startsWith(MODEL_FAMILY.GPT);
+}
+
+async function isRunwareModel(model: string, agent: Agent): Promise<boolean> {
+    const provider = await agent.modelsProvider.getProvider(model);
+    return provider === PROVIDERS.RUNWARE || provider.toLowerCase() === PROVIDERS.RUNWARE.toLowerCase();
+}
+
+function isDallEModel(model: string) {
+    return model?.replace(BUILT_IN_MODEL_PREFIX, '')?.startsWith(MODEL_FAMILY.DALL_E);
+}
+
+function parseFiles(input: any, config: any) {
+    const mediaTypes = ['Image', 'Audio', 'Video', 'Binary'];
+
+    // Parse media inputs from config
+    const inputFiles =
+        config.inputs
+            ?.filter((_input) => mediaTypes.includes(_input.type))
+            ?.flatMap((_input) => {
+                const value = input[_input.name];
+
+                if (Array.isArray(value)) {
+                    return value.map((item) => TemplateString(item).parseRaw(input).result);
+                } else {
+                    return TemplateString(value).parseRaw(input).result;
+                }
+            })
+            ?.filter((file) => file) || [];
+
+    return inputFiles;
+}
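
For readers scanning the hunk above: the DALL·E branch prices each request from IMAGE_GEN_COST_MAP before reporting usage. A minimal illustration of that lookup (not part of the published file; the values are copied from the map above, the sample model/quality/size picks are ours):

    // DALL·E 3 cost is keyed by quality, then size (values from IMAGE_GEN_COST_MAP above)
    const dalle3Cost = IMAGE_GEN_COST_MAP['dall-e-3']['hd']['1024x1792']; // 0.12
    // DALL·E 2 cost is keyed by size only
    const dalle2Cost = IMAGE_GEN_COST_MAP['dall-e-2']['512x512']; // 0.018
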
package/src/Components/JSONFilter.class.ts
@@ -0,0 +1,54 @@
+import Joi from 'joi';
+
+import { IAgent as Agent } from '@sre/types/Agent.types';
+import { Component } from './Component.class';
+
+export class JSONFilter extends Component {
+    protected configSchema = Joi.object({
+        fields: Joi.string().max(30000).allow('').label('Prompt'),
+    });
+    constructor() {
+        super();
+    }
+    init() {}
+    async process(input, config, agent: Agent) {
+        await super.process(input, config, agent);
+
+        const logger = this.createComponentLogger(agent, config);
+        logger.debug(`=== JSONFilter Log ===`);
+        let Output = {};
+        let _error = null;
+        try {
+            const componentId = config.id;
+            const fields = config.data.fields;
+            const obj = input.Input;
+
+            Output = filterFields(obj, fields);
+            logger.debug(`Output filtered`);
+        } catch (error: any) {
+            _error = error;
+            logger.error(` JSONFilter Error`, error);
+        }
+        return { Output, _error, _debug: logger.output };
+    }
+}
+
+function filterFields(obj, fields) {
+    const fieldList = fields?.split(',').map((field) => field.trim());
+
+    function filterObject(obj) {
+        if (Array.isArray(obj)) {
+            return obj.map(filterObject);
+        } else if (obj !== null && typeof obj === 'object') {
+            return Object.keys(obj)
+                .filter((key) => fieldList.includes(key))
+                .reduce((acc, key) => {
+                    acc[key] = filterObject(obj[key]);
+                    return acc;
+                }, {});
+        }
+        return obj;
+    }
+
+    return filterObject(obj);
+}
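
The filterFields helper above splits the comma-separated `fields` config value and recursively keeps only the listed keys at every nesting level of the input, passing arrays and primitives through. An illustrative call (not part of the published file; the sample object and field list are ours):

    // Behavior follows filterFields above: keys not in the list are dropped at every level.
    const sample = { id: 1, name: 'Ada', tags: ['x'], meta: { name: 'nested', secret: 's' } };
    filterFields(sample, 'name, meta');
    // => { name: 'Ada', meta: { name: 'nested' } } ('id', 'tags' and 'secret' are dropped)
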
package/src/Components/LLMAssistant.class.ts
@@ -0,0 +1,213 @@
+import Joi from 'joi';
+
+import { IAgent as Agent } from '@sre/types/Agent.types';
+import { ConnectorService } from '@sre/Core/ConnectorsService';
+import { CacheConnector } from '@sre/MemoryManager/Cache.service/CacheConnector';
+import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
+import { DEFAULT_MAX_TOKENS_FOR_LLM } from '@sre/constants';
+import { TemplateString } from '@sre/helpers/TemplateString.helper';
+import { encode } from 'gpt-tokenizer';
+import { Component } from './Component.class';
+import { JSONContent } from '@sre/helpers/JsonContent.helper';
+import { LLMInference } from '@sre/LLMManager/LLM.inference';
+import { TLLMMessageRole } from '@sre/types/LLM.types';
+import { VaultHelper } from '@sre/Security/Vault.service/Vault.helper';
+import path from 'path';
+import config from '@sre/config';
+import fs from 'fs/promises';
+
+//const sessions = {};
+let cacheConnector: CacheConnector;
+function getCacheConnector() {
+    if (!cacheConnector) {
+        cacheConnector = ConnectorService.getCacheConnector();
+    }
+    return cacheConnector;
+}
+
+async function saveMessagesToSession(agentId, userId, conversationId, messages, ttl?) {
+    if (!userId && !conversationId) return;
+    const cacheConnector = getCacheConnector();
+    const conv_uid = `${agentId}:conv-u${userId}-c${conversationId}`;
+
+    cacheConnector.requester(AccessCandidate.agent(agentId)).set(conv_uid, JSON.stringify(messages), null, null, ttl);
+}
+
+async function readMessagesFromSession(agentId, userId, conversationId, maxTokens = DEFAULT_MAX_TOKENS_FOR_LLM) {
+    if (!userId && !conversationId) return [];
+    const cacheConnector = getCacheConnector();
+
+    const conv_uid = `${agentId}:conv-u${userId}-c${conversationId}`;
+    //read the last messages from a given session and ensure that the total chat tokens are within the limit
+    //start from the last message and keep adding messages until the total tokens exceed the limit
+    //if (!sessions[agentId]) return [];
+    //if (!sessions[agentId][conv_uid]) return [];
+
+    const sessionData = await cacheConnector.requester(AccessCandidate.agent(agentId))?.get(conv_uid);
+
+    let messages = sessionData ? JSONContent(sessionData).tryParse() : [];
+
+    //const messages = sessions[agentId][conv_uid].messages;
+
+    const filteredMessages: any[] = [];
+
+    let tokens = 0;
+    if (messages[0]?.role == 'system') {
+        const encoded = encode(messages[0]?.content);
+        const messageTokens = encoded.length + 3;
+        tokens += messageTokens;
+    }
+
+    for (let i = messages.length - 1; i >= 0; i--) {
+        if (messages[i].role == 'system') continue;
+        const message = messages[i];
+        const encoded = encode(message?.content);
+        const messageTokens = encoded.length + 3;
+        if (tokens + messageTokens > maxTokens) break;
+        filteredMessages.unshift(message);
+        tokens += messageTokens;
+    }
+
+    if (messages[0]?.role == 'system') filteredMessages.unshift(messages[0]);
+
+    return filteredMessages;
+}
+
+//TODO : update this implementation to use ConversationManager
+// This will allow better context management and support for tool calls
+export class LLMAssistant extends Component {
+    protected configSchema = Joi.object({
+        model: Joi.string().max(200).required(),
+        behavior: Joi.string().max(30000).allow('').label('Behavior'),
+        passthrough: Joi.boolean().optional().label('Passthrough'),
+    });
+    constructor() {
+        super();
+    }
+    init() {}
+    async process(input, config, agent: Agent) {
+        await super.process(input, config, agent);
+        const logger = this.createComponentLogger(agent, config);
+        try {
+            logger.debug('== LLM Assistant Log ==\n');
+
+            const passThrough: boolean = config.data.passthrough || false;
+            const model: string = config.data.model || 'echo';
+            const ttl = config.data.ttl || undefined;
+            let teamId = agent?.teamId;
+
+            const llmInference: LLMInference = await LLMInference.getInstance(model, AccessCandidate.agent(agent.id));
+            // if the llm is undefined, then it means we removed the model from our system
+            if (!llmInference.connector) {
+                return {
+                    _error: `The model '${model}' is not available. Please try a different one.`,
+                    _debug: logger.output,
+                };
+            }
+
+            const modelId = await agent.modelsProvider.getModelId(model);
+            logger.debug(` Model : ${modelId || model}`);
+
+            const userInput = input.UserInput;
+            const userId = input.UserId;
+            const conversationId = input.ConversationId;
+
+            let behavior = TemplateString(config.data.behavior).parse(input).result;
+            logger.debug(`[Parsed Behavior] \n${behavior}\n\n`);
+
+            //#region get max tokens
+            let maxTokens = 2048;
+
+            const isStandardLLM = await agent.modelsProvider.isStandardLLM(model);
+            const hasKey = true; //TODO : check if the user has a key
+            //const modelInfo = await agent.modelsProvider.getModelInfo(model, hasKey);
+            maxTokens = await agent.modelsProvider.getMaxCompletionTokens(model, hasKey);
+
+            // if (isStandardLLM) {
+            //     const provider = LLMRegistry.getProvider(model);
+            //     const apiKey = await VaultHelper.getAgentKey(provider, agent?.id);
+            //     maxTokens = LLMRegistry.getMaxCompletionTokens(model, !!apiKey);
+            // } else {
+            //     const team = AccessCandidate.team(teamId);
+            //     const customLLMRegistry = await CustomLLMRegistry.getInstance(team);
+            //     maxTokens = await customLLMRegistry.getMaxCompletionTokens(model);
+            // }
+            //#endregion get max tokens
+
+            const messages: any[] = await readMessagesFromSession(agent.id, userId, conversationId, Math.round(maxTokens / 2));
+
+            messages.push({ role: TLLMMessageRole.User, content: userInput });
+
+            if (messages[0]?.role != TLLMMessageRole.System) {
+                messages.unshift({ role: TLLMMessageRole.System, content: behavior });
+            }
+
+            const customParams = {
+                messages,
+            };
+
+            let response: any;
+            if (passThrough) {
+                const contentPromise = new Promise(async (resolve, reject) => {
+                    let _content = '';
+                    const eventEmitter: any = await llmInference
+                        .promptStream({
+                            contextWindow: messages,
+                            params: { ...config, model, agentId: agent.id },
+                        })
+                        .catch((error) => {
+                            console.error('Error on promptStream: ', error);
+                            reject(error);
+                        });
+                    eventEmitter.on('content', (content) => {
+                        if (typeof agent.callback === 'function') {
+                            agent.callback({ content });
+                        }
+                        agent.sse.send('llm/passthrough/content', content);
+                        _content += content;
+                    });
+                    eventEmitter.on('thinking', (thinking) => {
+                        if (typeof agent.callback === 'function') {
+                            agent.callback({ thinking });
+                        }
+                        agent.sse.send('llm/passthrough/thinking', thinking);
+                    });
+                    eventEmitter.on('end', () => {
+                        console.log('end');
+                        resolve(_content);
+                    });
+                });
+                response = await contentPromise;
+            } else {
+                response = await llmInference
+                    .prompt({ contextWindow: messages, params: { ...config, agentId: agent.id } })
+                    .catch((error) => ({ error: error }));
+            }
+
+            // in case we have the response but it's empty string, undefined or null
+            if (!response) {
+                return { _error: ' LLM Error = Empty Response!', _debug: logger.output };
+            }
+
+            if (response?.error) {
+                const error = response?.error + ' ' + (response?.details || '');
+                logger.error(` LLM Error=`, error);
+
+                return { Response: response?.data, _error: error, _debug: logger.output };
+            }
+
+            messages.push({ role: 'assistant', content: response });
+            saveMessagesToSession(agent.id, userId, conversationId, messages, ttl);
+
+            logger.debug(' Response \n', response);
+
+            const result = { Response: response };
+
+            result['_debug'] = logger.output;
+
+            return result;
+        } catch (error) {
+            return { _error: error.message, _debug: logger.output };
+        }
+    }
+}
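
In the hunk above, readMessagesFromSession enforces the context budget by walking the cached messages from newest to oldest, charging encode(content).length + 3 tokens per message, and stopping before the running total would exceed maxTokens; the system message is counted up front and re-attached at the head. LLMAssistant calls it with half of the model's max completion tokens. A standalone sketch of that windowing (illustrative, not part of the published file; the name windowMessages is ours, encode is gpt-tokenizer's encode as imported above):

    import { encode } from 'gpt-tokenizer';

    // Mirrors the loop in readMessagesFromSession: newest messages are kept first,
    // older ones are dropped once the token budget is exhausted.
    function windowMessages(messages: { role: string; content: string }[], maxTokens: number) {
        const kept: { role: string; content: string }[] = [];
        // Charge the system message (if any) against the budget before filling from the tail.
        let tokens = messages[0]?.role === 'system' ? encode(messages[0].content).length + 3 : 0;
        for (let i = messages.length - 1; i >= 0; i--) {
            if (messages[i].role === 'system') continue;
            const cost = encode(messages[i].content).length + 3;
            if (tokens + cost > maxTokens) break;
            kept.unshift(messages[i]);
            tokens += cost;
        }
        if (messages[0]?.role === 'system') kept.unshift(messages[0]);
        return kept;
    }
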