@smythos/sre 1.5.46 → 1.5.50
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG +98 -90
- package/LICENSE +18 -18
- package/README.md +135 -135
- package/dist/bundle-analysis-lazy.html +4949 -0
- package/dist/bundle-analysis.html +4949 -0
- package/dist/index.js +3 -3
- package/dist/index.js.map +1 -1
- package/dist/types/Components/MCPClient.class.d.ts +1 -0
- package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +1 -6
- package/dist/types/utils/package-manager.utils.d.ts +26 -0
- package/package.json +1 -1
- package/src/Components/APICall/APICall.class.ts +156 -156
- package/src/Components/APICall/AccessTokenManager.ts +130 -130
- package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
- package/src/Components/APICall/OAuth.helper.ts +294 -294
- package/src/Components/APICall/mimeTypeCategories.ts +46 -46
- package/src/Components/APICall/parseData.ts +167 -167
- package/src/Components/APICall/parseHeaders.ts +41 -41
- package/src/Components/APICall/parseProxy.ts +68 -68
- package/src/Components/APICall/parseUrl.ts +91 -91
- package/src/Components/APIEndpoint.class.ts +234 -234
- package/src/Components/APIOutput.class.ts +58 -58
- package/src/Components/AgentPlugin.class.ts +102 -102
- package/src/Components/Async.class.ts +155 -155
- package/src/Components/Await.class.ts +90 -90
- package/src/Components/Classifier.class.ts +158 -158
- package/src/Components/Component.class.ts +132 -132
- package/src/Components/ComponentHost.class.ts +38 -38
- package/src/Components/DataSourceCleaner.class.ts +92 -92
- package/src/Components/DataSourceIndexer.class.ts +181 -181
- package/src/Components/DataSourceLookup.class.ts +161 -161
- package/src/Components/ECMASandbox.class.ts +71 -71
- package/src/Components/FEncDec.class.ts +29 -29
- package/src/Components/FHash.class.ts +33 -33
- package/src/Components/FSign.class.ts +80 -80
- package/src/Components/FSleep.class.ts +25 -25
- package/src/Components/FTimestamp.class.ts +25 -25
- package/src/Components/FileStore.class.ts +78 -78
- package/src/Components/ForEach.class.ts +97 -97
- package/src/Components/GPTPlugin.class.ts +70 -70
- package/src/Components/GenAILLM.class.ts +586 -586
- package/src/Components/HuggingFace.class.ts +314 -314
- package/src/Components/Image/imageSettings.config.ts +70 -70
- package/src/Components/ImageGenerator.class.ts +502 -502
- package/src/Components/JSONFilter.class.ts +54 -54
- package/src/Components/LLMAssistant.class.ts +213 -213
- package/src/Components/LogicAND.class.ts +28 -28
- package/src/Components/LogicAtLeast.class.ts +85 -85
- package/src/Components/LogicAtMost.class.ts +86 -86
- package/src/Components/LogicOR.class.ts +29 -29
- package/src/Components/LogicXOR.class.ts +34 -34
- package/src/Components/MCPClient.class.ts +138 -112
- package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
- package/src/Components/MemoryReadKeyVal.class.ts +66 -66
- package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
- package/src/Components/MemoryWriteObject.class.ts +97 -97
- package/src/Components/MultimodalLLM.class.ts +128 -128
- package/src/Components/OpenAPI.class.ts +72 -72
- package/src/Components/PromptGenerator.class.ts +122 -122
- package/src/Components/ScrapflyWebScrape.class.ts +159 -159
- package/src/Components/ServerlessCode.class.ts +123 -123
- package/src/Components/TavilyWebSearch.class.ts +98 -98
- package/src/Components/VisionLLM.class.ts +104 -104
- package/src/Components/ZapierAction.class.ts +127 -127
- package/src/Components/index.ts +97 -97
- package/src/Core/AgentProcess.helper.ts +240 -240
- package/src/Core/Connector.class.ts +123 -123
- package/src/Core/ConnectorsService.ts +197 -197
- package/src/Core/DummyConnector.ts +49 -49
- package/src/Core/HookService.ts +105 -105
- package/src/Core/SmythRuntime.class.ts +235 -235
- package/src/Core/SystemEvents.ts +16 -16
- package/src/Core/boot.ts +56 -56
- package/src/config.ts +15 -15
- package/src/constants.ts +126 -126
- package/src/data/hugging-face.params.json +579 -579
- package/src/helpers/AWSLambdaCode.helper.ts +587 -587
- package/src/helpers/BinaryInput.helper.ts +331 -331
- package/src/helpers/Conversation.helper.ts +1119 -1119
- package/src/helpers/ECMASandbox.helper.ts +54 -54
- package/src/helpers/JsonContent.helper.ts +97 -97
- package/src/helpers/LocalCache.helper.ts +97 -97
- package/src/helpers/Log.helper.ts +274 -274
- package/src/helpers/OpenApiParser.helper.ts +150 -150
- package/src/helpers/S3Cache.helper.ts +147 -147
- package/src/helpers/SmythURI.helper.ts +5 -5
- package/src/helpers/Sysconfig.helper.ts +77 -77
- package/src/helpers/TemplateString.helper.ts +243 -243
- package/src/helpers/TypeChecker.helper.ts +329 -329
- package/src/index.ts +3 -3
- package/src/index.ts.bak +3 -3
- package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
- package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
- package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
- package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
- package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
- package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
- package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
- package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -297
- package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
- package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
- package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
- package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
- package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
- package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
- package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
- package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
- package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
- package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
- package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
- package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
- package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
- package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
- package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
- package/src/subsystems/IO/CLI.service/index.ts +9 -9
- package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
- package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
- package/src/subsystems/IO/Log.service/index.ts +13 -13
- package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
- package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
- package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
- package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
- package/src/subsystems/IO/NKV.service/index.ts +14 -14
- package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
- package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
- package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
- package/src/subsystems/IO/Router.service/index.ts +11 -11
- package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
- package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
- package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
- package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
- package/src/subsystems/IO/Storage.service/index.ts +13 -13
- package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
- package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
- package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
- package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
- package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
- package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
- package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
- package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
- package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
- package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
- package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
- package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
- package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
- package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
- package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
- package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
- package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
- package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
- package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -488
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
- package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
- package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
- package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
- package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
- package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
- package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
- package/src/subsystems/LLMManager/custom-models.ts +854 -854
- package/src/subsystems/LLMManager/models.ts +2540 -2540
- package/src/subsystems/LLMManager/paramMappings.ts +69 -69
- package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
- package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
- package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
- package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
- package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
- package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
- package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
- package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
- package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
- package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
- package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
- package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
- package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
- package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
- package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
- package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
- package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
- package/src/subsystems/Security/Account.service/index.ts +14 -14
- package/src/subsystems/Security/Credentials.helper.ts +62 -62
- package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
- package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
- package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
- package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
- package/src/subsystems/Security/SecureConnector.class.ts +110 -110
- package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
- package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
- package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
- package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
- package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
- package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
- package/src/subsystems/Security/Vault.service/index.ts +12 -12
- package/src/types/ACL.types.ts +104 -104
- package/src/types/AWS.types.ts +10 -10
- package/src/types/Agent.types.ts +61 -61
- package/src/types/AgentLogger.types.ts +17 -17
- package/src/types/Cache.types.ts +1 -1
- package/src/types/Common.types.ts +2 -2
- package/src/types/LLM.types.ts +496 -496
- package/src/types/Redis.types.ts +8 -8
- package/src/types/SRE.types.ts +64 -64
- package/src/types/Security.types.ts +14 -14
- package/src/types/Storage.types.ts +5 -5
- package/src/types/VectorDB.types.ts +86 -86
- package/src/utils/base64.utils.ts +275 -275
- package/src/utils/cli.utils.ts +68 -68
- package/src/utils/data.utils.ts +322 -322
- package/src/utils/date-time.utils.ts +22 -22
- package/src/utils/general.utils.ts +238 -238
- package/src/utils/index.ts +12 -12
- package/src/utils/lazy-client.ts +261 -261
- package/src/utils/numbers.utils.ts +13 -13
- package/src/utils/oauth.utils.ts +35 -35
- package/src/utils/string.utils.ts +414 -414
- package/src/utils/url.utils.ts +19 -19
- package/src/utils/validation.utils.ts +74 -74
- package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
|
@@ -1,1119 +1,1119 @@
|
|
|
1
|
-
import { AgentProcess } from '@sre/Core/AgentProcess.helper';
|
|
2
|
-
import { ConnectorService } from '@sre/Core/ConnectorsService';
|
|
3
|
-
import { Logger } from '@sre/helpers/Log.helper';
|
|
4
|
-
import { LLMInference } from '@sre/LLMManager/LLM.inference';
|
|
5
|
-
import { LLMContext } from '@sre/MemoryManager/LLMContext';
|
|
6
|
-
import { TAgentProcessParams } from '@sre/types/Agent.types';
|
|
7
|
-
import { ILLMContextStore, TLLMEvent, TLLMModel, ToolData } from '@sre/types/LLM.types';
|
|
8
|
-
import { isUrl } from '@sre/utils/data.utils';
|
|
9
|
-
import { processWithConcurrencyLimit, uid } from '@sre/utils/general.utils';
|
|
10
|
-
import axios, { AxiosRequestConfig } from 'axios';
|
|
11
|
-
import EventEmitter from 'events';
|
|
12
|
-
import { JSONContent } from './JsonContent.helper';
|
|
13
|
-
import { OpenAPIParser } from './OpenApiParser.helper';
|
|
14
|
-
import { Match, TemplateString } from './TemplateString.helper';
|
|
15
|
-
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
|
|
16
|
-
import { EventSource, FetchLike } from 'eventsource';
|
|
17
|
-
import { hookAsyncWithContext } from '@sre/Core/HookService';
|
|
18
|
-
import { randomUUID } from 'crypto';
|
|
19
|
-
import * as acorn from 'acorn';
|
|
20
|
-
|
|
21
|
-
const console = Logger('ConversationHelper');
|
|
22
|
-
type FunctionDeclaration = {
|
|
23
|
-
name: string;
|
|
24
|
-
description: string;
|
|
25
|
-
properties: Record<string, any>;
|
|
26
|
-
requiredFields: string[];
|
|
27
|
-
};
|
|
28
|
-
type ToolParams = {
|
|
29
|
-
type: string;
|
|
30
|
-
endpoint: string;
|
|
31
|
-
args: Record<string, any>;
|
|
32
|
-
method: string;
|
|
33
|
-
baseUrl: string;
|
|
34
|
-
headers?: Record<string, string>;
|
|
35
|
-
agentCallback?: (data: any) => void;
|
|
36
|
-
};
|
|
37
|
-
|
|
38
|
-
//TODO: handle authentication
|
|
39
|
-
export class Conversation extends EventEmitter {
|
|
40
|
-
private _agentId: string = '';
|
|
41
|
-
private _systemPrompt;
|
|
42
|
-
private userDefinedSystemPrompt: string = '';
|
|
43
|
-
public toolChoice: string = 'auto';
|
|
44
|
-
public get systemPrompt() {
|
|
45
|
-
return this._systemPrompt;
|
|
46
|
-
}
|
|
47
|
-
public set systemPrompt(systemPrompt) {
|
|
48
|
-
this._systemPrompt = systemPrompt;
|
|
49
|
-
if (this._context) this._context.systemPrompt = systemPrompt;
|
|
50
|
-
}
|
|
51
|
-
public assistantName;
|
|
52
|
-
|
|
53
|
-
private _reqMethods;
|
|
54
|
-
private _toolsConfig;
|
|
55
|
-
private _endpoints;
|
|
56
|
-
private _baseUrl;
|
|
57
|
-
|
|
58
|
-
private _status = '';
|
|
59
|
-
private _currentWaitPromise;
|
|
60
|
-
|
|
61
|
-
private _llmContextStore: ILLMContextStore;
|
|
62
|
-
private _context: LLMContext;
|
|
63
|
-
|
|
64
|
-
private _maxContextSize = 1024 * 128;
|
|
65
|
-
private _maxOutputTokens = 1024 * 8;
|
|
66
|
-
private _teamId: string = undefined;
|
|
67
|
-
private _agentVersion: string = undefined;
|
|
68
|
-
public agentData: any;
|
|
69
|
-
|
|
70
|
-
public get context() {
|
|
71
|
-
return this._context;
|
|
72
|
-
}
|
|
73
|
-
|
|
74
|
-
private _lastError;
|
|
75
|
-
private _spec;
|
|
76
|
-
private _customToolsDeclarations: FunctionDeclaration[] = [];
|
|
77
|
-
private _customToolsHandlers: Record<string, (args: Record<string, any>) => Promise<any>> = {};
|
|
78
|
-
public stop = false;
|
|
79
|
-
public set spec(specSource) {
|
|
80
|
-
this.ready.then(() => {
|
|
81
|
-
this._status = '';
|
|
82
|
-
this.loadSpecFromSource(specSource).then(async (spec) => {
|
|
83
|
-
if (!spec) {
|
|
84
|
-
this._status = 'error';
|
|
85
|
-
this.emit('error', 'Invalid OpenAPI specification data format');
|
|
86
|
-
throw new Error('Invalid OpenAPI specification data format');
|
|
87
|
-
}
|
|
88
|
-
this._spec = spec;
|
|
89
|
-
|
|
90
|
-
// teamId is required to load custom LLMs, we must assign it before updateModel()
|
|
91
|
-
await this.assignTeamIdFromAgentId(this._agentId);
|
|
92
|
-
|
|
93
|
-
await this.updateModel(this._model);
|
|
94
|
-
this._status = 'ready';
|
|
95
|
-
});
|
|
96
|
-
});
|
|
97
|
-
}
|
|
98
|
-
|
|
99
|
-
public set model(model: string | TLLMModel) {
|
|
100
|
-
this.ready.then(async () => {
|
|
101
|
-
this._status = '';
|
|
102
|
-
await this.updateModel(model);
|
|
103
|
-
this._status = 'ready';
|
|
104
|
-
});
|
|
105
|
-
}
|
|
106
|
-
public get model() {
|
|
107
|
-
return this._model;
|
|
108
|
-
}
|
|
109
|
-
|
|
110
|
-
constructor(
|
|
111
|
-
private _model: string | TLLMModel,
|
|
112
|
-
private _specSource?: string | Record<string, any>,
|
|
113
|
-
private _settings?: {
|
|
114
|
-
maxContextSize?: number;
|
|
115
|
-
maxOutputTokens?: number;
|
|
116
|
-
systemPrompt?: string;
|
|
117
|
-
toolChoice?: string;
|
|
118
|
-
store?: ILLMContextStore;
|
|
119
|
-
experimentalCache?: boolean;
|
|
120
|
-
toolsStrategy?: (toolsConfig) => any;
|
|
121
|
-
agentId?: string;
|
|
122
|
-
agentVersion?: string;
|
|
123
|
-
}
|
|
124
|
-
) {
|
|
125
|
-
//TODO: handle loading previous session (messages)
|
|
126
|
-
super();
|
|
127
|
-
|
|
128
|
-
//this event listener avoids unhandled errors that can cause crashes
|
|
129
|
-
this.on('error', (error) => {
|
|
130
|
-
this._lastError = error;
|
|
131
|
-
console.warn('Conversation Error: ', error?.message);
|
|
132
|
-
});
|
|
133
|
-
this._maxContextSize =
|
|
134
|
-
_settings.maxContextSize || (this._model as TLLMModel).tokens || (this._model as TLLMModel).keyOptions?.tokens || this._maxContextSize;
|
|
135
|
-
this._maxOutputTokens =
|
|
136
|
-
_settings.maxOutputTokens ||
|
|
137
|
-
(this._model as TLLMModel).completionTokens ||
|
|
138
|
-
(this._model as TLLMModel).keyOptions?.completionTokens ||
|
|
139
|
-
this._maxOutputTokens;
|
|
140
|
-
|
|
141
|
-
if (_settings?.systemPrompt) {
|
|
142
|
-
this.userDefinedSystemPrompt = _settings.systemPrompt;
|
|
143
|
-
}
|
|
144
|
-
if (_settings?.toolChoice) {
|
|
145
|
-
this.toolChoice = _settings.toolChoice;
|
|
146
|
-
}
|
|
147
|
-
|
|
148
|
-
if (_settings?.store) {
|
|
149
|
-
this._llmContextStore = _settings.store;
|
|
150
|
-
}
|
|
151
|
-
|
|
152
|
-
this._agentVersion = _settings?.agentVersion;
|
|
153
|
-
|
|
154
|
-
(async () => {
|
|
155
|
-
if (_specSource) {
|
|
156
|
-
this.loadSpecFromSource(_specSource)
|
|
157
|
-
.then(async (spec) => {
|
|
158
|
-
if (!spec) {
|
|
159
|
-
this._status = 'error';
|
|
160
|
-
this.emit('error', 'Unable to parse OpenAPI specifications');
|
|
161
|
-
throw new Error('Invalid OpenAPI specification data format');
|
|
162
|
-
}
|
|
163
|
-
this._spec = spec;
|
|
164
|
-
|
|
165
|
-
if (!this._agentId && _settings?.agentId) this._agentId = _settings.agentId;
|
|
166
|
-
if (!this._agentId) this._agentId = 'FAKE-AGENT-ID'; //We use a fake agent ID to avoid ACL check errors
|
|
167
|
-
|
|
168
|
-
// teamId is required to load custom LLMs, we must assign it before updateModel()
|
|
169
|
-
await this.assignTeamIdFromAgentId(this._agentId);
|
|
170
|
-
|
|
171
|
-
await this.updateModel(this._model);
|
|
172
|
-
|
|
173
|
-
this._status = 'ready';
|
|
174
|
-
})
|
|
175
|
-
.catch((error) => {
|
|
176
|
-
this._status = 'error';
|
|
177
|
-
this.emit('error', error);
|
|
178
|
-
});
|
|
179
|
-
} else {
|
|
180
|
-
await this.updateModel(this._model);
|
|
181
|
-
this._status = 'ready';
|
|
182
|
-
}
|
|
183
|
-
})();
|
|
184
|
-
}
|
|
185
|
-
|
|
186
|
-
public get ready() {
|
|
187
|
-
if (this._currentWaitPromise) return this._currentWaitPromise;
|
|
188
|
-
this._currentWaitPromise = new Promise((resolve, reject) => {
|
|
189
|
-
if (this._status) {
|
|
190
|
-
return resolve(this._status);
|
|
191
|
-
}
|
|
192
|
-
|
|
193
|
-
const maxWaitTime = 30000;
|
|
194
|
-
let waitTime = 0;
|
|
195
|
-
const interval = 100;
|
|
196
|
-
|
|
197
|
-
const wait = setInterval(() => {
|
|
198
|
-
if (this._status) {
|
|
199
|
-
clearInterval(wait);
|
|
200
|
-
return resolve(this._status);
|
|
201
|
-
} else {
|
|
202
|
-
waitTime += interval;
|
|
203
|
-
if (waitTime >= maxWaitTime) {
|
|
204
|
-
clearInterval(wait);
|
|
205
|
-
return reject('Timeout: Failed to prepare data');
|
|
206
|
-
}
|
|
207
|
-
}
|
|
208
|
-
}, interval);
|
|
209
|
-
});
|
|
210
|
-
|
|
211
|
-
return this._currentWaitPromise;
|
|
212
|
-
}
|
|
213
|
-
|
|
214
|
-
//TODO : handle attachments
|
|
215
|
-
@hookAsyncWithContext('Conversation.prompt', async (instance: Conversation) => {
|
|
216
|
-
await instance.ready;
|
|
217
|
-
|
|
218
|
-
return {
|
|
219
|
-
teamId: instance._teamId,
|
|
220
|
-
agentId: instance._agentId,
|
|
221
|
-
model: instance._model,
|
|
222
|
-
};
|
|
223
|
-
})
|
|
224
|
-
public async prompt(message?: string | any, toolHeaders = {}, concurrentToolCalls = 4, abortSignal?: AbortSignal) {
|
|
225
|
-
// if an error occured while streaming, we need to propagate it so for this, we register a one time error listener
|
|
226
|
-
let error = null;
|
|
227
|
-
const errListener = (err) => (error = err);
|
|
228
|
-
this.once('error', errListener);
|
|
229
|
-
const result = await this.streamPrompt(message, toolHeaders, concurrentToolCalls, abortSignal);
|
|
230
|
-
|
|
231
|
-
// if an error event occured, throw the error
|
|
232
|
-
if (error) {
|
|
233
|
-
throw error;
|
|
234
|
-
}
|
|
235
|
-
|
|
236
|
-
this.removeListener('error', errListener);
|
|
237
|
-
return result;
|
|
238
|
-
}
|
|
239
|
-
|
|
240
|
-
//TODO : handle attachments
|
|
241
|
-
@hookAsyncWithContext('Conversation.streamPrompt', async (instance: Conversation) => {
|
|
242
|
-
await instance.ready;
|
|
243
|
-
|
|
244
|
-
return {
|
|
245
|
-
teamId: instance._teamId,
|
|
246
|
-
agentId: instance._agentId,
|
|
247
|
-
model: instance._model,
|
|
248
|
-
};
|
|
249
|
-
})
|
|
250
|
-
public async streamPrompt(message?: string | any, toolHeaders = {}, concurrentToolCalls = 4, abortSignal?: AbortSignal) {
|
|
251
|
-
let options = typeof message === 'object' ? message : { message };
|
|
252
|
-
message = options?.message;
|
|
253
|
-
const files = options?.files;
|
|
254
|
-
|
|
255
|
-
if (message) {
|
|
256
|
-
//initial call, reset stop flag
|
|
257
|
-
|
|
258
|
-
this.stop = false;
|
|
259
|
-
}
|
|
260
|
-
if (this.stop) {
|
|
261
|
-
this.emit('interrupted', 'interrupted');
|
|
262
|
-
this.emit('end');
|
|
263
|
-
return;
|
|
264
|
-
}
|
|
265
|
-
await this.ready;
|
|
266
|
-
|
|
267
|
-
// Add an abort handler
|
|
268
|
-
if (abortSignal) {
|
|
269
|
-
abortSignal.addEventListener('abort', () => {
|
|
270
|
-
//this.emit('error', { name: 'AbortError', message: 'Request aborted by user!' });
|
|
271
|
-
this.emit('aborted', 'Aborted by user!');
|
|
272
|
-
//const error = new Error('Request aborted by user!');
|
|
273
|
-
//error.name = 'AbortError';
|
|
274
|
-
//throw error;
|
|
275
|
-
});
|
|
276
|
-
}
|
|
277
|
-
|
|
278
|
-
const passThroughtContinueMessage = 'Continue with the next tool call if there are any, or just inform the user that you are done';
|
|
279
|
-
//let promises = [];
|
|
280
|
-
let _content = '';
|
|
281
|
-
const reqMethods = this._reqMethods;
|
|
282
|
-
const toolsConfig = this._toolsConfig;
|
|
283
|
-
const endpoints = this._endpoints;
|
|
284
|
-
const baseUrl = this._baseUrl;
|
|
285
|
-
const message_id = 'msg_' + randomUUID();
|
|
286
|
-
const isDebugSession = toolHeaders['X-DEBUG'];
|
|
287
|
-
|
|
288
|
-
/* ==================== STEP ENTRY ==================== */
|
|
289
|
-
// console.debug('Request to LLM with the given model, messages and functions properties.', {
|
|
290
|
-
// model: this.model,
|
|
291
|
-
// message,
|
|
292
|
-
// toolsConfig,
|
|
293
|
-
// });
|
|
294
|
-
/* ==================== STEP ENTRY ==================== */
|
|
295
|
-
const llmInference: LLMInference = await LLMInference.getInstance(this.model, AccessCandidate.team(this._teamId));
|
|
296
|
-
|
|
297
|
-
if (message) this._context.addUserMessage(message, message_id);
|
|
298
|
-
|
|
299
|
-
const contextWindow = await this._context.getContextWindow(this._maxContextSize, this._maxOutputTokens);
|
|
300
|
-
|
|
301
|
-
let maxTokens = this._maxOutputTokens;
|
|
302
|
-
if (typeof this.model === 'object' && this.model?.params?.maxTokens) {
|
|
303
|
-
maxTokens = this.model.params.maxTokens;
|
|
304
|
-
}
|
|
305
|
-
|
|
306
|
-
const eventEmitter: any = await llmInference
|
|
307
|
-
.promptStream({
|
|
308
|
-
contextWindow,
|
|
309
|
-
files,
|
|
310
|
-
params: {
|
|
311
|
-
model: this.model,
|
|
312
|
-
toolsConfig: this._settings?.toolsStrategy ? this._settings.toolsStrategy(toolsConfig) : toolsConfig,
|
|
313
|
-
maxTokens,
|
|
314
|
-
cache: this._settings?.experimentalCache,
|
|
315
|
-
agentId: this._agentId,
|
|
316
|
-
abortSignal,
|
|
317
|
-
},
|
|
318
|
-
})
|
|
319
|
-
.catch((error) => {
|
|
320
|
-
console.error('Error on promptStream: ', error);
|
|
321
|
-
this.emit(TLLMEvent.Error, error);
|
|
322
|
-
});
|
|
323
|
-
|
|
324
|
-
// remove listeners from llm event emitter to stop receiving stream data
|
|
325
|
-
if (abortSignal) {
|
|
326
|
-
abortSignal.addEventListener('abort', () => {
|
|
327
|
-
eventEmitter.removeAllListeners();
|
|
328
|
-
});
|
|
329
|
-
}
|
|
330
|
-
if (!eventEmitter || eventEmitter.error) {
|
|
331
|
-
throw new Error('[LLM Request Error]');
|
|
332
|
-
}
|
|
333
|
-
|
|
334
|
-
if (message) this.emit('start');
|
|
335
|
-
eventEmitter.on('data', (data) => {
|
|
336
|
-
if (this.stop) return;
|
|
337
|
-
this.emit('data', data);
|
|
338
|
-
});
|
|
339
|
-
|
|
340
|
-
eventEmitter.on(TLLMEvent.Thinking, (thinking) => {
|
|
341
|
-
if (this.stop) return;
|
|
342
|
-
this.emit(TLLMEvent.Thinking, thinking);
|
|
343
|
-
});
|
|
344
|
-
|
|
345
|
-
eventEmitter.on(TLLMEvent.Content, (content) => {
|
|
346
|
-
if (this.stop) return;
|
|
347
|
-
// if (toolHeaders['x-passthrough']) {
|
|
348
|
-
// console.log('Passthrough skiped content ', content);
|
|
349
|
-
// return;
|
|
350
|
-
// }
|
|
351
|
-
//const lastMessage = this._context?.messages?.[this._context?.messages?.length - 1];
|
|
352
|
-
//const skip = lastMessage?.__smyth_data__?.internal;
|
|
353
|
-
|
|
354
|
-
//skip if the content is the last generated message after a passthrough content
|
|
355
|
-
// if (skip) {
|
|
356
|
-
// let s = true;
|
|
357
|
-
// }
|
|
358
|
-
_content += content;
|
|
359
|
-
this.emit(TLLMEvent.Content, content);
|
|
360
|
-
});
|
|
361
|
-
|
|
362
|
-
let finishReason = 'stop';
|
|
363
|
-
|
|
364
|
-
let toolsPromise = new Promise((resolve, reject) => {
|
|
365
|
-
let hasTools = false;
|
|
366
|
-
let hasError = false;
|
|
367
|
-
let passThroughContent = '';
|
|
368
|
-
|
|
369
|
-
eventEmitter.on(TLLMEvent.Error, (error) => {
|
|
370
|
-
hasError = true;
|
|
371
|
-
reject(error);
|
|
372
|
-
});
|
|
373
|
-
|
|
374
|
-
eventEmitter.on(TLLMEvent.ToolInfo, async (toolsData, thinkingBlocks = []) => {
|
|
375
|
-
if (this.stop) return;
|
|
376
|
-
hasTools = true;
|
|
377
|
-
let llmMessage: any = {
|
|
378
|
-
role: 'assistant',
|
|
379
|
-
content: _content,
|
|
380
|
-
tool_calls: [],
|
|
381
|
-
};
|
|
382
|
-
|
|
383
|
-
if (thinkingBlocks?.length > 0) {
|
|
384
|
-
this.emit(
|
|
385
|
-
'thoughtProcess',
|
|
386
|
-
thinkingBlocks
|
|
387
|
-
.filter((block) => block.type === 'thinking')
|
|
388
|
-
.map((block) => block.thinking || '')
|
|
389
|
-
.join('\n')
|
|
390
|
-
);
|
|
391
|
-
|
|
392
|
-
llmMessage.thinkingBlocks = thinkingBlocks;
|
|
393
|
-
}
|
|
394
|
-
|
|
395
|
-
llmMessage.tool_calls = toolsData.map((tool) => {
|
|
396
|
-
return {
|
|
397
|
-
id: tool.id,
|
|
398
|
-
type: tool.type,
|
|
399
|
-
function: {
|
|
400
|
-
name: tool.name,
|
|
401
|
-
arguments: tool.arguments,
|
|
402
|
-
},
|
|
403
|
-
};
|
|
404
|
-
});
|
|
405
|
-
|
|
406
|
-
//if (llmMessage.tool_calls?.length <= 0) return;
|
|
407
|
-
|
|
408
|
-
this.emit(TLLMEvent.ToolInfo, toolsData);
|
|
409
|
-
|
|
410
|
-
//initialize the agent callback logic
|
|
411
|
-
const _agentCallback = (data) => {
|
|
412
|
-
if (this.stop) return;
|
|
413
|
-
//if (typeof data !== 'string') return;
|
|
414
|
-
let content = '';
|
|
415
|
-
let thinking = '';
|
|
416
|
-
if (typeof data === 'object') {
|
|
417
|
-
if (data.content) {
|
|
418
|
-
content = data.content;
|
|
419
|
-
|
|
420
|
-
passThroughContent += content;
|
|
421
|
-
eventEmitter.emit(TLLMEvent.Content, content);
|
|
422
|
-
}
|
|
423
|
-
if (data.thinking) {
|
|
424
|
-
thinking = data.thinking;
|
|
425
|
-
eventEmitter.emit(TLLMEvent.Thinking, thinking);
|
|
426
|
-
}
|
|
427
|
-
return;
|
|
428
|
-
}
|
|
429
|
-
if (typeof data === 'string') {
|
|
430
|
-
passThroughContent += data;
|
|
431
|
-
eventEmitter.emit(TLLMEvent.Content, data);
|
|
432
|
-
}
|
|
433
|
-
|
|
434
|
-
//passThroughContent += data;
|
|
435
|
-
//this is currently used to handle agent callbacks when running local agents
|
|
436
|
-
//this.emit('agentCallback', data);
|
|
437
|
-
|
|
438
|
-
//this.emit('content', data);
|
|
439
|
-
//this.emit('content', data);
|
|
440
|
-
//eventEmitter.emit('content', data);
|
|
441
|
-
};
|
|
442
|
-
|
|
443
|
-
const toolProcessingTasks = toolsData.map(
|
|
444
|
-
(tool: { index: number; name: string; type: string; arguments: Record<string, any> }) => async () => {
|
|
445
|
-
const endpoint = endpoints?.get(tool?.name) || tool?.name;
|
|
446
|
-
// Sometimes we have object response from the LLM such as Anthropic
|
|
447
|
-
|
|
448
|
-
let args = typeof tool?.arguments === 'string' ? JSONContent(tool?.arguments).tryParse() || {} : tool?.arguments;
|
|
449
|
-
|
|
450
|
-
if (args?.error) {
|
|
451
|
-
throw new Error('[Tool] Arguments Parsing Error\n' + JSON.stringify({ message: args?.error }));
|
|
452
|
-
}
|
|
453
|
-
|
|
454
|
-
//await beforeFunctionCall(llmMessage, toolsData[tool.index]);
|
|
455
|
-
// TODO [Forhad]: Make sure toolsData[tool.index] and tool do the same thing
|
|
456
|
-
this.emit('beforeToolCall', { tool, args }, llmMessage); //deprecated
|
|
457
|
-
this.emit(TLLMEvent.ToolCall, { tool, _llmRequest: llmMessage });
|
|
458
|
-
|
|
459
|
-
const toolArgs = {
|
|
460
|
-
type: tool?.type,
|
|
461
|
-
method: reqMethods?.get(tool?.name),
|
|
462
|
-
endpoint,
|
|
463
|
-
args,
|
|
464
|
-
baseUrl,
|
|
465
|
-
headers: toolHeaders,
|
|
466
|
-
agentCallback: _agentCallback,
|
|
467
|
-
};
|
|
468
|
-
|
|
469
|
-
let { data: functionResponse, error } = await this.useTool(toolArgs, abortSignal);
|
|
470
|
-
|
|
471
|
-
if (error) {
|
|
472
|
-
functionResponse = typeof error === 'object' && typeof error !== null ? JSON.stringify(error) : error;
|
|
473
|
-
}
|
|
474
|
-
|
|
475
|
-
const result = functionResponse;
|
|
476
|
-
|
|
477
|
-
functionResponse =
|
|
478
|
-
typeof functionResponse === 'object' && typeof functionResponse !== null
|
|
479
|
-
? JSON.stringify(functionResponse)
|
|
480
|
-
: functionResponse;
|
|
481
|
-
|
|
482
|
-
//await afterFunctionCall(functionResponse, toolsData[tool.index]);
|
|
483
|
-
this.emit('afterToolCall', { tool, args }, functionResponse); // Deprecated
|
|
484
|
-
this.emit(TLLMEvent.ToolResult, { tool, result });
|
|
485
|
-
|
|
486
|
-
return { ...tool, result: functionResponse };
|
|
487
|
-
}
|
|
488
|
-
);
|
|
489
|
-
|
|
490
|
-
const processedToolsData = await processWithConcurrencyLimit<ToolData>(toolProcessingTasks, concurrentToolCalls);
|
|
491
|
-
|
|
492
|
-
//if (!passThroughContent) {
|
|
493
|
-
|
|
494
|
-
if (!passThroughContent) {
|
|
495
|
-
this._context.addToolMessage(llmMessage, processedToolsData, message_id);
|
|
496
|
-
//delete toolHeaders['x-passthrough'];
|
|
497
|
-
} else {
|
|
498
|
-
//this._context.addAssistantMessage(passThroughContent, message_id);
|
|
499
|
-
|
|
500
|
-
//llmMessage.content += '\n' + passThroughContent;
|
|
501
|
-
this._context.addToolMessage(llmMessage, processedToolsData, message_id, { passThrough: true });
|
|
502
|
-
|
|
503
|
-
//this._context.addAssistantMessage(passThroughContent, message_id, { passthrough: true });
|
|
504
|
-
//this should not be stored in the persistent conversation store
|
|
505
|
-
//it's just a workaround to avoid generating more content after passthrough content
|
|
506
|
-
//this._context.addUserMessage(passThroughtContinueMessage, message_id, { internal: true });
|
|
507
|
-
//toolHeaders['x-passthrough'] = 'true';
|
|
508
|
-
}
|
|
509
|
-
|
|
510
|
-
this.streamPrompt(null, toolHeaders, concurrentToolCalls, abortSignal).then(resolve).catch(reject);
|
|
511
|
-
|
|
512
|
-
//} else {
|
|
513
|
-
//TODO : add passthrough content to the context window ??
|
|
514
|
-
|
|
515
|
-
//if passThroughContent is not empty, it means that the current agent streamed content through components
|
|
516
|
-
//resolve(passThroughContent);
|
|
517
|
-
//}
|
|
518
|
-
//const result = await resolve(await this.streamPrompt(null, toolHeaders, concurrentToolCalls));
|
|
519
|
-
//console.log('Result after tool call: ', result);
|
|
520
|
-
});
|
|
521
|
-
|
|
522
|
-
eventEmitter.on(TLLMEvent.End, async (toolsData, usage_data, _finishReason) => {
|
|
523
|
-
if (_finishReason) finishReason = _finishReason;
|
|
524
|
-
if (usage_data) {
|
|
525
|
-
//FIXME : normalize the usage data format
|
|
526
|
-
this.emit(TLLMEvent.Usage, usage_data);
|
|
527
|
-
}
|
|
528
|
-
if (hasError) return;
|
|
529
|
-
|
|
530
|
-
if (!hasTools || passThroughContent) {
|
|
531
|
-
//console.log(' ===> resolved content no tool', _content);
|
|
532
|
-
//this._context.push({ role: 'assistant', content: _content });
|
|
533
|
-
const lastMessage = this._context?.messages?.[this._context?.messages?.length - 1];
|
|
534
|
-
let metadata;
|
|
535
|
-
if (lastMessage?.content?.includes(passThroughtContinueMessage) && lastMessage?.__smyth_data__?.internal) {
|
|
536
|
-
metadata = { internal: true };
|
|
537
|
-
}
|
|
538
|
-
this._context.addAssistantMessage(_content, message_id, metadata);
|
|
539
|
-
resolve(''); //the content were already emitted through 'content' event
|
|
540
|
-
}
|
|
541
|
-
});
|
|
542
|
-
});
|
|
543
|
-
|
|
544
|
-
const toolsContent = await toolsPromise.catch((error) => {
|
|
545
|
-
console.error('Error in toolsPromise: ', error);
|
|
546
|
-
//this.emit('error', error);
|
|
547
|
-
this.emit(TLLMEvent.Error, error);
|
|
548
|
-
return '';
|
|
549
|
-
});
|
|
550
|
-
_content += toolsContent;
|
|
551
|
-
//let content = JSONContent(_content).tryParse();
|
|
552
|
-
|
|
553
|
-
// let streamPromise = new Promise((resolve, reject) => {
|
|
554
|
-
// eventEmitter.on('end', async () => {
|
|
555
|
-
// if (toolsPromise) await toolsPromise;
|
|
556
|
-
|
|
557
|
-
// let content = JSONContent(_content).tryParse();
|
|
558
|
-
// resolve({ content });
|
|
559
|
-
// });
|
|
560
|
-
// });
|
|
561
|
-
|
|
562
|
-
// promises.push(streamPromise);
|
|
563
|
-
|
|
564
|
-
//await Promise.all(promises);
|
|
565
|
-
//return content;
|
|
566
|
-
|
|
567
|
-
if (message) {
|
|
568
|
-
//console.log('main content', content);
|
|
569
|
-
//this._context.push({ role: 'assistant', content: content });
|
|
570
|
-
|
|
571
|
-
if (finishReason !== 'stop') {
|
|
572
|
-
this.emit(TLLMEvent.Interrupted, finishReason);
|
|
573
|
-
}
|
|
574
|
-
this.emit(TLLMEvent.End);
|
|
575
|
-
} else {
|
|
576
|
-
//console.log('tool content', content);
|
|
577
|
-
}
|
|
578
|
-
|
|
579
|
-
return _content;
|
|
580
|
-
}
|
|
581
|
-
|
|
582
|
-
private resolveToolEndpoint(baseUrl: string, method: string, endpoint: string, params: Record<string, any>): string {
|
|
583
|
-
//handle query params
|
|
584
|
-
let templateParams = {};
|
|
585
|
-
if (params) {
|
|
586
|
-
const parameters = this._spec?.paths?.[endpoint]?.[method.toLowerCase()]?.parameters || [];
|
|
587
|
-
for (let p of parameters) {
|
|
588
|
-
if (p.in === 'path') {
|
|
589
|
-
templateParams[p.name] = params[p.name] || '';
|
|
590
|
-
delete params[p.name];
|
|
591
|
-
}
|
|
592
|
-
}
|
|
593
|
-
}
|
|
594
|
-
const parsedEndpoint = TemplateString(endpoint).parse(templateParams, Match.singleCurly).clean().result;
|
|
595
|
-
|
|
596
|
-
// Create a new URL object using the base URL and endpoint
|
|
597
|
-
const url = new URL(parsedEndpoint, baseUrl);
|
|
598
|
-
|
|
599
|
-
// Iterate over the params object and append each key/value pair to the URL search parameters
|
|
600
|
-
Object.keys(params).forEach((key) => {
|
|
601
|
-
url.searchParams.append(key, params[key]);
|
|
602
|
-
});
|
|
603
|
-
|
|
604
|
-
// Return the full URL as a string
|
|
605
|
-
return url.toString();
|
|
606
|
-
}
|
|
607
|
-
|
|
608
|
-
private async useTool(
|
|
609
|
-
params: ToolParams,
|
|
610
|
-
abortSignal?: AbortSignal
|
|
611
|
-
): Promise<{
|
|
612
|
-
data: any;
|
|
613
|
-
error;
|
|
614
|
-
}> {
|
|
615
|
-
if (this.stop) {
|
|
616
|
-
return { data: null, error: 'Conversation Interrupted' };
|
|
617
|
-
}
|
|
618
|
-
|
|
619
|
-
const { type, endpoint, args, method, baseUrl, headers = {}, agentCallback } = params;
|
|
620
|
-
|
|
621
|
-
if (type === 'function') {
|
|
622
|
-
const toolHandler = this._customToolsHandlers[endpoint];
|
|
623
|
-
if (toolHandler) {
|
|
624
|
-
try {
|
|
625
|
-
const result = await toolHandler(args);
|
|
626
|
-
return { data: result, error: null };
|
|
627
|
-
} catch (error) {
|
|
628
|
-
return { data: null, error: error?.message || 'Custom tool handler failed' };
|
|
629
|
-
}
|
|
630
|
-
}
|
|
631
|
-
try {
|
|
632
|
-
const url = this.resolveToolEndpoint(baseUrl, method, endpoint, method == 'get' ? args : {});
|
|
633
|
-
|
|
634
|
-
const reqConfig: AxiosRequestConfig = {
|
|
635
|
-
method,
|
|
636
|
-
url,
|
|
637
|
-
headers: {
|
|
638
|
-
...headers,
|
|
639
|
-
},
|
|
640
|
-
signal: abortSignal,
|
|
641
|
-
};
|
|
642
|
-
|
|
643
|
-
if (method !== 'get') {
|
|
644
|
-
if (Object.keys(args).length) {
|
|
645
|
-
reqConfig.data = args;
|
|
646
|
-
}
|
|
647
|
-
//(reqConfig.headers as Record<string, unknown>)['Content-Type'] = 'application/json';
|
|
648
|
-
reqConfig.headers['Content-Type'] = 'application/json';
|
|
649
|
-
}
|
|
650
|
-
|
|
651
|
-
console.debug('Calling tool: ', reqConfig);
|
|
652
|
-
|
|
653
|
-
reqConfig.headers['X-CACHE-ID'] = this._context?.llmCache?.id;
|
|
654
|
-
|
|
655
|
-
/*
|
|
656
|
-
* Objective for the following conditions:
|
|
657
|
-
* - In case it is not a debug call and there is no monitor id, then we need to run the agent locally to reduce latency
|
|
658
|
-
* - but if it a debug call, we need to forward req to sre-builder-debugger since it holds the debug promises
|
|
659
|
-
* - or if there is a monitor id, we need to forward req to sre-builder-debugger since it holds the monitor SSE connections.
|
|
660
|
-
* - a remote call is often needed for file parsing be default agent we inject, it should not be loaded locally.
|
|
661
|
-
* So the objecive is mainly reducing latency when possible
|
|
662
|
-
*/
|
|
663
|
-
//TODO : implement a timeout for the tool call
|
|
664
|
-
const requiresRemoteCall =
|
|
665
|
-
reqConfig.headers['X-DEBUG'] !== undefined ||
|
|
666
|
-
reqConfig.headers['X-MONITOR-ID'] !== undefined ||
|
|
667
|
-
reqConfig.headers['X-AGENT-REMOTE-CALL'] !== undefined;
|
|
668
|
-
if (
|
|
669
|
-
reqConfig.url.includes('localhost') ||
|
|
670
|
-
(reqConfig.headers['X-AGENT-ID'] && !requiresRemoteCall)
|
|
671
|
-
//empty string is accepted
|
|
672
|
-
|
|
673
|
-
// || reqConfig.url.includes('localagent') //* commented to allow debugging live sessions as the req needs to reach sre-builder-debugger
|
|
674
|
-
) {
|
|
675
|
-
console.log('RUNNING AGENT LOCALLY');
|
|
676
|
-
let agentProcess;
|
|
677
|
-
if (this.agentData === this._specSource) {
|
|
678
|
-
//the agent was loaded from data
|
|
679
|
-
agentProcess = AgentProcess.load(this.agentData, this._agentVersion);
|
|
680
|
-
} else {
|
|
681
|
-
//the agent was loaded from a spec
|
|
682
|
-
agentProcess = AgentProcess.load(
|
|
683
|
-
reqConfig.headers['X-AGENT-ID'] || this._agentId,
|
|
684
|
-
reqConfig.headers['X-AGENT-VERSION'] || this._agentVersion
|
|
685
|
-
);
|
|
686
|
-
}
|
|
687
|
-
//if it's a local agent, invoke it directly
|
|
688
|
-
|
|
689
|
-
const response = await agentProcess.run(reqConfig as TAgentProcessParams, agentCallback);
|
|
690
|
-
return { data: response.data, error: null };
|
|
691
|
-
} else {
|
|
692
|
-
console.log('RUNNING AGENT REMOTELY');
|
|
693
|
-
let eventSource;
|
|
694
|
-
|
|
695
|
-
// if debug mode is on OR the user attached a monitor to the call, then we need to attach a monitor to the agent call
|
|
696
|
-
if ((reqConfig.headers['X-DEBUG'] && reqConfig.headers['X-AGENT-ID']) || reqConfig.headers['X-MONITOR-ID']) {
|
|
697
|
-
console.log('ATTACHING MONITOR TO REMOTE AGENT CALL');
|
|
698
|
-
const monitUrl = reqConfig.url.split('/api')[0] + '/agent/' + reqConfig.headers['X-AGENT-ID'] + '/monitor';
|
|
699
|
-
|
|
700
|
-
// Create custom fetch implementation that includes our headers
|
|
701
|
-
const customFetch: FetchLike = (url, init) => {
|
|
702
|
-
return fetch(url, {
|
|
703
|
-
...init,
|
|
704
|
-
headers: {
|
|
705
|
-
...(init?.headers || {}),
|
|
706
|
-
...Object.fromEntries(Object.entries(reqConfig.headers).map(([k, v]) => [k, String(v)])),
|
|
707
|
-
},
|
|
708
|
-
});
|
|
709
|
-
};
|
|
710
|
-
|
|
711
|
-
const eventSource = new EventSource(monitUrl, {
|
|
712
|
-
fetch: customFetch,
|
|
713
|
-
});
|
|
714
|
-
let monitorId = '';
|
|
715
|
-
|
|
716
|
-
eventSource.addEventListener('init', (event) => {
|
|
717
|
-
monitorId = event.data;
|
|
718
|
-
console.log('monitorId', monitorId);
|
|
719
|
-
if (reqConfig.headers['X-MONITOR-ID']) {
|
|
720
|
-
// an external monitor was sent, so we do not override it
|
|
721
|
-
reqConfig.headers['X-MONITOR-ID'] = `${reqConfig.headers['X-MONITOR-ID']},${monitorId}`;
|
|
722
|
-
} else {
|
|
723
|
-
reqConfig.headers['X-MONITOR-ID'] = monitorId;
|
|
724
|
-
}
|
|
725
|
-
});
|
|
726
|
-
eventSource.addEventListener('llm/passthrough/content', (event: any) => {
|
|
727
|
-
if (params.agentCallback) params.agentCallback({ content: event.data.replace(/\\n/g, '\n') });
|
|
728
|
-
});
|
|
729
|
-
eventSource.addEventListener('llm/passthrough/thinking', (event: any) => {
|
|
730
|
-
if (params.agentCallback) params.agentCallback({ thinking: event.data.replace(/\\n/g, '\n') });
|
|
731
|
-
});
|
|
732
|
-
|
|
733
|
-
await new Promise((resolve) => {
|
|
734
|
-
let maxTime = 5 * 1000; //5 seconds
|
|
735
|
-
let itv = setInterval(() => {
|
|
736
|
-
if (monitorId || maxTime <= 0) {
|
|
737
|
-
clearInterval(itv);
|
|
738
|
-
resolve(true);
|
|
739
|
-
}
|
|
740
|
-
maxTime -= 100;
|
|
741
|
-
}, 100);
|
|
742
|
-
});
|
|
743
|
-
}
|
|
744
|
-
|
|
745
|
-
//if it's a remote agent, call the API via HTTP
|
|
746
|
-
const response = await axios.request(reqConfig);
|
|
747
|
-
|
|
748
|
-
if (eventSource) {
|
|
749
|
-
eventSource.close();
|
|
750
|
-
console.log('eventSource closed');
|
|
751
|
-
}
|
|
752
|
-
return { data: response.data, error: null };
|
|
753
|
-
}
|
|
754
|
-
} catch (error: any) {
|
|
755
|
-
console.warn('Failed to call Tool: ', baseUrl, endpoint);
|
|
756
|
-
console.warn(' ====>', error);
|
|
757
|
-
return { data: null, error: error?.response?.data || error?.message };
|
|
758
|
-
}
|
|
759
|
-
}
|
|
760
|
-
|
|
761
|
-
return { data: null, error: `'${type}' tool type not supported at the moment` };
|
|
762
|
-
}
|
|
763
|
-
|
|
764
|
-
public async addTool(tool: {
|
|
765
|
-
name: string;
|
|
766
|
-
description: string;
|
|
767
|
-
arguments?: Record<string, any> | string[];
|
|
768
|
-
handler: (args: Record<string, any>) => Promise<any>;
|
|
769
|
-
inputs?: any[];
|
|
770
|
-
}) {
|
|
771
|
-
if (!tool.arguments) {
|
|
772
|
-
//if no arguments are provided, we need to extract them from the function
|
|
773
|
-
const toolFunction = tool.handler as Function;
|
|
774
|
-
const openApiArgs = this.extractArgsAsOpenAPI(toolFunction);
|
|
775
|
-
const _arguments: any = {};
|
|
776
|
-
for (let arg of openApiArgs) {
|
|
777
|
-
_arguments[arg.name] = arg.schema;
|
|
778
|
-
if (tool.inputs && arg.schema.properties) {
|
|
779
|
-
const required = [];
|
|
780
|
-
for (let prop in arg.schema.properties) {
|
|
781
|
-
const input = tool.inputs?.find((i) => i.name === prop);
|
|
782
|
-
if (!arg.schema.properties[prop].description) {
|
|
783
|
-
arg.schema.properties[prop].description = input?.description;
|
|
784
|
-
}
|
|
785
|
-
if (!input?.optional) {
|
|
786
|
-
required.push(prop);
|
|
787
|
-
}
|
|
788
|
-
}
|
|
789
|
-
if (required.length) {
|
|
790
|
-
arg.schema.required = required;
|
|
791
|
-
}
|
|
792
|
-
}
|
|
793
|
-
}
|
|
794
|
-
|
|
795
|
-
tool.arguments = _arguments;
|
|
796
|
-
tool.handler = async (argsObj: any) => {
|
|
797
|
-
const args = Object.values(argsObj);
|
|
798
|
-
const result = await toolFunction(...args);
|
|
799
|
-
return result;
|
|
800
|
-
};
|
|
801
|
-
}
|
|
802
|
-
|
|
803
|
-
const requiredFields = Object.values(tool.arguments)
|
|
804
|
-
.map((arg) => (arg.required ? arg.name : null))
|
|
805
|
-
.filter((arg) => arg);
|
|
806
|
-
|
|
807
|
-
const properties = {};
|
|
808
|
-
for (let entry in tool.arguments) {
|
|
809
|
-
properties[entry] = {
|
|
810
|
-
type: tool.arguments[entry].type || 'string',
|
|
811
|
-
properties: tool.arguments[entry].properties,
|
|
812
|
-
description: tool.arguments[entry].description,
|
|
813
|
-
...(tool.arguments[entry].type === 'array' ? { items: { type: tool.arguments[entry].items?.type || 'string' } } : {}),
|
|
814
|
-
};
|
|
815
|
-
}
|
|
816
|
-
const toolDefinition = {
|
|
817
|
-
name: tool.name,
|
|
818
|
-
description: tool.description,
|
|
819
|
-
properties,
|
|
820
|
-
requiredFields,
|
|
821
|
-
};
|
|
822
|
-
this._customToolsDeclarations.push(toolDefinition);
|
|
823
|
-
this._customToolsHandlers[tool.name] = tool.handler;
|
|
824
|
-
|
|
825
|
-
const llmInference: LLMInference = await LLMInference.getInstance(this.model, AccessCandidate.team(this._teamId));
|
|
826
|
-
const toolsConfig: any = llmInference.connector.formatToolsConfig({
|
|
827
|
-
type: 'function',
|
|
828
|
-
toolDefinitions: [toolDefinition],
|
|
829
|
-
toolChoice: this.toolChoice,
|
|
830
|
-
});
|
|
831
|
-
|
|
832
|
-
if (this._toolsConfig) this._toolsConfig.tools.push(...toolsConfig?.tools);
|
|
833
|
-
else this._toolsConfig = toolsConfig;
|
|
834
|
-
}
|
|
835
|
-
/**
|
|
836
|
-
* updates LLM model, if spec is available, it will update the tools config
|
|
837
|
-
* @param model
|
|
838
|
-
*/
|
|
839
|
-
// TODO [Forhad]: For now updateModel does not required await, but when we will have tools implementation in custom model then we need to await for it
|
|
840
|
-
private async updateModel(model: string | TLLMModel) {
|
|
841
|
-
try {
|
|
842
|
-
this._model = model;
|
|
843
|
-
|
|
844
|
-
if (this._spec) {
|
|
845
|
-
this._reqMethods = OpenAPIParser.mapReqMethods(this._spec?.paths);
|
|
846
|
-
this._endpoints = OpenAPIParser.mapEndpoints(this._spec?.paths);
|
|
847
|
-
this._baseUrl = this._spec?.servers?.[0].url;
|
|
848
|
-
|
|
849
|
-
const functionDeclarations = this.getFunctionDeclarations(this._spec);
|
|
850
|
-
functionDeclarations.push(...this._customToolsDeclarations);
|
|
851
|
-
const llmInference: LLMInference = await LLMInference.getInstance(this._model, AccessCandidate.team(this._teamId));
|
|
852
|
-
if (!llmInference.connector) {
|
|
853
|
-
this.emit('error', 'No connector found for model: ' + this._model);
|
|
854
|
-
return;
|
|
855
|
-
}
|
|
856
|
-
this._toolsConfig = llmInference.connector.formatToolsConfig({
|
|
857
|
-
type: 'function',
|
|
858
|
-
toolDefinitions: functionDeclarations,
|
|
859
|
-
toolChoice: this.toolChoice,
|
|
860
|
-
});
|
|
861
|
-
|
|
862
|
-
let messages = [];
|
|
863
|
-
if (this._context) messages = this._context.messages; // preserve messages
|
|
864
|
-
|
|
865
|
-
this._context = new LLMContext(llmInference, this.systemPrompt, this._llmContextStore);
|
|
866
|
-
} else {
|
|
867
|
-
this._toolsConfig = null;
|
|
868
|
-
this._reqMethods = null;
|
|
869
|
-
this._endpoints = null;
|
|
870
|
-
this._baseUrl = null;
|
|
871
|
-
}
|
|
872
|
-
} catch (error) {
|
|
873
|
-
this.emit('error', error);
|
|
874
|
-
}
|
|
875
|
-
}
|
|
876
|
-
|
|
877
|
-
/**
|
|
878
|
-
* this function is used to patch the spec with missing fields that are required for the tool to work
|
|
879
|
-
* @param spec
|
|
880
|
-
*/
|
|
881
|
-
private patchSpec(spec: Record<string, any>) {
|
|
882
|
-
const paths = spec?.paths;
|
|
883
|
-
for (const path in paths) {
|
|
884
|
-
const pathData = paths[path];
|
|
885
|
-
|
|
886
|
-
// it's possible we have multiple methods for a single path
|
|
887
|
-
for (const key in pathData) {
|
|
888
|
-
const data = pathData[key];
|
|
889
|
-
if (!data?.operationId) {
|
|
890
|
-
//normalize path and use it as operationId
|
|
891
|
-
data.operationId = path.replace(/\//g, '_').replace(/{|}/g, '').replace(/\./g, '_');
|
|
892
|
-
}
|
|
893
|
-
}
|
|
894
|
-
}
|
|
895
|
-
return spec;
|
|
896
|
-
}
|
|
897
|
-
/**
|
|
898
|
-
* Loads OpenAPI specification from source
|
|
899
|
-
* @param specSource
|
|
900
|
-
* @returns
|
|
901
|
-
*/
|
|
902
|
-
private async loadSpecFromSource(specSource: string | Record<string, any>) {
|
|
903
|
-
if (typeof specSource === 'object') {
|
|
904
|
-
//is this a valid OpenAPI spec?
|
|
905
|
-
if (OpenAPIParser.isValidOpenAPI(specSource)) {
|
|
906
|
-
this.systemPrompt = specSource?.info?.description || '';
|
|
907
|
-
return this.patchSpec(specSource);
|
|
908
|
-
}
|
|
909
|
-
//is this a valid agent data?
|
|
910
|
-
if (typeof specSource?.behavior === 'string' && specSource?.components && specSource?.connections) {
|
|
911
|
-
this.agentData = specSource; //agent loaded from data directly
|
|
912
|
-
return await this.loadSpecFromAgent(specSource);
|
|
913
|
-
}
|
|
914
|
-
|
|
915
|
-
return null;
|
|
916
|
-
}
|
|
917
|
-
|
|
918
|
-
if (typeof specSource === 'string') {
|
|
919
|
-
//is this an openAPI url?
|
|
920
|
-
if (isUrl(specSource as string)) {
|
|
921
|
-
const spec = await OpenAPIParser.getJsonFromUrl(specSource as string);
|
|
922
|
-
|
|
923
|
-
if (spec.info?.description) this.systemPrompt = spec.info.description;
|
|
924
|
-
|
|
925
|
-
// we always overwrite system prompt with user defined one
|
|
926
|
-
if (this.userDefinedSystemPrompt) this.systemPrompt = this.userDefinedSystemPrompt;
|
|
927
|
-
|
|
928
|
-
if (spec.info?.title) this.assistantName = spec.info.title;
|
|
929
|
-
|
|
930
|
-
const specUrl = new URL(specSource as string);
|
|
931
|
-
const defaultBaseUrl = specUrl.origin;
|
|
932
|
-
|
|
933
|
-
if (!spec?.servers) spec.servers = [{ url: defaultBaseUrl }];
|
|
934
|
-
if (spec.servers?.length == 0) spec.servers = [{ url: defaultBaseUrl }];
|
|
935
|
-
|
|
936
|
-
if (this.assistantName) {
|
|
937
|
-
this.systemPrompt = `Assistant Name : ${this.assistantName}\n\n${this.systemPrompt}`;
|
|
938
|
-
}
|
|
939
|
-
|
|
940
|
-
//this._agentId = specUrl.hostname; //just set an agent ID in order to identify the agent in SRE //FIXME: maybe this requires a better solution
|
|
941
|
-
return this.patchSpec(spec);
|
|
942
|
-
}
|
|
943
|
-
//is this an agentId ?
|
|
944
|
-
const agentDataConnector = ConnectorService.getAgentDataConnector();
|
|
945
|
-
const agentId = specSource as string;
|
|
946
|
-
this._agentId = agentId;
|
|
947
|
-
|
|
948
|
-
if (this._agentVersion === undefined) {
|
|
949
|
-
const isDeployed = await agentDataConnector.isDeployed(agentId);
|
|
950
|
-
this._agentVersion = isDeployed ? 'latest' : '';
|
|
951
|
-
}
|
|
952
|
-
|
|
953
|
-
this.agentData = await agentDataConnector.getAgentData(agentId, this._agentVersion).catch((error) => null);
|
|
954
|
-
if (!this.agentData) return null;
|
|
955
|
-
|
|
956
|
-
const spec = await this.loadSpecFromAgent(this.agentData);
|
|
957
|
-
return spec;
|
|
958
|
-
}
|
|
959
|
-
}
|
|
960
|
-
private async loadSpecFromAgent(agentData: Record<string, any>) {
|
|
961
|
-
//handle the case where agentData object contains the agent schema directly
|
|
962
|
-
//agents retrieved from the database have a wrapping object with agent name and version number
|
|
963
|
-
//local agent might include the agent data directly
|
|
964
|
-
if (agentData?.components) {
|
|
965
|
-
agentData = { name: agentData?.name, data: agentData, version: '1.0.0' };
|
|
966
|
-
}
|
|
967
|
-
|
|
968
|
-
const agentDataConnector = ConnectorService.getAgentDataConnector();
|
|
969
|
-
this.systemPrompt = agentData?.data?.behavior || this.systemPrompt;
|
|
970
|
-
|
|
971
|
-
// we always overwrite system prompt with user defined one
|
|
972
|
-
if (this.userDefinedSystemPrompt) this.systemPrompt = this.userDefinedSystemPrompt;
|
|
973
|
-
|
|
974
|
-
this.assistantName = agentData?.data?.name || agentData?.data?.templateInfo?.name || this.assistantName;
|
|
975
|
-
if (this.assistantName) {
|
|
976
|
-
this.systemPrompt = `Assistant Name : ${this.assistantName}\n\n${this.systemPrompt}`;
|
|
977
|
-
}
|
|
978
|
-
|
|
979
|
-
const spec = await agentDataConnector.getOpenAPIJSON(agentData, 'http://localhost/', this._agentVersion, true).catch((error) => null);
|
|
980
|
-
return this.patchSpec(spec);
|
|
981
|
-
}
|
|
982
|
-
|
|
983
|
-
/**
|
|
984
|
-
* Extracts function declarations from OpenAPI specification
|
|
985
|
-
* @param spec
|
|
986
|
-
* @returns
|
|
987
|
-
*/
|
|
988
|
-
private getFunctionDeclarations(spec): FunctionDeclaration[] {
|
|
989
|
-
const paths = spec?.paths;
|
|
990
|
-
const reqMethods = OpenAPIParser.mapReqMethods(paths);
|
|
991
|
-
|
|
992
|
-
let declarations: FunctionDeclaration[] = [];
|
|
993
|
-
|
|
994
|
-
for (const path in paths) {
|
|
995
|
-
const pathData = paths[path];
|
|
996
|
-
|
|
997
|
-
// it's possible we have multiple methods for a single path
|
|
998
|
-
for (const key in pathData) {
|
|
999
|
-
const data = pathData[key];
|
|
1000
|
-
|
|
1001
|
-
if (!data?.operationId) continue;
|
|
1002
|
-
|
|
1003
|
-
const method = reqMethods.get(data?.operationId) || 'get';
|
|
1004
|
-
|
|
1005
|
-
let properties = {};
|
|
1006
|
-
let requiredFields: string[] = [];
|
|
1007
|
-
|
|
1008
|
-
if (method.toLowerCase() === 'get') {
|
|
1009
|
-
const params = data?.parameters || [];
|
|
1010
|
-
for (const prop of params) {
|
|
1011
|
-
properties[prop.name] = {
|
|
1012
|
-
...prop.schema,
|
|
1013
|
-
description: prop.description,
|
|
1014
|
-
};
|
|
1015
|
-
|
|
1016
|
-
if (prop.required === true) {
|
|
1017
|
-
requiredFields.push(prop?.name || '');
|
|
1018
|
-
}
|
|
1019
|
-
}
|
|
1020
|
-
} else {
|
|
1021
|
-
properties = data?.requestBody?.content?.['application/json']?.schema?.properties;
|
|
1022
|
-
requiredFields = data?.requestBody?.content?.['application/json']?.schema?.required;
|
|
1023
|
-
|
|
1024
|
-
// Open AI doesn't support 'required' to be boolean inside property
|
|
1025
|
-
for (const prop in properties) {
|
|
1026
|
-
delete properties[prop]?.required;
|
|
1027
|
-
}
|
|
1028
|
-
}
|
|
1029
|
-
|
|
1030
|
-
if (!properties) properties = {};
|
|
1031
|
-
if (!requiredFields) requiredFields = [];
|
|
1032
|
-
|
|
1033
|
-
const declaration = {
|
|
1034
|
-
name: data?.operationId,
|
|
1035
|
-
description: data?.description || data?.summary || '',
|
|
1036
|
-
properties,
|
|
1037
|
-
requiredFields,
|
|
1038
|
-
};
|
|
1039
|
-
declarations.push(declaration);
|
|
1040
|
-
}
|
|
1041
|
-
}
|
|
1042
|
-
|
|
1043
|
-
return declarations;
|
|
1044
|
-
}
|
|
1045
|
-
|
|
1046
|
-
private async assignTeamIdFromAgentId(agentId: string) {
|
|
1047
|
-
if (agentId) {
|
|
1048
|
-
const accountConnector = ConnectorService.getAccountConnector();
|
|
1049
|
-
const teamId = await accountConnector.getCandidateTeam(AccessCandidate.agent(agentId))?.catch(() => '');
|
|
1050
|
-
this._teamId = teamId || '';
|
|
1051
|
-
}
|
|
1052
|
-
}
|
|
1053
|
-
|
|
1054
|
-
private extractArgsAsOpenAPI(fn) {
|
|
1055
|
-
const ast = acorn.parse(`(${fn.toString()})`, { ecmaVersion: 'latest' });
|
|
1056
|
-
const params = (ast.body[0] as any).expression.params;
|
|
1057
|
-
|
|
1058
|
-
let counter = 0;
|
|
1059
|
-
function handleParam(param) {
|
|
1060
|
-
if (param.type === 'Identifier') {
|
|
1061
|
-
return {
|
|
1062
|
-
name: param.name,
|
|
1063
|
-
in: 'query',
|
|
1064
|
-
required: true,
|
|
1065
|
-
schema: { type: 'string', name: param.name, required: true },
|
|
1066
|
-
};
|
|
1067
|
-
}
|
|
1068
|
-
|
|
1069
|
-
if (param.type === 'AssignmentPattern' && param.left.type === 'Identifier') {
|
|
1070
|
-
return {
|
|
1071
|
-
name: param.left.name,
|
|
1072
|
-
in: 'query',
|
|
1073
|
-
required: false,
|
|
1074
|
-
schema: { type: 'string', name: param.left.name, required: false },
|
|
1075
|
-
};
|
|
1076
|
-
}
|
|
1077
|
-
|
|
1078
|
-
if (param.type === 'RestElement' && param.argument.type === 'Identifier') {
|
|
1079
|
-
return {
|
|
1080
|
-
name: param.argument.name,
|
|
1081
|
-
in: 'query',
|
|
1082
|
-
required: false,
|
|
1083
|
-
schema: { type: 'array', items: { type: 'string' } },
|
|
1084
|
-
};
|
|
1085
|
-
}
|
|
1086
|
-
|
|
1087
|
-
if (param.type === 'ObjectPattern') {
|
|
1088
|
-
// For destructured objects, output as a single parameter with nested fields
|
|
1089
|
-
const name = `object___${counter++}`;
|
|
1090
|
-
return {
|
|
1091
|
-
name,
|
|
1092
|
-
in: 'query',
|
|
1093
|
-
required: true,
|
|
1094
|
-
schema: {
|
|
1095
|
-
type: 'object',
|
|
1096
|
-
required: true,
|
|
1097
|
-
name,
|
|
1098
|
-
properties: Object.fromEntries(
|
|
1099
|
-
param.properties.map((prop) => {
|
|
1100
|
-
const keyName = prop.key.name || '[unknown]';
|
|
1101
|
-
return [keyName, { type: 'string' }]; // default to string
|
|
1102
|
-
})
|
|
1103
|
-
),
|
|
1104
|
-
},
|
|
1105
|
-
};
|
|
1106
|
-
}
|
|
1107
|
-
|
|
1108
|
-
const name = `unknown___${counter++}`;
|
|
1109
|
-
return {
|
|
1110
|
-
name,
|
|
1111
|
-
in: 'query',
|
|
1112
|
-
required: true,
|
|
1113
|
-
schema: { type: 'string', name, required: true },
|
|
1114
|
-
};
|
|
1115
|
-
}
|
|
1116
|
-
|
|
1117
|
-
return params.map(handleParam);
|
|
1118
|
-
}
|
|
1119
|
-
}
|
|
1
|
+
import { AgentProcess } from '@sre/Core/AgentProcess.helper';
|
|
2
|
+
import { ConnectorService } from '@sre/Core/ConnectorsService';
|
|
3
|
+
import { Logger } from '@sre/helpers/Log.helper';
|
|
4
|
+
import { LLMInference } from '@sre/LLMManager/LLM.inference';
|
|
5
|
+
import { LLMContext } from '@sre/MemoryManager/LLMContext';
|
|
6
|
+
import { TAgentProcessParams } from '@sre/types/Agent.types';
|
|
7
|
+
import { ILLMContextStore, TLLMEvent, TLLMModel, ToolData } from '@sre/types/LLM.types';
|
|
8
|
+
import { isUrl } from '@sre/utils/data.utils';
|
|
9
|
+
import { processWithConcurrencyLimit, uid } from '@sre/utils/general.utils';
|
|
10
|
+
import axios, { AxiosRequestConfig } from 'axios';
|
|
11
|
+
import EventEmitter from 'events';
|
|
12
|
+
import { JSONContent } from './JsonContent.helper';
|
|
13
|
+
import { OpenAPIParser } from './OpenApiParser.helper';
|
|
14
|
+
import { Match, TemplateString } from './TemplateString.helper';
|
|
15
|
+
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
|
|
16
|
+
import { EventSource, FetchLike } from 'eventsource';
|
|
17
|
+
import { hookAsyncWithContext } from '@sre/Core/HookService';
|
|
18
|
+
import { randomUUID } from 'crypto';
|
|
19
|
+
import * as acorn from 'acorn';
|
|
20
|
+
|
|
21
|
+
const console = Logger('ConversationHelper');
|
|
22
|
+
type FunctionDeclaration = {
|
|
23
|
+
name: string;
|
|
24
|
+
description: string;
|
|
25
|
+
properties: Record<string, any>;
|
|
26
|
+
requiredFields: string[];
|
|
27
|
+
};
|
|
28
|
+
type ToolParams = {
|
|
29
|
+
type: string;
|
|
30
|
+
endpoint: string;
|
|
31
|
+
args: Record<string, any>;
|
|
32
|
+
method: string;
|
|
33
|
+
baseUrl: string;
|
|
34
|
+
headers?: Record<string, string>;
|
|
35
|
+
agentCallback?: (data: any) => void;
|
|
36
|
+
};
|
|
37
|
+
|
|
38
|
+
//TODO: handle authentication
export class Conversation extends EventEmitter {
    private _agentId: string = '';
    private _systemPrompt; // current effective system prompt, mirrored into _context by the setter
    private userDefinedSystemPrompt: string = '';
    public toolChoice: string = 'auto';
    public get systemPrompt() {
        return this._systemPrompt;
    }
    public set systemPrompt(systemPrompt) {
        this._systemPrompt = systemPrompt;
        // keep the active LLM context in sync when it already exists
        if (this._context) this._context.systemPrompt = systemPrompt;
    }
    public assistantName;

    // Tool routing tables derived from the OpenAPI spec (populated by updateModel)
    private _reqMethods; // tool name -> HTTP method
    private _toolsConfig; // connector-formatted tools configuration
    private _endpoints; // tool name -> endpoint path
    private _baseUrl;

    private _status = ''; // '' = initializing; 'ready' or 'error' once settled (see `ready`)
    private _currentWaitPromise; // cached promise returned by the `ready` getter

    private _llmContextStore: ILLMContextStore;
    private _context: LLMContext;

    // Defaults; may be overridden via settings or model metadata in the constructor.
    private _maxContextSize = 1024 * 128;
    private _maxOutputTokens = 1024 * 8;
    private _teamId: string = undefined;
    private _agentVersion: string = undefined;
    public agentData: any;

    public get context() {
        return this._context;
    }

    private _lastError; // last error observed by the class-level 'error' listener
    private _spec; // parsed OpenAPI specification
    private _customToolsDeclarations: FunctionDeclaration[] = [];
    private _customToolsHandlers: Record<string, (args: Record<string, any>) => Promise<any>> = {};
    public stop = false; // when true, in-flight streaming/tool calls are interrupted
|
|
79
|
+
public set spec(specSource) {
|
|
80
|
+
this.ready.then(() => {
|
|
81
|
+
this._status = '';
|
|
82
|
+
this.loadSpecFromSource(specSource).then(async (spec) => {
|
|
83
|
+
if (!spec) {
|
|
84
|
+
this._status = 'error';
|
|
85
|
+
this.emit('error', 'Invalid OpenAPI specification data format');
|
|
86
|
+
throw new Error('Invalid OpenAPI specification data format');
|
|
87
|
+
}
|
|
88
|
+
this._spec = spec;
|
|
89
|
+
|
|
90
|
+
// teamId is required to load custom LLMs, we must assign it before updateModel()
|
|
91
|
+
await this.assignTeamIdFromAgentId(this._agentId);
|
|
92
|
+
|
|
93
|
+
await this.updateModel(this._model);
|
|
94
|
+
this._status = 'ready';
|
|
95
|
+
});
|
|
96
|
+
});
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
public set model(model: string | TLLMModel) {
|
|
100
|
+
this.ready.then(async () => {
|
|
101
|
+
this._status = '';
|
|
102
|
+
await this.updateModel(model);
|
|
103
|
+
this._status = 'ready';
|
|
104
|
+
});
|
|
105
|
+
}
|
|
106
|
+
public get model() {
|
|
107
|
+
return this._model;
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
constructor(
|
|
111
|
+
private _model: string | TLLMModel,
|
|
112
|
+
private _specSource?: string | Record<string, any>,
|
|
113
|
+
private _settings?: {
|
|
114
|
+
maxContextSize?: number;
|
|
115
|
+
maxOutputTokens?: number;
|
|
116
|
+
systemPrompt?: string;
|
|
117
|
+
toolChoice?: string;
|
|
118
|
+
store?: ILLMContextStore;
|
|
119
|
+
experimentalCache?: boolean;
|
|
120
|
+
toolsStrategy?: (toolsConfig) => any;
|
|
121
|
+
agentId?: string;
|
|
122
|
+
agentVersion?: string;
|
|
123
|
+
}
|
|
124
|
+
) {
|
|
125
|
+
//TODO: handle loading previous session (messages)
|
|
126
|
+
super();
|
|
127
|
+
|
|
128
|
+
//this event listener avoids unhandled errors that can cause crashes
|
|
129
|
+
this.on('error', (error) => {
|
|
130
|
+
this._lastError = error;
|
|
131
|
+
console.warn('Conversation Error: ', error?.message);
|
|
132
|
+
});
|
|
133
|
+
this._maxContextSize =
|
|
134
|
+
_settings.maxContextSize || (this._model as TLLMModel).tokens || (this._model as TLLMModel).keyOptions?.tokens || this._maxContextSize;
|
|
135
|
+
this._maxOutputTokens =
|
|
136
|
+
_settings.maxOutputTokens ||
|
|
137
|
+
(this._model as TLLMModel).completionTokens ||
|
|
138
|
+
(this._model as TLLMModel).keyOptions?.completionTokens ||
|
|
139
|
+
this._maxOutputTokens;
|
|
140
|
+
|
|
141
|
+
if (_settings?.systemPrompt) {
|
|
142
|
+
this.userDefinedSystemPrompt = _settings.systemPrompt;
|
|
143
|
+
}
|
|
144
|
+
if (_settings?.toolChoice) {
|
|
145
|
+
this.toolChoice = _settings.toolChoice;
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
if (_settings?.store) {
|
|
149
|
+
this._llmContextStore = _settings.store;
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
this._agentVersion = _settings?.agentVersion;
|
|
153
|
+
|
|
154
|
+
(async () => {
|
|
155
|
+
if (_specSource) {
|
|
156
|
+
this.loadSpecFromSource(_specSource)
|
|
157
|
+
.then(async (spec) => {
|
|
158
|
+
if (!spec) {
|
|
159
|
+
this._status = 'error';
|
|
160
|
+
this.emit('error', 'Unable to parse OpenAPI specifications');
|
|
161
|
+
throw new Error('Invalid OpenAPI specification data format');
|
|
162
|
+
}
|
|
163
|
+
this._spec = spec;
|
|
164
|
+
|
|
165
|
+
if (!this._agentId && _settings?.agentId) this._agentId = _settings.agentId;
|
|
166
|
+
if (!this._agentId) this._agentId = 'FAKE-AGENT-ID'; //We use a fake agent ID to avoid ACL check errors
|
|
167
|
+
|
|
168
|
+
// teamId is required to load custom LLMs, we must assign it before updateModel()
|
|
169
|
+
await this.assignTeamIdFromAgentId(this._agentId);
|
|
170
|
+
|
|
171
|
+
await this.updateModel(this._model);
|
|
172
|
+
|
|
173
|
+
this._status = 'ready';
|
|
174
|
+
})
|
|
175
|
+
.catch((error) => {
|
|
176
|
+
this._status = 'error';
|
|
177
|
+
this.emit('error', error);
|
|
178
|
+
});
|
|
179
|
+
} else {
|
|
180
|
+
await this.updateModel(this._model);
|
|
181
|
+
this._status = 'ready';
|
|
182
|
+
}
|
|
183
|
+
})();
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
public get ready() {
|
|
187
|
+
if (this._currentWaitPromise) return this._currentWaitPromise;
|
|
188
|
+
this._currentWaitPromise = new Promise((resolve, reject) => {
|
|
189
|
+
if (this._status) {
|
|
190
|
+
return resolve(this._status);
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
const maxWaitTime = 30000;
|
|
194
|
+
let waitTime = 0;
|
|
195
|
+
const interval = 100;
|
|
196
|
+
|
|
197
|
+
const wait = setInterval(() => {
|
|
198
|
+
if (this._status) {
|
|
199
|
+
clearInterval(wait);
|
|
200
|
+
return resolve(this._status);
|
|
201
|
+
} else {
|
|
202
|
+
waitTime += interval;
|
|
203
|
+
if (waitTime >= maxWaitTime) {
|
|
204
|
+
clearInterval(wait);
|
|
205
|
+
return reject('Timeout: Failed to prepare data');
|
|
206
|
+
}
|
|
207
|
+
}
|
|
208
|
+
}, interval);
|
|
209
|
+
});
|
|
210
|
+
|
|
211
|
+
return this._currentWaitPromise;
|
|
212
|
+
}
|
|
213
|
+
|
|
214
|
+
//TODO : handle attachments
|
|
215
|
+
@hookAsyncWithContext('Conversation.prompt', async (instance: Conversation) => {
|
|
216
|
+
await instance.ready;
|
|
217
|
+
|
|
218
|
+
return {
|
|
219
|
+
teamId: instance._teamId,
|
|
220
|
+
agentId: instance._agentId,
|
|
221
|
+
model: instance._model,
|
|
222
|
+
};
|
|
223
|
+
})
|
|
224
|
+
public async prompt(message?: string | any, toolHeaders = {}, concurrentToolCalls = 4, abortSignal?: AbortSignal) {
|
|
225
|
+
// if an error occured while streaming, we need to propagate it so for this, we register a one time error listener
|
|
226
|
+
let error = null;
|
|
227
|
+
const errListener = (err) => (error = err);
|
|
228
|
+
this.once('error', errListener);
|
|
229
|
+
const result = await this.streamPrompt(message, toolHeaders, concurrentToolCalls, abortSignal);
|
|
230
|
+
|
|
231
|
+
// if an error event occured, throw the error
|
|
232
|
+
if (error) {
|
|
233
|
+
throw error;
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
this.removeListener('error', errListener);
|
|
237
|
+
return result;
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
    //TODO : handle attachments
    /**
     * Streams a prompt through the configured LLM, executing any tool calls it
     * requests (with bounded concurrency) and recursing until the model stops
     * asking for tools. Progress is surfaced through events: 'start', 'data',
     * TLLMEvent.Thinking/Content/ToolInfo/ToolCall/ToolResult/Usage/Interrupted/End,
     * plus 'aborted'/'interrupted' for cancellation paths.
     * Resolves with the accumulated assistant text.
     */
    @hookAsyncWithContext('Conversation.streamPrompt', async (instance: Conversation) => {
        await instance.ready;

        return {
            teamId: instance._teamId,
            agentId: instance._agentId,
            model: instance._model,
        };
    })
    public async streamPrompt(message?: string | any, toolHeaders = {}, concurrentToolCalls = 4, abortSignal?: AbortSignal) {
        // `message` may be a plain string or an options object ({ message, files })
        let options = typeof message === 'object' ? message : { message };
        message = options?.message;
        const files = options?.files;

        if (message) {
            //initial call (a tool-recursion call passes message = null), reset stop flag
            this.stop = false;
        }
        if (this.stop) {
            this.emit('interrupted', 'interrupted');
            this.emit('end');
            return;
        }
        await this.ready;

        // Add an abort handler
        if (abortSignal) {
            abortSignal.addEventListener('abort', () => {
                this.emit('aborted', 'Aborted by user!');
            });
        }

        // Sentinel message used to detect internal "continue" turns after passthrough content.
        const passThroughtContinueMessage = 'Continue with the next tool call if there are any, or just inform the user that you are done';
        let _content = '';
        const reqMethods = this._reqMethods;
        const toolsConfig = this._toolsConfig;
        const endpoints = this._endpoints;
        const baseUrl = this._baseUrl;
        const message_id = 'msg_' + randomUUID();
        const isDebugSession = toolHeaders['X-DEBUG']; // NOTE(review): currently unused in this method

        const llmInference: LLMInference = await LLMInference.getInstance(this.model, AccessCandidate.team(this._teamId));

        if (message) this._context.addUserMessage(message, message_id);

        const contextWindow = await this._context.getContextWindow(this._maxContextSize, this._maxOutputTokens);

        // model-level maxTokens override takes precedence over the conversation default
        let maxTokens = this._maxOutputTokens;
        if (typeof this.model === 'object' && this.model?.params?.maxTokens) {
            maxTokens = this.model.params.maxTokens;
        }

        const eventEmitter: any = await llmInference
            .promptStream({
                contextWindow,
                files,
                params: {
                    model: this.model,
                    toolsConfig: this._settings?.toolsStrategy ? this._settings.toolsStrategy(toolsConfig) : toolsConfig,
                    maxTokens,
                    cache: this._settings?.experimentalCache,
                    agentId: this._agentId,
                    abortSignal,
                },
            })
            .catch((error) => {
                console.error('Error on promptStream: ', error);
                this.emit(TLLMEvent.Error, error);
            });

        // remove listeners from llm event emitter to stop receiving stream data
        if (abortSignal) {
            abortSignal.addEventListener('abort', () => {
                eventEmitter.removeAllListeners();
            });
        }
        if (!eventEmitter || eventEmitter.error) {
            throw new Error('[LLM Request Error]');
        }

        if (message) this.emit('start');
        eventEmitter.on('data', (data) => {
            if (this.stop) return;
            this.emit('data', data);
        });

        eventEmitter.on(TLLMEvent.Thinking, (thinking) => {
            if (this.stop) return;
            this.emit(TLLMEvent.Thinking, thinking);
        });

        eventEmitter.on(TLLMEvent.Content, (content) => {
            if (this.stop) return;
            // accumulate the assistant text; it is persisted to the context on 'End'
            _content += content;
            this.emit(TLLMEvent.Content, content);
        });

        let finishReason = 'stop';

        // Settles when the model finishes without tools, or when the recursive
        // streamPrompt call (after tool execution) settles.
        let toolsPromise = new Promise((resolve, reject) => {
            let hasTools = false;
            let hasError = false;
            let passThroughContent = ''; // content streamed directly by agent components (bypassing the LLM)

            eventEmitter.on(TLLMEvent.Error, (error) => {
                hasError = true;
                reject(error);
            });

            eventEmitter.on(TLLMEvent.ToolInfo, async (toolsData, thinkingBlocks = []) => {
                if (this.stop) return;
                hasTools = true;
                // assistant message recording the tool calls, stored alongside tool results
                let llmMessage: any = {
                    role: 'assistant',
                    content: _content,
                    tool_calls: [],
                };

                if (thinkingBlocks?.length > 0) {
                    this.emit(
                        'thoughtProcess',
                        thinkingBlocks
                            .filter((block) => block.type === 'thinking')
                            .map((block) => block.thinking || '')
                            .join('\n')
                    );

                    llmMessage.thinkingBlocks = thinkingBlocks;
                }

                llmMessage.tool_calls = toolsData.map((tool) => {
                    return {
                        id: tool.id,
                        type: tool.type,
                        function: {
                            name: tool.name,
                            arguments: tool.arguments,
                        },
                    };
                });

                this.emit(TLLMEvent.ToolInfo, toolsData);

                //initialize the agent callback logic (receives passthrough chunks from locally-run agents)
                const _agentCallback = (data) => {
                    if (this.stop) return;
                    let content = '';
                    let thinking = '';
                    if (typeof data === 'object') {
                        if (data.content) {
                            content = data.content;

                            passThroughContent += content;
                            eventEmitter.emit(TLLMEvent.Content, content);
                        }
                        if (data.thinking) {
                            thinking = data.thinking;
                            eventEmitter.emit(TLLMEvent.Thinking, thinking);
                        }
                        return;
                    }
                    if (typeof data === 'string') {
                        passThroughContent += data;
                        eventEmitter.emit(TLLMEvent.Content, data);
                    }
                };

                // One deferred task per requested tool call; executed with bounded concurrency below.
                const toolProcessingTasks = toolsData.map(
                    (tool: { index: number; name: string; type: string; arguments: Record<string, any> }) => async () => {
                        const endpoint = endpoints?.get(tool?.name) || tool?.name;
                        // Sometimes we have object response from the LLM such as Anthropic
                        let args = typeof tool?.arguments === 'string' ? JSONContent(tool?.arguments).tryParse() || {} : tool?.arguments;

                        if (args?.error) {
                            throw new Error('[Tool] Arguments Parsing Error\n' + JSON.stringify({ message: args?.error }));
                        }

                        this.emit('beforeToolCall', { tool, args }, llmMessage); //deprecated
                        this.emit(TLLMEvent.ToolCall, { tool, _llmRequest: llmMessage });

                        const toolArgs = {
                            type: tool?.type,
                            method: reqMethods?.get(tool?.name),
                            endpoint,
                            args,
                            baseUrl,
                            headers: toolHeaders,
                            agentCallback: _agentCallback,
                        };

                        let { data: functionResponse, error } = await this.useTool(toolArgs, abortSignal);

                        if (error) {
                            // NOTE(review): `typeof error !== null` is always true (`typeof` yields a
                            // string, never null) — the intent was almost certainly `error !== null`.
                            // Behavior is saved by the left-hand `typeof error === 'object'` check,
                            // but this should be fixed.
                            functionResponse = typeof error === 'object' && typeof error !== null ? JSON.stringify(error) : error;
                        }

                        const result = functionResponse; // raw (pre-stringification) result for the ToolResult event

                        // NOTE(review): same always-true `typeof ... !== null` pattern as above.
                        functionResponse =
                            typeof functionResponse === 'object' && typeof functionResponse !== null
                                ? JSON.stringify(functionResponse)
                                : functionResponse;

                        this.emit('afterToolCall', { tool, args }, functionResponse); // Deprecated
                        this.emit(TLLMEvent.ToolResult, { tool, result });

                        return { ...tool, result: functionResponse };
                    }
                );

                const processedToolsData = await processWithConcurrencyLimit<ToolData>(toolProcessingTasks, concurrentToolCalls);

                if (!passThroughContent) {
                    this._context.addToolMessage(llmMessage, processedToolsData, message_id);
                } else {
                    // agent components streamed content directly; mark the tool message accordingly
                    this._context.addToolMessage(llmMessage, processedToolsData, message_id, { passThrough: true });
                }

                // recurse so the model can consume the tool results and continue the turn
                this.streamPrompt(null, toolHeaders, concurrentToolCalls, abortSignal).then(resolve).catch(reject);
            });

            eventEmitter.on(TLLMEvent.End, async (toolsData, usage_data, _finishReason) => {
                if (_finishReason) finishReason = _finishReason;
                if (usage_data) {
                    //FIXME : normalize the usage data format
                    this.emit(TLLMEvent.Usage, usage_data);
                }
                if (hasError) return;

                if (!hasTools || passThroughContent) {
                    // mark internal "continue" turns so they can be filtered from the visible history
                    const lastMessage = this._context?.messages?.[this._context?.messages?.length - 1];
                    let metadata;
                    if (lastMessage?.content?.includes(passThroughtContinueMessage) && lastMessage?.__smyth_data__?.internal) {
                        metadata = { internal: true };
                    }
                    this._context.addAssistantMessage(_content, message_id, metadata);
                    resolve(''); //the content were already emitted through 'content' event
                }
            });
        });

        const toolsContent = await toolsPromise.catch((error) => {
            console.error('Error in toolsPromise: ', error);
            this.emit(TLLMEvent.Error, error);
            return '';
        });
        _content += toolsContent;

        if (message) {
            // outermost call of the recursion: emit lifecycle events
            if (finishReason !== 'stop') {
                this.emit(TLLMEvent.Interrupted, finishReason);
            }
            this.emit(TLLMEvent.End);
        } else {
            // tool-recursion call: the outermost invocation emits the 'end' events
        }

        return _content;
    }
|
|
581
|
+
|
|
582
|
+
private resolveToolEndpoint(baseUrl: string, method: string, endpoint: string, params: Record<string, any>): string {
|
|
583
|
+
//handle query params
|
|
584
|
+
let templateParams = {};
|
|
585
|
+
if (params) {
|
|
586
|
+
const parameters = this._spec?.paths?.[endpoint]?.[method.toLowerCase()]?.parameters || [];
|
|
587
|
+
for (let p of parameters) {
|
|
588
|
+
if (p.in === 'path') {
|
|
589
|
+
templateParams[p.name] = params[p.name] || '';
|
|
590
|
+
delete params[p.name];
|
|
591
|
+
}
|
|
592
|
+
}
|
|
593
|
+
}
|
|
594
|
+
const parsedEndpoint = TemplateString(endpoint).parse(templateParams, Match.singleCurly).clean().result;
|
|
595
|
+
|
|
596
|
+
// Create a new URL object using the base URL and endpoint
|
|
597
|
+
const url = new URL(parsedEndpoint, baseUrl);
|
|
598
|
+
|
|
599
|
+
// Iterate over the params object and append each key/value pair to the URL search parameters
|
|
600
|
+
Object.keys(params).forEach((key) => {
|
|
601
|
+
url.searchParams.append(key, params[key]);
|
|
602
|
+
});
|
|
603
|
+
|
|
604
|
+
// Return the full URL as a string
|
|
605
|
+
return url.toString();
|
|
606
|
+
}
|
|
607
|
+
|
|
608
|
+
private async useTool(
|
|
609
|
+
params: ToolParams,
|
|
610
|
+
abortSignal?: AbortSignal
|
|
611
|
+
): Promise<{
|
|
612
|
+
data: any;
|
|
613
|
+
error;
|
|
614
|
+
}> {
|
|
615
|
+
if (this.stop) {
|
|
616
|
+
return { data: null, error: 'Conversation Interrupted' };
|
|
617
|
+
}
|
|
618
|
+
|
|
619
|
+
const { type, endpoint, args, method, baseUrl, headers = {}, agentCallback } = params;
|
|
620
|
+
|
|
621
|
+
if (type === 'function') {
|
|
622
|
+
const toolHandler = this._customToolsHandlers[endpoint];
|
|
623
|
+
if (toolHandler) {
|
|
624
|
+
try {
|
|
625
|
+
const result = await toolHandler(args);
|
|
626
|
+
return { data: result, error: null };
|
|
627
|
+
} catch (error) {
|
|
628
|
+
return { data: null, error: error?.message || 'Custom tool handler failed' };
|
|
629
|
+
}
|
|
630
|
+
}
|
|
631
|
+
try {
|
|
632
|
+
const url = this.resolveToolEndpoint(baseUrl, method, endpoint, method == 'get' ? args : {});
|
|
633
|
+
|
|
634
|
+
const reqConfig: AxiosRequestConfig = {
|
|
635
|
+
method,
|
|
636
|
+
url,
|
|
637
|
+
headers: {
|
|
638
|
+
...headers,
|
|
639
|
+
},
|
|
640
|
+
signal: abortSignal,
|
|
641
|
+
};
|
|
642
|
+
|
|
643
|
+
if (method !== 'get') {
|
|
644
|
+
if (Object.keys(args).length) {
|
|
645
|
+
reqConfig.data = args;
|
|
646
|
+
}
|
|
647
|
+
//(reqConfig.headers as Record<string, unknown>)['Content-Type'] = 'application/json';
|
|
648
|
+
reqConfig.headers['Content-Type'] = 'application/json';
|
|
649
|
+
}
|
|
650
|
+
|
|
651
|
+
console.debug('Calling tool: ', reqConfig);
|
|
652
|
+
|
|
653
|
+
reqConfig.headers['X-CACHE-ID'] = this._context?.llmCache?.id;
|
|
654
|
+
|
|
655
|
+
/*
|
|
656
|
+
* Objective for the following conditions:
|
|
657
|
+
* - In case it is not a debug call and there is no monitor id, then we need to run the agent locally to reduce latency
|
|
658
|
+
* - but if it a debug call, we need to forward req to sre-builder-debugger since it holds the debug promises
|
|
659
|
+
* - or if there is a monitor id, we need to forward req to sre-builder-debugger since it holds the monitor SSE connections.
|
|
660
|
+
* - a remote call is often needed for file parsing be default agent we inject, it should not be loaded locally.
|
|
661
|
+
* So the objecive is mainly reducing latency when possible
|
|
662
|
+
*/
|
|
663
|
+
//TODO : implement a timeout for the tool call
|
|
664
|
+
const requiresRemoteCall =
|
|
665
|
+
reqConfig.headers['X-DEBUG'] !== undefined ||
|
|
666
|
+
reqConfig.headers['X-MONITOR-ID'] !== undefined ||
|
|
667
|
+
reqConfig.headers['X-AGENT-REMOTE-CALL'] !== undefined;
|
|
668
|
+
if (
|
|
669
|
+
reqConfig.url.includes('localhost') ||
|
|
670
|
+
(reqConfig.headers['X-AGENT-ID'] && !requiresRemoteCall)
|
|
671
|
+
//empty string is accepted
|
|
672
|
+
|
|
673
|
+
// || reqConfig.url.includes('localagent') //* commented to allow debugging live sessions as the req needs to reach sre-builder-debugger
|
|
674
|
+
) {
|
|
675
|
+
console.log('RUNNING AGENT LOCALLY');
|
|
676
|
+
let agentProcess;
|
|
677
|
+
if (this.agentData === this._specSource) {
|
|
678
|
+
//the agent was loaded from data
|
|
679
|
+
agentProcess = AgentProcess.load(this.agentData, this._agentVersion);
|
|
680
|
+
} else {
|
|
681
|
+
//the agent was loaded from a spec
|
|
682
|
+
agentProcess = AgentProcess.load(
|
|
683
|
+
reqConfig.headers['X-AGENT-ID'] || this._agentId,
|
|
684
|
+
reqConfig.headers['X-AGENT-VERSION'] || this._agentVersion
|
|
685
|
+
);
|
|
686
|
+
}
|
|
687
|
+
//if it's a local agent, invoke it directly
|
|
688
|
+
|
|
689
|
+
const response = await agentProcess.run(reqConfig as TAgentProcessParams, agentCallback);
|
|
690
|
+
return { data: response.data, error: null };
|
|
691
|
+
} else {
|
|
692
|
+
console.log('RUNNING AGENT REMOTELY');
|
|
693
|
+
let eventSource;
|
|
694
|
+
|
|
695
|
+
// if debug mode is on OR the user attached a monitor to the call, then we need to attach a monitor to the agent call
|
|
696
|
+
if ((reqConfig.headers['X-DEBUG'] && reqConfig.headers['X-AGENT-ID']) || reqConfig.headers['X-MONITOR-ID']) {
|
|
697
|
+
console.log('ATTACHING MONITOR TO REMOTE AGENT CALL');
|
|
698
|
+
const monitUrl = reqConfig.url.split('/api')[0] + '/agent/' + reqConfig.headers['X-AGENT-ID'] + '/monitor';
|
|
699
|
+
|
|
700
|
+
// Create custom fetch implementation that includes our headers
|
|
701
|
+
const customFetch: FetchLike = (url, init) => {
|
|
702
|
+
return fetch(url, {
|
|
703
|
+
...init,
|
|
704
|
+
headers: {
|
|
705
|
+
...(init?.headers || {}),
|
|
706
|
+
...Object.fromEntries(Object.entries(reqConfig.headers).map(([k, v]) => [k, String(v)])),
|
|
707
|
+
},
|
|
708
|
+
});
|
|
709
|
+
};
|
|
710
|
+
|
|
711
|
+
const eventSource = new EventSource(monitUrl, {
|
|
712
|
+
fetch: customFetch,
|
|
713
|
+
});
|
|
714
|
+
let monitorId = '';
|
|
715
|
+
|
|
716
|
+
eventSource.addEventListener('init', (event) => {
|
|
717
|
+
monitorId = event.data;
|
|
718
|
+
console.log('monitorId', monitorId);
|
|
719
|
+
if (reqConfig.headers['X-MONITOR-ID']) {
|
|
720
|
+
// an external monitor was sent, so we do not override it
|
|
721
|
+
reqConfig.headers['X-MONITOR-ID'] = `${reqConfig.headers['X-MONITOR-ID']},${monitorId}`;
|
|
722
|
+
} else {
|
|
723
|
+
reqConfig.headers['X-MONITOR-ID'] = monitorId;
|
|
724
|
+
}
|
|
725
|
+
});
|
|
726
|
+
eventSource.addEventListener('llm/passthrough/content', (event: any) => {
|
|
727
|
+
if (params.agentCallback) params.agentCallback({ content: event.data.replace(/\\n/g, '\n') });
|
|
728
|
+
});
|
|
729
|
+
eventSource.addEventListener('llm/passthrough/thinking', (event: any) => {
|
|
730
|
+
if (params.agentCallback) params.agentCallback({ thinking: event.data.replace(/\\n/g, '\n') });
|
|
731
|
+
});
|
|
732
|
+
|
|
733
|
+
await new Promise((resolve) => {
|
|
734
|
+
let maxTime = 5 * 1000; //5 seconds
|
|
735
|
+
let itv = setInterval(() => {
|
|
736
|
+
if (monitorId || maxTime <= 0) {
|
|
737
|
+
clearInterval(itv);
|
|
738
|
+
resolve(true);
|
|
739
|
+
}
|
|
740
|
+
maxTime -= 100;
|
|
741
|
+
}, 100);
|
|
742
|
+
});
|
|
743
|
+
}
|
|
744
|
+
|
|
745
|
+
//if it's a remote agent, call the API via HTTP
|
|
746
|
+
const response = await axios.request(reqConfig);
|
|
747
|
+
|
|
748
|
+
if (eventSource) {
|
|
749
|
+
eventSource.close();
|
|
750
|
+
console.log('eventSource closed');
|
|
751
|
+
}
|
|
752
|
+
return { data: response.data, error: null };
|
|
753
|
+
}
|
|
754
|
+
} catch (error: any) {
|
|
755
|
+
console.warn('Failed to call Tool: ', baseUrl, endpoint);
|
|
756
|
+
console.warn(' ====>', error);
|
|
757
|
+
return { data: null, error: error?.response?.data || error?.message };
|
|
758
|
+
}
|
|
759
|
+
}
|
|
760
|
+
|
|
761
|
+
return { data: null, error: `'${type}' tool type not supported at the moment` };
|
|
762
|
+
}
|
|
763
|
+
|
|
764
|
+
    /**
     * Registers a custom in-process tool that the LLM can call.
     * If `arguments` is omitted, the parameter schema is inferred from the
     * handler's signature (extractArgsAsOpenAPI) and the handler is wrapped so
     * the named-arguments object is spread back into positional parameters.
     * The declaration is merged into the connector-formatted tools config.
     */
    public async addTool(tool: {
        name: string;
        description: string;
        arguments?: Record<string, any> | string[];
        handler: (args: Record<string, any>) => Promise<any>;
        inputs?: any[];
    }) {
        if (!tool.arguments) {
            //if no arguments are provided, we need to extract them from the function
            const toolFunction = tool.handler as Function;
            const openApiArgs = this.extractArgsAsOpenAPI(toolFunction);
            const _arguments: any = {};
            for (let arg of openApiArgs) {
                _arguments[arg.name] = arg.schema;
                if (tool.inputs && arg.schema.properties) {
                    // enrich the inferred schema with descriptions/required flags from `inputs`
                    const required = [];
                    for (let prop in arg.schema.properties) {
                        const input = tool.inputs?.find((i) => i.name === prop);
                        if (!arg.schema.properties[prop].description) {
                            arg.schema.properties[prop].description = input?.description;
                        }
                        if (!input?.optional) {
                            required.push(prop);
                        }
                    }
                    if (required.length) {
                        arg.schema.required = required;
                    }
                }
            }

            tool.arguments = _arguments;
            // wrap the positional handler: spread the named-arguments object as positional params
            // NOTE(review): relies on Object.values() preserving key insertion order matching
            // the function's signature order — confirm this assumption for all callers.
            tool.handler = async (argsObj: any) => {
                const args = Object.values(argsObj);
                const result = await toolFunction(...args);
                return result;
            };
        }

        // NOTE(review): the values of `tool.arguments` are schema objects keyed by name and
        // usually carry no `name`/`required` fields, so `requiredFields` is typically empty
        // here — verify whether that is intentional or required info is being lost.
        const requiredFields = Object.values(tool.arguments)
            .map((arg) => (arg.required ? arg.name : null))
            .filter((arg) => arg);

        // flatten each argument schema into the property shape expected by toolDefinition
        const properties = {};
        for (let entry in tool.arguments) {
            properties[entry] = {
                type: tool.arguments[entry].type || 'string',
                properties: tool.arguments[entry].properties,
                description: tool.arguments[entry].description,
                ...(tool.arguments[entry].type === 'array' ? { items: { type: tool.arguments[entry].items?.type || 'string' } } : {}),
            };
        }
        const toolDefinition = {
            name: tool.name,
            description: tool.description,
            properties,
            requiredFields,
        };
        this._customToolsDeclarations.push(toolDefinition);
        this._customToolsHandlers[tool.name] = tool.handler;

        const llmInference: LLMInference = await LLMInference.getInstance(this.model, AccessCandidate.team(this._teamId));
        const toolsConfig: any = llmInference.connector.formatToolsConfig({
            type: 'function',
            toolDefinitions: [toolDefinition],
            toolChoice: this.toolChoice,
        });

        // merge into the existing tools config, or adopt it wholesale on first registration
        if (this._toolsConfig) this._toolsConfig.tools.push(...toolsConfig?.tools);
        else this._toolsConfig = toolsConfig;
    }
|
|
835
|
+
    /**
     * updates LLM model, if spec is available, it will update the tools config
     * (request-method/endpoint maps, base URL, connector-formatted tool
     * declarations) and rebuilds the LLM context. Emits 'error' instead of
     * throwing on failure.
     * @param model - model name or descriptor to switch to
     */
    // TODO [Forhad]: For now updateModel does not required await, but when we will have tools implementation in custom model then we need to await for it
    private async updateModel(model: string | TLLMModel) {
        try {
            this._model = model;

            if (this._spec) {
                // derive tool routing tables from the OpenAPI spec
                this._reqMethods = OpenAPIParser.mapReqMethods(this._spec?.paths);
                this._endpoints = OpenAPIParser.mapEndpoints(this._spec?.paths);
                this._baseUrl = this._spec?.servers?.[0].url;

                const functionDeclarations = this.getFunctionDeclarations(this._spec);
                functionDeclarations.push(...this._customToolsDeclarations);
                const llmInference: LLMInference = await LLMInference.getInstance(this._model, AccessCandidate.team(this._teamId));
                if (!llmInference.connector) {
                    this.emit('error', 'No connector found for model: ' + this._model);
                    return;
                }
                this._toolsConfig = llmInference.connector.formatToolsConfig({
                    type: 'function',
                    toolDefinitions: functionDeclarations,
                    toolChoice: this.toolChoice,
                });

                let messages = [];
                if (this._context) messages = this._context.messages; // preserve messages
                // NOTE(review): `messages` is captured but never passed to the new LLMContext
                // below — the previous conversation history appears to be dropped on model
                // change. Confirm whether LLMContext restores it from `_llmContextStore`.

                this._context = new LLMContext(llmInference, this.systemPrompt, this._llmContextStore);
            } else {
                // no spec: clear all tool-related state
                this._toolsConfig = null;
                this._reqMethods = null;
                this._endpoints = null;
                this._baseUrl = null;
            }
        } catch (error) {
            this.emit('error', error);
        }
    }
|
|
876
|
+
|
|
877
|
+
/**
|
|
878
|
+
* this function is used to patch the spec with missing fields that are required for the tool to work
|
|
879
|
+
* @param spec
|
|
880
|
+
*/
|
|
881
|
+
private patchSpec(spec: Record<string, any>) {
|
|
882
|
+
const paths = spec?.paths;
|
|
883
|
+
for (const path in paths) {
|
|
884
|
+
const pathData = paths[path];
|
|
885
|
+
|
|
886
|
+
// it's possible we have multiple methods for a single path
|
|
887
|
+
for (const key in pathData) {
|
|
888
|
+
const data = pathData[key];
|
|
889
|
+
if (!data?.operationId) {
|
|
890
|
+
//normalize path and use it as operationId
|
|
891
|
+
data.operationId = path.replace(/\//g, '_').replace(/{|}/g, '').replace(/\./g, '_');
|
|
892
|
+
}
|
|
893
|
+
}
|
|
894
|
+
}
|
|
895
|
+
return spec;
|
|
896
|
+
}
|
|
897
|
+
/**
|
|
898
|
+
* Loads OpenAPI specification from source
|
|
899
|
+
* @param specSource
|
|
900
|
+
* @returns
|
|
901
|
+
*/
|
|
902
|
+
private async loadSpecFromSource(specSource: string | Record<string, any>) {
|
|
903
|
+
if (typeof specSource === 'object') {
|
|
904
|
+
//is this a valid OpenAPI spec?
|
|
905
|
+
if (OpenAPIParser.isValidOpenAPI(specSource)) {
|
|
906
|
+
this.systemPrompt = specSource?.info?.description || '';
|
|
907
|
+
return this.patchSpec(specSource);
|
|
908
|
+
}
|
|
909
|
+
//is this a valid agent data?
|
|
910
|
+
if (typeof specSource?.behavior === 'string' && specSource?.components && specSource?.connections) {
|
|
911
|
+
this.agentData = specSource; //agent loaded from data directly
|
|
912
|
+
return await this.loadSpecFromAgent(specSource);
|
|
913
|
+
}
|
|
914
|
+
|
|
915
|
+
return null;
|
|
916
|
+
}
|
|
917
|
+
|
|
918
|
+
if (typeof specSource === 'string') {
|
|
919
|
+
//is this an openAPI url?
|
|
920
|
+
if (isUrl(specSource as string)) {
|
|
921
|
+
const spec = await OpenAPIParser.getJsonFromUrl(specSource as string);
|
|
922
|
+
|
|
923
|
+
if (spec.info?.description) this.systemPrompt = spec.info.description;
|
|
924
|
+
|
|
925
|
+
// we always overwrite system prompt with user defined one
|
|
926
|
+
if (this.userDefinedSystemPrompt) this.systemPrompt = this.userDefinedSystemPrompt;
|
|
927
|
+
|
|
928
|
+
if (spec.info?.title) this.assistantName = spec.info.title;
|
|
929
|
+
|
|
930
|
+
const specUrl = new URL(specSource as string);
|
|
931
|
+
const defaultBaseUrl = specUrl.origin;
|
|
932
|
+
|
|
933
|
+
if (!spec?.servers) spec.servers = [{ url: defaultBaseUrl }];
|
|
934
|
+
if (spec.servers?.length == 0) spec.servers = [{ url: defaultBaseUrl }];
|
|
935
|
+
|
|
936
|
+
if (this.assistantName) {
|
|
937
|
+
this.systemPrompt = `Assistant Name : ${this.assistantName}\n\n${this.systemPrompt}`;
|
|
938
|
+
}
|
|
939
|
+
|
|
940
|
+
//this._agentId = specUrl.hostname; //just set an agent ID in order to identify the agent in SRE //FIXME: maybe this requires a better solution
|
|
941
|
+
return this.patchSpec(spec);
|
|
942
|
+
}
|
|
943
|
+
//is this an agentId ?
|
|
944
|
+
const agentDataConnector = ConnectorService.getAgentDataConnector();
|
|
945
|
+
const agentId = specSource as string;
|
|
946
|
+
this._agentId = agentId;
|
|
947
|
+
|
|
948
|
+
if (this._agentVersion === undefined) {
|
|
949
|
+
const isDeployed = await agentDataConnector.isDeployed(agentId);
|
|
950
|
+
this._agentVersion = isDeployed ? 'latest' : '';
|
|
951
|
+
}
|
|
952
|
+
|
|
953
|
+
this.agentData = await agentDataConnector.getAgentData(agentId, this._agentVersion).catch((error) => null);
|
|
954
|
+
if (!this.agentData) return null;
|
|
955
|
+
|
|
956
|
+
const spec = await this.loadSpecFromAgent(this.agentData);
|
|
957
|
+
return spec;
|
|
958
|
+
}
|
|
959
|
+
}
|
|
960
|
+
/**
 * Builds an OpenAPI spec from agent data via the agent-data connector.
 * Accepts both the DB wrapper shape ({ name, data, version }) and a raw
 * agent schema (detected by the presence of `components`), and updates
 * systemPrompt / assistantName from the agent's metadata.
 * @param agentData - wrapped or raw agent data
 * @returns the patched spec (null spec passes through patchSpec unchanged)
 */
private async loadSpecFromAgent(agentData: Record<string, any>) {
    //handle the case where agentData object contains the agent schema directly
    //agents retrieved from the database have a wrapping object with agent name and version number
    //local agent might include the agent data directly
    let wrapped = agentData;
    if (agentData?.components) {
        wrapped = { name: agentData?.name, data: agentData, version: '1.0.0' };
    }

    const agentDataConnector = ConnectorService.getAgentDataConnector();
    this.systemPrompt = wrapped?.data?.behavior || this.systemPrompt;

    // we always overwrite system prompt with user defined one
    if (this.userDefinedSystemPrompt) this.systemPrompt = this.userDefinedSystemPrompt;

    this.assistantName = wrapped?.data?.name || wrapped?.data?.templateInfo?.name || this.assistantName;
    if (this.assistantName) {
        this.systemPrompt = `Assistant Name : ${this.assistantName}\n\n${this.systemPrompt}`;
    }

    const spec = await agentDataConnector.getOpenAPIJSON(wrapped, 'http://localhost/', this._agentVersion, true).catch((error) => null);
    return this.patchSpec(spec);
}
|
|
982
|
+
|
|
983
|
+
/**
|
|
984
|
+
* Extracts function declarations from OpenAPI specification
|
|
985
|
+
* @param spec
|
|
986
|
+
* @returns
|
|
987
|
+
*/
|
|
988
|
+
private getFunctionDeclarations(spec): FunctionDeclaration[] {
|
|
989
|
+
const paths = spec?.paths;
|
|
990
|
+
const reqMethods = OpenAPIParser.mapReqMethods(paths);
|
|
991
|
+
|
|
992
|
+
let declarations: FunctionDeclaration[] = [];
|
|
993
|
+
|
|
994
|
+
for (const path in paths) {
|
|
995
|
+
const pathData = paths[path];
|
|
996
|
+
|
|
997
|
+
// it's possible we have multiple methods for a single path
|
|
998
|
+
for (const key in pathData) {
|
|
999
|
+
const data = pathData[key];
|
|
1000
|
+
|
|
1001
|
+
if (!data?.operationId) continue;
|
|
1002
|
+
|
|
1003
|
+
const method = reqMethods.get(data?.operationId) || 'get';
|
|
1004
|
+
|
|
1005
|
+
let properties = {};
|
|
1006
|
+
let requiredFields: string[] = [];
|
|
1007
|
+
|
|
1008
|
+
if (method.toLowerCase() === 'get') {
|
|
1009
|
+
const params = data?.parameters || [];
|
|
1010
|
+
for (const prop of params) {
|
|
1011
|
+
properties[prop.name] = {
|
|
1012
|
+
...prop.schema,
|
|
1013
|
+
description: prop.description,
|
|
1014
|
+
};
|
|
1015
|
+
|
|
1016
|
+
if (prop.required === true) {
|
|
1017
|
+
requiredFields.push(prop?.name || '');
|
|
1018
|
+
}
|
|
1019
|
+
}
|
|
1020
|
+
} else {
|
|
1021
|
+
properties = data?.requestBody?.content?.['application/json']?.schema?.properties;
|
|
1022
|
+
requiredFields = data?.requestBody?.content?.['application/json']?.schema?.required;
|
|
1023
|
+
|
|
1024
|
+
// Open AI doesn't support 'required' to be boolean inside property
|
|
1025
|
+
for (const prop in properties) {
|
|
1026
|
+
delete properties[prop]?.required;
|
|
1027
|
+
}
|
|
1028
|
+
}
|
|
1029
|
+
|
|
1030
|
+
if (!properties) properties = {};
|
|
1031
|
+
if (!requiredFields) requiredFields = [];
|
|
1032
|
+
|
|
1033
|
+
const declaration = {
|
|
1034
|
+
name: data?.operationId,
|
|
1035
|
+
description: data?.description || data?.summary || '',
|
|
1036
|
+
properties,
|
|
1037
|
+
requiredFields,
|
|
1038
|
+
};
|
|
1039
|
+
declarations.push(declaration);
|
|
1040
|
+
}
|
|
1041
|
+
}
|
|
1042
|
+
|
|
1043
|
+
return declarations;
|
|
1044
|
+
}
|
|
1045
|
+
|
|
1046
|
+
/**
 * Resolves the owning team for an agent and stores it in `_teamId`.
 * On lookup failure (or a falsy result) the team id falls back to ''.
 * @param agentId - agent to resolve; a falsy id leaves `_teamId` untouched
 */
private async assignTeamIdFromAgentId(agentId: string) {
    if (!agentId) return;
    const accountConnector = ConnectorService.getAccountConnector();
    const teamId = await accountConnector.getCandidateTeam(AccessCandidate.agent(agentId))?.catch(() => '');
    this._teamId = teamId || '';
}
|
|
1053
|
+
|
|
1054
|
+
/**
 * Parses a JS function's source with acorn and maps each parameter to an
 * OpenAPI parameter object (name, in: 'query', required flag, schema).
 * Handled shapes: plain identifiers (required string), defaults (optional
 * string), rest elements (optional string array), destructured objects
 * (required object with one string property per destructured key); anything
 * else becomes a synthetic required string parameter.
 * @param fn - function whose signature is introspected
 * @returns array of OpenAPI-style parameter descriptors
 */
private extractArgsAsOpenAPI(fn) {
    // Wrap in parens so function expressions/arrows parse as an expression statement
    const ast = acorn.parse(`(${fn.toString()})`, { ecmaVersion: 'latest' });
    const params = (ast.body[0] as any).expression.params;

    // Counter for synthetic names given to unnameable parameters
    let counter = 0;
    function handleParam(param) {
        if (param.type === 'Identifier') {
            return {
                name: param.name,
                in: 'query',
                required: true,
                schema: { type: 'string', name: param.name, required: true },
            };
        }

        // Parameter with a default value => optional
        if (param.type === 'AssignmentPattern' && param.left.type === 'Identifier') {
            return {
                name: param.left.name,
                in: 'query',
                required: false,
                schema: { type: 'string', name: param.left.name, required: false },
            };
        }

        // Rest parameter => optional string array
        if (param.type === 'RestElement' && param.argument.type === 'Identifier') {
            return {
                name: param.argument.name,
                in: 'query',
                required: false,
                schema: { type: 'array', items: { type: 'string' } },
            };
        }

        if (param.type === 'ObjectPattern') {
            // For destructured objects, output as a single parameter with nested fields
            const name = `object___${counter++}`;
            return {
                name,
                in: 'query',
                required: true,
                schema: {
                    type: 'object',
                    required: true,
                    name,
                    properties: Object.fromEntries(
                        param.properties.map((prop) => {
                            // FIX: a RestElement inside the pattern has no `key`, and a
                            // computed key has no `key.name` — `prop.key.name` threw a
                            // TypeError on e.g. `({ a, ...rest }) => {}`.
                            const keyName = prop.key?.name || '[unknown]';
                            return [keyName, { type: 'string' }]; // default to string
                        })
                    ),
                },
            };
        }

        // Fallback for unrecognized parameter shapes
        const name = `unknown___${counter++}`;
        return {
            name,
            in: 'query',
            required: true,
            schema: { type: 'string', name, required: true },
        };
    }

    return params.map(handleParam);
}
|
|
1119
|
+
}
|