@smythos/sre 1.5.0 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG +62 -0
- package/LICENSE +18 -0
- package/package.json +127 -115
- package/src/Components/APICall/APICall.class.ts +155 -0
- package/src/Components/APICall/AccessTokenManager.ts +130 -0
- package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -0
- package/src/Components/APICall/OAuth.helper.ts +294 -0
- package/src/Components/APICall/mimeTypeCategories.ts +46 -0
- package/src/Components/APICall/parseData.ts +167 -0
- package/src/Components/APICall/parseHeaders.ts +41 -0
- package/src/Components/APICall/parseProxy.ts +68 -0
- package/src/Components/APICall/parseUrl.ts +91 -0
- package/src/Components/APIEndpoint.class.ts +234 -0
- package/src/Components/APIOutput.class.ts +58 -0
- package/src/Components/AgentPlugin.class.ts +102 -0
- package/src/Components/Async.class.ts +155 -0
- package/src/Components/Await.class.ts +90 -0
- package/src/Components/Classifier.class.ts +158 -0
- package/src/Components/Component.class.ts +94 -0
- package/src/Components/ComponentHost.class.ts +38 -0
- package/src/Components/DataSourceCleaner.class.ts +92 -0
- package/src/Components/DataSourceIndexer.class.ts +181 -0
- package/src/Components/DataSourceLookup.class.ts +141 -0
- package/src/Components/FEncDec.class.ts +29 -0
- package/src/Components/FHash.class.ts +33 -0
- package/src/Components/FSign.class.ts +80 -0
- package/src/Components/FSleep.class.ts +25 -0
- package/src/Components/FTimestamp.class.ts +25 -0
- package/src/Components/FileStore.class.ts +75 -0
- package/src/Components/ForEach.class.ts +97 -0
- package/src/Components/GPTPlugin.class.ts +70 -0
- package/src/Components/GenAILLM.class.ts +395 -0
- package/src/Components/HuggingFace.class.ts +314 -0
- package/src/Components/Image/imageSettings.config.ts +70 -0
- package/src/Components/ImageGenerator.class.ts +407 -0
- package/src/Components/JSONFilter.class.ts +54 -0
- package/src/Components/LLMAssistant.class.ts +213 -0
- package/src/Components/LogicAND.class.ts +28 -0
- package/src/Components/LogicAtLeast.class.ts +85 -0
- package/src/Components/LogicAtMost.class.ts +86 -0
- package/src/Components/LogicOR.class.ts +29 -0
- package/src/Components/LogicXOR.class.ts +34 -0
- package/src/Components/MCPClient.class.ts +112 -0
- package/src/Components/PromptGenerator.class.ts +122 -0
- package/src/Components/ScrapflyWebScrape.class.ts +159 -0
- package/src/Components/TavilyWebSearch.class.ts +98 -0
- package/src/Components/index.ts +77 -0
- package/src/Core/AgentProcess.helper.ts +240 -0
- package/src/Core/Connector.class.ts +123 -0
- package/src/Core/ConnectorsService.ts +192 -0
- package/src/Core/DummyConnector.ts +49 -0
- package/src/Core/HookService.ts +105 -0
- package/src/Core/SmythRuntime.class.ts +292 -0
- package/src/Core/SystemEvents.ts +15 -0
- package/src/Core/boot.ts +55 -0
- package/src/config.ts +15 -0
- package/src/constants.ts +125 -0
- package/src/data/hugging-face.params.json +580 -0
- package/src/helpers/BinaryInput.helper.ts +324 -0
- package/src/helpers/Conversation.helper.ts +1094 -0
- package/src/helpers/JsonContent.helper.ts +97 -0
- package/src/helpers/LocalCache.helper.ts +97 -0
- package/src/helpers/Log.helper.ts +234 -0
- package/src/helpers/OpenApiParser.helper.ts +150 -0
- package/src/helpers/S3Cache.helper.ts +129 -0
- package/src/helpers/SmythURI.helper.ts +5 -0
- package/src/helpers/TemplateString.helper.ts +243 -0
- package/src/helpers/TypeChecker.helper.ts +329 -0
- package/src/index.ts +179 -0
- package/src/index.ts.bak +179 -0
- package/src/subsystems/AgentManager/Agent.class.ts +1108 -0
- package/src/subsystems/AgentManager/Agent.helper.ts +3 -0
- package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -0
- package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -0
- package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -0
- package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -0
- package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -0
- package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -0
- package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -0
- package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -0
- package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -0
- package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +59 -0
- package/src/subsystems/AgentManager/Component.service/index.ts +11 -0
- package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -0
- package/src/subsystems/AgentManager/ForkedAgent.class.ts +153 -0
- package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -0
- package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +99 -0
- package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +63 -0
- package/src/subsystems/ComputeManager/Code.service/index.ts +11 -0
- package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -0
- package/src/subsystems/IO/CLI.service/index.ts +9 -0
- package/src/subsystems/IO/Log.service/LogConnector.ts +32 -0
- package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -0
- package/src/subsystems/IO/Log.service/index.ts +13 -0
- package/src/subsystems/IO/NKV.service/NKVConnector.ts +41 -0
- package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -0
- package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -0
- package/src/subsystems/IO/NKV.service/index.ts +12 -0
- package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -0
- package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -0
- package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -0
- package/src/subsystems/IO/Router.service/index.ts +11 -0
- package/src/subsystems/IO/Storage.service/SmythFS.class.ts +472 -0
- package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -0
- package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +305 -0
- package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +418 -0
- package/src/subsystems/IO/Storage.service/index.ts +13 -0
- package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -0
- package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +450 -0
- package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +373 -0
- package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +420 -0
- package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +106 -0
- package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -0
- package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -0
- package/src/subsystems/IO/VectorDB.service/index.ts +14 -0
- package/src/subsystems/LLMManager/LLM.helper.ts +221 -0
- package/src/subsystems/LLMManager/LLM.inference.ts +335 -0
- package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +374 -0
- package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +145 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +632 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +405 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +81 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +689 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +257 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/OpenAI.class.ts +848 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +255 -0
- package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +193 -0
- package/src/subsystems/LLMManager/LLM.service/index.ts +43 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +281 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.ts +229 -0
- package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -0
- package/src/subsystems/LLMManager/custom-models.ts +854 -0
- package/src/subsystems/LLMManager/models.ts +2539 -0
- package/src/subsystems/LLMManager/paramMappings.ts +69 -0
- package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -0
- package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -0
- package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -0
- package/src/subsystems/MemoryManager/LLMCache.ts +72 -0
- package/src/subsystems/MemoryManager/LLMContext.ts +125 -0
- package/src/subsystems/MemoryManager/RuntimeContext.ts +249 -0
- package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -0
- package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +76 -0
- package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -0
- package/src/subsystems/Security/Account.service/AccountConnector.ts +41 -0
- package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -0
- package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -0
- package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -0
- package/src/subsystems/Security/Account.service/index.ts +14 -0
- package/src/subsystems/Security/Credentials.helper.ts +62 -0
- package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +34 -0
- package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +57 -0
- package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -0
- package/src/subsystems/Security/ManagedVault.service/index.ts +12 -0
- package/src/subsystems/Security/SecureConnector.class.ts +110 -0
- package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -0
- package/src/subsystems/Security/Vault.service/VaultConnector.ts +26 -0
- package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -0
- package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +166 -0
- package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -0
- package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -0
- package/src/subsystems/Security/Vault.service/index.ts +12 -0
- package/src/types/ACL.types.ts +104 -0
- package/src/types/AWS.types.ts +9 -0
- package/src/types/Agent.types.ts +61 -0
- package/src/types/AgentLogger.types.ts +17 -0
- package/src/types/Cache.types.ts +1 -0
- package/src/types/Common.types.ts +3 -0
- package/src/types/LLM.types.ts +419 -0
- package/src/types/Redis.types.ts +8 -0
- package/src/types/SRE.types.ts +64 -0
- package/src/types/Security.types.ts +18 -0
- package/src/types/Storage.types.ts +5 -0
- package/src/types/VectorDB.types.ts +78 -0
- package/src/utils/base64.utils.ts +275 -0
- package/src/utils/cli.utils.ts +68 -0
- package/src/utils/data.utils.ts +263 -0
- package/src/utils/date-time.utils.ts +22 -0
- package/src/utils/general.utils.ts +238 -0
- package/src/utils/index.ts +12 -0
- package/src/utils/numbers.utils.ts +13 -0
- package/src/utils/oauth.utils.ts +35 -0
- package/src/utils/string.utils.ts +414 -0
- package/src/utils/url.utils.ts +19 -0
- package/src/utils/validation.utils.ts +74 -0
|
@@ -0,0 +1,632 @@
|
|
|
1
|
+
import EventEmitter from 'events';
|
|
2
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
3
|
+
|
|
4
|
+
import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
|
|
5
|
+
import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
|
|
6
|
+
import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
|
|
7
|
+
import {
|
|
8
|
+
TLLMParams,
|
|
9
|
+
ToolData,
|
|
10
|
+
TLLMMessageBlock,
|
|
11
|
+
TLLMToolResultMessageBlock,
|
|
12
|
+
TLLMMessageRole,
|
|
13
|
+
APIKeySource,
|
|
14
|
+
TLLMEvent,
|
|
15
|
+
ILLMRequestFuncParams,
|
|
16
|
+
TLLMChatResponse,
|
|
17
|
+
BasicCredentials,
|
|
18
|
+
TAnthropicRequestBody,
|
|
19
|
+
TLLMConnectorParams,
|
|
20
|
+
ILLMRequestContext,
|
|
21
|
+
} from '@sre/types/LLM.types';
|
|
22
|
+
|
|
23
|
+
import { LLMHelper } from '@sre/LLMManager/LLM.helper';
|
|
24
|
+
import { JSONContent } from '@sre/helpers/JsonContent.helper';
|
|
25
|
+
|
|
26
|
+
import { LLMConnector } from '../LLMConnector';
|
|
27
|
+
import { SystemEvents } from '@sre/Core/SystemEvents';
|
|
28
|
+
import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
|
|
29
|
+
|
|
30
|
+
const PREFILL_TEXT_FOR_JSON_RESPONSE = '{';
|
|
31
|
+
const LEGACY_THINKING_MODELS = ['smythos/claude-3.7-sonnet-thinking', 'claude-3.7-sonnet-thinking'];
|
|
32
|
+
|
|
33
|
+
// Type aliases
|
|
34
|
+
type AnthropicMessageParams = Anthropic.MessageCreateParamsNonStreaming | Anthropic.Messages.MessageStreamParams;
|
|
35
|
+
|
|
36
|
+
// TODO [Forhad]: implement proper typing
|
|
37
|
+
|
|
38
|
+
export class AnthropicConnector extends LLMConnector {
|
|
39
|
+
public name = 'LLM:Anthropic';
|
|
40
|
+
|
|
41
|
+
private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.Anthropic.image;
|
|
42
|
+
|
|
43
|
+
private async getClient(params: ILLMRequestContext): Promise<Anthropic> {
|
|
44
|
+
const apiKey = (params.credentials as BasicCredentials)?.apiKey;
|
|
45
|
+
|
|
46
|
+
if (!apiKey) throw new Error('Please provide an API key for Anthropic');
|
|
47
|
+
|
|
48
|
+
return new Anthropic({ apiKey });
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
|
|
52
|
+
try {
|
|
53
|
+
const anthropic = await this.getClient(context);
|
|
54
|
+
const result = await anthropic.messages.create(body);
|
|
55
|
+
const message: Anthropic.MessageParam = {
|
|
56
|
+
role: (result?.role || TLLMMessageRole.User) as Anthropic.MessageParam['role'],
|
|
57
|
+
content: result?.content || '',
|
|
58
|
+
};
|
|
59
|
+
const stopReason = result?.stop_reason;
|
|
60
|
+
|
|
61
|
+
let toolsData: ToolData[] = [];
|
|
62
|
+
let useTool = false;
|
|
63
|
+
|
|
64
|
+
if ((stopReason as 'tool_use') === 'tool_use') {
|
|
65
|
+
const toolUseContentBlocks = result?.content?.filter((c) => (c.type as 'tool_use') === 'tool_use');
|
|
66
|
+
|
|
67
|
+
if (toolUseContentBlocks?.length === 0) return;
|
|
68
|
+
|
|
69
|
+
toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
|
|
70
|
+
toolsData.push({
|
|
71
|
+
index,
|
|
72
|
+
id: toolUseBlock?.id,
|
|
73
|
+
type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
|
|
74
|
+
name: toolUseBlock?.name,
|
|
75
|
+
arguments: toolUseBlock?.input,
|
|
76
|
+
role: result?.role,
|
|
77
|
+
});
|
|
78
|
+
});
|
|
79
|
+
|
|
80
|
+
useTool = true;
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
const textBlock = result?.content?.find((block) => block.type === 'text');
|
|
84
|
+
const content = textBlock?.text || '';
|
|
85
|
+
|
|
86
|
+
const usage = result?.usage;
|
|
87
|
+
|
|
88
|
+
this.reportUsage(usage, {
|
|
89
|
+
modelEntryName: context.modelEntryName,
|
|
90
|
+
keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
|
|
91
|
+
agentId: context.agentId,
|
|
92
|
+
teamId: context.teamId,
|
|
93
|
+
});
|
|
94
|
+
|
|
95
|
+
return {
|
|
96
|
+
content,
|
|
97
|
+
finishReason: result?.stop_reason,
|
|
98
|
+
useTool,
|
|
99
|
+
toolsData,
|
|
100
|
+
message,
|
|
101
|
+
usage,
|
|
102
|
+
};
|
|
103
|
+
} catch (error) {
|
|
104
|
+
throw error;
|
|
105
|
+
}
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
/**
 * Executes a streaming chat completion against the Anthropic Messages API.
 *
 * Returns an EventEmitter that relays SDK stream events to connector-neutral ones:
 *  - 'content'      — incremental text deltas
 *  - 'thinking'     — thinking deltas (extended-thinking models)
 *  - TLLMEvent.ToolInfo — tool calls + preserved thinking blocks, on final message
 *  - 'interrupted'  — finish reason other than 'stop' / 'end_turn'
 *  - 'end'          — (toolsData, usage_data, finishReason) after final processing
 *  - 'error'        — stream errors
 */
protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
    try {
        const emitter = new EventEmitter();
        const usage_data = [];

        const anthropic = await this.getClient(context);
        let stream = anthropic.messages.stream(body);

        let toolsData: ToolData[] = [];
        let thinkingBlocks: any[] = []; // To preserve thinking blocks

        // Currently a no-op; kept as a hook for per-event usage inspection.
        stream.on('streamEvent', (event: any) => {
            if (event.message?.usage) {
                //console.log('usage', event.message?.usage);
            }
        });

        stream.on('error', (error) => {
            //console.log('error', error);

            emitter.emit('error', error);
        });
        stream.on('text', (text: string) => {
            emitter.emit('content', text);
        });

        stream.on('thinking', (thinking) => {
            // Handle thinking blocks during streaming
            emitter.emit('thinking', thinking);
        });

        stream.on('finalMessage', (finalMessage) => {
            let finishReason = 'stop';
            // Preserve thinking blocks for subsequent tool interactions
            thinkingBlocks = finalMessage.content.filter((block) => block.type === 'thinking' || block.type === 'redacted_thinking');

            // Process tool use blocks
            const toolUseContentBlocks = finalMessage.content.filter((c) => c.type === 'tool_use');

            if (toolUseContentBlocks?.length > 0) {
                toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
                    toolsData.push({
                        index,
                        id: toolUseBlock?.id,
                        type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
                        name: toolUseBlock?.name,
                        arguments: toolUseBlock?.input,
                        role: finalMessage?.role,
                    });
                });

                emitter.emit(TLLMEvent.ToolInfo, toolsData, thinkingBlocks);
            } else {
                // No tool calls: propagate the model's own stop reason.
                finishReason = finalMessage.stop_reason;
            }

            if (finalMessage?.usage) {
                const usage = finalMessage.usage;

                const reportedUsage = this.reportUsage(usage, {
                    modelEntryName: context.modelEntryName,
                    keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
                    agentId: context.agentId,
                    teamId: context.teamId,
                });

                usage_data.push(reportedUsage);
            }
            if (finishReason !== 'stop' && finishReason !== 'end_turn') {
                emitter.emit('interrupted', finishReason);
            }

            //only emit end event after processing the final message
            // NOTE(review): the 100ms delay gives listeners registered after this
            // callback a chance to attach before 'end' fires — a timing hack; a
            // deterministic handshake would be safer. Confirm before changing.
            setTimeout(() => {
                emitter.emit('end', toolsData, usage_data, finishReason);
            }, 100);
        });

        return emitter;
    } catch (error: any) {
        throw error;
    }
}
|
|
191
|
+
|
|
192
|
+
/**
 * Web search is not available for this provider; fail fast with a clear error.
 * @throws Error always.
 */
protected async webSearchRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
    throw new Error('Web search requests are not supported by Anthropic');
}
|
|
195
|
+
|
|
196
|
+
protected async reqBodyAdapter(params: TLLMParams): Promise<TAnthropicRequestBody> {
|
|
197
|
+
const body = await this.prepareBody(params);
|
|
198
|
+
|
|
199
|
+
const shouldUseThinking = await this.shouldUseThinkingMode(params);
|
|
200
|
+
if (shouldUseThinking) {
|
|
201
|
+
return await this.prepareBodyForThinkingRequest({
|
|
202
|
+
body,
|
|
203
|
+
maxThinkingTokens: params.maxThinkingTokens,
|
|
204
|
+
toolChoice: params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice,
|
|
205
|
+
});
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
return body;
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
/**
 * Normalizes Anthropic token usage into the platform usage record, emits it on
 * the 'USAGE:LLM' system event, and returns the record to the caller.
 *
 * @param usage - Anthropic usage block, optionally with prompt-cache counters.
 * @param metadata - Model entry name, key source, and agent/team attribution.
 * @returns The emitted usage record.
 */
protected reportUsage(
    usage: Anthropic.Messages.Usage & { cache_creation_input_tokens?: number; cache_read_input_tokens?: number },
    metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string },
) {
    const { modelEntryName, keySource, agentId, teamId } = metadata;

    // SmythOS (built-in) models carry a prefix; strip it to recover the bare model name.
    const modelName = modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');

    const usageData = {
        sourceId: `llm:${modelName}`,
        input_tokens: usage.input_tokens,
        output_tokens: usage.output_tokens,
        input_tokens_cache_write: usage.cache_creation_input_tokens,
        input_tokens_cache_read: usage.cache_read_input_tokens,
        keySource,
        agentId,
        teamId,
    };

    SystemEvents.emit('USAGE:LLM', usageData);

    return usageData;
}
|
|
232
|
+
|
|
233
|
+
/**
 * Translates generic tool definitions into Anthropic's tool schema
 * (`name` / `description` / `input_schema`).
 *
 * Only `type === 'function'` definitions are translated; any other type yields
 * an empty config. Returns `{ tools }` when at least one tool was produced,
 * otherwise `{}` so the key is omitted from the request body entirely.
 */
public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto' }) {
    type AnthropicToolSpec = {
        name: string;
        description: string;
        input_schema: {
            type: 'object';
            properties: Record<string, unknown>;
            required: string[];
        };
    };

    let tools: AnthropicToolSpec[] = [];

    if (type === 'function') {
        tools = toolDefinitions.map(({ name, description, properties, requiredFields }) => ({
            name,
            description,
            input_schema: {
                type: 'object' as const,
                properties,
                required: requiredFields,
            },
        }));
    }

    return tools?.length > 0 ? { tools } : {};
}
|
|
262
|
+
|
|
263
|
+
/**
 * Converts a generic assistant message + tool results into Anthropic's
 * message-block format.
 *
 * Produces up to two message blocks:
 *  1. The assistant turn, with content ordered as: thinking blocks (when
 *     present), then text/content blocks, then `tool_use` blocks derived from
 *     `tool_calls`.
 *  2. A single user turn containing every `tool_result` block (Anthropic
 *     expects tool results in the user role immediately after the tool_use turn).
 *
 * @param messageBlock - The assistant message, optionally carrying thinkingBlocks.
 * @param toolsData - Tool execution results to report back to the model.
 * @returns Message blocks ready to append to the Anthropic conversation.
 */
public transformToolMessageBlocks({
    messageBlock,
    toolsData,
}: {
    messageBlock: TLLMMessageBlock & { thinkingBlocks?: { type: string; thinking: string }[] };
    toolsData: ToolData[];
}): TLLMToolResultMessageBlock[] {
    const messageBlocks: TLLMToolResultMessageBlock[] = [];

    if (messageBlock) {
        const content: any[] = []; // TODO: set proper type for content

        // Thinking blocks must come first so subsequent tool interactions keep them.
        if (messageBlock.thinkingBlocks?.length > 0) {
            content.push(...messageBlock.thinkingBlocks);
        }

        if (Array.isArray(messageBlock.content)) {
            content.push(...messageBlock.content);
        } else {
            if (messageBlock.content) {
                //Anthropic does not accept empty text blocks
                content.push({ type: 'text', text: messageBlock.content });
            }
        }
        if (messageBlock.tool_calls) {
            // Re-shape OpenAI-style tool_calls into Anthropic tool_use blocks;
            // string arguments are parsed leniently, objects pass through as-is.
            const calls = messageBlock.tool_calls.map((toolCall: any) => {
                const args = toolCall?.function?.arguments;
                return {
                    type: 'tool_use',
                    id: toolCall.id,
                    name: toolCall?.function?.name,
                    input: typeof args === 'string' ? JSONContent(args || '{}').tryParse() : args || {},
                };
            });

            content.push(...calls);
        }

        messageBlocks.push({
            role: messageBlock?.role,
            content: content,
        });
    }

    // Combine all tool results into a single user message
    const toolResultsContent = toolsData.map((toolData): any => ({
        type: 'tool_result',
        tool_use_id: toolData.id,
        content: toolData.result,
    }));

    if (toolResultsContent.length > 0) {
        messageBlocks.push({
            role: TLLMMessageRole.User,
            content: toolResultsContent,
        });
    }

    return messageBlocks;
}
|
|
323
|
+
|
|
324
|
+
// TODO [Forhad]: This method is quite lengthy and complex. Consider breaking it down into smaller, more manageable functions for better readability and maintainability.
|
|
325
|
+
/**
 * Normalizes an arbitrary message history into a shape Anthropic accepts:
 *  - deduplicates consecutive user messages,
 *  - flattens `parts`/text-array content to strings (unless tool blocks are
 *    present, in which case structured content is preserved and empty text
 *    blocks are padded with '...'),
 *  - drops a leading user message that contains tool_result blocks (Anthropic
 *    rejects tool_result without a preceding tool_use),
 *  - guarantees the first non-system message uses the 'user' role,
 *  - re-attaches a non-empty system message at the front.
 *
 * Fix vs. previous version: the content-normalization branch re-tested
 * `Array.isArray(message.content)` inside a branch already guarded by the same
 * check, making its `else` arm unreachable; the dead check was removed with no
 * behavior change.
 *
 * Operates on a deep copy; the input array is never mutated.
 */
public getConsistentMessages(messages) {
    let _messages = JSON.parse(JSON.stringify(messages));

    // Extract the system message from the start, as our logic expects 'user' to be the first message for checks and fixes. We will add it back later.
    let systemMessage = null;
    if (_messages[0]?.role === TLLMMessageRole.System) {
        systemMessage = _messages.shift();
    }

    _messages = LLMHelper.removeDuplicateUserMessages(_messages);

    _messages = _messages.map((message) => {
        let content;

        if (message?.parts) {
            content = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
        } else if (Array.isArray(message?.content)) {
            const toolBlocks = message.content.filter(
                (item) => typeof item === 'object' && 'type' in item && (item.type === 'tool_use' || item.type === 'tool_result'),
            );

            if (toolBlocks?.length > 0) {
                // Keep structured content, but pad empty text blocks — Anthropic
                // rejects empty text.
                content = message.content.map((item) => {
                    if (item.type === 'text' && (!item.text || item.text.trim() === '')) {
                        return { ...item, text: '...' }; // empty text causes error that's why we added '...'
                    }
                    return item;
                });
            } else {
                // Pure text blocks: collapse to a single string.
                content = message.content
                    .map((block) => block?.text || '')
                    .join(' ')
                    .trim();
            }
        } else if (message?.content) {
            content = message.content as string;
        }

        message.content = content || '...'; // empty content causes error that's why we added '...'

        return message;
    });

    //[FIXED] - `tool_result` block(s) provided when previous message does not contain any `tool_use` blocks" (handler)
    if (_messages[0]?.role === TLLMMessageRole.User && Array.isArray(_messages[0].content)) {
        const hasToolResult = _messages[0].content.find((content) => 'type' in content && content.type === 'tool_result');

        //we found a tool result in the first message, so we need to remove the user message
        if (hasToolResult) {
            _messages.shift();
        }
    }

    // - Error: 400 {"type":"error","error":{"type":"invalid_request_error","message":"messages: first message must use the \"user\" role"}}
    if (_messages[0]?.role !== TLLMMessageRole.User) {
        _messages.unshift({ role: TLLMMessageRole.User, content: 'continue' }); //add an empty user message to keep the consistency
    }

    // Add the system message back to the start, as we extracted it earlier
    // Empty content is not allowed in Anthropic
    if (systemMessage && systemMessage.content) {
        _messages.unshift(systemMessage);
    }

    return _messages;
}
|
|
395
|
+
|
|
396
|
+
/**
 * Builds the base Anthropic MessageCreateParams from connector-neutral params:
 * model, messages, required max_tokens, separated system prompt, optional JSON
 * response instruction (with '{' assistant prefill), sampling knobs, and tools.
 */
private async prepareBody(params: TLLMParams): Promise<Anthropic.MessageCreateParamsNonStreaming> {
    let messages = await this.prepareMessages(params);

    let body: Anthropic.MessageCreateParamsNonStreaming = {
        model: params.model as string,
        messages: messages as Anthropic.MessageParam[],
        max_tokens: params.maxTokens, // * max token is required
    };

    //#region Prepare system message and add JSON response instruction if needed
    // TODO: We have better parameter to have structured response, need to implement it.
    const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);
    if ('content' in systemMessage) {
        body.system = systemMessage?.content as string;
    }
    messages = otherMessages;

    const responseFormat = params?.responseFormat || '';
    if (responseFormat === 'json') {
        body.system = body.system ? `${body.system} ${JSON_RESPONSE_INSTRUCTION}` : JSON_RESPONSE_INSTRUCTION;

        // Prefill the assistant turn with '{' to steer the model into JSON output.
        messages.push({ role: TLLMMessageRole.Assistant, content: PREFILL_TEXT_FOR_JSON_RESPONSE });
    }

    // NOTE(review): system messages were already separated out above, so
    // hasSystemMessage(messages) looks like it can never be true here and this
    // second separation block appears dead (prepareSystemPrompt would never run).
    // Confirm against LLMHelper semantics before removing.
    const hasSystemMessage = LLMHelper.hasSystemMessage(messages);
    if (hasSystemMessage) {
        // in Anthropic we need to provide system message separately
        const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);

        if ('content' in systemMessage) {
            body.system = await this.prepareSystemPrompt(systemMessage, params);
        }

        messages = otherMessages as Anthropic.MessageParam[];
    }
    //#endregion Prepare system message and add JSON response instruction if needed

    // Optional sampling parameters are only set when explicitly provided.
    if (params?.temperature !== undefined) body.temperature = params.temperature;
    if (params?.topP !== undefined) body.top_p = params.topP;
    if (params?.topK !== undefined) body.top_k = params.topK;
    if (params?.stopSequences?.length) body.stop_sequences = params.stopSequences;

    // #region Tools
    if (params?.toolsConfig?.tools && params?.toolsConfig?.tools.length > 0) {
        body.tools = params?.toolsConfig?.tools as unknown as Anthropic.Tool[];

        // Prompt caching: mark the last tool as the cache breakpoint.
        if (params?.cache) {
            body.tools[body.tools.length - 1]['cache_control'] = { type: 'ephemeral' };
        }
    }

    const toolChoice = params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice;
    if (toolChoice) {
        body.tool_choice = toolChoice;
    }
    // #endregion Tools

    body.messages = messages as Anthropic.MessageParam[];
    return body;
}
|
|
456
|
+
|
|
457
|
+
/**
 * Converts a prepared request body into its extended-thinking variant:
 * removes the JSON-prefill assistant message (prefill is unsupported with
 * thinking), computes a valid thinking token budget, and sanitizes tool_choice
 * ('any'/'tool' are not allowed with thinking and are downgraded to 'auto').
 *
 * FIX: the previous filter used `role !== Assistant && content !== PREFILL`,
 * which (by De Morgan) removed EVERY assistant message and every message whose
 * content equaled the prefill. The intent — per the original comment — is to
 * remove only the assistant message carrying the prefill text, i.e.
 * `!(role === Assistant && content === PREFILL)`.
 *
 * @param body - The prepared non-thinking request body.
 * @param maxThinkingTokens - Upper bound for the thinking budget.
 * @param toolChoice - Optional tool choice to carry over (sanitized).
 * @returns The thinking-enabled request body.
 */
private async prepareBodyForThinkingRequest({
    body,
    maxThinkingTokens,
    toolChoice = null,
}: {
    body: AnthropicMessageParams;
    maxThinkingTokens: number;
    toolChoice?: Anthropic.ToolChoice;
}): Promise<Anthropic.MessageCreateParamsNonStreaming> {
    // Remove the assistant message with the prefill text for JSON response, it's not supported with thinking
    let messages = body.messages.filter(
        (message) => !(message?.role === TLLMMessageRole.Assistant && message?.content === PREFILL_TEXT_FOR_JSON_RESPONSE),
    );

    let budget_tokens = Math.min(maxThinkingTokens, body.max_tokens);

    // If budget_tokens is equal to max_tokens, we set it to 80% of max_tokens
    // to avoid the error: "budget_tokens must be less than max_tokens".
    //
    // Another way to ensure valid budget_tokens is to add max_tokens and budget_tokens together - max_tokens = max_tokens + budget_tokens,
    // then take the minimum, like: Math.min(max_tokens, allowedMaxTokens).
    // However, this approach requires additional information such as model details,
    // which would mean adding more arguments like acRequest and modelEntryName to get allowedMaxTokens.
    //
    // So for now, to keep it simple, if max_tokens equals budget_tokens,
    // just use 80% of max_tokens.
    if (budget_tokens === body.max_tokens) {
        budget_tokens = Math.floor(budget_tokens * 0.8);
    }

    const thinkingBody: Anthropic.MessageCreateParamsNonStreaming = {
        model: body.model,
        messages,
        max_tokens: body.max_tokens,
        thinking: {
            type: 'enabled',
            budget_tokens,
        },
    };

    if (toolChoice) {
        // any and tool are not supported with thinking, so we set it to auto
        if (['any', 'tool'].includes(toolChoice.type)) {
            thinkingBody.tool_choice = {
                type: 'auto',
            };
        } else {
            thinkingBody.tool_choice = toolChoice;
        }
    }

    return thinkingBody;
}
|
|
511
|
+
|
|
512
|
+
private async prepareMessages(params: TLLMParams) {
    const messages = params?.messages || [];
    const files: BinaryInput[] = params?.files || [];

    if (files?.length > 0) {
        // #region Upload files
        // Normalize every attachment to a BinaryInput, then upload them all in parallel.
        const binaryInputs = files.map((file) => BinaryInput.from(file));
        await Promise.all(binaryInputs.map((input) => input.upload(AccessCandidate.agent(params.agentId))));
        // #endregion Upload files

        // Keep only supported image types and fetch their base64 payloads.
        const imageSources = this.getValidImageFiles(binaryInputs);
        const imageBlocks = await this.getImageData(imageSources, params.agentId);

        // Fold the images into the last user message (or a fresh one if none exists).
        const lastMessage = Array.isArray(messages) ? messages.pop() : {};
        const promptText = lastMessage?.content || '';

        messages.push({
            role: TLLMMessageRole.User,
            content: [{ type: 'text', text: promptText }, ...imageBlocks],
        });
    }

    return messages;
}
|
|
544
|
+
|
|
545
|
+
private async prepareSystemPrompt(systemMessage: TLLMMessageBlock, params: TLLMParams): Promise<string | Array<Anthropic.TextBlockParam>> {
|
|
546
|
+
let systemPrompt = systemMessage?.content;
|
|
547
|
+
|
|
548
|
+
if (typeof systemPrompt === 'string') {
|
|
549
|
+
systemPrompt = [
|
|
550
|
+
{
|
|
551
|
+
type: 'text' as const,
|
|
552
|
+
text: systemPrompt,
|
|
553
|
+
//cache_control: { type: 'ephemeral' }, //TODO: @Forhad check this
|
|
554
|
+
},
|
|
555
|
+
] as Array<Anthropic.TextBlockParam>;
|
|
556
|
+
}
|
|
557
|
+
|
|
558
|
+
(systemPrompt as Array<Anthropic.TextBlockParam>).unshift({
|
|
559
|
+
type: 'text' as const,
|
|
560
|
+
text: 'If you need to call a function, Do NOT inform the user that you are about to do so, and do not thank the user after you get the response. Just say something like "Give me a moment...", then when you get the response, Just continue answering the user without saying anything about the function you just called',
|
|
561
|
+
});
|
|
562
|
+
|
|
563
|
+
if (params?.cache) {
|
|
564
|
+
(systemPrompt as Array<Anthropic.TextBlockParam>)[systemPrompt.length - 1]['cache_control'] = { type: 'ephemeral' };
|
|
565
|
+
}
|
|
566
|
+
|
|
567
|
+
return systemPrompt as Array<Anthropic.TextBlockParam>;
|
|
568
|
+
}
|
|
569
|
+
|
|
570
|
+
/**
|
|
571
|
+
* Determines if thinking mode should be used based on model capabilities and parameters.
|
|
572
|
+
*/
|
|
573
|
+
private async shouldUseThinkingMode(params: TLLMParams): Promise<boolean> {
|
|
574
|
+
// Legacy thinking models always use thinking mode
|
|
575
|
+
if (LEGACY_THINKING_MODELS.includes(params.modelEntryName)) {
|
|
576
|
+
return true;
|
|
577
|
+
}
|
|
578
|
+
|
|
579
|
+
// Check if reasoning is explicitly requested and model supports it
|
|
580
|
+
const useReasoning = params?.useReasoning && params.capabilities?.reasoning === true;
|
|
581
|
+
|
|
582
|
+
return useReasoning;
|
|
583
|
+
}
|
|
584
|
+
|
|
585
|
+
private getValidImageFiles(files: BinaryInput[]) {
    // Keep only the files whose mime type is in the supported image list.
    const validSources = files.filter((file) => this.validImageMimeTypes.includes(file?.mimetype));

    // Reject the request outright when none of the provided files are usable.
    if (validSources?.length === 0) {
        throw new Error(`Unsupported file(s). Please make sure your file is one of the following types: ${this.validImageMimeTypes.join(', ')}`);
    }

    return validSources;
}
|
|
600
|
+
|
|
601
|
+
private async getImageData(
|
|
602
|
+
files: BinaryInput[],
|
|
603
|
+
agentId: string,
|
|
604
|
+
): Promise<
|
|
605
|
+
{
|
|
606
|
+
type: string;
|
|
607
|
+
source: { type: 'base64'; data: string; media_type: string };
|
|
608
|
+
}[]
|
|
609
|
+
> {
|
|
610
|
+
try {
|
|
611
|
+
const imageData = [];
|
|
612
|
+
|
|
613
|
+
for (let file of files) {
|
|
614
|
+
const bufferData = await file.readData(AccessCandidate.agent(agentId));
|
|
615
|
+
const base64Data = bufferData.toString('base64');
|
|
616
|
+
|
|
617
|
+
imageData.push({
|
|
618
|
+
type: 'image',
|
|
619
|
+
source: {
|
|
620
|
+
type: 'base64',
|
|
621
|
+
data: base64Data,
|
|
622
|
+
media_type: file.mimetype,
|
|
623
|
+
},
|
|
624
|
+
});
|
|
625
|
+
}
|
|
626
|
+
|
|
627
|
+
return imageData;
|
|
628
|
+
} catch (error) {
|
|
629
|
+
throw error;
|
|
630
|
+
}
|
|
631
|
+
}
|
|
632
|
+
}
|