@smythos/sre 1.6.8 → 1.6.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (239)
  1. package/CHANGELOG +111 -111
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/bundle-analysis-lazy.html +4949 -0
  5. package/dist/bundle-analysis.html +4949 -0
  6. package/dist/index.js +2 -2
  7. package/dist/index.js.map +1 -1
  8. package/dist/types/Components/Triggers/Gmail.trigger.d.ts +58 -0
  9. package/dist/types/Components/Triggers/GmailTrigger.class.d.ts +44 -0
  10. package/dist/types/Components/Triggers/Trigger.class.d.ts +21 -0
  11. package/dist/types/Components/Triggers/WhatsApp.trigger.d.ts +22 -0
  12. package/dist/types/helpers/AIPerformanceAnalyzer.helper.d.ts +45 -0
  13. package/dist/types/helpers/AIPerformanceCollector.helper.d.ts +111 -0
  14. package/dist/types/subsystems/IO/Storage.service/connectors/AzureBlobStorage.class.d.ts +211 -0
  15. package/dist/types/subsystems/IO/VectorDB.service/connectors/WeaviateVectorDB.class.d.ts +187 -0
  16. package/dist/types/subsystems/PerformanceManager/Performance.service/PerformanceConnector.d.ts +102 -0
  17. package/dist/types/subsystems/PerformanceManager/Performance.service/connectors/LocalPerformanceConnector.class.d.ts +100 -0
  18. package/dist/types/subsystems/PerformanceManager/Performance.service/index.d.ts +22 -0
  19. package/dist/types/subsystems/Security/Credentials/Credentials.class.d.ts +2 -0
  20. package/dist/types/subsystems/Security/Credentials/ManagedOAuth2Credentials.class.d.ts +18 -0
  21. package/dist/types/subsystems/Security/Credentials/OAuth2Credentials.class.d.ts +14 -0
  22. package/dist/types/types/Performance.types.d.ts +468 -0
  23. package/dist/types/utils/package-manager.utils.d.ts +26 -0
  24. package/package.json +1 -1
  25. package/src/Components/APICall/APICall.class.ts +161 -161
  26. package/src/Components/APICall/AccessTokenManager.ts +166 -166
  27. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  28. package/src/Components/APICall/OAuth.helper.ts +447 -447
  29. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  30. package/src/Components/APICall/parseData.ts +167 -167
  31. package/src/Components/APICall/parseHeaders.ts +41 -41
  32. package/src/Components/APICall/parseProxy.ts +68 -68
  33. package/src/Components/APICall/parseUrl.ts +91 -91
  34. package/src/Components/APIEndpoint.class.ts +234 -234
  35. package/src/Components/APIOutput.class.ts +58 -58
  36. package/src/Components/AgentPlugin.class.ts +102 -102
  37. package/src/Components/Async.class.ts +155 -155
  38. package/src/Components/Await.class.ts +90 -90
  39. package/src/Components/Classifier.class.ts +158 -158
  40. package/src/Components/Component.class.ts +147 -147
  41. package/src/Components/ComponentHost.class.ts +38 -38
  42. package/src/Components/DataSourceCleaner.class.ts +92 -92
  43. package/src/Components/DataSourceIndexer.class.ts +181 -181
  44. package/src/Components/DataSourceLookup.class.ts +161 -161
  45. package/src/Components/ECMASandbox.class.ts +72 -72
  46. package/src/Components/FEncDec.class.ts +29 -29
  47. package/src/Components/FHash.class.ts +33 -33
  48. package/src/Components/FSign.class.ts +80 -80
  49. package/src/Components/FSleep.class.ts +25 -25
  50. package/src/Components/FTimestamp.class.ts +66 -66
  51. package/src/Components/FileStore.class.ts +78 -78
  52. package/src/Components/ForEach.class.ts +97 -97
  53. package/src/Components/GPTPlugin.class.ts +70 -70
  54. package/src/Components/GenAILLM.class.ts +586 -586
  55. package/src/Components/HuggingFace.class.ts +313 -313
  56. package/src/Components/Image/imageSettings.config.ts +70 -70
  57. package/src/Components/ImageGenerator.class.ts +483 -483
  58. package/src/Components/JSONFilter.class.ts +54 -54
  59. package/src/Components/LLMAssistant.class.ts +213 -213
  60. package/src/Components/LogicAND.class.ts +28 -28
  61. package/src/Components/LogicAtLeast.class.ts +85 -85
  62. package/src/Components/LogicAtMost.class.ts +86 -86
  63. package/src/Components/LogicOR.class.ts +29 -29
  64. package/src/Components/LogicXOR.class.ts +34 -34
  65. package/src/Components/MCPClient.class.ts +137 -137
  66. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  67. package/src/Components/MemoryReadKeyVal.class.ts +67 -67
  68. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  69. package/src/Components/MemoryWriteObject.class.ts +97 -97
  70. package/src/Components/MultimodalLLM.class.ts +128 -128
  71. package/src/Components/OpenAPI.class.ts +72 -72
  72. package/src/Components/PromptGenerator.class.ts +122 -122
  73. package/src/Components/ScrapflyWebScrape.class.ts +183 -183
  74. package/src/Components/ServerlessCode.class.ts +123 -123
  75. package/src/Components/TavilyWebSearch.class.ts +103 -103
  76. package/src/Components/VisionLLM.class.ts +104 -104
  77. package/src/Components/ZapierAction.class.ts +127 -127
  78. package/src/Components/index.ts +97 -97
  79. package/src/Core/AgentProcess.helper.ts +240 -240
  80. package/src/Core/Connector.class.ts +123 -123
  81. package/src/Core/ConnectorsService.ts +197 -197
  82. package/src/Core/DummyConnector.ts +49 -49
  83. package/src/Core/HookService.ts +105 -105
  84. package/src/Core/SmythRuntime.class.ts +241 -241
  85. package/src/Core/SystemEvents.ts +16 -16
  86. package/src/Core/boot.ts +56 -56
  87. package/src/config.ts +15 -15
  88. package/src/constants.ts +126 -126
  89. package/src/data/hugging-face.params.json +579 -579
  90. package/src/helpers/AWSLambdaCode.helper.ts +624 -624
  91. package/src/helpers/BinaryInput.helper.ts +331 -331
  92. package/src/helpers/Conversation.helper.ts +1157 -1157
  93. package/src/helpers/ECMASandbox.helper.ts +64 -64
  94. package/src/helpers/JsonContent.helper.ts +97 -97
  95. package/src/helpers/LocalCache.helper.ts +97 -97
  96. package/src/helpers/Log.helper.ts +274 -274
  97. package/src/helpers/OpenApiParser.helper.ts +150 -150
  98. package/src/helpers/S3Cache.helper.ts +147 -147
  99. package/src/helpers/SmythURI.helper.ts +5 -5
  100. package/src/helpers/Sysconfig.helper.ts +95 -95
  101. package/src/helpers/TemplateString.helper.ts +243 -243
  102. package/src/helpers/TypeChecker.helper.ts +329 -329
  103. package/src/index.ts +3 -3
  104. package/src/index.ts.bak +3 -3
  105. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  106. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  107. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  108. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  109. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +145 -145
  110. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  111. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  112. package/src/subsystems/AgentManager/AgentLogger.class.ts +301 -301
  113. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  114. package/src/subsystems/AgentManager/AgentRuntime.class.ts +557 -557
  115. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  116. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  117. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  118. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  119. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  120. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  121. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  122. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  123. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  124. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +171 -171
  125. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  126. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  127. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  128. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  129. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  130. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  131. package/src/subsystems/IO/Log.service/index.ts +13 -13
  132. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  133. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  134. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  135. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  136. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  137. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  138. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  139. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  140. package/src/subsystems/IO/Router.service/index.ts +11 -11
  141. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +488 -488
  142. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  143. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  144. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  145. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  146. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  147. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +465 -465
  148. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +387 -387
  149. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +408 -408
  150. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  151. package/src/subsystems/IO/VectorDB.service/embed/GoogleEmbedding.ts +118 -118
  152. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  153. package/src/subsystems/IO/VectorDB.service/embed/index.ts +26 -26
  154. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  155. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  156. package/src/subsystems/LLMManager/LLM.inference.ts +345 -345
  157. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +492 -492
  158. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  159. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +666 -666
  160. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +407 -407
  161. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +92 -92
  162. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +983 -983
  163. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +319 -319
  164. package/src/subsystems/LLMManager/LLM.service/connectors/Ollama.class.ts +361 -361
  165. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +257 -257
  166. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +430 -430
  167. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +503 -503
  168. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -524
  169. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  170. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  171. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1145
  172. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  173. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  174. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  175. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  176. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +478 -478
  177. package/src/subsystems/LLMManager/LLM.service/index.ts +47 -47
  178. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +303 -303
  179. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +280 -271
  180. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  181. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  182. package/src/subsystems/LLMManager/models.ts +2540 -2540
  183. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  184. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  185. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  186. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +214 -214
  187. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  188. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  189. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  190. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  191. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  192. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  193. package/src/subsystems/MemoryManager/RuntimeContext.ts +277 -277
  194. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  195. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  196. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  197. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  198. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  199. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +170 -170
  200. package/src/subsystems/Security/Account.service/connectors/MySQLAccount.class.ts +76 -76
  201. package/src/subsystems/Security/Account.service/index.ts +14 -14
  202. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  203. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  204. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  205. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  206. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  207. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  208. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  209. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  210. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  211. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  212. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  213. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  214. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  215. package/src/types/ACL.types.ts +104 -104
  216. package/src/types/AWS.types.ts +10 -10
  217. package/src/types/Agent.types.ts +61 -61
  218. package/src/types/AgentLogger.types.ts +17 -17
  219. package/src/types/Cache.types.ts +1 -1
  220. package/src/types/Common.types.ts +2 -2
  221. package/src/types/LLM.types.ts +520 -520
  222. package/src/types/Redis.types.ts +8 -8
  223. package/src/types/SRE.types.ts +64 -64
  224. package/src/types/Security.types.ts +14 -14
  225. package/src/types/Storage.types.ts +5 -5
  226. package/src/types/VectorDB.types.ts +86 -86
  227. package/src/utils/base64.utils.ts +275 -275
  228. package/src/utils/cli.utils.ts +68 -68
  229. package/src/utils/data.utils.ts +322 -322
  230. package/src/utils/date-time.utils.ts +22 -22
  231. package/src/utils/general.utils.ts +238 -238
  232. package/src/utils/index.ts +12 -12
  233. package/src/utils/lazy-client.ts +261 -261
  234. package/src/utils/numbers.utils.ts +13 -13
  235. package/src/utils/oauth.utils.ts +35 -35
  236. package/src/utils/string.utils.ts +414 -414
  237. package/src/utils/url.utils.ts +19 -19
  238. package/src/utils/validation.utils.ts +74 -74
  239. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
@@ -1,666 +1,666 @@
1
- import EventEmitter from 'events';
2
- import Anthropic from '@anthropic-ai/sdk';
3
-
4
- import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
5
- import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
6
- import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
7
- import {
8
- ToolData,
9
- TLLMMessageBlock,
10
- TLLMToolResultMessageBlock,
11
- TLLMMessageRole,
12
- APIKeySource,
13
- TLLMEvent,
14
- ILLMRequestFuncParams,
15
- TLLMChatResponse,
16
- BasicCredentials,
17
- TAnthropicRequestBody,
18
- ILLMRequestContext,
19
- TLLMPreparedParams,
20
- } from '@sre/types/LLM.types';
21
-
22
- import { LLMHelper } from '@sre/LLMManager/LLM.helper';
23
- import { JSONContent } from '@sre/helpers/JsonContent.helper';
24
-
25
- import { LLMConnector } from '../LLMConnector';
26
- import { SystemEvents } from '@sre/Core/SystemEvents';
27
- import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
28
- import { Logger } from '@sre/helpers/Log.helper';
29
-
30
- const logger = Logger('AnthropicConnector');
31
-
32
- const PREFILL_TEXT_FOR_JSON_RESPONSE = '{';
33
- const LEGACY_THINKING_MODELS = ['smythos/claude-3.7-sonnet-thinking', 'claude-3.7-sonnet-thinking'];
34
-
35
- // Type aliases
36
- type AnthropicMessageParams = Anthropic.MessageCreateParamsNonStreaming | Anthropic.Messages.MessageStreamParams;
37
-
38
- // TODO [Forhad]: implement proper typing
39
-
40
- export class AnthropicConnector extends LLMConnector {
41
- public name = 'LLM:Anthropic';
42
-
43
- private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.Anthropic.image;
44
-
45
- private async getClient(params: ILLMRequestContext): Promise<Anthropic> {
46
- const apiKey = (params.credentials as BasicCredentials)?.apiKey;
47
-
48
- if (!apiKey) throw new Error('Please provide an API key for Anthropic');
49
-
50
- return new Anthropic({ apiKey });
51
- }
52
-
53
- protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
54
- try {
55
- logger.debug(`request ${this.name}`, acRequest.candidate);
56
- const anthropic = await this.getClient(context);
57
- const result = await anthropic.messages.create(body);
58
- const message: Anthropic.MessageParam = {
59
- role: (result?.role || TLLMMessageRole.User) as Anthropic.MessageParam['role'],
60
- content: result?.content || '',
61
- };
62
- const stopReason = result?.stop_reason;
63
-
64
- let toolsData: ToolData[] = [];
65
- let useTool = false;
66
-
67
- if ((stopReason as 'tool_use') === 'tool_use') {
68
- const toolUseContentBlocks = result?.content?.filter((c) => (c.type as 'tool_use') === 'tool_use');
69
-
70
- if (toolUseContentBlocks?.length === 0) return;
71
-
72
- toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
73
- toolsData.push({
74
- index,
75
- id: toolUseBlock?.id,
76
- type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
77
- name: toolUseBlock?.name,
78
- arguments: toolUseBlock?.input,
79
- role: result?.role,
80
- });
81
- });
82
-
83
- useTool = true;
84
- }
85
-
86
- const textBlock = result?.content?.find((block) => block.type === 'text');
87
- let content = textBlock?.text || '';
88
-
89
- const usage = result?.usage;
90
-
91
- if (this.hasPrefillText(body.messages)) {
92
- content = `${PREFILL_TEXT_FOR_JSON_RESPONSE}${content}`;
93
- }
94
-
95
- this.reportUsage(usage, {
96
- modelEntryName: context.modelEntryName,
97
- keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
98
- agentId: context.agentId,
99
- teamId: context.teamId,
100
- });
101
-
102
- return {
103
- content,
104
- finishReason: result?.stop_reason,
105
- useTool,
106
- toolsData,
107
- message,
108
- usage,
109
- };
110
- } catch (error) {
111
- logger.error(`request ${this.name}`, error, acRequest.candidate);
112
- throw error;
113
- }
114
- }
115
-
116
- protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
117
- try {
118
- logger.debug(`streamRequest ${this.name}`, acRequest.candidate);
119
- const emitter = new EventEmitter();
120
- const usage_data = [];
121
-
122
- const anthropic = await this.getClient(context);
123
- let stream = anthropic.messages.stream(body);
124
-
125
- let toolsData: ToolData[] = [];
126
- let thinkingBlocks: any[] = []; // To preserve thinking blocks
127
-
128
- // Determine if we need to inject prefill text and track if it's been injected
129
- const needsPrefillInjection = this.hasPrefillText(body.messages);
130
- let prefillInjected = false;
131
-
132
- stream.on('streamEvent', (event: any) => {
133
- if (event.message?.usage) {
134
- //console.log('usage', event.message?.usage);
135
- }
136
- });
137
-
138
- stream.on('error', (error) => {
139
- //console.log('error', error);
140
-
141
- emitter.emit('error', error);
142
- });
143
-
144
- stream.on('text', (text: string) => {
145
- // Inject prefill text only once at the very beginning if needed
146
- if (needsPrefillInjection && !prefillInjected) {
147
- text = `${PREFILL_TEXT_FOR_JSON_RESPONSE}${text}`;
148
- prefillInjected = true;
149
- }
150
-
151
- emitter.emit('content', text);
152
- });
153
-
154
- stream.on('thinking', (thinking) => {
155
- // Handle thinking blocks during streaming
156
- emitter.emit('thinking', thinking);
157
- });
158
-
159
- stream.on('finalMessage', (finalMessage) => {
160
- let finishReason = 'stop';
161
- // Preserve thinking blocks for subsequent tool interactions
162
- thinkingBlocks = finalMessage.content.filter((block) => block.type === 'thinking' || block.type === 'redacted_thinking');
163
-
164
- // Process tool use blocks
165
- const toolUseContentBlocks = finalMessage.content.filter((c) => c.type === 'tool_use');
166
-
167
- if (toolUseContentBlocks?.length > 0) {
168
- toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
169
- toolsData.push({
170
- index,
171
- id: toolUseBlock?.id,
172
- type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
173
- name: toolUseBlock?.name,
174
- arguments: toolUseBlock?.input,
175
- role: finalMessage?.role,
176
- });
177
- });
178
-
179
- emitter.emit(TLLMEvent.ToolInfo, toolsData, thinkingBlocks);
180
- } else {
181
- finishReason = finalMessage.stop_reason;
182
- }
183
-
184
- if (finalMessage?.usage) {
185
- const usage = finalMessage.usage;
186
-
187
- const reportedUsage = this.reportUsage(usage, {
188
- modelEntryName: context.modelEntryName,
189
- keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
190
- agentId: context.agentId,
191
- teamId: context.teamId,
192
- });
193
-
194
- usage_data.push(reportedUsage);
195
- }
196
- if (finishReason !== 'stop' && finishReason !== 'end_turn') {
197
- emitter.emit('interrupted', finishReason);
198
- }
199
-
200
- //only emit end event after processing the final message
201
- setTimeout(() => {
202
- emitter.emit('end', toolsData, usage_data, finishReason);
203
- }, 100);
204
- });
205
-
206
- return emitter;
207
- } catch (error: any) {
208
- logger.error(`streamRequest ${this.name}`, error, acRequest.candidate);
209
- throw error;
210
- }
211
- }
212
-
213
- protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TAnthropicRequestBody> {
214
- const body = await this.prepareBody(params);
215
-
216
- const shouldUseThinking = await this.shouldUseThinkingMode(params);
217
- if (shouldUseThinking) {
218
- return await this.prepareBodyForThinkingRequest({
219
- body,
220
- maxThinkingTokens: params.maxThinkingTokens,
221
- toolChoice: params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice,
222
- });
223
- }
224
-
225
- return body;
226
- }
227
-
228
- protected reportUsage(
229
- usage: Anthropic.Messages.Usage & { cache_creation_input_tokens?: number; cache_read_input_tokens?: number },
230
- metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
231
- ) {
232
- // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
233
- const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
234
-
235
- const usageData = {
236
- sourceId: `llm:${modelName}`,
237
- input_tokens: usage.input_tokens,
238
- output_tokens: usage.output_tokens,
239
- input_tokens_cache_write: usage.cache_creation_input_tokens,
240
- input_tokens_cache_read: usage.cache_read_input_tokens,
241
- keySource: metadata.keySource,
242
- agentId: metadata.agentId,
243
- teamId: metadata.teamId,
244
- };
245
- SystemEvents.emit('USAGE:LLM', usageData);
246
-
247
- return usageData;
248
- }
249
-
250
- public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto' }) {
251
- let tools: {
252
- name: string;
253
- description: string;
254
- input_schema: {
255
- type: 'object';
256
- properties: Record<string, unknown>;
257
- required: string[];
258
- };
259
- }[] = [];
260
-
261
- if (type === 'function') {
262
- tools = toolDefinitions.map((tool) => {
263
- const { name, description, properties, requiredFields } = tool;
264
-
265
- return {
266
- name,
267
- description,
268
- input_schema: {
269
- type: 'object',
270
- properties,
271
- required: requiredFields,
272
- },
273
- };
274
- });
275
- }
276
-
277
- return tools?.length > 0 ? { tools } : {};
278
- }
279
-
280
- public transformToolMessageBlocks({
281
- messageBlock,
282
- toolsData,
283
- }: {
284
- messageBlock: TLLMMessageBlock & { thinkingBlocks?: { type: string; thinking: string }[] };
285
- toolsData: ToolData[];
286
- }): TLLMToolResultMessageBlock[] {
287
- const messageBlocks: TLLMToolResultMessageBlock[] = [];
288
-
289
- if (messageBlock) {
290
- const content: any[] = []; // TODO: set proper type for content
291
-
292
- if (messageBlock.thinkingBlocks?.length > 0) {
293
- content.push(...messageBlock.thinkingBlocks);
294
- }
295
-
296
- if (Array.isArray(messageBlock.content)) {
297
- content.push(...messageBlock.content);
298
- } else {
299
- if (messageBlock.content) {
300
- //Anthropic does not accept empty text blocks
301
- content.push({ type: 'text', text: messageBlock.content });
302
- }
303
- }
304
- if (messageBlock.tool_calls) {
305
- const calls = messageBlock.tool_calls.map((toolCall: any) => {
306
- const args = toolCall?.function?.arguments;
307
- return {
308
- type: 'tool_use',
309
- id: toolCall.id,
310
- name: toolCall?.function?.name,
311
- input: typeof args === 'string' ? JSONContent(args || '{}').tryParse() : args || {},
312
- };
313
- });
314
-
315
- content.push(...calls);
316
- }
317
-
318
- messageBlocks.push({
319
- role: messageBlock?.role,
320
- content: content,
321
- });
322
- }
323
-
324
- // Combine all tool results into a single user message
325
- const toolResultsContent = toolsData.map((toolData): any => ({
326
- type: 'tool_result',
327
- tool_use_id: toolData.id,
328
- content: toolData.result,
329
- }));
330
-
331
- if (toolResultsContent.length > 0) {
332
- messageBlocks.push({
333
- role: TLLMMessageRole.User,
334
- content: toolResultsContent,
335
- });
336
- }
337
-
338
- return messageBlocks;
339
- }
340
-
341
- // TODO [Forhad]: This method is quite lengthy and complex. Consider breaking it down into smaller, more manageable functions for better readability and maintainability.
342
- public getConsistentMessages(messages) {
343
- let _messages = JSON.parse(JSON.stringify(messages));
344
-
345
- // Extract the system message from the start, as our logic expects 'user' to be the first message for checks and fixes. We will add it back later.
346
- let systemMessage = null;
347
- if (_messages[0]?.role === TLLMMessageRole.System) {
348
- systemMessage = _messages.shift();
349
- }
350
-
351
- _messages = LLMHelper.removeDuplicateUserMessages(_messages);
352
-
353
- _messages = _messages.map((message) => {
354
- let content;
355
-
356
- if (message?.parts) {
357
- content = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
358
- } else if (Array.isArray(message?.content)) {
359
- if (Array.isArray(message.content)) {
360
- const toolBlocks = message.content.filter(
361
- (item) => typeof item === 'object' && 'type' in item && (item.type === 'tool_use' || item.type === 'tool_result')
362
- );
363
-
364
- if (toolBlocks?.length > 0) {
365
- content = message.content.map((item) => {
366
- if (item.type === 'text' && (!item.text || item.text.trim() === '')) {
367
- return { ...item, text: '...' }; // empty text causes error that's why we added '...'
368
- }
369
- return item;
370
- });
371
- } else {
372
- content = message.content
373
- .map((block) => block?.text || '')
374
- .join(' ')
375
- .trim();
376
- }
377
- } else {
378
- content = message.content;
379
- }
380
- } else if (message?.content) {
381
- content = message.content as string;
382
- }
383
-
384
- message.content = content || '...'; // empty content causes error that's why we added '...'
385
-
386
- return message;
387
- });
388
-
389
- //[FIXED] - `tool_result` block(s) provided when previous message does not contain any `tool_use` blocks" (handler)
390
- if (_messages[0]?.role === TLLMMessageRole.User && Array.isArray(_messages[0].content)) {
391
- const hasToolResult = _messages[0].content.find((content) => 'type' in content && content.type === 'tool_result');
392
-
393
- //we found a tool result in the first message, so we need to remove the user message
394
- if (hasToolResult) {
395
- _messages.shift();
396
- }
397
- }
398
-
399
- // - Error: 400 {"type":"error","error":{"type":"invalid_request_error","message":"messages: first message must use the \"user\" role"}}
400
- if (_messages[0]?.role !== TLLMMessageRole.User) {
401
- _messages.unshift({ role: TLLMMessageRole.User, content: 'continue' }); //add an empty user message to keep the consistency
402
- }
403
-
404
- // Add the system message back to the start, as we extracted it earlier
405
- // Empty content is not allowed in Anthropic
406
- if (systemMessage && systemMessage.content) {
407
- _messages.unshift(systemMessage);
408
- }
409
-
410
- return _messages;
411
- }
412
-
413
- private async prepareBody(params: TLLMPreparedParams): Promise<Anthropic.MessageCreateParamsNonStreaming> {
414
- let messages = await this.prepareMessages(params);
415
-
416
- let body: Anthropic.MessageCreateParamsNonStreaming = {
417
- model: params.model as string,
418
- messages: messages as Anthropic.MessageParam[],
419
- max_tokens: params.maxTokens, // * max token is required
420
- };
421
-
422
- //#region Prepare system message and add JSON response instruction if needed
423
- // TODO: We have better parameter to have structured response, need to implement it.
424
- const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);
425
- if ('content' in systemMessage) {
426
- body.system = systemMessage?.content as string;
427
- }
428
- messages = otherMessages;
429
-
430
- const responseFormat = params?.responseFormat || '';
431
- if (responseFormat === 'json') {
432
- body.system = body.system ? `${body.system} ${JSON_RESPONSE_INSTRUCTION}` : JSON_RESPONSE_INSTRUCTION;
433
-
434
- messages.push({ role: TLLMMessageRole.Assistant, content: PREFILL_TEXT_FOR_JSON_RESPONSE });
435
- }
436
-
437
- const hasSystemMessage = LLMHelper.hasSystemMessage(messages);
438
- if (hasSystemMessage) {
439
- // in Anthropic we need to provide system message separately
440
- const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);
441
-
442
- if ('content' in systemMessage) {
443
- body.system = await this.prepareSystemPrompt(systemMessage, params);
444
- }
445
-
446
- messages = otherMessages as Anthropic.MessageParam[];
447
- }
448
- //#endregion Prepare system message and add JSON response instruction if needed
449
-
450
- const isReasoningModel = params?.capabilities?.reasoning;
451
-
452
- if (params?.temperature !== undefined && !isReasoningModel) body.temperature = params.temperature;
453
- if (params?.topP !== undefined && !isReasoningModel) body.top_p = params.topP;
454
- if (params?.topK !== undefined && !isReasoningModel) body.top_k = params.topK;
455
- if (params?.stopSequences?.length) body.stop_sequences = params.stopSequences;
456
-
457
- // #region Tools
458
- if (params?.toolsConfig?.tools && params?.toolsConfig?.tools.length > 0) {
459
- body.tools = params?.toolsConfig?.tools as unknown as Anthropic.Tool[];
460
-
461
- if (params?.cache) {
462
- body.tools[body.tools.length - 1]['cache_control'] = { type: 'ephemeral' };
463
- }
464
- }
465
-
466
- const toolChoice = params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice;
467
- if (toolChoice) {
468
- body.tool_choice = toolChoice;
469
- }
470
- // #endregion Tools
471
-
472
- body.messages = messages as Anthropic.MessageParam[];
473
- return body;
474
- }
475
-
476
- private async prepareBodyForThinkingRequest({
477
- body,
478
- maxThinkingTokens,
479
- toolChoice = null,
480
- }: {
481
- body: AnthropicMessageParams;
482
- maxThinkingTokens: number;
483
- toolChoice?: Anthropic.ToolChoice;
484
- }): Promise<Anthropic.MessageCreateParamsNonStreaming> {
485
- // Remove the assistant message with the prefill text for JSON response, it's not supported with thinking
486
- let messages = body.messages.filter(
487
- (message) => !(message?.role === TLLMMessageRole.Assistant && message?.content === PREFILL_TEXT_FOR_JSON_RESPONSE)
488
- );
489
-
490
- let budget_tokens = Math.min(maxThinkingTokens, body.max_tokens);
491
-
492
- // If budget_tokens is equal to max_tokens, we set it to 80% of max_tokens
493
- // to avoid the error: "budget_tokens must be less than max_tokens".
494
- //
495
- // Another way to ensure valid budget_tokens is to add max_tokens and budget_tokens together - max_tokens = max_tokens + budget_tokens,
496
- // then take the minimum, like: Math.min(max_tokens, allowedMaxTokens).
497
- // However, this approach requires additional information such as model details,
498
- // which would mean adding more arguments like acRequest and modelEntryName to get allowedMaxTokens.
499
- //
500
- // So for now, to keep it simple, if max_tokens equals budget_tokens,
501
- // just use 80% of max_tokens.
502
-
503
- if (budget_tokens === body.max_tokens) {
504
- budget_tokens = Math.floor(budget_tokens * 0.8);
505
- }
506
-
507
- const thinkingBody: Anthropic.MessageCreateParamsNonStreaming = {
508
- model: body.model,
509
- messages,
510
- max_tokens: body.max_tokens,
511
- thinking: {
512
- type: 'enabled',
513
- budget_tokens,
514
- },
515
- };
516
-
517
- if (toolChoice) {
518
- // any and tool are not supported with thinking, so we set it to auto
519
- if (['any', 'tool'].includes(toolChoice.type)) {
520
- thinkingBody.tool_choice = {
521
- type: 'auto',
522
- };
523
- } else {
524
- thinkingBody.tool_choice = toolChoice;
525
- }
526
- }
527
-
528
- return thinkingBody;
529
- }
530
-
531
- private async prepareMessages(params: TLLMPreparedParams) {
532
- const messages = params?.messages || [];
533
-
534
- const files: BinaryInput[] = params?.files || [];
535
-
536
- if (files?.length > 0) {
537
- // #region Upload files
538
- const promises = [];
539
- const _files = [];
540
-
541
- for (let image of files) {
542
- const binaryInput = BinaryInput.from(image);
543
- promises.push(binaryInput.upload(AccessCandidate.agent(params.agentId)));
544
-
545
- _files.push(binaryInput);
546
- }
547
-
548
- await Promise.all(promises);
549
- // #endregion Upload files
550
-
551
- const validSources = this.getValidImageFiles(_files);
552
- const imageData = await this.getImageData(validSources, params.agentId);
553
-
554
- const userMessage = Array.isArray(messages) ? messages.pop() : {};
555
- const prompt = userMessage?.content || '';
556
-
557
- const content = [{ type: 'text', text: prompt }, ...imageData];
558
- messages.push({ role: TLLMMessageRole.User, content });
559
- }
560
-
561
- return messages;
562
- }
563
-
564
- private async prepareSystemPrompt(
565
- systemMessage: TLLMMessageBlock,
566
- params: TLLMPreparedParams
567
- ): Promise<string | Array<Anthropic.TextBlockParam>> {
568
- let systemPrompt = systemMessage?.content;
569
-
570
- if (typeof systemPrompt === 'string') {
571
- systemPrompt = [
572
- {
573
- type: 'text' as const,
574
- text: systemPrompt,
575
- //cache_control: { type: 'ephemeral' }, //TODO: @Forhad check this
576
- },
577
- ] as Array<Anthropic.TextBlockParam>;
578
- }
579
-
580
- (systemPrompt as Array<Anthropic.TextBlockParam>).unshift({
581
- type: 'text' as const,
582
- text: 'If you need to call a function, Do NOT inform the user that you are about to do so, and do not thank the user after you get the response. Just say something like "Give me a moment...", then when you get the response, Just continue answering the user without saying anything about the function you just called',
583
- });
584
-
585
- if (params?.cache) {
586
- (systemPrompt as Array<Anthropic.TextBlockParam>)[systemPrompt.length - 1]['cache_control'] = { type: 'ephemeral' };
587
- }
588
-
589
- return systemPrompt as Array<Anthropic.TextBlockParam>;
590
- }
591
-
592
- /**
593
- * Determines if thinking mode should be used based on model capabilities and parameters.
594
- */
595
- private async shouldUseThinkingMode(params: TLLMPreparedParams): Promise<boolean> {
596
- // Legacy thinking models always use thinking mode
597
- if (LEGACY_THINKING_MODELS.includes(params.modelEntryName)) {
598
- return true;
599
- }
600
-
601
- // Check if reasoning is explicitly requested and model supports it
602
- const useReasoning = params?.useReasoning && params.capabilities?.reasoning === true;
603
-
604
- return useReasoning;
605
- }
606
-
607
- private getValidImageFiles(files: BinaryInput[]) {
608
- const validSources = [];
609
-
610
- for (let file of files) {
611
- if (this.validImageMimeTypes.includes(file?.mimetype)) {
612
- validSources.push(file);
613
- }
614
- }
615
-
616
- if (validSources?.length === 0) {
617
- throw new Error(`Unsupported file(s). Please make sure your file is one of the following types: ${this.validImageMimeTypes.join(', ')}`);
618
- }
619
-
620
- return validSources;
621
- }
622
-
623
- private async getImageData(
624
- files: BinaryInput[],
625
- agentId: string
626
- ): Promise<
627
- {
628
- type: string;
629
- source: { type: 'base64'; data: string; media_type: string };
630
- }[]
631
- > {
632
- try {
633
- const imageData = [];
634
-
635
- for (let file of files) {
636
- const bufferData = await file.readData(AccessCandidate.agent(agentId));
637
- const base64Data = bufferData.toString('base64');
638
-
639
- imageData.push({
640
- type: 'image',
641
- source: {
642
- type: 'base64',
643
- data: base64Data,
644
- media_type: file.mimetype,
645
- },
646
- });
647
- }
648
-
649
- return imageData;
650
- } catch (error) {
651
- throw error;
652
- }
653
- }
654
-
655
- private hasPrefillText(messages: Anthropic.MessageParam[]) {
656
- for (let i = messages.length - 1; i >= 0; i--) {
657
- const message = messages[i];
658
-
659
- if (message?.role === TLLMMessageRole.Assistant && message?.content === PREFILL_TEXT_FOR_JSON_RESPONSE) {
660
- return true;
661
- }
662
- }
663
-
664
- return false;
665
- }
666
- }
1
+ import EventEmitter from 'events';
2
+ import Anthropic from '@anthropic-ai/sdk';
3
+
4
+ import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
5
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
6
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
7
+ import {
8
+ ToolData,
9
+ TLLMMessageBlock,
10
+ TLLMToolResultMessageBlock,
11
+ TLLMMessageRole,
12
+ APIKeySource,
13
+ TLLMEvent,
14
+ ILLMRequestFuncParams,
15
+ TLLMChatResponse,
16
+ BasicCredentials,
17
+ TAnthropicRequestBody,
18
+ ILLMRequestContext,
19
+ TLLMPreparedParams,
20
+ } from '@sre/types/LLM.types';
21
+
22
+ import { LLMHelper } from '@sre/LLMManager/LLM.helper';
23
+ import { JSONContent } from '@sre/helpers/JsonContent.helper';
24
+
25
+ import { LLMConnector } from '../LLMConnector';
26
+ import { SystemEvents } from '@sre/Core/SystemEvents';
27
+ import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
28
+ import { Logger } from '@sre/helpers/Log.helper';
29
+
30
+ const logger = Logger('AnthropicConnector');
31
+
32
+ const PREFILL_TEXT_FOR_JSON_RESPONSE = '{';
33
+ const LEGACY_THINKING_MODELS = ['smythos/claude-3.7-sonnet-thinking', 'claude-3.7-sonnet-thinking'];
34
+
35
+ // Type aliases
36
+ type AnthropicMessageParams = Anthropic.MessageCreateParamsNonStreaming | Anthropic.Messages.MessageStreamParams;
37
+
38
+ // TODO [Forhad]: implement proper typing
39
+
40
+ export class AnthropicConnector extends LLMConnector {
41
+ public name = 'LLM:Anthropic';
42
+
43
+ private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.Anthropic.image;
44
+
45
+ private async getClient(params: ILLMRequestContext): Promise<Anthropic> {
46
+ const apiKey = (params.credentials as BasicCredentials)?.apiKey;
47
+
48
+ if (!apiKey) throw new Error('Please provide an API key for Anthropic');
49
+
50
+ return new Anthropic({ apiKey });
51
+ }
52
+
53
+ protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
54
+ try {
55
+ logger.debug(`request ${this.name}`, acRequest.candidate);
56
+ const anthropic = await this.getClient(context);
57
+ const result = await anthropic.messages.create(body);
58
+ const message: Anthropic.MessageParam = {
59
+ role: (result?.role || TLLMMessageRole.User) as Anthropic.MessageParam['role'],
60
+ content: result?.content || '',
61
+ };
62
+ const stopReason = result?.stop_reason;
63
+
64
+ let toolsData: ToolData[] = [];
65
+ let useTool = false;
66
+
67
+ if ((stopReason as 'tool_use') === 'tool_use') {
68
+ const toolUseContentBlocks = result?.content?.filter((c) => (c.type as 'tool_use') === 'tool_use');
69
+
70
+ if (toolUseContentBlocks?.length === 0) return;
71
+
72
+ toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
73
+ toolsData.push({
74
+ index,
75
+ id: toolUseBlock?.id,
76
+ type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
77
+ name: toolUseBlock?.name,
78
+ arguments: toolUseBlock?.input,
79
+ role: result?.role,
80
+ });
81
+ });
82
+
83
+ useTool = true;
84
+ }
85
+
86
+ const textBlock = result?.content?.find((block) => block.type === 'text');
87
+ let content = textBlock?.text || '';
88
+
89
+ const usage = result?.usage;
90
+
91
+ if (this.hasPrefillText(body.messages)) {
92
+ content = `${PREFILL_TEXT_FOR_JSON_RESPONSE}${content}`;
93
+ }
94
+
95
+ this.reportUsage(usage, {
96
+ modelEntryName: context.modelEntryName,
97
+ keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
98
+ agentId: context.agentId,
99
+ teamId: context.teamId,
100
+ });
101
+
102
+ return {
103
+ content,
104
+ finishReason: result?.stop_reason,
105
+ useTool,
106
+ toolsData,
107
+ message,
108
+ usage,
109
+ };
110
+ } catch (error) {
111
+ logger.error(`request ${this.name}`, error, acRequest.candidate);
112
+ throw error;
113
+ }
114
+ }
115
+
116
+ protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
117
+ try {
118
+ logger.debug(`streamRequest ${this.name}`, acRequest.candidate);
119
+ const emitter = new EventEmitter();
120
+ const usage_data = [];
121
+
122
+ const anthropic = await this.getClient(context);
123
+ let stream = anthropic.messages.stream(body);
124
+
125
+ let toolsData: ToolData[] = [];
126
+ let thinkingBlocks: any[] = []; // To preserve thinking blocks
127
+
128
+ // Determine if we need to inject prefill text and track if it's been injected
129
+ const needsPrefillInjection = this.hasPrefillText(body.messages);
130
+ let prefillInjected = false;
131
+
132
+ stream.on('streamEvent', (event: any) => {
133
+ if (event.message?.usage) {
134
+ //console.log('usage', event.message?.usage);
135
+ }
136
+ });
137
+
138
+ stream.on('error', (error) => {
139
+ //console.log('error', error);
140
+
141
+ emitter.emit('error', error);
142
+ });
143
+
144
+ stream.on('text', (text: string) => {
145
+ // Inject prefill text only once at the very beginning if needed
146
+ if (needsPrefillInjection && !prefillInjected) {
147
+ text = `${PREFILL_TEXT_FOR_JSON_RESPONSE}${text}`;
148
+ prefillInjected = true;
149
+ }
150
+
151
+ emitter.emit('content', text);
152
+ });
153
+
154
+ stream.on('thinking', (thinking) => {
155
+ // Handle thinking blocks during streaming
156
+ emitter.emit('thinking', thinking);
157
+ });
158
+
159
+ stream.on('finalMessage', (finalMessage) => {
160
+ let finishReason = 'stop';
161
+ // Preserve thinking blocks for subsequent tool interactions
162
+ thinkingBlocks = finalMessage.content.filter((block) => block.type === 'thinking' || block.type === 'redacted_thinking');
163
+
164
+ // Process tool use blocks
165
+ const toolUseContentBlocks = finalMessage.content.filter((c) => c.type === 'tool_use');
166
+
167
+ if (toolUseContentBlocks?.length > 0) {
168
+ toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
169
+ toolsData.push({
170
+ index,
171
+ id: toolUseBlock?.id,
172
+ type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
173
+ name: toolUseBlock?.name,
174
+ arguments: toolUseBlock?.input,
175
+ role: finalMessage?.role,
176
+ });
177
+ });
178
+
179
+ emitter.emit(TLLMEvent.ToolInfo, toolsData, thinkingBlocks);
180
+ } else {
181
+ finishReason = finalMessage.stop_reason;
182
+ }
183
+
184
+ if (finalMessage?.usage) {
185
+ const usage = finalMessage.usage;
186
+
187
+ const reportedUsage = this.reportUsage(usage, {
188
+ modelEntryName: context.modelEntryName,
189
+ keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
190
+ agentId: context.agentId,
191
+ teamId: context.teamId,
192
+ });
193
+
194
+ usage_data.push(reportedUsage);
195
+ }
196
+ if (finishReason !== 'stop' && finishReason !== 'end_turn') {
197
+ emitter.emit('interrupted', finishReason);
198
+ }
199
+
200
+ //only emit end event after processing the final message
201
+ setTimeout(() => {
202
+ emitter.emit('end', toolsData, usage_data, finishReason);
203
+ }, 100);
204
+ });
205
+
206
+ return emitter;
207
+ } catch (error: any) {
208
+ logger.error(`streamRequest ${this.name}`, error, acRequest.candidate);
209
+ throw error;
210
+ }
211
+ }
212
+
213
+ protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TAnthropicRequestBody> {
214
+ const body = await this.prepareBody(params);
215
+
216
+ const shouldUseThinking = await this.shouldUseThinkingMode(params);
217
+ if (shouldUseThinking) {
218
+ return await this.prepareBodyForThinkingRequest({
219
+ body,
220
+ maxThinkingTokens: params.maxThinkingTokens,
221
+ toolChoice: params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice,
222
+ });
223
+ }
224
+
225
+ return body;
226
+ }
227
+
228
+ protected reportUsage(
229
+ usage: Anthropic.Messages.Usage & { cache_creation_input_tokens?: number; cache_read_input_tokens?: number },
230
+ metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
231
+ ) {
232
+ // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
233
+ const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
234
+
235
+ const usageData = {
236
+ sourceId: `llm:${modelName}`,
237
+ input_tokens: usage.input_tokens,
238
+ output_tokens: usage.output_tokens,
239
+ input_tokens_cache_write: usage.cache_creation_input_tokens,
240
+ input_tokens_cache_read: usage.cache_read_input_tokens,
241
+ keySource: metadata.keySource,
242
+ agentId: metadata.agentId,
243
+ teamId: metadata.teamId,
244
+ };
245
+ SystemEvents.emit('USAGE:LLM', usageData);
246
+
247
+ return usageData;
248
+ }
249
+
250
+ public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto' }) {
251
+ let tools: {
252
+ name: string;
253
+ description: string;
254
+ input_schema: {
255
+ type: 'object';
256
+ properties: Record<string, unknown>;
257
+ required: string[];
258
+ };
259
+ }[] = [];
260
+
261
+ if (type === 'function') {
262
+ tools = toolDefinitions.map((tool) => {
263
+ const { name, description, properties, requiredFields } = tool;
264
+
265
+ return {
266
+ name,
267
+ description,
268
+ input_schema: {
269
+ type: 'object',
270
+ properties,
271
+ required: requiredFields,
272
+ },
273
+ };
274
+ });
275
+ }
276
+
277
+ return tools?.length > 0 ? { tools } : {};
278
+ }
279
+
280
+ public transformToolMessageBlocks({
281
+ messageBlock,
282
+ toolsData,
283
+ }: {
284
+ messageBlock: TLLMMessageBlock & { thinkingBlocks?: { type: string; thinking: string }[] };
285
+ toolsData: ToolData[];
286
+ }): TLLMToolResultMessageBlock[] {
287
+ const messageBlocks: TLLMToolResultMessageBlock[] = [];
288
+
289
+ if (messageBlock) {
290
+ const content: any[] = []; // TODO: set proper type for content
291
+
292
+ if (messageBlock.thinkingBlocks?.length > 0) {
293
+ content.push(...messageBlock.thinkingBlocks);
294
+ }
295
+
296
+ if (Array.isArray(messageBlock.content)) {
297
+ content.push(...messageBlock.content);
298
+ } else {
299
+ if (messageBlock.content) {
300
+ //Anthropic does not accept empty text blocks
301
+ content.push({ type: 'text', text: messageBlock.content });
302
+ }
303
+ }
304
+ if (messageBlock.tool_calls) {
305
+ const calls = messageBlock.tool_calls.map((toolCall: any) => {
306
+ const args = toolCall?.function?.arguments;
307
+ return {
308
+ type: 'tool_use',
309
+ id: toolCall.id,
310
+ name: toolCall?.function?.name,
311
+ input: typeof args === 'string' ? JSONContent(args || '{}').tryParse() : args || {},
312
+ };
313
+ });
314
+
315
+ content.push(...calls);
316
+ }
317
+
318
+ messageBlocks.push({
319
+ role: messageBlock?.role,
320
+ content: content,
321
+ });
322
+ }
323
+
324
+ // Combine all tool results into a single user message
325
+ const toolResultsContent = toolsData.map((toolData): any => ({
326
+ type: 'tool_result',
327
+ tool_use_id: toolData.id,
328
+ content: toolData.result,
329
+ }));
330
+
331
+ if (toolResultsContent.length > 0) {
332
+ messageBlocks.push({
333
+ role: TLLMMessageRole.User,
334
+ content: toolResultsContent,
335
+ });
336
+ }
337
+
338
+ return messageBlocks;
339
+ }
340
+
341
+ // TODO [Forhad]: This method is quite lengthy and complex. Consider breaking it down into smaller, more manageable functions for better readability and maintainability.
342
+ public getConsistentMessages(messages) {
343
+ let _messages = JSON.parse(JSON.stringify(messages));
344
+
345
+ // Extract the system message from the start, as our logic expects 'user' to be the first message for checks and fixes. We will add it back later.
346
+ let systemMessage = null;
347
+ if (_messages[0]?.role === TLLMMessageRole.System) {
348
+ systemMessage = _messages.shift();
349
+ }
350
+
351
+ _messages = LLMHelper.removeDuplicateUserMessages(_messages);
352
+
353
+ _messages = _messages.map((message) => {
354
+ let content;
355
+
356
+ if (message?.parts) {
357
+ content = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
358
+ } else if (Array.isArray(message?.content)) {
359
+ if (Array.isArray(message.content)) {
360
+ const toolBlocks = message.content.filter(
361
+ (item) => typeof item === 'object' && 'type' in item && (item.type === 'tool_use' || item.type === 'tool_result')
362
+ );
363
+
364
+ if (toolBlocks?.length > 0) {
365
+ content = message.content.map((item) => {
366
+ if (item.type === 'text' && (!item.text || item.text.trim() === '')) {
367
+ return { ...item, text: '...' }; // empty text causes error that's why we added '...'
368
+ }
369
+ return item;
370
+ });
371
+ } else {
372
+ content = message.content
373
+ .map((block) => block?.text || '')
374
+ .join(' ')
375
+ .trim();
376
+ }
377
+ } else {
378
+ content = message.content;
379
+ }
380
+ } else if (message?.content) {
381
+ content = message.content as string;
382
+ }
383
+
384
+ message.content = content || '...'; // empty content causes error that's why we added '...'
385
+
386
+ return message;
387
+ });
388
+
389
+ //[FIXED] - `tool_result` block(s) provided when previous message does not contain any `tool_use` blocks" (handler)
390
+ if (_messages[0]?.role === TLLMMessageRole.User && Array.isArray(_messages[0].content)) {
391
+ const hasToolResult = _messages[0].content.find((content) => 'type' in content && content.type === 'tool_result');
392
+
393
+ //we found a tool result in the first message, so we need to remove the user message
394
+ if (hasToolResult) {
395
+ _messages.shift();
396
+ }
397
+ }
398
+
399
+ // - Error: 400 {"type":"error","error":{"type":"invalid_request_error","message":"messages: first message must use the \"user\" role"}}
400
+ if (_messages[0]?.role !== TLLMMessageRole.User) {
401
+ _messages.unshift({ role: TLLMMessageRole.User, content: 'continue' }); //add an empty user message to keep the consistency
402
+ }
403
+
404
+ // Add the system message back to the start, as we extracted it earlier
405
+ // Empty content is not allowed in Anthropic
406
+ if (systemMessage && systemMessage.content) {
407
+ _messages.unshift(systemMessage);
408
+ }
409
+
410
+ return _messages;
411
+ }
412
+
413
+ private async prepareBody(params: TLLMPreparedParams): Promise<Anthropic.MessageCreateParamsNonStreaming> {
414
+ let messages = await this.prepareMessages(params);
415
+
416
+ let body: Anthropic.MessageCreateParamsNonStreaming = {
417
+ model: params.model as string,
418
+ messages: messages as Anthropic.MessageParam[],
419
+ max_tokens: params.maxTokens, // * max token is required
420
+ };
421
+
422
+ //#region Prepare system message and add JSON response instruction if needed
423
+ // TODO: We have better parameter to have structured response, need to implement it.
424
+ const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);
425
+ if ('content' in systemMessage) {
426
+ body.system = systemMessage?.content as string;
427
+ }
428
+ messages = otherMessages;
429
+
430
+ const responseFormat = params?.responseFormat || '';
431
+ if (responseFormat === 'json') {
432
+ body.system = body.system ? `${body.system} ${JSON_RESPONSE_INSTRUCTION}` : JSON_RESPONSE_INSTRUCTION;
433
+
434
+ messages.push({ role: TLLMMessageRole.Assistant, content: PREFILL_TEXT_FOR_JSON_RESPONSE });
435
+ }
436
+
437
+ const hasSystemMessage = LLMHelper.hasSystemMessage(messages);
438
+ if (hasSystemMessage) {
439
+ // in Anthropic we need to provide system message separately
440
+ const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);
441
+
442
+ if ('content' in systemMessage) {
443
+ body.system = await this.prepareSystemPrompt(systemMessage, params);
444
+ }
445
+
446
+ messages = otherMessages as Anthropic.MessageParam[];
447
+ }
448
+ //#endregion Prepare system message and add JSON response instruction if needed
449
+
450
+ const isReasoningModel = params?.capabilities?.reasoning;
451
+
452
+ if (params?.temperature !== undefined && !isReasoningModel) body.temperature = params.temperature;
453
+ if (params?.topP !== undefined && !isReasoningModel) body.top_p = params.topP;
454
+ if (params?.topK !== undefined && !isReasoningModel) body.top_k = params.topK;
455
+ if (params?.stopSequences?.length) body.stop_sequences = params.stopSequences;
456
+
457
+ // #region Tools
458
+ if (params?.toolsConfig?.tools && params?.toolsConfig?.tools.length > 0) {
459
+ body.tools = params?.toolsConfig?.tools as unknown as Anthropic.Tool[];
460
+
461
+ if (params?.cache) {
462
+ body.tools[body.tools.length - 1]['cache_control'] = { type: 'ephemeral' };
463
+ }
464
+ }
465
+
466
+ const toolChoice = params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice;
467
+ if (toolChoice) {
468
+ body.tool_choice = toolChoice;
469
+ }
470
+ // #endregion Tools
471
+
472
+ body.messages = messages as Anthropic.MessageParam[];
473
+ return body;
474
+ }
475
+
476
+ private async prepareBodyForThinkingRequest({
477
+ body,
478
+ maxThinkingTokens,
479
+ toolChoice = null,
480
+ }: {
481
+ body: AnthropicMessageParams;
482
+ maxThinkingTokens: number;
483
+ toolChoice?: Anthropic.ToolChoice;
484
+ }): Promise<Anthropic.MessageCreateParamsNonStreaming> {
485
+ // Remove the assistant message with the prefill text for JSON response, it's not supported with thinking
486
+ let messages = body.messages.filter(
487
+ (message) => !(message?.role === TLLMMessageRole.Assistant && message?.content === PREFILL_TEXT_FOR_JSON_RESPONSE)
488
+ );
489
+
490
+ let budget_tokens = Math.min(maxThinkingTokens, body.max_tokens);
491
+
492
+ // If budget_tokens is equal to max_tokens, we set it to 80% of max_tokens
493
+ // to avoid the error: "budget_tokens must be less than max_tokens".
494
+ //
495
+ // Another way to ensure valid budget_tokens is to add max_tokens and budget_tokens together - max_tokens = max_tokens + budget_tokens,
496
+ // then take the minimum, like: Math.min(max_tokens, allowedMaxTokens).
497
+ // However, this approach requires additional information such as model details,
498
+ // which would mean adding more arguments like acRequest and modelEntryName to get allowedMaxTokens.
499
+ //
500
+ // So for now, to keep it simple, if max_tokens equals budget_tokens,
501
+ // just use 80% of max_tokens.
502
+
503
+ if (budget_tokens === body.max_tokens) {
504
+ budget_tokens = Math.floor(budget_tokens * 0.8);
505
+ }
506
+
507
+ const thinkingBody: Anthropic.MessageCreateParamsNonStreaming = {
508
+ model: body.model,
509
+ messages,
510
+ max_tokens: body.max_tokens,
511
+ thinking: {
512
+ type: 'enabled',
513
+ budget_tokens,
514
+ },
515
+ };
516
+
517
+ if (toolChoice) {
518
+ // any and tool are not supported with thinking, so we set it to auto
519
+ if (['any', 'tool'].includes(toolChoice.type)) {
520
+ thinkingBody.tool_choice = {
521
+ type: 'auto',
522
+ };
523
+ } else {
524
+ thinkingBody.tool_choice = toolChoice;
525
+ }
526
+ }
527
+
528
+ return thinkingBody;
529
+ }
530
+
531
+ private async prepareMessages(params: TLLMPreparedParams) {
532
+ const messages = params?.messages || [];
533
+
534
+ const files: BinaryInput[] = params?.files || [];
535
+
536
+ if (files?.length > 0) {
537
+ // #region Upload files
538
+ const promises = [];
539
+ const _files = [];
540
+
541
+ for (let image of files) {
542
+ const binaryInput = BinaryInput.from(image);
543
+ promises.push(binaryInput.upload(AccessCandidate.agent(params.agentId)));
544
+
545
+ _files.push(binaryInput);
546
+ }
547
+
548
+ await Promise.all(promises);
549
+ // #endregion Upload files
550
+
551
+ const validSources = this.getValidImageFiles(_files);
552
+ const imageData = await this.getImageData(validSources, params.agentId);
553
+
554
+ const userMessage = Array.isArray(messages) ? messages.pop() : {};
555
+ const prompt = userMessage?.content || '';
556
+
557
+ const content = [{ type: 'text', text: prompt }, ...imageData];
558
+ messages.push({ role: TLLMMessageRole.User, content });
559
+ }
560
+
561
+ return messages;
562
+ }
563
+
564
+ private async prepareSystemPrompt(
565
+ systemMessage: TLLMMessageBlock,
566
+ params: TLLMPreparedParams
567
+ ): Promise<string | Array<Anthropic.TextBlockParam>> {
568
+ let systemPrompt = systemMessage?.content;
569
+
570
+ if (typeof systemPrompt === 'string') {
571
+ systemPrompt = [
572
+ {
573
+ type: 'text' as const,
574
+ text: systemPrompt,
575
+ //cache_control: { type: 'ephemeral' }, //TODO: @Forhad check this
576
+ },
577
+ ] as Array<Anthropic.TextBlockParam>;
578
+ }
579
+
580
+ (systemPrompt as Array<Anthropic.TextBlockParam>).unshift({
581
+ type: 'text' as const,
582
+ text: 'If you need to call a function, Do NOT inform the user that you are about to do so, and do not thank the user after you get the response. Just say something like "Give me a moment...", then when you get the response, Just continue answering the user without saying anything about the function you just called',
583
+ });
584
+
585
+ if (params?.cache) {
586
+ (systemPrompt as Array<Anthropic.TextBlockParam>)[systemPrompt.length - 1]['cache_control'] = { type: 'ephemeral' };
587
+ }
588
+
589
+ return systemPrompt as Array<Anthropic.TextBlockParam>;
590
+ }
591
+
592
+ /**
593
+ * Determines if thinking mode should be used based on model capabilities and parameters.
594
+ */
595
+ private async shouldUseThinkingMode(params: TLLMPreparedParams): Promise<boolean> {
596
+ // Legacy thinking models always use thinking mode
597
+ if (LEGACY_THINKING_MODELS.includes(params.modelEntryName)) {
598
+ return true;
599
+ }
600
+
601
+ // Check if reasoning is explicitly requested and model supports it
602
+ const useReasoning = params?.useReasoning && params.capabilities?.reasoning === true;
603
+
604
+ return useReasoning;
605
+ }
606
+
607
+ private getValidImageFiles(files: BinaryInput[]) {
608
+ const validSources = [];
609
+
610
+ for (let file of files) {
611
+ if (this.validImageMimeTypes.includes(file?.mimetype)) {
612
+ validSources.push(file);
613
+ }
614
+ }
615
+
616
+ if (validSources?.length === 0) {
617
+ throw new Error(`Unsupported file(s). Please make sure your file is one of the following types: ${this.validImageMimeTypes.join(', ')}`);
618
+ }
619
+
620
+ return validSources;
621
+ }
622
+
623
+ private async getImageData(
624
+ files: BinaryInput[],
625
+ agentId: string
626
+ ): Promise<
627
+ {
628
+ type: string;
629
+ source: { type: 'base64'; data: string; media_type: string };
630
+ }[]
631
+ > {
632
+ try {
633
+ const imageData = [];
634
+
635
+ for (let file of files) {
636
+ const bufferData = await file.readData(AccessCandidate.agent(agentId));
637
+ const base64Data = bufferData.toString('base64');
638
+
639
+ imageData.push({
640
+ type: 'image',
641
+ source: {
642
+ type: 'base64',
643
+ data: base64Data,
644
+ media_type: file.mimetype,
645
+ },
646
+ });
647
+ }
648
+
649
+ return imageData;
650
+ } catch (error) {
651
+ throw error;
652
+ }
653
+ }
654
+
655
+ private hasPrefillText(messages: Anthropic.MessageParam[]) {
656
+ for (let i = messages.length - 1; i >= 0; i--) {
657
+ const message = messages[i];
658
+
659
+ if (message?.role === TLLMMessageRole.Assistant && message?.content === PREFILL_TEXT_FOR_JSON_RESPONSE) {
660
+ return true;
661
+ }
662
+ }
663
+
664
+ return false;
665
+ }
666
+ }