@smythos/sre 1.5.44 → 1.5.45

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (227)
  1. package/CHANGELOG +90 -90
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/index.js +3 -3
  5. package/dist/index.js.map +1 -1
  6. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +13 -1
  7. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.d.ts +46 -27
  8. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.d.ts +4 -2
  9. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/types.d.ts +0 -4
  10. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +39 -0
  11. package/dist/types/types/LLM.types.d.ts +1 -0
  12. package/package.json +1 -1
  13. package/src/Components/APICall/APICall.class.ts +156 -156
  14. package/src/Components/APICall/AccessTokenManager.ts +130 -130
  15. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  16. package/src/Components/APICall/OAuth.helper.ts +294 -294
  17. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  18. package/src/Components/APICall/parseData.ts +167 -167
  19. package/src/Components/APICall/parseHeaders.ts +41 -41
  20. package/src/Components/APICall/parseProxy.ts +68 -68
  21. package/src/Components/APICall/parseUrl.ts +91 -91
  22. package/src/Components/APIEndpoint.class.ts +234 -234
  23. package/src/Components/APIOutput.class.ts +58 -58
  24. package/src/Components/AgentPlugin.class.ts +102 -102
  25. package/src/Components/Async.class.ts +155 -155
  26. package/src/Components/Await.class.ts +90 -90
  27. package/src/Components/Classifier.class.ts +158 -158
  28. package/src/Components/Component.class.ts +132 -132
  29. package/src/Components/ComponentHost.class.ts +38 -38
  30. package/src/Components/DataSourceCleaner.class.ts +92 -92
  31. package/src/Components/DataSourceIndexer.class.ts +181 -181
  32. package/src/Components/DataSourceLookup.class.ts +161 -161
  33. package/src/Components/ECMASandbox.class.ts +71 -71
  34. package/src/Components/FEncDec.class.ts +29 -29
  35. package/src/Components/FHash.class.ts +33 -33
  36. package/src/Components/FSign.class.ts +80 -80
  37. package/src/Components/FSleep.class.ts +25 -25
  38. package/src/Components/FTimestamp.class.ts +25 -25
  39. package/src/Components/FileStore.class.ts +78 -78
  40. package/src/Components/ForEach.class.ts +97 -97
  41. package/src/Components/GPTPlugin.class.ts +70 -70
  42. package/src/Components/GenAILLM.class.ts +586 -586
  43. package/src/Components/HuggingFace.class.ts +314 -314
  44. package/src/Components/Image/imageSettings.config.ts +70 -70
  45. package/src/Components/ImageGenerator.class.ts +502 -502
  46. package/src/Components/JSONFilter.class.ts +54 -54
  47. package/src/Components/LLMAssistant.class.ts +213 -213
  48. package/src/Components/LogicAND.class.ts +28 -28
  49. package/src/Components/LogicAtLeast.class.ts +85 -85
  50. package/src/Components/LogicAtMost.class.ts +86 -86
  51. package/src/Components/LogicOR.class.ts +29 -29
  52. package/src/Components/LogicXOR.class.ts +34 -34
  53. package/src/Components/MCPClient.class.ts +112 -112
  54. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  55. package/src/Components/MemoryReadKeyVal.class.ts +66 -66
  56. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  57. package/src/Components/MemoryWriteObject.class.ts +97 -97
  58. package/src/Components/MultimodalLLM.class.ts +128 -128
  59. package/src/Components/OpenAPI.class.ts +72 -72
  60. package/src/Components/PromptGenerator.class.ts +122 -122
  61. package/src/Components/ScrapflyWebScrape.class.ts +159 -159
  62. package/src/Components/ServerlessCode.class.ts +123 -123
  63. package/src/Components/TavilyWebSearch.class.ts +98 -98
  64. package/src/Components/VisionLLM.class.ts +104 -104
  65. package/src/Components/ZapierAction.class.ts +127 -127
  66. package/src/Components/index.ts +97 -97
  67. package/src/Core/AgentProcess.helper.ts +240 -240
  68. package/src/Core/Connector.class.ts +123 -123
  69. package/src/Core/ConnectorsService.ts +197 -197
  70. package/src/Core/DummyConnector.ts +49 -49
  71. package/src/Core/HookService.ts +105 -105
  72. package/src/Core/SmythRuntime.class.ts +235 -235
  73. package/src/Core/SystemEvents.ts +16 -16
  74. package/src/Core/boot.ts +56 -56
  75. package/src/config.ts +15 -15
  76. package/src/constants.ts +126 -126
  77. package/src/data/hugging-face.params.json +579 -579
  78. package/src/helpers/AWSLambdaCode.helper.ts +587 -587
  79. package/src/helpers/BinaryInput.helper.ts +331 -331
  80. package/src/helpers/Conversation.helper.ts +1119 -1119
  81. package/src/helpers/ECMASandbox.helper.ts +54 -54
  82. package/src/helpers/JsonContent.helper.ts +97 -97
  83. package/src/helpers/LocalCache.helper.ts +97 -97
  84. package/src/helpers/Log.helper.ts +274 -274
  85. package/src/helpers/OpenApiParser.helper.ts +150 -150
  86. package/src/helpers/S3Cache.helper.ts +147 -147
  87. package/src/helpers/SmythURI.helper.ts +5 -5
  88. package/src/helpers/Sysconfig.helper.ts +77 -77
  89. package/src/helpers/TemplateString.helper.ts +243 -243
  90. package/src/helpers/TypeChecker.helper.ts +329 -329
  91. package/src/index.ts +196 -196
  92. package/src/index.ts.bak +196 -196
  93. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  94. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  95. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  96. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  97. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
  98. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  99. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  100. package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -297
  101. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  102. package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
  103. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  104. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  105. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  106. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  107. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  108. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  109. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  110. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  111. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  112. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
  113. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  114. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  115. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  116. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  117. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  118. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  119. package/src/subsystems/IO/Log.service/index.ts +13 -13
  120. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  121. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  122. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  123. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  124. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  125. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  126. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  127. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  128. package/src/subsystems/IO/Router.service/index.ts +11 -11
  129. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
  130. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  131. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  132. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  133. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  134. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  135. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
  136. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
  137. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
  138. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  139. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  140. package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
  141. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  142. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  143. package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
  144. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
  145. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  146. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
  147. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
  148. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
  149. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
  150. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
  151. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
  152. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
  153. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -455
  154. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +528 -528
  155. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  156. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  157. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1168 -862
  158. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -37
  159. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  160. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  161. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -37
  162. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
  163. package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
  164. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
  165. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
  166. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  167. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  168. package/src/subsystems/LLMManager/models.ts +2540 -2540
  169. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  170. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  171. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  172. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
  173. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  174. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  175. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  176. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  177. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  178. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  179. package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
  180. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  181. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  182. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  183. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  184. package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
  185. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  186. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
  187. package/src/subsystems/Security/Account.service/index.ts +14 -14
  188. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  189. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  190. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  191. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  192. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  193. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  194. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  195. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  196. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  197. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  198. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  199. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  200. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  201. package/src/types/ACL.types.ts +104 -104
  202. package/src/types/AWS.types.ts +10 -10
  203. package/src/types/Agent.types.ts +61 -61
  204. package/src/types/AgentLogger.types.ts +17 -17
  205. package/src/types/Cache.types.ts +1 -1
  206. package/src/types/Common.types.ts +2 -2
  207. package/src/types/LLM.types.ts +496 -495
  208. package/src/types/Redis.types.ts +8 -8
  209. package/src/types/SRE.types.ts +64 -64
  210. package/src/types/Security.types.ts +14 -14
  211. package/src/types/Storage.types.ts +5 -5
  212. package/src/types/VectorDB.types.ts +86 -86
  213. package/src/utils/base64.utils.ts +275 -275
  214. package/src/utils/cli.utils.ts +68 -68
  215. package/src/utils/data.utils.ts +322 -322
  216. package/src/utils/date-time.utils.ts +22 -22
  217. package/src/utils/general.utils.ts +238 -238
  218. package/src/utils/index.ts +12 -12
  219. package/src/utils/lazy-client.ts +261 -261
  220. package/src/utils/numbers.utils.ts +13 -13
  221. package/src/utils/oauth.utils.ts +35 -35
  222. package/src/utils/string.utils.ts +414 -414
  223. package/src/utils/url.utils.ts +19 -19
  224. package/src/utils/validation.utils.ts +74 -74
  225. package/dist/bundle-analysis-lazy.html +0 -4949
  226. package/dist/bundle-analysis.html +0 -4949
  227. package/dist/types/utils/package-manager.utils.d.ts +0 -26
@@ -1,659 +1,659 @@
1
- import EventEmitter from 'events';
2
- import Anthropic from '@anthropic-ai/sdk';
3
-
4
- import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
5
- import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
6
- import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
7
- import {
8
- ToolData,
9
- TLLMMessageBlock,
10
- TLLMToolResultMessageBlock,
11
- TLLMMessageRole,
12
- APIKeySource,
13
- TLLMEvent,
14
- ILLMRequestFuncParams,
15
- TLLMChatResponse,
16
- BasicCredentials,
17
- TAnthropicRequestBody,
18
- ILLMRequestContext,
19
- TLLMPreparedParams,
20
- } from '@sre/types/LLM.types';
21
-
22
- import { LLMHelper } from '@sre/LLMManager/LLM.helper';
23
- import { JSONContent } from '@sre/helpers/JsonContent.helper';
24
-
25
- import { LLMConnector } from '../LLMConnector';
26
- import { SystemEvents } from '@sre/Core/SystemEvents';
27
- import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
28
-
29
- const PREFILL_TEXT_FOR_JSON_RESPONSE = '{';
30
- const LEGACY_THINKING_MODELS = ['smythos/claude-3.7-sonnet-thinking', 'claude-3.7-sonnet-thinking'];
31
-
32
- // Type aliases
33
- type AnthropicMessageParams = Anthropic.MessageCreateParamsNonStreaming | Anthropic.Messages.MessageStreamParams;
34
-
35
- // TODO [Forhad]: implement proper typing
36
-
37
- export class AnthropicConnector extends LLMConnector {
38
- public name = 'LLM:Anthropic';
39
-
40
- private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.Anthropic.image;
41
-
42
- private async getClient(params: ILLMRequestContext): Promise<Anthropic> {
43
- const apiKey = (params.credentials as BasicCredentials)?.apiKey;
44
-
45
- if (!apiKey) throw new Error('Please provide an API key for Anthropic');
46
-
47
- return new Anthropic({ apiKey });
48
- }
49
-
50
- protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
51
- try {
52
- const anthropic = await this.getClient(context);
53
- const result = await anthropic.messages.create(body);
54
- const message: Anthropic.MessageParam = {
55
- role: (result?.role || TLLMMessageRole.User) as Anthropic.MessageParam['role'],
56
- content: result?.content || '',
57
- };
58
- const stopReason = result?.stop_reason;
59
-
60
- let toolsData: ToolData[] = [];
61
- let useTool = false;
62
-
63
- if ((stopReason as 'tool_use') === 'tool_use') {
64
- const toolUseContentBlocks = result?.content?.filter((c) => (c.type as 'tool_use') === 'tool_use');
65
-
66
- if (toolUseContentBlocks?.length === 0) return;
67
-
68
- toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
69
- toolsData.push({
70
- index,
71
- id: toolUseBlock?.id,
72
- type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
73
- name: toolUseBlock?.name,
74
- arguments: toolUseBlock?.input,
75
- role: result?.role,
76
- });
77
- });
78
-
79
- useTool = true;
80
- }
81
-
82
- const textBlock = result?.content?.find((block) => block.type === 'text');
83
- let content = textBlock?.text || '';
84
-
85
- const usage = result?.usage;
86
-
87
- if (this.hasPrefillText(body.messages)) {
88
- content = `${PREFILL_TEXT_FOR_JSON_RESPONSE}${content}`;
89
- }
90
-
91
- this.reportUsage(usage, {
92
- modelEntryName: context.modelEntryName,
93
- keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
94
- agentId: context.agentId,
95
- teamId: context.teamId,
96
- });
97
-
98
- return {
99
- content,
100
- finishReason: result?.stop_reason,
101
- useTool,
102
- toolsData,
103
- message,
104
- usage,
105
- };
106
- } catch (error) {
107
- throw error;
108
- }
109
- }
110
-
111
- protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
112
- try {
113
- const emitter = new EventEmitter();
114
- const usage_data = [];
115
-
116
- const anthropic = await this.getClient(context);
117
- let stream = anthropic.messages.stream(body);
118
-
119
- let toolsData: ToolData[] = [];
120
- let thinkingBlocks: any[] = []; // To preserve thinking blocks
121
-
122
- // Determine if we need to inject prefill text and track if it's been injected
123
- const needsPrefillInjection = this.hasPrefillText(body.messages);
124
- let prefillInjected = false;
125
-
126
- stream.on('streamEvent', (event: any) => {
127
- if (event.message?.usage) {
128
- //console.log('usage', event.message?.usage);
129
- }
130
- });
131
-
132
- stream.on('error', (error) => {
133
- //console.log('error', error);
134
-
135
- emitter.emit('error', error);
136
- });
137
-
138
- stream.on('text', (text: string) => {
139
- // Inject prefill text only once at the very beginning if needed
140
- if (needsPrefillInjection && !prefillInjected) {
141
- text = `${PREFILL_TEXT_FOR_JSON_RESPONSE}${text}`;
142
- prefillInjected = true;
143
- }
144
-
145
- emitter.emit('content', text);
146
- });
147
-
148
- stream.on('thinking', (thinking) => {
149
- // Handle thinking blocks during streaming
150
- emitter.emit('thinking', thinking);
151
- });
152
-
153
- stream.on('finalMessage', (finalMessage) => {
154
- let finishReason = 'stop';
155
- // Preserve thinking blocks for subsequent tool interactions
156
- thinkingBlocks = finalMessage.content.filter((block) => block.type === 'thinking' || block.type === 'redacted_thinking');
157
-
158
- // Process tool use blocks
159
- const toolUseContentBlocks = finalMessage.content.filter((c) => c.type === 'tool_use');
160
-
161
- if (toolUseContentBlocks?.length > 0) {
162
- toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
163
- toolsData.push({
164
- index,
165
- id: toolUseBlock?.id,
166
- type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
167
- name: toolUseBlock?.name,
168
- arguments: toolUseBlock?.input,
169
- role: finalMessage?.role,
170
- });
171
- });
172
-
173
- emitter.emit(TLLMEvent.ToolInfo, toolsData, thinkingBlocks);
174
- } else {
175
- finishReason = finalMessage.stop_reason;
176
- }
177
-
178
- if (finalMessage?.usage) {
179
- const usage = finalMessage.usage;
180
-
181
- const reportedUsage = this.reportUsage(usage, {
182
- modelEntryName: context.modelEntryName,
183
- keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
184
- agentId: context.agentId,
185
- teamId: context.teamId,
186
- });
187
-
188
- usage_data.push(reportedUsage);
189
- }
190
- if (finishReason !== 'stop' && finishReason !== 'end_turn') {
191
- emitter.emit('interrupted', finishReason);
192
- }
193
-
194
- //only emit end event after processing the final message
195
- setTimeout(() => {
196
- emitter.emit('end', toolsData, usage_data, finishReason);
197
- }, 100);
198
- });
199
-
200
- return emitter;
201
- } catch (error: any) {
202
- throw error;
203
- }
204
- }
205
-
206
- protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TAnthropicRequestBody> {
207
- const body = await this.prepareBody(params);
208
-
209
- const shouldUseThinking = await this.shouldUseThinkingMode(params);
210
- if (shouldUseThinking) {
211
- return await this.prepareBodyForThinkingRequest({
212
- body,
213
- maxThinkingTokens: params.maxThinkingTokens,
214
- toolChoice: params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice,
215
- });
216
- }
217
-
218
- return body;
219
- }
220
-
221
- protected reportUsage(
222
- usage: Anthropic.Messages.Usage & { cache_creation_input_tokens?: number; cache_read_input_tokens?: number },
223
- metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
224
- ) {
225
- // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
226
- const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
227
-
228
- const usageData = {
229
- sourceId: `llm:${modelName}`,
230
- input_tokens: usage.input_tokens,
231
- output_tokens: usage.output_tokens,
232
- input_tokens_cache_write: usage.cache_creation_input_tokens,
233
- input_tokens_cache_read: usage.cache_read_input_tokens,
234
- keySource: metadata.keySource,
235
- agentId: metadata.agentId,
236
- teamId: metadata.teamId,
237
- };
238
- SystemEvents.emit('USAGE:LLM', usageData);
239
-
240
- return usageData;
241
- }
242
-
243
- public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto' }) {
244
- let tools: {
245
- name: string;
246
- description: string;
247
- input_schema: {
248
- type: 'object';
249
- properties: Record<string, unknown>;
250
- required: string[];
251
- };
252
- }[] = [];
253
-
254
- if (type === 'function') {
255
- tools = toolDefinitions.map((tool) => {
256
- const { name, description, properties, requiredFields } = tool;
257
-
258
- return {
259
- name,
260
- description,
261
- input_schema: {
262
- type: 'object',
263
- properties,
264
- required: requiredFields,
265
- },
266
- };
267
- });
268
- }
269
-
270
- return tools?.length > 0 ? { tools } : {};
271
- }
272
-
273
- public transformToolMessageBlocks({
274
- messageBlock,
275
- toolsData,
276
- }: {
277
- messageBlock: TLLMMessageBlock & { thinkingBlocks?: { type: string; thinking: string }[] };
278
- toolsData: ToolData[];
279
- }): TLLMToolResultMessageBlock[] {
280
- const messageBlocks: TLLMToolResultMessageBlock[] = [];
281
-
282
- if (messageBlock) {
283
- const content: any[] = []; // TODO: set proper type for content
284
-
285
- if (messageBlock.thinkingBlocks?.length > 0) {
286
- content.push(...messageBlock.thinkingBlocks);
287
- }
288
-
289
- if (Array.isArray(messageBlock.content)) {
290
- content.push(...messageBlock.content);
291
- } else {
292
- if (messageBlock.content) {
293
- //Anthropic does not accept empty text blocks
294
- content.push({ type: 'text', text: messageBlock.content });
295
- }
296
- }
297
- if (messageBlock.tool_calls) {
298
- const calls = messageBlock.tool_calls.map((toolCall: any) => {
299
- const args = toolCall?.function?.arguments;
300
- return {
301
- type: 'tool_use',
302
- id: toolCall.id,
303
- name: toolCall?.function?.name,
304
- input: typeof args === 'string' ? JSONContent(args || '{}').tryParse() : args || {},
305
- };
306
- });
307
-
308
- content.push(...calls);
309
- }
310
-
311
- messageBlocks.push({
312
- role: messageBlock?.role,
313
- content: content,
314
- });
315
- }
316
-
317
- // Combine all tool results into a single user message
318
- const toolResultsContent = toolsData.map((toolData): any => ({
319
- type: 'tool_result',
320
- tool_use_id: toolData.id,
321
- content: toolData.result,
322
- }));
323
-
324
- if (toolResultsContent.length > 0) {
325
- messageBlocks.push({
326
- role: TLLMMessageRole.User,
327
- content: toolResultsContent,
328
- });
329
- }
330
-
331
- return messageBlocks;
332
- }
333
-
334
- // TODO [Forhad]: This method is quite lengthy and complex. Consider breaking it down into smaller, more manageable functions for better readability and maintainability.
335
- public getConsistentMessages(messages) {
336
- let _messages = JSON.parse(JSON.stringify(messages));
337
-
338
- // Extract the system message from the start, as our logic expects 'user' to be the first message for checks and fixes. We will add it back later.
339
- let systemMessage = null;
340
- if (_messages[0]?.role === TLLMMessageRole.System) {
341
- systemMessage = _messages.shift();
342
- }
343
-
344
- _messages = LLMHelper.removeDuplicateUserMessages(_messages);
345
-
346
- _messages = _messages.map((message) => {
347
- let content;
348
-
349
- if (message?.parts) {
350
- content = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
351
- } else if (Array.isArray(message?.content)) {
352
- if (Array.isArray(message.content)) {
353
- const toolBlocks = message.content.filter(
354
- (item) => typeof item === 'object' && 'type' in item && (item.type === 'tool_use' || item.type === 'tool_result')
355
- );
356
-
357
- if (toolBlocks?.length > 0) {
358
- content = message.content.map((item) => {
359
- if (item.type === 'text' && (!item.text || item.text.trim() === '')) {
360
- return { ...item, text: '...' }; // empty text causes error that's why we added '...'
361
- }
362
- return item;
363
- });
364
- } else {
365
- content = message.content
366
- .map((block) => block?.text || '')
367
- .join(' ')
368
- .trim();
369
- }
370
- } else {
371
- content = message.content;
372
- }
373
- } else if (message?.content) {
374
- content = message.content as string;
375
- }
376
-
377
- message.content = content || '...'; // empty content causes error that's why we added '...'
378
-
379
- return message;
380
- });
381
-
382
- //[FIXED] - `tool_result` block(s) provided when previous message does not contain any `tool_use` blocks" (handler)
383
- if (_messages[0]?.role === TLLMMessageRole.User && Array.isArray(_messages[0].content)) {
384
- const hasToolResult = _messages[0].content.find((content) => 'type' in content && content.type === 'tool_result');
385
-
386
- //we found a tool result in the first message, so we need to remove the user message
387
- if (hasToolResult) {
388
- _messages.shift();
389
- }
390
- }
391
-
392
- // - Error: 400 {"type":"error","error":{"type":"invalid_request_error","message":"messages: first message must use the \"user\" role"}}
393
- if (_messages[0]?.role !== TLLMMessageRole.User) {
394
- _messages.unshift({ role: TLLMMessageRole.User, content: 'continue' }); //add an empty user message to keep the consistency
395
- }
396
-
397
- // Add the system message back to the start, as we extracted it earlier
398
- // Empty content is not allowed in Anthropic
399
- if (systemMessage && systemMessage.content) {
400
- _messages.unshift(systemMessage);
401
- }
402
-
403
- return _messages;
404
- }
405
-
406
- private async prepareBody(params: TLLMPreparedParams): Promise<Anthropic.MessageCreateParamsNonStreaming> {
407
- let messages = await this.prepareMessages(params);
408
-
409
- let body: Anthropic.MessageCreateParamsNonStreaming = {
410
- model: params.model as string,
411
- messages: messages as Anthropic.MessageParam[],
412
- max_tokens: params.maxTokens, // * max token is required
413
- };
414
-
415
- //#region Prepare system message and add JSON response instruction if needed
416
- // TODO: We have better parameter to have structured response, need to implement it.
417
- const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);
418
- if ('content' in systemMessage) {
419
- body.system = systemMessage?.content as string;
420
- }
421
- messages = otherMessages;
422
-
423
- const responseFormat = params?.responseFormat || '';
424
- if (responseFormat === 'json') {
425
- body.system = body.system ? `${body.system} ${JSON_RESPONSE_INSTRUCTION}` : JSON_RESPONSE_INSTRUCTION;
426
-
427
- messages.push({ role: TLLMMessageRole.Assistant, content: PREFILL_TEXT_FOR_JSON_RESPONSE });
428
- }
429
-
430
- const hasSystemMessage = LLMHelper.hasSystemMessage(messages);
431
- if (hasSystemMessage) {
432
- // in Anthropic we need to provide system message separately
433
- const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);
434
-
435
- if ('content' in systemMessage) {
436
- body.system = await this.prepareSystemPrompt(systemMessage, params);
437
- }
438
-
439
- messages = otherMessages as Anthropic.MessageParam[];
440
- }
441
- //#endregion Prepare system message and add JSON response instruction if needed
442
-
443
- const isReasoningModel = params?.capabilities?.reasoning;
444
-
445
- if (params?.temperature !== undefined && !isReasoningModel) body.temperature = params.temperature;
446
- if (params?.topP !== undefined && !isReasoningModel) body.top_p = params.topP;
447
- if (params?.topK !== undefined && !isReasoningModel) body.top_k = params.topK;
448
- if (params?.stopSequences?.length) body.stop_sequences = params.stopSequences;
449
-
450
- // #region Tools
451
- if (params?.toolsConfig?.tools && params?.toolsConfig?.tools.length > 0) {
452
- body.tools = params?.toolsConfig?.tools as unknown as Anthropic.Tool[];
453
-
454
- if (params?.cache) {
455
- body.tools[body.tools.length - 1]['cache_control'] = { type: 'ephemeral' };
456
- }
457
- }
458
-
459
- const toolChoice = params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice;
460
- if (toolChoice) {
461
- body.tool_choice = toolChoice;
462
- }
463
- // #endregion Tools
464
-
465
- body.messages = messages as Anthropic.MessageParam[];
466
- return body;
467
- }
468
-
469
- private async prepareBodyForThinkingRequest({
470
- body,
471
- maxThinkingTokens,
472
- toolChoice = null,
473
- }: {
474
- body: AnthropicMessageParams;
475
- maxThinkingTokens: number;
476
- toolChoice?: Anthropic.ToolChoice;
477
- }): Promise<Anthropic.MessageCreateParamsNonStreaming> {
478
- // Remove the assistant message with the prefill text for JSON response, it's not supported with thinking
479
- let messages = body.messages.filter(
480
- (message) => !(message?.role === TLLMMessageRole.Assistant && message?.content === PREFILL_TEXT_FOR_JSON_RESPONSE)
481
- );
482
-
483
- let budget_tokens = Math.min(maxThinkingTokens, body.max_tokens);
484
-
485
- // If budget_tokens is equal to max_tokens, we set it to 80% of max_tokens
486
- // to avoid the error: "budget_tokens must be less than max_tokens".
487
- //
488
- // Another way to ensure valid budget_tokens is to add max_tokens and budget_tokens together - max_tokens = max_tokens + budget_tokens,
489
- // then take the minimum, like: Math.min(max_tokens, allowedMaxTokens).
490
- // However, this approach requires additional information such as model details,
491
- // which would mean adding more arguments like acRequest and modelEntryName to get allowedMaxTokens.
492
- //
493
- // So for now, to keep it simple, if max_tokens equals budget_tokens,
494
- // just use 80% of max_tokens.
495
-
496
- if (budget_tokens === body.max_tokens) {
497
- budget_tokens = Math.floor(budget_tokens * 0.8);
498
- }
499
-
500
- const thinkingBody: Anthropic.MessageCreateParamsNonStreaming = {
501
- model: body.model,
502
- messages,
503
- max_tokens: body.max_tokens,
504
- thinking: {
505
- type: 'enabled',
506
- budget_tokens,
507
- },
508
- };
509
-
510
- if (toolChoice) {
511
- // any and tool are not supported with thinking, so we set it to auto
512
- if (['any', 'tool'].includes(toolChoice.type)) {
513
- thinkingBody.tool_choice = {
514
- type: 'auto',
515
- };
516
- } else {
517
- thinkingBody.tool_choice = toolChoice;
518
- }
519
- }
520
-
521
- return thinkingBody;
522
- }
523
-
524
- private async prepareMessages(params: TLLMPreparedParams) {
525
- const messages = params?.messages || [];
526
-
527
- const files: BinaryInput[] = params?.files || [];
528
-
529
- if (files?.length > 0) {
530
- // #region Upload files
531
- const promises = [];
532
- const _files = [];
533
-
534
- for (let image of files) {
535
- const binaryInput = BinaryInput.from(image);
536
- promises.push(binaryInput.upload(AccessCandidate.agent(params.agentId)));
537
-
538
- _files.push(binaryInput);
539
- }
540
-
541
- await Promise.all(promises);
542
- // #endregion Upload files
543
-
544
- const validSources = this.getValidImageFiles(_files);
545
- const imageData = await this.getImageData(validSources, params.agentId);
546
-
547
- const userMessage = Array.isArray(messages) ? messages.pop() : {};
548
- const prompt = userMessage?.content || '';
549
-
550
- const content = [{ type: 'text', text: prompt }, ...imageData];
551
- messages.push({ role: TLLMMessageRole.User, content });
552
- }
553
-
554
- return messages;
555
- }
556
-
557
- private async prepareSystemPrompt(
558
- systemMessage: TLLMMessageBlock,
559
- params: TLLMPreparedParams
560
- ): Promise<string | Array<Anthropic.TextBlockParam>> {
561
- let systemPrompt = systemMessage?.content;
562
-
563
- if (typeof systemPrompt === 'string') {
564
- systemPrompt = [
565
- {
566
- type: 'text' as const,
567
- text: systemPrompt,
568
- //cache_control: { type: 'ephemeral' }, //TODO: @Forhad check this
569
- },
570
- ] as Array<Anthropic.TextBlockParam>;
571
- }
572
-
573
- (systemPrompt as Array<Anthropic.TextBlockParam>).unshift({
574
- type: 'text' as const,
575
- text: 'If you need to call a function, Do NOT inform the user that you are about to do so, and do not thank the user after you get the response. Just say something like "Give me a moment...", then when you get the response, Just continue answering the user without saying anything about the function you just called',
576
- });
577
-
578
- if (params?.cache) {
579
- (systemPrompt as Array<Anthropic.TextBlockParam>)[systemPrompt.length - 1]['cache_control'] = { type: 'ephemeral' };
580
- }
581
-
582
- return systemPrompt as Array<Anthropic.TextBlockParam>;
583
- }
584
-
585
- /**
586
- * Determines if thinking mode should be used based on model capabilities and parameters.
587
- */
588
- private async shouldUseThinkingMode(params: TLLMPreparedParams): Promise<boolean> {
589
- // Legacy thinking models always use thinking mode
590
- if (LEGACY_THINKING_MODELS.includes(params.modelEntryName)) {
591
- return true;
592
- }
593
-
594
- // Check if reasoning is explicitly requested and model supports it
595
- const useReasoning = params?.useReasoning && params.capabilities?.reasoning === true;
596
-
597
- return useReasoning;
598
- }
599
-
600
- private getValidImageFiles(files: BinaryInput[]) {
601
- const validSources = [];
602
-
603
- for (let file of files) {
604
- if (this.validImageMimeTypes.includes(file?.mimetype)) {
605
- validSources.push(file);
606
- }
607
- }
608
-
609
- if (validSources?.length === 0) {
610
- throw new Error(`Unsupported file(s). Please make sure your file is one of the following types: ${this.validImageMimeTypes.join(', ')}`);
611
- }
612
-
613
- return validSources;
614
- }
615
-
616
- private async getImageData(
617
- files: BinaryInput[],
618
- agentId: string
619
- ): Promise<
620
- {
621
- type: string;
622
- source: { type: 'base64'; data: string; media_type: string };
623
- }[]
624
- > {
625
- try {
626
- const imageData = [];
627
-
628
- for (let file of files) {
629
- const bufferData = await file.readData(AccessCandidate.agent(agentId));
630
- const base64Data = bufferData.toString('base64');
631
-
632
- imageData.push({
633
- type: 'image',
634
- source: {
635
- type: 'base64',
636
- data: base64Data,
637
- media_type: file.mimetype,
638
- },
639
- });
640
- }
641
-
642
- return imageData;
643
- } catch (error) {
644
- throw error;
645
- }
646
- }
647
-
648
- private hasPrefillText(messages: Anthropic.MessageParam[]) {
649
- for (let i = messages.length - 1; i >= 0; i--) {
650
- const message = messages[i];
651
-
652
- if (message?.role === TLLMMessageRole.Assistant && message?.content === PREFILL_TEXT_FOR_JSON_RESPONSE) {
653
- return true;
654
- }
655
- }
656
-
657
- return false;
658
- }
659
- }
1
+ import EventEmitter from 'events';
2
+ import Anthropic from '@anthropic-ai/sdk';
3
+
4
+ import { JSON_RESPONSE_INSTRUCTION, BUILT_IN_MODEL_PREFIX } from '@sre/constants';
5
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
6
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
7
+ import {
8
+ ToolData,
9
+ TLLMMessageBlock,
10
+ TLLMToolResultMessageBlock,
11
+ TLLMMessageRole,
12
+ APIKeySource,
13
+ TLLMEvent,
14
+ ILLMRequestFuncParams,
15
+ TLLMChatResponse,
16
+ BasicCredentials,
17
+ TAnthropicRequestBody,
18
+ ILLMRequestContext,
19
+ TLLMPreparedParams,
20
+ } from '@sre/types/LLM.types';
21
+
22
+ import { LLMHelper } from '@sre/LLMManager/LLM.helper';
23
+ import { JSONContent } from '@sre/helpers/JsonContent.helper';
24
+
25
+ import { LLMConnector } from '../LLMConnector';
26
+ import { SystemEvents } from '@sre/Core/SystemEvents';
27
+ import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
28
+
29
// Assistant "prefill" used to force JSON output: Anthropic continues the completion from '{',
// so the response content must later be re-prefixed with it (see hasPrefillText and the
// request/streamRequest handlers).
const PREFILL_TEXT_FOR_JSON_RESPONSE = '{';
// Model entry names that always run in extended-thinking mode (see shouldUseThinkingMode).
const LEGACY_THINKING_MODELS = ['smythos/claude-3.7-sonnet-thinking', 'claude-3.7-sonnet-thinking'];

// Type aliases
// Union of the streaming and non-streaming request body shapes accepted by prepareBodyForThinkingRequest.
type AnthropicMessageParams = Anthropic.MessageCreateParamsNonStreaming | Anthropic.Messages.MessageStreamParams;
34
+
35
+ // TODO [Forhad]: implement proper typing
36
+
37
+ export class AnthropicConnector extends LLMConnector {
38
+ public name = 'LLM:Anthropic';
39
+
40
+ private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.Anthropic.image;
41
+
42
+ private async getClient(params: ILLMRequestContext): Promise<Anthropic> {
43
+ const apiKey = (params.credentials as BasicCredentials)?.apiKey;
44
+
45
+ if (!apiKey) throw new Error('Please provide an API key for Anthropic');
46
+
47
+ return new Anthropic({ apiKey });
48
+ }
49
+
50
+ protected async request({ acRequest, body, context }: ILLMRequestFuncParams): Promise<TLLMChatResponse> {
51
+ try {
52
+ const anthropic = await this.getClient(context);
53
+ const result = await anthropic.messages.create(body);
54
+ const message: Anthropic.MessageParam = {
55
+ role: (result?.role || TLLMMessageRole.User) as Anthropic.MessageParam['role'],
56
+ content: result?.content || '',
57
+ };
58
+ const stopReason = result?.stop_reason;
59
+
60
+ let toolsData: ToolData[] = [];
61
+ let useTool = false;
62
+
63
+ if ((stopReason as 'tool_use') === 'tool_use') {
64
+ const toolUseContentBlocks = result?.content?.filter((c) => (c.type as 'tool_use') === 'tool_use');
65
+
66
+ if (toolUseContentBlocks?.length === 0) return;
67
+
68
+ toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
69
+ toolsData.push({
70
+ index,
71
+ id: toolUseBlock?.id,
72
+ type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
73
+ name: toolUseBlock?.name,
74
+ arguments: toolUseBlock?.input,
75
+ role: result?.role,
76
+ });
77
+ });
78
+
79
+ useTool = true;
80
+ }
81
+
82
+ const textBlock = result?.content?.find((block) => block.type === 'text');
83
+ let content = textBlock?.text || '';
84
+
85
+ const usage = result?.usage;
86
+
87
+ if (this.hasPrefillText(body.messages)) {
88
+ content = `${PREFILL_TEXT_FOR_JSON_RESPONSE}${content}`;
89
+ }
90
+
91
+ this.reportUsage(usage, {
92
+ modelEntryName: context.modelEntryName,
93
+ keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
94
+ agentId: context.agentId,
95
+ teamId: context.teamId,
96
+ });
97
+
98
+ return {
99
+ content,
100
+ finishReason: result?.stop_reason,
101
+ useTool,
102
+ toolsData,
103
+ message,
104
+ usage,
105
+ };
106
+ } catch (error) {
107
+ throw error;
108
+ }
109
+ }
110
+
111
    /**
     * Sends a streaming message request to Anthropic and relays the SDK stream through a
     * plain EventEmitter, emitting: 'content' (text deltas), 'thinking' (reasoning deltas),
     * TLLMEvent.ToolInfo (tool calls + preserved thinking blocks), 'interrupted'
     * (non-stop finish reasons), 'end' (toolsData, usage, finishReason) and 'error'.
     *
     * @param acRequest - access request context (part of the connector contract)
     * @param body - prepared Anthropic request body (see reqBodyAdapter)
     * @param context - request context carrying credentials, model entry name and billing ids
     * @returns emitter the caller subscribes to for streamed output
     */
    protected async streamRequest({ acRequest, body, context }: ILLMRequestFuncParams): Promise<EventEmitter> {
        try {
            const emitter = new EventEmitter();
            const usage_data = [];

            const anthropic = await this.getClient(context);
            let stream = anthropic.messages.stream(body);

            let toolsData: ToolData[] = [];
            let thinkingBlocks: any[] = []; // To preserve thinking blocks

            // Determine if we need to inject prefill text and track if it's been injected
            // (Anthropic does not echo the assistant prefill '{' used to force JSON output)
            const needsPrefillInjection = this.hasPrefillText(body.messages);
            let prefillInjected = false;

            stream.on('streamEvent', (event: any) => {
                if (event.message?.usage) {
                    //console.log('usage', event.message?.usage);
                }
            });

            stream.on('error', (error) => {
                //console.log('error', error);

                emitter.emit('error', error);
            });

            stream.on('text', (text: string) => {
                // Inject prefill text only once at the very beginning if needed
                if (needsPrefillInjection && !prefillInjected) {
                    text = `${PREFILL_TEXT_FOR_JSON_RESPONSE}${text}`;
                    prefillInjected = true;
                }

                emitter.emit('content', text);
            });

            stream.on('thinking', (thinking) => {
                // Handle thinking blocks during streaming
                emitter.emit('thinking', thinking);
            });

            stream.on('finalMessage', (finalMessage) => {
                let finishReason = 'stop';
                // Preserve thinking blocks for subsequent tool interactions
                thinkingBlocks = finalMessage.content.filter((block) => block.type === 'thinking' || block.type === 'redacted_thinking');

                // Process tool use blocks
                const toolUseContentBlocks = finalMessage.content.filter((c) => c.type === 'tool_use');

                if (toolUseContentBlocks?.length > 0) {
                    toolUseContentBlocks.forEach((toolUseBlock: Anthropic.Messages.ToolUseBlock, index) => {
                        toolsData.push({
                            index,
                            id: toolUseBlock?.id,
                            type: 'function', // We call API only when the tool type is 'function' in `src/helpers/Conversation.helper.ts`. Even though Anthropic returns the type as 'tool_use', it should be interpreted as 'function'.
                            name: toolUseBlock?.name,
                            arguments: toolUseBlock?.input,
                            role: finalMessage?.role,
                        });
                    });

                    emitter.emit(TLLMEvent.ToolInfo, toolsData, thinkingBlocks);
                } else {
                    finishReason = finalMessage.stop_reason;
                }

                if (finalMessage?.usage) {
                    const usage = finalMessage.usage;

                    const reportedUsage = this.reportUsage(usage, {
                        modelEntryName: context.modelEntryName,
                        keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
                        agentId: context.agentId,
                        teamId: context.teamId,
                    });

                    usage_data.push(reportedUsage);
                }
                if (finishReason !== 'stop' && finishReason !== 'end_turn') {
                    emitter.emit('interrupted', finishReason);
                }

                //only emit end event after processing the final message
                // NOTE(review): the 100ms delay gives listeners a chance to process
                // ToolInfo/interrupted before 'end' — timing-sensitive, do not remove casually.
                setTimeout(() => {
                    emitter.emit('end', toolsData, usage_data, finishReason);
                }, 100);
            });

            return emitter;
        } catch (error: any) {
            // NOTE(review): only the synchronous setup above can throw here; stream-time
            // failures are surfaced via the 'error' event instead.
            throw error;
        }
    }
205
+
206
+ protected async reqBodyAdapter(params: TLLMPreparedParams): Promise<TAnthropicRequestBody> {
207
+ const body = await this.prepareBody(params);
208
+
209
+ const shouldUseThinking = await this.shouldUseThinkingMode(params);
210
+ if (shouldUseThinking) {
211
+ return await this.prepareBodyForThinkingRequest({
212
+ body,
213
+ maxThinkingTokens: params.maxThinkingTokens,
214
+ toolChoice: params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice,
215
+ });
216
+ }
217
+
218
+ return body;
219
+ }
220
+
221
+ protected reportUsage(
222
+ usage: Anthropic.Messages.Usage & { cache_creation_input_tokens?: number; cache_read_input_tokens?: number },
223
+ metadata: { modelEntryName: string; keySource: APIKeySource; agentId: string; teamId: string }
224
+ ) {
225
+ // SmythOS (built-in) models have a prefix, so we need to remove it to get the model name
226
+ const modelName = metadata.modelEntryName.replace(BUILT_IN_MODEL_PREFIX, '');
227
+
228
+ const usageData = {
229
+ sourceId: `llm:${modelName}`,
230
+ input_tokens: usage.input_tokens,
231
+ output_tokens: usage.output_tokens,
232
+ input_tokens_cache_write: usage.cache_creation_input_tokens,
233
+ input_tokens_cache_read: usage.cache_read_input_tokens,
234
+ keySource: metadata.keySource,
235
+ agentId: metadata.agentId,
236
+ teamId: metadata.teamId,
237
+ };
238
+ SystemEvents.emit('USAGE:LLM', usageData);
239
+
240
+ return usageData;
241
+ }
242
+
243
+ public formatToolsConfig({ type = 'function', toolDefinitions, toolChoice = 'auto' }) {
244
+ let tools: {
245
+ name: string;
246
+ description: string;
247
+ input_schema: {
248
+ type: 'object';
249
+ properties: Record<string, unknown>;
250
+ required: string[];
251
+ };
252
+ }[] = [];
253
+
254
+ if (type === 'function') {
255
+ tools = toolDefinitions.map((tool) => {
256
+ const { name, description, properties, requiredFields } = tool;
257
+
258
+ return {
259
+ name,
260
+ description,
261
+ input_schema: {
262
+ type: 'object',
263
+ properties,
264
+ required: requiredFields,
265
+ },
266
+ };
267
+ });
268
+ }
269
+
270
+ return tools?.length > 0 ? { tools } : {};
271
+ }
272
+
273
+ public transformToolMessageBlocks({
274
+ messageBlock,
275
+ toolsData,
276
+ }: {
277
+ messageBlock: TLLMMessageBlock & { thinkingBlocks?: { type: string; thinking: string }[] };
278
+ toolsData: ToolData[];
279
+ }): TLLMToolResultMessageBlock[] {
280
+ const messageBlocks: TLLMToolResultMessageBlock[] = [];
281
+
282
+ if (messageBlock) {
283
+ const content: any[] = []; // TODO: set proper type for content
284
+
285
+ if (messageBlock.thinkingBlocks?.length > 0) {
286
+ content.push(...messageBlock.thinkingBlocks);
287
+ }
288
+
289
+ if (Array.isArray(messageBlock.content)) {
290
+ content.push(...messageBlock.content);
291
+ } else {
292
+ if (messageBlock.content) {
293
+ //Anthropic does not accept empty text blocks
294
+ content.push({ type: 'text', text: messageBlock.content });
295
+ }
296
+ }
297
+ if (messageBlock.tool_calls) {
298
+ const calls = messageBlock.tool_calls.map((toolCall: any) => {
299
+ const args = toolCall?.function?.arguments;
300
+ return {
301
+ type: 'tool_use',
302
+ id: toolCall.id,
303
+ name: toolCall?.function?.name,
304
+ input: typeof args === 'string' ? JSONContent(args || '{}').tryParse() : args || {},
305
+ };
306
+ });
307
+
308
+ content.push(...calls);
309
+ }
310
+
311
+ messageBlocks.push({
312
+ role: messageBlock?.role,
313
+ content: content,
314
+ });
315
+ }
316
+
317
+ // Combine all tool results into a single user message
318
+ const toolResultsContent = toolsData.map((toolData): any => ({
319
+ type: 'tool_result',
320
+ tool_use_id: toolData.id,
321
+ content: toolData.result,
322
+ }));
323
+
324
+ if (toolResultsContent.length > 0) {
325
+ messageBlocks.push({
326
+ role: TLLMMessageRole.User,
327
+ content: toolResultsContent,
328
+ });
329
+ }
330
+
331
+ return messageBlocks;
332
+ }
333
+
334
+ // TODO [Forhad]: This method is quite lengthy and complex. Consider breaking it down into smaller, more manageable functions for better readability and maintainability.
335
+ public getConsistentMessages(messages) {
336
+ let _messages = JSON.parse(JSON.stringify(messages));
337
+
338
+ // Extract the system message from the start, as our logic expects 'user' to be the first message for checks and fixes. We will add it back later.
339
+ let systemMessage = null;
340
+ if (_messages[0]?.role === TLLMMessageRole.System) {
341
+ systemMessage = _messages.shift();
342
+ }
343
+
344
+ _messages = LLMHelper.removeDuplicateUserMessages(_messages);
345
+
346
+ _messages = _messages.map((message) => {
347
+ let content;
348
+
349
+ if (message?.parts) {
350
+ content = message.parts.map((textBlock) => textBlock?.text || '').join(' ');
351
+ } else if (Array.isArray(message?.content)) {
352
+ if (Array.isArray(message.content)) {
353
+ const toolBlocks = message.content.filter(
354
+ (item) => typeof item === 'object' && 'type' in item && (item.type === 'tool_use' || item.type === 'tool_result')
355
+ );
356
+
357
+ if (toolBlocks?.length > 0) {
358
+ content = message.content.map((item) => {
359
+ if (item.type === 'text' && (!item.text || item.text.trim() === '')) {
360
+ return { ...item, text: '...' }; // empty text causes error that's why we added '...'
361
+ }
362
+ return item;
363
+ });
364
+ } else {
365
+ content = message.content
366
+ .map((block) => block?.text || '')
367
+ .join(' ')
368
+ .trim();
369
+ }
370
+ } else {
371
+ content = message.content;
372
+ }
373
+ } else if (message?.content) {
374
+ content = message.content as string;
375
+ }
376
+
377
+ message.content = content || '...'; // empty content causes error that's why we added '...'
378
+
379
+ return message;
380
+ });
381
+
382
+ //[FIXED] - `tool_result` block(s) provided when previous message does not contain any `tool_use` blocks" (handler)
383
+ if (_messages[0]?.role === TLLMMessageRole.User && Array.isArray(_messages[0].content)) {
384
+ const hasToolResult = _messages[0].content.find((content) => 'type' in content && content.type === 'tool_result');
385
+
386
+ //we found a tool result in the first message, so we need to remove the user message
387
+ if (hasToolResult) {
388
+ _messages.shift();
389
+ }
390
+ }
391
+
392
+ // - Error: 400 {"type":"error","error":{"type":"invalid_request_error","message":"messages: first message must use the \"user\" role"}}
393
+ if (_messages[0]?.role !== TLLMMessageRole.User) {
394
+ _messages.unshift({ role: TLLMMessageRole.User, content: 'continue' }); //add an empty user message to keep the consistency
395
+ }
396
+
397
+ // Add the system message back to the start, as we extracted it earlier
398
+ // Empty content is not allowed in Anthropic
399
+ if (systemMessage && systemMessage.content) {
400
+ _messages.unshift(systemMessage);
401
+ }
402
+
403
+ return _messages;
404
+ }
405
+
406
    /**
     * Builds the Anthropic non-streaming request body from prepared params:
     * message list (with uploaded attachments), separated system prompt, optional JSON
     * prefill, sampling knobs (skipped for reasoning-capable models) and tool config.
     */
    private async prepareBody(params: TLLMPreparedParams): Promise<Anthropic.MessageCreateParamsNonStreaming> {
        let messages = await this.prepareMessages(params);

        let body: Anthropic.MessageCreateParamsNonStreaming = {
            model: params.model as string,
            messages: messages as Anthropic.MessageParam[],
            max_tokens: params.maxTokens, // * max token is required
        };

        //#region Prepare system message and add JSON response instruction if needed
        // TODO: We have better parameter to have structured response, need to implement it.
        const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);
        if ('content' in systemMessage) {
            body.system = systemMessage?.content as string;
        }
        messages = otherMessages;

        const responseFormat = params?.responseFormat || '';
        if (responseFormat === 'json') {
            body.system = body.system ? `${body.system} ${JSON_RESPONSE_INSTRUCTION}` : JSON_RESPONSE_INSTRUCTION;

            // Assistant prefill '{' forces the model to continue with JSON (re-attached on response).
            messages.push({ role: TLLMMessageRole.Assistant, content: PREFILL_TEXT_FOR_JSON_RESPONSE });
        }

        // NOTE(review): system messages were already separated above, so `messages` should no
        // longer contain any here — this second pass (the only caller of prepareSystemPrompt,
        // with its cache_control and tool-call instruction) looks unreachable unless
        // LLMHelper.separateSystemMessages can leave system messages behind. TODO confirm.
        const hasSystemMessage = LLMHelper.hasSystemMessage(messages);
        if (hasSystemMessage) {
            // in Anthropic we need to provide system message separately
            const { systemMessage, otherMessages } = LLMHelper.separateSystemMessages(messages);

            if ('content' in systemMessage) {
                body.system = await this.prepareSystemPrompt(systemMessage, params);
            }

            messages = otherMessages as Anthropic.MessageParam[];
        }
        //#endregion Prepare system message and add JSON response instruction if needed

        // Sampling parameters are omitted for reasoning-capable models.
        const isReasoningModel = params?.capabilities?.reasoning;

        if (params?.temperature !== undefined && !isReasoningModel) body.temperature = params.temperature;
        if (params?.topP !== undefined && !isReasoningModel) body.top_p = params.topP;
        if (params?.topK !== undefined && !isReasoningModel) body.top_k = params.topK;
        if (params?.stopSequences?.length) body.stop_sequences = params.stopSequences;

        // #region Tools
        if (params?.toolsConfig?.tools && params?.toolsConfig?.tools.length > 0) {
            body.tools = params?.toolsConfig?.tools as unknown as Anthropic.Tool[];

            if (params?.cache) {
                // Prompt caching: tag the last tool definition as the cache breakpoint.
                body.tools[body.tools.length - 1]['cache_control'] = { type: 'ephemeral' };
            }
        }

        const toolChoice = params?.toolsConfig?.tool_choice as unknown as Anthropic.ToolChoice;
        if (toolChoice) {
            body.tool_choice = toolChoice;
        }
        // #endregion Tools

        body.messages = messages as Anthropic.MessageParam[];
        return body;
    }
468
+
469
+ private async prepareBodyForThinkingRequest({
470
+ body,
471
+ maxThinkingTokens,
472
+ toolChoice = null,
473
+ }: {
474
+ body: AnthropicMessageParams;
475
+ maxThinkingTokens: number;
476
+ toolChoice?: Anthropic.ToolChoice;
477
+ }): Promise<Anthropic.MessageCreateParamsNonStreaming> {
478
+ // Remove the assistant message with the prefill text for JSON response, it's not supported with thinking
479
+ let messages = body.messages.filter(
480
+ (message) => !(message?.role === TLLMMessageRole.Assistant && message?.content === PREFILL_TEXT_FOR_JSON_RESPONSE)
481
+ );
482
+
483
+ let budget_tokens = Math.min(maxThinkingTokens, body.max_tokens);
484
+
485
+ // If budget_tokens is equal to max_tokens, we set it to 80% of max_tokens
486
+ // to avoid the error: "budget_tokens must be less than max_tokens".
487
+ //
488
+ // Another way to ensure valid budget_tokens is to add max_tokens and budget_tokens together - max_tokens = max_tokens + budget_tokens,
489
+ // then take the minimum, like: Math.min(max_tokens, allowedMaxTokens).
490
+ // However, this approach requires additional information such as model details,
491
+ // which would mean adding more arguments like acRequest and modelEntryName to get allowedMaxTokens.
492
+ //
493
+ // So for now, to keep it simple, if max_tokens equals budget_tokens,
494
+ // just use 80% of max_tokens.
495
+
496
+ if (budget_tokens === body.max_tokens) {
497
+ budget_tokens = Math.floor(budget_tokens * 0.8);
498
+ }
499
+
500
+ const thinkingBody: Anthropic.MessageCreateParamsNonStreaming = {
501
+ model: body.model,
502
+ messages,
503
+ max_tokens: body.max_tokens,
504
+ thinking: {
505
+ type: 'enabled',
506
+ budget_tokens,
507
+ },
508
+ };
509
+
510
+ if (toolChoice) {
511
+ // any and tool are not supported with thinking, so we set it to auto
512
+ if (['any', 'tool'].includes(toolChoice.type)) {
513
+ thinkingBody.tool_choice = {
514
+ type: 'auto',
515
+ };
516
+ } else {
517
+ thinkingBody.tool_choice = toolChoice;
518
+ }
519
+ }
520
+
521
+ return thinkingBody;
522
+ }
523
+
524
+ private async prepareMessages(params: TLLMPreparedParams) {
525
+ const messages = params?.messages || [];
526
+
527
+ const files: BinaryInput[] = params?.files || [];
528
+
529
+ if (files?.length > 0) {
530
+ // #region Upload files
531
+ const promises = [];
532
+ const _files = [];
533
+
534
+ for (let image of files) {
535
+ const binaryInput = BinaryInput.from(image);
536
+ promises.push(binaryInput.upload(AccessCandidate.agent(params.agentId)));
537
+
538
+ _files.push(binaryInput);
539
+ }
540
+
541
+ await Promise.all(promises);
542
+ // #endregion Upload files
543
+
544
+ const validSources = this.getValidImageFiles(_files);
545
+ const imageData = await this.getImageData(validSources, params.agentId);
546
+
547
+ const userMessage = Array.isArray(messages) ? messages.pop() : {};
548
+ const prompt = userMessage?.content || '';
549
+
550
+ const content = [{ type: 'text', text: prompt }, ...imageData];
551
+ messages.push({ role: TLLMMessageRole.User, content });
552
+ }
553
+
554
+ return messages;
555
+ }
556
+
557
+ private async prepareSystemPrompt(
558
+ systemMessage: TLLMMessageBlock,
559
+ params: TLLMPreparedParams
560
+ ): Promise<string | Array<Anthropic.TextBlockParam>> {
561
+ let systemPrompt = systemMessage?.content;
562
+
563
+ if (typeof systemPrompt === 'string') {
564
+ systemPrompt = [
565
+ {
566
+ type: 'text' as const,
567
+ text: systemPrompt,
568
+ //cache_control: { type: 'ephemeral' }, //TODO: @Forhad check this
569
+ },
570
+ ] as Array<Anthropic.TextBlockParam>;
571
+ }
572
+
573
+ (systemPrompt as Array<Anthropic.TextBlockParam>).unshift({
574
+ type: 'text' as const,
575
+ text: 'If you need to call a function, Do NOT inform the user that you are about to do so, and do not thank the user after you get the response. Just say something like "Give me a moment...", then when you get the response, Just continue answering the user without saying anything about the function you just called',
576
+ });
577
+
578
+ if (params?.cache) {
579
+ (systemPrompt as Array<Anthropic.TextBlockParam>)[systemPrompt.length - 1]['cache_control'] = { type: 'ephemeral' };
580
+ }
581
+
582
+ return systemPrompt as Array<Anthropic.TextBlockParam>;
583
+ }
584
+
585
+ /**
586
+ * Determines if thinking mode should be used based on model capabilities and parameters.
587
+ */
588
+ private async shouldUseThinkingMode(params: TLLMPreparedParams): Promise<boolean> {
589
+ // Legacy thinking models always use thinking mode
590
+ if (LEGACY_THINKING_MODELS.includes(params.modelEntryName)) {
591
+ return true;
592
+ }
593
+
594
+ // Check if reasoning is explicitly requested and model supports it
595
+ const useReasoning = params?.useReasoning && params.capabilities?.reasoning === true;
596
+
597
+ return useReasoning;
598
+ }
599
+
600
+ private getValidImageFiles(files: BinaryInput[]) {
601
+ const validSources = [];
602
+
603
+ for (let file of files) {
604
+ if (this.validImageMimeTypes.includes(file?.mimetype)) {
605
+ validSources.push(file);
606
+ }
607
+ }
608
+
609
+ if (validSources?.length === 0) {
610
+ throw new Error(`Unsupported file(s). Please make sure your file is one of the following types: ${this.validImageMimeTypes.join(', ')}`);
611
+ }
612
+
613
+ return validSources;
614
+ }
615
+
616
+ private async getImageData(
617
+ files: BinaryInput[],
618
+ agentId: string
619
+ ): Promise<
620
+ {
621
+ type: string;
622
+ source: { type: 'base64'; data: string; media_type: string };
623
+ }[]
624
+ > {
625
+ try {
626
+ const imageData = [];
627
+
628
+ for (let file of files) {
629
+ const bufferData = await file.readData(AccessCandidate.agent(agentId));
630
+ const base64Data = bufferData.toString('base64');
631
+
632
+ imageData.push({
633
+ type: 'image',
634
+ source: {
635
+ type: 'base64',
636
+ data: base64Data,
637
+ media_type: file.mimetype,
638
+ },
639
+ });
640
+ }
641
+
642
+ return imageData;
643
+ } catch (error) {
644
+ throw error;
645
+ }
646
+ }
647
+
648
+ private hasPrefillText(messages: Anthropic.MessageParam[]) {
649
+ for (let i = messages.length - 1; i >= 0; i--) {
650
+ const message = messages[i];
651
+
652
+ if (message?.role === TLLMMessageRole.Assistant && message?.content === PREFILL_TEXT_FOR_JSON_RESPONSE) {
653
+ return true;
654
+ }
655
+ }
656
+
657
+ return false;
658
+ }
659
+ }