@smythos/sre 1.5.45 → 1.5.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (225) hide show
  1. package/CHANGELOG +98 -90
  2. package/LICENSE +18 -18
  3. package/README.md +135 -135
  4. package/dist/bundle-analysis-lazy.html +4949 -0
  5. package/dist/bundle-analysis.html +4949 -0
  6. package/dist/index.js +6 -6
  7. package/dist/index.js.map +1 -1
  8. package/dist/types/Components/MCPClient.class.d.ts +1 -0
  9. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.d.ts +1 -6
  10. package/dist/types/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.d.ts +2 -2
  11. package/dist/types/utils/package-manager.utils.d.ts +26 -0
  12. package/package.json +1 -1
  13. package/src/Components/APICall/APICall.class.ts +156 -156
  14. package/src/Components/APICall/AccessTokenManager.ts +130 -130
  15. package/src/Components/APICall/ArrayBufferResponse.helper.ts +58 -58
  16. package/src/Components/APICall/OAuth.helper.ts +294 -294
  17. package/src/Components/APICall/mimeTypeCategories.ts +46 -46
  18. package/src/Components/APICall/parseData.ts +167 -167
  19. package/src/Components/APICall/parseHeaders.ts +41 -41
  20. package/src/Components/APICall/parseProxy.ts +68 -68
  21. package/src/Components/APICall/parseUrl.ts +91 -91
  22. package/src/Components/APIEndpoint.class.ts +234 -234
  23. package/src/Components/APIOutput.class.ts +58 -58
  24. package/src/Components/AgentPlugin.class.ts +102 -102
  25. package/src/Components/Async.class.ts +155 -155
  26. package/src/Components/Await.class.ts +90 -90
  27. package/src/Components/Classifier.class.ts +158 -158
  28. package/src/Components/Component.class.ts +132 -132
  29. package/src/Components/ComponentHost.class.ts +38 -38
  30. package/src/Components/DataSourceCleaner.class.ts +92 -92
  31. package/src/Components/DataSourceIndexer.class.ts +181 -181
  32. package/src/Components/DataSourceLookup.class.ts +161 -161
  33. package/src/Components/ECMASandbox.class.ts +71 -71
  34. package/src/Components/FEncDec.class.ts +29 -29
  35. package/src/Components/FHash.class.ts +33 -33
  36. package/src/Components/FSign.class.ts +80 -80
  37. package/src/Components/FSleep.class.ts +25 -25
  38. package/src/Components/FTimestamp.class.ts +25 -25
  39. package/src/Components/FileStore.class.ts +78 -78
  40. package/src/Components/ForEach.class.ts +97 -97
  41. package/src/Components/GPTPlugin.class.ts +70 -70
  42. package/src/Components/GenAILLM.class.ts +586 -586
  43. package/src/Components/HuggingFace.class.ts +314 -314
  44. package/src/Components/Image/imageSettings.config.ts +70 -70
  45. package/src/Components/ImageGenerator.class.ts +502 -502
  46. package/src/Components/JSONFilter.class.ts +54 -54
  47. package/src/Components/LLMAssistant.class.ts +213 -213
  48. package/src/Components/LogicAND.class.ts +28 -28
  49. package/src/Components/LogicAtLeast.class.ts +85 -85
  50. package/src/Components/LogicAtMost.class.ts +86 -86
  51. package/src/Components/LogicOR.class.ts +29 -29
  52. package/src/Components/LogicXOR.class.ts +34 -34
  53. package/src/Components/MCPClient.class.ts +138 -112
  54. package/src/Components/MemoryDeleteKeyVal.class.ts +70 -70
  55. package/src/Components/MemoryReadKeyVal.class.ts +66 -66
  56. package/src/Components/MemoryWriteKeyVal.class.ts +62 -62
  57. package/src/Components/MemoryWriteObject.class.ts +97 -97
  58. package/src/Components/MultimodalLLM.class.ts +128 -128
  59. package/src/Components/OpenAPI.class.ts +72 -72
  60. package/src/Components/PromptGenerator.class.ts +122 -122
  61. package/src/Components/ScrapflyWebScrape.class.ts +159 -159
  62. package/src/Components/ServerlessCode.class.ts +123 -123
  63. package/src/Components/TavilyWebSearch.class.ts +98 -98
  64. package/src/Components/VisionLLM.class.ts +104 -104
  65. package/src/Components/ZapierAction.class.ts +127 -127
  66. package/src/Components/index.ts +97 -97
  67. package/src/Core/AgentProcess.helper.ts +240 -240
  68. package/src/Core/Connector.class.ts +123 -123
  69. package/src/Core/ConnectorsService.ts +197 -197
  70. package/src/Core/DummyConnector.ts +49 -49
  71. package/src/Core/HookService.ts +105 -105
  72. package/src/Core/SmythRuntime.class.ts +235 -235
  73. package/src/Core/SystemEvents.ts +16 -16
  74. package/src/Core/boot.ts +56 -56
  75. package/src/config.ts +15 -15
  76. package/src/constants.ts +126 -126
  77. package/src/data/hugging-face.params.json +579 -579
  78. package/src/helpers/AWSLambdaCode.helper.ts +587 -587
  79. package/src/helpers/BinaryInput.helper.ts +331 -331
  80. package/src/helpers/Conversation.helper.ts +1119 -1119
  81. package/src/helpers/ECMASandbox.helper.ts +54 -54
  82. package/src/helpers/JsonContent.helper.ts +97 -97
  83. package/src/helpers/LocalCache.helper.ts +97 -97
  84. package/src/helpers/Log.helper.ts +274 -274
  85. package/src/helpers/OpenApiParser.helper.ts +150 -150
  86. package/src/helpers/S3Cache.helper.ts +147 -147
  87. package/src/helpers/SmythURI.helper.ts +5 -5
  88. package/src/helpers/Sysconfig.helper.ts +77 -77
  89. package/src/helpers/TemplateString.helper.ts +243 -243
  90. package/src/helpers/TypeChecker.helper.ts +329 -329
  91. package/src/index.ts +3 -3
  92. package/src/index.ts.bak +3 -3
  93. package/src/subsystems/AgentManager/Agent.class.ts +1114 -1114
  94. package/src/subsystems/AgentManager/Agent.helper.ts +3 -3
  95. package/src/subsystems/AgentManager/AgentData.service/AgentDataConnector.ts +230 -230
  96. package/src/subsystems/AgentManager/AgentData.service/connectors/CLIAgentDataConnector.class.ts +66 -66
  97. package/src/subsystems/AgentManager/AgentData.service/connectors/LocalAgentDataConnector.class.ts +142 -142
  98. package/src/subsystems/AgentManager/AgentData.service/connectors/NullAgentData.class.ts +39 -39
  99. package/src/subsystems/AgentManager/AgentData.service/index.ts +18 -18
  100. package/src/subsystems/AgentManager/AgentLogger.class.ts +297 -297
  101. package/src/subsystems/AgentManager/AgentRequest.class.ts +51 -51
  102. package/src/subsystems/AgentManager/AgentRuntime.class.ts +559 -559
  103. package/src/subsystems/AgentManager/AgentSSE.class.ts +101 -101
  104. package/src/subsystems/AgentManager/AgentSettings.class.ts +52 -52
  105. package/src/subsystems/AgentManager/Component.service/ComponentConnector.ts +32 -32
  106. package/src/subsystems/AgentManager/Component.service/connectors/LocalComponentConnector.class.ts +60 -60
  107. package/src/subsystems/AgentManager/Component.service/index.ts +11 -11
  108. package/src/subsystems/AgentManager/EmbodimentSettings.class.ts +47 -47
  109. package/src/subsystems/AgentManager/ForkedAgent.class.ts +154 -154
  110. package/src/subsystems/AgentManager/OSResourceMonitor.ts +77 -77
  111. package/src/subsystems/ComputeManager/Code.service/CodeConnector.ts +98 -98
  112. package/src/subsystems/ComputeManager/Code.service/connectors/AWSLambdaCode.class.ts +172 -172
  113. package/src/subsystems/ComputeManager/Code.service/connectors/ECMASandbox.class.ts +131 -131
  114. package/src/subsystems/ComputeManager/Code.service/index.ts +13 -13
  115. package/src/subsystems/IO/CLI.service/CLIConnector.ts +47 -47
  116. package/src/subsystems/IO/CLI.service/index.ts +9 -9
  117. package/src/subsystems/IO/Log.service/LogConnector.ts +32 -32
  118. package/src/subsystems/IO/Log.service/connectors/ConsoleLog.class.ts +28 -28
  119. package/src/subsystems/IO/Log.service/index.ts +13 -13
  120. package/src/subsystems/IO/NKV.service/NKVConnector.ts +43 -43
  121. package/src/subsystems/IO/NKV.service/connectors/NKVLocalStorage.class.ts +234 -234
  122. package/src/subsystems/IO/NKV.service/connectors/NKVRAM.class.ts +204 -204
  123. package/src/subsystems/IO/NKV.service/connectors/NKVRedis.class.ts +182 -182
  124. package/src/subsystems/IO/NKV.service/index.ts +14 -14
  125. package/src/subsystems/IO/Router.service/RouterConnector.ts +21 -21
  126. package/src/subsystems/IO/Router.service/connectors/ExpressRouter.class.ts +48 -48
  127. package/src/subsystems/IO/Router.service/connectors/NullRouter.class.ts +40 -40
  128. package/src/subsystems/IO/Router.service/index.ts +11 -11
  129. package/src/subsystems/IO/Storage.service/SmythFS.class.ts +489 -489
  130. package/src/subsystems/IO/Storage.service/StorageConnector.ts +66 -66
  131. package/src/subsystems/IO/Storage.service/connectors/LocalStorage.class.ts +327 -327
  132. package/src/subsystems/IO/Storage.service/connectors/S3Storage.class.ts +482 -482
  133. package/src/subsystems/IO/Storage.service/index.ts +13 -13
  134. package/src/subsystems/IO/VectorDB.service/VectorDBConnector.ts +108 -108
  135. package/src/subsystems/IO/VectorDB.service/connectors/MilvusVectorDB.class.ts +454 -454
  136. package/src/subsystems/IO/VectorDB.service/connectors/PineconeVectorDB.class.ts +384 -384
  137. package/src/subsystems/IO/VectorDB.service/connectors/RAMVecrtorDB.class.ts +421 -421
  138. package/src/subsystems/IO/VectorDB.service/embed/BaseEmbedding.ts +107 -107
  139. package/src/subsystems/IO/VectorDB.service/embed/OpenAIEmbedding.ts +109 -109
  140. package/src/subsystems/IO/VectorDB.service/embed/index.ts +21 -21
  141. package/src/subsystems/IO/VectorDB.service/index.ts +14 -14
  142. package/src/subsystems/LLMManager/LLM.helper.ts +251 -251
  143. package/src/subsystems/LLMManager/LLM.inference.ts +339 -339
  144. package/src/subsystems/LLMManager/LLM.service/LLMConnector.ts +489 -489
  145. package/src/subsystems/LLMManager/LLM.service/LLMCredentials.helper.ts +171 -171
  146. package/src/subsystems/LLMManager/LLM.service/connectors/Anthropic.class.ts +659 -659
  147. package/src/subsystems/LLMManager/LLM.service/connectors/Bedrock.class.ts +400 -400
  148. package/src/subsystems/LLMManager/LLM.service/connectors/Echo.class.ts +77 -77
  149. package/src/subsystems/LLMManager/LLM.service/connectors/GoogleAI.class.ts +757 -757
  150. package/src/subsystems/LLMManager/LLM.service/connectors/Groq.class.ts +304 -304
  151. package/src/subsystems/LLMManager/LLM.service/connectors/Perplexity.class.ts +250 -250
  152. package/src/subsystems/LLMManager/LLM.service/connectors/VertexAI.class.ts +423 -423
  153. package/src/subsystems/LLMManager/LLM.service/connectors/openai/OpenAIConnector.class.ts +488 -488
  154. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ChatCompletionsApiInterface.ts +524 -528
  155. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterface.ts +100 -100
  156. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/OpenAIApiInterfaceFactory.ts +81 -81
  157. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/ResponsesApiInterface.ts +1145 -1168
  158. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/constants.ts +13 -13
  159. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/index.ts +4 -4
  160. package/src/subsystems/LLMManager/LLM.service/connectors/openai/apiInterfaces/utils.ts +11 -11
  161. package/src/subsystems/LLMManager/LLM.service/connectors/openai/types.ts +32 -32
  162. package/src/subsystems/LLMManager/LLM.service/connectors/xAI.class.ts +471 -471
  163. package/src/subsystems/LLMManager/LLM.service/index.ts +44 -44
  164. package/src/subsystems/LLMManager/ModelsProvider.service/ModelsProviderConnector.ts +300 -300
  165. package/src/subsystems/LLMManager/ModelsProvider.service/connectors/JSONModelsProvider.class.ts +252 -252
  166. package/src/subsystems/LLMManager/ModelsProvider.service/index.ts +11 -11
  167. package/src/subsystems/LLMManager/custom-models.ts +854 -854
  168. package/src/subsystems/LLMManager/models.ts +2540 -2540
  169. package/src/subsystems/LLMManager/paramMappings.ts +69 -69
  170. package/src/subsystems/MemoryManager/Cache.service/CacheConnector.ts +86 -86
  171. package/src/subsystems/MemoryManager/Cache.service/connectors/LocalStorageCache.class.ts +297 -297
  172. package/src/subsystems/MemoryManager/Cache.service/connectors/RAMCache.class.ts +201 -201
  173. package/src/subsystems/MemoryManager/Cache.service/connectors/RedisCache.class.ts +252 -252
  174. package/src/subsystems/MemoryManager/Cache.service/connectors/S3Cache.class.ts +373 -373
  175. package/src/subsystems/MemoryManager/Cache.service/index.ts +15 -15
  176. package/src/subsystems/MemoryManager/LLMCache.ts +72 -72
  177. package/src/subsystems/MemoryManager/LLMContext.ts +124 -124
  178. package/src/subsystems/MemoryManager/LLMMemory.service/LLMMemoryConnector.ts +26 -26
  179. package/src/subsystems/MemoryManager/RuntimeContext.ts +266 -266
  180. package/src/subsystems/Security/AccessControl/ACL.class.ts +208 -208
  181. package/src/subsystems/Security/AccessControl/AccessCandidate.class.ts +82 -82
  182. package/src/subsystems/Security/AccessControl/AccessRequest.class.ts +52 -52
  183. package/src/subsystems/Security/Account.service/AccountConnector.ts +44 -44
  184. package/src/subsystems/Security/Account.service/connectors/AWSAccount.class.ts +76 -76
  185. package/src/subsystems/Security/Account.service/connectors/DummyAccount.class.ts +130 -130
  186. package/src/subsystems/Security/Account.service/connectors/JSONFileAccount.class.ts +159 -159
  187. package/src/subsystems/Security/Account.service/index.ts +14 -14
  188. package/src/subsystems/Security/Credentials.helper.ts +62 -62
  189. package/src/subsystems/Security/ManagedVault.service/ManagedVaultConnector.ts +38 -38
  190. package/src/subsystems/Security/ManagedVault.service/connectors/NullManagedVault.class.ts +53 -53
  191. package/src/subsystems/Security/ManagedVault.service/connectors/SecretManagerManagedVault.ts +154 -154
  192. package/src/subsystems/Security/ManagedVault.service/index.ts +12 -12
  193. package/src/subsystems/Security/SecureConnector.class.ts +110 -110
  194. package/src/subsystems/Security/Vault.service/Vault.helper.ts +30 -30
  195. package/src/subsystems/Security/Vault.service/VaultConnector.ts +29 -29
  196. package/src/subsystems/Security/Vault.service/connectors/HashicorpVault.class.ts +46 -46
  197. package/src/subsystems/Security/Vault.service/connectors/JSONFileVault.class.ts +221 -221
  198. package/src/subsystems/Security/Vault.service/connectors/NullVault.class.ts +54 -54
  199. package/src/subsystems/Security/Vault.service/connectors/SecretsManager.class.ts +140 -140
  200. package/src/subsystems/Security/Vault.service/index.ts +12 -12
  201. package/src/types/ACL.types.ts +104 -104
  202. package/src/types/AWS.types.ts +10 -10
  203. package/src/types/Agent.types.ts +61 -61
  204. package/src/types/AgentLogger.types.ts +17 -17
  205. package/src/types/Cache.types.ts +1 -1
  206. package/src/types/Common.types.ts +2 -2
  207. package/src/types/LLM.types.ts +496 -496
  208. package/src/types/Redis.types.ts +8 -8
  209. package/src/types/SRE.types.ts +64 -64
  210. package/src/types/Security.types.ts +14 -14
  211. package/src/types/Storage.types.ts +5 -5
  212. package/src/types/VectorDB.types.ts +86 -86
  213. package/src/utils/base64.utils.ts +275 -275
  214. package/src/utils/cli.utils.ts +68 -68
  215. package/src/utils/data.utils.ts +322 -322
  216. package/src/utils/date-time.utils.ts +22 -22
  217. package/src/utils/general.utils.ts +238 -238
  218. package/src/utils/index.ts +12 -12
  219. package/src/utils/lazy-client.ts +261 -261
  220. package/src/utils/numbers.utils.ts +13 -13
  221. package/src/utils/oauth.utils.ts +35 -35
  222. package/src/utils/string.utils.ts +414 -414
  223. package/src/utils/url.utils.ts +19 -19
  224. package/src/utils/validation.utils.ts +74 -74
  225. package/dist/types/subsystems/LLMManager/ModelsProvider.service/connectors/SmythModelsProvider.class.d.ts +0 -39
@@ -1,1168 +1,1145 @@
1
- import EventEmitter from 'events';
2
- import OpenAI from 'openai';
3
- import type { Stream } from 'openai/streaming';
4
-
5
- import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
6
- import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
7
- import {
8
- TLLMParams,
9
- TLLMPreparedParams,
10
- ILLMRequestContext,
11
- TLLMMessageBlock,
12
- ToolData,
13
- TLLMToolResultMessageBlock,
14
- TLLMMessageRole,
15
- APIKeySource,
16
- TLLMEvent,
17
- OpenAIToolDefinition,
18
- LegacyToolDefinition,
19
- LLMModelInfo,
20
- } from '@sre/types/LLM.types';
21
- import { OpenAIApiInterface, ToolConfig } from './OpenAIApiInterface';
22
- import { HandlerDependencies, TToolType } from '../types';
23
- import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
24
- import { MODELS_WITHOUT_TEMPERATURE_SUPPORT, SEARCH_TOOL_COSTS } from './constants';
25
- import { isValidOpenAIReasoningEffort } from './utils';
26
-
27
- // File size limits in bytes
28
- const MAX_IMAGE_SIZE = 20 * 1024 * 1024; // 20MB
29
- const MAX_DOCUMENT_SIZE = 25 * 1024 * 1024; // 25MB
30
-
31
- // Event type constants for type safety and maintainability
32
- const EVENT_TYPES = {
33
- // Officially supported web search events (OpenAI SDK >= 5.12.x)
34
- WEB_SEARCH_IN_PROGRESS: 'response.web_search_call.in_progress',
35
- WEB_SEARCH_SEARCHING: 'response.web_search_call.searching',
36
- WEB_SEARCH_COMPLETED: 'response.web_search_call.completed',
37
- // Legacy alias observed historically (kept for backward compat if emitted)
38
- WEB_SEARCH_STARTED: 'response.web_search_call.started',
39
-
40
- RESPONSE_COMPLETED: 'response.completed',
41
- OUTPUT_TEXT_DELTA: 'response.output_text.delta',
42
- OUTPUT_ITEM_ADDED: 'response.output_item.added',
43
- FUNCTION_CALL_ARGUMENTS_DELTA: 'response.function_call_arguments.delta',
44
- FUNCTION_CALL_ARGUMENTS_DONE: 'response.function_call_arguments.done',
45
- OUTPUT_ITEM_DONE: 'response.output_item.done',
46
- } as const;
47
-
48
- // Type definitions for web search events (augmenting SDK types locally)
49
- interface WebSearchInProgressEvent {
50
- type: typeof EVENT_TYPES.WEB_SEARCH_IN_PROGRESS;
51
- item_id: string;
52
- }
53
-
54
- interface WebSearchSearchingEvent {
55
- type: typeof EVENT_TYPES.WEB_SEARCH_SEARCHING;
56
- item_id: string;
57
- }
58
-
59
- interface WebSearchCompletedEvent {
60
- type: typeof EVENT_TYPES.WEB_SEARCH_COMPLETED;
61
- item_id: string;
62
- }
63
-
64
- type TSearchLocation = {
65
- type: 'approximate';
66
- city?: string;
67
- country?: string;
68
- region?: string;
69
- timezone?: string;
70
- };
71
-
72
- /**
73
- * OpenAI Responses API interface implementation
74
- * Handles all Responses API-specific logic including:
75
- * - Stream creation and handling
76
- * - Request body preparation
77
- * - Tool and message transformations
78
- * - File attachment handling
79
- */
80
- export class ResponsesApiInterface extends OpenAIApiInterface {
81
- private deps: HandlerDependencies;
82
- private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.image;
83
- private validDocumentMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.document;
84
-
85
- constructor(context: ILLMRequestContext, deps: HandlerDependencies) {
86
- super(context);
87
- this.deps = deps;
88
- }
89
-
90
- async createRequest(body: OpenAI.Responses.ResponseCreateParams, context: ILLMRequestContext): Promise<OpenAI.Responses.Response> {
91
- const openai = await this.deps.getClient(context);
92
- return await openai.responses.create({
93
- ...body,
94
- stream: false,
95
- });
96
- }
97
-
98
- async createStream(
99
- body: OpenAI.Responses.ResponseCreateParams,
100
- context: ILLMRequestContext
101
- ): Promise<Stream<OpenAI.Responses.ResponseStreamEvent>> {
102
- const openai = await this.deps.getClient(context);
103
- return (await openai.responses.create({
104
- ...body,
105
- stream: true,
106
- })) as Stream<OpenAI.Responses.ResponseStreamEvent>;
107
- }
108
-
109
- public handleStream(stream: Stream<OpenAI.Responses.ResponseStreamEvent>, context: ILLMRequestContext): EventEmitter {
110
- const emitter = new EventEmitter();
111
-
112
- // Process stream asynchronously while returning emitter immediately
113
- (async () => {
114
- let finalToolsData: ToolData[] = [];
115
-
116
- try {
117
- // Step 1: Process the stream
118
- const streamResult = await this.processStream(stream, emitter);
119
- finalToolsData = streamResult.toolsData;
120
-
121
- const finishReason = streamResult.finishReason || 'stop';
122
- const usageData = streamResult.usageData;
123
-
124
- // Step 2: Report usage statistics
125
- const reportedUsage = this.reportUsageStatistics(usageData, context);
126
-
127
- // Step 3: Emit final events
128
- this.emitFinalEvents(emitter, finalToolsData, reportedUsage, finishReason);
129
- } catch (error) {
130
- emitter.emit('error', error);
131
- }
132
- })();
133
-
134
- return emitter;
135
- }
136
-
137
- /**
138
- * Process the responses API stream format
139
- */
140
- private async processStream(
141
- stream: Stream<OpenAI.Responses.ResponseStreamEvent>,
142
- emitter: EventEmitter
143
- ): Promise<{ toolsData: ToolData[]; finishReason: string; usageData: any[] }> {
144
- let toolsData: ToolData[] = [];
145
- let finishReason = 'stop';
146
- const usageData = [];
147
-
148
- for await (const part of stream) {
149
- try {
150
- // Handle different event types from the Responses API stream
151
- if ('type' in part) {
152
- // Handle officially typed events using constants
153
- switch (part.type) {
154
- case EVENT_TYPES.WEB_SEARCH_IN_PROGRESS:
155
- toolsData = this.handleWebSearchInProgress(part as any, toolsData);
156
- break;
157
- case EVENT_TYPES.WEB_SEARCH_SEARCHING:
158
- toolsData = this.handleWebSearchSearching(part as any, toolsData);
159
- break;
160
- case EVENT_TYPES.WEB_SEARCH_COMPLETED:
161
- toolsData = this.handleWebSearchCompleted(part as any, toolsData);
162
- break;
163
- case EVENT_TYPES.OUTPUT_TEXT_DELTA:
164
- this.handleOutputTextDelta(part, emitter);
165
- break;
166
-
167
- case EVENT_TYPES.OUTPUT_ITEM_ADDED:
168
- toolsData = this.handleOutputItemAdded(part, toolsData, emitter);
169
- break;
170
-
171
- case EVENT_TYPES.FUNCTION_CALL_ARGUMENTS_DELTA:
172
- toolsData = this.handleFunctionCallArgumentsDelta(part, toolsData, emitter);
173
- break;
174
-
175
- case EVENT_TYPES.FUNCTION_CALL_ARGUMENTS_DONE:
176
- toolsData = this.handleFunctionCallArgumentsDone(part, toolsData, emitter);
177
- break;
178
-
179
- case EVENT_TYPES.OUTPUT_ITEM_DONE:
180
- toolsData = this.handleOutputItemDone(part, toolsData);
181
- break;
182
-
183
- case EVENT_TYPES.RESPONSE_COMPLETED: {
184
- finishReason = 'stop';
185
- const responseData = (part as any)?.response;
186
- if (responseData?.usage) {
187
- usageData.push(responseData.usage);
188
- }
189
- break;
190
- }
191
-
192
- default: {
193
- const eventType = String(part.type);
194
- // Handle legacy started event if ever emitted
195
- if (eventType === EVENT_TYPES.WEB_SEARCH_STARTED) {
196
- const legacyId = (part as any)?.id;
197
- if (typeof legacyId === 'string') {
198
- const result = this.upsertWebSearchToolImmutable(toolsData, legacyId);
199
- toolsData = result.toolsData;
200
- }
201
- break;
202
- }
203
- // Handle any other unknown 'done' style events as completion
204
- finishReason = this.handleCompletionEvent(eventType);
205
- break;
206
- }
207
- }
208
- }
209
- } catch (error) {
210
- // Log error but continue processing to prevent stream interruption
211
- console.warn('Error processing stream event:', error, 'Event:', part);
212
- }
213
- }
214
-
215
- return { toolsData: this.extractToolCalls(toolsData), finishReason, usageData };
216
- }
217
-
218
- /**
219
- * Extract and format tool calls from the accumulated data
220
- */
221
- private extractToolCalls(output: ToolData[]): ToolData[] {
222
- return output.map((tool) => ({
223
- index: tool.index,
224
- name: tool.name,
225
- arguments: tool.arguments,
226
- id: tool.callId || tool.id, // Use callId for final output if available
227
- type: tool.type,
228
- role: tool.role,
229
- callId: tool.callId, // Preserve callId for reference
230
- }));
231
- }
232
-
233
- /**
234
- * Report usage statistics
235
- */
236
- private reportUsageStatistics(usage_data: any[], context: ILLMRequestContext): any[] {
237
- const reportedUsage: any[] = [];
238
-
239
- // Report normal usage
240
- usage_data.forEach((usage) => {
241
- // Convert ResponseUsage to CompletionUsage format for compatibility
242
- const convertedUsage = {
243
- completion_tokens: usage.completion_tokens || 0,
244
- prompt_tokens: usage.prompt_tokens || 0,
245
- total_tokens: usage.total_tokens || 0,
246
- ...usage,
247
- };
248
- const reported = this.deps.reportUsage(convertedUsage, this.buildUsageContext(context));
249
- reportedUsage.push(reported);
250
- });
251
-
252
- // Report search tool usage if enabled
253
- if (context.toolsInfo?.openai?.webSearch?.enabled) {
254
- const searchUsage = this.calculateSearchToolUsage(context);
255
- const reported = this.deps.reportUsage(searchUsage, this.buildUsageContext(context));
256
- reportedUsage.push(reported);
257
- }
258
-
259
- return reportedUsage;
260
- }
261
-
262
- /**
263
- * Emit final events
264
- */
265
- private emitFinalEvents(emitter: EventEmitter, toolsData: ToolData[], reportedUsage: any[], finishReason: string): void {
266
- // Emit tool info event if tools were called
267
- if (toolsData.length > 0) {
268
- emitter.emit(TLLMEvent.ToolInfo, toolsData);
269
- }
270
-
271
- // Emit interrupted event if finishReason is not 'stop'
272
- if (finishReason !== 'stop') {
273
- emitter.emit('interrupted', finishReason);
274
- }
275
-
276
- // Emit end event with setImmediate to ensure proper event ordering
277
- setImmediate(() => {
278
- emitter.emit('end', toolsData, reportedUsage, finishReason);
279
- });
280
- }
281
-
282
- /**
283
- * Build usage context parameters from request context
284
- */
285
- private buildUsageContext(context: ILLMRequestContext) {
286
- return {
287
- modelEntryName: context.modelEntryName,
288
- keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
289
- agentId: context.agentId,
290
- teamId: context.teamId,
291
- };
292
- }
293
-
294
- /**
295
- * Calculate search tool usage with cost
296
- */
297
- private calculateSearchToolUsage(context: ILLMRequestContext) {
298
- const modelName = context.modelEntryName?.replace('smythos/', '');
299
- const cost = this.getSearchToolCost(modelName);
300
-
301
- return {
302
- cost,
303
- completion_tokens: 0,
304
- prompt_tokens: 0,
305
- total_tokens: 0,
306
- };
307
- }
308
-
309
- // =====================
310
- // Event handlers (private)
311
- // =====================
312
-
313
- /**
314
- * Handle web search completed event with proper type safety
315
- */
316
- private handleWebSearchCompleted(event: WebSearchCompletedEvent, toolsData: ToolData[]): ToolData[] {
317
- try {
318
- const { item_id: itemId } = event;
319
- const result = this.upsertWebSearchToolImmutable(toolsData, itemId);
320
- return result.toolsData;
321
- } catch (error) {
322
- console.warn('Error handling web search completed event:', error);
323
- return toolsData;
324
- }
325
- }
326
-
327
- /**
328
- * Handle web search in-progress event (official typed)
329
- */
330
- private handleWebSearchInProgress(event: WebSearchInProgressEvent, toolsData: ToolData[]): ToolData[] {
331
- try {
332
- const { item_id: itemId } = event;
333
- const result = this.upsertWebSearchToolImmutable(toolsData, itemId);
334
- return result.toolsData;
335
- } catch (error) {
336
- console.warn('Error handling web search in_progress event:', error);
337
- return toolsData;
338
- }
339
- }
340
-
341
- /**
342
- * Handle web search searching event (official typed)
343
- */
344
- private handleWebSearchSearching(event: WebSearchSearchingEvent, toolsData: ToolData[]): ToolData[] {
345
- try {
346
- const { item_id: itemId } = event;
347
- const result = this.upsertWebSearchToolImmutable(toolsData, itemId);
348
- return result.toolsData;
349
- } catch (error) {
350
- console.warn('Error handling web search searching event:', error);
351
- return toolsData;
352
- }
353
- }
354
-
355
- /**
356
- * Handle output text delta events
357
- */
358
- private handleOutputTextDelta(part: any, emitter: EventEmitter): void {
359
- try {
360
- if ('delta' in part && part.delta) {
361
- const deltaMsg = {
362
- role: 'assistant',
363
- content: part.delta,
364
- };
365
- emitter.emit('data', deltaMsg);
366
- emitter.emit('content', part.delta, 'assistant');
367
- }
368
- } catch (error) {
369
- console.warn('Error handling output text delta:', error);
370
- }
371
- }
372
-
373
- /**
374
- * Handle output item added events (function calls)
375
- */
376
- private handleOutputItemAdded(part: any, toolsData: ToolData[], emitter: EventEmitter): ToolData[] {
377
- try {
378
- const partAny = part as any;
379
- if (partAny.item && partAny.item.type === 'function_call') {
380
- const item = partAny.item;
381
- const callId = item.call_id;
382
- const functionName = item.name;
383
- const itemId = item.id;
384
-
385
- if (callId && itemId) {
386
- const existingIndex = toolsData.findIndex((t) => t.id === itemId || t.id === callId);
387
- const addingNew = existingIndex === -1;
388
- const nextIndex = addingNew ? toolsData.length : existingIndex;
389
-
390
- let updated: ToolData[];
391
- if (addingNew) {
392
- const newItem: ToolData = {
393
- index: nextIndex,
394
- id: itemId,
395
- callId: callId,
396
- type: 'function',
397
- name: functionName || '',
398
- arguments: item.arguments || '',
399
- role: 'tool',
400
- } as ToolData;
401
- updated = [...toolsData, newItem];
402
- } else {
403
- updated = toolsData.map((t, idx) => {
404
- if (idx !== existingIndex) return t;
405
- return {
406
- ...t,
407
- name: functionName || t.name,
408
- arguments: item.arguments !== undefined ? item.arguments : t.arguments,
409
- callId: t.callId || callId,
410
- };
411
- });
412
- }
413
-
414
- if (addingNew) {
415
- emitter.emit('tool_call_started', {
416
- id: callId,
417
- name: functionName || '',
418
- type: 'function',
419
- });
420
- }
421
-
422
- return updated;
423
- }
424
- }
425
- return toolsData;
426
- } catch (error) {
427
- console.warn('Error handling output item added:', error);
428
- return toolsData;
429
- }
430
- }
431
-
432
- /**
433
- * Handle function call arguments delta events
434
- */
435
- private handleFunctionCallArgumentsDelta(part: any, toolsData: ToolData[], emitter: EventEmitter): ToolData[] {
436
- try {
437
- if ('delta' in part && 'item_id' in part && typeof part.delta === 'string' && typeof part.item_id === 'string') {
438
- const delta = part.delta;
439
- const itemId = part.item_id;
440
-
441
- const existingIndex = toolsData.findIndex((t) => t.id === itemId);
442
- let updated: ToolData[];
443
- let finalIndex: number;
444
- if (existingIndex === -1) {
445
- finalIndex = toolsData.length;
446
- const newItem: ToolData = {
447
- index: finalIndex,
448
- id: itemId,
449
- type: 'function',
450
- name: '',
451
- arguments: delta,
452
- role: 'tool',
453
- } as ToolData;
454
- updated = [...toolsData, newItem];
455
- } else {
456
- finalIndex = existingIndex;
457
- updated = toolsData.map((t, idx) => (idx === existingIndex ? { ...t, arguments: String(t.arguments || '') + delta } : t));
458
- }
459
-
460
- const entry = existingIndex === -1 ? updated[finalIndex] : updated[finalIndex];
461
- emitter.emit('tool_call_progress', {
462
- id: entry.callId || itemId,
463
- name: entry.name,
464
- arguments: entry.arguments,
465
- delta: delta,
466
- });
467
-
468
- return updated;
469
- }
470
- return toolsData;
471
- } catch (error) {
472
- console.warn('Error handling function call arguments delta:', error);
473
- return toolsData;
474
- }
475
- }
476
-
477
- /**
478
- * Handle function call arguments done events
479
- */
480
- private handleFunctionCallArgumentsDone(part: any, toolsData: ToolData[], emitter: EventEmitter): ToolData[] {
481
- try {
482
- const partAny = part;
483
- if (partAny.item_id && partAny.arguments) {
484
- const itemId = partAny.item_id;
485
- const finalArguments = partAny.arguments;
486
-
487
- const toolIndex = toolsData.findIndex((t) => t.id === itemId);
488
- if (toolIndex !== -1) {
489
- const updated = toolsData.map((t, idx) => (idx === toolIndex ? { ...t, arguments: finalArguments } : t));
490
-
491
- const updatedEntry = updated[toolIndex];
492
- emitter.emit('tool_call_completed', {
493
- id: updatedEntry.callId || itemId,
494
- name: updatedEntry.name,
495
- arguments: finalArguments,
496
- });
497
-
498
- return updated;
499
- }
500
- }
501
- return toolsData;
502
- } catch (error) {
503
- console.warn('Error handling function call arguments done:', error);
504
- return toolsData;
505
- }
506
- }
507
-
508
- /**
509
- * Handle output item done events
510
- */
511
- private handleOutputItemDone(part: any, toolsData: ToolData[]): ToolData[] {
512
- try {
513
- const partAny = part as any;
514
- if (partAny.item && partAny.item.type === 'function_call' && partAny.item.status === 'completed') {
515
- const item = partAny.item;
516
- const callId = item.call_id;
517
- const itemId = item.id;
518
-
519
- const toolIndex = toolsData.findIndex((t) => t.id === itemId || t.id === callId);
520
- if (toolIndex !== -1 && item.arguments) {
521
- const updated = toolsData.map((t, idx) =>
522
- idx === toolIndex
523
- ? {
524
- ...t,
525
- arguments: item.arguments,
526
- callId: t.callId || callId,
527
- }
528
- : t
529
- );
530
- return updated;
531
- }
532
- }
533
- return toolsData;
534
- } catch (error) {
535
- console.warn('Error handling output item done:', error);
536
- return toolsData;
537
- }
538
- }
539
-
540
- /**
541
- * Handle completion events and unknown event types
542
- */
543
- private handleCompletionEvent(eventType: string): string {
544
- if (eventType === EVENT_TYPES.RESPONSE_COMPLETED || eventType.includes('done')) {
545
- return 'stop';
546
- }
547
- return 'stop'; // Default finish reason
548
- }
549
-
550
- public async prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.Responses.ResponseCreateParams> {
551
- let input = await this.prepareInputMessages(params);
552
-
553
- // Apply tool message transformation to input messages
554
- // There's a difference in the tools message data structures between `Chat Completions` and the `Response` interface.
555
- // Since we don't have enough context for the interface in `transformToolMessageBlocks`, we need to perform the transformation here so it's compatible with the `Responses` interface.
556
- input = this.applyToolMessageTransformation(input);
557
-
558
- const body: OpenAI.Responses.ResponseCreateParams = {
559
- model: params.model as string,
560
- input,
561
- };
562
-
563
- // Handle max tokens
564
- if (params?.maxTokens !== undefined) {
565
- body.max_output_tokens = params.maxTokens;
566
- }
567
-
568
- // o3-pro does not support temperature
569
- if (params?.temperature !== undefined && !MODELS_WITHOUT_TEMPERATURE_SUPPORT.includes(params.modelEntryName)) {
570
- body.temperature = params.temperature;
571
- }
572
-
573
- if (params?.topP !== undefined) {
574
- body.top_p = params.topP;
575
- }
576
-
577
- // #region GPT 5 specific fields
578
-
579
- const isGPT5ReasoningModels = params.modelEntryName?.includes('gpt-5') && params?.capabilities?.reasoning;
580
- if (isGPT5ReasoningModels && params?.verbosity) {
581
- body.text = { verbosity: params.verbosity };
582
- }
583
-
584
- // We need to validate the `reasoningEffort` parameter for OpenAI models, since models like `qwen/qwen3-32b` and `deepseek-r1-distill-llama-70b` (available via Groq) also support this parameter but use different values, such as `none` and `default`. These values are valid in our system but not specifically for OpenAI.
585
- if (isGPT5ReasoningModels && isValidOpenAIReasoningEffort(params.reasoningEffort)) {
586
- body.reasoning = { effort: params.reasoningEffort };
587
- }
588
- // #endregion GPT 5 specific fields
589
-
590
- let tools: OpenAI.Responses.Tool[] = [];
591
-
592
- if (params?.toolsConfig?.tools && params?.toolsConfig?.tools?.length > 0) {
593
- tools = await this.prepareFunctionTools(params);
594
- }
595
-
596
- // Add null safety check before accessing toolsInfo
597
- if (params.toolsInfo?.openai?.webSearch?.enabled) {
598
- const searchTool = this.prepareWebSearchTool(params);
599
- tools.push(searchTool);
600
- }
601
-
602
- if (tools.length > 0) {
603
- body.tools = tools;
604
-
605
- if (params?.toolsConfig?.tool_choice) {
606
- const toolChoice = params.toolsConfig.tool_choice;
607
-
608
- // Validate tool choice before applying
609
- if (this.validateToolChoice(toolChoice, tools)) {
610
- if (typeof toolChoice === 'string') {
611
- // Handle string-based tool choices
612
- body.tool_choice = toolChoice;
613
- } else if (typeof toolChoice === 'object' && toolChoice !== null) {
614
- // Handle object-based tool choices (specific function selection)
615
- if ('type' in toolChoice && toolChoice.type === 'function' && 'function' in toolChoice && 'name' in toolChoice.function) {
616
- // Transform Chat Completions specific function choice to Responses API format
617
- body.tool_choice = {
618
- type: 'function',
619
- name: toolChoice.function.name,
620
- };
621
- } else {
622
- // For other object formats, pass through with type assertion
623
- body.tool_choice = toolChoice as any;
624
- }
625
- }
626
- } else {
627
- body.tool_choice = 'auto';
628
- }
629
- } else {
630
- // Default to auto if tools are present but no choice is specified
631
- body.tool_choice = 'auto';
632
- }
633
- }
634
-
635
- return body;
636
- }
637
-
638
- /**
639
- * Transform OpenAI tool definitions to Responses.Tool format
640
- * Handles multiple tool definition formats and ensures compatibility
641
- */
642
- public transformToolsConfig(config: ToolConfig): OpenAI.Responses.Tool[] {
643
- if (!config?.toolDefinitions || !Array.isArray(config.toolDefinitions)) {
644
- return [];
645
- }
646
-
647
- return config.toolDefinitions
648
- .map((tool, index) => {
649
- // Validate basic tool structure
650
- if (!tool || typeof tool !== 'object') {
651
- // Return a minimal tool structure for compatibility
652
- return {
653
- type: 'function' as const,
654
- name: undefined,
655
- description: undefined,
656
- parameters: {
657
- type: 'object',
658
- properties: undefined,
659
- required: undefined,
660
- },
661
- strict: false,
662
- } as OpenAI.Responses.Tool;
663
- }
664
-
665
- // Handle tools that are already in ChatCompletionTool format (with nested function object)
666
- if ('function' in tool && tool.function && typeof tool.function === 'object' && tool.function !== null) {
667
- const funcTool = tool.function as { name: string; description?: string; parameters?: any };
668
-
669
- if (!funcTool.name || typeof funcTool.name !== 'string') {
670
- return {
671
- type: 'function' as const,
672
- name: undefined,
673
- description: tool.description || '',
674
- parameters: { type: 'object', properties: undefined, required: undefined },
675
- strict: false,
676
- } as OpenAI.Responses.Tool;
677
- }
678
-
679
- return {
680
- type: 'function' as const,
681
- name: funcTool.name,
682
- description: funcTool.description || tool.description || '',
683
- parameters: funcTool.parameters || { type: 'object', properties: {}, required: [] },
684
- strict: false,
685
- } as OpenAI.Responses.Tool;
686
- }
687
-
688
- // Handle OpenAI tool definition format (direct parameters)
689
- if ('parameters' in tool) {
690
- return {
691
- type: 'function' as const,
692
- name: tool.name,
693
- description: tool.description || '',
694
- parameters: tool.parameters || { type: 'object', properties: {}, required: [] },
695
- strict: false,
696
- } as OpenAI.Responses.Tool;
697
- }
698
-
699
- // Handle legacy format for backward compatibility
700
- const legacyTool = tool as any;
701
- return {
702
- type: 'function' as const,
703
- name: tool.name,
704
- description: tool.description || legacyTool.desc,
705
- parameters: {
706
- type: 'object',
707
- properties: legacyTool.properties,
708
- required: legacyTool.requiredFields || legacyTool.required,
709
- },
710
- strict: false,
711
- } as OpenAI.Responses.Tool;
712
- })
713
- .filter(Boolean) as OpenAI.Responses.Tool[];
714
- }
715
-
716
- /**
717
- * Normalize tool arguments to string format for Responses API
718
- */
719
- private normalizeToolArguments(args: any): string {
720
- if (typeof args === 'string') {
721
- // If it's already a string, validate it's proper JSON
722
- try {
723
- JSON.parse(args);
724
- return args;
725
- } catch {
726
- // If not valid JSON, wrap it in quotes to make it valid
727
- return JSON.stringify(args);
728
- }
729
- }
730
-
731
- if (typeof args === 'object' && args !== null) {
732
- try {
733
- return JSON.stringify(args);
734
- } catch (error) {
735
- return '{}'; // Fallback to empty object
736
- }
737
- }
738
-
739
- if (args === undefined || args === null) {
740
- return '{}';
741
- }
742
-
743
- // For primitive types, convert to JSON
744
- return JSON.stringify(args);
745
- }
746
-
747
- /**
748
- * Validate if tool data is complete and valid for transformation
749
- */
750
- private isValidToolData(toolData: ToolData): boolean {
751
- return !!(toolData && toolData.id && toolData.name && (toolData.result !== undefined || toolData.error !== undefined));
752
- }
753
-
754
- async handleFileAttachments(files: BinaryInput[], agentId: string, messages: any[]): Promise<any[]> {
755
- if (files.length === 0) return messages;
756
-
757
- const uploadedFiles = await this.uploadFiles(files, agentId);
758
- const validImageFiles = this.getValidImageFiles(uploadedFiles);
759
- const validDocumentFiles = this.getValidDocumentFiles(uploadedFiles);
760
-
761
- // Process images and documents with Responses API specific formatting
762
- const imageData = await this.processImageData(validImageFiles, agentId);
763
- const documentData = await this.processDocumentData(validDocumentFiles, agentId);
764
-
765
- // Find the last user message and add files to it
766
- for (let i = messages.length - 1; i >= 0; i--) {
767
- if (messages[i].role === 'user') {
768
- // Ensure content is an array before pushing files
769
- if (typeof messages[i].content === 'string') {
770
- messages[i].content = [{ type: 'input_text', text: messages[i].content }];
771
- } else if (!Array.isArray(messages[i].content)) {
772
- messages[i].content = [];
773
- }
774
- messages[i].content.push(...imageData, ...documentData);
775
- break;
776
- }
777
- }
778
-
779
- // If no user message found, create one with files
780
- if (!messages.some((item) => item.role === 'user')) {
781
- messages.push({
782
- role: 'user',
783
- content: [...imageData, ...documentData],
784
- });
785
- }
786
-
787
- return messages;
788
- }
789
-
790
- /**
791
- * Get valid image files based on supported MIME types
792
- */
793
- private getValidImageFiles(files: BinaryInput[]): BinaryInput[] {
794
- return files.filter((file) => this.validImageMimeTypes.includes(file?.mimetype));
795
- }
796
-
797
- /**
798
- * Get valid document files based on supported MIME types
799
- */
800
- private getValidDocumentFiles(files: BinaryInput[]): BinaryInput[] {
801
- return files.filter((file) => this.validDocumentMimeTypes.includes(file?.mimetype));
802
- }
803
-
804
- /**
805
- * Upload files to storage
806
- */
807
- private async uploadFiles(files: BinaryInput[], agentId: string): Promise<BinaryInput[]> {
808
- const promises = files.map((file) => {
809
- const binaryInput = BinaryInput.from(file);
810
- return binaryInput.upload(AccessCandidate.agent(agentId)).then(() => binaryInput);
811
- });
812
-
813
- return Promise.all(promises);
814
- }
815
-
816
- /**
817
- * Process image files with Responses API specific formatting
818
- */
819
- private async processImageData(files: BinaryInput[], agentId: string): Promise<any[]> {
820
- if (files.length === 0) return [];
821
-
822
- const imageData = [];
823
- for (const file of files) {
824
- await this.validateFileSize(file, MAX_IMAGE_SIZE, 'Image');
825
-
826
- const bufferData = await file.readData(AccessCandidate.agent(agentId));
827
- const base64Data = bufferData.toString('base64');
828
- const url = `data:${file.mimetype};base64,${base64Data}`;
829
-
830
- imageData.push({
831
- type: 'input_image',
832
- image_url: url,
833
- });
834
- }
835
-
836
- return imageData;
837
- }
838
-
839
- /**
840
- * Process document files with Responses API specific formatting
841
- */
842
- private async processDocumentData(files: BinaryInput[], agentId: string): Promise<any[]> {
843
- if (files.length === 0) return [];
844
-
845
- const documentData = [];
846
- for (const file of files) {
847
- await this.validateFileSize(file, MAX_DOCUMENT_SIZE, 'Document');
848
-
849
- const bufferData = await file.readData(AccessCandidate.agent(agentId));
850
- const base64Data = bufferData.toString('base64');
851
- const fileData = `data:${file.mimetype};base64,${base64Data}`;
852
- const filename = await file.getName();
853
-
854
- documentData.push({
855
- type: 'input_file',
856
- file: {
857
- file_data: fileData,
858
- filename,
859
- },
860
- });
861
- }
862
-
863
- return documentData;
864
- }
865
-
866
- /**
867
- * Validate file size before processing
868
- */
869
- private async validateFileSize(file: BinaryInput, maxSize: number, fileType: string): Promise<void> {
870
- await file.ready();
871
- const fileInfo = await file.getJsonData(AccessCandidate.agent('temp'));
872
- if (fileInfo.size > maxSize) {
873
- throw new Error(`${fileType} file size (${fileInfo.size} bytes) exceeds maximum allowed size of ${maxSize} bytes`);
874
- }
875
- }
876
-
877
- getInterfaceName(): string {
878
- return 'responses';
879
- }
880
-
881
- validateParameters(params: TLLMParams): boolean {
882
- // Basic validation for Responses API parameters
883
- return !!params.model;
884
- }
885
-
886
- /**
887
- * Prepare input messages for Responses API
888
- */
889
- private async prepareInputMessages(params: TLLMParams): Promise<any[]> {
890
- const messages = params?.messages || [];
891
- const files: BinaryInput[] = params?.files || [];
892
-
893
- // Start with raw messages - transformation now happens in applyToolMessageTransformation
894
- let input = [...messages];
895
-
896
- // Handle files if present
897
- if (files.length > 0) {
898
- input = await this.handleFileAttachments(files, params.agentId, input);
899
- }
900
-
901
- return input;
902
- }
903
-
904
- /**
905
- * Prepare function tools for Responses API request
906
- * Transforms tools from various formats to Responses API format
907
- */
908
- private async prepareFunctionTools(params: TLLMParams): Promise<OpenAI.Responses.Tool[]> {
909
- const tools: OpenAI.Responses.Tool[] = [];
910
-
911
- // Validate and process function tools
912
- if (params?.toolsConfig?.tools && Array.isArray(params.toolsConfig.tools) && params.toolsConfig.tools.length > 0) {
913
- try {
914
- // Transform tools using the enhanced transformToolsConfig method
915
- const toolsConfig = this.transformToolsConfig({
916
- type: 'function',
917
- toolDefinitions: params.toolsConfig.tools as any[],
918
- toolChoice: params.toolsConfig.tool_choice || 'auto',
919
- modelInfo: (params.modelInfo as LLMModelInfo) || null,
920
- });
921
-
922
- // Validate transformed tools before adding them
923
- const validTools = toolsConfig.filter((tool, index) => {
924
- if (tool.type !== 'function' || !(tool as any).name) {
925
- return false;
926
- }
927
- return true;
928
- });
929
-
930
- tools.push(...validTools);
931
- } catch (error) {
932
- // Don't throw here to allow the request to continue without tools
933
- // This provides better resilience in production
934
- }
935
- }
936
-
937
- return tools;
938
- }
939
-
940
- /**
941
- * Get web search tool configuration for OpenAI Responses API
942
- * According to OpenAI documentation: https://platform.openai.com/docs/api-reference/responses/create
943
- */
944
- private prepareWebSearchTool(params: TLLMPreparedParams): OpenAI.Responses.WebSearchTool {
945
- const webSearch = params?.toolsInfo?.openai?.webSearch;
946
- const contextSize = webSearch?.contextSize;
947
- const searchCity = webSearch?.city;
948
- const searchCountry = webSearch?.country;
949
- const searchRegion = webSearch?.region;
950
- const searchTimezone = webSearch?.timezone;
951
-
952
- // Prepare location object - build incrementally if any location parameters exist
953
- const userLocation: TSearchLocation = {
954
- type: 'approximate', // Required, always be 'approximate' when we implement location
955
- };
956
-
957
- // Add location fields if they exist
958
- if (searchCity) userLocation.city = searchCity;
959
- if (searchCountry) userLocation.country = searchCountry;
960
- if (searchRegion) userLocation.region = searchRegion;
961
- if (searchTimezone) userLocation.timezone = searchTimezone;
962
-
963
- // Only include location in config if we have actual location data
964
- const hasLocationData = searchCity || searchCountry || searchRegion || searchTimezone;
965
-
966
- // Configure web search tool according to OpenAI Responses API specification
967
- const searchTool = {
968
- type: 'web_search_preview' as const, // Use literal type to ensure consistency
969
- };
970
-
971
- // Add optional configuration properties
972
- const webSearchConfig: any = {};
973
-
974
- if (contextSize) {
975
- webSearchConfig.search_context_size = contextSize;
976
- }
977
-
978
- if (hasLocationData) {
979
- webSearchConfig.user_location = userLocation;
980
- }
981
-
982
- return { ...searchTool, ...webSearchConfig };
983
- }
984
-
985
- /**
986
- * Transform messages for Responses API compatibility
987
- * Handles the differences between Chat Completions and Responses API message formats
988
- */
989
- private applyToolMessageTransformation(input: any[]): any[] {
990
- const transformedMessages: any[] = [];
991
-
992
- for (let i = 0; i < input.length; i++) {
993
- const message = input[i];
994
-
995
- try {
996
- if (message.role === 'assistant' && message.tool_calls && Array.isArray(message.tool_calls)) {
997
- // Split assistant message with tool_calls into separate items (Responses API format)
998
-
999
- // Add assistant content first if present
1000
- if (message.content !== undefined && message.content !== null) {
1001
- const contentStr = typeof message.content === 'string' ? message.content : JSON.stringify(message.content);
1002
- if (contentStr.trim().length > 0) {
1003
- transformedMessages.push({
1004
- role: 'assistant',
1005
- content: contentStr,
1006
- });
1007
- }
1008
- }
1009
-
1010
- // Transform each tool call to function_call format
1011
- message.tool_calls.forEach((toolCall: any, index: number) => {
1012
- if (!toolCall || !toolCall.function) {
1013
- return;
1014
- }
1015
-
1016
- const functionArgs = toolCall.function.arguments;
1017
- const normalizedArgs =
1018
- functionArgs === undefined || functionArgs === null
1019
- ? undefined
1020
- : typeof functionArgs === 'object'
1021
- ? JSON.stringify(functionArgs)
1022
- : String(functionArgs);
1023
-
1024
- transformedMessages.push({
1025
- type: 'function_call',
1026
- name: toolCall.function.name || '',
1027
- arguments: normalizedArgs,
1028
- call_id: toolCall.id || toolCall.call_id || `call_${Date.now()}_${index}`, // Ensure unique ID
1029
- });
1030
- });
1031
- } else if (message.role === 'tool') {
1032
- // Transform tool message to function_call_output (Responses API format)
1033
- if (!message.tool_call_id) {
1034
- return;
1035
- }
1036
-
1037
- const outputContent = message.content;
1038
- const normalizedOutput = typeof outputContent === 'string' ? outputContent : JSON.stringify(outputContent || 'null');
1039
-
1040
- transformedMessages.push({
1041
- type: 'function_call_output',
1042
- call_id: message.tool_call_id,
1043
- output: normalizedOutput,
1044
- });
1045
- } else {
1046
- // Pass through other message types without content modification
1047
- // The Responses API can handle various content formats
1048
- transformedMessages.push(message);
1049
- }
1050
- } catch (error) {
1051
- // Add the original message to prevent data loss
1052
- transformedMessages.push(message);
1053
- }
1054
- }
1055
-
1056
- // Validate the final message structure
1057
- const validMessages = transformedMessages.filter((msg, index) => {
1058
- if (!msg || typeof msg !== 'object') {
1059
- return false;
1060
- }
1061
- return true;
1062
- });
1063
-
1064
- return validMessages;
1065
- }
1066
-
1067
- /**
1068
- * Get search tool cost for a specific model and context size
1069
- */
1070
- private getSearchToolCost(modelName: string): number {
1071
- if (!modelName) return 0;
1072
- // Normalize: remove built-in prefix and compare case-insensitively
1073
- const normalized = String(modelName)
1074
- .toLowerCase()
1075
- .replace(/^smythos\//, '');
1076
-
1077
- // Match by prefix with any configured family in SEARCH_TOOL_COSTS
1078
- const match = Object.entries(SEARCH_TOOL_COSTS).find(([family]) => normalized.startsWith(family));
1079
- return match ? (match[1] as number) : 0;
1080
- }
1081
-
1082
- /**
1083
- * Process function call responses and integrate them back into the conversation
1084
- * This method helps maintain compatibility with the chat completion flow
1085
- */
1086
- public async processFunctionCallResults(toolsData: ToolData[]): Promise<ToolData[]> {
1087
- const processedTools: ToolData[] = [];
1088
-
1089
- for (const tool of toolsData) {
1090
- if (!this.isValidToolData(tool)) {
1091
- continue;
1092
- }
1093
-
1094
- try {
1095
- const processedTool: ToolData = {
1096
- ...tool,
1097
- // Ensure arguments are properly formatted as JSON string
1098
- arguments: this.normalizeToolArguments(tool.arguments),
1099
- // Ensure function property is properly structured for compatibility
1100
- function: tool.function || {
1101
- name: tool.name,
1102
- arguments: this.normalizeToolArguments(tool.arguments),
1103
- },
1104
- };
1105
-
1106
- processedTools.push(processedTool);
1107
- } catch (error) {
1108
- // Add error information to the tool result
1109
- processedTools.push({
1110
- ...tool,
1111
- error: error instanceof Error ? error.message : 'Unknown processing error',
1112
- result: undefined,
1113
- });
1114
- }
1115
- }
1116
-
1117
- return processedTools;
1118
- }
1119
-
1120
- /**
1121
- * Validate tool choice parameter for Responses API
1122
- */
1123
- private validateToolChoice(toolChoice: any, availableTools: OpenAI.Responses.Tool[]): boolean {
1124
- if (!toolChoice) return true;
1125
-
1126
- if (typeof toolChoice === 'string') {
1127
- const validStringChoices = ['auto', 'required', 'none'];
1128
- return validStringChoices.includes(toolChoice);
1129
- }
1130
-
1131
- if (typeof toolChoice === 'object' && toolChoice !== null) {
1132
- // For specific function selection
1133
- if (toolChoice.type === 'function' && toolChoice.function?.name) {
1134
- // Check if the specified function exists in available tools
1135
- return availableTools.some((tool) => tool.type === 'function' && tool.name === toolChoice.function.name);
1136
- }
1137
- }
1138
-
1139
- return false;
1140
- }
1141
-
1142
- /**
1143
- * Upsert a web search tool entry in toolsData and return its index
1144
- */
1145
- private upsertWebSearchToolImmutable(toolsData: ToolData[], id: string, args: string = ''): { toolsData: ToolData[]; index: number } {
1146
- const existingIndex = toolsData.findIndex((t) => t.id === id);
1147
- if (existingIndex === -1) {
1148
- const index = toolsData.length;
1149
- const newItem: ToolData = {
1150
- index,
1151
- id,
1152
- type: TToolType.WebSearch,
1153
- name: 'web_search',
1154
- arguments: args,
1155
- role: 'tool',
1156
- } as ToolData;
1157
- const updated: ToolData[] = [...toolsData, newItem];
1158
- return { toolsData: updated, index };
1159
- }
1160
-
1161
- if (args) {
1162
- const updated: ToolData[] = toolsData.map((t, idx) => (idx === existingIndex ? { ...t, arguments: args } : t));
1163
- return { toolsData: updated, index: existingIndex };
1164
- }
1165
-
1166
- return { toolsData, index: existingIndex };
1167
- }
1168
- }
1
+ import EventEmitter from 'events';
2
+ import OpenAI from 'openai';
3
+ import type { Stream } from 'openai/streaming';
4
+
5
+ import { BinaryInput } from '@sre/helpers/BinaryInput.helper';
6
+ import { AccessCandidate } from '@sre/Security/AccessControl/AccessCandidate.class';
7
+ import { TLLMParams, TLLMPreparedParams, ILLMRequestContext, ToolData, APIKeySource, TLLMEvent, LLMModelInfo } from '@sre/types/LLM.types';
8
+ import { OpenAIApiInterface, ToolConfig } from './OpenAIApiInterface';
9
+ import { HandlerDependencies, TToolType } from '../types';
10
+ import { SUPPORTED_MIME_TYPES_MAP } from '@sre/constants';
11
+ import { SEARCH_TOOL_COSTS } from './constants';
12
+ import { isValidOpenAIReasoningEffort } from './utils';
13
+
14
+ // File size limits in bytes
15
+ const MAX_IMAGE_SIZE = 20 * 1024 * 1024; // 20MB
16
+ const MAX_DOCUMENT_SIZE = 25 * 1024 * 1024; // 25MB
17
+
18
+ // Event type constants for type safety and maintainability
19
+ const EVENT_TYPES = {
20
+ // Officially supported web search events (OpenAI SDK >= 5.12.x)
21
+ WEB_SEARCH_IN_PROGRESS: 'response.web_search_call.in_progress',
22
+ WEB_SEARCH_SEARCHING: 'response.web_search_call.searching',
23
+ WEB_SEARCH_COMPLETED: 'response.web_search_call.completed',
24
+ // Legacy alias observed historically (kept for backward compat if emitted)
25
+ WEB_SEARCH_STARTED: 'response.web_search_call.started',
26
+
27
+ RESPONSE_COMPLETED: 'response.completed',
28
+ OUTPUT_TEXT_DELTA: 'response.output_text.delta',
29
+ OUTPUT_ITEM_ADDED: 'response.output_item.added',
30
+ FUNCTION_CALL_ARGUMENTS_DELTA: 'response.function_call_arguments.delta',
31
+ FUNCTION_CALL_ARGUMENTS_DONE: 'response.function_call_arguments.done',
32
+ OUTPUT_ITEM_DONE: 'response.output_item.done',
33
+ } as const;
34
+
35
+ // Type definitions for web search events (augmenting SDK types locally)
36
+ interface WebSearchInProgressEvent {
37
+ type: typeof EVENT_TYPES.WEB_SEARCH_IN_PROGRESS;
38
+ item_id: string;
39
+ }
40
+
41
+ interface WebSearchSearchingEvent {
42
+ type: typeof EVENT_TYPES.WEB_SEARCH_SEARCHING;
43
+ item_id: string;
44
+ }
45
+
46
+ interface WebSearchCompletedEvent {
47
+ type: typeof EVENT_TYPES.WEB_SEARCH_COMPLETED;
48
+ item_id: string;
49
+ }
50
+
51
+ type TSearchLocation = {
52
+ type: 'approximate';
53
+ city?: string;
54
+ country?: string;
55
+ region?: string;
56
+ timezone?: string;
57
+ };
58
+
59
+ /**
60
+ * OpenAI Responses API interface implementation
61
+ * Handles all Responses API-specific logic including:
62
+ * - Stream creation and handling
63
+ * - Request body preparation
64
+ * - Tool and message transformations
65
+ * - File attachment handling
66
+ */
67
+ export class ResponsesApiInterface extends OpenAIApiInterface {
68
+ private deps: HandlerDependencies;
69
+ private validImageMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.image;
70
+ private validDocumentMimeTypes = SUPPORTED_MIME_TYPES_MAP.OpenAI.document;
71
+
72
+ constructor(context: ILLMRequestContext, deps: HandlerDependencies) {
73
+ super(context);
74
+ this.deps = deps;
75
+ }
76
+
77
+ async createRequest(body: OpenAI.Responses.ResponseCreateParams, context: ILLMRequestContext): Promise<OpenAI.Responses.Response> {
78
+ const openai = await this.deps.getClient(context);
79
+ return await openai.responses.create({
80
+ ...body,
81
+ stream: false,
82
+ });
83
+ }
84
+
85
+ async createStream(
86
+ body: OpenAI.Responses.ResponseCreateParams,
87
+ context: ILLMRequestContext
88
+ ): Promise<Stream<OpenAI.Responses.ResponseStreamEvent>> {
89
+ const openai = await this.deps.getClient(context);
90
+ return (await openai.responses.create({
91
+ ...body,
92
+ stream: true,
93
+ })) as Stream<OpenAI.Responses.ResponseStreamEvent>;
94
+ }
95
+
96
+ public handleStream(stream: Stream<OpenAI.Responses.ResponseStreamEvent>, context: ILLMRequestContext): EventEmitter {
97
+ const emitter = new EventEmitter();
98
+
99
+ // Process stream asynchronously while returning emitter immediately
100
+ (async () => {
101
+ let finalToolsData: ToolData[] = [];
102
+
103
+ try {
104
+ // Step 1: Process the stream
105
+ const streamResult = await this.processStream(stream, emitter);
106
+ finalToolsData = streamResult.toolsData;
107
+
108
+ const finishReason = streamResult.finishReason || 'stop';
109
+ const usageData = streamResult.usageData;
110
+
111
+ // Step 2: Report usage statistics
112
+ const reportedUsage = this.reportUsageStatistics(usageData, context);
113
+
114
+ // Step 3: Emit final events
115
+ this.emitFinalEvents(emitter, finalToolsData, reportedUsage, finishReason);
116
+ } catch (error) {
117
+ emitter.emit('error', error);
118
+ }
119
+ })();
120
+
121
+ return emitter;
122
+ }
123
+
124
+ /**
125
+ * Process the responses API stream format
126
+ */
127
+ private async processStream(
128
+ stream: Stream<OpenAI.Responses.ResponseStreamEvent>,
129
+ emitter: EventEmitter
130
+ ): Promise<{ toolsData: ToolData[]; finishReason: string; usageData: any[] }> {
131
+ let toolsData: ToolData[] = [];
132
+ let finishReason = 'stop';
133
+ const usageData = [];
134
+
135
+ for await (const part of stream) {
136
+ try {
137
+ // Handle different event types from the Responses API stream
138
+ if ('type' in part) {
139
+ // Handle officially typed events using constants
140
+ switch (part.type) {
141
+ case EVENT_TYPES.WEB_SEARCH_IN_PROGRESS:
142
+ toolsData = this.handleWebSearchInProgress(part as any, toolsData);
143
+ break;
144
+ case EVENT_TYPES.WEB_SEARCH_SEARCHING:
145
+ toolsData = this.handleWebSearchSearching(part as any, toolsData);
146
+ break;
147
+ case EVENT_TYPES.WEB_SEARCH_COMPLETED:
148
+ toolsData = this.handleWebSearchCompleted(part as any, toolsData);
149
+ break;
150
+ case EVENT_TYPES.OUTPUT_TEXT_DELTA:
151
+ this.handleOutputTextDelta(part, emitter);
152
+ break;
153
+
154
+ case EVENT_TYPES.OUTPUT_ITEM_ADDED:
155
+ toolsData = this.handleOutputItemAdded(part, toolsData, emitter);
156
+ break;
157
+
158
+ case EVENT_TYPES.FUNCTION_CALL_ARGUMENTS_DELTA:
159
+ toolsData = this.handleFunctionCallArgumentsDelta(part, toolsData, emitter);
160
+ break;
161
+
162
+ case EVENT_TYPES.FUNCTION_CALL_ARGUMENTS_DONE:
163
+ toolsData = this.handleFunctionCallArgumentsDone(part, toolsData, emitter);
164
+ break;
165
+
166
+ case EVENT_TYPES.OUTPUT_ITEM_DONE:
167
+ toolsData = this.handleOutputItemDone(part, toolsData);
168
+ break;
169
+
170
+ case EVENT_TYPES.RESPONSE_COMPLETED: {
171
+ finishReason = 'stop';
172
+ const responseData = (part as any)?.response;
173
+ if (responseData?.usage) {
174
+ usageData.push(responseData.usage);
175
+ }
176
+ break;
177
+ }
178
+
179
+ default: {
180
+ const eventType = String(part.type);
181
+ // Handle legacy started event if ever emitted
182
+ if (eventType === EVENT_TYPES.WEB_SEARCH_STARTED) {
183
+ const legacyId = (part as any)?.id;
184
+ if (typeof legacyId === 'string') {
185
+ const result = this.upsertWebSearchToolImmutable(toolsData, legacyId);
186
+ toolsData = result.toolsData;
187
+ }
188
+ break;
189
+ }
190
+ // Handle any other unknown 'done' style events as completion
191
+ finishReason = this.handleCompletionEvent(eventType);
192
+ break;
193
+ }
194
+ }
195
+ }
196
+ } catch (error) {
197
+ // Log error but continue processing to prevent stream interruption
198
+ console.warn('Error processing stream event:', error, 'Event:', part);
199
+ }
200
+ }
201
+
202
+ return { toolsData: this.extractToolCalls(toolsData), finishReason, usageData };
203
+ }
204
+
205
+ /**
206
+ * Extract and format tool calls from the accumulated data
207
+ */
208
+ private extractToolCalls(output: ToolData[]): ToolData[] {
209
+ return output.map((tool) => ({
210
+ index: tool.index,
211
+ name: tool.name,
212
+ arguments: tool.arguments,
213
+ id: tool.callId || tool.id, // Use callId for final output if available
214
+ type: tool.type,
215
+ role: tool.role,
216
+ callId: tool.callId, // Preserve callId for reference
217
+ }));
218
+ }
219
+
220
+ /**
221
+ * Report usage statistics
222
+ */
223
+ private reportUsageStatistics(usage_data: any[], context: ILLMRequestContext): any[] {
224
+ const reportedUsage: any[] = [];
225
+
226
+ // Report normal usage
227
+ usage_data.forEach((usage) => {
228
+ // Convert ResponseUsage to CompletionUsage format for compatibility
229
+ const convertedUsage = {
230
+ completion_tokens: usage.completion_tokens || 0,
231
+ prompt_tokens: usage.prompt_tokens || 0,
232
+ total_tokens: usage.total_tokens || 0,
233
+ ...usage,
234
+ };
235
+ const reported = this.deps.reportUsage(convertedUsage, this.buildUsageContext(context));
236
+ reportedUsage.push(reported);
237
+ });
238
+
239
+ // Report search tool usage if enabled
240
+ if (context.toolsInfo?.openai?.webSearch?.enabled) {
241
+ const searchUsage = this.calculateSearchToolUsage(context);
242
+ const reported = this.deps.reportUsage(searchUsage, this.buildUsageContext(context));
243
+ reportedUsage.push(reported);
244
+ }
245
+
246
+ return reportedUsage;
247
+ }
248
+
249
+ /**
250
+ * Emit final events
251
+ */
252
+ private emitFinalEvents(emitter: EventEmitter, toolsData: ToolData[], reportedUsage: any[], finishReason: string): void {
253
+ // Emit tool info event if tools were called
254
+ if (toolsData.length > 0) {
255
+ emitter.emit(TLLMEvent.ToolInfo, toolsData);
256
+ }
257
+
258
+ // Emit interrupted event if finishReason is not 'stop'
259
+ if (finishReason !== 'stop') {
260
+ emitter.emit('interrupted', finishReason);
261
+ }
262
+
263
+ // Emit end event with setImmediate to ensure proper event ordering
264
+ setImmediate(() => {
265
+ emitter.emit('end', toolsData, reportedUsage, finishReason);
266
+ });
267
+ }
268
+
269
+ /**
270
+ * Build usage context parameters from request context
271
+ */
272
+ private buildUsageContext(context: ILLMRequestContext) {
273
+ return {
274
+ modelEntryName: context.modelEntryName,
275
+ keySource: context.isUserKey ? APIKeySource.User : APIKeySource.Smyth,
276
+ agentId: context.agentId,
277
+ teamId: context.teamId,
278
+ };
279
+ }
280
+
281
+ /**
282
+ * Calculate search tool usage with cost
283
+ */
284
+ private calculateSearchToolUsage(context: ILLMRequestContext) {
285
+ const modelName = context.modelEntryName?.replace('smythos/', '');
286
+ const cost = this.getSearchToolCost(modelName);
287
+
288
+ return {
289
+ cost,
290
+ completion_tokens: 0,
291
+ prompt_tokens: 0,
292
+ total_tokens: 0,
293
+ };
294
+ }
295
+
296
+ // =====================
297
+ // Event handlers (private)
298
+ // =====================
299
+
300
+ /**
301
+ * Handle web search completed event with proper type safety
302
+ */
303
+ private handleWebSearchCompleted(event: WebSearchCompletedEvent, toolsData: ToolData[]): ToolData[] {
304
+ try {
305
+ const { item_id: itemId } = event;
306
+ const result = this.upsertWebSearchToolImmutable(toolsData, itemId);
307
+ return result.toolsData;
308
+ } catch (error) {
309
+ console.warn('Error handling web search completed event:', error);
310
+ return toolsData;
311
+ }
312
+ }
313
+
314
+ /**
315
+ * Handle web search in-progress event (official typed)
316
+ */
317
+ private handleWebSearchInProgress(event: WebSearchInProgressEvent, toolsData: ToolData[]): ToolData[] {
318
+ try {
319
+ const { item_id: itemId } = event;
320
+ const result = this.upsertWebSearchToolImmutable(toolsData, itemId);
321
+ return result.toolsData;
322
+ } catch (error) {
323
+ console.warn('Error handling web search in_progress event:', error);
324
+ return toolsData;
325
+ }
326
+ }
327
+
328
+ /**
329
+ * Handle web search searching event (official typed)
330
+ */
331
+ private handleWebSearchSearching(event: WebSearchSearchingEvent, toolsData: ToolData[]): ToolData[] {
332
+ try {
333
+ const { item_id: itemId } = event;
334
+ const result = this.upsertWebSearchToolImmutable(toolsData, itemId);
335
+ return result.toolsData;
336
+ } catch (error) {
337
+ console.warn('Error handling web search searching event:', error);
338
+ return toolsData;
339
+ }
340
+ }
341
+
342
+ /**
343
+ * Handle output text delta events
344
+ */
345
+ private handleOutputTextDelta(part: any, emitter: EventEmitter): void {
346
+ try {
347
+ if ('delta' in part && part.delta) {
348
+ const deltaMsg = {
349
+ role: 'assistant',
350
+ content: part.delta,
351
+ };
352
+ emitter.emit('data', deltaMsg);
353
+ emitter.emit('content', part.delta, 'assistant');
354
+ }
355
+ } catch (error) {
356
+ console.warn('Error handling output text delta:', error);
357
+ }
358
+ }
359
+
360
+ /**
361
+ * Handle output item added events (function calls)
362
+ */
363
+ private handleOutputItemAdded(part: any, toolsData: ToolData[], emitter: EventEmitter): ToolData[] {
364
+ try {
365
+ const partAny = part as any;
366
+ if (partAny.item && partAny.item.type === 'function_call') {
367
+ const item = partAny.item;
368
+ const callId = item.call_id;
369
+ const functionName = item.name;
370
+ const itemId = item.id;
371
+
372
+ if (callId && itemId) {
373
+ const existingIndex = toolsData.findIndex((t) => t.id === itemId || t.id === callId);
374
+ const addingNew = existingIndex === -1;
375
+ const nextIndex = addingNew ? toolsData.length : existingIndex;
376
+
377
+ let updated: ToolData[];
378
+ if (addingNew) {
379
+ const newItem: ToolData = {
380
+ index: nextIndex,
381
+ id: itemId,
382
+ callId: callId,
383
+ type: 'function',
384
+ name: functionName || '',
385
+ arguments: item.arguments || '',
386
+ role: 'tool',
387
+ } as ToolData;
388
+ updated = [...toolsData, newItem];
389
+ } else {
390
+ updated = toolsData.map((t, idx) => {
391
+ if (idx !== existingIndex) return t;
392
+ return {
393
+ ...t,
394
+ name: functionName || t.name,
395
+ arguments: item.arguments !== undefined ? item.arguments : t.arguments,
396
+ callId: t.callId || callId,
397
+ };
398
+ });
399
+ }
400
+
401
+ if (addingNew) {
402
+ emitter.emit('tool_call_started', {
403
+ id: callId,
404
+ name: functionName || '',
405
+ type: 'function',
406
+ });
407
+ }
408
+
409
+ return updated;
410
+ }
411
+ }
412
+ return toolsData;
413
+ } catch (error) {
414
+ console.warn('Error handling output item added:', error);
415
+ return toolsData;
416
+ }
417
+ }
418
+
419
+ /**
420
+ * Handle function call arguments delta events
421
+ */
422
+ private handleFunctionCallArgumentsDelta(part: any, toolsData: ToolData[], emitter: EventEmitter): ToolData[] {
423
+ try {
424
+ if ('delta' in part && 'item_id' in part && typeof part.delta === 'string' && typeof part.item_id === 'string') {
425
+ const delta = part.delta;
426
+ const itemId = part.item_id;
427
+
428
+ const existingIndex = toolsData.findIndex((t) => t.id === itemId);
429
+ let updated: ToolData[];
430
+ let finalIndex: number;
431
+ if (existingIndex === -1) {
432
+ finalIndex = toolsData.length;
433
+ const newItem: ToolData = {
434
+ index: finalIndex,
435
+ id: itemId,
436
+ type: 'function',
437
+ name: '',
438
+ arguments: delta,
439
+ role: 'tool',
440
+ } as ToolData;
441
+ updated = [...toolsData, newItem];
442
+ } else {
443
+ finalIndex = existingIndex;
444
+ updated = toolsData.map((t, idx) => (idx === existingIndex ? { ...t, arguments: String(t.arguments || '') + delta } : t));
445
+ }
446
+
447
+ const entry = existingIndex === -1 ? updated[finalIndex] : updated[finalIndex];
448
+ emitter.emit('tool_call_progress', {
449
+ id: entry.callId || itemId,
450
+ name: entry.name,
451
+ arguments: entry.arguments,
452
+ delta: delta,
453
+ });
454
+
455
+ return updated;
456
+ }
457
+ return toolsData;
458
+ } catch (error) {
459
+ console.warn('Error handling function call arguments delta:', error);
460
+ return toolsData;
461
+ }
462
+ }
463
+
464
+ /**
465
+ * Handle function call arguments done events
466
+ */
467
+ private handleFunctionCallArgumentsDone(part: any, toolsData: ToolData[], emitter: EventEmitter): ToolData[] {
468
+ try {
469
+ const partAny = part;
470
+ if (partAny.item_id && partAny.arguments) {
471
+ const itemId = partAny.item_id;
472
+ const finalArguments = partAny.arguments;
473
+
474
+ const toolIndex = toolsData.findIndex((t) => t.id === itemId);
475
+ if (toolIndex !== -1) {
476
+ const updated = toolsData.map((t, idx) => (idx === toolIndex ? { ...t, arguments: finalArguments } : t));
477
+
478
+ const updatedEntry = updated[toolIndex];
479
+ emitter.emit('tool_call_completed', {
480
+ id: updatedEntry.callId || itemId,
481
+ name: updatedEntry.name,
482
+ arguments: finalArguments,
483
+ });
484
+
485
+ return updated;
486
+ }
487
+ }
488
+ return toolsData;
489
+ } catch (error) {
490
+ console.warn('Error handling function call arguments done:', error);
491
+ return toolsData;
492
+ }
493
+ }
494
+
495
+ /**
496
+ * Handle output item done events
497
+ */
498
+ private handleOutputItemDone(part: any, toolsData: ToolData[]): ToolData[] {
499
+ try {
500
+ const partAny = part as any;
501
+ if (partAny.item && partAny.item.type === 'function_call' && partAny.item.status === 'completed') {
502
+ const item = partAny.item;
503
+ const callId = item.call_id;
504
+ const itemId = item.id;
505
+
506
+ const toolIndex = toolsData.findIndex((t) => t.id === itemId || t.id === callId);
507
+ if (toolIndex !== -1 && item.arguments) {
508
+ const updated = toolsData.map((t, idx) =>
509
+ idx === toolIndex
510
+ ? {
511
+ ...t,
512
+ arguments: item.arguments,
513
+ callId: t.callId || callId,
514
+ }
515
+ : t
516
+ );
517
+ return updated;
518
+ }
519
+ }
520
+ return toolsData;
521
+ } catch (error) {
522
+ console.warn('Error handling output item done:', error);
523
+ return toolsData;
524
+ }
525
+ }
526
+
527
+ /**
528
+ * Handle completion events and unknown event types
529
+ */
530
+ private handleCompletionEvent(eventType: string): string {
531
+ if (eventType === EVENT_TYPES.RESPONSE_COMPLETED || eventType.includes('done')) {
532
+ return 'stop';
533
+ }
534
+ return 'stop'; // Default finish reason
535
+ }
536
+
537
    /**
     * Build the OpenAI Responses API request body from prepared parameters.
     *
     * Assembles input messages (with file attachments), applies the tool-message
     * transformation required by the Responses interface, then layers on optional
     * fields: max_output_tokens, GPT-5 reasoning/verbosity settings, function
     * tools, the web search tool, and a validated tool_choice.
     *
     * @param params prepared request parameters (model, messages, tools config, …)
     * @returns the ResponseCreateParams body ready to send to the API
     */
    public async prepareRequestBody(params: TLLMPreparedParams): Promise<OpenAI.Responses.ResponseCreateParams> {
        let input = await this.prepareInputMessages(params);

        // Apply tool message transformation to input messages
        // There's a difference in the tools message data structures between `Chat Completions` and the `Response` interface.
        // Since we don't have enough context for the interface in `transformToolMessageBlocks`, we need to perform the transformation here so it's compatible with the `Responses` interface.
        input = this.applyToolMessageTransformation(input);

        const body: OpenAI.Responses.ResponseCreateParams = {
            model: params.model as string,
            input,
        };

        // Handle max tokens
        if (params?.maxTokens !== undefined) {
            body.max_output_tokens = params.maxTokens;
        }
        // #region GPT 5 specific fields

        // Verbosity and reasoning effort only apply to GPT-5 models with reasoning capability.
        const isGPT5ReasoningModels = params.modelEntryName?.includes('gpt-5') && params?.capabilities?.reasoning;
        if (isGPT5ReasoningModels && params?.verbosity) {
            body.text = { verbosity: params.verbosity };
        }

        // We need to validate the `reasoningEffort` parameter for OpenAI models, since models like `qwen/qwen3-32b` and `deepseek-r1-distill-llama-70b` (available via Groq) also support this parameter but use different values, such as `none` and `default`. These values are valid in our system but not specifically for OpenAI.
        if (isGPT5ReasoningModels && isValidOpenAIReasoningEffort(params.reasoningEffort)) {
            body.reasoning = { effort: params.reasoningEffort };
        }
        // #endregion GPT 5 specific fields

        let tools: OpenAI.Responses.Tool[] = [];

        if (params?.toolsConfig?.tools && params?.toolsConfig?.tools?.length > 0) {
            tools = await this.prepareFunctionTools(params);
        }

        // Add null safety check before accessing toolsInfo
        if (params.toolsInfo?.openai?.webSearch?.enabled) {
            const searchTool = this.prepareWebSearchTool(params);
            tools.push(searchTool);
        }

        if (tools.length > 0) {
            body.tools = tools;

            if (params?.toolsConfig?.tool_choice) {
                const toolChoice = params.toolsConfig.tool_choice;

                // Validate tool choice before applying; invalid choices fall back to 'auto'.
                if (this.validateToolChoice(toolChoice, tools)) {
                    if (typeof toolChoice === 'string') {
                        // Handle string-based tool choices
                        body.tool_choice = toolChoice;
                    } else if (typeof toolChoice === 'object' && toolChoice !== null) {
                        // Handle object-based tool choices (specific function selection)
                        if ('type' in toolChoice && toolChoice.type === 'function' && 'function' in toolChoice && 'name' in toolChoice.function) {
                            // Transform Chat Completions specific function choice to Responses API format
                            // (Responses API expects the name at the top level, not nested under `function`).
                            body.tool_choice = {
                                type: 'function',
                                name: toolChoice.function.name,
                            };
                        } else {
                            // For other object formats, pass through with type assertion
                            body.tool_choice = toolChoice as any;
                        }
                    }
                } else {
                    body.tool_choice = 'auto';
                }
            } else {
                // Default to auto if tools are present but no choice is specified
                body.tool_choice = 'auto';
            }
        }

        return body;
    }
614
+
615
    /**
     * Transform OpenAI tool definitions to Responses.Tool format.
     *
     * Handles three input shapes, in priority order:
     *  1. ChatCompletionTool format (nested `function` object)
     *  2. direct OpenAI tool definitions (top-level `parameters`)
     *  3. a legacy format using `desc` / `properties` / `requiredFields`
     * Malformed entries are mapped to a minimal placeholder tool rather than
     * dropped, to keep indexes stable for callers.
     *
     * NOTE(review): the `index` map parameter is unused, and `.filter(Boolean)`
     * is a no-op since every branch returns an object — confirm before removing.
     *
     * @param config tool configuration holding the raw tool definitions
     * @returns the tools converted to the Responses API shape ([] when none)
     */
    public transformToolsConfig(config: ToolConfig): OpenAI.Responses.Tool[] {
        if (!config?.toolDefinitions || !Array.isArray(config.toolDefinitions)) {
            return [];
        }

        return config.toolDefinitions
            .map((tool, index) => {
                // Validate basic tool structure
                if (!tool || typeof tool !== 'object') {
                    // Return a minimal tool structure for compatibility
                    return {
                        type: 'function' as const,
                        name: undefined,
                        description: undefined,
                        parameters: {
                            type: 'object',
                            properties: undefined,
                            required: undefined,
                        },
                        strict: false,
                    } as OpenAI.Responses.Tool;
                }

                // Handle tools that are already in ChatCompletionTool format (with nested function object)
                if ('function' in tool && tool.function && typeof tool.function === 'object' && tool.function !== null) {
                    const funcTool = tool.function as { name: string; description?: string; parameters?: any };

                    // A nested function without a usable name yields a placeholder tool.
                    if (!funcTool.name || typeof funcTool.name !== 'string') {
                        return {
                            type: 'function' as const,
                            name: undefined,
                            description: tool.description || '',
                            parameters: { type: 'object', properties: undefined, required: undefined },
                            strict: false,
                        } as OpenAI.Responses.Tool;
                    }

                    return {
                        type: 'function' as const,
                        name: funcTool.name,
                        description: funcTool.description || tool.description || '',
                        parameters: funcTool.parameters || { type: 'object', properties: {}, required: [] },
                        strict: false,
                    } as OpenAI.Responses.Tool;
                }

                // Handle OpenAI tool definition format (direct parameters)
                if ('parameters' in tool) {
                    return {
                        type: 'function' as const,
                        name: tool.name,
                        description: tool.description || '',
                        parameters: tool.parameters || { type: 'object', properties: {}, required: [] },
                        strict: false,
                    } as OpenAI.Responses.Tool;
                }

                // Handle legacy format for backward compatibility
                const legacyTool = tool as any;
                return {
                    type: 'function' as const,
                    name: tool.name,
                    description: tool.description || legacyTool.desc,
                    parameters: {
                        type: 'object',
                        properties: legacyTool.properties,
                        required: legacyTool.requiredFields || legacyTool.required,
                    },
                    strict: false,
                } as OpenAI.Responses.Tool;
            })
            .filter(Boolean) as OpenAI.Responses.Tool[];
    }
692
+
693
+ /**
694
+ * Normalize tool arguments to string format for Responses API
695
+ */
696
+ private normalizeToolArguments(args: any): string {
697
+ if (typeof args === 'string') {
698
+ // If it's already a string, validate it's proper JSON
699
+ try {
700
+ JSON.parse(args);
701
+ return args;
702
+ } catch {
703
+ // If not valid JSON, wrap it in quotes to make it valid
704
+ return JSON.stringify(args);
705
+ }
706
+ }
707
+
708
+ if (typeof args === 'object' && args !== null) {
709
+ try {
710
+ return JSON.stringify(args);
711
+ } catch (error) {
712
+ return '{}'; // Fallback to empty object
713
+ }
714
+ }
715
+
716
+ if (args === undefined || args === null) {
717
+ return '{}';
718
+ }
719
+
720
+ // For primitive types, convert to JSON
721
+ return JSON.stringify(args);
722
+ }
723
+
724
+ /**
725
+ * Validate if tool data is complete and valid for transformation
726
+ */
727
+ private isValidToolData(toolData: ToolData): boolean {
728
+ return !!(toolData && toolData.id && toolData.name && (toolData.result !== undefined || toolData.error !== undefined));
729
+ }
730
+
731
    /**
     * Upload binary files and attach them to the conversation's last user message.
     *
     * Images and documents are uploaded under the agent's access scope, converted
     * to Responses API parts, and appended to the content of the last 'user'
     * message (coercing string content to a parts array first). When no user
     * message exists, a new one is created holding only the file parts.
     *
     * Note: mutates the `messages` array in place and also returns it.
     *
     * @param files   binary attachments to process (no-op when empty)
     * @param agentId access scope for upload/read operations
     * @param messages conversation messages to attach the files to
     * @returns the (mutated) messages array
     */
    async handleFileAttachments(files: BinaryInput[], agentId: string, messages: any[]): Promise<any[]> {
        if (files.length === 0) return messages;

        const uploadedFiles = await this.uploadFiles(files, agentId);
        const validImageFiles = this.getValidImageFiles(uploadedFiles);
        const validDocumentFiles = this.getValidDocumentFiles(uploadedFiles);

        // Process images and documents with Responses API specific formatting
        const imageData = await this.processImageData(validImageFiles, agentId);
        const documentData = await this.processDocumentData(validDocumentFiles, agentId);

        // Find the last user message and add files to it
        for (let i = messages.length - 1; i >= 0; i--) {
            if (messages[i].role === 'user') {
                // Ensure content is an array before pushing files
                if (typeof messages[i].content === 'string') {
                    messages[i].content = [{ type: 'input_text', text: messages[i].content }];
                } else if (!Array.isArray(messages[i].content)) {
                    messages[i].content = [];
                }
                messages[i].content.push(...imageData, ...documentData);
                break;
            }
        }

        // If no user message found, create one with files
        if (!messages.some((item) => item.role === 'user')) {
            messages.push({
                role: 'user',
                content: [...imageData, ...documentData],
            });
        }

        return messages;
    }
766
+
767
+ /**
768
+ * Get valid image files based on supported MIME types
769
+ */
770
+ private getValidImageFiles(files: BinaryInput[]): BinaryInput[] {
771
+ return files.filter((file) => this.validImageMimeTypes.includes(file?.mimetype));
772
+ }
773
+
774
+ /**
775
+ * Get valid document files based on supported MIME types
776
+ */
777
+ private getValidDocumentFiles(files: BinaryInput[]): BinaryInput[] {
778
+ return files.filter((file) => this.validDocumentMimeTypes.includes(file?.mimetype));
779
+ }
780
+
781
+ /**
782
+ * Upload files to storage
783
+ */
784
+ private async uploadFiles(files: BinaryInput[], agentId: string): Promise<BinaryInput[]> {
785
+ const promises = files.map((file) => {
786
+ const binaryInput = BinaryInput.from(file);
787
+ return binaryInput.upload(AccessCandidate.agent(agentId)).then(() => binaryInput);
788
+ });
789
+
790
+ return Promise.all(promises);
791
+ }
792
+
793
+ /**
794
+ * Process image files with Responses API specific formatting
795
+ */
796
+ private async processImageData(files: BinaryInput[], agentId: string): Promise<any[]> {
797
+ if (files.length === 0) return [];
798
+
799
+ const imageData = [];
800
+ for (const file of files) {
801
+ await this.validateFileSize(file, MAX_IMAGE_SIZE, 'Image');
802
+
803
+ const bufferData = await file.readData(AccessCandidate.agent(agentId));
804
+ const base64Data = bufferData.toString('base64');
805
+ const url = `data:${file.mimetype};base64,${base64Data}`;
806
+
807
+ imageData.push({
808
+ type: 'input_image',
809
+ image_url: url,
810
+ });
811
+ }
812
+
813
+ return imageData;
814
+ }
815
+
816
+ /**
817
+ * Process document files with Responses API specific formatting
818
+ */
819
+ private async processDocumentData(files: BinaryInput[], agentId: string): Promise<any[]> {
820
+ if (files.length === 0) return [];
821
+
822
+ const documentData = [];
823
+ for (const file of files) {
824
+ await this.validateFileSize(file, MAX_DOCUMENT_SIZE, 'Document');
825
+
826
+ const bufferData = await file.readData(AccessCandidate.agent(agentId));
827
+ const base64Data = bufferData.toString('base64');
828
+ const fileData = `data:${file.mimetype};base64,${base64Data}`;
829
+ const filename = await file.getName();
830
+
831
+ documentData.push({
832
+ type: 'input_file',
833
+ file: {
834
+ file_data: fileData,
835
+ filename,
836
+ },
837
+ });
838
+ }
839
+
840
+ return documentData;
841
+ }
842
+
843
+ /**
844
+ * Validate file size before processing
845
+ */
846
+ private async validateFileSize(file: BinaryInput, maxSize: number, fileType: string): Promise<void> {
847
+ await file.ready();
848
+ const fileInfo = await file.getJsonData(AccessCandidate.agent('temp'));
849
+ if (fileInfo.size > maxSize) {
850
+ throw new Error(`${fileType} file size (${fileInfo.size} bytes) exceeds maximum allowed size of ${maxSize} bytes`);
851
+ }
852
+ }
853
+
854
+ getInterfaceName(): string {
855
+ return 'responses';
856
+ }
857
+
858
+ validateParameters(params: TLLMParams): boolean {
859
+ // Basic validation for Responses API parameters
860
+ return !!params.model;
861
+ }
862
+
863
+ /**
864
+ * Prepare input messages for Responses API
865
+ */
866
+ private async prepareInputMessages(params: TLLMParams): Promise<any[]> {
867
+ const messages = params?.messages || [];
868
+ const files: BinaryInput[] = params?.files || [];
869
+
870
+ // Start with raw messages - transformation now happens in applyToolMessageTransformation
871
+ let input = [...messages];
872
+
873
+ // Handle files if present
874
+ if (files.length > 0) {
875
+ input = await this.handleFileAttachments(files, params.agentId, input);
876
+ }
877
+
878
+ return input;
879
+ }
880
+
881
+ /**
882
+ * Prepare function tools for Responses API request
883
+ * Transforms tools from various formats to Responses API format
884
+ */
885
+ private async prepareFunctionTools(params: TLLMParams): Promise<OpenAI.Responses.Tool[]> {
886
+ const tools: OpenAI.Responses.Tool[] = [];
887
+
888
+ // Validate and process function tools
889
+ if (params?.toolsConfig?.tools && Array.isArray(params.toolsConfig.tools) && params.toolsConfig.tools.length > 0) {
890
+ try {
891
+ // Transform tools using the enhanced transformToolsConfig method
892
+ const toolsConfig = this.transformToolsConfig({
893
+ type: 'function',
894
+ toolDefinitions: params.toolsConfig.tools as any[],
895
+ toolChoice: params.toolsConfig.tool_choice || 'auto',
896
+ modelInfo: (params.modelInfo as LLMModelInfo) || null,
897
+ });
898
+
899
+ // Validate transformed tools before adding them
900
+ const validTools = toolsConfig.filter((tool, index) => {
901
+ if (tool.type !== 'function' || !(tool as any).name) {
902
+ return false;
903
+ }
904
+ return true;
905
+ });
906
+
907
+ tools.push(...validTools);
908
+ } catch (error) {
909
+ // Don't throw here to allow the request to continue without tools
910
+ // This provides better resilience in production
911
+ }
912
+ }
913
+
914
+ return tools;
915
+ }
916
+
917
+ /**
918
+ * Get web search tool configuration for OpenAI Responses API
919
+ * According to OpenAI documentation: https://platform.openai.com/docs/api-reference/responses/create
920
+ */
921
+ private prepareWebSearchTool(params: TLLMPreparedParams): OpenAI.Responses.WebSearchTool {
922
+ const webSearch = params?.toolsInfo?.openai?.webSearch;
923
+ const contextSize = webSearch?.contextSize;
924
+ const searchCity = webSearch?.city;
925
+ const searchCountry = webSearch?.country;
926
+ const searchRegion = webSearch?.region;
927
+ const searchTimezone = webSearch?.timezone;
928
+
929
+ // Prepare location object - build incrementally if any location parameters exist
930
+ const userLocation: TSearchLocation = {
931
+ type: 'approximate', // Required, always be 'approximate' when we implement location
932
+ };
933
+
934
+ // Add location fields if they exist
935
+ if (searchCity) userLocation.city = searchCity;
936
+ if (searchCountry) userLocation.country = searchCountry;
937
+ if (searchRegion) userLocation.region = searchRegion;
938
+ if (searchTimezone) userLocation.timezone = searchTimezone;
939
+
940
+ // Only include location in config if we have actual location data
941
+ const hasLocationData = searchCity || searchCountry || searchRegion || searchTimezone;
942
+
943
+ // Configure web search tool according to OpenAI Responses API specification
944
+ const searchTool = {
945
+ type: 'web_search_preview' as const, // Use literal type to ensure consistency
946
+ };
947
+
948
+ // Add optional configuration properties
949
+ const webSearchConfig: any = {};
950
+
951
+ if (contextSize) {
952
+ webSearchConfig.search_context_size = contextSize;
953
+ }
954
+
955
+ if (hasLocationData) {
956
+ webSearchConfig.user_location = userLocation;
957
+ }
958
+
959
+ return { ...searchTool, ...webSearchConfig };
960
+ }
961
+
962
+ /**
963
+ * Transform messages for Responses API compatibility
964
+ * Handles the differences between Chat Completions and Responses API message formats
965
+ */
966
+ private applyToolMessageTransformation(input: any[]): any[] {
967
+ const transformedMessages: any[] = [];
968
+
969
+ for (let i = 0; i < input.length; i++) {
970
+ const message = input[i];
971
+
972
+ try {
973
+ if (message.role === 'assistant' && message.tool_calls && Array.isArray(message.tool_calls)) {
974
+ // Split assistant message with tool_calls into separate items (Responses API format)
975
+
976
+ // Add assistant content first if present
977
+ if (message.content !== undefined && message.content !== null) {
978
+ const contentStr = typeof message.content === 'string' ? message.content : JSON.stringify(message.content);
979
+ if (contentStr.trim().length > 0) {
980
+ transformedMessages.push({
981
+ role: 'assistant',
982
+ content: contentStr,
983
+ });
984
+ }
985
+ }
986
+
987
+ // Transform each tool call to function_call format
988
+ message.tool_calls.forEach((toolCall: any, index: number) => {
989
+ if (!toolCall || !toolCall.function) {
990
+ return;
991
+ }
992
+
993
+ const functionArgs = toolCall.function.arguments;
994
+ const normalizedArgs =
995
+ functionArgs === undefined || functionArgs === null
996
+ ? undefined
997
+ : typeof functionArgs === 'object'
998
+ ? JSON.stringify(functionArgs)
999
+ : String(functionArgs);
1000
+
1001
+ transformedMessages.push({
1002
+ type: 'function_call',
1003
+ name: toolCall.function.name || '',
1004
+ arguments: normalizedArgs,
1005
+ call_id: toolCall.id || toolCall.call_id || `call_${Date.now()}_${index}`, // Ensure unique ID
1006
+ });
1007
+ });
1008
+ } else if (message.role === 'tool') {
1009
+ // Transform tool message to function_call_output (Responses API format)
1010
+ if (!message.tool_call_id) {
1011
+ return;
1012
+ }
1013
+
1014
+ const outputContent = message.content;
1015
+ const normalizedOutput = typeof outputContent === 'string' ? outputContent : JSON.stringify(outputContent || 'null');
1016
+
1017
+ transformedMessages.push({
1018
+ type: 'function_call_output',
1019
+ call_id: message.tool_call_id,
1020
+ output: normalizedOutput,
1021
+ });
1022
+ } else {
1023
+ // Pass through other message types without content modification
1024
+ // The Responses API can handle various content formats
1025
+ transformedMessages.push(message);
1026
+ }
1027
+ } catch (error) {
1028
+ // Add the original message to prevent data loss
1029
+ transformedMessages.push(message);
1030
+ }
1031
+ }
1032
+
1033
+ // Validate the final message structure
1034
+ const validMessages = transformedMessages.filter((msg, index) => {
1035
+ if (!msg || typeof msg !== 'object') {
1036
+ return false;
1037
+ }
1038
+ return true;
1039
+ });
1040
+
1041
+ return validMessages;
1042
+ }
1043
+
1044
+ /**
1045
+ * Get search tool cost for a specific model and context size
1046
+ */
1047
+ private getSearchToolCost(modelName: string): number {
1048
+ if (!modelName) return 0;
1049
+ // Normalize: remove built-in prefix and compare case-insensitively
1050
+ const normalized = String(modelName)
1051
+ .toLowerCase()
1052
+ .replace(/^smythos\//, '');
1053
+
1054
+ // Match by prefix with any configured family in SEARCH_TOOL_COSTS
1055
+ const match = Object.entries(SEARCH_TOOL_COSTS).find(([family]) => normalized.startsWith(family));
1056
+ return match ? (match[1] as number) : 0;
1057
+ }
1058
+
1059
+ /**
1060
+ * Process function call responses and integrate them back into the conversation
1061
+ * This method helps maintain compatibility with the chat completion flow
1062
+ */
1063
+ public async processFunctionCallResults(toolsData: ToolData[]): Promise<ToolData[]> {
1064
+ const processedTools: ToolData[] = [];
1065
+
1066
+ for (const tool of toolsData) {
1067
+ if (!this.isValidToolData(tool)) {
1068
+ continue;
1069
+ }
1070
+
1071
+ try {
1072
+ const processedTool: ToolData = {
1073
+ ...tool,
1074
+ // Ensure arguments are properly formatted as JSON string
1075
+ arguments: this.normalizeToolArguments(tool.arguments),
1076
+ // Ensure function property is properly structured for compatibility
1077
+ function: tool.function || {
1078
+ name: tool.name,
1079
+ arguments: this.normalizeToolArguments(tool.arguments),
1080
+ },
1081
+ };
1082
+
1083
+ processedTools.push(processedTool);
1084
+ } catch (error) {
1085
+ // Add error information to the tool result
1086
+ processedTools.push({
1087
+ ...tool,
1088
+ error: error instanceof Error ? error.message : 'Unknown processing error',
1089
+ result: undefined,
1090
+ });
1091
+ }
1092
+ }
1093
+
1094
+ return processedTools;
1095
+ }
1096
+
1097
+ /**
1098
+ * Validate tool choice parameter for Responses API
1099
+ */
1100
+ private validateToolChoice(toolChoice: any, availableTools: OpenAI.Responses.Tool[]): boolean {
1101
+ if (!toolChoice) return true;
1102
+
1103
+ if (typeof toolChoice === 'string') {
1104
+ const validStringChoices = ['auto', 'required', 'none'];
1105
+ return validStringChoices.includes(toolChoice);
1106
+ }
1107
+
1108
+ if (typeof toolChoice === 'object' && toolChoice !== null) {
1109
+ // For specific function selection
1110
+ if (toolChoice.type === 'function' && toolChoice.function?.name) {
1111
+ // Check if the specified function exists in available tools
1112
+ return availableTools.some((tool) => tool.type === 'function' && tool.name === toolChoice.function.name);
1113
+ }
1114
+ }
1115
+
1116
+ return false;
1117
+ }
1118
+
1119
+ /**
1120
+ * Upsert a web search tool entry in toolsData and return its index
1121
+ */
1122
+ private upsertWebSearchToolImmutable(toolsData: ToolData[], id: string, args: string = ''): { toolsData: ToolData[]; index: number } {
1123
+ const existingIndex = toolsData.findIndex((t) => t.id === id);
1124
+ if (existingIndex === -1) {
1125
+ const index = toolsData.length;
1126
+ const newItem: ToolData = {
1127
+ index,
1128
+ id,
1129
+ type: TToolType.WebSearch,
1130
+ name: 'web_search',
1131
+ arguments: args,
1132
+ role: 'tool',
1133
+ } as ToolData;
1134
+ const updated: ToolData[] = [...toolsData, newItem];
1135
+ return { toolsData: updated, index };
1136
+ }
1137
+
1138
+ if (args) {
1139
+ const updated: ToolData[] = toolsData.map((t, idx) => (idx === existingIndex ? { ...t, arguments: args } : t));
1140
+ return { toolsData: updated, index: existingIndex };
1141
+ }
1142
+
1143
+ return { toolsData, index: existingIndex };
1144
+ }
1145
+ }