@within-7/minto 0.1.5 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (264)
  1. package/dist/commands/agents/AgentsCommand.js +2342 -0
  2. package/dist/commands/agents/AgentsCommand.js.map +7 -0
  3. package/dist/commands/agents/constants.js +58 -0
  4. package/dist/commands/agents/constants.js.map +7 -0
  5. package/dist/commands/agents/index.js +37 -0
  6. package/dist/commands/agents/index.js.map +7 -0
  7. package/dist/commands/agents/types.js +10 -0
  8. package/dist/commands/agents/types.js.map +7 -0
  9. package/dist/commands/agents/utils/fileOperations.js +185 -0
  10. package/dist/commands/agents/utils/fileOperations.js.map +7 -0
  11. package/dist/commands/agents/utils/index.js +21 -0
  12. package/dist/commands/agents/utils/index.js.map +7 -0
  13. package/dist/commands/bug.js +2 -2
  14. package/dist/commands/bug.js.map +2 -2
  15. package/dist/commands/compact.js +5 -5
  16. package/dist/commands/compact.js.map +2 -2
  17. package/dist/commands/ctx_viz.js +55 -22
  18. package/dist/commands/ctx_viz.js.map +2 -2
  19. package/dist/commands/mcp-interactive.js +11 -11
  20. package/dist/commands/mcp-interactive.js.map +2 -2
  21. package/dist/commands/model.js +94 -32
  22. package/dist/commands/model.js.map +3 -3
  23. package/dist/commands/plugin/AddMarketplaceForm.js +49 -21
  24. package/dist/commands/plugin/AddMarketplaceForm.js.map +2 -2
  25. package/dist/commands/plugin/ConfirmDialog.js +38 -26
  26. package/dist/commands/plugin/ConfirmDialog.js.map +2 -2
  27. package/dist/commands/plugin/InstalledPluginsByMarketplace.js +24 -8
  28. package/dist/commands/plugin/InstalledPluginsByMarketplace.js.map +2 -2
  29. package/dist/commands/plugin/InstalledPluginsManager.js +3 -1
  30. package/dist/commands/plugin/InstalledPluginsManager.js.map +2 -2
  31. package/dist/commands/plugin/MainMenu.js +16 -7
  32. package/dist/commands/plugin/MainMenu.js.map +2 -2
  33. package/dist/commands/plugin/MarketplaceManager.js +84 -39
  34. package/dist/commands/plugin/MarketplaceManager.js.map +2 -2
  35. package/dist/commands/plugin/MarketplaceSelector.js +7 -3
  36. package/dist/commands/plugin/MarketplaceSelector.js.map +2 -2
  37. package/dist/commands/plugin/PlaceholderScreen.js +16 -2
  38. package/dist/commands/plugin/PlaceholderScreen.js.map +2 -2
  39. package/dist/commands/plugin/PluginBrowser.js +4 -2
  40. package/dist/commands/plugin/PluginBrowser.js.map +2 -2
  41. package/dist/commands/plugin/PluginDetailsInstall.js +12 -6
  42. package/dist/commands/plugin/PluginDetailsInstall.js.map +2 -2
  43. package/dist/commands/plugin/PluginDetailsManage.js +14 -5
  44. package/dist/commands/plugin/PluginDetailsManage.js.map +2 -2
  45. package/dist/commands/plugin/example-usage.js.map +2 -2
  46. package/dist/commands/plugin/utils.js.map +2 -2
  47. package/dist/commands/plugin.js +226 -46
  48. package/dist/commands/plugin.js.map +2 -2
  49. package/dist/commands/refreshCommands.js +6 -3
  50. package/dist/commands/refreshCommands.js.map +2 -2
  51. package/dist/commands/resume.js +2 -1
  52. package/dist/commands/resume.js.map +2 -2
  53. package/dist/commands/setup.js +19 -5
  54. package/dist/commands/setup.js.map +2 -2
  55. package/dist/commands/terminalSetup.js +2 -2
  56. package/dist/commands/terminalSetup.js.map +1 -1
  57. package/dist/commands.js +14 -30
  58. package/dist/commands.js.map +2 -2
  59. package/dist/components/AskUserQuestionDialog/AskUserQuestionDialog.js.map +2 -2
  60. package/dist/components/AskUserQuestionDialog/QuestionView.js +10 -1
  61. package/dist/components/AskUserQuestionDialog/QuestionView.js.map +2 -2
  62. package/dist/components/BackgroundTasksPanel.js +5 -1
  63. package/dist/components/BackgroundTasksPanel.js.map +2 -2
  64. package/dist/components/Config.js +17 -4
  65. package/dist/components/Config.js.map +2 -2
  66. package/dist/components/ConsoleOAuthFlow.js.map +2 -2
  67. package/dist/components/CustomSelect/select-option.js +4 -1
  68. package/dist/components/CustomSelect/select-option.js.map +2 -2
  69. package/dist/components/Help.js +6 -8
  70. package/dist/components/Help.js.map +2 -2
  71. package/dist/components/Logo.js +1 -1
  72. package/dist/components/Logo.js.map +2 -2
  73. package/dist/components/ModelListManager.js.map +2 -2
  74. package/dist/components/ModelSelector/ModelSelector.js +2030 -0
  75. package/dist/components/ModelSelector/ModelSelector.js.map +7 -0
  76. package/dist/components/ModelSelector/ScreenContainer.js +27 -0
  77. package/dist/components/ModelSelector/ScreenContainer.js.map +7 -0
  78. package/dist/components/ModelSelector/constants.js +37 -0
  79. package/dist/components/ModelSelector/constants.js.map +7 -0
  80. package/dist/components/ModelSelector/hooks/index.js +5 -0
  81. package/dist/components/ModelSelector/hooks/index.js.map +7 -0
  82. package/dist/components/ModelSelector/hooks/useEscapeNavigation.js +21 -0
  83. package/dist/components/ModelSelector/hooks/useEscapeNavigation.js.map +7 -0
  84. package/dist/components/ModelSelector/index.js +17 -0
  85. package/dist/components/ModelSelector/index.js.map +7 -0
  86. package/dist/components/ModelSelector/types.js +1 -0
  87. package/dist/components/ModelSelector/types.js.map +7 -0
  88. package/dist/components/PressEnterToContinue.js +1 -1
  89. package/dist/components/PressEnterToContinue.js.map +2 -2
  90. package/dist/components/ProjectOnboarding.js +1 -1
  91. package/dist/components/ProjectOnboarding.js.map +2 -2
  92. package/dist/components/PromptInput.js +88 -37
  93. package/dist/components/PromptInput.js.map +2 -2
  94. package/dist/components/QuitSummary.js +17 -10
  95. package/dist/components/QuitSummary.js.map +2 -2
  96. package/dist/components/SentryErrorBoundary.js.map +2 -2
  97. package/dist/components/StreamingBashOutput.js.map +2 -2
  98. package/dist/components/StructuredDiff.js.map +2 -2
  99. package/dist/components/SubagentProgress.js.map +2 -2
  100. package/dist/components/TaskCard.js.map +2 -2
  101. package/dist/components/TextInput.js.map +1 -1
  102. package/dist/components/TodoItem.js.map +1 -1
  103. package/dist/components/binary-feedback/BinaryFeedbackOption.js +1 -3
  104. package/dist/components/binary-feedback/BinaryFeedbackOption.js.map +2 -2
  105. package/dist/components/messages/AssistantLocalCommandOutputMessage.js.map +1 -1
  106. package/dist/components/messages/AssistantToolUseMessage.js +3 -1
  107. package/dist/components/messages/AssistantToolUseMessage.js.map +2 -2
  108. package/dist/components/messages/TaskProgressMessage.js.map +2 -2
  109. package/dist/components/messages/TaskToolMessage.js.map +2 -2
  110. package/dist/components/messages/UserToolResultMessage/utils.js.map +2 -2
  111. package/dist/components/permissions/FileEditPermissionRequest/FileEditToolDiff.js.map +2 -2
  112. package/dist/components/permissions/FileWritePermissionRequest/FileWriteToolDiff.js.map +2 -2
  113. package/dist/components/permissions/hooks.js.map +2 -2
  114. package/dist/constants/modelCapabilities.js +1 -1
  115. package/dist/constants/modelCapabilities.js.map +2 -2
  116. package/dist/constants/prompts.js.map +1 -1
  117. package/dist/constants/timing.js +34 -0
  118. package/dist/constants/timing.js.map +7 -0
  119. package/dist/entrypoints/cli.js +128 -33
  120. package/dist/entrypoints/cli.js.map +3 -3
  121. package/dist/entrypoints/mcp.js +13 -18
  122. package/dist/entrypoints/mcp.js.map +2 -2
  123. package/dist/hooks/useCanUseTool.js.map +2 -2
  124. package/dist/hooks/useCancelRequest.js.map +1 -1
  125. package/dist/hooks/useHistorySearch.js.map +2 -2
  126. package/dist/hooks/useLogStartupTime.js.map +2 -2
  127. package/dist/hooks/usePermissionRequestLogging.js.map +2 -2
  128. package/dist/hooks/useTextInput.js.map +1 -1
  129. package/dist/hooks/useUnifiedCompletion.js +493 -394
  130. package/dist/hooks/useUnifiedCompletion.js.map +2 -2
  131. package/dist/index.js.map +2 -2
  132. package/dist/permissions.js +4 -7
  133. package/dist/permissions.js.map +2 -2
  134. package/dist/query.js +6 -1
  135. package/dist/query.js.map +2 -2
  136. package/dist/screens/REPL.js +72 -36
  137. package/dist/screens/REPL.js.map +2 -2
  138. package/dist/screens/ResumeConversation.js +2 -1
  139. package/dist/screens/ResumeConversation.js.map +2 -2
  140. package/dist/services/adapters/base.js.map +2 -2
  141. package/dist/services/adapters/chatCompletions.js.map +2 -2
  142. package/dist/services/adapters/responsesAPI.js +3 -1
  143. package/dist/services/adapters/responsesAPI.js.map +2 -2
  144. package/dist/services/claude.js +327 -328
  145. package/dist/services/claude.js.map +2 -2
  146. package/dist/services/customCommands.js +6 -1
  147. package/dist/services/customCommands.js.map +2 -2
  148. package/dist/services/fileFreshness.js.map +2 -2
  149. package/dist/services/gpt5ConnectionTest.js +20 -7
  150. package/dist/services/gpt5ConnectionTest.js.map +2 -2
  151. package/dist/services/hookExecutor.js +6 -12
  152. package/dist/services/hookExecutor.js.map +2 -2
  153. package/dist/services/mcpClient.js +29 -2
  154. package/dist/services/mcpClient.js.map +2 -2
  155. package/dist/services/mentionProcessor.js +23 -10
  156. package/dist/services/mentionProcessor.js.map +2 -2
  157. package/dist/services/modelAdapterFactory.js.map +2 -2
  158. package/dist/services/oauth.js.map +2 -2
  159. package/dist/services/openai.js +109 -72
  160. package/dist/services/openai.js.map +3 -3
  161. package/dist/services/responseStateManager.js.map +2 -2
  162. package/dist/services/systemReminder.js.map +2 -2
  163. package/dist/tools/ArchitectTool/ArchitectTool.js.map +1 -1
  164. package/dist/tools/AskExpertModelTool/AskExpertModelTool.js +14 -8
  165. package/dist/tools/AskExpertModelTool/AskExpertModelTool.js.map +2 -2
  166. package/dist/tools/BashOutputTool/BashOutputTool.js.map +2 -2
  167. package/dist/tools/BashTool/BashTool.js.map +2 -2
  168. package/dist/tools/FileReadTool/FileReadTool.js.map +1 -1
  169. package/dist/tools/FileWriteTool/FileWriteTool.js.map +2 -2
  170. package/dist/tools/GrepTool/GrepTool.js +1 -4
  171. package/dist/tools/GrepTool/GrepTool.js.map +2 -2
  172. package/dist/tools/MultiEditTool/MultiEditTool.js +4 -1
  173. package/dist/tools/MultiEditTool/MultiEditTool.js.map +2 -2
  174. package/dist/tools/NotebookReadTool/NotebookReadTool.js +3 -1
  175. package/dist/tools/NotebookReadTool/NotebookReadTool.js.map +2 -2
  176. package/dist/tools/SkillTool/SkillTool.js +12 -6
  177. package/dist/tools/SkillTool/SkillTool.js.map +2 -2
  178. package/dist/tools/TaskTool/TaskTool.js +14 -5
  179. package/dist/tools/TaskTool/TaskTool.js.map +2 -2
  180. package/dist/tools/TaskTool/prompt.js.map +2 -2
  181. package/dist/tools/ThinkTool/ThinkTool.js +6 -1
  182. package/dist/tools/ThinkTool/ThinkTool.js.map +2 -2
  183. package/dist/tools/TodoWriteTool/TodoWriteTool.js +23 -3
  184. package/dist/tools/TodoWriteTool/TodoWriteTool.js.map +2 -2
  185. package/dist/tools/URLFetcherTool/URLFetcherTool.js +2 -2
  186. package/dist/tools/URLFetcherTool/URLFetcherTool.js.map +2 -2
  187. package/dist/tools/URLFetcherTool/cache.js +6 -3
  188. package/dist/tools/URLFetcherTool/cache.js.map +2 -2
  189. package/dist/tools/URLFetcherTool/htmlToMarkdown.js +3 -1
  190. package/dist/tools/URLFetcherTool/htmlToMarkdown.js.map +2 -2
  191. package/dist/tools/WebSearchTool/WebSearchTool.js.map +2 -2
  192. package/dist/tools/WebSearchTool/prompt.js.map +2 -2
  193. package/dist/tools/WebSearchTool/searchProviders.js +15 -6
  194. package/dist/tools/WebSearchTool/searchProviders.js.map +2 -2
  195. package/dist/tools.js +4 -1
  196. package/dist/tools.js.map +2 -2
  197. package/dist/types/core.js +1 -0
  198. package/dist/types/core.js.map +7 -0
  199. package/dist/types/hooks.js +1 -4
  200. package/dist/types/hooks.js.map +2 -2
  201. package/dist/types/marketplace.js +8 -2
  202. package/dist/types/marketplace.js.map +2 -2
  203. package/dist/types/plugin.js +9 -6
  204. package/dist/types/plugin.js.map +2 -2
  205. package/dist/utils/BackgroundShellManager.js +76 -10
  206. package/dist/utils/BackgroundShellManager.js.map +2 -2
  207. package/dist/utils/PersistentShell.js +7 -2
  208. package/dist/utils/PersistentShell.js.map +2 -2
  209. package/dist/utils/advancedFuzzyMatcher.js +4 -1
  210. package/dist/utils/advancedFuzzyMatcher.js.map +2 -2
  211. package/dist/utils/agentLoader.js +69 -35
  212. package/dist/utils/agentLoader.js.map +2 -2
  213. package/dist/utils/agentStorage.js.map +2 -2
  214. package/dist/utils/async.js +163 -0
  215. package/dist/utils/async.js.map +7 -0
  216. package/dist/utils/autoUpdater.js +8 -2
  217. package/dist/utils/autoUpdater.js.map +2 -2
  218. package/dist/utils/commands.js +23 -11
  219. package/dist/utils/commands.js.map +2 -2
  220. package/dist/utils/commonUnixCommands.js +3 -1
  221. package/dist/utils/commonUnixCommands.js.map +2 -2
  222. package/dist/utils/compressionMode.js.map +2 -2
  223. package/dist/utils/config.js +30 -14
  224. package/dist/utils/config.js.map +2 -2
  225. package/dist/utils/debugLogger.js.map +2 -2
  226. package/dist/utils/env.js.map +2 -2
  227. package/dist/utils/envConfig.js +82 -0
  228. package/dist/utils/envConfig.js.map +7 -0
  229. package/dist/utils/errorHandling.js +89 -0
  230. package/dist/utils/errorHandling.js.map +7 -0
  231. package/dist/utils/expertChatStorage.js.map +2 -2
  232. package/dist/utils/fuzzyMatcher.js +13 -7
  233. package/dist/utils/fuzzyMatcher.js.map +2 -2
  234. package/dist/utils/hookManager.js +14 -4
  235. package/dist/utils/hookManager.js.map +2 -2
  236. package/dist/utils/log.js.map +2 -2
  237. package/dist/utils/marketplaceManager.js +44 -9
  238. package/dist/utils/marketplaceManager.js.map +2 -2
  239. package/dist/utils/messageContextManager.js.map +1 -1
  240. package/dist/utils/messages.js +6 -3
  241. package/dist/utils/messages.js.map +2 -2
  242. package/dist/utils/model.js +3 -1
  243. package/dist/utils/model.js.map +2 -2
  244. package/dist/utils/pluginInstaller.js +3 -15
  245. package/dist/utils/pluginInstaller.js.map +2 -2
  246. package/dist/utils/pluginLoader.js +41 -13
  247. package/dist/utils/pluginLoader.js.map +2 -2
  248. package/dist/utils/pluginRegistry.js.map +2 -2
  249. package/dist/utils/pluginValidator.js +71 -49
  250. package/dist/utils/pluginValidator.js.map +2 -2
  251. package/dist/utils/ptyCompat.js.map +2 -2
  252. package/dist/utils/roundConverter.js.map +2 -2
  253. package/dist/utils/secureFile.js +43 -14
  254. package/dist/utils/secureFile.js.map +2 -2
  255. package/dist/utils/sessionState.js.map +2 -2
  256. package/dist/utils/skillLoader.js.map +2 -2
  257. package/dist/utils/teamConfig.js +7 -4
  258. package/dist/utils/teamConfig.js.map +2 -2
  259. package/dist/utils/theme.js.map +2 -2
  260. package/dist/utils/thinking.js.map +2 -2
  261. package/dist/utils/unaryLogging.js.map +2 -2
  262. package/dist/version.js +2 -2
  263. package/dist/version.js.map +1 -1
  264. package/package.json +5 -5
@@ -14,6 +14,7 @@ import {
  } from "../utils/config.js";
  import { getProjectDocs } from "../context.js";
  import { logError, SESSION_ID } from "../utils/log.js";
+ import { abortableDelay } from "../utils/async.js";
  import { USER_AGENT } from "../utils/http.js";
  import {
  createAssistantAPIErrorMessage,
@@ -36,41 +37,15 @@ import { USE_BEDROCK, USE_VERTEX } from "../utils/model.js";
  import { getCLISyspromptPrefix } from "../constants/prompts.js";
  import { getVertexRegionForModel } from "../utils/model.js";
  import { nanoid } from "nanoid";
- import { getCompletionWithProfile, getGPT5CompletionWithProfile } from "./openai.js";
+ import {
+ getCompletionWithProfile,
+ getGPT5CompletionWithProfile
+ } from "./openai.js";
  import { getReasoningEffort } from "../utils/thinking.js";
  import { generateSystemReminders } from "./systemReminder.js";
  function isGPT5Model(modelName) {
  return modelName.startsWith("gpt-5");
  }
- function getModelConfigForDebug(model) {
- const config = getGlobalConfig();
- const modelManager = getModelManager();
- const modelProfile = modelManager.getModel("main");
- let apiKeyStatus = "missing";
- let baseURL;
- let maxTokens;
- let reasoningEffort;
- if (modelProfile) {
- apiKeyStatus = modelProfile.apiKey ? "configured" : "missing";
- baseURL = modelProfile.baseURL;
- maxTokens = modelProfile.maxTokens;
- reasoningEffort = modelProfile.reasoningEffort;
- } else {
- apiKeyStatus = "missing";
- maxTokens = void 0;
- reasoningEffort = void 0;
- }
- return {
- modelName: model,
- provider: modelProfile?.provider || config.primaryProvider || "anthropic",
- apiKeyStatus,
- baseURL,
- maxTokens,
- reasoningEffort,
- isStream: config.stream || false,
- temperature: MAIN_QUERY_TEMPERATURE
- };
- }
  class MintoContextManager {
  static instance;
  projectDocsCache = "";
@@ -103,14 +78,15 @@ class MintoContextManager {
  });
  }
  } catch (error) {
- console.warn("[MintoContext] Failed to load project docs:", error);
+ debugLogger.warn("MINTO_CONTEXT_LOAD_FAILED", { error: String(error) });
  this.projectDocsCache = "";
  this.cacheInitialized = true;
  }
  }
  getMintoContext() {
  if (!this.cacheInitialized) {
- this.initialize().catch(console.warn);
+ this.initialize().catch(() => {
+ });
  return "";
  }
  return this.projectDocsCache;
@@ -122,7 +98,8 @@ class MintoContextManager {
  }
  }
  const mintoContextManager = MintoContextManager.getInstance();
- mintoContextManager.initialize().catch(console.warn);
+ mintoContextManager.initialize().catch(() => {
+ });
  const generateMintoContext = () => {
  return mintoContextManager.getMintoContext();
  };
@@ -135,10 +112,6 @@ const CREDIT_BALANCE_TOO_LOW_ERROR_MESSAGE = "Credit balance is too low";
  const INVALID_API_KEY_ERROR_MESSAGE = "Invalid API key \xB7 Please run /login";
  const NO_CONTENT_MESSAGE = "(no content)";
  const PROMPT_CACHING_ENABLED = !process.env.DISABLE_PROMPT_CACHING;
- const HAIKU_COST_PER_MILLION_INPUT_TOKENS = 0.8;
- const HAIKU_COST_PER_MILLION_OUTPUT_TOKENS = 4;
- const HAIKU_COST_PER_MILLION_PROMPT_CACHE_WRITE_TOKENS = 1;
- const HAIKU_COST_PER_MILLION_PROMPT_CACHE_READ_TOKENS = 0.08;
  const SONNET_COST_PER_MILLION_INPUT_TOKENS = 3;
  const SONNET_COST_PER_MILLION_OUTPUT_TOKENS = 15;
  const SONNET_COST_PER_MILLION_PROMPT_CACHE_WRITE_TOKENS = 3.75;
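Note on the pricing constants above: only the Sonnet per-million-token rates (3 input, 15 output, 3.75 cache write) are visible in this hunk; the Haiku rates are removed in 0.1.6. The sketch below shows how per-million constants like these are typically combined with an Anthropic-style usage object; the cache-read rate and the exact formula claude.js uses are assumptions for illustration, not taken from this diff.

    // Illustrative sketch only; not the package's actual cost function.
    const SONNET_COST_PER_MILLION_INPUT_TOKENS = 3;
    const SONNET_COST_PER_MILLION_OUTPUT_TOKENS = 15;
    const SONNET_COST_PER_MILLION_PROMPT_CACHE_WRITE_TOKENS = 3.75;
    const ASSUMED_CACHE_READ_RATE = 0.3; // hypothetical value, not shown in this hunk

    function estimateSonnetCostUSD(usage) {
      // usage follows the Anthropic API shape: input_tokens, output_tokens,
      // cache_creation_input_tokens, cache_read_input_tokens.
      const input = ((usage.input_tokens ?? 0) / 1e6) * SONNET_COST_PER_MILLION_INPUT_TOKENS;
      const output = ((usage.output_tokens ?? 0) / 1e6) * SONNET_COST_PER_MILLION_OUTPUT_TOKENS;
      const cacheWrite = ((usage.cache_creation_input_tokens ?? 0) / 1e6) * SONNET_COST_PER_MILLION_PROMPT_CACHE_WRITE_TOKENS;
      const cacheRead = ((usage.cache_read_input_tokens ?? 0) / 1e6) * ASSUMED_CACHE_READ_RATE;
      return input + output + cacheWrite + cacheRead;
    }

    // Example: 10,000 input + 2,000 output tokens -> 0.03 + 0.03 = $0.06.
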
@@ -151,24 +124,6 @@ function getMetadata() {
  }
  const MAX_RETRIES = process.env.USER_TYPE === "SWE_BENCH" ? 100 : 10;
  const BASE_DELAY_MS = 500;
- function abortableDelay(delayMs, signal) {
- return new Promise((resolve, reject) => {
- if (signal?.aborted) {
- reject(new Error("Request was aborted"));
- return;
- }
- const timeoutId = setTimeout(() => {
- resolve();
- }, delayMs);
- if (signal) {
- const abortHandler = () => {
- clearTimeout(timeoutId);
- reject(new Error("Request was aborted"));
- };
- signal.addEventListener("abort", abortHandler, { once: true });
- }
- });
- }
  function getRetryDelay(attempt, retryAfterHeader) {
  if (retryAfterHeader) {
  const seconds = parseInt(retryAfterHeader, 10);
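This hunk deletes the inline abortableDelay helper, while the first hunk adds `import { abortableDelay } from "../utils/async.js"` and the file list shows a new package/dist/utils/async.js (+163), so the helper has presumably been moved into that shared module. A minimal sketch of such a helper, reformatted from the removed lines above (the actual contents of dist/utils/async.js are not shown in this diff and may differ):

    // Sketch based on the removed inline implementation; assumes the extracted
    // helper keeps the same signature and "Request was aborted" behavior.
    export function abortableDelay(delayMs, signal) {
      return new Promise((resolve, reject) => {
        if (signal?.aborted) {
          reject(new Error("Request was aborted"));
          return;
        }
        const timeoutId = setTimeout(() => {
          resolve();
        }, delayMs);
        if (signal) {
          // Cancel the pending timer and reject as soon as the caller aborts.
          signal.addEventListener("abort", () => {
            clearTimeout(timeoutId);
            reject(new Error("Request was aborted"));
          }, { once: true });
        }
      });
    }
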
@@ -266,7 +221,7 @@ async function fetchAnthropicModels(baseURL, apiKey) {
  if (error instanceof Error && error.message.includes("API key") || error instanceof Error && error.message.includes("Anthropic")) {
  throw error;
  }
- console.error("Failed to fetch Anthropic models:", error);
+ logError(error);
  throw new Error(
  "Unable to connect to Anthropic API. Please check your internet connection and try again."
  );
@@ -283,9 +238,9 @@ async function verifyApiKey(apiKey, baseURL, provider) {
  "Content-Type": "application/json"
  };
  if (!baseURL) {
- console.warn(
- "No baseURL provided for non-Anthropic provider verification"
- );
+ debugLogger.warn("API_VERIFY", {
+ error: "No baseURL provided for non-Anthropic provider"
+ });
  return false;
  }
  const modelsURL = `${baseURL.replace(/\/+$/, "")}/models`;
@@ -295,7 +250,10 @@ async function verifyApiKey(apiKey, baseURL, provider) {
  });
  return response.ok;
  } catch (error) {
- console.warn("API verification failed for non-Anthropic provider:", error);
+ debugLogger.warn("API_VERIFY", {
+ error: "non-Anthropic provider verification failed",
+ details: String(error)
+ });
  return false;
  }
  }
@@ -537,7 +495,7 @@ async function handleMessageStream(stream, signal) {
  usage
  };
  }
- function convertOpenAIResponseToAnthropic(response, tools) {
+ function convertOpenAIResponseToAnthropic(response, _tools) {
  let contentBlocks = [];
  const message = response.choices?.[0]?.message;
  if (!message) {
@@ -671,7 +629,7 @@ function applyCacheControlWithLimits(systemBlocks, messageParams) {
  }
  const maxCacheBlocks = 4;
  let usedCacheBlocks = 0;
- const processedSystemBlocks = systemBlocks.map((block, index) => {
+ const processedSystemBlocks = systemBlocks.map((block, _index) => {
  if (usedCacheBlocks < maxCacheBlocks && block.text.length > 1e3) {
  usedCacheBlocks++;
  return {
@@ -684,20 +642,22 @@ function applyCacheControlWithLimits(systemBlocks, messageParams) {
  });
  const processedMessageParams = messageParams.map((message, messageIndex) => {
  if (Array.isArray(message.content)) {
- const processedContent = message.content.map((contentBlock, blockIndex) => {
- const shouldCache = usedCacheBlocks < maxCacheBlocks && contentBlock.type === "text" && typeof contentBlock.text === "string" && // Long documents (over 2000 characters)
- (contentBlock.text.length > 2e3 || // Last content block of the last message (may be important context)
- messageIndex === messageParams.length - 1 && blockIndex === message.content.length - 1 && contentBlock.text.length > 500);
- if (shouldCache) {
- usedCacheBlocks++;
- return {
- ...contentBlock,
- cache_control: { type: "ephemeral" }
- };
+ const processedContent = message.content.map(
+ (contentBlock, blockIndex) => {
+ const shouldCache = usedCacheBlocks < maxCacheBlocks && contentBlock.type === "text" && typeof contentBlock.text === "string" && // Long documents (over 2000 characters)
+ (contentBlock.text.length > 2e3 || // Last content block of the last message (may be important context)
+ messageIndex === messageParams.length - 1 && blockIndex === message.content.length - 1 && contentBlock.text.length > 500);
+ if (shouldCache) {
+ usedCacheBlocks++;
+ return {
+ ...contentBlock,
+ cache_control: { type: "ephemeral" }
+ };
+ }
+ const { cache_control, ...blockWithoutCache } = contentBlock;
+ return blockWithoutCache;
  }
- const { cache_control, ...blockWithoutCache } = contentBlock;
- return blockWithoutCache;
- });
+ );
  return {
  ...message,
  content: processedContent
@@ -775,7 +735,10 @@ async function queryLLM(messages, systemPrompt, maxThinkingTokens, tools, signal
  const resolvedModel = modelProfile.modelName;
  const toolUseContext = options.toolUseContext;
  if (toolUseContext && !toolUseContext.responseState) {
- const conversationId = getConversationId(toolUseContext.agentId, toolUseContext.messageId);
+ const conversationId = getConversationId(
+ toolUseContext.agentId,
+ toolUseContext.messageId
+ );
  const previousResponseId = responseStateManager.getPreviousResponseId(conversationId);
  toolUseContext.responseState = {
  previousResponseId,
@@ -1009,146 +972,26 @@ async function queryAnthropicNative(messages, systemPrompt, maxThinkingTokens, t
  let attemptNumber = 0;
  let response;
  try {
- response = await withRetry(async (attempt) => {
- attemptNumber = attempt;
- start = Date.now();
- const params = {
- model,
- max_tokens: getMaxTokensFromProfile(modelProfile),
- messages: processedMessages,
- system: processedSystem,
- tools: toolSchemas.length > 0 ? toolSchemas : void 0,
- tool_choice: toolSchemas.length > 0 ? { type: "auto" } : void 0
- };
- if (maxThinkingTokens > 0) {
- ;
- params.extra_headers = {
- "anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15"
+ response = await withRetry(
+ async (attempt) => {
+ attemptNumber = attempt;
+ start = Date.now();
+ const params = {
+ model,
+ max_tokens: getMaxTokensFromProfile(modelProfile),
+ messages: processedMessages,
+ system: processedSystem,
+ tools: toolSchemas.length > 0 ? toolSchemas : void 0,
+ tool_choice: toolSchemas.length > 0 ? { type: "auto" } : void 0
  };
- params.thinking = { max_tokens: maxThinkingTokens };
- }
- debugLogger.api("ANTHROPIC_API_CALL_START_STREAMING", {
- endpoint: modelProfile?.baseURL || "DEFAULT_ANTHROPIC",
- model,
- provider,
- apiKeyConfigured: !!modelProfile?.apiKey,
- apiKeyPrefix: modelProfile?.apiKey ? modelProfile.apiKey.substring(0, 8) : null,
- maxTokens: params.max_tokens,
- temperature: MAIN_QUERY_TEMPERATURE,
- params,
- messageCount: params.messages?.length || 0,
- streamMode: true,
- toolsCount: toolSchemas.length,
- thinkingTokens: maxThinkingTokens,
- timestamp: (/* @__PURE__ */ new Date()).toISOString(),
- modelProfileId: modelProfile?.modelName,
- modelProfileName: modelProfile?.name
- });
- if (config.stream) {
- const stream = await anthropic.beta.messages.create({
- ...params,
- stream: true
- }, {
- signal
- // ← CRITICAL: Connect the AbortSignal to API call
- });
- let finalResponse = null;
- let messageStartEvent = null;
- const contentBlocks = [];
- const inputJSONBuffers = /* @__PURE__ */ new Map();
- let usage = null;
- let stopReason = null;
- let stopSequence = null;
- for await (const event of stream) {
- if (signal.aborted) {
- debugLogger.flow("STREAM_ABORTED", {
- eventType: event.type,
- timestamp: Date.now()
- });
- throw new Error("Request was cancelled");
- }
- switch (event.type) {
- case "message_start":
- messageStartEvent = event;
- finalResponse = {
- ...event.message,
- content: []
- // Will be populated from content blocks
- };
- break;
- case "content_block_start":
- contentBlocks[event.index] = { ...event.content_block };
- if (event.content_block.type === "tool_use") {
- inputJSONBuffers.set(event.index, "");
- }
- break;
- case "content_block_delta":
- const blockIndex = event.index;
- if (!contentBlocks[blockIndex]) {
- contentBlocks[blockIndex] = {
- type: event.delta.type === "text_delta" ? "text" : "tool_use",
- text: event.delta.type === "text_delta" ? "" : void 0
- };
- if (event.delta.type === "input_json_delta") {
- inputJSONBuffers.set(blockIndex, "");
- }
- }
- if (event.delta.type === "text_delta") {
- contentBlocks[blockIndex].text += event.delta.text;
- } else if (event.delta.type === "input_json_delta") {
- const currentBuffer = inputJSONBuffers.get(blockIndex) || "";
- inputJSONBuffers.set(blockIndex, currentBuffer + event.delta.partial_json);
- }
- break;
- case "message_delta":
- if (event.delta.stop_reason) stopReason = event.delta.stop_reason;
- if (event.delta.stop_sequence) stopSequence = event.delta.stop_sequence;
- if (event.usage) usage = { ...usage, ...event.usage };
- break;
- case "content_block_stop":
- const stopIndex = event.index;
- const block = contentBlocks[stopIndex];
- if (block?.type === "tool_use" && inputJSONBuffers.has(stopIndex)) {
- const jsonStr = inputJSONBuffers.get(stopIndex);
- if (jsonStr) {
- try {
- block.input = JSON.parse(jsonStr);
- } catch (error) {
- debugLogger.error("JSON_PARSE_ERROR", {
- blockIndex: stopIndex,
- jsonStr,
- error: error instanceof Error ? error.message : String(error)
- });
- block.input = {};
- }
- inputJSONBuffers.delete(stopIndex);
- }
- }
- break;
- case "message_stop":
- inputJSONBuffers.clear();
- break;
- }
- if (event.type === "message_stop") {
- break;
- }
- }
- if (!finalResponse || !messageStartEvent) {
- throw new Error("Stream ended without proper message structure");
+ if (maxThinkingTokens > 0) {
+ ;
+ params.extra_headers = {
+ "anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15"
+ };
+ params.thinking = { max_tokens: maxThinkingTokens };
  }
- finalResponse = {
- ...messageStartEvent.message,
- content: contentBlocks.filter(Boolean),
- stop_reason: stopReason,
- stop_sequence: stopSequence,
- usage: {
- ...messageStartEvent.message.usage,
- ...usage
- }
- };
- return finalResponse;
- } else {
- debugLogger.api("ANTHROPIC_API_CALL_START_NON_STREAMING", {
+ debugLogger.api("ANTHROPIC_API_CALL_START_STREAMING", {
  endpoint: modelProfile?.baseURL || "DEFAULT_ANTHROPIC",
  model,
  provider,
@@ -1156,20 +999,151 @@ async function queryAnthropicNative(messages, systemPrompt, maxThinkingTokens, t
  apiKeyPrefix: modelProfile?.apiKey ? modelProfile.apiKey.substring(0, 8) : null,
  maxTokens: params.max_tokens,
  temperature: MAIN_QUERY_TEMPERATURE,
+ params,
  messageCount: params.messages?.length || 0,
- streamMode: false,
+ streamMode: true,
  toolsCount: toolSchemas.length,
  thinkingTokens: maxThinkingTokens,
  timestamp: (/* @__PURE__ */ new Date()).toISOString(),
  modelProfileId: modelProfile?.modelName,
  modelProfileName: modelProfile?.name
  });
- return await anthropic.beta.messages.create(params, {
- signal
- // ← CRITICAL: Connect the AbortSignal to API call
- });
- }
- }, { signal });
+ if (config.stream) {
+ const stream = await anthropic.beta.messages.create(
+ {
+ ...params,
+ stream: true
+ },
+ {
+ signal
+ // ← CRITICAL: Connect the AbortSignal to API call
+ }
+ );
+ let finalResponse = null;
+ let messageStartEvent = null;
+ const contentBlocks = [];
+ const inputJSONBuffers = /* @__PURE__ */ new Map();
+ let usage = null;
+ let stopReason = null;
+ let stopSequence = null;
+ for await (const event of stream) {
+ if (signal.aborted) {
+ debugLogger.flow("STREAM_ABORTED", {
+ eventType: event.type,
+ timestamp: Date.now()
+ });
+ throw new Error("Request was cancelled");
+ }
+ switch (event.type) {
+ case "message_start":
+ messageStartEvent = event;
+ finalResponse = {
+ ...event.message,
+ content: []
+ // Will be populated from content blocks
+ };
+ break;
+ case "content_block_start":
+ contentBlocks[event.index] = { ...event.content_block };
+ if (event.content_block.type === "tool_use") {
+ inputJSONBuffers.set(event.index, "");
+ }
+ break;
+ case "content_block_delta":
+ const blockIndex = event.index;
+ if (!contentBlocks[blockIndex]) {
+ contentBlocks[blockIndex] = {
+ type: event.delta.type === "text_delta" ? "text" : "tool_use",
+ text: event.delta.type === "text_delta" ? "" : void 0
+ };
+ if (event.delta.type === "input_json_delta") {
+ inputJSONBuffers.set(blockIndex, "");
+ }
+ }
+ if (event.delta.type === "text_delta") {
+ contentBlocks[blockIndex].text += event.delta.text;
+ } else if (event.delta.type === "input_json_delta") {
+ const currentBuffer = inputJSONBuffers.get(blockIndex) || "";
+ inputJSONBuffers.set(
+ blockIndex,
+ currentBuffer + event.delta.partial_json
+ );
+ }
+ break;
+ case "message_delta":
+ if (event.delta.stop_reason)
+ stopReason = event.delta.stop_reason;
+ if (event.delta.stop_sequence)
+ stopSequence = event.delta.stop_sequence;
+ if (event.usage) usage = { ...usage, ...event.usage };
+ break;
+ case "content_block_stop":
+ const stopIndex = event.index;
+ const block = contentBlocks[stopIndex];
+ if (block?.type === "tool_use" && inputJSONBuffers.has(stopIndex)) {
+ const jsonStr = inputJSONBuffers.get(stopIndex);
+ if (jsonStr) {
+ try {
+ block.input = JSON.parse(jsonStr);
+ } catch (error) {
+ debugLogger.error("JSON_PARSE_ERROR", {
+ blockIndex: stopIndex,
+ jsonStr,
+ error: error instanceof Error ? error.message : String(error)
+ });
+ block.input = {};
+ }
+ inputJSONBuffers.delete(stopIndex);
+ }
+ }
+ break;
+ case "message_stop":
+ inputJSONBuffers.clear();
+ break;
+ }
+ if (event.type === "message_stop") {
+ break;
+ }
+ }
+ if (!finalResponse || !messageStartEvent) {
+ throw new Error("Stream ended without proper message structure");
+ }
+ finalResponse = {
+ ...messageStartEvent.message,
+ content: contentBlocks.filter(Boolean),
+ stop_reason: stopReason,
+ stop_sequence: stopSequence,
+ usage: {
+ ...messageStartEvent.message.usage,
+ ...usage
+ }
+ };
+ return finalResponse;
+ } else {
+ debugLogger.api("ANTHROPIC_API_CALL_START_NON_STREAMING", {
+ endpoint: modelProfile?.baseURL || "DEFAULT_ANTHROPIC",
+ model,
+ provider,
+ apiKeyConfigured: !!modelProfile?.apiKey,
+ apiKeyPrefix: modelProfile?.apiKey ? modelProfile.apiKey.substring(0, 8) : null,
+ maxTokens: params.max_tokens,
+ temperature: MAIN_QUERY_TEMPERATURE,
+ messageCount: params.messages?.length || 0,
+ streamMode: false,
+ toolsCount: toolSchemas.length,
+ thinkingTokens: maxThinkingTokens,
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ modelProfileId: modelProfile?.modelName,
+ modelProfileName: modelProfile?.name
+ });
+ return await anthropic.beta.messages.create(params, {
+ signal
+ // ← CRITICAL: Connect the AbortSignal to API call
+ });
+ }
+ },
+ { signal }
+ );
  debugLogger.api("ANTHROPIC_API_CALL_SUCCESS", {
  content: response.content
  });
@@ -1334,86 +1308,121 @@ async function queryOpenAI(messages, systemPrompt, maxThinkingTokens, tools, sig
  let attemptNumber = 0;
  let response;
  try {
- response = await withRetry(async (attempt) => {
- attemptNumber = attempt;
- start = Date.now();
- const maxTokens = getMaxTokensFromProfile(modelProfile);
- const isGPT5 = isGPT5Model(model);
- const opts = {
- model,
- ...isGPT5 ? { max_completion_tokens: maxTokens } : { max_tokens: maxTokens },
- messages: [...openaiSystem, ...openaiMessages],
- temperature: isGPT5 ? 1 : MAIN_QUERY_TEMPERATURE
- };
- if (config.stream) {
- ;
- opts.stream = true;
- opts.stream_options = {
- include_usage: true
+ response = await withRetry(
+ async (attempt) => {
+ attemptNumber = attempt;
+ start = Date.now();
+ const maxTokens = getMaxTokensFromProfile(modelProfile);
+ const isGPT5 = isGPT5Model(model);
+ const opts = {
+ model,
+ ...isGPT5 ? { max_completion_tokens: maxTokens } : { max_tokens: maxTokens },
+ messages: [...openaiSystem, ...openaiMessages],
+ temperature: isGPT5 ? 1 : MAIN_QUERY_TEMPERATURE
  };
- }
- if (toolSchemas.length > 0) {
- opts.tools = toolSchemas;
- opts.tool_choice = "auto";
- }
- const reasoningEffort = await getReasoningEffort(modelProfile, messages);
- if (reasoningEffort) {
- opts.reasoning_effort = reasoningEffort;
- }
- if (modelProfile && modelProfile.modelName) {
- debugLogger.api("USING_MODEL_PROFILE_PATH", {
- modelProfileName: modelProfile.modelName,
- modelName: modelProfile.modelName,
- provider: modelProfile.provider,
- baseURL: modelProfile.baseURL,
- apiKeyExists: !!modelProfile.apiKey,
- requestId: getCurrentRequest()?.id
- });
- const USE_NEW_ADAPTER_SYSTEM = process.env.USE_NEW_ADAPTERS !== "false";
- if (USE_NEW_ADAPTER_SYSTEM) {
- const adapter = ModelAdapterFactory.createAdapter(modelProfile);
- const unifiedParams = {
- messages: openaiMessages,
- systemPrompt: openaiSystem.map((s) => s.content),
- tools,
- maxTokens: getMaxTokensFromProfile(modelProfile),
- stream: config.stream,
- reasoningEffort,
- temperature: isGPT5Model(model) ? 1 : MAIN_QUERY_TEMPERATURE,
- previousResponseId: toolUseContext?.responseState?.previousResponseId,
- verbosity: "high"
- // High verbosity for coding tasks
+ if (config.stream) {
+ ;
+ opts.stream = true;
+ opts.stream_options = {
+ include_usage: true
  };
- const request = adapter.createRequest(unifiedParams);
- if (ModelAdapterFactory.shouldUseResponsesAPI(modelProfile)) {
- const { callGPT5ResponsesAPI } = await import("./openai.js");
- const response2 = await callGPT5ResponsesAPI(modelProfile, request, signal);
- const unifiedResponse = adapter.parseResponse(response2);
- const apiMessage = {
- role: "assistant",
- content: unifiedResponse.content,
- tool_calls: unifiedResponse.toolCalls,
- usage: {
- prompt_tokens: unifiedResponse.usage.promptTokens,
- completion_tokens: unifiedResponse.usage.completionTokens
- }
- };
- const assistantMsg = {
- type: "assistant",
- message: apiMessage,
- costUSD: 0,
- // Will be calculated later
- durationMs: Date.now() - start,
- uuid: `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
- responseId: unifiedResponse.responseId
- // For state management
+ }
+ if (toolSchemas.length > 0) {
+ opts.tools = toolSchemas;
+ opts.tool_choice = "auto";
+ }
+ const reasoningEffort = await getReasoningEffort(modelProfile, messages);
+ if (reasoningEffort) {
+ opts.reasoning_effort = reasoningEffort;
+ }
+ if (modelProfile && modelProfile.modelName) {
+ debugLogger.api("USING_MODEL_PROFILE_PATH", {
+ modelProfileName: modelProfile.modelName,
+ modelName: modelProfile.modelName,
+ provider: modelProfile.provider,
+ baseURL: modelProfile.baseURL,
+ apiKeyExists: !!modelProfile.apiKey,
+ requestId: getCurrentRequest()?.id
+ });
+ const USE_NEW_ADAPTER_SYSTEM = process.env.USE_NEW_ADAPTERS !== "false";
+ if (USE_NEW_ADAPTER_SYSTEM) {
+ const adapter = ModelAdapterFactory.createAdapter(modelProfile);
+ const unifiedParams = {
+ messages: openaiMessages,
+ systemPrompt: openaiSystem.map((s) => s.content),
+ tools,
+ maxTokens: getMaxTokensFromProfile(modelProfile),
+ stream: config.stream,
+ reasoningEffort,
+ temperature: isGPT5Model(model) ? 1 : MAIN_QUERY_TEMPERATURE,
+ previousResponseId: toolUseContext?.responseState?.previousResponseId,
+ verbosity: "high"
+ // High verbosity for coding tasks
  };
- return assistantMsg;
+ const request = adapter.createRequest(unifiedParams);
+ if (ModelAdapterFactory.shouldUseResponsesAPI(modelProfile)) {
+ const { callGPT5ResponsesAPI } = await import("./openai.js");
+ const response2 = await callGPT5ResponsesAPI(
+ modelProfile,
+ request,
+ signal
+ );
+ const unifiedResponse = adapter.parseResponse(response2);
+ const apiMessage = {
+ role: "assistant",
+ content: unifiedResponse.content,
+ tool_calls: unifiedResponse.toolCalls,
+ usage: {
+ prompt_tokens: unifiedResponse.usage.promptTokens,
+ completion_tokens: unifiedResponse.usage.completionTokens
+ }
+ };
+ const assistantMsg = {
+ type: "assistant",
+ message: apiMessage,
+ costUSD: 0,
+ // Will be calculated later
+ durationMs: Date.now() - start,
+ uuid: `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
+ responseId: unifiedResponse.responseId
+ // For state management
+ };
+ return assistantMsg;
+ } else {
+ const s = await getCompletionWithProfile(
+ modelProfile,
+ request,
+ 0,
+ 10,
+ signal
+ );
+ let finalResponse;
+ if (config.stream) {
+ finalResponse = await handleMessageStream(
+ s,
+ signal
+ );
+ } else {
+ finalResponse = s;
+ }
+ const r = convertOpenAIResponseToAnthropic(finalResponse, tools);
+ return r;
+ }
  } else {
- const s = await getCompletionWithProfile(modelProfile, request, 0, 10, signal);
+ const completionFunction = isGPT5Model(modelProfile.modelName) ? getGPT5CompletionWithProfile : getCompletionWithProfile;
+ const s = await completionFunction(
+ modelProfile,
+ opts,
+ 0,
+ 10,
+ signal
+ );
  let finalResponse;
- if (config.stream) {
- finalResponse = await handleMessageStream(s, signal);
+ if (opts.stream) {
+ finalResponse = await handleMessageStream(
+ s,
+ signal
+ );
  } else {
  finalResponse = s;
  }
@@ -1421,39 +1430,29 @@ async function queryOpenAI(messages, systemPrompt, maxThinkingTokens, tools, sig
  return r;
  }
  } else {
- const completionFunction = isGPT5Model(modelProfile.modelName) ? getGPT5CompletionWithProfile : getCompletionWithProfile;
- const s = await completionFunction(modelProfile, opts, 0, 10, signal);
- let finalResponse;
- if (opts.stream) {
- finalResponse = await handleMessageStream(s, signal);
- } else {
- finalResponse = s;
- }
- const r = convertOpenAIResponseToAnthropic(finalResponse, tools);
- return r;
+ debugLogger.api("USING_LEGACY_PATH", {
+ modelProfileExists: !!modelProfile,
+ modelProfileId: modelProfile?.modelName,
+ modelNameExists: !!modelProfile?.modelName,
+ fallbackModel: "main",
+ actualModel: model,
+ requestId: getCurrentRequest()?.id
+ });
+ const errorDetails = {
+ modelProfileExists: !!modelProfile,
+ modelProfileId: modelProfile?.modelName,
+ modelNameExists: !!modelProfile?.modelName,
+ requestedModel: model,
+ requestId: getCurrentRequest()?.id
+ };
+ debugLogger.error("NO_VALID_MODEL_PROFILE", errorDetails);
+ throw new Error(
+ `No valid ModelProfile available for model: ${model}. Please configure model through /model command. Debug: ${JSON.stringify(errorDetails)}`
+ );
  }
- } else {
- debugLogger.api("USING_LEGACY_PATH", {
- modelProfileExists: !!modelProfile,
- modelProfileId: modelProfile?.modelName,
- modelNameExists: !!modelProfile?.modelName,
- fallbackModel: "main",
- actualModel: model,
- requestId: getCurrentRequest()?.id
- });
- const errorDetails = {
- modelProfileExists: !!modelProfile,
- modelProfileId: modelProfile?.modelName,
- modelNameExists: !!modelProfile?.modelName,
- requestedModel: model,
- requestId: getCurrentRequest()?.id
- };
- debugLogger.error("NO_VALID_MODEL_PROFILE", errorDetails);
- throw new Error(
- `No valid ModelProfile available for model: ${model}. Please configure model through /model command. Debug: ${JSON.stringify(errorDetails)}`
- );
- }
- }, { signal });
+ },
+ { signal }
+ );
  } catch (error) {
  logError(error);
  return getAssistantMessageFromError(error);
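Each hunk above wraps its provider call in withRetry(fn, { signal }), passing an attempt counter to the callback, but the helper itself is not part of this diff. The sketch below only illustrates the calling convention implied by the surrounding code (MAX_RETRIES, getRetryDelay, abortableDelay); which errors are retried, and how a Retry-After header would be read, are assumptions rather than the package's actual implementation.

    // Rough sketch of the withRetry(fn, { signal }) contract used above; not the
    // real helper from claude.js. The retry-after lookup is illustrative only.
    async function withRetry(fn, { signal } = {}) {
      let lastError;
      for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
        try {
          // The callback receives the current attempt number, as in the hunks above.
          return await fn(attempt);
        } catch (error) {
          lastError = error;
          if (signal?.aborted || attempt === MAX_RETRIES) throw error;
          const retryAfterHeader = error?.headers?.["retry-after"]; // assumed shape
          // Wait before retrying, but bail out immediately if the caller aborts.
          await abortableDelay(getRetryDelay(attempt, retryAfterHeader), signal);
        }
      }
      throw lastError;
    }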