snow-ai 0.7.14 → 0.7.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/bundle/cli.mjs CHANGED
@@ -46426,6 +46426,40 @@ var init_backend = __esm({
  }
  });

+ // dist/utils/commands/branch.js
+ var branch_exports = {};
+ __export(branch_exports, {
+ default: () => branch_default
+ });
+ var branch_default;
+ var init_branch = __esm({
+ "dist/utils/commands/branch.js"() {
+ "use strict";
+ init_commandExecutor();
+ registerCommand("branch", {
+ execute: (args2) => {
+ const branchName = (args2 == null ? void 0 : args2.trim()) || void 0;
+ return {
+ success: true,
+ action: "forkSession",
+ prompt: branchName
+ };
+ }
+ });
+ registerCommand("fork", {
+ execute: (args2) => {
+ const branchName = (args2 == null ? void 0 : args2.trim()) || void 0;
+ return {
+ success: true,
+ action: "forkSession",
+ prompt: branchName
+ };
+ }
+ });
+ branch_default = {};
+ }
+ });
+
  // dist/utils/commands/clear.js
  var clear_exports = {};
  __export(clear_exports, {
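Note: the new /branch and /fork commands are thin aliases over the same executor: each trims its argument and returns a forkSession action, passing the optional name through as prompt. A minimal sketch of what the executor above yields (the inputs are illustrative):

  // "/branch experiment" -> { success: true, action: "forkSession", prompt: "experiment" }
  // "/fork"              -> { success: true, action: "forkSession", prompt: undefined }
  const execute = (args2) => {
    const branchName = (args2 == null ? void 0 : args2.trim()) || void 0;
    return { success: true, action: "forkSession", prompt: branchName };
  };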
@@ -54244,7 +54278,7 @@ async function* createStreamingChatCompletion(options3, abortSignal, onRetry) {
  }
  customSystemPromptContent || (customSystemPromptContent = getCustomSystemPromptForConfig(config3));
  yield* withRetryGenerator(async function* () {
- var _a20, _b14, _c6, _d4, _e2, _f, _g;
+ var _a20, _b14, _c6, _d4, _e2, _f, _g, _h;
  const requestBody = {
  model: options3.model || config3.advancedModel,
  messages: convertToOpenAIMessages(
@@ -54264,6 +54298,12 @@ async function* createStreamingChatCompletion(options3, abortSignal, onRetry) {
  tools: options3.tools,
  tool_choice: options3.tool_choice
  };
+ if (((_a20 = config3.chatThinking) == null ? void 0 : _a20.enabled) && !options3.disableThinking) {
+ requestBody["thinking"] = { type: "enabled" };
+ if (config3.chatThinking.reasoning_effort) {
+ requestBody["reasoning_effort"] = config3.chatThinking.reasoning_effort;
+ }
+ }
  const url2 = `${config3.baseUrl}/chat/completions`;
  const customHeaders = options3.customHeaders || getCustomHeadersForConfig(config3);
  const fetchOptions = addProxyToFetchOptions(url2, {
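Note: when config3.chatThinking.enabled is set and the caller has not passed disableThinking, the chat/completions body gains two extra top-level fields. A sketch of the resulting payload, assuming a DeepSeek-style endpoint (the model name is illustrative):

  const requestBody = {
    model: "deepseek-reasoner",      // options3.model || config3.advancedModel
    messages: [],                    // convertToOpenAIMessages(...) output
    thinking: { type: "enabled" },   // added by the new chatThinking branch
    reasoning_effort: "high"         // only when chatThinking.reasoning_effort is set
  };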
@@ -54284,7 +54324,7 @@ async function* createStreamingChatCompletion(options3, abortSignal, onRetry) {
  const errorMessage = error40 instanceof Error ? error40.message : String(error40);
  throw new Error(`OpenAI API fetch failed: ${errorMessage}
  URL: ${url2}
- Model: ${requestBody.model}
+ Model: ${requestBody["model"]}
  Error type: ${error40 instanceof TypeError ? "Network/Connection Error" : "Unknown Error"}
  Possible causes: Network unavailable, DNS resolution failed, proxy issues, or server unreachable`);
  }
@@ -54310,17 +54350,17 @@ Possible causes: Network unavailable, DNS resolution failed, proxy issues, or se
  completion_tokens: usageValue.completion_tokens || 0,
  total_tokens: usageValue.total_tokens || 0,
  // OpenAI Chat API: cached_tokens in prompt_tokens_details
- cached_tokens: (_a20 = usageValue.prompt_tokens_details) == null ? void 0 : _a20.cached_tokens
+ cached_tokens: (_b14 = usageValue.prompt_tokens_details) == null ? void 0 : _b14.cached_tokens
  };
  }
- const choice = (_b14 = chunk2.choices) == null ? void 0 : _b14[0];
+ const choice = (_c6 = chunk2.choices) == null ? void 0 : _c6[0];
  if (!choice) {
  if (chunk2.usage) {
  break;
  }
  continue;
  }
- const content = (_c6 = choice.delta) == null ? void 0 : _c6.content;
+ const content = (_d4 = choice.delta) == null ? void 0 : _d4.content;
  if (content) {
  contentBuffer += content;
  yield {
@@ -54328,7 +54368,7 @@ Possible causes: Network unavailable, DNS resolution failed, proxy issues, or se
  content
  };
  }
- const reasoningContent = (_d4 = choice.delta) == null ? void 0 : _d4.reasoning_content;
+ const reasoningContent = (_e2 = choice.delta) == null ? void 0 : _e2.reasoning_content;
  if (reasoningContent) {
  reasoningContentBuffer += reasoningContent;
  if (!reasoningStarted) {
@@ -54342,7 +54382,7 @@ Possible causes: Network unavailable, DNS resolution failed, proxy issues, or se
  delta: reasoningContent
  };
  }
- const deltaToolCalls = (_e2 = choice.delta) == null ? void 0 : _e2.tool_calls;
+ const deltaToolCalls = (_f = choice.delta) == null ? void 0 : _f.tool_calls;
  if (deltaToolCalls) {
  hasToolCalls = true;
  for (const deltaCall of deltaToolCalls) {
@@ -54361,11 +54401,11 @@ Possible causes: Network unavailable, DNS resolution failed, proxy issues, or se
  toolCallsBuffer[index].id = deltaCall.id;
  }
  let deltaText = "";
- if ((_f = deltaCall.function) == null ? void 0 : _f.name) {
+ if ((_g = deltaCall.function) == null ? void 0 : _g.name) {
  toolCallsBuffer[index].function.name += deltaCall.function.name;
  deltaText += deltaCall.function.name;
  }
- if ((_g = deltaCall.function) == null ? void 0 : _g.arguments) {
+ if ((_h = deltaCall.function) == null ? void 0 : _h.arguments) {
  toolCallsBuffer[index].function.arguments += deltaCall.function.arguments;
  deltaText += deltaCall.function.arguments;
  }
@@ -177952,6 +177992,8 @@ var init_en = __esm({
  responsesReasoningEffort: "Responses Reasoning Effort:",
  responsesVerbosity: "Responses Verbosity:",
  responsesFastMode: "Responses Fast Mode (priority):",
+ chatThinkingEnabled: "Chat Thinking (DeepSeek):",
+ chatReasoningEffort: "Chat Reasoning Effort:",
  advancedModel: "Advanced Model(Type to search):",
  basicModel: "Basic Model(Type to search):",
  maxContextTokens: "Max Context Tokens:",
@@ -178333,6 +178375,7 @@ var init_en = __esm({
  toolSearch: "Toggle Tool Search (progressive tool loading). Enabled by default to save context",
  hybridCompress: "Toggle Hybrid Compress mode (AI summary + smart truncation for /compact and auto-compress)",
  team: "Toggle Agent Team mode - orchestrate multiple agents working together in independent Git worktrees",
+ branch: "Fork current conversation into a new branch",
  worktree: "Open Git branch management panel for switching, creating and deleting branches",
  diff: "Review file changes from a conversation in IDE diff view",
  connect: "Connect to a Snow Instance for AI processing",
@@ -178372,6 +178415,11 @@ var init_en = __esm({
  unmatchedIDEs: "Found {count} other running IDE(s). However, their workspace/project directories do not match the current cwd.",
  connectedTo: "Connected to {label}",
  connectFailed: "Failed to connect to IDE: {error}"
+ },
+ branchFork: {
+ noActiveSession: "No active session to fork.",
+ success: "Conversation forked into branch {name}. To return to the original session:\n/resume {originalId}",
+ failed: "Failed to fork session"
  }
  }
  },
@@ -179573,6 +179621,8 @@ var init_zh = __esm({
  responsesReasoningEffort: "Responses \u63A8\u7406\u5F3A\u5EA6:",
  responsesVerbosity: "Responses \u8F93\u51FA\u8BE6\u7EC6\u5EA6:",
  responsesFastMode: "Responses Fast (priority):",
+ chatThinkingEnabled: "\u542F\u7528 Chat \u601D\u8003 (DeepSeek):",
+ chatReasoningEffort: "Chat \u601D\u8003\u5F3A\u5EA6:",
  advancedModel: "\u9AD8\u7EA7\u6A21\u578B(\u952E\u5165\u53EF\u641C\u7D22):",
  basicModel: "\u57FA\u7840\u6A21\u578B(\u952E\u5165\u53EF\u641C\u7D22):",
  maxContextTokens: "\u6700\u5927\u4E0A\u4E0B\u6587\u4EE4\u724C:",
@@ -179954,6 +180004,7 @@ var init_zh = __esm({
  toolSearch: "\u5207\u6362\u5DE5\u5177\u641C\u7D22\uFF08\u6E10\u8FDB\u5F0F\u5DE5\u5177\u52A0\u8F7D\uFF09\u3002\u9ED8\u8BA4\u542F\u7528\u4EE5\u8282\u7701\u4E0A\u4E0B\u6587",
  hybridCompress: "\u5207\u6362\u6DF7\u5408\u538B\u7F29\u6A21\u5F0F\uFF08AI \u6458\u8981 + \u667A\u80FD\u622A\u65AD\uFF0C\u7528\u4E8E /compact \u548C\u81EA\u52A8\u538B\u7F29\uFF09",
  team: "\u5207\u6362 Agent Team \u6A21\u5F0F - \u534F\u8C03\u591A\u4E2A\u4EE3\u7406\u5728\u72EC\u7ACB Git Worktree \u4E2D\u5E76\u884C\u5DE5\u4F5C",
+ branch: "\u5C06\u5F53\u524D\u5BF9\u8BDD\u5206\u53C9\u4E3A\u65B0\u5206\u652F\uFF0C\u53EF\u7528 /resume \u8FD4\u56DE\u539F\u4F1A\u8BDD",
  worktree: "\u6253\u5F00 Git \u5206\u652F\u7BA1\u7406\u9762\u677F\uFF0C\u652F\u6301\u5207\u6362\u3001\u65B0\u5EFA\u548C\u5220\u9664\u5206\u652F",
  diff: "\u5728 IDE \u4E2D\u67E5\u770B\u5BF9\u8BDD\u7684\u6587\u4EF6\u4FEE\u6539 Diff",
  connect: "\u8FDE\u63A5\u5230 Snow Instance \u8FDB\u884C AI \u5904\u7406",
@@ -179993,6 +180044,11 @@ var init_zh = __esm({
  unmatchedIDEs: "\u53D1\u73B0 {count} \u4E2A\u5176\u4ED6\u8FD0\u884C\u4E2D\u7684 IDE\uFF0C\u4F46\u5176\u5DE5\u4F5C\u533A/\u9879\u76EE\u76EE\u5F55\u4E0E\u5F53\u524D\u5DE5\u4F5C\u76EE\u5F55\u4E0D\u5339\u914D\u3002",
  connectedTo: "\u5DF2\u8FDE\u63A5\u5230 {label}",
  connectFailed: "\u8FDE\u63A5 IDE \u5931\u8D25\uFF1A{error}"
+ },
+ branchFork: {
+ noActiveSession: "\u6CA1\u6709\u53EF\u5206\u53C9\u7684\u6D3B\u8DC3\u4F1A\u8BDD\u3002",
+ success: "\u5BF9\u8BDD\u5DF2\u5206\u53C9\u4E3A\u5206\u652F {name}\u3002\u8FD4\u56DE\u539F\u4F1A\u8BDD\u8BF7\u6267\u884C:\n/resume {originalId}",
+ failed: "\u4F1A\u8BDD\u5206\u53C9\u5931\u8D25"
  }
  }
  },
@@ -181193,6 +181249,8 @@ var init_zh_TW = __esm({
  responsesReasoningEffort: "Responses \u63A8\u7406\u5F37\u5EA6:",
  responsesVerbosity: "Responses \u8F38\u51FA\u8A73\u7D30\u5EA6:",
  responsesFastMode: "Responses Fast (priority):",
+ chatThinkingEnabled: "\u555F\u7528 Chat \u601D\u8003 (DeepSeek):",
+ chatReasoningEffort: "Chat \u601D\u8003\u5F37\u5EA6:",
  advancedModel: "\u9032\u968E\u6A21\u578B(\u8F38\u5165\u5F8C\u53EF\u4EE5\u641C\u5C0B):",
  basicModel: "\u57FA\u790E\u6A21\u578B(\u8F38\u5165\u5F8C\u53EF\u4EE5\u641C\u5C0B):",
  maxContextTokens: "\u6700\u5927\u4E0A\u4E0B\u6587\u4EE4\u724C:",
@@ -181574,6 +181632,7 @@ var init_zh_TW = __esm({
  toolSearch: "\u5207\u63DB\u5DE5\u5177\u641C\u5C0B\uFF08\u6F38\u9032\u5F0F\u5DE5\u5177\u8F09\u5165\uFF09\u3002\u9810\u8A2D\u555F\u7528\u4EE5\u7BC0\u7701\u4E0A\u4E0B\u6587",
  hybridCompress: "\u5207\u63DB\u6DF7\u5408\u58D3\u7E2E\u6A21\u5F0F\uFF08AI \u6458\u8981 + \u667A\u6167\u622A\u65B7\uFF0C\u7528\u65BC /compact \u548C\u81EA\u52D5\u58D3\u7E2E\uFF09",
  team: "\u5207\u63DB Agent Team \u6A21\u5F0F - \u5354\u8ABF\u591A\u500B\u4EE3\u7406\u5728\u7368\u7ACB Git Worktree \u4E2D\u4E26\u884C\u5DE5\u4F5C",
+ branch: "\u5C07\u76EE\u524D\u5C0D\u8A71\u5206\u53C9\u70BA\u65B0\u5206\u652F\uFF0C\u53EF\u7528 /resume \u8FD4\u56DE\u539F\u6703\u8A71",
  worktree: "\u958B\u555F Git \u5206\u652F\u7BA1\u7406\u9762\u677F\uFF0C\u652F\u63F4\u5207\u63DB\u3001\u65B0\u5EFA\u548C\u522A\u9664\u5206\u652F",
  diff: "\u5728 IDE \u4E2D\u67E5\u770B\u5C0D\u8A71\u7684\u6A94\u6848\u4FEE\u6539 Diff",
  connect: "\u9023\u63A5\u5230 Snow Instance \u9032\u884C AI \u8655\u7406",
@@ -181613,6 +181672,11 @@ var init_zh_TW = __esm({
  unmatchedIDEs: "\u767C\u73FE {count} \u500B\u5176\u4ED6\u57F7\u884C\u4E2D\u7684 IDE\uFF0C\u4F46\u5176\u5DE5\u4F5C\u5340/\u5C08\u6848\u76EE\u9304\u8207\u76EE\u524D\u5DE5\u4F5C\u76EE\u9304\u4E0D\u76F8\u7B26\u3002",
  connectedTo: "\u5DF2\u9023\u7DDA\u81F3 {label}",
  connectFailed: "\u9023\u7DDA IDE \u5931\u6557\uFF1A{error}"
+ },
+ branchFork: {
+ noActiveSession: "\u6C92\u6709\u53EF\u5206\u53C9\u7684\u6D3B\u8E8D\u6703\u8A71\u3002",
+ success: "\u5C0D\u8A71\u5DF2\u5206\u53C9\u70BA\u5206\u652F {name}\u3002\u8FD4\u56DE\u539F\u6703\u8A71\u8ACB\u57F7\u884C:\n/resume {originalId}",
+ failed: "\u6703\u8A71\u5206\u53C9\u5931\u6557"
  }
  }
  },
@@ -465822,7 +465886,7 @@ function useCommandHandler(options3) {
  const { stdout } = use_stdout_default();
  const { t } = useI18n();
  const handleCommandExecution = (0, import_react27.useCallback)(async (commandName, result2) => {
- var _a20, _b14;
+ var _a20, _b14, _c6, _d4, _e2;
  if (commandName === "compact" && result2.success && result2.action === "compact") {
  options3.setIsCompressing(true);
  options3.setCompressionError(null);
@@ -466093,6 +466157,60 @@ function useCommandHandler(options3) {
  commandName
  };
  options3.setMessages((prev) => [...prev, commandMessage]);
+ } else if (result2.success && result2.action === "forkSession") {
+ const currentSession = sessionManager.getCurrentSession();
+ if (!currentSession) {
+ const errorMessage = {
+ role: "command",
+ content: ((_c6 = t.commandPanel.commandOutput.branchFork) == null ? void 0 : _c6.noActiveSession) || "No active session to fork.",
+ commandName
+ };
+ options3.setMessages((prev) => [...prev, errorMessage]);
+ return;
+ }
+ try {
+ await sessionManager.saveSession(currentSession);
+ const forkedSession = await sessionManager.createNewSession(false, true);
+ const branchName = result2.prompt || void 0;
+ forkedSession.messages = currentSession.messages.map((msg) => ({
+ ...msg
+ }));
+ forkedSession.messageCount = currentSession.messageCount;
+ forkedSession.title = branchName ? `${currentSession.title} [${branchName}]` : currentSession.title;
+ forkedSession.summary = currentSession.summary;
+ forkedSession.branchedFrom = currentSession.id;
+ forkedSession.branchName = branchName;
+ forkedSession.updatedAt = Date.now();
+ await sessionManager.saveSession(forkedSession);
+ try {
+ const { getTodoService: getTodoService2 } = await Promise.resolve().then(() => (init_mcpToolsManager(), mcpToolsManager_exports));
+ const todoService2 = getTodoService2();
+ await todoService2.copyTodoList(currentSession.id, forkedSession.id);
+ } catch {
+ }
+ if (options3.onResumeSessionById) {
+ await options3.onResumeSessionById(forkedSession.id);
+ } else {
+ sessionManager.setCurrentSession(forkedSession);
+ }
+ const displayName = branchName ? `"${branchName}"` : forkedSession.id.slice(0, 8);
+ const originalId = currentSession.id;
+ const successContent = (((_d4 = t.commandPanel.commandOutput.branchFork) == null ? void 0 : _d4.success) || "Conversation forked into branch {name}. To return to the original session:\n/resume {originalId}").replace("{name}", displayName).replace("{originalId}", originalId);
+ const commandMessage = {
+ role: "command",
+ content: successContent,
+ commandName
+ };
+ options3.setMessages((prev) => [...prev, commandMessage]);
+ } catch (error40) {
+ const errorMsg = error40 instanceof Error ? error40.message : "Unknown error";
+ const errorMessage = {
+ role: "command",
+ content: `${((_e2 = t.commandPanel.commandOutput.branchFork) == null ? void 0 : _e2.failed) || "Failed to fork session"}: ${errorMsg}`,
+ commandName
+ };
+ options3.setMessages((prev) => [...prev, errorMessage]);
+ }
  } else if (result2.success && result2.action === "showNewPromptPanel") {
  options3.setShowNewPromptPanel(true);
  } else if (result2.success && result2.action === "showSubAgentDepthPanel") {
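Note: end to end, the handler above saves the current session, clones its messages, title, summary and todo list into a fresh session, then resumes the fork. A hypothetical terminal exchange (the session id is illustrative):

  > /branch experiment
  Conversation forked into branch "experiment". To return to the original session:
  /resume 3f9c2a1e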
@@ -469968,6 +470086,7 @@ var init_utils2 = __esm({
  init_addDir();
  init_agent();
  init_backend();
+ init_branch();
  init_clear();
  init_codebase();
  init_compact2();
@@ -476442,7 +476561,8 @@ var init_types5 = __esm({
  "geminiThinkingLevel",
  "responsesReasoningEffort",
  "responsesVerbosity",
- "anthropicSpeed"
+ "anthropicSpeed",
+ "chatReasoningEffort"
  ];
  isSelectField = (field) => SELECT_FIELDS.includes(field);
  }
@@ -476620,6 +476740,8 @@ function useConfigState() {
  const [responsesVerbosity, setResponsesVerbosity] = (0, import_react66.useState)("medium");
  const [responsesFastMode, setResponsesFastMode] = (0, import_react66.useState)(false);
  const [anthropicSpeed, setAnthropicSpeed] = (0, import_react66.useState)(void 0);
+ const [chatThinkingEnabled, setChatThinkingEnabled] = (0, import_react66.useState)(false);
+ const [chatReasoningEffort, setChatReasoningEffort] = (0, import_react66.useState)("high");
  const [advancedModel, setAdvancedModel] = (0, import_react66.useState)("");
  const [basicModel, setBasicModel] = (0, import_react66.useState)("");
  const [maxContextTokens, setMaxContextTokens] = (0, import_react66.useState)(4e3);
@@ -476683,6 +476805,9 @@ function useConfigState() {
  "responsesReasoningEffort",
  "responsesVerbosity",
  "responsesFastMode"
+ ] : requestMethod === "chat" ? [
+ "chatThinkingEnabled",
+ ...chatThinkingEnabled ? ["chatReasoningEffort"] : []
  ] : [],
  "advancedModel",
  "basicModel",
@@ -476730,6 +476855,9 @@ function useConfigState() {
  if (requestMethod !== "responses" && (currentField === "responsesReasoningEnabled" || currentField === "responsesReasoningEffort" || currentField === "responsesVerbosity" || currentField === "responsesFastMode")) {
  setCurrentField("advancedModel");
  }
+ if (requestMethod !== "chat" && (currentField === "chatThinkingEnabled" || currentField === "chatReasoningEffort")) {
+ setCurrentField("advancedModel");
+ }
  }, [requestMethod, currentField]);
  (0, import_react66.useEffect)(() => {
  if (!enableAutoCompress && currentField === "autoCompressThreshold") {
@@ -476748,7 +476876,7 @@ function useConfigState() {
  supportsXHigh
  ]);
  const loadProfilesAndConfig = () => {
- var _a20, _b14, _c6, _d4, _e2, _f, _g, _h, _i;
+ var _a20, _b14, _c6, _d4, _e2, _f, _g, _h, _i, _j, _k;
  const loadedProfiles = getAllProfiles();
  setProfiles(loadedProfiles);
  const config3 = getOpenAiConfig();
@@ -476774,6 +476902,8 @@ function useConfigState() {
  setResponsesVerbosity(config3.responsesVerbosity || "medium");
  setResponsesFastMode(config3.responsesFastMode || false);
  setAnthropicSpeed(config3.anthropicSpeed);
+ setChatThinkingEnabled(((_j = config3.chatThinking) == null ? void 0 : _j.enabled) || false);
+ setChatReasoningEffort(((_k = config3.chatThinking) == null ? void 0 : _k.reasoning_effort) || "high");
  setAdvancedModel(config3.advancedModel || "");
  setBasicModel(config3.basicModel || "");
  setMaxContextTokens(config3.maxContextTokens || 4e3);
@@ -476856,6 +476986,8 @@ function useConfigState() {
  return responsesReasoningEffort;
  if (currentField === "anthropicSpeed")
  return anthropicSpeed || "";
+ if (currentField === "chatReasoningEffort")
+ return chatReasoningEffort;
  return "";
  };
  const getSystemPromptNameById = (id) => {
@@ -476981,6 +477113,7 @@ function useConfigState() {
  streamingDisplay,
  thinking: thinkingEnabled ? thinkingMode === "adaptive" ? { type: "adaptive", effort: thinkingEffort } : { type: "enabled", budget_tokens: thinkingBudgetTokens } : void 0,
  anthropicSpeed,
+ chatThinking: chatThinkingEnabled ? { enabled: true, reasoning_effort: chatReasoningEffort } : void 0,
  advancedModel,
  basicModel,
  maxContextTokens,
@@ -477125,6 +477258,7 @@ function useConfigState() {
  config3.responsesFastMode = responsesFastMode;
  config3.responsesVerbosity = responsesVerbosity;
  config3.anthropicSpeed = anthropicSpeed;
+ config3.chatThinking = chatThinkingEnabled ? { enabled: true, reasoning_effort: chatReasoningEffort } : void 0;
  await updateOpenAiConfig(config3);
  try {
  const fullConfig = {
@@ -477152,6 +477286,7 @@ function useConfigState() {
  responsesVerbosity,
  responsesFastMode,
  anthropicSpeed,
+ chatThinking: chatThinkingEnabled ? { enabled: true, reasoning_effort: chatReasoningEffort } : void 0,
  advancedModel,
  basicModel,
  maxContextTokens,
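Note: both save paths persist the same chatThinking fragment, and loadProfilesAndConfig reads it back with a "high" fallback. The stored shape, assuming the effort values offered by the select panel further down:

  // undefined when the toggle is off; otherwise:
  const chatThinking = {
    enabled: true,
    reasoning_effort: "high" // "low" | "medium" | "high" | "max"
  };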
@@ -477237,6 +477372,10 @@ function useConfigState() {
  setResponsesFastMode,
  anthropicSpeed,
  setAnthropicSpeed,
+ chatThinkingEnabled,
+ setChatThinkingEnabled,
+ chatReasoningEffort,
+ setChatReasoningEffort,
  // Model settings
  advancedModel,
  setAdvancedModel,
@@ -477559,7 +477698,12 @@ function useConfigInput(state, callbacks) {
  setShowThinking(false);
  } else if (currentField === "responsesFastMode") {
  setResponsesFastMode(!responsesFastMode);
- } else if (currentField === "anthropicCacheTTL" || currentField === "anthropicSpeed" || currentField === "thinkingMode" || currentField === "thinkingEffort" || currentField === "geminiThinkingLevel" || currentField === "responsesReasoningEffort" || currentField === "responsesVerbosity") {
+ } else if (currentField === "chatThinkingEnabled") {
+ const next = !state.chatThinkingEnabled;
+ state.setChatThinkingEnabled(next);
+ if (!next)
+ setShowThinking(false);
+ } else if (currentField === "anthropicCacheTTL" || currentField === "anthropicSpeed" || currentField === "thinkingMode" || currentField === "thinkingEffort" || currentField === "geminiThinkingLevel" || currentField === "responsesReasoningEffort" || currentField === "responsesVerbosity" || currentField === "chatReasoningEffort") {
  setIsEditing(true);
  } else if (currentField === "maxContextTokens" || currentField === "maxTokens" || currentField === "streamIdleTimeoutSec" || currentField === "toolResultTokenLimit" || currentField === "thinkingBudgetTokens" || currentField === "autoCompressThreshold") {
  setIsEditing(true);
@@ -477927,6 +478071,8 @@ function ConfigFieldRenderer({ field, state }) {
  responsesVerbosity,
  setResponsesVerbosity,
  responsesFastMode,
+ chatThinkingEnabled,
+ chatReasoningEffort,
  supportsXHigh,
  // Model settings
  advancedModel,
@@ -478529,6 +478675,44 @@ function ConfigFieldRenderer({ field, state }) {
  )
  )
  );
+ case "chatThinkingEnabled":
+ return import_react69.default.createElement(
+ Box_default,
+ { key: field, flexDirection: "column" },
+ import_react69.default.createElement(
+ Text,
+ { color: activeColor },
+ activeIndicator,
+ t.configScreen.chatThinkingEnabled
+ ),
+ import_react69.default.createElement(
+ Box_default,
+ { marginLeft: 3 },
+ import_react69.default.createElement(
+ Text,
+ { color: theme14.colors.menuSecondary },
+ chatThinkingEnabled ? t.configScreen.enabled : t.configScreen.disabled,
+ " ",
+ t.configScreen.toggleHint
+ )
+ )
+ );
+ case "chatReasoningEffort":
+ return import_react69.default.createElement(
+ Box_default,
+ { key: field, flexDirection: "column" },
+ import_react69.default.createElement(
+ Text,
+ { color: activeColor },
+ activeIndicator,
+ t.configScreen.chatReasoningEffort
+ ),
+ !isCurrentlyEditing && import_react69.default.createElement(
+ Box_default,
+ { marginLeft: 3 },
+ import_react69.default.createElement(Text, { color: theme14.colors.menuSecondary }, chatReasoningEffort.toUpperCase())
+ )
+ );
  case "advancedModel":
  return import_react69.default.createElement(
  Box_default,
@@ -478653,7 +478837,7 @@ var init_ConfigFieldRenderer = __esm({

  // dist/ui/pages/configScreen/ConfigSelectPanel.js
  function ConfigSelectPanel({ state }) {
- const { t, theme: theme14, currentField, setIsEditing, requestMethod, setRequestMethod, requestMethodOptions, thinkingMode, setThinkingMode, thinkingEffort, setThinkingEffort, geminiThinkingLevel, setGeminiThinkingLevel, responsesVerbosity, setResponsesVerbosity, anthropicSpeed, setAnthropicSpeed, getCustomHeadersSchemeSelectItems, getCustomHeadersSchemeSelectedValue, applyCustomHeadersSchemeSelectValue } = state;
+ const { t, theme: theme14, currentField, setIsEditing, requestMethod, setRequestMethod, requestMethodOptions, thinkingMode, setThinkingMode, thinkingEffort, setThinkingEffort, geminiThinkingLevel, setGeminiThinkingLevel, responsesVerbosity, setResponsesVerbosity, anthropicSpeed, setAnthropicSpeed, chatReasoningEffort, setChatReasoningEffort, getCustomHeadersSchemeSelectItems, getCustomHeadersSchemeSelectedValue, applyCustomHeadersSchemeSelectValue } = state;
  const getFieldLabel = () => {
  switch (currentField) {
  case "profile":
@@ -478676,6 +478860,8 @@ function ConfigSelectPanel({ state }) {
  return t.configScreen.responsesVerbosity.replace(":", "");
  case "anthropicSpeed":
  return t.configScreen.anthropicSpeed.replace(":", "");
+ case "chatReasoningEffort":
+ return t.configScreen.chatReasoningEffort.replace(":", "");
  case "systemPromptId":
  return t.configScreen.systemPrompt;
  case "customHeadersSchemeId":
@@ -478752,6 +478938,15 @@ function ConfigSelectPanel({ state }) {
  setResponsesVerbosity(item.value);
  setIsEditing(false);
  } }),
+ currentField === "chatReasoningEffort" && import_react70.default.createElement(ScrollableSelectInput, { items: [
+ { label: "LOW", value: "low" },
+ { label: "MEDIUM", value: "medium" },
+ { label: "HIGH", value: "high" },
+ { label: "MAX", value: "max" }
+ ], initialIndex: Math.max(0, ["low", "medium", "high", "max"].indexOf(chatReasoningEffort)), isFocused: true, onSelect: (item) => {
+ setChatReasoningEffort(item.value);
+ setIsEditing(false);
+ } }),
  currentField === "anthropicSpeed" && import_react70.default.createElement(ScrollableSelectInput, { items: [
  { label: t.configScreen.anthropicSpeedNotUsed, value: "__NONE__" },
  { label: t.configScreen.anthropicSpeedFast, value: "fast" },
@@ -562541,6 +562736,10 @@ function useCommandPanel(buffer, isProcessing = false) {
  const teammates = (0, import_react93.useSyncExternalStore)(subscribeToTeamTracker, getTeamSnapshot);
  const hasRunningAgentsOrTeam = subAgents.length > 0 || teammates.length > 0;
  const builtInCommands = (0, import_react93.useMemo)(() => [
+ {
+ name: "branch",
+ description: t.commandPanel.commands.branch || "Fork current conversation into a new branch"
+ },
  { name: "help", description: t.commandPanel.commands.help },
  { name: "clear", description: t.commandPanel.commands.clear },
  {
@@ -562813,6 +563012,8 @@ var init_useCommandPanel = __esm({
  subscribeToTeamTracker = (cb2) => teamTracker.subscribe(cb2);
  getTeamSnapshot = () => teamTracker.getRunningTeammates();
  COMMAND_ARGS_HINTS = {
+ branch: "[name]",
+ fork: "[name]",
  resume: "[sessionId]",
  reindex: "[-force]",
  codebase: "[on|off|status]",
@@ -565973,7 +566174,8 @@ ${selfDestruct.suggestion}`,
  }
  const result2 = await executeCommand2(cmd.command, cmd.timeout || 3e4);
  results.push(result2);
- const output2 = result2.success ? result2.stdout || "(no output)" : (() => {
+ const successOutput = [result2.stdout, result2.stderr].filter(Boolean).join("\n");
+ const output2 = result2.success ? successOutput || "(no output)" : (() => {
  const lines = [];
  lines.push("Command execution failed.");
  if (typeof result2.exitCode === "number") {
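Note: successful shell results now surface stderr alongside stdout, skipping empty streams. A quick illustration of the join:

  ["built ok", "warning: deprecated API"].filter(Boolean).join("\n");
  // => "built ok\nwarning: deprecated API"
  ["", ""].filter(Boolean).join("\n"); // => "", so output2 falls back to "(no output)"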
@@ -570096,7 +570298,7 @@ function TodoTree({ todos }) {
  setPageIndex((p) => Math.min(p, pageCount - 1));
  }, [pageCount]);
  use_input_default((_input, key) => {
- if (!key.tab || pageCount <= 1)
+ if (!key.tab || key.shift || pageCount <= 1)
  return;
  setPageIndex((p) => (p + 1) % pageCount);
  });
@@ -573961,6 +574163,44 @@ function ToolConfirmation({ toolName, toolArguments, allTools, onConfirm, onHook
  if (!vscodeConnection.isConnected()) {
  return;
  }
+ const computeHashlinePreview = (originalContent, operations) => {
+ if (!Array.isArray(operations) || operations.length === 0) {
+ return originalContent;
+ }
+ const mutableLines = originalContent.split("\n");
+ const parsed = operations.map((op2) => {
+ const startMatch = String(op2.startAnchor ?? "").match(/^(\d+):/);
+ const endMatch = String(op2.endAnchor ?? "").match(/^(\d+):/);
+ return {
+ type: op2.type,
+ content: op2.content ?? "",
+ startLine: startMatch ? parseInt(startMatch[1], 10) : 0,
+ endLine: endMatch ? parseInt(endMatch[1], 10) : 0
+ };
+ }).filter((op2) => op2.startLine > 0 && op2.endLine > 0).sort((a, b) => b.startLine - a.startLine);
+ for (const op2 of parsed) {
+ const newLines = op2.content.split("\n");
+ switch (op2.type) {
+ case "replace":
+ mutableLines.splice(op2.startLine - 1, op2.endLine - op2.startLine + 1, ...newLines);
+ break;
+ case "insert_after":
+ mutableLines.splice(op2.startLine, 0, ...newLines);
+ break;
+ case "delete":
+ mutableLines.splice(op2.startLine - 1, op2.endLine - op2.startLine + 1);
+ break;
+ }
+ }
+ return mutableLines.join("\n");
+ };
+ const computeReplaceEditPreview = (originalContent, searchContent, replaceContent) => {
+ const idx = originalContent.indexOf(searchContent);
+ if (idx !== -1) {
+ return originalContent.substring(0, idx) + replaceContent + originalContent.substring(idx + searchContent.length);
+ }
+ return originalContent;
+ };
  const showDiffForTool = (name, args2) => {
  const promises2 = [];
  try {
@@ -573969,7 +574209,8 @@ function ToolConfirmation({ toolName, toolArguments, allTools, onConfirm, onHook
  const filePath = typeof parsed.filePath === "string" ? parsed.filePath : null;
  if (filePath && fs56.existsSync(filePath)) {
  const originalContent = fs56.readFileSync(filePath, "utf-8");
- promises2.push(vscodeConnection.showDiff(filePath, originalContent, originalContent, "Hashline Edit").catch(() => {
+ const newContent = computeHashlinePreview(originalContent, parsed.operations);
+ promises2.push(vscodeConnection.showDiff(filePath, originalContent, newContent, "Hashline Edit").catch(() => {
  }));
  }
  }
@@ -573977,7 +574218,8 @@ function ToolConfirmation({ toolName, toolArguments, allTools, onConfirm, onHook
  const filePath = typeof parsed.filePath === "string" ? parsed.filePath : null;
  if (filePath && fs56.existsSync(filePath)) {
  const originalContent = fs56.readFileSync(filePath, "utf-8");
- promises2.push(vscodeConnection.showDiff(filePath, originalContent, originalContent, "Replace Edit").catch(() => {
+ const newContent = parsed.searchContent && parsed.replaceContent !== void 0 ? computeReplaceEditPreview(originalContent, parsed.searchContent, parsed.replaceContent) : originalContent;
+ promises2.push(vscodeConnection.showDiff(filePath, originalContent, newContent, "Replace Edit").catch(() => {
  }));
  }
  }
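Note: both helpers only shape the preview sent to the IDE; the edit itself is applied elsewhere. A hypothetical hashline operations payload, using the "N:" line anchors the regex above expects:

  const operations = [
    { type: "replace", startAnchor: "3: let total = 0", endAnchor: "3: let total = 0", content: "let total = 0;" },
    { type: "insert_after", startAnchor: "5: }", endAnchor: "5: }", content: "export { total };" }
  ];
  // computeHashlinePreview applies operations bottom-up (sorted by startLine
  // descending) so earlier splices do not shift later line numbers.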
@@ -577885,8 +578127,11 @@ var init_ModelsPanel = __esm({
  const [isVerbositySelecting, setIsVerbositySelecting] = (0, import_react156.useState)(false);
  const [anthropicSpeed, setAnthropicSpeed] = (0, import_react156.useState)(void 0);
  const [isSpeedSelecting, setIsSpeedSelecting] = (0, import_react156.useState)(false);
+ const [chatThinkingEnabled, setChatThinkingEnabled] = (0, import_react156.useState)(false);
+ const [chatReasoningEffort, setChatReasoningEffort] = (0, import_react156.useState)("high");
+ const [isChatEffortSelecting, setIsChatEffortSelecting] = (0, import_react156.useState)(false);
  (0, import_react156.useEffect)(() => {
- var _a20, _b14, _c6, _d4, _e2, _f, _g, _h, _i;
+ var _a20, _b14, _c6, _d4, _e2, _f, _g, _h, _i, _j, _k;
  if (!visible) {
  return;
  }
@@ -577923,6 +578168,9 @@ var init_ModelsPanel = __esm({
  setResponsesFastMode(cfg.responsesFastMode || false);
  setResponsesVerbosity(cfg.responsesVerbosity || "medium");
  setAnthropicSpeed(cfg.anthropicSpeed);
+ setChatThinkingEnabled(((_j = cfg.chatThinking) == null ? void 0 : _j.enabled) || false);
+ setChatReasoningEffort(((_k = cfg.chatThinking) == null ? void 0 : _k.reasoning_effort) || "high");
+ setIsChatEffortSelecting(false);
  }, [visible, advancedModel, basicModel]);
  (0, import_react156.useEffect)(() => {
  if (errorMessage) {
@@ -578023,12 +578271,16 @@ var init_ModelsPanel = __esm({
  if (requestMethod === "responses") {
  return responsesReasoningEnabled;
  }
+ if (requestMethod === "chat") {
+ return chatThinkingEnabled;
+ }
  return false;
  }, [
  requestMethod,
  thinkingEnabled,
  geminiThinkingEnabled,
- responsesReasoningEnabled
+ responsesReasoningEnabled,
+ chatThinkingEnabled
  ]);
  const thinkingStrengthValue = (0, import_react156.useMemo)(() => {
  if (requestMethod === "anthropic") {
@@ -578040,6 +578292,9 @@ var init_ModelsPanel = __esm({
  if (requestMethod === "responses") {
  return responsesReasoningEffort;
  }
+ if (requestMethod === "chat") {
+ return chatReasoningEffort.toUpperCase();
+ }
  return t.modelsPanel.notSupported;
  }, [
  requestMethod,
@@ -578048,6 +578303,7 @@ var init_ModelsPanel = __esm({
  thinkingEffort,
  geminiThinkingLevel,
  responsesReasoningEffort,
+ chatReasoningEffort,
  t
  ]);
  const applyShowThinking = (0, import_react156.useCallback)(async (next) => {
@@ -578099,6 +578355,10 @@ var init_ModelsPanel = __esm({
  });
  return;
  }
+ if (requestMethod === "chat") {
+ void applyChatThinkingEnabled(next);
+ return;
+ }
  setErrorMessage(t.modelsPanel.requestMethodNotSupportedForThinking.replace("{requestMethod}", requestMethod));
  } catch (err) {
  const message = err instanceof Error ? err.message : t.modelsPanel.saveFailed;
@@ -578188,6 +578448,38 @@ var init_ModelsPanel = __esm({
  setErrorMessage(message);
  }
  }, []);
+ const applyChatThinkingEnabled = (0, import_react156.useCallback)(async (next) => {
+ setErrorMessage("");
+ try {
+ if (!next && showThinking) {
+ setShowThinking(false);
+ await updateOpenAiConfig({ showThinking: false });
+ configEvents.emitConfigChange({ type: "showThinking", value: false });
+ }
+ setChatThinkingEnabled(next);
+ await updateOpenAiConfig({
+ chatThinking: next ? { enabled: true, reasoning_effort: chatReasoningEffort } : void 0
+ });
+ } catch (err) {
+ const message = err instanceof Error ? err.message : t.modelsPanel.saveFailed;
+ setErrorMessage(message);
+ }
+ }, [showThinking, chatReasoningEffort]);
+ const applyChatReasoningEffort = (0, import_react156.useCallback)(async (effort) => {
+ setErrorMessage("");
+ try {
+ setChatReasoningEffort(effort);
+ await updateOpenAiConfig({
+ chatThinking: {
+ enabled: chatThinkingEnabled,
+ reasoning_effort: effort
+ }
+ });
+ } catch (err) {
+ const message = err instanceof Error ? err.message : t.modelsPanel.saveFailed;
+ setErrorMessage(message);
+ }
+ }, [chatThinkingEnabled]);
  const applyAnthropicSpeed = (0, import_react156.useCallback)(async (next) => {
  setErrorMessage("");
  try {
@@ -578219,6 +578511,8 @@ var init_ModelsPanel = __esm({
  return 4;
  if (requestMethod === "gemini")
  return 2;
+ if (requestMethod === "chat")
+ return 2;
  return 1;
  }, [requestMethod]);
  const selectedIndex = Math.max(0, currentOptions.findIndex((option) => option.value === currentModel));
@@ -578253,6 +578547,10 @@ var init_ModelsPanel = __esm({
  setIsSpeedSelecting(false);
  return;
  }
+ if (isChatEffortSelecting) {
+ setIsChatEffortSelecting(false);
+ return;
+ }
  if (manualInputModeRef.current || manualInputMode) {
  manualInputModeRef.current = false;
  setManualInputMode(false);
@@ -578320,7 +578618,7 @@ var init_ModelsPanel = __esm({
  }
  return;
  }
- if (isThinkingModeSelecting || isGeminiLevelSelecting || isThinkingEffortSelecting || isVerbositySelecting || isSpeedSelecting) {
+ if (isThinkingModeSelecting || isGeminiLevelSelecting || isThinkingEffortSelecting || isVerbositySelecting || isSpeedSelecting || isChatEffortSelecting) {
  return;
  }
  if (key.tab) {
@@ -578348,6 +578646,8 @@ var init_ModelsPanel = __esm({
  setIsGeminiLevelSelecting(true);
  } else if (requestMethod === "responses") {
  setIsThinkingEffortSelecting(true);
+ } else if (requestMethod === "chat") {
+ setIsChatEffortSelecting(true);
  }
  } else if (thinkingFocusIndex === 3) {
  if (requestMethod === "anthropic") {
@@ -578454,7 +578754,7 @@ var init_ModelsPanel = __esm({
  showThinking ? "[\u2713]" : "[ ]"
  )
  ),
- (requestMethod === "anthropic" || requestMethod === "gemini" || requestMethod === "responses") && import_react156.default.createElement(
+ (requestMethod === "anthropic" || requestMethod === "gemini" || requestMethod === "responses" || requestMethod === "chat") && import_react156.default.createElement(
  Box_default,
  null,
  import_react156.default.createElement(
@@ -578486,7 +578786,7 @@ var init_ModelsPanel = __esm({
  thinkingMode === "tokens" ? t.configScreen.thinkingModeTokens : t.configScreen.thinkingModeAdaptive
  )
  ),
- (requestMethod === "anthropic" || requestMethod === "gemini" || requestMethod === "responses") && import_react156.default.createElement(
+ (requestMethod === "anthropic" || requestMethod === "gemini" || requestMethod === "responses" || requestMethod === "chat") && import_react156.default.createElement(
  Box_default,
  null,
  import_react156.default.createElement(
@@ -578647,7 +578947,20 @@ var init_ModelsPanel = __esm({
  setIsSpeedSelecting(false);
  } })
  ),
+ isChatEffortSelecting && import_react156.default.createElement(
+ Box_default,
+ { marginTop: 1 },
+ import_react156.default.createElement(ScrollableSelectInput, { items: [
+ { label: "LOW", value: "low" },
+ { label: "MEDIUM", value: "medium" },
+ { label: "HIGH", value: "high" },
+ { label: "MAX", value: "max" }
+ ], limit: 6, disableNumberShortcuts: true, initialIndex: Math.max(0, ["low", "medium", "high", "max"].indexOf(chatReasoningEffort)), isFocused: true, onSelect: (item) => {
+ void applyChatReasoningEffort(item.value);
+ setIsChatEffortSelecting(false);
+ } })
+ ),
- !thinkingInputMode && !isThinkingModeSelecting && !isGeminiLevelSelecting && !isThinkingEffortSelecting && !isVerbositySelecting && !isSpeedSelecting && import_react156.default.createElement(
+ !thinkingInputMode && !isThinkingModeSelecting && !isGeminiLevelSelecting && !isThinkingEffortSelecting && !isVerbositySelecting && !isSpeedSelecting && !isChatEffortSelecting && import_react156.default.createElement(
  Box_default,
  { marginTop: 1 },
  import_react156.default.createElement(Text, { dimColor: true, color: theme14.colors.menuSecondary }, t.modelsPanel.navigationHint)
@@ -584288,6 +584601,7 @@ function useChatScreenCommands(workingDirectory) {
  Promise.resolve().then(() => (init_codebase(), codebase_exports)),
  Promise.resolve().then(() => (init_addDir(), addDir_exports)),
  Promise.resolve().then(() => (init_permissions(), permissions_exports)),
+ Promise.resolve().then(() => (init_branch(), branch_exports)),
  Promise.resolve().then(() => (init_backend(), backend_exports)),
  Promise.resolve().then(() => (init_loop(), loop_exports)),
  Promise.resolve().then(() => (init_models(), models_exports)),
@@ -585060,6 +585374,13 @@ function ChatScreen({ autoResume, resumeSessionId: resumeSessionId2, enableYolo,
  setInputDraftContent(null);
  }
  }, [shouldShowFooter, setInputDraftContent]);
+ const remountKeyRef = (0, import_react179.useRef)(remountKey);
+ (0, import_react179.useEffect)(() => {
+ if (remountKey !== remountKeyRef.current) {
+ remountKeyRef.current = remountKey;
+ setInputDraftContent(null);
+ }
+ }, [remountKey, setInputDraftContent]);
  const footerContextUsage = streamingState.contextUsage ? {
  inputTokens: streamingState.contextUsage.prompt_tokens,
  maxContextTokens: getOpenAiConfig().maxContextTokens || 4e3,
@@ -601779,7 +602100,7 @@ var require_package3 = __commonJS({
  "package.json"(exports2, module2) {
  module2.exports = {
  name: "snow-ai",
- version: "0.7.14",
+ version: "0.7.15",
  description: "Agentic coding in your terminal",
  license: "MIT",
  bin: {
@@ -1,6 +1,6 @@
  {
  "name": "snow-ai",
- "version": "0.7.14",
+ "version": "0.7.15",
  "description": "Agentic coding in your terminal",
  "license": "MIT",
  "bin": {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "snow-ai",
- "version": "0.7.14",
+ "version": "0.7.15",
  "description": "Agentic coding in your terminal",
  "license": "MIT",
  "bin": {