henosis-cli 0.6.4__py3-none-any.whl → 0.6.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cli.py CHANGED
@@ -800,8 +800,9 @@ class ChatCLI:
  # - concise: only model (+thinking level when applicable) and context meter
  # - verbose: full details (current behavior)
  self.usage_info_mode: str = "verbose"
- # Reasoning effort selector for OpenAI reasoning models (low|medium|high). Default: medium
- self.reasoning_effort: str = "medium"
+ # Reasoning effort selector for OpenAI reasoning models (low|medium|high|xhigh). Default: medium
+ # Note: 'xhigh' is only applied by the server for models that support it (e.g., gpt-5.2* and gpt-5.1-codex-max).
+ self.reasoning_effort: str = "medium"
  # Retain provider-native tool results between turns (e.g., Kimi reasoning/tool messages)
  self.retain_native_tool_results: bool = False
  # Anthropic thinking-mode budget tokens (applies to '-thinking' models; None = server default)
@@ -1340,37 +1341,28 @@ class ChatCLI:
  # Match server chat_adapter PRICING_PER_MILLION (subset is fine; unknown -> 0)
  return {
  # OpenAI
- "gpt-5.1": {"input": 1.75, "output": 14.00, "provider": "openai"},
  "gpt-5.2": {"input": 2.00, "output": 14.25, "provider": "openai"},
- "gpt-5.1-codex": {"input": 1.75, "output": 14.00, "provider": "openai"},
- "gpt-5.1-codex-max": {"input": 1.75, "output": 14.00, "provider": "openai"},
- "gpt-5.1-codex": {"input": 1.75, "output": 14.00, "provider": "openai"},
- "gpt-5.1-mini": {"input": 0.35, "output": 2.80, "provider": "openai"},
+ # From gpt5.2.txt: $21/$168 base, plus +$0.25 margin each -> $21.25/$168.25
+ "gpt-5.2-pro": {"input": 21.25, "output": 168.25, "provider": "openai"},
  "gpt-5": {"input": 1.75, "output": 14.00, "provider": "openai"},
  "gpt-5-2025-08-07": {"input": 1.75, "output": 14.00, "provider": "openai"},
  "gpt-5-codex": {"input": 1.75, "output": 14.00, "provider": "openai"},
- "gpt-5-pro": {"input": 15.00, "output": 120.00, "provider": "openai"},
- "gpt-5-pro-2025-10-06": {"input": 15.00, "output": 120.00, "provider": "openai"},
- "gpt-5-mini-2025-08-07": {"input": 0.35, "output": 2.80, "provider": "openai"},
- "gpt-4o-mini": {"input": 0.21, "output": 0.84, "provider": "openai"},
+ "gpt-4o-mini": {"input": 0.21, "output": 0.84, "provider": "openai"},
  # Codex Mini (fine-tuned o4-mini for CLI). Pricing includes 1.4x margin per codex-mini.txt.
  # Cached input tokens override: $0.375 * 1.4 = $0.525 per 1M (25% of input rate).
  "codex-mini-latest": {"input": 2.10, "output": 8.40, "cached_input": 0.525, "provider": "openai"},
  # Anthropic
  "claude-sonnet-4-20250514": {"input": 4.20, "output": 21.00, "provider": "anthropic"},
  "claude-sonnet-4-20250514-thinking": {"input": 4.20, "output": 21.00, "provider": "anthropic"},
- "claude-haiku-4-5-20251001": {"input": 1.40, "output": 7.00, "provider": "anthropic"},
- "claude-haiku-4-5-20251001-thinking": {"input": 1.40, "output": 7.00, "provider": "anthropic"},
- "claude-sonnet-4-5-20250929": {"input": 4.20, "output": 21.00, "provider": "anthropic"},
+ "claude-sonnet-4-5-20250929": {"input": 4.20, "output": 21.00, "provider": "anthropic"},
  "claude-sonnet-4-5-20250929-thinking": {"input": 4.20, "output": 21.00, "provider": "anthropic"},
- "claude-opus-4-1-20250805": {"input": 21.00, "output": 105.00, "provider": "anthropic"},
- "claude-opus-4-1-20250805-thinking": {"input": 21.00, "output": 105.00, "provider": "anthropic"},
- # New Opus 4.5 (provider base $5/$25 with 1.4x margin -> $7.00/$35.00)
+ # New Opus 4.5 (provider base $5/$25 with 1.4x margin -> $7.00/$35.00)
  "claude-opus-4-5-20251101": {"input": 7.00, "output": 35.00, "provider": "anthropic"},
  "claude-opus-4-5-20251101-thinking": {"input": 7.00, "output": 35.00, "provider": "anthropic"},
  # Gemini
  "gemini-2.5-pro": {"input": 1.75, "output": 14.00, "provider": "gemini"},
- "gemini-2.5-flash": {"input": 0.21, "output": 0.84, "provider": "gemini"},
+ # Gemini 3 Flash Preview (priced same as prior Gemini 2.5 Flash per request)
+ "gemini-3-flash-preview": {"input": 0.21, "output": 0.84, "provider": "gemini"},
  # Gemini 3 Pro Preview ("newgem"). Base: $2/$12 and $4/$18 per 1M;
  # CLI uses the low-tier 1.4x margin rates for estimates. High-tier
  # pricing based on total_tokens > 200K is applied on the server.
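The pricing comments above mix two margin schemes: a 1.4x multiplier (Opus 4.5, codex-mini) and a flat +$0.25 adder (gpt-5.2-pro). A quick check of the arithmetic, as a sketch; rates are USD per 1M tokens as listed in the table:

    # Opus 4.5: provider base $5/$25 with a 1.4x multiplicative margin -> $7.00/$35.00.
    assert round(5.00 * 1.4, 2) == 7.00 and round(25.00 * 1.4, 2) == 35.00
    # gpt-5.2-pro: $21/$168 base plus a flat +$0.25 margin on each rate.
    assert (21.00 + 0.25, 168.00 + 0.25) == (21.25, 168.25)
    # codex-mini cached input: $0.375 * 1.4 = $0.525, i.e. 25% of the $2.10 input rate.
    assert round(0.375 * 1.4, 3) == 0.525 == round(2.10 * 0.25, 3)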
@@ -1414,6 +1406,8 @@ class ChatCLI:
  "gemini-3": "gemini-3-pro-preview",
  "gemini-3-pro": "gemini-3-pro-preview",
  "gemini-3-preview": "gemini-3-pro-preview",
+ "gemini-3-flash": "gemini-3-flash-preview",
+ "gemini-flash-3": "gemini-3-flash-preview",
  "gemini-new": "gemini-3-pro-preview",
  "new-gemini": "gemini-3-pro-preview",
  "gemini-pro-3": "gemini-3-pro-preview",
@@ -1437,18 +1431,13 @@ class ChatCLI:
  except Exception:
  model_name = ""
  try:
- if model_name in {"gpt-5-pro", "gpt-5-pro-2025-10-06"}:
- if getattr(self, "reasoning_effort", None) != "high":
+ if model_name in {"gpt-5.2-pro"}:
+ # Default these to high, but don't clobber a user-chosen xhigh.
+ if getattr(self, "reasoning_effort", None) not in ("high", "xhigh"):
  self.reasoning_effort = "high"
  # Codex family: disable preambles for better behavior
  if "codex" in model_name:
  self.preambles_enabled = False
- # Codex Max: default web search OFF when this model is selected (user can re-enable manually)
- if model_name == "gpt-5.1-codex-max":
- try:
- self.web_search_enabled = False
- except Exception:
- pass
  except Exception:
  try:
  self.reasoning_effort = "high"
@@ -1459,20 +1448,16 @@ class ChatCLI:
  return bool(model) and ("codex" in str(model).lower())
  except Exception:
  return False
- def _is_codex_max(self, model: Optional[str]) -> bool:
- try:
- return bool(model) and (str(model).strip().lower() == "gpt-5.1-codex-max")
- except Exception:
- return False
-
  def _supports_xhigh_reasoning_effort(self, model: Optional[str]) -> bool:
  """Return True if the OpenAI model supports reasoning_effort='xhigh'.

- Historically we only exposed this for Codex Max, but GPT-5.2 also supports it.
+ OpenAI supports xhigh on:
+ - gpt-5.1-codex-max
+ - the gpt-5.2* family
  """
  try:
  m = (str(model).strip().lower() if model else "")
- return m in {"gpt-5.1-codex-max", "gpt-5.2", "gpt-5-2025-08-07"}
+ return m.startswith("gpt-5.2")
  except Exception:
  return False

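Note the new body is a case-insensitive prefix match, so the whole gpt-5.2* family qualifies client-side; gpt-5.1-codex-max, although listed in the docstring, no longer matches here, leaving its xhigh handling to the server-side downgrade path described elsewhere in this diff. A sketch of the check:

    for m, expected in [("gpt-5.2", True), ("gpt-5.2-pro", True), ("GPT-5.2 ", True),
                        ("gpt-5", False), ("gpt-5.1-codex-max", False)]:
        assert (m.strip().lower().startswith("gpt-5.2")) is expected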
@@ -2049,13 +2034,13 @@ class ChatCLI:
  self.usage_info_mode = val
  except Exception:
  pass
- # Reasoning effort (default medium if missing/invalid)
- try:
- val = data.get("reasoning_effort")
- if isinstance(val, str) and val in ("low", "medium", "high"):
- self.reasoning_effort = val
- except Exception:
- pass
+ # Reasoning effort (default medium if missing/invalid)
+ try:
+ val = data.get("reasoning_effort")
+ if isinstance(val, str) and val in ("low", "medium", "high", "xhigh"):
+ self.reasoning_effort = val
+ except Exception:
+ pass
  # Text verbosity selector
  try:
  v = data.get("text_verbosity")
@@ -2485,7 +2470,7 @@ class ChatCLI:
  {"name": "/infomode", "usage": "/infomode concise|verbose", "desc": "Set Usage & Info panel mode"},
  {"name": "/tools", "usage": "/tools on|off|default", "desc": "Toggle per-request tools"},
  {"name": "/websearch", "usage": "/websearch on|off|domains|sources|location", "desc": "Configure OpenAI web search"},
- {"name": "/reasoning", "usage": "/reasoning low|medium|high", "desc": "Set OpenAI reasoning effort (default: medium)"},
+ {"name": "/reasoning", "usage": "/reasoning low|medium|high|xhigh", "desc": "Set OpenAI reasoning effort (default: medium; xhigh supported on gpt-5.2*)"},
  {"name": "/thinkingbudget", "usage": "/thinkingbudget <tokens>|default", "desc": "Set Anthropic thinking budget tokens for -thinking models"},
  {"name": "/fs", "usage": "/fs workspace|host|default", "desc": "Set filesystem scope"},
  {"name": "/agent-scope", "usage": "/agent-scope <absolute path>", "desc": "Alias for /hostbase (set Agent scope)"},
@@ -2508,35 +2493,26 @@ class ChatCLI:
  def _model_presets(self) -> List[Tuple[str, str]]:
  """Shared list of (model, label) used by settings UI and /model menu."""
  return [
- ("gpt-5.1", "OpenAI: gpt-5.1"),
  ("gpt-5.2", "OpenAI: gpt-5.2"),
+ ("gpt-5.2-pro", "OpenAI: gpt-5.2-pro (streaming, very expensive)"),
  ("gpt-5", "OpenAI: gpt-5"),
  ("gpt-5-codex", "OpenAI: gpt-5-codex"),
- ("gpt-5.1-codex", "OpenAI: gpt-5.1-codex"),
- ("gpt-5.1-codex-max", "OpenAI: gpt-5.1-codex-max"),
- ("gpt-5.1-mini", "OpenAI: gpt-5.1-mini"),
- ("gpt-5-pro", "OpenAI: gpt-5-pro (non-streaming, very expensive)"),
- ("gpt-5-mini-2025-08-07", "OpenAI: gpt-5-mini-2025-08-07"),
- ("codex-mini-latest", "OpenAI: codex-mini-latest (fast reasoning)"),
+ ("codex-mini-latest", "OpenAI: codex-mini-latest (fast reasoning)"),
  ("deepseek-chat-3.2", "DeepSeek: deepseek-chat 3.2"),
  ("deepseek-reasoner-3.2", "DeepSeek: deepseek-reasoner 3.2"),
  ("deepseek-3.2-speciale", "DeepSeek: deepseek 3.2 Speciale (no tools)"),
  ("kimi-k2-thinking", "Kimi: kimi-k2-thinking"),
  ("kimi-k2-0905-preview", "Kimi: kimi-k2-0905-preview"),
- ("gemini-2.5-pro", "Gemini: gemini-2.5-pro"),
- ("gemini-2.5-flash", "Gemini: gemini-2.5-flash"),
- ("gemini-3-pro-preview", "Gemini: gemini-3-pro-preview"),
+ ("gemini-2.5-pro", "Gemini: gemini-2.5-pro"),
+ ("gemini-3-flash-preview", "Gemini: gemini-3-flash-preview"),
+ ("gemini-3-pro-preview", "Gemini: gemini-3-pro-preview"),
  ("grok-4-1-fast-reasoning", "xAI: grok-4-1-fast-reasoning"),
  ("grok-4-1-fast-non-reasoning", "xAI: grok-4-1-fast-non-reasoning"),
  ("grok-4", "xAI: grok-4"),
  ("grok-code-fast-1", "xAI: grok-code-fast-1"),
- ("claude-haiku-4-5-20251001", "Anthropic: claude-haiku-4-5-20251001 (thinking OFF)"),
- ("claude-haiku-4-5-20251001-thinking", "Anthropic: claude-haiku-4-5-20251001 (thinking ON)"),
- ("claude-sonnet-4-5-20250929", "Anthropic: claude-sonnet-4-5-20250929 (thinking OFF)"),
- ("claude-sonnet-4-5-20250929-thinking", "Anthropic: claude-sonnet-4-5-20250929 (thinking ON)"),
- ("claude-opus-4-1-20250805", "Anthropic: claude-opus-4-1-20250805 (thinking OFF)"),
- ("claude-opus-4-1-20250805-thinking", "Anthropic: claude-opus-4-1-20250805 (thinking ON)"),
- ("claude-opus-4-5-20251101", "Anthropic: claude-opus-4-5-20251101 (thinking OFF)"),
+ ("claude-sonnet-4-5-20250929", "Anthropic: claude-sonnet-4-5-20250929 (thinking OFF)"),
+ ("claude-sonnet-4-5-20250929-thinking", "Anthropic: claude-sonnet-4-5-20250929 (thinking ON)"),
+ ("claude-opus-4-5-20251101", "Anthropic: claude-opus-4-5-20251101 (thinking OFF)"),
  ("claude-opus-4-5-20251101-thinking", "Anthropic: claude-opus-4-5-20251101 (thinking ON)"),
  ("glm-4.6", "GLM: glm-4.6"),
  ]
@@ -2603,11 +2579,11 @@ class ChatCLI:
  "deepseek-reasoner-3.2",
  "claude-opus-4-5-20251101",
  "gemini-3-pro-preview",
+ "gemini-3-flash-preview",
  "gpt-5",
  "gpt-5.2",
  "kimi-k2-thinking",
  "grok-code-fast-1",
- "gpt-5.1-codex-max",
  }
  rec_list: List[Tuple[str, str]] = [(m, lbl) for (m, lbl) in model_presets if m in rec_keys]
  other_list: List[Tuple[str, str]] = [(m, lbl) for (m, lbl) in model_presets if m not in rec_keys]
@@ -2662,7 +2638,9 @@ class ChatCLI:
  },
  {"id": "auto_approve", "label": "Auto-approve tools (comma)", "type": "text"},
  {"id": "show_tool_calls", "label": "Show tool call logs", "type": "bool"},
- {"id": "reasoning_effort", "label": "OpenAI reasoning effort", "type": "enum", "options": (["low", "medium", "high", "xhigh"] if self._supports_xhigh_reasoning_effort(self.model) else ["low", "medium", "high"]), "render": {"low": "Low", "medium": "Medium", "high": "High", "xhigh": "XHigh (Codex Max / GPT-5.2)"}},
+ # Note: options are static for this Settings UI session, so include xhigh unconditionally.
+ # The server will safely downgrade xhigh on models that don't support it.
+ {"id": "reasoning_effort", "label": "OpenAI reasoning effort", "type": "enum", "options": ["low", "medium", "high", "xhigh"], "render": {"low": "Low", "medium": "Medium", "high": "High", "xhigh": "XHigh (gpt-5.2* / Codex Max; otherwise downgrades)"}},
  {"id": "codex_max_allow_all_tools", "label": "Codex Max: allow ALL tools", "type": "bool"},
  {"id": "retain_native_tool_results", "label": "Retain provider-native tool results across turns", "type": "bool"},
  {"id": "thinking_budget_tokens", "label": "Anthropic thinking budget (tokens)", "type": "int"},
@@ -2721,10 +2699,10 @@ class ChatCLI:
  try:
  if rid == "model":
  if value == "custom":
- typed = self.ui.prompt(
- "Enter model name (e.g., deepseek-chat, gpt-5, gemini-2.5-flash)",
- default=self.model or "",
- )
+ typed = self.ui.prompt(
+ "Enter model name (e.g., deepseek-chat, gpt-5, gemini-3-flash-preview)",
+ default=self.model or "",
+ )
  working["model"] = typed.strip() or None
  self._apply_model_side_effects()
  elif rid == "text_verbosity" and isinstance(value, str):
@@ -4417,20 +4395,25 @@ class ChatCLI:
  self.ui.warn("Unknown /websearch subcommand. Use on, off, domains, sources, or location.")
  return True

- if cmd.startswith("/reasoning"):
- parts = cmd.split(maxsplit=1)
- if len(parts) == 1:
- self.ui.info("Usage: /reasoning low|medium|high")
- self.ui.info(f"Current: {self.reasoning_effort}")
- return True
- arg = (parts[1] or "").strip().lower()
- if arg in ("low", "medium", "high"):
- self.reasoning_effort = arg
- self.ui.success(f"Reasoning effort set to: {self.reasoning_effort}")
- self.save_settings()
- else:
- self.ui.warn("Invalid value. Use: low, medium, or high")
- return True
+ if cmd.startswith("/reasoning"):
+ parts = cmd.split(maxsplit=1)
+ if len(parts) == 1:
+ self.ui.info("Usage: /reasoning low|medium|high|xhigh")
+ self.ui.info(f"Current: {self.reasoning_effort}")
+ return True
+ arg = (parts[1] or "").strip().lower()
+ if arg in ("low", "medium", "high", "xhigh"):
+ self.reasoning_effort = arg
+ if arg == "xhigh" and not self._supports_xhigh_reasoning_effort(self.model):
+ # Keep the user's preference, but be explicit about server-side downgrading.
+ self.ui.warn(
+ "Note: xhigh is only applied on models that support it (e.g., gpt-5.2* / gpt-5.1-codex-max). The server may downgrade it on other models."
+ )
+ self.ui.success(f"Reasoning effort set to: {self.reasoning_effort}")
+ self.save_settings()
+ else:
+ self.ui.warn("Invalid value. Use: low, medium, high, or xhigh")
+ return True

  if cmd.startswith("/thinkingbudget"):
  parts = cmd.split(maxsplit=1)
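An illustrative session for the new branch (the two messages are the exact strings from the hunk above; the warning line only appears when `_supports_xhigh_reasoning_effort(self.model)` is false):

    /reasoning xhigh
    Note: xhigh is only applied on models that support it (e.g., gpt-5.2* / gpt-5.1-codex-max). The server may downgrade it on other models.
    Reasoning effort set to: xhigh

On a gpt-5.2* model only the final success line prints.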
@@ -5048,10 +5031,10 @@ class ChatCLI:
  # Recommended models (ordered list for shuffling)
  # Curated list per request (include Codex Max as recommended)
  rec_keys = [
- "gpt-5.1-codex-max",
  "deepseek-reasoner-3.2",
  "claude-opus-4-5-20251101",
  "gemini-3-pro-preview",
+ "gemini-3-flash-preview",
  "gpt-5",
  "gpt-5.2",
  "kimi-k2-thinking",
@@ -5122,10 +5105,10 @@ class ChatCLI:
  self.model = None
  self.ui.info("Model cleared; server default will be used.")
  elif picked == "custom":
- typed = self.ui.prompt(
- "Enter model name (e.g., deepseek-chat, gpt-5, gemini-2.5-flash)",
- default=self.model or "",
- )
+ typed = self.ui.prompt(
+ "Enter model name (e.g., deepseek-chat, gpt-5, gemini-3-flash-preview)",
+ default=self.model or "",
+ )
  self.model = self._resolve_model_alias(typed.strip() or None)
  if not self.model:
  self.ui.info("Model cleared; server default will be used.")
@@ -5315,12 +5298,6 @@ class ChatCLI:
  payload: Dict[str, Any] = {"messages": self._build_messages(user_input)}
  if self.model:
  payload["model"] = self.model
- # Codex Max: allow ALL tools per-request when enabled in settings
- try:
- if self._is_codex_max(payload.get("model")) and bool(getattr(self, "codex_max_allow_all_tools", False)):
- payload["codex_max_allow_all_tools"] = True
- except Exception:
- pass
  # Include terminal identifier so the server can isolate per-terminal workspace if it executes tools
  try:
  if self.terminal_id:
@@ -5375,18 +5352,11 @@ class ChatCLI:
  payload["auto_approve_command_bases"] = cmd_bases
  except Exception:
  pass
- # Reasoning effort (OpenAI reasoning models only; server will ignore for others)
+ # Reasoning effort (OpenAI reasoning models only; server will ignore for others).
+ # Let the server decide whether xhigh is supported for the selected (or default) model.
  try:
- if isinstance(self.reasoning_effort, str):
- if self.reasoning_effort in ("low", "medium", "high"):
- payload["reasoning_effort"] = self.reasoning_effort
- elif self.reasoning_effort == "xhigh":
- if self._supports_xhigh_reasoning_effort(payload.get("model")):
- payload["reasoning_effort"] = "xhigh"
- else:
- payload["reasoning_effort"] = "medium"
- else:
- payload["reasoning_effort"] = "medium"
+ if isinstance(self.reasoning_effort, str) and self.reasoning_effort in ("low", "medium", "high", "xhigh"):
+ payload["reasoning_effort"] = self.reasoning_effort
  else:
  payload["reasoning_effort"] = "medium"
  except Exception:
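The net effect is that client-side gating of xhigh is gone: the payload carries whichever valid value the user set, and the server performs any downgrade. A sketch of the simplified branch:

    def effort_for_payload(value):
        # Any recognized effort, including "xhigh", is forwarded verbatim;
        # the server downgrades xhigh on models that don't support it.
        if isinstance(value, str) and value in ("low", "medium", "high", "xhigh"):
            return value
        return "medium"

    assert effort_for_payload("xhigh") == "xhigh"   # previously downgraded to "medium" client-side
    assert effort_for_payload(None) == "medium"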
@@ -5619,15 +5589,18 @@ class ChatCLI:
  self._rawlog_write(msg)
  except Exception:
  pass
- # Idle "thinking" indicator shown while waiting for the next event (first tokens or next tool call)
- indicator_task = None
- indicator_active = False
- indicator_started = False # used only to adjust leading newline behavior on first assistant header
- # Mode: animate or static (default static for stability)
- try:
- _animate_indicator = (os.getenv("HENOSIS_THINKING_ANIMATE", "").strip().lower() in ("1", "true", "yes", "on"))
- except Exception:
- _animate_indicator = False
+ # Idle "thinking" indicator shown while waiting for the next event (first tokens or next tool call)
+ indicator_task = None
+ indicator_active = False
+ indicator_started = False # used only to adjust leading newline behavior on first assistant header
+ # Track whether we're currently positioned at the start of a fresh line.
+ # This prevents double-newlines between back-to-back tool events.
+ at_line_start = True
+ # Mode: animate or static (default static for stability)
+ try:
+ _animate_indicator = (os.getenv("HENOSIS_THINKING_ANIMATE", "").strip().lower() in ("1", "true", "yes", "on"))
+ except Exception:
+ _animate_indicator = False

  async def _thinking_indicator_loop(chosen_word: str, spacing: int = 3) -> None:
  """Animate a transient thinking word on a single line until indicator_active becomes False.
@@ -5666,8 +5639,8 @@ class ChatCLI:
  except Exception:
  pass

- async def _indicator_start() -> None:
- nonlocal indicator_task, indicator_active, indicator_started
+ async def _indicator_start() -> None:
+ nonlocal indicator_task, indicator_active, indicator_started, at_line_start
  # Choose a random word and spacing each start
  word_bank = list(self._thinking_words or ["thinking", "working..."])
  if not word_bank:
@@ -5695,21 +5668,28 @@ class ChatCLI:
  c = colors[i % len(colors)]
  out_chars.append(f"\x1b[38;5;{c}m{ch}\x1b[0m")
  line = " " + joiner.join(out_chars) + " "
- # Start on a dedicated new line so we never clobber prior output
- sys.stdout.write("\n")
- sys.stdout.write("\r\x1b[2K" + line)
- sys.stdout.flush()
+ # Start on a dedicated new line so we never clobber prior output.
+ # If we're already at a fresh line, don't emit an extra newline (prevents
+ # visible blank lines between back-to-back tool events).
+ if not at_line_start:
+ sys.stdout.write("\n")
+ sys.stdout.write("\r\x1b[2K" + line)
+ sys.stdout.flush()
+ at_line_start = False
  # File debug
  try:
  self.ui.debug_log(f"indicator.start word='{chosen}' animate={_animate_indicator}")
  except Exception:
  pass
- except Exception:
- try:
- sys.stdout.write("\n" + (" " + joiner.join(list(str(chosen))) + " "))
- sys.stdout.flush()
- except Exception:
- pass
+ except Exception:
+ try:
+ if not at_line_start:
+ sys.stdout.write("\n")
+ sys.stdout.write("\r\x1b[2K" + (" " + joiner.join(list(str(chosen))) + " "))
+ sys.stdout.flush()
+ at_line_start = False
+ except Exception:
+ pass
  indicator_started = True
  if _animate_indicator:
  try:
@@ -5719,8 +5699,8 @@ class ChatCLI:
  indicator_task = None
  indicator_active = False

- async def _indicator_stop(clear: bool = False) -> None:
- nonlocal indicator_task, indicator_active, indicator_started
+ async def _indicator_stop(clear: bool = False) -> None:
+ nonlocal indicator_task, indicator_active, indicator_started, at_line_start
  # Only clear the line if an indicator was actually started.
  was_started = bool(indicator_started)
  indicator_active = False
@@ -5738,19 +5718,21 @@ class ChatCLI:
  finally:
  indicator_task = None
  # Default to not clearing to avoid erasing streamed content lines
- if was_started and clear:
- try:
- sys.stdout.write("\r\x1b[2K")
- sys.stdout.flush()
- except Exception:
- pass
- elif was_started:
- # Move to the next line to separate subsequent output
- try:
- sys.stdout.write("\n")
- sys.stdout.flush()
- except Exception:
- pass
+ if was_started and clear:
+ try:
+ sys.stdout.write("\r\x1b[2K")
+ sys.stdout.flush()
+ at_line_start = True
+ except Exception:
+ pass
+ elif was_started:
+ # Move to the next line to separate subsequent output
+ try:
+ sys.stdout.write("\n")
+ sys.stdout.flush()
+ at_line_start = True
+ except Exception:
+ pass
  # Reset started flag after stopping
  indicator_started = False
  try:
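The `at_line_start` edits in this and the following hunks all follow one discipline: every write path records whether the cursor ended on a fresh line, and newlines are emitted only when the cursor is mid-line. A standalone sketch of the pattern (not the CLI's actual helpers):

    import sys

    at_line_start = True  # module-level flag, mirroring the nonlocal used above

    def emit(text):
        global at_line_start
        sys.stdout.write(text)
        at_line_start = text.endswith("\n")

    def break_line():
        # No-op on a fresh line, so back-to-back events can't stack blank lines.
        global at_line_start
        if not at_line_start:
            sys.stdout.write("\n")
            at_line_start = True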
@@ -5810,14 +5792,16 @@ class ChatCLI:
  pass
  continue

- elif event == "message.delta":
- # Stop any transient indicator before printing content and clear the line
- try:
- await _indicator_stop(clear=True)
- except Exception:
- pass
- text = data.get("text", "")
- if text:
+ elif event == "message.delta":
+ # Stop any transient indicator before printing content and clear the line
+ try:
+ await _indicator_stop(clear=True)
+ except Exception:
+ pass
+ # Indicator line cleared; we're now at the start of a fresh line.
+ at_line_start = True
+ text = data.get("text", "")
+ if text:
  try:
  _deltas_total += 1
  except Exception:
5852
5836
  print(str(model_label) + ": ", end="", flush=True)
5853
5837
  except Exception:
5854
5838
  pass
5855
- header_printed = True
5856
- try:
5857
- self.ui.debug_log(f"header.printed model='{model_label}' on_first_delta")
5858
- except Exception:
5859
- pass
5839
+ header_printed = True
5840
+ at_line_start = False
5841
+ try:
5842
+ self.ui.debug_log(f"header.printed model='{model_label}' on_first_delta")
5843
+ except Exception:
5844
+ pass
5860
5845
  assistant_buf.append(text)
5861
5846
  # Print the token delta raw to avoid any wrapping/markup side-effects
5862
5847
  try:
5863
5848
  self.ui.print(text, style=self.ui.theme["assistant"], end="")
5864
- except Exception:
5865
- try:
5866
- print(str(text), end="", flush=True)
5867
- except Exception:
5868
- pass
5869
- # Deep debug: show each delta's size/preview
5849
+ except Exception:
5850
+ try:
5851
+ print(str(text), end="", flush=True)
5852
+ except Exception:
5853
+ pass
5854
+ at_line_start = False
5855
+ # Deep debug: show each delta's size/preview
5870
5856
  try:
5871
5857
  if DEBUG_SSE:
5872
5858
  prev = text[:40].replace("\n", "\\n")
@@ -5885,17 +5871,29 @@ class ChatCLI:
5885
5871
  except Exception:
5886
5872
  pass
5887
5873
 
5888
- elif event == "tool.call":
5889
- # Ensure any prior indicator state is reset cleanly, then restart
5890
- # a fresh indicator while waiting for the tool to run.
5891
- try:
5892
- await _indicator_stop(clear=True)
5893
- except Exception:
5894
- pass
5895
-
5896
- name = data.get("name")
5897
- args = data.get("args", {}) or {}
5898
- call_id = data.get("call_id")
5874
+ elif event == "tool.call":
5875
+ # Ensure any prior indicator state is reset cleanly, then restart
5876
+ # a fresh indicator while waiting for the tool to run.
5877
+ try:
5878
+ await _indicator_stop(clear=True)
5879
+ except Exception:
5880
+ pass
5881
+
5882
+ # If we were mid-line (e.g., streamed assistant text), break cleanly before
5883
+ # showing the transient tool-wait indicator.
5884
+ if not at_line_start:
5885
+ try:
5886
+ self.ui.print()
5887
+ except Exception:
5888
+ try:
5889
+ print()
5890
+ except Exception:
5891
+ pass
5892
+ at_line_start = True
5893
+
5894
+ name = data.get("name")
5895
+ args = data.get("args", {}) or {}
5896
+ call_id = data.get("call_id")
5899
5897
  try:
5900
5898
  self.ui.debug_log(f"tool.call name='{name}' call_id={call_id}")
5901
5899
  except Exception:
@@ -5982,17 +5980,23 @@ class ChatCLI:
5982
5980
  name = str(data.get("name"))
5983
5981
  result = data.get("result", {}) or {}
5984
5982
  call_id = data.get("call_id")
5985
- # Stop any indicator before rendering results
5986
- try:
5987
- await _indicator_stop(clear=True)
5988
- except Exception:
5989
- pass
5990
- # Ensure tool result starts on a fresh line if assistant text was mid-line
5991
- try:
5992
- buf_str = "".join(assistant_buf)
5993
- except Exception:
5994
- buf_str = ""
5995
- self.ui.ensure_newline(buf_str)
5983
+ # Stop any indicator before rendering results
5984
+ try:
5985
+ await _indicator_stop(clear=True)
5986
+ except Exception:
5987
+ pass
5988
+ # Ensure tool result starts on a fresh line if assistant text was mid-line.
5989
+ # Don't rely on assistant_buf ending with "\n" because UI.ensure_newline()
5990
+ # prints without mutating the buffer, which can cause repeated blank lines.
5991
+ if not at_line_start:
5992
+ try:
5993
+ self.ui.print()
5994
+ except Exception:
5995
+ try:
5996
+ print()
5997
+ except Exception:
5998
+ pass
5999
+ at_line_start = True
5996
6000
  # Concise default: one professional, natural-language line per tool call.
5997
6001
  if not self.ui.verbose:
5998
6002
  try:
@@ -6048,13 +6052,18 @@ class ChatCLI:
6048
6052
  except Exception:
6049
6053
  # Fall back to legacy renderer on unexpected issues
6050
6054
  self._render_tool_result(name, result, call_id=call_id)
6051
- else:
6052
- # Verbose mode retains the richer summary with previews
6053
- self._render_tool_result(name, result, call_id=call_id)
6054
- try:
6055
- await self._ws_broadcast("tool.result", {"name": name, "result": result, "call_id": call_id})
6056
- except Exception:
6057
- pass
6055
+ else:
6056
+ # Verbose mode retains the richer summary with previews
6057
+ self._render_tool_result(name, result, call_id=call_id)
6058
+
6059
+ # Tool result output is line-oriented; after rendering we should be positioned
6060
+ # at the start of a fresh line so the next tool.call indicator doesn't insert
6061
+ # an extra blank line.
6062
+ at_line_start = True
6063
+ try:
6064
+ await self._ws_broadcast("tool.result", {"name": name, "result": result, "call_id": call_id})
6065
+ except Exception:
6066
+ pass
6058
6067
  # For Kimi, append provider-native tool result to raw history so it's threaded correctly
6059
6068
  try:
6060
6069
  if bool(getattr(self, "retain_native_tool_results", False)) and isinstance(self.model, str) and self.model.startswith("kimi-") and call_id:
@@ -6683,12 +6692,13 @@ class ChatCLI:
6683
6692
  # Compact style: include reasoning effort inline with model name when applicable
6684
6693
  try:
6685
6694
  effort_seg = ""
6686
- if self._is_openai_reasoning_model(model_used):
6687
- # Convert low|medium|high -> Low|Medium|High for display
6688
- lvl = str(self.reasoning_effort or "medium").strip().lower()
6689
- if lvl not in ("low", "medium", "high"):
6690
- lvl = "medium"
6691
- effort_seg = f" {lvl.capitalize()}"
6695
+ if self._is_openai_reasoning_model(model_used):
6696
+ # Convert low|medium|high|xhigh -> Low|Medium|High|XHigh for display
6697
+ lvl = str(self.reasoning_effort or "medium").strip().lower()
6698
+ if lvl not in ("low", "medium", "high", "xhigh"):
6699
+ lvl = "medium"
6700
+ disp = {"low": "Low", "medium": "Medium", "high": "High", "xhigh": "XHigh"}.get(lvl, "Medium")
6701
+ effort_seg = f" {disp}"
6692
6702
  except Exception:
6693
6703
  effort_seg = ""
6694
6704
  model_only_line = f"model: {model_used or '(unknown)'}{effort_seg}"
@@ -7375,11 +7385,12 @@ class ChatCLI:
7375
7385
  # Reasoning effort tag for OpenAI reasoning models
7376
7386
  try:
7377
7387
  effort_seg = ""
7378
- if self._is_openai_reasoning_model(model_label):
7379
- lvl = str(self.reasoning_effort or "medium").strip().lower()
7380
- if lvl not in ("low", "medium", "high"):
7381
- lvl = "medium"
7382
- effort_seg = f" {lvl.capitalize()}"
7388
+ if self._is_openai_reasoning_model(model_label):
7389
+ lvl = str(self.reasoning_effort or "medium").strip().lower()
7390
+ if lvl not in ("low", "medium", "high", "xhigh"):
7391
+ lvl = "medium"
7392
+ disp = {"low": "Low", "medium": "Medium", "high": "High", "xhigh": "XHigh"}.get(lvl, "Medium")
7393
+ effort_seg = f" {disp}"
7383
7394
  except Exception:
7384
7395
  effort_seg = ""
7385
7396
  try:
@@ -7506,13 +7517,8 @@ class ChatCLI:
7506
7517
  if self.auto_approve:
7507
7518
  new_payload["auto_approve"] = self.auto_approve
7508
7519
  try:
7509
- if isinstance(self.reasoning_effort, str):
7510
- if self.reasoning_effort in ("low", "medium", "high"):
7511
- new_payload["reasoning_effort"] = self.reasoning_effort
7512
- elif self.reasoning_effort == "xhigh" and self._supports_xhigh_reasoning_effort(self.model):
7513
- new_payload["reasoning_effort"] = "xhigh"
7514
- else:
7515
- new_payload["reasoning_effort"] = "medium"
7520
+ if isinstance(self.reasoning_effort, str) and self.reasoning_effort in ("low", "medium", "high", "xhigh"):
7521
+ new_payload["reasoning_effort"] = self.reasoning_effort
7516
7522
  else:
7517
7523
  new_payload["reasoning_effort"] = "medium"
7518
7524
  except Exception:
@@ -7788,16 +7794,14 @@ class ChatCLI:
7788
7794
  if not ctx_map:
7789
7795
  try:
7790
7796
  ctx_map.update({
7791
- "gpt-5.1": 400000,
7792
7797
  "gpt-5.2": 400000,
7793
- "gpt-5.1-mini": 400000,
7798
+ "gpt-5.2-pro": 400000,
7794
7799
  "gpt-5": 400000,
7795
7800
  "gpt-5-2025-08-07": 400000,
7796
- "gpt-5-mini-2025-08-07": 400000,
7797
7801
  "codex-mini-latest": 200000,
7798
7802
  "gemini-2.5-pro": 1048576,
7799
- "gemini-2.5-flash": 1048576,
7800
- "gemini-3-pro-preview": 1000000,
7803
+ "gemini-3-flash-preview": 1048576,
7804
+ "gemini-3-pro-preview": 1000000,
7801
7805
  "grok-4-1-fast-reasoning": 2000000,
7802
7806
  "grok-4-1-fast-non-reasoning": 2000000,
7803
7807
  "grok-4": 200000,
@@ -7808,13 +7812,9 @@ class ChatCLI:
7808
7812
  "kimi-k2-0905-preview": 262144,
7809
7813
  "claude-sonnet-4-20250514": 1000000,
7810
7814
  "claude-sonnet-4-20250514-thinking": 1000000,
7811
- "claude-sonnet-4-5-20250929": 1000000,
7812
- "claude-sonnet-4-5-20250929-thinking": 1000000,
7813
- "claude-haiku-4-5-20251001": 200000,
7814
- "claude-haiku-4-5-20251001-thinking": 200000,
7815
- "claude-opus-4-1-20250805": 200000,
7816
- "claude-opus-4-1-20250805-thinking": 200000,
7817
- "claude-opus-4-5-20251101": 200000,
7815
+ "claude-sonnet-4-5-20250929": 1000000,
7816
+ "claude-sonnet-4-5-20250929-thinking": 1000000,
7817
+ "claude-opus-4-5-20251101": 200000,
7818
7818
  "claude-opus-4-5-20251101-thinking": 200000,
7819
7819
  "glm-4.6": 200000,
7820
7820
  })
@@ -7846,19 +7846,19 @@ class ChatCLI:
7846
7846
 
7847
7847
  # --------------------- Tier-aware defaults -------------------------
7848
7848
 
7849
- def _recommended_default_model(self) -> str:
7850
- """Return the tier-aware recommended default model.
7849
+ def _recommended_default_model(self) -> str:
7850
+ """Return the tier-aware recommended default model.
7851
7851
 
7852
7852
  - Free-tier users: recommend Kimi k2-thinking (free-tier friendly reasoning model).
7853
- - All other users: recommend gpt-5.1 (best overall default).
7854
- When tier is unknown, fall back to gpt-5.1.
7855
- """
7853
+ - All other users: recommend gpt-5.2 (best overall default).
7854
+ When tier is unknown, fall back to gpt-5.2.
7855
+ """
7856
7856
  try:
7857
7857
  if bool(self.is_free_tier):
7858
7858
  return "kimi-k2-thinking"
7859
7859
  except Exception:
7860
7860
  pass
7861
- return "gpt-5.1"
7861
+ return "gpt-5.2"
7862
7862
 
7863
7863
  # --------------------- Onboarding and Welcome ---------------------
7864
7864
  async def _welcome_flow(self) -> None:
{henosis_cli-0.6.4.dist-info → henosis_cli-0.6.7.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: henosis-cli
- Version: 0.6.4
+ Version: 0.6.7
  Summary: henosis-cli — interactive CLI for the Henosis multi-provider streaming chat backend, with optional local tools.
  Author-email: henosis <henosis@henosis.us>
  License-Expression: LicenseRef-Proprietary
henosis_cli-0.6.7.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+ cli.py,sha256=XgyMuIBF-b20rQ2mIAAYDLVxEzZB4-4DlZQ0a4V_1UY,501266
+ henosis_cli_tools/__init__.py,sha256=x3uaN_ub32uALx_oURna0VnuoSsj7i9NYY6uRsc2ZzM,1147
+ henosis_cli_tools/cli_entry.py,sha256=OZTe_s9Hfy3mcsYG77T3RTdtCDod-CSwmhskbXjmmqs,1713
+ henosis_cli_tools/input_engine.py,sha256=kGW6AgDGbdcVxlx5mvTPKYe4lYhho5wztvUAw7WlmTs,15286
+ henosis_cli_tools/settings_ui.py,sha256=8rWsp0S3wT-dgkP0y20FOBmBBy7jYbDy8AuftmKcp4w,21368
+ henosis_cli_tools/tool_impl.py,sha256=0iojZbVZhhPJybcmb2qYAuCesgQMp83JgPL2Py4PjT8,39250
+ henosis_cli-0.6.7.dist-info/METADATA,sha256=orIj5bXfMXfOSh3TeKpGkgOUWv6Ea2pBKB5rzkAbcKk,5787
+ henosis_cli-0.6.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ henosis_cli-0.6.7.dist-info/entry_points.txt,sha256=KmXDdmIjq1SVMs8FK3wHPA2i89RMaerzZHIetllMLIk,74
+ henosis_cli-0.6.7.dist-info/top_level.txt,sha256=u7XMBcJ8Kb0n91WaSU-4Db8yURSUXFuOxGMsXti0a-g,34
+ henosis_cli-0.6.7.dist-info/RECORD,,
henosis_cli_tools/tool_impl.py CHANGED
@@ -70,12 +70,37 @@ _DEFAULT_ALLOWED_EXTS = {
  ".conf",
  }

- _MAX_FILE_BYTES = int(os.getenv("HENOSIS_MAX_FILE_BYTES", str(1_073_741_824)))
- _MAX_EDIT_BYTES = int(os.getenv("HENOSIS_MAX_EDIT_BYTES", str(1_073_741_824)))
- _EDIT_SAFEGUARD_MAX_LINES = int(os.getenv("HENOSIS_EDIT_SAFEGUARD_MAX_LINES", "3000"))
- _COMMAND_TIMEOUT_SEC = float(os.getenv("HENOSIS_COMMAND_TIMEOUT_SEC", "30"))
- # Max chars for stdout/stderr before truncation notice is applied
- _CMD_OUTPUT_MAX_CHARS = 3000
+ _MAX_FILE_BYTES = int(os.getenv("HENOSIS_MAX_FILE_BYTES", str(1_073_741_824)))
+ _MAX_EDIT_BYTES = int(os.getenv("HENOSIS_MAX_EDIT_BYTES", str(1_073_741_824)))
+ _EDIT_SAFEGUARD_MAX_LINES = int(os.getenv("HENOSIS_EDIT_SAFEGUARD_MAX_LINES", "3000"))
+
+ # Command timeout behavior:
+ # - The tool call can request a per-invocation timeout via the `timeout` argument.
+ # - The client/user may configure a DEFAULT (used when the tool omits timeout)
+ # and a MAX (hard cap for safety).
+ # - Backward compatibility: legacy env var HENOSIS_COMMAND_TIMEOUT_SEC is treated as MAX.
+ def _env_float(name: str, default: float) -> float:
+ try:
+ v = os.getenv(name, "")
+ if v is None:
+ return float(default)
+ s = str(v).strip()
+ if not s:
+ return float(default)
+ return float(s)
+ except Exception:
+ return float(default)
+
+
+ _COMMAND_TIMEOUT_DEFAULT_SEC = _env_float("HENOSIS_COMMAND_TIMEOUT_DEFAULT_SEC", 360.0)
+ _LEGACY_COMMAND_TIMEOUT_SEC_RAW = os.getenv("HENOSIS_COMMAND_TIMEOUT_SEC", "")
+ if str(_LEGACY_COMMAND_TIMEOUT_SEC_RAW or "").strip():
+ _COMMAND_TIMEOUT_MAX_SEC = _env_float("HENOSIS_COMMAND_TIMEOUT_SEC", 900.0)
+ else:
+ _COMMAND_TIMEOUT_MAX_SEC = _env_float("HENOSIS_COMMAND_TIMEOUT_MAX_SEC", 900.0)
+
+ # Max chars for stdout/stderr before truncation notice is applied
+ _CMD_OUTPUT_MAX_CHARS = 3000

  _VERBOSE_NOTICE = (
  "console output was EXTEREMELY verbose the ouput was truncated as to not overflow your context. here are the last 3k chars:\n"
@@ -322,13 +347,22 @@ def run_command(cmd: str, policy: FileToolPolicy, cwd: Optional[str] = None, tim
  base = os.path.basename(exe).lower()
  if (not allow_all) and (base not in allow_set):
  return {"ok": False, "error": f"command '{base}' not allowed"}
- try:
- requested = float(timeout) if timeout is not None else _COMMAND_TIMEOUT_SEC
- except Exception:
- requested = _COMMAND_TIMEOUT_SEC
- timeout_s = min(max(0.01, requested), float(_COMMAND_TIMEOUT_SEC))
- start = time.time()
- try:
+ # Determine effective timeout: tool-controlled within a user-configurable maximum.
+ try:
+ requested = float(timeout) if timeout is not None else float(_COMMAND_TIMEOUT_DEFAULT_SEC)
+ except Exception:
+ requested = float(_COMMAND_TIMEOUT_DEFAULT_SEC)
+ try:
+ max_sec = float(_COMMAND_TIMEOUT_MAX_SEC)
+ except Exception:
+ max_sec = 900.0
+ if max_sec <= 0:
+ # Degenerate config; keep tool safe.
+ max_sec = 0.01
+ timeout_s = min(max(0.01, requested), max_sec)
+ timeout_was_clamped = bool(requested > max_sec)
+ start = time.time()
+ try:
  # Force UTF-8 decoding with replacement to avoid locale-dependent decode errors
  # on Windows (e.g., cp1252 UnicodeDecodeError in reader thread).
  proc = subprocess.run(
@@ -341,39 +375,51 @@ def run_command(cmd: str, policy: FileToolPolicy, cwd: Optional[str] = None, tim
  errors="replace",
  timeout=timeout_s,
  )
- dur_ms = int((time.time() - start) * 1000)
+ dur_ms = int((time.time() - start) * 1000)
  # Truncate very verbose outputs to protect context size
  out = _truncate_if_verbose(proc.stdout)
  err = _truncate_if_verbose(proc.stderr)
- return {
- "ok": True,
- "data": {
- "cmd": cmd_str,
- "cwd": str(cwd_path),
- "exit_code": proc.returncode,
- "stdout": out,
- "stderr": err,
- "timed_out": False,
- "duration_ms": dur_ms,
- },
- }
- except subprocess.TimeoutExpired as e:
- dur_ms = int((time.time() - start) * 1000)
+ return {
+ "ok": True,
+ "data": {
+ "cmd": cmd_str,
+ "cwd": str(cwd_path),
+ "exit_code": proc.returncode,
+ "stdout": out,
+ "stderr": err,
+ "timed_out": False,
+ "duration_ms": dur_ms,
+ "timeout_requested_sec": requested,
+ "timeout_effective_sec": timeout_s,
+ "timeout_max_sec": max_sec,
+ "timeout_was_clamped": timeout_was_clamped,
+ },
+ }
+ except subprocess.TimeoutExpired as e:
+ dur_ms = int((time.time() - start) * 1000)
  # Even in timeout, ensure any captured output is truncated if overly verbose
  out = _truncate_if_verbose(e.stdout or "")
  err = _truncate_if_verbose(e.stderr or "")
- return {
- "ok": True,
- "data": {
- "cmd": cmd_str,
- "cwd": str(cwd_path),
- "exit_code": None,
- "stdout": out,
- "stderr": err,
- "timed_out": True,
- "duration_ms": dur_ms,
- },
- }
+ return {
+ "ok": True,
+ "data": {
+ "cmd": cmd_str,
+ "cwd": str(cwd_path),
+ "exit_code": None,
+ "stdout": out,
+ "stderr": err,
+ "timed_out": True,
+ "duration_ms": dur_ms,
+ "timeout_requested_sec": requested,
+ "timeout_effective_sec": timeout_s,
+ "timeout_max_sec": max_sec,
+ "timeout_was_clamped": timeout_was_clamped,
+ "message": (
+ f"Command exceeded timeout (effective_timeout={timeout_s}s). "
+ "Process was terminated."
+ ),
+ },
+ }

  # ---------------------------- apply_patch ----------------------------#
  def _ap_normalize_unicode(s: str) -> str:
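The four new `timeout_*` keys let a caller tell "the command was slow" apart from "the caller asked for more time than the configured maximum allows". A sketch of consuming the dict (the `result` variable is hypothetical, holding a return value of run_command above):

    data = result["data"]
    if data["timed_out"]:
        if data["timeout_was_clamped"]:
            print(f"hit the {data['timeout_max_sec']}s cap "
                  f"(requested {data['timeout_requested_sec']}s); raise the MAX env var to allow more")
        else:
            print(f"terminated after {data['timeout_effective_sec']}s; pass a larger `timeout` argument")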
henosis_cli-0.6.4.dist-info/RECORD DELETED
@@ -1,11 +0,0 @@
- cli.py,sha256=2btVCM5KSbsbhf6_8y-7Of0yUB4EbZUqm1Q_HmmjUIk,500984
- henosis_cli_tools/__init__.py,sha256=x3uaN_ub32uALx_oURna0VnuoSsj7i9NYY6uRsc2ZzM,1147
- henosis_cli_tools/cli_entry.py,sha256=OZTe_s9Hfy3mcsYG77T3RTdtCDod-CSwmhskbXjmmqs,1713
- henosis_cli_tools/input_engine.py,sha256=kGW6AgDGbdcVxlx5mvTPKYe4lYhho5wztvUAw7WlmTs,15286
- henosis_cli_tools/settings_ui.py,sha256=8rWsp0S3wT-dgkP0y20FOBmBBy7jYbDy8AuftmKcp4w,21368
- henosis_cli_tools/tool_impl.py,sha256=oZGajJMkR2jfyuPVe-iq2s6ktOl5-2K3RzX2R7FIAFQ,37414
- henosis_cli-0.6.4.dist-info/METADATA,sha256=VhjQpdEqWxDfMv5nAR_sfovg1XYt_PiurHQsz-LGGwo,5787
- henosis_cli-0.6.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- henosis_cli-0.6.4.dist-info/entry_points.txt,sha256=KmXDdmIjq1SVMs8FK3wHPA2i89RMaerzZHIetllMLIk,74
- henosis_cli-0.6.4.dist-info/top_level.txt,sha256=u7XMBcJ8Kb0n91WaSU-4Db8yURSUXFuOxGMsXti0a-g,34
- henosis_cli-0.6.4.dist-info/RECORD,,