abstractcode-0.3.0-py3-none-any.whl → abstractcode-0.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,7 @@ from __future__ import annotations

  import json
  import os
+ import re
  import sys
  import threading
  import time
@@ -12,8 +13,16 @@ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
  from prompt_toolkit.formatted_text import HTML

  from .input_handler import create_prompt_session, create_simple_session
- from .fullscreen_ui import FullScreenUI
+ from .file_mentions import (
+ default_workspace_mounts,
+ default_workspace_root,
+ find_at_file_mentions,
+ normalize_relative_path,
+ resolve_workspace_path,
+ )
+ from .fullscreen_ui import BLOCKING_PROMPT_CANCEL_TOKEN, FullScreenUI, SubmittedInput
  from .terminal_markdown import TerminalMarkdownRenderer
+ from .theme import BUILTIN_THEMES, Theme, ansi_bg, ansi_fg, blend_hex, get_theme, is_dark, theme_from_env


  def _supports_color() -> bool:
@@ -61,6 +70,15 @@ class _ToolSpec:
  parameters: Dict[str, Any]


+ @dataclass(frozen=True)
+ class _BundledAgentTemplate:
+ bundle_id: str
+ bundle_version: str
+ flow_id: str
+ name: str
+ description: str
+
+
  def _now_iso() -> str:
  from datetime import datetime, timezone

@@ -185,6 +203,27 @@ class ReactShell:
  self._max_tokens = None
  # Enable ANSI colors - fullscreen_ui uses ANSI class to parse escape codes
  self._color = bool(color and _supports_color())
+ self._theme: Theme = theme_from_env().normalized()
+ # Optional user-defined system prompt override (applies to new runs; can be set via /system).
+ self._system_prompt_override: Optional[str] = None
+ # ReAct runtime heuristics (configurable).
+ # - check_plan: retry if output claims it will act but emits no tool calls (default OFF).
+ self._check_plan: bool = False
+ # Attachment persistence:
+ # - when OFF (default), `@file` chips are consumed for the next user message only.
+ # - when ON, attachment chips persist across turns until removed.
+ self._files_keep: bool = False
+ # Host/Gateway GPU meter (optional).
+ self._gpu_monitor_enabled: bool = self._gpu_monitor_enabled_from_env()
+ self._gpu_utilization_pct: Optional[float] = None
+ self._gpu_last_error: Optional[str] = None
+ self._gpu_last_ok_at: Optional[float] = None
+ self._gpu_monitor_thread: Optional[threading.Thread] = None
+ self._gpu_monitor_lock = threading.Lock()
+ # Bundled `.flow` agent templates (for TUI selector).
+ self._bundled_agent_templates: List[_BundledAgentTemplate] = []
+ # Current agent selector key (shown in the footer dropdown).
+ self._agent_selector_key: str = agent_lower if agent_lower in ("react", "codeact", "memact") else raw_agent
  # Session-level tool allowlist (None = default/all tools for the agent kind).
  self._allowed_tools: Optional[List[str]] = None
  # Whether to include tool usage examples in the prompted tool section (token-expensive).
@@ -213,11 +252,14 @@ class ReactShell:
  from abstractagent.agents.react import ReactAgent
  from abstractagent.tools import execute_python, self_improve
  from abstractcore.tools import ToolDefinition
+ from abstractcore.tools.abstractignore import AbstractIgnore
  from abstractcore.tools.common_tools import (
  list_files,
+ skim_folders,
  search_files,
  analyze_code,
  read_file,
+ skim_files,
  write_file,
  edit_file,
  execute_command,
@@ -247,13 +289,16 @@ class ReactShell:
  self._Snapshot = Snapshot
  self._JsonSnapshotStore = JsonSnapshotStore
  self._InMemorySnapshotStore = InMemorySnapshotStore
+ self._AbstractIgnore = AbstractIgnore

  # Default tools for AbstractCode (curated subset for coding tasks)
  DEFAULT_TOOLS = [
  list_files,
+ skim_folders,
  search_files,
  analyze_code,
  read_file,
+ skim_files,
  write_file,
  edit_file,
  execute_command,
@@ -261,19 +306,27 @@ class ReactShell:
  fetch_url,
  self_improve,
  ]
+ # Keep references so we can rebuild agents/toolsets at runtime (e.g. /agent switch).
+ self._default_tools = list(DEFAULT_TOOLS)
+ self._codeact_tools = [execute_python]
+ self._agent_classes = {
+ "react": ReactAgent,
+ "memact": MemActAgent,
+ "codeact": CodeActAgent,
+ }

  if self._workflow_agent_ref is not None:
  # Workflow agents use the "safe" default toolset (same as ReAct).
- self._tools = list(DEFAULT_TOOLS)
+ self._tools = list(self._default_tools)
  agent_cls = None
  elif self._agent_kind == "react":
- self._tools = list(DEFAULT_TOOLS)
+ self._tools = list(self._default_tools)
  agent_cls = ReactAgent
  elif self._agent_kind == "memact":
- self._tools = list(DEFAULT_TOOLS)
+ self._tools = list(self._default_tools)
  agent_cls = MemActAgent
  else:
- self._tools = [execute_python]
+ self._tools = list(self._codeact_tools)
  agent_cls = CodeActAgent

  self._tool_specs: Dict[str, _ToolSpec] = {}
@@ -306,6 +359,9 @@ class ReactShell:
  if self._state_file:
  self._config_file = Path(self._state_file).with_suffix(".config.json")
  self._load_config()
+ # Best-effort: discover bundled `.flow` agents for the footer selector.
+ self._refresh_bundled_agent_templates()
+ self._agent_selector_key = self._normalize_agent_selector_key(getattr(self, "_agent_selector_key", "") or "")

  # Tool execution: passthrough by default so we can gate by approval in the CLI.
  tool_executor = PassthroughToolExecutor(mode="approval_required")
@@ -322,6 +378,74 @@ class ReactShell:
  llm_kwargs=llm_kwargs or None,
  )

+ # Artifact storage is the durability-safe place for large payloads (including attachments).
+ #
+ # Important: create the ArtifactStore BEFORE wiring the Runtime so effect handlers
+ # (LLM_CALL media resolution) and host-side ingestion use the same store instance.
+ if self._store_dir is not None:
+ self._artifact_store = FileArtifactStore(self._store_dir)
+ else:
+ self._artifact_store = InMemoryArtifactStore()
+
+ # Best-effort: enable AbstractMemory KG effects for workflow agents and memory-enabled flows.
+ #
+ # Without these handlers, VisualFlow nodes like `memory_kg_query` fail at runtime with:
+ # "No effect handler registered for memory_kg_query".
+ extra_effect_handlers: Optional[Dict[Any, Any]] = None
+ try:
+ from abstractmemory import InMemoryTripleStore, LanceDBTripleStore # type: ignore
+ from abstractmemory.embeddings import AbstractGatewayTextEmbedder # type: ignore
+ from abstractruntime.core.runtime import utc_now_iso
+ from abstractruntime.integrations.abstractmemory.effect_handlers import build_memory_kg_effect_handlers
+
+ from .gateway_cli import default_gateway_token, default_gateway_url
+ except Exception:
+ extra_effect_handlers = None
+ else:
+ base_dir: Optional[Path] = None
+ mem_dir_raw = (
+ os.getenv("ABSTRACTCODE_MEMORY_DIR")
+ or os.getenv("ABSTRACTMEMORY_DIR")
+ or os.getenv("ABSTRACTFLOW_MEMORY_DIR")
+ )
+ if isinstance(mem_dir_raw, str) and mem_dir_raw.strip():
+ try:
+ base_dir = Path(mem_dir_raw).expanduser().resolve()
+ except Exception:
+ base_dir = None
+ if base_dir is None and self._store_dir is not None:
+ base_dir = self._store_dir / "abstractmemory"
+
+ embedder = None
+ embed_provider = (
+ os.getenv("ABSTRACTCODE_EMBEDDING_PROVIDER")
+ or os.getenv("ABSTRACTMEMORY_EMBEDDING_PROVIDER")
+ or os.getenv("ABSTRACTFLOW_EMBEDDING_PROVIDER")
+ or os.getenv("ABSTRACTGATEWAY_EMBEDDING_PROVIDER")
+ )
+ if str(embed_provider or "").strip().lower() not in {"__disabled__", "disabled", "none", "off"}:
+ try:
+ gateway_url = str(default_gateway_url() or "").strip()
+ auth_token = default_gateway_token()
+ if gateway_url:
+ embedder = AbstractGatewayTextEmbedder(base_url=gateway_url, auth_token=auth_token)
+ except Exception:
+ embedder = None
+
+ try:
+ if base_dir is None:
+ store_obj = InMemoryTripleStore(embedder=embedder)
+ else:
+ base_dir.mkdir(parents=True, exist_ok=True)
+ store_obj = LanceDBTripleStore(base_dir / "kg", embedder=embedder)
+ extra_effect_handlers = build_memory_kg_effect_handlers(
+ store=store_obj,
+ run_store=run_store,
+ now_iso=utc_now_iso,
+ )
+ except Exception:
+ extra_effect_handlers = None
+
  self._runtime = create_local_runtime(
  provider=self._provider,
  model=self._model,
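Judging from the os.getenv(...) chain added above, the knowledge-graph store location and the embedding provider are driven by environment variables. A hypothetical configuration sketch in Python (the variable names are the ones read above; the values are illustrative, not package defaults):

import os

os.environ["ABSTRACTCODE_MEMORY_DIR"] = "/tmp/abstractcode-memory"   # LanceDB KG data would land under <dir>/kg
os.environ["ABSTRACTCODE_EMBEDDING_PROVIDER"] = "disabled"           # skip the AbstractGateway text embedder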
@@ -329,14 +453,27 @@ class ReactShell:
  run_store=run_store,
  ledger_store=ledger_store,
  tool_executor=tool_executor,
+ artifact_store=self._artifact_store,
+ extra_effect_handlers=extra_effect_handlers,
  )
- # Artifact storage is the durability-safe place for large payloads (including archived memory spans).
- if self._store_dir is not None:
- self._artifact_store = FileArtifactStore(self._store_dir)
- else:
- self._artifact_store = InMemoryArtifactStore()
  self._runtime.set_artifact_store(self._artifact_store)

+ # Workspace root for `@file` mentions / attachments.
+ self._workspace_root: Path = default_workspace_root()
+ self._workspace_mounts: Dict[str, Path] = default_workspace_mounts()
+ self._workspace_mount_ignores: Dict[str, Any] = {}
+ self._workspace_blocked_paths: List[Path] = []
+ try:
+ self._workspace_ignore = self._AbstractIgnore.for_path(self._workspace_root)
+ for name, root in dict(self._workspace_mounts).items():
+ try:
+ self._workspace_mount_ignores[name] = self._AbstractIgnore.for_path(root)
+ except Exception:
+ self._workspace_mount_ignores[name] = None
+ except Exception:
+ self._workspace_ignore = None
+ self._workspace_mount_ignores = {}
+
  if self._workflow_agent_ref is not None:
  try:
  from .workflow_agent import WorkflowAgent
@@ -374,20 +511,50 @@ class ReactShell:
  get_status_text=self._get_status_text,
  on_input=self._handle_input,
  on_copy_payload=self._copy_to_clipboard,
+ on_cancel=self._handle_ui_cancel,
  color=self._color,
+ theme=self._theme,
  )
+ # Keep `@file` chips behavior consistent with the saved preference.
+ try:
+ setter = getattr(self._ui, "set_files_keep", None)
+ if callable(setter):
+ setter(bool(getattr(self, "_files_keep", False)))
+ except Exception:
+ pass
+ # Populate the footer agent selector (best-effort).
+ self._sync_agent_selector_to_ui()
+ # Keep `@file` completion consistent with the shell's workspace policy.
+ try:
+ setter = getattr(self._ui, "set_workspace_policy", None)
+ if callable(setter):
+ setter(
+ workspace_root=self._workspace_root,
+ mounts=dict(self._workspace_mounts),
+ blocked_paths=list(self._workspace_blocked_paths),
+ )
+ except Exception:
+ pass

  # Keep simple session for tool approvals (runs within full-screen)
  self._simple_session = create_simple_session(color=self._color)

  # Pending input for the run loop
  self._pending_input: Optional[str] = None
+ # Async run-loop interrupts (set by UI thread, consumed by run thread).
+ self._pending_conclude_note: Optional[str] = None
+ self._pending_conclude_lock = threading.Lock()

  # Per-turn observability (for copy + traceability)
  self._turn_task: Optional[str] = None
  self._turn_trace: List[str] = []
  # Turn-level timing (for per-answer stats).
  self._turn_started_at: Optional[float] = None
+ # Session-level attachment refs (workspace-relative path -> artifact-backed ref dict).
+ # Avoids re-ingesting the same file every turn when attachments persist.
+ self._attachment_ref_cache: Dict[str, Dict[str, Any]] = {}
+ # Attachment file signatures (path -> (size_bytes, mtime_ns)) used to detect file changes.
+ self._attachment_sig_cache: Dict[str, Tuple[int, int]] = {}
  # Simple in-session dedup for obviously repeated shell commands.
  self._last_execute_command: Optional[str] = None
  self._last_execute_command_result: Optional[Dict[str, Any]] = None
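The two attachment caches introduced above pair a stable attachment key with an artifact-backed reference and a cheap change signature. A rough sketch of their shape, inferred from the AttachmentRef dicts that _ingest_attachments() builds later in this diff (all values are made up):

attachment_ref_cache = {
    "docs/readme.md": {
        "$artifact": "art-123",            # ArtifactStore id (illustrative)
        "filename": "readme.md",
        "content_type": "text/markdown",
        "source_path": "docs/readme.md",
        "sha256": "9f2c...",
    },
}
attachment_sig_cache = {"docs/readme.md": (2048, 1700000000000000000)}  # (size_bytes, mtime_ns)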
@@ -398,7 +565,9 @@ class ReactShell:
  self._pending_tool_markers: List[str] = []
  # Pending tool call metadata (aligned with tool markers/results).
  self._pending_tool_metas: List[Dict[str, Any]] = []
- # Keep the last started run id so /log can show traces even after completion.
+ # Links extracted from the most recent assistant answer (for /links, /open).
+ self._last_answer_links: List[str] = []
+ # Keep the last started run id so /logs can show traces even after completion.
  self._last_run_id: Optional[str] = None
  # Status bar cache (token counting can be expensive; avoid per-frame rescans).
  self._status_cache_key: Optional[Tuple[Any, ...]] = None
@@ -426,6 +595,21 @@ class ReactShell:
  # UI helpers
  # ---------------------------------------------------------------------

+ def _handle_ui_cancel(self) -> None:
+ """Handle an Esc cancel request coming from the full-screen UI."""
+ run_id = self._attached_run_id()
+ if run_id is None:
+ return
+ try:
+ state = self._runtime.get_state(run_id)
+ except Exception:
+ state = None
+
+ status = getattr(state, "status", None) if state is not None else None
+ if status not in (self._RunStatus.RUNNING, self._RunStatus.WAITING):
+ return
+ self._cancel()
+
  def _safe_get_state(self):
  """Safely get agent state, returning None if unavailable.

@@ -445,6 +629,13 @@ class ReactShell:
  # Run doesn't exist (completed/cleaned up) or other error
  return None

+ def _safe_get_active_state(self):
+ """Safely get only the *active* run state (no last-run fallback)."""
+ try:
+ return self._agent.get_state()
+ except (KeyError, Exception):
+ return None
+
  def _select_messages_for_llm(self, state: Any) -> List[Dict[str, Any]]:
  """Return the best-effort LLM-visible message view for a state."""
  if state is None or not hasattr(state, "vars") or not isinstance(getattr(state, "vars", None), dict):
@@ -504,6 +695,9 @@ class ReactShell:
  state: Any,
  messages: List[Dict[str, Any]],
  effective_model: Optional[str] = None,
+ task_text: Optional[str] = None,
+ extra_messages: Optional[List[Dict[str, Any]]] = None,
+ tool_specs: Optional[List[Dict[str, Any]]] = None,
  ) -> Dict[str, Any]:
  """Estimate the next prompt token usage (best effort).

@@ -532,23 +726,27 @@ class ReactShell:
  system_text = ""
  prompt_text = ""

- logic = getattr(self._agent, "logic", None)
- if (
- state is not None
- and logic is not None
- and hasattr(state, "vars")
- and isinstance(getattr(state, "vars", None), dict)
- ):
- context_ns = state.vars.get("context") if isinstance(state.vars.get("context"), dict) else {}
- task = str(context_ns.get("task") or "")
- limits = state.vars.get("_limits") if isinstance(state.vars.get("_limits"), dict) else {}
- try:
- iteration = int(limits.get("current_iteration", 0) or 0) + 1
- max_iterations = int(limits.get("max_iterations", 25) or 25)
- except Exception:
- iteration = 1
- max_iterations = 25
+ vars_ns: Dict[str, Any] = {}
+ if state is not None and hasattr(state, "vars") and isinstance(getattr(state, "vars", None), dict):
+ vars_ns = state.vars
+
+ context_ns = vars_ns.get("context") if isinstance(vars_ns.get("context"), dict) else {}
+ task = str(task_text or context_ns.get("task") or "")

+ limits = vars_ns.get("_limits") if isinstance(vars_ns.get("_limits"), dict) else {}
+ try:
+ iteration = int(limits.get("current_iteration", 0) or 0) + 1
+ max_iterations = int(limits.get("max_iterations", self._max_iterations) or self._max_iterations)
+ except Exception:
+ iteration = 1
+ max_iterations = int(self._max_iterations)
+ if max_iterations < 1:
+ max_iterations = 25
+
+ logic = getattr(self._agent, "logic", None)
+ sys_base = ""
+ req_tools = None
+ if logic is not None:
  try:
  req = logic.build_request(
  task=task,
@@ -556,27 +754,52 @@ class ReactShell:
  guidance="",
  iteration=iteration,
  max_iterations=max_iterations,
- vars=state.vars,
+ vars=vars_ns,
  )
- system_text = str(getattr(req, "system_prompt", "") or "").strip()
+ sys_base = str(getattr(req, "system_prompt", "") or "").strip()
  prompt_text = str(getattr(req, "prompt", "") or "").strip()
+ req_tools = getattr(req, "tools", None)
  except Exception:
- system_text = ""
- prompt_text = ""
+ sys_base = ""
+ prompt_text = task.strip()
+ req_tools = None
+
+ # Apply optional runtime/system prompt overrides.
+ runtime_ns = vars_ns.get("_runtime") if isinstance(vars_ns.get("_runtime"), dict) else {}
+ runtime_override = runtime_ns.get("system_prompt") if isinstance(runtime_ns, dict) else None
+ extra = runtime_ns.get("system_prompt_extra") if isinstance(runtime_ns, dict) else None
+
+ session_override = getattr(self, "_system_prompt_override", None)
+ if isinstance(runtime_override, str) and runtime_override.strip():
+ base_sys = runtime_override.strip()
+ elif isinstance(session_override, str) and session_override.strip():
+ base_sys = session_override.strip()
+ else:
+ base_sys = sys_base

- if self._agent_kind == "memact":
- try:
- from abstractruntime.memory.active_memory import render_memact_system_prompt
+ system_text = base_sys

- mem_prompt = render_memact_system_prompt(state.vars)
- if isinstance(mem_prompt, str) and mem_prompt.strip():
- system_text = (mem_prompt.strip() + ("\n\n" + system_text if system_text else "")).strip()
- except Exception:
- pass
+ if self._agent_kind == "memact" and state is not None and isinstance(vars_ns, dict):
+ try:
+ from abstractruntime.memory.active_memory import render_memact_system_prompt
+
+ mem_prompt = render_memact_system_prompt(vars_ns)
+ if isinstance(mem_prompt, str) and mem_prompt.strip():
+ system_text = (mem_prompt.strip() + ("\n\n" + base_sys if base_sys else "")).strip()
+ except Exception:
+ pass
+
+ if isinstance(extra, str) and extra.strip():
+ system_text = (system_text.rstrip() + "\n\nAdditional system instructions:\n" + extra.strip()).strip()

  # Approximate messages by concatenating content with role labels.
+ merged_messages: List[Dict[str, Any]] = []
+ if isinstance(extra_messages, list) and extra_messages:
+ merged_messages.extend([m for m in extra_messages if isinstance(m, dict)])
+ merged_messages.extend(list(messages or []))
+
  text_parts: List[str] = []
- for m in messages:
+ for m in merged_messages:
  if not isinstance(m, dict):
  continue
  content = str(m.get("content") or "")
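The override logic above resolves the base system prompt in a fixed order: a run-level override stored in vars["_runtime"]["system_prompt"] wins, then the session-level /system override, then the prompt built by the agent logic; MemAct then prepends its memory prompt and any system_prompt_extra is appended at the end. A compact restatement of the precedence (a sketch, not package code):

def pick_base_system_prompt(runtime_override, session_override, agent_built):
    # First non-empty string wins; fall back to the agent-built prompt (possibly empty).
    for candidate in (runtime_override, session_override):
        if isinstance(candidate, str) and candidate.strip():
            return candidate.strip()
    return agent_built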
@@ -591,6 +814,27 @@ class ReactShell:
  messages_tokens = estimate_tokens(joined) if joined else 0
  system_tokens = estimate_tokens(system_text) if system_text else 0
  prompt_tokens = estimate_tokens(prompt_text) if prompt_text else 0
+ tools_tokens = 0
+ if tool_specs is None and isinstance(req_tools, list):
+ # Best-effort: convert ToolDefinition objects to dicts so we can estimate token cost.
+ tool_specs0: List[Dict[str, Any]] = []
+ for t in req_tools:
+ to_dict = getattr(t, "to_dict", None)
+ if callable(to_dict):
+ try:
+ spec = to_dict()
+ except Exception:
+ spec = None
+ if isinstance(spec, dict):
+ tool_specs0.append(spec)
+ tool_specs = tool_specs0
+
+ if isinstance(tool_specs, list) and tool_specs:
+ try:
+ raw = json.dumps(tool_specs, ensure_ascii=False, sort_keys=True, separators=(",", ":"))
+ tools_tokens = estimate_tokens(raw)
+ except Exception:
+ tools_tokens = 0

  return {
  "effective_model": effective_model,
@@ -598,19 +842,52 @@ class ReactShell:
  "system_tokens": system_tokens,
  "prompt_tokens": prompt_tokens,
  "messages_tokens": messages_tokens,
- "tools_tokens": 0,
- "total_tokens": int(system_tokens) + int(prompt_tokens) + int(messages_tokens),
+ "tools_tokens": tools_tokens,
+ "total_tokens": int(system_tokens) + int(prompt_tokens) + int(messages_tokens) + int(tools_tokens),
  }

  def _get_status_text(self) -> str:
  """Generate status text for the status bar."""
  # Keep this fast: the render thread can call this frequently.
- state = self._safe_get_state()
+ state = self._safe_get_active_state()

  effective_model = self._get_effective_model(state)
  messages = self._select_messages_for_llm(state)
  max_tokens = self._resolve_context_max_tokens(state, effective_model=effective_model)

+ # Include pending inputs (draft prompt + attachments) in status estimate.
+ draft_text = ""
+ pending_attachments: List[str] = []
+ try:
+ ui = getattr(self, "_ui", None)
+ getter = getattr(ui, "get_composer_state", None) if ui is not None else None
+ if callable(getter):
+ st = getter()
+ if isinstance(st, dict):
+ draft_text = str(st.get("draft") or "")
+ raw_att = st.get("attachments")
+ if isinstance(raw_att, list):
+ pending_attachments = [str(p).strip() for p in raw_att if isinstance(p, str) and str(p).strip()]
+ except Exception:
+ draft_text = ""
+ pending_attachments = []
+
+ draft_for_tokens = draft_text.strip()
+ if draft_for_tokens.startswith("/"):
+ draft_for_tokens = ""
+
+ sys_override = getattr(self, "_system_prompt_override", None)
+ sys_override_s = str(sys_override or "").strip() if isinstance(sys_override, str) or sys_override is not None else ""
+
+ def _text_sig(s: str) -> tuple[int, str, str]:
+ t = str(s or "")
+ if len(t) <= 96:
+ return (len(t), t, "")
+ return (len(t), t[:48], t[-48:])
+
+ draft_sig = _text_sig(draft_for_tokens)
+ sys_sig = _text_sig(sys_override_s)
+
  # Cache by a cheap signature to avoid rescanning large contexts every frame.
  last = messages[-1] if isinstance(messages, list) and messages else {}
  last_id = ""
@@ -637,6 +914,20 @@ class ReactShell:
  except Exception:
  current_iteration = 0

+ gpu_enabled = bool(getattr(self, "_gpu_monitor_enabled", False))
+ gpu_pct = getattr(self, "_gpu_utilization_pct", None)
+ gpu_err = getattr(self, "_gpu_last_error", None)
+ gpu_err_s = str(gpu_err or "").strip() if isinstance(gpu_err, str) or gpu_err is not None else ""
+ gpu_key: Optional[int] = None
+ if isinstance(gpu_pct, (int, float)):
+ try:
+ gpu_key = int(round(float(gpu_pct)))
+ except Exception:
+ gpu_key = None
+ gpu_err_key = ""
+ if gpu_key is None and gpu_err_s:
+ gpu_err_key = gpu_err_s.split(":", 1)[0][:32]
+
  cache_key = (
  getattr(state, "run_id", None) if state is not None else None,
  len(messages),
@@ -648,13 +939,113 @@ class ReactShell:
  toolset_id,
  max_tokens,
  self._model,
+ draft_sig,
+ tuple(pending_attachments),
+ sys_sig,
+ tuple(self._allowed_tools) if isinstance(self._allowed_tools, list) else None,
+ bool(getattr(self, "_tool_prompt_examples", False)),
+ str(getattr(self, "_tool_executor_server_id", "") or ""),
+ gpu_enabled,
+ gpu_key,
+ gpu_err_key,
  )
  if self._status_cache_key == cache_key and self._status_cache_text:
  return self._status_cache_text

  tokens_used_source = "estimate"
  try:
- est = self._estimate_next_prompt_tokens(state=state, messages=messages, effective_model=effective_model)
+ extra_msgs: List[Dict[str, Any]] = []
+ if pending_attachments:
+ try:
+ # Best-effort: mirror runtime's injected "Session attachments" system message.
+ from abstractruntime.integrations.abstractcore.session_attachments import (
+ render_session_attachments_system_message,
+ )
+
+ entries: List[Dict[str, Any]] = []
+ # Most recent first (match runtime).
+ for rel in reversed(list(pending_attachments)):
+ key = str(rel or "").strip()
+ if not key:
+ continue
+ if not key.startswith("/"):
+ key = normalize_relative_path(key)
+ if not key:
+ continue
+ cached = self._attachment_ref_cache.get(key)
+ artifact_id = str(cached.get("$artifact") or "") if isinstance(cached, dict) else ""
+ sha = str(cached.get("sha256") or "") if isinstance(cached, dict) else ""
+ ct = str(cached.get("content_type") or "") if isinstance(cached, dict) else ""
+ fn = str(cached.get("filename") or "") if isinstance(cached, dict) else ""
+ entry: Dict[str, Any] = {
+ "handle": key,
+ "artifact_id": artifact_id,
+ "filename": fn or key.rsplit("/", 1)[-1],
+ "sha256": sha,
+ "content_type": ct,
+ }
+ entries.append(entry)
+
+ msg = render_session_attachments_system_message(entries, max_entries=20, max_chars=4000)
+ if msg and not (
+ messages
+ and isinstance(messages[0], dict)
+ and messages[0].get("role") == "system"
+ and isinstance(messages[0].get("content"), str)
+ and str(messages[0].get("content") or "")
+ .strip()
+ .startswith(("Stored session attachments", "Session attachments"))
+ ):
+ extra_msgs.append({"role": "system", "content": msg})
+ except Exception:
+ pass
+
+ tool_specs: Optional[List[Dict[str, Any]]] = None
+ try:
+ # Prefer durable tool_specs if present on the active run.
+ runtime_ns = state.vars.get("_runtime") if state is not None and isinstance(getattr(state, "vars", None), dict) else None
+ if isinstance(runtime_ns, dict):
+ raw_specs = runtime_ns.get("tool_specs")
+ if isinstance(raw_specs, list) and raw_specs and all(isinstance(x, dict) for x in raw_specs):
+ tool_specs = list(raw_specs)
+ except Exception:
+ tool_specs = None
+ if tool_specs is None:
+ try:
+ logic = getattr(self._agent, "logic", None)
+ tool_defs = getattr(logic, "tools", None) if logic is not None else None
+ allow = list(self._allowed_tools) if isinstance(self._allowed_tools, list) else None
+ if isinstance(tool_defs, list) and tool_defs:
+ tool_by_name = {t.name: t for t in tool_defs if getattr(t, "name", None)}
+ ordered = [t.name for t in tool_defs if getattr(t, "name", None)]
+ if allow is not None:
+ ordered = [n for n in ordered if str(n) in set(allow)]
+ built: List[Dict[str, Any]] = []
+ for name in ordered:
+ tool = tool_by_name.get(str(name))
+ if tool is None:
+ continue
+ to_dict = getattr(tool, "to_dict", None)
+ if callable(to_dict):
+ try:
+ spec = to_dict()
+ except Exception:
+ spec = None
+ if isinstance(spec, dict):
+ built.append(spec)
+ if built:
+ tool_specs = built
+ except Exception:
+ tool_specs = None
+
+ est = self._estimate_next_prompt_tokens(
+ state=state,
+ messages=messages,
+ effective_model=effective_model,
+ task_text=draft_for_tokens,
+ extra_messages=extra_msgs,
+ tool_specs=tool_specs,
+ )
  tokens_used_source = str(est.get("source") or "estimate")
  tokens_used = int(est.get("total_tokens") or 0)
  except Exception:
@@ -669,6 +1060,24 @@ class ReactShell:
  pct = (tokens_used / max_tokens) * 100 if max_tokens > 0 else 0.0
  label = "Context" if tokens_used_source == "provider" else "Context(next)"
  status = f"{self._provider} | {self._model} | {label}: {tokens_used:,}/{max_tokens:,} tk ({pct:.0f}%)"
+ if gpu_enabled:
+ if gpu_key is None:
+ if gpu_err_s:
+ if gpu_err_s.startswith("http_"):
+ code = gpu_err_s.split(":", 1)[0].replace("http_", "").strip()
+ status = f"{status} | GPU {code or 'n/a'}"
+ else:
+ short = gpu_err_s.split(":", 1)[0].strip()
+ if len(short) > 12:
+ short = short[:12] + "…"
+ status = f"{status} | GPU {short or 'n/a'}"
+ else:
+ status = f"{status} | GPU n/a"
+ else:
+ try:
+ status = f"{status} | {self._format_gpu_meter(float(gpu_pct))}"
+ except Exception:
+ status = f"{status} | GPU n/a"
  self._status_cache_key = cache_key
  self._status_cache_text = status
  return status
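With the GPU meter enabled and a reading available, the status line assembled above would look roughly like the following (illustrative provider/model/values; the exact GPU segment comes from _format_gpu_meter, which is not shown in this hunk):

ollama | qwen3-coder | Context(next): 12,345/32,768 tk (38%) | GPU 47%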
@@ -1260,7 +1669,18 @@ class ReactShell:
  return "\n".join(out)

  bg = "\033[48;5;238m"
- fg = "\033[38;5;255m"
+ try:
+ theme = getattr(self, "_theme", None)
+ t = theme.normalized() if isinstance(theme, Theme) else theme_from_env().normalized()
+ bg_target = "#ffffff" if is_dark(t.surface) else "#000000"
+ bg_hex = blend_hex(t.surface, bg_target, 0.08)
+ fg_target = "#ffffff" if is_dark(bg_hex) else "#000000"
+ fg_hex = blend_hex(t.muted, fg_target, 0.90)
+ bg = ansi_bg(bg_hex) or bg
+ fallback_fg = "\033[38;5;255m" if is_dark(bg_hex) else "\033[38;5;0m"
+ fg = ansi_fg(fg_hex) or fallback_fg
+ except Exception:
+ fg = "\033[38;5;255m"
  reset = _C.RESET

  def style_full(line_text: str) -> str:
@@ -1287,21 +1707,23 @@ class ReactShell:
  out_lines.append("")
  return "\n".join(out_lines)

- def _handle_input(self, text: str) -> None:
+ def _handle_input(self, inp: SubmittedInput) -> None:
  """Handle user input from the UI (called from worker thread)."""
  import uuid

+ attachment_paths: List[str] = []
+ text = ""
+ if isinstance(inp, SubmittedInput):
+ text = str(inp.text or "")
+ if isinstance(inp.attachments, list):
+ attachment_paths = [str(p) for p in inp.attachments if isinstance(p, str) and p.strip()]
+ else: # pragma: no cover
+ text = str(inp or "")
+
  text = text.strip()
  if not text:
  return

- # Echo user input (styled so user prompts are easy to spot).
- copy_id = f"user_{uuid.uuid4().hex}"
- self._ui.register_copy_payload(copy_id, text)
- ts_text = self._format_timestamp_short(_now_iso())
- footer = _style(ts_text, _C.DIM, enabled=self._color) if ts_text else ""
- self._print(self._format_user_prompt_block(text, copy_id=copy_id, footer=footer))
-
  cmd = text.strip()

  if cmd.startswith("/"):
@@ -1323,18 +1745,30 @@ class ReactShell:
  "max-messages",
  "max_messages",
  "memory",
+ "files",
+ "files-keep",
+ "files_keep",
  "plan",
  "review",
+ "config",
  "compact",
  "spans",
  "expand",
  "vars",
  "var",
- "log",
+ "whitelist",
+ "blacklist",
+ "logs",
  "memorize",
  "recall",
  "copy",
  "mouse",
+ "agent",
+ "theme",
+ "system",
+ "gpu",
+ "links",
+ "open",
  "flow",
  "history",
  "resume",
@@ -1353,8 +1787,361 @@ class ReactShell:
  self._print(_style(f"Try: /{lower}", _C.DIM, enabled=self._color))
  return

- # Otherwise treat as a task
- self._start(cmd)
+ # Otherwise treat as a task. Allow both:
+ # - chips-based attachments (from `@` completion), and
+ # - inline `@file` mentions typed manually.
+ from .file_mentions import extract_at_file_mentions
+
+ cleaned_cmd, mentions = extract_at_file_mentions(cmd)
+ cleaned_cmd_text = str(cleaned_cmd or "").strip()
+ paths: List[str] = []
+ for p in attachment_paths:
+ norm = self._normalize_attachment_token(p)
+ if norm:
+ paths.append(norm)
+ mention_paths: List[str] = []
+ for m in mentions:
+ norm = self._normalize_attachment_token(m)
+ if norm:
+ paths.append(norm)
+ mention_paths.append(norm)
+
+ # Persist manual `@file` mentions into the attachment chips bar when:
+ # - files_keep is enabled (attachments persist across turns), OR
+ # - this is an attachment-only message (used to stage chips for the next prompt).
+ persist_mentions = bool(mention_paths) and (bool(getattr(self, "_files_keep", False)) or not cleaned_cmd_text)
+ if persist_mentions:
+ try:
+ adder = getattr(self._ui, "add_attachments", None)
+ if callable(adder):
+ adder(mention_paths)
+ except Exception:
+ pass
+
+ # De-dup while preserving order.
+ seen: set[str] = set()
+ paths = [p for p in paths if not (p in seen or seen.add(p))]
+
+ # Ingest missing attachments once; reuse cached refs across turns.
+ newly_added: List[str] = []
+ if paths:
+ def _attachment_sig(key: str) -> Optional[Tuple[int, int]]:
+ resolved = self._resolve_attachment_file(key)
+ if resolved is None:
+ return None
+ p, _source_path = resolved
+ try:
+ st = p.stat()
+ size = int(st.st_size)
+ mtime_ns = int(getattr(st, "st_mtime_ns", int(st.st_mtime * 1e9)))
+ return (size, mtime_ns)
+ except Exception:
+ return None
+
+ needs_ingest: List[str] = []
+ for rel in paths:
+ cached = self._attachment_ref_cache.get(rel)
+ if not isinstance(cached, dict) or not cached:
+ needs_ingest.append(rel)
+ continue
+ cached_sig = self._attachment_sig_cache.get(rel)
+ cur_sig = _attachment_sig(rel)
+ if cached_sig is not None and cur_sig is not None and cached_sig != cur_sig:
+ needs_ingest.append(rel)
+
+ if needs_ingest:
+ for ref in self._ingest_attachments(needs_ingest):
+ if not isinstance(ref, dict):
+ continue
+ key = str(ref.get("source_path") or "").strip()
+ if not key:
+ continue
+ prev = self._attachment_ref_cache.get(key)
+ prev_sha = str(prev.get("sha256") or "").strip() if isinstance(prev, dict) else ""
+ # Strip internal fields before caching/passing to the agent.
+ clean = {k: v for k, v in dict(ref).items() if isinstance(k, str) and not k.startswith("_")}
+ sig0 = ref.get("_sig") if isinstance(ref.get("_sig"), dict) else {}
+ try:
+ size0 = int((sig0 or {}).get("size_bytes"))
+ except Exception:
+ size0 = -1
+ try:
+ mtime0 = int((sig0 or {}).get("mtime_ns"))
+ except Exception:
+ mtime0 = -1
+ existed = isinstance(prev, dict)
+ self._attachment_ref_cache[key] = clean
+ if size0 >= 0 and mtime0 >= 0:
+ self._attachment_sig_cache[key] = (size0, mtime0)
+ if not existed:
+ newly_added.append(key)
+ else:
+ # Only report as "new" when the content hash changed (version update).
+ next_sha = str(clean.get("sha256") or "").strip()
+ if next_sha and prev_sha and next_sha != prev_sha:
+ newly_added.append(key)
+
+ attachment_refs: List[Dict[str, Any]] = []
+ for p in paths:
+ ref = self._attachment_ref_cache.get(p)
+ if isinstance(ref, dict):
+ attachment_refs.append(dict(ref))
+
+ def _display_attachment_token(token: str) -> str:
+ t = str(token or "").strip()
+ if not t:
+ return ""
+ norm = t.replace("\\", "/")
+ if norm.startswith("/") or (len(norm) >= 3 and norm[1] == ":" and norm[2] in ("/", "\\")):
+ return norm.rsplit("/", 1)[-1] or t
+ return t
+
+ def _display_attachment_ref(ref: Dict[str, Any]) -> str:
+ filename = str(ref.get("filename") or "").strip()
+ source_path = str(ref.get("source_path") or "").strip()
+ return _display_attachment_token(filename or source_path or "?")
+
+ # Attachment-only message: update the session attachments and don't start a run.
+ if not cleaned_cmd_text and attachment_refs:
+ joined = ", ".join([_display_attachment_ref(a) for a in attachment_refs if isinstance(a, dict)])
+ if joined:
+ self._print(_style(f"Attachments: {joined}", _C.DIM, enabled=self._color))
+ return
+
+ if not cleaned_cmd_text:
+ return
+
+ # Echo cleaned user prompt (without `@file` mentions).
+ copy_id = f"user_{uuid.uuid4().hex}"
+ self._ui.register_copy_payload(copy_id, cleaned_cmd_text)
+ ts_text = self._format_timestamp_short(_now_iso())
+ footer = _style(ts_text, _C.DIM, enabled=self._color) if ts_text else ""
+ self._print(self._format_user_prompt_block(cleaned_cmd_text, copy_id=copy_id, footer=footer))
+
+ if newly_added:
+ joined = ", ".join([_display_attachment_token(k) for k in newly_added if str(k or "").strip()])
+ self._print(_style(f"Attachments: {joined}", _C.DIM, enabled=self._color))
+
+ self._start(cleaned_cmd_text, attachments=attachment_refs or None)
+
+ def _normalize_attachment_token(self, raw_path: str) -> str:
+ """Normalize an attachment token into a stable key.
+
+ Returns either:
+ - a workspace "virtual path" (`docs/readme.md`, `Desktop/foo.png`), or
+ - a canonical absolute path (`/Users/.../Desktop/foo.png`) for local files outside the workspace.
+ """
+ tok = str(raw_path or "").strip()
+ if not tok:
+ return ""
+ if len(tok) >= 2 and tok[0] == tok[-1] and tok[0] in ("'", '"'):
+ tok = tok[1:-1].strip()
+ if not tok:
+ return ""
+
+ if tok.lower().startswith("file://"):
+ try:
+ from urllib.parse import urlparse, unquote
+
+ parsed = urlparse(tok)
+ tok = unquote(parsed.path) if parsed.scheme == "file" else tok[7:]
+ except Exception:
+ tok = tok[7:]
+ tok = str(tok or "").strip()
+ if not tok:
+ return ""
+
+ tok_ws = tok.replace("\\", "/")
+ while tok_ws.startswith("./"):
+ tok_ws = tok_ws[2:]
+
+ try:
+ _p, virt, _mount, _root = resolve_workspace_path(
+ raw_path=tok_ws,
+ workspace_root=self._workspace_root,
+ mounts=dict(self._workspace_mounts or {}),
+ )
+ key = normalize_relative_path(str(virt or ""))
+ if key:
+ return key
+ except Exception:
+ pass
+
+ p = self._resolve_local_attachment_file(tok)
+ return str(p) if p is not None else ""
+
+ def _resolve_workspace_file(self, rel_path: str) -> Optional[tuple[Path, str]]:
+ rel = normalize_relative_path(rel_path)
+ if not rel:
+ return None
+ try:
+ p, virt, mount, root = resolve_workspace_path(
+ raw_path=rel,
+ workspace_root=self._workspace_root,
+ mounts=dict(self._workspace_mounts or {}),
+ )
+ del root
+ except Exception:
+ return None
+
+ blocked = list(self._workspace_blocked_paths or [])
+ for b in blocked:
+ if not isinstance(b, Path):
+ continue
+ try:
+ if p.resolve() == b.resolve():
+ return None
+ p.resolve().relative_to(b.resolve())
+ return None
+ except Exception:
+ continue
+
+ try:
+ if not p.is_file():
+ return None
+ except Exception:
+ return None
+ try:
+ ign = self._workspace_ignore if mount is None else self._workspace_mount_ignores.get(str(mount))
+ if ign is not None and ign.is_ignored(p, is_dir=False):
+ return None
+ except Exception:
+ pass
+ return (p, virt)
+
+ def _resolve_local_attachment_file(self, raw_path: str) -> Optional[Path]:
+ tok = str(raw_path or "").strip()
+ if not tok:
+ return None
+ try:
+ p = Path(tok).expanduser()
+ if not p.is_absolute():
+ return None
+ p = p.resolve()
+ except Exception:
+ return None
+
+ blocked = list(self._workspace_blocked_paths or [])
+ for b in blocked:
+ if not isinstance(b, Path):
+ continue
+ try:
+ if p.resolve() == b.resolve():
+ return None
+ p.resolve().relative_to(b.resolve())
+ return None
+ except Exception:
+ continue
+
+ try:
+ if not p.is_file():
+ return None
+ except Exception:
+ return None
+ return p
+
+ def _resolve_attachment_file(self, token: str) -> Optional[tuple[Path, str]]:
+ key = str(token or "").strip()
+ if not key:
+ return None
+ local = self._resolve_local_attachment_file(key)
+ if local is not None:
+ return (local, str(local))
+ return self._resolve_workspace_file(key)
+
+ def _max_attachment_bytes(self) -> int:
+ raw = os.environ.get("ABSTRACTCODE_MAX_ATTACHMENT_BYTES") or os.environ.get("ABSTRACTGATEWAY_MAX_ATTACHMENT_BYTES")
+ try:
+ n = int(raw) if raw is not None else 25 * 1024 * 1024
+ except Exception:
+ n = 25 * 1024 * 1024
+ return max(1, n)
+
+ def _session_memory_run_id(self) -> Optional[str]:
+ try:
+ ensure = getattr(self._agent, "_ensure_session_id", None)
+ if callable(ensure):
+ sid = ensure()
+ if isinstance(sid, str) and sid.strip():
+ return f"session_memory_{sid.strip()}"
+ except Exception:
+ return None
+ return None
+
+ def _ingest_attachments(self, paths: Sequence[str]) -> List[Dict[str, Any]]:
+ """Store attachment files in ArtifactStore and return AttachmentRefs (best-effort)."""
+ import hashlib
+ import mimetypes
+
+ keys: list[str] = [str(p or "").strip() for p in (paths or []) if str(p or "").strip()]
+ if not keys:
+ return []
+
+ max_bytes = self._max_attachment_bytes()
+ run_id = self._session_memory_run_id()
+ out: List[Dict[str, Any]] = []
+
+ for key in keys:
+ resolved = self._resolve_attachment_file(key)
+ if resolved is None:
+ self._print(_style(f"Attachment ignored/not found: {key}", _C.YELLOW, enabled=self._color))
+ continue
+ p, source_path = resolved
+
+ try:
+ size = int(p.stat().st_size)
+ mtime_ns = int(getattr(p.stat(), "st_mtime_ns", int(p.stat().st_mtime * 1e9)))
+ except Exception:
+ size = -1
+ mtime_ns = -1
+ if size >= 0 and size > max_bytes:
+ self._print(
+ _style(
+ f"Attachment too large ({size:,} bytes > {max_bytes:,}): {source_path}",
+ _C.YELLOW,
+ enabled=self._color,
+ )
+ )
+ continue
+
+ try:
+ content = p.read_bytes()
+ except Exception as e:
+ self._print(_style(f"Attachment read failed: {source_path} ({e})", _C.YELLOW, enabled=self._color))
+ continue
+
+ sha256 = hashlib.sha256(bytes(content)).hexdigest()
+ ct = mimetypes.guess_type(p.name)[0] or "application/octet-stream"
+ try:
+ source = "local" if str(source_path).startswith("/") else "workspace"
+ meta = self._artifact_store.store(
+ bytes(content),
+ content_type=str(ct),
+ run_id=run_id,
+ tags={
+ "kind": "attachment",
+ "source": source,
+ "path": source_path,
+ "filename": str(p.name),
+ "sha256": sha256,
+ },
+ )
+ except Exception as e:
+ self._print(_style(f"Attachment ingest failed: {source_path} ({e})", _C.YELLOW, enabled=self._color))
+ continue
+
+ out.append(
+ {
+ "$artifact": str(meta.artifact_id),
+ "filename": str(p.name),
+ "content_type": str(ct),
+ "source_path": str(source_path),
+ "sha256": sha256,
+ "_sig": {"size_bytes": size, "mtime_ns": mtime_ns},
+ }
+ )
+
+ return out

  def _build_answer_copy_payload(self, *, answer_text: str, prompt_text: Optional[str] = None) -> str:
  """Build the payload for the assistant copy button (best-effort, lossless)."""
@@ -1381,7 +2168,11 @@ class ReactShell:
  answer = "" if answer_text is None else str(answer_text)
  if not answer.strip():
  answer = "(no assistant answer produced yet)"
-
+ try:
+ self._last_answer_links = self._extract_links(answer)
+ except Exception:
+ self._last_answer_links = []
+
  copy_id = f"assistant_{uuid.uuid4().hex}"
  payload = self._build_answer_copy_payload(answer_text=answer, prompt_text=prompt_text)
  self._ui.register_copy_payload(copy_id, payload)
@@ -1390,7 +2181,11 @@ class ReactShell:
  self._print(_style("─" * 60, _C.DIM, enabled=self._color))
  # Render Markdown for the terminal, but keep copy payload lossless (raw answer).
  try:
- renderer = TerminalMarkdownRenderer(color=self._color)
+ renderer = TerminalMarkdownRenderer(
+ color=self._color,
+ theme=getattr(self, "_theme", None),
+ width=self._terminal_width(),
+ )
  rendered = renderer.render(answer)
  except Exception:
  rendered = answer
@@ -1461,12 +2256,14 @@ class ReactShell:
  This uses blocking_prompt which queues a response and waits for user input.
  """
  result = self._ui.blocking_prompt(message)
+ if result == BLOCKING_PROMPT_CANCEL_TOKEN:
+ return result
  if result:
  self._print(f" → {result}")
  return result.strip()

  def _banner(self) -> None:
- self._print(_style("AbstractCode (MVP)", _C.CYAN, _C.BOLD, enabled=self._color))
+ self._print(_style("AbstractCode", _C.CYAN, _C.BOLD, enabled=self._color))
  self._print(_style("─" * 60, _C.DIM, enabled=self._color))
  self._print(f"Provider: {self._provider} Model: {self._model}")
  if self._base_url:
@@ -1491,23 +2288,41 @@ class ReactShell:
  # Show the agent's actual "thinking" (rationale) when it is about to act.
  # We only print this for tool-using iterations to avoid duplicating final answers.
  has_tool_calls = bool(data.get("has_tool_calls"))
- content = str(data.get("content", "") or "")
- if has_tool_calls and content.strip():
+ if has_tool_calls:
  import uuid

- text = content.strip()
- self._turn_trace.append("Thought:\n" + text)
+ it = data.get("iteration", "?")
+ max_it = data.get("max_iterations", "?")
+ content = str(data.get("content", "") or "")
+ reasoning = str(data.get("reasoning", "") or "")
+ text = content.strip() or reasoning.strip()
+
  fid = f"thought_{uuid.uuid4().hex}"
- header = _style("Thought", _C.ORANGE, _C.BOLD, enabled=self._color)
- lines = text.splitlines() or [""]
- first_line = lines[0].strip()
- if len(first_line) > 200:
- first_line = first_line[:199] + ""
- visible = ["", f"[[FOLD:{fid}]]{header}", _style(f" {first_line}", _C.ORANGE, enabled=self._color)]
- # Hidden part shows the remaining thought (avoid duplicating the first line).
- rest = lines[1:] if len(lines) > 1 else []
- hidden = [_style(f" {line}" if line else " ", _C.ORANGE, enabled=self._color) for line in (rest or [""])]
- hidden.append("")
+ header = _style(f"Cycle {it}/{max_it}", _C.ORANGE, _C.BOLD, enabled=self._color)
+
+ if text:
+ self._turn_trace.append(f"Cycle {it}/{max_it} thought:\n{text}".rstrip())
+ lines = text.splitlines() or [""]
+ first_line = lines[0].strip()
+ if len(first_line) > 200:
+ first_line = first_line[:199] + "…"
+ visible = ["", f"[[FOLD:{fid}]]{header}", _style(f" {first_line}", _C.ORANGE, enabled=self._color)]
+ # Hidden part shows the remaining rationale (avoid duplicating the first line).
+ rest = lines[1:] if len(lines) > 1 else []
+ hidden = [
+ _style(f" {line}" if line else " ", _C.ORANGE, enabled=self._color)
+ for line in (rest or [""])
+ ]
+ hidden.append("")
+ else:
+ self._turn_trace.append(f"Cycle {it}/{max_it} thought: (none)")
+ visible = [
+ "",
+ f"[[FOLD:{fid}]]{header}",
+ _style(" (model returned tool calls without rationale text)", _C.DIM, enabled=self._color),
+ ]
+ hidden = [_style(" (no rationale provided)", _C.DIM, enabled=self._color), ""]
+
  self._ui_append_fold_region(fold_id=fid, visible_lines=visible, hidden_lines=hidden, collapsed=True)
  elif step == "act":
  import uuid
@@ -1646,7 +2461,7 @@ class ReactShell:
  answer_text = str(data.get("answer", "") or "")
  self._print_answer_block(title="ANSWER", answer_text=answer_text, state=self._safe_get_state())
  elif step == "status":
- # Workflow-driven status update (e.g., VisualFlow emit_event name="abstractcode.status").
+ # Workflow-driven status update (e.g., VisualFlow emit_event name="abstract.status").
  text = str(data.get("text", "") or "").strip()
  dur_raw = data.get("duration")
  dur: Optional[float]
@@ -1664,7 +2479,7 @@ class ReactShell:
  else:
  self._ui.set_spinner(text, duration_s=dur)
  elif step == "message":
- # Workflow-driven message notification (e.g., VisualFlow emit_event name="abstractcode.message").
+ # Workflow-driven message notification (e.g., VisualFlow emit_event name="abstract.message").
  text = str(data.get("text") or data.get("message") or "").rstrip()
  if not text.strip():
  return
@@ -1708,7 +2523,7 @@ class ReactShell:
  def run(self) -> None:
  # Build initial banner text
  banner_lines = []
- banner_lines.append(_style("AbstractCode (MVP)", _C.CYAN, _C.BOLD, enabled=self._color))
+ banner_lines.append(_style("AbstractCode", _C.CYAN, _C.BOLD, enabled=self._color))
  banner_lines.append(_style("─" * 60, _C.DIM, enabled=self._color))
  banner_lines.append(f"Provider: {self._provider} Model: {self._model}")
  if self._base_url:
@@ -1723,17 +2538,81 @@ class ReactShell:
  banner_lines.append(_style("Type '/help' for commands.", _C.DIM, enabled=self._color))
  banner_lines.append("")

- # Add tools list to banner
+ # Add tools list to banner (dynamic; mirrors `/tools`).
+ try:
+ if callable(getattr(self, "_maybe_sync_executor_tools", None)):
+ self._maybe_sync_executor_tools()
+ except Exception:
+ pass
+
+ def _param_names(raw: object) -> List[str]:
+ if not isinstance(raw, dict):
+ return []
+ props = raw.get("properties") if isinstance(raw.get("properties"), dict) else None
+ if isinstance(props, dict) and props:
+ return sorted([str(k) for k in props.keys() if isinstance(k, str) and k.strip()])
+ skip = {"type", "properties", "required", "description", "additionalProperties", "$schema", "title"}
+ keys = [str(k) for k in raw.keys() if isinstance(k, str) and k not in skip and k.strip()]
+ return sorted(keys)
+
+ def _available_tools_for_banner() -> List[tuple[str, List[str]]]:
+ out: List[tuple[str, List[str]]] = []
+ logic = getattr(self._agent, "logic", None)
+ tools = getattr(logic, "tools", None) if logic is not None else None
+ if isinstance(tools, list):
+ for t in tools:
+ name = getattr(t, "name", None)
+ if not isinstance(name, str) or not name.strip():
+ continue
+ params = _param_names(getattr(t, "parameters", None))
+ if not params:
+ td = getattr(t, "_tool_definition", None)
+ params = _param_names(getattr(td, "parameters", None)) if td is not None else []
+ out.append((name.strip(), params))
+ if not out:
+ for name, spec in sorted((self._tool_specs or {}).items()):
+ params = sorted((spec.parameters or {}).keys())
+ out.append((name, params))
+ out.sort(key=lambda x: x[0])
+ return out
+
+ def _fmt_tool_sig(name: str, params: List[str]) -> str:
+ n = str(name or "").strip()
+ ps = [str(p).strip() for p in (params or []) if str(p).strip()]
+ if not self._color:
+ inside = ", ".join(ps)
+ return f"- {n}({inside})"
+ try:
+ t = getattr(self, "_theme", None)
+ tn = t.normalized() if isinstance(t, Theme) else theme_from_env().normalized()
+ name_fg = ansi_fg(tn.primary)
+ param_fg = ansi_fg(tn.secondary)
+ except Exception:
+ name_fg = ""
+ param_fg = ""
+ dim = _C.DIM
+ bold = _C.BOLD
+ reset = _C.RESET
+ parts: List[str] = ["- ", name_fg, bold, n, reset, dim, "(", reset]
+ for i, p in enumerate(ps):
+ if i:
+ parts.extend([dim, ", ", reset])
+ parts.extend([param_fg, p, reset])
+ parts.extend([dim, ")", reset])
+ return "".join(parts)
+
  banner_lines.append(_style("Available tools", _C.CYAN, _C.BOLD, enabled=self._color))
  banner_lines.append(_style("─" * 60, _C.DIM, enabled=self._color))
- for name, spec in sorted(self._tool_specs.items()):
- params = ", ".join(sorted((spec.parameters or {}).keys()))
- banner_lines.append(f"- {name}({params})")
+ for name, params in _available_tools_for_banner():
+ banner_lines.append(_fmt_tool_sig(name, params))
  banner_lines.append(_style("─" * 60, _C.DIM, enabled=self._color))

  if self._state_file:
  self._try_load_state()

+ # Optional host GPU meter (polled from AbstractGateway).
+ self._start_gpu_monitor()
+
  # Run the UI loop - this stays in full-screen mode continuously.
  # All input is handled by _handle_input() via the worker thread.
  self._ui.run_loop(banner="\n".join(banner_lines))
@@ -1775,6 +2654,9 @@ class ReactShell:
  if command == "review":
  self._handle_review(arg)
  return False
+ if command == "config":
+ self._handle_config(arg)
+ return False
  if command == "resume":
  self._resume()
  return False
@@ -1784,6 +2666,9 @@ class ReactShell:
  if command == "cancel":
  self._cancel()
  return False
+ if command == "conclude":
+ self._conclude(arg)
+ return False
  if command == "history":
  sub = arg.strip()
  if sub:
@@ -1841,12 +2726,42 @@ class ReactShell:
  if command in ("vars", "var"):
  self._handle_vars(arg)
  return False
- if command == "log":
+ if command == "whitelist":
+ self._handle_whitelist(arg)
+ return False
+ if command == "blacklist":
+ self._handle_blacklist(arg)
+ return False
+ if command == "logs":
  self._handle_log(arg)
  return False
  if command == "mouse":
  self._handle_mouse_toggle()
  return False
+ if command == "agent":
+ self._handle_agent(arg)
+ return False
+ if command == "theme":
+ self._handle_theme(arg)
+ return False
+ if command == "files":
+ self._handle_files(arg)
+ return False
+ if command in ("files-keep", "files_keep"):
+ self._handle_files_keep(arg)
+ return False
+ if command == "system":
+ self._handle_system(arg)
+ return False
+ if command == "gpu":
+ self._handle_gpu(arg)
+ return False
+ if command == "links":
+ self._handle_links(arg)
+ return False
+ if command == "open":
+ self._handle_open(arg)
+ return False
  if command == "copy":
  self._handle_copy(arg)
  return False
@@ -1854,19 +2769,815 @@ class ReactShell:
1854
2769
  self._handle_flow(arg)
1855
2770
  return False
1856
2771
 
1857
- self._print(_style(f"Unknown command: /{command}", _C.YELLOW, enabled=self._color))
1858
- self._print(_style("Type /help for commands.", _C.DIM, enabled=self._color))
1859
- return False
2772
+ self._print(_style(f"Unknown command: /{command}", _C.YELLOW, enabled=self._color))
2773
+ self._print(_style("Type /help for commands.", _C.DIM, enabled=self._color))
2774
+ return False
2775
+
2776
+ def _default_bundles_dir(self) -> Path:
2777
+ try:
2778
+ from abstractruntime.workflow_bundle import default_workflow_bundles_dir # type: ignore
2779
+
2780
+ return default_workflow_bundles_dir()
2781
+ except Exception:
2782
+ candidate = Path("flows") / "bundles"
2783
+ if candidate.exists() and candidate.is_dir():
2784
+ return candidate
2785
+ return Path("flows")
2786
+
2787
+ def _normalize_agent_selector_key(self, raw: str) -> str:
2788
+ s = str(raw or "").strip()
2789
+ if not s:
2790
+ return "react"
2791
+ lower = s.lower()
2792
+ if lower in ("react", "codeact", "memact"):
2793
+ return lower
2794
+
2795
+ # Support "bundle_id:flow_id" (same as the web UI). Avoid Windows drive letters.
2796
+ if ":" in s and not re.match(r"^[A-Za-z]:[\\\\/]", s):
2797
+ left, _right = s.split(":", 1)
2798
+ if left.strip():
2799
+ s = left.strip()
2800
+
2801
+ # If this is a bundle path, prefer manifest.bundle_id.
2802
+ try:
2803
+ p = Path(s).expanduser()
2804
+ if p.exists() and p.is_file() and str(p.suffix or "").lower() == ".flow":
2805
+ try:
2806
+ from abstractruntime.workflow_bundle import open_workflow_bundle # type: ignore
2807
+
2808
+ bid = str(open_workflow_bundle(p).manifest.bundle_id or "").strip()
2809
+ if bid:
2810
+ return bid
2811
+ except Exception:
2812
+ pass
2813
+ except Exception:
2814
+ pass
2815
+
2816
+ if s.lower().endswith(".flow"):
2817
+ s = s[:-5]
2818
+ if "@" in s:
2819
+ base, _ver = s.split("@", 1)
2820
+ s = base.strip()
2821
+ return s or lower
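As a rough, standalone illustration of the string-only branches above (the on-disk `.flow` manifest lookup is skipped because it needs a real bundle; the selector names are made up):

    def normalize_selector(s: str) -> str:
        # Sketch of the non-filesystem branches of _normalize_agent_selector_key.
        s = s.strip()
        if s.lower() in ("react", "codeact", "memact"):
            return s.lower()
        if ":" in s:                     # "bundle_id:flow_id" -> bundle_id
            s = s.split(":", 1)[0].strip()
        if s.lower().endswith(".flow"):  # drop a trailing ".flow"
            s = s[:-5]
        if "@" in s:                     # drop an "@version" suffix
            s = s.split("@", 1)[0].strip()
        return s

    print(normalize_selector("deep_research@0.2.0"))  # deep_research
    print(normalize_selector("deep_research:main"))   # deep_research
    print(normalize_selector("CodeAct"))              # codeact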
2822
+
2823
+ def _discover_bundled_agent_templates(self) -> List[_BundledAgentTemplate]:
2824
+ """Best-effort: list bundled `.flow` entrypoints implementing `abstractcode.agent.v1`."""
2825
+ try:
2826
+ from abstractruntime.workflow_bundle import open_workflow_bundle # type: ignore
2827
+ except Exception:
2828
+ return []
2829
+
2830
+ bundles_dir = self._default_bundles_dir()
2831
+ if not bundles_dir.exists() or not bundles_dir.is_dir():
2832
+ return []
2833
+
2834
+ try:
2835
+ from packaging.version import Version
2836
+ except Exception: # pragma: no cover
2837
+
2838
+ def Version(v: str) -> Any: # type: ignore[misc]
2839
+ return v
2840
+
2841
+ best_by_id: Dict[str, Tuple[Any, _BundledAgentTemplate]] = {}
2842
+ for path in sorted(bundles_dir.glob("*.flow")):
2843
+ if not path.is_file():
2844
+ continue
2845
+ try:
2846
+ bundle = open_workflow_bundle(path)
2847
+ except Exception:
2848
+ continue
2849
+ man = bundle.manifest
2850
+ bundle_id = str(getattr(man, "bundle_id", "") or "").strip()
2851
+ if not bundle_id:
2852
+ continue
2853
+ bundle_version = str(getattr(man, "bundle_version", "") or "0.0.0").strip() or "0.0.0"
2854
+
2855
+ eps = []
2856
+ for ep in getattr(man, "entrypoints", None) or []:
2857
+ interfaces = list(getattr(ep, "interfaces", None) or [])
2858
+ if "abstractcode.agent.v1" not in interfaces:
2859
+ continue
2860
+ eps.append(ep)
2861
+ if not eps:
2862
+ continue
2863
+
2864
+ default_flow_id = str(getattr(man, "default_entrypoint", "") or "").strip()
2865
+ chosen = None
2866
+ if default_flow_id:
2867
+ chosen = next((ep for ep in eps if str(getattr(ep, "flow_id", "") or "").strip() == default_flow_id), None)
2868
+ if chosen is None:
2869
+ chosen = eps[0]
2870
+
2871
+ flow_id = str(getattr(chosen, "flow_id", "") or "").strip()
2872
+ if not flow_id:
2873
+ continue
2874
+
2875
+ tpl = _BundledAgentTemplate(
2876
+ bundle_id=bundle_id,
2877
+ bundle_version=bundle_version,
2878
+ flow_id=flow_id,
2879
+ name=str(getattr(chosen, "name", "") or "").strip() or bundle_id,
2880
+ description=str(getattr(chosen, "description", "") or ""),
2881
+ )
2882
+
2883
+ try:
2884
+ vkey = Version(bundle_version)
2885
+ except Exception:
2886
+ vkey = bundle_version
2887
+
2888
+ prev = best_by_id.get(bundle_id)
2889
+ if prev is None or vkey > prev[0]:
2890
+ best_by_id[bundle_id] = (vkey, tpl)
2891
+
2892
+ out = [tpl for _v, tpl in best_by_id.values()]
2893
+ out.sort(key=lambda t: t.bundle_id)
2894
+ return out
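The newest-wins choice above leans on packaging's PEP 440 ordering, which differs from plain string comparison (a quick check, assuming `packaging` is installed):

    from packaging.version import Version

    assert Version("0.10.0") > Version("0.9.1")    # numeric ordering; "0.10.0" < "0.9.1" as strings
    assert Version("1.0.0") > Version("1.0.0rc1")  # pre-releases sort before the final release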
2895
+
2896
+ def _refresh_bundled_agent_templates(self) -> None:
2897
+ self._bundled_agent_templates = self._discover_bundled_agent_templates()
2898
+ self._sync_agent_selector_to_ui()
2899
+
2900
+ def _agent_selector_items_for_ui(self) -> List[Tuple[str, str, str]]:
2901
+ items: List[Tuple[str, str, str]] = [
2902
+ ("react", "react", "builtin"),
2903
+ ("memact", "memact", "builtin"),
2904
+ ("codeact", "codeact", "builtin"),
2905
+ ]
2906
+ for tpl in list(getattr(self, "_bundled_agent_templates", None) or []):
2907
+ bid = str(getattr(tpl, "bundle_id", "") or "").strip()
2908
+ if not bid:
2909
+ continue
2910
+ flow_id = str(getattr(tpl, "flow_id", "") or "").strip()
2911
+ ver = str(getattr(tpl, "bundle_version", "") or "").strip()
2912
+ meta = flow_id
2913
+ if ver:
2914
+ meta = f"{meta} @{ver}" if meta else f"@{ver}"
2915
+ items.append((bid, bid, meta))
2916
+ return items
2917
+
2918
+ def _sync_agent_selector_to_ui(self) -> None:
2919
+ ui = getattr(self, "_ui", None)
2920
+ if ui is None:
2921
+ return
2922
+ try:
2923
+ ui.set_agent_selector(
2924
+ current_key=str(getattr(self, "_agent_selector_key", "") or "").strip(),
2925
+ items=self._agent_selector_items_for_ui(),
2926
+ )
2927
+ except Exception:
2928
+ return
2929
+
2930
+ def _set_agent(self, selector: str) -> None:
2931
+ raw = str(selector or "").strip()
2932
+ if not raw:
2933
+ return
2934
+
2935
+ lower = raw.lower()
2936
+ if lower in ("react", "codeact", "memact"):
2937
+ agent_kind = lower
2938
+ workflow_ref: Optional[str] = None
2939
+ else:
2940
+ agent_kind = raw
2941
+ workflow_ref = raw
2942
+
2943
+ # Choose toolset.
2944
+ agent_cls = None
2945
+ if workflow_ref is not None:
2946
+ tools = list(getattr(self, "_default_tools", []) or [])
2947
+ elif agent_kind == "react":
2948
+ tools = list(getattr(self, "_default_tools", []) or [])
2949
+ agent_cls = getattr(self, "_agent_classes", {}).get("react")
2950
+ elif agent_kind == "memact":
2951
+ tools = list(getattr(self, "_default_tools", []) or [])
2952
+ agent_cls = getattr(self, "_agent_classes", {}).get("memact")
2953
+ else:
2954
+ tools = list(getattr(self, "_codeact_tools", []) or [])
2955
+ agent_cls = getattr(self, "_agent_classes", {}).get("codeact")
2956
+
2957
+ if not tools:
2958
+ raise RuntimeError("No tools configured for agent selection")
2959
+
2960
+ # Rebuild tool metadata + runner.
2961
+ from abstractcore.tools import ToolDefinition
2962
+ from abstractruntime.integrations.abstractcore import MappingToolExecutor
2963
+
2964
+ self._tools = tools
2965
+ self._tool_runner = MappingToolExecutor.from_tools(self._tools)
2966
+
2967
+ tool_specs: Dict[str, _ToolSpec] = {}
2968
+ for t in self._tools:
2969
+ tool_def = getattr(t, "_tool_definition", None) or ToolDefinition.from_function(t)
2970
+ tool_specs[tool_def.name] = _ToolSpec(
2971
+ name=tool_def.name,
2972
+ description=tool_def.description,
2973
+ parameters=dict(tool_def.parameters or {}),
2974
+ )
2975
+ self._tool_specs = tool_specs
2976
+
2977
+ # Filter allowlist against the new toolset (best-effort).
2978
+ if isinstance(getattr(self, "_allowed_tools", None), list):
2979
+ self._allowed_tools = [t for t in self._allowed_tools if t in self._tool_specs]
2980
+
2981
+ # Rebuild agent instance.
2982
+ if workflow_ref is not None:
2983
+ try:
2984
+ from .workflow_agent import WorkflowAgent
2985
+ except Exception as e:
2986
+ raise RuntimeError(f"Workflow agents require AbstractFlow to be installed/importable.\n\n{e}") from e
2987
+
2988
+ self._workflow_agent_ref = workflow_ref
2989
+ self._agent_kind = raw
2990
+ self._agent = WorkflowAgent(
2991
+ runtime=self._runtime,
2992
+ flow_ref=self._workflow_agent_ref,
2993
+ tools=self._tools,
2994
+ on_step=self._on_step,
2995
+ max_iterations=self._max_iterations,
2996
+ max_tokens=self._max_tokens,
2997
+ )
2998
+ else:
2999
+ if agent_cls is None:
3000
+ raise RuntimeError(f"Unknown agent kind: {agent_kind}")
3001
+ self._workflow_agent_ref = None
3002
+ self._agent_kind = agent_kind
3003
+ self._agent = agent_cls(
3004
+ runtime=self._runtime,
3005
+ tools=self._tools,
3006
+ on_step=self._on_step,
3007
+ max_iterations=self._max_iterations,
3008
+ max_tokens=self._max_tokens,
3009
+ plan_mode=self._plan_mode,
3010
+ review_mode=self._review_mode,
3011
+ review_max_rounds=self._review_max_rounds,
3012
+ )
3013
+
3014
+ self._agent_selector_key = self._normalize_agent_selector_key(raw)
3015
+ self._status_cache_key = None
3016
+ self._status_cache_text = ""
3017
+ self._sync_agent_selector_to_ui()
3018
+
3019
+ def _handle_agent(self, raw: str) -> None:
3020
+ import shlex
3021
+
3022
+ try:
3023
+ parts = shlex.split(raw) if raw else []
3024
+ except ValueError:
3025
+ parts = raw.split() if raw else []
3026
+
3027
+ if not parts:
3028
+ cur = str(getattr(self, "_agent_selector_key", "") or "").strip() or str(getattr(self, "_agent_kind", "") or "")
3029
+ self._print(_style(f"Current agent: {cur or 'react'}", _C.DIM, enabled=self._color))
3030
+ self._print(_style("Usage:", _C.DIM, enabled=self._color))
3031
+ self._print(_style(" /agent list", _C.DIM, enabled=self._color))
3032
+ self._print(_style(" /agent <name|bundle_id|path>", _C.DIM, enabled=self._color))
3033
+ return
3034
+
3035
+ head = str(parts[0] or "").strip().lower()
3036
+ if head in ("list", "ls"):
3037
+ self._refresh_bundled_agent_templates()
3038
+ self._print(_style("\nAgents", _C.CYAN, _C.BOLD, enabled=self._color))
3039
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
3040
+ for key, label, meta in self._agent_selector_items_for_ui():
3041
+ if not self._color:
3042
+ self._print(f"- {label} {meta}".rstrip())
3043
+ continue
3044
+ try:
3045
+ t = getattr(self, "_theme", None)
3046
+ tn = t.normalized() if isinstance(t, Theme) else theme_from_env().normalized()
3047
+ name_fg = ansi_fg(tn.primary)
3048
+ meta_fg = ansi_fg(tn.secondary)
3049
+ except Exception:
3050
+ name_fg = ""
3051
+ meta_fg = ""
3052
+ reset = _C.RESET
3053
+ dim = _C.DIM
3054
+ line = f"- {name_fg}{label}{reset}"
3055
+ if meta:
3056
+ line += f" {dim}{meta_fg}{meta}{reset}"
3057
+ self._print(line)
3058
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
3059
+ return
3060
+
3061
+ if head in ("reload", "refresh"):
3062
+ self._refresh_bundled_agent_templates()
3063
+ self._print(_style("Agent templates reloaded.", _C.DIM, enabled=self._color))
3064
+ return
3065
+
3066
+ # Otherwise: treat the raw arg as a selector and switch (silent on success).
3067
+ try:
3068
+ self._set_agent(raw)
3069
+ except Exception as e:
3070
+ self._print(_style(f"Failed to set agent: {e}", _C.YELLOW, enabled=self._color))
3071
+
3072
+ def _handle_theme(self, raw: str) -> None:
3073
+ import shlex
3074
+
3075
+ try:
3076
+ parts = shlex.split(raw) if raw else []
3077
+ except ValueError:
3078
+ parts = raw.split() if raw else []
3079
+
3080
+ if not parts or (len(parts) == 1 and parts[0].strip().lower() in ("list", "ls")):
3081
+ current = getattr(self, "_theme", None)
3082
+ current_name = str(getattr(current, "name", "") or "tokyo")
3083
+ names = sorted(BUILTIN_THEMES.keys())
3084
+ self._print(_style("\nThemes", _C.CYAN, _C.BOLD, enabled=self._color))
3085
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
3086
+ self._print(f"Current: {current_name}")
3087
+ self._print("Available: " + ", ".join(names))
3088
+ self._print(_style("Usage:", _C.DIM, enabled=self._color))
3089
+ self._print(_style(" /theme <name>", _C.DIM, enabled=self._color))
3090
+ self._print(_style(" /theme custom <primary> <secondary> <surface> <muted>", _C.DIM, enabled=self._color))
3091
+ return
3092
+
3093
+ head = str(parts[0] or "").strip().lower()
3094
+ if head == "custom":
3095
+ if len(parts) != 5:
3096
+ self._print(_style("Usage: /theme custom <primary> <secondary> <surface> <muted>", _C.DIM, enabled=self._color))
3097
+ return
3098
+ t = Theme(
3099
+ name="custom",
3100
+ primary=str(parts[1]),
3101
+ secondary=str(parts[2]),
3102
+ surface=str(parts[3]),
3103
+ muted=str(parts[4]),
3104
+ ).normalized()
3105
+ else:
3106
+ t = get_theme(head) if head else None
3107
+ if t is None:
3108
+ self._print(_style(f"Unknown theme: {head}", _C.YELLOW, enabled=self._color))
3109
+ self._print(_style("Try: /theme list", _C.DIM, enabled=self._color))
3110
+ return
3111
+ t = t.normalized()
3112
+
3113
+ self._theme = t
3114
+ try:
3115
+ self._ui.set_theme(t)
3116
+ except Exception:
3117
+ pass
3118
+ # Persist user preference (durable when state_file is enabled).
3119
+ self._save_config()
3120
+ # Intentionally no chat output here: theme changes are reflected in the UI immediately
3121
+ # (footer indicator + full rerender), and /theme is considered a UI command.
3122
+
3123
+ def _handle_system(self, raw: str) -> None:
3124
+ """Show or set the system prompt override for the agent/runtime.
3125
+
3126
+ Usage:
3127
+ /system (show)
3128
+ /system <text...> (set override; applies to new runs and the active run)
3129
+ /system clear (remove override)
3130
+ """
3131
+ import uuid
3132
+
3133
+ arg = str(raw or "").strip()
3134
+
3135
+ def _apply_to_active_run(value: Optional[str]) -> None:
3136
+ run_id = self._attached_run_id()
3137
+ if run_id is None:
3138
+ return
3139
+ try:
3140
+ state = self._runtime.get_state(run_id)
3141
+ except Exception:
3142
+ state = None
3143
+ if state is None or not hasattr(state, "vars") or not isinstance(state.vars, dict):
3144
+ return
3145
+ runtime_ns = state.vars.get("_runtime")
3146
+ if not isinstance(runtime_ns, dict):
3147
+ runtime_ns = {}
3148
+ state.vars["_runtime"] = runtime_ns
3149
+ if value is None:
3150
+ runtime_ns.pop("system_prompt", None)
3151
+ else:
3152
+ runtime_ns["system_prompt"] = str(value)
3153
+ try:
3154
+ self._runtime.run_store.save(state)
3155
+ except Exception:
3156
+ pass
3157
+ # System prompt affects token estimates shown in the footer.
3158
+ self._status_cache_key = None
3159
+ self._status_cache_text = ""
3160
+
3161
+ if arg.lower() in ("clear", "reset", "off", "default", "none"):
3162
+ self._system_prompt_override = None
3163
+ self._save_config()
3164
+ _apply_to_active_run(None)
3165
+ self._print(_style("System prompt cleared (back to agent default).", _C.DIM, enabled=self._color))
3166
+ return
3167
+
3168
+ if arg:
3169
+ self._system_prompt_override = arg
3170
+ self._save_config()
3171
+ _apply_to_active_run(arg)
3172
+ self._print(_style("System prompt set.", _C.DIM, enabled=self._color))
3173
+ return
3174
+
3175
+ # Show the effective system prompt (active run if available; else session default).
3176
+ state = self._safe_get_state()
3177
+ runtime_override: Optional[str] = None
3178
+ if state is not None and hasattr(state, "vars") and isinstance(getattr(state, "vars", None), dict):
3179
+ runtime_ns = state.vars.get("_runtime") if isinstance(state.vars.get("_runtime"), dict) else {}
3180
+ raw_sys = runtime_ns.get("system_prompt") if isinstance(runtime_ns, dict) else None
3181
+ if isinstance(raw_sys, str) and raw_sys.strip():
3182
+ runtime_override = raw_sys.strip()
3183
+
3184
+ title = "System prompt"
3185
+ prompt_text = ""
3186
+ if runtime_override is not None:
3187
+ title = "System prompt (override)"
3188
+ prompt_text = runtime_override
3189
+ else:
3190
+ session_override = getattr(self, "_system_prompt_override", None)
3191
+ if state is None and isinstance(session_override, str) and session_override.strip():
3192
+ title = "System prompt (default override)"
3193
+ prompt_text = session_override.strip()
3194
+ else:
3195
+ # Best-effort: build the agent-generated system prompt (next call).
3196
+ logic = getattr(self._agent, "logic", None)
3197
+ if logic is not None:
3198
+ try:
3199
+ messages = self._select_messages_for_llm(state) if state is not None else []
3200
+ except Exception:
3201
+ messages = []
3202
+
3203
+ iteration = 1
3204
+ max_iterations = int(getattr(self, "_max_iterations", 25) or 25)
3205
+ vars_ns: Optional[Dict[str, Any]] = None
3206
+ if state is not None and hasattr(state, "vars") and isinstance(getattr(state, "vars", None), dict):
3207
+ vars_ns = state.vars
3208
+ context_ns = state.vars.get("context") if isinstance(state.vars.get("context"), dict) else {}
3209
+ limits = state.vars.get("_limits") if isinstance(state.vars.get("_limits"), dict) else {}
3210
+ try:
3211
+ cur = int(limits.get("current_iteration", 0) or 0)
3212
+ except Exception:
3213
+ cur = 0
3214
+ try:
3215
+ max_iterations = int(limits.get("max_iterations", max_iterations) or max_iterations)
3216
+ except Exception:
3217
+ max_iterations = max_iterations
3218
+ if max_iterations < 1:
3219
+ max_iterations = 1
3220
+ iteration = max(1, cur + 1)
3221
+ try:
3222
+ task_txt = ""
3223
+ if vars_ns is not None and isinstance(context_ns, dict):
3224
+ task_txt = str(context_ns.get("task") or "")
3225
+ req = logic.build_request(
3226
+ task=task_txt,
3227
+ messages=list(messages or []),
3228
+ guidance="",
3229
+ iteration=iteration,
3230
+ max_iterations=max_iterations,
3231
+ vars=vars_ns,
3232
+ )
3233
+ prompt_text = str(getattr(req, "system_prompt", "") or "").strip()
3234
+ except Exception:
3235
+ prompt_text = ""
3236
+
3237
+ if not prompt_text:
3238
+ self._print(_style("No system prompt available yet. Start a task, then run /system again.", _C.DIM, enabled=self._color))
3239
+ return
3240
+
3241
+ copy_id = f"system_{uuid.uuid4().hex}"
3242
+ self._ui.register_copy_payload(copy_id, prompt_text)
3243
+ self._print(_style(f"\n{title}", _C.CYAN, _C.BOLD, enabled=self._color))
3244
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
3245
+ self._print(prompt_text)
3246
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
3247
+ self._print(f"[[COPY:{copy_id}]]")
3248
+
3249
+ def _gpu_monitor_enabled_from_env(self) -> bool:
3250
+ """Return whether the GPU meter should run.
3251
+
3252
+ Default: OFF (opt-in), to avoid constant polling/noisy gateway logs.
3253
+
3254
+ Control via `ABSTRACTCODE_GPU_MONITOR`:
3255
+ - 0/false/off/no -> disabled
3256
+ - 1/true/on/yes -> enabled
3257
+ - auto -> enabled when a gateway URL/token is configured
3258
+ """
3259
+ v = str(os.getenv("ABSTRACTCODE_GPU_MONITOR", "") or "").strip().lower()
3260
+ if v in ("0", "false", "off", "no"):
3261
+ return False
3262
+ if v in ("1", "true", "on", "yes"):
3263
+ return True
3264
+ if v in ("auto", "detect", "default"):
3265
+ return bool(
3266
+ str(os.getenv("ABSTRACTCODE_GATEWAY_URL", "") or "").strip()
3267
+ or str(os.getenv("ABSTRACTFLOW_GATEWAY_URL", "") or "").strip()
3268
+ or str(os.getenv("ABSTRACTGATEWAY_URL", "") or "").strip()
3269
+ or str(os.getenv("ABSTRACTCODE_GATEWAY_TOKEN", "") or "").strip()
3270
+ or str(os.getenv("ABSTRACTGATEWAY_AUTH_TOKEN", "") or "").strip()
3271
+ or str(os.getenv("ABSTRACTFLOW_GATEWAY_AUTH_TOKEN", "") or "").strip()
3272
+ or str(os.getenv("ABSTRACTGATEWAY_AUTH_TOKENS", "") or "").strip()
3273
+ or str(os.getenv("ABSTRACTFLOW_GATEWAY_AUTH_TOKENS", "") or "").strip()
3274
+ )
3275
+ return False
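Purely illustrative (the URL below is an assumption, not taken from this diff): with the `auto` setting the meter only turns on once a gateway is configured, e.g.:

    import os

    os.environ["ABSTRACTCODE_GPU_MONITOR"] = "auto"
    os.environ["ABSTRACTCODE_GATEWAY_URL"] = "http://127.0.0.1:8000"  # hypothetical gateway address

In practice these variables would be exported in the environment before launching the shell rather than set in-process.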
3276
+
3277
+ def _start_gpu_monitor(self) -> None:
3278
+ """Start the GPU polling thread (best-effort)."""
3279
+ if not bool(getattr(self, "_gpu_monitor_enabled", False)):
3280
+ return
3281
+ try:
3282
+ lock = getattr(self, "_gpu_monitor_lock", None)
3283
+ if lock is None:
3284
+ lock = threading.Lock()
3285
+ self._gpu_monitor_lock = lock
3286
+ except Exception:
3287
+ lock = None
3288
+
3289
+ if lock is None: # pragma: no cover
3290
+ return
3291
+
3292
+ with lock:
3293
+ t = getattr(self, "_gpu_monitor_thread", None)
3294
+ if t is not None and getattr(t, "is_alive", lambda: False)():
3295
+ return
3296
+ self._gpu_monitor_thread = threading.Thread(target=self._gpu_monitor_loop, daemon=True)
3297
+ self._gpu_monitor_thread.start()
3298
+
3299
+ def _fetch_gateway_gpu_utilization_pct(self, *, timeout_s: float = 0.8) -> tuple[Optional[float], Optional[str]]:
3300
+ """Fetch GPU utilization from AbstractGateway (best-effort)."""
3301
+ try:
3302
+ from urllib.error import HTTPError
3303
+ from urllib.request import Request, urlopen
3304
+ except Exception: # pragma: no cover
3305
+ return None, "urllib_unavailable"
3306
+
3307
+ try:
3308
+ from .gateway_cli import default_gateway_token, default_gateway_url
3309
+ except Exception:
3310
+ return None, "gateway_cli_unavailable"
3311
+
3312
+ base = str(default_gateway_url() or "").rstrip("/")
3313
+ if not base:
3314
+ return None, "gateway_url_missing"
3315
+ url = f"{base}/api/gateway/host/metrics/gpu"
3316
+ headers = {"Accept": "application/json"}
3317
+ token = default_gateway_token()
3318
+ if token:
3319
+ headers["Authorization"] = f"Bearer {token}"
3320
+
3321
+ req = Request(url=url, headers=headers, method="GET")
3322
+ raw = ""
3323
+ try:
3324
+ with urlopen(req, timeout=float(max(0.1, timeout_s))) as resp:
3325
+ raw = resp.read().decode("utf-8")
3326
+ except HTTPError as e:
3327
+ detail = ""
3328
+ try:
3329
+ detail = (e.read().decode("utf-8") or "").strip()
3330
+ except Exception:
3331
+ detail = ""
3332
+ msg = f"http_{e.code}"
3333
+ if detail:
3334
+ msg = f"{msg}: {detail}"
3335
+ return None, msg
3336
+ except Exception as e:
3337
+ return None, f"network_error: {e}"
3338
+
3339
+ try:
3340
+ payload = json.loads(raw) if raw else {}
3341
+ except Exception:
3342
+ payload = {}
3343
+
3344
+ if not isinstance(payload, dict):
3345
+ return None, "invalid_json"
3346
+ if payload.get("supported") is False:
3347
+ reason = payload.get("reason")
3348
+ reason_s = str(reason).strip() if isinstance(reason, str) else ""
3349
+ return None, reason_s or "unsupported"
3350
+
3351
+ def _as_float(x: object) -> Optional[float]:
3352
+ try:
3353
+ v = float(x) # type: ignore[arg-type]
3354
+ except Exception:
3355
+ return None
3356
+ if v != v: # NaN
3357
+ return None
3358
+ return v
3359
+
3360
+ direct = _as_float(payload.get("utilization_gpu_pct"))
3361
+ if direct is not None:
3362
+ return max(0.0, min(100.0, direct)), None
3363
+
3364
+ gpus = payload.get("gpus")
3365
+ if isinstance(gpus, list) and gpus:
3366
+ vals: List[float] = []
3367
+ for g in gpus:
3368
+ if not isinstance(g, dict):
3369
+ continue
3370
+ v = _as_float(g.get("utilization_gpu_pct"))
3371
+ if v is None:
3372
+ continue
3373
+ vals.append(v)
3374
+ if vals:
3375
+ avg = sum(vals) / max(1, len(vals))
3376
+ return max(0.0, min(100.0, avg)), None
3377
+
3378
+ return None, "missing_utilization"
3379
+
3380
+ def _gpu_monitor_loop(self) -> None:
3381
+ failures = 0
3382
+ last_emitted: Optional[int] = None
3383
+ last_err: Optional[str] = None
3384
+ poll_s = 2.0
3385
+
3386
+ while True:
3387
+ ui = getattr(self, "_ui", None)
3388
+ if ui is not None and bool(getattr(ui, "_shutdown", False)):
3389
+ return
3390
+ if not bool(getattr(self, "_gpu_monitor_enabled", False)):
3391
+ time.sleep(0.5)
3392
+ continue
3393
+
3394
+ pct, err = self._fetch_gateway_gpu_utilization_pct(timeout_s=0.8)
3395
+ updated = False
3396
+ if pct is None:
3397
+ failures += 1
3398
+ err_s = str(err or "unavailable")
3399
+ self._gpu_last_error = err_s
3400
+ if err_s != (last_err or ""):
3401
+ last_err = err_s
3402
+ updated = True
3403
+ if getattr(self, "_gpu_utilization_pct", None) is not None:
3404
+ self._gpu_utilization_pct = None
3405
+ last_emitted = None
3406
+ updated = True
3407
+ else:
3408
+ failures = 0
3409
+ self._gpu_last_error = None
3410
+ if last_err is not None:
3411
+ last_err = None
3412
+ updated = True
3413
+ self._gpu_last_ok_at = time.time()
3414
+ pct_int = int(round(float(pct)))
3415
+ if last_emitted is None or pct_int != last_emitted:
3416
+ self._gpu_utilization_pct = float(pct)
3417
+ last_emitted = pct_int
3418
+ updated = True
3419
+
3420
+ if updated:
3421
+ try:
3422
+ self._ui.request_refresh()
3423
+ except Exception:
3424
+ pass
3425
+
3426
+ sleep_s = poll_s if failures < 3 else min(10.0, poll_s * float(failures))
3427
+ time.sleep(float(sleep_s))
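Worked example of the backoff above: with `poll_s = 2.0`, the first two failures keep the 2 s cadence, a third failure sleeps `min(10, 2*3) = 6` s, a fifth sleeps the capped 10 s, and any success returns to the 2 s cadence.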
3428
+
3429
+ def _format_gpu_meter(self, pct: float) -> str:
3430
+ p = max(0.0, min(100.0, float(pct)))
3431
+ bar_len = 10
3432
+ filled = int(round((p / 100.0) * bar_len))
3433
+ filled = max(0, min(bar_len, filled))
3434
+ bar = ("█" * filled) + ("░" * (bar_len - filled))
3435
+ return f"GPU {bar} {p:.0f}%"
3436
+
3437
+ def _handle_gpu(self, raw: str) -> None:
3438
+ arg = str(raw or "").strip().lower()
3439
+ if not arg or arg in ("status", "show"):
3440
+ enabled = bool(getattr(self, "_gpu_monitor_enabled", False))
3441
+ pct = getattr(self, "_gpu_utilization_pct", None)
3442
+ meter = self._format_gpu_meter(float(pct)) if isinstance(pct, (int, float)) else "GPU n/a"
3443
+ err = getattr(self, "_gpu_last_error", None)
3444
+ err_s = str(err or "").strip() if err is not None else ""
3445
+ ok_at = getattr(self, "_gpu_last_ok_at", None)
3446
+ age_s = ""
3447
+ if isinstance(ok_at, (int, float)) and ok_at > 0:
3448
+ try:
3449
+ age_s = f"{max(0, int(time.time() - float(ok_at)))}s ago"
3450
+ except Exception:
3451
+ age_s = ""
3452
+
3453
+ try:
3454
+ from .gateway_cli import default_gateway_token, default_gateway_url
3455
+
3456
+ gw_url = str(default_gateway_url() or "")
3457
+ has_token = bool(default_gateway_token())
3458
+ except Exception:
3459
+ gw_url = ""
3460
+ has_token = False
3461
+
3462
+ line = f"GPU meter: {'ON' if enabled else 'OFF'} ({meter})"
3463
+ if age_s:
3464
+ line = f"{line} (last ok: {age_s})"
3465
+ self._print(_style(line, _C.DIM, enabled=self._color))
3466
+ if gw_url:
3467
+ self._print(_style(f"Gateway: {gw_url} token: {'set' if has_token else 'missing'}", _C.DIM, enabled=self._color))
3468
+ if not has_token:
3469
+ self._print(
3470
+ _style(
3471
+ "Tip: set a gateway token with --gateway-token or $ABSTRACTGATEWAY_AUTH_TOKEN.",
3472
+ _C.DIM,
3473
+ enabled=self._color,
3474
+ )
3475
+ )
3476
+ if err_s:
3477
+ self._print(_style(f"Last error: {err_s}", _C.DIM, enabled=self._color))
3478
+ if not enabled:
3479
+ self._print(_style("Enable with: /gpu on", _C.DIM, enabled=self._color))
3480
+ return
3481
+ if arg in ("on", "enable", "enabled", "1", "true"):
3482
+ self._gpu_monitor_enabled = True
3483
+ self._start_gpu_monitor()
3484
+ try:
3485
+ self._ui.request_refresh()
3486
+ except Exception:
3487
+ pass
3488
+ self._print(_style("GPU meter: ON", _C.DIM, enabled=self._color))
3489
+ return
3490
+ if arg in ("off", "disable", "disabled", "0", "false"):
3491
+ self._gpu_monitor_enabled = False
3492
+ self._gpu_utilization_pct = None
3493
+ self._gpu_last_error = None
3494
+ try:
3495
+ self._ui.request_refresh()
3496
+ except Exception:
3497
+ pass
3498
+ self._print(_style("GPU meter: OFF", _C.DIM, enabled=self._color))
3499
+ return
3500
+ self._print(_style("Usage: /gpu [status|on|off]", _C.DIM, enabled=self._color))
3501
+
3502
+ _LINK_RE_MD = re.compile(r"\[[^\]]+\]\((?P<url>[^)\s]+)")
3503
+ _LINK_RE_URL = re.compile(r"(?P<url>https?://[^\s<>()\]]+)")
3504
+
3505
+ def _extract_links(self, text: str) -> List[str]:
3506
+ """Extract URLs (best-effort) from markdown/plain text.
3507
+
3508
+ Returns unique URLs preserving first-seen order.
3509
+ """
3510
+ s = str(text or "")
3511
+ candidates: List[str] = []
3512
+ candidates.extend([m.group("url") for m in self._LINK_RE_MD.finditer(s) if m.group("url")])
3513
+ candidates.extend([m.group("url") for m in self._LINK_RE_URL.finditer(s) if m.group("url")])
3514
+
3515
+ out: List[str] = []
3516
+ seen: set[str] = set()
3517
+ for u in candidates:
3518
+ url = str(u or "").strip()
3519
+ while url and url[-1] in ".,;:)]}":
3520
+ url = url[:-1]
3521
+ if not url.startswith(("http://", "https://")):
3522
+ continue
3523
+ if url in seen:
3524
+ continue
3525
+ seen.add(url)
3526
+ out.append(url)
3527
+ return out
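A self-contained sketch of how the two patterns combine (it mirrors the method above for illustration only and is not part of the package):

    import re

    LINK_MD = re.compile(r"\[[^\]]+\]\((?P<url>[^)\s]+)")
    LINK_URL = re.compile(r"(?P<url>https?://[^\s<>()\]]+)")

    def extract_links(text: str) -> list:
        # Markdown link targets first, then bare URLs; keep first-seen order, drop duplicates.
        found = [m.group("url") for m in LINK_MD.finditer(text)]
        found += [m.group("url") for m in LINK_URL.finditer(text)]
        out, seen = [], set()
        for url in found:
            url = url.rstrip(".,;:)]}")  # trim trailing punctuation, as the method does
            if url.startswith(("http://", "https://")) and url not in seen:
                seen.add(url)
                out.append(url)
        return out

    print(extract_links("See [docs](https://example.com/a) and https://example.com/a."))
    # ['https://example.com/a']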
3528
+
3529
+ def _open_url(self, url: str) -> bool:
3530
+ u = str(url or "").strip()
3531
+ if not u.startswith(("http://", "https://")):
3532
+ return False
3533
+ try:
3534
+ import webbrowser
3535
+
3536
+ threading.Thread(target=webbrowser.open, args=(u,), kwargs={"new": 2}, daemon=True).start()
3537
+ return True
3538
+ except Exception:
3539
+ return False
3540
+
3541
+ def _handle_links(self, raw: str) -> None:
3542
+ _ = raw
3543
+ links = list(getattr(self, "_last_answer_links", []) or [])
3544
+ if not links:
3545
+ self._print(_style("No links captured from the last answer.", _C.DIM, enabled=self._color))
3546
+ return
3547
+ self._print(_style("\nLinks (last answer)", _C.CYAN, _C.BOLD, enabled=self._color))
3548
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
3549
+ for i, u in enumerate(links, start=1):
3550
+ self._print(f"{i}. {u}")
3551
+
3552
+ def _handle_open(self, raw: str) -> None:
3553
+ arg = str(raw or "").strip()
3554
+ if not arg:
3555
+ self._print(_style("Usage: /open <N|URL>", _C.DIM, enabled=self._color))
3556
+ return
3557
+ if arg.isdigit():
3558
+ idx = int(arg)
3559
+ links = list(getattr(self, "_last_answer_links", []) or [])
3560
+ if idx < 1 or idx > len(links):
3561
+ self._print(_style(f"Invalid link index: {idx}", _C.YELLOW, enabled=self._color))
3562
+ self._print(_style("Try: /links", _C.DIM, enabled=self._color))
3563
+ return
3564
+ url = links[idx - 1]
3565
+ else:
3566
+ url = arg
3567
+
3568
+ ok = self._open_url(url)
3569
+ if not ok:
3570
+ self._print(_style(f"Failed to open: {url}", _C.YELLOW, enabled=self._color))
1860
3571
 
1861
3572
  def _handle_log(self, raw: str) -> None:
1862
3573
  """Show durable logs for the current run.
1863
3574
 
1864
- `/log runtime` is the AbstractRuntime-centric view (step trace, payloads).
1865
- `/log provider` is the provider wire view (request sent + response received).
3575
+ `/logs runtime` is the AbstractRuntime-centric view (step trace, payloads).
3576
+ `/logs provider` is the provider wire view (request sent + response received).
1866
3577
 
1867
3578
  Usage:
1868
- /log runtime [copy] [--last] [--json-only] [--save <path>]
1869
- /log provider [copy] [--last|--all] [--json-only] [--save <path>]
3579
+ /logs runtime [copy] [--last] [--json-only] [--save <path>]
3580
+ /logs provider [copy] [--last|--all] [--json-only] [--save <path>]
1870
3581
  """
1871
3582
  import shlex
1872
3583
 
@@ -1877,8 +3588,8 @@ class ReactShell:
1877
3588
 
1878
3589
  usage = (
1879
3590
  "Usage:\n"
1880
- " /log runtime [copy] [--last] [--json-only] [--save <path>]\n"
1881
- " /log provider [copy] [--last|--all] [--json-only] [--save <path>]\n"
3591
+ " /logs runtime [copy] [--last] [--json-only] [--save <path>]\n"
3592
+ " /logs provider [copy] [--last|--all] [--json-only] [--save <path>]\n"
1882
3593
  )
1883
3594
  if not parts:
1884
3595
  self._print(_style(usage, _C.DIM, enabled=self._color))
@@ -1895,7 +3606,7 @@ class ReactShell:
1895
3606
  self._handle_log_provider(rest_raw)
1896
3607
  return
1897
3608
 
1898
- self._print(_style(f"Unknown /log kind: {kind}", _C.YELLOW, enabled=self._color))
3609
+ self._print(_style(f"Unknown /logs kind: {kind}", _C.YELLOW, enabled=self._color))
1899
3610
  self._print(_style(usage, _C.DIM, enabled=self._color))
1900
3611
 
1901
3612
  def _append_to_active_context(self, *, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
@@ -2304,6 +4015,122 @@ class ReactShell:
2304
4015
  self._print(_style(f"Review mode set to {status} (max_rounds={self._review_max_rounds}).", _C.DIM, enabled=self._color))
2305
4016
  self._save_config()
2306
4017
 
4018
+ def _handle_config(self, raw: str) -> None:
4019
+ """Configure durable runtime options.
4020
+
4021
+ Usage:
4022
+ /config
4023
+ /config check-plan
4024
+ /config check-plan on|off
4025
+ """
4026
+ text = str(raw or "").strip()
4027
+ if not text:
4028
+ status = "on" if bool(getattr(self, "_check_plan", False)) else "off"
4029
+ self._print(_style("\nConfig", _C.CYAN, _C.BOLD, enabled=self._color))
4030
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
4031
+ self._print(f"- check-plan: {status}")
4032
+ self._print(_style("Usage: /config check-plan on|off", _C.DIM, enabled=self._color))
4033
+ return
4034
+
4035
+ parts = text.split()
4036
+ key = str(parts[0] or "").strip().lower()
4037
+ if key in ("check-plan", "check_plan", "checkplan"):
4038
+ if len(parts) < 2:
4039
+ status = "on" if bool(getattr(self, "_check_plan", False)) else "off"
4040
+ self._print(_style(f"check-plan: {status}", _C.DIM, enabled=self._color))
4041
+ return
4042
+
4043
+ value = str(parts[1] or "").strip().lower()
4044
+ if value in ("toggle",):
4045
+ enabled = not bool(getattr(self, "_check_plan", False))
4046
+ elif value in ("on", "true", "1", "yes", "y", "enabled"):
4047
+ enabled = True
4048
+ elif value in ("off", "false", "0", "no", "n", "disabled"):
4049
+ enabled = False
4050
+ else:
4051
+ self._print(_style("Usage: /config check-plan on|off", _C.DIM, enabled=self._color))
4052
+ return
4053
+
4054
+ self._check_plan = bool(enabled)
4055
+ self._save_config()
4056
+
4057
+ rid = self._attached_run_id()
4058
+ if rid is not None:
4059
+ self._sync_tool_prompt_settings_to_run(rid)
4060
+
4061
+ # Invalidate the footer status cache; cheap, and it keeps the status line in sync with the new setting.
4062
+ self._status_cache_key = None
4063
+ self._status_cache_text = ""
4064
+
4065
+ status = "on" if self._check_plan else "off"
4066
+ self._print(_style(f"Config set: check-plan={status}", _C.DIM, enabled=self._color))
4067
+ return
4068
+
4069
+ self._print(_style(f"Unknown config key: {key}", _C.YELLOW, enabled=self._color))
4070
+ self._print(_style("Usage: /config check-plan on|off", _C.DIM, enabled=self._color))
4071
+
4072
+ def _handle_files(self, raw: str) -> None:
4073
+ """List pending `@file` attachment chips (files that will be sent with the next prompt)."""
4074
+ if str(raw or "").strip():
4075
+ self._print(_style("Usage: /files", _C.DIM, enabled=self._color))
4076
+ return
4077
+
4078
+ attachments: List[str] = []
4079
+ try:
4080
+ getter = getattr(self._ui, "get_composer_state", None)
4081
+ if callable(getter):
4082
+ state = getter()
4083
+ if isinstance(state, dict) and isinstance(state.get("attachments"), list):
4084
+ attachments = [str(x) for x in state["attachments"] if isinstance(x, str) and x.strip()]
4085
+ except Exception:
4086
+ attachments = []
4087
+
4088
+ keep_status = "on" if bool(getattr(self, "_files_keep", False)) else "off"
4089
+ self._print(_style("\nFiles", _C.CYAN, _C.BOLD, enabled=self._color))
4090
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
4091
+ self._print(_style(f"files-keep: {keep_status}", _C.DIM, enabled=self._color))
4092
+ if not attachments:
4093
+ self._print(_style("(no pending files)", _C.DIM, enabled=self._color))
4094
+ return
4095
+ for rel in attachments:
4096
+ self._print(f"- {rel}")
4097
+ self._print(_style("Tip: click a chip ‘×’ (or Backspace) to remove.", _C.DIM, enabled=self._color))
4098
+
4099
+ def _handle_files_keep(self, raw: str) -> None:
4100
+ """Toggle whether `@file` chips persist across turns."""
4101
+ value = str(raw or "").strip().lower()
4102
+ if not value:
4103
+ status = "ON" if bool(getattr(self, "_files_keep", False)) else "OFF"
4104
+ self._print(_style(f"Files keep: {status}", _C.DIM, enabled=self._color))
4105
+ return
4106
+
4107
+ if value in ("toggle",):
4108
+ enabled = not bool(getattr(self, "_files_keep", False))
4109
+ elif value in ("on", "true", "1", "yes", "y", "enabled"):
4110
+ enabled = True
4111
+ elif value in ("off", "false", "0", "no", "n", "disabled"):
4112
+ enabled = False
4113
+ else:
4114
+ self._print(_style("Usage: /files-keep [on|off]", _C.DIM, enabled=self._color))
4115
+ return
4116
+
4117
+ self._files_keep = bool(enabled)
4118
+ self._save_config()
4119
+
4120
+ try:
4121
+ setter = getattr(self._ui, "set_files_keep", None)
4122
+ if callable(setter):
4123
+ setter(bool(self._files_keep))
4124
+ except Exception:
4125
+ pass
4126
+
4127
+ # Invalidate footer cache (token estimate includes attachments).
4128
+ self._status_cache_key = None
4129
+ self._status_cache_text = ""
4130
+
4131
+ status = "ON" if self._files_keep else "OFF"
4132
+ self._print(_style(f"Files keep set to {status}.", _C.DIM, enabled=self._color))
4133
+
2307
4134
  def _handle_max_tokens(self, raw: str) -> None:
2308
4135
  """Show or set max tokens for context."""
2309
4136
  value = raw.strip()
@@ -2401,6 +4228,16 @@ class ReactShell:
2401
4228
  self._auto_approve = config["auto_approve"]
2402
4229
  if "plan_mode" in config:
2403
4230
  self._plan_mode = bool(config["plan_mode"])
4231
+ if "check_plan" in config or "check-plan" in config:
4232
+ raw = config.get("check_plan")
4233
+ if raw is None:
4234
+ raw = config.get("check-plan")
4235
+ self._check_plan = bool(raw)
4236
+ if "files_keep" in config or "files-keep" in config:
4237
+ raw = config.get("files_keep")
4238
+ if raw is None:
4239
+ raw = config.get("files-keep")
4240
+ self._files_keep = bool(raw)
2404
4241
  if "review_mode" in config:
2405
4242
  self._review_mode = bool(config["review_mode"])
2406
4243
  if "review_max_rounds" in config:
@@ -2424,6 +4261,43 @@ class ReactShell:
2424
4261
  self._tool_executor_server_id = None
2425
4262
  elif isinstance(raw_exec, str) and raw_exec.strip():
2426
4263
  self._tool_executor_server_id = raw_exec.strip()
4264
+ if "theme" in config:
4265
+ raw_theme = config.get("theme")
4266
+ try:
4267
+ t: Optional[Theme] = None
4268
+ if isinstance(raw_theme, str) and raw_theme.strip():
4269
+ t = get_theme(raw_theme.strip())
4270
+ elif isinstance(raw_theme, dict):
4271
+ name = str(raw_theme.get("name") or raw_theme.get("id") or raw_theme.get("theme") or "").strip()
4272
+ colors = raw_theme.get("colors") if isinstance(raw_theme.get("colors"), dict) else raw_theme
4273
+ primary = colors.get("primary") if isinstance(colors, dict) else None
4274
+ secondary = colors.get("secondary") if isinstance(colors, dict) else None
4275
+ surface = colors.get("surface") if isinstance(colors, dict) else None
4276
+ muted = colors.get("muted") if isinstance(colors, dict) else None
4277
+ if any(isinstance(x, str) and str(x).strip() for x in (primary, secondary, surface, muted)):
4278
+ t = Theme(
4279
+ name=name or "custom",
4280
+ primary=str(primary or ""),
4281
+ secondary=str(secondary or ""),
4282
+ surface=str(surface or ""),
4283
+ muted=str(muted or ""),
4284
+ ).normalized()
4285
+ elif name:
4286
+ t = get_theme(name)
4287
+ if isinstance(t, Theme):
4288
+ self._theme = t.normalized()
4289
+ except Exception:
4290
+ pass
4291
+ if "system_prompt" in config or "system" in config:
4292
+ raw_sys = config.get("system_prompt")
4293
+ if raw_sys is None:
4294
+ raw_sys = config.get("system")
4295
+ if raw_sys is None:
4296
+ self._system_prompt_override = None
4297
+ elif isinstance(raw_sys, str) and raw_sys.strip():
4298
+ self._system_prompt_override = raw_sys.strip()
4299
+ else:
4300
+ self._system_prompt_override = None
2427
4301
  if "mcp_servers" in config:
2428
4302
  raw = config.get("mcp_servers")
2429
4303
  if isinstance(raw, dict):
@@ -2503,18 +4377,48 @@ class ReactShell:
2503
4377
 
2504
4378
  config = dict(existing)
2505
4379
  mcp_servers = getattr(self, "_mcp_servers", None)
4380
+ theme_payload: Any = None
4381
+ try:
4382
+ t = getattr(self, "_theme", None)
4383
+ if isinstance(t, Theme):
4384
+ tn = t.normalized()
4385
+ key = str(tn.name or "").strip().lower()
4386
+ base = BUILTIN_THEMES.get(key) if key else None
4387
+ if isinstance(base, Theme) and base.normalized() == tn:
4388
+ theme_payload = base.name
4389
+ else:
4390
+ theme_payload = {
4391
+ "name": tn.name,
4392
+ "colors": {
4393
+ "primary": tn.primary,
4394
+ "secondary": tn.secondary,
4395
+ "surface": tn.surface,
4396
+ "muted": tn.muted,
4397
+ },
4398
+ }
4399
+ except Exception:
4400
+ theme_payload = None
4401
+ sys_override = getattr(self, "_system_prompt_override", None)
2506
4402
  config.update(
2507
4403
  {
2508
4404
  "max_tokens": self._max_tokens,
2509
4405
  "max_history_messages": getattr(self, "_max_history_messages", -1),
2510
4406
  "auto_approve": self._auto_approve,
2511
4407
  "plan_mode": self._plan_mode,
4408
+ "check_plan": bool(getattr(self, "_check_plan", False)),
4409
+ "files_keep": bool(getattr(self, "_files_keep", False)),
2512
4410
  "review_mode": self._review_mode,
2513
4411
  "review_max_rounds": self._review_max_rounds,
2514
4412
  "allowed_tools": self._allowed_tools,
2515
4413
  "tool_prompt_examples": bool(self._tool_prompt_examples),
2516
4414
  "tool_executor": getattr(self, "_tool_executor_server_id", None),
2517
4415
  "mcp_servers": dict(mcp_servers or {}) if isinstance(mcp_servers, dict) else {},
4416
+ "theme": theme_payload,
4417
+ "system_prompt": (
4418
+ str(sys_override).strip()
4419
+ if isinstance(sys_override, str) and str(sys_override).strip()
4420
+ else None
4421
+ ),
2518
4422
  }
2519
4423
  )
2520
4424
  self._config_file.write_text(json.dumps(config, indent=2))
@@ -4069,12 +5973,238 @@ class ReactShell:
4069
5973
  self._print(_style("─" * 60, _C.DIM, enabled=self._color))
4070
5974
  self._print(json.dumps(out, ensure_ascii=False, indent=2, sort_keys=True, default=str))
4071
5975
 
5976
+ def _sync_workspace_policy_to_ui(self) -> None:
5977
+ """Best-effort: keep UI `@file` completion aligned with shell workspace policy."""
5978
+ try:
5979
+ setter = getattr(self._ui, "set_workspace_policy", None)
5980
+ if callable(setter):
5981
+ setter(
5982
+ workspace_root=self._workspace_root,
5983
+ mounts=dict(self._workspace_mounts or {}),
5984
+ blocked_paths=list(self._workspace_blocked_paths or []),
5985
+ )
5986
+ except Exception:
5987
+ pass
5988
+
5989
+ def _handle_whitelist(self, raw: str) -> None:
5990
+ """Whitelist directories for this session as named workspace mounts.
5991
+
5992
+ Usage:
5993
+ /whitelist
5994
+ /whitelist <dir...>
5995
+ /whitelist name=/abs/dir [name2=/abs/dir2 ...]
5996
+
5997
+ Notes:
5998
+ - Mounts are referenced in prompts as: `@<mount>/<path/to/file>`
5999
+ - Blacklist has priority over whitelist.
6000
+ """
6001
+ import re
6002
+ import shlex
6003
+
6004
+ try:
6005
+ parts = shlex.split(raw) if raw else []
6006
+ except ValueError:
6007
+ parts = raw.split() if raw else []
6008
+
6009
+ if not parts:
6010
+ mounts = dict(self._workspace_mounts or {})
6011
+ if not mounts:
6012
+ self._print(_style("No mounts configured. Use /whitelist <dir...>.", _C.DIM, enabled=self._color))
6013
+ return
6014
+ self._print(_style("\nWorkspace mounts", _C.CYAN, _C.BOLD, enabled=self._color))
6015
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
6016
+ for name, root in sorted(mounts.items()):
6017
+ self._print(f"- {name}={root}")
6018
+ return
6019
+
6020
+ def _sanitize_mount_name(name: str) -> str:
6021
+ s = re.sub(r"[^a-zA-Z0-9_-]+", "_", str(name or "").strip())
6022
+ s = s.strip("_-")
6023
+ if not s:
6024
+ s = "mount"
6025
+ return s[:32]
6026
+
6027
+ existing = dict(self._workspace_mounts or {})
6028
+ added: list[tuple[str, Path]] = []
6029
+
6030
+ for tok in parts:
6031
+ name: Optional[str] = None
6032
+ path_text = str(tok or "").strip()
6033
+ if not path_text:
6034
+ continue
6035
+ if "=" in path_text:
6036
+ a, b = path_text.split("=", 1)
6037
+ cand_name = a.strip()
6038
+ cand_path = b.strip()
6039
+ if cand_name and re.match(r"^[a-zA-Z0-9_-]{1,32}$", cand_name):
6040
+ name = cand_name
6041
+ path_text = cand_path
6042
+
6043
+ if not path_text:
6044
+ continue
6045
+
6046
+ p = Path(path_text).expanduser()
6047
+ try:
6048
+ resolved = p.resolve() if p.is_absolute() else (self._workspace_root / p).resolve()
6049
+ except Exception:
6050
+ self._print(_style(f"Invalid path: {path_text}", _C.YELLOW, enabled=self._color))
6051
+ continue
6052
+ try:
6053
+ if not resolved.exists() or not resolved.is_dir():
6054
+ self._print(_style(f"Not a directory: {resolved}", _C.YELLOW, enabled=self._color))
6055
+ continue
6056
+ except Exception:
6057
+ self._print(_style(f"Directory not accessible: {resolved}", _C.YELLOW, enabled=self._color))
6058
+ continue
6059
+
6060
+ # Whitelisting the workspace root is redundant (it's already accessible), and the
6061
+ # default auto-name can shadow a real top-level folder (breaking @file resolution).
6062
+ try:
6063
+ if name is None and resolved.resolve() == self._workspace_root.resolve():
6064
+ self._print(_style(f"Workspace root already accessible: {resolved}", _C.DIM, enabled=self._color))
6065
+ continue
6066
+ except Exception:
6067
+ pass
6068
+
6069
+ mount = _sanitize_mount_name(name or resolved.name or "mount")
6070
+
6071
+ # Avoid mounts that shadow an existing workspace-relative path segment.
6072
+ # Example: workspace has `mnemosyne/` and user mounts `/.../workspace` with mount name `mnemosyne`,
6073
+ # then `@mnemosyne/...` resolves against the mount instead of the workspace folder.
6074
+ try:
6075
+ candidate = (self._workspace_root / mount)
6076
+ if candidate.exists():
6077
+ try:
6078
+ if candidate.resolve() != resolved.resolve():
6079
+ mount = _sanitize_mount_name(f"{mount}_mount")
6080
+ except Exception:
6081
+ mount = _sanitize_mount_name(f"{mount}_mount")
6082
+ except Exception:
6083
+ pass
6084
+
6085
+ base = mount
6086
+ i = 2
6087
+ while mount in existing and existing.get(mount) != resolved:
6088
+ suffix = f"_{i}"
6089
+ mount = (base[: max(1, 32 - len(suffix))] + suffix)[:32]
6090
+ i += 1
6091
+
6092
+ existing[mount] = resolved
6093
+ try:
6094
+ self._workspace_mount_ignores[mount] = self._AbstractIgnore.for_path(resolved)
6095
+ except Exception:
6096
+ self._workspace_mount_ignores[mount] = None
6097
+ added.append((mount, resolved))
6098
+
6099
+ self._workspace_mounts = existing
6100
+ self._sync_workspace_policy_to_ui()
6101
+
6102
+ rid = self._attached_run_id()
6103
+ if rid is not None:
6104
+ self._sync_tool_prompt_settings_to_run(rid)
6105
+
6106
+ if not added:
6107
+ return
6108
+ self._print(_style("Whitelisted:", _C.DIM, enabled=self._color))
6109
+ for mount, root in added:
6110
+ self._print(_style(f" {mount}={root}", _C.DIM, enabled=self._color))
6111
+
6112
+ def _handle_blacklist(self, raw: str) -> None:
6113
+ """Blacklist folders/files for this session (overrides whitelist)."""
6114
+ import shlex
6115
+
6116
+ try:
6117
+ parts = shlex.split(raw) if raw else []
6118
+ except ValueError:
6119
+ parts = raw.split() if raw else []
6120
+
6121
+ if not parts:
6122
+ blocked = list(self._workspace_blocked_paths or [])
6123
+ if not blocked:
6124
+ self._print(_style("No blacklist entries.", _C.DIM, enabled=self._color))
6125
+ return
6126
+ self._print(_style("\nWorkspace blacklist", _C.CYAN, _C.BOLD, enabled=self._color))
6127
+ self._print(_style("─" * 60, _C.DIM, enabled=self._color))
6128
+ for p in blocked:
6129
+ self._print(f"- {p}")
6130
+ return
6131
+
6132
+ head = str(parts[0] or "").strip().lower()
6133
+ if head in ("reset", "clear"):
6134
+ self._workspace_blocked_paths = []
6135
+ self._sync_workspace_policy_to_ui()
6136
+ rid = self._attached_run_id()
6137
+ if rid is not None:
6138
+ self._sync_tool_prompt_settings_to_run(rid)
6139
+ self._print(_style("Blacklist cleared.", _C.DIM, enabled=self._color))
6140
+ return
6141
+
6142
+ blocked = list(self._workspace_blocked_paths or [])
6143
+ added: list[Path] = []
6144
+ for tok in parts:
6145
+ text = str(tok or "").strip()
6146
+ if not text:
6147
+ continue
6148
+ p = Path(text).expanduser()
6149
+ if p.is_absolute():
6150
+ try:
6151
+ resolved = p.resolve()
6152
+ except Exception:
6153
+ self._print(_style(f"Invalid path: {text}", _C.YELLOW, enabled=self._color))
6154
+ continue
6155
+ else:
6156
+ norm = normalize_relative_path(text)
6157
+ if not norm:
6158
+ self._print(_style(f"Invalid path: {text}", _C.YELLOW, enabled=self._color))
6159
+ continue
6160
+ try:
6161
+ resolved, _virt, _mount, _root = resolve_workspace_path(
6162
+ raw_path=norm,
6163
+ workspace_root=self._workspace_root,
6164
+ mounts=dict(self._workspace_mounts or {}),
6165
+ )
6166
+ except Exception:
6167
+ self._print(_style(f"Invalid path: {text}", _C.YELLOW, enabled=self._color))
6168
+ continue
6169
+
6170
+ try:
6171
+ resolved = resolved.resolve()
6172
+ except Exception:
6173
+ pass
6174
+
6175
+ # De-dup
6176
+ already = False
6177
+ for cur in blocked:
6178
+ try:
6179
+ if resolved == cur.resolve():
6180
+ already = True
6181
+ break
6182
+ except Exception:
6183
+ continue
6184
+ if already:
6185
+ continue
6186
+ blocked.append(resolved)
6187
+ added.append(resolved)
6188
+
6189
+ self._workspace_blocked_paths = blocked
6190
+ self._sync_workspace_policy_to_ui()
6191
+
6192
+ rid = self._attached_run_id()
6193
+ if rid is not None:
6194
+ self._sync_tool_prompt_settings_to_run(rid)
6195
+
6196
+ if not added:
6197
+ return
6198
+ self._print(_style("Blacklisted:", _C.DIM, enabled=self._color))
6199
+ for p in added:
6200
+ self._print(_style(f" {p}", _C.DIM, enabled=self._color))
6201
+
4072
6202
  def _handle_context(self, raw: str) -> None:
4073
6203
  """(Deprecated) Legacy context preview command.
4074
6204
 
4075
6205
  The public `/context` and `/llm` commands were removed in favor of:
4076
- - `/log runtime`
4077
- - `/log provider`
6206
+ - `/logs runtime`
6207
+ - `/logs provider`
4078
6208
  """
4079
6209
  import copy
4080
6210
  import shlex
@@ -4092,7 +6222,7 @@ class ReactShell:
4092
6222
  return
4093
6223
 
4094
6224
  copy_to_clipboard = False
4095
- # Accept `copy` as either a leading or trailing token (UX: "/log runtime ... copy").
6225
+ # Accept `copy` as either a leading or trailing token (UX: "/logs runtime ... copy").
4096
6226
  if parts and str(parts[0] or "").strip().lower() == "copy":
4097
6227
  copy_to_clipboard = True
4098
6228
  parts = parts[1:]
@@ -4112,7 +6242,7 @@ class ReactShell:
4112
6242
  self._print(_style(f"Unknown flag: {p}", _C.YELLOW, enabled=self._color))
4113
6243
  self._print(
4114
6244
  _style(
4115
- "Usage: /log runtime | /log provider",
6245
+ "Usage: /logs runtime | /logs provider",
4116
6246
  _C.DIM,
4117
6247
  enabled=self._color,
4118
6248
  )
@@ -4133,7 +6263,7 @@ class ReactShell:
4133
6263
  "provider": self._provider,
4134
6264
  "model": self._model,
4135
6265
  "note": "No active run. This is the current session context that will be included in the next /task.",
4136
- "tip": "Use /log runtime (or /log provider) to inspect durable LLM/tool call payloads from the last run.",
6266
+ "tip": "Use /logs runtime (or /logs provider) to inspect durable LLM/tool call payloads from the last run.",
4137
6267
  "session_messages": list(self._agent.session_messages or []),
4138
6268
  }
4139
6269
  if state is not None and hasattr(state, "run_id") and hasattr(state, "status"):
@@ -4453,7 +6583,7 @@ class ReactShell:
4453
6583
  Source of truth: `RunState.vars["_runtime"]["node_traces"]`.
4454
6584
 
4455
6585
  Usage:
4456
- /log runtime [copy] [--last] [--json-only] [--save <path>]
6586
+ /logs runtime [copy] [--last] [--json-only] [--save <path>]
4457
6587
  """
4458
6588
  import shlex
4459
6589
  import uuid
@@ -4465,7 +6595,7 @@ class ReactShell:
4465
6595
  parts = raw.split() if raw else []
4466
6596
 
4467
6597
  copy_to_clipboard = False
4468
- # Accept `copy` as either a leading or trailing token (UX: "/log runtime ... copy").
6598
+ # Accept `copy` as either a leading or trailing token (UX: "/logs runtime ... copy").
4469
6599
  if parts and str(parts[0] or "").strip().lower() == "copy":
4470
6600
  copy_to_clipboard = True
4471
6601
  parts = parts[1:]
@@ -4478,7 +6608,7 @@ class ReactShell:
4478
6608
  all_calls = False
4479
6609
  verbatim = False
4480
6610
  save_path: Optional[str] = None
4481
- usage = "Usage: /log runtime [copy] [--last] [--json-only] [--save <path>]"
6611
+ usage = "Usage: /logs runtime [copy] [--last] [--json-only] [--save <path>]"
4482
6612
  i = 0
4483
6613
  while i < len(parts):
4484
6614
  p = parts[i]
@@ -4809,7 +6939,7 @@ class ReactShell:
4809
6939
  - response: `result.raw_response` (provider JSON response, best-effort preserved)
4810
6940
 
4811
6941
  Usage:
4812
- /log provider [copy] [--last] [--run] [--json-only] [--save <path>]
6942
+ /logs provider [copy] [--last] [--run] [--json-only] [--save <path>]
4813
6943
  """
4814
6944
  import shlex
4815
6945
  import uuid
@@ -4821,7 +6951,7 @@ class ReactShell:
4821
6951
  parts = raw.split() if raw else []
4822
6952
 
4823
6953
  copy_to_clipboard = False
4824
- # Accept `copy` as either a leading or trailing token (UX: "/log provider --all copy").
6954
+ # Accept `copy` as either a leading or trailing token (UX: "/logs provider --all copy").
4825
6955
  if parts and str(parts[0] or "").strip().lower() == "copy":
4826
6956
  copy_to_clipboard = True
4827
6957
  parts = parts[1:]
@@ -4834,7 +6964,7 @@ class ReactShell:
4834
6964
  run_only = False
4835
6965
  no_tool_defs = False
4836
6966
  save_path: Optional[str] = None
4837
- usage = "Usage: /log provider [copy] [--last] [--run] [--json-only] [--no-tool-defs] [--save <path>]"
6967
+ usage = "Usage: /logs provider [copy] [--last] [--run] [--json-only] [--no-tool-defs] [--save <path>]"
4838
6968
 
4839
6969
  i = 0
4840
6970
  while i < len(parts):
@@ -4891,7 +7021,7 @@ class ReactShell:
4891
7021
  if not run_only and isinstance(session_id, str) and session_id.strip() and isinstance(self._runtime.run_store, QueryableRunStore):
4892
7022
  # Pull a bounded set of runs and filter client-side by session_id.
4893
7023
  # (QueryableRunStore doesn't expose a session_id filter in v0.1.)
4894
- # Prefer completeness over speed: /log provider is a debugging tool.
7024
+ # Prefer completeness over speed: /logs provider is a debugging tool.
4895
7025
  # JsonFileRunStore scans all run_*.json files anyway; a low limit can hide older runs.
4896
7026
  runs = self._runtime.run_store.list_runs(limit=100000)
4897
7027
  run_ids = [r.run_id for r in runs if getattr(r, "session_id", None) == session_id]
@@ -5030,7 +7160,7 @@ class ReactShell:
5030
7160
  def _extract_tool_calls(resp_obj: Any) -> list[Any]:
5031
7161
  """Best-effort tool-call extraction for human-readable logs.
5032
7162
 
5033
- `/log provider` is intentionally provider-wire oriented, but this line helps
7163
+ `/logs provider` is intentionally provider-wire oriented, but this line helps
5034
7164
  quickly spot whether the model asked for tools. Not all providers share the
5035
7165
  OpenAI `choices[0].message.tool_calls` shape (e.g. Anthropic `tool_use` blocks).
5036
7166
  """
@@ -5289,6 +7419,8 @@ class ReactShell:
5289
7419
  " /review ... Toggle Review mode (self-check) [saved]\n"
5290
7420
  " - /review [on|off] [max_rounds]\n"
5291
7421
  " - /review rounds <N>\n"
7422
+ " /config ... Configure runtime options [saved]\n"
7423
+ " - /config check-plan [on|off]\n"
5292
7424
  " /max-tokens [N] Show or set max tokens (-1 = auto) [saved]\n"
5293
7425
  " /max-messages [N] Show or set max history messages (-1 = unlimited) [saved]\n"
5294
7426
  " /memory Show MemAct Active Memory (MemAct only)\n"
@@ -5297,12 +7429,26 @@ class ReactShell:
5297
7429
  " /expand <span> Expand an archived span (--show, --into-context)\n"
5298
7430
  " /recall [opts] Recall spans by time/tags/query (--into-context)\n"
5299
7431
  " /vars [path] Inspect run vars (scratchpad, _runtime, ...)\n"
5300
- " /log runtime Show runtime step trace for LLM/tool calls (durable)\n"
5301
- " - /log runtime [copy] [--last] [--json-only] [--save <path>]\n"
5302
- " /log provider Show provider wire request+response (durable)\n"
5303
- " - /log provider [copy] [--last|--all] [--json-only] [--save <path>]\n"
7432
+ " /whitelist ... Add workspace mounts for this session\n"
7433
+ " - /whitelist <dir...>\n"
7434
+ " - /whitelist name=/abs/dir [name2=/abs/dir2 ...]\n"
7435
+ " /blacklist ... Block folders/files for this session (overrides whitelist)\n"
7436
+ " - /blacklist <path...>\n"
7437
+ " - /blacklist reset\n"
7438
+ " /logs runtime Show runtime step trace for LLM/tool calls (durable)\n"
7439
+ " - /logs runtime [copy] [--last] [--json-only] [--save <path>]\n"
7440
+ " /logs provider Show provider wire request+response (durable)\n"
7441
+ " - /logs provider [copy] [--last|--all] [--json-only] [--save <path>]\n"
5304
7442
  " /memorize <note> Store a durable memory note (tags + provenance)\n"
5305
7443
  " /mouse Toggle mouse mode (wheel scroll vs terminal selection)\n"
7444
+ " /agent [name] Switch agent (/agent list, /agent reload)\n"
7445
+ " /theme [name] Switch UI theme (/theme list, /theme custom ...)\n"
7446
+ " /files List pending @file attachments\n"
7447
+ " /files-keep [on|off] Keep @file attachments across turns [saved]\n"
7448
+ " /system [text] Show or set system prompt override [saved]\n"
7449
+ " /gpu [status|on|off] Toggle host GPU meter (via AbstractGateway)\n"
7450
+ " /links List links captured from last answer\n"
7451
+ " /open <N|URL> Open a link in your default browser\n"
5306
7452
  " /flow ... Run AbstractFlow workflows inside this REPL\n"
5307
7453
  " - /flow run <flow_id_or_path> [--verbosity none|default|full] [--key value ...]\n"
5308
7454
  " - /flow resume [--verbosity none|default|full] [--wait-until]\n"
@@ -5315,6 +7461,7 @@ class ReactShell:
5315
7461
  " /resume Resume the saved/attached run\n"
5316
7462
  " /pause Pause the current run (durable)\n"
5317
7463
  " /cancel Cancel the current run (durable)\n"
7464
+ " /conclude [note] Ask the agent to conclude now (best-effort; no new tools)\n"
5318
7465
  " /clear Clear memory and clear the screen\n"
5319
7466
  " /snapshot save <n> Save current state as named snapshot\n"
5320
7467
  " /snapshot load <n> Load snapshot by name\n"
@@ -5448,6 +7595,16 @@ class ReactShell:
5448
7595
 
5449
7596
  selected = turns[-limit_int:]
5450
7597
 
7598
+ renderer: Optional[TerminalMarkdownRenderer] = None
7599
+ try:
7600
+ renderer = TerminalMarkdownRenderer(
7601
+ color=self._color,
7602
+ theme=getattr(self, "_theme", None),
7603
+ width=self._terminal_width(),
7604
+ )
7605
+ except Exception:
7606
+ renderer = None
7607
+
5451
7608
  self._print(_style(f"\nHistory (last {len(selected)} interaction(s))", _C.CYAN, _C.BOLD, enabled=self._color))
5452
7609
  self._print(_style("─" * 80, _C.DIM, enabled=self._color))
5453
7610
 
@@ -5480,14 +7637,26 @@ class ReactShell:
5480
7637
  mid = _get_message_id(msg) or f"system_{uuid.uuid4().hex}"
5481
7638
  self._ui.register_copy_payload(mid, content)
5482
7639
  self._print(_style("[system]", _C.DIM, enabled=self._color))
5483
- self._print(content)
7640
+ if renderer is not None:
7641
+ try:
7642
+ self._print(renderer.render(content))
7643
+ except Exception:
7644
+ self._print(content)
7645
+ else:
7646
+ self._print(content)
5484
7647
  self._print(f"[[COPY:{mid}]] {footer}".rstrip())
5485
7648
  continue
5486
7649
 
5487
7650
  # Default: assistant/other roles (no role prefix; rely on styling/structure).
5488
7651
  mid = _get_message_id(msg) or f"assistant_{uuid.uuid4().hex}"
5489
7652
  self._ui.register_copy_payload(mid, content)
5490
- self._print(content)
7653
+ if renderer is not None:
7654
+ try:
7655
+ self._print(renderer.render(content))
7656
+ except Exception:
7657
+ self._print(content)
7658
+ else:
7659
+ self._print(content)
5491
7660
  self._print(f"[[COPY:{mid}]] {footer}".rstrip())
5492
7661
 
5493
7662
  self._print(_style("\n" + "─" * 80, _C.DIM, enabled=self._color))
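Both branches above use the same "render markdown, fall back to plain text" guard. One way to express that pattern as a small helper (a sketch of the pattern only, not how the package factors it):

    def render_or_plain(renderer, text: str) -> str:
        """Render markdown when a renderer is available, otherwise return the raw text."""
        if renderer is None:
            return text
        try:
            return renderer.render(text)
        except Exception:
            # Rendering is cosmetic; never let a markdown error hide the message itself.
            return text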
@@ -5688,6 +7857,10 @@ class ReactShell:
5688
7857
  # Reset approval state (clear = full reset)
5689
7858
  self._approve_all_session = False
5690
7859
 
7860
+ # Force status bar recompute (no last-run fallback for token meter).
7861
+ self._status_cache_key = None
7862
+ self._status_cache_text = ""
7863
+
5691
7864
  self._print(_style("Memory cleared. Ready for a fresh start.", _C.GREEN, enabled=self._color))
5692
7865
 
5693
7866
  def _handle_snapshot(self, arg: str) -> None:
@@ -5830,12 +8003,90 @@ class ReactShell:
5830
8003
  runtime_ns = {}
5831
8004
  state.vars["_runtime"] = runtime_ns
5832
8005
  runtime_ns["tool_prompt_examples"] = bool(self._tool_prompt_examples)
8006
+ runtime_ns["check_plan"] = bool(getattr(self, "_check_plan", False))
8007
+ # Apply session system prompt override to new runs (don't clobber an existing run override).
8008
+ sys_override = getattr(self, "_system_prompt_override", None)
8009
+ if (
8010
+ isinstance(sys_override, str)
8011
+ and sys_override.strip()
8012
+ and not (isinstance(runtime_ns.get("system_prompt"), str) and str(runtime_ns.get("system_prompt")).strip())
8013
+ ):
8014
+ runtime_ns["system_prompt"] = sys_override.strip()
8015
+
8016
+ # Workspace policy (tool-call scoping): keep it JSON-safe and stable across runs.
8017
+ #
8018
+ # Notes:
8019
+ # - `workspace_root` applies to relative paths and command working_directory defaults.
8020
+ # - Mount roots are expressed as `workspace_allowed_paths` (absolute paths).
8021
+ # - Blacklist uses `workspace_ignored_paths` (absolute paths) and always has priority.
8022
+ try:
8023
+ if "workspace_root" not in state.vars:
8024
+ state.vars["workspace_root"] = str(self._workspace_root)
8025
+
8026
+ allowed_paths = [str(p) for p in (self._workspace_mounts or {}).values() if isinstance(p, Path)]
8027
+ ignored_paths = [str(p) for p in (self._workspace_blocked_paths or []) if isinstance(p, Path)]
8028
+
8029
+ def _as_list(value: Any) -> List[str]:
8030
+ if value is None:
8031
+ return []
8032
+ if isinstance(value, list):
8033
+ return [str(x).strip() for x in value if isinstance(x, (str, int, float, bool)) and str(x).strip()]
8034
+ if isinstance(value, str):
8035
+ s = value.strip()
8036
+ if not s:
8037
+ return []
8038
+ if s.startswith("["):
8039
+ try:
8040
+ parsed = json.loads(s)
8041
+ if isinstance(parsed, list):
8042
+ return [str(x).strip() for x in parsed if isinstance(x, str) and str(x).strip()]
8043
+ except Exception:
8044
+ pass
8045
+ return [ln.strip() for ln in s.splitlines() if ln.strip()]
8046
+ return []
8047
+
8048
+ def _merge_unique(existing: Any, extra: List[str]) -> List[str]:
8049
+ base = _as_list(existing)
8050
+ seen: set[str] = set()
8051
+ out: List[str] = []
8052
+ for x in list(base) + list(extra):
8053
+ if not isinstance(x, str):
8054
+ continue
8055
+ s = x.strip()
8056
+ if not s or s in seen:
8057
+ continue
8058
+ seen.add(s)
8059
+ out.append(s)
8060
+ return out
8061
+
8062
+ if allowed_paths:
8063
+ state.vars["workspace_allowed_paths"] = _merge_unique(
8064
+ state.vars.get("workspace_allowed_paths") or state.vars.get("workspaceAllowedPaths"),
8065
+ allowed_paths,
8066
+ )
8067
+ mode_raw = state.vars.get("workspace_access_mode") or state.vars.get("workspaceAccessMode")
8068
+ mode = str(mode_raw or "").strip().lower()
8069
+ if not mode or mode == "workspace_only":
8070
+ state.vars["workspace_access_mode"] = "workspace_or_allowed"
8071
+ else:
8072
+ # Default to workspace_only when no allowlist is present and caller didn't specify.
8073
+ if "workspace_access_mode" not in state.vars and "workspaceAccessMode" not in state.vars:
8074
+ state.vars["workspace_access_mode"] = "workspace_only"
8075
+
8076
+ if ignored_paths:
8077
+ state.vars["workspace_ignored_paths"] = _merge_unique(
8078
+ state.vars.get("workspace_ignored_paths") or state.vars.get("workspaceIgnoredPaths"),
8079
+ ignored_paths,
8080
+ )
8081
+ except Exception:
8082
+ pass
8083
+
5833
8084
  try:
5834
8085
  self._runtime.run_store.save(state)
5835
8086
  except Exception:
5836
8087
  pass
5837
8088
 
5838
- def _start(self, task: str) -> None:
8089
+ def _start(self, task: str, *, attachments: Optional[List[Dict[str, Any]]] = None) -> None:
5839
8090
  if self._run_thread_active():
5840
8091
  self._print(_style("A run is already executing. Use /pause or /cancel first.", _C.DIM, enabled=self._color))
5841
8092
  return
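Taken together, the workspace-policy sync above leaves the run state with JSON-safe keys along these lines (paths and values below are illustrative, not defaults shipped by the package):

    state_vars = {
        "workspace_root": "/home/user/project",                    # base for relative paths / working_directory
        "workspace_allowed_paths": ["/home/user/notes"],           # extra mounts from /whitelist (absolute)
        "workspace_ignored_paths": ["/home/user/project/.env"],    # /blacklist entries; always take priority
        # "workspace_only" when no mounts are present; upgraded to "workspace_or_allowed" when they are
        "workspace_access_mode": "workspace_or_allowed",
    }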
@@ -5845,7 +8096,7 @@ class ReactShell:
5845
8096
  self._turn_task = str(task or "").strip() or None
5846
8097
  self._turn_trace = []
5847
8098
  self._turn_started_at = time.perf_counter()
5848
- run_id = self._agent.start(task, allowed_tools=self._allowed_tools)
8099
+ run_id = self._agent.start(task, allowed_tools=self._allowed_tools, attachments=attachments)
5849
8100
  self._sync_tool_prompt_settings_to_run(run_id)
5850
8101
  self._last_run_id = run_id
5851
8102
  if self._state_file:
@@ -5900,6 +8151,87 @@ class ReactShell:
5900
8151
  self._print(_style(f"Cancel requested (run_id={run_id}).", _C.DIM, enabled=self._color))
5901
8152
  self._reset_repeat_guardrails()
5902
8153
 
8154
+ def _consume_pending_conclude_note(self) -> Optional[str]:
8155
+ try:
8156
+ with self._pending_conclude_lock:
8157
+ note = self._pending_conclude_note
8158
+ self._pending_conclude_note = None
8159
+ return note
8160
+ except Exception:
8161
+ return None
8162
+
8163
+ def _apply_conclude_request(self, run_id: str, note: str) -> None:
8164
+ """Best-effort: push a one-shot 'conclude now' instruction into the ReAct inbox."""
8165
+ rid = str(run_id or "").strip()
8166
+ if not rid:
8167
+ return
8168
+ try:
8169
+ state = self._runtime.get_state(rid)
8170
+ except Exception:
8171
+ return
8172
+ if state is None or not hasattr(state, "vars") or not isinstance(state.vars, dict):
8173
+ return
8174
+
8175
+ runtime_ns = state.vars.get("_runtime")
8176
+ if not isinstance(runtime_ns, dict):
8177
+ runtime_ns = {}
8178
+ state.vars["_runtime"] = runtime_ns
8179
+
8180
+ inbox = runtime_ns.get("inbox")
8181
+ if not isinstance(inbox, list):
8182
+ inbox = []
8183
+ runtime_ns["inbox"] = inbox
8184
+
8185
+ extra = str(note or "").strip()
8186
+ msg = (
8187
+ "User requested: CONCLUDE NOW.\n"
8188
+ "- Provide your best final answer using ONLY the information already gathered in this run.\n"
8189
+ "- Do NOT call tools.\n"
8190
+ "- If information is missing, say what is unknown and list the most useful next steps."
8191
+ )
8192
+ if extra:
8193
+ msg = f"{msg}\n\nUser note:\n{extra}"
8194
+ inbox.append({"role": "system", "content": msg})
8195
+
8196
+ # Stronger guardrail: hide tools from the next model call (ReAct reads these from _runtime).
8197
+ runtime_ns["allowed_tools"] = []
8198
+ runtime_ns["tool_specs"] = []
8199
+
8200
+ try:
8201
+ self._runtime.run_store.save(state)
8202
+ except Exception:
8203
+ pass
8204
+
8205
+ # System prompt changes affect token estimates shown in the footer.
+ # The injected conclude message and hidden tool specs change the token estimate shown in the footer.
8206
+ self._status_cache_key = None
8207
+ self._status_cache_text = ""
8208
+
8209
+ def _conclude(self, raw: str) -> None:
8210
+ if str(getattr(self, "_agent_kind", "") or "") != "react":
8211
+ self._print(_style("/conclude is currently supported for the ReAct agent only.", _C.DIM, enabled=self._color))
8212
+ return
8213
+ run_id = self._attached_run_id()
8214
+ if run_id is None:
8215
+ self._print(_style("No run loaded. Start a task or /resume first.", _C.DIM, enabled=self._color))
8216
+ return
8217
+
8218
+ note = str(raw or "").strip()
8219
+
8220
+ # If a run is currently executing, queue an interrupt for the background thread
8221
+ # (avoids run_store lost-update races during tick/save).
8222
+ if self._run_thread_active():
8223
+ try:
8224
+ with self._pending_conclude_lock:
8225
+ self._pending_conclude_note = note
8226
+ except Exception:
8227
+ self._pending_conclude_note = note
8228
+ self._print(_style(f"Conclude requested (run_id={run_id}). Will apply on the next step.", _C.DIM, enabled=self._color))
8229
+ return
8230
+
8231
+ # Otherwise apply immediately to the persisted run state; user can /resume.
8232
+ self._apply_conclude_request(run_id, note)
8233
+ self._print(_style(f"Conclude requested (run_id={run_id}). Type '/resume' to continue.", _C.DIM, enabled=self._color))
8234
+
5903
8235
  def _try_load_state(self) -> None:
5904
8236
  try:
5905
8237
  state = self._agent.load_state(self._state_file) # type: ignore[arg-type]
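The conclude request above is handed to the run thread through a lock-protected one-shot slot: /conclude sets it, and the run loop consumes it once per step before calling the agent. A minimal standalone sketch of that handoff pattern, independent of the shell class (illustrative names):

    import threading
    from typing import Optional

    class OneShotSlot:
        """Hold at most one pending value; reading it also clears it."""

        def __init__(self) -> None:
            self._lock = threading.Lock()
            self._value: Optional[str] = None

        def put(self, value: str) -> None:
            with self._lock:
                self._value = value

        def take(self) -> Optional[str]:
            with self._lock:
                value, self._value = self._value, None
                return value

    # Producer (command handler):  slot.put("wrap up now")
    # Consumer (run loop, each step):  note = slot.take(); apply it only if not None.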
@@ -5923,6 +8255,9 @@ class ReactShell:
5923
8255
 
5924
8256
  def _run_loop(self, run_id: str) -> None:
5925
8257
  while True:
8258
+ note = self._consume_pending_conclude_note()
8259
+ if note is not None:
8260
+ self._apply_conclude_request(run_id, note)
5926
8261
  state = self._agent.step()
5927
8262
 
5928
8263
  if state.status == self._RunStatus.COMPLETED:
@@ -5982,6 +8317,8 @@ class ReactShell:
5982
8317
  self._agent.session_messages = loaded
5983
8318
  return
5984
8319
  response = self._prompt_user(wait.prompt or "Please respond:", wait.choices)
8320
+ if response == BLOCKING_PROMPT_CANCEL_TOKEN:
8321
+ continue
5985
8322
  state = self._agent.resume(response)
5986
8323
  continue
5987
8324
 
@@ -6080,6 +8417,8 @@ class ReactShell:
6080
8417
  if cur_wait.reason == self._WaitReason.USER:
6081
8418
  self._ui.clear_spinner()
6082
8419
  response = self._prompt_user(cur_wait.prompt or "Please respond:", cur_wait.choices)
8420
+ if response == BLOCKING_PROMPT_CANCEL_TOKEN:
8421
+ continue
6083
8422
  self._runtime.resume(
6084
8423
  workflow=_workflow_for(current_state),
6085
8424
  run_id=current_run_id,
@@ -6096,6 +8435,8 @@ class ReactShell:
6096
8435
  except Exception:
6097
8436
  pass
6098
8437
  response = self._prompt_user(cur_wait.prompt or "Please respond:", cur_wait.choices)
8438
+ if response == BLOCKING_PROMPT_CANCEL_TOKEN:
8439
+ continue
6099
8440
  self._runtime.resume(
6100
8441
  workflow=_workflow_for(current_state),
6101
8442
  run_id=current_run_id,
@@ -6112,6 +8453,8 @@ class ReactShell:
6112
8453
  if payload is None:
6113
8454
  self._print(_style("\nLeft run waiting (not resumed).", _C.DIM, enabled=self._color))
6114
8455
  return
8456
+ if isinstance(payload, dict) and payload.get("mode") == "cancelled":
8457
+ continue
6115
8458
  self._runtime.resume(
6116
8459
  workflow=_workflow_for(current_state),
6117
8460
  run_id=current_run_id,
@@ -6155,6 +8498,8 @@ class ReactShell:
6155
8498
  if payload is None:
6156
8499
  self._print(_style("\nLeft run waiting (not resumed).", _C.DIM, enabled=self._color))
6157
8500
  return
8501
+ if isinstance(payload, dict) and payload.get("mode") == "cancelled":
8502
+ continue
6158
8503
 
6159
8504
  state = self._runtime.resume(
6160
8505
  workflow=self._agent.workflow,
@@ -6172,6 +8517,8 @@ class ReactShell:
6172
8517
  except Exception:
6173
8518
  pass
6174
8519
  response = self._prompt_user(wait.prompt or "Please respond:", wait.choices)
8520
+ if response == BLOCKING_PROMPT_CANCEL_TOKEN:
8521
+ continue
6175
8522
  state = self._runtime.resume(
6176
8523
  workflow=self._agent.workflow,
6177
8524
  run_id=run_id,
@@ -6195,6 +8542,8 @@ class ReactShell:
6195
8542
  self._print(f" [{i+1}] {c}")
6196
8543
  while True:
6197
8544
  raw = self._simple_prompt("Choice (number or text): ")
8545
+ if raw == BLOCKING_PROMPT_CANCEL_TOKEN:
8546
+ return raw
6198
8547
  if not raw:
6199
8548
  continue
6200
8549
  if raw.isdigit():
@@ -6202,7 +8551,8 @@ class ReactShell:
6202
8551
  if 0 <= idx < len(choices):
6203
8552
  return str(choices[idx])
6204
8553
  return raw
6205
- return self._simple_prompt(prompt + " ")
8554
+ resp = self._simple_prompt(prompt + " ")
8555
+ return resp if resp == BLOCKING_PROMPT_CANCEL_TOKEN else resp.strip()
6206
8556
 
6207
8557
  def _approve_and_execute(self, tool_calls: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
6208
8558
  auto = bool(self._auto_approve or self._approve_all_session)
@@ -6244,9 +8594,12 @@ class ReactShell:
6244
8594
 
6245
8595
  if not auto and not approve_all:
6246
8596
  while True:
6247
- choice = self._simple_prompt(
8597
+ choice_raw = self._simple_prompt(
6248
8598
  f"Approve {name}? [y]es/[n]o/[a]ll/[e]dit/[q]uit: "
6249
- ).lower()
8599
+ ).strip()
8600
+ if choice_raw == BLOCKING_PROMPT_CANCEL_TOKEN:
8601
+ return {"mode": "cancelled"}
8602
+ choice = choice_raw.lower()
6250
8603
  if choice in ("y", "yes"):
6251
8604
  break
6252
8605
  if choice in ("a", "all"):
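The approval prompt above compares the raw reply against BLOCKING_PROMPT_CANCEL_TOKEN before stripping or lower-casing it, and surfaces a cancel as {"mode": "cancelled"}. The general shape of that sentinel check, with an illustrative sentinel value standing in for the real token defined in fullscreen_ui:

    CANCEL_TOKEN = "\x00__cancelled__"  # illustrative; stands in for BLOCKING_PROMPT_CANCEL_TOKEN

    def ask_yes_no(prompt_fn, question: str):
        """Return True/False, or None when the user cancelled the blocking prompt."""
        answer = prompt_fn(question)
        if answer == CANCEL_TOKEN:
            return None  # caller treats this like {"mode": "cancelled"}
        return answer.strip().lower() in ("y", "yes")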
@@ -6269,6 +8622,8 @@ class ReactShell:
6269
8622
  return None
6270
8623
  if choice in ("e", "edit"):
6271
8624
  edited = self._simple_prompt("New arguments (JSON): ")
8625
+ if edited == BLOCKING_PROMPT_CANCEL_TOKEN:
8626
+ return {"mode": "cancelled"}
6272
8627
  if edited:
6273
8628
  try:
6274
8629
  new_args = json.loads(edited)
@@ -6290,7 +8645,10 @@ class ReactShell:
6290
8645
 
6291
8646
  # Additional confirmation for shell execution (skip if auto/approve_all is set)
6292
8647
  if name == "execute_command" and not auto and not approve_all:
6293
- confirm = self._simple_prompt("Type 'run' to execute this command: ").lower()
8648
+ confirm_raw = self._simple_prompt("Type 'run' to execute this command: ")
8649
+ if confirm_raw == BLOCKING_PROMPT_CANCEL_TOKEN:
8650
+ return {"mode": "cancelled"}
8651
+ confirm = confirm_raw.lower()
6294
8652
  if confirm != "run":
6295
8653
  results.append(
6296
8654
  {