tcm-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tcm/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ """tcm-cli: An autonomous agent for Traditional Chinese Medicine research and discovery."""
2
+
3
+ __version__ = "0.1.0"
tcm/agent/__init__.py ADDED
@@ -0,0 +1 @@
1
+ """Agent components: planner, executor, synthesizer, session management."""
tcm/agent/config.py ADDED
@@ -0,0 +1,229 @@
1
+ """
2
+ Configuration management for tcm.
3
+
4
+ Config is stored at ~/.tcm/config.json and manages:
5
+ - LLM provider settings (Anthropic, OpenAI)
6
+ - Data directory paths (TCMSP, TCMID, etc.)
7
+ - Output preferences
8
+ - Agent settings
9
+ """
10
+
11
+ import json
12
+ import os
13
+ import logging
14
+ from pathlib import Path
15
+ from typing import Any, Optional
16
+
17
+ from dotenv import load_dotenv
18
+
19
# Load variables from a local .env file into os.environ before any config
# lookups happen (API keys may be supplied via environment variables).
load_dotenv()

from rich.table import Table

# All tcm state lives in a dot-directory under the user's home.
CONFIG_DIR = Path.home() / ".tcm"
CONFIG_FILE = CONFIG_DIR / "config.json"
# Providers accepted for "llm.provider" (validated in Config.set).
VALID_LLM_PROVIDERS = frozenset({"anthropic", "openai"})
logger = logging.getLogger("tcm.config")
27
+
28
# Fallback values for every recognized config key. Config.get() consults
# this mapping when a key is absent from the user's config file, and
# Config.validate() uses each default's type to sanity-check user values.
DEFAULTS = {
    # LLM provider/model selection and credentials
    "llm.provider": "anthropic",
    "llm.model": "claude-sonnet-4-5-20250929",
    "llm.api_key": None,
    "llm.openai_api_key": None,
    "llm.temperature": 0.1,

    # Paths to local TCM datasets (None = not configured)
    "data.base": str(CONFIG_DIR / "data"),
    "data.tcmsp": None,
    "data.tcmid": None,
    "data.herbs": None,
    "data.formulas": None,
    "data.batman": None,
    "data.symmap": None,

    # Output rendering preferences
    "output.format": "markdown",
    "output.verbose": False,
    "output.auto_publish_html_interactive": True,
    "output.auto_publish_html_batch": False,

    # Terminal UI
    "ui.spinner": "dots",
    "ui.language": "en",  # "en" or "zh"

    # Sandbox settings
    "sandbox.timeout": 30,
    "sandbox.output_dir": str(Path.cwd() / "outputs"),
    "sandbox.max_retries": 2,

    # Agent behavior
    "agent.max_iterations": 3,
    "agent.enable_experimental_tools": False,
    "agent.executor_max_retries": 2,
    "agent.executor_loop_limit": 50,
    "agent.synthesis_max_tokens": 8192,
    "agent.enforce_grounded_synthesis": True,
    "agent.confidence_scoring_enabled": True,
    "agent.min_step_success_rate": 0.5,
    "agent.allow_creative_hypotheses": True,
    "agent.max_hypotheses": 3,
    "agent.profile": "research",
    "agent.planner_max_tools": 60,
    # Runtime tool-health suppression (consumed via config.get in Session)
    "agent.tool_health_enabled": True,
    "agent.tool_health_fail_threshold": 2,
    "agent.tool_health_failure_window_s": 1800,
    "agent.tool_health_suppress_seconds": 900,
}
72
+
73
# Per-profile overrides applied by Config.set("agent.profile", ...).
# Each preset tunes the grounding / creativity / confidence-scoring flags.
AGENT_PROFILE_PRESETS = {
    "research": {
        "agent.enforce_grounded_synthesis": True,
        "agent.allow_creative_hypotheses": True,
        "agent.confidence_scoring_enabled": True,
    },
    "clinical": {
        "agent.enforce_grounded_synthesis": True,
        "agent.allow_creative_hypotheses": False,
        "agent.confidence_scoring_enabled": True,
    },
    "education": {
        "agent.enforce_grounded_synthesis": False,
        "agent.allow_creative_hypotheses": True,
        "agent.confidence_scoring_enabled": False,
    },
}
90
+
91
+
92
class Config:
    """Manages tcm configuration.

    Values live in a flat dict keyed by dotted names (e.g. "llm.model").
    Lookups fall back to the module-level DEFAULTS, so only explicitly set
    keys are persisted to ~/.tcm/config.json.
    """

    def __init__(self, data: dict = None):
        self._data = data or {}

    @classmethod
    def load(cls) -> "Config":
        """Load config from ~/.tcm/config.json, falling back to defaults."""
        if CONFIG_FILE.exists():
            try:
                raw = json.loads(CONFIG_FILE.read_text())
                return cls(data=raw)
            except (json.JSONDecodeError, OSError) as exc:
                # A corrupt/unreadable file degrades to pure defaults.
                logger.warning("Failed to load config: %s", exc)
        return cls()

    def save(self):
        """Persist config to ~/.tcm/config.json."""
        CONFIG_DIR.mkdir(parents=True, exist_ok=True)
        CONFIG_FILE.write_text(json.dumps(self._data, indent=2) + "\n")

    def get(self, key: str, default: Any = None) -> Any:
        """Get a config value, falling back to DEFAULTS then the provided default."""
        if key in self._data:
            return self._data[key]
        if key in DEFAULTS:
            return DEFAULTS[key]
        return default

    def set(self, key: str, value: Any):
        """Set a config value.

        Special handling:
        - llm.provider: validates against known providers.
        - llm.model: auto-detects and sets the matching provider.
        - agent.profile: applies profile presets.

        Raises:
            ValueError: for an unknown provider or profile name.
        """
        if key == "llm.provider" and value not in VALID_LLM_PROVIDERS:
            raise ValueError(
                f"Invalid provider '{value}'. Valid: {', '.join(sorted(VALID_LLM_PROVIDERS))}"
            )
        if key == "llm.model":
            # Imported lazily to avoid a hard dependency at module import time.
            from tcm.models.llm import resolve_provider, MODEL_CATALOG
            provider = resolve_provider(value)
            if provider:
                self._data["llm.provider"] = provider
            if value not in MODEL_CATALOG:
                logger.warning(
                    "Model '%s' is not in the catalog — it may still work if your provider supports it.",
                    value,
                )
        if key == "agent.profile":
            preset = AGENT_PROFILE_PRESETS.get(value)
            if not preset:
                raise ValueError(
                    f"Unknown profile '{value}'. "
                    f"Valid: {', '.join(sorted(AGENT_PROFILE_PRESETS))}"
                )
            # Apply every preset key before recording the profile itself.
            self._data.update(preset)
        self._data[key] = value

    def llm_api_key(self, provider: str = None) -> Optional[str]:
        """Get the API key for the given LLM provider.

        Config-file values take precedence over environment variables.
        Returns None for unknown providers.
        """
        provider = provider or self.get("llm.provider", "anthropic")
        if provider == "anthropic":
            return (
                self._data.get("llm.api_key")
                or os.environ.get("ANTHROPIC_API_KEY")
            )
        if provider == "openai":
            return (
                self._data.get("llm.openai_api_key")
                or os.environ.get("OPENAI_API_KEY")
            )
        return None

    def validate(self) -> list[str]:
        """Validate configuration and return a list of human-readable issues."""
        issues = []
        for key, value in self._data.items():
            if key not in DEFAULTS:
                issues.append(f"Unknown config key '{key}' (possible typo)")
                continue
            default = DEFAULTS[key]
            # Keys whose default is None carry no type information, and a
            # None value is always acceptable.
            if value is None or default is None:
                continue
            expected_type = type(default)
            if expected_type is bool:
                if not isinstance(value, bool):
                    issues.append(f"'{key}' should be bool, got {type(value).__name__}")
            elif expected_type in (int, float):
                # bool is a subclass of int — reject it explicitly so that
                # e.g. "sandbox.timeout": true is reported, not accepted.
                if isinstance(value, bool) or not isinstance(value, (int, float)):
                    issues.append(f"'{key}' should be numeric, got {type(value).__name__}")
        return issues

    def to_table(self) -> Table:
        """Render the effective configuration as a Rich table."""
        table = Table(title="tcm Configuration")
        table.add_column("Key", style="cyan")
        table.add_column("Value")
        table.add_column("Source", style="dim")

        for key in sorted(DEFAULTS):
            source = "config" if key in self._data else "default"
            val = self.get(key)
            if key.endswith("api_key"):
                # Mask secrets: show a short prefix/suffix only when the key
                # is long enough for that to reveal nothing useful.
                if val:
                    val = val[:7] + "..." + val[-4:] if len(val) > 11 else "***"
                else:
                    val = "(not set)"
            else:
                val = str(val)
            table.add_row(key, val, source)
        return table

    def keys_table(self) -> Table:
        """Show API key status table."""
        table = Table(title="API Keys")
        table.add_column("Service", style="cyan")
        table.add_column("Status")
        table.add_column("Description")

        # Anthropic is required; OpenAI is optional.
        key = self.llm_api_key("anthropic")
        status = "[green]✓ configured[/green]" if key else "[red]✗ missing[/red]"
        table.add_row("Anthropic", status, "Primary LLM provider")

        key = self.llm_api_key("openai")
        status = "[green]✓ configured[/green]" if key else "[dim]○ optional[/dim]"
        table.add_row("OpenAI", status, "Alternative LLM provider")

        return table
tcm/agent/doctor.py ADDED
@@ -0,0 +1,92 @@
1
+ """
2
+ Health checks for tcm CLI.
3
+ """
4
+
5
+ import shutil
6
+ import sys
7
+ from rich.table import Table
8
+
9
+
10
def run_checks(config, session=None) -> list[dict]:
    """Run all health checks. Returns list of check results.

    Each result is a dict with 'name', 'status' ("ok"/"warn"/"info"/"error"),
    and a human-readable 'detail' string.
    """
    results = []

    # Interpreter version — 3.10+ is required.
    info = sys.version_info
    version_str = f"{info.major}.{info.minor}.{info.micro}"
    meets_minimum = info >= (3, 10)
    results.append({
        "name": "Python version",
        "status": "ok" if meets_minimum else "error",
        "detail": f"Python {version_str}" + ("" if meets_minimum else " (need 3.10+)"),
    })

    # Primary LLM credential.
    api_key = config.llm_api_key("anthropic")
    results.append({
        "name": "Anthropic API key",
        "status": "ok" if api_key else "error",
        "detail": "configured" if api_key else "missing — run `tcm setup`",
    })

    # Required third-party packages.
    for module_name in ["anthropic", "rich", "typer", "httpx", "prompt_toolkit"]:
        try:
            __import__(module_name)
        except ImportError:
            results.append({"name": f"Package: {module_name}", "status": "error", "detail": "not installed"})
        else:
            results.append({"name": f"Package: {module_name}", "status": "ok", "detail": "installed"})

    # Nice-to-have packages: missing ones are informational only.
    for module_name, label in [("pandas", "pandas"), ("numpy", "numpy"), ("rdkit", "RDKit (chemistry)")]:
        try:
            __import__(module_name)
        except ImportError:
            results.append({"name": f"Optional: {label}", "status": "info", "detail": "not installed (optional)"})
        else:
            results.append({"name": f"Optional: {label}", "status": "ok", "detail": "installed"})

    # Tool registry health.
    from tcm.tools import ensure_loaded, tool_load_errors
    ensure_loaded()
    errors = tool_load_errors()
    if errors:
        results.append({
            "name": "Tool modules",
            "status": "warn",
            "detail": f"{len(errors)} module(s) failed to load: {', '.join(errors.keys())}",
        })
    else:
        from tcm.tools import registry
        results.append({
            "name": "Tool modules",
            "status": "ok",
            "detail": f"{len(registry.list_tools())} tools loaded",
        })

    return results
66
+
67
+
68
def to_table(checks: list[dict]) -> Table:
    """Render checks as a Rich table.

    Statuses map to icons: ok → green check, warn → yellow warning,
    info → dim circle, anything else → red cross.
    """
    icons = {
        "ok": "[green]✓[/green]",
        "warn": "[yellow]⚠[/yellow]",
        "info": "[dim]○[/dim]",
    }

    table = Table(title="tcm doctor")
    table.add_column("Check", style="cyan")
    table.add_column("Status")
    table.add_column("Detail")

    for entry in checks:
        icon = icons.get(entry["status"], "[red]✗[/red]")
        table.add_row(entry["name"], icon, entry["detail"])

    return table
88
+
89
+
90
def has_errors(checks: list[dict]) -> bool:
    """Return True when at least one check reported status "error"."""
    for entry in checks:
        if entry["status"] == "error":
            return True
    return False
tcm/agent/executor.py ADDED
@@ -0,0 +1,122 @@
1
+ """
2
+ Plan executor: iterates plan steps, calls tools, handles errors/retries.
3
+ """
4
+
5
+ import json
6
+ import logging
7
+ import time
8
+ from rich.console import Console
9
+ from rich.panel import Panel
10
+
11
+ from tcm.agent.session import Session
12
+ from tcm.tools import registry, ensure_loaded
13
+
14
# Module-level logger and shared Rich console for step progress output.
logger = logging.getLogger("tcm.agent.executor")
console = Console()
16
+
17
+
18
def execute_plan(session: Session, plan: dict, verbose: bool = False) -> list[dict]:
    """Execute a research plan step by step.

    Args:
        session: Active session with LLM and config.
        plan: Plan dict with 'steps' list.
        verbose: Whether to print step details.

    Returns:
        List of step results.
    """
    ensure_loaded()
    steps = plan.get("steps", [])

    if not steps:
        # No tool steps — this was a direct LLM response
        return [{"step": 0, "tool": "llm_direct", "result": plan.get("reasoning", ""), "status": "success"}]

    retry_limit = int(session.config.get("agent.executor_max_retries", 2))
    outcomes = []

    for step in steps:
        number = step.get("step", len(outcomes) + 1)
        name = step.get("tool", "")
        params = step.get("parameters", {})
        why = step.get("purpose", "")

        if verbose:
            console.print(f"  [cyan]Step {number}[/cyan]: {name} — {why}")

        tool = registry.get_tool(name)
        if not tool:
            outcomes.append({
                "step": number,
                "tool": name,
                "status": "error",
                "error": f"Tool '{name}' not found.",
            })
            continue

        # Substitute $stepN references with earlier step outputs, then run.
        outcome = _execute_with_retry(session, tool, _resolve_params(params, outcomes), retry_limit)
        outcome["step"] = number
        outcome["tool"] = name
        outcome["purpose"] = why

        if verbose:
            succeeded = outcome["status"] == "success"
            mark, color = ("✓", "green") if succeeded else ("✗", "red")
            console.print(f"    [{color}]{mark}[/{color}] {outcome['status']}")

        outcomes.append(outcome)

    return outcomes
76
+
77
+
78
def _execute_with_retry(session: Session, tool, parameters: dict, max_retries: int) -> dict:
    """Execute a tool, retrying on failure.

    Args:
        session: Session used for tool-health bookkeeping.
        tool: Tool object exposing ``run(**parameters)`` and ``name``.
        parameters: Keyword arguments passed to the tool.
        max_retries: Total number of attempts. Values below 1 are clamped
            to 1 so the tool is always executed at least once.

    Returns:
        ``{"status": "success", "output": ...}`` on success, or
        ``{"status": "error", "error": ...}`` after the final failed attempt.
    """
    # Clamp misconfigured values: previously max_retries <= 0 skipped
    # execution entirely and returned a misleading "Max retries exceeded".
    attempts = max(1, max_retries)
    last_error = "Max retries exceeded"
    for attempt in range(1, attempts + 1):
        try:
            output = tool.run(**parameters)
            session.record_tool_success(tool.name)
            return {"status": "success", "output": output}
        except Exception as e:
            last_error = str(e)
            logger.warning("Tool %s failed (attempt %d): %s", tool.name, attempt, last_error)
            session.record_tool_failure(tool.name, last_error)

            if attempt >= attempts:
                return {"status": "error", "error": last_error}
            # Brief pause before retrying — transient failures often recover.
            time.sleep(1)

    return {"status": "error", "error": last_error}
95
+
96
+
97
+ def _resolve_params(parameters: dict, previous_results: list) -> dict:
98
+ """Resolve parameter references like '$step1.output.targets' to actual values."""
99
+ resolved = {}
100
+ for key, value in parameters.items():
101
+ if isinstance(value, str) and value.startswith("$step"):
102
+ try:
103
+ # Parse reference like "$step1.output.targets"
104
+ parts = value[1:].split(".")
105
+ step_ref = int(parts[0].replace("step", "")) - 1
106
+ if 0 <= step_ref < len(previous_results):
107
+ result = previous_results[step_ref]
108
+ obj = result
109
+ for part in parts[1:]:
110
+ if isinstance(obj, dict):
111
+ obj = obj.get(part, value)
112
+ else:
113
+ obj = value
114
+ break
115
+ resolved[key] = obj
116
+ else:
117
+ resolved[key] = value
118
+ except (ValueError, IndexError):
119
+ resolved[key] = value
120
+ else:
121
+ resolved[key] = value
122
+ return resolved
tcm/agent/planner.py ADDED
@@ -0,0 +1,90 @@
1
+ """
2
+ Research planner: takes user query → calls LLM with tool catalog → returns ordered plan.
3
+ """
4
+
5
+ import json
6
+ import logging
7
+ from tcm.agent.session import Session
8
+ from tcm.tools import registry, ensure_loaded
9
+
10
logger = logging.getLogger("tcm.agent.planner")

# System prompt for the planning LLM call. The tool catalog is appended at
# request time (see create_plan), so this string deliberately ends with the
# "AVAILABLE TOOLS:" header.
PLANNER_SYSTEM = """You are an expert Traditional Chinese Medicine (TCM) research assistant.
You have access to a set of computational tools for TCM research.

Given a user's research question, create a step-by-step execution plan.

RULES:
1. Select only the tools needed to answer the question.
2. Order steps logically — later steps can depend on earlier results.
3. Each step must specify exactly one tool and its parameters.
4. Be conservative — only include steps that directly help answer the question.
5. If the question is simple and can be answered with 1-2 tools, keep the plan short.

OUTPUT FORMAT (strict JSON):
{
  "reasoning": "Brief explanation of your approach",
  "steps": [
    {
      "step": 1,
      "tool": "category.tool_name",
      "parameters": {"param1": "value1"},
      "purpose": "Why this step is needed"
    }
  ]
}

AVAILABLE TOOLS:
"""
39
+
40
+
41
def create_plan(session: Session, query: str, mention_context: str = "") -> dict:
    """Create a research plan for the given query.

    Args:
        session: Active session providing config, the LLM client, and
            tool-health state.
        query: The user's research question.
        mention_context: Optional extra context appended to the system prompt.

    Returns:
        dict with 'reasoning' and 'steps' keys. When the LLM response cannot
        be parsed as a plan, 'steps' is empty and 'raw_response' is True.
    """
    ensure_loaded()

    # Build tool catalog for the planner, hiding tools that are currently
    # suppressed due to repeated transient failures.
    suppressed = session.tool_health_suppressed_tools()
    tool_catalog = registry.tool_descriptions_for_llm(
        exclude_tools=suppressed,
    )

    system_prompt = PLANNER_SYSTEM + tool_catalog

    if mention_context:
        system_prompt += f"\n\nUSER CONTEXT:\n{mention_context}"

    messages = [{"role": "user", "content": query}]

    llm = session.get_llm()
    response = llm.chat(
        system=system_prompt,
        messages=messages,
        temperature=0.1,
        max_tokens=2048,
    )

    # Parse the plan from the LLM response.
    try:
        content = response.content.strip()
        # Strip a markdown code fence if the model wrapped the JSON in one.
        if "```json" in content:
            content = content.split("```json")[1].split("```")[0].strip()
        elif "```" in content:
            content = content.split("```")[1].split("```")[0].strip()

        plan = json.loads(content)
        # The model may legally return any JSON value; only a dict with a
        # 'steps' key is a usable plan. Previously a parsed number/bool/None
        # raised an uncaught TypeError on the membership test.
        if not isinstance(plan, dict) or "steps" not in plan:
            plan = {"reasoning": "Direct response", "steps": []}
        return plan
    except (json.JSONDecodeError, IndexError, TypeError):
        logger.warning("Failed to parse plan JSON, returning raw response")
        return {
            "reasoning": response.content,
            "steps": [],
            "raw_response": True,
        }
tcm/agent/session.py ADDED
@@ -0,0 +1,129 @@
1
+ """
2
+ Session management: holds config, LLM clients, and shared state for a tcm session.
3
+ """
4
+
5
+ import time
6
+ from pathlib import Path
7
+ from rich.console import Console
8
+
9
+ from tcm.agent.config import Config
10
+
11
+
12
class Session:
    """Manages state for a tcm research session.

    Holds the loaded Config, a lazily created LLM client, a scratchpad of
    log lines, and runtime tool-health bookkeeping used to temporarily
    suppress tools that keep failing transiently.
    """

    def __init__(self, config: Config = None, verbose: bool = False, mode: str = "batch"):
        self.config = config or Config.load()
        self.verbose = verbose
        self.mode = mode  # "interactive" or "batch"
        self.console = Console()
        self._llm = None
        self._scratchpad = []
        # tool name -> timestamps of recent transient failures
        self._tool_health_failures: dict[str, list[float]] = {}
        # tool name -> epoch second until which the tool is suppressed
        self._tool_health_suppressed_until: dict[str, float] = {}

    def get_llm(self):
        """Get or create the LLM client based on config."""
        if self._llm is None:
            self._llm = self._create_llm()
        return self._llm

    def _create_llm(self):
        """Create LLM client from config."""
        from tcm.models.llm import LLMClient

        provider = self.config.get("llm.provider", "anthropic")
        return LLMClient(
            provider=provider,
            model=self.config.get("llm.model", None),
            api_key=self.config.llm_api_key(provider),
        )

    def set_model(self, model: str, provider: str = None):
        """Switch the LLM model mid-session.

        Provider is auto-detected from the model name if not given.
        """
        if provider:
            self.config.set("llm.provider", provider)
        # Config.set("llm.model", ...) auto-detects the provider itself.
        self.config.set("llm.model", model)
        # Drop the cached client so the next get_llm() picks up the change.
        self._llm = None

    @property
    def current_model(self) -> str:
        """Return the current model name."""
        if self._llm:
            return self._llm.model
        return self.config.get("llm.model") or "claude-sonnet-4-5-20250929"

    def log(self, message: str):
        """Append to the scratchpad; echo dimly to the console when verbose."""
        self._scratchpad.append(message)
        if self.verbose:
            self.console.print(f"  [dim]{message}[/dim]")

    def save_scratchpad(self, path: Path):
        """Save scratchpad to file for debugging."""
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text("\n".join(self._scratchpad))

    # --- Runtime tool-health tracking ---

    def _tool_health_enabled(self) -> bool:
        # Controlled by the "agent.tool_health_enabled" config flag.
        return bool(self.config.get("agent.tool_health_enabled", True))

    def _is_transient_tool_error(self, error_text: str) -> bool:
        """Heuristic: does the error text look like a transient network/service issue?"""
        haystack = str(error_text or "").lower()
        markers = (
            "timeout", "timed out", "connection", "dns",
            "service unavailable", "rate limit", "429",
            "500", "502", "503", "504",
        )
        for marker in markers:
            if marker in haystack:
                return True
        return False

    def record_tool_success(self, tool_name: str):
        """Clear runtime failure pressure after a successful execution."""
        if tool_name:
            self._tool_health_failures.pop(tool_name, None)
            self._tool_health_suppressed_until.pop(tool_name, None)

    def record_tool_failure(self, tool_name: str, error_text: str = ""):
        """Record transient tool failures and suppress flaky tools temporarily."""
        if not tool_name or not self._tool_health_enabled():
            return
        if not self._is_transient_tool_error(error_text):
            return

        now = time.time()
        window = max(60, int(self.config.get("agent.tool_health_failure_window_s", 1800)))
        threshold = max(1, int(self.config.get("agent.tool_health_fail_threshold", 2)))
        suppress_for = max(60, int(self.config.get("agent.tool_health_suppress_seconds", 900)))

        # Keep only failures inside the rolling window, then add this one.
        recent = [stamp for stamp in self._tool_health_failures.get(tool_name, []) if now - stamp <= window]
        recent.append(now)
        self._tool_health_failures[tool_name] = recent

        if len(recent) >= threshold:
            self._tool_health_suppressed_until[tool_name] = now + suppress_for

    def tool_health_suppressed_tools(self) -> set[str]:
        """Return tools currently suppressed due to repeated transient failures."""
        if not self._tool_health_enabled():
            return set()

        now = time.time()
        active = set()
        # Iterate over a snapshot so expired entries can be removed in place.
        for name, until in list(self._tool_health_suppressed_until.items()):
            if now < until:
                active.add(name)
            else:
                # Suppression expired — forget the tool's failure history too.
                self._tool_health_suppressed_until.pop(name, None)
                self._tool_health_failures.pop(name, None)
        return active