agentops-cockpit 0.9.7__py3-none-any.whl → 0.9.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60) hide show
  1. agent_ops_cockpit/agent.py +43 -81
  2. agent_ops_cockpit/cache/semantic_cache.py +10 -21
  3. agent_ops_cockpit/cli/main.py +105 -153
  4. agent_ops_cockpit/eval/load_test.py +33 -50
  5. agent_ops_cockpit/eval/quality_climber.py +88 -93
  6. agent_ops_cockpit/eval/red_team.py +54 -21
  7. agent_ops_cockpit/mcp_server.py +26 -93
  8. agent_ops_cockpit/ops/arch_review.py +221 -148
  9. agent_ops_cockpit/ops/auditors/base.py +50 -0
  10. agent_ops_cockpit/ops/auditors/behavioral.py +31 -0
  11. agent_ops_cockpit/ops/auditors/compliance.py +35 -0
  12. agent_ops_cockpit/ops/auditors/dependency.py +48 -0
  13. agent_ops_cockpit/ops/auditors/finops.py +48 -0
  14. agent_ops_cockpit/ops/auditors/graph.py +49 -0
  15. agent_ops_cockpit/ops/auditors/pivot.py +51 -0
  16. agent_ops_cockpit/ops/auditors/reasoning.py +67 -0
  17. agent_ops_cockpit/ops/auditors/reliability.py +53 -0
  18. agent_ops_cockpit/ops/auditors/security.py +87 -0
  19. agent_ops_cockpit/ops/auditors/sme_v12.py +76 -0
  20. agent_ops_cockpit/ops/auditors/sovereignty.py +74 -0
  21. agent_ops_cockpit/ops/auditors/sre_a2a.py +179 -0
  22. agent_ops_cockpit/ops/benchmarker.py +97 -0
  23. agent_ops_cockpit/ops/cost_optimizer.py +15 -24
  24. agent_ops_cockpit/ops/discovery.py +214 -0
  25. agent_ops_cockpit/ops/evidence_bridge.py +30 -63
  26. agent_ops_cockpit/ops/frameworks.py +124 -1
  27. agent_ops_cockpit/ops/git_portal.py +74 -0
  28. agent_ops_cockpit/ops/mcp_hub.py +19 -42
  29. agent_ops_cockpit/ops/orchestrator.py +477 -277
  30. agent_ops_cockpit/ops/policy_engine.py +38 -38
  31. agent_ops_cockpit/ops/reliability.py +120 -65
  32. agent_ops_cockpit/ops/remediator.py +54 -0
  33. agent_ops_cockpit/ops/secret_scanner.py +34 -22
  34. agent_ops_cockpit/ops/swarm.py +17 -27
  35. agent_ops_cockpit/ops/ui_auditor.py +67 -6
  36. agent_ops_cockpit/ops/watcher.py +41 -70
  37. agent_ops_cockpit/ops/watchlist.json +30 -0
  38. agent_ops_cockpit/optimizer.py +157 -407
  39. agent_ops_cockpit/tests/test_arch_review.py +6 -6
  40. agent_ops_cockpit/tests/test_discovery.py +96 -0
  41. agent_ops_cockpit/tests/test_ops_core.py +56 -0
  42. agent_ops_cockpit/tests/test_orchestrator_fleet.py +73 -0
  43. agent_ops_cockpit/tests/test_persona_architect.py +75 -0
  44. agent_ops_cockpit/tests/test_persona_finops.py +31 -0
  45. agent_ops_cockpit/tests/test_persona_security.py +55 -0
  46. agent_ops_cockpit/tests/test_persona_sre.py +43 -0
  47. agent_ops_cockpit/tests/test_persona_ux.py +42 -0
  48. agent_ops_cockpit/tests/test_quality_climber.py +2 -2
  49. agent_ops_cockpit/tests/test_remediator.py +75 -0
  50. agent_ops_cockpit/tests/test_ui_auditor.py +52 -0
  51. agentops_cockpit-0.9.8.dist-info/METADATA +172 -0
  52. agentops_cockpit-0.9.8.dist-info/RECORD +71 -0
  53. agent_ops_cockpit/tests/test_optimizer.py +0 -68
  54. agent_ops_cockpit/tests/test_red_team.py +0 -35
  55. agent_ops_cockpit/tests/test_secret_scanner.py +0 -24
  56. agentops_cockpit-0.9.7.dist-info/METADATA +0 -246
  57. agentops_cockpit-0.9.7.dist-info/RECORD +0 -47
  58. {agentops_cockpit-0.9.7.dist-info → agentops_cockpit-0.9.8.dist-info}/WHEEL +0 -0
  59. {agentops_cockpit-0.9.7.dist-info → agentops_cockpit-0.9.8.dist-info}/entry_points.txt +0 -0
  60. {agentops_cockpit-0.9.7.dist-info → agentops_cockpit-0.9.8.dist-info}/licenses/LICENSE +0 -0
@@ -1,3 +1,12 @@
1
+ from tenacity import retry, wait_exponential, stop_after_attempt
2
+ from tenacity import retry, wait_exponential, stop_after_attempt
3
+ from tenacity import retry, wait_exponential, stop_after_attempt
4
+ from tenacity import retry, wait_exponential, stop_after_attempt
5
+ from tenacity import retry, wait_exponential, stop_after_attempt
6
+ from tenacity import retry, wait_exponential, stop_after_attempt
7
+ from tenacity import retry, wait_exponential, stop_after_attempt
8
+ from tenacity import retry, wait_exponential, stop_after_attempt
9
+ from tenacity import retry, wait_exponential, stop_after_attempt
1
10
  import json
2
11
  import os
3
12
  import urllib.request
@@ -7,13 +16,11 @@ from typing import Dict, Any, Optional, List
7
16
  import importlib.metadata
8
17
  from packaging import version
9
18
  from rich.console import Console
10
-
11
19
  console = Console()
12
-
13
- WATCHLIST_PATH = os.path.join(os.path.dirname(__file__), "watchlist.json")
20
+ WATCHLIST_PATH = os.path.join(os.path.dirname(__file__), 'watchlist.json')
14
21
 
15
22
  def clean_version(v_str: str) -> str:
16
- match = re.search(r'(\d+\.\d+(?:\.\d+)?(?:[a-zA-Z]+\d+)?)', v_str)
23
+ match = re.search('(\\d+\\.\\d+(?:\\.\\d+)?(?:[a-zA-Z]+\\d+)?)', v_str)
17
24
  if match:
18
25
  return match.group(1)
19
26
  return v_str.strip().lstrip('v')
@@ -25,23 +32,16 @@ def fetch_latest_from_atom(url: str) -> Optional[Dict[str, str]]:
25
32
  tree = ET.parse(response)
26
33
  root = tree.getroot()
27
34
  ns = {'ns': 'http://www.w3.org/2005/Atom'}
28
-
29
35
  latest_entry = root.find('ns:entry', ns)
30
36
  if latest_entry is not None:
31
37
  title = latest_entry.find('ns:title', ns).text
32
38
  updated = latest_entry.find('ns:updated', ns).text
33
39
  content_node = latest_entry.find('ns:content', ns)
34
- summary = ""
40
+ summary = ''
35
41
  if content_node is not None:
36
- summary = re.sub('<[^<]+?>', '', content_node.text or "")[:500] + "..."
37
-
42
+ summary = re.sub('<[^<]+?>', '', content_node.text or '')[:500] + '...'
38
43
  raw_v = title.strip().split()[-1]
39
- return {
40
- "version": clean_version(raw_v) if "==" not in raw_v else clean_version(raw_v.split("==")[-1]),
41
- "date": updated,
42
- "title": title,
43
- "summary": summary
44
- }
44
+ return {'version': clean_version(raw_v) if '==' not in raw_v else clean_version(raw_v.split('==')[-1]), 'date': updated, 'title': title, 'summary': summary}
45
45
  except Exception:
46
46
  return None
47
47
  return None
@@ -50,83 +50,50 @@ def get_installed_version(package_name: str) -> str:
50
50
  try:
51
51
  return importlib.metadata.version(package_name)
52
52
  except importlib.metadata.PackageNotFoundError:
53
- return "Not Installed"
53
+ return 'Not Installed'
54
54
 
55
55
  def get_package_evidence(package_name: str) -> Dict[str, Any]:
56
56
  if not os.path.exists(WATCHLIST_PATH):
57
- return {"error": "Watchlist not found"}
58
-
57
+ return {'error': 'Watchlist not found'}
59
58
  with open(WATCHLIST_PATH, 'r') as f:
60
59
  watchlist = json.load(f)
61
-
62
- # Flatten categories to find the package
63
60
  for cat_name, cat in watchlist.items():
64
- if cat_name == "compatibility_rules":
61
+ if cat_name == 'compatibility_rules':
65
62
  continue
66
63
  for name, info in cat.items():
67
- if info.get("package") == package_name or name == package_name:
68
- latest = fetch_latest_from_atom(info["feed"])
64
+ if info.get('package') == package_name or name == package_name:
65
+ latest = fetch_latest_from_atom(info['feed'])
69
66
  installed = get_installed_version(package_name)
70
- min_v = info.get("min_version_for_optimizations", "0.0.0")
71
-
67
+ min_v = info.get('min_version_for_optimizations', '0.0.0')
72
68
  upgrade_required = False
73
- if installed != "Not Installed":
69
+ if installed != 'Not Installed':
74
70
  try:
75
71
  if version.parse(installed) < version.parse(min_v):
76
72
  upgrade_required = True
77
73
  except Exception:
78
74
  pass
79
-
80
- return {
81
- "package": package_name,
82
- "installed_version": installed,
83
- "latest_version": latest["version"] if latest else "Unknown",
84
- "min_optimized_version": min_v,
85
- "upgrade_required": upgrade_required,
86
- "release_date": latest["date"] if latest else "Unknown",
87
- "source_url": info["feed"].replace(".atom", ""),
88
- "best_practice_context": latest["summary"] if latest else "Check release notes for performance/security enhancements."
89
- }
90
- return {"error": f"Package {package_name} not found in watchlist"}
75
+ return {'package': package_name, 'installed_version': installed, 'latest_version': latest['version'] if latest else 'Unknown', 'min_optimized_version': min_v, 'upgrade_required': upgrade_required, 'release_date': latest['date'] if latest else 'Unknown', 'source_url': info['feed'].replace('.atom', ''), 'best_practice_context': latest['summary'] if latest else 'Check release notes for performance/security enhancements.'}
76
+ return {'error': f'Package {package_name} not found in watchlist'}
91
77
 
92
78
  def get_compatibility_report(installed_packages: List[str]) -> List[Dict[str, Any]]:
93
79
  if not os.path.exists(WATCHLIST_PATH):
94
80
  return []
95
-
96
81
  with open(WATCHLIST_PATH, 'r') as f:
97
82
  watchlist = json.load(f)
98
-
99
- rules = watchlist.get("compatibility_rules", [])
83
+ rules = watchlist.get('compatibility_rules', [])
100
84
  reports = []
101
-
102
- # Normalize imports to find root package names
103
85
  roots = set()
104
86
  for pkg in installed_packages:
105
87
  roots.add(pkg.split('.')[0].replace('-', '_'))
106
-
107
88
  for rule in rules:
108
- comp_root = rule["component"].replace('-', '_')
89
+ comp_root = rule['component'].replace('-', '_')
109
90
  if comp_root in roots:
110
- # Check for incompatibilities
111
- for forbidden in rule.get("incompatible_with", []):
91
+ for forbidden in rule.get('incompatible_with', []):
112
92
  forbidden_root = forbidden.replace('-', '_')
113
93
  if forbidden_root in roots:
114
- reports.append({
115
- "type": "INCOMPATIBLE",
116
- "component": rule["component"],
117
- "conflict_with": forbidden,
118
- "reason": rule["reason"]
119
- })
120
-
121
- # Check for synergies
122
- for synergy in rule.get("works_well_with", []):
94
+ reports.append({'type': 'INCOMPATIBLE', 'component': rule['component'], 'conflict_with': forbidden, 'reason': rule['reason']})
95
+ for synergy in rule.get('works_well_with', []):
123
96
  synergy_root = synergy.replace('-', '_')
124
97
  if synergy_root in roots:
125
- reports.append({
126
- "type": "SYNERGY",
127
- "component": rule["component"],
128
- "partner": synergy,
129
- "reason": f"Optimally paired with ecosystem partner {synergy}."
130
- })
131
-
132
- return reports
98
+ reports.append({'type': 'SYNERGY', 'component': rule['component'], 'partner': synergy, 'reason': f'Optimally paired with ecosystem partner {synergy}.'})
99
+ return reports
@@ -359,6 +359,84 @@ GO_CHECKLIST = [
359
359
  }
360
360
  ]
361
361
 
362
+ LLAMAINDEX_CHECKLIST = [
363
+ {
364
+ "category": "🏗️ LlamaIndex (Data Intelligence)",
365
+ "checks": [
366
+ ("RAG: Using Advanced Retrieval (Sub-Question, Recursive)?", "Critical for complex reasoning over unstructured data."),
367
+ ("Memory: Is a BaseChatEngine or specialized buffer used?", "Ensures conversational continuity with large retrieval contexts."),
368
+ ("Knowledge Graphs: Is Property Graph Index active?", "Next-gen standard for relational reasoning in agents.")
369
+ ]
370
+ }
371
+ ]
372
+
373
+ VECTORDN_CHECKLIST = [
374
+ {
375
+ "category": "🧠 Vector Native Infrastructure",
376
+ "checks": [
377
+ ("Native Vector: Using Pinecone, Weaviate, or Qdrant?", "Enterprise standard for hybrid search and metadata filtering."),
378
+ ("Local Native: Using ChromaDB or LanceDB?", "Standard for local-first or edge-compute agents."),
379
+ ("Indexing: Are specialized Namespaces or Collections used?", "Ensures tenant isolation and multi-modal query efficiency.")
380
+ ]
381
+ }
382
+ ]
383
+
384
+ OBSERVABILITY_CHECKLIST = [
385
+ {
386
+ "category": "🔬 AgentOps Observability & Evaluation",
387
+ "checks": [
388
+ ("Tracing: Using LangSmith, Arize Phoenix, or Langfuse?", "Deep trace analysis for debugging multi-hop agent failures."),
389
+ ("Evaluation: Are DeepEval or RAGAS active in CI/CD?", "Automated scoring for retrieval and alignment quality."),
390
+ ("Experiments: Is Weights & Biases (W&B) used for experiment tracking?", "Industry standard for prompt and hyperparameter optimization.")
391
+ ]
392
+ }
393
+ ]
394
+
395
+ INFERENCE_CHECKLIST = [
396
+ {
397
+ "category": "⚡ Specialized Inference Engines",
398
+ "checks": [
399
+ ("Self-Hosting: Is vLLM used for high-throughput serving?", "Private cloud standard for concurrent agent execution."),
400
+ ("Edge / Local: Is Ollama used for local development?", "Prevents token leakage in early-stage reasoning cycles."),
401
+ ("Real-Time: Is Groq LPU used for low-latency voice/chat?", "Critical for maintaining agentic 'conversational flow'."),
402
+ ("Microservices: Are NVIDIA NIMs used for VPC-native deployment?", "Standard for containerized model microservices.")
403
+ ]
404
+ }
405
+ ]
406
+
407
+ GUARDRAILS_CHECKLIST = [
408
+ {
409
+ "category": "🛡️ Advanced Guardrail Fences",
410
+ "checks": [
411
+ ("Topical Rails: Is NeMo Guardrails (NVIDIA) active?", "Standard for enforcing domain-specific conversation boundaries."),
412
+ ("Structured Safety: Is Guardrails AI used for JSON validation?", "Ensures structured outputs are both safe and schema-compliant."),
413
+ ("Gateway Defense: Is Lakera Guard active at the ingress?", "Hardened defense against prompt injection at the API level.")
414
+ ]
415
+ }
416
+ ]
417
+
418
+ DSPY_CHECKLIST = [
419
+ {
420
+ "category": "🏗️ Programmatic Optimization (DSPy)",
421
+ "checks": [
422
+ ("Compilation: Are Optimizers (BootstrapFewShot/MIPRO) used?", "Industry standard for replacing manual prompt engineering with compiled logic."),
423
+ ("Signatures: Are declarative DSPy Signatures used for task definition?", "Ensures structured, repeatable reasoning modules."),
424
+ ("Assertions: Are DSPy Assertions/Suggestions active?", "Runtime self-correction for complex agentic logic.")
425
+ ]
426
+ }
427
+ ]
428
+
429
+ SPRING_AI_CHECKLIST = [
430
+ {
431
+ "category": "🏗️ Spring AI (Java Enterprise)",
432
+ "checks": [
433
+ ("Orchestration: Using ChatClient or specialized Advisors?", "Spring-standard for enterprise agent integration."),
434
+ ("Vector Store: Is a Spring-compatible VectorStore (PgVector/Pinecone) active?", "Ensures type-safe data retrieval in Java stacks."),
435
+ ("Observability: Is Spring Boot Actuator/Micrometer integrated?", "Critical for enterprise-grade agent monitoring and tracing.")
436
+ ]
437
+ }
438
+ ]
439
+
362
440
 
363
441
  FRAMEWORKS = {
364
442
  "google": {
@@ -439,8 +517,42 @@ FRAMEWORKS = {
439
517
  "checklist": CREWAI_CHECKLIST,
440
518
  "indicators": [r"crewai", r"Agent\(", r"Task\(", r"Crew\("]
441
519
  },
520
+ "llamaindex": {
521
+ "name": "LlamaIndex / Data Agents",
522
+ "checklist": LLAMAINDEX_CHECKLIST,
523
+ "indicators": [r"llamaindex", r"llama-index", r"VectorStoreIndex", r"KnowledgeGraphIndex"]
524
+ },
525
+ "vectordb": {
526
+ "name": "Vector Native (Pinecone/Weaviate/Chroma)",
527
+ "checklist": VECTORDN_CHECKLIST,
528
+ "indicators": [r"pinecone", r"weaviate", r"qdrant", r"chromadb", r"lancedb", r"milvus"]
529
+ },
530
+ "observability": {
531
+ "name": "Observability (LangSmith/Phoenix)",
532
+ "checklist": OBSERVABILITY_CHECKLIST,
533
+ "indicators": [r"langsmith", r"arize", r"phoenix", r"langfuse", r"deepeval", r"ragas", r"wandb"]
534
+ },
535
+ "inference": {
536
+ "name": "Inference Engines (vLLM/Groq)",
537
+ "checklist": INFERENCE_CHECKLIST,
538
+ "indicators": [r"vllm", r"ollama", r"groq", r"nvidia-nim", r"tgi"]
539
+ },
540
+ "guardrails": {
541
+ "name": "Guardrail Fences (NeMo/Lakera)",
542
+ "checklist": GUARDRAILS_CHECKLIST,
543
+ "indicators": [r"nemoguardrails", r"guardrails-ai", r"lakera"]
544
+ },
545
+ "dspy": {
546
+ "name": "DSPy / Programmatic Optimization",
547
+ "checklist": DSPY_CHECKLIST,
548
+ "indicators": [r"dspy", r"BootstrapFewShot", r"MIPRO", r"Signature"]
549
+ },
550
+ "springai": {
551
+ "name": "Spring AI (Java)",
552
+ "checklist": SPRING_AI_CHECKLIST,
553
+ "indicators": [r"spring-ai", r"ChatClient", r"org\.springframework\.ai"]
554
+ },
442
555
  "generic": {
443
-
444
556
  "name": "Generic Agentic Stack",
445
557
  "checklist": GENERIC_CHECKLIST,
446
558
  "indicators": []
@@ -448,6 +560,17 @@ FRAMEWORKS = {
448
560
  }
449
561
 
450
562
 
563
+ NIST_AI_RMF_CHECKLIST = [
564
+ {
565
+ "category": "⚖️ NIST AI RMF (Governance)",
566
+ "checks": [
567
+ ("Transparency: Is the agent's purpose and limitation documented?", "Ensures users know what the AI can/cannot do."),
568
+ ("Human-in-the-Loop: Are sensitive decisions manually reviewed?", "Mitigates high-risk autonomous failures."),
569
+ ("Traceability: Is every agent reasoning step logged?", "Required for forensic audit and accountability.")
570
+ ]
571
+ }
572
+ ]
573
+
451
574
  def detect_framework(path: str = ".") -> str:
452
575
  """ Detects the framework based on README or requirements.txt files. """
453
576
  content = ""
@@ -0,0 +1,74 @@
1
+ import os
2
+ import git
3
+ from typing import List
4
+ from rich.console import Console
5
+
6
+ console = Console()
7
+
8
+ class GitPortal:
9
+ """
10
+ Phase 5: The 'Ambassador' - Autonomous Git & PR Portal.
11
+ Handles branch creation, committing fixes, and preparing for PR submission.
12
+ """
13
+ def __init__(self, repo_path: str = "."):
14
+ self.repo_path = repo_path
15
+ try:
16
+ self.repo = git.Repo(repo_path)
17
+ except Exception as e:
18
+ self.repo = None
19
+ console.print(f"[red]❌ Git initialization failed: {e}[/red]")
20
+
21
+ def create_fix_branch(self, branch_name: str):
22
+ """Creates and switches to a new branch for fixes."""
23
+ if not self.repo: return None
24
+
25
+ current = self.repo.active_branch
26
+ console.print(f"🌿 [dim]Creating fix branch: {branch_name} (from {current.name})[/dim]")
27
+
28
+ new_branch = self.repo.create_head(branch_name)
29
+ new_branch.checkout()
30
+ return new_branch
31
+
32
+ def commit_fixes(self, files: List[str], message: str):
33
+ """Stages and commits the remediated files."""
34
+ if not self.repo: return False
35
+
36
+ self.repo.index.add(files)
37
+ self.repo.index.commit(message)
38
+ console.print(f"📦 [bold green]Committed fixes to {len(files)} files.[/bold green]")
39
+ return True
40
+
41
+ def push_fixes(self, branch_name: str):
42
+ """Pushes the branch to the remote."""
43
+ if not self.repo: return False
44
+
45
+ try:
46
+ origin = self.repo.remote(name='origin')
47
+ console.print(f"🚀 [cyan]Pushing {branch_name} to origin...[/cyan]")
48
+ origin.push(branch_name)
49
+ return True
50
+ except Exception as e:
51
+ console.print(f"[yellow]⚠️ Push failed (Remote likely not configured): {e}[/yellow]")
52
+ return False
53
+
54
+ def get_pr_body(self, findings_count: int, score_improvement: int):
55
+ """Generates the body text for a GitHub PR."""
56
+ return f"""
57
+ # 🕹️ AgentOps Cockpit: Autonomous Repo Fixes
58
+ **Status**: AUTO_REMEDIATION_COMPLETE
59
+ **Maturity Score Improvement**: +{score_improvement}%
60
+
61
+ ## 🛠️ Summary of Remediations
62
+ This PR was automatically generated by the AgentOps Cockpit (v1.1) to resolve **{findings_count} architectural gaps**.
63
+
64
+ ### 🧗 Resiliency Hardening
65
+ - Injected `tenacity` retry decorators on volatile network calls.
66
+ - Applied `timeout` guards to async tool executions to prevent Zombie processes.
67
+
68
+ ### 🛡️ Security Posture
69
+ - Resolved hardcoded credential risks via AST detection.
70
+ - Fixed behavioral PII leaks identified in runtime traces.
71
+
72
+ ---
73
+ *Generated autonomously by the AgentOps Cockpit 'The Closer' Engine.*
74
+ """
@@ -1,9 +1,15 @@
1
+ from tenacity import retry, wait_exponential, stop_after_attempt
1
2
  from typing import List, Dict, Any
2
3
  import asyncio
3
4
  import os
5
+ import logging
4
6
  from mcp import ClientSession, StdioServerParameters
5
7
  from mcp.client.stdio import stdio_client
6
8
 
9
+ # v1.1 Compliance: Transit Logging (SOC2 CC6.1)
10
+ logging.basicConfig(level=logging.INFO)
11
+ logger = logging.getLogger("mcp_hub")
12
+
7
13
  class MCPHub:
8
14
  """
9
15
  Model Context Protocol (MCP) Hub.
@@ -13,67 +19,38 @@ class MCPHub:
13
19
 
14
20
  def __init__(self):
15
21
  self.servers: Dict[str, StdioServerParameters] = {}
16
- self.registry = {
17
- "search": {"type": "mcp", "provider": "google-search", "server": "google-search-mcp"},
18
- "db": {"type": "mcp", "provider": "alloydb", "server": "postgres-mcp"},
19
- "legacy_crm": {"type": "rest", "provider": "internal", "status": "deprecated"}
20
- }
22
+ self.registry = {'search': {'type': 'mcp', 'provider': 'google-search', 'server': 'google-search-mcp'}, 'db': {'type': 'mcp', 'provider': 'alloydb', 'server': 'postgres-mcp'}, 'legacy_crm': {'type': 'rest', 'provider': 'internal', 'status': 'deprecated'}}
21
23
 
22
- def register_server(self, name: str, command: str, args: List[str] = None):
24
+ def register_server(self, name: str, command: str, args: List[str]=None):
23
25
  """Registers a local MCP server."""
24
- self.servers[name] = StdioServerParameters(
25
- command=command,
26
- args=args or [],
27
- env=os.environ.copy()
28
- )
26
+ self.servers[name] = StdioServerParameters(command=command, args=args or [], env=os.environ.copy())
29
27
 
30
28
  async def execute_tool(self, tool_name: str, arguments: Dict[str, Any]):
31
29
  """
32
30
  Executes a tool call using the Model Context Protocol.
33
31
  """
32
+ logger.info(f"TRANSIT_LOG: Executing tool '{tool_name}' with {len(arguments)} args.")
34
33
  if tool_name not in self.registry:
35
- raise ValueError(f"Tool {tool_name} not found in MCP registry.")
36
-
34
+ raise ValueError(f'Tool {tool_name} not found in MCP registry.')
37
35
  config = self.registry[tool_name]
38
-
39
- # If it's a legacy tool, handle it separately
40
- if config["type"] == "rest":
41
- print(f"⚠️ Executing legacy REST tool: {tool_name}")
36
+ if config['type'] == 'rest':
37
+ print(f'⚠️ Executing legacy REST tool: {tool_name}')
42
38
  return await self._mock_legacy_exec(tool_name, arguments)
43
-
44
- server_name = config.get("server")
39
+ server_name = config.get('server')
45
40
  if not server_name or server_name not in self.servers:
46
- # Fallback to mock for demo/unconfigured environments
47
41
  print(f"ℹ️ MCP Server '{server_name}' not configured. Running in simulated mode.")
48
42
  return await self._mock_mcp_exec(tool_name, arguments)
49
-
50
- # Real MCP Protocol Execution
51
43
  async with stdio_client(self.servers[server_name]) as (read, write):
52
44
  async with ClientSession(read, write) as session:
53
45
  await session.initialize()
54
- result = await session.call_tool(tool_name, arguments)
55
- return {
56
- "result": result.content,
57
- "protocol": "mcp-v1",
58
- "server": server_name
59
- }
46
+ result = await session.call_tool(tool_name, arguments, timeout=10)
47
+ return {'result': result.content, 'protocol': 'mcp-v1', 'server': server_name}
60
48
 
61
49
  async def _mock_mcp_exec(self, tool_name: str, args: Dict[str, Any]):
62
50
  await asyncio.sleep(0.2)
63
- return {
64
- "result": f"Simulated MCP response for {tool_name}",
65
- "protocol": "mcp-virtual",
66
- "assurance": 0.95
67
- }
51
+ return {'result': f'Simulated MCP response for {tool_name}', 'protocol': 'mcp-virtual', 'assurance': 0.95}
68
52
 
69
53
  async def _mock_legacy_exec(self, tool_name: str, args: Dict[str, Any]):
70
54
  await asyncio.sleep(0.5)
71
- return {
72
- "result": f"Legacy response for {tool_name}",
73
- "protocol": "rest-legacy",
74
- "warning": "MIGRATE_TO_MCP"
75
- }
76
-
77
- global_mcp_hub = MCPHub()
78
- # Example registration (commented out as it requires local binaries)
79
- # global_mcp_hub.register_server("google-search-mcp", "npx", ["-y", "@modelcontextprotocol/server-google-search"])
55
+ return {'result': f'Legacy response for {tool_name}', 'protocol': 'rest-legacy', 'warning': 'MIGRATE_TO_MCP'}
56
+ global_mcp_hub = MCPHub()