contextagent 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. agentz/agent/base.py +262 -0
  2. agentz/artifacts/__init__.py +5 -0
  3. agentz/artifacts/artifact_writer.py +538 -0
  4. agentz/artifacts/reporter.py +235 -0
  5. agentz/artifacts/terminal_writer.py +100 -0
  6. agentz/context/__init__.py +6 -0
  7. agentz/context/context.py +91 -0
  8. agentz/context/conversation.py +205 -0
  9. agentz/context/data_store.py +208 -0
  10. agentz/llm/llm_setup.py +156 -0
  11. agentz/mcp/manager.py +142 -0
  12. agentz/mcp/patches.py +88 -0
  13. agentz/mcp/servers/chrome_devtools/server.py +14 -0
  14. agentz/profiles/base.py +108 -0
  15. agentz/profiles/data/data_analysis.py +38 -0
  16. agentz/profiles/data/data_loader.py +35 -0
  17. agentz/profiles/data/evaluation.py +43 -0
  18. agentz/profiles/data/model_training.py +47 -0
  19. agentz/profiles/data/preprocessing.py +47 -0
  20. agentz/profiles/data/visualization.py +47 -0
  21. agentz/profiles/manager/evaluate.py +51 -0
  22. agentz/profiles/manager/memory.py +62 -0
  23. agentz/profiles/manager/observe.py +48 -0
  24. agentz/profiles/manager/routing.py +66 -0
  25. agentz/profiles/manager/writer.py +51 -0
  26. agentz/profiles/mcp/browser.py +21 -0
  27. agentz/profiles/mcp/chrome.py +21 -0
  28. agentz/profiles/mcp/notion.py +21 -0
  29. agentz/runner/__init__.py +74 -0
  30. agentz/runner/base.py +28 -0
  31. agentz/runner/executor.py +320 -0
  32. agentz/runner/hooks.py +110 -0
  33. agentz/runner/iteration.py +142 -0
  34. agentz/runner/patterns.py +215 -0
  35. agentz/runner/tracker.py +188 -0
  36. agentz/runner/utils.py +45 -0
  37. agentz/runner/workflow.py +250 -0
  38. agentz/tools/__init__.py +20 -0
  39. agentz/tools/data_tools/__init__.py +17 -0
  40. agentz/tools/data_tools/data_analysis.py +152 -0
  41. agentz/tools/data_tools/data_loading.py +92 -0
  42. agentz/tools/data_tools/evaluation.py +175 -0
  43. agentz/tools/data_tools/helpers.py +120 -0
  44. agentz/tools/data_tools/model_training.py +192 -0
  45. agentz/tools/data_tools/preprocessing.py +229 -0
  46. agentz/tools/data_tools/visualization.py +281 -0
  47. agentz/utils/__init__.py +69 -0
  48. agentz/utils/config.py +708 -0
  49. agentz/utils/helpers.py +10 -0
  50. agentz/utils/parsers.py +142 -0
  51. agentz/utils/printer.py +539 -0
  52. contextagent-0.1.0.dist-info/METADATA +269 -0
  53. contextagent-0.1.0.dist-info/RECORD +66 -0
  54. contextagent-0.1.0.dist-info/WHEEL +5 -0
  55. contextagent-0.1.0.dist-info/licenses/LICENSE +21 -0
  56. contextagent-0.1.0.dist-info/top_level.txt +2 -0
  57. pipelines/base.py +972 -0
  58. pipelines/data_scientist.py +97 -0
  59. pipelines/data_scientist_memory.py +151 -0
  60. pipelines/experience_learner.py +0 -0
  61. pipelines/prompt_generator.py +0 -0
  62. pipelines/simple.py +78 -0
  63. pipelines/simple_browser.py +145 -0
  64. pipelines/simple_chrome.py +75 -0
  65. pipelines/simple_notion.py +103 -0
  66. pipelines/tool_builder.py +0 -0
@@ -0,0 +1,208 @@
1
+ """
2
+ Pipeline-scoped data store for sharing in-memory objects between agents.
3
+ Enables efficient data sharing without redundant file I/O.
4
+ """
5
+
6
+ from typing import Any, Dict, Optional, List
7
+ from datetime import datetime
8
+ from dataclasses import dataclass
9
+ import threading
10
+ from loguru import logger
11
+
12
+
13
@dataclass
class DataStoreEntry:
    """One record held by the pipeline data store.

    Bundles the stored object together with bookkeeping metadata: the key
    it was stored under, when it was stored, a type label, and a free-form
    metadata dict.
    """
    key: str
    value: Any
    timestamp: datetime
    data_type: str
    metadata: Dict[str, Any]

    def size_mb(self) -> float:
        """Rough size of ``value`` in megabytes.

        Relies on ``sys.getsizeof``, which is shallow (it does not follow
        references into contained objects), so treat this as an
        approximation only.
        """
        import sys
        byte_count = sys.getsizeof(self.value)
        return byte_count / 1024 / 1024
26
+
27
+
28
class DataStore:
    """Thread-safe data store for sharing objects within a pipeline run.

    Designed for storing:
    - DataFrames loaded from files
    - Trained models
    - Preprocessed data
    - Intermediate computation results

    Key naming conventions:
    - DataFrames: f"dataframe:{file_path}"
    - Models: f"model:{model_name}"
    - Preprocessed data: f"preprocessed:{file_path}"
    - Custom: any string key

    Every public operation acquires an RLock, so the store is safe to use
    from multiple threads, and methods may delegate to each other while
    holding the lock.
    """

    def __init__(self, experiment_id: Optional[str] = None):
        """Initialize the data store.

        Args:
            experiment_id: Optional experiment ID for tracking
        """
        self._store: Dict[str, DataStoreEntry] = {}
        # RLock (re-entrant) so dunder helpers can call public methods
        # that also take the lock.
        self._lock = threading.RLock()
        self.experiment_id = experiment_id
        logger.debug(f"Initialized DataStore for experiment: {experiment_id}")

    def set(
        self,
        key: str,
        value: Any,
        data_type: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        """Store a value in the data store.

        Args:
            key: Unique key for the value
            value: The object to store
            data_type: Optional type descriptor (e.g., 'dataframe', 'model')
            metadata: Optional metadata dict
        """
        with self._lock:
            # Infer data type from the value's class if not provided
            if data_type is None:
                data_type = type(value).__name__

            entry = DataStoreEntry(
                key=key,
                value=value,
                timestamp=datetime.now(),
                data_type=data_type,
                metadata=metadata or {}
            )

            self._store[key] = entry
            logger.debug(
                f"Stored {data_type} at key '{key}' "
                f"(size: {entry.size_mb():.2f} MB)"
            )

    def get(self, key: str, default: Any = None) -> Any:
        """Retrieve a value from the data store.

        Args:
            key: Key to retrieve
            default: Default value if key not found

        Returns:
            The stored value or default
        """
        with self._lock:
            entry = self._store.get(key)
            # Explicit None check: entries are always truthy objects, but
            # `is not None` states the intent precisely.
            if entry is not None:
                logger.debug(f"Retrieved {entry.data_type} from key '{key}'")
                return entry.value
            return default

    def get_entry(self, key: str) -> Optional[DataStoreEntry]:
        """Retrieve full entry with metadata.

        Args:
            key: Key to retrieve

        Returns:
            DataStoreEntry or None
        """
        with self._lock:
            return self._store.get(key)

    def has(self, key: str) -> bool:
        """Check if a key exists in the store.

        Args:
            key: Key to check

        Returns:
            True if key exists
        """
        with self._lock:
            return key in self._store

    def delete(self, key: str) -> bool:
        """Delete a key from the store.

        Args:
            key: Key to delete

        Returns:
            True if key was deleted, False if not found
        """
        with self._lock:
            if key in self._store:
                del self._store[key]
                logger.debug(f"Deleted key '{key}' from data store")
                return True
            return False

    def clear(self) -> None:
        """Clear all data from the store."""
        with self._lock:
            count = len(self._store)
            self._store.clear()
            logger.debug(f"Cleared {count} entries from data store")

    def list_keys(self, data_type: Optional[str] = None) -> List[str]:
        """List all keys, optionally filtered by data type.

        Args:
            data_type: Optional filter by data type

        Returns:
            List of keys
        """
        with self._lock:
            if data_type is None:
                return list(self._store.keys())
            return [
                key for key, entry in self._store.items()
                if entry.data_type == data_type
            ]

    def get_stats(self) -> Dict[str, Any]:
        """Get statistics about the data store.

        Returns:
            Dictionary with stats: entry count, approximate total size in
            MB, a histogram of data types, the experiment id, and all keys.
        """
        with self._lock:
            total_size_mb = sum(entry.size_mb() for entry in self._store.values())
            data_types: Dict[str, int] = {}
            for entry in self._store.values():
                data_types[entry.data_type] = data_types.get(entry.data_type, 0) + 1

            return {
                "total_entries": len(self._store),
                "total_size_mb": round(total_size_mb, 2),
                "data_types": data_types,
                "experiment_id": self.experiment_id,
                "keys": list(self._store.keys())
            }

    def __len__(self) -> int:
        """Return number of entries."""
        with self._lock:
            return len(self._store)

    def __contains__(self, key: str) -> bool:
        """Support 'in' operator."""
        return self.has(key)

    def __getitem__(self, key: str) -> Any:
        """Support dictionary-style access.

        Raises:
            KeyError: if the key is not present.
        """
        # Fix: the original called get() and then tested `key not in
        # self._store` — two separate lookups, with the membership test
        # reading _store outside the lock. A concurrent delete between the
        # two steps could raise KeyError for a value that was just
        # returned, and a stored None was handled inconsistently. A single
        # locked lookup removes the race and keeps stored-None retrievable.
        with self._lock:
            entry = self._store.get(key)
            if entry is None:
                raise KeyError(f"Key '{key}' not found in data store")
            return entry.value

    def __setitem__(self, key: str, value: Any) -> None:
        """Support dictionary-style assignment."""
        self.set(key, value)
@@ -0,0 +1,156 @@
1
+ from typing import Dict, Any, Optional, Union
2
+
3
+ from agents import (
4
+ OpenAIChatCompletionsModel,
5
+ OpenAIResponsesModel,
6
+ ModelSettings,
7
+ )
8
+ from agents.extensions.models.litellm_model import LitellmModel
9
+ from openai import AsyncAzureOpenAI, AsyncOpenAI
10
+
11
# Provider configurations - use OpenAIResponsesModel for most providers
# Each entry maps a provider id to the endpoint to reach it and the model
# wrapper class LLMConfig._create_model instantiates. Optional flags:
#   use_litellm:     route through LitellmModel instead of an OpenAI client
#   requires_azure:  build an AsyncAzureOpenAI client from azure_config
#   default_api_key: fallback key for servers that ignore auth (e.g. ollama)
PROVIDER_CONFIGS = {
    "openai": {
        "base_url": "https://api.openai.com/v1",
        "model_class": OpenAIResponsesModel,
    },
    "deepseek": {
        "base_url": "https://api.deepseek.com/v1",
        "model_class": OpenAIResponsesModel,
    },
    "openrouter": {
        "base_url": "https://openrouter.ai/api/v1",
        "model_class": OpenAIResponsesModel,
    },
    "gemini": {
        # Google's OpenAI-compatibility endpoint, via the chat-completions API.
        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
        "model_class": OpenAIChatCompletionsModel,
        "use_litellm": False,
    },
    "anthropic": {
        # NOTE(review): pairs the Responses-API wrapper with Anthropic's
        # endpoint — confirm Anthropic's OpenAI-compatible surface supports
        # the Responses API before relying on this.
        "base_url": "https://api.anthropic.com/v1/",
        "model_class": OpenAIResponsesModel,
    },
    "perplexity": {
        # NOTE(review): this is a full endpoint path, not a base URL; the
        # OpenAI client appends its own path segments — verify requests
        # actually resolve.
        "base_url": "https://api.perplexity.ai/chat/completions",
        "model_class": OpenAIResponsesModel,
    },
    "huggingface": {
        # NOTE(review): identical to the "gemini" base_url — looks like a
        # copy-paste error; Hugging Face's OpenAI-compatible router lives at
        # a different host. Confirm before use.
        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
        "model_class": OpenAIResponsesModel,
    },
    "local": {
        "base_url": None,  # Will be provided in config
        "model_class": OpenAIChatCompletionsModel,
        "default_api_key": "ollama",
    },
    "azureopenai": {
        # No base_url: the Azure endpoint comes from config["azure_config"].
        "model_class": OpenAIChatCompletionsModel,
        "requires_azure": True,
    },
    "bedrock": {
        # Bedrock is reached through LiteLLM rather than an OpenAI client.
        "model_class": LitellmModel,
        "use_litellm": True,
    },
}
56
+
57
+
58
class LLMConfig:
    """Direct configuration system - no environment variables.

    Resolves a provider/model/api-key dict into concrete model instances
    (main, reasoning, and fast all share one instance) plus default
    ModelSettings.
    """

    def __init__(self, config: Dict[str, Any], full_config: Optional[Dict[str, Any]] = None):
        """
        Initialize LLM configuration from direct config.

        Args:
            config: Dictionary containing:
                - provider: str (e.g., "openai", "gemini", "deepseek")
                - api_key: str
                - model: str (optional, will use defaults)
                - base_url: str (optional for custom endpoints)
                - azure_config: dict (for Azure OpenAI)
                - aws_config: dict (for Bedrock)
                - model_settings: dict (optional, for temperature etc.)
            full_config: Optional full configuration including agent prompts, pipeline settings

        Raises:
            KeyError: if "provider" or "api_key" is missing from config.
            ValueError: if the provider is not a key of PROVIDER_CONFIGS.
        """
        self.provider = config["provider"]
        self.api_key = config["api_key"]
        # NOTE(review): the default model is computed eagerly even when
        # "model" is present, and provider validation only happens below —
        # an unknown provider silently falls back to "gpt-4.1" here before
        # the ValueError is raised.
        self.model_name = config.get("model", self._get_default_model())
        self.config = config
        self.full_config = full_config

        # Validate provider
        if self.provider not in PROVIDER_CONFIGS:
            valid = list(PROVIDER_CONFIGS.keys())
            raise ValueError(f"Invalid provider: {self.provider}. Available: {valid}")

        # Create main model (used for all purposes - reasoning, main, fast)
        self.main_model = self._create_model()
        self.reasoning_model = self.main_model
        self.fast_model = self.main_model

        # Model settings from config or defaults (temperature defaults 0.1)
        model_settings_config = self.config.get("model_settings", {})
        self.default_model_settings = ModelSettings(
            temperature=model_settings_config.get("temperature", 0.1)
        )

        # Set tracing if OpenAI key provided. Module-level side effect:
        # configures the agents SDK's trace exporter for this process.
        if self.provider == "openai" and self.api_key:
            from agents import set_tracing_export_api_key
            set_tracing_export_api_key(self.api_key)

    def _get_default_model(self) -> str:
        """Get default model for provider.

        Returns:
            The provider's default model id; providers without an entry
            (e.g. "local", "azureopenai", "huggingface") fall back to
            "gpt-4.1".
        """
        defaults = {
            "openai": "gpt-4.1",
            "gemini": "gemini-2.5-flash",
            "deepseek": "deepseek-chat",
            "anthropic": "claude-3-5-sonnet-20241022",
            "bedrock": "anthropic.claude-3-5-sonnet-20241022-v2:0",
            "perplexity": "llama-3.1-sonar-large-128k-online",
            "openrouter": "meta-llama/llama-3.2-3b-instruct:free",
        }
        return defaults.get(self.provider, "gpt-4.1")

    def _create_model(self):
        """Create model instance using direct configuration.

        Dispatches on PROVIDER_CONFIGS:
        - LiteLLM providers (bedrock) build a LitellmModel directly.
        - "azureopenai" builds an AsyncAzureOpenAI client from azure_config.
        - Everything else uses an OpenAI-compatible AsyncOpenAI client.
        """
        provider_config = PROVIDER_CONFIGS[self.provider]
        model_class = provider_config["model_class"]

        if provider_config.get("use_litellm"):
            # base_url may be absent (None) for litellm providers like bedrock.
            return model_class(model=self.model_name, api_key=self.api_key, base_url=provider_config.get("base_url"))

        elif self.provider == "azureopenai":
            azure_config = self.config.get("azure_config", {})
            # NOTE(review): endpoint/deployment default to None when
            # azure_config is missing keys — confirm the SDK error in that
            # case is acceptable, or validate earlier.
            client = AsyncAzureOpenAI(
                api_key=self.api_key,
                azure_endpoint=azure_config.get("endpoint"),
                azure_deployment=azure_config.get("deployment"),
                api_version=azure_config.get("api_version", "2023-12-01-preview"),
            )
            return model_class(model=self.model_name, openai_client=client)

        else:
            # Standard OpenAI-compatible providers.
            # config["base_url"] overrides the provider default; "local"
            # must supply one since its default is None.
            base_url = self.config.get("base_url", provider_config["base_url"])
            # Placeholder key for servers that ignore auth (e.g. ollama).
            api_key = self.api_key or provider_config.get("default_api_key", "key")

            client = AsyncOpenAI(
                api_key=api_key,
                base_url=base_url,
            )
            return model_class(model=self.model_name, openai_client=client)
144
+
145
def get_base_url(model: Union[OpenAIChatCompletionsModel, OpenAIResponsesModel]) -> str:
    """Return the API base URL configured on *model*'s underlying client.

    Reaches into private attributes (``_client._base_url``), so this is
    brittle across SDK upgrades.
    """
    underlying_client = model._client
    return str(underlying_client._base_url)
148
+
149
def model_supports_json_and_tool_calls(
    model: Union[OpenAIChatCompletionsModel, OpenAIResponsesModel],
) -> bool:
    """Utility function to check if a model supports structured output.

    Heuristic: treats the model as capable when its base URL contains a
    known provider host (OpenAI or Anthropic).
    """
    structured_output_providers = ["openai.com", "anthropic.com"]
    base_url = get_base_url(model)
    return any(host in base_url for host in structured_output_providers)
agentz/mcp/manager.py ADDED
@@ -0,0 +1,142 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Mapping
4
+ from contextlib import AsyncExitStack
5
+ from dataclasses import dataclass
6
+ from typing import Any, Dict, Optional
7
+
8
+ from agents.mcp import MCPServer, MCPServerSse, MCPServerStdio
9
+
10
+
11
class MCPConfigurationError(ValueError):
    """Raised when an MCP server configuration is invalid."""


@dataclass(frozen=True)
class MCPServerSpec:
    """Immutable description of how to construct one MCP server.

    Attributes:
        type: transport identifier (e.g. "stdio" or "sse").
        options: keyword arguments forwarded to the server constructor.
    """

    type: str
    options: Dict[str, Any]

    def __post_init__(self) -> None:
        # A falsy type can never be mapped to a server class, so reject it
        # at construction time.
        if self.type:
            return
        raise MCPConfigurationError("MCP server configuration requires a 'type'.")
25
+
26
+
27
class MCPRegistry:
    """Registry responsible for storing MCP server specifications."""

    def __init__(self, specs: Optional[Dict[str, MCPServerSpec]] = None) -> None:
        self._specs = specs or {}

    @classmethod
    def from_config(cls, config: Optional[Mapping[str, Any]]) -> "MCPRegistry":
        """Create a registry from configuration mapping."""
        if config is None:
            return cls()

        servers_config = config.get("servers", {})
        if servers_config and not isinstance(servers_config, Mapping):
            raise MCPConfigurationError("'servers' must be a mapping of server definitions.")

        parsed: Dict[str, MCPServerSpec] = {
            name: cls._parse_spec(name, server_cfg)
            for name, server_cfg in (servers_config or {}).items()
        }
        return cls(parsed)

    @staticmethod
    def _parse_spec(name: str, server_cfg: Any) -> MCPServerSpec:
        """Validate and normalize a single server definition into a spec."""
        if not isinstance(server_cfg, Mapping):
            raise MCPConfigurationError(f"MCP server '{name}' configuration must be a mapping.")

        # Accept either "type" or the legacy "transport" key, normalized to
        # lower case.
        raw_type = server_cfg.get("type") or server_cfg.get("transport") or ""
        server_type = str(raw_type).strip().lower()
        if not server_type:
            raise MCPConfigurationError(f"MCP server '{name}' must define a 'type' or 'transport'.")

        # Everything except the type/transport selector is passed through
        # as constructor options.
        reserved = {"type", "transport"}
        options = {key: value for key, value in server_cfg.items() if key not in reserved}
        return MCPServerSpec(type=server_type, options=options)

    def register(self, name: str, spec: MCPServerSpec) -> None:
        """Register (or overwrite) a spec by name."""
        self._specs[name] = spec

    def get(self, name: str) -> MCPServerSpec:
        """Look up a spec by name, raising a configuration error if absent."""
        try:
            return self._specs[name]
        except KeyError as exc:
            raise MCPConfigurationError(
                f"MCP server '{name}' is not defined; add it to the configuration."
            ) from exc

    def as_dict(self) -> Dict[str, MCPServerSpec]:
        """Return a shallow copy of all registered specs."""
        return dict(self._specs)

    def contains(self, name: str) -> bool:
        """True when a spec is registered under *name*."""
        return name in self._specs
74
+
75
+
76
# Maps a spec's "type" string to the concrete MCP transport class. Only
# stdio and SSE transports are supported; anything else is rejected when a
# session tries to build the server.
SERVER_TYPE_MAP = {
    "stdio": MCPServerStdio,
    "sse": MCPServerSse,
}
80
+
81
+
82
class MCPManagerSession:
    """Async context manager that keeps MCP server connections alive.

    Servers are created lazily via get_server and stay connected until the
    session exits, at which point every connection is closed.
    """

    def __init__(self, registry: MCPRegistry):
        self._registry = registry
        # One exit stack owns every server connection opened in this
        # session; closing the stack tears all of them down.
        self._stack = AsyncExitStack()
        # Cache of connected servers, keyed by registry name.
        self._servers: Dict[str, MCPServer] = {}

    async def __aenter__(self) -> "MCPManagerSession":
        return self

    async def __aexit__(self, exc_type, exc, tb) -> None:
        # Close all connected servers (reverse connection order), then drop
        # the cache so the session cannot hand out dead server objects.
        await self._stack.aclose()
        self._servers.clear()

    async def get_server(self, name: str, overrides: Optional[Mapping[str, Any]] = None) -> MCPServer:
        """Return a connected MCP server instance by name, creating it on demand.

        Args:
            name: Registry name of the server spec.
            overrides: Optional option overrides merged on top of the
                spec's options for this connection.

        Raises:
            MCPConfigurationError: if the name is not registered, or its
                type is not in SERVER_TYPE_MAP.

        NOTE(review): servers are cached by name only — when a server
        already exists, ``overrides`` passed on a later call are silently
        ignored and the cached instance is returned. Confirm this is the
        intended contract.
        """
        if name in self._servers:
            return self._servers[name]

        spec = self._registry.get(name)
        options = dict(spec.options)
        if overrides:
            options.update(overrides)

        try:
            server_cls = SERVER_TYPE_MAP[spec.type]
        except KeyError as exc:
            raise MCPConfigurationError(
                f"Unsupported MCP server type '{spec.type}' for '{name}'. "
                f"Supported types: {', '.join(SERVER_TYPE_MAP)}."
            ) from exc

        server_ctx = server_cls(**options)
        # Enter the server's async context on the session stack so it stays
        # connected until the session itself exits.
        server = await self._stack.enter_async_context(server_ctx)
        self._servers[name] = server
        return server
119
+
120
+
121
class MCPManager:
    """Entry point that provides MCP manager sessions.

    Wraps an MCPRegistry and hands out MCPManagerSession objects, one per
    pipeline run.
    """

    def __init__(self, registry: MCPRegistry):
        self._registry = registry

    @classmethod
    def from_config(cls, config: Optional[Mapping[str, Any]]) -> "MCPManager":
        """Build a manager whose registry is parsed from *config*."""
        return cls(MCPRegistry.from_config(config))

    def ensure_server(self, name: str, spec: MCPServerSpec) -> None:
        """Add a default server if one isn't already configured."""
        if self._registry.contains(name):
            return
        self._registry.register(name, spec)

    def session(self) -> MCPManagerSession:
        """Create a new MCPManagerSession for a pipeline run."""
        return MCPManagerSession(self._registry)

    def list_servers(self) -> Dict[str, MCPServerSpec]:
        """Return a snapshot of every registered server spec."""
        return self._registry.as_dict()
agentz/mcp/patches.py ADDED
@@ -0,0 +1,88 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ import shutil
5
+ import subprocess
6
+ from pathlib import Path
7
+ from typing import Iterable
8
+
9
+
10
logger = logging.getLogger(__name__)


# Marker inserted by our patch; its presence in a file means that file has
# already been patched and must be skipped.
PATCH_MARKER = "__browsermcp_original_close"
# Upstream bug in @browsermcp/mcp's bundled index.js: the overridden
# close() awaits itself, recursing forever instead of calling the original.
# NOTE(review): the snippet text below must match the published bundle
# byte-for-byte (including leading whitespace) for the replace() in
# apply_browsermcp_close_patch to fire — confirm against the actual
# @browsermcp/mcp dist file.
BUGGY_SNIPPET = " server.close = async () => {\n await server.close();"
# Replacement: keep a bound reference to the original close() and guard
# against re-entrant calls with a one-shot closing flag.
PATCHED_SNIPPET = """ const __browsermcp_original_close = server.close.bind(server);
 server.close = async () => {
 if (server.__browsermcp_closing) {
 return;
 }
 server.__browsermcp_closing = true;
 await __browsermcp_original_close();
 """
23
+
24
+
25
def _candidate_paths(base_dir: Path) -> Iterable[Path]:
    """Yield every cached Browser MCP ``dist/index.js`` under *base_dir*."""
    if not base_dir.exists():
        return []
    pattern = "**/node_modules/@browsermcp/mcp/dist/index.js"
    return base_dir.glob(pattern)
29
+
30
+
31
def _prime_browsermcp_cache() -> bool:
    """Run a no-op node command through npx so the package lands in the cache.

    Returns:
        True when the npx invocation succeeded (the package should now be
        cached); False when npx is missing or the command failed.
    """
    npx_path = shutil.which("npx")
    if npx_path is None:
        logger.warning("Unable to locate `npx`; skipping Browser MCP patch.")
        return False

    # `--yes --package ... node -e process.exit(0)` downloads the package
    # into the npx cache without actually starting the MCP server.
    command = [
        npx_path,
        "--yes",
        "--package",
        "@browsermcp/mcp@latest",
        "node",
        "-e",
        "process.exit(0)",
    ]
    try:
        subprocess.run(
            command,
            check=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except subprocess.CalledProcessError as exc:
        logger.warning("Failed to prime Browser MCP cache via npx: %s", exc)
        return False
    return True
56
+
57
+
58
def apply_browsermcp_close_patch() -> None:
    """Patch the Browser MCP CLI to avoid recursive `server.close` calls.

    Scans the npx cache for @browsermcp/mcp bundles, downloading the
    package first if nothing is cached, and rewrites the buggy close
    handler in place. Failures are logged and skipped, never raised.
    """
    cache_root = Path.home() / ".npm" / "_npx"
    targets = list(_candidate_paths(cache_root))
    if not targets:
        # Nothing cached yet: try to prime the cache, then rescan.
        if not _prime_browsermcp_cache():
            return
        targets = list(_candidate_paths(cache_root))

    for index_js in targets:
        try:
            source = index_js.read_text(encoding="utf-8")
        except OSError as exc:
            logger.warning("Failed to read %s while applying Browser MCP patch: %s", index_js, exc)
            continue

        # Skip files we already patched, and files that don't contain the
        # known-buggy snippet (e.g. a fixed upstream release).
        if PATCH_MARKER in source or BUGGY_SNIPPET not in source:
            continue

        rewritten = source.replace(BUGGY_SNIPPET, PATCHED_SNIPPET, 1)
        if rewritten == source:
            continue

        try:
            index_js.write_text(rewritten, encoding="utf-8")
            logger.info("Patched Browser MCP close handler in %s", index_js)
        except OSError as exc:
            logger.warning("Failed to patch %s: %s", index_js, exc)
@@ -0,0 +1,14 @@
1
+ import asyncio
2
+ import shutil
3
+
4
+ from agents import Agent, Runner, trace
5
+ from agents.mcp import MCPServer, MCPServerStdio
6
+
7
+
8
def ChromeDevToolsMCP():
    """Build a stdio MCP server that launches chrome-devtools-mcp via npx.

    The server is returned unconnected; callers enter its async context to
    start it.
    """
    launch_params = {"command": "npx", "args": ["-y", "chrome-devtools-mcp@latest"]}
    return MCPServerStdio(
        cache_tools_list=True,  # Cache the tools list, for demonstration
        params=launch_params,
    )
14
+