onetool-mcp 1.0.0rc2__py3-none-any.whl → 1.0.0rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. onetool/cli.py +2 -0
  2. {onetool_mcp-1.0.0rc2.dist-info → onetool_mcp-1.0.0rc3.dist-info}/METADATA +26 -33
  3. {onetool_mcp-1.0.0rc2.dist-info → onetool_mcp-1.0.0rc3.dist-info}/RECORD +31 -33
  4. ot/config/__init__.py +90 -48
  5. ot/config/global_templates/__init__.py +2 -2
  6. ot/config/global_templates/diagram-templates/api-flow.mmd +33 -33
  7. ot/config/global_templates/diagram-templates/c4-context.puml +30 -30
  8. ot/config/global_templates/diagram-templates/class-diagram.mmd +87 -87
  9. ot/config/global_templates/diagram-templates/feature-mindmap.mmd +70 -70
  10. ot/config/global_templates/diagram-templates/microservices.d2 +81 -81
  11. ot/config/global_templates/diagram-templates/project-gantt.mmd +37 -37
  12. ot/config/global_templates/diagram-templates/state-machine.mmd +42 -42
  13. ot/config/global_templates/diagram.yaml +167 -167
  14. ot/config/global_templates/onetool.yaml +2 -0
  15. ot/config/global_templates/prompts.yaml +102 -102
  16. ot/config/global_templates/security.yaml +1 -4
  17. ot/config/global_templates/servers.yaml +1 -1
  18. ot/config/global_templates/tool_templates/__init__.py +7 -7
  19. ot/config/loader.py +226 -869
  20. ot/config/models.py +735 -0
  21. ot/config/secrets.py +243 -192
  22. ot/executor/tool_loader.py +10 -1
  23. ot/executor/validator.py +11 -1
  24. ot/meta.py +338 -33
  25. ot/prompts.py +228 -218
  26. ot/proxy/manager.py +168 -8
  27. ot/registry/__init__.py +199 -189
  28. ot/config/dynamic.py +0 -121
  29. ot/config/mcp.py +0 -149
  30. ot/config/tool_config.py +0 -125
  31. {onetool_mcp-1.0.0rc2.dist-info → onetool_mcp-1.0.0rc3.dist-info}/WHEEL +0 -0
  32. {onetool_mcp-1.0.0rc2.dist-info → onetool_mcp-1.0.0rc3.dist-info}/entry_points.txt +0 -0
  33. {onetool_mcp-1.0.0rc2.dist-info → onetool_mcp-1.0.0rc3.dist-info}/licenses/LICENSE.txt +0 -0
  34. {onetool_mcp-1.0.0rc2.dist-info → onetool_mcp-1.0.0rc3.dist-info}/licenses/NOTICE.txt +0 -0
ot/prompts.py CHANGED
@@ -1,218 +1,228 @@
- """Prompts loader for externalized MCP server instructions.
-
- Loads prompts from prompts.yaml. File must exist and contain instructions.
- """
-
- from __future__ import annotations
-
- from pathlib import Path
- from typing import Any
-
- import yaml
- from loguru import logger
- from pydantic import BaseModel, Field
-
-
- class ToolPrompt(BaseModel):
-     """Prompt configuration for a specific tool."""
-
-     description: str | None = Field(
-         default=None, description="Override tool description"
-     )
-     examples: list[str] = Field(default_factory=list, description="Usage examples")
-
-
- class PromptsConfig(BaseModel):
-     """Configuration for MCP server prompts and tool descriptions."""
-
-     instructions: str = Field(
-         description="Main server instructions shown to the LLM",
-     )
-     tools: dict[str, ToolPrompt] = Field(
-         default_factory=dict,
-         description="Per-tool prompt overrides",
-     )
-     templates: dict[str, str] = Field(
-         default_factory=dict,
-         description="Reusable prompt templates with {variable} placeholders",
-     )
-     packs: dict[str, str] = Field(
-         default_factory=dict,
-         description="Per-pack instructions (e.g., excel, github)",
-     )
-
-
- class PromptsError(Exception):
-     """Error loading prompts configuration."""
-
-
- def _get_template_prompts_path() -> Path:
-     """Get path to prompts.yaml in global_templates (for development/testing)."""
-     return Path(__file__).parent / "config" / "global_templates" / "prompts.yaml"
-
-
- def load_prompts(prompts_path: Path | str | None = None) -> PromptsConfig:
-     """Load prompts configuration from YAML file.
-
-     Args:
-         prompts_path: Path to prompts file. Falls back to global_templates for development.
-
-     Returns:
-         PromptsConfig with loaded prompts.
-
-     Raises:
-         PromptsError: If file is invalid or has no instructions.
-     """
-     if prompts_path is not None:
-         prompts_path = Path(prompts_path)
-         if not prompts_path.exists():
-             raise PromptsError(f"Prompts file not found: {prompts_path}")
-     else:
-         # Try config/prompts.yaml, fall back to global_templates for development
-         prompts_path = Path("config/prompts.yaml")
-         if not prompts_path.exists():
-             prompts_path = _get_template_prompts_path()
-
-     logger.debug(f"Loading prompts from {prompts_path}")
-
-     try:
-         with prompts_path.open() as f:
-             raw_data = yaml.safe_load(f)
-     except yaml.YAMLError as e:
-         raise PromptsError(f"Invalid YAML in {prompts_path}: {e}") from e
-     except OSError as e:
-         raise PromptsError(f"Error reading {prompts_path}: {e}") from e
-
-     if raw_data is None or not isinstance(raw_data, dict):
-         raise PromptsError(f"Empty or invalid prompts file: {prompts_path}")
-
-     # Handle nested 'prompts:' key (used in template files)
-     if "prompts" in raw_data and isinstance(raw_data["prompts"], dict):
-         raw_data = raw_data["prompts"]
-
-     if "instructions" not in raw_data or not raw_data["instructions"]:
-         raise PromptsError(f"Missing 'instructions' in {prompts_path}")
-
-     try:
-         return PromptsConfig.model_validate(raw_data)
-     except Exception as e:
-         raise PromptsError(f"Invalid prompts configuration: {e}") from e
-
-
- def render_template(
-     config: PromptsConfig, template_name: str, **kwargs: Any
- ) -> str | None:
-     """Render a prompt template with variable substitution.
-
-     Args:
-         config: PromptsConfig with templates
-         template_name: Name of the template to render
-         **kwargs: Variables to substitute in the template
-
-     Returns:
-         Rendered template string, or None if template not found.
-     """
-     template = config.templates.get(template_name)
-     if template is None:
-         return None
-
-     try:
-         return template.format(**kwargs)
-     except KeyError as e:
-         logger.warning(f"Missing template variable: {e}")
-         return None
-
-
- def get_tool_description(
-     config: PromptsConfig, tool_name: str, default: str = ""
- ) -> str:
-     """Get tool description from prompts config with fallback to docstring.
-
-     Args:
-         config: PromptsConfig with tool prompts
-         tool_name: Name of the tool
-         default: Default description if not in config (typically from docstring)
-
-     Returns:
-         Tool description string.
-     """
-     tool_prompt = config.tools.get(tool_name)
-     if tool_prompt and tool_prompt.description:
-         return tool_prompt.description
-     return default
-
-
- def get_tool_examples(config: PromptsConfig, tool_name: str) -> list[str]:
-     """Get usage examples for a tool.
-
-     Args:
-         config: PromptsConfig with tool prompts
-         tool_name: Name of the tool
-
-     Returns:
-         List of example strings.
-     """
-     tool_prompt = config.tools.get(tool_name)
-     if tool_prompt:
-         return tool_prompt.examples
-     return []
-
-
- def get_pack_instructions(config: PromptsConfig, pack: str) -> str | None:
-     """Get instructions for a pack from prompts config.
-
-     Args:
-         config: PromptsConfig with pack instructions
-         pack: Name of the pack (e.g., "excel", "github")
-
-     Returns:
-         Pack instructions string, or None if not configured.
-     """
-     return config.packs.get(pack)
-
-
- # Global prompts instance
- _prompts: PromptsConfig | None = None
-
-
- def get_prompts(
-     prompts_path: Path | str | None = None,
-     inline_prompts: dict[str, Any] | None = None,
-     reload: bool = False,
- ) -> PromptsConfig:
-     """Get or load the global prompts configuration.
-
-     Prompts are loaded with the following priority:
-     1. Inline prompts (if provided)
-     2. prompts_file (from config or explicit path)
-
-     Args:
-         prompts_path: Path to prompts file (only used on first load)
-         inline_prompts: Inline prompts dict from config (overrides file)
-         reload: Force reload configuration
-
-     Returns:
-         PromptsConfig instance
-
-     Raises:
-         PromptsError: If prompts cannot be loaded.
-     """
-     global _prompts
-
-     if _prompts is None or reload:
-         if inline_prompts is not None:
-             # Use inline prompts from config
-             if (
-                 "instructions" not in inline_prompts
-                 or not inline_prompts["instructions"]
-             ):
-                 raise PromptsError("Missing 'instructions' in inline prompts")
-             try:
-                 _prompts = PromptsConfig.model_validate(inline_prompts)
-                 logger.debug("Using inline prompts from config")
-             except Exception as e:
-                 raise PromptsError(f"Invalid inline prompts: {e}") from e
-         else:
-             _prompts = load_prompts(prompts_path)
-
-     return _prompts
+ """Prompts loader for externalized MCP server instructions.
+
+ Loads prompts from prompts.yaml. File must exist and contain instructions.
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Any
+
+ import yaml
+ from loguru import logger
+ from pydantic import BaseModel, Field
+
+
+ class ToolPrompt(BaseModel):
+     """Prompt configuration for a specific tool."""
+
+     description: str | None = Field(
+         default=None, description="Override tool description"
+     )
+     examples: list[str] = Field(default_factory=list, description="Usage examples")
+
+
+ class PromptsConfig(BaseModel):
+     """Configuration for MCP server prompts and tool descriptions."""
+
+     instructions: str = Field(
+         description="Main server instructions shown to the LLM",
+     )
+     tools: dict[str, ToolPrompt] = Field(
+         default_factory=dict,
+         description="Per-tool prompt overrides",
+     )
+     templates: dict[str, str] = Field(
+         default_factory=dict,
+         description="Reusable prompt templates with {variable} placeholders",
+     )
+     packs: dict[str, str] = Field(
+         default_factory=dict,
+         description="Per-pack instructions (e.g., excel, github)",
+     )
+
+
+ class PromptsError(Exception):
+     """Error loading prompts configuration."""
+
+
+ def _get_template_prompts_path() -> Path:
+     """Get path to prompts.yaml in global_templates (for development/testing)."""
+     return Path(__file__).parent / "config" / "global_templates" / "prompts.yaml"
+
+
+ def load_prompts(prompts_path: Path | str | None = None) -> PromptsConfig:
+     """Load prompts configuration from YAML file.
+
+     Args:
+         prompts_path: Path to prompts file. Falls back to global_templates for development.
+
+     Returns:
+         PromptsConfig with loaded prompts.
+
+     Raises:
+         PromptsError: If file is invalid or has no instructions.
+     """
+     if prompts_path is not None:
+         prompts_path = Path(prompts_path)
+         if not prompts_path.exists():
+             raise PromptsError(f"Prompts file not found: {prompts_path}")
+     else:
+         # Try config/prompts.yaml, fall back to global_templates for development
+         prompts_path = Path("config/prompts.yaml")
+         if not prompts_path.exists():
+             prompts_path = _get_template_prompts_path()
+
+     logger.debug(f"Loading prompts from {prompts_path}")
+
+     try:
+         with prompts_path.open() as f:
+             raw_data = yaml.safe_load(f)
+     except yaml.YAMLError as e:
+         raise PromptsError(f"Invalid YAML in {prompts_path}: {e}") from e
+     except OSError as e:
+         raise PromptsError(f"Error reading {prompts_path}: {e}") from e
+
+     if raw_data is None or not isinstance(raw_data, dict):
+         raise PromptsError(f"Empty or invalid prompts file: {prompts_path}")
+
+     # Handle nested 'prompts:' key (used in template files)
+     if "prompts" in raw_data and isinstance(raw_data["prompts"], dict):
+         raw_data = raw_data["prompts"]
+
+     if "instructions" not in raw_data or not raw_data["instructions"]:
+         raise PromptsError(f"Missing 'instructions' in {prompts_path}")
+
+     try:
+         return PromptsConfig.model_validate(raw_data)
+     except Exception as e:
+         raise PromptsError(f"Invalid prompts configuration: {e}") from e
+
+
+ def render_template(
+     config: PromptsConfig, template_name: str, **kwargs: Any
+ ) -> str | None:
+     """Render a prompt template with variable substitution.
+
+     Args:
+         config: PromptsConfig with templates
+         template_name: Name of the template to render
+         **kwargs: Variables to substitute in the template
+
+     Returns:
+         Rendered template string, or None if template not found.
+     """
+     template = config.templates.get(template_name)
+     if template is None:
+         return None
+
+     try:
+         return template.format(**kwargs)
+     except KeyError as e:
+         logger.warning(f"Missing template variable: {e}")
+         return None
+
+
+ def get_tool_description(
+     config: PromptsConfig, tool_name: str, default: str = ""
+ ) -> str:
+     """Get tool description from prompts config with fallback to docstring.
+
+     Args:
+         config: PromptsConfig with tool prompts
+         tool_name: Name of the tool
+         default: Default description if not in config (typically from docstring)
+
+     Returns:
+         Tool description string.
+     """
+     tool_prompt = config.tools.get(tool_name)
+     if tool_prompt and tool_prompt.description:
+         return tool_prompt.description
+     return default
+
+
+ def get_tool_examples(config: PromptsConfig, tool_name: str) -> list[str]:
+     """Get usage examples for a tool.
+
+     Args:
+         config: PromptsConfig with tool prompts
+         tool_name: Name of the tool
+
+     Returns:
+         List of example strings.
+     """
+     tool_prompt = config.tools.get(tool_name)
+     if tool_prompt:
+         return tool_prompt.examples
+     return []
+
+
+ def get_pack_instructions(config: PromptsConfig, pack: str) -> str | None:
+     """Get instructions for a pack from prompts config.
+
+     Args:
+         config: PromptsConfig with pack instructions
+         pack: Name of the pack (e.g., "excel", "github")
+
+     Returns:
+         Pack instructions string, or None if not configured.
+     """
+     return config.packs.get(pack)
+
+
+ # Global prompts instance
+ _prompts: PromptsConfig | None = None
+
+
+ def get_prompts(
+     prompts_path: Path | str | None = None,
+     inline_prompts: dict[str, Any] | None = None,
+     reload: bool = False,
+ ) -> PromptsConfig:
+     """Get or load the global prompts configuration.
+
+     Prompts are loaded with the following priority:
+     1. Inline prompts (if provided)
+     2. prompts_file (from config or explicit path)
+
+     Args:
+         prompts_path: Path to prompts file (only used on first load)
+         inline_prompts: Inline prompts dict from config (overrides file)
+         reload: Force reload configuration
+
+     Returns:
+         PromptsConfig instance
+
+     Raises:
+         PromptsError: If prompts cannot be loaded.
+     """
+     global _prompts
+
+     if _prompts is None or reload:
+         if inline_prompts is not None:
+             # Use inline prompts from config
+             if (
+                 "instructions" not in inline_prompts
+                 or not inline_prompts["instructions"]
+             ):
+                 raise PromptsError("Missing 'instructions' in inline prompts")
+             try:
+                 _prompts = PromptsConfig.model_validate(inline_prompts)
+                 logger.debug("Using inline prompts from config")
+             except Exception as e:
+                 raise PromptsError(f"Invalid inline prompts: {e}") from e
+         else:
+             _prompts = load_prompts(prompts_path)
+
+     return _prompts
+
+
+ def reset() -> None:
+     """Clear prompts cache for reload.
+
+     Use this as part of the config reload flow to force prompts to be
+     reloaded from disk on next access.
+     """
+     global _prompts
+     _prompts = None
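The functional addition in this file is the new module-level reset() helper, which clears the cached PromptsConfig so the next get_prompts() call re-reads prompts from disk. A minimal sketch of how a config-reload hook might use it; the on_config_reload function name is illustrative only and not part of the package:

from pathlib import Path

from ot import prompts


def on_config_reload(prompts_path: Path | None = None) -> None:
    """Hypothetical reload hook: drop the cached prompts, then load fresh ones."""
    prompts.reset()  # clears the module-level _prompts cache (new in rc3)
    try:
        # Next access reloads from the file; get_prompts(reload=True) would also bypass the cache.
        config = prompts.get_prompts(prompts_path=prompts_path)
    except prompts.PromptsError as exc:
        # Missing file or missing 'instructions' surfaces as PromptsError.
        raise RuntimeError(f"Prompt reload failed: {exc}") from exc
    print(f"Loaded {len(config.tools)} tool prompt overrides")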
ot/proxy/manager.py CHANGED
@@ -10,16 +10,20 @@ import asyncio
  import contextlib
  import os
  from dataclasses import dataclass
- from typing import Any
+ from typing import TYPE_CHECKING, Any

  from fastmcp import Client
- from fastmcp.client.transports import StdioTransport
+ from fastmcp.client.auth import BearerAuth, OAuth
+ from fastmcp.client.transports import StdioTransport, StreamableHttpTransport
  from loguru import logger
  from mcp import types

- from ot.config.mcp import McpServerConfig, expand_secrets, expand_subprocess_env
+ from ot.config import expand_vars
  from ot.logging import LogSpan

+ if TYPE_CHECKING:
+     from ot.config.models import McpServerConfig
+

  @dataclass
  class ProxyToolInfo:
@@ -190,6 +194,124 @@ class ProxyManager:
          )
          return future.result(timeout=timeout + 5)

+     async def list_resources(self, server: str) -> list[dict[str, Any]]:
+         """List resources from a proxied MCP server.
+
+         Args:
+             server: Name of the server.
+
+         Returns:
+             List of resource metadata dicts. Empty list if server doesn't support resources.
+
+         Raises:
+             ValueError: If server is not connected.
+         """
+         client = self._clients.get(server)
+         if not client:
+             raise ValueError(f"Server '{server}' not connected")
+
+         try:
+             resources = await client.list_resources()
+             return [{"uri": r.uri, "name": r.name, "description": r.description or ""} for r in resources]
+         except (AttributeError, NotImplementedError):
+             # Server doesn't support resources
+             return []
+         except Exception as e:
+             # Check if error indicates unsupported feature
+             error_msg = str(e).lower()
+             if any(x in error_msg for x in ["not found", "not supported", "not implemented"]):
+                 return []
+             raise
+
+     async def read_resource(self, server: str, uri: str) -> str:
+         """Read a resource from a proxied MCP server.
+
+         Args:
+             server: Name of the server.
+             uri: Resource URI to read.
+
+         Returns:
+             Resource content as text.
+
+         Raises:
+             ValueError: If server is not connected.
+         """
+         client = self._clients.get(server)
+         if not client:
+             raise ValueError(f"Server '{server}' not connected")
+
+         result = await client.read_resource(uri)
+         # Extract text from resource contents (ReadResourceResult.contents)
+         text_parts = []
+         for content in result.contents:  # type: ignore[attr-defined]
+             if hasattr(content, "text"):
+                 text_parts.append(content.text)
+         return "\n".join(text_parts) if text_parts else ""
+
+     async def list_prompts(self, server: str) -> list[dict[str, Any]]:
+         """List prompts from a proxied MCP server.
+
+         Args:
+             server: Name of the server.
+
+         Returns:
+             List of prompt metadata dicts. Empty list if server doesn't support prompts.
+
+         Raises:
+             ValueError: If server is not connected.
+         """
+         client = self._clients.get(server)
+         if not client:
+             raise ValueError(f"Server '{server}' not connected")
+
+         try:
+             prompts = await client.list_prompts()
+             return [{"name": p.name, "description": p.description or ""} for p in prompts]
+         except (AttributeError, NotImplementedError):
+             # Server doesn't support prompts
+             return []
+         except Exception as e:
+             # Check if error indicates unsupported feature
+             error_msg = str(e).lower()
+             if any(x in error_msg for x in ["not found", "not supported", "not implemented"]):
+                 return []
+             raise
+
+     async def get_prompt(self, server: str, name: str, arguments: dict[str, Any] | None = None) -> str:
+         """Get a rendered prompt from a proxied MCP server.
+
+         Args:
+             server: Name of the server.
+             name: Prompt name.
+             arguments: Optional arguments for the prompt.
+
+         Returns:
+             Rendered prompt content as text.
+
+         Raises:
+             ValueError: If server is not connected.
+         """
+         client = self._clients.get(server)
+         if not client:
+             raise ValueError(f"Server '{server}' not connected")
+
+         result = await client.get_prompt(name, arguments or {})
+         # Extract text from prompt messages
+         text_parts = []
+         for message in result.messages:
+             if hasattr(message, "content"):
+                 content = message.content
+                 if isinstance(content, str):
+                     text_parts.append(content)
+                 elif isinstance(content, list):
+                     # Content is a list of content parts
+                     for part in content:
+                         if hasattr(part, "text"):
+                             text_parts.append(part.text)
+                 elif hasattr(content, "text"):
+                     text_parts.append(content.text)
+         return "\n".join(text_parts) if text_parts else ""
+
      async def connect(self, configs: dict[str, McpServerConfig]) -> None:
          """Connect to all enabled MCP servers.

@@ -263,7 +385,11 @@ class ProxyManager:
          raise ValueError(f"Unknown server type: {config.type}")

      def _create_http_client(self, name: str, config: McpServerConfig) -> Client:  # type: ignore[type-arg]
-         """Create an HTTP/SSE client."""
+         """Create an HTTP client using Streamable HTTP transport.
+
+         Streamable HTTP is the recommended MCP transport for web-based servers,
+         supporting both batch responses and streaming via SSE.
+         """
          if not config.url:
              raise RuntimeError(f"Server {name}: HTTP server requires url")

@@ -277,21 +403,55 @@
          headers = {}
          for key, value in config.headers.items():
              if "${" in value:
-                 headers[key] = expand_secrets(value)
+                 headers[key] = expand_vars(value)
              else:
                  headers[key] = value

-         return Client(url, headers=headers, timeout=float(config.timeout))
+         # Configure authentication
+         auth: OAuth | BearerAuth | None = None
+         if config.auth:
+             if config.auth.type == "oauth":
+                 auth = OAuth(
+                     mcp_url=url,
+                     scopes=config.auth.scopes or [],
+                     client_name="OneTool",
+                 )
+                 logger.debug(f"Configured OAuth for {name} with scopes: {config.auth.scopes}")
+             else:  # bearer
+                 token = expand_vars(config.auth.token) if config.auth.token else ""
+                 auth = BearerAuth(token)
+                 logger.debug(f"Configured bearer auth for {name}")
+
+         transport = StreamableHttpTransport(url=url, headers=headers if headers else None, auth=auth)
+         return Client(transport, timeout=float(config.timeout))

      def _create_stdio_client(self, name: str, config: McpServerConfig) -> Client:  # type: ignore[type-arg]
          """Create a stdio client."""
          if not config.command:
              raise RuntimeError(f"Server {name}: stdio server requires command")

-         # Build environment: PATH only + explicit config.env
+         # Build environment variables for subprocess
+         # Order: PATH (from host) -> root env -> server-specific env -> expand secrets
          env = {"PATH": os.environ.get("PATH", "")}
+
+         # Get root-level env from config (if available)
+         try:
+             from ot.config import get_config
+             root_config = get_config()
+             root_env = root_config.env
+         except Exception:
+             root_env = {}
+
+         # Merge: root env first, then server-specific env (overrides root)
+         for key, value in root_env.items():
+             env[key] = value
          for key, value in config.env.items():
-             env[key] = expand_subprocess_env(value)
+             env[key] = value
+
+         # Expand ${VAR} patterns from secrets and env: in all env values
+         for key, value in env.items():
+             if "${" in value:
+                 env[key] = expand_vars(value)

          transport = StdioTransport(
              command=config.command,
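Besides the new OAuth/bearer handling for HTTP servers, this release adds resource and prompt passthrough on ProxyManager (list_resources, read_resource, list_prompts, get_prompt). A minimal usage sketch based on the method signatures shown above, assuming a ProxyManager that has already been constructed and connected; the dump_server_extras helper and the "docs" server name are illustrative only:

from typing import Any

from ot.proxy.manager import ProxyManager


async def dump_server_extras(manager: ProxyManager, server: str = "docs") -> dict[str, Any]:
    """Illustrative helper: collect resources and prompts exposed by one proxied server."""
    summary: dict[str, Any] = {"resources": [], "prompts": []}

    # Both listing calls return [] when the proxied server does not implement the feature,
    # and raise ValueError if the named server is not connected.
    for res in await manager.list_resources(server):
        text = await manager.read_resource(server, res["uri"])
        summary["resources"].append({"uri": res["uri"], "chars": len(text)})

    for prompt in await manager.list_prompts(server):
        rendered = await manager.get_prompt(server, prompt["name"], arguments=None)
        summary["prompts"].append({"name": prompt["name"], "chars": len(rendered)})

    return summary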