code-puppy 0.0.336__py3-none-any.whl → 0.0.348__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/agents/base_agent.py +41 -224
- code_puppy/agents/event_stream_handler.py +257 -0
- code_puppy/claude_cache_client.py +208 -2
- code_puppy/cli_runner.py +53 -35
- code_puppy/command_line/add_model_menu.py +8 -9
- code_puppy/command_line/autosave_menu.py +18 -24
- code_puppy/command_line/clipboard.py +527 -0
- code_puppy/command_line/core_commands.py +34 -0
- code_puppy/command_line/mcp/catalog_server_installer.py +5 -6
- code_puppy/command_line/mcp/custom_server_form.py +54 -19
- code_puppy/command_line/mcp/custom_server_installer.py +8 -9
- code_puppy/command_line/mcp/handler.py +0 -2
- code_puppy/command_line/mcp/help_command.py +1 -5
- code_puppy/command_line/mcp/start_command.py +36 -18
- code_puppy/command_line/onboarding_slides.py +0 -1
- code_puppy/command_line/prompt_toolkit_completion.py +124 -0
- code_puppy/command_line/utils.py +54 -0
- code_puppy/http_utils.py +93 -130
- code_puppy/mcp_/async_lifecycle.py +35 -4
- code_puppy/mcp_/managed_server.py +49 -24
- code_puppy/mcp_/manager.py +81 -52
- code_puppy/messaging/message_queue.py +11 -23
- code_puppy/messaging/messages.py +3 -0
- code_puppy/messaging/rich_renderer.py +13 -3
- code_puppy/model_factory.py +16 -0
- code_puppy/models.json +2 -2
- code_puppy/plugins/antigravity_oauth/antigravity_model.py +17 -2
- code_puppy/plugins/claude_code_oauth/utils.py +126 -7
- code_puppy/terminal_utils.py +128 -1
- code_puppy/tools/agent_tools.py +66 -13
- code_puppy/tools/command_runner.py +1 -0
- code_puppy/tools/common.py +3 -9
- {code_puppy-0.0.336.data → code_puppy-0.0.348.data}/data/code_puppy/models.json +2 -2
- {code_puppy-0.0.336.dist-info → code_puppy-0.0.348.dist-info}/METADATA +19 -71
- {code_puppy-0.0.336.dist-info → code_puppy-0.0.348.dist-info}/RECORD +39 -38
- code_puppy/command_line/mcp/add_command.py +0 -170
- {code_puppy-0.0.336.data → code_puppy-0.0.348.data}/data/code_puppy/models_dev_api.json +0 -0
- {code_puppy-0.0.336.dist-info → code_puppy-0.0.348.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.336.dist-info → code_puppy-0.0.348.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.336.dist-info → code_puppy-0.0.348.dist-info}/licenses/LICENSE +0 -0
code_puppy/http_utils.py
CHANGED
@@ -5,10 +5,10 @@ This module provides functions for creating properly configured HTTP clients.
 """

 import asyncio
-import logging
 import os
 import socket
 import time
+from dataclasses import dataclass
 from typing import Any, Dict, Optional, Union

 import httpx
@@ -16,7 +16,69 @@ import requests

 from code_puppy.config import get_http2

-
+
+@dataclass
+class ProxyConfig:
+    """Configuration for proxy and SSL settings."""
+
+    verify: Union[bool, str, None]
+    trust_env: bool
+    proxy_url: str | None
+    disable_retry: bool
+    http2_enabled: bool
+
+
+def _resolve_proxy_config(verify: Union[bool, str, None] = None) -> ProxyConfig:
+    """Resolve proxy, SSL, and retry settings from environment.
+
+    This centralizes the logic for detecting proxies, determining SSL verification,
+    and checking if retry transport should be disabled.
+    """
+    if verify is None:
+        verify = get_cert_bundle_path()
+
+    http2_enabled = get_http2()
+
+    disable_retry = os.environ.get(
+        "CODE_PUPPY_DISABLE_RETRY_TRANSPORT", ""
+    ).lower() in ("1", "true", "yes")
+
+    has_proxy = bool(
+        os.environ.get("HTTP_PROXY")
+        or os.environ.get("HTTPS_PROXY")
+        or os.environ.get("http_proxy")
+        or os.environ.get("https_proxy")
+    )
+
+    # Determine trust_env and verify based on proxy/retry settings
+    if disable_retry:
+        # Test mode: disable SSL verification for proxy testing
+        verify = False
+        trust_env = True
+    elif has_proxy:
+        # Production proxy: keep SSL verification enabled
+        trust_env = True
+    else:
+        trust_env = False
+
+    # Extract proxy URL
+    proxy_url = None
+    if has_proxy:
+        proxy_url = (
+            os.environ.get("HTTPS_PROXY")
+            or os.environ.get("https_proxy")
+            or os.environ.get("HTTP_PROXY")
+            or os.environ.get("http_proxy")
+        )
+
+    return ProxyConfig(
+        verify=verify,
+        trust_env=trust_env,
+        proxy_url=proxy_url,
+        disable_retry=disable_retry,
+        http2_enabled=http2_enabled,
+    )
+

 try:
     from .reopenable_async_client import ReopenableAsyncClient
@@ -58,14 +120,7 @@ class RetryingAsyncClient(httpx.AsyncClient):

         for attempt in range(self.max_retries + 1):
             try:
-
-                # But only if it's not the first attempt
-                req_to_send = request
-                if attempt > 0:
-                    # httpx requests are reusable, but we need to be careful with streams
-                    pass
-
-                response = await super().send(req_to_send, **kwargs)
+                response = await super().send(request, **kwargs)
                 last_response = response

                 # Check for retryable status
@@ -128,7 +183,7 @@ class RetryingAsyncClient(httpx.AsyncClient):
         return last_response


-def get_cert_bundle_path() -> str:
+def get_cert_bundle_path() -> str | None:
     # First check if SSL_CERT_FILE environment variable is set
     ssl_cert_file = os.environ.get("SSL_CERT_FILE")
     if ssl_cert_file and os.path.exists(ssl_cert_file):
@@ -164,66 +219,26 @@ def create_async_client(
     headers: Optional[Dict[str, str]] = None,
     retry_status_codes: tuple = (429, 502, 503, 504),
 ) -> httpx.AsyncClient:
-
-    verify = get_cert_bundle_path()
-
-    # Check if HTTP/2 is enabled in config
-    http2_enabled = get_http2()
-
-    # Check if custom retry transport should be disabled (e.g., for integration tests with proxies)
-    disable_retry_transport = os.environ.get(
-        "CODE_PUPPY_DISABLE_RETRY_TRANSPORT", ""
-    ).lower() in ("1", "true", "yes")
-
-    # Check if proxy environment variables are set
-    has_proxy = bool(
-        os.environ.get("HTTP_PROXY")
-        or os.environ.get("HTTPS_PROXY")
-        or os.environ.get("http_proxy")
-        or os.environ.get("https_proxy")
-    )
-
-    # When retry transport is disabled (test mode), disable SSL verification
-    # for proxy testing. For production proxies, SSL should still be verified!
-    if disable_retry_transport:
-        verify = False
-        trust_env = True
-    elif has_proxy:
-        # Production proxy detected - keep SSL verification enabled for security
-        trust_env = True
-    else:
-        trust_env = False
-
-    # Extract proxy URL if needed
-    proxy_url = None
-    if has_proxy:
-        proxy_url = (
-            os.environ.get("HTTPS_PROXY")
-            or os.environ.get("https_proxy")
-            or os.environ.get("HTTP_PROXY")
-            or os.environ.get("http_proxy")
-        )
+    config = _resolve_proxy_config(verify)

-
-    if not disable_retry_transport:
+    if not config.disable_retry:
         return RetryingAsyncClient(
             retry_status_codes=retry_status_codes,
-            proxy=proxy_url,
-            verify=verify,
+            proxy=config.proxy_url,
+            verify=config.verify,
             headers=headers or {},
             timeout=timeout,
-            http2=http2_enabled,
-            trust_env=trust_env,
+            http2=config.http2_enabled,
+            trust_env=config.trust_env,
         )
     else:
-        # Regular client for testing
         return httpx.AsyncClient(
-            proxy=proxy_url,
-            verify=verify,
+            proxy=config.proxy_url,
+            verify=config.verify,
             headers=headers or {},
             timeout=timeout,
-            http2=http2_enabled,
-            trust_env=trust_env,
+            http2=config.http2_enabled,
+            trust_env=config.trust_env,
         )


@@ -273,85 +288,33 @@ def create_reopenable_async_client(
     headers: Optional[Dict[str, str]] = None,
     retry_status_codes: tuple = (429, 502, 503, 504),
 ) -> Union[ReopenableAsyncClient, httpx.AsyncClient]:
-
-    verify = get_cert_bundle_path()
-
-    # Check if HTTP/2 is enabled in config
-    http2_enabled = get_http2()
-
-    # Check if custom retry transport should be disabled (e.g., for integration tests with proxies)
-    disable_retry_transport = os.environ.get(
-        "CODE_PUPPY_DISABLE_RETRY_TRANSPORT", ""
-    ).lower() in ("1", "true", "yes")
-
-    # Check if proxy environment variables are set
-    has_proxy = bool(
-        os.environ.get("HTTP_PROXY")
-        or os.environ.get("HTTPS_PROXY")
-        or os.environ.get("http_proxy")
-        or os.environ.get("https_proxy")
-    )
-
-    # When retry transport is disabled (test mode), disable SSL verification
-    if disable_retry_transport:
-        verify = False
-        trust_env = True
-    elif has_proxy:
-        trust_env = True
-    else:
-        trust_env = False
+    config = _resolve_proxy_config(verify)

-
-
-
-
-
-
-
-
-        )
+    base_kwargs = {
+        "proxy": config.proxy_url,
+        "verify": config.verify,
+        "headers": headers or {},
+        "timeout": timeout,
+        "http2": config.http2_enabled,
+        "trust_env": config.trust_env,
+    }

     if ReopenableAsyncClient is not None:
-        # Use RetryingAsyncClient if retries are enabled
         client_class = (
-            RetryingAsyncClient if not
+            RetryingAsyncClient if not config.disable_retry else httpx.AsyncClient
         )
-
-
-        kwargs = {
-            "proxy": proxy_url,
-            "verify": verify,
-            "headers": headers or {},
-            "timeout": timeout,
-            "http2": http2_enabled,
-            "trust_env": trust_env,
-        }
-
-        if not disable_retry_transport:
+        kwargs = {**base_kwargs, "client_class": client_class}
+        if not config.disable_retry:
             kwargs["retry_status_codes"] = retry_status_codes
-
-        return ReopenableAsyncClient(client_class=client_class, **kwargs)
+        return ReopenableAsyncClient(**kwargs)
     else:
-        # Fallback to RetryingAsyncClient
-        if not
+        # Fallback to RetryingAsyncClient or plain AsyncClient
+        if not config.disable_retry:
             return RetryingAsyncClient(
-                retry_status_codes=retry_status_codes,
-                proxy=proxy_url,
-                verify=verify,
-                headers=headers or {},
-                timeout=timeout,
-                http2=http2_enabled,
-                trust_env=trust_env,
+                retry_status_codes=retry_status_codes, **base_kwargs
             )
         else:
-            return httpx.AsyncClient(
-                proxy=proxy_url,
-                verify=verify,
-                headers=headers or {},
-                timeout=timeout,
-                http2=http2_enabled,
-                trust_env=trust_env,
-            )
+            return httpx.AsyncClient(**base_kwargs)


 def is_cert_bundle_available() -> bool:
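The change above replaces two copies of the proxy/SSL detection logic with a single _resolve_proxy_config() helper that returns a ProxyConfig dataclass. A rough usage sketch, not part of the package, that pokes at the private helper purely to show how its three modes resolve (the proxy hostnames are invented, and the helper being importable this way is an assumption):

# Rough usage sketch (not part of the package): exercising the private
# _resolve_proxy_config() helper added in 0.0.348 to see how its three modes
# resolve. The proxy hostnames below are invented for illustration.
import os

from code_puppy.http_utils import _resolve_proxy_config

# 1. No proxy variables set: trust_env stays False and proxy_url is None.
for var in ("HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy",
            "CODE_PUPPY_DISABLE_RETRY_TRANSPORT"):
    os.environ.pop(var, None)
print(_resolve_proxy_config())

# 2. Production proxy: trust_env flips to True, HTTPS_PROXY wins over HTTP_PROXY,
#    and SSL verification stays whatever get_cert_bundle_path() resolved.
os.environ["HTTP_PROXY"] = "http://proxy.internal:3128"
os.environ["HTTPS_PROXY"] = "http://secure-proxy.internal:3128"
print(_resolve_proxy_config().proxy_url)  # http://secure-proxy.internal:3128

# 3. Test mode: CODE_PUPPY_DISABLE_RETRY_TRANSPORT=1 turns off SSL verification
#    and the retry transport, so create_async_client() falls back to a plain
#    httpx.AsyncClient.
os.environ["CODE_PUPPY_DISABLE_RETRY_TRANSPORT"] = "1"
cfg = _resolve_proxy_config()
print(cfg.verify, cfg.disable_retry)  # False True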
code_puppy/mcp_/async_lifecycle.py
CHANGED
@@ -108,10 +108,17 @@ class AsyncServerLifecycleManager:

         try:
             logger.info(f"Starting server lifecycle for {server_id}")
+            logger.info(
+                f"Server {server_id} _running_count before enter: {getattr(server, '_running_count', 'N/A')}"
+            )

             # Enter the server's context
             await exit_stack.enter_async_context(server)

+            logger.info(
+                f"Server {server_id} _running_count after enter: {getattr(server, '_running_count', 'N/A')}"
+            )
+
             # Store the managed context
             async with self._lock:
                 self._servers[server_id] = ManagedServerContext(
@@ -122,26 +129,50 @@ class AsyncServerLifecycleManager:
                     task=asyncio.current_task(),
                 )

-            logger.info(
+            logger.info(
+                f"Server {server_id} started successfully and stored in _servers"
+            )

             # Keep the task alive until cancelled
+            loop_count = 0
             while True:
                 await asyncio.sleep(1)
+                loop_count += 1

                 # Check if server is still running
-
-
+                running_count = getattr(server, "_running_count", "N/A")
+                is_running = server.is_running
+                logger.debug(
+                    f"Server {server_id} heartbeat #{loop_count}: "
+                    f"is_running={is_running}, _running_count={running_count}"
+                )
+
+                if not is_running:
+                    logger.warning(
+                        f"Server {server_id} stopped unexpectedly! "
+                        f"_running_count={running_count}"
+                    )
                     break

         except asyncio.CancelledError:
             logger.info(f"Server {server_id} lifecycle task cancelled")
             raise
         except Exception as e:
-            logger.error(f"Error in server {server_id} lifecycle: {e}")
+            logger.error(f"Error in server {server_id} lifecycle: {e}", exc_info=True)
         finally:
+            running_count = getattr(server, "_running_count", "N/A")
+            logger.info(
+                f"Server {server_id} lifecycle ending, _running_count={running_count}"
+            )
+
             # Clean up the context
             await exit_stack.aclose()

+            running_count_after = getattr(server, "_running_count", "N/A")
+            logger.info(
+                f"Server {server_id} context closed, _running_count={running_count_after}"
+            )
+
             # Remove from managed servers
             async with self._lock:
                 if server_id in self._servers:
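The hunk above only adds diagnostic logging around the lifecycle task, but it also documents the shape of that task: enter the server's async context, then poll is_running once a second until the server stops or the task is cancelled. A minimal standalone sketch of that pattern, with FakeServer, run_lifecycle, and the three-beat cutoff invented for the demo (the real manager wraps an MCP server object in an AsyncExitStack per server):

# Minimal standalone sketch (assumed names, not the package's API) of the
# lifecycle pattern this hunk instruments: enter the server's async context,
# then poll a liveness flag once per second, logging a heartbeat and breaking
# if the server stops unexpectedly.
import asyncio
import contextlib
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("lifecycle")


class FakeServer:
    """Stands in for an MCP server async context manager."""

    def __init__(self) -> None:
        self.is_running = False

    async def __aenter__(self):
        self.is_running = True
        return self

    async def __aexit__(self, *exc):
        self.is_running = False


async def run_lifecycle(server_id: str, server: FakeServer) -> None:
    exit_stack = contextlib.AsyncExitStack()
    try:
        await exit_stack.enter_async_context(server)
        heartbeat = 0
        while True:
            await asyncio.sleep(1)
            heartbeat += 1
            logger.debug("Server %s heartbeat #%d: is_running=%s",
                         server_id, heartbeat, server.is_running)
            if not server.is_running:
                logger.warning("Server %s stopped unexpectedly", server_id)
                break
            if heartbeat >= 3:  # demo only: simulate the server dying after a few beats
                server.is_running = False
    except asyncio.CancelledError:
        logger.info("Server %s lifecycle task cancelled", server_id)
        raise
    finally:
        await exit_stack.aclose()


asyncio.run(run_lifecycle("demo", FakeServer()))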
code_puppy/mcp_/managed_server.py
CHANGED
@@ -28,6 +28,31 @@ from code_puppy.mcp_.blocking_startup import BlockingMCPServerStdio
 from code_puppy.messaging import emit_info


+def _expand_env_vars(value: Any) -> Any:
+    """
+    Recursively expand environment variables in config values.
+
+    Supports $VAR and ${VAR} syntax. Works with:
+    - Strings: expands env vars
+    - Dicts: recursively expands all string values
+    - Lists: recursively expands all string elements
+    - Other types: returned as-is
+
+    Args:
+        value: The value to expand env vars in
+
+    Returns:
+        The value with env vars expanded
+    """
+    if isinstance(value, str):
+        return os.path.expandvars(value)
+    elif isinstance(value, dict):
+        return {k: _expand_env_vars(v) for k, v in value.items()}
+    elif isinstance(value, list):
+        return [_expand_env_vars(item) for item in value]
+    return value
+
+
 class ServerState(Enum):
     """Enumeration of possible server states."""

@@ -153,9 +178,9 @@ class ManagedMCPServer:
         if "url" not in config:
             raise ValueError("SSE server requires 'url' in config")

-        # Prepare arguments for MCPServerSSE
+        # Prepare arguments for MCPServerSSE (expand env vars in URL)
         sse_kwargs = {
-            "url": config["url"],
+            "url": _expand_env_vars(config["url"]),
         }

         # Add optional parameters if provided
@@ -177,23 +202,26 @@ class ManagedMCPServer:
         if "command" not in config:
             raise ValueError("Stdio server requires 'command' in config")

-        # Handle command and arguments
-        command = config["command"]
+        # Handle command and arguments (expand env vars)
+        command = _expand_env_vars(config["command"])
         args = config.get("args", [])
         if isinstance(args, str):
-            # If args is a string, split it
-            args = args.split()
+            # If args is a string, split it then expand
+            args = [_expand_env_vars(a) for a in args.split()]
+        else:
+            args = _expand_env_vars(args)

         # Prepare arguments for MCPServerStdio
         stdio_kwargs = {"command": command, "args": list(args) if args else []}

-        # Add optional parameters if provided
+        # Add optional parameters if provided (expand env vars in env and cwd)
         if "env" in config:
-            stdio_kwargs["env"] = config["env"]
+            stdio_kwargs["env"] = _expand_env_vars(config["env"])
         if "cwd" in config:
-            stdio_kwargs["cwd"] = config["cwd"]
-
-
+            stdio_kwargs["cwd"] = _expand_env_vars(config["cwd"])
+        # Default timeout of 60s for stdio servers - some servers like Serena take a while to start
+        # Users can override this in their config
+        stdio_kwargs["timeout"] = config.get("timeout", 60)
         if "read_timeout" in config:
             stdio_kwargs["read_timeout"] = config["read_timeout"]

@@ -212,9 +240,9 @@ class ManagedMCPServer:
         if "url" not in config:
             raise ValueError("HTTP server requires 'url' in config")

-        # Prepare arguments for MCPServerStreamableHTTP
+        # Prepare arguments for MCPServerStreamableHTTP (expand env vars in URL)
         http_kwargs = {
-            "url": config["url"],
+            "url": _expand_env_vars(config["url"]),
         }

         # Add optional parameters if provided
@@ -222,18 +250,15 @@ class ManagedMCPServer:
             http_kwargs["timeout"] = config["timeout"]
         if "read_timeout" in config:
             http_kwargs["read_timeout"] = config["read_timeout"]
-
+
+        # Pass headers directly instead of creating http_client
+        # Note: There's a bug in MCP 1.25.0 where passing http_client
+        # causes "'_AsyncGeneratorContextManager' object has no attribute 'stream'"
+        # The workaround is to pass headers directly and let pydantic-ai
+        # create the http_client internally.
+        if config.get("headers"):
             # Expand environment variables in headers
-            headers = config
-            resolved_headers = {}
-            if isinstance(headers, dict):
-                for k, v in headers.items():
-                    if isinstance(v, str):
-                        resolved_headers[k] = os.path.expandvars(v)
-                    else:
-                        resolved_headers[k] = v
-            http_kwargs["headers"] = resolved_headers
-        # Create HTTP client if headers are provided but no client specified
+            http_kwargs["headers"] = _expand_env_vars(config["headers"])

         self._pydantic_server = MCPServerStreamableHTTP(
             **http_kwargs, process_tool_call=process_tool_call
code_puppy/mcp_/manager.py
CHANGED
@@ -469,41 +469,57 @@ class MCPManager:
     def start_server_sync(self, server_id: str) -> bool:
         """
         Synchronous wrapper for start_server.
+
+        IMPORTANT: This schedules the server start as a background task.
+        The server subprocess will start asynchronously - it may not be
+        immediately ready when this function returns.
         """
         try:
-            asyncio.get_running_loop()
-            # We're in an async context
-            #
+            loop = asyncio.get_running_loop()
+            # We're in an async context - schedule the server start as a background task
+            # DO NOT use blocking time.sleep() here as it freezes the event loop!
+
+            # First, enable the server immediately so it's recognized as "starting"
+            managed_server = self._managed_servers.get(server_id)
+            if managed_server:
+                managed_server.enable()
+                self.status_tracker.set_status(server_id, ServerState.STARTING)
+                self.status_tracker.record_start_time(server_id)

-            #
-
-
+            # Schedule the async start_server to run in the background
+            # This will properly start the subprocess and lifecycle task
+            async def start_server_background():
+                try:
+                    result = await self.start_server(server_id)
+                    if result:
+                        logger.info(f"Background server start completed: {server_id}")
+                    else:
+                        logger.warning(f"Background server start failed: {server_id}")
+                    return result
+                except Exception as e:
+                    logger.error(f"Background server start error for {server_id}: {e}")
+                    self.status_tracker.set_status(server_id, ServerState.ERROR)
+                    return False

-            #
-            task =
+            # Create the task - it will run when the event loop gets control
+            task = loop.create_task(
+                start_server_background(), name=f"start_server_{server_id}"
+            )

-            #
-
+            # Store task reference to prevent garbage collection
+            if not hasattr(self, "_pending_start_tasks"):
+                self._pending_start_tasks = {}
+            self._pending_start_tasks[server_id] = task

-
+            # Add callback to clean up task reference when done
+            def cleanup_task(t):
+                if hasattr(self, "_pending_start_tasks"):
+                    self._pending_start_tasks.pop(server_id, None)

-
-            if task.done():
-                try:
-                    result = task.result()
-                    return result
-                except Exception:
-                    pass
+            task.add_done_callback(cleanup_task)

-
-
-            if managed_server:
-                managed_server.enable()
-                self.status_tracker.set_status(server_id, ServerState.RUNNING)
-                self.status_tracker.record_start_time(server_id)
-                logger.info(f"Enabled server synchronously: {server_id}")
-                return True
-            return False
+            logger.info(f"Scheduled background start for server: {server_id}")
+            return True  # Return immediately - server will start in background

         except RuntimeError:
             # No async loop, just enable the server
@@ -582,39 +598,52 @@ class MCPManager:
     def stop_server_sync(self, server_id: str) -> bool:
         """
         Synchronous wrapper for stop_server.
+
+        IMPORTANT: This schedules the server stop as a background task.
+        The server subprocess will stop asynchronously.
         """
         try:
-            asyncio.get_running_loop()
+            loop = asyncio.get_running_loop()
+            # We're in an async context - schedule the server stop as a background task
+            # DO NOT use blocking time.sleep() here as it freezes the event loop!

-            #
-
-
+            # First, disable the server immediately
+            managed_server = self._managed_servers.get(server_id)
+            if managed_server:
+                managed_server.disable()
+                self.status_tracker.set_status(server_id, ServerState.STOPPING)
+                self.status_tracker.record_stop_time(server_id)

-            # Schedule the
-
+            # Schedule the async stop_server to run in the background
+            async def stop_server_background():
+                try:
+                    result = await self.stop_server(server_id)
+                    if result:
+                        logger.info(f"Background server stop completed: {server_id}")
+                    return result
+                except Exception as e:
+                    logger.error(f"Background server stop error for {server_id}: {e}")
+                    return False

-            #
-
+            # Create the task - it will run when the event loop gets control
+            task = loop.create_task(
+                stop_server_background(), name=f"stop_server_{server_id}"
+            )

-
+            # Store task reference to prevent garbage collection
+            if not hasattr(self, "_pending_stop_tasks"):
+                self._pending_stop_tasks = {}
+            self._pending_stop_tasks[server_id] = task

-            #
-
-
-
-                    return result
-                except Exception:
-                    pass
+            # Add callback to clean up task reference when done
+            def cleanup_task(t):
+                if hasattr(self, "_pending_stop_tasks"):
+                    self._pending_stop_tasks.pop(server_id, None)

-
-
-
-
-                self.status_tracker.set_status(server_id, ServerState.STOPPED)
-                self.status_tracker.record_stop_time(server_id)
-                logger.info(f"Disabled server synchronously: {server_id}")
-                return True
-            return False
+            task.add_done_callback(cleanup_task)
+
+            logger.info(f"Scheduled background stop for server: {server_id}")
+            return True  # Return immediately - server will stop in background

         except RuntimeError:
             # No async loop, just disable the server