code-puppy 0.0.126__py3-none-any.whl → 0.0.128__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/__init__.py +1 -0
- code_puppy/agent.py +65 -69
- code_puppy/agents/agent_code_puppy.py +0 -3
- code_puppy/agents/runtime_manager.py +212 -0
- code_puppy/command_line/command_handler.py +56 -25
- code_puppy/command_line/mcp_commands.py +1298 -0
- code_puppy/command_line/meta_command_handler.py +3 -2
- code_puppy/command_line/model_picker_completion.py +21 -8
- code_puppy/main.py +52 -157
- code_puppy/mcp/__init__.py +23 -0
- code_puppy/mcp/async_lifecycle.py +237 -0
- code_puppy/mcp/circuit_breaker.py +218 -0
- code_puppy/mcp/config_wizard.py +437 -0
- code_puppy/mcp/dashboard.py +291 -0
- code_puppy/mcp/error_isolation.py +360 -0
- code_puppy/mcp/examples/retry_example.py +208 -0
- code_puppy/mcp/health_monitor.py +549 -0
- code_puppy/mcp/managed_server.py +346 -0
- code_puppy/mcp/manager.py +701 -0
- code_puppy/mcp/registry.py +412 -0
- code_puppy/mcp/retry_manager.py +321 -0
- code_puppy/mcp/server_registry_catalog.py +751 -0
- code_puppy/mcp/status_tracker.py +355 -0
- code_puppy/messaging/spinner/textual_spinner.py +6 -2
- code_puppy/model_factory.py +19 -4
- code_puppy/models.json +22 -4
- code_puppy/tui/app.py +19 -27
- code_puppy/tui/tests/test_agent_command.py +22 -15
- {code_puppy-0.0.126.data → code_puppy-0.0.128.data}/data/code_puppy/models.json +22 -4
- {code_puppy-0.0.126.dist-info → code_puppy-0.0.128.dist-info}/METADATA +2 -3
- {code_puppy-0.0.126.dist-info → code_puppy-0.0.128.dist-info}/RECORD +34 -18
- {code_puppy-0.0.126.dist-info → code_puppy-0.0.128.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.126.dist-info → code_puppy-0.0.128.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.126.dist-info → code_puppy-0.0.128.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,355 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Server Status Tracker for monitoring MCP server runtime status.
|
|
3
|
+
|
|
4
|
+
This module provides the ServerStatusTracker class that tracks the runtime
|
|
5
|
+
status of MCP servers including state, metrics, and events.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
import threading
|
|
10
|
+
from collections import deque, defaultdict
|
|
11
|
+
from dataclasses import dataclass
|
|
12
|
+
from datetime import datetime, timedelta
|
|
13
|
+
from typing import Any, Dict, List, Optional
|
|
14
|
+
|
|
15
|
+
from .managed_server import ServerState
|
|
16
|
+
|
|
17
|
+
# Configure logging
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass
class Event:
    """Data class representing a server event."""

    # When the event was recorded (naive local time from datetime.now()).
    timestamp: datetime
    # Event category: "started", "stopped", "error", "health_check", etc.
    event_type: str
    # Arbitrary event payload; ServerStatusTracker stores a defensive copy.
    details: Dict
    # Identifier of the server this event belongs to.
    server_id: str
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class ServerStatusTracker:
    """
    Tracks the runtime status of MCP servers including state, metrics, and events.

    Provides in-memory, thread-safe storage for per-server states, metadata,
    events, and start/stop timestamps.  A single re-entrant lock guards all
    state, so public methods may safely call each other while the lock is held
    (e.g. ``set_status`` -> ``record_event``).  Events are kept in a bounded
    ``collections.deque`` so memory use is automatically capped per server.

    Example usage:
        tracker = ServerStatusTracker()
        tracker.set_status("server1", ServerState.RUNNING)
        tracker.record_event("server1", "started", {"message": "Server started successfully"})
        events = tracker.get_events("server1", limit=10)
    """

    # Maximum number of events retained per server (single source of truth;
    # used both for the live buffers and when rebuilding them in cleanup).
    _MAX_EVENTS = 1000

    def __init__(self):
        """Initialize the status tracker with thread-safe data structures."""
        # Re-entrant so nested public-method calls don't deadlock.
        self._lock = threading.RLock()

        # server_id -> current ServerState
        self._server_states: Dict[str, ServerState] = {}

        # server_id -> {key: value}; defaultdict removes the need for
        # explicit existence checks before writing.
        self._server_metadata: Dict[str, Dict[str, Any]] = defaultdict(dict)

        # server_id -> bounded deque of Event objects.  maxlen makes the
        # deque drop the oldest event automatically when full.
        self._server_events: Dict[str, deque] = defaultdict(
            lambda: deque(maxlen=self._MAX_EVENTS)
        )

        # server_id -> most recent start / stop timestamps.
        self._start_times: Dict[str, datetime] = {}
        self._stop_times: Dict[str, datetime] = {}

        logger.info("ServerStatusTracker initialized")

    def set_status(self, server_id: str, state: ServerState) -> None:
        """
        Set the current state of a server.

        Also records a ``state_change`` event capturing the old and new states.

        Args:
            server_id: Unique identifier for the server
            state: New server state
        """
        with self._lock:
            old_state = self._server_states.get(server_id)
            self._server_states[server_id] = state

            # Record state change event (RLock makes this nested call safe).
            self.record_event(
                server_id,
                "state_change",
                {
                    "old_state": old_state.value if old_state else None,
                    "new_state": state.value,
                    "message": f"State changed from {old_state.value if old_state else 'unknown'} to {state.value}"
                }
            )

            logger.debug(f"Server {server_id} state changed: {old_state} -> {state}")

    def get_status(self, server_id: str) -> ServerState:
        """
        Get the current state of a server.

        Args:
            server_id: Unique identifier for the server

        Returns:
            Current server state, defaults to STOPPED if not found
        """
        with self._lock:
            return self._server_states.get(server_id, ServerState.STOPPED)

    def set_metadata(self, server_id: str, key: str, value: Any) -> None:
        """
        Set metadata value for a server.

        Also records a ``metadata_update`` event capturing the old and new values.

        Args:
            server_id: Unique identifier for the server
            key: Metadata key
            value: Metadata value (can be any type)
        """
        with self._lock:
            # defaultdict(dict) creates the per-server mapping on first access,
            # so no explicit existence check is needed.
            server_meta = self._server_metadata[server_id]
            old_value = server_meta.get(key)
            server_meta[key] = value

            # Record metadata change event.
            self.record_event(
                server_id,
                "metadata_update",
                {
                    "key": key,
                    "old_value": old_value,
                    "new_value": value,
                    "message": f"Metadata '{key}' updated"
                }
            )

            logger.debug(f"Server {server_id} metadata updated: {key} = {value}")

    def get_metadata(self, server_id: str, key: str) -> Any:
        """
        Get metadata value for a server.

        Args:
            server_id: Unique identifier for the server
            key: Metadata key

        Returns:
            Metadata value or None if not found
        """
        with self._lock:
            # .get() on the defaultdict avoids inserting an empty dict for
            # unknown servers.
            return self._server_metadata.get(server_id, {}).get(key)

    def record_event(self, server_id: str, event_type: str, details: Dict) -> None:
        """
        Record an event for a server.

        Args:
            server_id: Unique identifier for the server
            event_type: Type of event (e.g., "started", "stopped", "error", "health_check")
            details: Dictionary containing event details
        """
        with self._lock:
            event = Event(
                timestamp=datetime.now(),
                event_type=event_type,
                # Copy so later caller-side mutation can't alter history.
                details=details.copy() if details else {},
                server_id=server_id
            )

            # Bounded deque drops the oldest event automatically when full.
            self._server_events[server_id].append(event)

            logger.debug(f"Event recorded for server {server_id}: {event_type}")

    def get_events(self, server_id: str, limit: int = 100) -> List[Event]:
        """
        Get recent events for a server.

        Args:
            server_id: Unique identifier for the server
            limit: Maximum number of events to return (default: 100)

        Returns:
            List of events ordered by timestamp (most recent first)
        """
        with self._lock:
            # .get() avoids inserting an empty deque for unknown servers.
            stored = self._server_events.get(server_id)
            if not stored:
                return []

            # Snapshot under the lock, then return most recent first.
            events = list(stored)
            events.reverse()
            return events[:limit]

    def clear_events(self, server_id: str) -> None:
        """
        Clear all events for a server.

        Args:
            server_id: Unique identifier for the server
        """
        with self._lock:
            if server_id in self._server_events:
                self._server_events[server_id].clear()
                logger.info(f"Cleared all events for server: {server_id}")

    def get_uptime(self, server_id: str) -> Optional[timedelta]:
        """
        Calculate uptime for a server based on start/stop times.

        Args:
            server_id: Unique identifier for the server

        Returns:
            Server uptime as timedelta, or None if server never started
        """
        with self._lock:
            start_time = self._start_times.get(server_id)
            if start_time is None:
                return None

            # If server is currently running, calculate from start time to now.
            current_state = self.get_status(server_id)
            if current_state == ServerState.RUNNING:
                return datetime.now() - start_time

            # If server is stopped, calculate from start to stop time.
            stop_time = self._stop_times.get(server_id)
            if stop_time is not None and stop_time > start_time:
                return stop_time - start_time

            # Start time but no valid stop time: assume currently running.
            return datetime.now() - start_time

    def record_start_time(self, server_id: str) -> None:
        """
        Record the start time for a server and emit a ``started`` event.

        Args:
            server_id: Unique identifier for the server
        """
        with self._lock:
            start_time = datetime.now()
            self._start_times[server_id] = start_time

            # Record start event.
            self.record_event(
                server_id,
                "started",
                {
                    "start_time": start_time.isoformat(),
                    "message": "Server started"
                }
            )

            logger.info(f"Recorded start time for server: {server_id}")

    def record_stop_time(self, server_id: str) -> None:
        """
        Record the stop time for a server and emit a ``stopped`` event
        that includes the final uptime (if a start time was recorded).

        Args:
            server_id: Unique identifier for the server
        """
        with self._lock:
            stop_time = datetime.now()
            self._stop_times[server_id] = stop_time

            # Calculate final uptime directly from the recorded start time.
            start_time = self._start_times.get(server_id)
            uptime = (stop_time - start_time) if start_time else None

            # Record stop event.
            self.record_event(
                server_id,
                "stopped",
                {
                    "stop_time": stop_time.isoformat(),
                    "uptime_seconds": uptime.total_seconds() if uptime else None,
                    "message": "Server stopped"
                }
            )

            logger.info(f"Recorded stop time for server: {server_id}")

    def get_all_server_ids(self) -> List[str]:
        """
        Get all server IDs that have been tracked.

        Returns:
            Sorted list of all server IDs seen by any tracking structure
        """
        with self._lock:
            # A server may appear in any subset of the structures, so union
            # across all of them.
            all_ids = set()
            all_ids.update(self._server_states.keys())
            all_ids.update(self._server_metadata.keys())
            all_ids.update(self._server_events.keys())
            all_ids.update(self._start_times.keys())
            all_ids.update(self._stop_times.keys())

            return sorted(all_ids)

    def get_server_summary(self, server_id: str) -> Dict[str, Any]:
        """
        Get comprehensive summary of server status.

        Args:
            server_id: Unique identifier for the server

        Returns:
            Dictionary containing current state, metadata, recent event count,
            uptime, start/stop times, and the timestamp of the last event
        """
        with self._lock:
            events = self._server_events.get(server_id)
            return {
                "server_id": server_id,
                "state": self.get_status(server_id).value,
                # Copy so callers can't mutate internal metadata.
                "metadata": self._server_metadata.get(server_id, {}).copy(),
                "recent_events_count": len(events) if events else 0,
                "uptime": self.get_uptime(server_id),
                "start_time": self._start_times.get(server_id),
                "stop_time": self._stop_times.get(server_id),
                # deque supports O(1) indexing at the ends; no copy needed.
                "last_event_time": events[-1].timestamp if events else None,
            }

    def cleanup_old_data(self, days_to_keep: int = 7) -> None:
        """
        Clean up old data to prevent memory bloat.

        Drops events older than the cutoff; the bounded deques are rebuilt
        with the same maximum length.

        Args:
            days_to_keep: Number of days of data to keep (default: 7)
        """
        cutoff_time = datetime.now() - timedelta(days=days_to_keep)

        with self._lock:
            cleaned_servers = []

            for server_id in list(self._server_events.keys()):
                events = self._server_events[server_id]
                if events:
                    original_count = len(events)
                    # Keep only events at or after the cutoff.
                    filtered_events = [
                        event for event in events
                        if event.timestamp >= cutoff_time
                    ]

                    # Replace the deque, preserving the size bound.
                    self._server_events[server_id] = deque(
                        filtered_events, maxlen=self._MAX_EVENTS
                    )

                    if len(filtered_events) < original_count:
                        cleaned_servers.append(server_id)

            if cleaned_servers:
                logger.info(f"Cleaned old events for {len(cleaned_servers)} servers")
|
|
@@ -80,7 +80,7 @@ class TextualSpinner(Static):
|
|
|
80
80
|
self._paused = True
|
|
81
81
|
self._timer.pause()
|
|
82
82
|
# Store current state but don't clear it completely
|
|
83
|
-
self._previous_state = self.
|
|
83
|
+
self._previous_state = self.renderable
|
|
84
84
|
self.update("")
|
|
85
85
|
|
|
86
86
|
def resume(self):
|
|
@@ -94,4 +94,8 @@ class TextualSpinner(Static):
|
|
|
94
94
|
if self._is_spinning and self._timer and self._paused:
|
|
95
95
|
self._paused = False
|
|
96
96
|
self._timer.resume()
|
|
97
|
-
|
|
97
|
+
# Restore previous state instead of immediately updating display
|
|
98
|
+
if self._previous_state:
|
|
99
|
+
self.update(self._previous_state)
|
|
100
|
+
else:
|
|
101
|
+
self.update_frame_display()
|
code_puppy/model_factory.py
CHANGED
|
@@ -9,10 +9,11 @@ from anthropic import AsyncAnthropic
|
|
|
9
9
|
from openai import AsyncAzureOpenAI # For Azure OpenAI client
|
|
10
10
|
from pydantic_ai.models.anthropic import AnthropicModel
|
|
11
11
|
from pydantic_ai.models.gemini import GeminiModel
|
|
12
|
-
from pydantic_ai.models.openai import
|
|
12
|
+
from pydantic_ai.models.openai import OpenAIChatModel
|
|
13
13
|
from pydantic_ai.providers.anthropic import AnthropicProvider
|
|
14
14
|
from pydantic_ai.providers.google_gla import GoogleGLAProvider
|
|
15
15
|
from pydantic_ai.providers.openai import OpenAIProvider
|
|
16
|
+
from pydantic_ai.providers.cerebras import CerebrasProvider
|
|
16
17
|
|
|
17
18
|
from . import callbacks
|
|
18
19
|
from .config import EXTRA_MODELS_FILE
|
|
@@ -116,7 +117,7 @@ class ModelFactory:
|
|
|
116
117
|
elif model_type == "openai":
|
|
117
118
|
provider = OpenAIProvider(api_key=os.environ.get("OPENAI_API_KEY", ""))
|
|
118
119
|
|
|
119
|
-
model =
|
|
120
|
+
model = OpenAIChatModel(model_name=model_config["name"], provider=provider)
|
|
120
121
|
setattr(model, "provider", provider)
|
|
121
122
|
return model
|
|
122
123
|
|
|
@@ -191,7 +192,7 @@ class ModelFactory:
|
|
|
191
192
|
max_retries=azure_max_retries,
|
|
192
193
|
)
|
|
193
194
|
provider = OpenAIProvider(openai_client=azure_client)
|
|
194
|
-
model =
|
|
195
|
+
model = OpenAIChatModel(model_name=model_config["name"], provider=provider)
|
|
195
196
|
setattr(model, "provider", provider)
|
|
196
197
|
return model
|
|
197
198
|
|
|
@@ -206,7 +207,7 @@ class ModelFactory:
|
|
|
206
207
|
provider_args["api_key"] = api_key
|
|
207
208
|
provider = OpenAIProvider(**provider_args)
|
|
208
209
|
|
|
209
|
-
model =
|
|
210
|
+
model = OpenAIChatModel(model_name=model_config["name"], provider=provider)
|
|
210
211
|
setattr(model, "provider", provider)
|
|
211
212
|
return model
|
|
212
213
|
|
|
@@ -231,5 +232,19 @@ class ModelFactory:
|
|
|
231
232
|
google_gla = CustomGoogleGLAProvider(api_key=api_key)
|
|
232
233
|
model = GeminiModel(model_name=model_config["name"], provider=google_gla)
|
|
233
234
|
return model
|
|
235
|
+
elif model_type == "cerebras":
|
|
236
|
+
url, headers, verify, api_key = get_custom_config(model_config)
|
|
237
|
+
client = create_async_client(headers=headers, verify=verify)
|
|
238
|
+
provider_args = dict(
|
|
239
|
+
api_key=api_key,
|
|
240
|
+
http_client=client,
|
|
241
|
+
)
|
|
242
|
+
if api_key:
|
|
243
|
+
provider_args["api_key"] = api_key
|
|
244
|
+
provider = CerebrasProvider(**provider_args)
|
|
245
|
+
|
|
246
|
+
model = OpenAIChatModel(model_name=model_config["name"], provider=provider)
|
|
247
|
+
setattr(model, "provider", provider)
|
|
248
|
+
return model
|
|
234
249
|
else:
|
|
235
250
|
raise ValueError(f"Unsupported model type: {model_type}")
|
code_puppy/models.json
CHANGED
|
@@ -5,7 +5,7 @@
|
|
|
5
5
|
"context_length": 400000
|
|
6
6
|
},
|
|
7
7
|
"Cerebras-Qwen3-Coder-480b": {
|
|
8
|
-
"type": "
|
|
8
|
+
"type": "cerebras",
|
|
9
9
|
"name": "qwen-3-coder-480b",
|
|
10
10
|
"custom_endpoint": {
|
|
11
11
|
"url": "https://api.cerebras.ai/v1",
|
|
@@ -14,7 +14,7 @@
|
|
|
14
14
|
"context_length": 131072
|
|
15
15
|
},
|
|
16
16
|
"Cerebras-Qwen3-235b-a22b-instruct-2507": {
|
|
17
|
-
"type": "
|
|
17
|
+
"type": "cerebras",
|
|
18
18
|
"name": "qwen-3-235b-a22b-instruct-2507",
|
|
19
19
|
"custom_endpoint": {
|
|
20
20
|
"url": "https://api.cerebras.ai/v1",
|
|
@@ -23,7 +23,7 @@
|
|
|
23
23
|
"context_length": 64000
|
|
24
24
|
},
|
|
25
25
|
"Cerebras-gpt-oss-120b": {
|
|
26
|
-
"type": "
|
|
26
|
+
"type": "cerebras",
|
|
27
27
|
"name": "gpt-oss-120b",
|
|
28
28
|
"custom_endpoint": {
|
|
29
29
|
"url": "https://api.cerebras.ai/v1",
|
|
@@ -32,7 +32,7 @@
|
|
|
32
32
|
"context_length": 131072
|
|
33
33
|
},
|
|
34
34
|
"Cerebras-Qwen-3-32b": {
|
|
35
|
-
"type": "
|
|
35
|
+
"type": "cerebras",
|
|
36
36
|
"name": "qwen-3-32b",
|
|
37
37
|
"custom_endpoint": {
|
|
38
38
|
"url": "https://api.cerebras.ai/v1",
|
|
@@ -45,6 +45,24 @@
|
|
|
45
45
|
"name": "o3",
|
|
46
46
|
"context_length": 200000
|
|
47
47
|
},
|
|
48
|
+
"grok-4": {
|
|
49
|
+
"type": "custom_openai",
|
|
50
|
+
"name": "grok-4",
|
|
51
|
+
"custom_endpoint": {
|
|
52
|
+
"url": "https://api.x.ai/v1",
|
|
53
|
+
"api_key": "$XAI_API_KEY"
|
|
54
|
+
},
|
|
55
|
+
"context_length": 256000
|
|
56
|
+
},
|
|
57
|
+
"grok-code-fast-1": {
|
|
58
|
+
"type": "custom_openai",
|
|
59
|
+
"name": "grok-code-fast-1",
|
|
60
|
+
"custom_endpoint": {
|
|
61
|
+
"url": "https://api.x.ai/v1",
|
|
62
|
+
"api_key": "$XAI_API_KEY"
|
|
63
|
+
},
|
|
64
|
+
"context_length": 256000
|
|
65
|
+
},
|
|
48
66
|
"gemini-2.5-flash-preview-05-20": {
|
|
49
67
|
"type": "gemini",
|
|
50
68
|
"name": "gemini-2.5-flash-preview-05-20",
|
code_puppy/tui/app.py
CHANGED
|
@@ -13,6 +13,7 @@ from textual.reactive import reactive
|
|
|
13
13
|
from textual.widgets import Footer, ListView
|
|
14
14
|
|
|
15
15
|
from code_puppy.agent import get_code_generation_agent, get_custom_usage_limits
|
|
16
|
+
from code_puppy.agents.runtime_manager import get_runtime_agent_manager
|
|
16
17
|
from code_puppy.command_line.command_handler import handle_command
|
|
17
18
|
from code_puppy.config import (
|
|
18
19
|
get_model_name,
|
|
@@ -95,7 +96,7 @@ class CodePuppyTUI(App):
|
|
|
95
96
|
|
|
96
97
|
def __init__(self, initial_command: str = None, **kwargs):
|
|
97
98
|
super().__init__(**kwargs)
|
|
98
|
-
self.
|
|
99
|
+
self.agent_manager = None
|
|
99
100
|
self._current_worker = None
|
|
100
101
|
self.initial_command = initial_command
|
|
101
102
|
|
|
@@ -125,7 +126,8 @@ class CodePuppyTUI(App):
|
|
|
125
126
|
self.current_model = get_model_name()
|
|
126
127
|
self.puppy_name = get_puppy_name()
|
|
127
128
|
|
|
128
|
-
|
|
129
|
+
# Use runtime manager to ensure we always have the current agent
|
|
130
|
+
self.agent_manager = get_runtime_agent_manager()
|
|
129
131
|
|
|
130
132
|
# Update status bar
|
|
131
133
|
status_bar = self.query_one(StatusBar)
|
|
@@ -138,6 +140,10 @@ class CodePuppyTUI(App):
|
|
|
138
140
|
"Welcome to Code Puppy 🐶!\n💨 YOLO mode is enabled in TUI: commands will execute without confirmation."
|
|
139
141
|
)
|
|
140
142
|
|
|
143
|
+
# Get current agent and display info
|
|
144
|
+
get_code_generation_agent()
|
|
145
|
+
self.add_system_message(f"🐕 Loaded agent '{self.puppy_name}' with model '{self.current_model}'")
|
|
146
|
+
|
|
141
147
|
# Start the message renderer EARLY to catch startup messages
|
|
142
148
|
# Using call_after_refresh to start it as soon as possible after mount
|
|
143
149
|
self.call_after_refresh(self.start_message_renderer_sync)
|
|
@@ -413,8 +419,7 @@ class CodePuppyTUI(App):
|
|
|
413
419
|
if message.strip().startswith("/agent"):
|
|
414
420
|
# The command handler will emit messages directly to our messaging system
|
|
415
421
|
handle_command(message.strip())
|
|
416
|
-
#
|
|
417
|
-
self.agent = get_code_generation_agent()
|
|
422
|
+
# Agent manager will automatically use the latest agent
|
|
418
423
|
return
|
|
419
424
|
|
|
420
425
|
# Handle exit commands
|
|
@@ -435,31 +440,18 @@ class CodePuppyTUI(App):
|
|
|
435
440
|
return
|
|
436
441
|
|
|
437
442
|
# Process with agent
|
|
438
|
-
if self.
|
|
443
|
+
if self.agent_manager:
|
|
439
444
|
try:
|
|
440
445
|
self.update_agent_progress("Processing", 25)
|
|
441
446
|
|
|
442
|
-
#
|
|
447
|
+
# Use agent_manager's run_with_mcp to handle MCP servers properly
|
|
443
448
|
try:
|
|
444
|
-
|
|
445
|
-
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
usage_limits=get_custom_usage_limits(),
|
|
451
|
-
)
|
|
452
|
-
except Exception as mcp_error:
|
|
453
|
-
# Log MCP error and fall back to running without MCP servers
|
|
454
|
-
self.log(f"MCP server error: {str(mcp_error)}")
|
|
455
|
-
self.add_system_message(
|
|
456
|
-
"⚠️ MCP server error, running without MCP servers"
|
|
457
|
-
)
|
|
458
|
-
result = await self.agent.run(
|
|
459
|
-
message,
|
|
460
|
-
message_history=get_message_history(),
|
|
461
|
-
usage_limits=get_custom_usage_limits(),
|
|
462
|
-
)
|
|
449
|
+
self.update_agent_progress("Processing", 50)
|
|
450
|
+
result = await self.agent_manager.run_with_mcp(
|
|
451
|
+
message,
|
|
452
|
+
message_history=get_message_history(),
|
|
453
|
+
usage_limits=get_custom_usage_limits(),
|
|
454
|
+
)
|
|
463
455
|
|
|
464
456
|
if not result or not hasattr(result, "output"):
|
|
465
457
|
self.add_error_message("Invalid response format from agent")
|
|
@@ -496,7 +488,7 @@ class CodePuppyTUI(App):
|
|
|
496
488
|
f"Agent processing failed: {str(agent_error)}"
|
|
497
489
|
)
|
|
498
490
|
else:
|
|
499
|
-
self.add_error_message("Agent not initialized")
|
|
491
|
+
self.add_error_message("Agent manager not initialized")
|
|
500
492
|
|
|
501
493
|
except Exception as e:
|
|
502
494
|
self.add_error_message(f"Error processing message: {str(e)}")
|
|
@@ -618,7 +610,7 @@ class CodePuppyTUI(App):
|
|
|
618
610
|
new_model = get_model_name()
|
|
619
611
|
self.current_model = new_model
|
|
620
612
|
# Reinitialize agent with new model
|
|
621
|
-
self.
|
|
613
|
+
self.agent_manager.reload_agent()
|
|
622
614
|
|
|
623
615
|
# Update status bar
|
|
624
616
|
status_bar = self.query_one(StatusBar)
|
|
@@ -8,16 +8,18 @@ from code_puppy.tui.app import CodePuppyTUI
|
|
|
8
8
|
class TestTUIAgentCommand:
|
|
9
9
|
"""Test the TUI's handling of /agent commands."""
|
|
10
10
|
|
|
11
|
-
@patch("code_puppy.tui.app.
|
|
11
|
+
@patch("code_puppy.tui.app.get_runtime_agent_manager")
|
|
12
12
|
@patch("code_puppy.tui.app.handle_command")
|
|
13
|
-
def test_tui_handles_agent_command(self, mock_handle_command,
|
|
13
|
+
def test_tui_handles_agent_command(self, mock_handle_command, mock_get_manager):
|
|
14
14
|
"""Test that TUI properly delegates /agent commands to command handler."""
|
|
15
15
|
# Create a TUI app instance
|
|
16
16
|
app = CodePuppyTUI()
|
|
17
17
|
|
|
18
|
-
# Mock the agent
|
|
18
|
+
# Mock the agent manager and agent
|
|
19
19
|
mock_agent_instance = MagicMock()
|
|
20
|
-
|
|
20
|
+
mock_manager = MagicMock()
|
|
21
|
+
mock_manager.get_agent.return_value = mock_agent_instance
|
|
22
|
+
mock_get_manager.return_value = mock_manager
|
|
21
23
|
|
|
22
24
|
# Mock handle_command to simulate successful processing
|
|
23
25
|
mock_handle_command.return_value = True
|
|
@@ -44,22 +46,27 @@ class TestTUIAgentCommand:
|
|
|
44
46
|
# Verify that handle_command was called with the correct argument
|
|
45
47
|
mock_handle_command.assert_called_once_with(message)
|
|
46
48
|
|
|
47
|
-
# Verify that
|
|
48
|
-
|
|
49
|
+
# Verify that agent manager's get_agent was called to refresh the agent instance
|
|
50
|
+
mock_manager.get_agent.assert_called()
|
|
49
51
|
|
|
50
|
-
@patch("code_puppy.tui.app.
|
|
51
|
-
def test_tui_refreshes_agent_after_command(self,
|
|
52
|
+
@patch("code_puppy.tui.app.get_runtime_agent_manager")
|
|
53
|
+
def test_tui_refreshes_agent_after_command(self, mock_get_manager):
|
|
52
54
|
"""Test that TUI refreshes its agent instance after processing /agent command."""
|
|
53
55
|
# Create a TUI app instance
|
|
54
56
|
app = CodePuppyTUI()
|
|
55
57
|
|
|
56
|
-
#
|
|
58
|
+
# Mock the agent manager
|
|
59
|
+
mock_manager = MagicMock()
|
|
57
60
|
initial_agent = MagicMock()
|
|
58
|
-
app.agent = initial_agent
|
|
59
|
-
|
|
60
|
-
# Mock get_code_generation_agent to return a new agent instance
|
|
61
61
|
new_agent = MagicMock()
|
|
62
|
-
|
|
62
|
+
|
|
63
|
+
# Set initial agent
|
|
64
|
+
app.agent = initial_agent
|
|
65
|
+
app.agent_manager = mock_manager
|
|
66
|
+
|
|
67
|
+
# Mock manager to return a new agent instance
|
|
68
|
+
mock_manager.get_agent.return_value = new_agent
|
|
69
|
+
mock_get_manager.return_value = mock_manager
|
|
63
70
|
|
|
64
71
|
# Simulate that an /agent command was processed
|
|
65
72
|
with patch("code_puppy.tui.app.handle_command"):
|
|
@@ -68,5 +75,5 @@ class TestTUIAgentCommand:
|
|
|
68
75
|
loop = asyncio.get_event_loop()
|
|
69
76
|
loop.run_until_complete(app.process_message("/agent code-puppy"))
|
|
70
77
|
|
|
71
|
-
# Verify that the agent was refreshed
|
|
72
|
-
|
|
78
|
+
# Verify that the agent was refreshed through the manager
|
|
79
|
+
mock_manager.get_agent.assert_called()
|