code-puppy 0.0.287__py3-none-any.whl → 0.0.323__py3-none-any.whl
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- code_puppy/__init__.py +3 -1
- code_puppy/agents/agent_code_puppy.py +5 -4
- code_puppy/agents/agent_creator_agent.py +22 -18
- code_puppy/agents/agent_manager.py +2 -2
- code_puppy/agents/base_agent.py +496 -102
- code_puppy/callbacks.py +8 -0
- code_puppy/chatgpt_codex_client.py +283 -0
- code_puppy/cli_runner.py +795 -0
- code_puppy/command_line/add_model_menu.py +19 -16
- code_puppy/command_line/attachments.py +10 -5
- code_puppy/command_line/autosave_menu.py +269 -41
- code_puppy/command_line/colors_menu.py +515 -0
- code_puppy/command_line/command_handler.py +10 -24
- code_puppy/command_line/config_commands.py +106 -25
- code_puppy/command_line/core_commands.py +32 -20
- code_puppy/command_line/mcp/add_command.py +3 -16
- code_puppy/command_line/mcp/base.py +0 -3
- code_puppy/command_line/mcp/catalog_server_installer.py +15 -15
- code_puppy/command_line/mcp/custom_server_form.py +66 -5
- code_puppy/command_line/mcp/custom_server_installer.py +17 -17
- code_puppy/command_line/mcp/edit_command.py +15 -22
- code_puppy/command_line/mcp/handler.py +7 -2
- code_puppy/command_line/mcp/help_command.py +2 -2
- code_puppy/command_line/mcp/install_command.py +10 -14
- code_puppy/command_line/mcp/install_menu.py +2 -6
- code_puppy/command_line/mcp/list_command.py +2 -2
- code_puppy/command_line/mcp/logs_command.py +174 -65
- code_puppy/command_line/mcp/remove_command.py +2 -2
- code_puppy/command_line/mcp/restart_command.py +7 -2
- code_puppy/command_line/mcp/search_command.py +16 -10
- code_puppy/command_line/mcp/start_all_command.py +16 -6
- code_puppy/command_line/mcp/start_command.py +12 -10
- code_puppy/command_line/mcp/status_command.py +4 -5
- code_puppy/command_line/mcp/stop_all_command.py +5 -1
- code_puppy/command_line/mcp/stop_command.py +6 -4
- code_puppy/command_line/mcp/test_command.py +2 -2
- code_puppy/command_line/mcp/wizard_utils.py +20 -16
- code_puppy/command_line/model_settings_menu.py +53 -7
- code_puppy/command_line/motd.py +1 -1
- code_puppy/command_line/pin_command_completion.py +82 -7
- code_puppy/command_line/prompt_toolkit_completion.py +32 -9
- code_puppy/command_line/session_commands.py +11 -4
- code_puppy/config.py +217 -53
- code_puppy/error_logging.py +118 -0
- code_puppy/gemini_code_assist.py +385 -0
- code_puppy/keymap.py +126 -0
- code_puppy/main.py +5 -745
- code_puppy/mcp_/__init__.py +17 -0
- code_puppy/mcp_/blocking_startup.py +63 -36
- code_puppy/mcp_/captured_stdio_server.py +1 -1
- code_puppy/mcp_/config_wizard.py +4 -4
- code_puppy/mcp_/dashboard.py +15 -6
- code_puppy/mcp_/managed_server.py +25 -5
- code_puppy/mcp_/manager.py +65 -0
- code_puppy/mcp_/mcp_logs.py +224 -0
- code_puppy/mcp_/registry.py +6 -6
- code_puppy/messaging/__init__.py +184 -2
- code_puppy/messaging/bus.py +610 -0
- code_puppy/messaging/commands.py +167 -0
- code_puppy/messaging/markdown_patches.py +57 -0
- code_puppy/messaging/message_queue.py +3 -3
- code_puppy/messaging/messages.py +470 -0
- code_puppy/messaging/renderers.py +43 -141
- code_puppy/messaging/rich_renderer.py +900 -0
- code_puppy/messaging/spinner/console_spinner.py +39 -2
- code_puppy/model_factory.py +292 -53
- code_puppy/model_utils.py +57 -48
- code_puppy/models.json +19 -5
- code_puppy/plugins/__init__.py +152 -10
- code_puppy/plugins/chatgpt_oauth/config.py +20 -12
- code_puppy/plugins/chatgpt_oauth/oauth_flow.py +5 -6
- code_puppy/plugins/chatgpt_oauth/register_callbacks.py +3 -3
- code_puppy/plugins/chatgpt_oauth/test_plugin.py +30 -13
- code_puppy/plugins/chatgpt_oauth/utils.py +180 -65
- code_puppy/plugins/claude_code_oauth/config.py +15 -11
- code_puppy/plugins/claude_code_oauth/register_callbacks.py +28 -0
- code_puppy/plugins/claude_code_oauth/utils.py +6 -1
- code_puppy/plugins/example_custom_command/register_callbacks.py +2 -2
- code_puppy/plugins/oauth_puppy_html.py +3 -0
- code_puppy/plugins/shell_safety/agent_shell_safety.py +1 -134
- code_puppy/plugins/shell_safety/command_cache.py +156 -0
- code_puppy/plugins/shell_safety/register_callbacks.py +77 -3
- code_puppy/prompts/codex_system_prompt.md +310 -0
- code_puppy/pydantic_patches.py +131 -0
- code_puppy/session_storage.py +2 -1
- code_puppy/status_display.py +7 -5
- code_puppy/terminal_utils.py +126 -0
- code_puppy/tools/agent_tools.py +131 -70
- code_puppy/tools/browser/browser_control.py +10 -14
- code_puppy/tools/browser/browser_interactions.py +20 -28
- code_puppy/tools/browser/browser_locators.py +27 -29
- code_puppy/tools/browser/browser_navigation.py +9 -9
- code_puppy/tools/browser/browser_screenshot.py +12 -14
- code_puppy/tools/browser/browser_scripts.py +17 -29
- code_puppy/tools/browser/browser_workflows.py +24 -25
- code_puppy/tools/browser/camoufox_manager.py +22 -26
- code_puppy/tools/command_runner.py +410 -88
- code_puppy/tools/common.py +51 -38
- code_puppy/tools/file_modifications.py +98 -24
- code_puppy/tools/file_operations.py +113 -202
- code_puppy/version_checker.py +28 -13
- {code_puppy-0.0.287.data → code_puppy-0.0.323.data}/data/code_puppy/models.json +19 -5
- {code_puppy-0.0.287.dist-info → code_puppy-0.0.323.dist-info}/METADATA +3 -8
- code_puppy-0.0.323.dist-info/RECORD +168 -0
- code_puppy/tui_state.py +0 -55
- code_puppy-0.0.287.dist-info/RECORD +0 -153
- {code_puppy-0.0.287.data → code_puppy-0.0.323.data}/data/code_puppy/models_dev_api.json +0 -0
- {code_puppy-0.0.287.dist-info → code_puppy-0.0.323.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.287.dist-info → code_puppy-0.0.323.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.287.dist-info → code_puppy-0.0.323.dist-info}/licenses/LICENSE +0 -0
@@ -149,6 +149,98 @@ def load_stored_tokens() -> Optional[Dict[str, Any]]:
    return None


+def get_valid_access_token() -> Optional[str]:
+    """Get a valid access token, refreshing if expired.
+
+    Returns:
+        Valid access token string, or None if not authenticated or refresh failed.
+    """
+    tokens = load_stored_tokens()
+    if not tokens:
+        logger.debug("No stored ChatGPT OAuth tokens found")
+        return None
+
+    access_token = tokens.get("access_token")
+    if not access_token:
+        logger.debug("No access_token in stored tokens")
+        return None
+
+    # Check if token is expired by parsing JWT claims
+    claims = parse_jwt_claims(access_token)
+    if claims:
+        exp = claims.get("exp")
+        if exp and isinstance(exp, (int, float)):
+            # Add 30 second buffer before expiry
+            if time.time() > exp - 30:
+                logger.info("ChatGPT OAuth token expired, attempting refresh")
+                refreshed = refresh_access_token()
+                if refreshed:
+                    return refreshed
+                logger.warning("Token refresh failed")
+                return None
+
+    return access_token
+
+
+def refresh_access_token() -> Optional[str]:
+    """Refresh the access token using the refresh token.
+
+    Returns:
+        New access token if refresh succeeded, None otherwise.
+    """
+    tokens = load_stored_tokens()
+    if not tokens:
+        return None
+
+    refresh_token = tokens.get("refresh_token")
+    if not refresh_token:
+        logger.debug("No refresh_token available")
+        return None
+
+    payload = {
+        "grant_type": "refresh_token",
+        "refresh_token": refresh_token,
+        "client_id": CHATGPT_OAUTH_CONFIG["client_id"],
+    }
+
+    headers = {
+        "Content-Type": "application/x-www-form-urlencoded",
+    }
+
+    try:
+        response = requests.post(
+            CHATGPT_OAUTH_CONFIG["token_url"],
+            data=payload,
+            headers=headers,
+            timeout=30,
+        )
+
+        if response.status_code == 200:
+            new_tokens = response.json()
+            # Merge with existing tokens (preserve account_id, etc.)
+            tokens.update(
+                {
+                    "access_token": new_tokens.get("access_token"),
+                    "refresh_token": new_tokens.get("refresh_token", refresh_token),
+                    "id_token": new_tokens.get("id_token", tokens.get("id_token")),
+                    "last_refresh": datetime.datetime.now(datetime.timezone.utc)
+                    .isoformat()
+                    .replace("+00:00", "Z"),
+                }
+            )
+            if save_tokens(tokens):
+                logger.info("Successfully refreshed ChatGPT OAuth token")
+                return tokens["access_token"]
+        else:
+            logger.error(
+                "Token refresh failed: %s - %s", response.status_code, response.text
+            )
+    except Exception as exc:
+        logger.error("Token refresh error: %s", exc)
+
+    return None
+
+
 def save_tokens(tokens: Dict[str, Any]) -> bool:
     if tokens is None:
         raise TypeError("tokens cannot be None")
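For orientation, here is a minimal usage sketch of the new helper added above. The import path follows the wheel's file layout (code_puppy/plugins/chatgpt_oauth/utils.py); the surrounding setup is illustrative and not taken from the package itself.

```python
# Minimal sketch (assumes the ChatGPT OAuth flow has already stored tokens on disk).
from code_puppy.plugins.chatgpt_oauth.utils import get_valid_access_token

token = get_valid_access_token()  # refreshes automatically if the JWT is within 30s of expiry
if token is None:
    print("Not authenticated - run the ChatGPT OAuth flow first")
else:
    headers = {"Authorization": f"Bearer {token}"}  # attach to outgoing Codex API requests
```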
@@ -248,103 +340,126 @@ def exchange_code_for_tokens(
    return None


-
-
+# Default models available via ChatGPT Codex API
+# These are the known models that work with ChatGPT OAuth tokens
+# Based on codex-rs CLI and shell-scripts/codex-call.sh
+DEFAULT_CODEX_MODELS = [
+    "gpt-5.2",
+    "gpt-5.2-codex",
+]
+

-
-
+def fetch_chatgpt_models(access_token: str, account_id: str) -> Optional[List[str]]:
+    """Fetch available models from ChatGPT Codex API.
+
+    Attempts to fetch models from the API, but falls back to a default list
+    of known Codex-compatible models if the API is unavailable.

     Args:
-
+        access_token: OAuth access token for authentication
+        account_id: ChatGPT account ID (required for the API)

     Returns:
-        List of
+        List of model IDs, or default list if API fails
     """
-
-    base_url = CHATGPT_OAUTH_CONFIG["api_base_url"].rstrip("/")
-    models_url = f"{base_url}/v1/models"
+    import platform

-    #
-
+    # Build the models URL with client version
+    client_version = CHATGPT_OAUTH_CONFIG.get("client_version", "0.72.0")
+    base_url = CHATGPT_OAUTH_CONFIG["api_base_url"].rstrip("/")
+    models_url = f"{base_url}/models"
+
+    # Build User-Agent to match codex-rs CLI format
+    originator = CHATGPT_OAUTH_CONFIG.get("originator", "codex_cli_rs")
+    os_name = platform.system()
+    if os_name == "Darwin":
+        os_name = "Mac OS"
+    os_version = platform.release()
+    arch = platform.machine()
+    user_agent = (
+        f"{originator}/{client_version} ({os_name} {os_version}; {arch}) "
+        "Terminal_Codex_CLI"
+    )

     headers = {
-        "Authorization": f"Bearer {
+        "Authorization": f"Bearer {access_token}",
+        "ChatGPT-Account-Id": account_id,
+        "User-Agent": user_agent,
+        "originator": originator,
+        "Accept": "application/json",
     }

-
-
-
-    if response.status_code != 200:
-        logger.error(
-            "Failed to fetch models: HTTP %d - %s",
-            response.status_code,
-            response.text,
-        )
-        return None
-
-    # Parse JSON response
-    try:
-        data = response.json()
-        if "data" not in data or not isinstance(data["data"], list):
-            logger.error("Invalid response format: missing 'data' list")
-            return None
-    except (json.JSONDecodeError, ValueError) as exc:
-        logger.error("Failed to parse JSON response: %s", exc)
-        return None
-
-    # Filter models: start with "gpt-" or "o1-" and not in blocklist
-    filtered_models = []
-    seen_models = set()  # For deduplication while preserving order
-
-    for model in data["data"]:
-        # Skip None entries
-        if model is None:
-            continue
+    # Query params
+    params = {"client_version": client_version}

-
-
-            continue
-
-        # Skip if already seen (deduplication)
-        if model_id in seen_models:
-            continue
-
-        # Check if model starts with allowed prefixes and not in blocklist
-        if (
-            model_id.startswith("gpt-") or model_id.startswith("o1-")
-        ) and model_id not in blocklist:
-            filtered_models.append(model_id)
-            seen_models.add(model_id)
+    try:
+        response = requests.get(models_url, headers=headers, params=params, timeout=30)

-
+        if response.status_code == 200:
+            # Parse JSON response
+            try:
+                data = response.json()
+                # The response has a "models" key with list of model objects
+                if "models" in data and isinstance(data["models"], list):
+                    models = []
+                    for model in data["models"]:
+                        if model is None:
+                            continue
+                        model_id = (
+                            model.get("slug") or model.get("id") or model.get("name")
+                        )
+                        if model_id:
+                            models.append(model_id)
+                    if models:
+                        return models
+            except (json.JSONDecodeError, ValueError) as exc:
+                logger.warning("Failed to parse models response: %s", exc)
+
+        # API didn't return valid models, use default list
+        logger.info(
+            "Models endpoint returned %d, using default model list",
+            response.status_code,
+        )

     except requests.exceptions.Timeout:
-        logger.
-        return None
+        logger.warning("Timeout fetching models, using default list")
     except requests.exceptions.RequestException as exc:
-        logger.
-        return None
+        logger.warning("Network error fetching models: %s, using default list", exc)
     except Exception as exc:
-        logger.
-        return None
+        logger.warning("Error fetching models: %s, using default list", exc)

+    # Return default models when API fails
+    logger.info("Using default Codex models: %s", DEFAULT_CODEX_MODELS)
+    return DEFAULT_CODEX_MODELS

-
+
+def add_models_to_extra_config(models: List[str]) -> bool:
     """Add ChatGPT models to chatgpt_models.json configuration."""
     try:
         chatgpt_models = load_chatgpt_models()
         added = 0
         for model_name in models:
             prefixed = f"{CHATGPT_OAUTH_CONFIG['prefix']}{model_name}"
+
+            # Determine supported settings based on model type
+            # All GPT-5.x models support reasoning_effort and verbosity
+            supported_settings = ["reasoning_effort", "verbosity"]
+
+            # Only codex models support xhigh reasoning effort
+            # Regular gpt-5.2 is capped at "high"
+            is_codex = "codex" in model_name.lower()
+
             chatgpt_models[prefixed] = {
-                "type": "
+                "type": "chatgpt_oauth",
                 "name": model_name,
                 "custom_endpoint": {
+                    # Codex API uses chatgpt.com/backend-api/codex, not api.openai.com
                     "url": CHATGPT_OAUTH_CONFIG["api_base_url"],
-                    "api_key": "${" + CHATGPT_OAUTH_CONFIG["api_key_env_var"] + "}",
                 },
                 "context_length": CHATGPT_OAUTH_CONFIG["default_context_length"],
                 "oauth_source": "chatgpt-oauth-plugin",
+                "supported_settings": supported_settings,
+                "supports_xhigh_reasoning": is_codex,
             }
             added += 1
         if save_chatgpt_models(chatgpt_models):
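The reworked fetcher above degrades to `DEFAULT_CODEX_MODELS` instead of returning `None` when the endpoint is unreachable. Below is a small sketch of that contract with the HTTP call stubbed out; the test scaffolding is illustrative, and the module path is assumed from the file list at the top of this diff.

```python
# Illustrative check of the fallback behavior of fetch_chatgpt_models (a sketch, not package code).
from unittest import mock

import requests

from code_puppy.plugins.chatgpt_oauth import utils

# Simulate a network timeout: the function logs a warning and falls back to the defaults.
with mock.patch("requests.get", side_effect=requests.exceptions.Timeout):
    models = utils.fetch_chatgpt_models("dummy-token", "dummy-account-id")

assert models == utils.DEFAULT_CODEX_MODELS
```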
@@ -1,6 +1,8 @@
 from pathlib import Path
 from typing import Any, Dict

+from code_puppy import config
+
 # Claude Code OAuth configuration
 CLAUDE_CODE_OAUTH_CONFIG: Dict[str, Any] = {
     # OAuth endpoints inferred from official Claude Code OAuth flow
@@ -17,8 +19,8 @@ CLAUDE_CODE_OAUTH_CONFIG: Dict[str, Any] = {
     "callback_timeout": 180,
     # Console redirect fallback (for manual flows, if needed)
     "console_redirect_uri": "https://console.anthropic.com/oauth/code/callback",
-    # Local configuration
-    "token_storage":
+    # Local configuration (uses XDG_DATA_HOME)
+    "token_storage": None,  # Set dynamically in get_token_storage_path()
     # Model configuration
     "prefix": "claude-code-",
     "default_context_length": 200000,
@@ -28,19 +30,21 @@ CLAUDE_CODE_OAUTH_CONFIG: Dict[str, Any] = {


 def get_token_storage_path() -> Path:
-    """Get the path for storing OAuth tokens."""
-
-
-    return
+    """Get the path for storing OAuth tokens (uses XDG_DATA_HOME)."""
+    data_dir = Path(config.DATA_DIR)
+    data_dir.mkdir(parents=True, exist_ok=True, mode=0o700)
+    return data_dir / "claude_code_oauth.json"


 def get_config_dir() -> Path:
-    """Get the Code Puppy configuration directory."""
-    config_dir = Path(
-    config_dir.mkdir(parents=True, exist_ok=True)
+    """Get the Code Puppy configuration directory (uses XDG_CONFIG_HOME)."""
+    config_dir = Path(config.CONFIG_DIR)
+    config_dir.mkdir(parents=True, exist_ok=True, mode=0o700)
     return config_dir


 def get_claude_models_path() -> Path:
-    """Get the path to the dedicated claude_models.json file."""
-
+    """Get the path to the dedicated claude_models.json file (uses XDG_DATA_HOME)."""
+    data_dir = Path(config.DATA_DIR)
+    data_dir.mkdir(parents=True, exist_ok=True, mode=0o700)
+    return data_dir / "claude_models.json"
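Token and model storage now goes through code_puppy.config's XDG-based directories instead of a hard-coded path. The sketch below shows roughly where tokens end up; the `code_puppy` subdirectory name and the XDG fallback are assumptions about how `config.DATA_DIR` is derived, which this hunk does not show.

```python
import os
from pathlib import Path

# Assumption: code_puppy.config.DATA_DIR follows the usual XDG convention,
# e.g. $XDG_DATA_HOME/code_puppy (falling back to ~/.local/share/code_puppy).
xdg_data_home = os.environ.get("XDG_DATA_HOME", str(Path.home() / ".local" / "share"))
token_path = Path(xdg_data_home) / "code_puppy" / "claude_code_oauth.json"
print(token_path)  # roughly what get_token_storage_path() now returns (inside a 0o700 directory)
```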
@@ -180,6 +180,31 @@ def _custom_help() -> List[Tuple[str, str]]:
     ]


+def _reload_current_agent() -> None:
+    """Reload the current agent so new auth tokens are picked up immediately."""
+    try:
+        from code_puppy.agents import get_current_agent
+
+        current_agent = get_current_agent()
+        if current_agent is None:
+            logger.debug("No current agent to reload")
+            return
+
+        # JSON agents may need to refresh their config before reload
+        if hasattr(current_agent, "refresh_config"):
+            try:
+                current_agent.refresh_config()
+            except Exception:
+                # Non-fatal, continue to reload
+                pass
+
+        current_agent.reload_code_generation_agent()
+        emit_info("Active agent reloaded with new authentication")
+    except Exception as e:
+        emit_warning(f"Authentication succeeded but agent reload failed: {e}")
+        logger.exception("Failed to reload agent after authentication")
+
+
 def _perform_authentication() -> None:
     context = prepare_oauth_context()
     code = _await_callback(context)
@@ -219,6 +244,9 @@ def _perform_authentication() -> None:
         "Claude Code models added to your configuration. Use the `claude-code-` prefix!"
     )

+    # Reload the current agent so the new auth token is picked up immediately
+    _reload_current_agent()
+

 def _handle_custom_command(command: str, name: str) -> Optional[bool]:
     if not name:
@@ -356,7 +356,11 @@ def add_models_to_extra_config(models: List[str]) -> bool:
                 "custom_endpoint": {
                     "url": CLAUDE_CODE_OAUTH_CONFIG["api_base_url"],
                     "api_key": access_token,
-                    "headers": {
+                    "headers": {
+                        "anthropic-beta": "oauth-2025-04-20,interleaved-thinking-2025-05-14",
+                        "x-app": "cli",
+                        "User-Agent": "claude-cli/2.0.61 (external, cli)",
+                    },
                 },
                 "context_length": CLAUDE_CODE_OAUTH_CONFIG["default_context_length"],
                 "oauth_source": "claude-code-plugin",
@@ -364,6 +368,7 @@ def add_models_to_extra_config(models: List[str]) -> bool:
                     "temperature",
                     "extended_thinking",
                     "budget_tokens",
+                    "interleaved_thinking",
                 ],
             }
             added += 1
@@ -39,9 +39,9 @@ def _handle_custom_command(command: str, name: str):
     rest = command.split(maxsplit=1)
     if len(rest) == 2:
         text = rest[1]
-        emit_info(f"
+        emit_info(f"example plugin echo -> {text}")
         return text
-    emit_info("
+    emit_info("example plugin echo (empty)")
     return ""

     return None
@@ -8,6 +8,7 @@ CLAUDE_LOGO_URL = "https://voideditor.com/claude-icon.png"
 CHATGPT_LOGO_URL = (
     "https://freelogopng.com/images/all_img/1681038325chatgpt-logo-transparent.png"
 )
+GEMINI_LOGO_URL = "https://upload.wikimedia.org/wikipedia/commons/thumb/8/8a/Google_Gemini_logo.svg/512px-Google_Gemini_logo.svg.png"


 def oauth_success_html(service_name: str, extra_message: Optional[str] = None) -> str:
@@ -222,4 +223,6 @@ def _service_targets(service_name: str) -> Tuple[str, str, str, str]:
         return "🐕🦺🧨", CLAUDE_LOGO_URL, "Claude logo", ""
     if "chat" in normalized or "gpt" in normalized:
         return "🐶🚀", CHATGPT_LOGO_URL, "ChatGPT logo", "invert"
+    if "gemini" in normalized or "google" in normalized:
+        return "🐶✨", GEMINI_LOGO_URL, "Gemini logo", ""
     return "🐾💥", CHATGPT_LOGO_URL, "mystery logo", "invert"
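With the Gemini branch added above, any service name that normalizes to something containing "gemini" or "google" now gets the Gemini logo on the OAuth success page. A tiny illustrative check follows; it assumes `_service_targets` lower-cases the service name into `normalized`, as the existing branches suggest.

```python
# Illustrative sketch: exercises the new Gemini branch in _service_targets.
from code_puppy.plugins.oauth_puppy_html import GEMINI_LOGO_URL, _service_targets

emoji, logo_url, alt_text, css_filter = _service_targets("gemini code assist")
assert logo_url == GEMINI_LOGO_URL
assert alt_text == "Gemini logo"
assert css_filter == ""  # the Gemini logo is not inverted, unlike the ChatGPT one
```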
|
@@ -5,13 +5,12 @@ It's designed to be ultra-lightweight with a concise prompt (<200 tokens) and
|
|
|
5
5
|
uses structured output for reliable parsing.
|
|
6
6
|
"""
|
|
7
7
|
|
|
8
|
-
import asyncio
|
|
9
8
|
from typing import TYPE_CHECKING, List
|
|
10
9
|
|
|
11
10
|
from code_puppy.agents.base_agent import BaseAgent
|
|
12
11
|
|
|
13
12
|
if TYPE_CHECKING:
|
|
14
|
-
|
|
13
|
+
pass
|
|
15
14
|
|
|
16
15
|
|
|
17
16
|
class ShellSafetyAgent(BaseAgent):
|
|
@@ -68,135 +67,3 @@ class ShellSafetyAgent(BaseAgent):
|
|
|
68
67
|
def get_available_tools(self) -> List[str]:
|
|
69
68
|
"""This agent uses no tools - pure reasoning only."""
|
|
70
69
|
return []
|
|
71
|
-
|
|
72
|
-
async def assess_command(
|
|
73
|
-
self, command: str, cwd: str | None = None
|
|
74
|
-
) -> "ShellSafetyAssessment":
|
|
75
|
-
"""Assess the safety risk of a shell command.
|
|
76
|
-
|
|
77
|
-
Args:
|
|
78
|
-
command: The shell command to assess
|
|
79
|
-
cwd: Optional working directory context
|
|
80
|
-
|
|
81
|
-
Returns:
|
|
82
|
-
ShellSafetyAssessment with risk level and reasoning
|
|
83
|
-
|
|
84
|
-
Note:
|
|
85
|
-
On timeout or error, defaults to 'high' risk with error reasoning
|
|
86
|
-
to fail safe. Optionally uses DBOS for durable execution tracking.
|
|
87
|
-
"""
|
|
88
|
-
import uuid
|
|
89
|
-
|
|
90
|
-
from pydantic_ai import Agent, UsageLimits
|
|
91
|
-
|
|
92
|
-
from code_puppy.config import get_use_dbos
|
|
93
|
-
from code_puppy.model_factory import ModelFactory
|
|
94
|
-
from code_puppy.tools.command_runner import ShellSafetyAssessment
|
|
95
|
-
|
|
96
|
-
try:
|
|
97
|
-
# Build the assessment prompt
|
|
98
|
-
prompt = f"Assess this shell command:\n\nCommand: {command}"
|
|
99
|
-
if cwd:
|
|
100
|
-
prompt += f"\nWorking directory: {cwd}"
|
|
101
|
-
|
|
102
|
-
# Get the current model
|
|
103
|
-
model_name = self.get_model_name()
|
|
104
|
-
models_config = ModelFactory.load_config()
|
|
105
|
-
|
|
106
|
-
if model_name not in models_config:
|
|
107
|
-
# Fall back to high risk if model config fails
|
|
108
|
-
return ShellSafetyAssessment(
|
|
109
|
-
risk="high",
|
|
110
|
-
reasoning="Model configuration unavailable - failing safe",
|
|
111
|
-
)
|
|
112
|
-
|
|
113
|
-
model = ModelFactory.get_model(model_name, models_config)
|
|
114
|
-
|
|
115
|
-
# Handle claude-code models: swap instructions and prepend system prompt
|
|
116
|
-
from code_puppy.model_utils import prepare_prompt_for_model
|
|
117
|
-
|
|
118
|
-
instructions = self.get_system_prompt()
|
|
119
|
-
prepared = prepare_prompt_for_model(model_name, instructions, prompt)
|
|
120
|
-
instructions = prepared.instructions
|
|
121
|
-
prompt = prepared.user_prompt
|
|
122
|
-
|
|
123
|
-
from code_puppy.model_factory import make_model_settings
|
|
124
|
-
|
|
125
|
-
model_settings = make_model_settings(model_name)
|
|
126
|
-
|
|
127
|
-
temp_agent = Agent(
|
|
128
|
-
model=model,
|
|
129
|
-
system_prompt=instructions,
|
|
130
|
-
retries=1,
|
|
131
|
-
output_type=ShellSafetyAssessment,
|
|
132
|
-
model_settings=model_settings,
|
|
133
|
-
)
|
|
134
|
-
|
|
135
|
-
# Generate unique agent name and workflow ID for DBOS (if enabled)
|
|
136
|
-
agent_name = f"shell-safety-{uuid.uuid4().hex[:8]}"
|
|
137
|
-
workflow_id = f"shell-safety-{uuid.uuid4().hex[:8]}"
|
|
138
|
-
|
|
139
|
-
# Wrap with DBOS if enabled (same pattern as agent_tools.py)
|
|
140
|
-
if get_use_dbos():
|
|
141
|
-
from pydantic_ai.durable_exec.dbos import DBOSAgent
|
|
142
|
-
|
|
143
|
-
dbos_agent = DBOSAgent(temp_agent, name=agent_name)
|
|
144
|
-
temp_agent = dbos_agent
|
|
145
|
-
|
|
146
|
-
# Run the agent as a cancellable task
|
|
147
|
-
# Import the shared task registry for cancellation support
|
|
148
|
-
from code_puppy.tools.agent_tools import _active_subagent_tasks
|
|
149
|
-
|
|
150
|
-
if get_use_dbos():
|
|
151
|
-
from dbos import DBOS, SetWorkflowID
|
|
152
|
-
|
|
153
|
-
with SetWorkflowID(workflow_id):
|
|
154
|
-
task = asyncio.create_task(
|
|
155
|
-
temp_agent.run(
|
|
156
|
-
prompt,
|
|
157
|
-
usage_limits=UsageLimits(request_limit=1),
|
|
158
|
-
)
|
|
159
|
-
)
|
|
160
|
-
_active_subagent_tasks.add(task)
|
|
161
|
-
else:
|
|
162
|
-
task = asyncio.create_task(
|
|
163
|
-
temp_agent.run(
|
|
164
|
-
prompt,
|
|
165
|
-
usage_limits=UsageLimits(request_limit=1),
|
|
166
|
-
)
|
|
167
|
-
)
|
|
168
|
-
_active_subagent_tasks.add(task)
|
|
169
|
-
|
|
170
|
-
try:
|
|
171
|
-
result = await task
|
|
172
|
-
finally:
|
|
173
|
-
_active_subagent_tasks.discard(task)
|
|
174
|
-
if task.cancelled():
|
|
175
|
-
if get_use_dbos():
|
|
176
|
-
DBOS.cancel_workflow(workflow_id)
|
|
177
|
-
|
|
178
|
-
# Return the structured output
|
|
179
|
-
# The result.output should be a ShellSafetyAssessment due to the generic type
|
|
180
|
-
output = result.output
|
|
181
|
-
|
|
182
|
-
# If it's a string, try to parse it as JSON into ShellSafetyAssessment
|
|
183
|
-
if isinstance(output, str):
|
|
184
|
-
import json
|
|
185
|
-
|
|
186
|
-
try:
|
|
187
|
-
data = json.loads(output)
|
|
188
|
-
return ShellSafetyAssessment(**data)
|
|
189
|
-
except Exception:
|
|
190
|
-
# If parsing fails, fail safe
|
|
191
|
-
return ShellSafetyAssessment(
|
|
192
|
-
risk="high",
|
|
193
|
-
reasoning=f"Could not parse assessment output: {output[:100]}",
|
|
194
|
-
)
|
|
195
|
-
|
|
196
|
-
return output
|
|
197
|
-
|
|
198
|
-
except Exception as e:
|
|
199
|
-
return ShellSafetyAssessment(
|
|
200
|
-
risk="high",
|
|
201
|
-
reasoning=f"Safety assessment failed: {str(e)[:200]} - failing safe",
|
|
202
|
-
)
|