code-puppy 0.0.302__py3-none-any.whl → 0.0.323__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/agents/base_agent.py +373 -46
- code_puppy/chatgpt_codex_client.py +283 -0
- code_puppy/cli_runner.py +795 -0
- code_puppy/command_line/add_model_menu.py +8 -1
- code_puppy/command_line/autosave_menu.py +266 -35
- code_puppy/command_line/colors_menu.py +515 -0
- code_puppy/command_line/command_handler.py +8 -2
- code_puppy/command_line/config_commands.py +59 -10
- code_puppy/command_line/core_commands.py +19 -7
- code_puppy/command_line/mcp/edit_command.py +3 -1
- code_puppy/command_line/mcp/handler.py +7 -2
- code_puppy/command_line/mcp/install_command.py +8 -3
- code_puppy/command_line/mcp/logs_command.py +173 -64
- code_puppy/command_line/mcp/restart_command.py +7 -2
- code_puppy/command_line/mcp/search_command.py +10 -4
- code_puppy/command_line/mcp/start_all_command.py +16 -6
- code_puppy/command_line/mcp/start_command.py +3 -1
- code_puppy/command_line/mcp/status_command.py +2 -1
- code_puppy/command_line/mcp/stop_all_command.py +5 -1
- code_puppy/command_line/mcp/stop_command.py +3 -1
- code_puppy/command_line/mcp/wizard_utils.py +10 -4
- code_puppy/command_line/model_settings_menu.py +53 -7
- code_puppy/command_line/prompt_toolkit_completion.py +16 -2
- code_puppy/command_line/session_commands.py +11 -4
- code_puppy/config.py +103 -15
- code_puppy/keymap.py +8 -2
- code_puppy/main.py +5 -828
- code_puppy/mcp_/__init__.py +17 -0
- code_puppy/mcp_/blocking_startup.py +61 -32
- code_puppy/mcp_/config_wizard.py +5 -1
- code_puppy/mcp_/managed_server.py +23 -3
- code_puppy/mcp_/manager.py +65 -0
- code_puppy/mcp_/mcp_logs.py +224 -0
- code_puppy/messaging/__init__.py +20 -4
- code_puppy/messaging/bus.py +64 -0
- code_puppy/messaging/markdown_patches.py +57 -0
- code_puppy/messaging/messages.py +16 -0
- code_puppy/messaging/renderers.py +21 -9
- code_puppy/messaging/rich_renderer.py +113 -67
- code_puppy/messaging/spinner/console_spinner.py +34 -0
- code_puppy/model_factory.py +185 -30
- code_puppy/model_utils.py +57 -48
- code_puppy/models.json +19 -5
- code_puppy/plugins/chatgpt_oauth/config.py +5 -1
- code_puppy/plugins/chatgpt_oauth/oauth_flow.py +5 -6
- code_puppy/plugins/chatgpt_oauth/register_callbacks.py +3 -3
- code_puppy/plugins/chatgpt_oauth/test_plugin.py +26 -11
- code_puppy/plugins/chatgpt_oauth/utils.py +180 -65
- code_puppy/plugins/claude_code_oauth/register_callbacks.py +28 -0
- code_puppy/plugins/claude_code_oauth/utils.py +1 -0
- code_puppy/plugins/shell_safety/agent_shell_safety.py +1 -118
- code_puppy/plugins/shell_safety/register_callbacks.py +44 -3
- code_puppy/prompts/codex_system_prompt.md +310 -0
- code_puppy/pydantic_patches.py +131 -0
- code_puppy/terminal_utils.py +126 -0
- code_puppy/tools/agent_tools.py +34 -9
- code_puppy/tools/command_runner.py +361 -32
- code_puppy/tools/file_operations.py +33 -45
- {code_puppy-0.0.302.data → code_puppy-0.0.323.data}/data/code_puppy/models.json +19 -5
- {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/METADATA +1 -1
- {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/RECORD +65 -57
- {code_puppy-0.0.302.data → code_puppy-0.0.323.data}/data/code_puppy/models_dev_api.json +0 -0
- {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/licenses/LICENSE +0 -0

@@ -7,6 +7,8 @@ Provides interactive functionality for installing and configuring MCP servers.
 import logging
 from typing import Any, Dict, Optional
 
+from rich.text import Text
+
 from code_puppy.messaging import emit_error, emit_info, emit_prompt
 
 # Configure logging
@@ -51,7 +53,7 @@ def run_interactive_install_wizard(manager, group_id: str) -> bool:
     required_env_vars = selected_server.get_environment_vars()
     if required_env_vars:
         emit_info(
-            "\n[yellow]Required Environment Variables:[/yellow]",
+            Text.from_markup("\n[yellow]Required Environment Variables:[/yellow]"),
             message_group=group_id,
         )
         for var in required_env_vars:
@@ -61,7 +63,8 @@ def run_interactive_install_wizard(manager, group_id: str) -> bool:
             current_value = os.environ.get(var, "")
             if current_value:
                 emit_info(
-                    f" {var}: [green]Already set[/green]",
+                    Text.from_markup(f" {var}: [green]Already set[/green]"),
+                    message_group=group_id,
                 )
                 env_vars[var] = current_value
             else:
@@ -73,7 +76,8 @@ def run_interactive_install_wizard(manager, group_id: str) -> bool:
     required_cmd_args = selected_server.get_command_line_args()
     if required_cmd_args:
         emit_info(
-            "\n[yellow]Command Line Arguments:[/yellow]",
+            Text.from_markup("\n[yellow]Command Line Arguments:[/yellow]"),
+            message_group=group_id,
         )
         for arg_config in required_cmd_args:
             name = arg_config.get("name", "")
@@ -312,7 +316,9 @@ def install_server_from_catalog(
         json.dump(data, f, indent=2)
 
     emit_info(
-
+        Text.from_markup(
+            f"[green]✓ Successfully installed server: {server_name}[/green]"
+        ),
         message_group=group_id,
     )
     emit_info(
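
The recurring change in the hunks above swaps raw markup strings for `rich.text.Text` objects built with `Text.from_markup`. A minimal sketch of the difference, using only public Rich APIs (the console setup and strings below are illustrative, not taken from code_puppy):

```python
from rich.console import Console
from rich.text import Text

console = Console()

# Raw string: every renderer that prints it must re-parse the markup tags.
raw = "\n[yellow]Required Environment Variables:[/yellow]"

# Pre-parsed Text: the markup is resolved into style spans once, here, and the
# resulting object can be handed to a message bus and rendered later without
# another parsing pass.
styled = Text.from_markup(raw)

console.print(styled)  # prints the heading in yellow
print(styled.plain)    # the same text with the markup already stripped
```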
@@ -58,7 +58,7 @@ SETTING_DEFINITIONS: Dict[str, Dict] = {
         "name": "Reasoning Effort",
         "description": "Controls how much effort GPT-5 models spend on reasoning. Higher = more thorough but slower.",
         "type": "choice",
-        "choices": ["low", "medium", "high"],
+        "choices": ["minimal", "low", "medium", "high", "xhigh"],
         "default": "medium",
     },
     "verbosity": {
@@ -72,7 +72,7 @@ SETTING_DEFINITIONS: Dict[str, Dict] = {
         "name": "Extended Thinking",
         "description": "Enable Claude's extended thinking mode for complex reasoning tasks.",
         "type": "boolean",
-        "default":
+        "default": True,
     },
     "budget_tokens": {
         "name": "Thinking Budget (tokens)",
@@ -84,6 +84,12 @@ SETTING_DEFINITIONS: Dict[str, Dict] = {
         "default": 10000,
         "format": "{:.0f}",
     },
+    "interleaved_thinking": {
+        "name": "Interleaved Thinking",
+        "description": "Enable thinking between tool calls (Claude 4 only: Opus 4.5, Opus 4.1, Opus 4, Sonnet 4). Adds beta header. WARNING: On Vertex/Bedrock, this FAILS for non-Claude 4 models!",
+        "type": "boolean",
+        "default": False,
+    },
 }
 
 
@@ -93,6 +99,42 @@ def _load_all_model_names() -> List[str]:
     return list(models_config.keys())
 
 
+def _get_setting_choices(
+    setting_key: str, model_name: Optional[str] = None
+) -> List[str]:
+    """Get the available choices for a setting, filtered by model capabilities.
+
+    For reasoning_effort, only codex models support 'xhigh' - regular GPT-5.2
+    models are capped at 'high'.
+
+    Args:
+        setting_key: The setting name (e.g., 'reasoning_effort', 'verbosity')
+        model_name: Optional model name to filter choices for
+
+    Returns:
+        List of valid choices for this setting and model combination.
+    """
+    setting_def = SETTING_DEFINITIONS.get(setting_key, {})
+    if setting_def.get("type") != "choice":
+        return []
+
+    base_choices = setting_def.get("choices", [])
+
+    # For reasoning_effort, filter 'xhigh' based on model support
+    if setting_key == "reasoning_effort" and model_name:
+        models_config = ModelFactory.load_config()
+        model_config = models_config.get(model_name, {})
+
+        # Check if model supports xhigh reasoning
+        supports_xhigh = model_config.get("supports_xhigh_reasoning", False)
+
+        if not supports_xhigh:
+            # Remove xhigh from choices for non-codex models
+            return [c for c in base_choices if c != "xhigh"]
+
+    return base_choices
+
+
 class ModelSettingsMenu:
     """Interactive TUI for model settings configuration.
 
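
A hedged usage sketch of the helper added above. The model names and the `supports_xhigh_reasoning` flag values are illustrative; which entries in models.json actually set that flag is not shown in this diff:

```python
# Assuming models.json contains {"my-codex-model": {"supports_xhigh_reasoning": true}, ...}:
_get_setting_choices("reasoning_effort", "my-codex-model")
# -> ["minimal", "low", "medium", "high", "xhigh"]

# A model without the flag gets the capped list:
_get_setting_choices("reasoning_effort", "my-gpt5-model")
# -> ["minimal", "low", "medium", "high"]

# Non-choice settings (booleans, numbers) return an empty list:
_get_setting_choices("extended_thinking")
# -> []
```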
@@ -427,7 +469,8 @@ class ModelSettingsMenu:
         if setting_def.get("type") == "choice":
             lines.append(("bold", "  Options:"))
             lines.append(("", "\n"))
-
+            # Get filtered choices based on model capabilities
+            choices = _get_setting_choices(setting_key, self.selected_model)
             lines.append(
                 (
                     "fg:ansibrightblack",
@@ -514,8 +557,11 @@ class ModelSettingsMenu:
         if current is not None:
             self.edit_value = current
         elif setting_def.get("type") == "choice":
-            # For choice settings, start with the default
-
+            # For choice settings, start with the default (using filtered choices)
+            choices = _get_setting_choices(setting_key, self.selected_model)
+            self.edit_value = setting_def.get(
+                "default", choices[0] if choices else None
+            )
         elif setting_def.get("type") == "boolean":
             # For boolean settings, start with the default
             self.edit_value = setting_def.get("default", False)
@@ -541,8 +587,8 @@ class ModelSettingsMenu:
         setting_def = SETTING_DEFINITIONS[setting_key]
 
         if setting_def.get("type") == "choice":
-            # Cycle through choices
-            choices =
+            # Cycle through filtered choices based on model capabilities
+            choices = _get_setting_choices(setting_key, self.selected_model)
             current_idx = (
                 choices.index(self.edit_value) if self.edit_value in choices else 0
             )
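
The cycling step that consumes `current_idx` falls outside this hunk; a plausible sketch of how the filtered list would be advanced (the modulo wrap-around is an assumption, not taken from the diff):

```python
choices = ["minimal", "low", "medium", "high"]  # xhigh already filtered out
current = "high"
current_idx = choices.index(current) if current in choices else 0
next_value = choices[(current_idx + 1) % len(choices)]  # assumed wrap: "high" -> "minimal"
```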
@@ -582,12 +582,26 @@ async def get_input_with_combined_completion(
     # Ctrl+X keybinding - exit with KeyboardInterrupt for shell command cancellation
     @bindings.add(Keys.ControlX)
     def _(event):
-
+        try:
+            event.app.exit(exception=KeyboardInterrupt)
+        except Exception:
+            # Ignore "Return value already set" errors when exit was already called
+            # This happens when user presses multiple exit keys in quick succession
+            pass
 
     # Escape keybinding - exit with KeyboardInterrupt
     @bindings.add(Keys.Escape)
     def _(event):
-
+        try:
+            event.app.exit(exception=KeyboardInterrupt)
+        except Exception:
+            # Ignore "Return value already set" errors when exit was already called
+            pass
+
+    # NOTE: We intentionally do NOT override Ctrl+C here.
+    # prompt_toolkit's default Ctrl+C handler properly resets the terminal state on Windows.
+    # Overriding it with event.app.exit(exception=KeyboardInterrupt) can leave the terminal
+    # in a bad state where characters cannot be typed. Let prompt_toolkit handle Ctrl+C natively.
 
     # Toggle multiline with Alt+M
     @bindings.add(Keys.Escape, "m")
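
A self-contained sketch of the key-binding pattern above, using only public prompt_toolkit APIs; the session setup and prompt text are illustrative:

```python
from prompt_toolkit import PromptSession
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.keys import Keys

bindings = KeyBindings()

@bindings.add(Keys.ControlX)
def _(event):
    try:
        # Ask the running application to exit by raising KeyboardInterrupt
        # in whatever called prompt() / prompt_async().
        event.app.exit(exception=KeyboardInterrupt)
    except Exception:
        # A second exit key pressed before the app unwinds would otherwise
        # fail with "Return value already set"; swallow it.
        pass

session = PromptSession(key_bindings=bindings)
try:
    line = session.prompt("> ")
except KeyboardInterrupt:
    line = None  # Ctrl+X (or Ctrl+C via the default handler) cancelled the input
```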
@@ -246,6 +246,8 @@ def handle_dump_context_command(command: str) -> bool:
     )
 def handle_load_context_command(command: str) -> bool:
     """Load message history from a file."""
+    from rich.text import Text
+
     from code_puppy.agents.agent_manager import get_current_agent
     from code_puppy.config import rotate_autosave_id
     from code_puppy.messaging import emit_error, emit_info, emit_success, emit_warning
@@ -278,12 +280,17 @@ def handle_load_context_command(command: str) -> bool:
     # Rotate autosave id to avoid overwriting any existing autosave
     try:
         new_id = rotate_autosave_id()
-        autosave_info =
+        autosave_info = Text.from_markup(
+            f"\n[dim]Autosave session rotated to: {new_id}[/dim]"
+        )
     except Exception:
-        autosave_info = ""
+        autosave_info = Text("")
 
-
+    # Build the success message with proper Text concatenation
+    success_msg = Text(
         f"✅ Context loaded: {len(history)} messages ({total_tokens} tokens)\n"
-        f"📁 From: {session_path}
+        f"📁 From: {session_path}"
     )
+    success_msg.append_text(autosave_info)
+    emit_success(success_msg)
     return True
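
The success-message construction above relies on `Text.append_text`, which preserves the styling of the appended fragment. A minimal sketch with illustrative values:

```python
from rich.text import Text

success_msg = Text(
    "✅ Context loaded: 12 messages (3456 tokens)\n"
    "📁 From: /path/to/session.json"
)
autosave_info = Text.from_markup("\n[dim]Autosave session rotated to: example-id[/dim]")
success_msg.append_text(autosave_info)  # the [dim] styling survives the concatenation
```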
code_puppy/config.py CHANGED
@@ -212,6 +212,9 @@ def get_config_keys():
     default_keys.append("enable_dbos")
     # Add cancel agent key configuration
     default_keys.append("cancel_agent_key")
+    # Add banner color keys
+    for banner_name in DEFAULT_BANNER_COLORS:
+        default_keys.append(f"banner_color_{banner_name}")
 
     config = configparser.ConfigParser()
     config.read(CONFIG_FILE)
@@ -256,9 +259,8 @@ def load_mcp_server_configs():
 def _default_model_from_models_json():
     """Load the default model name from models.json.
 
-
-    Falls back to
-    As a last resort, falls back to ``gpt-5`` if the file cannot be read.
+    Returns the first model in models.json as the default.
+    Falls back to ``gpt-5`` if the file cannot be read.
     """
     global _default_model_cache
 
@@ -270,11 +272,7 @@ def _default_model_from_models_json():
 
     models_config = ModelFactory.load_config()
     if models_config:
-        #
-        if "synthetic-GLM-4.6" in models_config:
-            _default_model_cache = "synthetic-GLM-4.6"
-            return "synthetic-GLM-4.6"
-        # Fall back to first model if synthetic-GLM-4.6 is not available
+        # Use first model in models.json as default
         first_key = next(iter(models_config))
         _default_model_cache = first_key
         return first_key
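
A minimal sketch of the simplified selection: the first key in models.json wins because Python dicts preserve insertion order (the model entries below are illustrative):

```python
models_config = {
    "gpt-5": {"type": "openai"},
    "claude-sonnet-4": {"type": "anthropic"},
}
first_key = next(iter(models_config))  # -> "gpt-5", the first entry in the file
```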
@@ -497,8 +495,8 @@ def set_puppy_token(token: str):
 
 
 def get_openai_reasoning_effort() -> str:
-    """Return the configured OpenAI reasoning effort (low, medium, high)."""
-    allowed_values = {"low", "medium", "high"}
+    """Return the configured OpenAI reasoning effort (minimal, low, medium, high, xhigh)."""
+    allowed_values = {"minimal", "low", "medium", "high", "xhigh"}
     configured = (get_value("openai_reasoning_effort") or "medium").strip().lower()
     if configured not in allowed_values:
         return "medium"
@@ -507,7 +505,7 @@ def get_openai_reasoning_effort() -> str:
 
 def set_openai_reasoning_effort(value: str) -> None:
     """Persist the OpenAI reasoning effort ensuring it remains within allowed values."""
-    allowed_values = {"low", "medium", "high"}
+    allowed_values = {"minimal", "low", "medium", "high", "xhigh"}
     normalized = (value or "").strip().lower()
     if normalized not in allowed_values:
         raise ValueError(
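
A hedged usage sketch of the widened validation; the function names come from this file, values outside the allowed set raise ValueError as shown above, and the input value below is illustrative:

```python
from code_puppy.config import get_openai_reasoning_effort, set_openai_reasoning_effort

set_openai_reasoning_effort("  XHigh ")   # normalised to "xhigh", now an allowed value
assert get_openai_reasoning_effort() == "xhigh"

set_openai_reasoning_effort("extreme")    # not in the allowed set -> raises ValueError
```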
@@ -658,10 +656,22 @@ def get_all_model_settings(model_name: str) -> dict:
     for key, val in config[DEFAULT_SECTION].items():
         if key.startswith(prefix) and val.strip():
             setting_name = key[len(prefix) :]
-
-
-
-
+            # Handle different value types
+            val_stripped = val.strip()
+            # Check for boolean values first
+            if val_stripped.lower() in ("true", "false"):
+                settings[setting_name] = val_stripped.lower() == "true"
+            else:
+                # Try to parse as number (int first, then float)
+                try:
+                    # Try int first for cleaner values like budget_tokens
+                    if "." not in val_stripped:
+                        settings[setting_name] = int(val_stripped)
+                    else:
+                        settings[setting_name] = float(val_stripped)
+                except (ValueError, TypeError):
+                    # Keep as string if not a number
+                    settings[setting_name] = val_stripped
 
     return settings
 
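
An illustrative walk-through (not from the diff) of how the coercion above turns raw config strings into typed values; the setting names and values are examples only:

```python
raw_values = {
    "extended_thinking": "true",
    "budget_tokens": "10000",
    "temperature": "0.7",
    "reasoning_effort": "high",
}

settings = {}
for name, val in raw_values.items():
    v = val.strip()
    if v.lower() in ("true", "false"):
        settings[name] = v.lower() == "true"                      # -> bool
    else:
        try:
            settings[name] = int(v) if "." not in v else float(v)  # -> int / float
        except (ValueError, TypeError):
            settings[name] = v                                      # -> str fallback

# settings == {"extended_thinking": True, "budget_tokens": 10000,
#              "temperature": 0.7, "reasoning_effort": "high"}
```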
@@ -1257,6 +1267,84 @@ def set_diff_deletion_color(color: str):
     set_config_value("highlight_deletion_color", color)
 
 
+# =============================================================================
+# Banner Color Configuration
+# =============================================================================
+
+# Default banner colors (Rich color names)
+# A beautiful jewel-tone palette with semantic meaning:
+# - Blues/Teals: Reading & navigation (calm, informational)
+# - Warm tones: Actions & changes (edits, shell commands)
+# - Purples: AI thinking & reasoning (the "brain" colors)
+# - Greens: Completions & success
+# - Neutrals: Search & listings
+DEFAULT_BANNER_COLORS = {
+    "thinking": "deep_sky_blue4",  # Sapphire - contemplation
+    "agent_response": "medium_purple4",  # Amethyst - main AI output
+    "shell_command": "dark_orange3",  # Amber - system commands
+    "read_file": "steel_blue",  # Steel - reading files
+    "edit_file": "dark_goldenrod",  # Gold - modifications
+    "grep": "grey37",  # Silver - search results
+    "directory_listing": "dodger_blue2",  # Sky - navigation
+    "agent_reasoning": "dark_violet",  # Violet - deep thought
+    "invoke_agent": "deep_pink4",  # Ruby - agent invocation
+    "subagent_response": "sea_green3",  # Emerald - sub-agent success
+    "list_agents": "dark_slate_gray3",  # Slate - neutral listing
+}
+
+
+def get_banner_color(banner_name: str) -> str:
+    """Get the background color for a specific banner.
+
+    Args:
+        banner_name: The banner identifier (e.g., 'thinking', 'agent_response')
+
+    Returns:
+        Rich color name or hex code for the banner background
+    """
+    config_key = f"banner_color_{banner_name}"
+    val = get_value(config_key)
+    if val:
+        return val
+    return DEFAULT_BANNER_COLORS.get(banner_name, "blue")
+
+
+def set_banner_color(banner_name: str, color: str):
+    """Set the background color for a specific banner.
+
+    Args:
+        banner_name: The banner identifier (e.g., 'thinking', 'agent_response')
+        color: Rich color name or hex code
+    """
+    config_key = f"banner_color_{banner_name}"
+    set_config_value(config_key, color)
+
+
+def get_all_banner_colors() -> dict:
+    """Get all banner colors (configured or default).
+
+    Returns:
+        Dict mapping banner names to their colors
+    """
+    return {name: get_banner_color(name) for name in DEFAULT_BANNER_COLORS}
+
+
+def reset_banner_color(banner_name: str):
+    """Reset a banner color to its default.
+
+    Args:
+        banner_name: The banner identifier to reset
+    """
+    default_color = DEFAULT_BANNER_COLORS.get(banner_name, "blue")
+    set_banner_color(banner_name, default_color)
+
+
+def reset_all_banner_colors():
+    """Reset all banner colors to their defaults."""
+    for name, color in DEFAULT_BANNER_COLORS.items():
+        set_banner_color(name, color)
+
+
 def get_current_autosave_id() -> str:
     """Get or create the current autosave session ID for this process."""
     global _CURRENT_AUTOSAVE_ID
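
A hedged usage sketch of the new helpers; the override color below is illustrative, and each override is persisted under the `banner_color_<name>` key that `get_config_keys()` now exposes:

```python
from code_puppy.config import (
    get_all_banner_colors,
    get_banner_color,
    reset_banner_color,
    set_banner_color,
)

get_banner_color("thinking")             # "deep_sky_blue4" unless overridden in config
set_banner_color("thinking", "purple4")  # stored as banner_color_thinking
get_all_banner_colors()["thinking"]      # "purple4"
reset_banner_color("thinking")           # writes the default back
```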
code_puppy/keymap.py CHANGED
@@ -86,9 +86,15 @@ def cancel_agent_uses_signal() -> bool:
     """Check if the cancel agent key uses SIGINT (Ctrl+C).
 
     Returns:
-        True if the cancel key is ctrl+c
-        False if it uses keyboard listener approach.
+        True if the cancel key is ctrl+c AND we're not on Windows
+        (uses SIGINT handler), False if it uses keyboard listener approach.
     """
+    import sys
+
+    # On Windows, always use keyboard listener - SIGINT is unreliable
+    if sys.platform == "win32":
+        return False
+
     return get_cancel_agent_key() == "ctrl+c"
 
 
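
A short sketch of how a caller would branch on the platform-aware helper above; the handler bodies are placeholders:

```python
from code_puppy.keymap import cancel_agent_uses_signal

if cancel_agent_uses_signal():
    # POSIX and the cancel key is ctrl+c: install a SIGINT handler.
    ...
else:
    # Windows (always), or a non-ctrl+c cancel key: use the keyboard listener.
    ...
```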