code-puppy 0.0.154__py3-none-any.whl → 0.0.156__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. code_puppy/agent.py +26 -5
  2. code_puppy/agents/agent_creator_agent.py +65 -13
  3. code_puppy/agents/json_agent.py +8 -0
  4. code_puppy/agents/runtime_manager.py +12 -4
  5. code_puppy/command_line/command_handler.py +83 -0
  6. code_puppy/command_line/mcp/install_command.py +50 -1
  7. code_puppy/command_line/mcp/wizard_utils.py +88 -17
  8. code_puppy/command_line/prompt_toolkit_completion.py +18 -2
  9. code_puppy/config.py +8 -2
  10. code_puppy/main.py +17 -4
  11. code_puppy/mcp/__init__.py +2 -2
  12. code_puppy/mcp/config_wizard.py +1 -1
  13. code_puppy/messaging/spinner/console_spinner.py +1 -1
  14. code_puppy/model_factory.py +13 -12
  15. code_puppy/models.json +26 -0
  16. code_puppy/round_robin_model.py +35 -18
  17. code_puppy/summarization_agent.py +1 -3
  18. code_puppy/tools/agent_tools.py +41 -138
  19. code_puppy/tools/file_operations.py +116 -96
  20. code_puppy/tui/app.py +1 -1
  21. {code_puppy-0.0.154.data → code_puppy-0.0.156.data}/data/code_puppy/models.json +26 -0
  22. {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/METADATA +4 -3
  23. {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/RECORD +26 -48
  24. code_puppy/token_utils.py +0 -67
  25. code_puppy/tools/token_check.py +0 -32
  26. code_puppy/tui/tests/__init__.py +0 -1
  27. code_puppy/tui/tests/test_agent_command.py +0 -79
  28. code_puppy/tui/tests/test_chat_message.py +0 -28
  29. code_puppy/tui/tests/test_chat_view.py +0 -88
  30. code_puppy/tui/tests/test_command_history.py +0 -89
  31. code_puppy/tui/tests/test_copy_button.py +0 -191
  32. code_puppy/tui/tests/test_custom_widgets.py +0 -27
  33. code_puppy/tui/tests/test_disclaimer.py +0 -27
  34. code_puppy/tui/tests/test_enums.py +0 -15
  35. code_puppy/tui/tests/test_file_browser.py +0 -60
  36. code_puppy/tui/tests/test_help.py +0 -38
  37. code_puppy/tui/tests/test_history_file_reader.py +0 -107
  38. code_puppy/tui/tests/test_input_area.py +0 -33
  39. code_puppy/tui/tests/test_settings.py +0 -44
  40. code_puppy/tui/tests/test_sidebar.py +0 -33
  41. code_puppy/tui/tests/test_sidebar_history.py +0 -153
  42. code_puppy/tui/tests/test_sidebar_history_navigation.py +0 -132
  43. code_puppy/tui/tests/test_status_bar.py +0 -54
  44. code_puppy/tui/tests/test_timestamped_history.py +0 -52
  45. code_puppy/tui/tests/test_tools.py +0 -82
  46. {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/WHEEL +0 -0
  47. {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/entry_points.txt +0 -0
  48. {code_puppy-0.0.154.dist-info → code_puppy-0.0.156.dist-info}/licenses/LICENSE +0 -0
code_puppy/config.py CHANGED
@@ -106,7 +106,13 @@ def get_config_keys():
     Returns the list of all config keys currently in puppy.cfg,
     plus certain preset expected keys (e.g. "yolo_mode", "model", "compaction_strategy", "message_limit", "allow_recursion").
     """
-    default_keys = ["yolo_mode", "model", "compaction_strategy", "message_limit", "allow_recursion"]
+    default_keys = [
+        "yolo_mode",
+        "model",
+        "compaction_strategy",
+        "message_limit",
+        "allow_recursion",
+    ]
     config = configparser.ConfigParser()
     config.read(CONFIG_FILE)
     keys = set(config[DEFAULT_SECTION].keys()) if DEFAULT_SECTION in config else set()
@@ -173,7 +179,7 @@ def _default_model_from_models_json():
         first_key = next(iter(models_config))  # Raises StopIteration if empty
         _default_model_cache = first_key
         return first_key
-    except Exception as e:
+    except Exception:
        # Any problem (network, file missing, empty dict, etc.) => fall back
        _default_model_cache = "gpt-5"
        return "gpt-5"
code_puppy/main.py CHANGED
@@ -290,7 +290,9 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> None:
         if awaiting_input:
             # No spinner - use agent_manager's run_with_mcp method
             response = await agent_manager.run_with_mcp(
-                initial_command, usage_limits=get_custom_usage_limits()
+                initial_command,
+                message_history=get_message_history(),
+                usage_limits=get_custom_usage_limits(),
             )
         else:
             # Use our custom spinner for better compatibility with user input
@@ -299,7 +301,11 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> None:
             with ConsoleSpinner(console=display_console):
                 # Use agent_manager's run_with_mcp method
                 response = await agent_manager.run_with_mcp(
-                    initial_command, usage_limits=get_custom_usage_limits()
+                    initial_command,
+                    message_history=prune_interrupted_tool_calls(
+                        get_message_history()
+                    ),
+                    usage_limits=get_custom_usage_limits(),
                 )
             set_message_history(
                 prune_interrupted_tool_calls(get_message_history())
@@ -426,7 +432,13 @@ async def interactive_mode(message_renderer, initial_command: str = None) -> None:
 
         runtime_manager = get_runtime_agent_manager()
         with ConsoleSpinner(console=message_renderer.console):
-            result = await runtime_manager.run_with_mcp(task, get_custom_usage_limits())
+            result = await runtime_manager.run_with_mcp(
+                task,
+                get_custom_usage_limits(),
+                message_history=prune_interrupted_tool_calls(
+                    get_message_history()
+                ),
+            )
         # Check if the task was cancelled (but don't show message if we just killed processes)
         if result is None:
             continue
@@ -495,7 +507,8 @@ async def execute_single_prompt(prompt: str, message_renderer) -> None:
 
     with ConsoleSpinner(console=message_renderer.console):
         response = await agent_manager.run_with_mcp(
-            prompt, usage_limits=get_custom_usage_limits()
+            prompt,
+            usage_limits=get_custom_usage_limits(),
        )
 
     agent_response = response.output
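
The interactive paths now thread the saved history through run_with_mcp, first dropping tool calls that never received a result (for example, after an interrupted run). The real prune_interrupted_tool_calls is not shown in this diff; the following is only a hypothetical sketch of that kind of pruning, using made-up message dicts rather than pydantic-ai's actual message types:

def drop_unanswered_tool_calls(history: list[dict]) -> list[dict]:
    # Hypothetical: keep a tool call only if a matching tool result follows,
    # since most chat APIs reject histories containing dangling tool calls.
    answered = {m["tool_call_id"] for m in history if m.get("role") == "tool"}
    return [
        m
        for m in history
        if m.get("role") != "assistant_tool_call" or m["tool_call_id"] in answered
    ]
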
code_puppy/mcp/__init__.py CHANGED
@@ -1,7 +1,7 @@
 """MCP (Model Context Protocol) management system for Code Puppy.
 
-Note: Be careful not to create circular imports with config_wizard.py. 
-config_wizard.py imports ServerConfig and get_mcp_manager directly from 
+Note: Be careful not to create circular imports with config_wizard.py.
+config_wizard.py imports ServerConfig and get_mcp_manager directly from
 .manager to avoid circular dependencies with this package __init__.py
 """
 
code_puppy/mcp/config_wizard.py CHANGED
@@ -1,7 +1,7 @@
 """
 MCP Configuration Wizard - Interactive setup for MCP servers.
 
-Note: This module imports ServerConfig and get_mcp_manager directly from 
+Note: This module imports ServerConfig and get_mcp_manager directly from
 .code_puppy.mcp.manager to avoid circular imports with the package __init__.py
 """
 
code_puppy/messaging/spinner/console_spinner.py CHANGED
@@ -125,7 +125,7 @@ class ConsoleSpinner(SpinnerBase):
                 self._live.refresh()
 
                 # Short sleep to control animation speed
-                time.sleep(0.1)
+                time.sleep(0.05)
         except Exception as e:
             print(f"\nSpinner error: {e}")
             self._is_spinning = False
code_puppy/model_factory.py CHANGED
@@ -6,9 +6,8 @@ from typing import Any, Dict
 
 import httpx
 from anthropic import AsyncAnthropic
-from openai import AsyncAzureOpenAI  # For Azure OpenAI client
+from openai import AsyncAzureOpenAI
 from pydantic_ai.models.anthropic import AnthropicModel
-from pydantic_ai.models.fallback import infer_model
 from pydantic_ai.models.gemini import GeminiModel
 from pydantic_ai.models.openai import OpenAIChatModel
 from pydantic_ai.providers.anthropic import AnthropicProvider
@@ -87,13 +86,13 @@ class ModelFactory:
         else:
             from code_puppy.config import MODELS_FILE
 
-            if not pathlib.Path(MODELS_FILE).exists():
-                with open(pathlib.Path(__file__).parent / "models.json", "r") as src:
-                    with open(pathlib.Path(MODELS_FILE), "w") as target:
-                        target.write(src.read())
+            with open(pathlib.Path(__file__).parent / "models.json", "r") as src:
+                with open(pathlib.Path(MODELS_FILE), "w") as target:
+                    target.write(src.read())
 
             with open(MODELS_FILE, "r") as f:
                 config = json.load(f)
+
             if pathlib.Path(EXTRA_MODELS_FILE).exists():
                 with open(EXTRA_MODELS_FILE, "r") as f:
                     extra_config = json.load(f)
@@ -248,25 +247,27 @@ class ModelFactory:
             model = OpenAIChatModel(model_name=model_config["name"], provider=provider)
             setattr(model, "provider", provider)
             return model
-
+
         elif model_type == "round_robin":
             # Get the list of model names to use in the round-robin
             model_names = model_config.get("models")
             if not model_names or not isinstance(model_names, list):
-                raise ValueError(f"Round-robin model '{model_name}' requires a 'models' list in its configuration.")
-
+                raise ValueError(
+                    f"Round-robin model '{model_name}' requires a 'models' list in its configuration."
+                )
+
             # Get the rotate_every parameter (default: 1)
             rotate_every = model_config.get("rotate_every", 1)
-
+
             # Resolve each model name to an actual model instance
             models = []
             for name in model_names:
                 # Recursively get each model using the factory
                 model = ModelFactory.get_model(name, config)
                 models.append(model)
-
+
             # Create and return the round-robin model
             return RoundRobinModel(*models, rotate_every=rotate_every)
-
+
        else:
            raise ValueError(f"Unsupported model type: {model_type}")
code_puppy/models.json CHANGED
@@ -1,4 +1,30 @@
 {
+  "openrouter-sonoma-dusk-alpha": {
+    "type": "custom_openai",
+    "name": "openrouter/sonoma-dusk-alpha",
+    "custom_endpoint": {
+      "url": "https://openrouter.ai/api/v1",
+      "api_key": "$OPENROUTER_API_KEY",
+      "headers": {
+        "HTTP-Referer": "https://github.com/mpfaffenberger/code_puppy",
+        "X-Title": "Code Puppy"
+      }
+    },
+    "context_length": 2000000
+  },
+  "openrouter-sonoma-sky-alpha": {
+    "type": "custom_openai",
+    "name": "openrouter/sonoma-sky-alpha",
+    "custom_endpoint": {
+      "url": "https://openrouter.ai/api/v1",
+      "api_key": "$OPENROUTER_API_KEY",
+      "headers": {
+        "HTTP-Referer": "https://github.com/mpfaffenberger/code_puppy",
+        "X-Title": "Code Puppy"
+      }
+    },
+    "context_length": 2000000
+  },
   "gpt-5": {
     "type": "openai",
     "name": "gpt-5",
code_puppy/round_robin_model.py CHANGED
@@ -1,10 +1,18 @@
-
 from contextlib import asynccontextmanager, suppress
 from dataclasses import dataclass, field
-from typing import Any, Callable, AsyncIterator, List
+from typing import Any, AsyncIterator, List
 
-from pydantic_ai.models import Model, ModelMessage, ModelSettings, ModelRequestParameters, ModelResponse, StreamedResponse
-from pydantic_ai.models.fallback import KnownModelName, infer_model, merge_model_settings
+from pydantic_ai.models import (
+    Model,
+    ModelMessage,
+    ModelSettings,
+    ModelRequestParameters,
+    ModelResponse,
+    StreamedResponse,
+)
+from pydantic_ai.models.fallback import (
+    merge_model_settings,
+)
 from pydantic_ai.result import RunContext
 
 try:
@@ -15,18 +23,21 @@ except ImportError:
     class DummySpan:
         def is_recording(self):
             return False
+
         def set_attributes(self, attributes):
             pass
+
     return DummySpan()
 
+
 @dataclass(init=False)
 class RoundRobinModel(Model):
     """A model that cycles through multiple models in a round-robin fashion.
-
+
     This model distributes requests across multiple candidate models to help
     overcome rate limits or distribute load.
     """
-
+
     models: List[Model]
     _current_index: int = field(default=0, repr=False)
     _model_name: str = field(repr=False)
@@ -37,10 +48,10 @@ class RoundRobinModel(Model):
         self,
         *models: Model,
         rotate_every: int = 1,
-        settings: ModelSettings | None = None
+        settings: ModelSettings | None = None,
     ):
         """Initialize a round-robin model instance.
-
+
         Args:
             models: The model instances to cycle through.
             rotate_every: Number of requests before rotating to the next model (default: 1).
@@ -59,9 +70,9 @@ class RoundRobinModel(Model):
     @property
     def model_name(self) -> str:
         """The model name showing this is a round-robin model with its candidates."""
-        base_name = f'round_robin:{",".join(model.model_name for model in self.models)}'
+        base_name = f"round_robin:{','.join(model.model_name for model in self.models)}"
         if self._rotate_every != 1:
-            return f'{base_name}:rotate_every={self._rotate_every}'
+            return f"{base_name}:rotate_every={self._rotate_every}"
         return base_name
 
     @property
@@ -93,10 +104,14 @@ class RoundRobinModel(Model):
         current_model = self._get_next_model()
         # Use the current model's settings as base, then merge with provided settings
         merged_settings = merge_model_settings(current_model.settings, model_settings)
-        customized_model_request_parameters = current_model.customize_request_parameters(model_request_parameters)
-
+        customized_model_request_parameters = (
+            current_model.customize_request_parameters(model_request_parameters)
+        )
+
         try:
-            response = await current_model.request(messages, merged_settings, customized_model_request_parameters)
+            response = await current_model.request(
+                messages, merged_settings, customized_model_request_parameters
+            )
             self._set_span_attributes(current_model)
             return response
         except Exception as exc:
@@ -116,8 +131,10 @@ class RoundRobinModel(Model):
         current_model = self._get_next_model()
         # Use the current model's settings as base, then merge with provided settings
         merged_settings = merge_model_settings(current_model.settings, model_settings)
-        customized_model_request_parameters = current_model.customize_request_parameters(model_request_parameters)
-
+        customized_model_request_parameters = (
+            current_model.customize_request_parameters(model_request_parameters)
+        )
+
         async with current_model.request_stream(
             messages, merged_settings, customized_model_request_parameters, run_context
         ) as response:
@@ -129,6 +146,6 @@ class RoundRobinModel(Model):
         with suppress(Exception):
             span = get_current_span()
             if span.is_recording():
-                attributes = getattr(span, 'attributes', {})
-                if attributes.get('gen_ai.request.model') == self.model_name:
-                    span.set_attributes(model.model_attributes(model))
+                attributes = getattr(span, "attributes", {})
+                if attributes.get("gen_ai.request.model") == self.model_name:
+                    span.set_attributes(model.model_attributes(model))
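
The reformatted class keeps the same behavior: each request asks _get_next_model() for the active candidate, and rotate_every controls how many requests a candidate serves before the index advances. A standalone sketch of that bookkeeping (the counter and field names here are illustrative, not the class's actual internals):

class RotationCounter:
    def __init__(self, n_models: int, rotate_every: int = 1):
        self.n_models = n_models
        self.rotate_every = rotate_every
        self.requests = 0
        self.index = 0

    def next_index(self) -> int:
        # Serve `rotate_every` requests from the current model, then advance.
        current = self.index
        self.requests += 1
        if self.requests % self.rotate_every == 0:
            self.index = (self.index + 1) % self.n_models
        return current

counter = RotationCounter(n_models=2, rotate_every=2)
[counter.next_index() for _ in range(6)]  # -> [0, 0, 1, 1, 0, 0]
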
code_puppy/summarization_agent.py CHANGED
@@ -53,9 +53,7 @@ def run_summarization_sync(prompt: str, message_history: List) -> List:
 def reload_summarization_agent():
     """Create a specialized agent for summarizing messages when context limit is reached."""
     models_config = ModelFactory.load_config()
-    model_name = "gemini-2.5-pro"
-    if model_name not in models_config:
-        model_name = get_model_name()
+    model_name = get_model_name()
     model = ModelFactory.get_model(model_name, models_config)
 
     # Specialized instructions for summarization
code_puppy/tools/agent_tools.py CHANGED
@@ -21,213 +21,113 @@ from code_puppy.config import get_model_name
 
 class AgentInfo(BaseModel):
     """Information about an available agent."""
+
     name: str
     display_name: str
 
 
 class ListAgentsOutput(BaseModel):
     """Output for the list_agents tool."""
+
     agents: List[AgentInfo]
     error: str | None = None
 
 
 class AgentInvokeOutput(BaseModel):
     """Output for the invoke_agent tool."""
+
     response: str | None
     agent_name: str
     error: str | None = None
 
 
-def _list_agents(context: RunContext) -> ListAgentsOutput:
-    """List all available sub-agents that can be invoked.
-
-    Returns:
-        ListAgentsOutput: A list of available agents with their names and display names.
-    """
-    group_id = generate_group_id("list_agents")
-
-    emit_info(
-        "\n[bold white on blue] LIST AGENTS [/bold white on blue]",
-        message_group=group_id
-    )
-    emit_divider(message_group=group_id)
-
-    try:
-        # Get available agents from the agent manager
-        agents_dict = get_available_agents()
-
-        # Convert to list of AgentInfo objects
-        agents = [
-            AgentInfo(name=name, display_name=display_name)
-            for name, display_name in agents_dict.items()
-        ]
-
-        # Display the agents in the console
-        for agent in agents:
-            emit_system_message(
-                f"- [bold]{agent.name}[/bold]: {agent.display_name}",
-                message_group=group_id
-            )
-
-        emit_divider(message_group=group_id)
-        return ListAgentsOutput(agents=agents)
-
-    except Exception as e:
-        error_msg = f"Error listing agents: {str(e)}"
-        emit_error(error_msg, message_group=group_id)
-        emit_divider(message_group=group_id)
-        return ListAgentsOutput(agents=[], error=error_msg)
-
-
-def _invoke_agent(context: RunContext, agent_name: str, prompt: str) -> AgentInvokeOutput:
-    """Invoke a specific sub-agent with a given prompt.
-
-    Args:
-        agent_name: The name of the agent to invoke
-        prompt: The prompt to send to the agent
-
-    Returns:
-        AgentInvokeOutput: The agent's response to the prompt
-    """
-    group_id = generate_group_id("invoke_agent", agent_name)
-
-    emit_info(
-        f"\n[bold white on blue] INVOKE AGENT [/bold white on blue] {agent_name}",
-        message_group=group_id
-    )
-    emit_divider(message_group=group_id)
-    emit_system_message(f"Prompt: {prompt}", message_group=group_id)
-    emit_divider(message_group=group_id)
-
-    try:
-        # Load the specified agent config
-        agent_config = load_agent_config(agent_name)
-
-        # Get the current model for creating a temporary agent
-        model_name = get_model_name()
-        models_config = ModelFactory.load_config()
-        model = ModelFactory.get_model(model_name, models_config)
-
-        # Create a temporary agent instance to avoid interfering with current agent state
-        instructions = agent_config.get_system_prompt()
-        temp_agent = Agent(
-            model=model,
-            instructions=instructions,
-            output_type=str,
-            retries=3,
-        )
-
-        # Register the tools that the agent needs
-        from code_puppy.tools import register_tools_for_agent
-        agent_tools = agent_config.get_available_tools()
-
-        # Avoid recursive tool registration - if the agent has the same tools
-        # as the current agent, skip registration to prevent conflicts
-        current_agent_tools = ["list_agents", "invoke_agent"]
-        if set(agent_tools) != set(current_agent_tools):
-            register_tools_for_agent(temp_agent, agent_tools)
-
-        # Run the temporary agent with the provided prompt
-        result = temp_agent.run_sync(prompt)
-
-        # Extract the response from the result
-        response = result.output
-
-        emit_system_message(f"Response: {response}", message_group=group_id)
-        emit_divider(message_group=group_id)
-
-        return AgentInvokeOutput(response=response, agent_name=agent_name)
-
-    except Exception as e:
-        error_msg = f"Error invoking agent '{agent_name}': {str(e)}"
-        emit_error(error_msg, message_group=group_id)
-        emit_divider(message_group=group_id)
-        return AgentInvokeOutput(response=None, agent_name=agent_name, error=error_msg)
-
-
 def register_list_agents(agent):
     """Register the list_agents tool with the provided agent.
-
+
     Args:
         agent: The agent to register the tool with
     """
+
     @agent.tool
     def list_agents(context: RunContext) -> ListAgentsOutput:
         """List all available sub-agents that can be invoked.
-
+
         Returns:
             ListAgentsOutput: A list of available agents with their names and display names.
         """
         # Generate a group ID for this tool execution
         group_id = generate_group_id("list_agents")
-
+
         emit_info(
             "\n[bold white on blue] LIST AGENTS [/bold white on blue]",
-            message_group=group_id
+            message_group=group_id,
         )
         emit_divider(message_group=group_id)
-
+
         try:
             # Get available agents from the agent manager
             agents_dict = get_available_agents()
-
+
             # Convert to list of AgentInfo objects
             agents = [
                 AgentInfo(name=name, display_name=display_name)
                 for name, display_name in agents_dict.items()
             ]
-
+
             # Display the agents in the console
             for agent_item in agents:
                 emit_system_message(
-                    f"- [bold]{agent_item.name}[/bold]: {agent_item.display_name}",
-                    message_group=group_id
+                    f"- [bold]{agent_item.name}[/bold]: {agent_item.display_name}",
+                    message_group=group_id,
                 )
-
+
             emit_divider(message_group=group_id)
             return ListAgentsOutput(agents=agents)
-
+
         except Exception as e:
             error_msg = f"Error listing agents: {str(e)}"
             emit_error(error_msg, message_group=group_id)
             emit_divider(message_group=group_id)
             return ListAgentsOutput(agents=[], error=error_msg)
-
+
     return list_agents
 
 
 def register_invoke_agent(agent):
     """Register the invoke_agent tool with the provided agent.
-
+
     Args:
         agent: The agent to register the tool with
     """
+
     @agent.tool
-    def invoke_agent(context: RunContext, agent_name: str, prompt: str) -> AgentInvokeOutput:
+    def invoke_agent(
+        context: RunContext, agent_name: str, prompt: str
+    ) -> AgentInvokeOutput:
         """Invoke a specific sub-agent with a given prompt.
-
+
         Args:
             agent_name: The name of the agent to invoke
             prompt: The prompt to send to the agent
-
+
         Returns:
             AgentInvokeOutput: The agent's response to the prompt
         """
         # Generate a group ID for this tool execution
         group_id = generate_group_id("invoke_agent", agent_name)
-
+
         emit_info(
             f"\n[bold white on blue] INVOKE AGENT [/bold white on blue] {agent_name}",
-            message_group=group_id
+            message_group=group_id,
         )
         emit_divider(message_group=group_id)
         emit_system_message(f"Prompt: {prompt}", message_group=group_id)
         emit_divider(message_group=group_id)
-
+
         try:
             # Load the specified agent config
             agent_config = load_agent_config(agent_name)
-
+
             # Get the current model for creating a temporary agent
             model_name = get_model_name()
             models_config = ModelFactory.load_config()
@@ -235,9 +135,9 @@ def register_invoke_agent(agent):
             # Only proceed if we have a valid model configuration
             if model_name not in models_config:
                 raise ValueError(f"Model '{model_name}' not found in configuration")
-
+
             model = ModelFactory.get_model(model_name, models_config)
-
+
             # Create a temporary agent instance to avoid interfering with current agent state
             instructions = agent_config.get_system_prompt()
             temp_agent = Agent(
@@ -246,27 +146,30 @@
                 output_type=str,
                 retries=3,
             )
-
+
             # Register the tools that the agent needs
             from code_puppy.tools import register_tools_for_agent
+
             agent_tools = agent_config.get_available_tools()
             register_tools_for_agent(temp_agent, agent_tools)
-
+
             # Run the temporary agent with the provided prompt
             result = temp_agent.run_sync(prompt)
-
+
             # Extract the response from the result
             response = result.output
-
+
             emit_system_message(f"Response: {response}", message_group=group_id)
             emit_divider(message_group=group_id)
-
+
             return AgentInvokeOutput(response=response, agent_name=agent_name)
-
+
         except Exception as e:
             error_msg = f"Error invoking agent '{agent_name}': {str(e)}"
             emit_error(error_msg, message_group=group_id)
             emit_divider(message_group=group_id)
-            return AgentInvokeOutput(response=None, agent_name=agent_name, error=error_msg)
-
-    return invoke_agent
+            return AgentInvokeOutput(
+                response=None, agent_name=agent_name, error=error_msg
+            )
+
+    return invoke_agent
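
The diff removes the module-level _list_agents/_invoke_agent helpers, so each tool now exists only as a closure bound to a specific Agent via pydantic-ai's @agent.tool decorator. A minimal sketch of that registration pattern (using pydantic-ai's built-in "test" model so the example stays offline; the greet tool is invented for illustration):

from pydantic_ai import Agent, RunContext

def register_greet(agent: Agent):
    # The tool is defined inside the registrar, so it is bound to exactly
    # this agent instance rather than living at module level.
    @agent.tool
    def greet(context: RunContext, name: str) -> str:
        """Return a greeting for `name`."""
        return f"Hello, {name}!"

    return greet

agent = Agent("test")  # pydantic-ai's test model; makes no API calls
register_greet(agent)
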