shotgun-sh 0.3.3.dev1-py3-none-any.whl → 0.4.0.dev1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. shotgun/agents/agent_manager.py +191 -23
  2. shotgun/agents/common.py +78 -77
  3. shotgun/agents/config/manager.py +42 -1
  4. shotgun/agents/config/models.py +16 -0
  5. shotgun/agents/conversation/history/file_content_deduplication.py +66 -43
  6. shotgun/agents/export.py +12 -13
  7. shotgun/agents/models.py +66 -1
  8. shotgun/agents/plan.py +12 -13
  9. shotgun/agents/research.py +13 -10
  10. shotgun/agents/router/__init__.py +47 -0
  11. shotgun/agents/router/models.py +376 -0
  12. shotgun/agents/router/router.py +185 -0
  13. shotgun/agents/router/tools/__init__.py +18 -0
  14. shotgun/agents/router/tools/delegation_tools.py +503 -0
  15. shotgun/agents/router/tools/plan_tools.py +322 -0
  16. shotgun/agents/specify.py +12 -13
  17. shotgun/agents/tasks.py +12 -13
  18. shotgun/agents/tools/file_management.py +49 -1
  19. shotgun/agents/tools/registry.py +2 -0
  20. shotgun/agents/tools/web_search/__init__.py +1 -2
  21. shotgun/agents/tools/web_search/gemini.py +1 -3
  22. shotgun/codebase/core/change_detector.py +1 -1
  23. shotgun/codebase/core/ingestor.py +1 -1
  24. shotgun/codebase/core/manager.py +1 -1
  25. shotgun/prompts/agents/export.j2 +2 -0
  26. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -10
  27. shotgun/prompts/agents/partials/router_delegation_mode.j2 +36 -0
  28. shotgun/prompts/agents/plan.j2 +24 -12
  29. shotgun/prompts/agents/research.j2 +70 -31
  30. shotgun/prompts/agents/router.j2 +440 -0
  31. shotgun/prompts/agents/specify.j2 +39 -16
  32. shotgun/prompts/agents/state/system_state.j2 +15 -6
  33. shotgun/prompts/agents/tasks.j2 +58 -34
  34. shotgun/tui/app.py +5 -6
  35. shotgun/tui/components/mode_indicator.py +120 -25
  36. shotgun/tui/components/status_bar.py +2 -2
  37. shotgun/tui/dependencies.py +64 -9
  38. shotgun/tui/protocols.py +37 -0
  39. shotgun/tui/screens/chat/chat.tcss +9 -1
  40. shotgun/tui/screens/chat/chat_screen.py +643 -11
  41. shotgun/tui/screens/chat_screen/command_providers.py +0 -87
  42. shotgun/tui/screens/chat_screen/history/agent_response.py +7 -3
  43. shotgun/tui/screens/chat_screen/history/chat_history.py +12 -0
  44. shotgun/tui/screens/chat_screen/history/formatters.py +53 -15
  45. shotgun/tui/screens/chat_screen/history/partial_response.py +11 -1
  46. shotgun/tui/screens/chat_screen/messages.py +219 -0
  47. shotgun/tui/screens/onboarding.py +30 -26
  48. shotgun/tui/utils/mode_progress.py +20 -86
  49. shotgun/tui/widgets/__init__.py +2 -1
  50. shotgun/tui/widgets/approval_widget.py +152 -0
  51. shotgun/tui/widgets/cascade_confirmation_widget.py +203 -0
  52. shotgun/tui/widgets/plan_panel.py +129 -0
  53. shotgun/tui/widgets/step_checkpoint_widget.py +180 -0
  54. {shotgun_sh-0.3.3.dev1.dist-info → shotgun_sh-0.4.0.dev1.dist-info}/METADATA +3 -3
  55. {shotgun_sh-0.3.3.dev1.dist-info → shotgun_sh-0.4.0.dev1.dist-info}/RECORD +58 -45
  56. {shotgun_sh-0.3.3.dev1.dist-info → shotgun_sh-0.4.0.dev1.dist-info}/WHEEL +0 -0
  57. {shotgun_sh-0.3.3.dev1.dist-info → shotgun_sh-0.4.0.dev1.dist-info}/entry_points.txt +0 -0
  58. {shotgun_sh-0.3.3.dev1.dist-info → shotgun_sh-0.4.0.dev1.dist-info}/licenses/LICENSE +0 -0
shotgun/agents/config/manager.py CHANGED
@@ -51,7 +51,7 @@ class ConfigMigrationError(Exception):
  ProviderConfig = OpenAIConfig | AnthropicConfig | GoogleConfig | ShotgunAccountConfig
 
  # Current config version
- CURRENT_CONFIG_VERSION = 5
+ CURRENT_CONFIG_VERSION = 6
 
  # Backup directory name
  BACKUP_DIR_NAME = "backup"
@@ -183,6 +183,26 @@ def _migrate_v4_to_v5(data: dict[str, Any]) -> dict[str, Any]:
      return data
 
 
+ def _migrate_v5_to_v6(data: dict[str, Any]) -> dict[str, Any]:
+     """Migrate config from version 5 to version 6.
+ 
+     Changes:
+     - Add 'router_mode' field with default 'planning'
+ 
+     Args:
+         data: Config data dict at version 5
+ 
+     Returns:
+         Modified config data dict at version 6
+     """
+     if "router_mode" not in data:
+         data["router_mode"] = "planning"
+         logger.info("Migrated config v5->v6: added router_mode field")
+ 
+     data["config_version"] = 6
+     return data
+ 
+ 
  def _apply_migrations(data: dict[str, Any]) -> dict[str, Any]:
      """Apply all necessary migrations to bring config to current version.
 
@@ -203,6 +223,7 @@ def _apply_migrations(data: dict[str, Any]) -> dict[str, Any]:
          2: _migrate_v2_to_v3,
          3: _migrate_v3_to_v4,
          4: _migrate_v4_to_v5,
+         5: _migrate_v5_to_v6,
      }
 
      # Apply migrations sequentially
@@ -772,6 +793,26 @@ class ConfigManager:
          await self.save(config)
          logger.info("Updated Shotgun Account configuration")
 
+     async def get_router_mode(self) -> str:
+         """Get the saved router mode.
+ 
+         Returns:
+             The router mode string ('planning' or 'drafting')
+         """
+         config = await self.load()
+         return config.router_mode
+ 
+     async def set_router_mode(self, mode: str) -> None:
+         """Save the router mode.
+ 
+         Args:
+             mode: Router mode to save ('planning' or 'drafting')
+         """
+         config = await self.load()
+         config.router_mode = mode
+         await self.save(config)
+         logger.debug("Router mode saved: %s", mode)
+ 
 
  # Global singleton instance
  _config_manager_instance: ConfigManager | None = None
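The migration above follows the package's sequential-upgrade pattern: each `_migrate_vN_to_vN+1` function mutates the config dict one version step, and `_apply_migrations` walks the table until `CURRENT_CONFIG_VERSION` is reached. A minimal, self-contained sketch of that flow; the driver loop here is a simplified stand-in, not the module's exact `_apply_migrations`:

```python
from typing import Any

CURRENT_CONFIG_VERSION = 6

def migrate_v5_to_v6(data: dict[str, Any]) -> dict[str, Any]:
    # Mirrors the hunk above: add router_mode with its default, then bump the version.
    data.setdefault("router_mode", "planning")
    data["config_version"] = 6
    return data

# One entry per version step; the real table also maps versions 1-4 to earlier migrations.
MIGRATIONS = {5: migrate_v5_to_v6}

def apply_migrations(data: dict[str, Any]) -> dict[str, Any]:
    # Simplified driver: repeatedly apply the migration registered for the current version.
    while data.get("config_version", 1) < CURRENT_CONFIG_VERSION:
        data = MIGRATIONS[data.get("config_version", 1)](data)
    return data

print(apply_migrations({"config_version": 5}))
# {'config_version': 6, 'router_mode': 'planning'}
```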
shotgun/agents/config/models.py CHANGED
@@ -226,6 +226,18 @@ class ShotgunAccountConfig(BaseModel):
          default=None, description="Default workspace ID for shared specs"
      )
 
+     @property
+     def has_valid_account(self) -> bool:
+         """Check if the user has a valid Shotgun Account configured.
+ 
+         Returns:
+             True if api_key is set and non-empty, False otherwise
+         """
+         if self.api_key is None:
+             return False
+         value = self.api_key.get_secret_value()
+         return bool(value and value.strip())
+ 
 
  class MarketingMessageRecord(BaseModel):
      """Record of when a marketing message was shown to the user."""
@@ -277,3 +289,7 @@ class ShotgunConfig(BaseModel):
          default=None,
          description="Path to the backup file created when migration failed",
      )
+     router_mode: str = Field(
+         default="planning",
+         description="Router execution mode: 'planning' or 'drafting'",
+     )
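The `has_valid_account` property treats a missing, empty, or whitespace-only key as unconfigured. A short sketch, assuming `api_key` is a pydantic `SecretStr` (implied by the `get_secret_value()` call above); `AccountSketch` is a stand-in, not the real `ShotgunAccountConfig`:

```python
from pydantic import BaseModel, SecretStr

class AccountSketch(BaseModel):
    api_key: SecretStr | None = None

    @property
    def has_valid_account(self) -> bool:
        # Same logic as the property added above.
        if self.api_key is None:
            return False
        value = self.api_key.get_secret_value()
        return bool(value and value.strip())

print(AccountSketch().has_valid_account)                  # False: no key
print(AccountSketch(api_key="   ").has_valid_account)     # False: whitespace only
print(AccountSketch(api_key="sk-123").has_valid_account)  # True
```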
shotgun/agents/conversation/history/file_content_deduplication.py CHANGED
@@ -5,8 +5,6 @@ tool returns before LLM-based compaction. Files are still accessible via
  `retrieve_code` (codebase) or `read_file` (.shotgun/ folder).
  """
 
- import copy
- import re
  from enum import StrEnum
  from typing import Any
 
@@ -43,40 +41,46 @@ SHOTGUN_PLACEHOLDER = (
      "**Content**: [Removed for compaction - file persisted in .shotgun/ folder]"
  )
 
- # Pattern for parsing file_read output (codebase files)
- # Format: **File**: `path`\n**Size**: N bytes\n[optional encoding]\n\n**Content**:\n```lang\ncontent```
- CODEBASE_FILE_PATTERN = re.compile(
-     r"\*\*File\*\*:\s*`([^`]+)`\s*\n"  # File path
-     r"\*\*Size\*\*:\s*(\d+)\s*bytes\s*\n"  # Size in bytes
-     r"(?:\*\*Encoding\*\*:.*?\n)?"  # Optional encoding line
-     r"\n\*\*Content\*\*:\s*\n"  # Blank line + Content header
-     r"```(\w*)\n"  # Language tag
-     r"(.*?)```",  # Actual content
-     re.DOTALL,
- )
+ # Simple prefix for detecting file_read output format
+ # Instead of using regex, we just check for the expected prefix and extract the file path
+ CODEBASE_FILE_PREFIX = "**File**: `"
+ 
 
- def _parse_codebase_file_content(
-     content: str,
- ) -> tuple[str, int, str, str] | None:
-     """Parse file_read tool return content.
+ def _extract_file_path(content: str) -> str | None:
+     """Extract file path from file_read tool return content.
+ 
+     Uses simple string operations instead of regex for maximum performance.
+     The file_read tool output format is: **File**: `path`\\n...
 
      Args:
          content: The tool return content string
 
      Returns:
-         Tuple of (file_path, size_bytes, language, actual_content) or None if not parseable
+         The file path or None if format doesn't match
      """
-     match = CODEBASE_FILE_PATTERN.search(content)
-     if not match:
+     # Fast check: content must start with expected prefix
+     if not content.startswith(CODEBASE_FILE_PREFIX):
+         return None
+ 
+     # Find the closing backtick after the prefix
+     prefix_len = len(CODEBASE_FILE_PREFIX)
+     backtick_pos = content.find("`", prefix_len)
+ 
+     if backtick_pos == -1:
          return None
 
-     file_path = match.group(1)
-     size_bytes = int(match.group(2))
-     language = match.group(3) or ""
-     actual_content = match.group(4)
+     return content[prefix_len:backtick_pos]
+ 
+ 
+ def _get_language_from_path(file_path: str) -> str:
+     """Infer programming language from file extension."""
+     from pathlib import Path
+ 
+     from shotgun.codebase.core.language_config import get_language_config
 
-     return file_path, size_bytes, language, actual_content
+     ext = Path(file_path).suffix
+     config = get_language_config(ext)
+     return config.name if config else "unknown"
 
 
  def _create_codebase_placeholder(file_path: str, size_bytes: int, language: str) -> str:
@@ -110,6 +114,11 @@ def deduplicate_file_content(
      This is a deterministic pre-compaction pass that reduces tokens without
      requiring an LLM. Files remain accessible via their respective tools.
 
+     This function uses copy-on-write semantics: only messages that need
+     modification are copied, while unmodified messages are reused by reference.
+     This significantly reduces memory allocation and processing time for large
+     conversations where only a subset of messages contain file content.
+ 
      Args:
          messages: Conversation history
          retention_window: Keep full content in last N messages (for recent context)
@@ -120,15 +129,17 @@
      if not messages:
          return messages, 0
 
-     # Deep copy to avoid modifying original
-     modified_messages = copy.deepcopy(messages)
      total_tokens_saved = 0
      files_deduplicated = 0
 
      # Calculate retention boundary (keep last N messages intact)
-     retention_start = max(0, len(modified_messages) - retention_window)
+     retention_start = max(0, len(messages) - retention_window)
+ 
+     # Track which message indices need replacement
+     # We use a dict to store index -> new_message mappings
+     replacements: dict[int, ModelMessage] = {}
 
-     for msg_idx, message in enumerate(modified_messages):
+     for msg_idx, message in enumerate(messages):
          # Skip messages in retention window
          if msg_idx >= retention_start:
              continue
@@ -159,18 +170,18 @@
 
              # Handle codebase file reads (file_read)
              if tool_name == FileReadTool.CODEBASE:
-                 parsed = _parse_codebase_file_content(content)
-                 if parsed:
-                     file_path, size_bytes, language, actual_content = parsed
-                     # Only replace if actual content is substantial
-                     if len(actual_content) >= MIN_CONTENT_LENGTH:
-                         replacement = _create_codebase_placeholder(
-                             file_path, size_bytes, language
-                         )
-                         logger.debug(
-                             f"Deduplicating codebase file: {file_path} "
-                             f"({size_bytes} bytes)"
-                         )
+                 file_path = _extract_file_path(content)
+                 if file_path:
+                     # Use content length as size estimate (includes formatting overhead
+                     # but close enough for deduplication purposes)
+                     size_bytes = len(content)
+                     language = _get_language_from_path(file_path)
+                     replacement = _create_codebase_placeholder(
+                         file_path, size_bytes, language
+                     )
+                     logger.debug(
+                         f"Deduplicating codebase file: {file_path} ({size_bytes} bytes)"
+                     )
 
              # Handle .shotgun/ file reads (read_file)
              elif tool_name == FileReadTool.SHOTGUN_FOLDER:
@@ -203,9 +214,21 @@
              else:
                  new_parts.append(part)
 
-         # Replace message with new parts if modified
+         # Only create a new message if parts were actually modified
          if message_modified:
-             modified_messages[msg_idx] = ModelRequest(parts=new_parts)
+             replacements[msg_idx] = ModelRequest(parts=new_parts)
+ 
+     # If no modifications were made, return original list (no allocation needed)
+     if not replacements:
+         return messages, 0
+ 
+     # Build result list with copy-on-write: reuse unmodified messages
+     modified_messages: list[ModelMessage] = []
+     for idx, msg in enumerate(messages):
+         if idx in replacements:
+             modified_messages.append(replacements[idx])
+         else:
+             modified_messages.append(msg)
 
      if files_deduplicated > 0:
          logger.info(
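Two ideas drive this rewrite: the regex parser is replaced by a prefix scan that only needs the file path, and the upfront `deepcopy` is replaced by copy-on-write so untouched messages are reused by reference. A standalone sketch of the prefix scan; the helper name and sample string are illustrative, but the scanning logic mirrors `_extract_file_path` above:

```python
CODEBASE_FILE_PREFIX = "**File**: `"

def extract_file_path(content: str) -> str | None:
    # Prefix check, then find the closing backtick; the slice in between is the path.
    if not content.startswith(CODEBASE_FILE_PREFIX):
        return None
    prefix_len = len(CODEBASE_FILE_PREFIX)
    backtick_pos = content.find("`", prefix_len)
    if backtick_pos == -1:
        return None
    return content[prefix_len:backtick_pos]

sample = "**File**: `src/app.py`\n**Size**: 1234 bytes\n\n**Content**: ..."
print(extract_file_path(sample))               # src/app.py
print(extract_file_path("plain tool output"))  # None
```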
shotgun/agents/export.py CHANGED
@@ -2,16 +2,15 @@
 
  from functools import partial
 
- from pydantic_ai import (
-     Agent,
- )
  from pydantic_ai.agent import AgentRunResult
  from pydantic_ai.messages import ModelMessage
 
  from shotgun.agents.config import ProviderType
+ from shotgun.agents.models import ShotgunAgent
  from shotgun.logging_config import get_logger
 
  from .common import (
+     EventStreamHandler,
      add_system_status_message,
      build_agent_system_prompt,
      create_base_agent,
@@ -25,7 +24,7 @@ logger = get_logger(__name__)
 
  async def create_export_agent(
      agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
- ) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
+ ) -> tuple[ShotgunAgent, AgentDeps]:
      """Create an export agent with file management capabilities.
 
      Args:
@@ -49,39 +48,39 @@
 
 
  async def run_export_agent(
-     agent: Agent[AgentDeps, AgentResponse],
-     instruction: str,
+     agent: ShotgunAgent,
+     prompt: str,
      deps: AgentDeps,
      message_history: list[ModelMessage] | None = None,
+     event_stream_handler: EventStreamHandler | None = None,
  ) -> AgentRunResult[AgentResponse]:
-     """Export artifacts based on the given instruction.
+     """Export artifacts based on the given prompt.
 
      Args:
          agent: The configured export agent
-         instruction: The export instruction
+         prompt: The export prompt
          deps: Agent dependencies
          message_history: Optional message history for conversation continuity
+         event_stream_handler: Optional callback for streaming events
 
      Returns:
          AgentRunResult containing the export process output
      """
-     logger.debug("📤 Starting export for instruction: %s", instruction)
+     logger.debug("📤 Starting export for prompt: %s", prompt)
 
      message_history = await add_system_status_message(deps, message_history)
 
-     # Let the agent use its tools to read existing artifacts and export them
-     full_prompt = f"Export artifacts or findings based on: {instruction}"
- 
      try:
          # Create usage limits for responsible API usage
          usage_limits = create_usage_limits()
 
          result = await run_agent(
              agent=agent,
-             prompt=full_prompt,
+             prompt=prompt,
              deps=deps,
              message_history=message_history,
              usage_limits=usage_limits,
+             event_stream_handler=event_stream_handler,
          )
 
          logger.debug("✅ Export completed successfully")
shotgun/agents/models.py CHANGED
@@ -6,7 +6,7 @@ from collections.abc import Callable
  from datetime import datetime
  from enum import StrEnum
  from pathlib import Path
- from typing import TYPE_CHECKING
+ from typing import TYPE_CHECKING, TypeAlias
 
  from pydantic import BaseModel, ConfigDict, Field
  from pydantic_ai import RunContext
@@ -16,9 +16,57 @@ from shotgun.agents.usage_manager import SessionUsageManager, get_session_usage_
  from .config.models import ModelConfig
 
  if TYPE_CHECKING:
+     from pydantic_ai import Agent
+ 
+     from shotgun.agents.router.models import RouterDeps
      from shotgun.codebase.service import CodebaseService
 
 
+ class SubAgentContext(BaseModel):
+     """
+     Context passed to sub-agents so they know they're being orchestrated.
+ 
+     When sub-agents receive this context, they should:
+     - Be more concise (router handles user communication)
+     - Focus on their specific task
+     - Return structured results
+     """
+ 
+     is_router_delegated: bool = Field(
+         default=True, description="Always True when passed to sub-agent"
+     )
+     plan_goal: str = Field(
+         default="", description="High-level goal from execution plan"
+     )
+     current_step_id: str | None = Field(
+         default=None, description="ID of the current execution step"
+     )
+     current_step_title: str | None = Field(
+         default=None, description="Title of the current execution step"
+     )
+ 
+ 
+ class AgentSystemPromptContext(BaseModel):
+     """Context passed to agent system prompt templates.
+ 
+     This model standardizes the context variables passed to Jinja2 templates
+     when rendering agent system prompts. Using a model makes it easier to
+     test template context construction and ensures type safety.
+     """
+ 
+     interactive_mode: bool = Field(
+         description="Whether the agent is running in interactive mode"
+     )
+     mode: str = Field(description="The agent type (research, specify, plan, etc.)")
+     sub_agent_context: SubAgentContext | None = Field(
+         default=None, description="Context when running as a sub-agent of the router"
+     )
+     router_mode: str | None = Field(
+         default=None,
+         description="Router mode value (planning/drafting) if router agent",
+     )
+ 
+ 
  class AgentResponse(BaseModel):
      """Structured response from an agent with optional clarifying questions.
 
@@ -51,6 +99,7 @@ class AgentType(StrEnum):
      PLAN = "plan"
      TASKS = "tasks"
      EXPORT = "export"
+     ROUTER = "router"
 
 
  class PipelineConfigEntry(BaseModel):
@@ -319,6 +368,11 @@ class AgentDeps(AgentRuntimeOptions):
          description="Current agent mode for file scoping",
      )
 
+     sub_agent_context: SubAgentContext | None = Field(
+         default=None,
+         description="Context when agent is delegated to by router",
+     )
+ 
 
  # Rebuild model to resolve forward references after imports are available
  try:
@@ -328,3 +382,14 @@ try:
  except ImportError:
      # Services may not be available in all contexts
      pass
+ 
+ 
+ # Type alias for the standard agent type used throughout the codebase
+ ShotgunAgent: TypeAlias = "Agent[AgentDeps, AgentResponse]"
+ 
+ # Type alias for router agent (uses RouterDeps which extends AgentDeps)
+ # Note: Agent is contravariant in deps, so RouterAgent is NOT a subtype of ShotgunAgent
+ RouterAgent: TypeAlias = "Agent[RouterDeps, AgentResponse]"
+ 
+ # Union type for any agent type (used in AgentManager)
+ AnyAgent: TypeAlias = "ShotgunAgent | RouterAgent"
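The comment on `RouterAgent` is worth unpacking: because an agent consumes its deps, the deps parameter behaves contravariantly, so specializing deps to `RouterDeps` does not yield a subtype of `ShotgunAgent`. A small illustration with a hypothetical `AgentLike` generic, not pydantic-ai's actual `Agent` class:

```python
from typing import Generic, TypeVar

class AgentDeps: ...
class RouterDeps(AgentDeps): ...

# Deps are only consumed (appear in parameter position), hence contravariant.
DepsT = TypeVar("DepsT", contravariant=True)

class AgentLike(Generic[DepsT]):
    def run(self, deps: DepsT) -> None: ...

def needs_base_agent(agent: AgentLike[AgentDeps]) -> None: ...

router_agent: AgentLike[RouterDeps] = AgentLike()
needs_base_agent(AgentLike())    # fine: deps type matches exactly
# needs_base_agent(router_agent) # a type checker rejects this: AgentLike[RouterDeps]
#                                # is not assignable where AgentLike[AgentDeps] is expected,
#                                # which is why AnyAgent is a union rather than a subtype.
```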
shotgun/agents/plan.py CHANGED
@@ -2,16 +2,15 @@
 
  from functools import partial
 
- from pydantic_ai import (
-     Agent,
- )
  from pydantic_ai.agent import AgentRunResult
  from pydantic_ai.messages import ModelMessage
 
  from shotgun.agents.config import ProviderType
+ from shotgun.agents.models import ShotgunAgent
  from shotgun.logging_config import get_logger
 
  from .common import (
+     EventStreamHandler,
      add_system_status_message,
      build_agent_system_prompt,
      create_base_agent,
@@ -25,7 +24,7 @@ logger = get_logger(__name__)
 
  async def create_plan_agent(
      agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
- ) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
+ ) -> tuple[ShotgunAgent, AgentDeps]:
      """Create a plan agent with artifact management capabilities.
 
      Args:
@@ -51,26 +50,25 @@
 
 
  async def run_plan_agent(
-     agent: Agent[AgentDeps, AgentResponse],
-     goal: str,
+     agent: ShotgunAgent,
+     prompt: str,
      deps: AgentDeps,
      message_history: list[ModelMessage] | None = None,
+     event_stream_handler: EventStreamHandler | None = None,
  ) -> AgentRunResult[AgentResponse]:
-     """Create or update a plan based on the given goal using artifacts.
+     """Create or update a plan based on the given prompt using artifacts.
 
      Args:
          agent: The configured plan agent
-         goal: The planning goal or instruction
+         prompt: The planning prompt or instruction
          deps: Agent dependencies
          message_history: Optional message history for conversation continuity
+         event_stream_handler: Optional callback for streaming events
 
      Returns:
          AgentRunResult containing the planning process output
      """
-     logger.debug("📋 Starting planning for goal: %s", goal)
- 
-     # Simple prompt - the agent system prompt has all the artifact instructions
-     full_prompt = f"Create a comprehensive plan for: {goal}"
+     logger.debug("📋 Starting planning for prompt: %s", prompt)
 
      try:
          # Create usage limits for responsible API usage
@@ -80,10 +78,11 @@ run_plan_agent
 
          result = await run_agent(
              agent=agent,
-             prompt=full_prompt,
+             prompt=prompt,
              deps=deps,
              message_history=message_history,
              usage_limits=usage_limits,
+             event_stream_handler=event_stream_handler,
          )
 
          logger.debug("✅ Planning completed successfully")
shotgun/agents/research.py CHANGED
@@ -2,18 +2,17 @@
 
  from functools import partial
 
- from pydantic_ai import (
-     Agent,
- )
  from pydantic_ai.agent import AgentRunResult
  from pydantic_ai.messages import (
      ModelMessage,
  )
 
  from shotgun.agents.config import ProviderType
+ from shotgun.agents.models import ShotgunAgent
  from shotgun.logging_config import get_logger
 
  from .common import (
+     EventStreamHandler,
      add_system_status_message,
      build_agent_system_prompt,
      create_base_agent,
@@ -28,7 +27,7 @@ logger = get_logger(__name__)
 
  async def create_research_agent(
      agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
- ) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
+ ) -> tuple[ShotgunAgent, AgentDeps]:
      """Create a research agent with web search and artifact management capabilities.
 
      Args:
@@ -65,22 +64,25 @@
 
 
  async def run_research_agent(
-     agent: Agent[AgentDeps, AgentResponse],
-     query: str,
+     agent: ShotgunAgent,
+     prompt: str,
      deps: AgentDeps,
      message_history: list[ModelMessage] | None = None,
+     event_stream_handler: EventStreamHandler | None = None,
  ) -> AgentRunResult[AgentResponse]:
-     """Perform research on the given query and update research artifacts.
+     """Perform research on the given prompt and update research artifacts.
 
      Args:
          agent: The configured research agent
-         query: The research query to investigate
+         prompt: The research prompt to investigate
          deps: Agent dependencies
+         message_history: Optional message history for conversation continuity
+         event_stream_handler: Optional callback for streaming events
 
      Returns:
          Summary of research findings
      """
-     logger.debug("🔬 Starting research for query: %s", query)
+     logger.debug("🔬 Starting research for prompt: %s", prompt)
 
      message_history = await add_system_status_message(deps, message_history)
 
@@ -90,10 +92,11 @@
 
          result = await run_agent(
              agent=agent,
-             prompt=query,
+             prompt=prompt,
              deps=deps,
              message_history=message_history,
              usage_limits=usage_limits,
+             event_stream_handler=event_stream_handler,
          )
 
          logger.debug("✅ Research completed successfully")
shotgun/agents/router/__init__.py ADDED
@@ -0,0 +1,47 @@
+ """Router Agent - The intelligent orchestrator for shotgun agents."""
+ 
+ from shotgun.agents.router.models import (
+     CascadeScope,
+     CreatePlanInput,
+     DelegationInput,
+     DelegationResult,
+     ExecutionPlan,
+     ExecutionStep,
+     ExecutionStepInput,
+     MarkStepDoneInput,
+     PlanApprovalStatus,
+     RemoveStepInput,
+     RouterDeps,
+     RouterMode,
+     StepCheckpointAction,
+     SubAgentResult,
+     SubAgentResultStatus,
+     ToolResult,
+ )
+ from shotgun.agents.router.router import create_router_agent, run_router_agent
+ 
+ __all__ = [
+     # Agent factory
+     "create_router_agent",
+     "run_router_agent",
+     # Enums
+     "RouterMode",
+     "PlanApprovalStatus",
+     "StepCheckpointAction",
+     "CascadeScope",
+     "SubAgentResultStatus",
+     # Plan models
+     "ExecutionStep",
+     "ExecutionPlan",
+     # Tool I/O models
+     "ExecutionStepInput",
+     "CreatePlanInput",
+     "MarkStepDoneInput",
+     "RemoveStepInput",
+     "DelegationInput",
+     "ToolResult",
+     "DelegationResult",
+     "SubAgentResult",
+     # Deps
+     "RouterDeps",
+ ]
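A minimal sketch of consuming the new package's public surface. Only the import layout and the mode enum are exercised here; the signatures of `create_router_agent` and `run_router_agent` live in `router.py`, which is not shown in this diff, so calling them is left out:

```python
from shotgun.agents.router import RouterMode, create_router_agent, run_router_agent

# Enumerate the modes rather than assuming specific values; the config migration
# above suggests 'planning' and 'drafting' are the expected members.
print([mode.value for mode in RouterMode])
print(create_router_agent.__name__, run_router_agent.__name__)
```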