shotgun-sh 0.2.6.dev1__py3-none-any.whl → 0.2.6.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shotgun-sh might be problematic.

@@ -10,8 +10,6 @@ if TYPE_CHECKING:
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
-    DeferredToolResults,
     RunContext,
     UsageLimits,
 )
@@ -37,7 +35,7 @@ from textual.widget import Widget
 
 from shotgun.agents.common import add_system_prompt_message, add_system_status_message
 from shotgun.agents.config.models import KeyProvider
-from shotgun.agents.models import AgentType, FileOperation
+from shotgun.agents.models import AgentResponse, AgentType, FileOperation
 from shotgun.posthog_telemetry import track_event
 from shotgun.tui.screens.chat_screen.hint_message import HintMessage
 from shotgun.utils.source_detection import detect_source
@@ -45,7 +43,7 @@ from shotgun.utils.source_detection import detect_source
 from .export import create_export_agent
 from .history.compaction import apply_persistent_compaction
 from .messages import AgentSystemPrompt
-from .models import AgentDeps, AgentRuntimeOptions, UserAnswer
+from .models import AgentDeps, AgentRuntimeOptions
 from .plan import create_plan_agent
 from .research import create_research_agent
 from .specify import create_specify_agent
@@ -92,6 +90,25 @@ class PartialResponseMessage(Message):
         self.is_last = is_last
 
 
+class ClarifyingQuestionsMessage(Message):
+    """Event posted when agent returns clarifying questions."""
+
+    def __init__(
+        self,
+        questions: list[str],
+        response_text: str,
+    ) -> None:
+        """Initialize the clarifying questions message.
+
+        Args:
+            questions: List of clarifying questions from the agent
+            response_text: The agent's response text before asking questions
+        """
+        super().__init__()
+        self.questions = questions
+        self.response_text = response_text
+
+
 @dataclass(slots=True)
 class _PartialStreamState:
     """Tracks streamed messages while handling a single agent run."""
@@ -158,8 +175,12 @@ class AgentManager(Widget):
         self.recently_change_files: list[FileOperation] = []
         self._stream_state: _PartialStreamState | None = None
 
+        # Q&A mode state for structured output questions
+        self._qa_questions: list[str] | None = None
+        self._qa_mode_active: bool = False
+
     @property
-    def current_agent(self) -> Agent[AgentDeps, str | DeferredToolRequests]:
+    def current_agent(self) -> Agent[AgentDeps, AgentResponse]:
         """Get the currently active agent.
 
         Returns:
@@ -167,9 +188,7 @@
         """
         return self._get_agent(self._current_agent_type)
 
-    def _get_agent(
-        self, agent_type: AgentType
-    ) -> Agent[AgentDeps, str | DeferredToolRequests]:
+    def _get_agent(self, agent_type: AgentType) -> Agent[AgentDeps, AgentResponse]:
         """Get agent by type.
 
         Args:
@@ -252,9 +271,8 @@
         *,
         deps: AgentDeps | None = None,
         usage_limits: UsageLimits | None = None,
-        deferred_tool_results: DeferredToolResults | None = None,
         **kwargs: Any,
-    ) -> AgentRunResult[str | DeferredToolRequests]:
+    ) -> AgentRunResult[AgentResponse]:
         """Run the current agent with automatic message history management.
 
         This method wraps the agent's run method, automatically injecting the
@@ -264,7 +282,6 @@
             prompt: Optional prompt to send to the agent.
             deps: Optional dependencies override (defaults to manager's deps).
             usage_limits: Optional usage limits for the agent run.
-            deferred_tool_results: Optional deferred tool results for continuing a conversation.
             **kwargs: Additional keyword arguments to pass to the agent.
 
         Returns:
@@ -274,15 +291,6 @@
         # Use merged deps (shared state + agent-specific system prompt) if not provided
         if deps is None:
             deps = self._create_merged_deps(self._current_agent_type)
-        ask_user_part = self.get_unanswered_ask_user_part()
-        if ask_user_part and prompt:
-            if not deferred_tool_results:
-                deferred_tool_results = DeferredToolResults()
-            deferred_tool_results.calls[ask_user_part.tool_call_id] = UserAnswer(
-                answer=prompt,
-                tool_call_id=ask_user_part.tool_call_id,
-            )
-            prompt = None
 
         # Ensure deps is not None
         if deps is None:
@@ -378,20 +386,16 @@
             event_name,
             {
                 "has_prompt": prompt is not None,
-                "has_deferred_results": deferred_tool_results is not None,
                 "model_name": model_name,
            },
        )
 
        try:
-            result: AgentRunResult[
-                str | DeferredToolRequests
-            ] = await self.current_agent.run(
+            result: AgentRunResult[AgentResponse] = await self.current_agent.run(
                prompt,
                deps=deps,
                usage_limits=usage_limits,
                message_history=message_history,
-                deferred_tool_results=deferred_tool_results,
                event_stream_handler=self._handle_event_stream
                if not is_gpt5_byok
                else None,
@@ -400,10 +404,68 @@
         finally:
             self._stream_state = None
 
+        # Agent ALWAYS returns AgentResponse with structured output
+        agent_response = result.output
+        logger.debug("Agent returned structured AgentResponse")
+
+        # Always add the agent's response messages to maintain conversation history
         self.ui_message_history = original_messages + cast(
             list[ModelRequest | ModelResponse | HintMessage], result.new_messages()
         )
 
+        # Check if there are clarifying questions
+        if agent_response.clarifying_questions:
+            logger.info(
+                f"Agent has {len(agent_response.clarifying_questions)} clarifying questions"
+            )
+
+            # Add agent's response first if present
+            if agent_response.response:
+                self.ui_message_history.append(
+                    HintMessage(message=agent_response.response)
+                )
+
+            if len(agent_response.clarifying_questions) == 1:
+                # Single question - treat as non-blocking suggestion, DON'T enter Q&A mode
+                self.ui_message_history.append(
+                    HintMessage(message=f"💡 {agent_response.clarifying_questions[0]}")
+                )
+            else:
+                # Multiple questions (2+) - enter Q&A mode
+                self._qa_questions = agent_response.clarifying_questions
+                self._qa_mode_active = True
+
+                # Show intro with list, then first question
+                questions_list_with_intro = (
+                    f"I have {len(agent_response.clarifying_questions)} questions:\n\n"
+                    + "\n".join(
+                        f"{i + 1}. {q}"
+                        for i, q in enumerate(agent_response.clarifying_questions)
+                    )
+                )
+                self.ui_message_history.append(
+                    HintMessage(message=questions_list_with_intro)
+                )
+                self.ui_message_history.append(
+                    HintMessage(
+                        message=f"**Q1:** {agent_response.clarifying_questions[0]}"
+                    )
+                )
+
+            # Post event to TUI to update Q&A mode state (only for multiple questions)
+            self.post_message(
+                ClarifyingQuestionsMessage(
+                    questions=agent_response.clarifying_questions,
+                    response_text=agent_response.response,
+                )
+            )
+        else:
+            # No clarifying questions - just show the response if present
+            if agent_response.response and agent_response.response.strip():
+                self.ui_message_history.append(
+                    HintMessage(message=agent_response.response)
+                )
+
         # Apply compaction to persistent message history to prevent cascading growth
         all_messages = result.all_messages()
         self.message_history = await apply_persistent_compaction(all_messages, deps)
@@ -416,6 +478,7 @@
         file_operations = deps.file_tracker.operations.copy()
         self.recently_change_files = file_operations
 
+        # Post message history update (hints are now added synchronously above)
         self._post_messages_updated(file_operations)
 
         return result
@@ -702,22 +765,6 @@ class AgentManager(Widget):
         self.ui_message_history.append(message)
         self._post_messages_updated()
 
-    def get_unanswered_ask_user_part(self) -> ToolCallPart | None:
-        if not self.message_history:
-            return None
-        self.last_response = self.message_history[-1]
-        ## we're searching for unanswered ask_user parts
-        found_tool = next(
-            (
-                part
-                for part in self.message_history[-1].parts
-                if isinstance(part, ToolCallPart) and part.tool_name == "ask_user"
-            ),
-            None,
-        )
-
-        return found_tool
-
 
 
 # Re-export AgentType for backward compatibility
@@ -725,4 +772,5 @@ __all__ = [
     "AgentType",
     "MessageHistoryUpdated",
    "PartialResponseMessage",
+    "ClarifyingQuestionsMessage",
 ]
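
For orientation, the new ClarifyingQuestionsMessage is consumed on the Textual side like any other bubbled message. The sketch below is illustrative only and not part of this diff: the ChatScreen class, its attributes, and the handler wiring are assumptions, relying on Textual's default on_<message_name> handler naming.

from __future__ import annotations

from textual.screen import Screen


class ChatScreen(Screen):
    """Hypothetical consumer of ClarifyingQuestionsMessage (not from this package)."""

    def on_clarifying_questions_message(
        self,
        message: ClarifyingQuestionsMessage,  # exported by the agent-manager module above
    ) -> None:
        # Two or more questions switch the input into blocking Q&A mode;
        # the question text itself was already rendered as HintMessages
        # by AgentManager before this event was posted.
        self.qa_questions = list(message.questions)
        self.qa_answers: list[str] = []
        self.qa_intro = message.response_text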
shotgun/agents/common.py CHANGED
@@ -1,14 +1,11 @@
 """Common utilities for agent creation and management."""
 
-import asyncio
 from collections.abc import Callable
 from pathlib import Path
 from typing import Any
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
-    DeferredToolResults,
     RunContext,
     UsageLimits,
 )
@@ -19,7 +16,7 @@ from pydantic_ai.messages import (
 )
 
 from shotgun.agents.config import ProviderType, get_provider_model
-from shotgun.agents.models import AgentType
+from shotgun.agents.models import AgentResponse, AgentType
 from shotgun.logging_config import get_logger
 from shotgun.prompts import PromptLoader
 from shotgun.sdk.services import get_codebase_service
@@ -28,12 +25,10 @@ from shotgun.utils.datetime_utils import get_datetime_context
 from shotgun.utils.file_system_utils import get_shotgun_base_path
 
 from .history import token_limit_compactor
-from .history.compaction import apply_persistent_compaction
 from .messages import AgentSystemPrompt, SystemStatusPrompt
 from .models import AgentDeps, AgentRuntimeOptions, PipelineConfigEntry
 from .tools import (
     append_file,
-    ask_user,
     codebase_shell,
     directory_lister,
     file_read,
@@ -106,7 +101,7 @@
     additional_tools: list[Any] | None = None,
     provider: ProviderType | None = None,
     agent_mode: AgentType | None = None,
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create a base agent with common configuration.
 
     Args:
@@ -164,7 +159,7 @@
 
     agent = Agent(
         model,
-        output_type=[str, DeferredToolRequests],
+        output_type=AgentResponse,
         deps_type=AgentDeps,
         instrument=True,
         history_processors=[history_processor],
@@ -179,11 +174,6 @@
     for tool in additional_tools or []:
         agent.tool_plain(tool)
 
-    # Register interactive tool conditionally based on deps
-    if deps.interactive_mode:
-        agent.tool(ask_user)
-        logger.debug("📞 Interactive mode enabled - ask_user tool registered")
-
     # Register common file management tools (always available)
     agent.tool(write_file)
     agent.tool(append_file)
@@ -323,7 +313,9 @@ def extract_markdown_toc(agent_mode: AgentType | None) -> str | None:
         if prior_toc:
             # Add section with XML tags
             toc_sections.append(
-                f'<TABLE_OF_CONTENTS file_name="{prior_file}">\n{prior_toc}\n</TABLE_OF_CONTENTS>'
+                f'<TABLE_OF_CONTENTS file_name="{prior_file}">\n'
+                f"{prior_toc}\n"
+                f"</TABLE_OF_CONTENTS>"
             )
 
     # Extract TOC from own file (full detail)
@@ -334,7 +326,9 @@ def extract_markdown_toc(agent_mode: AgentType | None) -> str | None:
         # Put own file TOC at the beginning with XML tags
         toc_sections.insert(
             0,
-            f'<TABLE_OF_CONTENTS file_name="{config.own_file}">\n{own_toc}\n</TABLE_OF_CONTENTS>',
+            f'<TABLE_OF_CONTENTS file_name="{config.own_file}">\n'
+            f"{own_toc}\n"
+            f"</TABLE_OF_CONTENTS>",
         )
 
     # Combine all sections
@@ -476,7 +470,8 @@ async def add_system_prompt_message(
     message_history = message_history or []
 
     # Create a minimal RunContext to call the system prompt function
-    # We'll pass None for model and usage since they're not used by our system prompt functions
+    # We'll pass None for model and usage since they're not used
+    # by our system prompt functions
     context = type(
         "RunContext", (), {"deps": deps, "retry": 0, "model": None, "usage": None}
     )()
@@ -500,12 +495,12 @@
 
 
 async def run_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     prompt: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
     usage_limits: UsageLimits | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     # Clear file tracker for new run
     deps.file_tracker.clear()
     logger.debug("🔧 Cleared file tracker for new agent run")
@@ -520,33 +515,6 @@ async def run_agent(
         message_history=message_history,
     )
 
-    # Apply persistent compaction to prevent cascading token growth across CLI commands
-    messages = await apply_persistent_compaction(result.all_messages(), deps)
-    while isinstance(result.output, DeferredToolRequests):
-        logger.info("got deferred tool requests")
-        await deps.queue.join()
-        requests = result.output
-        done, _ = await asyncio.wait(deps.tasks)
-
-        task_results = [task.result() for task in done]
-        task_results_by_tool_call_id = {
-            result.tool_call_id: result.answer for result in task_results
-        }
-        logger.info("got task results", task_results_by_tool_call_id)
-        results = DeferredToolResults()
-        for call in requests.calls:
-            results.calls[call.tool_call_id] = task_results_by_tool_call_id[
-                call.tool_call_id
-            ]
-        result = await agent.run(
-            deps=deps,
-            usage_limits=usage_limits,
-            message_history=messages,
-            deferred_tool_results=results,
-        )
-        # Apply persistent compaction to prevent cascading token growth in multi-turn loops
-        messages = await apply_persistent_compaction(result.all_messages(), deps)
-
     # Log file operations summary if any files were modified
     if deps.file_tracker.operations:
         summary = deps.file_tracker.format_summary()
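
The net effect of the changes in create_base_agent and run_agent is that every agent now has a single structured output type instead of the str | DeferredToolRequests union. A minimal standalone sketch of that pattern with pydantic-ai follows; the model string and prompt are placeholders, not values taken from this package:

import asyncio

from pydantic_ai import Agent

from shotgun.agents.models import AgentResponse

# output_type=AgentResponse means result.output is always a validated
# AgentResponse instance rather than a string or deferred-tool marker.
agent = Agent("openai:gpt-4o", output_type=AgentResponse)


async def main() -> None:
    result = await agent.run("Summarize the current research artifacts")
    response = result.output
    print(response.response)
    if response.clarifying_questions:
        print("Agent wants answers to:", response.clarifying_questions)


asyncio.run(main())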
shotgun/agents/export.py CHANGED
@@ -4,7 +4,6 @@ from functools import partial
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import ModelMessage
@@ -19,14 +18,14 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
 
 logger = get_logger(__name__)
 
 
 def create_export_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create an export agent with file management capabilities.
 
     Args:
@@ -50,11 +49,11 @@
 
 
 async def run_export_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     instruction: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Export artifacts based on the given instruction.
 
     Args:
@@ -1,5 +1,6 @@
 """Anthropic token counting using official client."""
 
+import logfire
 from pydantic_ai.messages import ModelMessage
 
 from shotgun.agents.config.models import KeyProvider
@@ -49,7 +50,15 @@ class AnthropicTokenCounter(TokenCounter):
                 f"Initialized async Anthropic token counter for {model_name} via direct API"
             )
         except Exception as e:
-            raise RuntimeError("Failed to initialize Anthropic async client") from e
+            logfire.exception(
+                f"Failed to initialize Anthropic token counter for {model_name}",
+                model_name=model_name,
+                key_provider=key_provider.value,
+                exception_type=type(e).__name__,
+            )
+            raise RuntimeError(
+                f"Failed to initialize Anthropic async client for {model_name}: {type(e).__name__}: {str(e)}"
+            ) from e
 
     async def count_tokens(self, text: str) -> int:
         """Count tokens using Anthropic's official API (async).
@@ -71,8 +80,19 @@
             )
             return result.input_tokens
         except Exception as e:
+            # Create a preview of the text for logging (truncated to avoid huge logs)
+            text_preview = text[:100] + "..." if len(text) > 100 else text
+
+            logfire.exception(
+                f"Anthropic token counting failed for {self.model_name}",
+                model_name=self.model_name,
+                text_length=len(text),
+                text_preview=text_preview,
+                exception_type=type(e).__name__,
+                exception_message=str(e),
+            )
             raise RuntimeError(
-                f"Anthropic token counting API failed for {self.model_name}"
+                f"Anthropic token counting API failed for {self.model_name}: {type(e).__name__}: {str(e)}"
             ) from e
 
     async def count_message_tokens(self, messages: list[ModelMessage]) -> int:
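
Both token-counter changes apply the same error-handling pattern: emit a structured logfire event carrying the exception type and some context attributes, then re-raise a RuntimeError whose message now includes the exception class and text. A condensed sketch of that pattern is shown below; count_fn is a stand-in callable for illustration, not this package's API:

import logfire


async def count_with_logging(model_name: str, text: str, count_fn) -> int:
    """Count tokens, logging structured context before re-raising failures."""
    try:
        return await count_fn(text)
    except Exception as e:
        # Keep only a short preview of the text so logs stay small and searchable.
        text_preview = text[:100] + "..." if len(text) > 100 else text
        logfire.exception(
            f"Anthropic token counting failed for {model_name}",
            model_name=model_name,
            text_length=len(text),
            text_preview=text_preview,
            exception_type=type(e).__name__,
            exception_message=str(e),
        )
        raise RuntimeError(
            f"Anthropic token counting API failed for {model_name}: "
            f"{type(e).__name__}: {e}"
        ) from e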
shotgun/agents/models.py CHANGED
@@ -19,6 +19,30 @@ if TYPE_CHECKING:
     from shotgun.codebase.service import CodebaseService
 
 
+class AgentResponse(BaseModel):
+    """Structured response from an agent with optional clarifying questions.
+
+    This model provides a consistent response format for all agents:
+    - response: The main response text (can be empty if only asking questions)
+    - clarifying_questions: Optional list of questions to ask the user
+
+    When clarifying_questions is provided, the agent expects to receive
+    answers before continuing its work. This replaces the ask_questions tool.
+    """
+
+    response: str = Field(
+        description="The agent's response text. Always respond with some text summarizing what happened, whats next, etc.",
+    )
+    clarifying_questions: list[str] | None = Field(
+        default=None,
+        description="""
+        Optional list of clarifying questions to ask the user.
+        - Single question: Shown as a non-blocking suggestion (user can answer or continue with other prompts)
+        - Multiple questions (2+): Asked sequentially in Q&A mode (blocks input until all answered or cancelled)
+        """,
+    )
+
+
 class AgentType(StrEnum):
     """Enumeration for available agent types."""
 
@@ -73,6 +97,30 @@ class UserQuestion(BaseModel):
     )
 
 
+class MultipleUserQuestions(BaseModel):
+    """Multiple questions to ask the user sequentially."""
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    questions: list[str] = Field(
+        description="List of questions to ask the user",
+    )
+    current_index: int = Field(
+        default=0,
+        description="Current question index being asked",
+    )
+    answers: list[str] = Field(
+        default_factory=list,
+        description="Accumulated answers from the user",
+    )
+    tool_call_id: str = Field(
+        description="Tool call id",
+    )
+    result: Future[UserAnswer] = Field(
+        description="Future that will contain all answers formatted as Q&A pairs"
+    )
+
+
 class AgentRuntimeOptions(BaseModel):
     """User interface options for agents."""
 
@@ -100,9 +148,9 @@ class AgentRuntimeOptions(BaseModel):
         description="Maximum number of iterations for agent loops",
     )
 
-    queue: Queue[UserQuestion] = Field(
+    queue: Queue[UserQuestion | MultipleUserQuestions] = Field(
         default_factory=Queue,
-        description="Queue for storing user responses",
+        description="Queue for storing user questions (single or multiple)",
     )
 
     tasks: list[Future[UserAnswer]] = Field(
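
The single-question versus multi-question behaviour documented on clarifying_questions mirrors the branch AgentManager takes after each run. A reduced sketch of that dispatch follows, with a plain render callback standing in for the real HintMessage plumbing; the dispatch function itself is illustrative, not part of the package:

from collections.abc import Callable

from shotgun.agents.models import AgentResponse


def dispatch(response: AgentResponse, render: Callable[[str], None]) -> bool:
    """Return True when the UI should enter blocking Q&A mode."""
    if response.response:
        render(response.response)
    questions = response.clarifying_questions or []
    if len(questions) == 1:
        # A single question is a non-blocking suggestion; no mode change.
        render(f"💡 {questions[0]}")
        return False
    if questions:
        # Two or more questions: show the full list, then ask the first one.
        render("\n".join(f"{i + 1}. {q}" for i, q in enumerate(questions)))
        render(f"**Q1:** {questions[0]}")
        return True
    return False


# A response with two questions puts the UI into Q&A mode.
resp = AgentResponse(
    response="Drafted the plan outline.",
    clarifying_questions=["Which database is targeted?", "Is auth in scope?"],
)
assert dispatch(resp, print) is True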
shotgun/agents/plan.py CHANGED
@@ -4,7 +4,6 @@ from functools import partial
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import ModelMessage
@@ -19,14 +18,14 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
 
 logger = get_logger(__name__)
 
 
 def create_plan_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create a plan agent with artifact management capabilities.
 
     Args:
@@ -52,11 +51,11 @@
 
 
 async def run_plan_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     goal: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Create or update a plan based on the given goal using artifacts.
 
     Args:
@@ -4,7 +4,6 @@ from functools import partial
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import (
@@ -21,7 +20,7 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
 from .tools import get_available_web_search_tools
 
 logger = get_logger(__name__)
@@ -29,7 +28,7 @@ logger = get_logger(__name__)
 
 def create_research_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create a research agent with web search and artifact management capabilities.
 
     Args:
@@ -66,11 +65,11 @@
 
 
 async def run_research_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     query: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Perform research on the given query and update research artifacts.
 
     Args:
shotgun/agents/specify.py CHANGED
@@ -4,7 +4,6 @@ from functools import partial
 
 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import ModelMessage
@@ -19,14 +18,14 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType
 
 logger = get_logger(__name__)
 
 
 def create_specify_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create a specify agent with artifact management capabilities.
 
     Args:
@@ -52,11 +51,11 @@
 
 
 async def run_specify_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     requirement: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Create or update specifications based on the given requirement.
 
     Args: