shotgun-sh 0.2.6.dev1__py3-none-any.whl → 0.2.6.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shotgun-sh might be problematic.

shotgun/agents/tasks.py CHANGED
@@ -4,7 +4,6 @@ from functools import partial

 from pydantic_ai import (
     Agent,
-    DeferredToolRequests,
 )
 from pydantic_ai.agent import AgentRunResult
 from pydantic_ai.messages import ModelMessage
@@ -19,14 +18,14 @@ from .common import (
     create_usage_limits,
     run_agent,
 )
-from .models import AgentDeps, AgentRuntimeOptions, AgentType
+from .models import AgentDeps, AgentResponse, AgentRuntimeOptions, AgentType

 logger = get_logger(__name__)


 def create_tasks_agent(
     agent_runtime_options: AgentRuntimeOptions, provider: ProviderType | None = None
-) -> tuple[Agent[AgentDeps, str | DeferredToolRequests], AgentDeps]:
+) -> tuple[Agent[AgentDeps, AgentResponse], AgentDeps]:
     """Create a tasks agent with file management capabilities.

     Args:
@@ -50,11 +49,11 @@ def create_tasks_agent(


 async def run_tasks_agent(
-    agent: Agent[AgentDeps, str | DeferredToolRequests],
+    agent: Agent[AgentDeps, AgentResponse],
     instruction: str,
     deps: AgentDeps,
     message_history: list[ModelMessage] | None = None,
-) -> AgentRunResult[str | DeferredToolRequests]:
+) -> AgentRunResult[AgentResponse]:
     """Create or update tasks based on the given instruction.

     Args:
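
Note: `shotgun/agents/models.py` itself is not part of this diff, so the exact definition of `AgentResponse` is not visible here. Based on the structured-output prompt added further down (a `response` field plus an optional `clarifying_questions` list), it is presumably a Pydantic model roughly like this sketch:

```python
# Hypothetical sketch only - the real AgentResponse lives in shotgun/agents/models.py,
# which this diff does not show. Field names are inferred from the prompt template below.
from pydantic import BaseModel, Field


class AgentResponse(BaseModel):
    """Structured agent output: main text plus optional clarifying questions."""

    response: str = Field(description="Main response text shown to the user")
    clarifying_questions: list[str] | None = Field(
        default=None,
        description="Questions for the user; None or omitted when no clarification is needed",
    )
```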
@@ -8,7 +8,6 @@ from .codebase import (
     retrieve_code,
 )
 from .file_management import append_file, read_file, write_file
-from .user_interaction import ask_user
 from .web_search import (
     anthropic_web_search_tool,
     gemini_web_search_tool,
@@ -21,7 +20,6 @@ __all__ = [
     "anthropic_web_search_tool",
     "gemini_web_search_tool",
    "get_available_web_search_tools",
-    "ask_user",
     "read_file",
     "write_file",
     "append_file",
@@ -316,7 +316,7 @@ This project is about [making assumptions without reading files]...
 USER INTERACTION - CLARIFY EXPORT REQUIREMENTS:

 - ALWAYS ask clarifying questions when export requirements are unclear
-- Use ask_user tool to gather specific details about:
+- Use clarifying questions to gather specific details about:
   - Target format and file type preferences
   - Intended use case and audience for the export
   - Specific content sections to include/exclude from files
@@ -7,7 +7,10 @@ Your extensive expertise spans, among other things:
 ## KEY RULES

 {% if interactive_mode %}
-0. Always ask CLARIFYING QUESTIONS if the user's request is ambiguous or lacks sufficient detail. Do not make assumptions about what the user wants.
+0. Always ask CLARIFYING QUESTIONS using structured output if the user's request is ambiguous or lacks sufficient detail.
+   - Return your response with the clarifying_questions field populated
+   - Do not make assumptions about what the user wants
+   - Questions should be clear, specific, and answerable
 {% endif %}
 1. Above all, prefer using tools to do the work and NEVER respond with text.
 2. IMPORTANT: Always ask for review and go ahead to move forward after using write_file().
@@ -6,20 +6,37 @@
 {% if interactive_mode -%}
 IMPORTANT: USER INTERACTION IS ENABLED (interactive mode).

-- BEFORE GETTING TO WORK, ALWAYS THINK WHAT YOU'RE GOING TO DO AND ask_user() TO ACCEPT WHAT YOU'RE GOING TO DO.
-- ALWAYS USE ask_user TO REVIEW AND ACCEPT THE ARTIFACT SECTION YOU'VE WORKED ON BEFORE PROCEEDING TO THE NEXT SECTION.
-- AFTER USING write_artifact_section(), ALWAYS USE ask_user() TO REVIEW AND ACCEPT THE ARTIFACT SECTION YOU'VE WORKED ON BEFORE PROCEEDING TO THE NEXT SECTION.
+## Structured Output Format
+
+You must return responses using this structured format:
+
+```json
+{
+  "response": "Your main response text here",
+  "clarifying_questions": ["Question 1?", "Question 2?"] // Optional, only when needed
+}
+```
+
+## When to Use Clarifying Questions
+
+- BEFORE GETTING TO WORK: If the user's request is ambiguous, use clarifying_questions to ask what they want
+- DURING WORK: After using write_file(), you can suggest that the user review it and ask any clarifying questions with clarifying_questions
 - Don't assume - ask for confirmation of your understanding
-- When in doubt about any aspect of the goal, ASK before proceeding
+- When in doubt about any aspect of the goal, include clarifying_questions
+
+## Important Notes
+
+- If you don't need to ask questions, set clarifying_questions to null or omit it
+- Keep response field concise - a paragraph at most for user communication
+- Questions should be clear, specific, and independently answerable
+- Don't ask multiple questions in one string - use separate array items

 {% else -%}

 IMPORTANT: USER INTERACTION IS DISABLED (non-interactive mode).
-- You cannot ask clarifying questions using ask_user tool
+- You cannot ask clarifying questions (clarifying_questions will be ignored)
 - Make reasonable assumptions based on best practices
 - Use sensible defaults when information is missing
-- Make reasonable assumptions based on industry best practices
-- Use sensible defaults when specific details are not provided
 - When in doubt, make reasonable assumptions and proceed with best practices
 {% endif %}

@@ -118,7 +118,7 @@ USER INTERACTION - REDUCE UNCERTAINTY:
 - FIRST read `research.md` and `specification.md` before asking ANY questions
 - ONLY ask clarifying questions AFTER reading the context files
 - Questions should be about gaps not covered in research/specification
-- Use ask_user tool to gather specific details about:
+- Use clarifying questions to gather specific details about:
   - Information not found in the context files
   - Clarifications on ambiguous specifications
   - Priorities when multiple options exist
@@ -39,7 +39,7 @@ For research tasks:
 ## RESEARCH PRINCIPLES

 {% if interactive_mode -%}
-- CRITICAL: BEFORE RUNNING ANY SEARCH TOOL, ASK THE USER FOR APPROVAL USING ask_user(). FINISH THE QUESTION WITH ASKING FOR A GO AHEAD.
+- CRITICAL: BEFORE RUNNING ANY SEARCH TOOL, ASK THE USER FOR APPROVAL using clarifying questions. Include what you plan to search for and ask if they want you to proceed.
 {% endif -%}
 - Build upon existing research rather than starting from scratch
 - Focus on practical, actionable information over theoretical concepts
@@ -99,7 +99,7 @@ Then organize tasks into logical sections:
 USER INTERACTION - ASK CLARIFYING QUESTIONS:

 - ALWAYS ask clarifying questions when the request is vague or ambiguous
-- Use ask_user tool to gather specific details about:
+- Use clarifying questions to gather specific details about:
   - Specific features or functionality to prioritize
   - Technical constraints or preferences
   - Timeline and resource constraints
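
With the prompts now describing a `response`/`clarifying_questions` payload, the `Agent[AgentDeps, AgentResponse]` annotations in tasks.py above suggest the agents are built with `AgentResponse` as their structured output type. A minimal sketch of that wiring, under those assumptions (the model string is a placeholder and the real agent factory is not shown in this diff):

```python
# Sketch of structured-output wiring consistent with the Agent[AgentDeps, AgentResponse]
# annotations in tasks.py. The model identifier is a placeholder; the actual agent
# construction (e.g. in shotgun/agents/common.py) is not part of this diff.
from pydantic_ai import Agent

from shotgun.agents.models import AgentDeps, AgentResponse

agent: Agent[AgentDeps, AgentResponse] = Agent(
    "anthropic:claude-3-5-sonnet-latest",  # placeholder model id
    deps_type=AgentDeps,
    output_type=AgentResponse,  # replaces the old str | DeferredToolRequests output
)
```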
shotgun/telemetry.py CHANGED
@@ -65,6 +65,14 @@ def setup_logfire_observability() -> bool:
     # Instrument Pydantic AI for better observability
     logfire.instrument_pydantic_ai()

+    # Add LogfireLoggingHandler to root logger so logfire logs also go to file
+    import logging
+
+    root_logger = logging.getLogger()
+    logfire_handler = logfire.LogfireLoggingHandler()
+    root_logger.addHandler(logfire_handler)
+    logger.debug("Added LogfireLoggingHandler to root logger for file integration")
+
     # Set user context using baggage for all logs and spans
     try:
         from opentelemetry import baggage, context
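
For context on the handler change above: attaching `logfire.LogfireLoggingHandler()` to the root logger makes standard-library log records fan out to Logfire alongside whatever handlers are already installed. A rough sketch of that fan-out (the file handler and its path are illustrative, not taken from the package):

```python
# Illustrative sketch: stdlib logging records reach every handler on the root logger.
# The FileHandler and its path are made up for the example; only LogfireLoggingHandler
# is confirmed by the diff above.
import logging

import logfire

logfire.configure()  # assumes a Logfire project/token is already set up

root_logger = logging.getLogger()
root_logger.addHandler(logging.FileHandler("shotgun.log"))  # hypothetical file sink
root_logger.addHandler(logfire.LogfireLoggingHandler())     # forwards records to Logfire

logging.getLogger("shotgun.example").info("This record reaches both handlers")
```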
@@ -20,10 +20,11 @@ from textual.keys import Keys
 from textual.reactive import reactive
 from textual.screen import ModalScreen, Screen
 from textual.widget import Widget
-from textual.widgets import Button, Label, Markdown, Static
+from textual.widgets import Button, Label, Static

 from shotgun.agents.agent_manager import (
     AgentManager,
+    ClarifyingQuestionsMessage,
     MessageHistoryUpdated,
     PartialResponseMessage,
 )
@@ -37,7 +38,6 @@ from shotgun.agents.models import (
     AgentDeps,
     AgentType,
     FileOperationTracker,
-    UserQuestion,
 )
 from shotgun.codebase.core.manager import CodebaseAlreadyIndexedError
 from shotgun.codebase.models import IndexProgress, ProgressPhase
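
`ClarifyingQuestionsMessage` is imported from `shotgun.agents.agent_manager`, which is not included in this diff. Given that the handler further down only reads `event.questions`, it is presumably a small Textual message along these lines (a sketch, not the package's actual definition):

```python
# Assumed shape of ClarifyingQuestionsMessage; the real class is defined in
# shotgun/agents/agent_manager.py, which this diff does not include.
from textual.message import Message


class ClarifyingQuestionsMessage(Message):
    """Posted when an AgentResponse arrives with clarifying_questions set."""

    def __init__(self, questions: list[str]) -> None:
        self.questions = questions
        super().__init__()
```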
@@ -113,10 +113,31 @@ class StatusBar(Widget):
         self.working = working

     def render(self) -> str:
+        # Check if in Q&A mode first (highest priority)
+        try:
+            chat_screen = self.screen
+            if isinstance(chat_screen, ChatScreen) and chat_screen.qa_mode:
+                return (
+                    "[$foreground-muted][bold $text]esc[/] to exit Q&A mode • "
+                    "[bold $text]enter[/] to send answer • [bold $text]ctrl+j[/] for newline[/]"
+                )
+        except Exception:  # noqa: S110
+            # If we can't access chat screen, continue with normal display
+            pass
+
         if self.working:
-            return """[$foreground-muted][bold $text]esc[/] to stop • [bold $text]enter[/] to send • [bold $text]ctrl+j[/] for newline • [bold $text]ctrl+p[/] command palette • [bold $text]shift+tab[/] cycle modes • /help for commands[/]"""
+            return (
+                "[$foreground-muted][bold $text]esc[/] to stop • "
+                "[bold $text]enter[/] to send • [bold $text]ctrl+j[/] for newline • "
+                "[bold $text]ctrl+p[/] command palette • [bold $text]shift+tab[/] cycle modes • "
+                "/help for commands[/]"
+            )
         else:
-            return """[$foreground-muted][bold $text]enter[/] to send • [bold $text]ctrl+j[/] for newline • [bold $text]ctrl+p[/] command palette • [bold $text]shift+tab[/] cycle modes • /help for commands[/]"""
+            return (
+                "[$foreground-muted][bold $text]enter[/] to send • "
+                "[bold $text]ctrl+j[/] for newline • [bold $text]ctrl+p[/] command palette • "
+                "[bold $text]shift+tab[/] cycle modes • /help for commands[/]"
+            )


 class ModeIndicator(Widget):
@@ -141,6 +162,18 @@ class ModeIndicator(Widget):

     def render(self) -> str:
         """Render the mode indicator."""
+        # Check if in Q&A mode first
+        try:
+            chat_screen = self.screen
+            if isinstance(chat_screen, ChatScreen) and chat_screen.qa_mode:
+                return (
+                    "[bold $text-accent]Q&A mode[/]"
+                    "[$foreground-muted] (Answer the clarifying questions or ESC to cancel)[/]"
+                )
+        except Exception:  # noqa: S110
+            # If we can't access chat screen, continue with normal display
+            pass
+
         mode_display = {
             AgentType.RESEARCH: "Research",
             AgentType.PLAN: "Planning",
@@ -149,10 +182,16 @@ class ModeIndicator(Widget):
             AgentType.EXPORT: "Export",
         }
         mode_description = {
-            AgentType.RESEARCH: "Research topics with web search and synthesize findings",
+            AgentType.RESEARCH: (
+                "Research topics with web search and synthesize findings"
+            ),
             AgentType.PLAN: "Create comprehensive, actionable plans with milestones",
-            AgentType.TASKS: "Generate specific, actionable tasks from research and plans",
-            AgentType.SPECIFY: "Create detailed specifications and requirements documents",
+            AgentType.TASKS: (
+                "Generate specific, actionable tasks from research and plans"
+            ),
+            AgentType.SPECIFY: (
+                "Create detailed specifications and requirements documents"
+            ),
             AgentType.EXPORT: "Export artifacts and findings to various formats",
         }

@@ -163,7 +202,10 @@ class ModeIndicator(Widget):
         has_content = self.progress_checker.has_mode_content(self.mode)
         status_icon = " ✓" if has_content else ""

-        return f"[bold $text-accent]{mode_title}{status_icon} mode[/][$foreground-muted] ({description})[/]"
+        return (
+            f"[bold $text-accent]{mode_title}{status_icon} mode[/]"
+            f"[$foreground-muted] ({description})[/]"
+        )


 class CodebaseIndexPromptScreen(ModalScreen[bool]):
@@ -199,7 +241,8 @@ class CodebaseIndexPromptScreen(ModalScreen[bool]):
         yield Static(
             f"Would you like to index the codebase at:\n{Path.cwd()}\n\n"
             "This is required for the agent to understand your code and answer "
-            "questions about it. Without indexing, the agent cannot analyze your codebase."
+            "questions about it. Without indexing, the agent cannot analyze "
+            "your codebase."
         )
         with Container(id="index-prompt-buttons"):
             yield Button(
@@ -238,11 +281,16 @@ class ChatScreen(Screen[None]):
     history: PromptHistory = PromptHistory()
     messages = reactive(list[ModelMessage | HintMessage]())
     working = reactive(False)
-    question: reactive[UserQuestion | None] = reactive(None)
     indexing_job: reactive[CodebaseIndexSelection | None] = reactive(None)
     partial_message: reactive[ModelMessage | None] = reactive(None)
     _current_worker = None  # Track the current running worker for cancellation

+    # Q&A mode state (for structured output clarifying questions)
+    qa_mode = reactive(False)
+    qa_questions: list[str] = []
+    qa_current_index = reactive(0)
+    qa_answers: list[str] = []
+
     def __init__(self, continue_session: bool = False) -> None:
         super().__init__()
         # Get the model configuration and services
@@ -282,11 +330,19 @@ class ChatScreen(Screen[None]):
         self._load_conversation()

         self.call_later(self.check_if_codebase_is_indexed)
-        # Start the question listener worker to handle ask_user interactions
-        self.call_later(self.add_question_listener)

     async def on_key(self, event: events.Key) -> None:
         """Handle key presses for cancellation."""
+        # If escape is pressed during Q&A mode, exit Q&A
+        if event.key in (Keys.Escape, Keys.ControlC) and self.qa_mode:
+            self._exit_qa_mode()
+            # Re-enable the input
+            prompt_input = self.query_one(PromptInput)
+            prompt_input.focus()
+            # Prevent the event from propagating (don't quit the app)
+            event.stop()
+            return
+
         # If escape or ctrl+c is pressed while agent is working, cancel the operation
         if (
             event.key in (Keys.Escape, Keys.ControlC)
@@ -372,24 +428,33 @@ class ChatScreen(Screen[None]):
             status_bar.working = is_working
             status_bar.refresh()

+    def watch_qa_mode(self, qa_mode_active: bool) -> None:
+        """Update UI when Q&A mode state changes."""
+        if self.is_mounted:
+            # Update status bar to show "ESC to exit Q&A mode"
+            status_bar = self.query_one(StatusBar)
+            status_bar.refresh()
+
+            # Update mode indicator to show "Q&A mode"
+            mode_indicator = self.query_one(ModeIndicator)
+            mode_indicator.refresh()
+
     def watch_messages(self, messages: list[ModelMessage | HintMessage]) -> None:
         """Update the chat history when messages change."""
         if self.is_mounted:
             chat_history = self.query_one(ChatHistory)
             chat_history.update_messages(messages)

-    def watch_question(self, question: UserQuestion | None) -> None:
-        """Update the question display."""
-        if self.is_mounted:
-            question_display = self.query_one("#question-display", Markdown)
-            if question:
-                question_display.update(f"Question:\n\n{question.question}")
-                question_display.display = True
-            else:
-                question_display.update("")
-                question_display.display = False
-
     def action_toggle_mode(self) -> None:
+        # Prevent mode switching during Q&A
+        if self.qa_mode:
+            self.notify(
+                "Cannot switch modes while answering questions",
+                severity="warning",
+                timeout=3,
+            )
+            return
+
         modes = [
             AgentType.RESEARCH,
             AgentType.SPECIFY,
@@ -410,20 +475,11 @@ class ChatScreen(Screen[None]):
         else:
             self.notify("No usage hint available", severity="error")

-    @work
-    async def add_question_listener(self) -> None:
-        while True:
-            question = await self.deps.queue.get()
-            self.question = question
-            await question.result
-            self.deps.queue.task_done()
-
     def compose(self) -> ComposeResult:
         """Create child widgets for the app."""
         with Container(id="window"):
             yield self.agent_manager
             yield ChatHistory()
-            yield Markdown(markdown="", id="question-display")
             with Container(id="footer"):
                 yield Spinner(
                     text="Processing...",
@@ -464,6 +520,42 @@ class ChatScreen(Screen[None]):
         partial_response_widget = self.query_one(ChatHistory)
         partial_response_widget.partial_response = None

+    def _exit_qa_mode(self) -> None:
+        """Exit Q&A mode and clean up state."""
+        # Track cancellation event
+        track_event(
+            "qa_mode_cancelled",
+            {
+                "questions_total": len(self.qa_questions),
+                "questions_answered": len(self.qa_answers),
+            },
+        )
+
+        # Clear Q&A state
+        self.qa_mode = False
+        self.qa_questions = []
+        self.qa_answers = []
+        self.qa_current_index = 0
+
+        # Show cancellation message
+        self.mount_hint("⚠️ Q&A cancelled - You can continue the conversation.")
+
+    @on(ClarifyingQuestionsMessage)
+    def handle_clarifying_questions(self, event: ClarifyingQuestionsMessage) -> None:
+        """Handle clarifying questions from agent structured output.
+
+        Note: Hints are now added synchronously in agent_manager.run() before this
+        handler is called, so we only need to set up Q&A mode state here.
+        """
+        # Clear any streaming partial response (removes final_result JSON)
+        self._clear_partial_response()
+
+        # Enter Q&A mode
+        self.qa_mode = True
+        self.qa_questions = event.questions
+        self.qa_current_index = 0
+        self.qa_answers = []
+
     @on(MessageHistoryUpdated)
     def handle_message_history_updated(self, event: MessageHistoryUpdated) -> None:
         """Handle message history updates from the agent manager."""
@@ -519,6 +611,61 @@ class ChatScreen(Screen[None]):
             self.value = ""
             return

+        # Handle Q&A mode (from structured output clarifying questions)
+        if self.qa_mode and self.qa_questions:
+            # Collect answer
+            self.qa_answers.append(text)
+
+            # Show answer
+            if len(self.qa_questions) == 1:
+                self.agent_manager.add_hint_message(
+                    HintMessage(message=f"**A:** {text}")
+                )
+            else:
+                q_num = self.qa_current_index + 1
+                self.agent_manager.add_hint_message(
+                    HintMessage(message=f"**A{q_num}:** {text}")
+                )
+
+            # Move to next or finish
+            self.qa_current_index += 1
+
+            if self.qa_current_index < len(self.qa_questions):
+                # Show next question
+                next_q = self.qa_questions[self.qa_current_index]
+                next_q_num = self.qa_current_index + 1
+                self.agent_manager.add_hint_message(
+                    HintMessage(message=f"**Q{next_q_num}:** {next_q}")
+                )
+            else:
+                # All answered - format and send back
+                if len(self.qa_questions) == 1:
+                    # Single question - just send the answer
+                    formatted_qa = f"Q: {self.qa_questions[0]}\nA: {self.qa_answers[0]}"
+                else:
+                    # Multiple questions - format all Q&A pairs
+                    formatted_qa = "\n\n".join(
+                        f"Q{i + 1}: {q}\nA{i + 1}: {a}"
+                        for i, (q, a) in enumerate(
+                            zip(self.qa_questions, self.qa_answers, strict=True)
+                        )
+                    )
+
+                # Exit Q&A mode
+                self.qa_mode = False
+                self.qa_questions = []
+                self.qa_answers = []
+                self.qa_current_index = 0
+
+                # Send answers back to agent
+                self.run_agent(formatted_qa)
+
+            # Clear input
+            prompt_input = self.query_one(PromptInput)
+            prompt_input.clear()
+            self.value = ""
+            return
+
         # Check if it's a command
         if self.command_handler.is_command(text):
             success, response = self.command_handler.handle_command(text)
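
To make the multi-question branch in the hunk above concrete, here is a small worked example of the `formatted_qa` string it produces (question and answer values are illustrative only):

```python
# Worked example of the multi-question formatting used above (illustrative values).
questions = ["Which export format do you want?", "Who is the audience?"]
answers = ["Markdown", "New contributors"]

formatted_qa = "\n\n".join(
    f"Q{i + 1}: {q}\nA{i + 1}: {a}"
    for i, (q, a) in enumerate(zip(questions, answers, strict=True))
)

print(formatted_qa)
# Q1: Which export format do you want?
# A1: Markdown
#
# Q2: Who is the audience?
# A2: New contributors
```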
@@ -785,7 +932,9 @@ class ChatScreen(Screen[None]):

 def help_text_with_codebase(already_indexed: bool = False) -> str:
     return (
-        "Howdy! Welcome to Shotgun - the context tool for software engineering. \n\nYou can research, build specs, plan, create tasks, and export context to your favorite code-gen agents.\n\n"
+        "Howdy! Welcome to Shotgun - the context tool for software engineering. \n\n"
+        "You can research, build specs, plan, create tasks, and export context to your "
+        "favorite code-gen agents.\n\n"
         f"{'' if already_indexed else 'Once your codebase is indexed, '}I can help with:\n\n"
         "- Speccing out a new feature\n"
         "- Onboarding you onto this project\n"
@@ -796,7 +945,9 @@ def help_text_with_codebase(already_indexed: bool = False) -> str:

 def help_text_empty_dir() -> str:
     return (
-        "Howdy! Welcome to Shotgun - the context tool for software engineering.\n\nYou can research, build specs, plan, create tasks, and export context to your favorite code-gen agents.\n\n"
+        "Howdy! Welcome to Shotgun - the context tool for software engineering.\n\n"
+        "You can research, build specs, plan, create tasks, and export context to your "
+        "favorite code-gen agents.\n\n"
         "What would you like to build? Here are some examples:\n\n"
         "- Research FastAPI vs Django\n"
         "- Plan my new web app using React\n"
@@ -19,7 +19,6 @@ from textual.reactive import reactive
 from textual.widget import Widget
 from textual.widgets import Markdown

-from shotgun.agents.models import UserAnswer
 from shotgun.tui.components.vertical_tail import VerticalTail
 from shotgun.tui.screens.chat_screen.hint_message import HintMessage, HintMessageWidget

@@ -103,42 +102,8 @@ class ChatHistory(Widget):
         self._rendered_count = len(filtered)

     def filtered_items(self) -> Generator[ModelMessage | HintMessage, None, None]:
-        for idx, next_item in enumerate(self.items):
-            prev_item = self.items[idx - 1] if idx > 0 else None
-
-            if isinstance(prev_item, ModelRequest) and isinstance(
-                next_item, ModelResponse
-            ):
-                ask_user_tool_response_part = next(
-                    (
-                        part
-                        for part in prev_item.parts
-                        if isinstance(part, ToolReturnPart)
-                        and part.tool_name == "ask_user"
-                    ),
-                    None,
-                )
-
-                ask_user_part = next(
-                    (
-                        part
-                        for part in next_item.parts
-                        if isinstance(part, ToolCallPart)
-                        and part.tool_name == "ask_user"
-                    ),
-                    None,
-                )
-
-                if not ask_user_part or not ask_user_tool_response_part:
-                    yield next_item
-                    continue
-                if (
-                    ask_user_tool_response_part.tool_call_id
-                    == ask_user_part.tool_call_id
-                ):
-                    continue  # don't emit tool call that happens after tool response
-
-            yield next_item
+        # Simply yield all items - no filtering needed now that ask_user/ask_questions are gone
+        yield from self.items

     def update_messages(self, messages: list[ModelMessage | HintMessage]) -> None:
         """Update the displayed messages using incremental mounting."""
@@ -167,6 +132,9 @@ class ChatHistory(Widget):

         self._rendered_count = len(filtered)

+        # Scroll to bottom to show newly added messages
+        self.vertical_tail.scroll_end(animate=False)
+

 class UserQuestionWidget(Widget):
     def __init__(self, item: ModelRequest | None) -> None:
@@ -189,13 +157,8 @@ class UserQuestionWidget(Widget):
                     f"**>** {part.content if isinstance(part.content, str) else ''}\n\n"
                 )
             elif isinstance(part, ToolReturnPart):
-                if part.tool_name == "ask_user":
-                    acc += f"**>** {part.content.answer if isinstance(part.content, UserAnswer) else part.content['answer']}\n\n"
-                else:
-                    # acc += " ∟ finished\n\n"  # let's not show anything yet
-                    pass
-            elif isinstance(part, UserPromptPart):
-                acc += f"**>** {part.content}\n\n"
+                # Don't show tool return parts in the UI
+                pass
         return acc


@@ -216,23 +179,15 @@ class AgentResponseWidget(Widget):
         if self.item is None:
             return ""

-        # Check if there's an ask_user tool call
-        has_ask_user = any(
-            isinstance(part, ToolCallPart) and part.tool_name == "ask_user"
-            for part in self.item.parts
-        )
-
         for idx, part in enumerate(self.item.parts):
             if isinstance(part, TextPart):
-                # Skip ALL text parts if there's an ask_user tool call
-                if has_ask_user:
-                    continue
                 # Only show the circle prefix if there's actual content
                 if part.content and part.content.strip():
                     acc += f"**⏺** {part.content}\n\n"
             elif isinstance(part, ToolCallPart):
                 parts_str = self._format_tool_call_part(part)
-                acc += parts_str + "\n\n"
+                if parts_str:  # Only add if there's actual content
+                    acc += parts_str + "\n\n"
             elif isinstance(part, BuiltinToolCallPart):
                 # Format builtin tool calls better
                 if part.tool_name and "search" in part.tool_name.lower():
@@ -286,9 +241,6 @@ class AgentResponseWidget(Widget):
         return args if isinstance(args, dict) else {}

     def _format_tool_call_part(self, part: ToolCallPart) -> str:
-        if part.tool_name == "ask_user":
-            return self._format_ask_user_part(part)
-
         # Parse args once (handles both JSON string and dict)
         args = self._parse_args(part.args)

@@ -359,10 +311,9 @@ class AgentResponseWidget(Widget):
                 return f"{part.tool_name}({args['section_title']})"
             return f"{part.tool_name}()"

-        if part.tool_name == "create_artifact":
-            if "name" in args:
-                return f"{part.tool_name}({args['name']})"
-            return f"▪ {part.tool_name}()"
+        if part.tool_name == "final_result":
+            # Hide final_result tool calls completely - they're internal Pydantic AI mechanics
+            return ""

         # Default case for unrecognized tools - format args properly
         args = self._parse_args(part.args)
@@ -382,20 +333,3 @@ class AgentResponseWidget(Widget):
             return f"{part.tool_name}({args_str})"
         else:
             return f"{part.tool_name}()"
-
-    def _format_ask_user_part(
-        self,
-        part: ToolCallPart,
-    ) -> str:
-        if isinstance(part.args, str):
-            try:
-                _args = json.loads(part.args) if part.args.strip() else {}
-            except json.JSONDecodeError:
-                _args = {}
-        else:
-            _args = part.args
-
-        if isinstance(_args, dict) and "question" in _args:
-            return f"{_args['question']}"
-        else:
-            return "❓ "
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: shotgun-sh
-Version: 0.2.6.dev1
+Version: 0.2.6.dev3
 Summary: AI-powered research, planning, and task management CLI tool
 Project-URL: Homepage, https://shotgun.sh/
 Project-URL: Repository, https://github.com/shotgun-sh/shotgun