shotgun-sh 0.4.0.dev1__py3-none-any.whl → 0.6.2__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (135)
  1. shotgun/agents/agent_manager.py +307 -8
  2. shotgun/agents/cancellation.py +103 -0
  3. shotgun/agents/common.py +12 -0
  4. shotgun/agents/config/README.md +0 -1
  5. shotgun/agents/config/manager.py +10 -7
  6. shotgun/agents/config/models.py +5 -27
  7. shotgun/agents/config/provider.py +44 -27
  8. shotgun/agents/conversation/history/token_counting/base.py +51 -9
  9. shotgun/agents/file_read.py +176 -0
  10. shotgun/agents/messages.py +15 -3
  11. shotgun/agents/models.py +24 -1
  12. shotgun/agents/router/models.py +8 -0
  13. shotgun/agents/router/tools/delegation_tools.py +55 -1
  14. shotgun/agents/router/tools/plan_tools.py +88 -7
  15. shotgun/agents/runner.py +17 -2
  16. shotgun/agents/tools/__init__.py +8 -0
  17. shotgun/agents/tools/codebase/directory_lister.py +27 -39
  18. shotgun/agents/tools/codebase/file_read.py +26 -35
  19. shotgun/agents/tools/codebase/query_graph.py +9 -0
  20. shotgun/agents/tools/codebase/retrieve_code.py +9 -0
  21. shotgun/agents/tools/file_management.py +32 -2
  22. shotgun/agents/tools/file_read_tools/__init__.py +7 -0
  23. shotgun/agents/tools/file_read_tools/multimodal_file_read.py +167 -0
  24. shotgun/agents/tools/markdown_tools/__init__.py +62 -0
  25. shotgun/agents/tools/markdown_tools/insert_section.py +148 -0
  26. shotgun/agents/tools/markdown_tools/models.py +86 -0
  27. shotgun/agents/tools/markdown_tools/remove_section.py +114 -0
  28. shotgun/agents/tools/markdown_tools/replace_section.py +119 -0
  29. shotgun/agents/tools/markdown_tools/utils.py +453 -0
  30. shotgun/agents/tools/registry.py +44 -6
  31. shotgun/agents/tools/web_search/openai.py +42 -23
  32. shotgun/attachments/__init__.py +41 -0
  33. shotgun/attachments/errors.py +60 -0
  34. shotgun/attachments/models.py +107 -0
  35. shotgun/attachments/parser.py +257 -0
  36. shotgun/attachments/processor.py +193 -0
  37. shotgun/build_constants.py +4 -7
  38. shotgun/cli/clear.py +2 -2
  39. shotgun/cli/codebase/commands.py +181 -65
  40. shotgun/cli/compact.py +2 -2
  41. shotgun/cli/context.py +2 -2
  42. shotgun/cli/error_handler.py +2 -2
  43. shotgun/cli/run.py +90 -0
  44. shotgun/cli/spec/backup.py +2 -1
  45. shotgun/codebase/__init__.py +2 -0
  46. shotgun/codebase/benchmarks/__init__.py +35 -0
  47. shotgun/codebase/benchmarks/benchmark_runner.py +309 -0
  48. shotgun/codebase/benchmarks/exporters.py +119 -0
  49. shotgun/codebase/benchmarks/formatters/__init__.py +49 -0
  50. shotgun/codebase/benchmarks/formatters/base.py +34 -0
  51. shotgun/codebase/benchmarks/formatters/json_formatter.py +106 -0
  52. shotgun/codebase/benchmarks/formatters/markdown.py +136 -0
  53. shotgun/codebase/benchmarks/models.py +129 -0
  54. shotgun/codebase/core/__init__.py +4 -0
  55. shotgun/codebase/core/call_resolution.py +91 -0
  56. shotgun/codebase/core/change_detector.py +11 -6
  57. shotgun/codebase/core/errors.py +159 -0
  58. shotgun/codebase/core/extractors/__init__.py +23 -0
  59. shotgun/codebase/core/extractors/base.py +138 -0
  60. shotgun/codebase/core/extractors/factory.py +63 -0
  61. shotgun/codebase/core/extractors/go/__init__.py +7 -0
  62. shotgun/codebase/core/extractors/go/extractor.py +122 -0
  63. shotgun/codebase/core/extractors/javascript/__init__.py +7 -0
  64. shotgun/codebase/core/extractors/javascript/extractor.py +132 -0
  65. shotgun/codebase/core/extractors/protocol.py +109 -0
  66. shotgun/codebase/core/extractors/python/__init__.py +7 -0
  67. shotgun/codebase/core/extractors/python/extractor.py +141 -0
  68. shotgun/codebase/core/extractors/rust/__init__.py +7 -0
  69. shotgun/codebase/core/extractors/rust/extractor.py +139 -0
  70. shotgun/codebase/core/extractors/types.py +15 -0
  71. shotgun/codebase/core/extractors/typescript/__init__.py +7 -0
  72. shotgun/codebase/core/extractors/typescript/extractor.py +92 -0
  73. shotgun/codebase/core/gitignore.py +252 -0
  74. shotgun/codebase/core/ingestor.py +644 -354
  75. shotgun/codebase/core/kuzu_compat.py +119 -0
  76. shotgun/codebase/core/language_config.py +239 -0
  77. shotgun/codebase/core/manager.py +256 -46
  78. shotgun/codebase/core/metrics_collector.py +310 -0
  79. shotgun/codebase/core/metrics_types.py +347 -0
  80. shotgun/codebase/core/parallel_executor.py +424 -0
  81. shotgun/codebase/core/work_distributor.py +254 -0
  82. shotgun/codebase/core/worker.py +768 -0
  83. shotgun/codebase/indexing_state.py +86 -0
  84. shotgun/codebase/models.py +94 -0
  85. shotgun/codebase/service.py +13 -0
  86. shotgun/exceptions.py +9 -9
  87. shotgun/main.py +3 -16
  88. shotgun/posthog_telemetry.py +165 -24
  89. shotgun/prompts/agents/file_read.j2 +48 -0
  90. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +19 -47
  91. shotgun/prompts/agents/partials/content_formatting.j2 +12 -33
  92. shotgun/prompts/agents/partials/interactive_mode.j2 +9 -32
  93. shotgun/prompts/agents/partials/router_delegation_mode.j2 +21 -22
  94. shotgun/prompts/agents/plan.j2 +14 -0
  95. shotgun/prompts/agents/router.j2 +531 -258
  96. shotgun/prompts/agents/specify.j2 +14 -0
  97. shotgun/prompts/agents/state/codebase/codebase_graphs_available.j2 +14 -1
  98. shotgun/prompts/agents/state/system_state.j2 +13 -11
  99. shotgun/prompts/agents/tasks.j2 +14 -0
  100. shotgun/settings.py +49 -10
  101. shotgun/tui/app.py +149 -18
  102. shotgun/tui/commands/__init__.py +9 -1
  103. shotgun/tui/components/attachment_bar.py +87 -0
  104. shotgun/tui/components/prompt_input.py +25 -28
  105. shotgun/tui/components/status_bar.py +14 -7
  106. shotgun/tui/dependencies.py +3 -8
  107. shotgun/tui/protocols.py +18 -0
  108. shotgun/tui/screens/chat/chat.tcss +15 -0
  109. shotgun/tui/screens/chat/chat_screen.py +766 -235
  110. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +8 -4
  111. shotgun/tui/screens/chat_screen/attachment_hint.py +40 -0
  112. shotgun/tui/screens/chat_screen/command_providers.py +0 -10
  113. shotgun/tui/screens/chat_screen/history/chat_history.py +54 -14
  114. shotgun/tui/screens/chat_screen/history/formatters.py +22 -0
  115. shotgun/tui/screens/chat_screen/history/user_question.py +25 -3
  116. shotgun/tui/screens/database_locked_dialog.py +219 -0
  117. shotgun/tui/screens/database_timeout_dialog.py +158 -0
  118. shotgun/tui/screens/kuzu_error_dialog.py +135 -0
  119. shotgun/tui/screens/model_picker.py +1 -3
  120. shotgun/tui/screens/models.py +11 -0
  121. shotgun/tui/state/processing_state.py +19 -0
  122. shotgun/tui/widgets/widget_coordinator.py +18 -0
  123. shotgun/utils/file_system_utils.py +4 -1
  124. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/METADATA +87 -34
  125. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/RECORD +128 -79
  126. shotgun/cli/export.py +0 -81
  127. shotgun/cli/plan.py +0 -73
  128. shotgun/cli/research.py +0 -93
  129. shotgun/cli/specify.py +0 -70
  130. shotgun/cli/tasks.py +0 -78
  131. shotgun/sentry_telemetry.py +0 -232
  132. shotgun/tui/screens/onboarding.py +0 -584
  133. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/WHEEL +0 -0
  134. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/entry_points.txt +0 -0
  135. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/licenses/LICENSE +0 -0
shotgun/agents/agent_manager.py CHANGED
@@ -19,7 +19,10 @@ from tenacity import (
 if TYPE_CHECKING:
     from shotgun.agents.conversation import ConversationState
 
+import base64
+
 from pydantic_ai import (
+    BinaryContent,
     RunContext,
     UsageLimits,
 )
@@ -40,11 +43,13 @@ from pydantic_ai.messages import (
     TextPartDelta,
     ToolCallPart,
     ToolCallPartDelta,
+    UserContent,
     UserPromptPart,
 )
 from textual.message import Message
 from textual.widget import Widget
 
+from shotgun.agents.cancellation import CancellableStreamIterator
 from shotgun.agents.common import add_system_prompt_message, add_system_status_message
 from shotgun.agents.config.models import (
     KeyProvider,
@@ -67,18 +72,19 @@ from shotgun.agents.models import (
     RouterAgent,
     ShotgunAgent,
 )
+from shotgun.attachments import FileAttachment
 from shotgun.posthog_telemetry import track_event
 from shotgun.tui.screens.chat_screen.hint_message import HintMessage
 from shotgun.utils.source_detection import detect_source
 
 from .conversation.history.compaction import apply_persistent_compaction
 from .export import create_export_agent
-from .messages import AgentSystemPrompt
+from .messages import AgentSystemPrompt, InternalPromptPart
 from .models import AgentDeps, AgentRuntimeOptions
 from .plan import create_plan_agent
 from .research import create_research_agent
 from .router import create_router_agent
-from .router.models import RouterDeps
+from .router.models import RouterDeps, RouterMode
 from .specify import create_specify_agent
 from .tasks import create_tasks_agent
@@ -171,6 +177,29 @@ class ClarifyingQuestionsMessage(Message):
         self.response_text = response_text
 
 
+class FileRequestPendingMessage(Message):
+    """Event posted when agent requests files to be loaded.
+
+    This triggers the TUI to load the requested files and resume
+    the agent with the file contents in the next prompt.
+    """
+
+    def __init__(
+        self,
+        file_paths: list[str],
+        response_text: str,
+    ) -> None:
+        """Initialize the file request pending message.
+
+        Args:
+            file_paths: List of file paths the agent wants to read
+            response_text: The agent's response text before requesting files
+        """
+        super().__init__()
+        self.file_paths = file_paths
+        self.response_text = response_text
+
+
 class CompactionStartedMessage(Message):
     """Event posted when conversation compaction starts."""
 
@@ -343,6 +372,10 @@ class AgentManager(Widget):
         self._qa_questions: list[str] | None = None
         self._qa_mode_active: bool = False
 
+        # File request state for structured output file loading
+        self._file_request_pending: bool = False
+        self._pending_file_requests: list[str] = []
+
     async def _ensure_agents_initialized(self) -> None:
         """Ensure all agents are initialized (lazy initialization)."""
         if self._agents_initialized:
@@ -486,6 +519,72 @@ class AgentManager(Widget):
         """
         return self._get_agent(self._current_agent_type)
 
+    @property
+    def file_request_pending(self) -> bool:
+        """Check if there's a pending file request."""
+        return self._file_request_pending
+
+    @property
+    def pending_file_requests(self) -> list[str]:
+        """Get the list of pending file requests."""
+        return self._pending_file_requests
+
+    def process_file_requests(self) -> list[tuple[str, BinaryContent]]:
+        """Process pending file requests and return loaded content.
+
+        This method is called by the TUI after FileRequestPendingMessage is received.
+        It loads the requested files as BinaryContent and clears the pending state.
+
+        Returns:
+            List of (file_path, BinaryContent) tuples for files that were successfully loaded.
+        """
+        if not self._file_request_pending:
+            return []
+
+        # MIME type mapping for supported file types
+        mime_types: dict[str, str] = {
+            ".pdf": "application/pdf",
+            ".png": "image/png",
+            ".jpg": "image/jpeg",
+            ".jpeg": "image/jpeg",
+            ".gif": "image/gif",
+            ".webp": "image/webp",
+        }
+
+        loaded_files: list[tuple[str, BinaryContent]] = []
+        for file_path_str in self._pending_file_requests:
+            try:
+                path = Path(file_path_str).expanduser().resolve()
+                if not path.exists():
+                    logger.warning(f"Requested file not found: {path}")
+                    continue
+
+                # Get MIME type
+                suffix = path.suffix.lower()
+                mime_type = mime_types.get(suffix)
+                if mime_type is None:
+                    logger.warning(f"Unsupported file type: {suffix} for {path}")
+                    continue
+
+                # Read file and create BinaryContent
+                data = path.read_bytes()
+                loaded_files.append(
+                    (str(path), BinaryContent(data=data, media_type=mime_type))
+                )
+                logger.debug(f"Loaded file: {path} ({len(data)} bytes)")
+
+            except Exception as e:
+                logger.error(f"Error loading file {file_path_str}: {e}")
+
+        # Log before clearing the pending list so the requested count is accurate
+        logger.info(
+            f"Loaded {len(loaded_files)} of {len(self._pending_file_requests)} requested files"
+        )
+
+        # Clear pending state
+        self._file_request_pending = False
+        self._pending_file_requests = []
+
+        return loaded_files
+
     def _get_agent(self, agent_type: AgentType) -> AnyAgent:
         """Get agent by type.
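Taken together with FileRequestPendingMessage, the intended round trip is: the manager posts the event, the TUI loads the files via process_file_requests(), and the run is resumed with the loaded contents. Below is a minimal sketch of what the TUI side could look like; ChatScreenSketch, handle_file_request, and the agent_manager attribute are hypothetical names, and the actual wiring lives in shotgun/tui/screens/chat/chat_screen.py:

```python
from textual import on
from textual.screen import Screen

from shotgun.agents.agent_manager import AgentManager, FileRequestPendingMessage


class ChatScreenSketch(Screen):
    """Illustrative only; not the actual chat screen implementation."""

    agent_manager: AgentManager  # assumed attribute for this sketch

    @on(FileRequestPendingMessage)
    async def handle_file_request(self, message: FileRequestPendingMessage) -> None:
        # Load requested files as (path, BinaryContent) tuples; this also
        # clears the manager's pending-request state.
        file_contents = self.agent_manager.process_file_requests()
        # Resume the run. Passing file_contents injects the files as
        # multimodal content and suppresses any further file_requests in
        # the response, preventing an infinite request loop.
        await self.agent_manager.run_agent(file_contents=file_contents)
```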
 
@@ -585,7 +684,7 @@
     async def _run_agent_with_retry(
         self,
         agent: AnyAgent,
-        prompt: str | None,
+        prompt: str | Sequence[UserContent] | None,
         deps: AgentDeps,
         usage_limits: UsageLimits | None,
         message_history: list[ModelMessage],
@@ -596,7 +695,8 @@
 
         Args:
             agent: The agent to run (ShotgunAgent or RouterAgent).
-            prompt: Optional prompt to send to the agent.
+            prompt: Optional prompt to send to the agent. Can be a string,
+                a sequence of UserContent (for multimodal), or None.
             deps: Agent dependencies (AgentDeps or RouterDeps).
             usage_limits: Optional usage limits.
             message_history: Message history to provide to agent.
@@ -630,6 +730,8 @@
         self,
         prompt: str | None = None,
         *,
+        attachment: FileAttachment | None = None,
+        file_contents: list[tuple[str, BinaryContent]] | None = None,
         deps: AgentDeps | None = None,
         usage_limits: UsageLimits | None = None,
         **kwargs: Any,
@@ -641,6 +743,9 @@
 
         Args:
             prompt: Optional prompt to send to the agent.
+            attachment: Optional file attachment to include as multimodal content.
+            file_contents: Optional list of (file_path, BinaryContent) tuples to include
+                as multimodal content. Used when resuming after file_requests.
             deps: Optional dependencies override (defaults to manager's deps).
             usage_limits: Optional usage limits for the agent run.
             **kwargs: Additional keyword arguments to pass to the agent.
@@ -753,7 +858,7 @@
                 "**Options:**\n"
                 "- Get a [Shotgun Account](https://shotgun.sh) - streaming works out of the box\n"
                 "- Complete [Biometric Verification](https://platform.openai.com/settings/organization/general) with OpenAI, then:\n"
-                "  1. Press `Ctrl+P` → Open Provider Setup\n"
+                "  1. Press `/` → Open Provider Setup\n"
                 "  2. Select OpenAI → Clear key\n"
                 "  3. Re-add your OpenAI API key\n\n"
                 "Continuing without streaming (responses will appear all at once)."
@@ -769,13 +874,46 @@
             {
                 "has_prompt": prompt is not None,
                 "model_name": model_name,
+                "has_attachment": attachment is not None,
             },
         )
 
+        # Construct multimodal prompt if attachment or file_contents is provided
+        user_prompt: str | Sequence[UserContent] | None = prompt
+
+        if file_contents:
+            # File contents from file_requests - construct multimodal prompt with files
+            content_parts: list[UserContent] = [
+                prompt or "Here are the files you requested:"
+            ]
+            for file_path, binary in file_contents:
+                content_parts.append(f"\n\n--- File: {file_path} ---")
+                content_parts.append(binary)
+            user_prompt = content_parts
+            logger.debug(
+                "Constructed multimodal prompt with requested files",
+                extra={"num_files": len(file_contents)},
+            )
+        elif attachment and attachment.content_base64:
+            # Use BinaryContent which is supported by all providers (OpenAI, Anthropic, Google)
+            binary_data = base64.b64decode(attachment.content_base64)
+            binary_content = BinaryContent(
+                data=binary_data,
+                media_type=attachment.mime_type,
+            )
+            user_prompt = [prompt or "", binary_content]
+            logger.debug(
+                "Constructed multimodal prompt with attachment",
+                extra={
+                    "attachment_type": attachment.file_type.value,
+                    "attachment_size": attachment.file_size_bytes,
+                },
+            )
+
         try:
             result: AgentRunResult[AgentResponse] = await self._run_agent_with_retry(
                 agent=self.current_agent,
-                prompt=prompt,
+                prompt=user_prompt,
                 deps=deps,
                 usage_limits=usage_limits,
                 message_history=message_history,
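The construction above relies on pydantic-ai accepting a sequence of user content parts, plain strings interleaved with BinaryContent, as the prompt. A standalone sketch of that pattern (the model name and file path are placeholders):

```python
from pathlib import Path

from pydantic_ai import Agent, BinaryContent

agent = Agent("openai:gpt-4o")  # placeholder model name

# A prompt may be a single string or a sequence mixing text and binary
# parts; the provider receives the PDF bytes as an inline attachment.
pdf_bytes = Path("report.pdf").read_bytes()  # placeholder file
result = agent.run_sync(
    [
        "Summarize the attached document.",
        BinaryContent(data=pdf_bytes, media_type="application/pdf"),
    ]
)
print(result.output)
```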
@@ -861,17 +999,38 @@
         )
 
         # Deduplicate: skip user prompts that are already in original_messages
+        # Note: We compare content only, not timestamps, since UserPromptPart
+        # has a timestamp field that differs between instances
+        def get_user_prompt_text(
+            request: ModelRequest,
+        ) -> str | None:
+            """Extract just the text content from a ModelRequest for deduplication.
+
+            When content is multimodal (list with text + binary), extract just the text.
+            This ensures text-only and multimodal versions of the same prompt match.
+            """
+            for part in request.parts:
+                if isinstance(part, UserPromptPart):
+                    content = part.content
+                    if isinstance(content, str):
+                        return content
+                    elif isinstance(content, list):
+                        # Multimodal content - extract text strings only
+                        text_parts = [item for item in content if isinstance(item, str)]
+                        return text_parts[0] if text_parts else None
+            return None
+
         deduplicated_new_messages = []
         for msg in new_messages:
             # Check if this is a user prompt that's already in original_messages
             if isinstance(msg, ModelRequest) and any(
                 isinstance(part, UserPromptPart) for part in msg.parts
             ):
+                msg_text = get_user_prompt_text(msg)
                 # Check if an identical user prompt is already in original_messages
                 already_exists = any(
                     isinstance(existing, ModelRequest)
-                    and any(isinstance(p, UserPromptPart) for p in existing.parts)
-                    and existing.parts == msg.parts
+                    and get_user_prompt_text(existing) == msg_text
                     for existing in original_messages[
                         -5:
                     ]  # Check last 5 messages for efficiency
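The text-only comparison matters because UserPromptPart records a timestamp at construction time, so two structurally identical prompts virtually never compare equal part-for-part. A small illustration (not package code):

```python
from pydantic_ai.messages import UserPromptPart

a = UserPromptPart(content="index the repo")
b = UserPromptPart(content="index the repo")

print(a.content == b.content)  # True
print(a == b)  # almost always False: the timestamp fields differ
```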
@@ -881,6 +1040,13 @@
 
             deduplicated_new_messages.append(msg)
 
+        # Mark file resume prompts as internal (hidden from UI)
+        # When file_contents is provided, the prompt is system-generated, not user input
+        if file_contents:
+            deduplicated_new_messages = self._mark_as_internal_prompts(
+                deduplicated_new_messages
+            )
+
         self.ui_message_history = original_messages + deduplicated_new_messages
 
         # Get file operations early so we can use them for contextual messages
@@ -895,6 +1061,48 @@
             },
         )
 
+        # Check if there are file requests (takes priority over clarifying questions)
+        # But ignore file_requests if we just provided file_contents (prevents infinite loops)
+        if agent_response.file_requests and not file_contents:
+            logger.info(
+                f"Agent requested {len(agent_response.file_requests)} files to be loaded"
+            )
+
+            # Set pending state
+            self._file_request_pending = True
+            self._pending_file_requests = agent_response.file_requests
+
+            # Add agent's response as hint if present
+            if agent_response.response:
+                self.ui_message_history.append(
+                    HintMessage(message=agent_response.response)
+                )
+
+            # Add file loading indicator
+            files_list = "\n".join(f"- `{p}`" for p in agent_response.file_requests)
+            self.ui_message_history.append(
+                HintMessage(message=f"📁 Loading requested files:\n{files_list}")
+            )
+
+            # Post UI update with hint messages
+            self._post_messages_updated([])
+
+            # Post event to TUI to load files and resume
+            self.post_message(
+                FileRequestPendingMessage(
+                    file_paths=agent_response.file_requests,
+                    response_text=agent_response.response,
+                )
+            )
+
+            return result
+        elif agent_response.file_requests and file_contents:
+            # We just provided files, ignore any new file_requests to prevent loops
+            logger.debug(
+                "Ignoring file_requests (files were just provided): %s",
+                agent_response.file_requests,
+            )
+
         # Check if there are clarifying questions
         if agent_response.clarifying_questions:
             logger.info(
@@ -918,11 +1126,16 @@
             self.ui_message_history.append(
                 HintMessage(message=f"💡 {agent_response.clarifying_questions[0]}")
             )
+            # Add plan hint for Drafting mode (Planning mode uses PlanPanelWidget)
+            self._maybe_add_plan_hint_drafting_mode(deps)
         else:
             # Multiple questions (2+) - enter Q&A mode
             self._qa_questions = agent_response.clarifying_questions
             self._qa_mode_active = True
 
+            # In Drafting mode, show plan BEFORE Q&A questions (without "Shall I continue?")
+            self._maybe_add_plan_hint_drafting_mode(deps, in_qa_mode=True)
+
             # Show intro with list, then first question
             questions_list_with_intro = (
                 f"I have {len(agent_response.clarifying_questions)} questions:\n\n"
@@ -984,6 +1197,9 @@
                 HintMessage(message="✅ Task completed")
             )
 
+        # Add plan hint for Drafting mode (Planning mode uses PlanPanelWidget)
+        self._maybe_add_plan_hint_drafting_mode(deps)
+
         # Post UI update immediately so user sees the response without delay
         # (file operations will be posted after compaction to avoid duplicates)
         logger.debug("Posting immediate UI update with hint message")
@@ -1082,6 +1298,11 @@
         else:
             partial_parts = []
 
+        # Wrap stream with cancellable iterator for responsive ESC handling
+        deps = _ctx.deps
+        if deps.cancellation_event:
+            stream = CancellableStreamIterator(stream, deps.cancellation_event)
+
         async for event in stream:
             try:
                 if isinstance(event, PartStartEvent):
@@ -1359,6 +1580,47 @@
             # Common path is a file, show parent directory
             return f"📁 Modified {num_files} files in: `{path_obj.parent}`"
 
+    def _maybe_add_plan_hint_drafting_mode(
+        self, deps: AgentDeps, in_qa_mode: bool = False
+    ) -> None:
+        """Add execution plan hint for router agent in Drafting mode only.
+
+        In Drafting mode, there's no PlanPanelWidget, so we show the plan
+        in the chat history with a "Shall I continue?" prompt (unless in Q&A mode).
+
+        In Planning mode, the PlanPanelWidget handles plan display.
+
+        Args:
+            deps: Agent dependencies (may be RouterDeps for router agent)
+            in_qa_mode: If True, skip the "Shall I continue?" prompt since user
+                needs to answer Q&A questions first.
+        """
+        if self._current_agent_type != AgentType.ROUTER:
+            return
+
+        if not isinstance(deps, RouterDeps):
+            return
+
+        # Only show plan hints in Drafting mode
+        # Planning mode uses PlanPanelWidget instead
+        if deps.router_mode != RouterMode.DRAFTING:
+            return
+
+        if deps.current_plan is None:
+            return
+
+        plan_display = deps.current_plan.format_for_display()
+
+        # In drafting mode, if plan is not complete and NOT in Q&A mode,
+        # prompt user to continue
+        if not deps.current_plan.is_complete() and not in_qa_mode:
+            plan_display += "\n\n**Shall I continue?**"
+
+        logger.debug("Adding plan hint to UI history (Drafting mode)")
+        self.ui_message_history.append(
+            HintMessage(message=f"**Current Plan**\n\n{plan_display}")
+        )
+
     def _post_messages_updated(
         self, file_operations: list[FileOperation] | None = None
     ) -> None:
@@ -1371,6 +1633,43 @@
             )
         )
 
+    def _mark_as_internal_prompts(
+        self,
+        messages: list[ModelRequest | ModelResponse | HintMessage],
+    ) -> list[ModelRequest | ModelResponse | HintMessage]:
+        """Mark UserPromptPart as InternalPromptPart for system-generated prompts.
+
+        Used when file_contents is provided - the resume prompt is system-generated,
+        not actual user input, and should be hidden from the UI.
+
+        Args:
+            messages: List of messages that may contain user prompts to mark as internal
+
+        Returns:
+            List of messages with UserPromptPart converted to InternalPromptPart
+        """
+        result: list[ModelRequest | ModelResponse | HintMessage] = []
+        for msg in messages:
+            if isinstance(msg, ModelRequest):
+                new_parts: list[ModelRequestPart] = []
+                for part in msg.parts:
+                    if isinstance(part, UserPromptPart) and not isinstance(
+                        part, InternalPromptPart
+                    ):
+                        # Convert to InternalPromptPart
+                        new_parts.append(
+                            InternalPromptPart(
+                                content=part.content,
+                                timestamp=part.timestamp,
+                            )
+                        )
+                    else:
+                        new_parts.append(part)
+                result.append(ModelRequest(parts=new_parts))
+            else:
+                result.append(msg)
+        return result
+
     def _filter_system_prompts(
         self, messages: list[ModelMessage | HintMessage]
     ) -> list[ModelMessage | HintMessage]:
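The isinstance checks above imply that InternalPromptPart is a UserPromptPart subclass used purely as a visibility marker. A plausible sketch of such a marker class, assuming the actual definition in shotgun/agents/messages.py may differ:

```python
from dataclasses import dataclass

from pydantic_ai.messages import UserPromptPart


@dataclass
class InternalPromptPart(UserPromptPart):
    """System-generated prompt part hidden from the chat UI (sketch).

    Behaves exactly like UserPromptPart for the model; UI formatters can
    skip any part that is an instance of this class.
    """
```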
shotgun/agents/cancellation.py ADDED
@@ -0,0 +1,103 @@
+"""Cancellation utilities for agent execution.
+
+This module provides utilities for responsive cancellation of agent operations,
+particularly for handling ESC key presses during LLM streaming.
+"""
+
+import asyncio
+from collections.abc import AsyncIterable, AsyncIterator
+from typing import TypeVar
+
+from shotgun.logging_config import get_logger
+
+logger = get_logger(__name__)
+
+T = TypeVar("T")
+
+CANCELLATION_CHECK_INTERVAL = 0.5  # Check every 500ms
+CANCELLATION_MESSAGE = "Operation cancelled by user"
+
+
+class CancellableStreamIterator(AsyncIterator[T]):
+    """Wraps an async iterable to check for cancellation periodically.
+
+    This allows ESC cancellation to be responsive even when the underlying
+    stream (LLM chunks) is slow to produce events. Instead of blocking
+    indefinitely on the next chunk, we timeout periodically and check
+    if cancellation was requested.
+
+    Example:
+        ```python
+        cancellation_event = asyncio.Event()
+
+        async def process_stream(stream):
+            wrapped = CancellableStreamIterator(stream, cancellation_event)
+            async for event in wrapped:
+                process(event)
+
+        # In another task, set the event to cancel:
+        cancellation_event.set()
+        ```
+    """
+
+    def __init__(
+        self,
+        stream: AsyncIterable[T],
+        cancellation_event: asyncio.Event | None = None,
+        check_interval: float = CANCELLATION_CHECK_INTERVAL,
+    ) -> None:
+        """Initialize the cancellable stream iterator.
+
+        Args:
+            stream: The underlying async iterable to wrap
+            cancellation_event: Event that signals cancellation when set
+            check_interval: How often to check for cancellation (seconds)
+        """
+        self._stream = stream
+        self._iterator: AsyncIterator[T] | None = None
+        self._cancellation_event = cancellation_event
+        self._check_interval = check_interval
+        self._pending_task: asyncio.Task[T] | None = None
+
+    def __aiter__(self) -> AsyncIterator[T]:
+        return self
+
+    async def __anext__(self) -> T:
+        if self._iterator is None:
+            self._iterator = self._stream.__aiter__()
+
+        # Create a task for the next item if we don't have one pending
+        if self._pending_task is None:
+            # Capture iterator reference for the coroutine
+            iterator = self._iterator
+
+            async def get_next() -> T:
+                return await iterator.__anext__()
+
+            self._pending_task = asyncio.create_task(get_next())
+
+        while True:
+            # Check if cancellation was requested
+            if self._cancellation_event and self._cancellation_event.is_set():
+                logger.debug("Cancellation detected in stream iterator")
+                # Cancel the pending task and raise
+                self._pending_task.cancel()
+                self._pending_task = None
+                raise asyncio.CancelledError(CANCELLATION_MESSAGE)
+
+            # Wait for the task with a short timeout
+            # Using asyncio.wait instead of wait_for to avoid cancelling the task on timeout
+            done, _ = await asyncio.wait(
+                [self._pending_task],
+                timeout=self._check_interval,
+                return_when=asyncio.FIRST_COMPLETED,
+            )
+
+            if done:
+                # Task completed - get result and clear pending task
+                task = done.pop()
+                self._pending_task = None
+                # Re-raise StopAsyncIteration or return the result
+                return task.result()
+
+            # Task not done yet, loop and check cancellation again
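Because the module is self-contained, the polling behavior is easy to exercise: wrap a stream whose chunks arrive slower than the check interval, set the event mid-iteration, and the iterator raises within about one check interval instead of blocking on the next chunk. A standalone demo:

```python
import asyncio

from shotgun.agents.cancellation import CancellableStreamIterator


async def slow_stream():
    """Simulates an LLM stream whose chunks arrive every 5 seconds."""
    for i in range(10):
        await asyncio.sleep(5)
        yield i


async def main() -> None:
    cancel = asyncio.Event()
    # Trip the event after 1s, as the TUI's ESC handler would.
    asyncio.get_running_loop().call_later(1.0, cancel.set)
    try:
        async for chunk in CancellableStreamIterator(slow_stream(), cancel):
            print(chunk)
    except asyncio.CancelledError as exc:
        # Raised within ~0.5s of cancel.set(), not after the 5s chunk delay.
        print(f"Stopped: {exc}")


asyncio.run(main())
```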
shotgun/agents/common.py CHANGED
@@ -38,8 +38,11 @@ from .tools import (
     codebase_shell,
     directory_lister,
     file_read,
+    insert_markdown_section,
     query_graph,
     read_file,
+    remove_markdown_section,
+    replace_markdown_section,
     retrieve_code,
     write_file,
 )
@@ -69,6 +72,11 @@ async def add_system_status_message(
         await deps.codebase_service.list_graphs_for_directory()
     )
 
+    # Get graphs currently being indexed
+    indexing_graph_ids: set[str] = set()
+    if deps.codebase_service:
+        indexing_graph_ids = deps.codebase_service.indexing.get_active_ids()
+
     # Get existing files for the agent
     existing_files = get_agent_existing_files(deps.agent_mode)
@@ -94,6 +102,7 @@
     system_state = prompt_loader.render(
         "agents/state/system_state.j2",
         codebase_understanding_graphs=codebase_understanding_graphs,
+        indexing_graph_ids=indexing_graph_ids,
         is_tui_context=deps.is_tui_context,
         existing_files=existing_files,
         markdown_toc=markdown_toc,
@@ -198,6 +207,9 @@
     agent.tool(write_file)
     agent.tool(append_file)
     agent.tool(read_file)
+    agent.tool(replace_markdown_section)
+    agent.tool(insert_markdown_section)
+    agent.tool(remove_markdown_section)
 
     # Register codebase understanding tools (conditional)
     if load_codebase_understanding_tools:
shotgun/agents/config/README.md CHANGED
@@ -41,7 +41,6 @@ This directory contains the configuration management system for Shotgun, includi
 - **Title**: "feat: add config migration for streaming capability field (v4->v5)"
 - **Key Changes**:
   - Added `supports_streaming` field to OpenAI config
-  - Added `shown_onboarding_popup` timestamp field
   - Added `supabase_jwt` to Shotgun Account config
 
 ## Migration System
shotgun/agents/config/manager.py CHANGED
@@ -377,9 +377,9 @@ class ConfigManager:
 
         # Find default model for this provider
         provider_models = {
-            ProviderType.OPENAI: ModelName.GPT_5_1,
-            ProviderType.ANTHROPIC: ModelName.CLAUDE_HAIKU_4_5,
-            ProviderType.GOOGLE: ModelName.GEMINI_2_5_PRO,
+            ProviderType.OPENAI: ModelName.GPT_5_2,
+            ProviderType.ANTHROPIC: ModelName.CLAUDE_SONNET_4_5,
+            ProviderType.GOOGLE: ModelName.GEMINI_3_PRO_PREVIEW,
         }
 
         if provider in provider_models:
@@ -521,15 +521,18 @@ class ConfigManager:
         if provider_enum is None:
             raise RuntimeError("Provider enum should not be None for LLM providers")
         other_providers = [p for p in ProviderType if p != provider_enum]
-        has_other_keys = any(self.has_provider_key(p) for p in other_providers)
+        has_other_keys = any(
+            self._provider_has_api_key(self._get_provider_config(config, p))
+            for p in other_providers
+        )
         if not has_other_keys:
             # Set selected_model to this provider's default model
             from .models import ModelName
 
             provider_models = {
-                ProviderType.OPENAI: ModelName.GPT_5_1,
-                ProviderType.ANTHROPIC: ModelName.CLAUDE_HAIKU_4_5,
-                ProviderType.GOOGLE: ModelName.GEMINI_2_5_PRO,
+                ProviderType.OPENAI: ModelName.GPT_5_2,
+                ProviderType.ANTHROPIC: ModelName.CLAUDE_SONNET_4_5,
+                ProviderType.GOOGLE: ModelName.GEMINI_3_PRO_PREVIEW,
             }
             if provider_enum in provider_models:
                 config.selected_model = provider_models[provider_enum]