shotgun-sh 0.2.8.dev2__py3-none-any.whl → 0.3.3.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (175)
  1. shotgun/agents/agent_manager.py +382 -60
  2. shotgun/agents/common.py +15 -9
  3. shotgun/agents/config/README.md +89 -0
  4. shotgun/agents/config/__init__.py +10 -1
  5. shotgun/agents/config/constants.py +0 -6
  6. shotgun/agents/config/manager.py +383 -82
  7. shotgun/agents/config/models.py +122 -18
  8. shotgun/agents/config/provider.py +81 -15
  9. shotgun/agents/config/streaming_test.py +119 -0
  10. shotgun/agents/context_analyzer/__init__.py +28 -0
  11. shotgun/agents/context_analyzer/analyzer.py +475 -0
  12. shotgun/agents/context_analyzer/constants.py +9 -0
  13. shotgun/agents/context_analyzer/formatter.py +115 -0
  14. shotgun/agents/context_analyzer/models.py +212 -0
  15. shotgun/agents/conversation/__init__.py +18 -0
  16. shotgun/agents/conversation/filters.py +164 -0
  17. shotgun/agents/conversation/history/chunking.py +278 -0
  18. shotgun/agents/{history → conversation/history}/compaction.py +36 -5
  19. shotgun/agents/{history → conversation/history}/constants.py +5 -0
  20. shotgun/agents/conversation/history/file_content_deduplication.py +216 -0
  21. shotgun/agents/{history → conversation/history}/history_processors.py +380 -8
  22. shotgun/agents/{history → conversation/history}/token_counting/anthropic.py +25 -1
  23. shotgun/agents/{history → conversation/history}/token_counting/base.py +14 -3
  24. shotgun/agents/{history → conversation/history}/token_counting/openai.py +11 -1
  25. shotgun/agents/{history → conversation/history}/token_counting/sentencepiece_counter.py +8 -0
  26. shotgun/agents/{history → conversation/history}/token_counting/tokenizer_cache.py +3 -1
  27. shotgun/agents/{history → conversation/history}/token_counting/utils.py +0 -3
  28. shotgun/agents/{conversation_manager.py → conversation/manager.py} +36 -20
  29. shotgun/agents/{conversation_history.py → conversation/models.py} +8 -92
  30. shotgun/agents/error/__init__.py +11 -0
  31. shotgun/agents/error/models.py +19 -0
  32. shotgun/agents/export.py +2 -2
  33. shotgun/agents/plan.py +2 -2
  34. shotgun/agents/research.py +3 -3
  35. shotgun/agents/runner.py +230 -0
  36. shotgun/agents/specify.py +2 -2
  37. shotgun/agents/tasks.py +2 -2
  38. shotgun/agents/tools/codebase/codebase_shell.py +6 -0
  39. shotgun/agents/tools/codebase/directory_lister.py +6 -0
  40. shotgun/agents/tools/codebase/file_read.py +11 -2
  41. shotgun/agents/tools/codebase/query_graph.py +6 -0
  42. shotgun/agents/tools/codebase/retrieve_code.py +6 -0
  43. shotgun/agents/tools/file_management.py +27 -7
  44. shotgun/agents/tools/registry.py +217 -0
  45. shotgun/agents/tools/web_search/__init__.py +8 -8
  46. shotgun/agents/tools/web_search/anthropic.py +8 -2
  47. shotgun/agents/tools/web_search/gemini.py +7 -1
  48. shotgun/agents/tools/web_search/openai.py +8 -2
  49. shotgun/agents/tools/web_search/utils.py +2 -2
  50. shotgun/agents/usage_manager.py +16 -11
  51. shotgun/api_endpoints.py +7 -3
  52. shotgun/build_constants.py +2 -2
  53. shotgun/cli/clear.py +53 -0
  54. shotgun/cli/compact.py +188 -0
  55. shotgun/cli/config.py +8 -5
  56. shotgun/cli/context.py +154 -0
  57. shotgun/cli/error_handler.py +24 -0
  58. shotgun/cli/export.py +34 -34
  59. shotgun/cli/feedback.py +4 -2
  60. shotgun/cli/models.py +1 -0
  61. shotgun/cli/plan.py +34 -34
  62. shotgun/cli/research.py +18 -10
  63. shotgun/cli/spec/__init__.py +5 -0
  64. shotgun/cli/spec/backup.py +81 -0
  65. shotgun/cli/spec/commands.py +132 -0
  66. shotgun/cli/spec/models.py +48 -0
  67. shotgun/cli/spec/pull_service.py +219 -0
  68. shotgun/cli/specify.py +20 -19
  69. shotgun/cli/tasks.py +34 -34
  70. shotgun/cli/update.py +16 -2
  71. shotgun/codebase/core/change_detector.py +5 -3
  72. shotgun/codebase/core/code_retrieval.py +4 -2
  73. shotgun/codebase/core/ingestor.py +163 -15
  74. shotgun/codebase/core/manager.py +13 -4
  75. shotgun/codebase/core/nl_query.py +1 -1
  76. shotgun/codebase/models.py +2 -0
  77. shotgun/exceptions.py +357 -0
  78. shotgun/llm_proxy/__init__.py +17 -0
  79. shotgun/llm_proxy/client.py +215 -0
  80. shotgun/llm_proxy/models.py +137 -0
  81. shotgun/logging_config.py +60 -27
  82. shotgun/main.py +77 -11
  83. shotgun/posthog_telemetry.py +38 -29
  84. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +28 -2
  85. shotgun/prompts/agents/partials/interactive_mode.j2 +3 -3
  86. shotgun/prompts/agents/plan.j2 +16 -0
  87. shotgun/prompts/agents/research.j2 +16 -3
  88. shotgun/prompts/agents/specify.j2 +54 -1
  89. shotgun/prompts/agents/state/system_state.j2 +0 -2
  90. shotgun/prompts/agents/tasks.j2 +16 -0
  91. shotgun/prompts/history/chunk_summarization.j2 +34 -0
  92. shotgun/prompts/history/combine_summaries.j2 +53 -0
  93. shotgun/sdk/codebase.py +14 -3
  94. shotgun/sentry_telemetry.py +163 -16
  95. shotgun/settings.py +243 -0
  96. shotgun/shotgun_web/__init__.py +67 -1
  97. shotgun/shotgun_web/client.py +42 -1
  98. shotgun/shotgun_web/constants.py +46 -0
  99. shotgun/shotgun_web/exceptions.py +29 -0
  100. shotgun/shotgun_web/models.py +390 -0
  101. shotgun/shotgun_web/shared_specs/__init__.py +32 -0
  102. shotgun/shotgun_web/shared_specs/file_scanner.py +175 -0
  103. shotgun/shotgun_web/shared_specs/hasher.py +83 -0
  104. shotgun/shotgun_web/shared_specs/models.py +71 -0
  105. shotgun/shotgun_web/shared_specs/upload_pipeline.py +329 -0
  106. shotgun/shotgun_web/shared_specs/utils.py +34 -0
  107. shotgun/shotgun_web/specs_client.py +703 -0
  108. shotgun/shotgun_web/supabase_client.py +31 -0
  109. shotgun/telemetry.py +10 -33
  110. shotgun/tui/app.py +310 -46
  111. shotgun/tui/commands/__init__.py +1 -1
  112. shotgun/tui/components/context_indicator.py +179 -0
  113. shotgun/tui/components/mode_indicator.py +70 -0
  114. shotgun/tui/components/status_bar.py +48 -0
  115. shotgun/tui/containers.py +91 -0
  116. shotgun/tui/dependencies.py +39 -0
  117. shotgun/tui/layout.py +5 -0
  118. shotgun/tui/protocols.py +45 -0
  119. shotgun/tui/screens/chat/__init__.py +5 -0
  120. shotgun/tui/screens/chat/chat.tcss +54 -0
  121. shotgun/tui/screens/chat/chat_screen.py +1531 -0
  122. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +243 -0
  123. shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
  124. shotgun/tui/screens/chat/help_text.py +40 -0
  125. shotgun/tui/screens/chat/prompt_history.py +48 -0
  126. shotgun/tui/screens/chat.tcss +11 -0
  127. shotgun/tui/screens/chat_screen/command_providers.py +91 -4
  128. shotgun/tui/screens/chat_screen/hint_message.py +76 -1
  129. shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
  130. shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
  131. shotgun/tui/screens/chat_screen/history/chat_history.py +115 -0
  132. shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
  133. shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
  134. shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
  135. shotgun/tui/screens/confirmation_dialog.py +191 -0
  136. shotgun/tui/screens/directory_setup.py +45 -41
  137. shotgun/tui/screens/feedback.py +14 -7
  138. shotgun/tui/screens/github_issue.py +111 -0
  139. shotgun/tui/screens/model_picker.py +77 -32
  140. shotgun/tui/screens/onboarding.py +580 -0
  141. shotgun/tui/screens/pipx_migration.py +205 -0
  142. shotgun/tui/screens/provider_config.py +116 -35
  143. shotgun/tui/screens/shared_specs/__init__.py +21 -0
  144. shotgun/tui/screens/shared_specs/create_spec_dialog.py +273 -0
  145. shotgun/tui/screens/shared_specs/models.py +56 -0
  146. shotgun/tui/screens/shared_specs/share_specs_dialog.py +390 -0
  147. shotgun/tui/screens/shared_specs/upload_progress_screen.py +452 -0
  148. shotgun/tui/screens/shotgun_auth.py +112 -18
  149. shotgun/tui/screens/spec_pull.py +288 -0
  150. shotgun/tui/screens/welcome.py +137 -11
  151. shotgun/tui/services/__init__.py +5 -0
  152. shotgun/tui/services/conversation_service.py +187 -0
  153. shotgun/tui/state/__init__.py +7 -0
  154. shotgun/tui/state/processing_state.py +185 -0
  155. shotgun/tui/utils/mode_progress.py +14 -7
  156. shotgun/tui/widgets/__init__.py +5 -0
  157. shotgun/tui/widgets/widget_coordinator.py +263 -0
  158. shotgun/utils/file_system_utils.py +22 -2
  159. shotgun/utils/marketing.py +110 -0
  160. shotgun/utils/update_checker.py +69 -14
  161. shotgun_sh-0.3.3.dev1.dist-info/METADATA +472 -0
  162. shotgun_sh-0.3.3.dev1.dist-info/RECORD +229 -0
  163. {shotgun_sh-0.2.8.dev2.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/WHEEL +1 -1
  164. {shotgun_sh-0.2.8.dev2.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/entry_points.txt +1 -0
  165. {shotgun_sh-0.2.8.dev2.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/licenses/LICENSE +1 -1
  166. shotgun/tui/screens/chat.py +0 -996
  167. shotgun/tui/screens/chat_screen/history.py +0 -335
  168. shotgun_sh-0.2.8.dev2.dist-info/METADATA +0 -126
  169. shotgun_sh-0.2.8.dev2.dist-info/RECORD +0 -155
  170. /shotgun/agents/{history → conversation/history}/__init__.py +0 -0
  171. /shotgun/agents/{history → conversation/history}/context_extraction.py +0 -0
  172. /shotgun/agents/{history → conversation/history}/history_building.py +0 -0
  173. /shotgun/agents/{history → conversation/history}/message_utils.py +0 -0
  174. /shotgun/agents/{history → conversation/history}/token_counting/__init__.py +0 -0
  175. /shotgun/agents/{history → conversation/history}/token_estimation.py +0 -0
shotgun/agents/agent_manager.py
@@ -17,7 +17,7 @@ from tenacity import (
 )
 
 if TYPE_CHECKING:
-    from shotgun.agents.conversation_history import ConversationState
+    from shotgun.agents.conversation import ConversationState
 
 from pydantic_ai import (
     Agent,
@@ -40,19 +40,36 @@ from pydantic_ai.messages import (
     SystemPromptPart,
     ToolCallPart,
     ToolCallPartDelta,
+    UserPromptPart,
 )
 from textual.message import Message
 from textual.widget import Widget
 
 from shotgun.agents.common import add_system_prompt_message, add_system_status_message
-from shotgun.agents.config.models import KeyProvider
-from shotgun.agents.models import AgentResponse, AgentType, FileOperation
+from shotgun.agents.config.models import (
+    KeyProvider,
+    ModelConfig,
+    ModelName,
+    ProviderType,
+)
+from shotgun.agents.context_analyzer import (
+    ContextAnalysis,
+    ContextAnalyzer,
+    ContextCompositionTelemetry,
+    ContextFormatter,
+)
+from shotgun.agents.models import (
+    AgentResponse,
+    AgentType,
+    FileOperation,
+    FileOperationTracker,
+)
 from shotgun.posthog_telemetry import track_event
 from shotgun.tui.screens.chat_screen.hint_message import HintMessage
 from shotgun.utils.source_detection import detect_source
 
+from .conversation.history.compaction import apply_persistent_compaction
 from .export import create_export_agent
-from .history.compaction import apply_persistent_compaction
 from .messages import AgentSystemPrompt
 from .models import AgentDeps, AgentRuntimeOptions
 from .plan import create_plan_agent
@@ -149,6 +166,44 @@ class ClarifyingQuestionsMessage(Message):
         self.response_text = response_text
 
 
+class CompactionStartedMessage(Message):
+    """Event posted when conversation compaction starts."""
+
+
+class CompactionCompletedMessage(Message):
+    """Event posted when conversation compaction completes."""
+
+
+class AgentStreamingStarted(Message):
+    """Event posted when agent starts streaming responses."""
+
+
+class AgentStreamingCompleted(Message):
+    """Event posted when agent finishes streaming responses."""
+
+
+@dataclass(frozen=True)
+class ModelConfigUpdated:
+    """Data returned when AI model configuration changes.
+
+    Used as a return value from ModelPickerScreen to communicate model
+    selection back to the calling screen.
+
+    Attributes:
+        old_model: Previous model name (None if first selection)
+        new_model: New model name
+        provider: LLM provider (OpenAI, Anthropic, Google)
+        key_provider: Authentication method (BYOK or Shotgun)
+        model_config: Complete model configuration
+    """
+
+    old_model: ModelName | None
+    new_model: ModelName
+    provider: ProviderType
+    key_provider: KeyProvider
+    model_config: ModelConfig
+
+
 @dataclass(slots=True)
 class _PartialStreamState:
     """Tracks streamed messages while handling a single agent run."""
@@ -180,7 +235,7 @@ class AgentManager(Widget):
         self.deps = deps
 
         # Create AgentRuntimeOptions from deps for agent creation
-        agent_runtime_options = AgentRuntimeOptions(
+        self._agent_runtime_options = AgentRuntimeOptions(
            interactive_mode=self.deps.interactive_mode,
            working_directory=self.deps.working_directory,
            is_tui_context=self.deps.is_tui_context,
@@ -189,22 +244,18 @@ class AgentManager(Widget):
             tasks=self.deps.tasks,
         )
 
-        # Initialize all agents and store their specific deps
-        self.research_agent, self.research_deps = create_research_agent(
-            agent_runtime_options=agent_runtime_options
-        )
-        self.plan_agent, self.plan_deps = create_plan_agent(
-            agent_runtime_options=agent_runtime_options
-        )
-        self.tasks_agent, self.tasks_deps = create_tasks_agent(
-            agent_runtime_options=agent_runtime_options
-        )
-        self.specify_agent, self.specify_deps = create_specify_agent(
-            agent_runtime_options=agent_runtime_options
-        )
-        self.export_agent, self.export_deps = create_export_agent(
-            agent_runtime_options=agent_runtime_options
-        )
+        # Lazy initialization - agents created on first access
+        self._research_agent: Agent[AgentDeps, AgentResponse] | None = None
+        self._research_deps: AgentDeps | None = None
+        self._plan_agent: Agent[AgentDeps, AgentResponse] | None = None
+        self._plan_deps: AgentDeps | None = None
+        self._tasks_agent: Agent[AgentDeps, AgentResponse] | None = None
+        self._tasks_deps: AgentDeps | None = None
+        self._specify_agent: Agent[AgentDeps, AgentResponse] | None = None
+        self._specify_deps: AgentDeps | None = None
+        self._export_agent: Agent[AgentDeps, AgentResponse] | None = None
+        self._export_deps: AgentDeps | None = None
+        self._agents_initialized = False
 
         # Track current active agent
         self._current_agent_type: AgentType = initial_type
@@ -219,6 +270,119 @@ class AgentManager(Widget):
         self._qa_questions: list[str] | None = None
         self._qa_mode_active: bool = False
 
+    async def _ensure_agents_initialized(self) -> None:
+        """Ensure all agents are initialized (lazy initialization)."""
+        if self._agents_initialized:
+            return
+
+        # Initialize all agents asynchronously
+        self._research_agent, self._research_deps = await create_research_agent(
+            agent_runtime_options=self._agent_runtime_options
+        )
+        self._plan_agent, self._plan_deps = await create_plan_agent(
+            agent_runtime_options=self._agent_runtime_options
+        )
+        self._tasks_agent, self._tasks_deps = await create_tasks_agent(
+            agent_runtime_options=self._agent_runtime_options
+        )
+        self._specify_agent, self._specify_deps = await create_specify_agent(
+            agent_runtime_options=self._agent_runtime_options
+        )
+        self._export_agent, self._export_deps = await create_export_agent(
+            agent_runtime_options=self._agent_runtime_options
+        )
+        self._agents_initialized = True
+
+    @property
+    def research_agent(self) -> Agent[AgentDeps, AgentResponse]:
+        """Get research agent (must call _ensure_agents_initialized first)."""
+        if self._research_agent is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._research_agent
+
+    @property
+    def research_deps(self) -> AgentDeps:
+        """Get research deps (must call _ensure_agents_initialized first)."""
+        if self._research_deps is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._research_deps
+
+    @property
+    def plan_agent(self) -> Agent[AgentDeps, AgentResponse]:
+        """Get plan agent (must call _ensure_agents_initialized first)."""
+        if self._plan_agent is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._plan_agent
+
+    @property
+    def plan_deps(self) -> AgentDeps:
+        """Get plan deps (must call _ensure_agents_initialized first)."""
+        if self._plan_deps is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._plan_deps
+
+    @property
+    def tasks_agent(self) -> Agent[AgentDeps, AgentResponse]:
+        """Get tasks agent (must call _ensure_agents_initialized first)."""
+        if self._tasks_agent is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._tasks_agent
+
+    @property
+    def tasks_deps(self) -> AgentDeps:
+        """Get tasks deps (must call _ensure_agents_initialized first)."""
+        if self._tasks_deps is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._tasks_deps
+
+    @property
+    def specify_agent(self) -> Agent[AgentDeps, AgentResponse]:
+        """Get specify agent (must call _ensure_agents_initialized first)."""
+        if self._specify_agent is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._specify_agent
+
+    @property
+    def specify_deps(self) -> AgentDeps:
+        """Get specify deps (must call _ensure_agents_initialized first)."""
+        if self._specify_deps is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._specify_deps
+
+    @property
+    def export_agent(self) -> Agent[AgentDeps, AgentResponse]:
+        """Get export agent (must call _ensure_agents_initialized first)."""
+        if self._export_agent is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._export_agent
+
+    @property
+    def export_deps(self) -> AgentDeps:
+        """Get export deps (must call _ensure_agents_initialized first)."""
+        if self._export_deps is None:
+            raise RuntimeError(
+                "Agents not initialized. Call _ensure_agents_initialized() first."
+            )
+        return self._export_deps
+
     @property
     def current_agent(self) -> Agent[AgentDeps, AgentResponse]:
         """Get the currently active agent.
@@ -370,6 +534,9 @@ class AgentManager(Widget):
         Returns:
             The agent run result.
         """
+        # Ensure agents are initialized before running
+        await self._ensure_agents_initialized()
+
         logger.info(f"Running agent {self._current_agent_type.value}")
         # Use merged deps (shared state + agent-specific system prompt) if not provided
         if deps is None:
@@ -382,19 +549,11 @@ class AgentManager(Widget):
         # Clear file tracker before each run to track only this run's operations
         deps.file_tracker.clear()
 
-        # Add user prompt if present (will be shown immediately via post_messages_updated)
-        if prompt:
-            user_request = ModelRequest.user_text_prompt(prompt)
-            self.ui_message_history.append(user_request)
-
-        # Always post update before run to show user message (or current state if no prompt)
-        self._post_messages_updated()
+        # Don't manually add the user prompt - Pydantic AI will include it in result.new_messages()
+        # This prevents duplicates and confusion with incremental mounting
 
-        # Save history WITHOUT the just-added prompt to avoid duplicates
-        # (result.new_messages() will include the prompt)
-        original_messages = (
-            self.ui_message_history[:-1] if prompt else self.ui_message_history.copy()
-        )
+        # Save current message history before the run
+        original_messages = self.ui_message_history.copy()
 
         # Start with persistent message history
         message_history = self.message_history
@@ -456,19 +615,33 @@ class AgentManager(Widget):
         self._stream_state = _PartialStreamState()
 
         model_name = ""
+        supports_streaming = True  # Default to streaming enabled
+
         if hasattr(deps, "llm_model") and deps.llm_model is not None:
             model_name = deps.llm_model.name
+            supports_streaming = deps.llm_model.supports_streaming
 
-        # Check if it's a Shotgun account
-        is_shotgun_account = (
-            hasattr(deps, "llm_model")
-            and deps.llm_model is not None
-            and deps.llm_model.key_provider == KeyProvider.SHOTGUN
-        )
-
-        # Only disable streaming for GPT-5 if NOT a Shotgun account
-        # Shotgun accounts support streaming for GPT-5
-        is_gpt5_byok = "gpt-5" in model_name.lower() and not is_shotgun_account
+            # Add hint message if streaming is disabled for BYOK GPT-5 models
+            if (
+                not supports_streaming
+                and deps.llm_model.key_provider == KeyProvider.BYOK
+            ):
+                self.ui_message_history.append(
+                    HintMessage(
+                        message=(
+                            "⚠️ **Streaming not available for GPT-5**\n\n"
+                            "Your OpenAI organization doesn't have streaming enabled for this model.\n\n"
+                            "**Options:**\n"
+                            "- Get a [Shotgun Account](https://shotgun.sh) - streaming works out of the box\n"
+                            "- Complete [Biometric Verification](https://platform.openai.com/settings/organization/general) with OpenAI, then:\n"
+                            " 1. Press `Ctrl+P` → Open Provider Setup\n"
+                            " 2. Select OpenAI → Clear key\n"
+                            " 3. Re-add your OpenAI API key\n\n"
+                            "Continuing without streaming (responses will appear all at once)."
+                        )
+                    )
+                )
+                self._post_messages_updated()
 
         # Track message send event
         event_name = f"message_send_{self._current_agent_type.value}"
@@ -488,7 +661,7 @@ class AgentManager(Widget):
             usage_limits=usage_limits,
             message_history=message_history,
             event_stream_handler=self._handle_event_stream
-            if not is_gpt5_byok
+            if supports_streaming
             else None,
             **kwargs,
         )
@@ -562,11 +735,35 @@ class AgentManager(Widget):
             },
         )
 
-        # Always add the agent's response messages to maintain conversation history
-        self.ui_message_history = original_messages + cast(
+        # Merge agent's response messages, avoiding duplicates
+        # The TUI may have already added the user prompt, so check for it
+        new_messages = cast(
             list[ModelRequest | ModelResponse | HintMessage], result.new_messages()
         )
 
+        # Deduplicate: skip user prompts that are already in original_messages
+        deduplicated_new_messages = []
+        for msg in new_messages:
+            # Check if this is a user prompt that's already in original_messages
+            if isinstance(msg, ModelRequest) and any(
+                isinstance(part, UserPromptPart) for part in msg.parts
+            ):
+                # Check if an identical user prompt is already in original_messages
+                already_exists = any(
+                    isinstance(existing, ModelRequest)
+                    and any(isinstance(p, UserPromptPart) for p in existing.parts)
+                    and existing.parts == msg.parts
+                    for existing in original_messages[
+                        -5:
+                    ]  # Check last 5 messages for efficiency
+                )
+                if already_exists:
+                    continue  # Skip this duplicate user prompt
+
+            deduplicated_new_messages.append(msg)
+
+        self.ui_message_history = original_messages + deduplicated_new_messages
+
         # Get file operations early so we can use them for contextual messages
         file_operations = deps.file_tracker.operations.copy()
         self.recently_change_files = file_operations
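
Aside: the deduplication above only inspects a short tail window of the existing history before appending new messages. As a rough standalone illustration (generic strings stand in for message objects, and the helper name is hypothetical):

    def merge_without_duplicates(
        existing: list[str], incoming: list[str], window: int = 5
    ) -> list[str]:
        """Append incoming items, skipping any already present in the last few existing items."""
        tail = existing[-window:]
        merged = existing.copy()
        for item in incoming:
            if item in tail:
                continue  # already shown, e.g. a user prompt the TUI added eagerly
            merged.append(item)
        return merged


    # The duplicate "hello" is dropped; the new reply is kept.
    assert merge_without_duplicates(["hello"], ["hello", "hi there"]) == ["hello", "hi there"]
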
@@ -591,6 +788,12 @@ class AgentManager(Widget):
                     HintMessage(message=agent_response.response)
                 )
 
+            # Add file operation hints before questions (so they appear first in UI)
+            if file_operations:
+                file_hint = self._create_file_operation_hint(file_operations)
+                if file_hint:
+                    self.ui_message_history.append(HintMessage(message=file_hint))
+
             if len(agent_response.clarifying_questions) == 1:
                 # Single question - treat as non-blocking suggestion, DON'T enter Q&A mode
                 self.ui_message_history.append(
@@ -626,11 +829,9 @@ class AgentManager(Widget):
                     )
                 )
 
-            # Post UI update with hint messages and file operations
-            logger.debug(
-                "Posting UI update for Q&A mode with hint messages and file operations"
-            )
-            self._post_messages_updated(file_operations)
+            # Post UI update with hint messages (file operations will be posted after compaction)
+            logger.debug("Posting UI update for Q&A mode with hint messages")
+            self._post_messages_updated([])
         else:
             # No clarifying questions - show the response or a default success message
             if agent_response.response and agent_response.response.strip():
@@ -665,19 +866,31 @@ class AgentManager(Widget):
                 )
 
             # Post UI update immediately so user sees the response without delay
-            logger.debug(
-                "Posting immediate UI update with hint message and file operations"
-            )
-            self._post_messages_updated(file_operations)
+            # (file operations will be posted after compaction to avoid duplicates)
+            logger.debug("Posting immediate UI update with hint message")
+            self._post_messages_updated([])
 
         # Apply compaction to persistent message history to prevent cascading growth
         all_messages = result.all_messages()
+        messages_before_compaction = len(all_messages)
+        compaction_occurred = False
+
         try:
             logger.debug(
                 "Starting message history compaction",
                 extra={"message_count": len(all_messages)},
             )
+            # Notify UI that compaction is starting
+            self.post_message(CompactionStartedMessage())
+
             self.message_history = await apply_persistent_compaction(all_messages, deps)
+
+            # Track if compaction actually modified the history
+            compaction_occurred = len(self.message_history) != len(all_messages)
+
+            # Notify UI that compaction is complete
+            self.post_message(CompactionCompletedMessage())
+
             logger.debug(
                 "Completed message history compaction",
                 extra={
@@ -699,9 +912,17 @@ class AgentManager(Widget):
             # Fallback: use uncompacted messages to prevent data loss
             self.message_history = all_messages
 
+        # Track context composition telemetry
+        await self._track_context_analysis(
+            compaction_occurred=compaction_occurred,
+            messages_before_compaction=messages_before_compaction
+            if compaction_occurred
+            else None,
+        )
+
         usage = result.usage()
         if hasattr(deps, "llm_model") and deps.llm_model is not None:
-            deps.usage_manager.add_usage(
+            await deps.usage_manager.add_usage(
                 usage, model_name=deps.llm_model.name, provider=deps.llm_model.provider
             )
         else:
@@ -710,8 +931,13 @@ class AgentManager(Widget):
                 extra={"agent_mode": self._current_agent_type.value},
             )
 
-        # UI updates are now posted immediately in each branch (Q&A or non-Q&A)
-        # before compaction, so no duplicate posting needed here
+        # Post final UI update after compaction completes
+        # This ensures widgets that depend on message_history (like context indicator)
+        # receive the updated history after compaction
+        logger.debug(
+            "Posting final UI update after compaction with updated message_history"
+        )
+        self._post_messages_updated(file_operations)
 
         return result
 
@@ -722,6 +948,9 @@ class AgentManager(Widget):
     ) -> None:
         """Process streamed events and forward partial updates to the UI."""
 
+        # Notify UI that streaming has started
+        self.post_message(AgentStreamingStarted())
+
         state = self._stream_state
         if state is None:
             state = self._stream_state = _PartialStreamState()
@@ -900,6 +1129,9 @@ class AgentManager(Widget):
             self._post_partial_message(True)
             state.current_response = None
 
+        # Notify UI that streaming has completed
+        self.post_message(AgentStreamingCompleted())
+
     def _build_partial_response(
         self, parts: list[ModelResponsePart | ToolCallPartDelta]
     ) -> ModelResponse | None:
@@ -927,6 +1159,38 @@ class AgentManager(Widget):
             )
         )
 
+    def _create_file_operation_hint(
+        self, file_operations: list[FileOperation]
+    ) -> str | None:
+        """Create a hint message for file operations.
+
+        Args:
+            file_operations: List of file operations to create a hint for
+
+        Returns:
+            Hint message string or None if no operations
+        """
+        if not file_operations:
+            return None
+
+        tracker = FileOperationTracker(operations=file_operations)
+        display_path = tracker.get_display_path()
+
+        if not display_path:
+            return None
+
+        path_obj = Path(display_path)
+
+        if len(file_operations) == 1:
+            return f"📝 Modified: `{display_path}`"
+        else:
+            num_files = len({op.file_path for op in file_operations})
+            if path_obj.is_dir():
+                return f"📁 Modified {num_files} files in: `{display_path}`"
+            else:
+                # Common path is a file, show parent directory
+                return f"📁 Modified {num_files} files in: `{path_obj.parent}`"
+
     def _post_messages_updated(
         self, file_operations: list[FileOperation] | None = None
     ) -> None:
@@ -988,13 +1252,69 @@ class AgentManager(Widget):
     def get_usage_hint(self) -> str | None:
         return self.deps.usage_manager.build_usage_hint()
 
+    async def get_context_hint(self) -> str | None:
+        """Get conversation context analysis as a formatted hint.
+
+        Returns:
+            Markdown-formatted string with context composition statistics, or None if unavailable
+        """
+        analysis = await self.get_context_analysis()
+        if analysis:
+            return ContextFormatter.format_markdown(analysis)
+        return None
+
+    async def get_context_analysis(self) -> ContextAnalysis | None:
+        """Get conversation context analysis as structured data.
+
+        Returns:
+            ContextAnalysis object with token usage data, or None if unavailable
+        """
+
+        try:
+            analyzer = ContextAnalyzer(self.deps.llm_model)
+            return await analyzer.analyze_conversation(
+                self.message_history, self.ui_message_history
+            )
+        except Exception as e:
+            logger.error(f"Failed to generate context analysis: {e}", exc_info=True)
+            return None
+
+    async def _track_context_analysis(
+        self,
+        compaction_occurred: bool = False,
+        messages_before_compaction: int | None = None,
+    ) -> None:
+        """Track context composition telemetry to PostHog.
+
+        Args:
+            compaction_occurred: Whether compaction was applied
+            messages_before_compaction: Message count before compaction, if it occurred
+        """
+        try:
+            analyzer = ContextAnalyzer(self.deps.llm_model)
+            analysis = await analyzer.analyze_conversation(
+                self.message_history, self.ui_message_history
+            )
+
+            # Create telemetry model from analysis
+            telemetry = ContextCompositionTelemetry.from_analysis(
+                analysis,
+                compaction_occurred=compaction_occurred,
+                messages_before_compaction=messages_before_compaction,
+            )
+
+            # Send to PostHog using model_dump() for dict conversion
+            track_event("agent_context_composition", telemetry.model_dump())
+        except Exception as e:
+            logger.warning(f"Failed to track context analysis: {e}")
+
     def get_conversation_state(self) -> "ConversationState":
         """Get the current conversation state.
 
         Returns:
             ConversationState object containing UI and agent messages and current type
         """
-        from shotgun.agents.conversation_history import ConversationState
+        from shotgun.agents.conversation import ConversationState
         return ConversationState(
             agent_messages=self.message_history.copy(),
             ui_messages=self.ui_message_history.copy(),
@@ -1035,7 +1355,9 @@ class AgentManager(Widget):
 __all__ = [
     "AgentManager",
     "AgentType",
+    "ClarifyingQuestionsMessage",
+    "CompactionCompletedMessage",
+    "CompactionStartedMessage",
     "MessageHistoryUpdated",
     "PartialResponseMessage",
-    "ClarifyingQuestionsMessage",
 ]