shotgun-sh 0.2.11.dev1__py3-none-any.whl → 0.2.11.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shotgun-sh might be problematic.

Files changed (67)
  1. shotgun/agents/agent_manager.py +150 -27
  2. shotgun/agents/common.py +14 -8
  3. shotgun/agents/config/manager.py +64 -33
  4. shotgun/agents/config/models.py +21 -1
  5. shotgun/agents/config/provider.py +2 -2
  6. shotgun/agents/context_analyzer/analyzer.py +2 -24
  7. shotgun/agents/conversation_manager.py +22 -13
  8. shotgun/agents/export.py +2 -2
  9. shotgun/agents/history/token_counting/anthropic.py +17 -1
  10. shotgun/agents/history/token_counting/base.py +14 -3
  11. shotgun/agents/history/token_counting/openai.py +8 -0
  12. shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
  13. shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
  14. shotgun/agents/history/token_counting/utils.py +0 -3
  15. shotgun/agents/plan.py +2 -2
  16. shotgun/agents/research.py +3 -3
  17. shotgun/agents/specify.py +2 -2
  18. shotgun/agents/tasks.py +2 -2
  19. shotgun/agents/tools/codebase/file_read.py +5 -2
  20. shotgun/agents/tools/file_management.py +11 -7
  21. shotgun/agents/tools/web_search/__init__.py +8 -8
  22. shotgun/agents/tools/web_search/anthropic.py +2 -2
  23. shotgun/agents/tools/web_search/gemini.py +1 -1
  24. shotgun/agents/tools/web_search/openai.py +1 -1
  25. shotgun/agents/tools/web_search/utils.py +2 -2
  26. shotgun/agents/usage_manager.py +16 -11
  27. shotgun/cli/clear.py +2 -1
  28. shotgun/cli/compact.py +3 -3
  29. shotgun/cli/config.py +8 -5
  30. shotgun/cli/context.py +2 -2
  31. shotgun/cli/export.py +1 -1
  32. shotgun/cli/feedback.py +4 -2
  33. shotgun/cli/plan.py +1 -1
  34. shotgun/cli/research.py +1 -1
  35. shotgun/cli/specify.py +1 -1
  36. shotgun/cli/tasks.py +1 -1
  37. shotgun/codebase/core/change_detector.py +5 -3
  38. shotgun/codebase/core/code_retrieval.py +4 -2
  39. shotgun/codebase/core/ingestor.py +10 -8
  40. shotgun/codebase/core/manager.py +3 -3
  41. shotgun/codebase/core/nl_query.py +1 -1
  42. shotgun/logging_config.py +10 -17
  43. shotgun/main.py +3 -1
  44. shotgun/posthog_telemetry.py +14 -4
  45. shotgun/sentry_telemetry.py +3 -1
  46. shotgun/telemetry.py +3 -1
  47. shotgun/tui/app.py +62 -51
  48. shotgun/tui/components/context_indicator.py +43 -0
  49. shotgun/tui/containers.py +15 -17
  50. shotgun/tui/dependencies.py +2 -2
  51. shotgun/tui/screens/chat/chat_screen.py +75 -15
  52. shotgun/tui/screens/chat/help_text.py +16 -15
  53. shotgun/tui/screens/feedback.py +4 -4
  54. shotgun/tui/screens/model_picker.py +21 -20
  55. shotgun/tui/screens/provider_config.py +50 -27
  56. shotgun/tui/screens/shotgun_auth.py +2 -2
  57. shotgun/tui/screens/welcome.py +14 -11
  58. shotgun/tui/services/conversation_service.py +8 -8
  59. shotgun/tui/utils/mode_progress.py +14 -7
  60. shotgun/tui/widgets/widget_coordinator.py +15 -0
  61. shotgun/utils/file_system_utils.py +19 -0
  62. shotgun/utils/marketing.py +110 -0
  63. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev3.dist-info}/METADATA +2 -1
  64. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev3.dist-info}/RECORD +67 -66
  65. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev3.dist-info}/WHEEL +0 -0
  66. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev3.dist-info}/entry_points.txt +0 -0
  67. {shotgun_sh-0.2.11.dev1.dist-info → shotgun_sh-0.2.11.dev3.dist-info}/licenses/LICENSE +0 -0
shotgun/tui/components/context_indicator.py CHANGED
@@ -1,6 +1,7 @@
 """Context window indicator component for showing model usage."""
 
 from textual.reactive import reactive
+from textual.timer import Timer
 from textual.widgets import Static
 
 from shotgun.agents.config.models import MODEL_SPECS, ModelName
@@ -20,6 +21,10 @@ class ContextIndicator(Static):
 
     context_analysis: reactive[ContextAnalysis | None] = reactive(None)
     model_name: reactive[ModelName | None] = reactive(None)
+    is_streaming: reactive[bool] = reactive(False)
+
+    _animation_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
+    _animation_index = 0
 
     def __init__(
         self,
@@ -29,6 +34,7 @@ class ContextIndicator(Static):
         classes: str | None = None,
     ) -> None:
         super().__init__(name=name, id=id, classes=classes)
+        self._animation_timer: Timer | None = None
 
     def update_context(
         self, analysis: ContextAnalysis | None, model: ModelName | None
@@ -43,6 +49,38 @@ class ContextIndicator(Static):
         self.model_name = model
         self._refresh_display()
 
+    def set_streaming(self, streaming: bool) -> None:
+        """Enable or disable streaming animation.
+
+        Args:
+            streaming: Whether to show streaming animation
+        """
+        self.is_streaming = streaming
+        if streaming:
+            self._start_animation()
+        else:
+            self._stop_animation()
+
+    def _start_animation(self) -> None:
+        """Start the pulsing animation."""
+        if self._animation_timer is None:
+            self._animation_timer = self.set_interval(0.1, self._animate_frame)
+
+    def _stop_animation(self) -> None:
+        """Stop the pulsing animation."""
+        if self._animation_timer is not None:
+            self._animation_timer.stop()
+            self._animation_timer = None
+        self._animation_index = 0
+        self._refresh_display()
+
+    def _animate_frame(self) -> None:
+        """Advance the animation frame."""
+        self._animation_index = (self._animation_index + 1) % len(
+            self._animation_frames
+        )
+        self._refresh_display()
+
     def _get_percentage_color(self, percentage: float) -> str:
         """Get color for percentage based on threshold.
 
@@ -112,6 +150,11 @@ class ContextIndicator(Static):
             f"[{color}]{percentage}% ({current_tokens}/{max_tokens})[/]",
         ]
 
+        # Add streaming animation indicator if streaming
+        if self.is_streaming:
+            animation_char = self._animation_frames[self._animation_index]
+            parts.append(f"[bold cyan]{animation_char}[/]")
+
         # Add model name if available
         if self.model_name:
             model_spec = MODEL_SPECS.get(self.model_name)
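
The ContextIndicator additions follow a standard Textual recipe: a reactive flag toggles a repeating timer created with set_interval(), the timer callback advances a Braille frame, and Timer.stop() tears it down. A minimal, self-contained sketch of that recipe is below; the Spinner/SpinnerDemo names and styling are illustrative, not code from the package.

from textual.app import App, ComposeResult
from textual.timer import Timer
from textual.widgets import Static

FRAMES = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]


class Spinner(Static):
    """Minimal spinner: advance a Braille frame every 100 ms while streaming."""

    def __init__(self) -> None:
        super().__init__("idle")
        self._timer: Timer | None = None
        self._index = 0

    def set_streaming(self, streaming: bool) -> None:
        if streaming and self._timer is None:
            # set_interval() returns a Timer that calls _advance every 0.1 s
            self._timer = self.set_interval(0.1, self._advance)
        elif not streaming and self._timer is not None:
            self._timer.stop()
            self._timer = None
            self._index = 0
            self.update("idle")

    def _advance(self) -> None:
        self._index = (self._index + 1) % len(FRAMES)
        self.update(f"[bold cyan]{FRAMES[self._index]}[/]")


class SpinnerDemo(App[None]):
    def compose(self) -> ComposeResult:
        yield Spinner()

    def on_mount(self) -> None:
        self.query_one(Spinner).set_streaming(True)


if __name__ == "__main__":
    SpinnerDemo().run()
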
shotgun/tui/containers.py CHANGED
@@ -5,10 +5,8 @@ from typing import TYPE_CHECKING
 from dependency_injector import containers, providers
 from pydantic_ai import RunContext
 
-from shotgun.agents.agent_manager import AgentManager
-from shotgun.agents.config import get_provider_model
 from shotgun.agents.conversation_manager import ConversationManager
-from shotgun.agents.models import AgentDeps, AgentType
+from shotgun.agents.models import AgentDeps
 from shotgun.sdk.codebase import CodebaseSDK
 from shotgun.tui.commands import CommandHandler
 from shotgun.tui.filtered_codebase_service import FilteredCodebaseService
@@ -35,13 +33,19 @@ class TUIContainer(containers.DeclarativeContainer):
 
     This container manages the lifecycle and dependencies of all TUI components,
     ensuring consistent configuration and facilitating testing.
+
+    Note: model_config and agent_deps are created lazily via async factory methods
+    since get_provider_model() is now async.
     """
 
     # Configuration
     config = providers.Configuration()
 
     # Core dependencies
-    model_config = providers.Singleton(get_provider_model)
+    # TODO: Figure out a better solution for async dependency injection
+    # model_config is now loaded lazily via create_default_tui_deps()
+    # because get_provider_model() is async. This breaks the DI pattern
+    # and should be refactored to support async factories properly.
 
     storage_dir = providers.Singleton(lambda: get_shotgun_home() / "codebases")
 
@@ -51,15 +55,10 @@ class TUIContainer(containers.DeclarativeContainer):
 
     system_prompt_fn = providers.Object(_placeholder_system_prompt)
 
-    # AgentDeps singleton
-    agent_deps = providers.Singleton(
-        AgentDeps,
-        interactive_mode=True,
-        is_tui_context=True,
-        llm_model=model_config,
-        codebase_service=codebase_service,
-        system_prompt_fn=system_prompt_fn,
-    )
+    # TODO: Figure out a better solution for async dependency injection
+    # AgentDeps is now created via async create_default_tui_deps()
+    # instead of using DI container's Singleton provider because it requires
+    # async model_config initialization
 
     # Service singletons
     codebase_sdk = providers.Singleton(CodebaseSDK)
@@ -74,10 +73,9 @@ class TUIContainer(containers.DeclarativeContainer):
         ConversationService, conversation_manager=conversation_manager
     )
 
-    # Factory for AgentManager (needs agent_type parameter)
-    agent_manager_factory = providers.Factory(
-        AgentManager, deps=agent_deps, initial_type=providers.Object(AgentType.RESEARCH)
-    )
+    # TODO: Figure out a better solution for async dependency injection
+    # AgentManager factory removed - create via async initialization
+    # since it requires async agent creation
 
     # Factory for ProcessingStateManager (needs ChatScreen reference)
     processing_state_factory = providers.Factory(
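
The TODO comments above spell out the constraint: get_provider_model() is now async, so it can no longer be wired directly into a synchronous Singleton provider. One possible bridge, sketched under the assumption of an async entry point (make_model_config() and bootstrap() are illustrative stand-ins, not shotgun-sh code), is to await the value once at startup and override a placeholder Object provider with the result:

import asyncio

from dependency_injector import containers, providers


async def make_model_config() -> dict:
    # Illustrative stand-in for an async loader such as get_provider_model().
    await asyncio.sleep(0)
    return {"provider": "example", "model": "example-model"}


class Container(containers.DeclarativeContainer):
    # Placeholder provider; the real value is supplied after it has been awaited.
    model_config = providers.Object(None)


async def bootstrap() -> Container:
    container = Container()
    # Resolve the async value once, then expose it synchronously to the rest of the app.
    container.model_config.override(providers.Object(await make_model_config()))
    return container


if __name__ == "__main__":
    container = asyncio.run(bootstrap())
    print(container.model_config())
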
shotgun/tui/dependencies.py CHANGED
@@ -8,7 +8,7 @@ from shotgun.tui.filtered_codebase_service import FilteredCodebaseService
 from shotgun.utils import get_shotgun_home
 
 
-def create_default_tui_deps() -> AgentDeps:
+async def create_default_tui_deps() -> AgentDeps:
     """Create default AgentDeps for TUI components.
 
     This creates a standard AgentDeps configuration suitable for interactive
@@ -21,7 +21,7 @@ def create_default_tui_deps() -> AgentDeps:
     Returns:
         Configured AgentDeps instance ready for TUI use.
     """
-    model_config = get_provider_model()
+    model_config = await get_provider_model()
    storage_dir = get_shotgun_home() / "codebases"
    codebase_service = FilteredCodebaseService(storage_dir)
 
shotgun/tui/screens/chat/chat_screen.py CHANGED
@@ -75,6 +75,7 @@ from shotgun.tui.state.processing_state import ProcessingStateManager
 from shotgun.tui.utils.mode_progress import PlaceholderHints
 from shotgun.tui.widgets.widget_coordinator import WidgetCoordinator
 from shotgun.utils import get_shotgun_home
+from shotgun.utils.marketing import MarketingManager
 
 logger = logging.getLogger(__name__)
 
@@ -386,11 +387,11 @@
         # Save to conversation file
         conversation_file = get_shotgun_home() / "conversation.json"
         manager = ConversationManager(conversation_file)
-        conversation = manager.load()
+        conversation = await manager.load()
 
         if conversation:
             conversation.set_agent_messages(compacted_messages)
-            manager.save(conversation)
+            await manager.save(conversation)
 
         # Post compaction completed event
         self.agent_manager.post_message(CompactionCompletedMessage())
@@ -502,6 +503,34 @@
                 f"[CONTEXT] Failed to update context indicator: {e}", exc_info=True
             )
 
+    @work(exclusive=False)
+    async def update_context_indicator_with_messages(
+        self,
+        agent_messages: list[ModelMessage],
+        ui_messages: list[ModelMessage | HintMessage],
+    ) -> None:
+        """Update the context indicator with specific message sets (for streaming updates).
+
+        Args:
+            agent_messages: Agent message history including streaming messages (for token counting)
+            ui_messages: UI message history including hints and streaming messages
+        """
+        try:
+            from shotgun.agents.context_analyzer.analyzer import ContextAnalyzer
+
+            analyzer = ContextAnalyzer(self.deps.llm_model)
+            # Analyze the combined message histories for accurate progressive token counts
+            analysis = await analyzer.analyze_conversation(agent_messages, ui_messages)
+
+            if analysis:
+                model_name = self.deps.llm_model.name
+                self.widget_coordinator.update_context_indicator(analysis, model_name)
+        except Exception as e:
+            logger.error(
+                f"Failed to update context indicator with streaming messages: {e}",
+                exc_info=True,
+            )
+
     def compose(self) -> ComposeResult:
         """Create child widgets for the app."""
         with Container(id="window"):
@@ -551,7 +580,7 @@
                 # Keep all ModelResponse and other message types
                 filtered_event_messages.append(msg)
 
-        # Build new message list
+        # Build new message list combining existing messages with new streaming content
         new_message_list = self.messages + cast(
             list[ModelMessage | HintMessage], filtered_event_messages
         )
@@ -561,6 +590,13 @@
             self.partial_message, new_message_list
         )
 
+        # Update context indicator with full message history including streaming messages
+        # Combine existing agent history with new streaming messages for accurate token count
+        combined_agent_history = self.agent_manager.message_history + event.messages
+        self.update_context_indicator_with_messages(
+            combined_agent_history, new_message_list
+        )
+
     def _clear_partial_response(self) -> None:
         # Use widget coordinator to clear partial response
         self.widget_coordinator.set_partial_response(None, self.messages)
@@ -602,7 +638,9 @@
         self.qa_answers = []
 
     @on(MessageHistoryUpdated)
-    def handle_message_history_updated(self, event: MessageHistoryUpdated) -> None:
+    async def handle_message_history_updated(
+        self, event: MessageHistoryUpdated
+    ) -> None:
         """Handle message history updates from the agent manager."""
         self._clear_partial_response()
         self.messages = event.messages
@@ -644,6 +682,14 @@
 
         self.mount_hint(message)
 
+        # Check and display any marketing messages
+        from shotgun.tui.app import ShotgunApp
+
+        app = cast(ShotgunApp, self.app)
+        await MarketingManager.check_and_display_messages(
+            app.config_manager, event.file_operations, self.mount_hint
+        )
+
     @on(CompactionStartedMessage)
     def handle_compaction_started(self, event: CompactionStartedMessage) -> None:
         """Update spinner text when compaction starts."""
@@ -1048,6 +1094,9 @@
         self.processing_state.start_processing("Processing...")
         self.processing_state.bind_worker(get_current_worker())
 
+        # Start context indicator animation immediately
+        self.widget_coordinator.set_context_streaming(True)
+
         prompt = message
 
         try:
@@ -1083,6 +1132,8 @@
             self.mount_hint(hint)
         finally:
             self.processing_state.stop_processing()
+            # Stop context indicator animation
+            self.widget_coordinator.set_context_streaming(False)
 
         # Save conversation after each interaction
         self._save_conversation()
@@ -1091,20 +1142,29 @@
 
     def _save_conversation(self) -> None:
         """Save the current conversation to persistent storage."""
-        # Use conversation service for saving
-        self.conversation_service.save_conversation(self.agent_manager)
+        # Use conversation service for saving (run async in background)
+        self.run_worker(
+            self.conversation_service.save_conversation(self.agent_manager),
+            exclusive=False,
+        )
 
     def _load_conversation(self) -> None:
         """Load conversation from persistent storage."""
-        # Use conversation service for restoration
-        success, error_msg, restored_type = (
-            self.conversation_service.restore_conversation(
+
+        # Use conversation service for restoration (run async)
+        async def _do_load() -> None:
+            (
+                success,
+                error_msg,
+                restored_type,
+            ) = await self.conversation_service.restore_conversation(
                 self.agent_manager, self.deps.usage_manager
             )
-        )
 
-        if not success and error_msg:
-            self.mount_hint(error_msg)
-        elif success and restored_type:
-            # Update the current mode to match restored conversation
-            self.mode = restored_type
+            if not success and error_msg:
+                self.mount_hint(error_msg)
+            elif success and restored_type:
+                # Update the current mode to match restored conversation
+                self.mode = restored_type
+
+        self.run_worker(_do_load(), exclusive=False)
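
Most of the chat-screen changes reduce to one recurring move: synchronous Textual code now schedules async service calls instead of calling them inline, either by handing a coroutine to run_worker() (as _save_conversation and _load_conversation do) or by decorating the coroutine with @work (as update_context_indicator_with_messages does). A small sketch of both patterns, with illustrative names standing in for the real services:

import asyncio

from textual import work
from textual.app import App, ComposeResult
from textual.widgets import Static


async def save_conversation() -> None:
    """Illustrative stand-in for an async service call."""
    await asyncio.sleep(0.2)


class BridgeDemo(App[None]):
    def compose(self) -> ComposeResult:
        yield Static("bridging sync handlers to async work")

    def on_mount(self) -> None:
        # Pattern 1: hand a coroutine to run_worker from a sync method.
        self.run_worker(save_conversation(), exclusive=False)
        # Pattern 2: a @work-decorated coroutine schedules itself when called.
        self.refresh_labels()

    @work(exclusive=False)
    async def refresh_labels(self) -> None:
        await asyncio.sleep(0.1)
        self.query_one(Static).update("done")


if __name__ == "__main__":
    BridgeDemo().run()
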
shotgun/tui/screens/chat/help_text.py CHANGED
@@ -11,14 +11,14 @@ def help_text_with_codebase(already_indexed: bool = False) -> str:
         Formatted help text string.
     """
     return (
-        "Howdy! Welcome to Shotgun - the context tool for software engineering. \n\n"
-        "You can research, build specs, plan, create tasks, and export context to your "
-        "favorite code-gen agents.\n\n"
-        f"{'' if already_indexed else 'Once your codebase is indexed, '}I can help with:\n\n"
-        "- Speccing out a new feature\n"
-        "- Onboarding you onto this project\n"
-        "- Helping with a refactor spec\n"
-        "- Creating AGENTS.md file for this project\n"
+        "Howdy! Welcome to Shotgun - Spec Driven Development for Developers and AI Agents.\n\n"
+        "Shotgun writes codebase-aware specs for your AI coding agents so they don't derail.\n\n"
+        f"{'It' if already_indexed else 'Once your codebase is indexed, it'} can help you:\n"
+        "- Research your codebase and spec out new features\n"
+        "- Create implementation plans that fit your architecture\n"
+        "- Generate AGENTS.md files for AI coding agents\n"
+        "- Onboard to existing projects or plan refactors\n\n"
+        "Ready to build something? Let's go.\n"
     )
 
 
@@ -29,11 +29,12 @@ def help_text_empty_dir() -> str:
         Formatted help text string.
     """
     return (
-        "Howdy! Welcome to Shotgun - the context tool for software engineering.\n\n"
-        "You can research, build specs, plan, create tasks, and export context to your "
-        "favorite code-gen agents.\n\n"
-        "What would you like to build? Here are some examples:\n\n"
-        "- Research FastAPI vs Django\n"
-        "- Plan my new web app using React\n"
-        "- Create PRD for my planned product\n"
+        "Howdy! Welcome to Shotgun - Spec Driven Development for Developers and AI Agents.\n\n"
+        "Shotgun writes codebase-aware specs for your AI coding agents so they don't derail.\n\n"
+        "It can help you:\n"
+        "- Research your codebase and spec out new features\n"
+        "- Create implementation plans that fit your architecture\n"
+        "- Generate AGENTS.md files for AI coding agents\n"
+        "- Onboard to existing projects or plan refactors\n\n"
+        "Ready to build something? Let's go.\n"
     )
shotgun/tui/screens/feedback.py CHANGED
@@ -125,8 +125,8 @@ class FeedbackScreen(Screen[Feedback | None]):
         self.set_focus(self.query_one("#feedback-description", TextArea))
 
     @on(Button.Pressed, "#submit")
-    def _on_submit_pressed(self) -> None:
-        self._submit_feedback()
+    async def _on_submit_pressed(self) -> None:
+        await self._submit_feedback()
 
     @on(Button.Pressed, "#cancel")
     def _on_cancel_pressed(self) -> None:
@@ -171,7 +171,7 @@ class FeedbackScreen(Screen[Feedback | None]):
         }
         return placeholders.get(kind, "Enter your feedback...")
 
-    def _submit_feedback(self) -> None:
+    async def _submit_feedback(self) -> None:
         text_area = self.query_one("#feedback-description", TextArea)
         description = text_area.text.strip()
 
@@ -182,7 +182,7 @@ class FeedbackScreen(Screen[Feedback | None]):
             return
 
         app = cast("ShotgunApp", self.app)
-        shotgun_instance_id = app.config_manager.get_shotgun_instance_id()
+        shotgun_instance_id = await app.config_manager.get_shotgun_instance_id()
 
         feedback = Feedback(
             kind=self.selected_kind,
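
The feedback-screen change leans on the fact that Textual awaits coroutine message handlers, so an @on(Button.Pressed) method can simply become async def once it needs to await the config manager. A minimal sketch (app and widget names are illustrative; asyncio.sleep stands in for the awaited call):

import asyncio

from textual import on
from textual.app import App, ComposeResult
from textual.widgets import Button, Label


class AsyncHandlerDemo(App[None]):
    def compose(self) -> ComposeResult:
        yield Button("Submit", id="submit")
        yield Label("", id="status")

    @on(Button.Pressed, "#submit")
    async def _on_submit_pressed(self) -> None:
        # Illustrative await; the real handler awaits get_shotgun_instance_id().
        await asyncio.sleep(0.1)
        self.query_one("#status", Label).update("submitted")


if __name__ == "__main__":
    AsyncHandlerDemo().run()
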
shotgun/tui/screens/model_picker.py CHANGED
@@ -98,7 +98,7 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
             yield Button("Select \\[ENTER]", variant="primary", id="select")
             yield Button("Done \\[ESC]", id="done")
 
-    def _rebuild_model_list(self) -> None:
+    async def _rebuild_model_list(self) -> None:
         """Rebuild the model list from current config.
 
         This method is called both on first show and when screen is resumed
@@ -108,7 +108,7 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
 
         # Load current config with force_reload to get latest API keys
         config_manager = self.config_manager
-        config = config_manager.load(force_reload=True)
+        config = await config_manager.load(force_reload=True)
 
         # Log provider key status
         logger.debug(
@@ -133,7 +133,7 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
            logger.debug("Removed %d existing model items from list", old_count)
 
        # Add new items (labels already have correct text including current indicator)
-        new_items = self._build_model_items(config)
+        new_items = await self._build_model_items(config)
        for item in new_items:
            list_view.append(item)
        logger.debug("Added %d available model items to list", len(new_items))
@@ -153,7 +153,7 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
     def on_show(self) -> None:
         """Rebuild model list when screen is first shown."""
         logger.debug("ModelPickerScreen.on_show() called")
-        self._rebuild_model_list()
+        self.run_worker(self._rebuild_model_list(), exclusive=False)
 
     def on_screenresume(self) -> None:
         """Rebuild model list when screen is resumed (subsequent visits).
@@ -162,7 +162,7 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
         ensuring the model list reflects any config changes made while away.
         """
         logger.debug("ModelPickerScreen.on_screenresume() called")
-        self._rebuild_model_list()
+        self.run_worker(self._rebuild_model_list(), exclusive=False)
 
     def action_done(self) -> None:
         self.dismiss()
@@ -193,14 +193,14 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
         app = cast("ShotgunApp", self.app)
         return app.config_manager
 
-    def refresh_model_labels(self) -> None:
+    async def refresh_model_labels(self) -> None:
         """Update the list view entries to reflect current selection.
 
         Note: This method only updates labels for currently displayed models.
         To rebuild the entire list after provider changes, on_show() should be used.
         """
         # Load config once with force_reload
-        config = self.config_manager.load(force_reload=True)
+        config = await self.config_manager.load(force_reload=True)
         current_model = config.selected_model or get_default_model_for_provider(config)
 
         # Update labels for available models only
@@ -215,9 +215,11 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
                 self._model_label(model_name, is_current=model_name == current_model)
             )
 
-    def _build_model_items(self, config: ShotgunConfig | None = None) -> list[ListItem]:
+    async def _build_model_items(
+        self, config: ShotgunConfig | None = None
+    ) -> list[ListItem]:
         if config is None:
-            config = self.config_manager.load(force_reload=True)
+            config = await self.config_manager.load(force_reload=True)
 
         items: list[ListItem] = []
         current_model = self.selected_model
@@ -246,9 +248,7 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
                 return model_name
         return None
 
-    def _is_model_available(
-        self, model_name: ModelName, config: ShotgunConfig | None = None
-    ) -> bool:
+    def _is_model_available(self, model_name: ModelName, config: ShotgunConfig) -> bool:
         """Check if a model is available based on provider key configuration.
 
         A model is available if:
@@ -257,14 +257,11 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
 
         Args:
             model_name: The model to check availability for
-            config: Optional pre-loaded config to avoid multiple reloads
+            config: Pre-loaded config (must be provided)
 
         Returns:
             True if the model can be used, False otherwise
         """
-        if config is None:
-            config = self.config_manager.load(force_reload=True)
-
         # If Shotgun Account is configured, all models are available
         if self.config_manager._provider_has_api_key(config.shotgun):
             logger.debug("Model %s available (Shotgun Account configured)", model_name)
@@ -325,17 +322,21 @@ class ModelPickerScreen(Screen[ModelConfigUpdated | None]):
 
     def _select_model(self) -> None:
         """Save the selected model."""
+        self.run_worker(self._do_select_model(), exclusive=True)
+
+    async def _do_select_model(self) -> None:
+        """Async implementation of model selection."""
         try:
             # Get old model before updating
-            config = self.config_manager.load()
+            config = await self.config_manager.load()
             old_model = config.selected_model
 
             # Update the selected model in config
-            self.config_manager.update_selected_model(self.selected_model)
-            self.refresh_model_labels()
+            await self.config_manager.update_selected_model(self.selected_model)
+            await self.refresh_model_labels()
 
             # Get the full model config with provider information
-            model_config = get_provider_model(self.selected_model)
+            model_config = await get_provider_model(self.selected_model)
 
             # Dismiss the screen and return the model config update to the caller
             self.dismiss(