shotgun-sh 0.2.3.dev2__py3-none-any.whl → 0.2.11.dev5__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of shotgun-sh might be problematic.
- shotgun/agents/agent_manager.py +664 -75
- shotgun/agents/common.py +76 -70
- shotgun/agents/config/constants.py +0 -6
- shotgun/agents/config/manager.py +78 -36
- shotgun/agents/config/models.py +41 -1
- shotgun/agents/config/provider.py +70 -15
- shotgun/agents/context_analyzer/__init__.py +28 -0
- shotgun/agents/context_analyzer/analyzer.py +471 -0
- shotgun/agents/context_analyzer/constants.py +9 -0
- shotgun/agents/context_analyzer/formatter.py +115 -0
- shotgun/agents/context_analyzer/models.py +212 -0
- shotgun/agents/conversation_history.py +125 -2
- shotgun/agents/conversation_manager.py +57 -19
- shotgun/agents/export.py +6 -7
- shotgun/agents/history/compaction.py +9 -4
- shotgun/agents/history/context_extraction.py +93 -6
- shotgun/agents/history/history_processors.py +14 -2
- shotgun/agents/history/token_counting/anthropic.py +49 -11
- shotgun/agents/history/token_counting/base.py +14 -3
- shotgun/agents/history/token_counting/openai.py +8 -0
- shotgun/agents/history/token_counting/sentencepiece_counter.py +8 -0
- shotgun/agents/history/token_counting/tokenizer_cache.py +3 -1
- shotgun/agents/history/token_counting/utils.py +0 -3
- shotgun/agents/models.py +50 -2
- shotgun/agents/plan.py +6 -7
- shotgun/agents/research.py +7 -8
- shotgun/agents/specify.py +6 -7
- shotgun/agents/tasks.py +6 -7
- shotgun/agents/tools/__init__.py +0 -2
- shotgun/agents/tools/codebase/codebase_shell.py +6 -0
- shotgun/agents/tools/codebase/directory_lister.py +6 -0
- shotgun/agents/tools/codebase/file_read.py +11 -2
- shotgun/agents/tools/codebase/query_graph.py +6 -0
- shotgun/agents/tools/codebase/retrieve_code.py +6 -0
- shotgun/agents/tools/file_management.py +82 -16
- shotgun/agents/tools/registry.py +217 -0
- shotgun/agents/tools/web_search/__init__.py +30 -18
- shotgun/agents/tools/web_search/anthropic.py +26 -5
- shotgun/agents/tools/web_search/gemini.py +23 -11
- shotgun/agents/tools/web_search/openai.py +22 -13
- shotgun/agents/tools/web_search/utils.py +2 -2
- shotgun/agents/usage_manager.py +16 -11
- shotgun/api_endpoints.py +7 -3
- shotgun/build_constants.py +1 -1
- shotgun/cli/clear.py +53 -0
- shotgun/cli/compact.py +186 -0
- shotgun/cli/config.py +8 -5
- shotgun/cli/context.py +111 -0
- shotgun/cli/export.py +1 -1
- shotgun/cli/feedback.py +4 -2
- shotgun/cli/models.py +1 -0
- shotgun/cli/plan.py +1 -1
- shotgun/cli/research.py +1 -1
- shotgun/cli/specify.py +1 -1
- shotgun/cli/tasks.py +1 -1
- shotgun/cli/update.py +16 -2
- shotgun/codebase/core/change_detector.py +5 -3
- shotgun/codebase/core/code_retrieval.py +4 -2
- shotgun/codebase/core/ingestor.py +10 -8
- shotgun/codebase/core/manager.py +13 -4
- shotgun/codebase/core/nl_query.py +1 -1
- shotgun/llm_proxy/__init__.py +5 -2
- shotgun/llm_proxy/clients.py +12 -7
- shotgun/logging_config.py +18 -27
- shotgun/main.py +73 -11
- shotgun/posthog_telemetry.py +23 -7
- shotgun/prompts/agents/export.j2 +18 -1
- shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
- shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
- shotgun/prompts/agents/plan.j2 +1 -1
- shotgun/prompts/agents/research.j2 +1 -1
- shotgun/prompts/agents/specify.j2 +270 -3
- shotgun/prompts/agents/state/system_state.j2 +4 -0
- shotgun/prompts/agents/tasks.j2 +1 -1
- shotgun/prompts/loader.py +2 -2
- shotgun/prompts/tools/web_search.j2 +14 -0
- shotgun/sentry_telemetry.py +7 -16
- shotgun/settings.py +238 -0
- shotgun/telemetry.py +18 -33
- shotgun/tui/app.py +243 -43
- shotgun/tui/commands/__init__.py +1 -1
- shotgun/tui/components/context_indicator.py +179 -0
- shotgun/tui/components/mode_indicator.py +70 -0
- shotgun/tui/components/status_bar.py +48 -0
- shotgun/tui/containers.py +91 -0
- shotgun/tui/dependencies.py +39 -0
- shotgun/tui/protocols.py +45 -0
- shotgun/tui/screens/chat/__init__.py +5 -0
- shotgun/tui/screens/chat/chat.tcss +54 -0
- shotgun/tui/screens/chat/chat_screen.py +1202 -0
- shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
- shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
- shotgun/tui/screens/chat/help_text.py +40 -0
- shotgun/tui/screens/chat/prompt_history.py +48 -0
- shotgun/tui/screens/chat.tcss +11 -0
- shotgun/tui/screens/chat_screen/command_providers.py +78 -2
- shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
- shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
- shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
- shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
- shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
- shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
- shotgun/tui/screens/confirmation_dialog.py +151 -0
- shotgun/tui/screens/feedback.py +4 -4
- shotgun/tui/screens/github_issue.py +102 -0
- shotgun/tui/screens/model_picker.py +49 -24
- shotgun/tui/screens/onboarding.py +431 -0
- shotgun/tui/screens/pipx_migration.py +153 -0
- shotgun/tui/screens/provider_config.py +50 -27
- shotgun/tui/screens/shotgun_auth.py +2 -2
- shotgun/tui/screens/welcome.py +32 -10
- shotgun/tui/services/__init__.py +5 -0
- shotgun/tui/services/conversation_service.py +184 -0
- shotgun/tui/state/__init__.py +7 -0
- shotgun/tui/state/processing_state.py +185 -0
- shotgun/tui/utils/mode_progress.py +14 -7
- shotgun/tui/widgets/__init__.py +5 -0
- shotgun/tui/widgets/widget_coordinator.py +262 -0
- shotgun/utils/datetime_utils.py +77 -0
- shotgun/utils/file_system_utils.py +22 -2
- shotgun/utils/marketing.py +110 -0
- shotgun/utils/update_checker.py +69 -14
- shotgun_sh-0.2.11.dev5.dist-info/METADATA +130 -0
- shotgun_sh-0.2.11.dev5.dist-info/RECORD +193 -0
- {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/entry_points.txt +1 -0
- {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/licenses/LICENSE +1 -1
- shotgun/agents/tools/user_interaction.py +0 -37
- shotgun/tui/screens/chat.py +0 -804
- shotgun/tui/screens/chat_screen/history.py +0 -352
- shotgun_sh-0.2.3.dev2.dist-info/METADATA +0 -467
- shotgun_sh-0.2.3.dev2.dist-info/RECORD +0 -154
- {shotgun_sh-0.2.3.dev2.dist-info → shotgun_sh-0.2.11.dev5.dist-info}/WHEEL +0 -0
shotgun/tui/commands/__init__.py
CHANGED
@@ -57,7 +57,7 @@ class CommandHandler:
 **Keyboard Shortcuts:**

 * `Enter` - Send message
-* `Ctrl+P` - Open command palette
+* `Ctrl+P` - Open command palette (for usage, context, and other commands)
 * `Shift+Tab` - Cycle agent modes
 * `Ctrl+C` - Quit application


shotgun/tui/components/context_indicator.py
ADDED
@@ -0,0 +1,179 @@
+"""Context window indicator component for showing model usage."""
+
+from textual.reactive import reactive
+from textual.timer import Timer
+from textual.widgets import Static
+
+from shotgun.agents.config.models import MODEL_SPECS, ModelName
+from shotgun.agents.context_analyzer.models import ContextAnalysis
+
+
+class ContextIndicator(Static):
+    """Display context window usage and current model name."""
+
+    DEFAULT_CSS = """
+    ContextIndicator {
+        width: auto;
+        height: 1;
+        text-align: right;
+    }
+    """
+
+    context_analysis: reactive[ContextAnalysis | None] = reactive(None)
+    model_name: reactive[ModelName | None] = reactive(None)
+    is_streaming: reactive[bool] = reactive(False)
+
+    _animation_frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
+    _animation_index = 0
+
+    def __init__(
+        self,
+        *,
+        name: str | None = None,
+        id: str | None = None,
+        classes: str | None = None,
+    ) -> None:
+        super().__init__(name=name, id=id, classes=classes)
+        self._animation_timer: Timer | None = None
+
+    def update_context(
+        self, analysis: ContextAnalysis | None, model: ModelName | None
+    ) -> None:
+        """Update the context indicator with new analysis and model data.
+
+        Args:
+            analysis: Context analysis with token usage data
+            model: Current model name
+        """
+        self.context_analysis = analysis
+        self.model_name = model
+        self._refresh_display()
+
+    def set_streaming(self, streaming: bool) -> None:
+        """Enable or disable streaming animation.
+
+        Args:
+            streaming: Whether to show streaming animation
+        """
+        self.is_streaming = streaming
+        if streaming:
+            self._start_animation()
+        else:
+            self._stop_animation()
+
+    def _start_animation(self) -> None:
+        """Start the pulsing animation."""
+        if self._animation_timer is None:
+            self._animation_timer = self.set_interval(0.1, self._animate_frame)
+
+    def _stop_animation(self) -> None:
+        """Stop the pulsing animation."""
+        if self._animation_timer is not None:
+            self._animation_timer.stop()
+            self._animation_timer = None
+        self._animation_index = 0
+        self._refresh_display()
+
+    def _animate_frame(self) -> None:
+        """Advance the animation frame."""
+        self._animation_index = (self._animation_index + 1) % len(
+            self._animation_frames
+        )
+        self._refresh_display()
+
+    def _get_percentage_color(self, percentage: float) -> str:
+        """Get color for percentage based on threshold.
+
+        Args:
+            percentage: Usage percentage (0-100)
+
+        Returns:
+            Color name for Textual markup
+        """
+        if percentage < 60:
+            return "#00ff00"  # Green
+        elif percentage < 85:
+            return "#ffff00"  # Yellow
+        else:
+            return "#ff0000"  # Red
+
+    def _format_token_count(self, tokens: int) -> str:
+        """Format token count for display (e.g., 115000 -> "115K").
+
+        Args:
+            tokens: Token count
+
+        Returns:
+            Formatted string
+        """
+        if tokens >= 1_000_000:
+            return f"{tokens / 1_000_000:.1f}M"
+        elif tokens >= 1_000:
+            return f"{tokens / 1_000:.0f}K"
+        else:
+            return str(tokens)
+
+    def _refresh_display(self) -> None:
+        """Refresh the display with current context data."""
+        # If no analysis yet, show placeholder with model name or empty
+        if self.context_analysis is None:
+            if self.model_name:
+                model_spec = MODEL_SPECS.get(self.model_name)
+                model_display = (
+                    model_spec.short_name if model_spec else str(self.model_name)
+                )
+                self.update(f"[bold]{model_display}[/bold]")
+            else:
+                self.update("")
+            return
+
+        analysis = self.context_analysis
+
+        # Calculate percentage
+        if analysis.max_usable_tokens > 0:
+            percentage = round(
+                (analysis.agent_context_tokens / analysis.max_usable_tokens) * 100, 1
+            )
+        else:
+            percentage = 0.0
+
+        # Format token counts
+        current_tokens = self._format_token_count(analysis.agent_context_tokens)
+        max_tokens = self._format_token_count(analysis.max_usable_tokens)
+
+        # Get color based on percentage
+        color = self._get_percentage_color(percentage)
+
+        # Build the display string - always show full context info
+        parts = [
+            "[$foreground-muted]Context window:[/]",
+            f"[{color}]{percentage}% ({current_tokens}/{max_tokens})[/]",
+        ]
+
+        # Add streaming animation indicator if streaming
+        if self.is_streaming:
+            animation_char = self._animation_frames[self._animation_index]
+            parts.append(f"[bold cyan]{animation_char}[/]")
+
+        # Add model name if available
+        if self.model_name:
+            model_spec = MODEL_SPECS.get(self.model_name)
+            model_display = (
+                model_spec.short_name if model_spec else str(self.model_name)
+            )
+            parts.extend(
+                [
+                    "[$foreground-muted]|[/]",
+                    f"[bold]{model_display}[/bold]",
+                ]
+            )
+
+        self.update(" ".join(parts))
+
+    def watch_context_analysis(self, analysis: ContextAnalysis | None) -> None:
+        """React to context analysis changes."""
+        self._refresh_display()
+
+    def watch_model_name(self, model: ModelName | None) -> None:
+        """React to model name changes."""
+        self._refresh_display()
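
For orientation, here is a minimal, hypothetical sketch (not part of the package) of how the ContextIndicator added in this release could be mounted and driven from a Textual app. DemoApp is invented for illustration; passing None for the analysis exercises the placeholder branch shown above, and set_streaming toggles the braille spinner.

# Hypothetical usage sketch; DemoApp is not part of shotgun-sh.
from textual.app import App, ComposeResult

from shotgun.tui.components.context_indicator import ContextIndicator


class DemoApp(App[None]):
    def compose(self) -> ComposeResult:
        yield ContextIndicator(id="context-indicator")

    def on_mount(self) -> None:
        indicator = self.query_one(ContextIndicator)
        # With no ContextAnalysis yet, the widget shows only the model name (or nothing).
        indicator.update_context(None, None)
        # Show the streaming spinner while a response is being generated.
        indicator.set_streaming(True)


if __name__ == "__main__":
    DemoApp().run()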

shotgun/tui/components/mode_indicator.py
ADDED
@@ -0,0 +1,70 @@
+"""Widget to display the current agent mode."""
+
+from textual.widget import Widget
+
+from shotgun.agents.models import AgentType
+from shotgun.tui.protocols import QAStateProvider
+from shotgun.tui.utils.mode_progress import PlaceholderHints
+
+
+class ModeIndicator(Widget):
+    """Widget to display the current agent mode."""
+
+    DEFAULT_CSS = """
+    ModeIndicator {
+        text-wrap: wrap;
+        padding-left: 1;
+    }
+    """
+
+    def __init__(self, mode: AgentType) -> None:
+        """Initialize the mode indicator.
+
+        Args:
+            mode: The current agent type/mode.
+        """
+        super().__init__()
+        self.mode = mode
+        self.progress_checker = PlaceholderHints().progress_checker
+
+    def render(self) -> str:
+        """Render the mode indicator."""
+        # Check if in Q&A mode first
+        if isinstance(self.screen, QAStateProvider) and self.screen.qa_mode:
+            return (
+                "[bold $text-accent]Q&A mode[/]"
+                "[$foreground-muted] (Answer the clarifying questions or ESC to cancel)[/]"
+            )
+
+        mode_display = {
+            AgentType.RESEARCH: "Research",
+            AgentType.PLAN: "Planning",
+            AgentType.TASKS: "Tasks",
+            AgentType.SPECIFY: "Specify",
+            AgentType.EXPORT: "Export",
+        }
+        mode_description = {
+            AgentType.RESEARCH: (
+                "Research topics with web search and synthesize findings"
+            ),
+            AgentType.PLAN: "Create comprehensive, actionable plans with milestones",
+            AgentType.TASKS: (
+                "Generate specific, actionable tasks from research and plans"
+            ),
+            AgentType.SPECIFY: (
+                "Create detailed specifications and requirements documents"
+            ),
+            AgentType.EXPORT: "Export artifacts and findings to various formats",
+        }
+
+        mode_title = mode_display.get(self.mode, self.mode.value.title())
+        description = mode_description.get(self.mode, "")
+
+        # Check if mode has content
+        has_content = self.progress_checker.has_mode_content(self.mode)
+        status_icon = " ✓" if has_content else ""
+
+        return (
+            f"[bold $text-accent]{mode_title}{status_icon} mode[/]"
+            f"[$foreground-muted] ({description})[/]"
+        )

shotgun/tui/components/status_bar.py
ADDED
@@ -0,0 +1,48 @@
+"""Widget to display the status bar with contextual help text."""
+
+from textual.widget import Widget
+
+from shotgun.tui.protocols import QAStateProvider
+
+
+class StatusBar(Widget):
+    """Widget to display the status bar with contextual help text."""
+
+    DEFAULT_CSS = """
+    StatusBar {
+        text-wrap: wrap;
+        padding-left: 1;
+    }
+    """
+
+    def __init__(self, working: bool = False) -> None:
+        """Initialize the status bar.
+
+        Args:
+            working: Whether an agent is currently working.
+        """
+        super().__init__()
+        self.working = working
+
+    def render(self) -> str:
+        """Render the status bar with contextual help text."""
+        # Check if in Q&A mode first (highest priority)
+        if isinstance(self.screen, QAStateProvider) and self.screen.qa_mode:
+            return (
+                "[$foreground-muted][bold $text]esc[/] to exit Q&A mode • "
+                "[bold $text]enter[/] to send answer • [bold $text]ctrl+j[/] for newline[/]"
+            )
+
+        if self.working:
+            return (
+                "[$foreground-muted][bold $text]esc[/] to stop • "
+                "[bold $text]enter[/] to send • [bold $text]ctrl+j[/] for newline • "
+                "[bold $text]ctrl+p[/] command palette • [bold $text]shift+tab[/] cycle modes • "
+                "/help for commands[/]"
+            )
+        else:
+            return (
+                "[$foreground-muted][bold $text]enter[/] to send • "
+                "[bold $text]ctrl+j[/] for newline • [bold $text]ctrl+p[/] command palette • "
+                "[bold $text]shift+tab[/] cycle modes • /help for commands[/]"
+            )

shotgun/tui/containers.py
ADDED
@@ -0,0 +1,91 @@
+"""Dependency injection container for TUI components."""
+
+from typing import TYPE_CHECKING
+
+from dependency_injector import containers, providers
+from pydantic_ai import RunContext
+
+from shotgun.agents.conversation_manager import ConversationManager
+from shotgun.agents.models import AgentDeps
+from shotgun.sdk.codebase import CodebaseSDK
+from shotgun.tui.commands import CommandHandler
+from shotgun.tui.filtered_codebase_service import FilteredCodebaseService
+from shotgun.tui.services.conversation_service import ConversationService
+from shotgun.tui.state.processing_state import ProcessingStateManager
+from shotgun.tui.utils.mode_progress import PlaceholderHints
+from shotgun.tui.widgets.widget_coordinator import WidgetCoordinator
+from shotgun.utils import get_shotgun_home
+
+if TYPE_CHECKING:
+    pass
+
+
+# Placeholder system prompt function (agents provide their own)
+# Using Object provider to pass the function itself, not call it
+def _placeholder_system_prompt(ctx: "RunContext[AgentDeps]") -> str:
+    raise RuntimeError(
+        "This should not be called - agents provide their own system_prompt_fn"
+    )
+
+
+class TUIContainer(containers.DeclarativeContainer):
+    """Dependency injection container for TUI components.
+
+    This container manages the lifecycle and dependencies of all TUI components,
+    ensuring consistent configuration and facilitating testing.
+
+    Note: model_config and agent_deps are created lazily via async factory methods
+    since get_provider_model() is now async.
+    """
+
+    # Configuration
+    config = providers.Configuration()
+
+    # Core dependencies
+    # TODO: Figure out a better solution for async dependency injection
+    # model_config is now loaded lazily via create_default_tui_deps()
+    # because get_provider_model() is async. This breaks the DI pattern
+    # and should be refactored to support async factories properly.
+
+    storage_dir = providers.Singleton(lambda: get_shotgun_home() / "codebases")
+
+    codebase_service = providers.Singleton(
+        FilteredCodebaseService, storage_dir=storage_dir
+    )
+
+    system_prompt_fn = providers.Object(_placeholder_system_prompt)
+
+    # TODO: Figure out a better solution for async dependency injection
+    # AgentDeps is now created via async create_default_tui_deps()
+    # instead of using DI container's Singleton provider because it requires
+    # async model_config initialization
+
+    # Service singletons
+    codebase_sdk = providers.Singleton(CodebaseSDK)
+
+    command_handler = providers.Singleton(CommandHandler)
+
+    placeholder_hints = providers.Singleton(PlaceholderHints)
+
+    conversation_manager = providers.Singleton(ConversationManager)
+
+    conversation_service = providers.Factory(
+        ConversationService, conversation_manager=conversation_manager
+    )
+
+    # TODO: Figure out a better solution for async dependency injection
+    # AgentManager factory removed - create via async initialization
+    # since it requires async agent creation
+
+    # Factory for ProcessingStateManager (needs ChatScreen reference)
+    processing_state_factory = providers.Factory(
+        ProcessingStateManager,
+        screen=providers.Object(None),  # Will be overridden when creating ChatScreen
+        telemetry_context=providers.Object({}),  # Will be overridden when creating
+    )
+
+    # Factory for WidgetCoordinator (needs ChatScreen reference)
+    widget_coordinator_factory = providers.Factory(
+        WidgetCoordinator,
+        screen=providers.Object(None),  # Will be overridden when creating ChatScreen
+    )
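
As a rough sketch (not from the package), the declarative container above can be resolved and overridden with dependency-injector's standard API, which is presumably how tests would swap in doubles. FakeCommandHandler below is a made-up stand-in.

# Hypothetical test sketch; FakeCommandHandler is invented for illustration.
from dependency_injector import providers

from shotgun.tui.containers import TUIContainer


class FakeCommandHandler:
    """Stand-in used only to demonstrate provider overriding."""


container = TUIContainer()

# Singletons resolve lazily on first call and are cached afterwards.
manager = container.conversation_manager()

# Swap a provider for a test double, then restore the original wiring.
container.command_handler.override(providers.Object(FakeCommandHandler()))
assert isinstance(container.command_handler(), FakeCommandHandler)
container.command_handler.reset_override()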

shotgun/tui/dependencies.py
ADDED
@@ -0,0 +1,39 @@
+"""Dependency creation utilities for TUI components."""
+
+from pydantic_ai import RunContext
+
+from shotgun.agents.config import get_provider_model
+from shotgun.agents.models import AgentDeps
+from shotgun.tui.filtered_codebase_service import FilteredCodebaseService
+from shotgun.utils import get_shotgun_home
+
+
+async def create_default_tui_deps() -> AgentDeps:
+    """Create default AgentDeps for TUI components.
+
+    This creates a standard AgentDeps configuration suitable for interactive
+    TUI usage with:
+    - Interactive mode enabled
+    - TUI context flag set
+    - Filtered codebase service (restricted to CWD)
+    - Placeholder system prompt (agents provide their own)
+
+    Returns:
+        Configured AgentDeps instance ready for TUI use.
+    """
+    model_config = await get_provider_model()
+    storage_dir = get_shotgun_home() / "codebases"
+    codebase_service = FilteredCodebaseService(storage_dir)
+
+    def _placeholder_system_prompt_fn(ctx: RunContext[AgentDeps]) -> str:
+        raise RuntimeError(
+            "This should not be called - agents provide their own system_prompt_fn"
+        )
+
+    return AgentDeps(
+        interactive_mode=True,
+        is_tui_context=True,
+        llm_model=model_config,
+        codebase_service=codebase_service,
+        system_prompt_fn=_placeholder_system_prompt_fn,
+    )
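
Since create_default_tui_deps() is a coroutine, callers outside the running TUI need an event loop. A minimal sketch, assuming provider credentials are already configured so get_provider_model() succeeds:

# Hypothetical usage sketch; not part of the package.
import asyncio

from shotgun.tui.dependencies import create_default_tui_deps


async def main() -> None:
    deps = await create_default_tui_deps()
    # AgentDeps configured for interactive TUI use (interactive_mode=True,
    # is_tui_context=True, CWD-filtered codebase service).
    print(type(deps).__name__, deps.interactive_mode)


if __name__ == "__main__":
    asyncio.run(main())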
shotgun/tui/protocols.py
ADDED
@@ -0,0 +1,45 @@
+"""Protocol definitions for TUI components.
+
+These protocols define interfaces that components can depend on without
+creating circular imports. Screens like ChatScreen can satisfy these
+protocols without explicitly implementing them.
+"""
+
+from typing import Protocol, runtime_checkable
+
+
+@runtime_checkable
+class QAStateProvider(Protocol):
+    """Protocol for screens that provide Q&A mode state.
+
+    This protocol allows components to check if they're on a screen with
+    Q&A mode without importing the concrete ChatScreen class, eliminating
+    circular dependencies.
+    """
+
+    @property
+    def qa_mode(self) -> bool:
+        """Whether Q&A mode is currently active.
+
+        Returns:
+            True if Q&A mode is active, False otherwise.
+        """
+        ...
+
+
+@runtime_checkable
+class ProcessingStateProvider(Protocol):
+    """Protocol for screens that provide processing state.
+
+    This protocol allows components to check if they're on a screen with
+    an active agent processing without importing the concrete ChatScreen class.
+    """
+
+    @property
+    def working(self) -> bool:
+        """Whether an agent is currently working.
+
+        Returns:
+            True if an agent is processing, False otherwise.
+        """
+        ...
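
Because both protocols are decorated with runtime_checkable, widgets such as ModeIndicator and StatusBar can use plain isinstance() checks against whatever screen they happen to be on. A small sketch with a made-up FakeScreen illustrates the structural check:

# Hypothetical sketch; FakeScreen is invented for illustration.
from shotgun.tui.protocols import QAStateProvider


class FakeScreen:
    """Any object exposing a qa_mode property satisfies the protocol."""

    @property
    def qa_mode(self) -> bool:
        return True


assert isinstance(FakeScreen(), QAStateProvider)   # structural match, no inheritance
assert not isinstance(object(), QAStateProvider)   # missing qa_mode attribute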

shotgun/tui/screens/chat/chat.tcss
ADDED
@@ -0,0 +1,54 @@
+ChatHistory {
+    height: auto;
+}
+
+PromptInput {
+    min-height: 3;
+    max-height: 7;
+    height: auto;
+}
+
+StatusBar {
+    height: auto;
+}
+
+ModeIndicator {
+    height: auto;
+}
+
+#footer {
+    dock: bottom;
+    height: auto;
+    padding: 1 1 1 2;
+    max-height: 14;
+}
+
+#window {
+    align: left bottom;
+}
+
+.hidden {
+    display: none;
+}
+
+#footer > Grid {
+    height: auto;
+    grid-columns: 1fr auto;
+    grid-size: 2;
+}
+
+
+#right-footer-indicators {
+    width: auto;
+    height: auto;
+    layout: vertical;
+}
+
+#context-indicator {
+    text-align: end;
+    height: 1;
+}
+
+#indexing-job-display {
+    text-align: end;
+}
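
One plausible way this stylesheet gets picked up is via Textual's CSS_PATH on the sibling ChatScreen; the actual wiring lives in shotgun/tui/screens/chat/chat_screen.py and is not shown in this diff, so the sketch below is an assumption.

# Hypothetical sketch of how a Screen could load the stylesheet above.
from textual.app import ComposeResult
from textual.screen import Screen
from textual.widgets import Static


class MinimalChatScreen(Screen[None]):
    # Resolved relative to this module, so chat.tcss sits next to the .py file.
    CSS_PATH = "chat.tcss"

    def compose(self) -> ComposeResult:
        yield Static("chat history placeholder", id="window")
        yield Static("footer placeholder", id="footer")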