shotgun-sh 0.1.9__py3-none-any.whl → 0.2.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of shotgun-sh might be problematic.
- shotgun/agents/agent_manager.py +761 -52
- shotgun/agents/common.py +80 -75
- shotgun/agents/config/constants.py +21 -10
- shotgun/agents/config/manager.py +322 -97
- shotgun/agents/config/models.py +114 -84
- shotgun/agents/config/provider.py +232 -88
- shotgun/agents/context_analyzer/__init__.py +28 -0
- shotgun/agents/context_analyzer/analyzer.py +471 -0
- shotgun/agents/context_analyzer/constants.py +9 -0
- shotgun/agents/context_analyzer/formatter.py +115 -0
- shotgun/agents/context_analyzer/models.py +212 -0
- shotgun/agents/conversation_history.py +125 -2
- shotgun/agents/conversation_manager.py +57 -19
- shotgun/agents/export.py +6 -7
- shotgun/agents/history/compaction.py +23 -3
- shotgun/agents/history/context_extraction.py +93 -6
- shotgun/agents/history/history_processors.py +179 -11
- shotgun/agents/history/token_counting/__init__.py +31 -0
- shotgun/agents/history/token_counting/anthropic.py +127 -0
- shotgun/agents/history/token_counting/base.py +78 -0
- shotgun/agents/history/token_counting/openai.py +90 -0
- shotgun/agents/history/token_counting/sentencepiece_counter.py +127 -0
- shotgun/agents/history/token_counting/tokenizer_cache.py +92 -0
- shotgun/agents/history/token_counting/utils.py +144 -0
- shotgun/agents/history/token_estimation.py +12 -12
- shotgun/agents/llm.py +62 -0
- shotgun/agents/models.py +59 -4
- shotgun/agents/plan.py +6 -7
- shotgun/agents/research.py +7 -8
- shotgun/agents/specify.py +6 -7
- shotgun/agents/tasks.py +6 -7
- shotgun/agents/tools/__init__.py +0 -2
- shotgun/agents/tools/codebase/codebase_shell.py +6 -0
- shotgun/agents/tools/codebase/directory_lister.py +6 -0
- shotgun/agents/tools/codebase/file_read.py +11 -2
- shotgun/agents/tools/codebase/query_graph.py +6 -0
- shotgun/agents/tools/codebase/retrieve_code.py +6 -0
- shotgun/agents/tools/file_management.py +82 -16
- shotgun/agents/tools/registry.py +217 -0
- shotgun/agents/tools/web_search/__init__.py +55 -16
- shotgun/agents/tools/web_search/anthropic.py +76 -51
- shotgun/agents/tools/web_search/gemini.py +50 -27
- shotgun/agents/tools/web_search/openai.py +26 -17
- shotgun/agents/tools/web_search/utils.py +2 -2
- shotgun/agents/usage_manager.py +164 -0
- shotgun/api_endpoints.py +15 -0
- shotgun/cli/clear.py +53 -0
- shotgun/cli/codebase/commands.py +71 -2
- shotgun/cli/compact.py +186 -0
- shotgun/cli/config.py +41 -67
- shotgun/cli/context.py +111 -0
- shotgun/cli/export.py +1 -1
- shotgun/cli/feedback.py +50 -0
- shotgun/cli/models.py +3 -2
- shotgun/cli/plan.py +1 -1
- shotgun/cli/research.py +1 -1
- shotgun/cli/specify.py +1 -1
- shotgun/cli/tasks.py +1 -1
- shotgun/cli/update.py +18 -5
- shotgun/codebase/core/change_detector.py +5 -3
- shotgun/codebase/core/code_retrieval.py +4 -2
- shotgun/codebase/core/ingestor.py +169 -19
- shotgun/codebase/core/manager.py +177 -13
- shotgun/codebase/core/nl_query.py +1 -1
- shotgun/codebase/models.py +28 -3
- shotgun/codebase/service.py +14 -2
- shotgun/exceptions.py +32 -0
- shotgun/llm_proxy/__init__.py +19 -0
- shotgun/llm_proxy/clients.py +44 -0
- shotgun/llm_proxy/constants.py +15 -0
- shotgun/logging_config.py +18 -27
- shotgun/main.py +91 -4
- shotgun/posthog_telemetry.py +87 -40
- shotgun/prompts/agents/export.j2 +18 -1
- shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +5 -1
- shotgun/prompts/agents/partials/interactive_mode.j2 +24 -7
- shotgun/prompts/agents/plan.j2 +1 -1
- shotgun/prompts/agents/research.j2 +1 -1
- shotgun/prompts/agents/specify.j2 +270 -3
- shotgun/prompts/agents/state/system_state.j2 +4 -0
- shotgun/prompts/agents/tasks.j2 +1 -1
- shotgun/prompts/codebase/partials/cypher_rules.j2 +13 -0
- shotgun/prompts/loader.py +2 -2
- shotgun/prompts/tools/web_search.j2 +14 -0
- shotgun/sdk/codebase.py +60 -2
- shotgun/sentry_telemetry.py +28 -21
- shotgun/settings.py +238 -0
- shotgun/shotgun_web/__init__.py +19 -0
- shotgun/shotgun_web/client.py +138 -0
- shotgun/shotgun_web/constants.py +21 -0
- shotgun/shotgun_web/models.py +47 -0
- shotgun/telemetry.py +24 -36
- shotgun/tui/app.py +275 -23
- shotgun/tui/commands/__init__.py +1 -1
- shotgun/tui/components/context_indicator.py +179 -0
- shotgun/tui/components/mode_indicator.py +70 -0
- shotgun/tui/components/status_bar.py +48 -0
- shotgun/tui/components/vertical_tail.py +6 -0
- shotgun/tui/containers.py +91 -0
- shotgun/tui/dependencies.py +39 -0
- shotgun/tui/filtered_codebase_service.py +46 -0
- shotgun/tui/protocols.py +45 -0
- shotgun/tui/screens/chat/__init__.py +5 -0
- shotgun/tui/screens/chat/chat.tcss +54 -0
- shotgun/tui/screens/chat/chat_screen.py +1234 -0
- shotgun/tui/screens/chat/codebase_index_prompt_screen.py +64 -0
- shotgun/tui/screens/chat/codebase_index_selection.py +12 -0
- shotgun/tui/screens/chat/help_text.py +40 -0
- shotgun/tui/screens/chat/prompt_history.py +48 -0
- shotgun/tui/screens/chat.tcss +11 -0
- shotgun/tui/screens/chat_screen/command_providers.py +226 -11
- shotgun/tui/screens/chat_screen/history/__init__.py +22 -0
- shotgun/tui/screens/chat_screen/history/agent_response.py +66 -0
- shotgun/tui/screens/chat_screen/history/chat_history.py +116 -0
- shotgun/tui/screens/chat_screen/history/formatters.py +115 -0
- shotgun/tui/screens/chat_screen/history/partial_response.py +43 -0
- shotgun/tui/screens/chat_screen/history/user_question.py +42 -0
- shotgun/tui/screens/confirmation_dialog.py +151 -0
- shotgun/tui/screens/feedback.py +193 -0
- shotgun/tui/screens/github_issue.py +102 -0
- shotgun/tui/screens/model_picker.py +352 -0
- shotgun/tui/screens/onboarding.py +431 -0
- shotgun/tui/screens/pipx_migration.py +153 -0
- shotgun/tui/screens/provider_config.py +156 -39
- shotgun/tui/screens/shotgun_auth.py +295 -0
- shotgun/tui/screens/welcome.py +198 -0
- shotgun/tui/services/__init__.py +5 -0
- shotgun/tui/services/conversation_service.py +184 -0
- shotgun/tui/state/__init__.py +7 -0
- shotgun/tui/state/processing_state.py +185 -0
- shotgun/tui/utils/mode_progress.py +14 -7
- shotgun/tui/widgets/__init__.py +5 -0
- shotgun/tui/widgets/widget_coordinator.py +262 -0
- shotgun/utils/datetime_utils.py +77 -0
- shotgun/utils/env_utils.py +13 -0
- shotgun/utils/file_system_utils.py +22 -2
- shotgun/utils/marketing.py +110 -0
- shotgun/utils/source_detection.py +16 -0
- shotgun/utils/update_checker.py +73 -21
- shotgun_sh-0.2.11.dist-info/METADATA +130 -0
- shotgun_sh-0.2.11.dist-info/RECORD +194 -0
- {shotgun_sh-0.1.9.dist-info → shotgun_sh-0.2.11.dist-info}/entry_points.txt +1 -0
- {shotgun_sh-0.1.9.dist-info → shotgun_sh-0.2.11.dist-info}/licenses/LICENSE +1 -1
- shotgun/agents/history/token_counting.py +0 -429
- shotgun/agents/tools/user_interaction.py +0 -37
- shotgun/tui/screens/chat.py +0 -818
- shotgun/tui/screens/chat_screen/history.py +0 -222
- shotgun_sh-0.1.9.dist-info/METADATA +0 -466
- shotgun_sh-0.1.9.dist-info/RECORD +0 -131
- {shotgun_sh-0.1.9.dist-info → shotgun_sh-0.2.11.dist-info}/WHEEL +0 -0
shotgun/tui/screens/chat/codebase_index_prompt_screen.py
@@ -0,0 +1,64 @@
+"""Modal dialog for codebase indexing prompts."""
+
+from pathlib import Path
+
+from textual import on
+from textual.app import ComposeResult
+from textual.containers import Container
+from textual.screen import ModalScreen
+from textual.widgets import Button, Label, Static
+
+
+class CodebaseIndexPromptScreen(ModalScreen[bool]):
+    """Modal dialog asking whether to index the detected codebase."""
+
+    DEFAULT_CSS = """
+    CodebaseIndexPromptScreen {
+        align: center middle;
+        background: rgba(0, 0, 0, 0.0);
+    }
+
+    CodebaseIndexPromptScreen > #index-prompt-dialog {
+        width: 60%;
+        max-width: 60;
+        height: auto;
+        border: wide $primary;
+        padding: 1 2;
+        layout: vertical;
+        background: $surface;
+        height: auto;
+    }
+
+    #index-prompt-buttons {
+        layout: horizontal;
+        align-horizontal: right;
+        height: auto;
+    }
+    """
+
+    def compose(self) -> ComposeResult:
+        with Container(id="index-prompt-dialog"):
+            yield Label("Index this codebase?", id="index-prompt-title")
+            yield Static(
+                f"Would you like to index the codebase at:\n{Path.cwd()}\n\n"
+                "This is required for the agent to understand your code and answer "
+                "questions about it. Without indexing, the agent cannot analyze "
+                "your codebase."
+            )
+            with Container(id="index-prompt-buttons"):
+                yield Button(
+                    "Index now",
+                    id="index-prompt-confirm",
+                    variant="primary",
+                )
+                yield Button("Not now", id="index-prompt-cancel")
+
+    @on(Button.Pressed, "#index-prompt-cancel")
+    def handle_cancel(self, event: Button.Pressed) -> None:
+        event.stop()
+        self.dismiss(False)
+
+    @on(Button.Pressed, "#index-prompt-confirm")
+    def handle_confirm(self, event: Button.Pressed) -> None:
+        event.stop()
+        self.dismiss(True)
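For orientation, a minimal sketch of how a host screen might drive this modal. It is not part of the diff: the names prompt_for_indexing and on_index_prompt_result are hypothetical, and it only assumes Textual's push_screen(screen, callback=...) API, the same pattern the command-provider hunks further down use.

    from textual.screen import Screen

    from shotgun.tui.screens.chat.codebase_index_prompt_screen import CodebaseIndexPromptScreen


    class SomeHostScreen(Screen[None]):  # hypothetical host screen, for illustration only
        def prompt_for_indexing(self) -> None:
            # The callback receives the bool that the modal passes to dismiss().
            self.app.push_screen(CodebaseIndexPromptScreen(), callback=self.on_index_prompt_result)

        def on_index_prompt_result(self, should_index: bool | None) -> None:
            if should_index:
                # Kick off codebase indexing here (e.g. the chat screen's index command).
                ...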
shotgun/tui/screens/chat/help_text.py
@@ -0,0 +1,40 @@
+"""Helper functions for chat screen help text."""
+
+
+def help_text_with_codebase(already_indexed: bool = False) -> str:
+    """Generate help text for when a codebase is available.
+
+    Args:
+        already_indexed: Whether the codebase is already indexed.
+
+    Returns:
+        Formatted help text string.
+    """
+    return (
+        "Howdy! Welcome to Shotgun - Spec Driven Development for Developers and AI Agents.\n\n"
+        "Shotgun writes codebase-aware specs for your AI coding agents so they don't derail.\n\n"
+        f"{'It' if already_indexed else 'Once your codebase is indexed, it'} can help you:\n"
+        "- Research your codebase and spec out new features\n"
+        "- Create implementation plans that fit your architecture\n"
+        "- Generate AGENTS.md files for AI coding agents\n"
+        "- Onboard to existing projects or plan refactors\n\n"
+        "Ready to build something? Let's go.\n"
+    )
+
+
+def help_text_empty_dir() -> str:
+    """Generate help text for empty directory.
+
+    Returns:
+        Formatted help text string.
+    """
+    return (
+        "Howdy! Welcome to Shotgun - Spec Driven Development for Developers and AI Agents.\n\n"
+        "Shotgun writes codebase-aware specs for your AI coding agents so they don't derail.\n\n"
+        "It can help you:\n"
+        "- Research your codebase and spec out new features\n"
+        "- Create implementation plans that fit your architecture\n"
+        "- Generate AGENTS.md files for AI coding agents\n"
+        "- Onboard to existing projects or plan refactors\n\n"
+        "Ready to build something? Let's go.\n"
+    )
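A quick usage note, assuming the shotgun package is importable: both helpers return a complete welcome string, and already_indexed only switches the lead-in of the capability list.

    from shotgun.tui.screens.chat.help_text import help_text_empty_dir, help_text_with_codebase

    print(help_text_with_codebase(already_indexed=True))   # lead-in: "It can help you:"
    print(help_text_with_codebase(already_indexed=False))  # lead-in: "Once your codebase is indexed, it can help you:"
    print(help_text_empty_dir())                           # same copy as the already-indexed variant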
shotgun/tui/screens/chat/prompt_history.py
@@ -0,0 +1,48 @@
+"""Prompt history management for chat screen."""
+
+from pydantic import BaseModel, Field
+
+
+class PromptHistory(BaseModel):
+    """Manages prompt history for navigation in chat input."""
+
+    prompts: list[str] = Field(default_factory=lambda: ["Hello there!"])
+    curr: int | None = None
+
+    def next(self) -> str:
+        """Navigate to next prompt in history.
+
+        Returns:
+            The next prompt in history.
+        """
+        if self.curr is None:
+            self.curr = -1
+        else:
+            self.curr = -1
+        return self.prompts[self.curr]
+
+    def prev(self) -> str:
+        """Navigate to previous prompt in history.
+
+        Returns:
+            The previous prompt in history.
+
+        Raises:
+            Exception: If current entry is None.
+        """
+        if self.curr is None:
+            raise Exception("current entry is none")
+        if self.curr == -1:
+            self.curr = None
+            return ""
+        self.curr += 1
+        return ""
+
+    def append(self, text: str) -> None:
+        """Add a new prompt to history.
+
+        Args:
+            text: The prompt text to add.
+        """
+        self.prompts.append(text)
+        self.curr = None
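A short illustration of how the class added above behaves, using only names from the hunk (it assumes the shotgun package and pydantic are importable):

    from shotgun.tui.screens.chat.prompt_history import PromptHistory

    history = PromptHistory()             # starts with the default ["Hello there!"]
    history.append("summarize the repo")  # cursor resets to None

    print(history.next())  # -> "summarize the repo"; next() always selects the latest entry
    print(history.prev())  # -> ""; the cursor is cleared again

    try:
        history.prev()     # with the cursor already cleared, prev() raises
    except Exception as exc:
        print(exc)         # "current entry is none"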
shotgun/tui/screens/chat.tcss
CHANGED

shotgun/tui/screens/chat_screen/command_providers.py
CHANGED
@@ -5,6 +5,8 @@ from textual.command import DiscoveryHit, Hit, Provider
 
 from shotgun.agents.models import AgentType
 from shotgun.codebase.models import CodebaseGraph
+from shotgun.tui.screens.model_picker import ModelPickerScreen
+from shotgun.tui.screens.provider_config import ProviderConfigScreen
 
 if TYPE_CHECKING:
     from shotgun.tui.screens.chat import ChatScreen
@@ -96,6 +98,70 @@ class AgentModeProvider(Provider):
                 yield Hit(score, matcher.highlight(title), callback, help=help_text)
 
 
+class UsageProvider(Provider):
+    """Command provider for agent mode switching."""
+
+    @property
+    def chat_screen(self) -> "ChatScreen":
+        from shotgun.tui.screens.chat import ChatScreen
+
+        return cast(ChatScreen, self.screen)
+
+    async def discover(self) -> AsyncGenerator[DiscoveryHit, None]:
+        """Provide default mode switching commands when palette opens."""
+        yield DiscoveryHit(
+            "Show usage",
+            self.chat_screen.action_show_usage,
+            help="Display usage information for the current session",
+        )
+
+    async def search(self, query: str) -> AsyncGenerator[Hit, None]:
+        """Search for mode commands."""
+        matcher = self.matcher(query)
+
+        async for discovery_hit in self.discover():
+            score = matcher.match(discovery_hit.text or "")
+            if score > 0:
+                yield Hit(
+                    score,
+                    matcher.highlight(discovery_hit.text or ""),
+                    discovery_hit.command,
+                    help=discovery_hit.help,
+                )
+
+
+class ContextProvider(Provider):
+    """Command provider for showing conversation context analysis."""
+
+    @property
+    def chat_screen(self) -> "ChatScreen":
+        from shotgun.tui.screens.chat import ChatScreen
+
+        return cast(ChatScreen, self.screen)
+
+    async def discover(self) -> AsyncGenerator[DiscoveryHit, None]:
+        """Provide context command when palette opens."""
+        yield DiscoveryHit(
+            "Show context",
+            self.chat_screen.action_show_context,
+            help="Display conversation context composition and statistics",
+        )
+
+    async def search(self, query: str) -> AsyncGenerator[Hit, None]:
+        """Search for context command."""
+        matcher = self.matcher(query)
+
+        async for discovery_hit in self.discover():
+            score = matcher.match(discovery_hit.text or "")
+            if score > 0:
+                yield Hit(
+                    score,
+                    matcher.highlight(discovery_hit.text or ""),
+                    discovery_hit.command,
+                    help=discovery_hit.help,
+                )
+
+
 class ProviderSetupProvider(Provider):
     """Command palette entries for provider configuration."""
 
@@ -107,7 +173,13 @@ class ProviderSetupProvider(Provider):
 
     def open_provider_config(self) -> None:
         """Show the provider configuration screen."""
-        self.chat_screen.app.push_screen(
+        self.chat_screen.app.push_screen(ProviderConfigScreen())
+
+    def open_model_picker(self) -> None:
+        """Show the model picker screen."""
+        self.chat_screen.app.push_screen(
+            ModelPickerScreen(), callback=self.chat_screen.handle_model_selected
+        )
 
     async def discover(self) -> AsyncGenerator[DiscoveryHit, None]:
         yield DiscoveryHit(
@@ -115,9 +187,15 @@ class ProviderSetupProvider(Provider):
             self.open_provider_config,
             help="⚙️ Manage API keys for available providers",
         )
+        yield DiscoveryHit(
+            "Select AI Model",
+            self.open_model_picker,
+            help="🤖 Choose which AI model to use",
+        )
 
     async def search(self, query: str) -> AsyncGenerator[Hit, None]:
         matcher = self.matcher(query)
+
         title = "Open Provider Setup"
         score = matcher.match(title)
         if score > 0:
@@ -128,6 +206,16 @@ class ProviderSetupProvider(Provider):
                 help="⚙️ Manage API keys for available providers",
             )
 
+        title = "Select AI Model"
+        score = matcher.match(title)
+        if score > 0:
+            yield Hit(
+                score,
+                matcher.highlight(title),
+                self.open_model_picker,
+                help="🤖 Choose which AI model to use",
+            )
+
 
 class CodebaseCommandProvider(Provider):
     """Command palette entries for codebase management."""
@@ -139,30 +227,30 @@ class CodebaseCommandProvider(Provider):
         return cast(ChatScreen, self.screen)
 
     async def discover(self) -> AsyncGenerator[DiscoveryHit, None]:
-        yield DiscoveryHit(
-            "Codebase: Index Codebase",
-            self.chat_screen.index_codebase_command,
-            help="Index a repository into the codebase graph",
-        )
         yield DiscoveryHit(
             "Codebase: Delete Codebase Index",
             self.chat_screen.delete_codebase_command,
             help="Delete an existing codebase index",
         )
+        yield DiscoveryHit(
+            "Codebase: Index Codebase",
+            self.chat_screen.index_codebase_command,
+            help="Index a repository into the codebase graph",
+        )
 
     async def search(self, query: str) -> AsyncGenerator[Hit, None]:
         matcher = self.matcher(query)
         commands = [
-            (
-                "Codebase: Index Codebase",
-                self.chat_screen.index_codebase_command,
-                "Index a repository into the codebase graph",
-            ),
             (
                 "Codebase: Delete Codebase Index",
                 self.chat_screen.delete_codebase_command,
                 "Delete an existing codebase index",
             ),
+            (
+                "Codebase: Index Codebase",
+                self.chat_screen.index_codebase_command,
+                "Index a repository into the codebase graph",
+            ),
         ]
         for title, callback, help_text in commands:
             score = matcher.match(title)
@@ -217,3 +305,130 @@ class DeleteCodebasePaletteProvider(Provider):
                 ),
                 help=graph.repo_path,
             )
+
+
+class UnifiedCommandProvider(Provider):
+    """Unified command provider with all commands in alphabetical order."""
+
+    @property
+    def chat_screen(self) -> "ChatScreen":
+        from shotgun.tui.screens.chat import ChatScreen
+
+        return cast(ChatScreen, self.screen)
+
+    def open_provider_config(self) -> None:
+        """Show the provider configuration screen."""
+        self.chat_screen.app.push_screen(ProviderConfigScreen())
+
+    def open_model_picker(self) -> None:
+        """Show the model picker screen."""
+        self.chat_screen.app.push_screen(
+            ModelPickerScreen(), callback=self.chat_screen.handle_model_selected
+        )
+
+    async def discover(self) -> AsyncGenerator[DiscoveryHit, None]:
+        """Provide commands in alphabetical order when palette opens."""
+        # Alphabetically ordered commands
+        yield DiscoveryHit(
+            "Clear Conversation",
+            self.chat_screen.action_clear_conversation,
+            help="Clear the entire conversation history",
+        )
+        yield DiscoveryHit(
+            "Codebase: Delete Codebase Index",
+            self.chat_screen.delete_codebase_command,
+            help="Delete an existing codebase index",
+        )
+        yield DiscoveryHit(
+            "Codebase: Index Codebase",
+            self.chat_screen.index_codebase_command,
+            help="Index a repository into the codebase graph",
+        )
+        yield DiscoveryHit(
+            "Compact Conversation",
+            self.chat_screen.action_compact_conversation,
+            help="Reduce conversation size by compacting message history",
+        )
+        yield DiscoveryHit(
+            "Open Provider Setup",
+            self.open_provider_config,
+            help="⚙️ Manage API keys for available providers",
+        )
+        yield DiscoveryHit(
+            "Select AI Model",
+            self.open_model_picker,
+            help="🤖 Choose which AI model to use",
+        )
+        yield DiscoveryHit(
+            "Show context",
+            self.chat_screen.action_show_context,
+            help="Display conversation context composition and statistics",
+        )
+        yield DiscoveryHit(
+            "Show usage",
+            self.chat_screen.action_show_usage,
+            help="Display usage information for the current session",
+        )
+        yield DiscoveryHit(
+            "View Onboarding",
+            self.chat_screen.action_view_onboarding,
+            help="View the onboarding tutorial and helpful resources",
+        )
+
+    async def search(self, query: str) -> AsyncGenerator[Hit, None]:
+        """Search for commands in alphabetical order."""
+        matcher = self.matcher(query)
+
+        # Define all commands in alphabetical order
+        commands = [
+            (
+                "Clear Conversation",
+                self.chat_screen.action_clear_conversation,
+                "Clear the entire conversation history",
+            ),
+            (
+                "Codebase: Delete Codebase Index",
+                self.chat_screen.delete_codebase_command,
+                "Delete an existing codebase index",
+            ),
+            (
+                "Codebase: Index Codebase",
+                self.chat_screen.index_codebase_command,
+                "Index a repository into the codebase graph",
+            ),
+            (
+                "Compact Conversation",
+                self.chat_screen.action_compact_conversation,
+                "Reduce conversation size by compacting message history",
+            ),
+            (
+                "Open Provider Setup",
+                self.open_provider_config,
+                "⚙️ Manage API keys for available providers",
+            ),
+            (
+                "Select AI Model",
+                self.open_model_picker,
+                "🤖 Choose which AI model to use",
+            ),
+            (
+                "Show context",
+                self.chat_screen.action_show_context,
+                "Display conversation context composition and statistics",
+            ),
+            (
+                "Show usage",
+                self.chat_screen.action_show_usage,
+                "Display usage information for the current session",
+            ),
+            (
+                "View Onboarding",
+                self.chat_screen.action_view_onboarding,
+                "View the onboarding tutorial and helpful resources",
+            ),
+        ]
+
+        for title, callback, help_text in commands:
+            score = matcher.match(title)
+            if score > 0:
+                yield Hit(score, matcher.highlight(title), callback, help=help_text)
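For context, Textual picks up command-palette providers such as the ones added above through a screen's (or app's) COMMANDS class variable: discover() supplies the entries shown when the palette opens, and search() filters them as the user types. A minimal registration sketch under that assumption, with the screen name purely illustrative:

    from textual.screen import Screen

    from shotgun.tui.screens.chat_screen.command_providers import UnifiedCommandProvider


    class ExampleChatScreen(Screen[None]):  # illustrative; the real ChatScreen lives in shotgun.tui.screens.chat
        # Textual merges this set with the app-level providers when the palette opens.
        COMMANDS = {UnifiedCommandProvider}

Note that these providers cast self.screen to ChatScreen and call its action_* methods, so in practice they are registered on that screen rather than on a stand-in like this one.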
shotgun/tui/screens/chat_screen/history/__init__.py
@@ -0,0 +1,22 @@
+"""Chat history package - displays conversation messages in the TUI.
+
+This package provides widgets for displaying chat history including:
+- User questions
+- Agent responses
+- Tool calls
+- Streaming/partial responses
+"""
+
+from .agent_response import AgentResponseWidget
+from .chat_history import ChatHistory
+from .formatters import ToolFormatter
+from .partial_response import PartialResponseWidget
+from .user_question import UserQuestionWidget
+
+__all__ = [
+    "ChatHistory",
+    "PartialResponseWidget",
+    "AgentResponseWidget",
+    "UserQuestionWidget",
+    "ToolFormatter",
+]
shotgun/tui/screens/chat_screen/history/agent_response.py
@@ -0,0 +1,66 @@
+"""Agent response widget for chat history."""
+
+from pydantic_ai.messages import (
+    BuiltinToolCallPart,
+    BuiltinToolReturnPart,
+    ModelResponse,
+    TextPart,
+    ThinkingPart,
+    ToolCallPart,
+)
+from textual.app import ComposeResult
+from textual.widget import Widget
+from textual.widgets import Markdown
+
+from .formatters import ToolFormatter
+
+
+class AgentResponseWidget(Widget):
+    """Widget that displays agent responses in the chat history."""
+
+    def __init__(self, item: ModelResponse | None) -> None:
+        super().__init__()
+        self.item = item
+
+    def compose(self) -> ComposeResult:
+        self.display = self.item is not None
+        if self.item is None:
+            yield Markdown(markdown="")
+        else:
+            yield Markdown(markdown=self.compute_output())
+
+    def compute_output(self) -> str:
+        """Compute the markdown output for the agent response."""
+        acc = ""
+        if self.item is None:
+            return ""
+
+        for idx, part in enumerate(self.item.parts):
+            if isinstance(part, TextPart):
+                # Only show the circle prefix if there's actual content
+                if part.content and part.content.strip():
+                    acc += f"**⏺** {part.content}\n\n"
+            elif isinstance(part, ToolCallPart):
+                parts_str = ToolFormatter.format_tool_call_part(part)
+                if parts_str:  # Only add if there's actual content
+                    acc += parts_str + "\n\n"
+            elif isinstance(part, BuiltinToolCallPart):
+                # Format builtin tool calls using registry
+                formatted = ToolFormatter.format_builtin_tool_call(part)
+                if formatted:  # Only add if not hidden
+                    acc += formatted + "\n\n"
+            elif isinstance(part, BuiltinToolReturnPart):
+                # Don't show tool return parts in the UI
+                pass
+            elif isinstance(part, ThinkingPart):
+                if (
+                    idx == len(self.item.parts) - 1
+                ):  # show the thinking part only if it's the last part
+                    acc += (
+                        f"thinking: {part.content}\n\n"
+                        if part.content
+                        else "Thinking..."
+                    )
+            else:
+                continue
+        return acc.strip()
shotgun/tui/screens/chat_screen/history/chat_history.py
@@ -0,0 +1,116 @@
+"""Chat history widget - main container for message display."""
+
+from collections.abc import Generator, Sequence
+
+from pydantic_ai.messages import (
+    ModelMessage,
+    ModelRequest,
+    ModelResponse,
+    UserPromptPart,
+)
+from textual.app import ComposeResult
+from textual.reactive import reactive
+from textual.widget import Widget
+
+from shotgun.tui.components.vertical_tail import VerticalTail
+from shotgun.tui.screens.chat_screen.hint_message import HintMessage, HintMessageWidget
+
+from .agent_response import AgentResponseWidget
+from .partial_response import PartialResponseWidget
+from .user_question import UserQuestionWidget
+
+
+class ChatHistory(Widget):
+    """Main widget for displaying chat message history."""
+
+    DEFAULT_CSS = """
+    VerticalTail {
+        align: left bottom;
+
+    }
+    VerticalTail > * {
+        height: auto;
+    }
+
+    Horizontal {
+        height: auto;
+        background: $secondary-muted;
+    }
+
+    Markdown {
+        height: auto;
+    }
+    """
+    partial_response: reactive[ModelMessage | None] = reactive(None)
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.items: Sequence[ModelMessage | HintMessage] = []
+        self.vertical_tail: VerticalTail | None = None
+        self.partial_response = None
+        self._rendered_count = 0  # Track how many messages have been mounted
+
+    def compose(self) -> ComposeResult:
+        """Compose the chat history widget."""
+        self.vertical_tail = VerticalTail()
+
+        filtered = list(self.filtered_items())
+        with self.vertical_tail:
+            for item in filtered:
+                if isinstance(item, ModelRequest):
+                    yield UserQuestionWidget(item)
+                elif isinstance(item, HintMessage):
+                    yield HintMessageWidget(item)
+                elif isinstance(item, ModelResponse):
+                    yield AgentResponseWidget(item)
+            yield PartialResponseWidget(self.partial_response).data_bind(
+                item=ChatHistory.partial_response
+            )
+
+        # Track how many messages were rendered during initial compose
+        self._rendered_count = len(filtered)
+
+    def filtered_items(self) -> Generator[ModelMessage | HintMessage, None, None]:
+        """Filter and yield items for display."""
+        for item in self.items:
+            # Skip ModelRequest messages that only contain ToolReturnPart
+            # (these are internal tool results, not user prompts)
+            if isinstance(item, ModelRequest):
+                has_user_content = any(
+                    isinstance(part, UserPromptPart) for part in item.parts
+                )
+                if not has_user_content:
+                    # This is just a tool return, skip displaying it
+                    continue
+
+            yield item
+
+    def update_messages(self, messages: list[ModelMessage | HintMessage]) -> None:
+        """Update the displayed messages using incremental mounting."""
+        if not self.vertical_tail:
+            return
+
+        self.items = messages
+        filtered = list(self.filtered_items())
+
+        # Only mount new messages that haven't been rendered yet
+        if len(filtered) > self._rendered_count:
+            new_messages = filtered[self._rendered_count :]
+            for item in new_messages:
+                widget: Widget
+                if isinstance(item, ModelRequest):
+                    widget = UserQuestionWidget(item)
+                elif isinstance(item, HintMessage):
+                    widget = HintMessageWidget(item)
+                elif isinstance(item, ModelResponse):
+                    widget = AgentResponseWidget(item)
+                else:
+                    continue
+
+                # Mount before the PartialResponseWidget
+                self.vertical_tail.mount(widget, before=self.vertical_tail.children[-1])
+
+            self._rendered_count = len(filtered)
+
+            # Scroll to bottom to show newly added messages
+            self.vertical_tail.scroll_end(animate=False)