shotgun-sh 0.1.0.dev31__py3-none-any.whl → 0.1.1.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Flagged by the registry as a potentially problematic release.
- shotgun/agents/agent_manager.py +73 -32
- shotgun/agents/config/constants.py +0 -1
- shotgun/agents/config/models.py +0 -3
- shotgun/agents/config/provider.py +6 -6
- shotgun/codebase/core/cypher_models.py +46 -0
- shotgun/codebase/core/nl_query.py +180 -39
- shotgun/codebase/service.py +17 -0
- shotgun/prompts/codebase/cypher_system.j2 +15 -1
- shotgun/tui/commands/__init__.py +10 -9
- shotgun/tui/components/vertical_tail.py +0 -15
- shotgun/tui/screens/chat.py +62 -23
- shotgun/tui/screens/chat_screen/history.py +17 -30
- {shotgun_sh-0.1.0.dev31.dist-info → shotgun_sh-0.1.1.dev1.dist-info}/METADATA +1 -1
- {shotgun_sh-0.1.0.dev31.dist-info → shotgun_sh-0.1.1.dev1.dist-info}/RECORD +17 -16
- {shotgun_sh-0.1.0.dev31.dist-info → shotgun_sh-0.1.1.dev1.dist-info}/WHEEL +0 -0
- {shotgun_sh-0.1.0.dev31.dist-info → shotgun_sh-0.1.1.dev1.dist-info}/entry_points.txt +0 -0
- {shotgun_sh-0.1.0.dev31.dist-info → shotgun_sh-0.1.1.dev1.dist-info}/licenses/LICENSE +0 -0
shotgun/agents/agent_manager.py
CHANGED

@@ -76,20 +76,25 @@ class MessageHistoryUpdated(Message):
 class PartialResponseMessage(Message):
     """Event posted when a partial response is received."""
 
-    def __init__(self, message: ModelResponse | None, is_last: bool) -> None:
+    def __init__(
+        self,
+        message: ModelResponse | None,
+        messages: list[ModelMessage],
+        is_last: bool,
+    ) -> None:
         """Initialize the partial response message."""
         super().__init__()
         self.message = message
+        self.messages = messages
         self.is_last = is_last
 
 
 @dataclass(slots=True)
 class _PartialStreamState:
-    """Tracks …"""
+    """Tracks streamed messages while handling a single agent run."""
 
-    …
-    …
-    final_sent: bool = False
+    messages: list[ModelRequest | ModelResponse] = field(default_factory=list)
+    current_response: ModelResponse | None = None
 
 
 class AgentManager(Widget):

@@ -272,6 +277,7 @@ class AgentManager(Widget):
 
         # Clear file tracker before each run to track only this run's operations
        deps.file_tracker.clear()
+        original_messages = self.ui_message_history.copy()
 
         if prompt:
             self.ui_message_history.append(ModelRequest.user_text_prompt(prompt))

@@ -356,16 +362,11 @@
                 **kwargs,
             )
         finally:
-            # If the stream ended unexpectedly without a final result, clear accumulated state.
-            if self._stream_state is not None and not self._stream_state.final_sent:
-                partial_message = self._build_partial_response(self._stream_state.parts)
-                if partial_message is not None:
-                    self._post_partial_message(partial_message, True)
             self._stream_state = None
 
-        self.ui_message_history = …
+        self.ui_message_history = original_messages + cast(
+            list[ModelRequest | ModelResponse | HintMessage], result.new_messages()
+        )
 
         # Apply compaction to persistent message history to prevent cascading growth
         all_messages = result.all_messages()

@@ -390,7 +391,13 @@
         if state is None:
             state = self._stream_state = _PartialStreamState()
 
-        partial_parts: list[ModelResponsePart | ToolCallPartDelta] = …
+        if state.current_response is not None:
+            partial_parts: list[ModelResponsePart | ToolCallPartDelta] = list(
+                state.current_response.parts
+                # cast(Sequence[ModelResponsePart], state.current_response.parts)
+            )
+        else:
+            partial_parts = []
 
         async for event in stream:
             try:

@@ -409,8 +416,8 @@
 
                     partial_message = self._build_partial_response(partial_parts)
                     if partial_message is not None:
-                        state.…
-                        self._post_partial_message(partial_message, False)
+                        state.current_response = partial_message
+                        self._post_partial_message(False)
 
                 elif isinstance(event, PartDeltaEvent):
                     index = event.index

@@ -435,8 +442,8 @@
 
                     partial_message = self._build_partial_response(partial_parts)
                     if partial_message is not None:
-                        state.…
-                        self._post_partial_message(partial_message, False)
+                        state.current_response = partial_message
+                        self._post_partial_message(False)
 
                 elif isinstance(event, FunctionToolCallEvent):
                     existing_call_idx = next(

@@ -448,29 +455,54 @@
                         ),
                         None,
                     )
+
                     if existing_call_idx is not None:
                         partial_parts[existing_call_idx] = event.part
+                    elif state.messages:
+                        existing_call_idx = next(
+                            (
+                                i
+                                for i, part in enumerate(state.messages[-1].parts)
+                                if isinstance(part, ToolCallPart)
+                                and part.tool_call_id == event.part.tool_call_id
+                            ),
+                            None,
+                        )
                     else:
                         partial_parts.append(event.part)
                     partial_message = self._build_partial_response(partial_parts)
                     if partial_message is not None:
-                        state.…
-                        self._post_partial_message(partial_message, False)
+                        state.current_response = partial_message
+                        self._post_partial_message(False)
                 elif isinstance(event, FunctionToolResultEvent):
-                    …
+                    request_message = ModelRequest(parts=[event.result])
+                    state.messages.append(request_message)
+                    if (
+                        event.result.tool_name == "ask_user"
+                    ):  # special handling to ask_user, because deferred tool results mean we missed the user response
+                        self.ui_message_history.append(request_message)
+                        self._post_messages_updated()
+                        ## this is what the user responded with
+                        self._post_partial_message(is_last=False)
 
+                elif isinstance(event, FinalResultEvent):
+                    pass
             except Exception:  # pragma: no cover - defensive logging
                 logger.exception(
                     "Error while handling agent stream event", extra={"event": event}
                 )
 
+        final_message = state.current_response or self._build_partial_response(
+            partial_parts
+        )
+        if final_message is not None:
+            state.current_response = final_message
+            if final_message not in state.messages:
+                state.messages.append(final_message)
+                state.current_response = None
+        self._post_partial_message(True)
+        state.current_response = None
+
     def _build_partial_response(
         self, parts: list[ModelResponsePart | ToolCallPartDelta]
     ) -> ModelResponse | None:

@@ -483,11 +515,20 @@
             return None
         return ModelResponse(parts=list(completed_parts))
 
-    def _post_partial_message(
-        self, message: ModelResponse | None, is_last: bool
-    ) -> None:
+    def _post_partial_message(self, is_last: bool) -> None:
         """Post a partial message to the UI."""
-        self.post_message(PartialResponseMessage(message, is_last))
+        if self._stream_state is None:
+            return
+        self.post_message(
+            PartialResponseMessage(
+                self._stream_state.current_response
+                if self._stream_state.current_response
+                not in self._stream_state.messages
+                else None,
+                self._stream_state.messages,
+                is_last,
+            )
+        )
 
     def _post_messages_updated(
         self, file_operations: list[FileOperation] | None = None
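Net effect of the `agent_manager.py` changes: a partial event now carries the whole streamed transcript (`messages`) plus the still-open response (`message`), instead of a lone response guarded by a `final_sent` flag. A minimal consumer sketch for this review — only `PartialResponseMessage` and its fields come from the diff; the widget and handler names are hypothetical:

    from textual import on
    from textual.widget import Widget

    from shotgun.agents.agent_manager import PartialResponseMessage  # path per this diff


    class StreamView(Widget):
        """Hypothetical widget illustrating the new event shape."""

        @on(PartialResponseMessage)
        def on_partial(self, event: PartialResponseMessage) -> None:
            completed = event.messages  # request/response messages streamed so far
            in_flight = event.message   # open response, or None once folded into messages
            if event.is_last:
                ...  # final update for this run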
shotgun/agents/config/models.py
CHANGED

@@ -117,21 +117,18 @@ class OpenAIConfig(BaseModel):
     """Configuration for OpenAI provider."""
 
     api_key: SecretStr | None = None
-    model_name: str = "gpt-5"
 
 
 class AnthropicConfig(BaseModel):
     """Configuration for Anthropic provider."""
 
     api_key: SecretStr | None = None
-    model_name: str = "claude-opus-4-1"
 
 
 class GoogleConfig(BaseModel):
     """Configuration for Google provider."""
 
     api_key: SecretStr | None = None
-    model_name: str = "gemini-2.5-pro"
 
 
 class ShotgunConfig(BaseModel):
shotgun/agents/config/provider.py
CHANGED

@@ -127,8 +127,8 @@ def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
             f"OpenAI API key not configured. Set via environment variable {OPENAI_API_KEY_ENV} or config."
         )
 
-    # Get model spec
-    model_name = …
+    # Get model spec - hardcoded to gpt-5
+    model_name = "gpt-5"
     if model_name not in MODEL_SPECS:
         raise ValueError(f"Model '{model_name}' not found")
     spec = MODEL_SPECS[model_name]

@@ -149,8 +149,8 @@ def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
             f"Anthropic API key not configured. Set via environment variable {ANTHROPIC_API_KEY_ENV} or config."
         )
 
-    # Get model spec
-    model_name = …
+    # Get model spec - hardcoded to claude-opus-4-1
+    model_name = "claude-opus-4-1"
     if model_name not in MODEL_SPECS:
         raise ValueError(f"Model '{model_name}' not found")
     spec = MODEL_SPECS[model_name]

@@ -171,8 +171,8 @@ def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
             f"Gemini API key not configured. Set via environment variable {GEMINI_API_KEY_ENV} or config."
         )
 
-    # Get model spec
-    model_name = …
+    # Get model spec - hardcoded to gemini-2.5-pro
+    model_name = "gemini-2.5-pro"
     if model_name not in MODEL_SPECS:
         raise ValueError(f"Model '{model_name}' not found")
     spec = MODEL_SPECS[model_name]
shotgun/codebase/core/cypher_models.py
ADDED

@@ -0,0 +1,46 @@
+"""Pydantic models and exceptions for Cypher query generation."""
+
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+
+class CypherGenerationResponse(BaseModel):
+    """Structured response from LLM for Cypher query generation.
+
+    This model ensures the LLM explicitly indicates whether it can generate
+    a valid Cypher query and provides a reason if it cannot.
+    """
+
+    cypher_query: str | None = Field(
+        default=None,
+        description="The generated Cypher query, or None if generation not possible",
+    )
+    can_generate_valid_cypher: bool = Field(
+        description="Whether a valid Cypher query can be generated for this request"
+    )
+    reason_cannot_generate: str | None = Field(
+        default=None,
+        description="Explanation why query cannot be generated (if applicable)",
+    )
+
+    def model_post_init(self, __context: Any) -> None:
+        """Validate that reason is provided when query cannot be generated."""
+        if not self.can_generate_valid_cypher and not self.reason_cannot_generate:
+            self.reason_cannot_generate = "No reason provided"
+        if self.can_generate_valid_cypher and not self.cypher_query:
+            raise ValueError(
+                "cypher_query must be provided when can_generate_valid_cypher is True"
+            )
+
+
+class CypherGenerationNotPossibleError(Exception):
+    """Raised when LLM cannot generate valid Cypher for the query.
+
+    This typically happens when the query is conceptual rather than structural,
+    or when it requires interpretation beyond what can be expressed in Cypher.
+    """
+
+    def __init__(self, reason: str):
+        self.reason = reason
+        super().__init__(f"Cannot generate Cypher query: {reason}")
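For review purposes, here is how the `model_post_init` hook above behaves; this snippet is illustrative and not part of the package:

    from shotgun.codebase.core.cypher_models import CypherGenerationResponse

    # Consistent success: query present, flag set.
    ok = CypherGenerationResponse(
        cypher_query="MATCH (c:Class) RETURN c.name;",
        can_generate_valid_cypher=True,
    )

    # Failure without a reason: the hook backfills a default.
    no_reason = CypherGenerationResponse(can_generate_valid_cypher=False)
    assert no_reason.reason_cannot_generate == "No reason provided"

    # Claimed success without a query: the hook raises ValueError.
    CypherGenerationResponse(can_generate_valid_cypher=True)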
shotgun/codebase/core/nl_query.py
CHANGED

@@ -4,15 +4,13 @@ import time
 from datetime import datetime
 from typing import TYPE_CHECKING
 
-from pydantic_ai.messages import (
-    ModelRequest,
-    SystemPromptPart,
-    TextPart,
-    UserPromptPart,
-)
+from pydantic_ai import Agent
 
 from shotgun.agents.config import get_provider_model
-from shotgun.…
+from shotgun.codebase.core.cypher_models import (
+    CypherGenerationNotPossibleError,
+    CypherGenerationResponse,
+)
 from shotgun.logging_config import get_logger
 from shotgun.prompts import PromptLoader
 

@@ -25,42 +23,52 @@ logger = get_logger(__name__)
 prompt_loader = PromptLoader()
 
 
-async def llm_cypher_prompt(
-    …
+async def llm_cypher_prompt(
+    system_prompt: str, user_prompt: str
+) -> CypherGenerationResponse:
+    """Generate a Cypher query from a natural language prompt using structured output.
 
     Args:
         system_prompt: The system prompt defining the behavior and context for the LLM
         user_prompt: The user's natural language query
     Returns:
-        …
+        CypherGenerationResponse with cypher_query, can_generate flag, and reason if not
     """
     model_config = get_provider_model()
-    …
-    # …
-    …
-            ModelRequest(
-                parts=[
-                    SystemPromptPart(content=system_prompt),
-                    UserPromptPart(content=user_prompt),
-                ]
-            ),
-        ],
-        max_tokens=2000,  # Cypher queries are short, 2000 tokens is plenty
+
+    # Create an agent with structured output for Cypher generation
+    cypher_agent = Agent(
+        model=model_config.model_instance,
+        output_type=CypherGenerationResponse,
+        retries=2,
     )
 
-    …
+    # Combine system and user prompts
+    combined_prompt = f"{system_prompt}\n\nUser Query: {user_prompt}"
+
+    try:
+        # Run the agent to get structured response
+        result = await cypher_agent.run(combined_prompt)
+        response = result.output
+
+        # Log the structured response for debugging
+        logger.debug(
+            "Cypher generation response - can_generate: %s, query: %s, reason: %s",
+            response.can_generate_valid_cypher,
+            response.cypher_query[:50] if response.cypher_query else None,
+            response.reason_cannot_generate,
+        )
+
+        return response
 
-    …
+    except Exception as e:
+        logger.error("Failed to generate Cypher query with structured output: %s", e)
+        # Return a failure response
+        return CypherGenerationResponse(
+            cypher_query=None,
+            can_generate_valid_cypher=False,
+            reason_cannot_generate=f"LLM error: {str(e)}",
+        )
 
 
 async def generate_cypher(natural_language_query: str) -> str:

@@ -71,6 +79,10 @@ async def generate_cypher(natural_language_query: str) -> str:
 
     Returns:
         Generated Cypher query
+
+    Raises:
+        CypherGenerationNotPossibleError: If the query cannot be converted to Cypher
+        RuntimeError: If there's an error during generation
     """
     # Get current time for context
     current_timestamp = int(time.time())

@@ -88,8 +100,30 @@ async def generate_cypher(natural_language_query: str) -> str:
     )
 
     try:
-        …
+        response = await llm_cypher_prompt(system_prompt, enhanced_query)
+
+        # Check if the LLM could generate a valid Cypher query
+        if not response.can_generate_valid_cypher:
+            logger.info(
+                "Cannot generate Cypher for query '%s': %s",
+                natural_language_query,
+                response.reason_cannot_generate,
+            )
+            raise CypherGenerationNotPossibleError(
+                response.reason_cannot_generate or "Query cannot be converted to Cypher"
+            )
+
+        if not response.cypher_query:
+            raise ValueError("LLM indicated success but provided no query")
+
+        cleaned_query = clean_cypher_response(response.cypher_query)
+
+        # Validate Cypher keywords
+        is_valid, validation_error = validate_cypher_keywords(cleaned_query)
+        if not is_valid:
+            logger.warning(f"Generated query has invalid syntax: {validation_error}")
+            logger.warning(f"Problematic query: {cleaned_query}")
+            raise ValueError(f"Generated query validation failed: {validation_error}")
 
         # Validate UNION ALL queries
         is_valid, validation_error = validate_union_query(cleaned_query)

@@ -100,6 +134,8 @@ async def generate_cypher(natural_language_query: str) -> str:
 
         return cleaned_query
 
+    except CypherGenerationNotPossibleError:
+        raise  # Re-raise as-is
     except Exception as e:
         raise RuntimeError(f"Failed to generate Cypher query: {e}") from e
 

@@ -170,8 +206,31 @@ MATCH (f:Function) RETURN f.name, f.qualified_name // WRONG: missing third colu
             base_system_prompt=prompt_loader.render("codebase/cypher_system.j2"),
         )
 
-        …
+        response = await llm_cypher_prompt(enhanced_system_prompt, enhanced_query)
+
+        # Check if the LLM could generate a valid Cypher query
+        if not response.can_generate_valid_cypher:
+            logger.info(
+                "Cannot generate Cypher for retry query '%s': %s",
+                natural_language_query,
+                response.reason_cannot_generate,
+            )
+            raise CypherGenerationNotPossibleError(
+                response.reason_cannot_generate
+                or "Query cannot be converted to Cypher even with error context"
+            )
+
+        if not response.cypher_query:
+            raise ValueError("LLM indicated success but provided no query on retry")
+
+        cleaned_query = clean_cypher_response(response.cypher_query)
+
+        # Validate Cypher keywords
+        is_valid, validation_error = validate_cypher_keywords(cleaned_query)
+        if not is_valid:
+            logger.warning(f"Generated query has invalid syntax: {validation_error}")
+            logger.warning(f"Problematic query: {cleaned_query}")
+            raise ValueError(f"Generated query validation failed: {validation_error}")
 
         # Validate UNION ALL queries
         is_valid, validation_error = validate_union_query(cleaned_query)

@@ -182,6 +241,8 @@ MATCH (f:Function) RETURN f.name, f.qualified_name // WRONG: missing third colu
 
         return cleaned_query
 
+    except CypherGenerationNotPossibleError:
+        raise  # Re-raise as-is
     except Exception as e:
         raise RuntimeError(
             f"Failed to generate Cypher query with error context: {e}"

@@ -202,6 +263,10 @@ async def generate_cypher_openai_async(
 
     Returns:
         Generated Cypher query
+
+    Raises:
+        CypherGenerationNotPossibleError: If the query cannot be converted to Cypher
+        RuntimeError: If there's an error during generation
     """
     # Get current time for context
     current_timestamp = int(time.time())

@@ -219,9 +284,26 @@
     )
 
     try:
-        …
+        response = await llm_cypher_prompt(system_prompt, enhanced_query)
+
+        # Check if the LLM could generate a valid Cypher query
+        if not response.can_generate_valid_cypher:
+            logger.info(
+                "Cannot generate Cypher for query '%s': %s",
+                natural_language_query,
+                response.reason_cannot_generate,
+            )
+            raise CypherGenerationNotPossibleError(
+                response.reason_cannot_generate or "Query cannot be converted to Cypher"
+            )
 
+        if not response.cypher_query:
+            raise ValueError("LLM indicated success but provided no query")
+
+        return clean_cypher_response(response.cypher_query)
+
+    except CypherGenerationNotPossibleError:
+        raise  # Re-raise as-is
     except Exception as e:
         logger.error(f"OpenAI API error: {e}")
         raise RuntimeError(f"Failed to generate Cypher query: {e}") from e

@@ -288,6 +370,65 @@ def validate_union_query(cypher_query: str) -> tuple[bool, str]:
     return True, ""
 
 
+def validate_cypher_keywords(query: str) -> tuple[bool, str]:
+    """Validate that a query starts with valid Kuzu Cypher keywords.
+
+    Args:
+        query: The Cypher query to validate
+
+    Returns:
+        Tuple of (is_valid, error_message)
+    """
+    # Valid Kuzu Cypher starting keywords based on parser expectations
+    valid_cypher_keywords = {
+        "ALTER",
+        "ATTACH",
+        "BEGIN",
+        "CALL",
+        "CHECKPOINT",
+        "COMMENT",
+        "COMMIT",
+        "COPY",
+        "CREATE",
+        "DELETE",
+        "DETACH",
+        "DROP",
+        "EXPLAIN",
+        "EXPORT",
+        "FORCE",
+        "IMPORT",
+        "INSTALL",
+        "LOAD",
+        "MATCH",
+        "MERGE",
+        "OPTIONAL",
+        "PROFILE",
+        "RETURN",
+        "ROLLBACK",
+        "SET",
+        "UNWIND",
+        "UNINSTALL",
+        "UPDATE",
+        "USE",
+        "WITH",
+    }
+
+    query = query.strip()
+    if not query:
+        return False, "Empty query"
+
+    # Get the first word
+    first_word = query.upper().split()[0] if query else ""
+
+    if first_word not in valid_cypher_keywords:
+        return (
+            False,
+            f"Query doesn't start with valid Cypher keyword. Found: '{first_word}'",
+        )
+
+    return True, ""
+
+
 def clean_cypher_response(response_text: str) -> str:
     """Clean up common LLM formatting artifacts from a Cypher query.
 
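The new `validate_cypher_keywords` gate is a cheap first-word check, so prose that survives `clean_cypher_response` is still rejected before it reaches Kuzu. An illustrative call (written for this review, not package code):

    from shotgun.codebase.core.nl_query import validate_cypher_keywords

    is_valid, error = validate_cypher_keywords("MATCH (f:Function) RETURN f.name;")
    assert is_valid

    # A chatty LLM preamble fails fast: the first word is not a Cypher keyword.
    is_valid, error = validate_cypher_keywords("Sure, here is your query: MATCH ...")
    assert not is_valid  # error: "Query doesn't start with valid Cypher keyword. Found: 'SURE,'"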
shotgun/codebase/service.py
CHANGED

@@ -4,6 +4,7 @@ import time
 from pathlib import Path
 from typing import Any
 
+from shotgun.codebase.core.cypher_models import CypherGenerationNotPossibleError
 from shotgun.codebase.core.manager import CodebaseGraphManager
 from shotgun.codebase.core.nl_query import generate_cypher
 from shotgun.codebase.models import CodebaseGraph, QueryResult, QueryType

@@ -190,6 +191,22 @@ class CodebaseService:
                 error=None,
             )
 
+        except CypherGenerationNotPossibleError as e:
+            # Handle queries that cannot be converted to Cypher
+            execution_time = (time.time() - start_time) * 1000
+            logger.info(f"Query cannot be converted to Cypher: {e.reason}")
+
+            return QueryResult(
+                query=query,
+                cypher_query=None,
+                results=[],
+                column_names=[],
+                row_count=0,
+                execution_time_ms=execution_time,
+                success=False,
+                error=f"This query cannot be converted to Cypher: {e.reason}",
+            )
+
         except Exception as e:
             execution_time = (time.time() - start_time) * 1000
             logger.error(f"Query execution failed: {e}")
shotgun/prompts/codebase/cypher_system.j2
CHANGED

@@ -25,4 +25,18 @@ Your goal is to return appropriate properties for each node type. Common propert
 {% include 'codebase/partials/temporal_context.j2' %}
 
 **6. Output Format**
-…
+You must return a structured JSON response with the following fields:
+- `cypher_query`: The generated Cypher query string (or null if not possible)
+- `can_generate_valid_cypher`: Boolean indicating if a valid Cypher query can be generated
+- `reason_cannot_generate`: String explaining why generation isn't possible (or null if successful)
+
+**IMPORTANT:** Some queries cannot be expressed in Cypher:
+- Conceptual questions requiring interpretation (e.g., "What is the main purpose of this codebase?")
+- Questions about code quality or best practices
+- Questions requiring semantic understanding beyond structure
+
+For these, set `can_generate_valid_cypher` to false and provide a clear explanation in `reason_cannot_generate`.
+
+Examples:
+- Query: "Show all classes" → can_generate_valid_cypher: true, cypher_query: "MATCH (c:Class) RETURN c.name, c.qualified_name;"
+- Query: "What is the main purpose of this codebase?" → can_generate_valid_cypher: false, reason_cannot_generate: "This is a conceptual question requiring interpretation and analysis of the code's overall design and intent, rather than a structural query about specific code elements."
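A payload that satisfies this contract, parsed through the model added earlier in this release (the JSON values here are invented for illustration; assumes pydantic v2):

    import json

    from shotgun.codebase.core.cypher_models import CypherGenerationResponse

    raw = """{
      "cypher_query": "MATCH (c:Class) RETURN c.name, c.qualified_name;",
      "can_generate_valid_cypher": true,
      "reason_cannot_generate": null
    }"""
    response = CypherGenerationResponse.model_validate(json.loads(raw))
    assert response.can_generate_valid_cypher and response.cypher_query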
shotgun/tui/commands/__init__.py
CHANGED

@@ -55,17 +55,18 @@ class CommandHandler:
 • `/help` - Show this help message
 
 **Keyboard Shortcuts:**
-…
+
+* `Enter` - Send message
+* `Ctrl+P` - Open command palette
+* `Shift+Tab` - Cycle agent modes
+* `Ctrl+C` - Quit application
 
 **Agent Modes:**
-…
+* **Research** - Research topics with web search and synthesize findings
+* **Specify** - Create detailed specifications and requirements documents
+* **Planning** - Create comprehensive, actionable plans with milestones
+* **Tasks** - Generate specific, actionable tasks from research and plans
+* **Export** - Export artifacts and findings to various formats
 
 **Usage:**
 Type your message and press Enter to chat with the AI. The AI will respond based on the current mode."""
shotgun/tui/components/vertical_tail.py
CHANGED

@@ -7,21 +7,6 @@ class VerticalTail(VerticalScroll):
 
     auto_scroll = reactive(True, layout=False)
 
-    def on_mount(self) -> None:
-        """Set up auto-scrolling when the widget is mounted."""
-        # Start at the bottom
-        if self.auto_scroll:
-            self.scroll_end(animate=False)
-
-    def on_descendant_mount(self) -> None:
-        """Auto-scroll when a new child is added."""
-        if self.auto_scroll:
-            # Check if we're near the bottom (within 1 line of scroll)
-            at_bottom = self.scroll_y >= self.max_scroll_y - 1
-            if at_bottom:
-                # Use call_after_refresh to ensure layout is updated first
-                self.call_after_refresh(self.scroll_end, animate=False)
-
     def watch_auto_scroll(self, value: bool) -> None:
         """Handle auto_scroll property changes."""
         if value:
shotgun/tui/screens/chat.py
CHANGED

@@ -1,7 +1,9 @@
+import asyncio
 import logging
 from collections.abc import Iterable
 from dataclasses import dataclass
 from pathlib import Path
+from typing import cast
 
 from pydantic_ai import DeferredToolResults, RunContext
 from pydantic_ai.messages import (

@@ -11,7 +13,7 @@ from pydantic_ai.messages import (
     TextPart,
     UserPromptPart,
 )
-from textual import on, work
+from textual import events, on, work
 from textual.app import ComposeResult
 from textual.command import CommandPalette
 from textual.containers import Container, Grid

@@ -101,8 +103,20 @@ class StatusBar(Widget):
     }
     """
 
+    def __init__(self, working: bool = False) -> None:
+        """Initialize the status bar.
+
+        Args:
+            working: Whether an agent is currently working.
+        """
+        super().__init__()
+        self.working = working
+
     def render(self) -> str:
-        …
+        if self.working:
+            return """[$foreground-muted][bold $text]esc[/] to stop • [bold $text]enter[/] to send • [bold $text]ctrl+p[/] command palette • [bold $text]shift+tab[/] cycle modes • /help for commands[/]"""
+        else:
+            return """[$foreground-muted][bold $text]enter[/] to send • [bold $text]ctrl+p[/] command palette • [bold $text]shift+tab[/] cycle modes • /help for commands[/]"""
 
 
 class ModeIndicator(Widget):

@@ -328,6 +342,7 @@ class ChatScreen(Screen[None]):
     question: reactive[UserQuestion | None] = reactive(None)
     indexing_job: reactive[CodebaseIndexSelection | None] = reactive(None)
     partial_message: reactive[ModelMessage | None] = reactive(None)
+    _current_worker = None  # Track the current running worker for cancellation
 
     def __init__(self, continue_session: bool = False) -> None:
         super().__init__()

@@ -369,6 +384,18 @@ class ChatScreen(Screen[None]):
         # Start the question listener worker to handle ask_user interactions
         self.call_later(self.add_question_listener)
 
+    async def on_key(self, event: events.Key) -> None:
+        """Handle key presses for cancellation."""
+        # If escape is pressed while agent is working, cancel the operation
+        if event.key == "escape" and self.working and self._current_worker:
+            # Cancel the running agent worker
+            self._current_worker.cancel()
+            # Show cancellation message
+            self.mount_hint("⚠️ Cancelling operation...")
+            # Re-enable the input
+            prompt_input = self.query_one(PromptInput)
+            prompt_input.focus()
+
     @work
     async def check_if_codebase_is_indexed(self) -> None:
         cur_dir = Path.cwd().resolve()

@@ -384,7 +411,7 @@
             await self.codebase_sdk.list_codebases_for_directory()
         ).graphs
         if accessible_graphs:
-            self.mount_hint(help_text_with_codebase())
+            self.mount_hint(help_text_with_codebase(already_indexed=True))
             return
 
         should_index = await self.app.push_screen_wait(CodebaseIndexPromptScreen())

@@ -392,6 +419,8 @@
             self.mount_hint(help_text_empty_dir())
             return
 
+        self.mount_hint(help_text_with_codebase(already_indexed=False))
+
         self.index_codebase_command()
 
     def watch_mode(self, new_mode: AgentType) -> None:

@@ -418,6 +447,11 @@
         spinner.set_classes("" if is_working else "hidden")
         spinner.display = is_working
 
+        # Update the status bar to show/hide "ESC to stop"
+        status_bar = self.query_one(StatusBar)
+        status_bar.working = is_working
+        status_bar.refresh()
+
     def watch_messages(self, messages: list[ModelMessage | HintMessage]) -> None:
         """Update the chat history when messages change."""
         if self.is_mounted:

@@ -468,7 +502,7 @@
                 id="spinner",
                 classes="" if self.working else "hidden",
             )
-            yield StatusBar()
+            yield StatusBar(working=self.working)
             yield PromptInput(
                 text=self.value,
                 highlight_cursor_line=False,

@@ -487,10 +521,12 @@
     def handle_partial_response(self, event: PartialResponseMessage) -> None:
         self.partial_message = event.message
 
-        …
+        history = self.query_one(ChatHistory)
+        history.update_messages(
+            self.messages + cast(list[ModelMessage | HintMessage], event.messages)
+        )
+
+        history.partial_response = self.partial_message
 
     def _clear_partial_response(self) -> None:
         partial_response_widget = self.query_one(ChatHistory)

@@ -658,7 +694,6 @@
                 timeout=8,
             )
 
-            self.mount_hint(codebase_indexed_hint(selection.name))
         except CodebaseAlreadyIndexedError as exc:
             logger.warning(f"Codebase already indexed: {exc}")
             self.notify(str(exc), severity="warning")

@@ -684,6 +719,11 @@
             prompt = None
         self.working = True
 
+        # Store the worker so we can cancel it if needed
+        from textual.worker import get_current_worker
+
+        self._current_worker = get_current_worker()
+
         if self.question:
             # This is a response to a question from the agent
             self.question.result.set_result(

@@ -701,11 +741,17 @@
             # This is a new user prompt
             prompt = message
 
-        …
+        try:
+            await self.agent_manager.run(
+                prompt=prompt,
+                deferred_tool_results=deferred_tool_results,
+            )
+        except asyncio.CancelledError:
+            # Handle cancellation gracefully - DO NOT re-raise
+            self.mount_hint("⚠️ Operation cancelled by user")
+        finally:
+            self.working = False
+            self._current_worker = None
 
         # Save conversation after each interaction
         self._save_conversation()

@@ -751,17 +797,10 @@
         self.mode = AgentType(conversation.last_agent_model)
 
 
-def codebase_indexed_hint(codebase_name: str) -> str:
-    return (
-        f"Codebase **{codebase_name}** indexed successfully. You can now use it in your chat.\n\n"
-        + help_text_with_codebase()
-    )
-
-
-def help_text_with_codebase() -> str:
+def help_text_with_codebase(already_indexed: bool = False) -> str:
     return (
         "Howdy! Welcome to Shotgun - the context tool for software engineering. \n\nYou can research, build specs, plan, create tasks, and export context to your favorite code-gen agents.\n\n"
-        "I can help with:\n\n"
+        f"{'' if already_indexed else 'Once your codebase is indexed, '}I can help with:\n\n"
         "- Speccing out a new feature\n"
         "- Onboarding you onto this project\n"
         "- Helping with a refactor spec\n"
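The cancellation wiring above in miniature — a standalone Textual sketch written for this review, not package code; only the `get_current_worker()` / `worker.cancel()` pattern is taken from the diff:

    import asyncio

    from textual import events, work
    from textual.app import App
    from textual.worker import get_current_worker


    class CancellableApp(App[None]):
        """Hypothetical app demonstrating Esc-to-cancel for a running worker."""

        _current_worker = None

        @work
        async def long_task(self) -> None:
            self._current_worker = get_current_worker()
            try:
                await asyncio.sleep(60)  # stands in for the agent run
            except asyncio.CancelledError:
                self.notify("Operation cancelled")  # swallow; do not re-raise
            finally:
                self._current_worker = None

        async def on_key(self, event: events.Key) -> None:
            if event.key == "escape" and self._current_worker is not None:
                self._current_worker.cancel()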
shotgun/tui/screens/chat_screen/history.py
CHANGED

@@ -12,6 +12,7 @@ from pydantic_ai.messages import (
     ThinkingPart,
     ToolCallPart,
     ToolReturnPart,
+    UserPromptPart,
 )
 from textual.app import ComposeResult
 from textual.reactive import reactive

@@ -39,6 +40,7 @@ class PartialResponseWidget(Widget):  # TODO: doesn't work lol
         self.item = item
 
     def compose(self) -> ComposeResult:
+        yield Markdown(markdown="**partial response**")
         if self.item is None:
             pass
         elif self.item.kind == "response":

@@ -76,7 +78,7 @@ class ChatHistory(Widget):
 
     def __init__(self) -> None:
         super().__init__()
-        self.items: …
+        self.items: Sequence[ModelMessage | HintMessage] = []
         self.vertical_tail: VerticalTail | None = None
         self.partial_response = None

@@ -94,9 +96,7 @@
         yield PartialResponseWidget(self.partial_response).data_bind(
             item=ChatHistory.partial_response
         )
-
-    def watch_partial_response(self, _partial_response: ModelMessage | None) -> None:
-        self.call_after_refresh(self.autoscroll)
+        self.call_later(self.autoscroll)
 
     def update_messages(self, messages: list[ModelMessage | HintMessage]) -> None:
         """Update the displayed messages without recomposing."""

@@ -105,12 +105,11 @@
 
         self.items = messages
         self.refresh(recompose=True)
-
-        self.autoscroll()
+        self.call_later(self.autoscroll)
 
     def autoscroll(self) -> None:
         if self.vertical_tail:
-            self.vertical_tail.scroll_end(animate=False)
+            self.vertical_tail.scroll_end(animate=False, immediate=False, force=True)
 
 
 class UserQuestionWidget(Widget):

@@ -123,23 +122,24 @@ class UserQuestionWidget(Widget):
         if self.item is None:
             yield Markdown(markdown="")
         else:
-            prompt = …
-            …
-            )
-            yield Markdown(markdown=f"**>** {prompt}")
+            prompt = self.format_prompt_parts(self.item.parts)
+            yield Markdown(markdown=prompt)
 
     def format_prompt_parts(self, parts: Sequence[ModelRequestPart]) -> str:
         acc = ""
         for part in parts:
-            if isinstance(part, …):
+            if isinstance(part, UserPromptPart):
                 acc += (
                     f"**>** {part.content if isinstance(part.content, str) else ''}\n\n"
                 )
-            elif isinstance(part, …):
+            elif isinstance(part, ToolReturnPart):
                 if part.tool_name == "ask_user" and isinstance(part.content, dict):
                     acc += f"**>** {part.content['answer']}\n\n"
                 else:
-                    acc += "∟ finished\n\n"  # let's not show anything yet
+                    # acc += " ∟ finished\n\n"  # let's not show anything yet
+                    pass
+            elif isinstance(part, UserPromptPart):
+                acc += f"**>** {part.content}\n\n"
         return acc
 
 

@@ -153,7 +153,7 @@ class AgentResponseWidget(Widget):
         if self.item is None:
             yield Markdown(markdown="")
         else:
-            yield Markdown(markdown=…)
+            yield Markdown(markdown=self.compute_output())
 
     def compute_output(self) -> str:
         acc = ""

@@ -161,18 +161,12 @@
             return ""
         for idx, part in enumerate(self.item.parts):
             if isinstance(part, TextPart):
-                acc += part.content
+                acc += f"**⏺** {part.content}\n\n"
             elif isinstance(part, ToolCallPart):
                 parts_str = self._format_tool_call_part(part)
                 acc += parts_str + "\n\n"
-            elif isinstance(part, ToolReturnPart):
-                acc += (
-                    f"tool ({part.tool_name}) return: "
-                    + self._format_tool_return_call_part(part)
-                    + "\n\n"
-                )
             elif isinstance(part, BuiltinToolCallPart):
-                acc += f"…
+                acc += f"{part.tool_name}({part.args})\n\n"
             elif isinstance(part, BuiltinToolReturnPart):
                 acc += f"builtin tool ({part.tool_name}) return: {part.content}\n\n"
             elif isinstance(part, ThinkingPart):

@@ -226,10 +220,3 @@
             return f"{_args['question']}"
         else:
             return "❓ "
-
-    def _format_tool_return_call_part(self, part: ToolReturnPart) -> str:
-        content = part.content
-        if part.tool_name == "ask_user":
-            response = content.get("answer", "") if isinstance(content, dict) else ""
-            return f"**⏺** {response}"
-        return f"∟ {content}"
{shotgun_sh-0.1.0.dev31.dist-info → shotgun_sh-0.1.1.dev1.dist-info}/RECORD
CHANGED

@@ -7,7 +7,7 @@ shotgun/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 shotgun/sentry_telemetry.py,sha256=0W0o810ewFpIcdPsi_q4uKLiaP6zDYRRE5MHpIbQIPo,2954
 shotgun/telemetry.py,sha256=Ves6Ih3hshpKVNVAUUmwRdtW8NkTjFPg8hEqvFKZ0t0,3208
 shotgun/agents/__init__.py,sha256=8Jzv1YsDuLyNPFJyckSr_qI4ehTVeDyIMDW4omsfPGc,25
-shotgun/agents/agent_manager.py,sha256=…
+shotgun/agents/agent_manager.py,sha256=xw9xNEwVU-P4NGqF8W6mzVw4HNqtSfegAN9atog7aEo,23813
 shotgun/agents/common.py,sha256=vt7ECq1rT6GR5Rt63t0whH0R0cydrk7Mty2KyPL8mEg,19045
 shotgun/agents/conversation_history.py,sha256=5J8_1yxdZiiWTq22aDio88DkBDZ4_Lh_p5Iy5_ENszc,3898
 shotgun/agents/conversation_manager.py,sha256=fxAvXbEl3Cl2ugJ4N9aWXaqZtkrnfj3QzwjWC4LFXwI,3514

@@ -19,10 +19,10 @@ shotgun/agents/research.py,sha256=lYG7Rytcitop8mXs3isMI3XvYzzI3JH9u0VZz6K9zfo,32
 shotgun/agents/specify.py,sha256=7MoMxfIn34G27mw6wrp_F0i2O5rid476L3kHFONDCd0,3137
 shotgun/agents/tasks.py,sha256=nk8zIl24o01hfzOGyWSbeVWeke6OGseO4Ppciurh13U,2999
 shotgun/agents/config/__init__.py,sha256=Fl8K_81zBpm-OfOW27M_WWLSFdaHHek6lWz95iDREjQ,318
-shotgun/agents/config/constants.py,sha256=…
+shotgun/agents/config/constants.py,sha256=MogArrb2r5rFI6BBzc6NhPz1payGeM6K-t5oIFbJgxg,494
 shotgun/agents/config/manager.py,sha256=kwMbPjz0kEH_WCQAamESGjHdE8d_P-ztel4NL4FWNUw,10662
-shotgun/agents/config/models.py,sha256=…
-shotgun/agents/config/provider.py,sha256=…
+shotgun/agents/config/models.py,sha256=vpVXrtiHsDt2D_h7BLyMiiQeT97vAz2L6lYKx2SEMjo,5909
+shotgun/agents/config/provider.py,sha256=pVWf_WM3MNWH0v2fU-peBCqx49X-nW81piQ_M-AKWRE,7249
 shotgun/agents/history/__init__.py,sha256=XFQj2a6fxDqVg0Q3juvN9RjV_RJbgvFZtQOCOjVJyp4,147
 shotgun/agents/history/compaction.py,sha256=KY_ZvRvvlrB6eLPGqtlC6H8h4HPPOtuPcUkgQJUjK5I,2890
 shotgun/agents/history/constants.py,sha256=yWY8rrTZarLA3flCCMB_hS2NMvUDRDTwP4D4j7MIh1w,446

@@ -62,14 +62,15 @@ shotgun/cli/codebase/commands.py,sha256=zvcM9gjHHO6styhXojb_1bnpq-Cozh2c77ZOIjw4
 shotgun/cli/codebase/models.py,sha256=B9vs-d-Bq0aS6FZKebhHT-9tw90Y5f6k_t71VlZpL8k,374
 shotgun/codebase/__init__.py,sha256=QBgFE2Abd5Vl7_NdYOglF9S6d-vIjkb3C0cpIYoHZEU,309
 shotgun/codebase/models.py,sha256=hxjbfDUka8loTApXq9KTvkXKt272fzdjr5u2ImYrNtk,4367
-shotgun/codebase/service.py,sha256=…
+shotgun/codebase/service.py,sha256=CZR5f1vZyUS3gVXuDiZj0cIuuxiR7fbkHK65PTPAovI,7504
 shotgun/codebase/core/__init__.py,sha256=GWWhJEqChiDXAF4omYCgzgoZmJjwsAf6P1aZ5Bl8OE0,1170
 shotgun/codebase/core/change_detector.py,sha256=kWCYLWzRzb3IGGOj71KBn7UOCOKMpINJbOBDf98aMxE,12409
 shotgun/codebase/core/code_retrieval.py,sha256=_JVyyQKHDFm3dxOOua1mw9eIIOHIVz3-I8aZtEsEj1E,7927
+shotgun/codebase/core/cypher_models.py,sha256=Yfysfa9lLguILftkmtuJCN3kLBFIo7WW7NigM-Zr-W4,1735
 shotgun/codebase/core/ingestor.py,sha256=H_kVCqdOKmnQpjcXvUdPFpep8OC2AbOhhE-9HKr_XZM,59836
 shotgun/codebase/core/language_config.py,sha256=vsqHyuFnumRPRBV1lMOxWKNOIiClO6FyfKQR0fGrtl4,8934
 shotgun/codebase/core/manager.py,sha256=6gyjfACbC5n1Hdy-JQIEDH2aNAlesUS9plQP_FHoJ94,59277
-shotgun/codebase/core/nl_query.py,sha256=…
+shotgun/codebase/core/nl_query.py,sha256=kPoSJXBlm5rLhzOofZhqPVMJ_Lj3rV2H6sld6BwtMdg,16115
 shotgun/codebase/core/parser_loader.py,sha256=LZRrDS8Sp518jIu3tQW-BxdwJ86lnsTteI478ER9Td8,4278
 shotgun/prompts/__init__.py,sha256=RswUm0HMdfm2m2YKUwUsEdRIwoczdbI7zlucoEvHYRo,132
 shotgun/prompts/loader.py,sha256=jy24-E02pCSmz2651aCT2NgHfRrHAGMYvKrD6gs0Er8,4424

@@ -87,7 +88,7 @@ shotgun/prompts/agents/state/system_state.j2,sha256=TQPnCLtmiNwQCbMxnCE7nLhXMJpK
 shotgun/prompts/agents/state/codebase/codebase_graphs_available.j2,sha256=U-hy-H9bPwV0sYIHTZ5TESxc5EOCtntI8GUZOmJipJw,601
 shotgun/prompts/codebase/__init__.py,sha256=NYuPMtmYM2ptuwf3YxVuotNlJOUq0hnjmwlzKcJkGK4,42
 shotgun/prompts/codebase/cypher_query_patterns.j2,sha256=ufTx_xT3VoS76KcVUbIgGQx-bJoJHx3bBE3dagAXv18,8913
-shotgun/prompts/codebase/cypher_system.j2,sha256=…
+shotgun/prompts/codebase/cypher_system.j2,sha256=jo8d_AIoyAd0zKCvPXSmYGBxvtulMsCfeaOTdOfeC5g,2620
 shotgun/prompts/codebase/enhanced_query_context.j2,sha256=WzGnFaBLZO-mOdkZ_u_PewSu9niKy87DKNL4uzQq1Jg,724
 shotgun/prompts/codebase/partials/cypher_rules.j2,sha256=vtc5OqTp-z5Rq_ti-_RG31bVOIA_iNe80_x3CdxO6bs,2397
 shotgun/prompts/codebase/partials/graph_schema.j2,sha256=fUsD1ZgU1pIWUzrs97jHq3TatKeGSvZgG8XP5gCQUJc,1939

@@ -103,12 +104,12 @@ shotgun/sdk/services.py,sha256=J4PJFSxCQ6--u7rb3Ta-9eYtlYcxcbnzrMP6ThyCnw4,705
 shotgun/tui/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 shotgun/tui/app.py,sha256=t0IAQbGr0lKKEoBVnp85DcmZ-V92bi79SjyEE2uKpuw,3990
 shotgun/tui/styles.tcss,sha256=ETyyw1bpMBOqTi5RLcAJUScdPWTvAWEqE9YcT0kVs_E,121
-shotgun/tui/commands/__init__.py,sha256=…
+shotgun/tui/commands/__init__.py,sha256=8D5lvtpqMW5-fF7Bg3oJtUzU75cKOv6aUaHYYszydU8,2518
 shotgun/tui/components/prompt_input.py,sha256=Ss-htqraHZAPaehGE4x86ij0veMjc4UgadMXpbdXr40,2229
 shotgun/tui/components/spinner.py,sha256=ovTDeaJ6FD6chZx_Aepia6R3UkPOVJ77EKHfRmn39MY,2427
 shotgun/tui/components/splash.py,sha256=vppy9vEIEvywuUKRXn2y11HwXSRkQZHLYoVjhDVdJeU,1267
-shotgun/tui/components/vertical_tail.py,sha256=…
-shotgun/tui/screens/chat.py,sha256=…
+shotgun/tui/components/vertical_tail.py,sha256=GavHXNMq1X8hc0juDLKDWTW9seRLk3VlhBBMl60uPG0,439
+shotgun/tui/screens/chat.py,sha256=U3FfJj0c3HIXxPW8W7wRRWP5WruxUBFcHdsShQ_sMoQ,29820
 shotgun/tui/screens/chat.tcss,sha256=2Yq3E23jxsySYsgZf4G1AYrYVcpX0UDW6kNNI0tDmtM,437
 shotgun/tui/screens/directory_setup.py,sha256=lIZ1J4A6g5Q2ZBX8epW7BhR96Dmdcg22CyiM5S-I5WU,3237
 shotgun/tui/screens/provider_config.py,sha256=A_tvDHF5KLP5PV60LjMJ_aoOdT3TjI6_g04UIUqGPqM,7126

@@ -116,15 +117,15 @@ shotgun/tui/screens/splash.py,sha256=E2MsJihi3c9NY1L28o_MstDxGwrCnnV7zdq00MrGAsw
 shotgun/tui/screens/chat_screen/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 shotgun/tui/screens/chat_screen/command_providers.py,sha256=55JIH9T8QnyHRsMoXhOi87FiVM-d6o7OKpCe82uDP9I,7840
 shotgun/tui/screens/chat_screen/hint_message.py,sha256=WOpbk8q7qt7eOHTyyHvh_IQIaublVDeJGaLpsxEk9FA,933
-shotgun/tui/screens/chat_screen/history.py,sha256=…
+shotgun/tui/screens/chat_screen/history.py,sha256=JjQOKjCZpLBcw9CMorkBjOt2U5Ikr81hEQRtQmhw_KM,7459
 shotgun/tui/utils/__init__.py,sha256=cFjDfoXTRBq29wgP7TGRWUu1eFfiIG-LLOzjIGfadgI,150
 shotgun/tui/utils/mode_progress.py,sha256=lseRRo7kMWLkBzI3cU5vqJmS2ZcCjyRYf9Zwtvc-v58,10931
 shotgun/utils/__init__.py,sha256=WinIEp9oL2iMrWaDkXz2QX4nYVPAm8C9aBSKTeEwLtE,198
 shotgun/utils/env_utils.py,sha256=8QK5aw_f_V2AVTleQQlcL0RnD4sPJWXlDG46fsHu0d8,1057
 shotgun/utils/file_system_utils.py,sha256=l-0p1bEHF34OU19MahnRFdClHufThfGAjQ431teAIp0,1004
 shotgun/utils/update_checker.py,sha256=Xf-7w3Pos3etzCoT771gJe2HLkA8_V2GrqWy7ni9UqA,11373
-shotgun_sh-0.1.0.dev31.dist-info/METADATA,sha256=…
-shotgun_sh-0.1.0.dev31.dist-info/WHEEL,sha256=…
-shotgun_sh-0.1.0.dev31.dist-info/entry_points.txt,sha256=…
-shotgun_sh-0.1.0.dev31.dist-info/licenses/LICENSE,sha256=…
-shotgun_sh-0.1.0.dev31.dist-info/RECORD,,
+shotgun_sh-0.1.1.dev1.dist-info/METADATA,sha256=G5DvLCDWW-t_-ofUGOU2nB9dxvHKMrQkcac_xlFykdk,11196
+shotgun_sh-0.1.1.dev1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+shotgun_sh-0.1.1.dev1.dist-info/entry_points.txt,sha256=asZxLU4QILneq0MWW10saVCZc4VWhZfb0wFZvERnzfA,45
+shotgun_sh-0.1.1.dev1.dist-info/licenses/LICENSE,sha256=YebsZl590zCHrF_acCU5pmNt0pnAfD2DmAnevJPB1tY,1065
+shotgun_sh-0.1.1.dev1.dist-info/RECORD,,

{shotgun_sh-0.1.0.dev31.dist-info → shotgun_sh-0.1.1.dev1.dist-info}/WHEEL
File without changes

{shotgun_sh-0.1.0.dev31.dist-info → shotgun_sh-0.1.1.dev1.dist-info}/entry_points.txt
File without changes

{shotgun_sh-0.1.0.dev31.dist-info → shotgun_sh-0.1.1.dev1.dist-info}/licenses/LICENSE
File without changes