shotgun-sh 0.1.0.dev31__tar.gz → 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of shotgun-sh might be problematic.
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/PKG-INFO +1 -1
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/pyproject.toml +1 -1
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/agent_manager.py +73 -32
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/config/constants.py +0 -1
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/config/models.py +0 -3
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/config/provider.py +6 -6
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/build_constants.py +2 -2
- shotgun_sh-0.1.1/src/shotgun/codebase/core/cypher_models.py +46 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/core/nl_query.py +180 -39
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/service.py +17 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/codebase/cypher_system.j2 +15 -1
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/commands/__init__.py +10 -9
- shotgun_sh-0.1.1/src/shotgun/tui/components/vertical_tail.py +13 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/screens/chat.py +62 -23
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/screens/chat_screen/history.py +17 -30
- shotgun_sh-0.1.0.dev31/src/shotgun/tui/components/vertical_tail.py +0 -28
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/.gitignore +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/LICENSE +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/README.md +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/hatch_build.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/common.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/config/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/config/manager.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/conversation_history.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/conversation_manager.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/export.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/history/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/history/compaction.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/history/constants.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/history/context_extraction.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/history/history_building.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/history/history_processors.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/history/message_utils.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/history/token_counting.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/history/token_estimation.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/messages.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/models.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/plan.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/research.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/specify.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tasks.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/codebase/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/codebase/codebase_shell.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/codebase/directory_lister.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/codebase/file_read.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/codebase/models.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/codebase/query_graph.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/codebase/retrieve_code.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/file_management.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/user_interaction.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/web_search/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/web_search/anthropic.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/web_search/gemini.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/web_search/openai.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/agents/tools/web_search/utils.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/codebase/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/codebase/commands.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/codebase/models.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/config.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/export.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/models.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/plan.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/research.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/specify.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/tasks.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/update.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/cli/utils.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/core/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/core/change_detector.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/core/code_retrieval.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/core/ingestor.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/core/language_config.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/core/manager.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/core/parser_loader.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/codebase/models.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/logging_config.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/main.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/posthog_telemetry.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/export.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/partials/codebase_understanding.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/partials/content_formatting.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/partials/interactive_mode.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/plan.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/research.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/specify.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/state/codebase/codebase_graphs_available.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/state/system_state.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/agents/tasks.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/codebase/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/codebase/cypher_query_patterns.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/codebase/enhanced_query_context.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/codebase/partials/cypher_rules.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/codebase/partials/graph_schema.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/codebase/partials/temporal_context.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/history/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/history/incremental_summarization.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/history/summarization.j2 +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/prompts/loader.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/py.typed +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/sdk/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/sdk/codebase.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/sdk/exceptions.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/sdk/models.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/sdk/services.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/sentry_telemetry.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/telemetry.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/app.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/components/prompt_input.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/components/spinner.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/components/splash.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/screens/chat.tcss +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/screens/chat_screen/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/screens/chat_screen/command_providers.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/screens/chat_screen/hint_message.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/screens/directory_setup.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/screens/provider_config.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/screens/splash.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/styles.tcss +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/utils/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/tui/utils/mode_progress.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/utils/__init__.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/utils/env_utils.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/utils/file_system_utils.py +0 -0
- {shotgun_sh-0.1.0.dev31 → shotgun_sh-0.1.1}/src/shotgun/utils/update_checker.py +0 -0
--- shotgun_sh-0.1.0.dev31/src/shotgun/agents/agent_manager.py
+++ shotgun_sh-0.1.1/src/shotgun/agents/agent_manager.py
@@ -76,20 +76,25 @@ class MessageHistoryUpdated(Message):
 class PartialResponseMessage(Message):
     """Event posted when a partial response is received."""

-    def __init__(
+    def __init__(
+        self,
+        message: ModelResponse | None,
+        messages: list[ModelMessage],
+        is_last: bool,
+    ) -> None:
         """Initialize the partial response message."""
         super().__init__()
         self.message = message
+        self.messages = messages
         self.is_last = is_last


 @dataclass(slots=True)
 class _PartialStreamState:
-    """Tracks
+    """Tracks streamed messages while handling a single agent run."""

-
-
-    final_sent: bool = False
+    messages: list[ModelRequest | ModelResponse] = field(default_factory=list)
+    current_response: ModelResponse | None = None


 class AgentManager(Widget):

@@ -272,6 +277,7 @@ class AgentManager(Widget):

         # Clear file tracker before each run to track only this run's operations
         deps.file_tracker.clear()
+        original_messages = self.ui_message_history.copy()

         if prompt:
             self.ui_message_history.append(ModelRequest.user_text_prompt(prompt))

@@ -356,16 +362,11 @@ class AgentManager(Widget):
                 **kwargs,
             )
         finally:
-            # If the stream ended unexpectedly without a final result, clear accumulated state.
-            if self._stream_state is not None and not self._stream_state.final_sent:
-                partial_message = self._build_partial_response(self._stream_state.parts)
-                if partial_message is not None:
-                    self._post_partial_message(partial_message, True)
             self._stream_state = None

-        self.ui_message_history =
-
-
+        self.ui_message_history = original_messages + cast(
+            list[ModelRequest | ModelResponse | HintMessage], result.new_messages()
+        )

         # Apply compaction to persistent message history to prevent cascading growth
         all_messages = result.all_messages()

@@ -390,7 +391,13 @@ class AgentManager(Widget):
         if state is None:
             state = self._stream_state = _PartialStreamState()

-
+        if state.current_response is not None:
+            partial_parts: list[ModelResponsePart | ToolCallPartDelta] = list(
+                state.current_response.parts
+                # cast(Sequence[ModelResponsePart], state.current_response.parts)
+            )
+        else:
+            partial_parts = []

         async for event in stream:
             try:

@@ -409,8 +416,8 @@ class AgentManager(Widget):

                     partial_message = self._build_partial_response(partial_parts)
                     if partial_message is not None:
-                        state.
-                        self._post_partial_message(
+                        state.current_response = partial_message
+                        self._post_partial_message(False)

                 elif isinstance(event, PartDeltaEvent):
                     index = event.index

@@ -435,8 +442,8 @@ class AgentManager(Widget):

                     partial_message = self._build_partial_response(partial_parts)
                     if partial_message is not None:
-                        state.
-                        self._post_partial_message(
+                        state.current_response = partial_message
+                        self._post_partial_message(False)

                 elif isinstance(event, FunctionToolCallEvent):
                     existing_call_idx = next(

@@ -448,29 +455,54 @@ class AgentManager(Widget):
                        ),
                        None,
                    )
+
                    if existing_call_idx is not None:
                        partial_parts[existing_call_idx] = event.part
+                    elif state.messages:
+                        existing_call_idx = next(
+                            (
+                                i
+                                for i, part in enumerate(state.messages[-1].parts)
+                                if isinstance(part, ToolCallPart)
+                                and part.tool_call_id == event.part.tool_call_id
+                            ),
+                            None,
+                        )
                    else:
                        partial_parts.append(event.part)
                    partial_message = self._build_partial_response(partial_parts)
                    if partial_message is not None:
-                        state.
-                        self._post_partial_message(
+                        state.current_response = partial_message
+                        self._post_partial_message(False)
                elif isinstance(event, FunctionToolResultEvent):
-
-
-
-
-
-
-
-
+                    request_message = ModelRequest(parts=[event.result])
+                    state.messages.append(request_message)
+                    if (
+                        event.result.tool_name == "ask_user"
+                    ):  # special handling to ask_user, because deferred tool results mean we missed the user response
+                        self.ui_message_history.append(request_message)
+                        self._post_messages_updated()
+                        ## this is what the user responded with
+                        self._post_partial_message(is_last=False)

+                elif isinstance(event, FinalResultEvent):
+                    pass
            except Exception:  # pragma: no cover - defensive logging
                logger.exception(
                    "Error while handling agent stream event", extra={"event": event}
                )

+        final_message = state.current_response or self._build_partial_response(
+            partial_parts
+        )
+        if final_message is not None:
+            state.current_response = final_message
+            if final_message not in state.messages:
+                state.messages.append(final_message)
+                state.current_response = None
+            self._post_partial_message(True)
+            state.current_response = None
+
     def _build_partial_response(
         self, parts: list[ModelResponsePart | ToolCallPartDelta]
     ) -> ModelResponse | None:

@@ -483,11 +515,20 @@ class AgentManager(Widget):
             return None
         return ModelResponse(parts=list(completed_parts))

-    def _post_partial_message(
-        self, message: ModelResponse | None, is_last: bool
-    ) -> None:
+    def _post_partial_message(self, is_last: bool) -> None:
         """Post a partial message to the UI."""
-        self.
+        if self._stream_state is None:
+            return
+        self.post_message(
+            PartialResponseMessage(
+                self._stream_state.current_response
+                if self._stream_state.current_response
+                not in self._stream_state.messages
+                else None,
+                self._stream_state.messages,
+                is_last,
+            )
+        )

     def _post_messages_updated(
         self, file_operations: list[FileOperation] | None = None
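For orientation, here is a minimal self-contained sketch of the reworked streaming behaviour shown above: completed request/response messages accumulate in _PartialStreamState.messages while the in-flight response sits in current_response, and PartialResponseMessage now carries both plus the is_last flag. This is illustrative only, not the widget code from the package; plain strings stand in for ModelRequest/ModelResponse, and StreamState / post_partial are hypothetical names.

# Sketch of the streaming state handling in the diff above (assumptions noted in the lead-in).
from dataclasses import dataclass, field


@dataclass(slots=True)
class StreamState:
    messages: list[str] = field(default_factory=list)  # finalized messages shown to the UI
    current_response: str | None = None                # response still being streamed


def post_partial(state: StreamState, is_last: bool) -> tuple[str | None, list[str], bool]:
    # Mirror _post_partial_message: only expose the partial if it has not been finalized yet.
    partial = state.current_response if state.current_response not in state.messages else None
    return partial, list(state.messages), is_last


state = StreamState(current_response="drafting answer...")
print(post_partial(state, is_last=False))   # ('drafting answer...', [], False)

state.messages.append("drafting answer...")  # finalization moves it into the message list
print(post_partial(state, is_last=True))     # (None, ['drafting answer...'], True)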
--- shotgun_sh-0.1.0.dev31/src/shotgun/agents/config/models.py
+++ shotgun_sh-0.1.1/src/shotgun/agents/config/models.py
@@ -117,21 +117,18 @@ class OpenAIConfig(BaseModel):
     """Configuration for OpenAI provider."""

     api_key: SecretStr | None = None
-    model_name: str = "gpt-5"


 class AnthropicConfig(BaseModel):
     """Configuration for Anthropic provider."""

     api_key: SecretStr | None = None
-    model_name: str = "claude-opus-4-1"


 class GoogleConfig(BaseModel):
     """Configuration for Google provider."""

     api_key: SecretStr | None = None
-    model_name: str = "gemini-2.5-pro"


 class ShotgunConfig(BaseModel):
--- shotgun_sh-0.1.0.dev31/src/shotgun/agents/config/provider.py
+++ shotgun_sh-0.1.1/src/shotgun/agents/config/provider.py
@@ -127,8 +127,8 @@ def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
             f"OpenAI API key not configured. Set via environment variable {OPENAI_API_KEY_ENV} or config."
         )

-    # Get model spec
-    model_name =
+    # Get model spec - hardcoded to gpt-5
+    model_name = "gpt-5"
     if model_name not in MODEL_SPECS:
         raise ValueError(f"Model '{model_name}' not found")
     spec = MODEL_SPECS[model_name]

@@ -149,8 +149,8 @@ def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
             f"Anthropic API key not configured. Set via environment variable {ANTHROPIC_API_KEY_ENV} or config."
         )

-    # Get model spec
-    model_name =
+    # Get model spec - hardcoded to claude-opus-4-1
+    model_name = "claude-opus-4-1"
     if model_name not in MODEL_SPECS:
         raise ValueError(f"Model '{model_name}' not found")
     spec = MODEL_SPECS[model_name]

@@ -171,8 +171,8 @@ def get_provider_model(provider: ProviderType | None = None) -> ModelConfig:
             f"Gemini API key not configured. Set via environment variable {GEMINI_API_KEY_ENV} or config."
         )

-    # Get model spec
-    model_name =
+    # Get model spec - hardcoded to gemini-2.5-pro
+    model_name = "gemini-2.5-pro"
     if model_name not in MODEL_SPECS:
         raise ValueError(f"Model '{model_name}' not found")
     spec = MODEL_SPECS[model_name]
--- shotgun_sh-0.1.0.dev31/src/shotgun/build_constants.py
+++ shotgun_sh-0.1.1/src/shotgun/build_constants.py
@@ -12,8 +12,8 @@ POSTHOG_API_KEY = ''
 POSTHOG_PROJECT_ID = '191396'

 # Logfire configuration embedded at build time (only for dev builds)
-LOGFIRE_ENABLED = '
-LOGFIRE_TOKEN = '
+LOGFIRE_ENABLED = ''
+LOGFIRE_TOKEN = ''

 # Build metadata
 BUILD_TIME_ENV = "production" if SENTRY_DSN else "development"
--- /dev/null
+++ shotgun_sh-0.1.1/src/shotgun/codebase/core/cypher_models.py
@@ -0,0 +1,46 @@
+"""Pydantic models and exceptions for Cypher query generation."""
+
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+
+class CypherGenerationResponse(BaseModel):
+    """Structured response from LLM for Cypher query generation.
+
+    This model ensures the LLM explicitly indicates whether it can generate
+    a valid Cypher query and provides a reason if it cannot.
+    """
+
+    cypher_query: str | None = Field(
+        default=None,
+        description="The generated Cypher query, or None if generation not possible",
+    )
+    can_generate_valid_cypher: bool = Field(
+        description="Whether a valid Cypher query can be generated for this request"
+    )
+    reason_cannot_generate: str | None = Field(
+        default=None,
+        description="Explanation why query cannot be generated (if applicable)",
+    )
+
+    def model_post_init(self, __context: Any) -> None:
+        """Validate that reason is provided when query cannot be generated."""
+        if not self.can_generate_valid_cypher and not self.reason_cannot_generate:
+            self.reason_cannot_generate = "No reason provided"
+        if self.can_generate_valid_cypher and not self.cypher_query:
+            raise ValueError(
+                "cypher_query must be provided when can_generate_valid_cypher is True"
+            )
+
+
+class CypherGenerationNotPossibleError(Exception):
+    """Raised when LLM cannot generate valid Cypher for the query.
+
+    This typically happens when the query is conceptual rather than structural,
+    or when it requires interpretation beyond what can be expressed in Cypher.
+    """
+
+    def __init__(self, reason: str):
+        self.reason = reason
+        super().__init__(f"Cannot generate Cypher query: {reason}")
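A brief illustration of how the new response model and exception interact, based only on the code in this file (assumes the released package and pydantic are installed; not part of the diff):

from shotgun.codebase.core.cypher_models import (
    CypherGenerationNotPossibleError,
    CypherGenerationResponse,
)

# Successful generation: a query plus the positive flag.
ok = CypherGenerationResponse(
    cypher_query="MATCH (f:Function) RETURN f.name",
    can_generate_valid_cypher=True,
)

# Refusal: model_post_init backfills a default reason when none is given.
refused = CypherGenerationResponse(can_generate_valid_cypher=False)
assert refused.reason_cannot_generate == "No reason provided"

# Callers (see nl_query.py below) translate a refusal into the dedicated exception.
if not refused.can_generate_valid_cypher:
    err = CypherGenerationNotPossibleError(refused.reason_cannot_generate or "unknown")
    assert str(err).startswith("Cannot generate Cypher query:")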
--- shotgun_sh-0.1.0.dev31/src/shotgun/codebase/core/nl_query.py
+++ shotgun_sh-0.1.1/src/shotgun/codebase/core/nl_query.py
@@ -4,15 +4,13 @@ import time
 from datetime import datetime
 from typing import TYPE_CHECKING

-from pydantic_ai
-    ModelRequest,
-    SystemPromptPart,
-    TextPart,
-    UserPromptPart,
-)
+from pydantic_ai import Agent

 from shotgun.agents.config import get_provider_model
-from shotgun.
+from shotgun.codebase.core.cypher_models import (
+    CypherGenerationNotPossibleError,
+    CypherGenerationResponse,
+)
 from shotgun.logging_config import get_logger
 from shotgun.prompts import PromptLoader


@@ -25,42 +23,52 @@ logger = get_logger(__name__)
 prompt_loader = PromptLoader()


-async def llm_cypher_prompt(
-
+async def llm_cypher_prompt(
+    system_prompt: str, user_prompt: str
+) -> CypherGenerationResponse:
+    """Generate a Cypher query from a natural language prompt using structured output.

     Args:
         system_prompt: The system prompt defining the behavior and context for the LLM
         user_prompt: The user's natural language query
     Returns:
-
+        CypherGenerationResponse with cypher_query, can_generate flag, and reason if not
     """
     model_config = get_provider_model()
-
-    #
-
-
-
-
-        ModelRequest(
-            parts=[
-                SystemPromptPart(content=system_prompt),
-                UserPromptPart(content=user_prompt),
-            ]
-        ),
-    ],
-    max_tokens=2000,  # Cypher queries are short, 2000 tokens is plenty
+
+    # Create an agent with structured output for Cypher generation
+    cypher_agent = Agent(
+        model=model_config.model_instance,
+        output_type=CypherGenerationResponse,
+        retries=2,
     )

-
-
+    # Combine system and user prompts
+    combined_prompt = f"{system_prompt}\n\nUser Query: {user_prompt}"
+
+    try:
+        # Run the agent to get structured response
+        result = await cypher_agent.run(combined_prompt)
+        response = result.output
+
+        # Log the structured response for debugging
+        logger.debug(
+            "Cypher generation response - can_generate: %s, query: %s, reason: %s",
+            response.can_generate_valid_cypher,
+            response.cypher_query[:50] if response.cypher_query else None,
+            response.reason_cannot_generate,
+        )
+
+        return response

-
-
-
-
-
-
-
+    except Exception as e:
+        logger.error("Failed to generate Cypher query with structured output: %s", e)
+        # Return a failure response
+        return CypherGenerationResponse(
+            cypher_query=None,
+            can_generate_valid_cypher=False,
+            reason_cannot_generate=f"LLM error: {str(e)}",
+        )


 async def generate_cypher(natural_language_query: str) -> str:

@@ -71,6 +79,10 @@ async def generate_cypher(natural_language_query: str) -> str:

     Returns:
         Generated Cypher query
+
+    Raises:
+        CypherGenerationNotPossibleError: If the query cannot be converted to Cypher
+        RuntimeError: If there's an error during generation
     """
     # Get current time for context
     current_timestamp = int(time.time())

@@ -88,8 +100,30 @@ async def generate_cypher(natural_language_query: str) -> str:
     )

     try:
-
-
+        response = await llm_cypher_prompt(system_prompt, enhanced_query)
+
+        # Check if the LLM could generate a valid Cypher query
+        if not response.can_generate_valid_cypher:
+            logger.info(
+                "Cannot generate Cypher for query '%s': %s",
+                natural_language_query,
+                response.reason_cannot_generate,
+            )
+            raise CypherGenerationNotPossibleError(
+                response.reason_cannot_generate or "Query cannot be converted to Cypher"
+            )
+
+        if not response.cypher_query:
+            raise ValueError("LLM indicated success but provided no query")
+
+        cleaned_query = clean_cypher_response(response.cypher_query)
+
+        # Validate Cypher keywords
+        is_valid, validation_error = validate_cypher_keywords(cleaned_query)
+        if not is_valid:
+            logger.warning(f"Generated query has invalid syntax: {validation_error}")
+            logger.warning(f"Problematic query: {cleaned_query}")
+            raise ValueError(f"Generated query validation failed: {validation_error}")

         # Validate UNION ALL queries
         is_valid, validation_error = validate_union_query(cleaned_query)

@@ -100,6 +134,8 @@ async def generate_cypher(natural_language_query: str) -> str:

         return cleaned_query

+    except CypherGenerationNotPossibleError:
+        raise  # Re-raise as-is
     except Exception as e:
         raise RuntimeError(f"Failed to generate Cypher query: {e}") from e


@@ -170,8 +206,31 @@ MATCH (f:Function) RETURN f.name, f.qualified_name // WRONG: missing third colu
             base_system_prompt=prompt_loader.render("codebase/cypher_system.j2"),
         )

-
-
+        response = await llm_cypher_prompt(enhanced_system_prompt, enhanced_query)
+
+        # Check if the LLM could generate a valid Cypher query
+        if not response.can_generate_valid_cypher:
+            logger.info(
+                "Cannot generate Cypher for retry query '%s': %s",
+                natural_language_query,
+                response.reason_cannot_generate,
+            )
+            raise CypherGenerationNotPossibleError(
+                response.reason_cannot_generate
+                or "Query cannot be converted to Cypher even with error context"
+            )
+
+        if not response.cypher_query:
+            raise ValueError("LLM indicated success but provided no query on retry")
+
+        cleaned_query = clean_cypher_response(response.cypher_query)
+
+        # Validate Cypher keywords
+        is_valid, validation_error = validate_cypher_keywords(cleaned_query)
+        if not is_valid:
+            logger.warning(f"Generated query has invalid syntax: {validation_error}")
+            logger.warning(f"Problematic query: {cleaned_query}")
+            raise ValueError(f"Generated query validation failed: {validation_error}")

         # Validate UNION ALL queries
         is_valid, validation_error = validate_union_query(cleaned_query)

@@ -182,6 +241,8 @@ MATCH (f:Function) RETURN f.name, f.qualified_name // WRONG: missing third colu

         return cleaned_query

+    except CypherGenerationNotPossibleError:
+        raise  # Re-raise as-is
     except Exception as e:
         raise RuntimeError(
             f"Failed to generate Cypher query with error context: {e}"

@@ -202,6 +263,10 @@ async def generate_cypher_openai_async(

     Returns:
         Generated Cypher query
+
+    Raises:
+        CypherGenerationNotPossibleError: If the query cannot be converted to Cypher
+        RuntimeError: If there's an error during generation
     """
     # Get current time for context
     current_timestamp = int(time.time())

@@ -219,9 +284,26 @@ async def generate_cypher_openai_async(
     )

     try:
-
-
+        response = await llm_cypher_prompt(system_prompt, enhanced_query)
+
+        # Check if the LLM could generate a valid Cypher query
+        if not response.can_generate_valid_cypher:
+            logger.info(
+                "Cannot generate Cypher for query '%s': %s",
+                natural_language_query,
+                response.reason_cannot_generate,
+            )
+            raise CypherGenerationNotPossibleError(
+                response.reason_cannot_generate or "Query cannot be converted to Cypher"
+            )

+        if not response.cypher_query:
+            raise ValueError("LLM indicated success but provided no query")
+
+        return clean_cypher_response(response.cypher_query)
+
+    except CypherGenerationNotPossibleError:
+        raise  # Re-raise as-is
     except Exception as e:
         logger.error(f"OpenAI API error: {e}")
         raise RuntimeError(f"Failed to generate Cypher query: {e}") from e

@@ -288,6 +370,65 @@ def validate_union_query(cypher_query: str) -> tuple[bool, str]:
     return True, ""


+def validate_cypher_keywords(query: str) -> tuple[bool, str]:
+    """Validate that a query starts with valid Kuzu Cypher keywords.
+
+    Args:
+        query: The Cypher query to validate
+
+    Returns:
+        Tuple of (is_valid, error_message)
+    """
+    # Valid Kuzu Cypher starting keywords based on parser expectations
+    valid_cypher_keywords = {
+        "ALTER",
+        "ATTACH",
+        "BEGIN",
+        "CALL",
+        "CHECKPOINT",
+        "COMMENT",
+        "COMMIT",
+        "COPY",
+        "CREATE",
+        "DELETE",
+        "DETACH",
+        "DROP",
+        "EXPLAIN",
+        "EXPORT",
+        "FORCE",
+        "IMPORT",
+        "INSTALL",
+        "LOAD",
+        "MATCH",
+        "MERGE",
+        "OPTIONAL",
+        "PROFILE",
+        "RETURN",
+        "ROLLBACK",
+        "SET",
+        "UNWIND",
+        "UNINSTALL",
+        "UPDATE",
+        "USE",
+        "WITH",
+    }
+
+    query = query.strip()
+    if not query:
+        return False, "Empty query"
+
+    # Get the first word
+    first_word = query.upper().split()[0] if query else ""
+
+    if first_word not in valid_cypher_keywords:
+        return (
+            False,
+            f"Query doesn't start with valid Cypher keyword. Found: '{first_word}'",
+        )
+
+    return True, ""
+
+
 def clean_cypher_response(response_text: str) -> str:
     """Clean up common LLM formatting artifacts from a Cypher query.
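As a quick sanity check of the new keyword gate added above (illustrative usage only; the import path follows the released package layout):

from shotgun.codebase.core.nl_query import validate_cypher_keywords

is_valid, error = validate_cypher_keywords("MATCH (f:Function) RETURN f.name")
assert is_valid and error == ""

# SQL, or any text not starting with a Kuzu Cypher keyword, is rejected up front,
# before the generated query ever reaches the graph database.
is_valid, error = validate_cypher_keywords("SELECT name FROM functions")
assert not is_valid
print(error)  # Query doesn't start with valid Cypher keyword. Found: 'SELECT'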
--- shotgun_sh-0.1.0.dev31/src/shotgun/codebase/service.py
+++ shotgun_sh-0.1.1/src/shotgun/codebase/service.py
@@ -4,6 +4,7 @@ import time
 from pathlib import Path
 from typing import Any

+from shotgun.codebase.core.cypher_models import CypherGenerationNotPossibleError
 from shotgun.codebase.core.manager import CodebaseGraphManager
 from shotgun.codebase.core.nl_query import generate_cypher
 from shotgun.codebase.models import CodebaseGraph, QueryResult, QueryType

@@ -190,6 +191,22 @@ class CodebaseService:
                 error=None,
             )

+        except CypherGenerationNotPossibleError as e:
+            # Handle queries that cannot be converted to Cypher
+            execution_time = (time.time() - start_time) * 1000
+            logger.info(f"Query cannot be converted to Cypher: {e.reason}")
+
+            return QueryResult(
+                query=query,
+                cypher_query=None,
+                results=[],
+                column_names=[],
+                row_count=0,
+                execution_time_ms=execution_time,
+                success=False,
+                error=f"This query cannot be converted to Cypher: {e.reason}",
+            )
+
         except Exception as e:
             execution_time = (time.time() - start_time) * 1000
             logger.error(f"Query execution failed: {e}")