shotgun-sh 0.2.17__py3-none-any.whl → 0.3.3.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. shotgun/agents/agent_manager.py +28 -14
  2. shotgun/agents/common.py +1 -1
  3. shotgun/agents/config/README.md +89 -0
  4. shotgun/agents/config/__init__.py +10 -1
  5. shotgun/agents/config/manager.py +323 -53
  6. shotgun/agents/config/models.py +85 -21
  7. shotgun/agents/config/provider.py +51 -13
  8. shotgun/agents/config/streaming_test.py +119 -0
  9. shotgun/agents/context_analyzer/analyzer.py +6 -2
  10. shotgun/agents/conversation/__init__.py +18 -0
  11. shotgun/agents/conversation/filters.py +164 -0
  12. shotgun/agents/conversation/history/chunking.py +278 -0
  13. shotgun/agents/{history → conversation/history}/compaction.py +27 -1
  14. shotgun/agents/{history → conversation/history}/constants.py +5 -0
  15. shotgun/agents/conversation/history/file_content_deduplication.py +216 -0
  16. shotgun/agents/{history → conversation/history}/history_processors.py +267 -3
  17. shotgun/agents/{history → conversation/history}/token_counting/anthropic.py +8 -0
  18. shotgun/agents/{conversation_manager.py → conversation/manager.py} +1 -1
  19. shotgun/agents/{conversation_history.py → conversation/models.py} +8 -94
  20. shotgun/agents/error/__init__.py +11 -0
  21. shotgun/agents/error/models.py +19 -0
  22. shotgun/agents/runner.py +230 -0
  23. shotgun/agents/tools/web_search/openai.py +1 -1
  24. shotgun/build_constants.py +2 -2
  25. shotgun/cli/clear.py +1 -1
  26. shotgun/cli/compact.py +5 -3
  27. shotgun/cli/context.py +44 -1
  28. shotgun/cli/error_handler.py +24 -0
  29. shotgun/cli/export.py +34 -34
  30. shotgun/cli/plan.py +34 -34
  31. shotgun/cli/research.py +17 -9
  32. shotgun/cli/spec/__init__.py +5 -0
  33. shotgun/cli/spec/backup.py +81 -0
  34. shotgun/cli/spec/commands.py +132 -0
  35. shotgun/cli/spec/models.py +48 -0
  36. shotgun/cli/spec/pull_service.py +219 -0
  37. shotgun/cli/specify.py +20 -19
  38. shotgun/cli/tasks.py +34 -34
  39. shotgun/codebase/core/ingestor.py +153 -7
  40. shotgun/codebase/models.py +2 -0
  41. shotgun/exceptions.py +325 -0
  42. shotgun/llm_proxy/__init__.py +17 -0
  43. shotgun/llm_proxy/client.py +215 -0
  44. shotgun/llm_proxy/models.py +137 -0
  45. shotgun/logging_config.py +42 -0
  46. shotgun/main.py +4 -0
  47. shotgun/posthog_telemetry.py +1 -1
  48. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +28 -3
  49. shotgun/prompts/agents/partials/interactive_mode.j2 +3 -3
  50. shotgun/prompts/agents/plan.j2 +16 -0
  51. shotgun/prompts/agents/research.j2 +16 -3
  52. shotgun/prompts/agents/specify.j2 +54 -1
  53. shotgun/prompts/agents/state/system_state.j2 +0 -2
  54. shotgun/prompts/agents/tasks.j2 +16 -0
  55. shotgun/prompts/history/chunk_summarization.j2 +34 -0
  56. shotgun/prompts/history/combine_summaries.j2 +53 -0
  57. shotgun/sdk/codebase.py +14 -3
  58. shotgun/settings.py +5 -0
  59. shotgun/shotgun_web/__init__.py +67 -1
  60. shotgun/shotgun_web/client.py +42 -1
  61. shotgun/shotgun_web/constants.py +46 -0
  62. shotgun/shotgun_web/exceptions.py +29 -0
  63. shotgun/shotgun_web/models.py +390 -0
  64. shotgun/shotgun_web/shared_specs/__init__.py +32 -0
  65. shotgun/shotgun_web/shared_specs/file_scanner.py +175 -0
  66. shotgun/shotgun_web/shared_specs/hasher.py +83 -0
  67. shotgun/shotgun_web/shared_specs/models.py +71 -0
  68. shotgun/shotgun_web/shared_specs/upload_pipeline.py +329 -0
  69. shotgun/shotgun_web/shared_specs/utils.py +34 -0
  70. shotgun/shotgun_web/specs_client.py +703 -0
  71. shotgun/shotgun_web/supabase_client.py +31 -0
  72. shotgun/tui/app.py +73 -9
  73. shotgun/tui/containers.py +1 -1
  74. shotgun/tui/layout.py +5 -0
  75. shotgun/tui/screens/chat/chat_screen.py +372 -95
  76. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +196 -17
  77. shotgun/tui/screens/chat_screen/command_providers.py +13 -2
  78. shotgun/tui/screens/chat_screen/hint_message.py +76 -1
  79. shotgun/tui/screens/confirmation_dialog.py +40 -0
  80. shotgun/tui/screens/directory_setup.py +45 -41
  81. shotgun/tui/screens/feedback.py +10 -3
  82. shotgun/tui/screens/github_issue.py +11 -2
  83. shotgun/tui/screens/model_picker.py +28 -8
  84. shotgun/tui/screens/onboarding.py +149 -0
  85. shotgun/tui/screens/pipx_migration.py +58 -6
  86. shotgun/tui/screens/provider_config.py +66 -8
  87. shotgun/tui/screens/shared_specs/__init__.py +21 -0
  88. shotgun/tui/screens/shared_specs/create_spec_dialog.py +273 -0
  89. shotgun/tui/screens/shared_specs/models.py +56 -0
  90. shotgun/tui/screens/shared_specs/share_specs_dialog.py +390 -0
  91. shotgun/tui/screens/shared_specs/upload_progress_screen.py +452 -0
  92. shotgun/tui/screens/shotgun_auth.py +110 -16
  93. shotgun/tui/screens/spec_pull.py +288 -0
  94. shotgun/tui/screens/welcome.py +123 -0
  95. shotgun/tui/services/conversation_service.py +5 -2
  96. shotgun/tui/widgets/widget_coordinator.py +1 -1
  97. {shotgun_sh-0.2.17.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/METADATA +9 -2
  98. {shotgun_sh-0.2.17.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/RECORD +112 -77
  99. {shotgun_sh-0.2.17.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/WHEEL +1 -1
  100. /shotgun/agents/{history → conversation/history}/__init__.py +0 -0
  101. /shotgun/agents/{history → conversation/history}/context_extraction.py +0 -0
  102. /shotgun/agents/{history → conversation/history}/history_building.py +0 -0
  103. /shotgun/agents/{history → conversation/history}/message_utils.py +0 -0
  104. /shotgun/agents/{history → conversation/history}/token_counting/__init__.py +0 -0
  105. /shotgun/agents/{history → conversation/history}/token_counting/base.py +0 -0
  106. /shotgun/agents/{history → conversation/history}/token_counting/openai.py +0 -0
  107. /shotgun/agents/{history → conversation/history}/token_counting/sentencepiece_counter.py +0 -0
  108. /shotgun/agents/{history → conversation/history}/token_counting/tokenizer_cache.py +0 -0
  109. /shotgun/agents/{history → conversation/history}/token_counting/utils.py +0 -0
  110. /shotgun/agents/{history → conversation/history}/token_estimation.py +0 -0
  111. {shotgun_sh-0.2.17.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/entry_points.txt +0 -0
  112. {shotgun_sh-0.2.17.dist-info → shotgun_sh-0.3.3.dev1.dist-info}/licenses/LICENSE +0 -0
shotgun/agents/{history → conversation/history}/history_processors.py
@@ -13,6 +13,7 @@ from pydantic_ai.messages import (
     UserPromptPart,
 )
 
+from shotgun.agents.conversation.filters import filter_orphaned_tool_responses
 from shotgun.agents.llm import shotgun_model_request
 from shotgun.agents.messages import AgentSystemPrompt, SystemStatusPrompt
 from shotgun.agents.models import AgentDeps
@@ -21,7 +22,7 @@ from shotgun.logging_config import get_logger
 from shotgun.posthog_telemetry import track_event
 from shotgun.prompts import PromptLoader
 
-from .constants import SUMMARY_MARKER, TOKEN_LIMIT_RATIO
+from .constants import CHUNK_SAFE_RATIO, SUMMARY_MARKER, TOKEN_LIMIT_RATIO
 from .context_extraction import extract_context_from_messages
 from .history_building import ensure_ends_with_model_request
 from .message_utils import (
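Note: the matching constants.py hunk is not included in this view (the file list shows +5 lines for shotgun/agents/{history → conversation/history}/constants.py). Judging from the "CHUNK_SAFE_RATIO (70%)" comment added to _full_compaction further down, the new constant presumably looks roughly like the sketch below; the exact value and any companion constants are assumptions.

# Hedged guess at the constants.py addition (not shown in this diff);
# 0.70 is inferred from the "(70%)" comment in _full_compaction below.
CHUNK_SAFE_RATIO = 0.70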
@@ -38,7 +39,7 @@ from .token_estimation import (
 )
 
 if TYPE_CHECKING:
-    pass
+    from . import chunking
 
 
 class ContextProtocol(Protocol):
@@ -416,6 +417,9 @@ async def token_limit_compactor(
         compacted_messages, messages
     )
 
+    # Filter out orphaned tool responses (tool responses without tool calls)
+    compacted_messages = filter_orphaned_tool_responses(compacted_messages)
+
     logger.debug(
         f"Incremental compaction complete: {len(messages)} -> {len(compacted_messages)} messages"
     )
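Note: filter_orphaned_tool_responses lives in the new shotgun/agents/conversation/filters.py (+164 lines per the file list), whose hunks are not part of this view. As a rough, non-authoritative sketch of the idea (dropping tool-return parts whose tool_call_id has no matching tool call anywhere in the history), it could look something like the following; everything inside the function body is illustrative, not the package's actual code.

# Hedged sketch only; the real filters.py implementation is not shown in this diff.
from pydantic_ai.messages import (
    ModelMessage,
    ModelRequest,
    ModelResponse,
    ToolCallPart,
    ToolReturnPart,
)


def filter_orphaned_tool_responses(messages: list[ModelMessage]) -> list[ModelMessage]:
    """Drop tool-return parts whose tool_call_id never appears as a tool call."""
    # Collect every tool_call_id the model actually emitted in this history.
    seen_call_ids = {
        part.tool_call_id
        for message in messages
        if isinstance(message, ModelResponse)
        for part in message.parts
        if isinstance(part, ToolCallPart)
    }

    filtered: list[ModelMessage] = []
    for message in messages:
        if not isinstance(message, ModelRequest):
            filtered.append(message)
            continue
        # Keep only parts that are not orphaned tool returns.
        kept_parts = [
            part
            for part in message.parts
            if not (
                isinstance(part, ToolReturnPart)
                and part.tool_call_id not in seen_call_ids
            )
        ]
        if kept_parts:
            filtered.append(ModelRequest(parts=kept_parts))
    return filtered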
@@ -489,10 +493,32 @@ async def _full_compaction(
     deps: AgentDeps,
     messages: list[ModelMessage],
 ) -> list[ModelMessage]:
-    """Perform full compaction for first-time summarization."""
+    """Perform full compaction for first-time summarization.
+
+    If the conversation is too large for single-pass compaction, delegates
+    to chunked compaction which breaks the conversation into logical chunks.
+    """
     # Extract context from all messages
     context = extract_context_from_messages(messages)
 
+    # Check if context would exceed model limit for compaction request
+    # We use CHUNK_SAFE_RATIO (70%) to leave room for prompt overhead
+    max_safe_input = int(deps.llm_model.max_input_tokens * CHUNK_SAFE_RATIO)
+
+    # Estimate context tokens
+    context_request: list[ModelMessage] = [ModelRequest.user_text_prompt(context)]
+    context_tokens = await estimate_tokens_from_messages(
+        context_request, deps.llm_model
+    )
+
+    if context_tokens > max_safe_input:
+        # Context too large for single-pass compaction - use chunked approach
+        logger.info(
+            f"Context ({context_tokens:,} tokens) exceeds safe limit "
+            f"({max_safe_input:,} tokens), using chunked compaction"
+        )
+        return await _chunked_compaction(deps, messages)
+
     # Use regular summarization prompt
     summarization_prompt = prompt_loader.render("history/summarization.j2")
     request_messages: list[ModelMessage] = [
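Note: for intuition on the cutover check above, with a hypothetical model that allows 200,000 input tokens and CHUNK_SAFE_RATIO at 0.70, single-pass compaction is used up to 140,000 estimated context tokens and chunked compaction beyond that. Illustrative numbers only:

max_input_tokens = 200_000  # hypothetical model limit, not a real config value
CHUNK_SAFE_RATIO = 0.70     # 70%, per the comment above

max_safe_input = int(max_input_tokens * CHUNK_SAFE_RATIO)  # 140_000
# context_tokens = 150_000 -> exceeds 140_000, routed to _chunked_compaction
# context_tokens = 100_000 -> stays on the single-pass summarization path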
@@ -565,6 +591,9 @@ async def _full_compaction(
     # Ensure history ends with ModelRequest for PydanticAI compatibility
     compacted_messages = ensure_ends_with_model_request(compacted_messages, messages)
 
+    # Filter out orphaned tool responses (tool responses without tool calls)
+    compacted_messages = filter_orphaned_tool_responses(compacted_messages)
+
     # Track full compaction event
     messages_before = len(messages)
     messages_after = len(compacted_messages)
@@ -592,3 +621,238 @@ async def _full_compaction(
     )
 
     return compacted_messages
+
+
+async def _chunked_compaction(
+    deps: AgentDeps,
+    messages: list[ModelMessage],
+) -> list[ModelMessage]:
+    """Perform chunked compaction for oversized conversations.
+
+    Breaks the conversation into logical chunks, summarizes each sequentially,
+    then combines the summaries into a master summary.
+    """
+    from .chunking import chunk_messages_for_compaction
+
+    # Split into chunks and retention window
+    chunks, retained_messages = await chunk_messages_for_compaction(
+        messages, deps.llm_model
+    )
+
+    if not chunks:
+        # No chunks to summarize (conversation too small), return retained messages
+        logger.debug("No chunks to summarize, returning retained messages")
+        return retained_messages
+
+    # Track chunked compaction
+    total_chunks = len(chunks)
+    logger.info(f"Starting chunked compaction: {total_chunks} chunks to process")
+
+    # Summarize each chunk sequentially
+    chunk_summaries: list[str] = []
+    for chunk in chunks:
+        try:
+            summary = await _summarize_chunk(chunk, total_chunks, deps)
+            chunk_summaries.append(summary)
+            logger.debug(
+                f"Chunk {chunk.chunk_index + 1}/{total_chunks} summarized successfully"
+            )
+        except Exception as e:
+            logger.warning(
+                f"Failed to summarize chunk {chunk.chunk_index + 1}/{total_chunks}: {e}"
+            )
+            # Continue with other chunks - we'll note the gap in fusion
+            chunk_summaries.append(
+                f"[Chunk {chunk.chunk_index + 1} summary unavailable]"
+            )
+
+    # Combine summaries into master summary
+    if len(chunk_summaries) == 1:
+        final_summary = chunk_summaries[0]
+    else:
+        final_summary = await _combine_chunk_summaries(chunk_summaries, deps)
+
+    # Build final compacted history
+    compacted = _build_chunked_compaction_result(
+        final_summary, messages, retained_messages, deps
+    )
+
+    # Track chunked compaction event
+    track_event(
+        "chunked_compaction_triggered",
+        {
+            "num_chunks": total_chunks,
+            "chunks_succeeded": sum(
+                1 for s in chunk_summaries if not s.startswith("[Chunk")
+            ),
+            "retention_window_size": len(retained_messages),
+            "model_name": deps.llm_model.name.value,
+            "provider": deps.llm_model.provider.value,
+        },
+    )
+
+    return compacted
+
+
+async def _summarize_chunk(
+    chunk: "chunking.Chunk",
+    total_chunks: int,
+    deps: AgentDeps,
+) -> str:
+    """Summarize a single chunk of messages."""
+    chunk_messages = chunk.get_all_messages()
+    context = extract_context_from_messages(chunk_messages)
+
+    # Use chunk summarization template
+    chunk_prompt = prompt_loader.render(
+        "history/chunk_summarization.j2",
+        chunk_index=chunk.chunk_index + 1,
+        total_chunks=total_chunks,
+        chunk_content=context,
+    )
+
+    request_messages: list[ModelMessage] = [
+        ModelRequest.user_text_prompt(context, instructions=chunk_prompt)
+    ]
+
+    max_tokens = await calculate_max_summarization_tokens(
+        deps.llm_model, request_messages
+    )
+
+    log_summarization_request(
+        deps.llm_model,
+        max_tokens,
+        chunk_prompt,
+        context[:500] + "..." if len(context) > 500 else context,
+        f"CHUNK_{chunk.chunk_index + 1}",
+    )
+
+    response = await shotgun_model_request(
+        model_config=deps.llm_model,
+        messages=request_messages,
+        model_settings=ModelSettings(max_tokens=max_tokens),
+    )
+
+    log_summarization_response(response, f"CHUNK_{chunk.chunk_index + 1}")
+
+    if response.parts and isinstance(response.parts[0], TextPart):
+        return response.parts[0].content
+    return ""
+
+
+async def _combine_chunk_summaries(
+    summaries: list[str],
+    deps: AgentDeps,
+) -> str:
+    """Combine multiple chunk summaries into a unified summary."""
+    # Check if combined summaries exceed limit (may need recursive combination)
+    combined_text = "\n\n".join(summaries)
+    combined_request: list[ModelMessage] = [
+        ModelRequest.user_text_prompt(combined_text)
+    ]
+    combined_tokens = await estimate_tokens_from_messages(
+        combined_request, deps.llm_model
+    )
+
+    max_safe_input = int(deps.llm_model.max_input_tokens * CHUNK_SAFE_RATIO)
+
+    if combined_tokens > max_safe_input:
+        # Recursive: split summaries in half and combine each half first
+        logger.warning(
+            f"Combined summaries too large ({combined_tokens:,} tokens), "
+            f"applying recursive combination"
+        )
+        mid = len(summaries) // 2
+        first_half = await _combine_chunk_summaries(summaries[:mid], deps)
+        second_half = await _combine_chunk_summaries(summaries[mid:], deps)
+        summaries = [first_half, second_half]
+
+    # Use combination template
+    combine_prompt = prompt_loader.render(
+        "history/combine_summaries.j2",
+        num_summaries=len(summaries),
+        chunk_summaries=summaries,
+    )
+
+    request_messages: list[ModelMessage] = [
+        ModelRequest.user_text_prompt(
+            "\n\n---\n\n".join(summaries), instructions=combine_prompt
+        )
+    ]
+
+    max_tokens = await calculate_max_summarization_tokens(
+        deps.llm_model, request_messages
+    )
+
+    log_summarization_request(
+        deps.llm_model,
+        max_tokens,
+        combine_prompt,
+        f"[{len(summaries)} summaries to combine]",
+        "COMBINE",
+    )
+
+    response = await shotgun_model_request(
+        model_config=deps.llm_model,
+        messages=request_messages,
+        model_settings=ModelSettings(max_tokens=max_tokens),
+    )
+
+    log_summarization_response(response, "COMBINE")
+
+    if response.parts and isinstance(response.parts[0], TextPart):
+        return response.parts[0].content
+    return ""
+
+
+def _build_chunked_compaction_result(
+    final_summary: str,
+    original_messages: list[ModelMessage],
+    retained_messages: list[ModelMessage],
+    deps: AgentDeps,
+) -> list[ModelMessage]:
+    """Build the final compacted history from chunked compaction."""
+    from pydantic_ai.messages import ModelRequestPart
+
+    # Extract system context from original messages
+    agent_prompt = get_agent_system_prompt(original_messages) or ""
+    system_status = get_latest_system_status(original_messages) or ""
+    first_user = get_first_user_request(original_messages) or ""
+
+    # Create marked summary
+    summary_part = TextPart(content=f"{SUMMARY_MARKER} {final_summary}")
+    summary_message = ModelResponse(parts=[summary_part])
+
+    # Build compacted structure
+    compacted: list[ModelMessage] = []
+
+    # Initial request with system context
+    parts: list[ModelRequestPart] = []
+    if agent_prompt:
+        parts.append(AgentSystemPrompt(content=agent_prompt))
+    if system_status:
+        parts.append(SystemStatusPrompt(content=system_status))
+    if first_user:
+        parts.append(UserPromptPart(content=first_user))
+
+    if parts:
+        compacted.append(ModelRequest(parts=parts))
+
+    # Add summary
+    compacted.append(summary_message)
+
+    # Add retained messages (recent context)
+    compacted.extend(retained_messages)
+
+    # Ensure ends with ModelRequest for PydanticAI compatibility
+    compacted = ensure_ends_with_model_request(compacted, original_messages)
+
+    # Filter orphaned tool responses
+    compacted = filter_orphaned_tool_responses(compacted)
+
+    logger.info(
+        f"Chunked compaction complete: {len(original_messages)} messages -> "
+        f"{len(compacted)} messages (retained {len(retained_messages)} recent)"
+    )
+
+    return compacted
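Note: the Chunk objects and chunk_messages_for_compaction used above come from the new chunking.py (+278 lines per the file list), which is not included in this view. From the call sites, the minimal interface the compaction code relies on can be sketched as below; the fixed-size splitting is a simplified stand-in, since the real module presumably sizes chunks by estimated tokens against CHUNK_SAFE_RATIO and picks the retention window more carefully.

# Hedged sketch of the interface assumed by _chunked_compaction above;
# the real shotgun/agents/conversation/history/chunking.py is not shown here.
from dataclasses import dataclass, field

from pydantic_ai.messages import ModelMessage


@dataclass
class Chunk:
    chunk_index: int  # 0-based, as implied by the "chunk.chunk_index + 1" logging above
    messages: list[ModelMessage] = field(default_factory=list)

    def get_all_messages(self) -> list[ModelMessage]:
        return self.messages


async def chunk_messages_for_compaction(
    messages: list[ModelMessage],
    llm_model,  # model config; unused in this simplified stand-in
    retention_window: int = 10,
    chunk_size: int = 40,
) -> tuple[list[Chunk], list[ModelMessage]]:
    """Simplified stand-in: fixed-size chunks plus a retained recent tail."""
    retained = messages[-retention_window:] if retention_window else []
    to_chunk = messages[: len(messages) - len(retained)]
    chunks = [
        Chunk(chunk_index=i, messages=to_chunk[start : start + chunk_size])
        for i, start in enumerate(range(0, len(to_chunk), chunk_size))
    ]
    return chunks, retained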
shotgun/agents/{history → conversation/history}/token_counting/anthropic.py
@@ -1,6 +1,7 @@
 """Anthropic token counting using official client."""
 
 import logfire
+from anthropic import APIStatusError
 from pydantic_ai.messages import ModelMessage
 
 from shotgun.agents.config.models import KeyProvider
@@ -103,6 +104,13 @@ class AnthropicTokenCounter(TokenCounter):
                 exception_type=type(e).__name__,
                 exception_message=str(e),
             )
+
+            # Re-raise API errors directly so they can be classified by the runner
+            # This allows proper error classification for BYOK users (authentication, rate limits, etc.)
+            if isinstance(e, APIStatusError):
+                raise
+
+            # Only wrap library-level errors in RuntimeError
             raise RuntimeError(
                 f"Anthropic token counting API failed for {self.model_name}: {type(e).__name__}: {str(e)}"
             ) from e
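Note: the hunk above changes the failure contract of AnthropicTokenCounter. anthropic.APIStatusError instances now propagate unchanged (so authentication and rate-limit failures keep their status codes for classification), while other failures are still wrapped in RuntimeError. A caller could distinguish the two roughly as follows; this helper is illustrative and not part of the diff.

from anthropic import APIStatusError


def classify_token_counting_failure(exc: Exception) -> str:
    """Rough illustration of the distinction the re-raise enables."""
    if isinstance(exc, APIStatusError):
        # Provider-side error surfaced as-is (e.g. 401 auth, 429 rate limit).
        return f"anthropic_api_error_{exc.status_code}"
    if isinstance(exc, RuntimeError):
        # Library-level failure wrapped by AnthropicTokenCounter.
        return "token_counting_runtime_error"
    return "unclassified"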
shotgun/agents/{conversation_manager.py → conversation/manager.py}
@@ -11,7 +11,7 @@ from shotgun.logging_config import get_logger
 from shotgun.utils import get_shotgun_home
 from shotgun.utils.file_system_utils import async_copy_file
 
-from .conversation_history import ConversationHistory
+from .models import ConversationHistory
 
 logger = get_logger(__name__)
 
shotgun/agents/{conversation_history.py → conversation/models.py}
@@ -1,7 +1,5 @@
-"""Models and utilities for persisting TUI conversation history."""
+"""Models for persisting TUI conversation history."""
 
-import json
-import logging
 from datetime import datetime
 from typing import Any, cast
 
@@ -16,101 +14,15 @@ from pydantic_core import to_jsonable_python
 
 from shotgun.tui.screens.chat_screen.hint_message import HintMessage
 
-__all__ = ["HintMessage", "ConversationHistory"]
-
-logger = logging.getLogger(__name__)
+from .filters import (
+    filter_incomplete_messages,
+    filter_orphaned_tool_responses,
+    is_tool_call_complete,
+)
 
 SerializedMessage = dict[str, Any]
 
 
-def is_tool_call_complete(tool_call: ToolCallPart) -> bool:
-    """Check if a tool call has valid, complete JSON arguments.
-
-    Args:
-        tool_call: The tool call part to validate
-
-    Returns:
-        True if the tool call args are valid JSON, False otherwise
-    """
-    if tool_call.args is None:
-        return True  # No args is valid
-
-    if isinstance(tool_call.args, dict):
-        return True  # Already parsed dict is valid
-
-    if not isinstance(tool_call.args, str):
-        return False
-
-    # Try to parse the JSON string
-    try:
-        json.loads(tool_call.args)
-        return True
-    except (json.JSONDecodeError, ValueError) as e:
-        # Log incomplete tool call detection
-        args_preview = (
-            tool_call.args[:100] + "..."
-            if len(tool_call.args) > 100
-            else tool_call.args
-        )
-        logger.info(
-            "Detected incomplete tool call in validation",
-            extra={
-                "tool_name": tool_call.tool_name,
-                "tool_call_id": tool_call.tool_call_id,
-                "args_preview": args_preview,
-                "error": str(e),
-            },
-        )
-        return False
-
-
-def filter_incomplete_messages(messages: list[ModelMessage]) -> list[ModelMessage]:
-    """Filter out messages with incomplete tool calls.
-
-    Args:
-        messages: List of messages to filter
-
-    Returns:
-        List of messages with only complete tool calls
-    """
-    filtered: list[ModelMessage] = []
-    filtered_count = 0
-    filtered_tool_names: list[str] = []
-
-    for message in messages:
-        # Only check ModelResponse messages for tool calls
-        if not isinstance(message, ModelResponse):
-            filtered.append(message)
-            continue
-
-        # Check if any tool calls are incomplete
-        has_incomplete_tool_call = False
-        for part in message.parts:
-            if isinstance(part, ToolCallPart) and not is_tool_call_complete(part):
-                has_incomplete_tool_call = True
-                filtered_tool_names.append(part.tool_name)
-                break
-
-        # Only include messages without incomplete tool calls
-        if not has_incomplete_tool_call:
-            filtered.append(message)
-        else:
-            filtered_count += 1
-
-    # Log if any messages were filtered
-    if filtered_count > 0:
-        logger.info(
-            "Filtered incomplete messages before saving",
-            extra={
-                "filtered_count": filtered_count,
-                "total_messages": len(messages),
-                "filtered_tool_names": filtered_tool_names,
-            },
-        )
-
-    return filtered
-
-
 class ConversationState(BaseModel):
     """Represents the complete state of a conversation in memory."""
 
@@ -144,6 +56,8 @@ class ConversationHistory(BaseModel):
         """
         # Filter out messages with incomplete tool calls to prevent corruption
         filtered_messages = filter_incomplete_messages(messages)
+        # Filter out orphaned tool responses (tool responses without tool calls)
+        filtered_messages = filter_orphaned_tool_responses(filtered_messages)
 
         # Serialize ModelMessage list to JSON-serializable format
         self.agent_history = to_jsonable_python(
shotgun/agents/error/__init__.py
@@ -0,0 +1,11 @@
+"""Agent error handling module.
+
+This module provides the AgentErrorContext model used by AgentRunner
+for error classification.
+"""
+
+from shotgun.agents.error.models import AgentErrorContext
+
+__all__ = [
+    "AgentErrorContext",
+]
shotgun/agents/error/models.py
@@ -0,0 +1,19 @@
+"""Pydantic models for agent error handling."""
+
+from typing import Any
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class AgentErrorContext(BaseModel):
+    """Context information needed to classify and handle agent errors.
+
+    Attributes:
+        exception: The exception that was raised
+        is_shotgun_account: Whether the user is using a Shotgun Account
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    exception: Any = Field(...)
+    is_shotgun_account: bool
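Note: AgentErrorContext is a thin carrier. Its exception field is typed Any with arbitrary_types_allowed, so any raised exception object can be stored as-is next to the account flag. A hedged usage sketch follows (classify_error is illustrative and not part of this diff):

from anthropic import APIStatusError

from shotgun.agents.error import AgentErrorContext


def classify_error(ctx: AgentErrorContext) -> str:
    """Illustrative-only classification based on the two context fields."""
    if isinstance(ctx.exception, APIStatusError) and not ctx.is_shotgun_account:
        # A BYOK user hit a provider error (bad API key, rate limit, ...).
        return "provider_error"
    return "internal_error"


ctx = AgentErrorContext(exception=ValueError("boom"), is_shotgun_account=False)
print(classify_error(ctx))  # -> "internal_error"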