shotgun_sh-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shotgun-sh might be problematic.
Files changed (130)
  1. shotgun/__init__.py +5 -0
  2. shotgun/agents/__init__.py +1 -0
  3. shotgun/agents/agent_manager.py +651 -0
  4. shotgun/agents/common.py +549 -0
  5. shotgun/agents/config/__init__.py +13 -0
  6. shotgun/agents/config/constants.py +17 -0
  7. shotgun/agents/config/manager.py +294 -0
  8. shotgun/agents/config/models.py +185 -0
  9. shotgun/agents/config/provider.py +206 -0
  10. shotgun/agents/conversation_history.py +106 -0
  11. shotgun/agents/conversation_manager.py +105 -0
  12. shotgun/agents/export.py +96 -0
  13. shotgun/agents/history/__init__.py +5 -0
  14. shotgun/agents/history/compaction.py +85 -0
  15. shotgun/agents/history/constants.py +19 -0
  16. shotgun/agents/history/context_extraction.py +108 -0
  17. shotgun/agents/history/history_building.py +104 -0
  18. shotgun/agents/history/history_processors.py +426 -0
  19. shotgun/agents/history/message_utils.py +84 -0
  20. shotgun/agents/history/token_counting.py +429 -0
  21. shotgun/agents/history/token_estimation.py +138 -0
  22. shotgun/agents/messages.py +35 -0
  23. shotgun/agents/models.py +275 -0
  24. shotgun/agents/plan.py +98 -0
  25. shotgun/agents/research.py +108 -0
  26. shotgun/agents/specify.py +98 -0
  27. shotgun/agents/tasks.py +96 -0
  28. shotgun/agents/tools/__init__.py +34 -0
  29. shotgun/agents/tools/codebase/__init__.py +28 -0
  30. shotgun/agents/tools/codebase/codebase_shell.py +256 -0
  31. shotgun/agents/tools/codebase/directory_lister.py +141 -0
  32. shotgun/agents/tools/codebase/file_read.py +144 -0
  33. shotgun/agents/tools/codebase/models.py +252 -0
  34. shotgun/agents/tools/codebase/query_graph.py +67 -0
  35. shotgun/agents/tools/codebase/retrieve_code.py +81 -0
  36. shotgun/agents/tools/file_management.py +218 -0
  37. shotgun/agents/tools/user_interaction.py +37 -0
  38. shotgun/agents/tools/web_search/__init__.py +60 -0
  39. shotgun/agents/tools/web_search/anthropic.py +144 -0
  40. shotgun/agents/tools/web_search/gemini.py +85 -0
  41. shotgun/agents/tools/web_search/openai.py +98 -0
  42. shotgun/agents/tools/web_search/utils.py +20 -0
  43. shotgun/build_constants.py +20 -0
  44. shotgun/cli/__init__.py +1 -0
  45. shotgun/cli/codebase/__init__.py +5 -0
  46. shotgun/cli/codebase/commands.py +202 -0
  47. shotgun/cli/codebase/models.py +21 -0
  48. shotgun/cli/config.py +275 -0
  49. shotgun/cli/export.py +81 -0
  50. shotgun/cli/models.py +10 -0
  51. shotgun/cli/plan.py +73 -0
  52. shotgun/cli/research.py +85 -0
  53. shotgun/cli/specify.py +69 -0
  54. shotgun/cli/tasks.py +78 -0
  55. shotgun/cli/update.py +152 -0
  56. shotgun/cli/utils.py +25 -0
  57. shotgun/codebase/__init__.py +12 -0
  58. shotgun/codebase/core/__init__.py +46 -0
  59. shotgun/codebase/core/change_detector.py +358 -0
  60. shotgun/codebase/core/code_retrieval.py +243 -0
  61. shotgun/codebase/core/ingestor.py +1497 -0
  62. shotgun/codebase/core/language_config.py +297 -0
  63. shotgun/codebase/core/manager.py +1662 -0
  64. shotgun/codebase/core/nl_query.py +331 -0
  65. shotgun/codebase/core/parser_loader.py +128 -0
  66. shotgun/codebase/models.py +111 -0
  67. shotgun/codebase/service.py +206 -0
  68. shotgun/logging_config.py +227 -0
  69. shotgun/main.py +167 -0
  70. shotgun/posthog_telemetry.py +158 -0
  71. shotgun/prompts/__init__.py +5 -0
  72. shotgun/prompts/agents/__init__.py +1 -0
  73. shotgun/prompts/agents/export.j2 +350 -0
  74. shotgun/prompts/agents/partials/codebase_understanding.j2 +87 -0
  75. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +37 -0
  76. shotgun/prompts/agents/partials/content_formatting.j2 +65 -0
  77. shotgun/prompts/agents/partials/interactive_mode.j2 +26 -0
  78. shotgun/prompts/agents/plan.j2 +144 -0
  79. shotgun/prompts/agents/research.j2 +69 -0
  80. shotgun/prompts/agents/specify.j2 +51 -0
  81. shotgun/prompts/agents/state/codebase/codebase_graphs_available.j2 +19 -0
  82. shotgun/prompts/agents/state/system_state.j2 +31 -0
  83. shotgun/prompts/agents/tasks.j2 +143 -0
  84. shotgun/prompts/codebase/__init__.py +1 -0
  85. shotgun/prompts/codebase/cypher_query_patterns.j2 +223 -0
  86. shotgun/prompts/codebase/cypher_system.j2 +28 -0
  87. shotgun/prompts/codebase/enhanced_query_context.j2 +10 -0
  88. shotgun/prompts/codebase/partials/cypher_rules.j2 +24 -0
  89. shotgun/prompts/codebase/partials/graph_schema.j2 +30 -0
  90. shotgun/prompts/codebase/partials/temporal_context.j2 +21 -0
  91. shotgun/prompts/history/__init__.py +1 -0
  92. shotgun/prompts/history/incremental_summarization.j2 +53 -0
  93. shotgun/prompts/history/summarization.j2 +46 -0
  94. shotgun/prompts/loader.py +140 -0
  95. shotgun/py.typed +0 -0
  96. shotgun/sdk/__init__.py +13 -0
  97. shotgun/sdk/codebase.py +219 -0
  98. shotgun/sdk/exceptions.py +17 -0
  99. shotgun/sdk/models.py +189 -0
  100. shotgun/sdk/services.py +23 -0
  101. shotgun/sentry_telemetry.py +87 -0
  102. shotgun/telemetry.py +93 -0
  103. shotgun/tui/__init__.py +0 -0
  104. shotgun/tui/app.py +116 -0
  105. shotgun/tui/commands/__init__.py +76 -0
  106. shotgun/tui/components/prompt_input.py +69 -0
  107. shotgun/tui/components/spinner.py +86 -0
  108. shotgun/tui/components/splash.py +25 -0
  109. shotgun/tui/components/vertical_tail.py +13 -0
  110. shotgun/tui/screens/chat.py +782 -0
  111. shotgun/tui/screens/chat.tcss +43 -0
  112. shotgun/tui/screens/chat_screen/__init__.py +0 -0
  113. shotgun/tui/screens/chat_screen/command_providers.py +219 -0
  114. shotgun/tui/screens/chat_screen/hint_message.py +40 -0
  115. shotgun/tui/screens/chat_screen/history.py +221 -0
  116. shotgun/tui/screens/directory_setup.py +113 -0
  117. shotgun/tui/screens/provider_config.py +221 -0
  118. shotgun/tui/screens/splash.py +31 -0
  119. shotgun/tui/styles.tcss +10 -0
  120. shotgun/tui/utils/__init__.py +5 -0
  121. shotgun/tui/utils/mode_progress.py +257 -0
  122. shotgun/utils/__init__.py +5 -0
  123. shotgun/utils/env_utils.py +35 -0
  124. shotgun/utils/file_system_utils.py +36 -0
  125. shotgun/utils/update_checker.py +375 -0
  126. shotgun_sh-0.1.0.dist-info/METADATA +466 -0
  127. shotgun_sh-0.1.0.dist-info/RECORD +130 -0
  128. shotgun_sh-0.1.0.dist-info/WHEEL +4 -0
  129. shotgun_sh-0.1.0.dist-info/entry_points.txt +2 -0
  130. shotgun_sh-0.1.0.dist-info/licenses/LICENSE +21 -0
shotgun/agents/history/history_processors.py
@@ -0,0 +1,426 @@
+"""History processors for managing conversation history in Shotgun agents."""
+
+from typing import TYPE_CHECKING, Any, Protocol
+
+from pydantic_ai.messages import (
+    ModelMessage,
+    ModelRequest,
+    ModelResponse,
+    TextPart,
+    UserPromptPart,
+)
+
+from shotgun.agents.config.models import shotgun_model_request
+from shotgun.agents.messages import AgentSystemPrompt, SystemStatusPrompt
+from shotgun.agents.models import AgentDeps
+from shotgun.logging_config import get_logger
+from shotgun.prompts import PromptLoader
+
+from .constants import SUMMARY_MARKER, TOKEN_LIMIT_RATIO
+from .context_extraction import extract_context_from_messages
+from .history_building import ensure_ends_with_model_request
+from .message_utils import (
+    get_agent_system_prompt,
+    get_first_user_request,
+    get_latest_system_status,
+)
+from .token_estimation import (
+    calculate_max_summarization_tokens as _calculate_max_summarization_tokens,
+)
+from .token_estimation import (
+    estimate_post_summary_tokens,
+    estimate_tokens_from_messages,
+)
+
+if TYPE_CHECKING:
+    pass
+
+
+class ContextProtocol(Protocol):
+    """Protocol defining the interface needed by token_limit_compactor."""
+
+    deps: AgentDeps
+    usage: Any  # Optional usage information
+
+
+logger = get_logger(__name__)
+
+# Global prompt loader instance
+prompt_loader = PromptLoader()
+
+
+def is_summary_part(part: Any) -> bool:
+    """Check if a message part is a compacted summary."""
+    return isinstance(part, TextPart) and part.content.startswith(SUMMARY_MARKER)
+
+
+def find_last_summary_index(messages: list[ModelMessage]) -> int | None:
+    """Find the index of the last summary in the message history.
+
+    Args:
+        messages: List of messages in the conversation history
+
+    Returns:
+        Index of the last summary message, or None if no summary exists.
+    """
+    for i in range(len(messages) - 1, -1, -1):
+        if isinstance(messages[i], ModelResponse):
+            for part in messages[i].parts:
+                if is_summary_part(part):
+                    return i
+    return None
+
+
+def extract_summary_content(summary_part: Any) -> str:
+    """Extract the summary content without the marker prefix."""
+    if isinstance(summary_part, TextPart):
+        return summary_part.content[len(SUMMARY_MARKER) :].strip()
+    return ""
+
+
+def create_marked_summary_part(summary_response: Any) -> TextPart:
+    """Create a TextPart with the summary marker prefix.
+
+    This consolidates the duplicate summary creation logic.
+    """
+    first_part = summary_response.parts[0]
+    if isinstance(first_part, TextPart):
+        summary_content = f"{SUMMARY_MARKER} {first_part.content}"
+        return TextPart(content=summary_content)
+    else:
+        # Fallback in case the response part is not TextPart
+        summary_content = f"{SUMMARY_MARKER} Summary content unavailable"
+        return TextPart(content=summary_content)
+
+
+def log_summarization_request(
+    model: Any, max_tokens: int, prompt: str, context: str, request_type: str
+) -> None:
+    """Log detailed summarization request information.
+
+    Consolidates duplicate logging patterns across the codebase.
+    """
+    logger.debug(f"{request_type} SUMMARIZATION REQUEST - Model: {model}")
+    logger.debug(f"{request_type} SUMMARIZATION REQUEST - Max tokens: {max_tokens}")
+    logger.debug(f"{request_type} SUMMARIZATION REQUEST - Instructions: {prompt}")
+    logger.debug(f"{request_type} SUMMARIZATION REQUEST - Context: {context}")
+
+
+def log_summarization_response(response: Any, request_type: str) -> None:
+    """Log detailed summarization response information.
+
+    Consolidates duplicate logging patterns across the codebase.
+    """
+    logger.debug(f"{request_type} SUMMARIZATION RESPONSE - Full response: {response}")
+    logger.debug(
+        f"{request_type} SUMMARIZATION RESPONSE - Content: "
+        f"{response.parts[0] if response.parts else 'No content'}"
+    )
+    logger.debug(f"{request_type} SUMMARIZATION RESPONSE - Usage: {response.usage}")
+
+
+# Use centralized calculate_max_summarization_tokens function
+calculate_max_summarization_tokens = _calculate_max_summarization_tokens
+
+
+async def token_limit_compactor(
+    ctx: ContextProtocol,
+    messages: list[ModelMessage],
+) -> list[ModelMessage]:
+    """Compact message history based on token limits with incremental processing.
+
+    This incremental compactor prevents cascading summarization by:
+    1. Preserving existing summaries
+    2. Only processing NEW messages since the last summary
+    3. Combining summaries incrementally
+    4. Never re-processing already compacted content
+
+    Args:
+        ctx: Run context with usage information and dependencies
+        messages: Current conversation history
+
+    Returns:
+        Compacted list of messages within token limits
+    """
+    # Extract dependencies from context
+    deps = ctx.deps
+
+    # Get token limit from model configuration
+    model_max_tokens = deps.llm_model.max_input_tokens
+    max_tokens = int(model_max_tokens * TOKEN_LIMIT_RATIO)
+
+    # Find existing summaries to determine compaction strategy
+    last_summary_index = find_last_summary_index(messages)
+
+    if last_summary_index is not None:
+        # Check if post-summary conversation exceeds threshold for incremental compaction
+        post_summary_tokens = estimate_post_summary_tokens(
+            messages, last_summary_index, deps.llm_model
+        )
+        post_summary_percentage = (
+            (post_summary_tokens / max_tokens) * 100 if max_tokens > 0 else 0
+        )
+
+        logger.debug(
+            f"Found existing summary at index {last_summary_index}. "
+            f"Post-summary tokens: {post_summary_tokens}, threshold: {max_tokens}, "
+            f"percentage: {post_summary_percentage:.2f}%"
+        )
+
+        # Only do incremental compaction if post-summary conversation exceeds threshold
+        if post_summary_tokens < max_tokens:
+            logger.debug(
+                f"Post-summary conversation under threshold ({post_summary_tokens} < {max_tokens}), "
+                f"keeping all {len(messages)} messages"
+            )
+            return messages
+
+        # INCREMENTAL COMPACTION: Process new messages since last summary
+        logger.debug(
+            "Post-summary conversation exceeds threshold, performing incremental compaction"
+        )
+
+        # Extract existing summary content
+        summary_message = messages[last_summary_index]
+        existing_summary_part = None
+        for part in summary_message.parts:
+            if is_summary_part(part):
+                existing_summary_part = part
+                break
+
+        if not existing_summary_part:
+            logger.warning(
+                "Found summary index but no summary part, falling back to full compaction"
+            )
+            return await _full_compaction(deps, messages)
+
+        existing_summary = extract_summary_content(existing_summary_part)
+
+        # Get messages AFTER the last summary for incremental processing
+        messages_to_process = messages[last_summary_index + 1 :]
+
+        if not messages_to_process:
+            logger.debug(
+                "No new messages since last summary, returning existing history"
+            )
+            return messages
+
+        # Extract context from new messages only
+        new_context = extract_context_from_messages(messages_to_process)
+
+        # Check if there's meaningful content (responses) to summarize
+        has_meaningful_content = any(
+            isinstance(msg, ModelResponse) for msg in messages_to_process
+        )
+
+        # If there are only user requests and no responses, no need to summarize
+        if not has_meaningful_content or not new_context.strip():
+            logger.debug(
+                "No meaningful new content to summarize, returning existing history"
+            )
+            return messages
+
+        # Use incremental summarization prompt with proper template variables
+        try:
+            incremental_prompt = prompt_loader.render(
+                "history/incremental_summarization.j2",
+                existing_summary=existing_summary,
+                new_messages=new_context,
+            )
+        except Exception:
+            # Fallback to regular summarization if incremental template doesn't exist yet
+            logger.warning(
+                "Incremental summarization template not found, using regular template"
+            )
+            incremental_prompt = prompt_loader.render("history/summarization.j2")
+            # Combine existing and new context for fallback
+            new_context = (
+                f"EXISTING SUMMARY:\n{existing_summary}\n\nNEW MESSAGES:\n{new_context}"
+            )
+
+        # Create incremental summary
+        request_messages: list[ModelMessage] = [
+            ModelRequest.user_text_prompt(new_context, instructions=incremental_prompt)
+        ]
+
+        # Calculate optimal max_tokens for summarization
+        max_tokens = calculate_max_summarization_tokens(
+            deps.llm_model, request_messages
+        )
+
+        # Debug logging using shared utilities
+        log_summarization_request(
+            deps.llm_model, max_tokens, incremental_prompt, new_context, "INCREMENTAL"
+        )
+
+        # Use shotgun wrapper to ensure full token utilization
+        summary_response = await shotgun_model_request(
+            model_config=deps.llm_model,
+            messages=request_messages,
+            max_tokens=max_tokens,  # Use calculated optimal tokens for summarization
+        )
+
+        log_summarization_response(summary_response, "INCREMENTAL")
+
+        # Calculate token reduction (from new messages only)
+        new_tokens = len(new_context.split())  # Rough estimate
+        summary_tokens = (
+            summary_response.usage.output_tokens if summary_response.usage else 0
+        )
+        logger.debug(
+            f"Incremental compaction: processed {len(messages_to_process)} new messages, "
+            f"reduced ~{new_tokens} tokens to {summary_tokens} tokens"
+        )
+
+        # Build the new compacted history with the updated summary
+        new_summary_part = create_marked_summary_part(summary_response)
+
+        # Extract essential context from messages before the last summary (if any)
+        agent_prompt = ""
+        system_status = ""
+        first_user_prompt = ""
+        if last_summary_index > 0:
+            # Get agent system prompt and first user request from the original conversation
+            agent_prompt = get_agent_system_prompt(messages[:last_summary_index]) or ""
+            first_user_prompt = (
+                get_first_user_request(messages[:last_summary_index]) or ""
+            )
+
+        # Get the latest system status from all messages
+        system_status = get_latest_system_status(messages) or ""
+
+        # Create the updated summary message
+        updated_summary_message = ModelResponse(parts=[new_summary_part])
+
+        # Build final compacted history with CLEAN structure
+        compacted_messages: list[ModelMessage] = []
+
+        # Build parts for the initial request
+        from pydantic_ai.messages import ModelRequestPart
+
+        parts: list[ModelRequestPart] = []
+        if agent_prompt:
+            parts.append(AgentSystemPrompt(content=agent_prompt))
+        if system_status:
+            parts.append(SystemStatusPrompt(content=system_status))
+        if first_user_prompt:
+            parts.append(UserPromptPart(content=first_user_prompt))
+
+        # Only add if we have at least one part
+        if parts:
+            compacted_messages.append(ModelRequest(parts=parts))
+
+        # Add the summary
+        compacted_messages.append(updated_summary_message)
+
+        # Ensure history ends with ModelRequest for PydanticAI compatibility
+        compacted_messages = ensure_ends_with_model_request(
+            compacted_messages, messages
+        )
+
+        logger.debug(
+            f"Incremental compaction complete: {len(messages)} -> {len(compacted_messages)} messages"
+        )
+        return compacted_messages
+
+    else:
+        # Check if total conversation exceeds threshold for full compaction
+        total_tokens = estimate_tokens_from_messages(messages, deps.llm_model)
+        total_percentage = (total_tokens / max_tokens) * 100 if max_tokens > 0 else 0
+
+        logger.debug(
+            f"No existing summary found. Total tokens: {total_tokens}, threshold: {max_tokens}, "
+            f"percentage: {total_percentage:.2f}%"
+        )
+
+        # Only do full compaction if total conversation exceeds threshold
+        if total_tokens < max_tokens:
+            logger.debug(
+                f"Total conversation under threshold ({total_tokens} < {max_tokens}), "
+                f"keeping all {len(messages)} messages"
+            )
+            return messages
+
+        # FIRST-TIME COMPACTION: Process all messages
+        logger.debug(
+            "Total conversation exceeds threshold, performing initial full compaction"
+        )
+        return await _full_compaction(deps, messages)
+
+
+async def _full_compaction(
+    deps: AgentDeps,
+    messages: list[ModelMessage],
+) -> list[ModelMessage]:
+    """Perform full compaction for first-time summarization."""
+    # Extract context from all messages
+    context = extract_context_from_messages(messages)
+
+    # Use regular summarization prompt
+    summarization_prompt = prompt_loader.render("history/summarization.j2")
+    request_messages: list[ModelMessage] = [
+        ModelRequest.user_text_prompt(context, instructions=summarization_prompt)
+    ]
+
+    # Calculate optimal max_tokens for summarization
+    max_tokens = calculate_max_summarization_tokens(deps.llm_model, request_messages)
+
+    # Debug logging using shared utilities
+    log_summarization_request(
+        deps.llm_model, max_tokens, summarization_prompt, context, "FULL"
+    )
+
+    # Use shotgun wrapper to ensure full token utilization
+    summary_response = await shotgun_model_request(
+        model_config=deps.llm_model,
+        messages=request_messages,
+        max_tokens=max_tokens,  # Use calculated optimal tokens for summarization
+    )
+
+    # Calculate token reduction
+    current_tokens = estimate_tokens_from_messages(messages, deps.llm_model)
+    summary_usage = summary_response.usage
+    reduction_percentage = (
+        ((current_tokens - summary_usage.output_tokens) / current_tokens) * 100
+        if current_tokens > 0 and summary_usage
+        else 0
+    )
+
+    log_summarization_response(summary_response, "FULL")
+
+    # Log token reduction (already calculated above)
+    logger.debug(
+        "Full compaction: %s tokens -> %s tokens (%.2f%% reduction)",
+        current_tokens,
+        summary_usage.output_tokens if summary_usage else 0,
+        reduction_percentage,
+    )
+
+    # Mark summary with special prefix
+    marked_summary_part = create_marked_summary_part(summary_response)
+
+    # Build compacted history structure
+    agent_prompt = get_agent_system_prompt(messages) or ""
+    system_status = get_latest_system_status(messages) or ""
+    user_prompt = get_first_user_request(messages) or ""
+
+    # Build parts for the initial request
+    from pydantic_ai.messages import ModelRequestPart
+
+    parts: list[ModelRequestPart] = []
+    if agent_prompt:
+        parts.append(AgentSystemPrompt(content=agent_prompt))
+    if system_status:
+        parts.append(SystemStatusPrompt(content=system_status))
+    if user_prompt:
+        parts.append(UserPromptPart(content=user_prompt))
+
+    # Create base structure
+    compacted_messages: list[ModelMessage] = []
+    if parts:
+        compacted_messages.append(ModelRequest(parts=parts))
+    compacted_messages.append(ModelResponse(parts=[marked_summary_part]))
+
+    # Ensure history ends with ModelRequest for PydanticAI compatibility
+    compacted_messages = ensure_ends_with_model_request(compacted_messages, messages)
+
+    return compacted_messages
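
A minimal usage sketch, not part of the released wheel: it exercises the summary-marker helpers above on a hand-built history, assuming the wheel is installed so that shotgun and pydantic_ai are importable.

# Illustrative sketch only; hypothetical history contents.
from pydantic_ai.messages import ModelRequest, ModelResponse, TextPart

from shotgun.agents.history.constants import SUMMARY_MARKER
from shotgun.agents.history.history_processors import (
    find_last_summary_index,
    is_summary_part,
)

history = [
    ModelRequest.user_text_prompt("Summarize the repo layout"),
    # A compacted summary is an ordinary ModelResponse whose TextPart starts
    # with SUMMARY_MARKER; that prefix is what the compactor detects and strips.
    ModelResponse(
        parts=[TextPart(content=f"{SUMMARY_MARKER} The repo has a CLI and a TUI.")]
    ),
    ModelRequest.user_text_prompt("Now plan the next milestone"),
]

assert is_summary_part(history[1].parts[0])
assert find_last_summary_index(history) == 1

Because the marker survives in the message history, a later run of token_limit_compactor can treat everything after index 1 as the only new material to summarize.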
shotgun/agents/history/message_utils.py
@@ -0,0 +1,84 @@
+"""Utility functions for working with PydanticAI messages."""
+
+from pydantic_ai.messages import (
+    ModelMessage,
+    ModelRequest,
+    SystemPromptPart,
+    UserPromptPart,
+)
+
+from shotgun.agents.messages import AgentSystemPrompt, SystemStatusPrompt
+
+
+def get_first_user_request(messages: list[ModelMessage]) -> str | None:
+    """Extract first user request content from messages."""
+    for msg in messages:
+        if isinstance(msg, ModelRequest):
+            for part in msg.parts:
+                if isinstance(part, UserPromptPart) and isinstance(part.content, str):
+                    return part.content
+    return None
+
+
+def get_last_user_request(messages: list[ModelMessage]) -> ModelRequest | None:
+    """Extract the last user request from messages."""
+    for msg in reversed(messages):
+        if isinstance(msg, ModelRequest):
+            for part in msg.parts:
+                if isinstance(part, UserPromptPart):
+                    return msg
+    return None
+
+
+def get_user_content_from_request(request: ModelRequest) -> str | None:
+    """Extract user prompt content from a ModelRequest."""
+    for part in request.parts:
+        if isinstance(part, UserPromptPart) and isinstance(part.content, str):
+            return part.content
+    return None
+
+
+def get_system_prompt(messages: list[ModelMessage]) -> str | None:
+    """Extract system prompt from messages (any SystemPromptPart)."""
+    for msg in messages:
+        if isinstance(msg, ModelRequest):
+            for part in msg.parts:
+                if isinstance(part, SystemPromptPart):
+                    return part.content
+    return None
+
+
+def get_agent_system_prompt(messages: list[ModelMessage]) -> str | None:
+    """Extract the main agent system prompt from messages.
+
+    Prioritizes AgentSystemPrompt but falls back to generic SystemPromptPart
+    if no AgentSystemPrompt is found.
+    """
+    # First try to find AgentSystemPrompt
+    for msg in messages:
+        if isinstance(msg, ModelRequest):
+            for part in msg.parts:
+                if isinstance(part, AgentSystemPrompt):
+                    return part.content
+
+    # Fall back to any SystemPromptPart (excluding SystemStatusPrompt)
+    for msg in messages:
+        if isinstance(msg, ModelRequest):
+            for part in msg.parts:
+                if isinstance(part, SystemPromptPart) and not isinstance(
+                    part, SystemStatusPrompt
+                ):
+                    return part.content
+
+    return None
+
+
+def get_latest_system_status(messages: list[ModelMessage]) -> str | None:
+    """Extract the most recent system status prompt from messages."""
+    # Iterate in reverse to find the most recent status
+    for msg in reversed(messages):
+        if isinstance(msg, ModelRequest):
+            for part in msg.parts:
+                if isinstance(part, SystemStatusPrompt):
+                    return part.content
+    return None
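
A small usage sketch, not part of the package: it builds a history by hand and checks what the extraction helpers in message_utils.py return. AgentSystemPrompt and SystemStatusPrompt are the prompt-part classes this module already imports from shotgun.agents.messages; the history contents are hypothetical.

# Illustrative sketch only; assumes the wheel is installed.
from pydantic_ai.messages import ModelRequest, ModelResponse, TextPart, UserPromptPart

from shotgun.agents.history.message_utils import (
    get_agent_system_prompt,
    get_first_user_request,
    get_latest_system_status,
)
from shotgun.agents.messages import AgentSystemPrompt, SystemStatusPrompt

messages = [
    ModelRequest(
        parts=[
            AgentSystemPrompt(content="You are the research agent."),
            SystemStatusPrompt(content="Codebase graph: not indexed"),
            UserPromptPart(content="Research caching options"),
        ]
    ),
    ModelResponse(parts=[TextPart(content="Here are three options...")]),
    # A later request carries an updated status part.
    ModelRequest(parts=[SystemStatusPrompt(content="Codebase graph: indexed")]),
]

assert get_agent_system_prompt(messages) == "You are the research agent."
assert get_first_user_request(messages) == "Research caching options"
# Reverse iteration means the most recent status wins.
assert get_latest_system_status(messages) == "Codebase graph: indexed"

These are the same helpers the compactor uses to rebuild a clean initial request (agent prompt, latest status, first user prompt) in front of the generated summary.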