code-puppy 0.0.172__py3-none-any.whl → 0.0.174__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (34)
  1. code_puppy/agent.py +14 -14
  2. code_puppy/agents/__init__.py +4 -6
  3. code_puppy/agents/agent_manager.py +15 -187
  4. code_puppy/agents/base_agent.py +798 -4
  5. code_puppy/command_line/command_handler.py +40 -41
  6. code_puppy/command_line/mcp/add_command.py +1 -1
  7. code_puppy/command_line/mcp/install_command.py +1 -1
  8. code_puppy/command_line/mcp/start_all_command.py +3 -6
  9. code_puppy/command_line/mcp/start_command.py +0 -5
  10. code_puppy/command_line/mcp/stop_all_command.py +3 -6
  11. code_puppy/command_line/mcp/stop_command.py +2 -6
  12. code_puppy/command_line/model_picker_completion.py +2 -2
  13. code_puppy/command_line/prompt_toolkit_completion.py +2 -2
  14. code_puppy/config.py +2 -3
  15. code_puppy/main.py +13 -49
  16. code_puppy/messaging/message_queue.py +4 -4
  17. code_puppy/summarization_agent.py +2 -2
  18. code_puppy/tools/agent_tools.py +5 -4
  19. code_puppy/tools/browser/vqa_agent.py +1 -3
  20. code_puppy/tools/command_runner.py +1 -1
  21. code_puppy/tui/app.py +49 -78
  22. code_puppy/tui/screens/settings.py +2 -2
  23. code_puppy/tui_state.py +55 -0
  24. {code_puppy-0.0.172.dist-info → code_puppy-0.0.174.dist-info}/METADATA +2 -2
  25. {code_puppy-0.0.172.dist-info → code_puppy-0.0.174.dist-info}/RECORD +29 -33
  26. code_puppy/agents/agent_orchestrator.json +0 -26
  27. code_puppy/agents/runtime_manager.py +0 -272
  28. code_puppy/command_line/meta_command_handler.py +0 -153
  29. code_puppy/message_history_processor.py +0 -486
  30. code_puppy/state_management.py +0 -159
  31. {code_puppy-0.0.172.data → code_puppy-0.0.174.data}/data/code_puppy/models.json +0 -0
  32. {code_puppy-0.0.172.dist-info → code_puppy-0.0.174.dist-info}/WHEEL +0 -0
  33. {code_puppy-0.0.172.dist-info → code_puppy-0.0.174.dist-info}/entry_points.txt +0 -0
  34. {code_puppy-0.0.172.dist-info → code_puppy-0.0.174.dist-info}/licenses/LICENSE +0 -0
code_puppy/message_history_processor.py (deleted)
@@ -1,486 +0,0 @@
- import json
- import queue
- from typing import Any, List, Set, Tuple
-
- import pydantic
- from pydantic_ai.messages import (
-     ModelMessage,
-     ModelRequest,
-     TextPart,
-     ToolCallPart,
-     ToolCallPartDelta,
-     ToolReturn,
-     ToolReturnPart,
- )
-
- from code_puppy.config import (
-     get_model_name,
-     get_protected_token_count,
-     get_compaction_threshold,
-     get_compaction_strategy,
- )
- from code_puppy.messaging import emit_error, emit_info, emit_warning
- from code_puppy.model_factory import ModelFactory
- from code_puppy.state_management import (
-     add_compacted_message_hash,
-     get_compacted_message_hashes,
-     get_message_history,
-     hash_message,
-     set_message_history,
- )
- from code_puppy.summarization_agent import run_summarization_sync
-
- # Protected tokens are now configurable via get_protected_token_count()
- # Default is 50000 but can be customized in ~/.code_puppy/puppy.cfg
-
-
- def stringify_message_part(part) -> str:
-     """
-     Convert a message part to a string representation for token estimation or other uses.
-
-     Args:
-         part: A message part that may contain content or be a tool call
-
-     Returns:
-         String representation of the message part
-     """
-     result = ""
-     if hasattr(part, "part_kind"):
-         result += part.part_kind + ": "
-     else:
-         result += str(type(part)) + ": "
-
-     # Handle content
-     if hasattr(part, "content") and part.content:
-         # Handle different content types
-         if isinstance(part.content, str):
-             result = part.content
-         elif isinstance(part.content, pydantic.BaseModel):
-             result = json.dumps(part.content.model_dump())
-         elif isinstance(part.content, dict):
-             result = json.dumps(part.content)
-         else:
-             result = str(part.content)
-
-     # Handle tool calls which may have additional token costs
-     # If part also has content, we'll process tool calls separately
-     if hasattr(part, "tool_name") and part.tool_name:
-         # Estimate tokens for tool name and parameters
-         tool_text = part.tool_name
-         if hasattr(part, "args"):
-             tool_text += f" {str(part.args)}"
-         result += tool_text
-
-     return result
-
-
- def estimate_tokens_for_message(message: ModelMessage) -> int:
-     """
-     Estimate the number of tokens in a message using len(message) / 4.
-     Simple and fast replacement for tiktoken.
-     """
-     total_tokens = 0
-
-     for part in message.parts:
-         part_str = stringify_message_part(part)
-         if part_str:
-             total_tokens += len(part_str)
-
-     return int(max(1, total_tokens) / 4)
-
-
- def filter_huge_messages(messages: List[ModelMessage]) -> List[ModelMessage]:
-     if not messages:
-         return []
-
-     # Never drop the system prompt, even if it is extremely large.
-     system_message, *rest = messages
-     filtered_rest = [
-         m for m in rest if estimate_tokens_for_message(m) < 50000
-     ]
-     return [system_message] + filtered_rest
-
-
- def _is_tool_call_part(part: Any) -> bool:
-     if isinstance(part, (ToolCallPart, ToolCallPartDelta)):
-         return True
-
-     part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
-     if part_kind == "tool-call":
-         return True
-
-     has_tool_name = getattr(part, "tool_name", None) is not None
-     has_args = getattr(part, "args", None) is not None
-     has_args_delta = getattr(part, "args_delta", None) is not None
-
-     return bool(has_tool_name and (has_args or has_args_delta))
-
-
- def _is_tool_return_part(part: Any) -> bool:
-     if isinstance(part, (ToolReturnPart, ToolReturn)):
-         return True
-
-     part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
-     if part_kind in {"tool-return", "tool-result"}:
-         return True
-
-     if getattr(part, "tool_call_id", None) is None:
-         return False
-
-     has_content = getattr(part, "content", None) is not None
-     has_content_delta = getattr(part, "content_delta", None) is not None
-     return bool(has_content or has_content_delta)
-
-
- def split_messages_for_protected_summarization(
-     messages: List[ModelMessage],
- ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
-     """
-     Split messages into two groups: messages to summarize and protected recent messages.
-
-     Returns:
-         Tuple of (messages_to_summarize, protected_messages)
-
-     The protected_messages are the most recent messages that total up to the configured protected token count.
-     The system message (first message) is always protected.
-     All other messages that don't fit in the protected zone will be summarized.
-     """
-     if len(messages) <= 1:  # Just system message or empty
-         return [], messages
-
-     # Always protect the system message (first message)
-     system_message = messages[0]
-     system_tokens = estimate_tokens_for_message(system_message)
-
-     if len(messages) == 1:
-         return [], messages
-
-     # Get the configured protected token count
-     protected_tokens_limit = get_protected_token_count()
-
-     # Calculate tokens for messages from most recent backwards (excluding system message)
-     protected_messages = []
-     protected_token_count = system_tokens  # Start with system message tokens
-
-     # Go backwards through non-system messages to find protected zone
-     for i in range(len(messages) - 1, 0, -1):  # Stop at 1, not 0 (skip system message)
-         message = messages[i]
-         message_tokens = estimate_tokens_for_message(message)
-
-         # If adding this message would exceed protected tokens, stop here
-         if protected_token_count + message_tokens > protected_tokens_limit:
-             break
-
-         protected_messages.append(message)
-         protected_token_count += message_tokens
-
-     # Messages that were added while scanning backwards are currently in reverse order.
-     # Reverse them to restore chronological ordering, then prepend the system prompt.
-     protected_messages.reverse()
-     protected_messages.insert(0, system_message)
-
-     # Messages to summarize are everything between the system message and the
-     # protected tail zone we just constructed.
-     protected_start_idx = max(1, len(messages) - (len(protected_messages) - 1))
-     messages_to_summarize = messages[1:protected_start_idx]
-
-     emit_info(
-         f"🔒 Protecting {len(protected_messages)} recent messages ({protected_token_count} tokens, limit: {protected_tokens_limit})"
-     )
-     emit_info(f"📝 Summarizing {len(messages_to_summarize)} older messages")
-
-     return messages_to_summarize, protected_messages
-
-
- def summarize_messages(
-     messages: List[ModelMessage], with_protection: bool = True
- ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
-     """
-     Summarize messages while protecting recent messages up to PROTECTED_TOKENS.
-
-     Returns:
-         Tuple of (compacted_messages, summarized_source_messages)
-         where compacted_messages always preserves the original system message
-         as the first entry.
-     """
-     messages_to_summarize: List[ModelMessage]
-     protected_messages: List[ModelMessage]
-
-     if with_protection:
-         messages_to_summarize, protected_messages = (
-             split_messages_for_protected_summarization(messages)
-         )
-     else:
-         messages_to_summarize = messages[1:] if messages else []
-         protected_messages = messages[:1]
-
-     if not messages:
-         return [], []
-
-     system_message = messages[0]
-
-     if not messages_to_summarize:
-         # Nothing to summarize, so just return the original sequence
-         return prune_interrupted_tool_calls(messages), []
-
-     instructions = (
-         "The input will be a log of Agentic AI steps that have been taken"
-         " as well as user queries, etc. Summarize the contents of these steps."
-         " The high level details should remain but the bulk of the content from tool-call"
-         " responses should be compacted and summarized. For example if you see a tool-call"
-         " reading a file, and the file contents are large, then in your summary you might just"
-         " write: * used read_file on space_invaders.cpp - contents removed."
-         "\n Make sure your result is a bulleted list of all steps and interactions."
-         "\n\nNOTE: This summary represents older conversation history. Recent messages are preserved separately."
-     )
-
-     try:
-         new_messages = run_summarization_sync(
-             instructions, message_history=messages_to_summarize
-         )
-
-         if not isinstance(new_messages, list):
-             emit_warning(
-                 "Summarization agent returned non-list output; wrapping into message request"
-             )
-             new_messages = [ModelRequest([TextPart(str(new_messages))])]
-
-         compacted: List[ModelMessage] = [system_message] + list(new_messages)
-
-         # Drop the system message from protected_messages because we already included it
-         protected_tail = [msg for msg in protected_messages if msg is not system_message]
-
-         compacted.extend(protected_tail)
-
-         return prune_interrupted_tool_calls(compacted), messages_to_summarize
-     except Exception as e:
-         emit_error(f"Summarization failed during compaction: {e}")
-         return messages, []  # Return original messages on failure
-
-
- def summarize_message(message: ModelMessage) -> ModelMessage:
-     try:
-         # If the message looks like a system/instructions message, skip summarization
-         instructions = getattr(message, "instructions", None)
-         if instructions:
-             return message
-         # If any part is a tool call, skip summarization
-         for part in message.parts:
-             if isinstance(part, ToolCallPart) or getattr(part, "tool_name", None):
-                 return message
-         # Build prompt from textual content parts
-         content_bits: List[str] = []
-         for part in message.parts:
-             s = stringify_message_part(part)
-             if s:
-                 content_bits.append(s)
-         if not content_bits:
-             return message
-         prompt = "Please summarize the following user message:\n" + "\n".join(
-             content_bits
-         )
-         output_text = run_summarization_sync(prompt)
-         summarized = ModelRequest([TextPart(output_text)])
-         return summarized
-     except Exception as e:
-         emit_error(f"Summarization failed: {e}")
-         return message
-
-
- def get_model_context_length() -> int:
-     """
-     Get the context length for the currently configured model from models.json
-     """
-     model_configs = ModelFactory.load_config()
-     model_name = get_model_name()
-
-     # Get context length from model config
-     model_config = model_configs.get(model_name, {})
-     context_length = model_config.get("context_length", 128000)  # Default value
-
-     # Return the full advertised context length
-     return int(context_length)
-
-
- def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMessage]:
-     """
-     Remove any messages that participate in mismatched tool call sequences.
-
-     A mismatched tool call id is one that appears in a ToolCall (model/tool request)
-     without a corresponding tool return, or vice versa. We preserve original order
-     and only drop messages that contain parts referencing mismatched tool_call_ids.
-     """
-     if not messages:
-         return messages
-
-     tool_call_ids: Set[str] = set()
-     tool_return_ids: Set[str] = set()
-
-     # First pass: collect ids for calls vs returns
-     for msg in messages:
-         for part in getattr(msg, "parts", []) or []:
-             tool_call_id = getattr(part, "tool_call_id", None)
-             if not tool_call_id:
-                 continue
-
-             if _is_tool_call_part(part) and not _is_tool_return_part(part):
-                 tool_call_ids.add(tool_call_id)
-             elif _is_tool_return_part(part):
-                 tool_return_ids.add(tool_call_id)
-
-     mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
-     if not mismatched:
-         return messages
-
-     pruned: List[ModelMessage] = []
-     dropped_count = 0
-     for msg in messages:
-         has_mismatched = False
-         for part in getattr(msg, "parts", []) or []:
-             tcid = getattr(part, "tool_call_id", None)
-             if tcid and tcid in mismatched:
-                 has_mismatched = True
-                 break
-         if has_mismatched:
-             dropped_count += 1
-             continue
-         pruned.append(msg)
-
-     if dropped_count:
-         emit_warning(
-             f"Pruned {dropped_count} message(s) with mismatched tool_call_id pairs"
-         )
-     return pruned
-
-
- def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
-     cleaned_history = prune_interrupted_tool_calls(messages)
-
-     total_current_tokens = sum(
-         estimate_tokens_for_message(msg) for msg in cleaned_history
-     )
-
-     model_max = get_model_context_length()
-
-     proportion_used = total_current_tokens / model_max if model_max else 0
-
-     # Check if we're in TUI mode and can update the status bar
-     from code_puppy.state_management import get_tui_app_instance, is_tui_mode
-
-     if is_tui_mode():
-         tui_app = get_tui_app_instance()
-         if tui_app:
-             try:
-                 # Update the status bar instead of emitting a chat message
-                 status_bar = tui_app.query_one("StatusBar")
-                 status_bar.update_token_info(
-                     total_current_tokens, model_max, proportion_used
-                 )
-             except Exception as e:
-                 emit_error(e)
-                 # Fallback to chat message if status bar update fails
-                 emit_info(
-                     f"\n[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f} [/bold white on blue] \n",
-                     message_group="token_context_status",
-                 )
-         else:
-             # Fallback if no TUI app instance
-             emit_info(
-                 f"\n[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f} [/bold white on blue] \n",
-                 message_group="token_context_status",
-             )
-     else:
-         # Non-TUI mode - emit to console as before
-         emit_info(
-             f"\n[bold white on blue] Tokens in context: {total_current_tokens}, total model capacity: {model_max}, proportion used: {proportion_used:.2f} [/bold white on blue] \n"
-         )
-     # Get the configured compaction threshold
-     compaction_threshold = get_compaction_threshold()
-
-     # Get the configured compaction strategy
-     compaction_strategy = get_compaction_strategy()
-
-     if proportion_used > compaction_threshold:
-         filtered_history = filter_huge_messages(cleaned_history)
-
-         if compaction_strategy == "truncation":
-             protected_tokens = get_protected_token_count()
-             result_messages = truncation(filtered_history, protected_tokens)
-             summarized_messages: List[ModelMessage] = []
-         else:
-             result_messages, summarized_messages = summarize_messages(
-                 filtered_history
-             )
-
-         final_token_count = sum(
-             estimate_tokens_for_message(msg) for msg in result_messages
-         )
-         # Update status bar with final token count if in TUI mode
-         if is_tui_mode():
-             tui_app = get_tui_app_instance()
-             if tui_app:
-                 try:
-                     status_bar = tui_app.query_one("StatusBar")
-                     status_bar.update_token_info(
-                         final_token_count, model_max, final_token_count / model_max
-                     )
-                 except Exception:
-                     emit_info(
-                         f"Final token count after processing: {final_token_count}",
-                         message_group="token_context_status",
-                     )
-             else:
-                 emit_info(
-                     f"Final token count after processing: {final_token_count}",
-                     message_group="token_context_status",
-                 )
-         else:
-             emit_info(f"Final token count after processing: {final_token_count}")
-         set_message_history(result_messages)
-         for m in summarized_messages:
-             add_compacted_message_hash(hash_message(m))
-         return result_messages
-
-     set_message_history(cleaned_history)
-     return cleaned_history
-
-
- def truncation(
-     messages: List[ModelMessage], protected_tokens: int
- ) -> List[ModelMessage]:
-     emit_info("Truncating message history to manage token usage")
-     result = [messages[0]]  # Always keep the first message (system prompt)
-     num_tokens = 0
-     stack = queue.LifoQueue()
-
-     # Put messages in reverse order (most recent first) into the stack
-     # but break when we exceed protected_tokens
-     for idx, msg in enumerate(reversed(messages[1:])):  # Skip the first message
-         num_tokens += estimate_tokens_for_message(msg)
-         if num_tokens > protected_tokens:
-             break
-         stack.put(msg)
-
-     # Pop messages from stack to get them in chronological order
-     while not stack.empty():
-         result.append(stack.get())
-
-     result = prune_interrupted_tool_calls(result)
-     return result
-
-
- def message_history_accumulator(messages: List[Any]):
-     existing_history = list(get_message_history())
-     seen_hashes = {hash_message(message) for message in existing_history}
-     compacted_hashes = get_compacted_message_hashes()
-
-     for message in messages:
-         message_hash = hash_message(message)
-         if message_hash in seen_hashes or message_hash in compacted_hashes:
-             continue
-         existing_history.append(message)
-         seen_hashes.add(message_hash)
-
-     updated_history = message_history_processor(existing_history)
-     set_message_history(updated_history)
-     return updated_history
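
For orientation, the removed module's compaction hinged on two pieces: a cheap token estimate (total characters / 4) and a protected tail of recent messages that is never summarized. Below is a minimal standalone sketch of that protected-split behavior; the Msg and Part classes and the 50-token limit are illustrative stand-ins, not code-puppy or pydantic-ai APIs.

from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class Part:
    content: str

@dataclass
class Msg:
    parts: List[Part]

def estimate_tokens(msg: Msg) -> int:
    # Same heuristic as the removed module: total characters / 4.
    total = sum(len(p.content) for p in msg.parts)
    return int(max(1, total) / 4)

def split_protected(messages: List[Msg], limit: int) -> Tuple[List[Msg], List[Msg]]:
    # The system prompt (first message) is always protected; then walk backwards
    # collecting recent messages until the protected-token budget is spent.
    if len(messages) <= 1:
        return [], messages
    system = messages[0]
    budget = estimate_tokens(system)
    tail: List[Msg] = []
    for msg in reversed(messages[1:]):
        cost = estimate_tokens(msg)
        if budget + cost > limit:
            break
        tail.append(msg)
        budget += cost
    tail.reverse()
    protected = [system] + tail
    return messages[1:len(messages) - len(tail)], protected

msgs = [Msg([Part("system prompt")])] + [Msg([Part("x" * 80)]) for _ in range(10)]
to_summarize, protected = split_protected(msgs, limit=50)
print(len(to_summarize), len(protected))  # 8 older messages vs a 3-message protected tail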
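
The prune_interrupted_tool_calls pass above can likewise be shown in isolation: any tool_call_id that appears as a call without a matching return (or vice versa) taints its whole message. A sketch with plain dicts standing in for message parts, illustrative only:

from typing import Dict, List

# Each "message" is a list of parts; a part carries a tool_call id plus a
# kind of "call" or "return". Plain dicts stand in for pydantic-ai parts.
Message = List[Dict[str, str]]

def prune_interrupted(messages: List[Message]) -> List[Message]:
    calls = {p["id"] for m in messages for p in m if p["kind"] == "call"}
    returns = {p["id"] for m in messages for p in m if p["kind"] == "return"}
    mismatched = calls ^ returns  # ids seen as call XOR return only
    return [
        m for m in messages
        if not any(p["id"] in mismatched for p in m if "id" in p)
    ]

history: List[Message] = [
    [{"kind": "call", "id": "1"}],
    [{"kind": "return", "id": "1"}],
    [{"kind": "call", "id": "2"}],  # interrupted: no return ever arrived
]
print(len(prune_interrupted(history)))  # 2: the orphaned call is dropped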
code_puppy/state_management.py (deleted)
@@ -1,159 +0,0 @@
- import json
- from types import ModuleType
- from typing import Any, List, Set
-
- import pydantic
-
- from code_puppy.messaging import emit_info
-
- _tui_mode: bool = False
- _tui_app_instance: Any = None
-
-
- def _require_agent_manager() -> ModuleType:
-     """Import the agent manager module, raising if it is unavailable."""
-     try:
-         from code_puppy.agents import agent_manager
-     except Exception as error:  # pragma: no cover - import errors surface immediately
-         raise RuntimeError("Agent manager module unavailable") from error
-     return agent_manager
-
-
- def add_compacted_message_hash(message_hash: str) -> None:
-     """Add a message hash to the set of compacted message hashes."""
-     manager = _require_agent_manager()
-     manager.add_current_agent_compacted_message_hash(message_hash)
-
-
- def get_compacted_message_hashes() -> Set[str]:
-     """Get the set of compacted message hashes."""
-     manager = _require_agent_manager()
-     return manager.get_current_agent_compacted_message_hashes()
-
-
- def set_tui_mode(enabled: bool) -> None:
-     """Set the global TUI mode state.
-
-     Args:
-         enabled: True if running in TUI mode, False otherwise
-     """
-     global _tui_mode
-     _tui_mode = enabled
-
-
- def is_tui_mode() -> bool:
-     """Check if the application is running in TUI mode.
-
-     Returns:
-         True if running in TUI mode, False otherwise
-     """
-     return _tui_mode
-
-
- def set_tui_app_instance(app_instance: Any) -> None:
-     """Set the global TUI app instance reference.
-
-     Args:
-         app_instance: The TUI app instance
-     """
-     global _tui_app_instance
-     _tui_app_instance = app_instance
-
-
- def get_tui_app_instance() -> Any:
-     """Get the current TUI app instance.
-
-     Returns:
-         The TUI app instance if available, None otherwise
-     """
-     return _tui_app_instance
-
-
- def get_tui_mode() -> bool:
-     """Get the current TUI mode state.
-
-     Returns:
-         True if running in TUI mode, False otherwise
-     """
-     return _tui_mode
-
-
- def get_message_history() -> List[Any]:
-     """Get message history for the active agent."""
-     manager = _require_agent_manager()
-     return manager.get_current_agent_message_history()
-
-
- def set_message_history(history: List[Any]) -> None:
-     """Replace the message history for the active agent."""
-     manager = _require_agent_manager()
-     manager.set_current_agent_message_history(history)
-
-
- def clear_message_history() -> None:
-     """Clear message history for the active agent."""
-     manager = _require_agent_manager()
-     manager.clear_current_agent_message_history()
-
-
- def append_to_message_history(message: Any) -> None:
-     """Append a message to the active agent's history."""
-     manager = _require_agent_manager()
-     manager.append_to_current_agent_message_history(message)
-
-
- def extend_message_history(history: List[Any]) -> None:
-     """Extend the active agent's message history."""
-     manager = _require_agent_manager()
-     manager.extend_current_agent_message_history(history)
-
-
- def _stringify_part(part: Any) -> str:
-     """Create a stable string representation for a message part.
-
-     We deliberately ignore timestamps so identical content hashes the same even when
-     emitted at different times. This prevents status updates from blowing up the
-     history when they are repeated with new timestamps."""
-
-     attributes: List[str] = [part.__class__.__name__]
-
-     # Role/instructions help disambiguate parts that otherwise share content
-     if hasattr(part, "role") and part.role:
-         attributes.append(f"role={part.role}")
-     if hasattr(part, "instructions") and part.instructions:
-         attributes.append(f"instructions={part.instructions}")
-
-     if hasattr(part, "tool_call_id") and part.tool_call_id:
-         attributes.append(f"tool_call_id={part.tool_call_id}")
-
-     if hasattr(part, "tool_name") and part.tool_name:
-         attributes.append(f"tool_name={part.tool_name}")
-
-     content = getattr(part, "content", None)
-     if content is None:
-         attributes.append("content=None")
-     elif isinstance(content, str):
-         attributes.append(f"content={content}")
-     elif isinstance(content, pydantic.BaseModel):
-         attributes.append(f"content={json.dumps(content.model_dump(), sort_keys=True)}")
-     elif isinstance(content, dict):
-         attributes.append(f"content={json.dumps(content, sort_keys=True)}")
-     else:
-         attributes.append(f"content={repr(content)}")
-     result = "|".join(attributes)
-     return result
-
-
- def hash_message(message: Any) -> int:
-     """Create a stable hash for a model message that ignores timestamps."""
-     role = getattr(message, "role", None)
-     instructions = getattr(message, "instructions", None)
-     header_bits: List[str] = []
-     if role:
-         header_bits.append(f"role={role}")
-     if instructions:
-         header_bits.append(f"instructions={instructions}")
-
-     part_strings = [_stringify_part(part) for part in getattr(message, "parts", [])]
-     canonical = "||".join(header_bits + part_strings)
-     return hash(canonical)
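
The hashing at the end of this module is what the rest of the package leaned on for de-duplicating history: timestamps are deliberately excluded, so a re-emitted status message hashes identically. A compact standalone illustration of that property, using hypothetical DemoPart/DemoMessage classes rather than real pydantic-ai types:

from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, List

@dataclass
class DemoPart:
    content: str
    tool_name: Any = None
    tool_call_id: Any = None
    timestamp: datetime = field(default_factory=datetime.now)  # ignored by hashing

@dataclass
class DemoMessage:
    parts: List[DemoPart]

def stringify(part: DemoPart) -> str:
    # Only stable fields contribute; the timestamp never appears here.
    bits = [part.__class__.__name__]
    if part.tool_call_id:
        bits.append(f"tool_call_id={part.tool_call_id}")
    if part.tool_name:
        bits.append(f"tool_name={part.tool_name}")
    bits.append(f"content={part.content}")
    return "|".join(bits)

def hash_message(message: DemoMessage) -> int:
    # Re-emitted messages with fresh timestamps dedupe to the same value.
    return hash("||".join(stringify(p) for p in message.parts))

a = DemoMessage([DemoPart("status: ok")])
b = DemoMessage([DemoPart("status: ok")])  # later timestamp, same content
assert hash_message(a) == hash_message(b)
print("identical content hashes equal despite different timestamps")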