code-puppy 0.0.170__py3-none-any.whl → 0.0.172__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. code_puppy/agent.py +10 -2
  2. code_puppy/agents/agent_creator_agent.py +0 -3
  3. code_puppy/agents/agent_qa_kitten.py +203 -0
  4. code_puppy/agents/base_agent.py +9 -0
  5. code_puppy/command_line/command_handler.py +68 -28
  6. code_puppy/command_line/mcp/add_command.py +1 -1
  7. code_puppy/command_line/mcp/base.py +1 -1
  8. code_puppy/command_line/mcp/install_command.py +1 -1
  9. code_puppy/command_line/mcp/list_command.py +1 -1
  10. code_puppy/command_line/mcp/search_command.py +1 -1
  11. code_puppy/command_line/mcp/start_all_command.py +1 -1
  12. code_puppy/command_line/mcp/status_command.py +2 -2
  13. code_puppy/command_line/mcp/stop_all_command.py +1 -1
  14. code_puppy/command_line/mcp/utils.py +1 -1
  15. code_puppy/command_line/mcp/wizard_utils.py +2 -2
  16. code_puppy/config.py +142 -12
  17. code_puppy/http_utils.py +50 -24
  18. code_puppy/{mcp → mcp_}/config_wizard.py +1 -1
  19. code_puppy/{mcp → mcp_}/examples/retry_example.py +1 -1
  20. code_puppy/{mcp → mcp_}/managed_server.py +1 -1
  21. code_puppy/{mcp → mcp_}/server_registry_catalog.py +1 -3
  22. code_puppy/message_history_processor.py +121 -125
  23. code_puppy/state_management.py +86 -127
  24. code_puppy/tools/__init__.py +103 -6
  25. code_puppy/tools/browser/__init__.py +0 -0
  26. code_puppy/tools/browser/browser_control.py +293 -0
  27. code_puppy/tools/browser/browser_interactions.py +552 -0
  28. code_puppy/tools/browser/browser_locators.py +642 -0
  29. code_puppy/tools/browser/browser_navigation.py +251 -0
  30. code_puppy/tools/browser/browser_screenshot.py +242 -0
  31. code_puppy/tools/browser/browser_scripts.py +478 -0
  32. code_puppy/tools/browser/browser_workflows.py +196 -0
  33. code_puppy/tools/browser/camoufox_manager.py +194 -0
  34. code_puppy/tools/browser/vqa_agent.py +66 -0
  35. code_puppy/tools/browser_control.py +293 -0
  36. code_puppy/tools/browser_interactions.py +552 -0
  37. code_puppy/tools/browser_locators.py +642 -0
  38. code_puppy/tools/browser_navigation.py +251 -0
  39. code_puppy/tools/browser_screenshot.py +278 -0
  40. code_puppy/tools/browser_scripts.py +478 -0
  41. code_puppy/tools/browser_workflows.py +215 -0
  42. code_puppy/tools/camoufox_manager.py +150 -0
  43. code_puppy/tools/command_runner.py +12 -7
  44. code_puppy/tools/file_operations.py +7 -7
  45. code_puppy/tui/app.py +4 -2
  46. code_puppy/tui/components/custom_widgets.py +1 -1
  47. code_puppy/tui/screens/mcp_install_wizard.py +8 -8
  48. {code_puppy-0.0.170.dist-info → code_puppy-0.0.172.dist-info}/METADATA +4 -2
  49. {code_puppy-0.0.170.dist-info → code_puppy-0.0.172.dist-info}/RECORD +66 -47
  50. /code_puppy/{mcp → mcp_}/__init__.py +0 -0
  51. /code_puppy/{mcp → mcp_}/async_lifecycle.py +0 -0
  52. /code_puppy/{mcp → mcp_}/blocking_startup.py +0 -0
  53. /code_puppy/{mcp → mcp_}/captured_stdio_server.py +0 -0
  54. /code_puppy/{mcp → mcp_}/circuit_breaker.py +0 -0
  55. /code_puppy/{mcp → mcp_}/dashboard.py +0 -0
  56. /code_puppy/{mcp → mcp_}/error_isolation.py +0 -0
  57. /code_puppy/{mcp → mcp_}/health_monitor.py +0 -0
  58. /code_puppy/{mcp → mcp_}/manager.py +0 -0
  59. /code_puppy/{mcp → mcp_}/registry.py +0 -0
  60. /code_puppy/{mcp → mcp_}/retry_manager.py +0 -0
  61. /code_puppy/{mcp → mcp_}/status_tracker.py +0 -0
  62. /code_puppy/{mcp → mcp_}/system_tools.py +0 -0
  63. {code_puppy-0.0.170.data → code_puppy-0.0.172.data}/data/code_puppy/models.json +0 -0
  64. {code_puppy-0.0.170.dist-info → code_puppy-0.0.172.dist-info}/WHEEL +0 -0
  65. {code_puppy-0.0.170.dist-info → code_puppy-0.0.172.dist-info}/entry_points.txt +0 -0
  66. {code_puppy-0.0.170.dist-info → code_puppy-0.0.172.dist-info}/licenses/LICENSE +0 -0
code_puppy/message_history_processor.py

@@ -3,7 +3,15 @@ import queue
  from typing import Any, List, Set, Tuple

  import pydantic
- from pydantic_ai.messages import ModelMessage, ModelRequest, TextPart, ToolCallPart
+ from pydantic_ai.messages import (
+     ModelMessage,
+     ModelRequest,
+     TextPart,
+     ToolCallPart,
+     ToolCallPartDelta,
+     ToolReturn,
+     ToolReturnPart,
+ )

  from code_puppy.config import (
      get_model_name,
@@ -82,9 +90,46 @@ def estimate_tokens_for_message(message: ModelMessage) -> int:


  def filter_huge_messages(messages: List[ModelMessage]) -> List[ModelMessage]:
-     filtered = [m for m in messages if estimate_tokens_for_message(m) < 50000]
-     pruned = prune_interrupted_tool_calls(filtered)
-     return pruned
+     if not messages:
+         return []
+
+     # Never drop the system prompt, even if it is extremely large.
+     system_message, *rest = messages
+     filtered_rest = [
+         m for m in rest if estimate_tokens_for_message(m) < 50000
+     ]
+     return [system_message] + filtered_rest
+
+
+ def _is_tool_call_part(part: Any) -> bool:
+     if isinstance(part, (ToolCallPart, ToolCallPartDelta)):
+         return True
+
+     part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
+     if part_kind == "tool-call":
+         return True
+
+     has_tool_name = getattr(part, "tool_name", None) is not None
+     has_args = getattr(part, "args", None) is not None
+     has_args_delta = getattr(part, "args_delta", None) is not None
+
+     return bool(has_tool_name and (has_args or has_args_delta))
+
+
+ def _is_tool_return_part(part: Any) -> bool:
+     if isinstance(part, (ToolReturnPart, ToolReturn)):
+         return True
+
+     part_kind = (getattr(part, "part_kind", "") or "").replace("_", "-")
+     if part_kind in {"tool-return", "tool-result"}:
+         return True
+
+     if getattr(part, "tool_call_id", None) is None:
+         return False
+
+     has_content = getattr(part, "content", None) is not None
+     has_content_delta = getattr(part, "content_delta", None) is not None
+     return bool(has_content or has_content_delta)


  def split_messages_for_protected_summarization(
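For illustration only, a minimal self-contained sketch of what the rewritten filter does: the first message (the system prompt) is always kept, and other messages over the size cutoff are dropped. The whitespace-based token estimator and the sample messages below are hypothetical stand-ins, not code_puppy's real estimate_tokens_for_message.

```python
# Hypothetical stand-ins for illustration; not code_puppy's real types or estimator.
def estimate_tokens(text: str) -> int:
    return len(text.split())

def filter_huge(messages: list, limit: int = 50_000) -> list:
    if not messages:
        return []
    system_prompt, *rest = messages          # never drop the system prompt
    return [system_prompt] + [m for m in rest if estimate_tokens(m) < limit]

history = ["system prompt " * 30_000, "short reply", "word " * 60_000]
kept = filter_huge(history)
assert kept[0].startswith("system prompt")   # oversized system prompt survives
assert len(kept) == 2                        # the 60k-word message is dropped
```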
@@ -126,19 +171,18 @@ def split_messages_for_protected_summarization(
          if protected_token_count + message_tokens > protected_tokens_limit:
              break

-         protected_messages.insert(0, message)  # Insert at beginning to maintain order
+         protected_messages.append(message)
          protected_token_count += message_tokens

-     # Add system message at the beginning of protected messages
+     # Messages that were added while scanning backwards are currently in reverse order.
+     # Reverse them to restore chronological ordering, then prepend the system prompt.
+     protected_messages.reverse()
      protected_messages.insert(0, system_message)

-     # Messages to summarize are everything between system message and protected zone
-     protected_start_idx = (
-         len(messages) - len(protected_messages) + 1
-     )  # +1 because system message is protected
-     messages_to_summarize = messages[
-         1:protected_start_idx
-     ]  # Start from 1 to skip system message
+     # Messages to summarize are everything between the system message and the
+     # protected tail zone we just constructed.
+     protected_start_idx = max(1, len(messages) - (len(protected_messages) - 1))
+     messages_to_summarize = messages[1:protected_start_idx]

      emit_info(
          f"🔒 Protecting {len(protected_messages)} recent messages ({protected_token_count} tokens, limit: {protected_tokens_limit})"
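As a sanity check on the reordering change above, here is a tiny standalone sketch of the same idea: scan the history newest-to-oldest under a budget, then reverse to restore chronological order. Plain strings stand in for ModelMessage objects, and the budget of three messages is made up.

```python
# Toy example: each message "costs" one slot; protect at most 3 recent messages.
messages = ["sys", "m1", "m2", "m3", "m4", "m5"]
budget = 3

protected = []
for message in reversed(messages[1:]):   # newest-to-oldest, skipping the system prompt
    if len(protected) + 1 > budget:
        break
    protected.append(message)            # collected in reverse order: m5, m4, m3

protected.reverse()                      # restore chronological order: m3, m4, m5
protected.insert(0, messages[0])         # prepend the system prompt
assert protected == ["sys", "m3", "m4", "m5"]

# Everything between the system prompt and the protected tail gets summarized.
to_summarize = messages[1:max(1, len(messages) - (len(protected) - 1))]
assert to_summarize == ["m1", "m2"]
```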
@@ -148,99 +192,36 @@ def split_messages_for_protected_summarization(
      return messages_to_summarize, protected_messages


- def deduplicate_tool_returns(messages: List[ModelMessage]) -> List[ModelMessage]:
-     """
-     Remove duplicate tool returns while preserving the first occurrence for each tool_call_id.
-
-     This function identifies tool-return parts that share the same tool_call_id and
-     removes duplicates, keeping only the first return for each id. This prevents
-     conversation corruption from duplicate tool_result blocks.
-     """
-     if not messages:
-         return messages
-
-     seen_tool_returns: Set[str] = set()
-     deduplicated: List[ModelMessage] = []
-     removed_count = 0
-
-     for msg in messages:
-         # Check if this message has any parts we need to filter
-         if not hasattr(msg, "parts") or not msg.parts:
-             deduplicated.append(msg)
-             continue
-
-         # Filter parts within this message
-         filtered_parts = []
-         msg_had_duplicates = False
-
-         for part in msg.parts:
-             tool_call_id = getattr(part, "tool_call_id", None)
-             part_kind = getattr(part, "part_kind", None)
-
-             # Check if this is a tool-return part
-             if tool_call_id and part_kind in {
-                 "tool-return",
-                 "tool-result",
-                 "tool_result",
-             }:
-                 if tool_call_id in seen_tool_returns:
-                     # This is a duplicate return, skip it
-                     msg_had_duplicates = True
-                     removed_count += 1
-                     continue
-                 else:
-                     # First occurrence of this return, keep it
-                     seen_tool_returns.add(tool_call_id)
-                     filtered_parts.append(part)
-             else:
-                 # Not a tool return, always keep
-                 filtered_parts.append(part)
-
-         # If we filtered out parts, create a new message with filtered parts
-         if msg_had_duplicates and filtered_parts:
-             # Create a new message with the same attributes but filtered parts
-             new_msg = type(msg)(parts=filtered_parts)
-             # Copy over other attributes if they exist
-             for attr_name in dir(msg):
-                 if (
-                     not attr_name.startswith("_")
-                     and attr_name != "parts"
-                     and hasattr(msg, attr_name)
-                 ):
-                     try:
-                         setattr(new_msg, attr_name, getattr(msg, attr_name))
-                     except (AttributeError, TypeError):
-                         # Skip attributes that can't be set
-                         pass
-             deduplicated.append(new_msg)
-         elif filtered_parts:  # No duplicates but has parts
-             deduplicated.append(msg)
-         # If no parts remain after filtering, drop the entire message
-
-     if removed_count > 0:
-         emit_warning(f"Removed {removed_count} duplicate tool-return part(s)")
-
-     return deduplicated
-
-
  def summarize_messages(
-     messages: List[ModelMessage], with_protection=True
+     messages: List[ModelMessage], with_protection: bool = True
  ) -> Tuple[List[ModelMessage], List[ModelMessage]]:
      """
      Summarize messages while protecting recent messages up to PROTECTED_TOKENS.

      Returns:
-         List of messages: [system_message, summary_of_old_messages, ...protected_recent_messages]
+         Tuple of (compacted_messages, summarized_source_messages)
+         where compacted_messages always preserves the original system message
+         as the first entry.
      """
-     messages_to_summarize, protected_messages = messages, []
+     messages_to_summarize: List[ModelMessage]
+     protected_messages: List[ModelMessage]
+
      if with_protection:
          messages_to_summarize, protected_messages = (
              split_messages_for_protected_summarization(messages)
          )
+     else:
+         messages_to_summarize = messages[1:] if messages else []
+         protected_messages = messages[:1]
+
+     if not messages:
+         return [], []
+
+     system_message = messages[0]

      if not messages_to_summarize:
-         # Nothing to summarize, return protected messages as-is
-         return protected_messages, messages_to_summarize
+         # Nothing to summarize, so just return the original sequence
+         return prune_interrupted_tool_calls(messages), []

      instructions = (
          "The input will be a log of Agentic AI steps that have been taken"
@@ -257,12 +238,24 @@ def summarize_messages(
          new_messages = run_summarization_sync(
              instructions, message_history=messages_to_summarize
          )
-         # Return: [system_message, summary, ...protected_recent_messages]
-         result = new_messages + protected_messages[1:]
-         return prune_interrupted_tool_calls(result), messages_to_summarize
+
+         if not isinstance(new_messages, list):
+             emit_warning(
+                 "Summarization agent returned non-list output; wrapping into message request"
+             )
+             new_messages = [ModelRequest([TextPart(str(new_messages))])]
+
+         compacted: List[ModelMessage] = [system_message] + list(new_messages)
+
+         # Drop the system message from protected_messages because we already included it
+         protected_tail = [msg for msg in protected_messages if msg is not system_message]
+
+         compacted.extend(protected_tail)
+
+         return prune_interrupted_tool_calls(compacted), messages_to_summarize
      except Exception as e:
          emit_error(f"Summarization failed during compaction: {e}")
-         return messages, messages_to_summarize  # Return original messages on failure
+         return messages, []  # Return original messages on failure


  def summarize_message(message: ModelMessage) -> ModelMessage:
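A schematic of the shape the rewritten happy path returns, using plain strings in place of real ModelMessage objects and a fake one-line summary; the point is only the ordering and the identity-based exclusion of the system message from the protected tail.

```python
system_message = "sys"
protected = [system_message, "recent-1", "recent-2"]   # protected zone includes the system prompt
summary = ["<summary of older messages>"]              # stand-in for the summarization agent's output

compacted = [system_message] + summary
# The system prompt is already at the front, so drop it from the tail by identity,
# mirroring the "msg is not system_message" check in the diff above.
compacted.extend(m for m in protected if m is not system_message)

assert compacted == ["sys", "<summary of older messages>", "recent-1", "recent-2"]
```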
@@ -329,11 +322,10 @@ def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMess
              tool_call_id = getattr(part, "tool_call_id", None)
              if not tool_call_id:
                  continue
-             # Heuristic: if it's an explicit ToolCallPart or has a tool_name/args,
-             # consider it a call; otherwise it's a return/result.
-             if part.part_kind == "tool-call":
+
+             if _is_tool_call_part(part) and not _is_tool_return_part(part):
                  tool_call_ids.add(tool_call_id)
-             else:
+             elif _is_tool_return_part(part):
                  tool_return_ids.add(tool_call_id)

      mismatched: Set[str] = tool_call_ids.symmetric_difference(tool_return_ids)
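A standalone sketch of the pairing logic this hunk now delegates to the _is_tool_call_part/_is_tool_return_part helpers: collect call ids and return ids, then treat the symmetric difference as interrupted. The dict-based parts below are hypothetical, not pydantic_ai types.

```python
# Hypothetical parts: dicts with a kind and a tool_call_id.
parts = [
    {"kind": "tool-call", "tool_call_id": "a"},
    {"kind": "tool-return", "tool_call_id": "a"},
    {"kind": "tool-call", "tool_call_id": "b"},      # call was interrupted, no return
    {"kind": "tool-return", "tool_call_id": "c"},    # return without a matching call
]

tool_call_ids = {p["tool_call_id"] for p in parts if p["kind"] == "tool-call"}
tool_return_ids = {p["tool_call_id"] for p in parts if p["kind"] == "tool-return"}

mismatched = tool_call_ids.symmetric_difference(tool_return_ids)
assert mismatched == {"b", "c"}   # parts carrying these ids would be pruned from the history
```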
@@ -362,12 +354,15 @@ def prune_interrupted_tool_calls(messages: List[ModelMessage]) -> List[ModelMess


  def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage]:
-     # First, prune any interrupted/mismatched tool-call conversations
-     total_current_tokens = sum(estimate_tokens_for_message(msg) for msg in messages)
+     cleaned_history = prune_interrupted_tool_calls(messages)
+
+     total_current_tokens = sum(
+         estimate_tokens_for_message(msg) for msg in cleaned_history
+     )

      model_max = get_model_context_length()

-     proportion_used = total_current_tokens / model_max
+     proportion_used = total_current_tokens / model_max if model_max else 0

      # Check if we're in TUI mode and can update the status bar
      from code_puppy.state_management import get_tui_app_instance, is_tui_mode
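The usage ratio now guards against a zero or unknown context length. A small sketch of that calculation with made-up token counts and a hypothetical threshold value (code_puppy reads the real threshold from its config):

```python
def context_proportion(total_tokens: int, model_max: int) -> float:
    # Avoid ZeroDivisionError when the model's context length is unknown or zero.
    return total_tokens / model_max if model_max else 0

assert context_proportion(80_000, 128_000) == 0.625
assert context_proportion(80_000, 0) == 0            # unknown context length -> never triggers compaction

COMPACTION_THRESHOLD = 0.85                           # hypothetical threshold value
assert context_proportion(120_000, 128_000) > COMPACTION_THRESHOLD   # would trigger compaction
```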
@@ -406,17 +401,15 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage
      compaction_strategy = get_compaction_strategy()

      if proportion_used > compaction_threshold:
+         filtered_history = filter_huge_messages(cleaned_history)
+
          if compaction_strategy == "truncation":
-             # Use truncation instead of summarization
              protected_tokens = get_protected_token_count()
-             result_messages = truncation(
-                 filter_huge_messages(messages), protected_tokens
-             )
-             summarized_messages = []  # No summarization in truncation mode
+             result_messages = truncation(filtered_history, protected_tokens)
+             summarized_messages: List[ModelMessage] = []
          else:
-             # Default to summarization
              result_messages, summarized_messages = summarize_messages(
-                 filter_huge_messages(messages)
+                 filtered_history
              )

          final_token_count = sum(
@@ -447,7 +440,9 @@ def message_history_processor(messages: List[ModelMessage]) -> List[ModelMessage
          for m in summarized_messages:
              add_compacted_message_hash(hash_message(m))
          return result_messages
-     return messages
+
+     set_message_history(cleaned_history)
+     return cleaned_history


  def truncation(
@@ -475,16 +470,17 @@


  def message_history_accumulator(messages: List[Any]):
-     _message_history = get_message_history()
-     message_history_hashes = set([hash_message(m) for m in _message_history])
-     for msg in messages:
-         if (
-             hash_message(msg) not in message_history_hashes
-             and hash_message(msg) not in get_compacted_message_hashes()
-         ):
-             _message_history.append(msg)
-
-     # Apply message history trimming using the main processor
-     # This ensures we maintain global state while still managing context limits
-     message_history_processor(_message_history)
-     return get_message_history()
+     existing_history = list(get_message_history())
+     seen_hashes = {hash_message(message) for message in existing_history}
+     compacted_hashes = get_compacted_message_hashes()
+
+     for message in messages:
+         message_hash = hash_message(message)
+         if message_hash in seen_hashes or message_hash in compacted_hashes:
+             continue
+         existing_history.append(message)
+         seen_hashes.add(message_hash)
+
+     updated_history = message_history_processor(existing_history)
+     set_message_history(updated_history)
+     return updated_history
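A minimal sketch of the accumulator's dedup behavior, using plain strings and Python's built-in hash in place of hash_message, and omitting the processor step:

```python
existing_history = ["sys", "m1"]
compacted_hashes = {hash("old-summarized-message")}     # already folded into a summary

seen_hashes = {hash(m) for m in existing_history}

incoming = ["m1", "m2", "old-summarized-message", "m2"]
for message in incoming:
    h = hash(message)
    if h in seen_hashes or h in compacted_hashes:
        continue                       # already present, or already summarized away
    existing_history.append(message)
    seen_hashes.add(h)                 # also dedupes repeats within the same batch

assert existing_history == ["sys", "m1", "m2"]
```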
code_puppy/state_management.py

@@ -1,44 +1,34 @@
- from typing import Any, List
+ import json
+ from types import ModuleType
+ from typing import Any, List, Set

- # Legacy global state - maintained for backward compatibility
- _message_history: List[Any] = []
- _compacted_message_hashes = set()
+ import pydantic
+
+ from code_puppy.messaging import emit_info

- # Flag to control whether to use agent-specific history (True) or global history (False)
- _use_agent_specific_history = True
  _tui_mode: bool = False
  _tui_app_instance: Any = None


+ def _require_agent_manager() -> ModuleType:
+     """Import the agent manager module, raising if it is unavailable."""
+     try:
+         from code_puppy.agents import agent_manager
+     except Exception as error:  # pragma: no cover - import errors surface immediately
+         raise RuntimeError("Agent manager module unavailable") from error
+     return agent_manager
+
+
  def add_compacted_message_hash(message_hash: str) -> None:
      """Add a message hash to the set of compacted message hashes."""
-     if _use_agent_specific_history:
-         try:
-             from code_puppy.agents.agent_manager import (
-                 add_current_agent_compacted_message_hash,
-             )
+     manager = _require_agent_manager()
+     manager.add_current_agent_compacted_message_hash(message_hash)

-             add_current_agent_compacted_message_hash(message_hash)
-             return
-         except Exception:
-             # Fallback to global if agent system fails
-             pass
-     _compacted_message_hashes.add(message_hash)

-
- def get_compacted_message_hashes():
+ def get_compacted_message_hashes() -> Set[str]:
      """Get the set of compacted message hashes."""
-     if _use_agent_specific_history:
-         try:
-             from code_puppy.agents.agent_manager import (
-                 get_current_agent_compacted_message_hashes,
-             )
-
-             return get_current_agent_compacted_message_hashes()
-         except Exception:
-             # Fallback to global if agent system fails
-             pass
-     return _compacted_message_hashes
+     manager = _require_agent_manager()
+     return manager.get_current_agent_compacted_message_hashes()


  def set_tui_mode(enabled: bool) -> None:
@@ -89,112 +79,81 @@ def get_tui_mode() -> bool:


  def get_message_history() -> List[Any]:
-     """Get message history - uses agent-specific history if enabled, otherwise global."""
-     if _use_agent_specific_history:
-         try:
-             from code_puppy.agents.agent_manager import (
-                 get_current_agent_message_history,
-             )
-
-             return get_current_agent_message_history()
-         except Exception:
-             # Fallback to global if agent system fails
-             return _message_history
-     return _message_history
+     """Get message history for the active agent."""
+     manager = _require_agent_manager()
+     return manager.get_current_agent_message_history()


  def set_message_history(history: List[Any]) -> None:
-     """Set message history - uses agent-specific history if enabled, otherwise global."""
-     if _use_agent_specific_history:
-         try:
-             from code_puppy.agents.agent_manager import (
-                 set_current_agent_message_history,
-             )
-
-             set_current_agent_message_history(history)
-             return
-         except Exception:
-             # Fallback to global if agent system fails
-             pass
-     global _message_history
-     _message_history = history
+     """Replace the message history for the active agent."""
+     manager = _require_agent_manager()
+     manager.set_current_agent_message_history(history)


  def clear_message_history() -> None:
-     """Clear message history - uses agent-specific history if enabled, otherwise global."""
-     if _use_agent_specific_history:
-         try:
-             from code_puppy.agents.agent_manager import (
-                 clear_current_agent_message_history,
-             )
-
-             clear_current_agent_message_history()
-             return
-         except Exception:
-             # Fallback to global if agent system fails
-             pass
-     global _message_history
-     _message_history = []
+     """Clear message history for the active agent."""
+     manager = _require_agent_manager()
+     manager.clear_current_agent_message_history()


  def append_to_message_history(message: Any) -> None:
-     """Append to message history - uses agent-specific history if enabled, otherwise global."""
-     if _use_agent_specific_history:
-         try:
-             from code_puppy.agents.agent_manager import (
-                 append_to_current_agent_message_history,
-             )
-
-             append_to_current_agent_message_history(message)
-             return
-         except Exception:
-             # Fallback to global if agent system fails
-             pass
-     _message_history.append(message)
+     """Append a message to the active agent's history."""
+     manager = _require_agent_manager()
+     manager.append_to_current_agent_message_history(message)


  def extend_message_history(history: List[Any]) -> None:
-     """Extend message history - uses agent-specific history if enabled, otherwise global."""
-     if _use_agent_specific_history:
-         try:
-             from code_puppy.agents.agent_manager import (
-                 extend_current_agent_message_history,
-             )
-
-             extend_current_agent_message_history(history)
-             return
-         except Exception:
-             # Fallback to global if agent system fails
-             pass
-     _message_history.extend(history)
-
-
- def set_use_agent_specific_history(enabled: bool) -> None:
-     """Enable or disable agent-specific message history.
-
-     Args:
-         enabled: True to use per-agent history, False to use global history.
-     """
-     global _use_agent_specific_history
-     _use_agent_specific_history = enabled
-
-
- def is_using_agent_specific_history() -> bool:
-     """Check if agent-specific message history is enabled.
-
-     Returns:
-         True if using per-agent history, False if using global history.
-     """
-     return _use_agent_specific_history
-
-
- def hash_message(message):
-     hashable_entities = []
-     for part in message.parts:
-         if hasattr(part, "timestamp"):
-             hashable_entities.append(part.timestamp.isoformat())
-         elif hasattr(part, "tool_call_id"):
-             hashable_entities.append(part.tool_call_id)
-         else:
-             hashable_entities.append(part.content)
-     return hash(",".join(hashable_entities))
+     """Extend the active agent's message history."""
+     manager = _require_agent_manager()
+     manager.extend_current_agent_message_history(history)
+
+
+ def _stringify_part(part: Any) -> str:
+     """Create a stable string representation for a message part.
+
+     We deliberately ignore timestamps so identical content hashes the same even when
+     emitted at different times. This prevents status updates from blowing up the
+     history when they are repeated with new timestamps."""
+
+     attributes: List[str] = [part.__class__.__name__]
+
+     # Role/instructions help disambiguate parts that otherwise share content
+     if hasattr(part, "role") and part.role:
+         attributes.append(f"role={part.role}")
+     if hasattr(part, "instructions") and part.instructions:
+         attributes.append(f"instructions={part.instructions}")
+
+     if hasattr(part, "tool_call_id") and part.tool_call_id:
+         attributes.append(f"tool_call_id={part.tool_call_id}")
+
+     if hasattr(part, "tool_name") and part.tool_name:
+         attributes.append(f"tool_name={part.tool_name}")
+
+     content = getattr(part, "content", None)
+     if content is None:
+         attributes.append("content=None")
+     elif isinstance(content, str):
+         attributes.append(f"content={content}")
+     elif isinstance(content, pydantic.BaseModel):
+         attributes.append(f"content={json.dumps(content.model_dump(), sort_keys=True)}")
+     elif isinstance(content, dict):
+         attributes.append(f"content={json.dumps(content, sort_keys=True)}")
+     else:
+         attributes.append(f"content={repr(content)}")
+     result = "|".join(attributes)
+     return result
+
+
+ def hash_message(message: Any) -> int:
+     """Create a stable hash for a model message that ignores timestamps."""
+     role = getattr(message, "role", None)
+     instructions = getattr(message, "instructions", None)
+     header_bits: List[str] = []
+     if role:
+         header_bits.append(f"role={role}")
+     if instructions:
+         header_bits.append(f"instructions={instructions}")
+
+     part_strings = [_stringify_part(part) for part in getattr(message, "parts", [])]
+     canonical = "||".join(header_bits + part_strings)
+     return hash(canonical)
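To illustrate the intent of the new hashing scheme (identical content hashes identically even when timestamps differ), here is a self-contained sketch with a hypothetical Part dataclass rather than pydantic_ai's message parts:

```python
from dataclasses import dataclass
from datetime import datetime, timedelta

@dataclass
class Part:                      # hypothetical stand-in for a message part
    content: str
    timestamp: datetime

def stringify(part: Part) -> str:
    # Timestamps are deliberately left out of the canonical form.
    return f"{part.__class__.__name__}|content={part.content}"

now = datetime.now()
a = Part("deploy finished", now)
b = Part("deploy finished", now + timedelta(seconds=30))

assert stringify(a) == stringify(b)              # same canonical form ...
assert hash(stringify(a)) == hash(stringify(b))  # ... hence the same hash, despite different timestamps
```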