AbstractRuntime 0.2.0-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. abstractruntime/__init__.py +7 -2
  2. abstractruntime/core/config.py +14 -1
  3. abstractruntime/core/event_keys.py +62 -0
  4. abstractruntime/core/models.py +12 -1
  5. abstractruntime/core/runtime.py +2444 -14
  6. abstractruntime/core/vars.py +95 -0
  7. abstractruntime/evidence/__init__.py +10 -0
  8. abstractruntime/evidence/recorder.py +325 -0
  9. abstractruntime/integrations/abstractcore/__init__.py +3 -0
  10. abstractruntime/integrations/abstractcore/constants.py +19 -0
  11. abstractruntime/integrations/abstractcore/default_tools.py +134 -0
  12. abstractruntime/integrations/abstractcore/effect_handlers.py +255 -6
  13. abstractruntime/integrations/abstractcore/factory.py +95 -10
  14. abstractruntime/integrations/abstractcore/llm_client.py +456 -52
  15. abstractruntime/integrations/abstractcore/mcp_worker.py +586 -0
  16. abstractruntime/integrations/abstractcore/observability.py +80 -0
  17. abstractruntime/integrations/abstractcore/summarizer.py +154 -0
  18. abstractruntime/integrations/abstractcore/tool_executor.py +481 -24
  19. abstractruntime/memory/__init__.py +21 -0
  20. abstractruntime/memory/active_context.py +746 -0
  21. abstractruntime/memory/active_memory.py +452 -0
  22. abstractruntime/memory/compaction.py +105 -0
  23. abstractruntime/rendering/__init__.py +17 -0
  24. abstractruntime/rendering/agent_trace_report.py +256 -0
  25. abstractruntime/rendering/json_stringify.py +136 -0
  26. abstractruntime/scheduler/scheduler.py +93 -2
  27. abstractruntime/storage/__init__.py +3 -1
  28. abstractruntime/storage/artifacts.py +20 -5
  29. abstractruntime/storage/json_files.py +15 -2
  30. abstractruntime/storage/observable.py +99 -0
  31. {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.0.dist-info}/METADATA +5 -1
  32. abstractruntime-0.4.0.dist-info/RECORD +49 -0
  33. abstractruntime-0.4.0.dist-info/entry_points.txt +2 -0
  34. abstractruntime-0.2.0.dist-info/RECORD +0 -32
  35. {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.0.dist-info}/WHEEL +0 -0
  36. {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.0.dist-info}/licenses/LICENSE +0 -0
abstractruntime/integrations/abstractcore/summarizer.py (new file)
@@ -0,0 +1,154 @@
+"""abstractruntime.integrations.abstractcore.summarizer
+
+Integration with AbstractCore's BasicSummarizer for chat compaction.
+
+This module provides a wrapper around AbstractCore's BasicSummarizer
+that respects environment token limits (max_tokens, max_output_tokens)
+for adaptive chunking during conversation compaction.
+
+Design:
+- The kernel (runtime.py) uses this via dependency injection
+- BasicSummarizer handles adaptive chunking based on max_tokens
+- When max_tokens == -1: Uses model's full capability (AUTO mode)
+- When max_tokens > 0: Uses explicit limit (environment constraint)
+"""
+
+from __future__ import annotations
+
+from typing import Any, Dict, List, Optional, Protocol
+
+
+class ChatSummarizer(Protocol):
+    """Protocol for chat history summarization.
+
+    This protocol allows the runtime kernel to use summarization
+    without directly importing AbstractCore.
+    """
+
+    def summarize_chat_history(
+        self,
+        messages: List[Dict[str, Any]],
+        *,
+        preserve_recent: int = 6,
+        focus: Optional[str] = None,
+        compression_mode: str = "standard",
+    ) -> Dict[str, Any]:
+        """Summarize chat history with adaptive chunking.
+
+        Args:
+            messages: List of message dicts with 'role' and 'content' keys
+            preserve_recent: Number of recent messages to keep intact (default 6)
+            focus: Optional focus for summarization
+            compression_mode: How aggressively to compress (light|standard|heavy)
+
+        Returns:
+            Dict with keys: summary, key_points, confidence, focus_alignment
+        """
+        ...
+
+
+class AbstractCoreChatSummarizer:
+    """Wrapper around AbstractCore's BasicSummarizer for runtime integration.
+
+    This class:
+    - Wraps BasicSummarizer with token limits from RuntimeConfig
+    - Handles adaptive chunking automatically via BasicSummarizer
+    - Returns JSON-safe dicts for storage in RunState.vars
+
+    Example:
+        >>> summarizer = AbstractCoreChatSummarizer(
+        ...     llm=llm_instance,
+        ...     max_tokens=32768,
+        ...     max_output_tokens=4096,
+        ... )
+        >>> result = summarizer.summarize_chat_history(messages)
+        >>> print(result["summary"])
+    """
+
+    def __init__(
+        self,
+        llm,
+        *,
+        max_tokens: int = -1,
+        max_output_tokens: int = -1,
+    ):
+        """Initialize the summarizer with token limits.
+
+        Args:
+            llm: AbstractCore LLM instance (from create_llm or provider)
+            max_tokens: Maximum context tokens. -1 = AUTO (use model capability)
+            max_output_tokens: Maximum output tokens. -1 = AUTO
+        """
+        from abstractcore.processing import BasicSummarizer
+
+        self._summarizer = BasicSummarizer(
+            llm=llm,
+            max_tokens=max_tokens,
+            max_output_tokens=max_output_tokens,
+        )
+        self._max_tokens = max_tokens
+        self._max_output_tokens = max_output_tokens
+
+    def summarize_chat_history(
+        self,
+        messages: List[Dict[str, Any]],
+        *,
+        preserve_recent: int = 6,
+        focus: Optional[str] = None,
+        compression_mode: str = "standard",
+    ) -> Dict[str, Any]:
+        """Summarize chat history with adaptive chunking.
+
+        When max_tokens > 0 and messages exceed the limit, BasicSummarizer
+        automatically uses map-reduce chunking to process the content.
+
+        Args:
+            messages: List of message dicts with 'role' and 'content' keys
+            preserve_recent: Number of recent messages to keep intact (default 6)
+            focus: Optional focus for summarization (e.g., "key decisions")
+            compression_mode: How aggressively to compress (light|standard|heavy)
+
+        Returns:
+            Dict with keys:
+            - summary: The summarized text
+            - key_points: List of key points extracted
+            - confidence: Confidence score (0-1)
+            - focus_alignment: How well summary addresses focus (0-1)
+        """
+        from abstractcore.processing import CompressionMode
+
+        # Map string to enum
+        mode_map = {
+            "light": CompressionMode.LIGHT,
+            "standard": CompressionMode.STANDARD,
+            "heavy": CompressionMode.HEAVY,
+        }
+        mode = mode_map.get(compression_mode.lower(), CompressionMode.STANDARD)
+
+        # Call BasicSummarizer - it handles adaptive chunking internally
+        result = self._summarizer.summarize_chat_history(
+            messages=messages,
+            preserve_recent=preserve_recent,
+            focus=focus,
+            compression_mode=mode,
+        )
+
+        # Return as JSON-safe dict for storage in RunState.vars
+        return {
+            "summary": result.summary,
+            "key_points": list(result.key_points) if result.key_points else [],
+            "confidence": float(result.confidence) if result.confidence else None,
+            "focus_alignment": float(result.focus_alignment) if result.focus_alignment else None,
+            "word_count_original": result.word_count_original,
+            "word_count_summary": result.word_count_summary,
+        }
+
+    @property
+    def max_tokens(self) -> int:
+        """Current max_tokens setting."""
+        return self._max_tokens
+
+    @property
+    def max_output_tokens(self) -> int:
+        """Current max_output_tokens setting."""
+        return self._max_output_tokens
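
For orientation, the sketch below shows how the new wrapper might be wired up, following the docstrings in the diff above. It is a minimal sketch, not code shipped in the package: the create_llm import path, the provider and model names, and the sample messages are assumptions; only AbstractCoreChatSummarizer and its keyword arguments come from the file shown here.

# Minimal usage sketch (assumed: create_llm import path, provider/model names).
from abstractcore import create_llm  # assumed entry point for building an LLM client
from abstractruntime.integrations.abstractcore.summarizer import AbstractCoreChatSummarizer

llm = create_llm("openai", model="gpt-4o-mini")  # placeholder provider/model

summarizer = AbstractCoreChatSummarizer(
    llm=llm,
    max_tokens=32768,        # explicit environment context limit (-1 would mean AUTO)
    max_output_tokens=4096,  # cap on the generated summary
)

messages = [
    {"role": "user", "content": "Let's plan the 0.4.0 release."},
    {"role": "assistant", "content": "Sure. Which items are blocking?"},
    # ... many more turns that push the conversation past the context budget ...
]

result = summarizer.summarize_chat_history(
    messages,
    preserve_recent=6,         # keep the latest turns verbatim
    focus="key decisions",
    compression_mode="heavy",  # light | standard | heavy
)

print(result["summary"])
print(result["key_points"])

Because the returned dict is JSON-safe, the runtime can store it directly in RunState.vars during conversation compaction, as the module docstring describes.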