AbstractRuntime: abstractruntime-0.0.1-py3-none-any.whl → abstractruntime-0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. abstractruntime/__init__.py +7 -2
  2. abstractruntime/core/__init__.py +9 -2
  3. abstractruntime/core/config.py +114 -0
  4. abstractruntime/core/event_keys.py +62 -0
  5. abstractruntime/core/models.py +55 -1
  6. abstractruntime/core/runtime.py +2609 -24
  7. abstractruntime/core/vars.py +189 -0
  8. abstractruntime/evidence/__init__.py +10 -0
  9. abstractruntime/evidence/recorder.py +325 -0
  10. abstractruntime/integrations/abstractcore/__init__.py +9 -2
  11. abstractruntime/integrations/abstractcore/constants.py +19 -0
  12. abstractruntime/integrations/abstractcore/default_tools.py +134 -0
  13. abstractruntime/integrations/abstractcore/effect_handlers.py +288 -9
  14. abstractruntime/integrations/abstractcore/factory.py +133 -11
  15. abstractruntime/integrations/abstractcore/llm_client.py +547 -42
  16. abstractruntime/integrations/abstractcore/mcp_worker.py +586 -0
  17. abstractruntime/integrations/abstractcore/observability.py +80 -0
  18. abstractruntime/integrations/abstractcore/summarizer.py +154 -0
  19. abstractruntime/integrations/abstractcore/tool_executor.py +544 -8
  20. abstractruntime/memory/__init__.py +21 -0
  21. abstractruntime/memory/active_context.py +746 -0
  22. abstractruntime/memory/active_memory.py +452 -0
  23. abstractruntime/memory/compaction.py +105 -0
  24. abstractruntime/rendering/__init__.py +17 -0
  25. abstractruntime/rendering/agent_trace_report.py +256 -0
  26. abstractruntime/rendering/json_stringify.py +136 -0
  27. abstractruntime/scheduler/scheduler.py +93 -2
  28. abstractruntime/storage/__init__.py +3 -1
  29. abstractruntime/storage/artifacts.py +51 -5
  30. abstractruntime/storage/json_files.py +16 -3
  31. abstractruntime/storage/observable.py +99 -0
  32. {abstractruntime-0.0.1.dist-info → abstractruntime-0.4.0.dist-info}/METADATA +5 -1
  33. abstractruntime-0.4.0.dist-info/RECORD +49 -0
  34. abstractruntime-0.4.0.dist-info/entry_points.txt +2 -0
  35. abstractruntime-0.0.1.dist-info/RECORD +0 -30
  36. {abstractruntime-0.0.1.dist-info → abstractruntime-0.4.0.dist-info}/WHEEL +0 -0
  37. {abstractruntime-0.0.1.dist-info → abstractruntime-0.4.0.dist-info}/licenses/LICENSE +0 -0

abstractruntime/__init__.py

@@ -33,6 +33,7 @@ from .storage.base import QueryableRunStore
 from .storage.in_memory import InMemoryLedgerStore, InMemoryRunStore
 from .storage.json_files import JsonFileRunStore, JsonlLedgerStore
 from .storage.ledger_chain import HashChainedLedgerStore, verify_ledger_chain
+from .storage.observable import ObservableLedgerStore, ObservableLedgerStoreProtocol
 from .storage.snapshots import Snapshot, SnapshotStore, InMemorySnapshotStore, JsonSnapshotStore
 from .storage.artifacts import (
     Artifact,
@@ -54,6 +55,7 @@ from .scheduler import (
     ScheduledRuntime,
     create_scheduled_runtime,
 )
+from .memory import ActiveContextPolicy, TimeRange

 __all__ = [
     # Core models
@@ -81,6 +83,8 @@ __all__ = [
     "JsonlLedgerStore",
     "HashChainedLedgerStore",
     "verify_ledger_chain",
+    "ObservableLedgerStore",
+    "ObservableLedgerStoreProtocol",
     "Snapshot",
     "SnapshotStore",
     "InMemorySnapshotStore",
@@ -104,7 +108,8 @@ __all__ = [
     "RetryPolicy",
     "NoRetryPolicy",
     "compute_idempotency_key",
+    # Memory
+    "ActiveContextPolicy",
+    "TimeRange",
 ]

-
-

abstractruntime/core/__init__.py

@@ -1,19 +1,26 @@
 """Core runtime primitives."""

-from .models import Effect, EffectType, RunState, RunStatus, StepPlan, WaitReason, WaitState
+from .config import RuntimeConfig
+from .models import Effect, EffectType, LimitWarning, RunState, RunStatus, StepPlan, WaitReason, WaitState
 from .runtime import Runtime
 from .spec import WorkflowSpec
+from .vars import LIMITS, ensure_limits, get_limits

 __all__ = [
     "Effect",
     "EffectType",
+    "LimitWarning",
+    "LIMITS",
     "RunState",
     "RunStatus",
+    "Runtime",
+    "RuntimeConfig",
     "StepPlan",
     "WaitReason",
     "WaitState",
     "WorkflowSpec",
-    "Runtime",
+    "ensure_limits",
+    "get_limits",
 ]


abstractruntime/core/config.py (new file)

@@ -0,0 +1,114 @@
+"""abstractruntime.core.config
+
+Runtime configuration for resource limits and model capabilities.
+
+This module provides a RuntimeConfig dataclass that centralizes configuration
+for runtime resource limits (iterations, tokens, history) and model capabilities.
+The config is used to initialize the `_limits` namespace in RunState.vars.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Any, Dict, Optional
+
+
+@dataclass(frozen=True)
+class RuntimeConfig:
+    """Configuration for runtime resource limits and model capabilities.
+
+    This configuration is used by the Runtime to:
+    1. Initialize the `_limits` namespace in RunState.vars when starting a run
+    2. Provide model capability information for resource tracking
+    3. Configure warning thresholds for proactive notifications
+
+    Attributes:
+        max_iterations: Maximum number of reasoning iterations (default: 25)
+        warn_iterations_pct: Percentage threshold for iteration warnings (default: 80)
+        max_tokens: Maximum context window tokens (None = use model capabilities)
+        max_output_tokens: Maximum tokens for LLM response (None = provider default)
+        warn_tokens_pct: Percentage threshold for token warnings (default: 80)
+        max_history_messages: Maximum conversation history messages (-1 = unlimited)
+        provider: Default provider id for this Runtime (best-effort; used for run metadata)
+        model: Default model id for this Runtime (best-effort; used for run metadata)
+        model_capabilities: Dict of model capabilities from LLM provider
+
+    Example:
+        >>> config = RuntimeConfig(max_iterations=50, max_tokens=65536)
+        >>> limits = config.to_limits_dict()
+        >>> limits["max_iterations"]
+        50
+    """
+
+    # Iteration control
+    max_iterations: int = 25
+    warn_iterations_pct: int = 80
+
+    # Token/context window management
+    max_tokens: Optional[int] = None  # None = query from model capabilities
+    max_output_tokens: Optional[int] = None  # None = use provider default
+    warn_tokens_pct: int = 80
+
+    # History management
+    max_history_messages: int = -1  # -1 = unlimited (send all messages)
+
+    # Default routing metadata (optional; depends on how the Runtime was constructed)
+    provider: Optional[str] = None
+    model: Optional[str] = None
+
+    # Model capabilities (populated from LLM client)
+    model_capabilities: Dict[str, Any] = field(default_factory=dict)
+
+    def to_limits_dict(self) -> Dict[str, Any]:
+        """Convert to _limits namespace dict for RunState.vars.
+
+        Returns:
+            Dict with canonical limit values for storage in RunState.vars["_limits"].
+            Uses model_capabilities as fallback for max_tokens if not explicitly set.
+        """
+        max_output_tokens = self.max_output_tokens
+        if max_output_tokens is None:
+            # Best-effort: persist the provider/model default so agent logic can reason about
+            # output-size constraints (e.g., chunk large tool arguments like file contents).
+            max_output_tokens = self.model_capabilities.get("max_output_tokens")
+        return {
+            # Iteration control
+            "max_iterations": self.max_iterations,
+            "current_iteration": 0,
+
+            # Token management
+            "max_tokens": self.max_tokens or self.model_capabilities.get("max_tokens", 32768),
+            "max_output_tokens": max_output_tokens,
+            "estimated_tokens_used": 0,
+
+            # History management
+            "max_history_messages": self.max_history_messages,
+
+            # Warning thresholds
+            "warn_iterations_pct": self.warn_iterations_pct,
+            "warn_tokens_pct": self.warn_tokens_pct,
+        }
+
+    def with_capabilities(self, capabilities: Dict[str, Any]) -> "RuntimeConfig":
+        """Create a new RuntimeConfig with updated model capabilities.
+
+        This is useful for merging model capabilities from an LLM client
+        into an existing configuration.
+
+        Args:
+            capabilities: Dict of model capabilities (e.g., from get_model_capabilities())
+
+        Returns:
+            New RuntimeConfig with merged capabilities
+        """
+        return RuntimeConfig(
+            max_iterations=self.max_iterations,
+            warn_iterations_pct=self.warn_iterations_pct,
+            max_tokens=self.max_tokens,
+            max_output_tokens=self.max_output_tokens,
+            warn_tokens_pct=self.warn_tokens_pct,
+            max_history_messages=self.max_history_messages,
+            provider=self.provider,
+            model=self.model,
+            model_capabilities=capabilities,
+        )
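For orientation, a minimal usage sketch of the new RuntimeConfig (illustrative, not part of the diff; it uses only the constructor, with_capabilities() and to_limits_dict() shown above, and the concrete numbers are made up):

    from abstractruntime.core import RuntimeConfig

    # Merge capabilities reported by an LLM client into the config; the keys
    # "max_tokens" and "max_output_tokens" are the ones to_limits_dict() reads.
    config = RuntimeConfig(max_iterations=50, warn_tokens_pct=75)
    config = config.with_capabilities({"max_tokens": 128000, "max_output_tokens": 8192})

    limits = config.to_limits_dict()
    assert limits["max_iterations"] == 50
    assert limits["max_tokens"] == 128000       # falls back to model capabilities
    assert limits["max_output_tokens"] == 8192  # provider/model default persisted
    assert limits["current_iteration"] == 0     # counters start at zero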

abstractruntime/core/event_keys.py (new file)

@@ -0,0 +1,62 @@
+"""abstractruntime.core.event_keys
+
+Durable event key conventions.
+
+Why this exists:
+- `WAIT_EVENT` needs a stable `wait_key` that external hosts can compute.
+- Visual editors and other hosts (AbstractCode, servers) must agree on the same
+  key format without importing UI-specific code.
+
+We keep this module dependency-light (stdlib only).
+"""
+
+from __future__ import annotations
+
+from typing import Optional
+
+
+def build_event_wait_key(
+    *,
+    scope: str,
+    name: str,
+    session_id: Optional[str] = None,
+    workflow_id: Optional[str] = None,
+    run_id: Optional[str] = None,
+) -> str:
+    """Build a durable wait_key for event-driven workflows.
+
+    Format:
+        evt:{scope}:{scope_id}:{name}
+
+    Scopes:
+    - session: `scope_id` is the workflow instance/session identifier (recommended default)
+    - workflow: `scope_id` is the workflow_id
+    - run: `scope_id` is the run_id
+    - global: `scope_id` is the literal string "global"
+    """
+    scope_norm = str(scope or "session").strip().lower()
+    name_norm = str(name or "").strip()
+    if not name_norm:
+        raise ValueError("event name is required")
+
+    scope_id: Optional[str]
+    if scope_norm == "session":
+        scope_id = str(session_id or "").strip() if session_id is not None else ""
+    elif scope_norm == "workflow":
+        scope_id = str(workflow_id or "").strip() if workflow_id is not None else ""
+    elif scope_norm == "run":
+        scope_id = str(run_id or "").strip() if run_id is not None else ""
+    elif scope_norm == "global":
+        scope_id = "global"
+    else:
+        raise ValueError(f"unknown event scope: {scope!r}")
+
+    if not scope_id:
+        raise ValueError(f"missing scope id for scope={scope_norm!r}")
+
+    return f"evt:{scope_norm}:{scope_id}:{name_norm}"
+
+
+
+
+
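A short sketch of the wait-key convention defined above (illustrative only; the event names and identifiers are placeholders):

    from abstractruntime.core.event_keys import build_event_wait_key

    # Session scope (the recommended default): any host can recompute this key
    # independently and agree on it with the runtime.
    key = build_event_wait_key(scope="session", name="user_approval", session_id="sess-42")
    assert key == "evt:session:sess-42:user_approval"

    # Global scope ignores the id arguments and uses the literal "global".
    assert build_event_wait_key(scope="global", name="shutdown") == "evt:global:global:shutdown"

    # An unknown scope, or a scope without its matching id, raises ValueError:
    # build_event_wait_key(scope="run", name="done")  # -> ValueError: missing scope id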

abstractruntime/core/models.py

@@ -46,10 +46,22 @@ class EffectType(str, Enum):
     WAIT_EVENT = "wait_event"
     WAIT_UNTIL = "wait_until"
     ASK_USER = "ask_user"
+    ANSWER_USER = "answer_user"
+
+    # Eventing
+    EMIT_EVENT = "emit_event"

     # Integrations (implemented via pluggable handlers)
     LLM_CALL = "llm_call"
     TOOL_CALLS = "tool_calls"
+    MEMORY_QUERY = "memory_query"
+    MEMORY_TAG = "memory_tag"
+    MEMORY_COMPACT = "memory_compact"
+    MEMORY_NOTE = "memory_note"
+    MEMORY_REHYDRATE = "memory_rehydrate"
+
+    # Debug / inspection (schema-only tools -> runtime effects)
+    VARS_QUERY = "vars_query"

     # Composition
     START_SUBWORKFLOW = "start_subworkflow"
@@ -130,10 +142,20 @@ class RunState:

     # Optional provenance fields
     actor_id: Optional[str] = None
+    session_id: Optional[str] = None
     parent_run_id: Optional[str] = None  # For subworkflow tracking

     @classmethod
-    def new(cls, *, workflow_id: str, entry_node: str, actor_id: Optional[str] = None, vars: Optional[Dict[str, Any]] = None, parent_run_id: Optional[str] = None) -> "RunState":
+    def new(
+        cls,
+        *,
+        workflow_id: str,
+        entry_node: str,
+        actor_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        vars: Optional[Dict[str, Any]] = None,
+        parent_run_id: Optional[str] = None,
+    ) -> "RunState":
         return cls(
             run_id=str(uuid.uuid4()),
             workflow_id=workflow_id,
@@ -141,6 +163,7 @@ class RunState:
             current_node=entry_node,
             vars=vars or {},
             actor_id=actor_id,
+            session_id=session_id,
             parent_run_id=parent_run_id,
         )

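As a quick illustration of the widened RunState.new() signature (a sketch; the workflow, actor and session identifiers are placeholders):

    from abstractruntime.core.models import RunState

    run = RunState.new(
        workflow_id="demo-workflow",
        entry_node="start",
        actor_id="agent-1",
        session_id="sess-42",  # added in this release; flows into StepRecord provenance
    )
    assert run.session_id == "sess-42"
    assert run.parent_run_id is None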
@@ -170,6 +193,7 @@ class StepRecord:

     # Optional provenance/integrity
     actor_id: Optional[str] = None
+    session_id: Optional[str] = None

     # Retry and idempotency fields
     attempt: int = 1  # Current attempt number (1-indexed)
@@ -201,6 +225,7 @@ class StepRecord:
                 "result_key": effect.result_key,
             } if effect else None,
             actor_id=run.actor_id,
+            session_id=getattr(run, "session_id", None),
             attempt=attempt,
             idempotency_key=idempotency_key,
         )
@@ -237,3 +262,32 @@
         return self


+@dataclass
+class LimitWarning:
+    """Warning about approaching or exceeding a runtime limit.
+
+    Generated by Runtime.check_limits() to proactively notify about
+    resource constraints before they cause failures.
+
+    Attributes:
+        limit_type: Type of limit ("iterations", "tokens", "history")
+        status: Warning status ("warning" at threshold, "exceeded" at limit)
+        current: Current value of the resource
+        maximum: Maximum allowed value
+        pct: Percentage of limit used (computed in __post_init__)
+
+    Example:
+        >>> warning = LimitWarning("iterations", "warning", 20, 25)
+        >>> warning.pct
+        80.0
+    """
+
+    limit_type: str  # "iterations", "tokens", "history"
+    status: str  # "warning", "exceeded"
+    current: int
+    maximum: int
+    pct: float = 0.0
+
+    def __post_init__(self) -> None:
+        if self.maximum > 0:
+            self.pct = round(self.current / self.maximum * 100, 1)
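Finally, a hedged sketch of how a LimitWarning can be derived from the _limits namespace that RuntimeConfig.to_limits_dict() seeds. The real check lives in Runtime.check_limits() (core/runtime.py, not shown in this excerpt); the helper below is hypothetical and only illustrates the threshold semantics documented above:

    from typing import Any, Dict, Optional

    from abstractruntime.core.models import LimitWarning

    def iteration_warning(limits: Dict[str, Any]) -> Optional[LimitWarning]:
        """Return a LimitWarning once current_iteration crosses the warn threshold."""
        current = int(limits.get("current_iteration", 0))
        maximum = int(limits.get("max_iterations", 0))
        if maximum <= 0:
            return None  # no limit configured
        if current >= maximum:
            return LimitWarning("iterations", "exceeded", current, maximum)
        if current / maximum * 100 >= limits.get("warn_iterations_pct", 80):
            return LimitWarning("iterations", "warning", current, maximum)
        return None

    warning = iteration_warning({"current_iteration": 20, "max_iterations": 25})
    assert warning is not None
    assert warning.status == "warning" and warning.pct == 80.0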