AbstractRuntime 0.2.0-py3-none-any.whl → 0.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractruntime/__init__.py +7 -2
- abstractruntime/core/config.py +14 -1
- abstractruntime/core/event_keys.py +62 -0
- abstractruntime/core/models.py +12 -1
- abstractruntime/core/runtime.py +2444 -14
- abstractruntime/core/vars.py +95 -0
- abstractruntime/evidence/__init__.py +10 -0
- abstractruntime/evidence/recorder.py +325 -0
- abstractruntime/integrations/abstractcore/__init__.py +3 -0
- abstractruntime/integrations/abstractcore/constants.py +19 -0
- abstractruntime/integrations/abstractcore/default_tools.py +134 -0
- abstractruntime/integrations/abstractcore/effect_handlers.py +255 -6
- abstractruntime/integrations/abstractcore/factory.py +95 -10
- abstractruntime/integrations/abstractcore/llm_client.py +456 -52
- abstractruntime/integrations/abstractcore/mcp_worker.py +586 -0
- abstractruntime/integrations/abstractcore/observability.py +80 -0
- abstractruntime/integrations/abstractcore/summarizer.py +154 -0
- abstractruntime/integrations/abstractcore/tool_executor.py +481 -24
- abstractruntime/memory/__init__.py +21 -0
- abstractruntime/memory/active_context.py +746 -0
- abstractruntime/memory/active_memory.py +452 -0
- abstractruntime/memory/compaction.py +105 -0
- abstractruntime/rendering/__init__.py +17 -0
- abstractruntime/rendering/agent_trace_report.py +256 -0
- abstractruntime/rendering/json_stringify.py +136 -0
- abstractruntime/scheduler/scheduler.py +93 -2
- abstractruntime/storage/__init__.py +3 -1
- abstractruntime/storage/artifacts.py +20 -5
- abstractruntime/storage/json_files.py +15 -2
- abstractruntime/storage/observable.py +99 -0
- {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.0.dist-info}/METADATA +5 -1
- abstractruntime-0.4.0.dist-info/RECORD +49 -0
- abstractruntime-0.4.0.dist-info/entry_points.txt +2 -0
- abstractruntime-0.2.0.dist-info/RECORD +0 -32
- {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.0.dist-info}/WHEEL +0 -0
- {abstractruntime-0.2.0.dist-info → abstractruntime-0.4.0.dist-info}/licenses/LICENSE +0 -0
abstractruntime/__init__.py
CHANGED
|
@@ -33,6 +33,7 @@ from .storage.base import QueryableRunStore
|
|
|
33
33
|
from .storage.in_memory import InMemoryLedgerStore, InMemoryRunStore
|
|
34
34
|
from .storage.json_files import JsonFileRunStore, JsonlLedgerStore
|
|
35
35
|
from .storage.ledger_chain import HashChainedLedgerStore, verify_ledger_chain
|
|
36
|
+
from .storage.observable import ObservableLedgerStore, ObservableLedgerStoreProtocol
|
|
36
37
|
from .storage.snapshots import Snapshot, SnapshotStore, InMemorySnapshotStore, JsonSnapshotStore
|
|
37
38
|
from .storage.artifacts import (
|
|
38
39
|
Artifact,
|
|
@@ -54,6 +55,7 @@ from .scheduler import (
|
|
|
54
55
|
ScheduledRuntime,
|
|
55
56
|
create_scheduled_runtime,
|
|
56
57
|
)
|
|
58
|
+
from .memory import ActiveContextPolicy, TimeRange
|
|
57
59
|
|
|
58
60
|
__all__ = [
|
|
59
61
|
# Core models
|
|
@@ -81,6 +83,8 @@ __all__ = [
|
|
|
81
83
|
"JsonlLedgerStore",
|
|
82
84
|
"HashChainedLedgerStore",
|
|
83
85
|
"verify_ledger_chain",
|
|
86
|
+
"ObservableLedgerStore",
|
|
87
|
+
"ObservableLedgerStoreProtocol",
|
|
84
88
|
"Snapshot",
|
|
85
89
|
"SnapshotStore",
|
|
86
90
|
"InMemorySnapshotStore",
|
|
@@ -104,7 +108,8 @@ __all__ = [
|
|
|
104
108
|
"RetryPolicy",
|
|
105
109
|
"NoRetryPolicy",
|
|
106
110
|
"compute_idempotency_key",
|
|
111
|
+
# Memory
|
|
112
|
+
"ActiveContextPolicy",
|
|
113
|
+
"TimeRange",
|
|
107
114
|
]
|
|
108
115
|
|
|
109
|
-
|
|
110
|
-
|
abstractruntime/core/config.py
CHANGED
|
@@ -29,6 +29,8 @@ class RuntimeConfig:
|
|
|
29
29
|
max_output_tokens: Maximum tokens for LLM response (None = provider default)
|
|
30
30
|
warn_tokens_pct: Percentage threshold for token warnings (default: 80)
|
|
31
31
|
max_history_messages: Maximum conversation history messages (-1 = unlimited)
|
|
32
|
+
provider: Default provider id for this Runtime (best-effort; used for run metadata)
|
|
33
|
+
model: Default model id for this Runtime (best-effort; used for run metadata)
|
|
32
34
|
model_capabilities: Dict of model capabilities from LLM provider
|
|
33
35
|
|
|
34
36
|
Example:
|
|
@@ -50,6 +52,10 @@ class RuntimeConfig:
|
|
|
50
52
|
# History management
|
|
51
53
|
max_history_messages: int = -1 # -1 = unlimited (send all messages)
|
|
52
54
|
|
|
55
|
+
# Default routing metadata (optional; depends on how the Runtime was constructed)
|
|
56
|
+
provider: Optional[str] = None
|
|
57
|
+
model: Optional[str] = None
|
|
58
|
+
|
|
53
59
|
# Model capabilities (populated from LLM client)
|
|
54
60
|
model_capabilities: Dict[str, Any] = field(default_factory=dict)
|
|
55
61
|
|
|
@@ -60,6 +66,11 @@ class RuntimeConfig:
|
|
|
60
66
|
Dict with canonical limit values for storage in RunState.vars["_limits"].
|
|
61
67
|
Uses model_capabilities as fallback for max_tokens if not explicitly set.
|
|
62
68
|
"""
|
|
69
|
+
max_output_tokens = self.max_output_tokens
|
|
70
|
+
if max_output_tokens is None:
|
|
71
|
+
# Best-effort: persist the provider/model default so agent logic can reason about
|
|
72
|
+
# output-size constraints (e.g., chunk large tool arguments like file contents).
|
|
73
|
+
max_output_tokens = self.model_capabilities.get("max_output_tokens")
|
|
63
74
|
return {
|
|
64
75
|
# Iteration control
|
|
65
76
|
"max_iterations": self.max_iterations,
|
|
@@ -67,7 +78,7 @@ class RuntimeConfig:
|
|
|
67
78
|
|
|
68
79
|
# Token management
|
|
69
80
|
"max_tokens": self.max_tokens or self.model_capabilities.get("max_tokens", 32768),
|
|
70
|
-
"max_output_tokens":
|
|
81
|
+
"max_output_tokens": max_output_tokens,
|
|
71
82
|
"estimated_tokens_used": 0,
|
|
72
83
|
|
|
73
84
|
# History management
|
|
@@ -97,5 +108,7 @@ class RuntimeConfig:
|
|
|
97
108
|
max_output_tokens=self.max_output_tokens,
|
|
98
109
|
warn_tokens_pct=self.warn_tokens_pct,
|
|
99
110
|
max_history_messages=self.max_history_messages,
|
|
111
|
+
provider=self.provider,
|
|
112
|
+
model=self.model,
|
|
100
113
|
model_capabilities=capabilities,
|
|
101
114
|
)
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
"""abstractruntime.core.event_keys
|
|
2
|
+
|
|
3
|
+
Durable event key conventions.
|
|
4
|
+
|
|
5
|
+
Why this exists:
|
|
6
|
+
- `WAIT_EVENT` needs a stable `wait_key` that external hosts can compute.
|
|
7
|
+
- Visual editors and other hosts (AbstractCode, servers) must agree on the same
|
|
8
|
+
key format without importing UI-specific code.
|
|
9
|
+
|
|
10
|
+
We keep this module dependency-light (stdlib only).
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
from typing import Optional
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def build_event_wait_key(
|
|
19
|
+
*,
|
|
20
|
+
scope: str,
|
|
21
|
+
name: str,
|
|
22
|
+
session_id: Optional[str] = None,
|
|
23
|
+
workflow_id: Optional[str] = None,
|
|
24
|
+
run_id: Optional[str] = None,
|
|
25
|
+
) -> str:
|
|
26
|
+
"""Build a durable wait_key for event-driven workflows.
|
|
27
|
+
|
|
28
|
+
Format:
|
|
29
|
+
evt:{scope}:{scope_id}:{name}
|
|
30
|
+
|
|
31
|
+
Scopes:
|
|
32
|
+
- session: `scope_id` is the workflow instance/session identifier (recommended default)
|
|
33
|
+
- workflow: `scope_id` is the workflow_id
|
|
34
|
+
- run: `scope_id` is the run_id
|
|
35
|
+
- global: `scope_id` is the literal string "global"
|
|
36
|
+
"""
|
|
37
|
+
scope_norm = str(scope or "session").strip().lower()
|
|
38
|
+
name_norm = str(name or "").strip()
|
|
39
|
+
if not name_norm:
|
|
40
|
+
raise ValueError("event name is required")
|
|
41
|
+
|
|
42
|
+
scope_id: Optional[str]
|
|
43
|
+
if scope_norm == "session":
|
|
44
|
+
scope_id = str(session_id or "").strip() if session_id is not None else ""
|
|
45
|
+
elif scope_norm == "workflow":
|
|
46
|
+
scope_id = str(workflow_id or "").strip() if workflow_id is not None else ""
|
|
47
|
+
elif scope_norm == "run":
|
|
48
|
+
scope_id = str(run_id or "").strip() if run_id is not None else ""
|
|
49
|
+
elif scope_norm == "global":
|
|
50
|
+
scope_id = "global"
|
|
51
|
+
else:
|
|
52
|
+
raise ValueError(f"unknown event scope: {scope!r}")
|
|
53
|
+
|
|
54
|
+
if not scope_id:
|
|
55
|
+
raise ValueError(f"missing scope id for scope={scope_norm!r}")
|
|
56
|
+
|
|
57
|
+
return f"evt:{scope_norm}:{scope_id}:{name_norm}"
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
|
abstractruntime/core/models.py
CHANGED
|
@@ -46,10 +46,22 @@ class EffectType(str, Enum):
|
|
|
46
46
|
WAIT_EVENT = "wait_event"
|
|
47
47
|
WAIT_UNTIL = "wait_until"
|
|
48
48
|
ASK_USER = "ask_user"
|
|
49
|
+
ANSWER_USER = "answer_user"
|
|
50
|
+
|
|
51
|
+
# Eventing
|
|
52
|
+
EMIT_EVENT = "emit_event"
|
|
49
53
|
|
|
50
54
|
# Integrations (implemented via pluggable handlers)
|
|
51
55
|
LLM_CALL = "llm_call"
|
|
52
56
|
TOOL_CALLS = "tool_calls"
|
|
57
|
+
MEMORY_QUERY = "memory_query"
|
|
58
|
+
MEMORY_TAG = "memory_tag"
|
|
59
|
+
MEMORY_COMPACT = "memory_compact"
|
|
60
|
+
MEMORY_NOTE = "memory_note"
|
|
61
|
+
MEMORY_REHYDRATE = "memory_rehydrate"
|
|
62
|
+
|
|
63
|
+
# Debug / inspection (schema-only tools -> runtime effects)
|
|
64
|
+
VARS_QUERY = "vars_query"
|
|
53
65
|
|
|
54
66
|
# Composition
|
|
55
67
|
START_SUBWORKFLOW = "start_subworkflow"
|
|
@@ -279,4 +291,3 @@ class LimitWarning:
|
|
|
279
291
|
def __post_init__(self) -> None:
|
|
280
292
|
if self.maximum > 0:
|
|
281
293
|
self.pct = round(self.current / self.maximum * 100, 1)
|
|
282
|
-
|