AbstractRuntime 0.4.0-py3-none-any.whl → 0.4.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractruntime/__init__.py +76 -1
- abstractruntime/core/config.py +68 -1
- abstractruntime/core/models.py +5 -0
- abstractruntime/core/policy.py +74 -3
- abstractruntime/core/runtime.py +1002 -126
- abstractruntime/core/vars.py +8 -2
- abstractruntime/evidence/recorder.py +1 -1
- abstractruntime/history_bundle.py +772 -0
- abstractruntime/integrations/abstractcore/__init__.py +3 -0
- abstractruntime/integrations/abstractcore/default_tools.py +127 -3
- abstractruntime/integrations/abstractcore/effect_handlers.py +2440 -99
- abstractruntime/integrations/abstractcore/embeddings_client.py +69 -0
- abstractruntime/integrations/abstractcore/factory.py +68 -20
- abstractruntime/integrations/abstractcore/llm_client.py +447 -15
- abstractruntime/integrations/abstractcore/mcp_worker.py +1 -0
- abstractruntime/integrations/abstractcore/session_attachments.py +946 -0
- abstractruntime/integrations/abstractcore/tool_executor.py +31 -10
- abstractruntime/integrations/abstractcore/workspace_scoped_tools.py +561 -0
- abstractruntime/integrations/abstractmemory/__init__.py +3 -0
- abstractruntime/integrations/abstractmemory/effect_handlers.py +946 -0
- abstractruntime/memory/active_context.py +6 -1
- abstractruntime/memory/kg_packets.py +164 -0
- abstractruntime/memory/memact_composer.py +175 -0
- abstractruntime/memory/recall_levels.py +163 -0
- abstractruntime/memory/token_budget.py +86 -0
- abstractruntime/storage/__init__.py +4 -1
- abstractruntime/storage/artifacts.py +158 -30
- abstractruntime/storage/base.py +17 -1
- abstractruntime/storage/commands.py +339 -0
- abstractruntime/storage/in_memory.py +41 -1
- abstractruntime/storage/json_files.py +195 -12
- abstractruntime/storage/observable.py +38 -1
- abstractruntime/storage/offloading.py +433 -0
- abstractruntime/storage/sqlite.py +836 -0
- abstractruntime/visualflow_compiler/__init__.py +29 -0
- abstractruntime/visualflow_compiler/adapters/__init__.py +11 -0
- abstractruntime/visualflow_compiler/adapters/agent_adapter.py +126 -0
- abstractruntime/visualflow_compiler/adapters/context_adapter.py +109 -0
- abstractruntime/visualflow_compiler/adapters/control_adapter.py +615 -0
- abstractruntime/visualflow_compiler/adapters/effect_adapter.py +1051 -0
- abstractruntime/visualflow_compiler/adapters/event_adapter.py +307 -0
- abstractruntime/visualflow_compiler/adapters/function_adapter.py +97 -0
- abstractruntime/visualflow_compiler/adapters/memact_adapter.py +114 -0
- abstractruntime/visualflow_compiler/adapters/subflow_adapter.py +74 -0
- abstractruntime/visualflow_compiler/adapters/variable_adapter.py +316 -0
- abstractruntime/visualflow_compiler/compiler.py +3832 -0
- abstractruntime/visualflow_compiler/flow.py +247 -0
- abstractruntime/visualflow_compiler/visual/__init__.py +13 -0
- abstractruntime/visualflow_compiler/visual/agent_ids.py +29 -0
- abstractruntime/visualflow_compiler/visual/builtins.py +1376 -0
- abstractruntime/visualflow_compiler/visual/code_executor.py +214 -0
- abstractruntime/visualflow_compiler/visual/executor.py +2804 -0
- abstractruntime/visualflow_compiler/visual/models.py +211 -0
- abstractruntime/workflow_bundle/__init__.py +52 -0
- abstractruntime/workflow_bundle/models.py +236 -0
- abstractruntime/workflow_bundle/packer.py +317 -0
- abstractruntime/workflow_bundle/reader.py +87 -0
- abstractruntime/workflow_bundle/registry.py +587 -0
- abstractruntime-0.4.1.dist-info/METADATA +177 -0
- abstractruntime-0.4.1.dist-info/RECORD +86 -0
- abstractruntime-0.4.0.dist-info/METADATA +0 -167
- abstractruntime-0.4.0.dist-info/RECORD +0 -49
- {abstractruntime-0.4.0.dist-info → abstractruntime-0.4.1.dist-info}/WHEEL +0 -0
- {abstractruntime-0.4.0.dist-info → abstractruntime-0.4.1.dist-info}/entry_points.txt +0 -0
- {abstractruntime-0.4.0.dist-info → abstractruntime-0.4.1.dist-info}/licenses/LICENSE +0 -0
abstractruntime/__init__.py
CHANGED
@@ -32,9 +32,27 @@ from .core.policy import (
 from .storage.base import QueryableRunStore
 from .storage.in_memory import InMemoryLedgerStore, InMemoryRunStore
 from .storage.json_files import JsonFileRunStore, JsonlLedgerStore
+from .storage.sqlite import (
+    SqliteCommandCursorStore,
+    SqliteCommandStore,
+    SqliteDatabase,
+    SqliteLedgerStore,
+    SqliteRunStore,
+)
+from .storage.commands import (
+    CommandAppendResult,
+    CommandCursorStore,
+    CommandRecord,
+    CommandStore,
+    InMemoryCommandCursorStore,
+    InMemoryCommandStore,
+    JsonFileCommandCursorStore,
+    JsonlCommandStore,
+)
 from .storage.ledger_chain import HashChainedLedgerStore, verify_ledger_chain
 from .storage.observable import ObservableLedgerStore, ObservableLedgerStoreProtocol
 from .storage.snapshots import Snapshot, SnapshotStore, InMemorySnapshotStore, JsonSnapshotStore
+from .storage.offloading import OffloadingLedgerStore, OffloadingRunStore, offload_large_values
 from .storage.artifacts import (
     Artifact,
     ArtifactMetadata,
@@ -56,6 +74,28 @@ from .scheduler import (
     create_scheduled_runtime,
 )
 from .memory import ActiveContextPolicy, TimeRange
+from .workflow_bundle import (
+    WORKFLOW_BUNDLE_FORMAT_VERSION_V1,
+    InstalledWorkflowBundle,
+    WorkflowBundle,
+    WorkflowBundleEntrypoint,
+    WorkflowBundleError,
+    WorkflowBundleManifest,
+    WorkflowBundleRegistry,
+    WorkflowBundleRegistryError,
+    WorkflowEntrypointRef,
+    default_workflow_bundles_dir,
+    open_workflow_bundle,
+    sanitize_bundle_id,
+    sanitize_bundle_version,
+    workflow_bundle_manifest_from_dict,
+    workflow_bundle_manifest_to_dict,
+)
+from .history_bundle import (
+    RUN_HISTORY_BUNDLE_VERSION_V1,
+    export_run_history_bundle,
+    persist_workflow_snapshot,
+)
 
 __all__ = [
     # Core models
@@ -81,10 +121,26 @@ __all__ = [
     "InMemoryLedgerStore",
     "JsonFileRunStore",
     "JsonlLedgerStore",
+    "SqliteDatabase",
+    "SqliteRunStore",
+    "SqliteLedgerStore",
+    "CommandRecord",
+    "CommandAppendResult",
+    "CommandStore",
+    "CommandCursorStore",
+    "InMemoryCommandStore",
+    "JsonlCommandStore",
+    "InMemoryCommandCursorStore",
+    "JsonFileCommandCursorStore",
+    "SqliteCommandStore",
+    "SqliteCommandCursorStore",
     "HashChainedLedgerStore",
     "verify_ledger_chain",
     "ObservableLedgerStore",
     "ObservableLedgerStoreProtocol",
+    "OffloadingRunStore",
+    "OffloadingLedgerStore",
+    "offload_large_values",
     "Snapshot",
     "SnapshotStore",
     "InMemorySnapshotStore",
@@ -111,5 +167,24 @@ __all__ = [
     # Memory
     "ActiveContextPolicy",
     "TimeRange",
+    # WorkflowBundles (portable distribution unit)
+    "WORKFLOW_BUNDLE_FORMAT_VERSION_V1",
+    "WorkflowBundleError",
+    "WorkflowBundleEntrypoint",
+    "WorkflowBundleManifest",
+    "WorkflowBundle",
+    "InstalledWorkflowBundle",
+    "WorkflowBundleRegistry",
+    "WorkflowBundleRegistryError",
+    "WorkflowEntrypointRef",
+    "default_workflow_bundles_dir",
+    "sanitize_bundle_id",
+    "sanitize_bundle_version",
+    "workflow_bundle_manifest_from_dict",
+    "workflow_bundle_manifest_to_dict",
+    "open_workflow_bundle",
+    # Run history bundle (portable replay)
+    "RUN_HISTORY_BUNDLE_VERSION_V1",
+    "export_run_history_bundle",
+    "persist_workflow_snapshot",
 ]
-
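The export surface above is large, so for orientation: a sketch of the package-level imports that become available in 0.4.1. The names are taken verbatim from the diff; no constructor calls are shown because their signatures are not part of this diff.

from abstractruntime import (
    # SQLite-backed persistence (new in 0.4.1)
    SqliteDatabase, SqliteRunStore, SqliteLedgerStore,
    # Append-only command log plus cursor tracking
    CommandRecord, CommandStore, CommandCursorStore, JsonlCommandStore,
    # Offloading helpers for large stored values
    OffloadingRunStore, OffloadingLedgerStore, offload_large_values,
    # Workflow bundles (portable distribution unit) and run-history export
    WorkflowBundle, WorkflowBundleRegistry, open_workflow_bundle,
    export_run_history_bundle, persist_workflow_snapshot,
)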
abstractruntime/core/config.py
CHANGED
@@ -12,6 +12,13 @@ from __future__ import annotations
 from dataclasses import dataclass, field
 from typing import Any, Dict, Optional
 
+from .vars import DEFAULT_MAX_TOKENS
+
+# Truncation policy: keep mechanisms, but default to disabled.
+# A positive value enables a conservative auto-cap for `max_input_tokens` when callers do not
+# explicitly set an input budget. `-1` disables this cap (no automatic truncation).
+DEFAULT_RECOMMENDED_MAX_INPUT_TOKENS = -1
+
 
 @dataclass(frozen=True)
 class RuntimeConfig:
@@ -47,6 +54,7 @@ class RuntimeConfig:
     # Token/context window management
     max_tokens: Optional[int] = None  # None = query from model capabilities
     max_output_tokens: Optional[int] = None  # None = use provider default
+    max_input_tokens: Optional[int] = None  # None = auto-calculate from max_tokens/max_output_tokens
     warn_tokens_pct: int = 80
 
     # History management
@@ -66,19 +74,77 @@
         Dict with canonical limit values for storage in RunState.vars["_limits"].
         Uses model_capabilities as fallback for max_tokens if not explicitly set.
         """
+        max_tokens = self.max_tokens
+        if max_tokens is None:
+            max_tokens = self.model_capabilities.get("max_tokens")
+        if max_tokens is None:
+            max_tokens = DEFAULT_MAX_TOKENS
+
         max_output_tokens = self.max_output_tokens
         if max_output_tokens is None:
             # Best-effort: persist the provider/model default so agent logic can reason about
             # output-size constraints (e.g., chunk large tool arguments like file contents).
             max_output_tokens = self.model_capabilities.get("max_output_tokens")
+        # If capabilities are unavailable and max_output_tokens is unset, keep it as None
+        # (meaning: provider default). Do not force a conservative output cap here.
+
+        # ADR-0008 alignment:
+        # - max_tokens: total context window size
+        # - max_output_tokens: output budget
+        # - max_input_tokens: explicit or derived input budget (may be smaller than max_tokens-max_output_tokens)
+        #
+        # Constraint: max_input_tokens + max_output_tokens + delta <= max_tokens
+        delta = 256
+        effective_max_input_tokens = self.max_input_tokens
+
+        try:
+            max_tokens_int = int(max_tokens) if max_tokens is not None else None
+        except Exception:
+            max_tokens_int = None
+        try:
+            max_output_int = int(max_output_tokens) if max_output_tokens is not None else None
+        except Exception:
+            max_output_int = None
+
+        if (
+            max_tokens_int is not None
+            and max_tokens_int > 0
+            and max_output_int is not None
+            and max_output_int >= 0
+            and effective_max_input_tokens is not None
+        ):
+            # If callers explicitly set max_input_tokens, clamp it to the context-window constraint.
+            max_allowed_in = max(0, int(max_tokens_int) - int(max_output_int) - int(delta))
+            try:
+                effective_max_input_tokens = int(effective_max_input_tokens)
+            except Exception:
+                effective_max_input_tokens = max_allowed_in
+            if effective_max_input_tokens < 0:
+                effective_max_input_tokens = 0
+            if effective_max_input_tokens > max_allowed_in:
+                effective_max_input_tokens = max_allowed_in
+
+        # Optional conservative auto-cap (disabled by default with -1).
+        if (
+            self.max_input_tokens is None
+            and effective_max_input_tokens is not None
+            and isinstance(DEFAULT_RECOMMENDED_MAX_INPUT_TOKENS, int)
+            and DEFAULT_RECOMMENDED_MAX_INPUT_TOKENS > 0
+        ):
+            try:
+                effective_max_input_tokens = min(int(effective_max_input_tokens), int(DEFAULT_RECOMMENDED_MAX_INPUT_TOKENS))
+            except Exception:
+                pass
+
         return {
             # Iteration control
             "max_iterations": self.max_iterations,
             "current_iteration": 0,
 
             # Token management
-            "max_tokens":
+            "max_tokens": max_tokens,
             "max_output_tokens": max_output_tokens,
+            "max_input_tokens": effective_max_input_tokens,
             "estimated_tokens_used": 0,
 
             # History management
@@ -106,6 +172,7 @@
            warn_iterations_pct=self.warn_iterations_pct,
            max_tokens=self.max_tokens,
            max_output_tokens=self.max_output_tokens,
+           max_input_tokens=self.max_input_tokens,
            warn_tokens_pct=self.warn_tokens_pct,
            max_history_messages=self.max_history_messages,
            provider=self.provider,
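To make the new input-budget arithmetic concrete, a small worked sketch that mirrors the clamping logic added above; the token figures are hypothetical.

# Hypothetical figures: a 32k-context model with a 4k output budget.
max_tokens = 32_000          # total context window
max_output_tokens = 4_000    # output budget
delta = 256                  # safety margin used by the new code
requested_input = 40_000     # caller-supplied max_input_tokens, deliberately too large

# Constraint enforced above: max_input_tokens + max_output_tokens + delta <= max_tokens
max_allowed_in = max(0, max_tokens - max_output_tokens - delta)   # 27_744
effective_input = min(max(requested_input, 0), max_allowed_in)    # clamped to 27_744
assert effective_input + max_output_tokens + delta <= max_tokens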
abstractruntime/core/models.py
CHANGED
@@ -60,6 +60,11 @@ class EffectType(str, Enum):
     MEMORY_NOTE = "memory_note"
     MEMORY_REHYDRATE = "memory_rehydrate"
 
+    # Semantic / KG memory (host-provided handlers)
+    MEMORY_KG_ASSERT = "memory_kg_assert"
+    MEMORY_KG_QUERY = "memory_kg_query"
+    MEMORY_KG_RESOLVE = "memory_kg_resolve"
+
     # Debug / inspection (schema-only tools -> runtime effects)
     VARS_QUERY = "vars_query"
 
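Because EffectType subclasses str (visible in the hunk header), the new knowledge-graph members compare equal to their wire strings and round-trip through JSON payloads; a minimal sketch using only values shown in the diff.

from enum import Enum

# Trimmed reproduction of the relevant members, for illustration only.
class EffectType(str, Enum):
    MEMORY_KG_ASSERT = "memory_kg_assert"
    MEMORY_KG_QUERY = "memory_kg_query"
    MEMORY_KG_RESOLVE = "memory_kg_resolve"

assert EffectType("memory_kg_query") is EffectType.MEMORY_KG_QUERY
assert EffectType.MEMORY_KG_ASSERT == "memory_kg_assert"  # str-valued comparison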
abstractruntime/core/policy.py
CHANGED
@@ -15,7 +15,76 @@ import json
 from dataclasses import dataclass
 from typing import Any, Dict, Optional, Protocol
 
-from .models import Effect, RunState
+from .models import Effect, EffectType, RunState
+
+
+def _loads_dict_like(value: Any) -> Optional[Dict[str, Any]]:
+    if value is None:
+        return None
+    if isinstance(value, dict):
+        return dict(value)
+    if not isinstance(value, str):
+        return None
+    text = value.strip()
+    if not text:
+        return None
+    try:
+        parsed = json.loads(text)
+    except Exception:
+        return None
+    return parsed if isinstance(parsed, dict) else None
+
+
+def _normalize_tool_call_for_idempotency(value: Any) -> Any:
+    if not isinstance(value, dict):
+        return value
+
+    out = dict(value)
+    # Provider/model-emitted IDs are not semantic; remove them from the idempotency hash.
+    for k in ("call_id", "id", "runtime_call_id", "model_call_id", "idempotency_key"):
+        out.pop(k, None)
+
+    name = out.get("name")
+    if isinstance(name, str):
+        out["name"] = name.strip()
+
+    args = out.get("arguments")
+    if isinstance(args, str):
+        parsed = _loads_dict_like(args)
+        out["arguments"] = parsed if isinstance(parsed, dict) else {}
+    elif not isinstance(args, dict):
+        out["arguments"] = {}
+
+    func = out.get("function")
+    if isinstance(func, dict):
+        # Some callers pass OpenAI-style shapes; preserve semantics, but strip IDs.
+        out["function"] = _normalize_tool_call_for_idempotency(func)
+
+    return out
+
+
+def _normalize_effect_payload_for_idempotency(effect: Effect) -> Dict[str, Any]:
+    if not isinstance(effect.payload, dict):
+        return {}
+    payload = dict(effect.payload)
+
+    if effect.type != EffectType.TOOL_CALLS:
+        return payload
+
+    tool_calls = payload.get("tool_calls")
+    if isinstance(tool_calls, list):
+        payload["tool_calls"] = [_normalize_tool_call_for_idempotency(tc) for tc in tool_calls]
+
+    allowed_tools = payload.get("allowed_tools")
+    if isinstance(allowed_tools, list):
+        uniq = {
+            str(t).strip()
+            for t in allowed_tools
+            if isinstance(t, str) and t.strip()
+        }
+        payload["allowed_tools"] = sorted(uniq)
+
+    return payload
 
 
 class EffectPolicy(Protocol):
@@ -110,11 +179,12 @@ class DefaultEffectPolicy:
         This ensures the same effect at the same point in the same run
         gets the same key, enabling deduplication on restart.
         """
+        normalized_payload = _normalize_effect_payload_for_idempotency(effect)
         key_data = {
             "run_id": run.run_id,
             "node_id": node_id,
             "effect_type": effect.type.value,
-            "effect_payload":
+            "effect_payload": normalized_payload,
         }
         key_json = json.dumps(key_data, sort_keys=True, separators=(",", ":"))
         return hashlib.sha256(key_json.encode()).hexdigest()[:32]
@@ -156,11 +226,12 @@ def compute_idempotency_key(
 
     Useful when you need to compute a key without a full policy.
     """
+    normalized_payload = _normalize_effect_payload_for_idempotency(effect)
     key_data = {
         "run_id": run_id,
         "node_id": node_id,
         "effect_type": effect.type.value,
-        "effect_payload":
+        "effect_payload": normalized_payload,
    }
    key_json = json.dumps(key_data, sort_keys=True, separators=(",", ":"))
    return hashlib.sha256(key_json.encode()).hexdigest()[:32]