AbstractRuntime: abstractruntime-0.0.1-py3-none-any.whl → abstractruntime-0.2.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
--- a/abstractruntime/core/__init__.py
+++ b/abstractruntime/core/__init__.py
@@ -1,19 +1,26 @@
  """Core runtime primitives."""
 
- from .models import Effect, EffectType, RunState, RunStatus, StepPlan, WaitReason, WaitState
+ from .config import RuntimeConfig
+ from .models import Effect, EffectType, LimitWarning, RunState, RunStatus, StepPlan, WaitReason, WaitState
  from .runtime import Runtime
  from .spec import WorkflowSpec
+ from .vars import LIMITS, ensure_limits, get_limits
 
  __all__ = [
      "Effect",
      "EffectType",
+     "LimitWarning",
+     "LIMITS",
      "RunState",
      "RunStatus",
+     "Runtime",
+     "RuntimeConfig",
      "StepPlan",
      "WaitReason",
      "WaitState",
      "WorkflowSpec",
-     "Runtime",
+     "ensure_limits",
+     "get_limits",
  ]
 
 
--- /dev/null
+++ b/abstractruntime/core/config.py
@@ -0,0 +1,101 @@
+ """abstractruntime.core.config
+ 
+ Runtime configuration for resource limits and model capabilities.
+ 
+ This module provides a RuntimeConfig dataclass that centralizes configuration
+ for runtime resource limits (iterations, tokens, history) and model capabilities.
+ The config is used to initialize the `_limits` namespace in RunState.vars.
+ """
+ 
+ from __future__ import annotations
+ 
+ from dataclasses import dataclass, field
+ from typing import Any, Dict, Optional
+ 
+ 
+ @dataclass(frozen=True)
+ class RuntimeConfig:
+     """Configuration for runtime resource limits and model capabilities.
+ 
+     This configuration is used by the Runtime to:
+     1. Initialize the `_limits` namespace in RunState.vars when starting a run
+     2. Provide model capability information for resource tracking
+     3. Configure warning thresholds for proactive notifications
+ 
+     Attributes:
+         max_iterations: Maximum number of reasoning iterations (default: 25)
+         warn_iterations_pct: Percentage threshold for iteration warnings (default: 80)
+         max_tokens: Maximum context window tokens (None = use model capabilities)
+         max_output_tokens: Maximum tokens for LLM response (None = provider default)
+         warn_tokens_pct: Percentage threshold for token warnings (default: 80)
+         max_history_messages: Maximum conversation history messages (-1 = unlimited)
+         model_capabilities: Dict of model capabilities from LLM provider
+ 
+     Example:
+         >>> config = RuntimeConfig(max_iterations=50, max_tokens=65536)
+         >>> limits = config.to_limits_dict()
+         >>> limits["max_iterations"]
+         50
+     """
+ 
+     # Iteration control
+     max_iterations: int = 25
+     warn_iterations_pct: int = 80
+ 
+     # Token/context window management
+     max_tokens: Optional[int] = None  # None = query from model capabilities
+     max_output_tokens: Optional[int] = None  # None = use provider default
+     warn_tokens_pct: int = 80
+ 
+     # History management
+     max_history_messages: int = -1  # -1 = unlimited (send all messages)
+ 
+     # Model capabilities (populated from LLM client)
+     model_capabilities: Dict[str, Any] = field(default_factory=dict)
+ 
+     def to_limits_dict(self) -> Dict[str, Any]:
+         """Convert to _limits namespace dict for RunState.vars.
+ 
+         Returns:
+             Dict with canonical limit values for storage in RunState.vars["_limits"].
+             Uses model_capabilities as fallback for max_tokens if not explicitly set.
+         """
+         return {
+             # Iteration control
+             "max_iterations": self.max_iterations,
+             "current_iteration": 0,
+ 
+             # Token management
+             "max_tokens": self.max_tokens or self.model_capabilities.get("max_tokens", 32768),
+             "max_output_tokens": self.max_output_tokens,
+             "estimated_tokens_used": 0,
+ 
+             # History management
+             "max_history_messages": self.max_history_messages,
+ 
+             # Warning thresholds
+             "warn_iterations_pct": self.warn_iterations_pct,
+             "warn_tokens_pct": self.warn_tokens_pct,
+         }
+ 
+     def with_capabilities(self, capabilities: Dict[str, Any]) -> "RuntimeConfig":
+         """Create a new RuntimeConfig with updated model capabilities.
+ 
+         This is useful for merging model capabilities from an LLM client
+         into an existing configuration.
+ 
+         Args:
+             capabilities: Dict of model capabilities (e.g., from get_model_capabilities())
+ 
+         Returns:
+             New RuntimeConfig with merged capabilities
+         """
+         return RuntimeConfig(
+             max_iterations=self.max_iterations,
+             warn_iterations_pct=self.warn_iterations_pct,
+             max_tokens=self.max_tokens,
+             max_output_tokens=self.max_output_tokens,
+             warn_tokens_pct=self.warn_tokens_pct,
+             max_history_messages=self.max_history_messages,
+             model_capabilities=capabilities,
+         )
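
A minimal usage sketch of the new config (not part of the diff): `to_limits_dict()` resolves `max_tokens` by taking the explicit value first, then `model_capabilities`, then the 32768 default.

    from abstractruntime.core.config import RuntimeConfig

    config = RuntimeConfig(max_iterations=50)                  # max_tokens left as None
    config = config.with_capabilities({"max_tokens": 131072})  # e.g. from get_model_capabilities()

    limits = config.to_limits_dict()
    assert limits["max_iterations"] == 50
    assert limits["max_tokens"] == 131072    # falls back to model_capabilities
    assert limits["current_iteration"] == 0  # counters always start at zero
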
--- a/abstractruntime/core/models.py
+++ b/abstractruntime/core/models.py
@@ -130,10 +130,20 @@ class RunState:
 
      # Optional provenance fields
      actor_id: Optional[str] = None
+     session_id: Optional[str] = None
      parent_run_id: Optional[str] = None  # For subworkflow tracking
 
      @classmethod
-     def new(cls, *, workflow_id: str, entry_node: str, actor_id: Optional[str] = None, vars: Optional[Dict[str, Any]] = None, parent_run_id: Optional[str] = None) -> "RunState":
+     def new(
+         cls,
+         *,
+         workflow_id: str,
+         entry_node: str,
+         actor_id: Optional[str] = None,
+         session_id: Optional[str] = None,
+         vars: Optional[Dict[str, Any]] = None,
+         parent_run_id: Optional[str] = None,
+     ) -> "RunState":
          return cls(
              run_id=str(uuid.uuid4()),
              workflow_id=workflow_id,
@@ -141,6 +151,7 @@ class RunState:
              current_node=entry_node,
              vars=vars or {},
              actor_id=actor_id,
+             session_id=session_id,
              parent_run_id=parent_run_id,
          )
 
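
A small sketch of the widened constructor (identifiers are illustrative): `session_id` is keyword-only and defaults to None, so existing callers are unaffected.

    from abstractruntime.core.models import RunState

    run = RunState.new(
        workflow_id="demo",
        entry_node="start",
        actor_id="user-1",
        session_id="sess-42",  # new in 0.2.0
    )
    assert run.session_id == "sess-42"
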
@@ -170,6 +181,7 @@ class StepRecord:
 
      # Optional provenance/integrity
      actor_id: Optional[str] = None
+     session_id: Optional[str] = None
 
      # Retry and idempotency fields
      attempt: int = 1  # Current attempt number (1-indexed)
@@ -201,6 +213,7 @@ class StepRecord:
                  "result_key": effect.result_key,
              } if effect else None,
              actor_id=run.actor_id,
+             session_id=getattr(run, "session_id", None),
              attempt=attempt,
              idempotency_key=idempotency_key,
          )
@@ -237,3 +250,33 @@ class StepRecord:
          return self
 
 
+ @dataclass
+ class LimitWarning:
+     """Warning about approaching or exceeding a runtime limit.
+ 
+     Generated by Runtime.check_limits() to proactively notify about
+     resource constraints before they cause failures.
+ 
+     Attributes:
+         limit_type: Type of limit ("iterations", "tokens", "history")
+         status: Warning status ("warning" at threshold, "exceeded" at limit)
+         current: Current value of the resource
+         maximum: Maximum allowed value
+         pct: Percentage of limit used (computed in __post_init__)
+ 
+     Example:
+         >>> warning = LimitWarning("iterations", "warning", 20, 25)
+         >>> warning.pct
+         80.0
+     """
+ 
+     limit_type: str  # "iterations", "tokens", "history"
+     status: str  # "warning", "exceeded"
+     current: int
+     maximum: int
+     pct: float = 0.0
+ 
+     def __post_init__(self) -> None:
+         if self.maximum > 0:
+             self.pct = round(self.current / self.maximum * 100, 1)
+ 
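
The percentage is derived, never passed in; a quick sketch:

    from abstractruntime.core.models import LimitWarning

    w = LimitWarning("tokens", "warning", current=28000, maximum=32768)
    assert w.pct == 85.4  # computed in __post_init__, rounded to one decimal
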
--- a/abstractruntime/core/runtime.py
+++ b/abstractruntime/core/runtime.py
@@ -23,9 +23,11 @@ from datetime import datetime, timezone
  from typing import Any, Callable, Dict, Optional
  import inspect
 
+ from .config import RuntimeConfig
  from .models import (
      Effect,
      EffectType,
+     LimitWarning,
      RunState,
      RunStatus,
      StepPlan,
@@ -92,6 +94,7 @@ class Runtime:
          workflow_registry: Optional[Any] = None,
          artifact_store: Optional[Any] = None,
          effect_policy: Optional[EffectPolicy] = None,
+         config: Optional[RuntimeConfig] = None,
      ):
          self._run_store = run_store
          self._ledger_store = ledger_store
@@ -99,6 +102,7 @@ class Runtime:
          self._workflow_registry = workflow_registry
          self._artifact_store = artifact_store
          self._effect_policy: EffectPolicy = effect_policy or DefaultEffectPolicy()
+         self._config: RuntimeConfig = config or RuntimeConfig()
 
          self._handlers: Dict[EffectType, EffectHandler] = {}
          self._register_builtin_handlers()
@@ -146,8 +150,33 @@ class Runtime:
          """Set the effect policy for retry and idempotency."""
          self._effect_policy = policy
 
-     def start(self, *, workflow: WorkflowSpec, vars: Optional[Dict[str, Any]] = None, actor_id: Optional[str] = None, parent_run_id: Optional[str] = None) -> str:
-         run = RunState.new(workflow_id=workflow.workflow_id, entry_node=workflow.entry_node, vars=vars, actor_id=actor_id, parent_run_id=parent_run_id)
+     @property
+     def config(self) -> RuntimeConfig:
+         """Access the runtime configuration."""
+         return self._config
+ 
+     def start(
+         self,
+         *,
+         workflow: WorkflowSpec,
+         vars: Optional[Dict[str, Any]] = None,
+         actor_id: Optional[str] = None,
+         session_id: Optional[str] = None,
+         parent_run_id: Optional[str] = None,
+     ) -> str:
+         # Initialize vars with _limits from config if not already set
+         vars = dict(vars or {})
+         if "_limits" not in vars:
+             vars["_limits"] = self._config.to_limits_dict()
+ 
+         run = RunState.new(
+             workflow_id=workflow.workflow_id,
+             entry_node=workflow.entry_node,
+             vars=vars,
+             actor_id=actor_id,
+             session_id=session_id,
+             parent_run_id=parent_run_id,
+         )
          self._run_store.save(run)
          return run.run_id
 
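
How the seeding behaves in practice (sketch; `runtime` and a WorkflowSpec `spec` are assumed from host setup):

    run_id = runtime.start(workflow=spec, vars={"task": "summarize"}, session_id="sess-42")
    state = runtime.get_state(run_id)
    assert state.vars["_limits"]["current_iteration"] == 0  # seeded from RuntimeConfig

    # A caller-provided "_limits" dict is respected, not overwritten:
    run_id2 = runtime.start(workflow=spec, vars={"_limits": {"max_iterations": 5}})
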
@@ -189,6 +218,126 @@ class Runtime:
      def get_ledger(self, run_id: str) -> list[dict[str, Any]]:
          return self._ledger_store.list(run_id)
 
+     # ---------------------------------------------------------------------
+     # Limit Management
+     # ---------------------------------------------------------------------
+ 
+     def get_limit_status(self, run_id: str) -> Dict[str, Any]:
+         """Get current limit status for a run.
+ 
+         Returns a structured dict with information about iterations, tokens,
+         and history limits, including whether warning thresholds are reached.
+ 
+         Args:
+             run_id: The run to check
+ 
+         Returns:
+             Dict with "iterations", "tokens", and "history" status info
+ 
+         Raises:
+             KeyError: If run_id not found
+         """
+         run = self.get_state(run_id)
+         limits = run.vars.get("_limits", {})
+ 
+         def pct(current: int, maximum: int) -> float:
+             return round(current / maximum * 100, 1) if maximum > 0 else 0
+ 
+         current_iter = int(limits.get("current_iteration", 0) or 0)
+         max_iter = int(limits.get("max_iterations", 25) or 25)
+         tokens_used = int(limits.get("estimated_tokens_used", 0) or 0)
+         max_tokens = int(limits.get("max_tokens", 32768) or 32768)
+ 
+         return {
+             "iterations": {
+                 "current": current_iter,
+                 "max": max_iter,
+                 "pct": pct(current_iter, max_iter),
+                 "warning": pct(current_iter, max_iter) >= limits.get("warn_iterations_pct", 80),
+             },
+             "tokens": {
+                 "estimated_used": tokens_used,
+                 "max": max_tokens,
+                 "pct": pct(tokens_used, max_tokens),
+                 "warning": pct(tokens_used, max_tokens) >= limits.get("warn_tokens_pct", 80),
+             },
+             "history": {
+                 "max_messages": limits.get("max_history_messages", -1),
+             },
+         }
+ 
+     def check_limits(self, run: RunState) -> list[LimitWarning]:
+         """Check if any limits are approaching or exceeded.
+ 
+         This is the hybrid enforcement model: the runtime provides warnings,
+         workflow nodes are responsible for enforcement decisions.
+ 
+         Args:
+             run: The RunState to check
+ 
+         Returns:
+             List of LimitWarning objects for any limits at warning threshold or exceeded
+         """
+         warnings: list[LimitWarning] = []
+         limits = run.vars.get("_limits", {})
+ 
+         # Check iterations
+         current = int(limits.get("current_iteration", 0) or 0)
+         max_iter = int(limits.get("max_iterations", 25) or 25)
+         warn_pct = int(limits.get("warn_iterations_pct", 80) or 80)
+ 
+         if max_iter > 0:
+             if current >= max_iter:
+                 warnings.append(LimitWarning("iterations", "exceeded", current, max_iter))
+             elif (current / max_iter * 100) >= warn_pct:
+                 warnings.append(LimitWarning("iterations", "warning", current, max_iter))
+ 
+         # Check tokens
+         tokens_used = int(limits.get("estimated_tokens_used", 0) or 0)
+         max_tokens = int(limits.get("max_tokens", 32768) or 32768)
+         warn_tokens_pct = int(limits.get("warn_tokens_pct", 80) or 80)
+ 
+         if max_tokens > 0 and tokens_used > 0:
+             if tokens_used >= max_tokens:
+                 warnings.append(LimitWarning("tokens", "exceeded", tokens_used, max_tokens))
+             elif (tokens_used / max_tokens * 100) >= warn_tokens_pct:
+                 warnings.append(LimitWarning("tokens", "warning", tokens_used, max_tokens))
+ 
+         return warnings
+ 
+     def update_limits(self, run_id: str, updates: Dict[str, Any]) -> None:
+         """Update limits for a running workflow.
+ 
+         This allows mid-session updates (e.g., from /max-tokens command).
+         Only allowed limit keys are updated; unknown keys are ignored.
+ 
+         Args:
+             run_id: The run to update
+             updates: Dict of limit updates (e.g., {"max_tokens": 65536})
+ 
+         Raises:
+             KeyError: If run_id not found
+         """
+         run = self.get_state(run_id)
+         limits = run.vars.setdefault("_limits", {})
+ 
+         allowed_keys = {
+             "max_iterations",
+             "max_tokens",
+             "max_output_tokens",
+             "max_history_messages",
+             "warn_iterations_pct",
+             "warn_tokens_pct",
+             "estimated_tokens_used",
+             "current_iteration",
+         }
+ 
+         for key, value in updates.items():
+             if key in allowed_keys:
+                 limits[key] = value
+ 
+         self._run_store.save(run)
+ 
      def tick(self, *, workflow: WorkflowSpec, run_id: str, max_steps: int = 100) -> RunState:
          run = self.get_state(run_id)
          if run.status in (RunStatus.COMPLETED, RunStatus.FAILED):
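
A host-side sketch of the hybrid model these methods enable: the runtime reports, the caller decides (again assuming `runtime` and `run_id` from earlier setup):

    state = runtime.get_state(run_id)
    for w in runtime.check_limits(state):
        # w.status is "warning" at the threshold, "exceeded" at or past the limit
        print(f"{w.limit_type}: {w.current}/{w.maximum} ({w.pct}%) -> {w.status}")

    # Mid-session adjustment (e.g. a /max-tokens command); unknown keys are ignored:
    runtime.update_limits(run_id, {"max_tokens": 65536, "not_a_limit": 1})
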
@@ -399,18 +548,25 @@ class Runtime:
          # correct resume semantics for waiting effects without duplicating payload fields.
          try:
              sig = inspect.signature(handler)
+         except (TypeError, ValueError):
+             sig = None
+ 
+         if sig is not None:
              params = list(sig.parameters.values())
              has_varargs = any(p.kind == inspect.Parameter.VAR_POSITIONAL for p in params)
              if has_varargs or len(params) >= 3:
                  return handler(run, effect, default_next_node)
              return handler(run, effect)
-         except Exception:
-             # If signature inspection fails, fall back to attempting the new call form,
-             # then the legacy form.
-             try:
-                 return handler(run, effect, default_next_node)
-             except TypeError:
+ 
+         # If signature inspection fails, fall back to attempting the new call form,
+         # then the legacy form (only for arity-mismatch TypeError).
+         try:
+             return handler(run, effect, default_next_node)
+         except TypeError as e:
+             msg = str(e)
+             if "positional" in msg and "argument" in msg and ("given" in msg or "required" in msg):
                  return handler(run, effect)
+             raise
 
      def _apply_resume_payload(self, run: RunState, *, payload: Dict[str, Any], override_node: Optional[str]) -> None:
          run.status = RunStatus.RUNNING
@@ -515,6 +671,7 @@ class Runtime:
              workflow=sub_workflow,
              vars=sub_vars,
              actor_id=run.actor_id,  # Inherit actor from parent
+             session_id=getattr(run, "session_id", None),  # Inherit session from parent
              parent_run_id=run.run_id,  # Track parent for hierarchy
          )
 
@@ -577,5 +734,3 @@ def _set_nested(target: Dict[str, Any], dotted_key: str, value: Any) -> None:
          cur[p] = nxt
          cur = nxt
      cur[parts[-1]] = value
- 
- 
--- /dev/null
+++ b/abstractruntime/core/vars.py
@@ -0,0 +1,94 @@
+ """RunState.vars namespacing helpers.
+ 
+ AbstractRuntime treats `RunState.vars` as JSON-serializable user/workflow state.
+ To avoid key collisions and to clarify ownership, we use a simple convention:
+ 
+ - `context`: user-facing context (task, conversation, inputs)
+ - `scratchpad`: agent/workflow working memory (iteration counters, plans)
+ - `_runtime`: runtime/host-managed metadata (tool specs, inbox, etc.)
+ - `_temp`: ephemeral step-to-step values (llm_response, tool_results, etc.)
+ - `_limits`: runtime resource limits (max_iterations, max_tokens, etc.)
+ 
+ This is a convention, not a strict schema; helpers here are intentionally small.
+ """
+ 
+ from __future__ import annotations
+ 
+ from typing import Any, Dict
+ 
+ CONTEXT = "context"
+ SCRATCHPAD = "scratchpad"
+ RUNTIME = "_runtime"
+ TEMP = "_temp"
+ LIMITS = "_limits"  # Canonical storage for runtime resource limits
+ 
+ 
+ def ensure_namespaces(vars: Dict[str, Any]) -> Dict[str, Any]:
+     """Ensure the four canonical namespaces exist and are dicts."""
+     for key in (CONTEXT, SCRATCHPAD, RUNTIME, TEMP):
+         current = vars.get(key)
+         if not isinstance(current, dict):
+             vars[key] = {}
+     return vars
+ 
+ 
+ def get_namespace(vars: Dict[str, Any], key: str) -> Dict[str, Any]:
+     ensure_namespaces(vars)
+     return vars[key]  # type: ignore[return-value]
+ 
+ 
+ def get_context(vars: Dict[str, Any]) -> Dict[str, Any]:
+     return get_namespace(vars, CONTEXT)
+ 
+ 
+ def get_scratchpad(vars: Dict[str, Any]) -> Dict[str, Any]:
+     return get_namespace(vars, SCRATCHPAD)
+ 
+ 
+ def get_runtime(vars: Dict[str, Any]) -> Dict[str, Any]:
+     return get_namespace(vars, RUNTIME)
+ 
+ 
+ def get_temp(vars: Dict[str, Any]) -> Dict[str, Any]:
+     return get_namespace(vars, TEMP)
+ 
+ 
+ def clear_temp(vars: Dict[str, Any]) -> None:
+     get_temp(vars).clear()
+ 
+ 
+ def get_limits(vars: Dict[str, Any]) -> Dict[str, Any]:
+     """Get the _limits namespace, creating with defaults if missing."""
+     if LIMITS not in vars or not isinstance(vars.get(LIMITS), dict):
+         vars[LIMITS] = _default_limits()
+     return vars[LIMITS]  # type: ignore[return-value]
+ 
+ 
+ def ensure_limits(vars: Dict[str, Any]) -> Dict[str, Any]:
+     """Ensure _limits namespace exists with defaults.
+ 
+     This is the canonical location for runtime resource limits:
+     - max_iterations / current_iteration: Iteration control
+     - max_tokens / estimated_tokens_used: Token/context window management
+     - max_history_messages: Conversation history limit (-1 = unlimited)
+     - warn_*_pct: Warning thresholds for proactive notifications
+ 
+     Returns:
+         The _limits dict (mutable reference into vars)
+     """
+     return get_limits(vars)
+ 
+ 
+ def _default_limits() -> Dict[str, Any]:
+     """Return default limits dict."""
+     return {
+         "max_iterations": 25,
+         "current_iteration": 0,
+         "max_tokens": 32768,
+         "max_output_tokens": None,
+         "max_history_messages": -1,
+         "estimated_tokens_used": 0,
+         "warn_iterations_pct": 80,
+         "warn_tokens_pct": 80,
+     }
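
The helpers in use (sketch):

    from abstractruntime.core.vars import LIMITS, clear_temp, get_limits, get_temp

    vars: dict = {}
    get_temp(vars)["llm_response"] = {"content": "hi"}
    limits = get_limits(vars)          # created with defaults on first access
    limits["current_iteration"] += 1

    clear_temp(vars)                   # _temp is ephemeral between steps
    assert vars[LIMITS]["current_iteration"] == 1
    assert vars["_temp"] == {}
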
--- a/abstractruntime/integrations/abstractcore/__init__.py
+++ b/abstractruntime/integrations/abstractcore/__init__.py
@@ -7,16 +7,18 @@ Provides:
  - Tool executors (executed + passthrough)
  - Effect handlers wiring
  - Convenience runtime factories for local/remote/hybrid modes
+ - RuntimeConfig for limits and model capabilities
 
  Importing this module is the explicit opt-in to an AbstractCore dependency.
  """
 
+ from ...core.config import RuntimeConfig
  from .llm_client import (
      AbstractCoreLLMClient,
      LocalAbstractCoreLLMClient,
      RemoteAbstractCoreLLMClient,
  )
- from .tool_executor import AbstractCoreToolExecutor, PassthroughToolExecutor, ToolExecutor
+ from .tool_executor import AbstractCoreToolExecutor, MappingToolExecutor, PassthroughToolExecutor, ToolExecutor
  from .effect_handlers import build_effect_handlers
  from .factory import (
      create_hybrid_runtime,
@@ -30,9 +32,12 @@ __all__ = [
      "AbstractCoreLLMClient",
      "LocalAbstractCoreLLMClient",
      "RemoteAbstractCoreLLMClient",
+     "RuntimeConfig",
      "ToolExecutor",
+     "MappingToolExecutor",
      "AbstractCoreToolExecutor",
      "PassthroughToolExecutor",
+ 
      "build_effect_handlers",
      "create_local_runtime",
      "create_remote_runtime",
@@ -40,4 +45,3 @@ __all__ = [
      "create_local_file_runtime",
      "create_remote_file_runtime",
  ]
- 
--- a/abstractruntime/integrations/abstractcore/effect_handlers.py
+++ b/abstractruntime/integrations/abstractcore/effect_handlers.py
@@ -22,6 +22,18 @@ from .logging import get_logger
  logger = get_logger(__name__)
 
 
+ def _trace_context(run: RunState) -> Dict[str, str]:
+     ctx: Dict[str, str] = {"run_id": run.run_id}
+     if run.actor_id:
+         ctx["actor_id"] = str(run.actor_id)
+     session_id = getattr(run, "session_id", None)
+     if session_id:
+         ctx["session_id"] = str(session_id)
+     if run.parent_run_id:
+         ctx["parent_run_id"] = str(run.parent_run_id)
+     return ctx
+ 
+ 
  def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
      def _handler(run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
          payload = dict(effect.payload or {})
@@ -29,7 +41,15 @@ def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
          messages = payload.get("messages")
          system_prompt = payload.get("system_prompt")
          tools = payload.get("tools")
-         params = payload.get("params")
+         raw_params = payload.get("params")
+         params = dict(raw_params) if isinstance(raw_params, dict) else {}
+ 
+         # Propagate durable trace context into AbstractCore calls.
+         trace_metadata = params.get("trace_metadata")
+         if not isinstance(trace_metadata, dict):
+             trace_metadata = {}
+         trace_metadata.update(_trace_context(run))
+         params["trace_metadata"] = trace_metadata
 
          if not prompt and not messages:
              return EffectOutcome.failed("llm_call requires payload.prompt or payload.messages")
@@ -50,13 +70,24 @@ def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
      return _handler
 
 
- def make_tool_calls_handler(*, tools: ToolExecutor) -> EffectHandler:
+ def make_tool_calls_handler(*, tools: Optional[ToolExecutor] = None) -> EffectHandler:
+     """Create a TOOL_CALLS effect handler.
+ 
+     Tool execution is performed exclusively via the host-configured ToolExecutor.
+     This keeps `RunState.vars` and ledger payloads JSON-safe (durable execution).
+     """
      def _handler(run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
          payload = dict(effect.payload or {})
          tool_calls = payload.get("tool_calls")
          if not isinstance(tool_calls, list):
              return EffectOutcome.failed("tool_calls requires payload.tool_calls (list)")
 
+         if tools is None:
+             return EffectOutcome.failed(
+                 "TOOL_CALLS requires a ToolExecutor; configure Runtime with "
+                 "MappingToolExecutor/AbstractCoreToolExecutor/PassthroughToolExecutor."
+             )
+ 
          try:
              result = tools.execute(tool_calls=tool_calls)
          except Exception as e:
@@ -81,9 +112,8 @@ def make_tool_calls_handler(*, tools: ToolExecutor) -> EffectHandler:
      return _handler
 
 
- def build_effect_handlers(*, llm: AbstractCoreLLMClient, tools: ToolExecutor) -> Dict[EffectType, Any]:
+ def build_effect_handlers(*, llm: AbstractCoreLLMClient, tools: ToolExecutor = None) -> Dict[EffectType, Any]:
      return {
          EffectType.LLM_CALL: make_llm_call_handler(llm=llm),
          EffectType.TOOL_CALLS: make_tool_calls_handler(tools=tools),
      }
- 
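
Net effect for hosts (sketch; `llm_client` is any of the clients from llm_client.py further below):

    from abstractruntime.integrations.abstractcore import build_effect_handlers

    handlers = build_effect_handlers(llm=llm_client)  # tools deliberately omitted
    # - every LLM_CALL payload now gains params["trace_metadata"] with
    #   run_id / actor_id / session_id / parent_run_id taken from the run
    # - a TOOL_CALLS effect without an executor fails with a descriptive
    #   EffectOutcome instead of raising at dispatch time
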
--- a/abstractruntime/integrations/abstractcore/factory.py
+++ b/abstractruntime/integrations/abstractcore/factory.py
@@ -15,6 +15,7 @@ from __future__ import annotations
  from pathlib import Path
  from typing import Any, Dict, Optional
 
+ from ...core.config import RuntimeConfig
  from ...core.runtime import Runtime
  from ...storage.in_memory import InMemoryLedgerStore, InMemoryRunStore
  from ...storage.json_files import JsonFileRunStore, JsonlLedgerStore
@@ -45,7 +46,27 @@ def create_local_runtime(
      tool_executor: Optional[ToolExecutor] = None,
      context: Optional[Any] = None,
      effect_policy: Optional[Any] = None,
+     config: Optional[RuntimeConfig] = None,
  ) -> Runtime:
+     """Create a runtime with local LLM execution via AbstractCore.
+ 
+     Args:
+         provider: LLM provider (e.g., "ollama", "openai")
+         model: Model name
+         llm_kwargs: Additional kwargs for LLM client
+         run_store: Storage for run state (default: in-memory)
+         ledger_store: Storage for ledger (default: in-memory)
+         tool_executor: Optional custom tool executor. If not provided, defaults
+             to `AbstractCoreToolExecutor()` (AbstractCore global tool registry).
+         context: Optional context object
+         effect_policy: Optional effect policy (retry, etc.)
+         config: Optional RuntimeConfig for limits and model capabilities.
+             If not provided, model capabilities are queried from the LLM client.
+ 
+     Note:
+         For durable execution, tool callables should never be stored in `RunState.vars`
+         or passed in effect payloads. Prefer `MappingToolExecutor.from_tools([...])`.
+     """
      if run_store is None or ledger_store is None:
          run_store, ledger_store = _default_in_memory_stores()
 
@@ -53,7 +74,22 @@ def create_local_runtime(
      tools = tool_executor or AbstractCoreToolExecutor()
      handlers = build_effect_handlers(llm=llm_client, tools=tools)
 
-     return Runtime(run_store=run_store, ledger_store=ledger_store, effect_handlers=handlers, context=context, effect_policy=effect_policy)
+     # Query model capabilities and merge into config
+     capabilities = llm_client.get_model_capabilities()
+     if config is None:
+         config = RuntimeConfig(model_capabilities=capabilities)
+     else:
+         # Merge capabilities into provided config
+         config = config.with_capabilities(capabilities)
+ 
+     return Runtime(
+         run_store=run_store,
+         ledger_store=ledger_store,
+         effect_handlers=handlers,
+         context=context,
+         effect_policy=effect_policy,
+         config=config,
+     )
 
 
  def create_remote_runtime(
@@ -116,6 +152,7 @@ def create_local_file_runtime(
      model: str,
      llm_kwargs: Optional[Dict[str, Any]] = None,
      context: Optional[Any] = None,
+     config: Optional[RuntimeConfig] = None,
  ) -> Runtime:
      run_store, ledger_store = _default_file_stores(base_dir=base_dir)
      return create_local_runtime(
@@ -125,6 +162,7 @@ def create_local_file_runtime(
          run_store=run_store,
          ledger_store=ledger_store,
          context=context,
+         config=config,
      )
 
 
@@ -147,4 +185,3 @@ def create_remote_file_runtime(
          ledger_store=ledger_store,
          context=context,
      )
- 
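
Putting the factory changes together (sketch; provider and model names are illustrative):

    from abstractruntime.integrations.abstractcore import (
        MappingToolExecutor,
        RuntimeConfig,
        create_local_runtime,
    )

    def read_file(path: str) -> str:
        with open(path, "r", encoding="utf-8") as f:
            return f.read()

    runtime = create_local_runtime(
        provider="ollama",
        model="llama3",                                           # illustrative
        tool_executor=MappingToolExecutor.from_tools([read_file]),
        config=RuntimeConfig(max_iterations=50),                  # capabilities merged in
    )
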
--- a/abstractruntime/integrations/abstractcore/llm_client.py
+++ b/abstractruntime/integrations/abstractcore/llm_client.py
@@ -13,14 +13,21 @@ Remote mode is the preferred way to support per-request dynamic routing (e.g. `b
 
  from __future__ import annotations
 
- from dataclasses import asdict, is_dataclass
- from typing import Any, Dict, List, Optional, Protocol
+ import json
+ from dataclasses import asdict, dataclass, is_dataclass
+ from typing import Any, Dict, List, Optional, Protocol, Tuple
 
  from .logging import get_logger
 
  logger = get_logger(__name__)
 
 
+ @dataclass(frozen=True)
+ class HttpResponse:
+     body: Dict[str, Any]
+     headers: Dict[str, str]
+ 
+ 
  class RequestSender(Protocol):
      def post(
          self,
@@ -29,7 +36,7 @@ class RequestSender(Protocol):
          headers: Dict[str, str],
          json: Dict[str, Any],
          timeout: float,
-     ) -> Dict[str, Any]: ...
+     ) -> Any: ...
 
 
  class AbstractCoreLLMClient(Protocol):
@@ -81,7 +88,12 @@ def _normalize_local_response(resp: Any) -> Dict[str, Any]:
 
      # Dict-like already
      if isinstance(resp, dict):
-         return _jsonable(resp)
+         out = _jsonable(resp)
+         if isinstance(out, dict):
+             meta = out.get("metadata")
+             if isinstance(meta, dict) and "trace_id" in meta and "trace_id" not in out:
+                 out["trace_id"] = meta["trace_id"]
+         return out
 
      # Pydantic structured output
      if hasattr(resp, "model_dump") or hasattr(resp, "dict"):
@@ -92,6 +104,8 @@ def _normalize_local_response(resp: Any) -> Dict[str, Any]:
              "usage": None,
              "model": None,
              "finish_reason": None,
+             "metadata": None,
+             "trace_id": None,
          }
 
      # AbstractCore GenerateResponse
@@ -100,6 +114,12 @@ def _normalize_local_response(resp: Any) -> Dict[str, Any]:
      usage = getattr(resp, "usage", None)
      model = getattr(resp, "model", None)
      finish_reason = getattr(resp, "finish_reason", None)
+     metadata = getattr(resp, "metadata", None)
+     trace_id: Optional[str] = None
+     if isinstance(metadata, dict):
+         raw = metadata.get("trace_id")
+         if raw is not None:
+             trace_id = str(raw)
 
      return {
          "content": content,
@@ -108,6 +128,8 @@ def _normalize_local_response(resp: Any) -> Dict[str, Any]:
          "usage": _jsonable(usage) if usage is not None else None,
          "model": model,
          "finish_reason": finish_reason,
+         "metadata": _jsonable(metadata) if metadata is not None else None,
+         "trace_id": trace_id,
      }
 
 
@@ -126,7 +148,11 @@ class LocalAbstractCoreLLMClient:
 
          self._provider = provider
          self._model = model
-         self._llm = create_llm(provider, model=model, **(llm_kwargs or {}))
+         kwargs = dict(llm_kwargs or {})
+         kwargs.setdefault("enable_tracing", True)
+         if kwargs.get("enable_tracing"):
+             kwargs.setdefault("max_traces", 0)
+         self._llm = create_llm(provider, model=model, **kwargs)
          self._tool_handler = UniversalToolHandler(model)
 
      def generate(
@@ -144,41 +170,75 @@ class LocalAbstractCoreLLMClient:
          # do not create new providers per call unless the host explicitly chooses to.
          params.pop("base_url", None)
 
-         # If tools provided, use UniversalToolHandler to format them into prompt
-         # This works for models without native tool support
-         effective_prompt = prompt
-         if tools:
+         capabilities: List[str] = []
+         get_capabilities = getattr(self._llm, "get_capabilities", None)
+         if callable(get_capabilities):
+             try:
+                 capabilities = list(get_capabilities())
+             except Exception:
+                 capabilities = []
+         supports_tools = "tools" in set(c.lower() for c in capabilities)
+ 
+         if tools and not supports_tools:
+             # Fallback tool calling via prompting for providers/models without native tool support.
              from abstractcore.tools import ToolDefinition
-             tool_defs = []
-             for t in tools:
-                 tool_defs.append(ToolDefinition(
+ 
+             tool_defs = [
+                 ToolDefinition(
                      name=t.get("name", ""),
                      description=t.get("description", ""),
                      parameters=t.get("parameters", {}),
-                 ))
+                 )
+                 for t in tools
+             ]
              tools_prompt = self._tool_handler.format_tools_prompt(tool_defs)
              effective_prompt = f"{tools_prompt}\n\nUser request: {prompt}"
 
+             resp = self._llm.generate(
+                 prompt=effective_prompt,
+                 messages=messages,
+                 system_prompt=system_prompt,
+                 stream=False,
+                 **params,
+             )
+             result = _normalize_local_response(resp)
+ 
+             # Parse tool calls from response content.
+             if result.get("content"):
+                 parsed = self._tool_handler.parse_response(result["content"], mode="prompted")
+                 if parsed.tool_calls:
+                     result["tool_calls"] = [
+                         {"name": tc.name, "arguments": tc.arguments, "call_id": tc.call_id}
+                         for tc in parsed.tool_calls
+                     ]
+             return result
+ 
          resp = self._llm.generate(
-             prompt=effective_prompt,
+             prompt=str(prompt or ""),
              messages=messages,
              system_prompt=system_prompt,
+             tools=tools,
              stream=False,
              **params,
          )
- 
-         result = _normalize_local_response(resp)
- 
-         # Parse tool calls from response if tools were provided
-         if tools and result.get("content"):
-             parsed = self._tool_handler.parse_response(result["content"], mode="prompted")
-             if parsed.tool_calls:
-                 result["tool_calls"] = [
-                     {"name": tc.name, "arguments": tc.arguments, "call_id": tc.call_id}
-                     for tc in parsed.tool_calls
-                 ]
- 
-         return result
+         return _normalize_local_response(resp)
+ 
+     def get_model_capabilities(self) -> Dict[str, Any]:
+         """Get model capabilities including max_tokens, vision_support, etc.
+ 
+         Uses AbstractCore's architecture detection system to query model limits
+         and features. This allows the runtime to be aware of model constraints
+         for resource tracking and warnings.
+ 
+         Returns:
+             Dict with model capabilities. Always includes 'max_tokens' (default 32768).
+         """
+         try:
+             from abstractcore.architectures.detection import get_model_capabilities
+             return get_model_capabilities(self._model)
+         except Exception:
+             # Safe fallback if detection fails
+             return {"max_tokens": 32768}
 
 
  class HttpxRequestSender:
@@ -196,10 +256,28 @@ class HttpxRequestSender:
          headers: Dict[str, str],
          json: Dict[str, Any],
          timeout: float,
-     ) -> Dict[str, Any]:
+     ) -> HttpResponse:
          resp = self._httpx.post(url, headers=headers, json=json, timeout=timeout)
          resp.raise_for_status()
-         return resp.json()
+         return HttpResponse(body=resp.json(), headers=dict(resp.headers))
+ 
+ 
+ def _unwrap_http_response(value: Any) -> Tuple[Dict[str, Any], Dict[str, str]]:
+     if isinstance(value, dict):
+         return value, {}
+     body = getattr(value, "body", None)
+     headers = getattr(value, "headers", None)
+     if isinstance(body, dict) and isinstance(headers, dict):
+         return body, headers
+     json_fn = getattr(value, "json", None)
+     hdrs = getattr(value, "headers", None)
+     if callable(json_fn) and hdrs is not None:
+         try:
+             payload = json_fn()
+         except Exception:
+             payload = {}
+         return payload if isinstance(payload, dict) else {"data": _jsonable(payload)}, dict(hdrs)
+     return {"data": _jsonable(value)}, {}
 
 
  class RemoteAbstractCoreLLMClient:
@@ -230,6 +308,23 @@ class RemoteAbstractCoreLLMClient:
          params: Optional[Dict[str, Any]] = None,
      ) -> Dict[str, Any]:
          params = dict(params or {})
+         req_headers = dict(self._headers)
+ 
+         trace_metadata = params.pop("trace_metadata", None)
+         if isinstance(trace_metadata, dict) and trace_metadata:
+             req_headers["X-AbstractCore-Trace-Metadata"] = json.dumps(
+                 trace_metadata, ensure_ascii=False, separators=(",", ":")
+             )
+             header_map = {
+                 "actor_id": "X-AbstractCore-Actor-Id",
+                 "session_id": "X-AbstractCore-Session-Id",
+                 "run_id": "X-AbstractCore-Run-Id",
+                 "parent_run_id": "X-AbstractCore-Parent-Run-Id",
+             }
+             for key, header in header_map.items():
+                 val = trace_metadata.get(key)
+                 if val is not None and header not in req_headers:
+                     req_headers[header] = str(val)
 
          # Build OpenAI-like messages for AbstractCore server.
          out_messages: List[Dict[str, str]] = []
@@ -268,7 +363,10 @@ class RemoteAbstractCoreLLMClient:
          if tools:
              body["tools"] = tools
          url = f"{self._server_base_url}/v1/chat/completions"
-         resp = self._sender.post(url, headers=self._headers, json=body, timeout=self._timeout_s)
+         raw = self._sender.post(url, headers=req_headers, json=body, timeout=self._timeout_s)
+         resp, resp_headers = _unwrap_http_response(raw)
+         lower_headers = {str(k).lower(): str(v) for k, v in resp_headers.items()}
+         trace_id = lower_headers.get("x-abstractcore-trace-id") or lower_headers.get("x-trace-id")
 
          # Normalize OpenAI-like response.
          try:
@@ -281,6 +379,8 @@ class RemoteAbstractCoreLLMClient:
                  "usage": _jsonable(resp.get("usage")) if resp.get("usage") is not None else None,
                  "model": resp.get("model"),
                  "finish_reason": choice0.get("finish_reason"),
+                 "metadata": {"trace_id": trace_id} if trace_id else None,
+                 "trace_id": trace_id,
              }
          except Exception:
              # Fallback: return the raw response in JSON-safe form.
@@ -292,5 +392,6 @@ class RemoteAbstractCoreLLMClient:
              "usage": None,
              "model": resp.get("model") if isinstance(resp, dict) else None,
              "finish_reason": None,
+             "metadata": {"trace_id": trace_id} if trace_id else None,
+             "trace_id": trace_id,
          }
- 
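
The `RequestSender` protocol now returns `Any` so senders can surface response headers. A test-double sketch (hypothetical class, written against the names in this module) that `_unwrap_http_response` accepts:

    class RecordingSender:
        """Captures outgoing headers; returns body + headers like HttpxRequestSender."""

        def __init__(self) -> None:
            self.last_headers: Dict[str, str] = {}

        def post(self, url: str, *, headers: Dict[str, str], json: Dict[str, Any], timeout: float) -> HttpResponse:
            self.last_headers = dict(headers)  # e.g. X-AbstractCore-Trace-Metadata
            return HttpResponse(
                body={"choices": [{"message": {"content": "ok"}}], "model": "m"},
                headers={"X-AbstractCore-Trace-Id": "trace-123"},
            )

After a `generate()` call carrying `params["trace_metadata"]`, `last_headers` would contain the trace headers and the normalized result would carry `trace_id == "trace-123"`.
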
--- a/abstractruntime/integrations/abstractcore/tool_executor.py
+++ b/abstractruntime/integrations/abstractcore/tool_executor.py
@@ -13,7 +13,7 @@ pause until the host resumes with the tool results.
  from __future__ import annotations
 
  from dataclasses import asdict, is_dataclass
- from typing import Any, Dict, List, Optional, Protocol
+ from typing import Any, Callable, Dict, List, Optional, Protocol, Sequence
 
  from .logging import get_logger
 
@@ -24,6 +24,85 @@ class ToolExecutor(Protocol):
      def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]: ...
 
 
+ class MappingToolExecutor:
+     """Executes tool calls using an explicit {tool_name -> callable} mapping.
+ 
+     This is the recommended durable execution path: the mapping is held by the
+     host/runtime process and is never persisted inside RunState.
+     """
+ 
+     def __init__(self, tool_map: Dict[str, Callable[..., Any]]):
+         self._tool_map = dict(tool_map)
+ 
+     @classmethod
+     def from_tools(cls, tools: Sequence[Callable[..., Any]]) -> "MappingToolExecutor":
+         tool_map: Dict[str, Callable[..., Any]] = {}
+         for t in tools:
+             tool_def = getattr(t, "_tool_definition", None)
+             if tool_def is not None:
+                 name = str(getattr(tool_def, "name", "") or "")
+                 func = getattr(tool_def, "function", None) or t
+             else:
+                 name = str(getattr(t, "__name__", "") or "")
+                 func = t
+ 
+             if not name:
+                 raise ValueError("Tool is missing a name")
+             if not callable(func):
+                 raise ValueError(f"Tool '{name}' is not callable")
+             if name in tool_map:
+                 raise ValueError(f"Duplicate tool name '{name}'")
+ 
+             tool_map[name] = func
+ 
+         return cls(tool_map)
+ 
+     def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
+         results: List[Dict[str, Any]] = []
+ 
+         for tc in tool_calls:
+             name = str(tc.get("name", "") or "")
+             arguments = dict(tc.get("arguments") or {})
+             call_id = str(tc.get("call_id") or "")
+ 
+             func = self._tool_map.get(name)
+             if func is None:
+                 results.append(
+                     {
+                         "call_id": call_id,
+                         "name": name,
+                         "success": False,
+                         "output": None,
+                         "error": f"Tool '{name}' not found",
+                     }
+                 )
+                 continue
+ 
+             try:
+                 output = func(**arguments)
+                 results.append(
+                     {
+                         "call_id": call_id,
+                         "name": name,
+                         "success": True,
+                         "output": _jsonable(output),
+                         "error": None,
+                     }
+                 )
+             except Exception as e:
+                 results.append(
+                     {
+                         "call_id": call_id,
+                         "name": name,
+                         "success": False,
+                         "output": None,
+                         "error": str(e),
+                     }
+                 )
+ 
+         return {"mode": "executed", "results": results}
+ 
+ 
  def _jsonable(value: Any) -> Any:
      if value is None:
          return None
@@ -65,10 +144,11 @@ class AbstractCoreToolExecutor:
 
          results = execute_tools(calls)
          normalized = []
-         for r in results:
+         for call, r in zip(calls, results):
              normalized.append(
                  {
                      "call_id": getattr(r, "call_id", ""),
+                     "name": getattr(call, "name", ""),
                      "success": bool(getattr(r, "success", False)),
                      "output": _jsonable(getattr(r, "output", None)),
                      "error": getattr(r, "error", None),
@@ -86,4 +166,3 @@ class PassthroughToolExecutor:
 
      def execute(self, *, tool_calls: List[Dict[str, Any]]) -> Dict[str, Any]:
          return {"mode": self._mode, "tool_calls": _jsonable(tool_calls)}
- 
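
Per-call error isolation in practice (sketch):

    from abstractruntime.integrations.abstractcore import MappingToolExecutor

    def add(a: int, b: int) -> int:
        return a + b

    executor = MappingToolExecutor({"add": add})  # or MappingToolExecutor.from_tools([add])
    out = executor.execute(tool_calls=[
        {"call_id": "1", "name": "add", "arguments": {"a": 2, "b": 3}},
        {"call_id": "2", "name": "nope", "arguments": {}},
    ])
    assert out["mode"] == "executed"
    assert out["results"][0]["output"] == 5
    assert out["results"][1]["success"] is False  # unknown tool -> error result, no exception
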
--- a/abstractruntime/storage/artifacts.py
+++ b/abstractruntime/storage/artifacts.py
@@ -221,6 +221,37 @@ class ArtifactStore(ABC):
              count += 1
          return count
 
+     def search(
+         self,
+         *,
+         run_id: Optional[str] = None,
+         content_type: Optional[str] = None,
+         tags: Optional[Dict[str, str]] = None,
+         limit: int = 1000,
+     ) -> List[ArtifactMetadata]:
+         """Filter artifacts by simple metadata fields.
+ 
+         This is intentionally a *metadata filter*, not semantic search. Semantic/embedding
+         retrieval belongs in AbstractMemory or higher-level components.
+         """
+         if run_id is None:
+             candidates = list(self.list_all(limit=limit))
+         else:
+             candidates = list(self.list_by_run(run_id))
+ 
+         if content_type is not None:
+             candidates = [m for m in candidates if m.content_type == content_type]
+ 
+         if tags:
+             candidates = [
+                 m
+                 for m in candidates
+                 if all((m.tags or {}).get(k) == v for k, v in tags.items())
+             ]
+ 
+         candidates.sort(key=lambda m: m.created_at, reverse=True)
+         return candidates[:limit]
+ 
      # Convenience methods
 
      def store_text(
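
Call shape (sketch; `store` is any concrete ArtifactStore and `run_id` comes from an existing run):

    reports = store.search(
        run_id=run_id,
        content_type="text/markdown",  # exact match on content_type
        tags={"kind": "report"},       # every given tag must match
        limit=10,
    )
    # Results are newest-first (sorted by created_at, descending).
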
--- a/abstractruntime/storage/json_files.py
+++ b/abstractruntime/storage/json_files.py
@@ -88,6 +88,7 @@ class JsonFileRunStore(RunStore):
              created_at=data.get("created_at"),
              updated_at=data.get("updated_at"),
              actor_id=data.get("actor_id"),
+             session_id=data.get("session_id"),
              parent_run_id=data.get("parent_run_id"),
          )
 
@@ -205,4 +206,3 @@ class JsonlLedgerStore(LedgerStore):
              out.append(json.loads(line))
          return out
 
- 
--- a/abstractruntime-0.0.1.dist-info/METADATA
+++ b/abstractruntime-0.2.0.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: AbstractRuntime
- Version: 0.0.1
+ Version: 0.2.0
  Summary: AbstractRuntime: a durable graph runner designed to pair with AbstractCore.
  Project-URL: AbstractCore (website), https://www.abstractcore.ai/
  Project-URL: AbstractCore (GitHub), https://github.com/lpalbou/abstractruntime
--- a/abstractruntime-0.0.1.dist-info/RECORD
+++ b/abstractruntime-0.2.0.dist-info/RECORD
@@ -1,30 +1,32 @@
  abstractruntime/__init__.py,sha256=oobR5VlwZgxJKWr7hmjwVEGgYakHQt4oJNrdxcj6Nsc,2547
- abstractruntime/core/__init__.py,sha256=X15NaswmuhT9DcGksOkbmJ2cFcWsBCUH4s-Hx_nHnBo,353
- abstractruntime/core/models.py,sha256=7Qvosfc91ylHeCFt1SrEQFfHZp8Gnl2aq5cdfprvLj4,6923
+ abstractruntime/core/__init__.py,sha256=msUcfYjAwjkiEgvi-twteo1H11oBclYJFXqYVWlf8JQ,547
+ abstractruntime/core/config.py,sha256=dQnWCpGL5fFV2y_KOhTjLVu5fzhcOfUnbLjd3uMcRCs,3922
+ abstractruntime/core/models.py,sha256=JmuDpM8jjzcr6lc0yv7oBTSrzCtID4Pz21eVDDcQnKk,8142
  abstractruntime/core/policy.py,sha256=C8tmxaY8YCTvs8_5-5Ns6tsFdCVE_G2vHBOiEckeg9Y,5115
- abstractruntime/core/runtime.py,sha256=U2GgFJry7fJYXS4Lu4460BGwJal-Ak1zaBakYCJYb0Y,22170
+ abstractruntime/core/runtime.py,sha256=ZPERZuvtVCBEhWPKXTO2K_7mwnBZd1FAjubp5uiV-Ck,27653
  abstractruntime/core/spec.py,sha256=SBxAFaaIe9eJTRDMEgDub-GaF2McHTDkEgIl3L-DXbE,1461
+ abstractruntime/core/vars.py,sha256=ghe9WkjlOuVbIgN86V2RXRVqpd0D9qNmoBz_Xf30hqw,2995
  abstractruntime/identity/__init__.py,sha256=aV_aA6lfqsIQMPE2S0B0AKi0rnb-_vmKYpgv1wWoaq8,119
  abstractruntime/identity/fingerprint.py,sha256=axQFEHSJFsrYGNkikkfEVhNNQxdR8wBmv4TuVbTs0lM,1748
  abstractruntime/integrations/__init__.py,sha256=CnhKNxeT-rCeJRURWOXT8YBZ7HJPOESJROV5cnEwJoQ,408
- abstractruntime/integrations/abstractcore/__init__.py,sha256=BRoXxUGJYAVYu8zI3_1m21K01yvDE1ZTS4p-23kEl0w,1142
- abstractruntime/integrations/abstractcore/effect_handlers.py,sha256=_5NRvnx7UIrtw4BuWBEY0PBEuLALdCqIGdbl4HSPsd4,3221
- abstractruntime/integrations/abstractcore/factory.py,sha256=OQP77JFJmYKhL2rMWqgAp66S_VCnESr0wE0O9cO5WxI,4857
- abstractruntime/integrations/abstractcore/llm_client.py,sha256=jTyNfKWnbIqLMA6OOt51biZ0tTzlqG_fv4de9kUdINs,9440
+ abstractruntime/integrations/abstractcore/__init__.py,sha256=txcjiJD7ETRyQMjQQ8zeSrlxpAHAsRjObU7fGLIoNbg,1302
+ abstractruntime/integrations/abstractcore/effect_handlers.py,sha256=Zwl5-v6HKgdCUdfuiXOCU3LphKsAL_LxfdSIG4PKudg,4467
+ abstractruntime/integrations/abstractcore/factory.py,sha256=r7ZtojoDLObe16gKsp0h04EliVAlbFl8aeXcuw6o9sw,6339
+ abstractruntime/integrations/abstractcore/llm_client.py,sha256=KA-GDb8yhcOA93tbNugrtdOPITJQYPsds5IFLe6JdIk,13654
  abstractruntime/integrations/abstractcore/logging.py,sha256=iYmibudvLXs83hhF-dpbgEoyUdzTo8tnT4dV-cC6uyE,683
- abstractruntime/integrations/abstractcore/tool_executor.py,sha256=fLq-YX6EDiSzMKHC2b3KqFt28D46cOnbpRV5auDM4MU,2720
+ abstractruntime/integrations/abstractcore/tool_executor.py,sha256=yZq3txNlm00Xa-m7KfUN3_6ACKXGOZLnyslAI-CA-fs,5528
  abstractruntime/scheduler/__init__.py,sha256=ZwFJQrBN3aQOv7xuGXsURXEEBJAHV6qVQy13DYvbhqw,337
  abstractruntime/scheduler/convenience.py,sha256=Rremvw_P3JMQ-NOkwn7ATlD5HPkKxRtSGJRfBkimyJY,10278
  abstractruntime/scheduler/registry.py,sha256=0iqcTcCV0bYmhw-T7n8TFoZXVkhBRZt89AebXz_Z5fc,2969
  abstractruntime/scheduler/scheduler.py,sha256=Z3dIwz0e7bP7c_S_VoclgY1Fjw7NxFez_wsst-dYVT8,13535
  abstractruntime/storage/__init__.py,sha256=KSg4V-0Ge_BWFnm_a-XsKezNdtUhUBUuvfsvRKUiDUo,702
- abstractruntime/storage/artifacts.py,sha256=IyVWOVqe76xjYSjleQC6kG-H92FKRzbzWf6qWzEhImE,14542
+ abstractruntime/storage/artifacts.py,sha256=xIWR2Es4W4j3w3GJj1L4qrrRG4mkgiUagFucV_Cggio,15570
  abstractruntime/storage/base.py,sha256=QkNjtHRhqRHHg5FbEP9CVNjL97RTFcy4y8vNRPtVVvc,2758
  abstractruntime/storage/in_memory.py,sha256=baSlhu5ZPEFS82PvYwW89n0PbK7JmS1H07qlrPf40rI,3534
- abstractruntime/storage/json_files.py,sha256=o-xfPPupDB5ZwgaAWgQbHdpLySLuoNzdOz5Moqm9H_Q,6908
+ abstractruntime/storage/json_files.py,sha256=txj3deVXlhK2fXFquUEvPfhGCc5k4pxIKVR9FXJIugU,6954
  abstractruntime/storage/ledger_chain.py,sha256=TnAWacQ9e58RAg2vKP8OU6WN8Re1PdqN72g574A2CGA,4717
  abstractruntime/storage/snapshots.py,sha256=-IUlZ40Vxcyl3hKzKk_IxYxm9zumBhkSAzrcL9WpmcU,6481
- abstractruntime-0.0.1.dist-info/METADATA,sha256=cUa0Tmy-8jmuP2VR1EJwoQxC_d8i6RnVTQUr6-KkfOA,4997
- abstractruntime-0.0.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- abstractruntime-0.0.1.dist-info/licenses/LICENSE,sha256=6rL4UIO5IdK59THf7fx0q6Hmxp5grSFi7-kWLcczseA,1083
- abstractruntime-0.0.1.dist-info/RECORD,,
+ abstractruntime-0.2.0.dist-info/METADATA,sha256=eeIBiGaa-l8bYfUj1pqMNKnQGOT08xZ3d41zRDahZ1Q,4997
+ abstractruntime-0.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ abstractruntime-0.2.0.dist-info/licenses/LICENSE,sha256=6rL4UIO5IdK59THf7fx0q6Hmxp5grSFi7-kWLcczseA,1083
+ abstractruntime-0.2.0.dist-info/RECORD,,