AbstractRuntime 0.0.0__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractruntime/__init__.py +104 -2
- abstractruntime/core/__init__.py +26 -0
- abstractruntime/core/config.py +101 -0
- abstractruntime/core/models.py +282 -0
- abstractruntime/core/policy.py +166 -0
- abstractruntime/core/runtime.py +736 -0
- abstractruntime/core/spec.py +53 -0
- abstractruntime/core/vars.py +94 -0
- abstractruntime/identity/__init__.py +7 -0
- abstractruntime/identity/fingerprint.py +57 -0
- abstractruntime/integrations/__init__.py +11 -0
- abstractruntime/integrations/abstractcore/__init__.py +47 -0
- abstractruntime/integrations/abstractcore/effect_handlers.py +119 -0
- abstractruntime/integrations/abstractcore/factory.py +187 -0
- abstractruntime/integrations/abstractcore/llm_client.py +397 -0
- abstractruntime/integrations/abstractcore/logging.py +27 -0
- abstractruntime/integrations/abstractcore/tool_executor.py +168 -0
- abstractruntime/scheduler/__init__.py +13 -0
- abstractruntime/scheduler/convenience.py +324 -0
- abstractruntime/scheduler/registry.py +101 -0
- abstractruntime/scheduler/scheduler.py +431 -0
- abstractruntime/storage/__init__.py +25 -0
- abstractruntime/storage/artifacts.py +519 -0
- abstractruntime/storage/base.py +107 -0
- abstractruntime/storage/in_memory.py +119 -0
- abstractruntime/storage/json_files.py +208 -0
- abstractruntime/storage/ledger_chain.py +153 -0
- abstractruntime/storage/snapshots.py +217 -0
- abstractruntime-0.2.0.dist-info/METADATA +163 -0
- abstractruntime-0.2.0.dist-info/RECORD +32 -0
- {abstractruntime-0.0.0.dist-info → abstractruntime-0.2.0.dist-info}/licenses/LICENSE +3 -1
- abstractruntime-0.0.0.dist-info/METADATA +0 -89
- abstractruntime-0.0.0.dist-info/RECORD +0 -5
- {abstractruntime-0.0.0.dist-info → abstractruntime-0.2.0.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"""abstractruntime.core.spec
|
|
2
|
+
|
|
3
|
+
Workflow specification and node contract.
|
|
4
|
+
|
|
5
|
+
In v0.1 we keep the spec simple:
|
|
6
|
+
- The workflow is a graph: nodes identified by string IDs.
|
|
7
|
+
- Nodes are implemented as Python callables (handlers).
|
|
8
|
+
- This kernel does not define a UI DSL; AbstractFlow can produce specs.
|
|
9
|
+
|
|
10
|
+
Durability note:
|
|
11
|
+
- We persist *RunState* and a *ledger*.
|
|
12
|
+
- We assume the workflow spec + node handlers are available at resume time.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
from dataclasses import dataclass
|
|
18
|
+
from typing import Callable, Dict, Optional, Protocol
|
|
19
|
+
|
|
20
|
+
from .models import RunState, StepPlan
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class RunContext(Protocol):
    """Dependency injection surface for node handlers.

    This is intentionally small and can be extended later.

    Implementations provide:
        now_iso: Return the current time as an ISO-8601 string; node handlers
            use this instead of reading the clock directly so runs stay
            deterministic/replayable under a controlled context.
    """

    def now_iso(self) -> str: ...
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# Contract every workflow node implements: given the current RunState and a
# RunContext, return the StepPlan describing what the runtime should do next.
NodeHandler = Callable[[RunState, RunContext], StepPlan]
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass(frozen=True)
class WorkflowSpec:
    """Immutable description of a workflow graph.

    Nodes are identified by string IDs and implemented as Python callables
    (see `NodeHandler`); `entry_node` names where a run starts.
    """

    workflow_id: str
    entry_node: str
    nodes: Dict[str, NodeHandler]

    def get_node(self, node_id: str) -> NodeHandler:
        """Return the handler for *node_id*, raising KeyError for unknown ids."""
        try:
            return self.nodes[node_id]
        except KeyError:
            raise KeyError(f"Unknown node_id '{node_id}' in workflow '{self.workflow_id}'") from None

    def is_terminal(self, node_id: str) -> bool:
        """A workflow is terminal when the node returns StepPlan.complete_output.

        The runtime decides termination based on StepPlan, not a dedicated node type.
        """

        return False  # evaluated at runtime
|
|
53
|
+
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
"""RunState.vars namespacing helpers.
|
|
2
|
+
|
|
3
|
+
AbstractRuntime treats `RunState.vars` as JSON-serializable user/workflow state.
|
|
4
|
+
To avoid key collisions and to clarify ownership, we use a simple convention:
|
|
5
|
+
|
|
6
|
+
- `context`: user-facing context (task, conversation, inputs)
|
|
7
|
+
- `scratchpad`: agent/workflow working memory (iteration counters, plans)
|
|
8
|
+
- `_runtime`: runtime/host-managed metadata (tool specs, inbox, etc.)
|
|
9
|
+
- `_temp`: ephemeral step-to-step values (llm_response, tool_results, etc.)
|
|
10
|
+
- `_limits`: runtime resource limits (max_iterations, max_tokens, etc.)
|
|
11
|
+
|
|
12
|
+
This is a convention, not a strict schema; helpers here are intentionally small.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
from typing import Any, Dict
|
|
18
|
+
|
|
19
|
+
# Canonical RunState.vars namespaces (underscore prefix = runtime-owned).
CONTEXT = "context"        # user-facing context (task, conversation, inputs)
SCRATCHPAD = "scratchpad"  # agent/workflow working memory
RUNTIME = "_runtime"       # runtime/host-managed metadata
TEMP = "_temp"             # ephemeral step-to-step values
LIMITS = "_limits"  # Canonical storage for runtime resource limits


def ensure_namespaces(vars: Dict[str, Any]) -> Dict[str, Any]:
    """Ensure the four canonical namespaces exist and are dicts.

    Any key that is missing or bound to a non-dict value is reset to an
    empty dict. Mutates *vars* in place and returns it for chaining.
    """
    for ns_key in (CONTEXT, SCRATCHPAD, RUNTIME, TEMP):
        if not isinstance(vars.get(ns_key), dict):
            vars[ns_key] = {}
    return vars
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def get_namespace(vars: Dict[str, Any], key: str) -> Dict[str, Any]:
    """Return the dict stored under *key*, creating canonical namespaces first."""
    # ensure_namespaces returns vars itself, so this is a single expression.
    return ensure_namespaces(vars)[key]  # type: ignore[return-value]
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def get_context(vars: Dict[str, Any]) -> Dict[str, Any]:
    """Return the user-facing `context` namespace (created if missing)."""
    return get_namespace(vars, CONTEXT)


def get_scratchpad(vars: Dict[str, Any]) -> Dict[str, Any]:
    """Return the `scratchpad` working-memory namespace (created if missing)."""
    return get_namespace(vars, SCRATCHPAD)


def get_runtime(vars: Dict[str, Any]) -> Dict[str, Any]:
    """Return the host-managed `_runtime` namespace (created if missing)."""
    return get_namespace(vars, RUNTIME)


def get_temp(vars: Dict[str, Any]) -> Dict[str, Any]:
    """Return the ephemeral `_temp` namespace (created if missing)."""
    return get_namespace(vars, TEMP)


def clear_temp(vars: Dict[str, Any]) -> None:
    """Empty the `_temp` namespace in place (the dict object itself is kept)."""
    get_temp(vars).clear()
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def get_limits(vars: Dict[str, Any]) -> Dict[str, Any]:
    """Get the _limits namespace, creating with defaults if missing."""
    limits = vars.get(LIMITS)
    # A missing key yields None here, so the single isinstance check covers
    # both "absent" and "present but not a dict".
    if not isinstance(limits, dict):
        limits = _default_limits()
        vars[LIMITS] = limits
    return limits
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def ensure_limits(vars: Dict[str, Any]) -> Dict[str, Any]:
    """Ensure the `_limits` namespace exists (with defaults) and return it.

    `_limits` is the canonical location for runtime resource limits:
    - max_iterations / current_iteration: iteration control
    - max_tokens / estimated_tokens_used: token & context-window budgeting
    - max_history_messages: conversation history cap (-1 = unlimited)
    - warn_*_pct: thresholds for proactive warning notifications

    Returns:
        The `_limits` dict (a mutable reference into *vars*).
    """
    limits = get_limits(vars)
    return limits
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def _default_limits() -> Dict[str, Any]:
|
|
83
|
+
"""Return default limits dict."""
|
|
84
|
+
return {
|
|
85
|
+
"max_iterations": 25,
|
|
86
|
+
"current_iteration": 0,
|
|
87
|
+
"max_tokens": 32768,
|
|
88
|
+
"max_output_tokens": None,
|
|
89
|
+
"max_history_messages": -1,
|
|
90
|
+
"estimated_tokens_used": 0,
|
|
91
|
+
"warn_iterations_pct": 80,
|
|
92
|
+
"warn_tokens_pct": 80,
|
|
93
|
+
}
|
|
94
|
+
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""abstractruntime.identity.fingerprint
|
|
2
|
+
|
|
3
|
+
AI fingerprint / provenance (v0.1 - data model + hashing only).
|
|
4
|
+
|
|
5
|
+
Important:
|
|
6
|
+
- This does NOT implement cryptographic signatures yet (no non-forgeability).
|
|
7
|
+
- It provides a stable, deterministic *identifier* given stable inputs.
|
|
8
|
+
|
|
9
|
+
Backlog item will define:
|
|
10
|
+
- keypair-based identity (public key) and signing of ledger chains.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import hashlib
|
|
16
|
+
import json
|
|
17
|
+
from dataclasses import dataclass, asdict
|
|
18
|
+
from typing import Any, Dict, Optional
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _canonical_json(data: Dict[str, Any]) -> str:
|
|
22
|
+
return json.dumps(data, sort_keys=True, separators=(",", ":"), ensure_ascii=False)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def sha256_hex(text: str) -> str:
    """Return the hex-encoded SHA-256 digest of *text* (UTF-8 encoded)."""
    digest = hashlib.sha256(text.encode("utf-8"))
    return digest.hexdigest()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass(frozen=True)
class ActorFingerprint:
    """A stable identifier for an actor (agent/service/human).

    Deliberately minimal. For accountability you typically want:
    - a stable actor id
    - metadata about the owner/org
    - (future) a signature key to make logs tamper-evident
    """

    actor_id: str
    kind: str  # one of: "agent" | "human" | "service"
    display_name: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None

    @classmethod
    def from_metadata(cls, *, kind: str, display_name: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None) -> "ActorFingerprint":
        """Derive a deterministic `ar_`-prefixed id from the descriptive fields."""
        meta = metadata or {}
        digest = sha256_hex(
            _canonical_json({"kind": kind, "display_name": display_name, "metadata": meta})
        )
        return cls(
            actor_id="ar_" + digest[:24],
            kind=kind,
            display_name=display_name,
            metadata=meta,
        )

    def to_dict(self) -> Dict[str, Any]:
        """Plain-dict form of this fingerprint (via dataclasses.asdict)."""
        return asdict(self)
|
|
56
|
+
|
|
57
|
+
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
"""abstractruntime.integrations
|
|
2
|
+
|
|
3
|
+
Integration modules live here.
|
|
4
|
+
|
|
5
|
+
Design rule (layered coupling):
|
|
6
|
+
- The **kernel** (`abstractruntime.core`, `abstractruntime.storage`, `abstractruntime.identity`) stays dependency-light.
|
|
7
|
+
- Optional integration packages may import heavier dependencies (e.g. AbstractCore) and provide effect handlers.
|
|
8
|
+
|
|
9
|
+
This package intentionally does not import any integration by default.
|
|
10
|
+
"""
|
|
11
|
+
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
"""abstractruntime.integrations.abstractcore
|
|
2
|
+
|
|
3
|
+
AbstractCore integration package.
|
|
4
|
+
|
|
5
|
+
Provides:
|
|
6
|
+
- LLM clients (local + remote)
|
|
7
|
+
- Tool executors (executed + passthrough)
|
|
8
|
+
- Effect handlers wiring
|
|
9
|
+
- Convenience runtime factories for local/remote/hybrid modes
|
|
10
|
+
- RuntimeConfig for limits and model capabilities
|
|
11
|
+
|
|
12
|
+
Importing this module is the explicit opt-in to an AbstractCore dependency.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
from ...core.config import RuntimeConfig
|
|
16
|
+
from .llm_client import (
|
|
17
|
+
AbstractCoreLLMClient,
|
|
18
|
+
LocalAbstractCoreLLMClient,
|
|
19
|
+
RemoteAbstractCoreLLMClient,
|
|
20
|
+
)
|
|
21
|
+
from .tool_executor import AbstractCoreToolExecutor, MappingToolExecutor, PassthroughToolExecutor, ToolExecutor
|
|
22
|
+
from .effect_handlers import build_effect_handlers
|
|
23
|
+
from .factory import (
|
|
24
|
+
create_hybrid_runtime,
|
|
25
|
+
create_local_file_runtime,
|
|
26
|
+
create_local_runtime,
|
|
27
|
+
create_remote_file_runtime,
|
|
28
|
+
create_remote_runtime,
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
__all__ = [
    # LLM clients
    "AbstractCoreLLMClient",
    "LocalAbstractCoreLLMClient",
    "RemoteAbstractCoreLLMClient",
    # Configuration
    "RuntimeConfig",
    # Tool executors
    "ToolExecutor",
    "MappingToolExecutor",
    "AbstractCoreToolExecutor",
    "PassthroughToolExecutor",
    # Effect wiring and runtime factories
    "build_effect_handlers",
    "create_local_runtime",
    "create_remote_runtime",
    "create_hybrid_runtime",
    "create_local_file_runtime",
    "create_remote_file_runtime",
]
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
"""abstractruntime.integrations.abstractcore.effect_handlers
|
|
2
|
+
|
|
3
|
+
Effect handlers wiring for AbstractRuntime.
|
|
4
|
+
|
|
5
|
+
These handlers implement:
|
|
6
|
+
- `EffectType.LLM_CALL`
|
|
7
|
+
- `EffectType.TOOL_CALLS`
|
|
8
|
+
|
|
9
|
+
They are designed to keep `RunState.vars` JSON-safe.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
|
|
14
|
+
from typing import Any, Dict, Optional
|
|
15
|
+
|
|
16
|
+
from ...core.models import Effect, EffectType, RunState, WaitReason, WaitState
|
|
17
|
+
from ...core.runtime import EffectOutcome, EffectHandler
|
|
18
|
+
from .llm_client import AbstractCoreLLMClient
|
|
19
|
+
from .tool_executor import ToolExecutor
|
|
20
|
+
from .logging import get_logger
|
|
21
|
+
|
|
22
|
+
logger = get_logger(__name__)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _trace_context(run: RunState) -> Dict[str, str]:
|
|
26
|
+
ctx: Dict[str, str] = {"run_id": run.run_id}
|
|
27
|
+
if run.actor_id:
|
|
28
|
+
ctx["actor_id"] = str(run.actor_id)
|
|
29
|
+
session_id = getattr(run, "session_id", None)
|
|
30
|
+
if session_id:
|
|
31
|
+
ctx["session_id"] = str(session_id)
|
|
32
|
+
if run.parent_run_id:
|
|
33
|
+
ctx["parent_run_id"] = str(run.parent_run_id)
|
|
34
|
+
return ctx
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def make_llm_call_handler(*, llm: AbstractCoreLLMClient) -> EffectHandler:
    """Build the EffectType.LLM_CALL handler bound to *llm*.

    The handler validates the effect payload, injects durable trace context
    into the call params, and maps generate() success/failure to an
    EffectOutcome.
    """

    def _handler(run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
        payload = dict(effect.payload or {})
        prompt = payload.get("prompt")
        messages = payload.get("messages")
        raw_params = payload.get("params")
        params = dict(raw_params) if isinstance(raw_params, dict) else {}

        # Propagate durable trace context into AbstractCore calls.
        trace_metadata = params.get("trace_metadata")
        if not isinstance(trace_metadata, dict):
            trace_metadata = {}
        trace_metadata.update(_trace_context(run))
        params["trace_metadata"] = trace_metadata

        if not prompt and not messages:
            return EffectOutcome.failed("llm_call requires payload.prompt or payload.messages")

        try:
            result = llm.generate(
                prompt=str(prompt or ""),
                messages=messages,
                system_prompt=payload.get("system_prompt"),
                tools=payload.get("tools"),
                params=params,
            )
            return EffectOutcome.completed(result=result)
        except Exception as exc:
            logger.error("LLM_CALL failed", error=str(exc))
            return EffectOutcome.failed(str(exc))

    return _handler
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def make_tool_calls_handler(*, tools: Optional[ToolExecutor] = None) -> EffectHandler:
    """Create a TOOL_CALLS effect handler.

    Tool execution is performed exclusively via the host-configured ToolExecutor.
    This keeps `RunState.vars` and ledger payloads JSON-safe (durable execution).
    """

    def _handler(run: RunState, effect: Effect, default_next_node: Optional[str]) -> EffectOutcome:
        payload = dict(effect.payload or {})
        tool_calls = payload.get("tool_calls")
        if not isinstance(tool_calls, list):
            return EffectOutcome.failed("tool_calls requires payload.tool_calls (list)")

        if tools is None:
            return EffectOutcome.failed(
                "TOOL_CALLS requires a ToolExecutor; configure Runtime with "
                "MappingToolExecutor/AbstractCoreToolExecutor/PassthroughToolExecutor."
            )

        try:
            result = tools.execute(tool_calls=tool_calls)
        except Exception as exc:
            logger.error("TOOL_CALLS execution failed", error=str(exc))
            return EffectOutcome.failed(str(exc))

        mode = result.get("mode")
        if not mode or mode == "executed":
            return EffectOutcome.completed(result=result)

        # Passthrough/untrusted mode: pause until an external host resumes
        # with the tool results.
        wait_key = payload.get("wait_key") or f"tool_calls:{run.run_id}:{run.current_node}"
        return EffectOutcome.waiting(
            WaitState(
                reason=WaitReason.EVENT,
                wait_key=str(wait_key),
                resume_to_node=payload.get("resume_to_node") or default_next_node,
                result_key=effect.result_key,
                details={"mode": mode, "tool_calls": tool_calls},
            )
        )

    return _handler
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def build_effect_handlers(*, llm: AbstractCoreLLMClient, tools: Optional[ToolExecutor] = None) -> Dict[EffectType, Any]:
    """Wire the AbstractCore-backed effect handlers.

    Args:
        llm: LLM client used for EffectType.LLM_CALL.
        tools: Optional tool executor for EffectType.TOOL_CALLS. When None,
            the TOOL_CALLS handler reports a configuration error at call time
            rather than failing here.

    Returns:
        Mapping of EffectType to its handler callable.
    """
    # Fix: the `tools` default is None, so the annotation must be Optional
    # (it previously claimed a plain ToolExecutor), matching
    # make_tool_calls_handler's signature.
    return {
        EffectType.LLM_CALL: make_llm_call_handler(llm=llm),
        EffectType.TOOL_CALLS: make_tool_calls_handler(tools=tools),
    }
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
"""abstractruntime.integrations.abstractcore.factory
|
|
2
|
+
|
|
3
|
+
Convenience constructors for a Runtime wired to AbstractCore.
|
|
4
|
+
|
|
5
|
+
These helpers implement the three supported execution modes:
|
|
6
|
+
- local: in-process LLM + local tool execution
|
|
7
|
+
- remote: HTTP to AbstractCore server + tool passthrough
|
|
8
|
+
- hybrid: HTTP to AbstractCore server + local tool execution
|
|
9
|
+
|
|
10
|
+
The caller supplies storage backends (in-memory or file-based).
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
from typing import Any, Dict, Optional
|
|
17
|
+
|
|
18
|
+
from ...core.config import RuntimeConfig
|
|
19
|
+
from ...core.runtime import Runtime
|
|
20
|
+
from ...storage.in_memory import InMemoryLedgerStore, InMemoryRunStore
|
|
21
|
+
from ...storage.json_files import JsonFileRunStore, JsonlLedgerStore
|
|
22
|
+
from ...storage.base import LedgerStore, RunStore
|
|
23
|
+
|
|
24
|
+
from .effect_handlers import build_effect_handlers
|
|
25
|
+
from .llm_client import LocalAbstractCoreLLMClient, RemoteAbstractCoreLLMClient
|
|
26
|
+
from .tool_executor import AbstractCoreToolExecutor, PassthroughToolExecutor, ToolExecutor
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _default_in_memory_stores() -> tuple[RunStore, LedgerStore]:
    """Return a fresh (run store, ledger store) pair backed by memory only."""
    run_store = InMemoryRunStore()
    ledger_store = InMemoryLedgerStore()
    return run_store, ledger_store
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _default_file_stores(*, base_dir: str | Path) -> tuple[RunStore, LedgerStore]:
    """Return a file-backed (run store, ledger store) pair rooted at *base_dir*.

    The directory (and any missing parents) is created if needed.
    """
    root = Path(base_dir)
    root.mkdir(parents=True, exist_ok=True)
    return JsonFileRunStore(root), JsonlLedgerStore(root)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def create_local_runtime(
    *,
    provider: str,
    model: str,
    llm_kwargs: Optional[Dict[str, Any]] = None,
    run_store: Optional[RunStore] = None,
    ledger_store: Optional[LedgerStore] = None,
    tool_executor: Optional[ToolExecutor] = None,
    context: Optional[Any] = None,
    effect_policy: Optional[Any] = None,
    config: Optional[RuntimeConfig] = None,
) -> Runtime:
    """Create a runtime with local LLM execution via AbstractCore.

    Args:
        provider: LLM provider (e.g., "ollama", "openai")
        model: Model name
        llm_kwargs: Additional kwargs for LLM client
        run_store: Storage for run state (default: in-memory)
        ledger_store: Storage for ledger (default: in-memory)
        tool_executor: Optional custom tool executor. If not provided, defaults
            to `AbstractCoreToolExecutor()` (AbstractCore global tool registry).
        context: Optional context object
        effect_policy: Optional effect policy (retry, etc.)
        config: Optional RuntimeConfig for limits and model capabilities.
            If not provided, model capabilities are queried from the LLM client.

    Note:
        For durable execution, tool callables should never be stored in `RunState.vars`
        or passed in effect payloads. Prefer `MappingToolExecutor.from_tools([...])`.
    """
    # Fix: default only the MISSING store(s). Previously, supplying just one of
    # run_store/ledger_store caused BOTH to be replaced with in-memory stores,
    # silently discarding the caller-provided store.
    if run_store is None or ledger_store is None:
        default_run_store, default_ledger_store = _default_in_memory_stores()
        run_store = run_store if run_store is not None else default_run_store
        ledger_store = ledger_store if ledger_store is not None else default_ledger_store

    llm_client = LocalAbstractCoreLLMClient(provider=provider, model=model, llm_kwargs=llm_kwargs)
    tools = tool_executor or AbstractCoreToolExecutor()
    handlers = build_effect_handlers(llm=llm_client, tools=tools)

    # Query model capabilities and merge them into the effective config.
    capabilities = llm_client.get_model_capabilities()
    if config is None:
        config = RuntimeConfig(model_capabilities=capabilities)
    else:
        # Merge capabilities into provided config
        config = config.with_capabilities(capabilities)

    return Runtime(
        run_store=run_store,
        ledger_store=ledger_store,
        effect_handlers=handlers,
        context=context,
        effect_policy=effect_policy,
        config=config,
    )
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def create_remote_runtime(
    *,
    server_base_url: str,
    model: str,
    headers: Optional[Dict[str, str]] = None,
    timeout_s: float = 60.0,
    run_store: Optional[RunStore] = None,
    ledger_store: Optional[LedgerStore] = None,
    tool_executor: Optional[ToolExecutor] = None,
    context: Optional[Any] = None,
) -> Runtime:
    """Create a runtime using a remote AbstractCore server for LLM calls.

    Tools default to `PassthroughToolExecutor()`: tool calls pause the run
    until an external host resumes it with results.

    Args:
        server_base_url: Base URL of the AbstractCore HTTP server.
        model: Model name.
        headers: Optional extra HTTP headers (e.g. auth).
        timeout_s: HTTP timeout in seconds.
        run_store: Storage for run state (default: in-memory).
        ledger_store: Storage for ledger (default: in-memory).
        tool_executor: Optional tool executor (default: passthrough).
        context: Optional context object.
    """
    # Fix: default only the MISSING store(s); a caller-provided store must not
    # be silently replaced by an in-memory one.
    if run_store is None or ledger_store is None:
        default_run_store, default_ledger_store = _default_in_memory_stores()
        run_store = run_store if run_store is not None else default_run_store
        ledger_store = ledger_store if ledger_store is not None else default_ledger_store

    llm_client = RemoteAbstractCoreLLMClient(
        server_base_url=server_base_url,
        model=model,
        headers=headers,
        timeout_s=timeout_s,
    )
    tools = tool_executor or PassthroughToolExecutor()
    handlers = build_effect_handlers(llm=llm_client, tools=tools)

    return Runtime(run_store=run_store, ledger_store=ledger_store, effect_handlers=handlers, context=context)
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def create_hybrid_runtime(
    *,
    server_base_url: str,
    model: str,
    headers: Optional[Dict[str, str]] = None,
    timeout_s: float = 60.0,
    run_store: Optional[RunStore] = None,
    ledger_store: Optional[LedgerStore] = None,
    context: Optional[Any] = None,
) -> Runtime:
    """Remote LLM via AbstractCore server, local tool execution.

    Args:
        server_base_url: Base URL of the AbstractCore HTTP server.
        model: Model name.
        headers: Optional extra HTTP headers (e.g. auth).
        timeout_s: HTTP timeout in seconds.
        run_store: Storage for run state (default: in-memory).
        ledger_store: Storage for ledger (default: in-memory).
        context: Optional context object.
    """
    # Fix: default only the MISSING store(s); a caller-provided store must not
    # be silently replaced by an in-memory one.
    if run_store is None or ledger_store is None:
        default_run_store, default_ledger_store = _default_in_memory_stores()
        run_store = run_store if run_store is not None else default_run_store
        ledger_store = ledger_store if ledger_store is not None else default_ledger_store

    llm_client = RemoteAbstractCoreLLMClient(
        server_base_url=server_base_url,
        model=model,
        headers=headers,
        timeout_s=timeout_s,
    )
    tools = AbstractCoreToolExecutor()
    handlers = build_effect_handlers(llm=llm_client, tools=tools)

    return Runtime(run_store=run_store, ledger_store=ledger_store, effect_handlers=handlers, context=context)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def create_local_file_runtime(
    *,
    base_dir: str | Path,
    provider: str,
    model: str,
    llm_kwargs: Optional[Dict[str, Any]] = None,
    tool_executor: Optional[ToolExecutor] = None,
    context: Optional[Any] = None,
    effect_policy: Optional[Any] = None,
    config: Optional[RuntimeConfig] = None,
) -> Runtime:
    """Local-mode runtime with file-backed durability under *base_dir*.

    Equivalent to `create_local_runtime` with JSON-file stores rooted at
    *base_dir* (created if missing).

    Args:
        base_dir: Directory for run-state/ledger files.
        provider: LLM provider (e.g., "ollama", "openai").
        model: Model name.
        llm_kwargs: Additional kwargs for the LLM client.
        tool_executor: Optional custom tool executor (new, keyword-only,
            defaults to AbstractCore's global registry — same behavior as before).
        context: Optional context object.
        effect_policy: Optional effect policy (retry, etc.) — forwarded (new,
            keyword-only, default None preserves prior behavior).
        config: Optional RuntimeConfig for limits and model capabilities.
    """
    run_store, ledger_store = _default_file_stores(base_dir=base_dir)
    return create_local_runtime(
        provider=provider,
        model=model,
        llm_kwargs=llm_kwargs,
        run_store=run_store,
        ledger_store=ledger_store,
        tool_executor=tool_executor,
        context=context,
        effect_policy=effect_policy,
        config=config,
    )
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def create_remote_file_runtime(
    *,
    base_dir: str | Path,
    server_base_url: str,
    model: str,
    headers: Optional[Dict[str, str]] = None,
    timeout_s: float = 60.0,
    tool_executor: Optional[ToolExecutor] = None,
    context: Optional[Any] = None,
) -> Runtime:
    """Remote-mode runtime with file-backed durability under *base_dir*.

    Equivalent to `create_remote_runtime` with JSON-file stores rooted at
    *base_dir* (created if missing).

    Args:
        base_dir: Directory for run-state/ledger files.
        server_base_url: Base URL of the AbstractCore HTTP server.
        model: Model name.
        headers: Optional extra HTTP headers (e.g. auth).
        timeout_s: HTTP timeout in seconds.
        tool_executor: Optional tool executor — forwarded (new, keyword-only,
            default None keeps the prior passthrough behavior).
        context: Optional context object.
    """
    run_store, ledger_store = _default_file_stores(base_dir=base_dir)
    return create_remote_runtime(
        server_base_url=server_base_url,
        model=model,
        headers=headers,
        timeout_s=timeout_s,
        run_store=run_store,
        ledger_store=ledger_store,
        tool_executor=tool_executor,
        context=context,
    )
|