ase-python 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ase/__init__.py +21 -0
- ase/adapters/__init__.py +14 -0
- ase/adapters/contract.py +28 -0
- ase/adapters/frameworks/__init__.py +17 -0
- ase/adapters/frameworks/base.py +259 -0
- ase/adapters/frameworks/langgraph.py +19 -0
- ase/adapters/frameworks/mcp.py +68 -0
- ase/adapters/frameworks/openai_agents.py +19 -0
- ase/adapters/frameworks/pydantic_ai.py +19 -0
- ase/adapters/io.py +50 -0
- ase/adapters/model.py +89 -0
- ase/adapters/protocol.py +72 -0
- ase/adapters/replay.py +261 -0
- ase/cli/__init__.py +7 -0
- ase/cli/_trace_outputs.py +40 -0
- ase/cli/adapter_cmd.py +38 -0
- ase/cli/certify_cmd.py +74 -0
- ase/cli/compare.py +145 -0
- ase/cli/doctor_cmd.py +45 -0
- ase/cli/examples_cmd.py +27 -0
- ase/cli/history_cmd.py +126 -0
- ase/cli/import_cmd.py +34 -0
- ase/cli/main.py +134 -0
- ase/cli/replay_cmd.py +48 -0
- ase/cli/report.py +115 -0
- ase/cli/spec_cmd.py +53 -0
- ase/cli/test_cmd.py +121 -0
- ase/config/env_loader.py +71 -0
- ase/config/loader.py +82 -0
- ase/config/model.py +51 -0
- ase/conformance/__init__.py +7 -0
- ase/conformance/matrix.py +111 -0
- ase/conformance/model.py +91 -0
- ase/conformance/schema.py +37 -0
- ase/conformance/service.py +194 -0
- ase/core/engine.py +348 -0
- ase/errors.py +59 -0
- ase/evaluation/__init__.py +7 -0
- ase/evaluation/base.py +63 -0
- ase/evaluation/consistency.py +79 -0
- ase/evaluation/correctness.py +117 -0
- ase/evaluation/efficiency.py +145 -0
- ase/evaluation/engine.py +182 -0
- ase/evaluation/policy.py +134 -0
- ase/evaluation/scoring.py +64 -0
- ase/evaluation/trace_summary.py +36 -0
- ase/examples_matrix.py +118 -0
- ase/reporting/__init__.py +7 -0
- ase/reporting/json_report.py +45 -0
- ase/reporting/junit.py +38 -0
- ase/reporting/markdown.py +32 -0
- ase/reporting/terminal.py +66 -0
- ase/scenario/__init__.py +7 -0
- ase/scenario/model.py +294 -0
- ase/scenario/parser.py +40 -0
- ase/storage/__init__.py +7 -0
- ase/storage/trace_store.py +136 -0
- ase/trace/__init__.py +7 -0
- ase/trace/builder.py +175 -0
- ase/trace/model.py +264 -0
- ase/trace/otel_export.py +75 -0
- ase/trace/otel_import.py +96 -0
- ase/trace/redaction.py +10 -0
- ase/trace/serializer.py +50 -0
- ase_python-0.1.0.dist-info/METADATA +184 -0
- ase_python-0.1.0.dist-info/RECORD +69 -0
- ase_python-0.1.0.dist-info/WHEEL +4 -0
- ase_python-0.1.0.dist-info/entry_points.txt +2 -0
- ase_python-0.1.0.dist-info/licenses/LICENSE +105 -0
ase/__init__.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"""ASE — Agent Simulation Engine.
|
|
2
|
+
|
|
3
|
+
Public API surface. Import from here, not from internal modules.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import os
|
|
9
|
+
from pkgutil import extend_path
|
|
10
|
+
|
|
11
|
+
__path__ = extend_path(__path__, __name__)
|
|
12
|
+
|
|
13
|
+
# ASE does not rely on third-party Pydantic plugins. Disabling plugin auto-load
|
|
14
|
+
# keeps CLI startup deterministic even when unrelated plugins are installed in
|
|
15
|
+
# the active environment.
|
|
16
|
+
os.environ.setdefault("PYDANTIC_DISABLE_PLUGINS", "1")
|
|
17
|
+
|
|
18
|
+
from ase.errors import ASEError
|
|
19
|
+
|
|
20
|
+
__version__ = "0.1.0"
|
|
21
|
+
__all__ = ["ASEError", "__version__"]
|
ase/adapters/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
"""Public adapter SDK exports."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from ase.adapters.io import InMemoryEventSink, JsonlFileEventSink
|
|
6
|
+
from ase.adapters.model import AdapterEvent, AdapterEventType, AdapterVerificationResult
|
|
7
|
+
|
|
8
|
+
__all__ = [
|
|
9
|
+
"AdapterEvent",
|
|
10
|
+
"AdapterEventType",
|
|
11
|
+
"AdapterVerificationResult",
|
|
12
|
+
"InMemoryEventSink",
|
|
13
|
+
"JsonlFileEventSink",
|
|
14
|
+
]
|
ase/adapters/contract.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
"""Adapter contract shared by official ASE framework integrations."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Protocol
|
|
6
|
+
|
|
7
|
+
from ase.adapters.model import AdapterEvent
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class AdapterContract(Protocol):
    """Structural interface every official ASE adapter must satisfy.

    Frameworks and tests depend only on this surface; concrete adapters may
    add helpers but must keep these members stable.
    """

    @property
    def name(self) -> str:
        """Stable adapter identifier used in traces and reports."""
        ...

    @property
    def transport(self) -> str:
        """Transport used to emit adapter events."""
        ...

    def emit(self, event: AdapterEvent) -> None:
        """Persist one normalized ASE adapter event."""
        ...

    def enrich_policy_context(self, context: dict[str, object]) -> dict[str, object]:
        """Attach adapter metadata consumed by evaluators and reporters."""
        ...

    def inject_determinism(self, fixtures: dict[str, object]) -> dict[str, object]:
        """Expose deterministic fixtures to the framework runtime."""
        ...
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""Official framework adapter SDK exports."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from ase.adapters.frameworks.base import FrameworkAdapterBase
|
|
6
|
+
from ase.adapters.frameworks.langgraph import LangGraphAdapter
|
|
7
|
+
from ase.adapters.frameworks.mcp import MCPAdapter
|
|
8
|
+
from ase.adapters.frameworks.openai_agents import OpenAIAgentsAdapter
|
|
9
|
+
from ase.adapters.frameworks.pydantic_ai import PydanticAIAdapter
|
|
10
|
+
|
|
11
|
+
__all__ = [
|
|
12
|
+
"FrameworkAdapterBase",
|
|
13
|
+
"LangGraphAdapter",
|
|
14
|
+
"MCPAdapter",
|
|
15
|
+
"OpenAIAgentsAdapter",
|
|
16
|
+
"PydanticAIAdapter",
|
|
17
|
+
]
|
|
@@ -0,0 +1,259 @@
|
|
|
1
|
+
"""Shared adapter SDK primitives for framework-specific integrations."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, cast
|
|
6
|
+
|
|
7
|
+
import ulid
|
|
8
|
+
|
|
9
|
+
from ase.adapters.contract import AdapterContract
|
|
10
|
+
from ase.adapters.io import AdapterEventSink
|
|
11
|
+
from ase.adapters.model import AdapterEvent, AdapterEventType
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class FrameworkAdapterBase(AdapterContract):
    """Common event-emission helpers shared across official adapters.

    Concrete adapters (LangGraph, MCP, OpenAI Agents, PydanticAI) supply
    identity metadata (name/framework/language); this base builds normalized
    ``AdapterEvent`` objects via ``_event`` and writes them to the sink.
    """

    def __init__(
        self,
        sink: AdapterEventSink,
        *,
        name: str,
        framework: str,
        language: str,
        transport: str = "jsonl-stdio",
        version: str | None = None,
    ) -> None:
        # The sink receives every emitted event; identity fields below are
        # stamped into each event's metadata by _event().
        self._sink = sink
        self._name = name
        self._framework = framework
        self._language = language
        self._transport = transport
        self._version = version

    @property
    def name(self) -> str:
        """Return the stable adapter name for certification output."""
        return self._name

    @property
    def transport(self) -> str:
        """Return the transport used for emitted adapter events."""
        return self._transport

    def emit(self, event: AdapterEvent) -> None:
        """Write a fully formed event to the configured sink."""
        self._sink.write(event)

    def enrich_policy_context(self, context: dict[str, object]) -> dict[str, object]:
        """Attach framework metadata used by policy evaluators and reporters."""
        # Builds a new dict; the caller's context mapping is never mutated.
        return {
            **context,
            "adapter_name": self._name,
            "framework": self._framework,
            "language": self._language,
        }

    def inject_determinism(self, fixtures: dict[str, object]) -> dict[str, object]:
        """Expose fixtures unchanged as adapter runtime inputs by default."""
        # Shallow copy so later mutation by the caller cannot leak back.
        return dict(fixtures)

    def agent_start(self, agent_id: str, name: str, **metadata: object) -> None:
        """Emit an agent_start event with framework metadata attached."""
        self.emit(
            self._event(
                AdapterEventType.AGENT_START,
                agent_id=agent_id,
                name=name,
                metadata=metadata,
            )
        )

    def agent_end(
        self,
        agent_id: str,
        *,
        status: str = "passed",
        message: str | None = None,
        **metadata: object,
    ) -> None:
        """Emit an agent_end event for the active framework run."""
        self.emit(
            self._event(
                AdapterEventType.AGENT_END,
                agent_id=agent_id,
                status=status,
                message=message,
                metadata=metadata,
            )
        )

    def tool_start(
        self,
        agent_id: str,
        *,
        span_id: str,
        tool_kind: str,
        method: str,
        target: str,
        name: str | None = None,
        protocol: str | None = None,
        data: dict[str, object] | None = None,
    ) -> None:
        """Emit a tool_start event for a framework tool call.

        The span_id must be echoed by the matching tool_end so verification
        can pair the two events.
        """
        self.emit(
            self._event(
                AdapterEventType.TOOL_START,
                agent_id=agent_id,
                span_id=span_id,
                tool_kind=tool_kind,
                method=method,
                target=target,
                name=name,
                protocol=protocol,
                data=data,
            )
        )

    def tool_end(
        self,
        agent_id: str,
        *,
        span_id: str,
        tool_kind: str,
        method: str,
        target: str,
        status: str = "passed",
        protocol: str | None = None,
        data: dict[str, object] | None = None,
        message: str | None = None,
    ) -> None:
        """Emit a tool_end event closing the span opened by tool_start."""
        self.emit(
            self._event(
                AdapterEventType.TOOL_END,
                agent_id=agent_id,
                span_id=span_id,
                tool_kind=tool_kind,
                method=method,
                target=target,
                status=status,
                protocol=protocol,
                data=data,
                message=message,
            )
        )

    def session_write(
        self,
        agent_id: str,
        session_id: str,
        *,
        key: str,
        value: object,
    ) -> None:
        """Emit a session_write event for stateful frameworks."""
        self.emit(
            self._event(
                AdapterEventType.SESSION_WRITE,
                agent_id=agent_id,
                session_id=session_id,
                data={"key": key, "value": value},
            )
        )

    def session_read(
        self,
        agent_id: str,
        session_id: str,
        *,
        key: str,
        value: object,
    ) -> None:
        """Emit a session_read event for frameworks that expose state reads."""
        self.emit(
            self._event(
                AdapterEventType.SESSION_READ,
                agent_id=agent_id,
                session_id=session_id,
                data={"key": key, "value": value},
            )
        )

    def handoff(
        self,
        agent_id: str,
        target_agent_id: str,
        *,
        name: str,
        protocol: str = "adapter",
        data: dict[str, object] | None = None,
    ) -> None:
        """Emit a handoff event between two agents."""
        self.emit(
            self._event(
                AdapterEventType.HANDOFF,
                agent_id=agent_id,
                target_agent_id=target_agent_id,
                name=name,
                protocol=protocol,
                data=data,
            )
        )

    def approval(
        self,
        agent_id: str,
        approval_id: str,
        *,
        granted: bool = True,
    ) -> None:
        """Emit an approval event used by policy-aware flows."""
        self.emit(
            self._event(
                AdapterEventType.APPROVAL,
                agent_id=agent_id,
                approval_id=approval_id,
                granted=granted,
            )
        )

    def stream_chunk(
        self,
        agent_id: str,
        *,
        chunk_index: int,
        content: str,
        protocol: str = "stream",
    ) -> None:
        """Emit a streaming chunk event for realtime-style frameworks."""
        self.emit(
            self._event(
                AdapterEventType.STREAM_CHUNK,
                agent_id=agent_id,
                protocol=protocol,
                data={"chunk_index": chunk_index, "content": content},
            )
        )

    def _event(self, event_type: AdapterEventType, **kwargs: object) -> AdapterEvent:
        """Build a framework event with standard adapter metadata attached."""
        raw_metadata = kwargs.pop("metadata", None)
        metadata = cast(dict[str, Any], raw_metadata or {})
        # Drop explicit None for "data" (its model default is a dict, so
        # passing None would fail validation) and "message" (omit to rely on
        # the model's default instead of serializing an explicit null).
        if kwargs.get("data") is None:
            kwargs.pop("data", None)
        if kwargs.get("message") is None:
            kwargs.pop("message", None)
        # setdefault keeps any caller-supplied values while guaranteeing the
        # adapter identity fields are always present in metadata.
        metadata.setdefault("adapter_name", self._name)
        metadata.setdefault("framework", self._framework)
        metadata.setdefault("language", self._language)
        if self._version is not None:
            metadata.setdefault("adapter_version", self._version)
        metadata.setdefault("transport", self._transport)
        event_kwargs = cast(dict[str, Any], kwargs)
        return AdapterEvent(
            event_type=event_type,
            # ULIDs are unique and lexicographically time-sortable ids.
            event_id=str(ulid.new()),
            metadata=metadata,
            **event_kwargs,
        )
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""LangGraph adapter helpers for ASE's neutral event protocol."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from ase.adapters.frameworks.base import FrameworkAdapterBase
|
|
6
|
+
from ase.adapters.io import AdapterEventSink
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class LangGraphAdapter(FrameworkAdapterBase):
    """Bind the generic adapter protocol to LangGraph metadata."""

    def __init__(self, sink: AdapterEventSink, *, version: str | None = None) -> None:
        """Configure the shared base with LangGraph-specific identity fields."""
        identity = {
            "name": "langgraph-python",
            "framework": "langgraph",
            "language": "python",
            "version": version,
        }
        super().__init__(sink, **identity)
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
"""Official adapter SDK surface for MCP-backed runtimes."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from ase.adapters.frameworks.base import FrameworkAdapterBase
|
|
6
|
+
from ase.adapters.io import AdapterEventSink
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class MCPAdapter(FrameworkAdapterBase):
    """Emit ASE adapter events for MCP tools and resource access flows."""

    def __init__(
        self,
        sink: AdapterEventSink,
        *,
        version: str | None = None,
    ) -> None:
        super().__init__(
            sink,
            name="mcp-python",
            framework="mcp",
            language="python",
            version=version,
        )

    def resource_read(
        self,
        agent_id: str,
        *,
        session_id: str,
        target: str,
        value: object,
    ) -> None:
        """Emit MCP resource-read state without inflating tool-call metrics.

        Reads are modeled as session_read events keyed by the resource
        target, not as tool_start/tool_end pairs.
        """
        self.session_read(agent_id, session_id, key=target, value=value)

    def resource_write(
        self,
        agent_id: str,
        *,
        span_id: str,
        target: str,
        approval_id: str,
        session_id: str | None = None,
    ) -> None:
        """Emit an approval-backed MCP resource write tool flow.

        Event order: approval (always granted), optional session_write
        recording the target (only when session_id is given), then a
        tool_start/tool_end pair sharing span_id that models the write as a
        filesystem WRITE over the "mcp" protocol.
        """
        self.approval(agent_id, approval_id, granted=True)
        if session_id is not None:
            self.session_write(agent_id, session_id, key="mcp_target", value=target)
        self.tool_start(
            agent_id,
            span_id=span_id,
            tool_kind="filesystem",
            method="WRITE",
            target=target,
            name="mcp_resource_write",
            protocol="mcp",
        )
        self.tool_end(
            agent_id,
            span_id=span_id,
            tool_kind="filesystem",
            method="WRITE",
            target=target,
            protocol="mcp",
            # Synthetic success payload; this helper does not report failures.
            data={"status_code": 200, "resource": target},
            message="mcp write completed",
        )
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""OpenAI Agents adapter helpers for ASE's neutral event protocol."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from ase.adapters.frameworks.base import FrameworkAdapterBase
|
|
6
|
+
from ase.adapters.io import AdapterEventSink
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class OpenAIAgentsAdapter(FrameworkAdapterBase):
    """Bind the generic adapter protocol to OpenAI Agents metadata."""

    def __init__(self, sink: AdapterEventSink, *, version: str | None = None) -> None:
        """Configure the shared base with OpenAI Agents identity fields."""
        identity = {
            "name": "openai-agents-python",
            "framework": "openai-agents",
            "language": "python",
            "version": version,
        }
        super().__init__(sink, **identity)
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""PydanticAI adapter helpers for ASE's neutral event protocol."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from ase.adapters.frameworks.base import FrameworkAdapterBase
|
|
6
|
+
from ase.adapters.io import AdapterEventSink
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class PydanticAIAdapter(FrameworkAdapterBase):
    """Bind the generic adapter protocol to PydanticAI metadata."""

    def __init__(self, sink: AdapterEventSink, *, version: str | None = None) -> None:
        """Configure the shared base with PydanticAI identity fields."""
        identity = {
            "name": "pydantic-ai-python",
            "framework": "pydantic-ai",
            "language": "python",
            "version": version,
        }
        super().__init__(sink, **identity)
|
ase/adapters/io.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
"""Event sinks for official ASE adapter SDKs."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Protocol
|
|
8
|
+
|
|
9
|
+
from ase.adapters.model import AdapterEvent
|
|
10
|
+
from ase.errors import AdapterError
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class AdapterEventSink(Protocol):
    """Structural type for anything adapter SDKs can write events to."""

    def write(self, event: AdapterEvent) -> None:
        """Persist one adapter event."""
        ...
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class JsonlFileEventSink:
    """Append adapter events to disk for replay and certification."""

    def __init__(self, path: Path) -> None:
        # Target JSONL file; parent directories are created lazily on write.
        self._path = path

    def write(self, event: AdapterEvent) -> None:
        """Write one JSONL adapter event with contextual file errors.

        Raises:
            AdapterError: If the parent directory cannot be created or the
                event cannot be appended to the sink file.
        """
        try:
            # mkdir is inside the try so directory-creation failures get the
            # same contextual AdapterError as write failures, instead of
            # escaping as a raw OSError.
            self._path.parent.mkdir(parents=True, exist_ok=True)
            with self._path.open("a", encoding="utf-8") as handle:
                # Compact separators keep one event per line and minimal size.
                handle.write(json.dumps(event.model_dump(), separators=(",", ":")))
                handle.write("\n")
        except OSError as exc:
            raise AdapterError(f"failed to write adapter event sink {self._path}: {exc}") from exc
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class InMemoryEventSink:
    """Retain adapter events in memory for tests and local examples."""

    def __init__(self) -> None:
        # Internal buffer; exposed only as a defensive copy via `events`.
        self._events: list[AdapterEvent] = []

    def write(self, event: AdapterEvent) -> None:
        """Record one event in arrival order."""
        self._events.append(event)

    @property
    def events(self) -> list[AdapterEvent]:
        """Return a defensive copy of every event emitted so far."""
        return self._events.copy()
|
ase/adapters/model.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
"""Adapter protocol models for ingesting external agent runtimes into ASE."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import time
|
|
6
|
+
from enum import StrEnum
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from pydantic import BaseModel, Field, model_validator
|
|
10
|
+
|
|
11
|
+
ADAPTER_PROTOCOL_VERSION = 1
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class AdapterTransport(StrEnum):
    """Describe how an external runtime sends adapter events to ASE."""

    # Line-delimited JSON over the child process's stdio streams.
    JSONL_STDIO = "jsonl-stdio"
    # Events pushed over HTTP.
    HTTP = "http"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class AdapterEventType(StrEnum):
    """Enumerate normalized lifecycle and protocol events for adapters."""

    # Agent lifecycle boundaries; verify_events requires at least one of each.
    AGENT_START = "agent_start"
    AGENT_END = "agent_end"
    # Tool span pair; matched by span_id during verification.
    TOOL_START = "tool_start"
    TOOL_END = "tool_end"
    # Control transfer between agents (requires target_agent_id).
    HANDOFF = "handoff"
    GUARDRAIL = "guardrail"
    # Policy approval decision (requires approval_id).
    APPROVAL = "approval"
    # Session state access (both require session_id).
    SESSION_READ = "session_read"
    SESSION_WRITE = "session_write"
    HUMAN_FEEDBACK = "human_feedback"
    # Incremental output chunk for streaming runtimes.
    STREAM_CHUNK = "stream_chunk"
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class AdapterEvent(BaseModel):
    """Represent one normalized adapter event emitted by a framework runtime."""

    # Protocol envelope.
    protocol_version: int = ADAPTER_PROTOCOL_VERSION
    event_type: AdapterEventType
    # Wall-clock emission time in milliseconds since the epoch.
    timestamp_ms: float = Field(default_factory=lambda: time.time() * 1000)
    event_id: str
    # Correlation identifiers; span_id pairs tool_start with tool_end.
    span_id: str | None = None
    run_id: str | None = None
    agent_id: str | None = None
    parent_agent_id: str | None = None
    # Destination agent for handoff events.
    target_agent_id: str | None = None
    name: str | None = None
    # Tool-call descriptors (required for tool_start/tool_end).
    tool_kind: str | None = None
    method: str | None = None
    target: str | None = None
    # Session/approval descriptors for the corresponding event types.
    session_id: str | None = None
    approval_id: str | None = None
    granted: bool | None = None
    status: str | None = None
    protocol: str | None = None
    message: str | None = None
    # Free-form event payload and adapter identity metadata.
    data: dict[str, Any] = Field(default_factory=dict)
    metadata: dict[str, Any] = Field(default_factory=dict)

    @model_validator(mode="after")
    def validate_required_fields(self) -> AdapterEvent:
        """Enforce event-specific fields so replay remains deterministic."""
        # Tool spans need the full (kind, method, target) triple to be
        # matched and replayed.
        if (
            self.event_type in {AdapterEventType.TOOL_START, AdapterEventType.TOOL_END}
            and (not self.tool_kind or not self.method or not self.target)
        ):
            raise ValueError("tool_start/tool_end require tool_kind, method, and target")
        if self.event_type == AdapterEventType.APPROVAL and not self.approval_id:
            raise ValueError("approval requires approval_id")
        if self.event_type in {
            AdapterEventType.SESSION_READ,
            AdapterEventType.SESSION_WRITE,
        } and not self.session_id:
            raise ValueError("session_read/session_write require session_id")
        if self.event_type == AdapterEventType.HANDOFF and not self.target_agent_id:
            raise ValueError("handoff requires target_agent_id")
        return self
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class AdapterVerificationResult(BaseModel):
    """Capture pass/fail validation results for an adapter event stream."""

    # True only when no errors were recorded (warnings do not fail a stream).
    passed: bool
    total_events: int
    # Histogram of event_type value -> occurrence count.
    event_type_counts: dict[str, int] = Field(default_factory=dict)
    errors: list[str] = Field(default_factory=list)
    warnings: list[str] = Field(default_factory=list)
|
ase/adapters/protocol.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""Read and verify adapter JSONL event streams."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from collections import Counter
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
from ase.adapters.model import AdapterEvent, AdapterEventType, AdapterVerificationResult
|
|
10
|
+
from ase.errors import AdapterProtocolError
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def read_jsonl_events(path: Path) -> list[AdapterEvent]:
    """Load a JSONL adapter event file with contextual parse errors.

    Args:
        path: File containing one JSON adapter event per line; blank lines
            are skipped.

    Returns:
        Validated events in file order.

    Raises:
        AdapterProtocolError: If the file is missing or unreadable, or any
            line fails JSON parsing or event validation.
    """
    if not path.exists():
        raise AdapterProtocolError(f"adapter event file not found: {path}")
    events: list[AdapterEvent] = []
    try:
        lines = path.read_text(encoding="utf-8").splitlines()
    except OSError as exc:
        raise AdapterProtocolError(f"failed to read adapter event file {path}: {exc}") from exc
    for index, line in enumerate(lines, start=1):
        if not line.strip():
            continue
        try:
            payload = json.loads(line)
            events.append(AdapterEvent.model_validate(payload))
        # json.JSONDecodeError and pydantic's ValidationError both subclass
        # ValueError, so catching ValueError covers exactly the expected
        # parse/validation failures without masking unrelated programming
        # errors the way the previous bare `except Exception` did.
        except ValueError as exc:
            raise AdapterProtocolError(
                f"invalid adapter event at {path}:{index}: {exc}"
            ) from exc
    return events
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def verify_events(events: list[AdapterEvent]) -> AdapterVerificationResult:
    """Validate event ordering and structural invariants for replay safety."""
    errors: list[str] = []
    warnings: list[str] = []
    counts = Counter(event.event_type.value for event in events)

    # Structural minimums: a non-empty stream with both agent boundaries.
    if not events:
        errors.append("adapter event stream is empty")
    if not counts[AdapterEventType.AGENT_START.value]:
        errors.append("missing agent_start event")
    if not counts[AdapterEventType.AGENT_END.value]:
        errors.append("missing agent_end event")

    # Pair tool spans: every tool_end must close a previously opened span.
    open_spans: set[str] = set()
    for event in events:
        span = event.span_id or event.event_id
        if event.event_type == AdapterEventType.TOOL_START:
            open_spans.add(span)
        elif event.event_type == AdapterEventType.TOOL_END:
            if span in open_spans:
                open_spans.discard(span)
            else:
                errors.append(f"tool_end missing matching tool_start for span {span}")
    if open_spans:
        warnings.append("adapter event stream ended with open tool spans")

    return AdapterVerificationResult(
        passed=not errors,
        total_events=len(events),
        event_type_counts=dict(counts),
        errors=errors,
        warnings=warnings,
    )
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def read_and_verify(path: Path) -> tuple[list[AdapterEvent], AdapterVerificationResult]:
    """Load one adapter event file and return both events and verification."""
    loaded = read_jsonl_events(path)
    verification = verify_events(loaded)
    return loaded, verification
|