prefactor-core 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefactor_core-0.1.0/.gitignore +71 -0
- prefactor_core-0.1.0/PKG-INFO +273 -0
- prefactor_core-0.1.0/README.md +262 -0
- prefactor_core-0.1.0/examples/agent_e2e.py +295 -0
- prefactor_core-0.1.0/pyproject.toml +25 -0
- prefactor_core-0.1.0/src/prefactor_core/__init__.py +57 -0
- prefactor_core-0.1.0/src/prefactor_core/client.py +405 -0
- prefactor_core-0.1.0/src/prefactor_core/config.py +60 -0
- prefactor_core-0.1.0/src/prefactor_core/context_stack.py +118 -0
- prefactor_core-0.1.0/src/prefactor_core/exceptions.py +49 -0
- prefactor_core-0.1.0/src/prefactor_core/managers/__init__.py +6 -0
- prefactor_core-0.1.0/src/prefactor_core/managers/agent_instance.py +255 -0
- prefactor_core-0.1.0/src/prefactor_core/managers/span.py +323 -0
- prefactor_core-0.1.0/src/prefactor_core/models.py +67 -0
- prefactor_core-0.1.0/src/prefactor_core/operations.py +61 -0
- prefactor_core-0.1.0/src/prefactor_core/queue/__init__.py +16 -0
- prefactor_core-0.1.0/src/prefactor_core/queue/base.py +91 -0
- prefactor_core-0.1.0/src/prefactor_core/queue/executor.py +183 -0
- prefactor_core-0.1.0/src/prefactor_core/queue/memory.py +104 -0
- prefactor_core-0.1.0/src/prefactor_core/schema_registry.py +263 -0
- prefactor_core-0.1.0/src/prefactor_core/span_context.py +192 -0
- prefactor_core-0.1.0/tests/test_client.py +100 -0
- prefactor_core-0.1.0/tests/test_imports.py +92 -0
- prefactor_core-0.1.0/tests/test_queue.py +185 -0
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
# mise
|
|
2
|
+
.mise.local.toml
|
|
3
|
+
.mise.*.local.toml
|
|
4
|
+
|
|
5
|
+
# lefthook
|
|
6
|
+
lefthook-local.yml
|
|
7
|
+
|
|
8
|
+
# direnv
|
|
9
|
+
.direnv
|
|
10
|
+
|
|
11
|
+
# Python
|
|
12
|
+
__pycache__/
|
|
13
|
+
*.py[cod]
|
|
14
|
+
*$py.class
|
|
15
|
+
*.so
|
|
16
|
+
.Python
|
|
17
|
+
build/
|
|
18
|
+
develop-eggs/
|
|
19
|
+
dist/
|
|
20
|
+
downloads/
|
|
21
|
+
eggs/
|
|
22
|
+
.eggs/
|
|
23
|
+
lib/
|
|
24
|
+
lib64/
|
|
25
|
+
parts/
|
|
26
|
+
sdist/
|
|
27
|
+
var/
|
|
28
|
+
wheels/
|
|
29
|
+
pip-wheel-metadata/
|
|
30
|
+
share/python-wheels/
|
|
31
|
+
*.egg-info/
|
|
32
|
+
.installed.cfg
|
|
33
|
+
*.egg
|
|
34
|
+
MANIFEST
|
|
35
|
+
|
|
36
|
+
# Virtual environments
|
|
37
|
+
.venv/
|
|
38
|
+
venv/
|
|
39
|
+
ENV/
|
|
40
|
+
env/
|
|
41
|
+
|
|
42
|
+
# uv
|
|
43
|
+
.uv/
|
|
44
|
+
uv.lock
|
|
45
|
+
|
|
46
|
+
# Type checkers
|
|
47
|
+
.mypy_cache/
|
|
48
|
+
.dmypy.json
|
|
49
|
+
dmypy.json
|
|
50
|
+
.pytype/
|
|
51
|
+
.pyre/
|
|
52
|
+
.ty_cache/
|
|
53
|
+
|
|
54
|
+
# Ruff
|
|
55
|
+
.ruff_cache/
|
|
56
|
+
|
|
57
|
+
# IDEs
|
|
58
|
+
.vscode/
|
|
59
|
+
.idea/
|
|
60
|
+
*.swp
|
|
61
|
+
*.swo
|
|
62
|
+
*~
|
|
63
|
+
|
|
64
|
+
# Testing
|
|
65
|
+
.pytest_cache/
|
|
66
|
+
.coverage
|
|
67
|
+
htmlcov/
|
|
68
|
+
|
|
69
|
+
# Env
|
|
70
|
+
.env
|
|
71
|
+
mise.local.toml
|
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: prefactor-core
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Core Prefactor SDK with async queue-based operations
|
|
5
|
+
Author-email: Prefactor Pty Ltd <josh@prefactor.tech>
|
|
6
|
+
License: MIT
|
|
7
|
+
Requires-Python: <4.0.0,>=3.11.0
|
|
8
|
+
Requires-Dist: prefactor-http>=0.1.0
|
|
9
|
+
Requires-Dist: pydantic>=2.0.0
|
|
10
|
+
Description-Content-Type: text/markdown
|
|
11
|
+
|
|
12
|
+
# Prefactor Core
|
|
13
|
+
|
|
14
|
+
High-level Prefactor SDK with async queue-based processing.
|
|
15
|
+
|
|
16
|
+
## Features
|
|
17
|
+
|
|
18
|
+
- **Queue-Based Processing**: Operations are queued and processed asynchronously by a worker pool
|
|
19
|
+
- **Non-Blocking API**: Agent execution is never blocked by observability calls
|
|
20
|
+
- **Automatic Parent Detection**: Nested spans automatically detect their parent from the context stack
|
|
21
|
+
- **Schema Registry**: Compose and register span schemas before instance creation
|
|
22
|
+
- **Configurable Workers**: Tune concurrency and retry behavior for the background queue
|
|
23
|
+
|
|
24
|
+
## Installation
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
pip install prefactor-core
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
## Quick Start
|
|
31
|
+
|
|
32
|
+
```python
|
|
33
|
+
import asyncio
|
|
34
|
+
from prefactor_core import PrefactorCoreClient, PrefactorCoreConfig, SchemaRegistry
|
|
35
|
+
from prefactor_http import HttpClientConfig
|
|
36
|
+
|
|
37
|
+
registry = SchemaRegistry()
|
|
38
|
+
registry.register_type(
|
|
39
|
+
name="agent:llm",
|
|
40
|
+
params_schema={
|
|
41
|
+
"type": "object",
|
|
42
|
+
"properties": {
|
|
43
|
+
"model": {"type": "string"},
|
|
44
|
+
"prompt": {"type": "string"},
|
|
45
|
+
},
|
|
46
|
+
"required": ["model", "prompt"],
|
|
47
|
+
},
|
|
48
|
+
result_schema={
|
|
49
|
+
"type": "object",
|
|
50
|
+
"properties": {"response": {"type": "string"}},
|
|
51
|
+
},
|
|
52
|
+
title="LLM Call",
|
|
53
|
+
description="A call to a language model",
|
|
54
|
+
template="{{model}}: {{prompt}} → {{response}}",
|
|
55
|
+
)
|
|
56
|
+
|
|
57
|
+
async def main():
|
|
58
|
+
config = PrefactorCoreConfig(
|
|
59
|
+
http_config=HttpClientConfig(
|
|
60
|
+
api_url="https://api.prefactor.ai",
|
|
61
|
+
api_token="your-token",
|
|
62
|
+
),
|
|
63
|
+
schema_registry=registry,
|
|
64
|
+
)
|
|
65
|
+
|
|
66
|
+
async with PrefactorCoreClient(config) as client:
|
|
67
|
+
instance = await client.create_agent_instance(
|
|
68
|
+
agent_id="my-agent",
|
|
69
|
+
agent_version={"name": "My Agent", "external_identifier": "v1.0.0"},
|
|
70
|
+
)
|
|
71
|
+
|
|
72
|
+
await instance.start()
|
|
73
|
+
|
|
74
|
+
async with instance.span("agent:llm") as span:
|
|
75
|
+
await span.start({"model": "gpt-4", "prompt": "Hello"})
|
|
76
|
+
result = await call_llm()
|
|
77
|
+
await span.complete({"response": result})
|
|
78
|
+
|
|
79
|
+
await instance.finish()
|
|
80
|
+
|
|
81
|
+
asyncio.run(main())
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
## API Reference
|
|
85
|
+
|
|
86
|
+
### `PrefactorCoreClient`
|
|
87
|
+
|
|
88
|
+
The main entry point. Use as an async context manager or call `initialize()` / `close()` manually.
|
|
89
|
+
|
|
90
|
+
```python
|
|
91
|
+
client = PrefactorCoreClient(config)
|
|
92
|
+
await client.initialize()
|
|
93
|
+
# ... use client ...
|
|
94
|
+
await client.close()
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
#### `create_agent_instance`
|
|
98
|
+
|
|
99
|
+
```python
|
|
100
|
+
handle = await client.create_agent_instance(
|
|
101
|
+
agent_id="my-agent",
|
|
102
|
+
agent_version={"name": "My Agent", "external_identifier": "v1.0.0"},
|
|
103
|
+
agent_schema_version=None, # Optional: auto-generated if schema_registry is configured
|
|
104
|
+
external_schema_version_id=None, # Optional: reference an existing schema version
|
|
105
|
+
) -> AgentInstanceHandle
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
#### `span` (context manager)
|
|
109
|
+
|
|
110
|
+
```python
|
|
111
|
+
async with client.span(
|
|
112
|
+
instance_id="instance_123",
|
|
113
|
+
schema_name="agent:llm",
|
|
114
|
+
parent_span_id=None, # Optional: auto-detected from context stack if omitted
|
|
115
|
+
payload=None, # Optional: used as params if span.start() is never called explicitly
|
|
116
|
+
) as span:
|
|
117
|
+
await span.start({"model": "gpt-4", "prompt": "Hello"})
|
|
118
|
+
result = await call_llm()
|
|
119
|
+
await span.complete({"response": result})
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
### `AgentInstanceHandle`
|
|
123
|
+
|
|
124
|
+
Returned by `create_agent_instance`. Manages the lifecycle of a single agent instance.
|
|
125
|
+
|
|
126
|
+
```python
|
|
127
|
+
handle.id # -> str
|
|
128
|
+
|
|
129
|
+
await handle.start()
|
|
130
|
+
await handle.finish()
|
|
131
|
+
|
|
132
|
+
async with handle.span("agent:llm") as span:
|
|
133
|
+
...
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
### `SpanContext`
|
|
137
|
+
|
|
138
|
+
The object yielded by span context managers. Spans follow a three-phase lifecycle:
|
|
139
|
+
|
|
140
|
+
1. **Enter context** — span is prepared locally, no HTTP call yet.
|
|
141
|
+
2. **`await span.start(payload)`** — POSTs the span to the API as `active` with the given params payload.
|
|
142
|
+
3. **`await span.complete(result)`** / **`await span.fail(result)`** / **`await span.cancel()`** — finishes the span with a terminal status.
|
|
143
|
+
|
|
144
|
+
If `start()` or a finish method is not called explicitly, the context manager handles them automatically on exit.
|
|
145
|
+
|
|
146
|
+
```python
|
|
147
|
+
span.id # -> str (API-generated after start())
|
|
148
|
+
|
|
149
|
+
await span.start(payload: dict) # POST span as active with params payload
|
|
150
|
+
await span.complete(result: dict) # finish with status "complete"
|
|
151
|
+
await span.fail(result: dict) # finish with status "failed"
|
|
152
|
+
await span.cancel() # finish with status "cancelled"
|
|
153
|
+
|
|
154
|
+
span.set_result(data: dict) # accumulate result data for auto-finish
|
|
155
|
+
await span.finish() # finish with current status (default: "complete")
|
|
156
|
+
```
|
|
157
|
+
|
|
158
|
+
**Status note:** `cancel()` can be called before or after `start()`. If called before `start()`, the span is posted as `pending` and immediately cancelled — the only valid pre-active cancellation path the API supports.
|
|
159
|
+
|
|
160
|
+
#### Full lifecycle example
|
|
161
|
+
|
|
162
|
+
```python
|
|
163
|
+
async with instance.span("agent:llm") as span:
|
|
164
|
+
await span.start({"model": "gpt-4", "prompt": "Hello"})
|
|
165
|
+
try:
|
|
166
|
+
result = await call_llm()
|
|
167
|
+
await span.complete({"response": result})
|
|
168
|
+
except Exception as exc:
|
|
169
|
+
await span.fail({"error": str(exc)})
|
|
170
|
+
|
|
171
|
+
# Cancel before starting (e.g. a conditional step that is skipped):
|
|
172
|
+
async with instance.span("agent:retrieval") as span:
|
|
173
|
+
if not needed:
|
|
174
|
+
await span.cancel()
|
|
175
|
+
else:
|
|
176
|
+
await span.start({"query": "..."})
|
|
177
|
+
docs = await retrieve()
|
|
178
|
+
await span.complete({"documents": docs, "count": len(docs)})
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
## Configuration
|
|
182
|
+
|
|
183
|
+
```python
|
|
184
|
+
from prefactor_core import PrefactorCoreConfig, QueueConfig
|
|
185
|
+
from prefactor_http import HttpClientConfig
|
|
186
|
+
|
|
187
|
+
config = PrefactorCoreConfig(
|
|
188
|
+
http_config=HttpClientConfig(
|
|
189
|
+
api_url="https://api.prefactor.ai",
|
|
190
|
+
api_token="your-token",
|
|
191
|
+
),
|
|
192
|
+
queue_config=QueueConfig(
|
|
193
|
+
num_workers=3, # Number of background workers
|
|
194
|
+
max_retries=3, # Retries per operation
|
|
195
|
+
retry_delay_base=1.0, # Base delay (seconds) for exponential backoff
|
|
196
|
+
),
|
|
197
|
+
schema_registry=None, # Optional: SchemaRegistry instance
|
|
198
|
+
)
|
|
199
|
+
```
|
|
200
|
+
|
|
201
|
+
## Schema Registry
|
|
202
|
+
|
|
203
|
+
Use `SchemaRegistry` to compose span schemas from multiple sources and auto-generate the `agent_schema_version` passed to `create_agent_instance`.
|
|
204
|
+
|
|
205
|
+
```python
|
|
206
|
+
from prefactor_core import SchemaRegistry
|
|
207
|
+
|
|
208
|
+
registry = SchemaRegistry()
|
|
209
|
+
|
|
210
|
+
registry.register_type(
|
|
211
|
+
name="agent:llm",
|
|
212
|
+
params_schema={
|
|
213
|
+
"type": "object",
|
|
214
|
+
"properties": {
|
|
215
|
+
"model": {"type": "string"},
|
|
216
|
+
"prompt": {"type": "string"},
|
|
217
|
+
},
|
|
218
|
+
"required": ["model", "prompt"],
|
|
219
|
+
},
|
|
220
|
+
result_schema={
|
|
221
|
+
"type": "object",
|
|
222
|
+
"properties": {"response": {"type": "string"}},
|
|
223
|
+
},
|
|
224
|
+
title="LLM Call",
|
|
225
|
+
description="A call to a language model",
|
|
226
|
+
template="{{model}}: {{prompt}} → {{response}}",
|
|
227
|
+
)
|
|
228
|
+
registry.register_type(
|
|
229
|
+
name="agent:tool",
|
|
230
|
+
params_schema={"type": "object", "properties": {...}},
|
|
231
|
+
result_schema={"type": "object", "properties": {...}},
|
|
232
|
+
title="Tool Call",
|
|
233
|
+
)
|
|
234
|
+
|
|
235
|
+
config = PrefactorCoreConfig(
|
|
236
|
+
http_config=...,
|
|
237
|
+
schema_registry=registry,
|
|
238
|
+
)
|
|
239
|
+
|
|
240
|
+
async with PrefactorCoreClient(config) as client:
|
|
241
|
+
# agent_schema_version is generated automatically from the registry
|
|
242
|
+
instance = await client.create_agent_instance(
|
|
243
|
+
agent_id="my-agent",
|
|
244
|
+
agent_version={"name": "My Agent", "external_identifier": "v1.0.0"},
|
|
245
|
+
)
|
|
246
|
+
```
|
|
247
|
+
|
|
248
|
+
## Error Handling
|
|
249
|
+
|
|
250
|
+
```python
|
|
251
|
+
from prefactor_core import (
|
|
252
|
+
PrefactorCoreError,
|
|
253
|
+
ClientNotInitializedError,
|
|
254
|
+
ClientAlreadyInitializedError,
|
|
255
|
+
OperationError,
|
|
256
|
+
InstanceNotFoundError,
|
|
257
|
+
SpanNotFoundError,
|
|
258
|
+
)
|
|
259
|
+
```
|
|
260
|
+
|
|
261
|
+
## Architecture
|
|
262
|
+
|
|
263
|
+
The client uses a three-layer design:
|
|
264
|
+
|
|
265
|
+
1. **Queue infrastructure**: `InMemoryQueue` + `TaskExecutor` worker pool process operations in the background
|
|
266
|
+
2. **Managers**: `AgentInstanceManager` and `SpanManager` translate high-level calls into `Operation` objects and route them to the HTTP client
|
|
267
|
+
3. **Client API**: `PrefactorCoreClient` exposes the user-facing interface and wires the layers together
|
|
268
|
+
|
|
269
|
+
All observability operations are enqueued and executed asynchronously — the calling code is never blocked waiting for API responses.
|
|
270
|
+
|
|
271
|
+
## License
|
|
272
|
+
|
|
273
|
+
MIT
|
|
@@ -0,0 +1,262 @@
|
|
|
1
|
+
# Prefactor Core
|
|
2
|
+
|
|
3
|
+
High-level Prefactor SDK with async queue-based processing.
|
|
4
|
+
|
|
5
|
+
## Features
|
|
6
|
+
|
|
7
|
+
- **Queue-Based Processing**: Operations are queued and processed asynchronously by a worker pool
|
|
8
|
+
- **Non-Blocking API**: Agent execution is never blocked by observability calls
|
|
9
|
+
- **Automatic Parent Detection**: Nested spans automatically detect their parent from the context stack
|
|
10
|
+
- **Schema Registry**: Compose and register span schemas before instance creation
|
|
11
|
+
- **Configurable Workers**: Tune concurrency and retry behavior for the background queue
|
|
12
|
+
|
|
13
|
+
## Installation
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
pip install prefactor-core
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
## Quick Start
|
|
20
|
+
|
|
21
|
+
```python
|
|
22
|
+
import asyncio
|
|
23
|
+
from prefactor_core import PrefactorCoreClient, PrefactorCoreConfig, SchemaRegistry
|
|
24
|
+
from prefactor_http import HttpClientConfig
|
|
25
|
+
|
|
26
|
+
registry = SchemaRegistry()
|
|
27
|
+
registry.register_type(
|
|
28
|
+
name="agent:llm",
|
|
29
|
+
params_schema={
|
|
30
|
+
"type": "object",
|
|
31
|
+
"properties": {
|
|
32
|
+
"model": {"type": "string"},
|
|
33
|
+
"prompt": {"type": "string"},
|
|
34
|
+
},
|
|
35
|
+
"required": ["model", "prompt"],
|
|
36
|
+
},
|
|
37
|
+
result_schema={
|
|
38
|
+
"type": "object",
|
|
39
|
+
"properties": {"response": {"type": "string"}},
|
|
40
|
+
},
|
|
41
|
+
title="LLM Call",
|
|
42
|
+
description="A call to a language model",
|
|
43
|
+
template="{{model}}: {{prompt}} → {{response}}",
|
|
44
|
+
)
|
|
45
|
+
|
|
46
|
+
async def main():
|
|
47
|
+
config = PrefactorCoreConfig(
|
|
48
|
+
http_config=HttpClientConfig(
|
|
49
|
+
api_url="https://api.prefactor.ai",
|
|
50
|
+
api_token="your-token",
|
|
51
|
+
),
|
|
52
|
+
schema_registry=registry,
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
async with PrefactorCoreClient(config) as client:
|
|
56
|
+
instance = await client.create_agent_instance(
|
|
57
|
+
agent_id="my-agent",
|
|
58
|
+
agent_version={"name": "My Agent", "external_identifier": "v1.0.0"},
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
await instance.start()
|
|
62
|
+
|
|
63
|
+
async with instance.span("agent:llm") as span:
|
|
64
|
+
await span.start({"model": "gpt-4", "prompt": "Hello"})
|
|
65
|
+
result = await call_llm()
|
|
66
|
+
await span.complete({"response": result})
|
|
67
|
+
|
|
68
|
+
await instance.finish()
|
|
69
|
+
|
|
70
|
+
asyncio.run(main())
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## API Reference
|
|
74
|
+
|
|
75
|
+
### `PrefactorCoreClient`
|
|
76
|
+
|
|
77
|
+
The main entry point. Use as an async context manager or call `initialize()` / `close()` manually.
|
|
78
|
+
|
|
79
|
+
```python
|
|
80
|
+
client = PrefactorCoreClient(config)
|
|
81
|
+
await client.initialize()
|
|
82
|
+
# ... use client ...
|
|
83
|
+
await client.close()
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
#### `create_agent_instance`
|
|
87
|
+
|
|
88
|
+
```python
|
|
89
|
+
handle = await client.create_agent_instance(
|
|
90
|
+
agent_id="my-agent",
|
|
91
|
+
agent_version={"name": "My Agent", "external_identifier": "v1.0.0"},
|
|
92
|
+
agent_schema_version=None, # Optional: auto-generated if schema_registry is configured
|
|
93
|
+
external_schema_version_id=None, # Optional: reference an existing schema version
|
|
94
|
+
) -> AgentInstanceHandle
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
#### `span` (context manager)
|
|
98
|
+
|
|
99
|
+
```python
|
|
100
|
+
async with client.span(
|
|
101
|
+
instance_id="instance_123",
|
|
102
|
+
schema_name="agent:llm",
|
|
103
|
+
parent_span_id=None, # Optional: auto-detected from context stack if omitted
|
|
104
|
+
payload=None, # Optional: used as params if span.start() is never called explicitly
|
|
105
|
+
) as span:
|
|
106
|
+
await span.start({"model": "gpt-4", "prompt": "Hello"})
|
|
107
|
+
result = await call_llm()
|
|
108
|
+
await span.complete({"response": result})
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
### `AgentInstanceHandle`
|
|
112
|
+
|
|
113
|
+
Returned by `create_agent_instance`. Manages the lifecycle of a single agent instance.
|
|
114
|
+
|
|
115
|
+
```python
|
|
116
|
+
handle.id # -> str
|
|
117
|
+
|
|
118
|
+
await handle.start()
|
|
119
|
+
await handle.finish()
|
|
120
|
+
|
|
121
|
+
async with handle.span("agent:llm") as span:
|
|
122
|
+
...
|
|
123
|
+
```
|
|
124
|
+
|
|
125
|
+
### `SpanContext`
|
|
126
|
+
|
|
127
|
+
The object yielded by span context managers. Spans follow a three-phase lifecycle:
|
|
128
|
+
|
|
129
|
+
1. **Enter context** — span is prepared locally, no HTTP call yet.
|
|
130
|
+
2. **`await span.start(payload)`** — POSTs the span to the API as `active` with the given params payload.
|
|
131
|
+
3. **`await span.complete(result)`** / **`await span.fail(result)`** / **`await span.cancel()`** — finishes the span with a terminal status.
|
|
132
|
+
|
|
133
|
+
If `start()` or a finish method is not called explicitly, the context manager handles them automatically on exit.
|
|
134
|
+
|
|
135
|
+
```python
|
|
136
|
+
span.id # -> str (API-generated after start())
|
|
137
|
+
|
|
138
|
+
await span.start(payload: dict) # POST span as active with params payload
|
|
139
|
+
await span.complete(result: dict) # finish with status "complete"
|
|
140
|
+
await span.fail(result: dict) # finish with status "failed"
|
|
141
|
+
await span.cancel() # finish with status "cancelled"
|
|
142
|
+
|
|
143
|
+
span.set_result(data: dict) # accumulate result data for auto-finish
|
|
144
|
+
await span.finish() # finish with current status (default: "complete")
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
**Status note:** `cancel()` can be called before or after `start()`. If called before `start()`, the span is posted as `pending` and immediately cancelled — the only valid pre-active cancellation path the API supports.
|
|
148
|
+
|
|
149
|
+
#### Full lifecycle example
|
|
150
|
+
|
|
151
|
+
```python
|
|
152
|
+
async with instance.span("agent:llm") as span:
|
|
153
|
+
await span.start({"model": "gpt-4", "prompt": "Hello"})
|
|
154
|
+
try:
|
|
155
|
+
result = await call_llm()
|
|
156
|
+
await span.complete({"response": result})
|
|
157
|
+
except Exception as exc:
|
|
158
|
+
await span.fail({"error": str(exc)})
|
|
159
|
+
|
|
160
|
+
# Cancel before starting (e.g. a conditional step that is skipped):
|
|
161
|
+
async with instance.span("agent:retrieval") as span:
|
|
162
|
+
if not needed:
|
|
163
|
+
await span.cancel()
|
|
164
|
+
else:
|
|
165
|
+
await span.start({"query": "..."})
|
|
166
|
+
docs = await retrieve()
|
|
167
|
+
await span.complete({"documents": docs, "count": len(docs)})
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
## Configuration
|
|
171
|
+
|
|
172
|
+
```python
|
|
173
|
+
from prefactor_core import PrefactorCoreConfig, QueueConfig
|
|
174
|
+
from prefactor_http import HttpClientConfig
|
|
175
|
+
|
|
176
|
+
config = PrefactorCoreConfig(
|
|
177
|
+
http_config=HttpClientConfig(
|
|
178
|
+
api_url="https://api.prefactor.ai",
|
|
179
|
+
api_token="your-token",
|
|
180
|
+
),
|
|
181
|
+
queue_config=QueueConfig(
|
|
182
|
+
num_workers=3, # Number of background workers
|
|
183
|
+
max_retries=3, # Retries per operation
|
|
184
|
+
retry_delay_base=1.0, # Base delay (seconds) for exponential backoff
|
|
185
|
+
),
|
|
186
|
+
schema_registry=None, # Optional: SchemaRegistry instance
|
|
187
|
+
)
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
## Schema Registry
|
|
191
|
+
|
|
192
|
+
Use `SchemaRegistry` to compose span schemas from multiple sources and auto-generate the `agent_schema_version` passed to `create_agent_instance`.
|
|
193
|
+
|
|
194
|
+
```python
|
|
195
|
+
from prefactor_core import SchemaRegistry
|
|
196
|
+
|
|
197
|
+
registry = SchemaRegistry()
|
|
198
|
+
|
|
199
|
+
registry.register_type(
|
|
200
|
+
name="agent:llm",
|
|
201
|
+
params_schema={
|
|
202
|
+
"type": "object",
|
|
203
|
+
"properties": {
|
|
204
|
+
"model": {"type": "string"},
|
|
205
|
+
"prompt": {"type": "string"},
|
|
206
|
+
},
|
|
207
|
+
"required": ["model", "prompt"],
|
|
208
|
+
},
|
|
209
|
+
result_schema={
|
|
210
|
+
"type": "object",
|
|
211
|
+
"properties": {"response": {"type": "string"}},
|
|
212
|
+
},
|
|
213
|
+
title="LLM Call",
|
|
214
|
+
description="A call to a language model",
|
|
215
|
+
template="{{model}}: {{prompt}} → {{response}}",
|
|
216
|
+
)
|
|
217
|
+
registry.register_type(
|
|
218
|
+
name="agent:tool",
|
|
219
|
+
params_schema={"type": "object", "properties": {...}},
|
|
220
|
+
result_schema={"type": "object", "properties": {...}},
|
|
221
|
+
title="Tool Call",
|
|
222
|
+
)
|
|
223
|
+
|
|
224
|
+
config = PrefactorCoreConfig(
|
|
225
|
+
http_config=...,
|
|
226
|
+
schema_registry=registry,
|
|
227
|
+
)
|
|
228
|
+
|
|
229
|
+
async with PrefactorCoreClient(config) as client:
|
|
230
|
+
# agent_schema_version is generated automatically from the registry
|
|
231
|
+
instance = await client.create_agent_instance(
|
|
232
|
+
agent_id="my-agent",
|
|
233
|
+
agent_version={"name": "My Agent", "external_identifier": "v1.0.0"},
|
|
234
|
+
)
|
|
235
|
+
```
|
|
236
|
+
|
|
237
|
+
## Error Handling
|
|
238
|
+
|
|
239
|
+
```python
|
|
240
|
+
from prefactor_core import (
|
|
241
|
+
PrefactorCoreError,
|
|
242
|
+
ClientNotInitializedError,
|
|
243
|
+
ClientAlreadyInitializedError,
|
|
244
|
+
OperationError,
|
|
245
|
+
InstanceNotFoundError,
|
|
246
|
+
SpanNotFoundError,
|
|
247
|
+
)
|
|
248
|
+
```
|
|
249
|
+
|
|
250
|
+
## Architecture
|
|
251
|
+
|
|
252
|
+
The client uses a three-layer design:
|
|
253
|
+
|
|
254
|
+
1. **Queue infrastructure**: `InMemoryQueue` + `TaskExecutor` worker pool process operations in the background
|
|
255
|
+
2. **Managers**: `AgentInstanceManager` and `SpanManager` translate high-level calls into `Operation` objects and route them to the HTTP client
|
|
256
|
+
3. **Client API**: `PrefactorCoreClient` exposes the user-facing interface and wires the layers together
|
|
257
|
+
|
|
258
|
+
All observability operations are enqueued and executed asynchronously — the calling code is never blocked waiting for API responses.
|
|
259
|
+
|
|
260
|
+
## License
|
|
261
|
+
|
|
262
|
+
MIT
|