onceonly-sdk 2.0.2-py3-none-any.whl → 3.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onceonly/__init__.py +26 -1
- onceonly/_http.py +26 -4
- onceonly/_util.py +3 -1
- onceonly/ai.py +378 -31
- onceonly/ai_models.py +27 -0
- onceonly/client.py +77 -4
- onceonly/decorators.py +87 -1
- onceonly/governance.py +471 -0
- onceonly/models.py +58 -7
- onceonly/version.py +1 -1
- onceonly_sdk-3.0.0.dist-info/METADATA +1031 -0
- onceonly_sdk-3.0.0.dist-info/RECORD +18 -0
- {onceonly_sdk-2.0.2.dist-info → onceonly_sdk-3.0.0.dist-info}/WHEEL +1 -1
- onceonly_sdk-2.0.2.dist-info/METADATA +0 -216
- onceonly_sdk-2.0.2.dist-info/RECORD +0 -17
- {onceonly_sdk-2.0.2.dist-info → onceonly_sdk-3.0.0.dist-info}/licenses/LICENSE +0 -0
- {onceonly_sdk-2.0.2.dist-info → onceonly_sdk-3.0.0.dist-info}/top_level.txt +0 -0
onceonly/__init__.py
CHANGED
@@ -1,6 +1,14 @@
 from .version import __version__
 from .client import OnceOnly, create_client
-from .models import
+from .models import (
+    CheckLockResult,
+    Policy,
+    AgentStatus,
+    AgentLogItem,
+    AgentMetrics,
+)
+from .decorators import idempotent, idempotent_ai
+from .ai_models import AiToolResult
 from .exceptions import (
     OnceOnlyError,
     UnauthorizedError,
@@ -13,12 +21,29 @@ from .exceptions import (
 __all__ = [
     "OnceOnly",
     "create_client",
+
+    # Core idempotency
     "CheckLockResult",
+
+    # Agent Governance
+    "Policy",
+    "AgentStatus",
+    "AgentLogItem",
+    "AgentMetrics",
+
+    # Decorators
+    "idempotent",
+    "idempotent_ai",
+
+    "AiToolResult",
+
+    # Errors
     "OnceOnlyError",
     "UnauthorizedError",
     "OverLimitError",
     "RateLimitError",
     "ValidationError",
     "ApiError",
+
     "__version__",
 ]
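
The new top-level exports pull the governance models, the decorators, and AiToolResult into the package root. A minimal sketch of the 3.0.0 import surface follows; the OnceOnly constructor arguments and the decorator arguments are illustrative assumptions, since client.py and decorators.py are only summarized in this diff.

# Sketch of the new public API surface. Import names come from __init__.py above;
# constructor and decorator arguments are assumptions, not confirmed by this diff.
from onceonly import OnceOnly, AiToolResult, Policy, idempotent, idempotent_ai

client = OnceOnly(api_key="oo_live_...")  # hypothetical constructor signature

@idempotent(key="order:{order_id}")  # assumed decorator signature
def place_order(order_id: str) -> dict:
    return {"order_id": order_id, "status": "placed"}
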
onceonly/_http.py
CHANGED
@@ -78,10 +78,21 @@ def request_with_retries_sync(
     base_backoff: float,
     max_backoff: float,
 ) -> httpx.Response:
+    RETRYABLE_STATUS = {429, 500, 502, 503, 504}
+
     attempt = 0
     while True:
-
-
+        try:
+            resp = fn()
+        except httpx.RequestError:
+            if attempt >= max_retries:
+                raise
+            sleep_s = min(max_backoff, base_backoff * (2**attempt))
+            time.sleep(max(0.0, float(sleep_s)))
+            attempt += 1
+            continue
+
+        if resp.status_code not in RETRYABLE_STATUS or attempt >= max_retries:
             return resp
 
         ra = _parse_retry_after(resp)
@@ -97,10 +108,21 @@ async def request_with_retries_async(
     base_backoff: float,
     max_backoff: float,
 ) -> httpx.Response:
+    RETRYABLE_STATUS = {429, 500, 502, 503, 504}
+
     attempt = 0
     while True:
-
-
+        try:
+            resp = await fn()
+        except httpx.RequestError:
+            if attempt >= max_retries:
+                raise
+            sleep_s = min(max_backoff, base_backoff * (2**attempt))
+            await asyncio.sleep(max(0.0, float(sleep_s)))
+            attempt += 1
+            continue
+
+        if resp.status_code not in RETRYABLE_STATUS or attempt >= max_retries:
             return resp
 
         ra = _parse_retry_after(resp)
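
Both helpers now retry transport failures (httpx.RequestError) with the same capped exponential backoff they already apply to the retryable status codes 429/500/502/503/504. A standalone sketch of the resulting delay schedule; base_backoff and max_backoff below are example values, not the SDK defaults.

# Backoff schedule implied by min(max_backoff, base_backoff * (2 ** attempt)).
# The two values below are illustrative only.
base_backoff, max_backoff = 0.5, 8.0

for attempt in range(6):
    sleep_s = min(max_backoff, base_backoff * (2 ** attempt))
    print(f"attempt {attempt}: sleep {sleep_s:.1f}s")
# attempt 0..5 -> 0.5s, 1.0s, 2.0s, 4.0s, 8.0s, 8.0s (capped)
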
onceonly/_util.py
CHANGED
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import json
+
 from dataclasses import is_dataclass, asdict
 from typing import Any, Dict, Mapping, Optional, Union
 
@@ -39,7 +41,7 @@ def to_metadata_dict(metadata: Optional[MetadataLike]) -> Optional[Dict[str, Any]]:
     # mapping
     if isinstance(metadata, Mapping):
         try:
-            return dict(metadata)
+            return json.loads(json.dumps(dict(metadata), ensure_ascii=False, default=str))
         except Exception:
             return {"value": str(metadata)}
 
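
to_metadata_dict() now round-trips mapping metadata through json.dumps(default=str)/json.loads, so values that are not JSON-serializable are coerced to strings up front instead of failing later at request time. A small illustration of that normalization using a datetime value:

import json
from datetime import datetime

metadata = {"order_id": 42, "created_at": datetime(2025, 1, 1, 12, 0)}

# Same normalization to_metadata_dict() now applies to Mapping inputs:
clean = json.loads(json.dumps(dict(metadata), ensure_ascii=False, default=str))
print(clean)  # {'order_id': 42, 'created_at': '2025-01-01 12:00:00'}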
|
onceonly/ai.py
CHANGED
@@ -2,10 +2,11 @@ from __future__ import annotations
 
 import asyncio
 import time
+import inspect
 import logging
-from typing import Any, Dict, Optional, Awaitable, Callable
-
 import httpx
+import threading
+from typing import Any, Dict, Optional, Awaitable, Callable, Union
 
 from ._http import (
     parse_json_or_raise,
@@ -13,7 +14,7 @@ from ._http import (
     request_with_retries_async,
 )
 from ._util import to_metadata_dict, MetadataLike
-from .ai_models import AiRun, AiStatus, AiResult
+from .ai_models import AiRun, AiStatus, AiResult, AiToolResult
 
 logger = logging.getLogger("onceonly")
 
@@ -51,17 +52,111 @@ class AiClient:
         self._retry_backoff = float(retry_backoff)
         self._retry_max_backoff = float(retry_max_backoff)
 
+    @staticmethod
+    def _result_to_dict(value: Any) -> Optional[Dict[str, Any]]:
+        if value is None:
+            return None
+        if isinstance(value, dict):
+            return value
+
+        md = getattr(value, "model_dump", None)  # pydantic v2
+        if callable(md):
+            try:
+                out = md()
+                return out if isinstance(out, dict) else {"data": out}
+            except Exception:
+                return {"value": str(value)}
+
+        dct = getattr(value, "dict", None)  # pydantic v1
+        if callable(dct):
+            try:
+                out = dct()
+                return out if isinstance(out, dict) else {"data": out}
+            except Exception:
+                return {"value": str(value)}
+
+        if hasattr(value, "__dataclass_fields__"):
+            try:
+                import dataclasses
+                out = dataclasses.asdict(value)
+                return out if isinstance(out, dict) else {"data": out}
+            except Exception:
+                return {"value": str(value)}
+
+        return {"value": str(value)}
+
+    def _start_heartbeat_thread(
+        self,
+        *,
+        key: str,
+        lease_id: str,
+        ttl: Optional[int],
+        extend_every: float,
+    ) -> "threading.Event":
+        stop = threading.Event()
+
+        def _loop() -> None:
+            while not stop.is_set():
+                try:
+                    self.extend(key=key, lease_id=lease_id, ttl=ttl)
+                except Exception:
+                    pass
+                stop.wait(max(1.0, float(extend_every)))
+
+        t = threading.Thread(target=_loop, name="onceonly-ai-heartbeat", daemon=True)
+        t.start()
+        return stop
+
+    async def _start_heartbeat_task(
+        self,
+        *,
+        key: str,
+        lease_id: str,
+        ttl: Optional[int],
+        extend_every: float,
+    ) -> "asyncio.Task[None]":
+        async def _loop() -> None:
+            while True:
+                try:
+                    await self.extend_async(key=key, lease_id=lease_id, ttl=ttl)
+                except Exception:
+                    pass
+                await asyncio.sleep(max(1.0, float(extend_every)))
+
+        return asyncio.create_task(_loop())
+
     # ------------------------------------------------------------------
     # High-level: /ai/run + /ai/status + /ai/result
     # ------------------------------------------------------------------
 
-    def run(
-
-
-
-
-
-
+    def run(
+        self,
+        key: Optional[str] = None,
+        ttl: Optional[int] = None,
+        metadata: Optional[MetadataLike] = None,
+        *,
+        agent_id: Optional[str] = None,
+        tool: Optional[str] = None,
+        args: Optional[Dict[str, Any]] = None,
+        spend_usd: Optional[float] = None,
+    ) -> Union[AiRun, AiToolResult]:
+        if key is None:
+            if not agent_id or not tool:
+                raise ValueError("ai.run requires key=... OR agent_id=... and tool=...")
+            payload: Dict[str, Any] = {"agent_id": str(agent_id), "tool": str(tool)}
+            if args is not None:
+                payload["args"] = dict(args)
+            if spend_usd is not None:
+                payload["spend_usd"] = float(spend_usd)
+        else:
+            if agent_id or tool or args or spend_usd is not None:
+                raise ValueError("ai.run: provide either key=... OR agent_id/tool, not both")
+            payload = {"key": key}
+            if ttl is not None:
+                payload["ttl"] = int(ttl)
+            md = to_metadata_dict(metadata)
+            if md is not None:
+                payload["metadata"] = md
 
         resp = request_with_retries_sync(
             lambda: self._c.post("/ai/run", json=payload),
@@ -71,6 +166,9 @@ class AiClient:
         )
         data = parse_json_or_raise(resp)
 
+        if "allowed" in data or "decision" in data:
+            return AiToolResult.from_dict(data)
+
         logger.debug("ai.run key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
         return AiRun.from_dict(data)
 
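
run() is now dual-mode: called with key= it starts (or re-joins) an idempotent run and returns AiRun, while agent_id=/tool= sends a governance-checked tool call and returns AiToolResult whenever the response carries allowed/decision. A usage sketch; the client.ai attribute as the AiClient handle is an assumption inferred from the module layout, not confirmed by this diff.

# Sketch only: `client.ai` as the AiClient handle is an assumption.
run = client.ai.run(key="report:2025-06", ttl=300, metadata={"source": "cron"})
print(run.lease_id, run.version)          # AiRun

verdict = client.ai.run(
    agent_id="agent-1",
    tool="send_email",
    args={"to": "ops@example.com"},
    spend_usd=0.02,
)
print(verdict.allowed, verdict.decision)  # AiToolResult
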
@@ -105,15 +203,14 @@ class AiClient:
         timeout: float = 60.0,
         poll_min: float = 0.5,
         poll_max: float = 5.0,
+        auto_extend: bool = True,
+        extend_every: float = 30.0,
+        lease_id: Optional[str] = None,
+        ttl: Optional[int] = None,
     ) -> AiResult:
-        """
-        Polls /ai/status until completed/failed or timeout.
-
-        NOTE: We do NOT return status="timeout" because backend model statuses
-        are usually limited to: not_found|in_progress|completed|failed.
-        Instead we return status="failed" with error_code="timeout".
-        """
         t0 = time.time()
+        last_ext = 0.0
+
         while True:
             st = self.status(key)
             if st.status in ("completed", "failed"):
@@ -122,32 +219,218 @@
             if time.time() - t0 >= timeout:
                 return AiResult(ok=False, status="failed", key=key, error_code="timeout")
 
+            # heartbeat (best-effort)
+            if auto_extend and lease_id:
+                now = time.time()
+                if now - last_ext >= float(extend_every):
+                    try:
+                        self.extend(key=key, lease_id=lease_id, ttl=ttl)
+                    except Exception:
+                        pass
+                    last_ext = now
+
             sleep_s = st.retry_after_sec if isinstance(st.retry_after_sec, int) else poll_min
             sleep_s = max(poll_min, min(poll_max, float(sleep_s)))
             time.sleep(sleep_s)
 
     def run_and_wait(
         self,
-        key: str,
+        key: Optional[str] = None,
         *,
         ttl: Optional[int] = None,
         metadata: Optional[MetadataLike] = None,
+        agent_id: Optional[str] = None,
+        tool: Optional[str] = None,
+        args: Optional[Dict[str, Any]] = None,
+        spend_usd: Optional[float] = None,
+        timeout: float = 60.0,
+        poll_min: float = 0.5,
+        poll_max: float = 5.0,
+        auto_extend: bool = True,
+        extend_every: float = 30.0,
+    ) -> Union[AiResult, AiToolResult]:
+        if key is None and (agent_id or tool):
+            return self.run(
+                key=None,
+                agent_id=agent_id,
+                tool=tool,
+                args=args,
+                spend_usd=spend_usd,
+            )
+
+        if key is None:
+            raise ValueError("ai.run_and_wait requires key=... OR agent_id/tool for tool execution")
+
+        run = self.run(key=key, ttl=ttl, metadata=metadata)
+        return self.wait(
+            key=key,
+            timeout=timeout,
+            poll_min=poll_min,
+            poll_max=poll_max,
+            auto_extend=auto_extend,
+            extend_every=extend_every,
+            lease_id=run.lease_id,
+            ttl=ttl,
+        )
+
+    def run_tool(
+        self,
+        *,
+        agent_id: str,
+        tool: str,
+        args: Optional[Dict[str, Any]] = None,
+        spend_usd: Optional[float] = None,
+    ) -> AiToolResult:
+        res = self.run(
+            key=None,
+            agent_id=agent_id,
+            tool=tool,
+            args=args,
+            spend_usd=spend_usd,
+        )
+        assert isinstance(res, AiToolResult)
+        return res
+
+    def run_fn(
+        self,
+        key: str,
+        fn: Callable[[], Any],
+        *,
+        ttl: int = 300,
+        metadata: Optional[MetadataLike] = None,
+        extend_every: float = 30.0,
+        wait_on_conflict: bool = True,
+        timeout: float = 60.0,
+        poll_min: float = 0.5,
+        poll_max: float = 5.0,
+        error_code: str = "fn_error",
+    ) -> AiResult:
+        """
+        Local execution, exactly-once:
+        - POST /ai/lease (charged only if acquired)
+        - Heartbeat: /ai/extend while fn runs (best effort)
+        - POST /ai/complete or /ai/fail
+        - Returns /ai/result (typed)
+        """
+        lease = self.lease(key=key, ttl=int(ttl), metadata=metadata)
+        status = str(lease.get("status") or "").lower()
+
+        if status == "acquired":
+            lease_id = lease.get("lease_id")
+            if not lease_id:
+                return AiResult(ok=False, status="failed", key=key, error_code="missing_lease_id")
+
+            stop = self._start_heartbeat_thread(key=key, lease_id=str(lease_id), ttl=int(ttl),
+                                                extend_every=extend_every)
+            try:
+                out = fn()
+                res_dict = self._result_to_dict(out)
+                self.complete(key=key, lease_id=str(lease_id), result=res_dict)
+            except Exception:
+                try:
+                    self.fail(key=key, lease_id=str(lease_id), error_code=error_code)
+                except Exception:
+                    pass
+                raise
+            finally:
+                stop.set()
+
+            return self.result(key)
+
+        if status in ("completed", "failed"):
+            return self.result(key)
+
+        # in_progress / locked / etc.
+        if wait_on_conflict:
+            return self.wait(key=key, timeout=timeout, poll_min=poll_min, poll_max=poll_max)
+
+        return AiResult(ok=False, status=status or "in_progress", key=key, error_code="not_acquired")
+
+    async def run_fn_async(
+        self,
+        key: str,
+        fn: Callable[[], Any],
+        *,
+        ttl: int = 300,
+        metadata: Optional[MetadataLike] = None,
+        extend_every: float = 30.0,
+        wait_on_conflict: bool = True,
         timeout: float = 60.0,
         poll_min: float = 0.5,
         poll_max: float = 5.0,
+        error_code: str = "fn_error",
     ) -> AiResult:
-        self.
-
+        lease = await self.lease_async(key=key, ttl=int(ttl), metadata=metadata)
+        status = str(lease.get("status") or "").lower()
+
+        if status == "acquired":
+            lease_id = lease.get("lease_id")
+            if not lease_id:
+                return AiResult(ok=False, status="failed", key=key, error_code="missing_lease_id")
+
+            hb = await self._start_heartbeat_task(
+                key=key, lease_id=str(lease_id), ttl=int(ttl), extend_every=extend_every
+            )
+            try:
+                out = fn()
+                if inspect.isawaitable(out):
+                    out = await out
+
+                res_dict = self._result_to_dict(out)
+                await self.complete_async(key=key, lease_id=str(lease_id), result=res_dict)
+            except Exception:
+                try:
+                    await self.fail_async(key=key, lease_id=str(lease_id), error_code=error_code)
+                except Exception:
+                    pass
+                raise
+            finally:
+                hb.cancel()
+                try:
+                    await hb
+                except Exception:
+                    pass
+
+            return await self.result_async(key)
+
+        if status in ("completed", "failed"):
+            return await self.result_async(key)
+
+        if wait_on_conflict:
+            return await self.wait_async(key=key, timeout=timeout, poll_min=poll_min, poll_max=poll_max)
+
+        return AiResult(ok=False, status=status or "in_progress", key=key, error_code="not_acquired")
 
     # -------------------- async high-level --------------------
 
-    async def run_async(
-
-
-
-
-
-
+    async def run_async(
+        self,
+        key: Optional[str] = None,
+        ttl: Optional[int] = None,
+        metadata: Optional[MetadataLike] = None,
+        *,
+        agent_id: Optional[str] = None,
+        tool: Optional[str] = None,
+        args: Optional[Dict[str, Any]] = None,
+        spend_usd: Optional[float] = None,
+    ) -> Union[AiRun, AiToolResult]:
+        if key is None:
+            if not agent_id or not tool:
+                raise ValueError("ai.run_async requires key=... OR agent_id=... and tool=...")
+            payload: Dict[str, Any] = {"agent_id": str(agent_id), "tool": str(tool)}
+            if args is not None:
+                payload["args"] = dict(args)
+            if spend_usd is not None:
+                payload["spend_usd"] = float(spend_usd)
+        else:
+            if agent_id or tool or args or spend_usd is not None:
+                raise ValueError("ai.run_async: provide either key=... OR agent_id/tool, not both")
+            payload = {"key": key}
+            if ttl is not None:
+                payload["ttl"] = int(ttl)
+            md = to_metadata_dict(metadata)
+            if md is not None:
+                payload["metadata"] = md
 
         c = await self._get_ac()
         resp = await request_with_retries_async(
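
run_fn() wraps a local callable in the lease lifecycle its docstring describes: acquire via /ai/lease (charged only if acquired), heartbeat with /ai/extend on a daemon thread while fn runs, then /ai/complete or /ai/fail, and finally return the typed /ai/result. A minimal usage sketch, again assuming client.ai is the AiClient handle:

# Sketch: exactly-once local execution with run_fn (client.ai is assumed).
def generate_invoice() -> dict:
    ...  # expensive, side-effecting work
    return {"invoice_id": "inv_123", "total": 99.0}

res = client.ai.run_fn(
    "invoice:order-42",      # idempotency key
    generate_invoice,
    ttl=300,                 # lease TTL in seconds
    extend_every=30.0,       # heartbeat interval
    wait_on_conflict=True,   # lease held elsewhere -> poll for that run's result
)
print(res.ok, res.status)    # AiResult; a duplicate call returns the stored result
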
@@ -158,6 +441,9 @@ class AiClient:
         )
         data = parse_json_or_raise(resp)
 
+        if "allowed" in data or "decision" in data:
+            return AiToolResult.from_dict(data)
+
         logger.debug("ai.run_async key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
         return AiRun.from_dict(data)
 
@@ -194,8 +480,14 @@ class AiClient:
         timeout: float = 60.0,
         poll_min: float = 0.5,
         poll_max: float = 5.0,
+        auto_extend: bool = True,
+        extend_every: float = 30.0,
+        lease_id: Optional[str] = None,
+        ttl: Optional[int] = None,
     ) -> AiResult:
         t0 = time.time()
+        last_ext = 0.0
+
         while True:
             st = await self.status_async(key)
             if st.status in ("completed", "failed"):
@@ -204,22 +496,77 @@ class AiClient:
             if time.time() - t0 >= timeout:
                 return AiResult(ok=False, status="failed", key=key, error_code="timeout")
 
+            # heartbeat (best-effort)
+            if auto_extend and lease_id:
+                now = time.time()
+                if now - last_ext >= float(extend_every):
+                    try:
+                        await self.extend_async(key=key, lease_id=lease_id, ttl=ttl)
+                    except Exception:
+                        pass
+                    last_ext = now
+
             sleep_s = st.retry_after_sec if isinstance(st.retry_after_sec, int) else poll_min
             sleep_s = max(poll_min, min(poll_max, float(sleep_s)))
             await asyncio.sleep(sleep_s)
 
     async def run_and_wait_async(
         self,
-        key: str,
+        key: Optional[str] = None,
         *,
         ttl: Optional[int] = None,
         metadata: Optional[MetadataLike] = None,
+        agent_id: Optional[str] = None,
+        tool: Optional[str] = None,
+        args: Optional[Dict[str, Any]] = None,
+        spend_usd: Optional[float] = None,
         timeout: float = 60.0,
         poll_min: float = 0.5,
         poll_max: float = 5.0,
-
-
-
+        auto_extend: bool = True,
+        extend_every: float = 30.0,
+    ) -> Union[AiResult, AiToolResult]:
+        if key is None and (agent_id or tool):
+            return await self.run_async(
+                key=None,
+                agent_id=agent_id,
+                tool=tool,
+                args=args,
+                spend_usd=spend_usd,
+            )
+
+        if key is None:
+            raise ValueError("ai.run_and_wait_async requires key=... OR agent_id/tool for tool execution")
+
+        run = await self.run_async(key=key, ttl=ttl, metadata=metadata)
+        return await self.wait_async(
+            key=key,
+            timeout=timeout,
+            poll_min=poll_min,
+            poll_max=poll_max,
+            auto_extend=auto_extend,
+            extend_every=extend_every,
+            lease_id=run.lease_id,
+            ttl=ttl,
+        )
+
+    async def run_tool_async(
+        self,
+        *,
+        agent_id: str,
+        tool: str,
+        args: Optional[Dict[str, Any]] = None,
+        spend_usd: Optional[float] = None,
+    ) -> AiToolResult:
+        res = await self.run_async(
+            key=None,
+            agent_id=agent_id,
+            tool=tool,
+            args=args,
+            spend_usd=spend_usd,
+        )
+        assert isinstance(res, AiToolResult)
+        return res
 
     # ------------------------------------------------------------------
     # Low-level lease API (sync) - returns raw dicts (backend models)
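
The async surface mirrors the sync one (run_async, run_and_wait_async, run_tool_async, run_fn_async); run_fn_async awaits fn() when it returns an awaitable and cancels the heartbeat task in its finally block. A short asyncio sketch under the same client.ai assumption:

import asyncio

async def main() -> None:
    async def fetch_report() -> dict:
        await asyncio.sleep(0.1)  # stand-in for real async work
        return {"rows": 10}

    # run_fn_async accepts sync or async callables; awaitable results are awaited.
    res = await client.ai.run_fn_async("report:daily", fetch_report, ttl=120)
    print(res.status)

asyncio.run(main())
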
onceonly/ai_models.py
CHANGED
@@ -15,6 +15,9 @@ class AiRun:
     key: str
     lease_id: Optional[str] = None
     version: int = 0
+    ttl: Optional[int] = None
+    ttl_left: Optional[int] = None
+    first_seen_at: Optional[str] = None
     charged: Optional[int] = None
     usage: Optional[int] = None
     limit: Optional[int] = None
@@ -32,6 +35,9 @@ class AiRun:
             key=str(d.get("key") or ""),
             lease_id=d.get("lease_id"),
             version=int(d.get("version") or 0),
+            ttl=d.get("ttl"),
+            ttl_left=d.get("ttl_left"),
+            first_seen_at=d.get("first_seen_at"),
             charged=d.get("charged"),
             usage=d.get("usage"),
             limit=d.get("limit"),
@@ -95,3 +101,24 @@ class AiResult:
             error_code=d.get("error_code"),
             done_at=d.get("done_at"),
         )
+
+
+@dataclass(frozen=True)
+class AiToolResult:
+    ok: bool
+    allowed: bool
+    decision: str
+    policy_reason: Optional[str] = None
+    risk_level: Optional[str] = None
+    result: Optional[Dict[str, Any]] = None
+
+    @staticmethod
+    def from_dict(d: Dict[str, Any]) -> "AiToolResult":
+        return AiToolResult(
+            ok=bool(d.get("ok", False)),
+            allowed=bool(d.get("allowed", False)),
+            decision=str(d.get("decision") or ""),
+            policy_reason=d.get("policy_reason"),
+            risk_level=d.get("risk_level"),
+            result=_dict_or_none(d.get("result")),
+        )