onceonly-sdk 1.2.0.tar.gz → 2.0.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. onceonly_sdk-2.0.0/PKG-INFO +140 -0
  2. onceonly_sdk-2.0.0/README.md +115 -0
  3. {onceonly_sdk-1.2.0 → onceonly_sdk-2.0.0}/onceonly/__init__.py +2 -2
  4. onceonly_sdk-2.0.0/onceonly/_http.py +109 -0
  5. onceonly_sdk-2.0.0/onceonly/_util.py +46 -0
  6. onceonly_sdk-2.0.0/onceonly/ai.py +195 -0
  7. onceonly_sdk-2.0.0/onceonly/ai_models.py +97 -0
  8. {onceonly_sdk-1.2.0 → onceonly_sdk-2.0.0}/onceonly/client.py +134 -162
  9. onceonly_sdk-2.0.0/onceonly/decorators.py +174 -0
  10. {onceonly_sdk-1.2.0 → onceonly_sdk-2.0.0}/onceonly/exceptions.py +5 -1
  11. onceonly_sdk-2.0.0/onceonly/integrations/__init__.py +1 -0
  12. onceonly_sdk-2.0.0/onceonly/integrations/langchain.py +168 -0
  13. onceonly_sdk-2.0.0/onceonly/models.py +27 -0
  14. onceonly_sdk-2.0.0/onceonly/version.py +1 -0
  15. onceonly_sdk-2.0.0/onceonly_sdk.egg-info/PKG-INFO +140 -0
  16. {onceonly_sdk-1.2.0 → onceonly_sdk-2.0.0}/onceonly_sdk.egg-info/SOURCES.txt +10 -1
  17. onceonly_sdk-2.0.0/onceonly_sdk.egg-info/requires.txt +9 -0
  18. {onceonly_sdk-1.2.0 → onceonly_sdk-2.0.0}/pyproject.toml +8 -12
  19. onceonly_sdk-2.0.0/tests/test_client_behavior.py +42 -0
  20. onceonly_sdk-2.0.0/tests/test_integrations.py +151 -0
  21. onceonly_sdk-1.2.0/tests/test_check_lock.py → onceonly_sdk-2.0.0/tests/test_parsing.py +30 -6
  22. onceonly_sdk-1.2.0/PKG-INFO +0 -153
  23. onceonly_sdk-1.2.0/README.md +0 -132
  24. onceonly_sdk-1.2.0/onceonly/decorators.py +0 -80
  25. onceonly_sdk-1.2.0/onceonly/models.py +0 -16
  26. onceonly_sdk-1.2.0/onceonly_sdk.egg-info/PKG-INFO +0 -153
  27. onceonly_sdk-1.2.0/onceonly_sdk.egg-info/requires.txt +0 -4
  28. {onceonly_sdk-1.2.0 → onceonly_sdk-2.0.0}/LICENSE +0 -0
  29. {onceonly_sdk-1.2.0 → onceonly_sdk-2.0.0}/onceonly_sdk.egg-info/dependency_links.txt +0 -0
  30. {onceonly_sdk-1.2.0 → onceonly_sdk-2.0.0}/onceonly_sdk.egg-info/top_level.txt +0 -0
  31. {onceonly_sdk-1.2.0 → onceonly_sdk-2.0.0}/setup.cfg +0 -0
@@ -0,0 +1,140 @@
1
+ Metadata-Version: 2.4
2
+ Name: onceonly-sdk
3
+ Version: 2.0.0
4
+ Summary: Python SDK for OnceOnly idempotency API
5
+ Author-email: OnceOnly <support@onceonly.tech>
6
+ License: MIT
7
+ Project-URL: Homepage, https://onceonly.tech/
8
+ Project-URL: Documentation, https://onceonly.tech/docs/
9
+ Project-URL: Repository, https://github.com/mykolademyanov/onceonly-python
10
+ Keywords: idempotency,automation,zapier,make,ai-agents
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Operating System :: OS Independent
14
+ Requires-Python: >=3.9
15
+ Description-Content-Type: text/markdown
16
+ License-File: LICENSE
17
+ Requires-Dist: httpx>=0.25
18
+ Provides-Extra: test
19
+ Requires-Dist: pytest>=7.0; extra == "test"
20
+ Requires-Dist: pytest-asyncio>=0.23; extra == "test"
21
+ Requires-Dist: anyio>=4.0; extra == "test"
22
+ Provides-Extra: langchain
23
+ Requires-Dist: langchain-core>=0.1.0; extra == "langchain"
24
+ Dynamic: license-file
25
+
26
+ # OnceOnly Python SDK
27
+
28
+ **The Idempotency Layer for AI Agents, Webhooks, and Distributed Systems.**
29
+
30
+ OnceOnly is a high-performance Python SDK that ensures **exactly-once execution**.
31
+ It prevents duplicate actions (payments, emails, tool calls) in unstable environments like
32
+ AI agents, webhooks, retries, or background workers.
33
+
34
+ Website: https://onceonly.tech/ai/
35
+ Documentation: https://onceonly.tech/docs/
36
+
37
+ ---
38
+
39
+ ## Features
40
+
41
+ - Sync + Async client (httpx-based)
42
+ - Fail-open mode for production safety
43
+ - Stable idempotency keys (supports Pydantic & dataclasses)
44
+ - Decorator for zero-boilerplate usage
45
+ - Optional AI / LangChain integrations
46
+
47
+ ---
48
+
49
+ ## Installation
50
+
51
+ ```bash
52
+ pip install onceonly-sdk
53
+ ```
54
+
55
+ ### With LangChain support included:
56
+
57
+ ```bash
58
+ pip install "onceonly-sdk[langchain]"
59
+ ```
60
+
61
+ ---
62
+
63
+ ## Quick Start
64
+
65
+ ```python
66
+ from onceonly import OnceOnly
67
+
68
+ client = OnceOnly(
69
+ api_key="once_live_...",
70
+ fail_open=True # default: continues if API is down
71
+ )
72
+
73
+ res = client.check_lock(key="order:123", ttl=300)
74
+
75
+ if res.duplicate:
76
+ print("Duplicate blocked")
77
+ else:
78
+ print("First execution")
79
+ ```
80
+
81
+ ---
82
+
83
+ ## AI Agents / LangChain Integration 🤖
84
+
85
+ OnceOnly integrates cleanly with AI-agent frameworks like LangChain.
86
+
87
+ ```python
88
+ from onceonly.integrations.langchain import make_idempotent_tool
89
+
90
+ tool = make_idempotent_tool(
91
+ original_tool,
92
+ client=client,
93
+ key_prefix="agent:tool"
94
+ )
95
+ ```
96
+
97
+ Repeated tool calls with the same inputs will execute **exactly once**, even across retries or agent restarts.
98
+
99
+ ---
100
+
101
+ ## Decorator
102
+
103
+ ```python
104
+ from onceonly.decorators import idempotent
105
+
106
+ @idempotent(client, ttl=3600)
107
+ def process_order(order_id):
108
+ ...
109
+ ```
110
+
111
+ Idempotency keys are generated automatically and are stable across restarts.
112
+
113
+ ---
114
+
115
+ ## Fail-Open Mode
116
+
117
+ Fail-open is enabled by default.
118
+
119
+ Network errors, timeouts, or server errors (5xx) will **not break your application**.
120
+ The SDK will allow execution to continue safely.
121
+
122
+ Fail-open never applies to:
123
+ - Auth errors (401 / 403)
124
+ - Plan limits (402)
125
+ - Validation errors (422)
126
+ - Rate limits (429)
127
+
128
+ ---
129
+
130
+ ## Support
131
+
132
+ Need help?
133
+ Email: support@onceonly.tech
134
+ Or open an issue on GitHub.
135
+
136
+ ---
137
+
138
+ ## License
139
+
140
+ MIT
@@ -0,0 +1,115 @@
1
+ # OnceOnly Python SDK
2
+
3
+ **The Idempotency Layer for AI Agents, Webhooks, and Distributed Systems.**
4
+
5
+ OnceOnly is a high-performance Python SDK that ensures **exactly-once execution**.
6
+ It prevents duplicate actions (payments, emails, tool calls) in unstable environments like
7
+ AI agents, webhooks, retries, or background workers.
8
+
9
+ Website: https://onceonly.tech/ai/
10
+ Documentation: https://onceonly.tech/docs/
11
+
12
+ ---
13
+
14
+ ## Features
15
+
16
+ - Sync + Async client (httpx-based)
17
+ - Fail-open mode for production safety
18
+ - Stable idempotency keys (supports Pydantic & dataclasses)
19
+ - Decorator for zero-boilerplate usage
20
+ - Optional AI / LangChain integrations
21
+
22
+ ---
23
+
24
+ ## Installation
25
+
26
+ ```bash
27
+ pip install onceonly-sdk
28
+ ```
29
+
30
+ ### With LangChain support included:
31
+
32
+ ```bash
33
+ pip install "onceonly-sdk[langchain]"
34
+ ```
35
+
36
+ ---
37
+
38
+ ## Quick Start
39
+
40
+ ```python
41
+ from onceonly import OnceOnly
42
+
43
+ client = OnceOnly(
44
+ api_key="once_live_...",
45
+ fail_open=True # default: continues if API is down
46
+ )
47
+
48
+ res = client.check_lock(key="order:123", ttl=300)
49
+
50
+ if res.duplicate:
51
+ print("Duplicate blocked")
52
+ else:
53
+ print("First execution")
54
+ ```
55
+
56
+ ---
57
+
58
+ ## AI Agents / LangChain Integration 🤖
59
+
60
+ OnceOnly integrates cleanly with AI-agent frameworks like LangChain.
61
+
62
+ ```python
63
+ from onceonly.integrations.langchain import make_idempotent_tool
64
+
65
+ tool = make_idempotent_tool(
66
+ original_tool,
67
+ client=client,
68
+ key_prefix="agent:tool"
69
+ )
70
+ ```
71
+
72
+ Repeated tool calls with the same inputs will execute **exactly once**, even across retries or agent restarts.
73
+
74
+ ---
75
+
76
+ ## Decorator
77
+
78
+ ```python
79
+ from onceonly.decorators import idempotent
80
+
81
+ @idempotent(client, ttl=3600)
82
+ def process_order(order_id):
83
+ ...
84
+ ```
85
+
86
+ Idempotency keys are generated automatically and are stable across restarts.
87
+
88
+ ---
89
+
90
+ ## Fail-Open Mode
91
+
92
+ Fail-open is enabled by default.
93
+
94
+ Network errors, timeouts, or server errors (5xx) will **not break your application**.
95
+ The SDK will allow execution to continue safely.
96
+
97
+ Fail-open never applies to:
98
+ - Auth errors (401 / 403)
99
+ - Plan limits (402)
100
+ - Validation errors (422)
101
+ - Rate limits (429)
102
+
103
+ ---
104
+
105
+ ## Support
106
+
107
+ Need help?
108
+ Email: support@onceonly.tech
109
+ Or open an issue on GitHub.
110
+
111
+ ---
112
+
113
+ ## License
114
+
115
+ MIT
@@ -1,3 +1,4 @@
1
+ from .version import __version__
1
2
  from .client import OnceOnly, create_client
2
3
  from .models import CheckLockResult
3
4
  from .exceptions import (
@@ -9,8 +10,6 @@ from .exceptions import (
9
10
  ApiError,
10
11
  )
11
12
 
12
- __version__ = "1.2.0"
13
-
14
13
  __all__ = [
15
14
  "OnceOnly",
16
15
  "create_client",
@@ -21,4 +20,5 @@ __all__ = [
21
20
  "RateLimitError",
22
21
  "ValidationError",
23
22
  "ApiError",
23
+ "__version__",
24
24
  ]
@@ -0,0 +1,109 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import time
5
+ from typing import Any, Dict, Optional, Union, Callable, Awaitable
6
+
7
+ import httpx
8
+
9
+ from .exceptions import ApiError, UnauthorizedError, OverLimitError, RateLimitError, ValidationError
10
+
11
+
12
+ def try_extract_detail(resp: httpx.Response) -> Optional[Union[Dict[str, Any], str, Any]]:
13
+ try:
14
+ j = resp.json()
15
+ if isinstance(j, dict) and "detail" in j:
16
+ return j.get("detail")
17
+ return j
18
+ except Exception:
19
+ return None
20
+
21
+
22
+ def error_text(resp: httpx.Response, default: str) -> str:
23
+ d = try_extract_detail(resp)
24
+ if isinstance(d, dict):
25
+ return d.get("error") or d.get("message") or default
26
+ if isinstance(d, str) and d.strip():
27
+ return d
28
+ return default
29
+
30
+
31
+ def _parse_retry_after(resp: httpx.Response) -> Optional[float]:
32
+ # headers are case-insensitive in httpx
33
+ ra = resp.headers.get("Retry-After")
34
+ if not ra:
35
+ return None
36
+ ra = ra.strip()
37
+ try:
38
+ return float(ra)
39
+ except Exception:
40
+ return None
41
+
42
+
43
+ def parse_json_or_raise(resp: httpx.Response) -> Dict[str, Any]:
44
+ # typed errors
45
+ if resp.status_code in (401, 403):
46
+ raise UnauthorizedError(error_text(resp, "Invalid API Key (Unauthorized)."))
47
+
48
+ if resp.status_code == 402:
49
+ d = try_extract_detail(resp)
50
+ raise OverLimitError("Usage limit reached. Please upgrade your plan.", detail=d if isinstance(d, dict) else {})
51
+
52
+ if resp.status_code == 429:
53
+ retry_after = _parse_retry_after(resp)
54
+ raise RateLimitError(error_text(resp, "Rate limit exceeded. Please slow down."), retry_after_sec=retry_after)
55
+
56
+ if resp.status_code == 422:
57
+ raise ValidationError(error_text(resp, f"Validation Error: {resp.text}"))
58
+
59
+ if resp.status_code < 200 or resp.status_code >= 300:
60
+ d = try_extract_detail(resp)
61
+ raise ApiError(
62
+ error_text(resp, f"API Error ({resp.status_code})"),
63
+ status_code=resp.status_code,
64
+ detail=d if isinstance(d, dict) else {},
65
+ )
66
+
67
+ try:
68
+ data = resp.json()
69
+ except Exception:
70
+ data = {}
71
+ return data if isinstance(data, dict) else {"data": data}
72
+
73
+
74
+ def request_with_retries_sync(
75
+ fn: Callable[[], httpx.Response],
76
+ *,
77
+ max_retries: int,
78
+ base_backoff: float,
79
+ max_backoff: float,
80
+ ) -> httpx.Response:
81
+ attempt = 0
82
+ while True:
83
+ resp = fn()
84
+ if resp.status_code != 429 or attempt >= max_retries:
85
+ return resp
86
+
87
+ ra = _parse_retry_after(resp)
88
+ sleep_s = ra if ra is not None else min(max_backoff, base_backoff * (2**attempt))
89
+ time.sleep(max(0.0, float(sleep_s)))
90
+ attempt += 1
91
+
92
+
93
+ async def request_with_retries_async(
94
+ fn: Callable[[], Awaitable[httpx.Response]],
95
+ *,
96
+ max_retries: int,
97
+ base_backoff: float,
98
+ max_backoff: float,
99
+ ) -> httpx.Response:
100
+ attempt = 0
101
+ while True:
102
+ resp = await fn()
103
+ if resp.status_code != 429 or attempt >= max_retries:
104
+ return resp
105
+
106
+ ra = _parse_retry_after(resp)
107
+ sleep_s = ra if ra is not None else min(max_backoff, base_backoff * (2**attempt))
108
+ await asyncio.sleep(max(0.0, float(sleep_s)))
109
+ attempt += 1
@@ -0,0 +1,46 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import is_dataclass, asdict
4
+ from typing import Any, Dict, Mapping, Optional, Union
5
+
6
+ MetadataLike = Union[Mapping[str, Any], Any] # Mapping | pydantic model | dataclass | any
7
+
8
+
9
+ def to_metadata_dict(metadata: Optional[MetadataLike]) -> Optional[Dict[str, Any]]:
10
+ """
11
+ Accepts:
12
+ - Mapping[str, Any]
13
+ - Pydantic model (duck-typed: has model_dump())
14
+ - dataclass
15
+ - anything else => {"value": str(obj)}
16
+
17
+ Returns plain JSON-ready dict (best effort).
18
+ """
19
+ if metadata is None:
20
+ return None
21
+
22
+ # Pydantic v2
23
+ md = getattr(metadata, "model_dump", None)
24
+ if callable(md):
25
+ try:
26
+ out = md()
27
+ return out if isinstance(out, dict) else {"data": out}
28
+ except Exception:
29
+ return {"value": str(metadata)}
30
+
31
+ # dataclass
32
+ if is_dataclass(metadata):
33
+ try:
34
+ out = asdict(metadata)
35
+ return out if isinstance(out, dict) else {"data": out}
36
+ except Exception:
37
+ return {"value": str(metadata)}
38
+
39
+ # mapping
40
+ if isinstance(metadata, Mapping):
41
+ try:
42
+ return dict(metadata)
43
+ except Exception:
44
+ return {"value": str(metadata)}
45
+
46
+ return {"value": str(metadata)}
@@ -0,0 +1,195 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import time
5
+ import logging
6
+ from typing import Any, Dict, Optional, Awaitable, Callable
7
+
8
+ import httpx
9
+
10
+ from ._http import (
11
+ parse_json_or_raise,
12
+ request_with_retries_sync,
13
+ request_with_retries_async,
14
+ )
15
+ from ._util import to_metadata_dict, MetadataLike
16
+ from .ai_models import AiRun, AiStatus, AiResult
17
+
18
+ logger = logging.getLogger("onceonly")
19
+
20
+
21
+ class AiClient:
22
+ """
23
+ AI helpers for long-running backend tasks.
24
+
25
+ Typical usage for agents:
26
+ result = client.ai.run_and_wait(key="job:123", metadata={...})
27
+
28
+ Endpoints:
29
+ - POST /ai/run => start/attach to a run (idempotent by key)
30
+ - GET /ai/status => poll status
31
+ - GET /ai/result => fetch final result (completed/failed)
32
+ """
33
+
34
+ def __init__(
35
+ self,
36
+ sync_client: httpx.Client,
37
+ async_client_getter: Callable[[], Awaitable[httpx.AsyncClient]],
38
+ *,
39
+ max_retries_429: int = 0,
40
+ retry_backoff: float = 0.5,
41
+ retry_max_backoff: float = 5.0,
42
+ ):
43
+ self._c = sync_client
44
+ self._get_ac = async_client_getter
45
+
46
+ self._max_retries_429 = int(max_retries_429)
47
+ self._retry_backoff = float(retry_backoff)
48
+ self._retry_max_backoff = float(retry_max_backoff)
49
+
50
+ # ---- sync ----
51
+
52
+ def run(self, key: str, ttl: Optional[int] = None, metadata: Optional[MetadataLike] = None) -> AiRun:
53
+ payload: Dict[str, Any] = {"key": key}
54
+ if ttl is not None:
55
+ payload["ttl"] = int(ttl)
56
+ md = to_metadata_dict(metadata)
57
+ if md is not None:
58
+ payload["metadata"] = md
59
+
60
+ resp = request_with_retries_sync(
61
+ lambda: self._c.post("/ai/run", json=payload),
62
+ max_retries=self._max_retries_429,
63
+ base_backoff=self._retry_backoff,
64
+ max_backoff=self._retry_max_backoff,
65
+ )
66
+ data = parse_json_or_raise(resp)
67
+
68
+ logger.debug("ai.run key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
69
+ return AiRun.from_dict(data)
70
+
71
+ def status(self, key: str) -> AiStatus:
72
+ resp = request_with_retries_sync(
73
+ lambda: self._c.get("/ai/status", params={"key": key}),
74
+ max_retries=self._max_retries_429,
75
+ base_backoff=self._retry_backoff,
76
+ max_backoff=self._retry_max_backoff,
77
+ )
78
+ data = parse_json_or_raise(resp)
79
+
80
+ logger.debug("ai.status key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
81
+ return AiStatus.from_dict(data)
82
+
83
+ def result(self, key: str) -> AiResult:
84
+ resp = request_with_retries_sync(
85
+ lambda: self._c.get("/ai/result", params={"key": key}),
86
+ max_retries=self._max_retries_429,
87
+ base_backoff=self._retry_backoff,
88
+ max_backoff=self._retry_max_backoff,
89
+ )
90
+ data = parse_json_or_raise(resp)
91
+
92
+ logger.debug("ai.result key=%s status=%s", key, data.get("status"))
93
+ return AiResult.from_dict(data)
94
+
95
+ def wait(self, key: str, timeout: float = 60.0, poll_min: float = 0.5, poll_max: float = 5.0) -> AiResult:
96
+ t0 = time.time()
97
+ while True:
98
+ st = self.status(key)
99
+ if st.status in ("completed", "failed"):
100
+ return self.result(key)
101
+
102
+ if time.time() - t0 >= timeout:
103
+ return AiResult(ok=False, status="timeout", key=key, error_code="timeout")
104
+
105
+ sleep_s = st.retry_after_sec if isinstance(st.retry_after_sec, int) else poll_min
106
+ sleep_s = max(poll_min, min(poll_max, float(sleep_s)))
107
+ time.sleep(sleep_s)
108
+
109
+ def run_and_wait(
110
+ self,
111
+ key: str,
112
+ *,
113
+ ttl: Optional[int] = None,
114
+ metadata: Optional[MetadataLike] = None,
115
+ timeout: float = 60.0,
116
+ poll_min: float = 0.5,
117
+ poll_max: float = 5.0,
118
+ ) -> AiResult:
119
+ self.run(key=key, ttl=ttl, metadata=metadata)
120
+ return self.wait(key=key, timeout=timeout, poll_min=poll_min, poll_max=poll_max)
121
+
122
+ # ---- async ----
123
+
124
+ async def run_async(self, key: str, ttl: Optional[int] = None, metadata: Optional[MetadataLike] = None) -> AiRun:
125
+ payload: Dict[str, Any] = {"key": key}
126
+ if ttl is not None:
127
+ payload["ttl"] = int(ttl)
128
+ md = to_metadata_dict(metadata)
129
+ if md is not None:
130
+ payload["metadata"] = md
131
+
132
+ c = await self._get_ac()
133
+ resp = await request_with_retries_async(
134
+ lambda: c.post("/ai/run", json=payload),
135
+ max_retries=self._max_retries_429,
136
+ base_backoff=self._retry_backoff,
137
+ max_backoff=self._retry_max_backoff,
138
+ )
139
+ data = parse_json_or_raise(resp)
140
+
141
+ logger.debug("ai.run_async key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
142
+ return AiRun.from_dict(data)
143
+
144
+ async def status_async(self, key: str) -> AiStatus:
145
+ c = await self._get_ac()
146
+ resp = await request_with_retries_async(
147
+ lambda: c.get("/ai/status", params={"key": key}),
148
+ max_retries=self._max_retries_429,
149
+ base_backoff=self._retry_backoff,
150
+ max_backoff=self._retry_max_backoff,
151
+ )
152
+ data = parse_json_or_raise(resp)
153
+
154
+ logger.debug("ai.status_async key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
155
+ return AiStatus.from_dict(data)
156
+
157
+ async def result_async(self, key: str) -> AiResult:
158
+ c = await self._get_ac()
159
+ resp = await request_with_retries_async(
160
+ lambda: c.get("/ai/result", params={"key": key}),
161
+ max_retries=self._max_retries_429,
162
+ base_backoff=self._retry_backoff,
163
+ max_backoff=self._retry_max_backoff,
164
+ )
165
+ data = parse_json_or_raise(resp)
166
+
167
+ logger.debug("ai.result_async key=%s status=%s", key, data.get("status"))
168
+ return AiResult.from_dict(data)
169
+
170
+ async def wait_async(self, key: str, timeout: float = 60.0, poll_min: float = 0.5, poll_max: float = 5.0) -> AiResult:
171
+ t0 = time.time()
172
+ while True:
173
+ st = await self.status_async(key)
174
+ if st.status in ("completed", "failed"):
175
+ return await self.result_async(key)
176
+
177
+ if time.time() - t0 >= timeout:
178
+ return AiResult(ok=False, status="timeout", key=key, error_code="timeout")
179
+
180
+ sleep_s = st.retry_after_sec if isinstance(st.retry_after_sec, int) else poll_min
181
+ sleep_s = max(poll_min, min(poll_max, float(sleep_s)))
182
+ await asyncio.sleep(sleep_s)
183
+
184
+ async def run_and_wait_async(
185
+ self,
186
+ key: str,
187
+ *,
188
+ ttl: Optional[int] = None,
189
+ metadata: Optional[MetadataLike] = None,
190
+ timeout: float = 60.0,
191
+ poll_min: float = 0.5,
192
+ poll_max: float = 5.0,
193
+ ) -> AiResult:
194
+ await self.run_async(key=key, ttl=ttl, metadata=metadata)
195
+ return await self.wait_async(key=key, timeout=timeout, poll_min=poll_min, poll_max=poll_max)