onceonly-sdk 2.0.0.tar.gz → 2.0.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/PKG-INFO +55 -8
  2. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/README.md +54 -7
  3. onceonly_sdk-2.0.1/onceonly/ai.py +403 -0
  4. onceonly_sdk-2.0.1/onceonly/version.py +1 -0
  5. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly_sdk.egg-info/PKG-INFO +55 -8
  6. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/pyproject.toml +1 -1
  7. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/tests/test_client_behavior.py +34 -0
  8. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/tests/test_integrations.py +41 -0
  9. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/tests/test_parsing.py +27 -0
  10. onceonly_sdk-2.0.0/onceonly/ai.py +0 -195
  11. onceonly_sdk-2.0.0/onceonly/version.py +0 -1
  12. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/LICENSE +0 -0
  13. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/__init__.py +0 -0
  14. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/_http.py +0 -0
  15. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/_util.py +0 -0
  16. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/ai_models.py +0 -0
  17. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/client.py +0 -0
  18. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/decorators.py +0 -0
  19. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/exceptions.py +0 -0
  20. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/integrations/__init__.py +0 -0
  21. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/integrations/langchain.py +0 -0
  22. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly/models.py +0 -0
  23. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly_sdk.egg-info/SOURCES.txt +0 -0
  24. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly_sdk.egg-info/dependency_links.txt +0 -0
  25. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly_sdk.egg-info/requires.txt +0 -0
  26. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/onceonly_sdk.egg-info/top_level.txt +0 -0
  27. {onceonly_sdk-2.0.0 → onceonly_sdk-2.0.1}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onceonly-sdk
- Version: 2.0.0
+ Version: 2.0.1
  Summary: Python SDK for OnceOnly idempotency API
  Author-email: OnceOnly <support@onceonly.tech>
  License: MIT
@@ -41,7 +41,8 @@ Documentation: https://onceonly.tech/docs/
  - Sync + Async client (httpx-based)
  - Fail-open mode for production safety
  - Stable idempotency keys (supports Pydantic & dataclasses)
- - Decorator for zero-boilerplate usage
+ - Decorators for zero-boilerplate usage
+ - Native AI API (long-running jobs, local side-effects)
  - Optional AI / LangChain integrations

  ---
@@ -60,7 +61,7 @@ pip install "onceonly-sdk[langchain]"

  ---

- ## Quick Start
+ ## Quick Start (Webhooks / Automations)

  ```python
  from onceonly import OnceOnly
@@ -78,11 +79,54 @@ else:
      print("First execution")
  ```

+ Use `check_lock()` for:
+ - Webhooks
+ - Make / Zapier scenarios
+ - Cron jobs
+ - Distributed workers
+
+ ---
+
+ ## AI Jobs (Server-side)
+
+ Use the AI API for long-running or asynchronous jobs.
+
+ ```python
+ result = client.ai.run_and_wait(
+     key="ai:job:daily_summary:2026-01-09",
+     metadata={"task": "daily_summary", "model": "gpt-4.1"},
+     timeout=60,
+ )
+
+ print(result.status)
+ print(result.result)
+ ```
+
+ - Charged **once per key**
+ - Polling is free
+ - Safe across retries and restarts
+
  ---

- ## AI Agents / LangChain Integration 🤖
+ ## AI Agents / Local Side-Effects

- OnceOnly integrates cleanly with AI-agent frameworks like LangChain.
+ Use the AI Lease API when your code performs the side-effect locally
+ (payments, emails, webhooks) but still needs exactly-once guarantees.
+
+ ```python
+ KEY = "ai:agent:charge:user_42:invoice_100"
+ lease = client.ai.lease(key=KEY, ttl=300)
+
+ if lease["status"] == "acquired":
+     try:
+         do_side_effect()
+         client.ai.complete(key=KEY, lease_id=lease["lease_id"], result={"ok": True})
+     except Exception:
+         client.ai.fail(key=KEY, lease_id=lease["lease_id"], error_code="failed")
+ ```
+
+ ---
+
+ ## LangChain Integration 🤖

  ```python
  from onceonly.integrations.langchain import make_idempotent_tool
@@ -94,11 +138,14 @@ tool = make_idempotent_tool(
  )
  ```

- Repeated tool calls with the same inputs will execute **exactly once**, even across retries or agent restarts.
+ Repeated tool calls with the same inputs will execute **exactly once**,
+ even across retries or agent restarts.
+
+ See `examples/ai/` for canonical patterns.

  ---

- ## Decorator
+ ## Decorators

  ```python
  from onceonly.decorators import idempotent
@@ -108,7 +155,7 @@ def process_order(order_id):
      ...
  ```

- Idempotency keys are generated automatically and are stable across restarts.
+ Idempotency keys are generated automatically and remain stable across restarts.

  ---

@@ -16,7 +16,8 @@ Documentation: https://onceonly.tech/docs/
  - Sync + Async client (httpx-based)
  - Fail-open mode for production safety
  - Stable idempotency keys (supports Pydantic & dataclasses)
- - Decorator for zero-boilerplate usage
+ - Decorators for zero-boilerplate usage
+ - Native AI API (long-running jobs, local side-effects)
  - Optional AI / LangChain integrations

  ---
@@ -35,7 +36,7 @@ pip install "onceonly-sdk[langchain]"

  ---

- ## Quick Start
+ ## Quick Start (Webhooks / Automations)

  ```python
  from onceonly import OnceOnly
@@ -53,11 +54,54 @@ else:
      print("First execution")
  ```

+ Use `check_lock()` for:
+ - Webhooks
+ - Make / Zapier scenarios
+ - Cron jobs
+ - Distributed workers
+
+ ---
+
+ ## AI Jobs (Server-side)
+
+ Use the AI API for long-running or asynchronous jobs.
+
+ ```python
+ result = client.ai.run_and_wait(
+     key="ai:job:daily_summary:2026-01-09",
+     metadata={"task": "daily_summary", "model": "gpt-4.1"},
+     timeout=60,
+ )
+
+ print(result.status)
+ print(result.result)
+ ```
+
+ - Charged **once per key**
+ - Polling is free
+ - Safe across retries and restarts
+
  ---

- ## AI Agents / LangChain Integration 🤖
+ ## AI Agents / Local Side-Effects

- OnceOnly integrates cleanly with AI-agent frameworks like LangChain.
+ Use the AI Lease API when your code performs the side-effect locally
+ (payments, emails, webhooks) but still needs exactly-once guarantees.
+
92
+ lease = client.ai.lease(key="ai:agent:charge:user_42:invoice_100", ttl=300)
93
+
94
+ if lease["status"] == "acquired":
95
+ try:
96
+ do_side_effect()
97
+ client.ai.complete(key=KEY, lease_id=lease["lease_id"], result={"ok": True})
98
+ except Exception:
99
+ client.ai.fail(key=KEY, lease_id=lease["lease_id"], error_code="failed")
100
+ ```
101
+
102
+ ---
103
+
104
+ ## LangChain Integration 🤖
61
105
 
62
106
  ```python
63
107
  from onceonly.integrations.langchain import make_idempotent_tool
@@ -69,11 +113,14 @@ tool = make_idempotent_tool(
69
113
  )
70
114
  ```
71
115
 
72
- Repeated tool calls with the same inputs will execute **exactly once**, even across retries or agent restarts.
116
+ Repeated tool calls with the same inputs will execute **exactly once**,
117
+ even across retries or agent restarts.
118
+
119
+ See `examples/ai/` for canonical patterns.
73
120
 
74
121
  ---
75
122
 
76
- ## Decorator
123
+ ## Decorators
77
124
 
78
125
  ```python
79
126
  from onceonly.decorators import idempotent
@@ -83,7 +130,7 @@ def process_order(order_id):
83
130
  ...
84
131
  ```
85
132
 
86
- Idempotency keys are generated automatically and are stable across restarts.
133
+ Idempotency keys are generated automatically and remain stable across restarts.
87
134
 
88
135
  ---
89
136
 
@@ -0,0 +1,403 @@
+ from __future__ import annotations
+
+ import asyncio
+ import time
+ import logging
+ from typing import Any, Dict, Optional, Awaitable, Callable
+
+ import httpx
+
+ from ._http import (
+     parse_json_or_raise,
+     request_with_retries_sync,
+     request_with_retries_async,
+ )
+ from ._util import to_metadata_dict, MetadataLike
+ from .ai_models import AiRun, AiStatus, AiResult
+
+ logger = logging.getLogger("onceonly")
+
+
+ class AiClient:
+     """
+     AI helpers for long-running backend tasks.
+
+     High-level:
+     - POST /ai/run => start/attach to a run (idempotent by key)
+     - GET /ai/status => poll status
+     - GET /ai/result => fetch final result (completed/failed)
+
+     Low-level lease API (for local side effects / agent tools):
+     - POST /ai/lease
+     - POST /ai/extend
+     - POST /ai/complete
+     - POST /ai/fail
+     - POST /ai/cancel
+     """
+
+     def __init__(
+         self,
+         sync_client: httpx.Client,
+         async_client_getter: Callable[[], Awaitable[httpx.AsyncClient]],
+         *,
+         max_retries_429: int = 0,
+         retry_backoff: float = 0.5,
+         retry_max_backoff: float = 5.0,
+     ):
+         self._c = sync_client
+         self._get_ac = async_client_getter
+
+         self._max_retries_429 = int(max_retries_429)
+         self._retry_backoff = float(retry_backoff)
+         self._retry_max_backoff = float(retry_max_backoff)
+
+     # ------------------------------------------------------------------
+     # High-level: /ai/run + /ai/status + /ai/result
+     # ------------------------------------------------------------------
+
+     def run(self, key: str, ttl: Optional[int] = None, metadata: Optional[MetadataLike] = None) -> AiRun:
+         payload: Dict[str, Any] = {"key": key}
+         if ttl is not None:
+             payload["ttl"] = int(ttl)
+         md = to_metadata_dict(metadata)
+         if md is not None:
+             payload["metadata"] = md
+
+         resp = request_with_retries_sync(
+             lambda: self._c.post("/ai/run", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         data = parse_json_or_raise(resp)
+
+         logger.debug("ai.run key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
+         return AiRun.from_dict(data)
+
+     def status(self, key: str) -> AiStatus:
+         resp = request_with_retries_sync(
+             lambda: self._c.get("/ai/status", params={"key": key}),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         data = parse_json_or_raise(resp)
+
+         logger.debug("ai.status key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
+         return AiStatus.from_dict(data)
+
+     def result(self, key: str) -> AiResult:
+         resp = request_with_retries_sync(
+             lambda: self._c.get("/ai/result", params={"key": key}),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         data = parse_json_or_raise(resp)
+
+         logger.debug("ai.result key=%s status=%s", key, data.get("status"))
+         return AiResult.from_dict(data)
+
+     def wait(
+         self,
+         key: str,
+         *,
+         timeout: float = 60.0,
+         poll_min: float = 0.5,
+         poll_max: float = 5.0,
+     ) -> AiResult:
+         """
+         Polls /ai/status until completed/failed or timeout.
+
+         NOTE: We do NOT return status="timeout" because backend model statuses
+         are usually limited to: not_found|in_progress|completed|failed.
+         Instead we return status="failed" with error_code="timeout".
+         """
+         t0 = time.time()
+         while True:
+             st = self.status(key)
+             if st.status in ("completed", "failed"):
+                 return self.result(key)
+
+             if time.time() - t0 >= timeout:
+                 return AiResult(ok=False, status="failed", key=key, error_code="timeout")
+
+             sleep_s = st.retry_after_sec if isinstance(st.retry_after_sec, int) else poll_min
+             sleep_s = max(poll_min, min(poll_max, float(sleep_s)))
+             time.sleep(sleep_s)
+
+     def run_and_wait(
+         self,
+         key: str,
+         *,
+         ttl: Optional[int] = None,
+         metadata: Optional[MetadataLike] = None,
+         timeout: float = 60.0,
+         poll_min: float = 0.5,
+         poll_max: float = 5.0,
+     ) -> AiResult:
+         self.run(key=key, ttl=ttl, metadata=metadata)
+         return self.wait(key=key, timeout=timeout, poll_min=poll_min, poll_max=poll_max)
+
+     # -------------------- async high-level --------------------
+
+     async def run_async(self, key: str, ttl: Optional[int] = None, metadata: Optional[MetadataLike] = None) -> AiRun:
+         payload: Dict[str, Any] = {"key": key}
+         if ttl is not None:
+             payload["ttl"] = int(ttl)
+         md = to_metadata_dict(metadata)
+         if md is not None:
+             payload["metadata"] = md
+
+         c = await self._get_ac()
+         resp = await request_with_retries_async(
+             lambda: c.post("/ai/run", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         data = parse_json_or_raise(resp)
+
+         logger.debug("ai.run_async key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
+         return AiRun.from_dict(data)
+
+     async def status_async(self, key: str) -> AiStatus:
+         c = await self._get_ac()
+         resp = await request_with_retries_async(
+             lambda: c.get("/ai/status", params={"key": key}),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         data = parse_json_or_raise(resp)
+
+         logger.debug("ai.status_async key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
+         return AiStatus.from_dict(data)
+
+     async def result_async(self, key: str) -> AiResult:
+         c = await self._get_ac()
+         resp = await request_with_retries_async(
+             lambda: c.get("/ai/result", params={"key": key}),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         data = parse_json_or_raise(resp)
+
+         logger.debug("ai.result_async key=%s status=%s", key, data.get("status"))
+         return AiResult.from_dict(data)
+
+     async def wait_async(
+         self,
+         key: str,
+         *,
+         timeout: float = 60.0,
+         poll_min: float = 0.5,
+         poll_max: float = 5.0,
+     ) -> AiResult:
+         t0 = time.time()
+         while True:
+             st = await self.status_async(key)
+             if st.status in ("completed", "failed"):
+                 return await self.result_async(key)
+
+             if time.time() - t0 >= timeout:
+                 return AiResult(ok=False, status="failed", key=key, error_code="timeout")
+
+             sleep_s = st.retry_after_sec if isinstance(st.retry_after_sec, int) else poll_min
+             sleep_s = max(poll_min, min(poll_max, float(sleep_s)))
+             await asyncio.sleep(sleep_s)
+
+     async def run_and_wait_async(
+         self,
+         key: str,
+         *,
+         ttl: Optional[int] = None,
+         metadata: Optional[MetadataLike] = None,
+         timeout: float = 60.0,
+         poll_min: float = 0.5,
+         poll_max: float = 5.0,
+     ) -> AiResult:
+         await self.run_async(key=key, ttl=ttl, metadata=metadata)
+         return await self.wait_async(key=key, timeout=timeout, poll_min=poll_min, poll_max=poll_max)
+
+     # ------------------------------------------------------------------
+     # Low-level lease API (sync) - returns raw dicts (backend models)
+     # ------------------------------------------------------------------
+
+     def lease(self, key: str, ttl: Optional[int] = None, metadata: Optional[MetadataLike] = None) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key}
+         if ttl is not None:
+             payload["ttl"] = int(ttl)
+         md = to_metadata_dict(metadata)
+         if md is not None:
+             payload["metadata"] = md
+
+         resp = request_with_retries_sync(
+             lambda: self._c.post("/ai/lease", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
+
+     def extend(self, key: str, lease_id: str, ttl: Optional[int] = None) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key, "lease_id": lease_id}
+         if ttl is not None:
+             payload["ttl"] = int(ttl)
+
+         resp = request_with_retries_sync(
+             lambda: self._c.post("/ai/extend", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
+
+     def complete(
+         self,
+         key: str,
+         lease_id: str,
+         *,
+         result: Optional[Dict[str, Any]] = None,
+         result_hash: Optional[str] = None,
+     ) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key, "lease_id": lease_id}
+         if result_hash is not None:
+             payload["result_hash"] = str(result_hash)
+         if result is not None:
+             payload["result"] = result
+
+         resp = request_with_retries_sync(
+             lambda: self._c.post("/ai/complete", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
+
+     def fail(
+         self,
+         key: str,
+         lease_id: str,
+         *,
+         error_code: str,
+         error_hash: Optional[str] = None,
+     ) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key, "lease_id": lease_id, "error_code": str(error_code)}
+         if error_hash is not None:
+             payload["error_hash"] = str(error_hash)
+
+         resp = request_with_retries_sync(
+             lambda: self._c.post("/ai/fail", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
+
+     def cancel(self, key: str, lease_id: str, reason: Optional[str] = None) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key, "lease_id": lease_id}
+         if reason:
+             payload["reason"] = str(reason)
+
+         resp = request_with_retries_sync(
+             lambda: self._c.post("/ai/cancel", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
+
+     # ------------------------------------------------------------------
+     # Low-level lease API (async) - returns raw dicts (backend models)
+     # ------------------------------------------------------------------
+
+     async def lease_async(self, key: str, ttl: Optional[int] = None, metadata: Optional[MetadataLike] = None) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key}
+         if ttl is not None:
+             payload["ttl"] = int(ttl)
+         md = to_metadata_dict(metadata)
+         if md is not None:
+             payload["metadata"] = md
+
+         c = await self._get_ac()
+         resp = await request_with_retries_async(
+             lambda: c.post("/ai/lease", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
+
+     async def extend_async(self, key: str, lease_id: str, ttl: Optional[int] = None) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key, "lease_id": lease_id}
+         if ttl is not None:
+             payload["ttl"] = int(ttl)
+
+         c = await self._get_ac()
+         resp = await request_with_retries_async(
+             lambda: c.post("/ai/extend", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
+
+     async def complete_async(
+         self,
+         key: str,
+         lease_id: str,
+         *,
+         result: Optional[Dict[str, Any]] = None,
+         result_hash: Optional[str] = None,
+     ) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key, "lease_id": lease_id}
+         if result_hash is not None:
+             payload["result_hash"] = str(result_hash)
+         if result is not None:
+             payload["result"] = result
+
+         c = await self._get_ac()
+         resp = await request_with_retries_async(
+             lambda: c.post("/ai/complete", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
+
+     async def fail_async(
+         self,
+         key: str,
+         lease_id: str,
+         *,
+         error_code: str,
+         error_hash: Optional[str] = None,
+     ) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key, "lease_id": lease_id, "error_code": str(error_code)}
+         if error_hash is not None:
+             payload["error_hash"] = str(error_hash)
+
+         c = await self._get_ac()
+         resp = await request_with_retries_async(
+             lambda: c.post("/ai/fail", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
+
+     async def cancel_async(self, key: str, lease_id: str, reason: Optional[str] = None) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"key": key, "lease_id": lease_id}
+         if reason:
+             payload["reason"] = str(reason)
+
+         c = await self._get_ac()
+         resp = await request_with_retries_async(
+             lambda: c.post("/ai/cancel", json=payload),
+             max_retries=self._max_retries_429,
+             base_backoff=self._retry_backoff,
+             max_backoff=self._retry_max_backoff,
+         )
+         return parse_json_or_raise(resp)
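One behavioral change worth noting in the file above: per the `wait()` docstring, a client-side poll timeout is now reported as `status="failed"` with `error_code="timeout"` (2.0.0 returned `status="timeout"`, which is not a backend status). A short sketch of how a caller might tell the two failure modes apart, reusing the `client` from the README examples:

```python
result = client.ai.run_and_wait(key="ai:job:daily_summary:2026-01-09", timeout=60)

if result.status == "completed":
    print(result.result)
elif result.status == "failed" and result.error_code == "timeout":
    # Client-side poll timeout only: the run may still be in progress on the
    # server, so it is safe to keep polling the same key.
    result = client.ai.wait(key="ai:job:daily_summary:2026-01-09", timeout=120)
else:
    # A failure actually recorded by the backend via /ai/result.
    print(result.error_code)
```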
@@ -0,0 +1 @@
+ __version__ = "2.0.1"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: onceonly-sdk
- Version: 2.0.0
+ Version: 2.0.1
  Summary: Python SDK for OnceOnly idempotency API
  Author-email: OnceOnly <support@onceonly.tech>
  License: MIT
@@ -41,7 +41,8 @@ Documentation: https://onceonly.tech/docs/
  - Sync + Async client (httpx-based)
  - Fail-open mode for production safety
  - Stable idempotency keys (supports Pydantic & dataclasses)
- - Decorator for zero-boilerplate usage
+ - Decorators for zero-boilerplate usage
+ - Native AI API (long-running jobs, local side-effects)
  - Optional AI / LangChain integrations

  ---
@@ -60,7 +61,7 @@ pip install "onceonly-sdk[langchain]"

  ---

- ## Quick Start
+ ## Quick Start (Webhooks / Automations)

  ```python
  from onceonly import OnceOnly
@@ -78,11 +79,54 @@ else:
      print("First execution")
  ```

+ Use `check_lock()` for:
+ - Webhooks
+ - Make / Zapier scenarios
+ - Cron jobs
+ - Distributed workers
+
+ ---
+
+ ## AI Jobs (Server-side)
+
+ Use the AI API for long-running or asynchronous jobs.
+
+ ```python
+ result = client.ai.run_and_wait(
+     key="ai:job:daily_summary:2026-01-09",
+     metadata={"task": "daily_summary", "model": "gpt-4.1"},
+     timeout=60,
+ )
+
+ print(result.status)
+ print(result.result)
+ ```
+
+ - Charged **once per key**
+ - Polling is free
+ - Safe across retries and restarts
+
  ---

- ## AI Agents / LangChain Integration 🤖
+ ## AI Agents / Local Side-Effects

- OnceOnly integrates cleanly with AI-agent frameworks like LangChain.
+ Use the AI Lease API when your code performs the side-effect locally
+ (payments, emails, webhooks) but still needs exactly-once guarantees.
+
+ ```python
+ KEY = "ai:agent:charge:user_42:invoice_100"
+ lease = client.ai.lease(key=KEY, ttl=300)
+
+ if lease["status"] == "acquired":
+     try:
+         do_side_effect()
+         client.ai.complete(key=KEY, lease_id=lease["lease_id"], result={"ok": True})
+     except Exception:
+         client.ai.fail(key=KEY, lease_id=lease["lease_id"], error_code="failed")
+ ```
+
+ ---
+
+ ## LangChain Integration 🤖

  ```python
  from onceonly.integrations.langchain import make_idempotent_tool
@@ -94,11 +138,14 @@ tool = make_idempotent_tool(
  )
  ```

- Repeated tool calls with the same inputs will execute **exactly once**, even across retries or agent restarts.
+ Repeated tool calls with the same inputs will execute **exactly once**,
+ even across retries or agent restarts.
+
+ See `examples/ai/` for canonical patterns.

  ---

- ## Decorator
+ ## Decorators

  ```python
  from onceonly.decorators import idempotent
@@ -108,7 +155,7 @@ def process_order(order_id):
      ...
  ```

- Idempotency keys are generated automatically and are stable across restarts.
+ Idempotency keys are generated automatically and remain stable across restarts.

  ---

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "onceonly-sdk"
- version = "2.0.0"
+ version = "2.0.1"
  description = "Python SDK for OnceOnly idempotency API"
  readme = "README.md"
  requires-python = ">=3.9"
@@ -3,6 +3,11 @@ import httpx
  from unittest.mock import MagicMock

  from onceonly.client import OnceOnly
+ from onceonly.exceptions import UnauthorizedError, OverLimitError, ValidationError
+
+
+ def _mk_req():
+     return httpx.Request("POST", "https://api.onceonly.tech/v1/check-lock")


  def test_fail_open_on_network_error():
@@ -40,3 +45,32 @@ def test_fail_open_on_500():
      assert res.duplicate is False
      assert res.raw.get("fail_open") is True
      assert res.raw.get("reason") == "api_5xx"
+
+
+ def test_fail_open_does_not_mask_4xx_errors():
+     mock_http = MagicMock()
+
+     client = OnceOnly("apikey", sync_client=mock_http, fail_open=True)
+
+     # 401/403 should raise UnauthorizedError (NOT fail-open)
+     mock_http.post.return_value = httpx.Response(401, json={"detail": "nope"}, request=_mk_req())
+     with pytest.raises(UnauthorizedError):
+         client.check_lock("k")
+
+     mock_http.post.return_value = httpx.Response(403, json={"detail": "nope"}, request=_mk_req())
+     with pytest.raises(UnauthorizedError):
+         client.check_lock("k")
+
+     # 402 should raise OverLimitError (NOT fail-open)
+     mock_http.post.return_value = httpx.Response(
+         402,
+         json={"detail": {"error": "limit", "plan": "free", "limit": 1000, "usage": 1001}},
+         request=_mk_req(),
+     )
+     with pytest.raises(OverLimitError):
+         client.check_lock("k")
+
+     # 422 should raise ValidationError (NOT fail-open)
+     mock_http.post.return_value = httpx.Response(422, json={"detail": "Validation Error"}, request=_mk_req())
+     with pytest.raises(ValidationError):
+         client.check_lock("k")
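The new test pins down the fail-open contract: transport errors and 5xx degrade to a non-duplicate result flagged in `res.raw`, while 4xx responses still raise. A minimal caller-side sketch, assuming the constructor shown in these tests; `process_event` is a hypothetical handler:

```python
import logging

from onceonly import OnceOnly

client = OnceOnly("apikey", fail_open=True)

res = client.check_lock("webhook:evt_123")

if res.raw.get("fail_open"):
    # The API was unreachable or returned 5xx; the SDK degraded to
    # "not a duplicate" so the pipeline keeps moving.
    logging.warning("onceonly fail-open: %s", res.raw.get("reason"))

if not res.duplicate:
    process_event()  # hypothetical handler
```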
@@ -125,6 +125,47 @@ def test_langchain_duplicate_behavior_structured(mock_client):
      assert "duplicate" in res.lower()


+ def test_langchain_duplicate_does_not_call_original_tool():
+     try:
+         from langchain_core.tools import StructuredTool
+     except ImportError:
+         pytest.skip("LangChain not installed")
+
+     mock_client = MagicMock(spec=OnceOnly)
+     mock_client.check_lock.return_value = CheckLockResult(
+         locked=False,
+         duplicate=True,
+         key="k",
+         ttl=60,
+         first_seen_at="now",
+         request_id="r",
+         status_code=409,
+         raw={},
+     )
+
+     called = {"n": 0}
+
+     def refund_user(user_id: str, amount: int) -> str:
+         called["n"] += 1
+         return "original run"
+
+     original_tool = StructuredTool.from_function(
+         func=refund_user,
+         name="refund_tool",
+         description="Refunds a user",
+     )
+
+     wrapped = make_idempotent_tool(original_tool, client=mock_client, key_prefix="test_prefix")
+
+     out = wrapped.invoke({"user_id": "u_100", "amount": 50})
+
+     assert called["n"] == 0
+     assert isinstance(out, str)
+     assert "duplicate" in out.lower()
+     assert "skipped" in out.lower()
+     mock_client.check_lock.assert_called_once()
+
+
  def test_single_input_tool_supported(mock_client):
      """
      Ensure single-input tools still work.
@@ -149,3 +149,30 @@ def test_500_non_json_body():
      resp = mk_response(500, text="<html>502 Bad Gateway</html>")
      with pytest.raises(ApiError):
          c._parse_check_lock_response(resp, fallback_key="a", fallback_ttl=60)
+
+
+ def test_header_locked_without_body_json():
+     c = OnceOnly("k")
+     resp = mk_response(
+         200,
+         json_data={},  # empty body, but header says locked
+         headers={"X-OnceOnly-Status": "locked", "X-Request-Id": "rid_locked"},
+     )
+     r = c._parse_check_lock_response(resp, fallback_key="a", fallback_ttl=60)
+     assert r.locked is True
+     assert r.duplicate is False
+     assert r.request_id == "rid_locked"
+
+
+ def test_ttl_fallback_when_missing_in_json():
+     c = OnceOnly("k")
+     resp = mk_response(
+         200,
+         json_data={"success": True, "status": "locked", "key": "a"},  # no ttl
+         headers={"X-OnceOnly-Status": "locked", "X-Request-Id": "rid_ttl"},
+     )
+     r = c._parse_check_lock_response(resp, fallback_key="a", fallback_ttl=123)
+     assert r.locked is True
+     assert r.duplicate is False
+     assert r.ttl == 123
+     assert r.request_id == "rid_ttl"
@@ -1,195 +0,0 @@
- from __future__ import annotations
-
- import asyncio
- import time
- import logging
- from typing import Any, Dict, Optional, Awaitable, Callable
-
- import httpx
-
- from ._http import (
-     parse_json_or_raise,
-     request_with_retries_sync,
-     request_with_retries_async,
- )
- from ._util import to_metadata_dict, MetadataLike
- from .ai_models import AiRun, AiStatus, AiResult
-
- logger = logging.getLogger("onceonly")
-
-
- class AiClient:
-     """
-     AI helpers for long-running backend tasks.
-
-     Typical usage for agents:
-         result = client.ai.run_and_wait(key="job:123", metadata={...})
-
-     Endpoints:
-     - POST /ai/run => start/attach to a run (idempotent by key)
-     - GET /ai/status => poll status
-     - GET /ai/result => fetch final result (completed/failed)
-     """
-
-     def __init__(
-         self,
-         sync_client: httpx.Client,
-         async_client_getter: Callable[[], Awaitable[httpx.AsyncClient]],
-         *,
-         max_retries_429: int = 0,
-         retry_backoff: float = 0.5,
-         retry_max_backoff: float = 5.0,
-     ):
-         self._c = sync_client
-         self._get_ac = async_client_getter
-
-         self._max_retries_429 = int(max_retries_429)
-         self._retry_backoff = float(retry_backoff)
-         self._retry_max_backoff = float(retry_max_backoff)
-
-     # ---- sync ----
-
-     def run(self, key: str, ttl: Optional[int] = None, metadata: Optional[MetadataLike] = None) -> AiRun:
-         payload: Dict[str, Any] = {"key": key}
-         if ttl is not None:
-             payload["ttl"] = int(ttl)
-         md = to_metadata_dict(metadata)
-         if md is not None:
-             payload["metadata"] = md
-
-         resp = request_with_retries_sync(
-             lambda: self._c.post("/ai/run", json=payload),
-             max_retries=self._max_retries_429,
-             base_backoff=self._retry_backoff,
-             max_backoff=self._retry_max_backoff,
-         )
-         data = parse_json_or_raise(resp)
-
-         logger.debug("ai.run key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
-         return AiRun.from_dict(data)
-
-     def status(self, key: str) -> AiStatus:
-         resp = request_with_retries_sync(
-             lambda: self._c.get("/ai/status", params={"key": key}),
-             max_retries=self._max_retries_429,
-             base_backoff=self._retry_backoff,
-             max_backoff=self._retry_max_backoff,
-         )
-         data = parse_json_or_raise(resp)
-
-         logger.debug("ai.status key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
-         return AiStatus.from_dict(data)
-
-     def result(self, key: str) -> AiResult:
-         resp = request_with_retries_sync(
-             lambda: self._c.get("/ai/result", params={"key": key}),
-             max_retries=self._max_retries_429,
-             base_backoff=self._retry_backoff,
-             max_backoff=self._retry_max_backoff,
-         )
-         data = parse_json_or_raise(resp)
-
-         logger.debug("ai.result key=%s status=%s", key, data.get("status"))
-         return AiResult.from_dict(data)
-
-     def wait(self, key: str, timeout: float = 60.0, poll_min: float = 0.5, poll_max: float = 5.0) -> AiResult:
-         t0 = time.time()
-         while True:
-             st = self.status(key)
-             if st.status in ("completed", "failed"):
-                 return self.result(key)
-
-             if time.time() - t0 >= timeout:
-                 return AiResult(ok=False, status="timeout", key=key, error_code="timeout")
-
-             sleep_s = st.retry_after_sec if isinstance(st.retry_after_sec, int) else poll_min
-             sleep_s = max(poll_min, min(poll_max, float(sleep_s)))
-             time.sleep(sleep_s)
-
-     def run_and_wait(
-         self,
-         key: str,
-         *,
-         ttl: Optional[int] = None,
-         metadata: Optional[MetadataLike] = None,
-         timeout: float = 60.0,
-         poll_min: float = 0.5,
-         poll_max: float = 5.0,
-     ) -> AiResult:
-         self.run(key=key, ttl=ttl, metadata=metadata)
-         return self.wait(key=key, timeout=timeout, poll_min=poll_min, poll_max=poll_max)
-
-     # ---- async ----
-
-     async def run_async(self, key: str, ttl: Optional[int] = None, metadata: Optional[MetadataLike] = None) -> AiRun:
-         payload: Dict[str, Any] = {"key": key}
-         if ttl is not None:
-             payload["ttl"] = int(ttl)
-         md = to_metadata_dict(metadata)
-         if md is not None:
-             payload["metadata"] = md
-
-         c = await self._get_ac()
-         resp = await request_with_retries_async(
-             lambda: c.post("/ai/run", json=payload),
-             max_retries=self._max_retries_429,
-             base_backoff=self._retry_backoff,
-             max_backoff=self._retry_max_backoff,
-         )
-         data = parse_json_or_raise(resp)
-
-         logger.debug("ai.run_async key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
-         return AiRun.from_dict(data)
-
-     async def status_async(self, key: str) -> AiStatus:
-         c = await self._get_ac()
-         resp = await request_with_retries_async(
-             lambda: c.get("/ai/status", params={"key": key}),
-             max_retries=self._max_retries_429,
-             base_backoff=self._retry_backoff,
-             max_backoff=self._retry_max_backoff,
-         )
-         data = parse_json_or_raise(resp)
-
-         logger.debug("ai.status_async key=%s status=%s version=%s", key, data.get("status"), data.get("version"))
-         return AiStatus.from_dict(data)
-
-     async def result_async(self, key: str) -> AiResult:
-         c = await self._get_ac()
-         resp = await request_with_retries_async(
-             lambda: c.get("/ai/result", params={"key": key}),
-             max_retries=self._max_retries_429,
-             base_backoff=self._retry_backoff,
-             max_backoff=self._retry_max_backoff,
-         )
-         data = parse_json_or_raise(resp)
-
-         logger.debug("ai.result_async key=%s status=%s", key, data.get("status"))
-         return AiResult.from_dict(data)
-
-     async def wait_async(self, key: str, timeout: float = 60.0, poll_min: float = 0.5, poll_max: float = 5.0) -> AiResult:
-         t0 = time.time()
-         while True:
-             st = await self.status_async(key)
-             if st.status in ("completed", "failed"):
-                 return await self.result_async(key)
-
-             if time.time() - t0 >= timeout:
-                 return AiResult(ok=False, status="timeout", key=key, error_code="timeout")
-
-             sleep_s = st.retry_after_sec if isinstance(st.retry_after_sec, int) else poll_min
-             sleep_s = max(poll_min, min(poll_max, float(sleep_s)))
-             await asyncio.sleep(sleep_s)
-
-     async def run_and_wait_async(
-         self,
-         key: str,
-         *,
-         ttl: Optional[int] = None,
-         metadata: Optional[MetadataLike] = None,
-         timeout: float = 60.0,
-         poll_min: float = 0.5,
-         poll_max: float = 5.0,
-     ) -> AiResult:
-         await self.run_async(key=key, ttl=ttl, metadata=metadata)
-         return await self.wait_async(key=key, timeout=timeout, poll_min=poll_min, poll_max=poll_max)
@@ -1 +0,0 @@
- __version__ = "2.0.0"