loremem-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,44 @@
1
+ # Python
2
+ .venv/
3
+ venv/
4
+ __pycache__/
5
+ *.pyc
6
+ *.pyo
7
+ *.pyd
8
+ .Python
9
+ *.egg-info/
10
+ dist/
11
+ build/
12
+
13
+ # Env / secrets
14
+ .env
15
+ .env.*
16
+ !.env.example
17
+
18
+ # Test artefacts
19
+ .pytest_cache/
20
+ .coverage
21
+ htmlcov/
22
+
23
+ # Type checker / linter
24
+ .mypy_cache/
25
+ .ruff_cache/
26
+
27
+ # Editor
28
+ .vscode/
29
+ .idea/
30
+ *.swp
31
+ *.swo
32
+
33
+ # OS
34
+ .DS_Store
35
+ Thumbs.db
36
+
37
+ # Private product docs (keep local only)
38
+ *.md
39
+ !README.md
40
+
41
+ # Node (for future frontend)
42
+ node_modules/
43
+ .next/
44
+ .nuxt/
loremem-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,202 @@
1
+ Metadata-Version: 2.4
2
+ Name: loremem
3
+ Version: 0.1.0
4
+ Summary: Lore SDK — inject organizational memory into any AI agent
5
+ Project-URL: Homepage, https://github.com/mr-shakib/lore
6
+ Project-URL: Documentation, https://github.com/mr-shakib/lore/tree/main/sdk/python
7
+ Project-URL: Repository, https://github.com/mr-shakib/lore
8
+ Project-URL: Issues, https://github.com/mr-shakib/lore/issues
9
+ License: MIT
10
+ Keywords: agents,ai,context,llm,lore,memory
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
20
+ Requires-Python: >=3.9
21
+ Requires-Dist: httpx>=0.27.0
22
+ Provides-Extra: dev
23
+ Requires-Dist: pytest-asyncio>=0.23; extra == 'dev'
24
+ Requires-Dist: pytest>=8.0; extra == 'dev'
25
+ Requires-Dist: respx>=0.21; extra == 'dev'
26
+ Description-Content-Type: text/markdown
27
+
28
+ # loremem — Python SDK for Lore
29
+
30
+ > Inject your company's organizational memory into any AI agent in 3 lines of code.
31
+
32
+ **Lore** captures every human correction of an AI output, structures it into a company knowledge graph, and feeds it back to your AI agents so they stop making the same mistakes twice.
33
+
34
+ `loremem` is the Python SDK for accessing Lore's Context Injection API.
35
+
36
+ ---
37
+
38
+ ## Installation
39
+
40
+ ```bash
41
+ # From PyPI (when published)
42
+ pip install loremem
43
+
44
+ # From GitHub (MVP — no PyPI publish required)
45
+ pip install git+https://github.com/mr-shakib/lore#subdirectory=sdk/python
46
+ ```
47
+
48
+ ---
49
+
50
+ ## Quickstart (3 minutes)
51
+
52
+ ```python
53
+ from loremem import LoreClient
54
+
55
+ client = LoreClient(
56
+ api_key="sk-lore-xxxx", # from POST /v1/auth/api-keys
57
+ workspace_id="ws_yourworkspace", # your Lore workspace ID
58
+ )
59
+
60
+ # ── Step 1: Get context before your LLM call ──────────────────────────────────
61
+
62
+ ctx = client.get_context(
63
+ query="Draft an MSA for Acme Corp",
64
+ tool="contract-drafting-agent",
65
+ hints={"jurisdiction": "US", "customer_tier": "enterprise"},
66
+ entities=["Acme Corp"],
67
+ )
68
+
69
+ # Prepend to your system prompt
70
+ system_prompt = ctx.formatted_injection + "\n\n" + YOUR_BASE_SYSTEM_PROMPT
71
+ response = openai_client.chat.completions.create(
72
+ model="gpt-4o",
73
+ messages=[{"role": "system", "content": system_prompt}, ...],
74
+ )
75
+
76
+ # ── Step 2: Report corrections so Lore learns ─────────────────────────────────
77
+
78
+ # When a human edits the AI output:
79
+ client.report_correction(
80
+ ai_output_id="draft_acme_msa_v1",
81
+ summary="Changed indemnity clause from UK to US_STANDARD template",
82
+ tool="contract-drafting-agent",
83
+ context_tags={"customer": "Acme Corp", "document_type": "MSA"},
84
+ actor_id="james@company.com",
85
+ )
86
+
87
+ # When a human approves the AI output without changes (positive signal):
88
+ client.report_output(
89
+ output_id="draft_acme_msa_v2",
90
+ tool="contract-drafting-agent",
91
+ summary="MSA draft approved — no changes needed",
92
+ actor_id="james@company.com",
93
+ )
94
+ ```
95
+
96
+ After a few corrections, Lore automatically proposes rules like:
97
+ > *"US clients require the US_STANDARD indemnity template"*
98
+
99
+ Confirm the rule once → it's injected into every future AI call automatically.
100
+
101
+ ---
102
+
103
+ ## Async usage
104
+
105
+ For async agent frameworks (LangChain, CrewAI, FastAPI-based agents):
106
+
107
+ ```python
108
+ from loremem import AsyncLoreClient
109
+
110
+ client = AsyncLoreClient(api_key="sk-lore-xxxx", workspace_id="ws_acme")
111
+
112
+ ctx = await client.get_context(
113
+ query="Route this support ticket",
114
+ tool="support-triage-agent",
115
+ )
116
+
117
+ await client.report_correction(
118
+ ai_output_id="ticket_001",
119
+ summary="Re-routed from Tier 1 to Enterprise team",
120
+ tool="support-triage-agent",
121
+ )
122
+ ```
123
+
124
+ ---
125
+
126
+ ## Never-throw guarantee
127
+
128
+ Every method in `LoreClient` and `AsyncLoreClient` is designed to **never raise exceptions**. If Lore is unavailable, misconfigured, or rate-limited:
129
+
130
+ - `get_context()` returns an empty `ContextResponse` (`.formatted_injection == ""`)
131
+ - `report_correction()` and `report_output()` return `ReportResult(accepted=False)`
132
+ - A `WARNING` is logged via Python's standard `logging` module
133
+
134
+ **Lore's unavailability will never cause your AI agent to break.**
135
+
136
+ ```python
137
+ import logging
138
+ logging.getLogger("loremem").setLevel(logging.WARNING) # optional: see SDK warnings
139
+ ```
140
+
141
+ ---
142
+
143
+ ## API reference
144
+
145
+ ### `LoreClient(api_key, workspace_id, base_url?)`
146
+
147
+ | Parameter | Type | Description |
148
+ |---|---|---|
149
+ | `api_key` | `str` | Lore API key (`sk-lore-...`) |
150
+ | `workspace_id` | `str` | Your workspace ID |
151
+ | `base_url` | `str` | Default: production Lore API. Set to `http://localhost:8000` for local dev |
152
+
153
+ ### `get_context(query, tool, hints?, entities?, max_rules?, max_tokens?)`
154
+
155
+ Returns a `ContextResponse`:
156
+
157
+ | Field | Type | Description |
158
+ |---|---|---|
159
+ | `formatted_injection` | `str` | Ready-to-use string — prepend to system prompt |
160
+ | `context_id` | `str` | Unique ID for this context response |
161
+ | `rules` | `list[dict]` | Active rules that matched |
162
+ | `entities` | `list[dict]` | Entity profiles that matched |
163
+ | `decisions` | `list[dict]` | Decision records that matched |
164
+ | `cached` | `bool` | True if served from 15-min cache |
165
+
166
+ ### `report_correction(ai_output_id, summary, tool, context_tags?, actor_id?)`
167
+
168
+ Call when a human edits or overrides an AI output.
169
+
170
+ ### `report_output(output_id, tool, summary?, context_tags?, actor_id?)`
171
+
172
+ Call when a human approves an AI output unchanged (positive signal).
173
+
174
+ ---
175
+
176
+ ## Getting an API key
177
+
178
+ ```bash
179
+ # Create a key (requires Clerk JWT from the dashboard, or bootstrap via Supabase directly)
180
+ curl -X POST https://lore-m0st.onrender.com/v1/auth/api-keys \
181
+ -H "Authorization: Bearer <clerk_jwt>" \
182
+ -H "Content-Type: application/json" \
183
+ -d '{"name": "Production SDK key"}'
184
+ ```
185
+
186
+ ---
187
+
188
+ ## Local development
189
+
190
+ ```python
191
+ client = LoreClient(
192
+ api_key="sk-lore-xxxx",
193
+ workspace_id="ws_test",
194
+ base_url="http://localhost:8000", # local FastAPI server
195
+ )
196
+ ```
197
+
198
+ ---
199
+
200
+ ## License
201
+
202
+ MIT
@@ -0,0 +1,175 @@
1
+ # loremem — Python SDK for Lore
2
+
3
+ > Inject your company's organizational memory into any AI agent in 3 lines of code.
4
+
5
+ **Lore** captures every human correction of an AI output, structures it into a company knowledge graph, and feeds it back to your AI agents so they stop making the same mistakes twice.
6
+
7
+ `loremem` is the Python SDK for accessing Lore's Context Injection API.
8
+
9
+ ---
10
+
11
+ ## Installation
12
+
13
+ ```bash
14
+ # From PyPI (when published)
15
+ pip install loremem
16
+
17
+ # From GitHub (MVP — no PyPI publish required)
18
+ pip install git+https://github.com/mr-shakib/lore#subdirectory=sdk/python
19
+ ```
20
+
21
+ ---
22
+
23
+ ## Quickstart (3 minutes)
24
+
25
+ ```python
26
+ from loremem import LoreClient
27
+
28
+ client = LoreClient(
29
+ api_key="sk-lore-xxxx", # from POST /v1/auth/api-keys
30
+ workspace_id="ws_yourworkspace", # your Lore workspace ID
31
+ )
32
+
33
+ # ── Step 1: Get context before your LLM call ──────────────────────────────────
34
+
35
+ ctx = client.get_context(
36
+ query="Draft an MSA for Acme Corp",
37
+ tool="contract-drafting-agent",
38
+ hints={"jurisdiction": "US", "customer_tier": "enterprise"},
39
+ entities=["Acme Corp"],
40
+ )
41
+
42
+ # Prepend to your system prompt
43
+ system_prompt = ctx.formatted_injection + "\n\n" + YOUR_BASE_SYSTEM_PROMPT
44
+ response = openai_client.chat.completions.create(
45
+ model="gpt-4o",
46
+ messages=[{"role": "system", "content": system_prompt}, ...],
47
+ )
48
+
49
+ # ── Step 2: Report corrections so Lore learns ─────────────────────────────────
50
+
51
+ # When a human edits the AI output:
52
+ client.report_correction(
53
+ ai_output_id="draft_acme_msa_v1",
54
+ summary="Changed indemnity clause from UK to US_STANDARD template",
55
+ tool="contract-drafting-agent",
56
+ context_tags={"customer": "Acme Corp", "document_type": "MSA"},
57
+ actor_id="james@company.com",
58
+ )
59
+
60
+ # When a human approves the AI output without changes (positive signal):
61
+ client.report_output(
62
+ output_id="draft_acme_msa_v2",
63
+ tool="contract-drafting-agent",
64
+ summary="MSA draft approved — no changes needed",
65
+ actor_id="james@company.com",
66
+ )
67
+ ```
68
+
69
+ After a few corrections, Lore automatically proposes rules like:
70
+ > *"US clients require the US_STANDARD indemnity template"*
71
+
72
+ Confirm the rule once → it's injected into every future AI call automatically.
73
+
74
+ ---
75
+
76
+ ## Async usage
77
+
78
+ For async agent frameworks (LangChain, CrewAI, FastAPI-based agents):
79
+
80
+ ```python
81
+ from loremem import AsyncLoreClient
82
+
83
+ client = AsyncLoreClient(api_key="sk-lore-xxxx", workspace_id="ws_acme")
84
+
85
+ ctx = await client.get_context(
86
+ query="Route this support ticket",
87
+ tool="support-triage-agent",
88
+ )
89
+
90
+ await client.report_correction(
91
+ ai_output_id="ticket_001",
92
+ summary="Re-routed from Tier 1 to Enterprise team",
93
+ tool="support-triage-agent",
94
+ )
95
+ ```
96
+
97
+ ---
98
+
99
+ ## Never-throw guarantee
100
+
101
+ Every method in `LoreClient` and `AsyncLoreClient` is designed to **never raise exceptions**. If Lore is unavailable, misconfigured, or rate-limited:
102
+
103
+ - `get_context()` returns an empty `ContextResponse` (`.formatted_injection == ""`)
104
+ - `report_correction()` and `report_output()` return `ReportResult(accepted=False)`
105
+ - A `WARNING` is logged via Python's standard `logging` module
106
+
107
+ **Lore's unavailability will never cause your AI agent to break.**
108
+
109
+ ```python
110
+ import logging
111
+ logging.getLogger("loremem").setLevel(logging.WARNING) # optional: see SDK warnings
112
+ ```
113
+
114
+ ---
115
+
116
+ ## API reference
117
+
118
+ ### `LoreClient(api_key, workspace_id, base_url?)`
119
+
120
+ | Parameter | Type | Description |
121
+ |---|---|---|
122
+ | `api_key` | `str` | Lore API key (`sk-lore-...`) |
123
+ | `workspace_id` | `str` | Your workspace ID |
124
+ | `base_url` | `str` | Default: production Lore API. Set to `http://localhost:8000` for local dev |
125
+
126
+ ### `get_context(query, tool, hints?, entities?, max_rules?, max_tokens?)`
127
+
128
+ Returns a `ContextResponse`:
129
+
130
+ | Field | Type | Description |
131
+ |---|---|---|
132
+ | `formatted_injection` | `str` | Ready-to-use string — prepend to system prompt |
133
+ | `context_id` | `str` | Unique ID for this context response |
134
+ | `rules` | `list[dict]` | Active rules that matched |
135
+ | `entities` | `list[dict]` | Entity profiles that matched |
136
+ | `decisions` | `list[dict]` | Decision records that matched |
137
+ | `cached` | `bool` | True if served from 15-min cache |
138
+
139
+ ### `report_correction(ai_output_id, summary, tool, context_tags?, actor_id?)`
140
+
141
+ Call when a human edits or overrides an AI output.
142
+
143
+ ### `report_output(output_id, tool, summary?, context_tags?, actor_id?)`
144
+
145
+ Call when a human approves an AI output unchanged (positive signal).
146
+
147
+ ---
148
+
149
+ ## Getting an API key
150
+
151
+ ```bash
152
+ # Create a key (requires Clerk JWT from the dashboard, or bootstrap via Supabase directly)
153
+ curl -X POST https://lore-m0st.onrender.com/v1/auth/api-keys \
154
+ -H "Authorization: Bearer <clerk_jwt>" \
155
+ -H "Content-Type: application/json" \
156
+ -d '{"name": "Production SDK key"}'
157
+ ```
158
+
159
+ ---
160
+
161
+ ## Local development
162
+
163
+ ```python
164
+ client = LoreClient(
165
+ api_key="sk-lore-xxxx",
166
+ workspace_id="ws_test",
167
+ base_url="http://localhost:8000", # local FastAPI server
168
+ )
169
+ ```
170
+
171
+ ---
172
+
173
+ ## License
174
+
175
+ MIT
@@ -0,0 +1,33 @@
"""
loremem — The Python SDK for Lore Organizational Memory.

Quick start:
    from loremem import LoreClient

    client = LoreClient(
        api_key="sk-lore-xxxx",
        workspace_id="ws_yourworkspace",
    )

    # Before your LLM call
    ctx = client.get_context(query="Draft MSA for Acme Corp", tool="contract-agent")
    system_prompt = ctx.formatted_injection + base_system_prompt

    # After a human corrects the AI output
    client.report_correction(
        ai_output_id="output_001",
        summary="Changed jurisdiction from UK to US",
        tool="contract-agent",
    )
"""

# Public re-exports: the sync/async clients and the two response models are
# the package's entire public surface.
from loremem.client import AsyncLoreClient, LoreClient
from loremem.models import ContextResponse, ReportResult

# NOTE(review): keep in sync with the version declared in packaging metadata
# (PKG-INFO shows Version: 0.1.0).
__version__ = "0.1.0"

# Explicit public API for `from loremem import *` and documentation tools.
__all__ = [
    "LoreClient",
    "AsyncLoreClient",
    "ContextResponse",
    "ReportResult",
]
@@ -0,0 +1,227 @@
1
+ """
2
+ loremem HTTP transport layer.
3
+
4
+ Handles:
5
+ - Bearer auth header injection
6
+ - Retry with exponential backoff (3 attempts)
7
+ - Timeout enforcement (5s context, 10s writes)
8
+ - Error classification into loremem exceptions
9
+ - Async variant (AsyncTransport) for async callers
10
+
11
+ httpx is used for both sync and async. It is the only required dependency.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import logging
17
+ import time
18
+ from typing import Any
19
+
20
+ import httpx
21
+
22
+ from loremem.exceptions import (
23
+ AuthError,
24
+ LoreMemError,
25
+ NetworkError,
26
+ RateLimitError,
27
+ ServerError,
28
+ )
29
+
30
+ logger = logging.getLogger("loremem")
31
+
32
+ _MAX_RETRIES = 3
33
+ _BACKOFF_BASE = 0.5 # seconds — 0.5, 1.0, 2.0
34
+ _CONTEXT_TIMEOUT = 5.0 # seconds — context must be fast
35
+ _WRITE_TIMEOUT = 10.0 # seconds — reporting calls are less time-sensitive
36
+
37
+
38
def _classify(response: httpx.Response) -> None:
    """Translate a non-2xx *response* into the matching loremem exception.

    Mapping: 401/403 -> AuthError, 429 -> RateLimitError, 5xx -> ServerError,
    anything else -> LoreMemError. Always raises; never returns normally.
    """
    status = response.status_code

    # Prefer the API's structured {"detail": ...} message; fall back to the
    # raw body, then to the bare status code, if the body is not JSON.
    try:
        detail = response.json().get("detail", response.text)
    except Exception:
        detail = response.text or str(status)

    if status in (401, 403):
        prefix = (
            "Authentication failed"
            if status == 401
            else "Forbidden — workspace mismatch"
        )
        raise AuthError(f"{prefix}: {detail}")
    if status == 429:
        raise RateLimitError(f"Rate limit exceeded: {detail}")
    if status >= 500:
        raise ServerError(f"Lore API error {status}: {detail}")
    raise LoreMemError(f"Unexpected response {status}: {detail}")
55
+
56
+
57
def _should_retry(exc: Exception) -> bool:
    """Only retry on network errors and 5xx; not on 4xx.

    NOTE(review): not referenced by Transport/AsyncTransport in this module —
    their retry loops classify exceptions inline via except clauses.
    """
    retryable = (NetworkError, ServerError, httpx.TransportError)
    return isinstance(exc, retryable)
60
+
61
+
62
+ # ── Sync transport ────────────────────────────────────────────────────────────
63
+
64
+
65
class Transport:
    """Synchronous HTTP transport with retry + exponential backoff.

    Every request carries Bearer auth, a JSON content type, and a stable
    User-Agent. Network failures and 5xx responses are retried up to
    _MAX_RETRIES times, sleeping _BACKOFF_BASE * 2**attempt between tries
    (0.5s, 1.0s); 4xx responses raise immediately and are never retried.
    """

    def __init__(self, base_url: str, api_key: str) -> None:
        """Normalize *base_url* (strip trailing slash) and build the fixed
        headers sent with every request."""
        self._base_url = base_url.rstrip("/")
        self._headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
            "User-Agent": "loremem-python/0.1.0",
        }

    def post(
        self,
        path: str,
        payload: dict[str, Any],
        timeout: float = _WRITE_TIMEOUT,
    ) -> dict[str, Any]:
        """POST *payload* as JSON to *path* with retry. Returns parsed JSON body.

        Raises AuthError/RateLimitError/LoreMemError for 4xx, and the last
        NetworkError/ServerError once retries are exhausted.
        """
        return self._request("POST", path, json_body=payload, timeout=timeout)

    def get(
        self,
        path: str,
        params: dict[str, Any] | None = None,
        timeout: float = _WRITE_TIMEOUT,
    ) -> dict[str, Any]:
        """GET *path* (optionally with query *params*) with retry.
        Returns parsed JSON body. Raises as documented on :meth:`post`."""
        return self._request("GET", path, params=params, timeout=timeout)

    def _request(
        self,
        method: str,
        path: str,
        *,
        json_body: dict[str, Any] | None = None,
        params: dict[str, Any] | None = None,
        timeout: float,
    ) -> dict[str, Any]:
        """Shared retry loop for GET/POST (previously duplicated verbatim
        in both methods)."""
        url = f"{self._base_url}{path}"
        last_exc: Exception = LoreMemError("Unknown error")

        for attempt in range(_MAX_RETRIES):
            try:
                response = httpx.request(
                    method,
                    url,
                    json=json_body,
                    params=params,
                    headers=self._headers,
                    timeout=timeout,
                )
                if response.is_success:
                    return response.json()
                _classify(response)

            except (AuthError, RateLimitError):
                raise  # non-retryable 4xx
            except ServerError as exc:
                # BUG FIX: this clause must precede the LoreMemError one.
                # If ServerError subclasses the package base LoreMemError
                # (the conventional hierarchy — TODO confirm against
                # loremem/exceptions.py), the original ordering matched 5xx
                # against LoreMemError first and re-raised instead of
                # retrying. This ordering retries 5xx under either hierarchy.
                last_exc = exc
            except httpx.TimeoutException as exc:
                last_exc = NetworkError(f"Request timed out: {exc}")
            except httpx.TransportError as exc:
                last_exc = NetworkError(f"Network error: {exc}")
            except LoreMemError:
                raise  # unexpected non-2xx from _classify — not retryable

            if attempt < _MAX_RETRIES - 1:
                time.sleep(_BACKOFF_BASE * (2 ** attempt))

        raise last_exc
147
+
148
+
149
+ # ── Async transport ───────────────────────────────────────────────────────────
150
+
151
+
152
class AsyncTransport:
    """Async HTTP transport with retry + backoff. Use with async AI agent frameworks.

    Mirrors :class:`Transport`: Bearer auth on every call, retries for
    network failures and 5xx (up to _MAX_RETRIES with exponential backoff),
    immediate raise for 4xx. A fresh httpx.AsyncClient is opened per call
    and closed when the call completes, as in the original implementation.
    """

    def __init__(self, base_url: str, api_key: str) -> None:
        """Normalize *base_url* (strip trailing slash) and build the fixed
        headers sent with every request."""
        self._base_url = base_url.rstrip("/")
        self._headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
            "User-Agent": "loremem-python/0.1.0",
        }

    async def post(
        self,
        path: str,
        payload: dict[str, Any],
        timeout: float = _WRITE_TIMEOUT,
    ) -> dict[str, Any]:
        """POST *payload* as JSON to *path* with retry. Returns parsed JSON body.

        Raises AuthError/RateLimitError/LoreMemError for 4xx, and the last
        NetworkError/ServerError once retries are exhausted.
        """
        return await self._request("POST", path, json_body=payload, timeout=timeout)

    async def get(
        self,
        path: str,
        params: dict[str, Any] | None = None,
        timeout: float = _WRITE_TIMEOUT,
    ) -> dict[str, Any]:
        """GET *path* (optionally with query *params*) with retry.
        Returns parsed JSON body. Raises as documented on :meth:`post`."""
        return await self._request("GET", path, params=params, timeout=timeout)

    async def _request(
        self,
        method: str,
        path: str,
        *,
        json_body: dict[str, Any] | None = None,
        params: dict[str, Any] | None = None,
        timeout: float,
    ) -> dict[str, Any]:
        """Shared retry loop for GET/POST (previously duplicated verbatim
        in both methods)."""
        import asyncio  # local import, as in the original methods

        url = f"{self._base_url}{path}"
        last_exc: Exception = LoreMemError("Unknown error")

        async with httpx.AsyncClient(headers=self._headers, timeout=timeout) as client:
            for attempt in range(_MAX_RETRIES):
                try:
                    response = await client.request(
                        method, url, json=json_body, params=params
                    )
                    if response.is_success:
                        return response.json()
                    _classify(response)

                except (AuthError, RateLimitError):
                    raise  # non-retryable 4xx
                except ServerError as exc:
                    # BUG FIX: caught BEFORE LoreMemError. If ServerError
                    # subclasses the package base LoreMemError (conventional
                    # hierarchy — TODO confirm against loremem/exceptions.py),
                    # the original ordering re-raised 5xx instead of retrying.
                    # This ordering retries 5xx under either hierarchy.
                    last_exc = exc
                except httpx.TimeoutException as exc:
                    last_exc = NetworkError(f"Request timed out: {exc}")
                except httpx.TransportError as exc:
                    last_exc = NetworkError(f"Network error: {exc}")
                except LoreMemError:
                    raise  # unexpected non-2xx from _classify — not retryable

                if attempt < _MAX_RETRIES - 1:
                    await asyncio.sleep(_BACKOFF_BASE * (2 ** attempt))

        raise last_exc