affinity-sdk 0.9.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- affinity/__init__.py +139 -0
- affinity/cli/__init__.py +7 -0
- affinity/cli/click_compat.py +27 -0
- affinity/cli/commands/__init__.py +1 -0
- affinity/cli/commands/_entity_files_dump.py +219 -0
- affinity/cli/commands/_list_entry_fields.py +41 -0
- affinity/cli/commands/_v1_parsing.py +77 -0
- affinity/cli/commands/company_cmds.py +2139 -0
- affinity/cli/commands/completion_cmd.py +33 -0
- affinity/cli/commands/config_cmds.py +540 -0
- affinity/cli/commands/entry_cmds.py +33 -0
- affinity/cli/commands/field_cmds.py +413 -0
- affinity/cli/commands/interaction_cmds.py +875 -0
- affinity/cli/commands/list_cmds.py +3152 -0
- affinity/cli/commands/note_cmds.py +433 -0
- affinity/cli/commands/opportunity_cmds.py +1174 -0
- affinity/cli/commands/person_cmds.py +1980 -0
- affinity/cli/commands/query_cmd.py +444 -0
- affinity/cli/commands/relationship_strength_cmds.py +62 -0
- affinity/cli/commands/reminder_cmds.py +595 -0
- affinity/cli/commands/resolve_url_cmd.py +127 -0
- affinity/cli/commands/session_cmds.py +84 -0
- affinity/cli/commands/task_cmds.py +110 -0
- affinity/cli/commands/version_cmd.py +29 -0
- affinity/cli/commands/whoami_cmd.py +36 -0
- affinity/cli/config.py +108 -0
- affinity/cli/context.py +749 -0
- affinity/cli/csv_utils.py +195 -0
- affinity/cli/date_utils.py +42 -0
- affinity/cli/decorators.py +77 -0
- affinity/cli/errors.py +28 -0
- affinity/cli/field_utils.py +355 -0
- affinity/cli/formatters.py +551 -0
- affinity/cli/help_json.py +283 -0
- affinity/cli/logging.py +100 -0
- affinity/cli/main.py +261 -0
- affinity/cli/options.py +53 -0
- affinity/cli/paths.py +32 -0
- affinity/cli/progress.py +183 -0
- affinity/cli/query/__init__.py +163 -0
- affinity/cli/query/aggregates.py +357 -0
- affinity/cli/query/dates.py +194 -0
- affinity/cli/query/exceptions.py +147 -0
- affinity/cli/query/executor.py +1236 -0
- affinity/cli/query/filters.py +248 -0
- affinity/cli/query/models.py +333 -0
- affinity/cli/query/output.py +331 -0
- affinity/cli/query/parser.py +619 -0
- affinity/cli/query/planner.py +430 -0
- affinity/cli/query/progress.py +270 -0
- affinity/cli/query/schema.py +439 -0
- affinity/cli/render.py +1589 -0
- affinity/cli/resolve.py +222 -0
- affinity/cli/resolvers.py +249 -0
- affinity/cli/results.py +308 -0
- affinity/cli/runner.py +218 -0
- affinity/cli/serialization.py +65 -0
- affinity/cli/session_cache.py +276 -0
- affinity/cli/types.py +70 -0
- affinity/client.py +771 -0
- affinity/clients/__init__.py +19 -0
- affinity/clients/http.py +3664 -0
- affinity/clients/pipeline.py +165 -0
- affinity/compare.py +501 -0
- affinity/downloads.py +114 -0
- affinity/exceptions.py +615 -0
- affinity/filters.py +1128 -0
- affinity/hooks.py +198 -0
- affinity/inbound_webhooks.py +302 -0
- affinity/models/__init__.py +163 -0
- affinity/models/entities.py +798 -0
- affinity/models/pagination.py +513 -0
- affinity/models/rate_limit_snapshot.py +48 -0
- affinity/models/secondary.py +413 -0
- affinity/models/types.py +663 -0
- affinity/policies.py +40 -0
- affinity/progress.py +22 -0
- affinity/py.typed +0 -0
- affinity/services/__init__.py +42 -0
- affinity/services/companies.py +1286 -0
- affinity/services/lists.py +1892 -0
- affinity/services/opportunities.py +1330 -0
- affinity/services/persons.py +1348 -0
- affinity/services/rate_limits.py +173 -0
- affinity/services/tasks.py +193 -0
- affinity/services/v1_only.py +2445 -0
- affinity/types.py +83 -0
- affinity_sdk-0.9.5.dist-info/METADATA +622 -0
- affinity_sdk-0.9.5.dist-info/RECORD +92 -0
- affinity_sdk-0.9.5.dist-info/WHEEL +4 -0
- affinity_sdk-0.9.5.dist-info/entry_points.txt +2 -0
- affinity_sdk-0.9.5.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,513 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Pagination, response wrappers, and utility models.
|
|
3
|
+
|
|
4
|
+
Provides type-safe access to paginated API responses.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from collections.abc import AsyncIterator, Awaitable, Callable, Iterator
|
|
10
|
+
from dataclasses import dataclass
|
|
11
|
+
from typing import Generic, TypeVar
|
|
12
|
+
|
|
13
|
+
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
|
|
14
|
+
|
|
15
|
+
from ..exceptions import TooManyResultsError
|
|
16
|
+
|
|
17
|
+
# Generic item type carried by paginated responses and iterators.
T = TypeVar("T")

# Public API of this module; internal helpers stay unexported.
__all__ = [
    "FilterStats",
    "PaginationProgress",
    "PaginatedResponse",
    "PageIterator",
    "AsyncPageIterator",
]

# Default limit for .all() method to prevent OOM when a caller accidentally
# materializes a very large collection.
_DEFAULT_LIMIT = 100_000
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass
class PaginationProgress:
    """Progress information for pagination callbacks.

    Instances are handed to ``on_progress`` callbacks by the page iterators
    after each fetched page.
    """

    # 1-indexed page number.
    page_number: int
    # Items in the just-fetched page.
    items_in_page: int
    # Cumulative items *including* the just-yielded page.
    items_so_far: int
    # Whether more pages exist (matches Page.has_next).
    has_next: bool
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class AffinityModel(BaseModel):
    """Base model with common configuration.

    Shared pydantic config for all models in this module:
    - extra="ignore": unknown fields in API payloads are dropped, not errors.
    - populate_by_name=True: fields accept either the Python name or the alias.
    - use_enum_values=True: enum members are stored as their underlying values.
    """

    model_config = ConfigDict(
        extra="ignore",
        populate_by_name=True,
        use_enum_values=True,
    )
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
# =============================================================================
|
|
59
|
+
# Pagination Models
|
|
60
|
+
# =============================================================================
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class PaginationInfo(AffinityModel):
    """V2 pagination info returned in responses."""

    # Populated from the "nextUrl" / "prevUrl" keys of the response payload;
    # None when the corresponding page does not exist.
    next_cursor: str | None = Field(None, alias="nextUrl")
    prev_cursor: str | None = Field(None, alias="prevUrl")
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
class PaginationInfoWithTotal(PaginationInfo):
    """Pagination with total count (used by some endpoints)."""

    # Total number of matching rows as reported by the endpoint ("totalCount");
    # defaults to 0 when the endpoint omits it.
    total_count: int = Field(0, alias="totalCount")
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
# =============================================================================
|
|
77
|
+
# Generic Paginated Response
|
|
78
|
+
# =============================================================================
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@dataclass
class FilterStats:
    """Counters accumulated while paginating with a client-side filter."""

    # Total physical rows scanned so far.
    scanned: int = 0
    # Total rows matching the filter so far.
    matched: int = 0
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
class PaginatedResponse(AffinityModel, Generic[T]):
    """
    A paginated response from the API.

    Wraps one page of ``data`` together with the pagination info required to
    request the next page.
    """

    data: list[T] = Field(default_factory=list)
    pagination: PaginationInfo = Field(default_factory=PaginationInfo)
    _has_next_override: bool | None = PrivateAttr(default=None)
    _filter_stats: FilterStats | None = PrivateAttr(default=None)

    def __len__(self) -> int:
        """Number of items in current page."""
        return len(self.data)

    @property
    def has_next(self) -> bool:
        """Whether there are more pages."""
        override = self._has_next_override
        if override is None:
            # No explicit override: fall back to cursor presence.
            return self.pagination.next_cursor is not None
        return override

    @property
    def next_cursor(self) -> str | None:
        """Cursor for the next page, if any."""
        return self.pagination.next_cursor

    @property
    def filter_stats(self) -> FilterStats | None:
        """Stats for client-side filtered queries (scanned/matched counts)."""
        return self._filter_stats
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
# =============================================================================
|
|
124
|
+
# Auto-paginating Iterator
|
|
125
|
+
# =============================================================================
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
class PageIterator(Generic[T]):
    """
    Synchronous iterator that automatically fetches all pages.

    Yields individual items, transparently requesting the next page from the
    API whenever the buffered page is exhausted.

    Usage:
        for item in client.companies.all():
            print(item.name)
    """

    def __init__(
        self,
        fetch_page: Callable[[str | None], PaginatedResponse[T]],
        initial_cursor: str | None = None,
    ):
        self._fetch_page = fetch_page
        self._next_cursor = initial_cursor
        self._current_page: list[T] = []
        self._index = 0
        self._exhausted = False

    def __iter__(self) -> Iterator[T]:
        return self

    def __next__(self) -> T:
        while True:
            # Drain the buffered page first.
            if self._index < len(self._current_page):
                value = self._current_page[self._index]
                self._index += 1
                return value

            if self._exhausted:
                raise StopIteration

            # Buffer is empty: request the next page.
            cursor_before = self._next_cursor
            page = self._fetch_page(cursor_before)
            self._current_page = list(page.data)
            self._next_cursor = page.next_cursor
            self._index = 0

            # A "next" cursor identical to the one just requested would spin
            # forever; treat it as the end of the stream after this page.
            if page.has_next and page.next_cursor == cursor_before:
                self._exhausted = True

            if not self._current_page:
                # Empty pages may still carry a valid nextUrl; keep paging
                # until data arrives or the cursor runs out.
                if page.has_next and not self._exhausted:
                    continue
                self._exhausted = True
                raise StopIteration

            if not page.has_next:
                self._exhausted = True

    def pages(
        self,
        *,
        on_progress: Callable[[PaginationProgress], None] | None = None,
    ) -> Iterator[PaginatedResponse[T]]:
        """
        Iterate through pages (not individual items).

        Args:
            on_progress: Optional callback invoked after each fetched page
                with a PaginationProgress (page_number, items_in_page,
                items_so_far, has_next). Keep callbacks cheap; heavy work
                belongs outside the callback so iteration is not blocked.

        Yields:
            PaginatedResponse objects for each page.

        Example:
            def report(p: PaginationProgress):
                print(f"Page {p.page_number}: {p.items_so_far} items so far")

            for page in client.persons.all().pages(on_progress=report):
                process(page.data)
        """
        pages_fetched = 0
        running_total = 0

        while True:
            cursor_before = self._next_cursor
            page = self._fetch_page(cursor_before)
            self._next_cursor = page.next_cursor
            pages_fetched += 1
            page_size = len(page.data)
            running_total += page_size

            # A cursor that did not advance would loop forever; yield the
            # page (when it has data) and then stop.
            stalled = page.has_next and page.next_cursor == cursor_before

            if page.data:
                if on_progress:
                    on_progress(
                        PaginationProgress(
                            page_number=pages_fetched,
                            items_in_page=page_size,
                            items_so_far=running_total,
                            # A stalled cursor means no usable next page.
                            has_next=False if stalled else page.has_next,
                        )
                    )
                yield page

            if stalled or not page.has_next:
                break

    def all(self, *, limit: int | None = _DEFAULT_LIMIT) -> list[T]:
        """
        Fetch all items across all pages into a list.

        Args:
            limit: Maximum items to fetch. Default 100,000. Set to None for
                unlimited.

        Returns:
            List of all items.

        Raises:
            TooManyResultsError: If results exceed limit.

        Note:
            The check runs after each page is appended, so the final list may
            exceed `limit` by up to one page before the error is raised.

        Example:
            # Default - safe for most use cases
            persons = list(client.persons.all())  # Using iterator

            # Or use .all() method with limit check
            it = PageIterator(fetch_page)
            persons = it.all()  # Returns list, raises if > 100k

            # Explicit unlimited for large exports
            all_persons = it.all(limit=None)

            # Custom limit
            persons = it.all(limit=500_000)
        """
        collected: list[T] = []

        for page in self.pages():
            collected.extend(page.data)

            if limit is not None and len(collected) > limit:
                raise TooManyResultsError(
                    f"Exceeded limit={limit:,} items. "
                    f"Use pages() for streaming, add a filter, or pass limit=None."
                )

        return collected
|
|
293
|
+
|
|
294
|
+
|
|
295
|
+
class AsyncPageIterator(Generic[T]):
    """
    Asynchronous iterator that automatically fetches all pages.

    Yields individual items, transparently awaiting the next page from the
    API whenever the buffered page is exhausted.

    Usage:
        async for item in client.companies.all():
            print(item.name)
    """

    def __init__(
        self,
        fetch_page: Callable[[str | None], Awaitable[PaginatedResponse[T]]],
        initial_cursor: str | None = None,
    ):
        self._fetch_page = fetch_page
        self._next_cursor = initial_cursor
        self._current_page: list[T] = []
        self._index = 0
        self._exhausted = False

    def __aiter__(self) -> AsyncIterator[T]:
        return self

    async def __anext__(self) -> T:
        while True:
            # Drain the buffered page first.
            if self._index < len(self._current_page):
                value = self._current_page[self._index]
                self._index += 1
                return value

            if self._exhausted:
                raise StopAsyncIteration

            # Buffer is empty: request the next page.
            cursor_before = self._next_cursor
            page = await self._fetch_page(cursor_before)
            self._current_page = list(page.data)
            self._next_cursor = page.next_cursor
            self._index = 0

            # A "next" cursor identical to the one just requested would spin
            # forever; treat it as the end of the stream after this page.
            if page.has_next and page.next_cursor == cursor_before:
                self._exhausted = True

            if not self._current_page:
                # Empty pages may still carry a valid nextUrl; keep paging
                # until data arrives or the cursor runs out.
                if page.has_next and not self._exhausted:
                    continue
                self._exhausted = True
                raise StopAsyncIteration

            if not page.has_next:
                self._exhausted = True

    async def pages(
        self,
        *,
        on_progress: Callable[[PaginationProgress], None] | None = None,
    ) -> AsyncIterator[PaginatedResponse[T]]:
        """
        Iterate through pages (not individual items).

        Args:
            on_progress: Optional callback invoked after each fetched page
                with a PaginationProgress (page_number, items_in_page,
                items_so_far, has_next). Keep callbacks cheap; heavy work
                belongs outside the callback so iteration is not blocked.

        Yields:
            PaginatedResponse objects for each page.

        Example:
            def report(p: PaginationProgress):
                print(f"Page {p.page_number}: {p.items_so_far} items so far")

            async for page in client.persons.all().pages(on_progress=report):
                process(page.data)
        """
        pages_fetched = 0
        running_total = 0

        while True:
            cursor_before = self._next_cursor
            page = await self._fetch_page(cursor_before)
            self._next_cursor = page.next_cursor
            pages_fetched += 1
            page_size = len(page.data)
            running_total += page_size

            # A cursor that did not advance would loop forever; yield the
            # page (when it has data) and then stop.
            stalled = page.has_next and page.next_cursor == cursor_before

            if page.data:
                if on_progress:
                    on_progress(
                        PaginationProgress(
                            page_number=pages_fetched,
                            items_in_page=page_size,
                            items_so_far=running_total,
                            # A stalled cursor means no usable next page.
                            has_next=False if stalled else page.has_next,
                        )
                    )
                yield page

            if stalled or not page.has_next:
                break

    async def all(self, *, limit: int | None = _DEFAULT_LIMIT) -> list[T]:
        """
        Fetch all items across all pages into a list.

        Args:
            limit: Maximum items to fetch. Default 100,000. Set to None for
                unlimited.

        Returns:
            List of all items.

        Raises:
            TooManyResultsError: If results exceed limit.

        Note:
            The check runs after each page is appended, so the final list may
            exceed `limit` by up to one page before the error is raised.

        Example:
            # Default - safe for most use cases
            persons = [p async for p in client.persons.all()]  # Using async iterator

            # Or use .all() method with limit check
            it = AsyncPageIterator(fetch_page)
            persons = await it.all()  # Returns list, raises if > 100k

            # Explicit unlimited for large exports
            all_persons = await it.all(limit=None)

            # Custom limit
            persons = await it.all(limit=500_000)
        """
        collected: list[T] = []

        async for page in self.pages():
            collected.extend(page.data)

            if limit is not None and len(collected) > limit:
                raise TooManyResultsError(
                    f"Exceeded limit={limit:,} items. "
                    f"Use pages() for streaming, add a filter, or pass limit=None."
                )

        return collected
|
|
460
|
+
|
|
461
|
+
|
|
462
|
+
# =============================================================================
|
|
463
|
+
# V1 Pagination Response (uses page_token)
|
|
464
|
+
# =============================================================================
|
|
465
|
+
|
|
466
|
+
|
|
467
|
+
class V1PaginatedResponse(AffinityModel, Generic[T]):
    """V1 API pagination format using page_token."""

    data: list[T] = Field(default_factory=list)
    # Opaque continuation token ("nextPageToken" on the wire); None when absent.
    next_page_token: str | None = Field(None, alias="nextPageToken")

    @property
    def has_next(self) -> bool:
        # A present token is the signal this model uses for "more pages".
        return self.next_page_token is not None
|
|
476
|
+
|
|
477
|
+
|
|
478
|
+
# =============================================================================
|
|
479
|
+
# Batch Operation Response (V2)
|
|
480
|
+
# =============================================================================
|
|
481
|
+
|
|
482
|
+
|
|
483
|
+
class BatchOperationResult(AffinityModel):
    """Result of a single operation in a batch."""

    # Identifier of the field the operation targeted ("fieldId" on the wire).
    field_id: str = Field(alias="fieldId")
    success: bool
    # Error message for the operation, when the payload provides one.
    error: str | None = None
|
|
489
|
+
|
|
490
|
+
|
|
491
|
+
class BatchOperationResponse(AffinityModel):
    """Response from batch field operations."""

    results: list[BatchOperationResult] = Field(default_factory=list)

    @property
    def all_successful(self) -> bool:
        """True when no operation in the batch failed (True for an empty batch)."""
        return not any(not entry.success for entry in self.results)

    @property
    def failures(self) -> list[BatchOperationResult]:
        """The subset of results that did not succeed."""
        return [entry for entry in self.results if not entry.success]
|
|
503
|
+
|
|
504
|
+
|
|
505
|
+
# =============================================================================
|
|
506
|
+
# Success Response (V1 delete operations)
|
|
507
|
+
# =============================================================================
|
|
508
|
+
|
|
509
|
+
|
|
510
|
+
class SuccessResponse(AffinityModel):
    """Simple success response from V1 delete operations."""

    # Whether the operation succeeded, as reported by the API.
    success: bool
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Unified, version-agnostic rate limit snapshot models.
|
|
3
|
+
|
|
4
|
+
These models represent the SDK's stable public surface for inspecting rate limit
|
|
5
|
+
state, independent of whether the underlying request used v1 or v2 endpoints.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from typing import Literal
|
|
12
|
+
|
|
13
|
+
from pydantic import Field
|
|
14
|
+
|
|
15
|
+
from .entities import AffinityModel
|
|
16
|
+
|
|
17
|
+
# Provenance of a snapshot: derived from tracked HTTP response headers, from a
# dedicated rate-limit endpoint payload, or "unknown" when nothing has been
# observed yet.
RateLimitSource = Literal["headers", "endpoint", "unknown"]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class RateLimitBucket(AffinityModel):
    """A single rate limit bucket (quota window)."""

    # All fields default to None: a bucket may be only partially observed.
    limit: int | None = None
    remaining: int | None = None
    # Seconds until the window resets ("resetSeconds" on the wire).
    reset_seconds: int | None = Field(None, alias="resetSeconds")
    used: int | None = None
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class RateLimitSnapshot(AffinityModel):
    """
    A best-effort snapshot of rate limit state.

    Notes:
        - `source="headers"` means the snapshot is derived from tracked HTTP response headers.
        - `source="endpoint"` means the snapshot is derived from a dedicated endpoint response payload.
        - `source="unknown"` means no reliable rate limit information has been observed yet.
    """

    # Per-API-key per-minute quota bucket ("apiKeyPerMinute" on the wire).
    api_key_per_minute: RateLimitBucket = Field(
        default_factory=RateLimitBucket, alias="apiKeyPerMinute"
    )
    # Organization-wide monthly quota bucket ("orgMonthly" on the wire).
    org_monthly: RateLimitBucket = Field(default_factory=RateLimitBucket, alias="orgMonthly")
    # Concurrency bucket; None when no such information has been observed.
    concurrent: RateLimitBucket | None = None

    # When the underlying observation was made ("observedAt"), and how stale
    # it was when snapshotted ("ageSeconds"); both optional.
    observed_at: datetime | None = Field(None, alias="observedAt")
    age_seconds: float | None = Field(None, alias="ageSeconds")
    source: RateLimitSource = "unknown"
    # Request id of the observation that produced this snapshot, if any.
    request_id: str | None = Field(None, alias="requestId")
|