affinity-sdk 0.9.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- affinity/__init__.py +139 -0
- affinity/cli/__init__.py +7 -0
- affinity/cli/click_compat.py +27 -0
- affinity/cli/commands/__init__.py +1 -0
- affinity/cli/commands/_entity_files_dump.py +219 -0
- affinity/cli/commands/_list_entry_fields.py +41 -0
- affinity/cli/commands/_v1_parsing.py +77 -0
- affinity/cli/commands/company_cmds.py +2139 -0
- affinity/cli/commands/completion_cmd.py +33 -0
- affinity/cli/commands/config_cmds.py +540 -0
- affinity/cli/commands/entry_cmds.py +33 -0
- affinity/cli/commands/field_cmds.py +413 -0
- affinity/cli/commands/interaction_cmds.py +875 -0
- affinity/cli/commands/list_cmds.py +3152 -0
- affinity/cli/commands/note_cmds.py +433 -0
- affinity/cli/commands/opportunity_cmds.py +1174 -0
- affinity/cli/commands/person_cmds.py +1980 -0
- affinity/cli/commands/query_cmd.py +444 -0
- affinity/cli/commands/relationship_strength_cmds.py +62 -0
- affinity/cli/commands/reminder_cmds.py +595 -0
- affinity/cli/commands/resolve_url_cmd.py +127 -0
- affinity/cli/commands/session_cmds.py +84 -0
- affinity/cli/commands/task_cmds.py +110 -0
- affinity/cli/commands/version_cmd.py +29 -0
- affinity/cli/commands/whoami_cmd.py +36 -0
- affinity/cli/config.py +108 -0
- affinity/cli/context.py +749 -0
- affinity/cli/csv_utils.py +195 -0
- affinity/cli/date_utils.py +42 -0
- affinity/cli/decorators.py +77 -0
- affinity/cli/errors.py +28 -0
- affinity/cli/field_utils.py +355 -0
- affinity/cli/formatters.py +551 -0
- affinity/cli/help_json.py +283 -0
- affinity/cli/logging.py +100 -0
- affinity/cli/main.py +261 -0
- affinity/cli/options.py +53 -0
- affinity/cli/paths.py +32 -0
- affinity/cli/progress.py +183 -0
- affinity/cli/query/__init__.py +163 -0
- affinity/cli/query/aggregates.py +357 -0
- affinity/cli/query/dates.py +194 -0
- affinity/cli/query/exceptions.py +147 -0
- affinity/cli/query/executor.py +1236 -0
- affinity/cli/query/filters.py +248 -0
- affinity/cli/query/models.py +333 -0
- affinity/cli/query/output.py +331 -0
- affinity/cli/query/parser.py +619 -0
- affinity/cli/query/planner.py +430 -0
- affinity/cli/query/progress.py +270 -0
- affinity/cli/query/schema.py +439 -0
- affinity/cli/render.py +1589 -0
- affinity/cli/resolve.py +222 -0
- affinity/cli/resolvers.py +249 -0
- affinity/cli/results.py +308 -0
- affinity/cli/runner.py +218 -0
- affinity/cli/serialization.py +65 -0
- affinity/cli/session_cache.py +276 -0
- affinity/cli/types.py +70 -0
- affinity/client.py +771 -0
- affinity/clients/__init__.py +19 -0
- affinity/clients/http.py +3664 -0
- affinity/clients/pipeline.py +165 -0
- affinity/compare.py +501 -0
- affinity/downloads.py +114 -0
- affinity/exceptions.py +615 -0
- affinity/filters.py +1128 -0
- affinity/hooks.py +198 -0
- affinity/inbound_webhooks.py +302 -0
- affinity/models/__init__.py +163 -0
- affinity/models/entities.py +798 -0
- affinity/models/pagination.py +513 -0
- affinity/models/rate_limit_snapshot.py +48 -0
- affinity/models/secondary.py +413 -0
- affinity/models/types.py +663 -0
- affinity/policies.py +40 -0
- affinity/progress.py +22 -0
- affinity/py.typed +0 -0
- affinity/services/__init__.py +42 -0
- affinity/services/companies.py +1286 -0
- affinity/services/lists.py +1892 -0
- affinity/services/opportunities.py +1330 -0
- affinity/services/persons.py +1348 -0
- affinity/services/rate_limits.py +173 -0
- affinity/services/tasks.py +193 -0
- affinity/services/v1_only.py +2445 -0
- affinity/types.py +83 -0
- affinity_sdk-0.9.5.dist-info/METADATA +622 -0
- affinity_sdk-0.9.5.dist-info/RECORD +92 -0
- affinity_sdk-0.9.5.dist-info/WHEEL +4 -0
- affinity_sdk-0.9.5.dist-info/entry_points.txt +2 -0
- affinity_sdk-0.9.5.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Rate limit services (version-agnostic).
|
|
3
|
+
|
|
4
|
+
These services provide a unified public surface for inspecting and refreshing
|
|
5
|
+
rate limit information without exposing API versioning details.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import time
|
|
11
|
+
from datetime import datetime, timezone
|
|
12
|
+
|
|
13
|
+
from ..clients.http import AsyncHTTPClient, HTTPClient, RateLimitState
|
|
14
|
+
from ..exceptions import AuthorizationError, NotFoundError
|
|
15
|
+
from ..models.rate_limit_snapshot import RateLimitBucket, RateLimitSnapshot
|
|
16
|
+
from ..models.secondary import RateLimits
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _bucket_from_headers(
    *,
    limit: int | None,
    remaining: int | None,
    reset_seconds: int | None,
    age_seconds: float | None,
) -> RateLimitBucket:
    """Build a RateLimitBucket from raw header-derived values.

    The reset window is decayed by ``age_seconds`` (how stale the header
    observation is) and clamped at zero. ``used`` is derived as
    ``limit - remaining`` only when both values are present and the
    difference is non-negative.
    """
    if reset_seconds is None:
        decayed_reset: int | None = None
    elif age_seconds is None:
        decayed_reset = reset_seconds
    else:
        decayed_reset = max(0, int(reset_seconds - age_seconds))

    consumed: int | None = None
    if limit is not None and remaining is not None and limit - remaining >= 0:
        consumed = limit - remaining

    return RateLimitBucket(
        limit=limit,
        remaining=remaining,
        reset_seconds=decayed_reset,
        used=consumed,
    )
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def _snapshot_from_state(state: RateLimitState) -> RateLimitSnapshot:
    """Derive a RateLimitSnapshot from a client's tracked response-header state.

    If the tracked state carries a numeric ``last_updated`` timestamp, the
    snapshot records when the headers were observed and how stale they are;
    otherwise both fields are ``None`` and the source is reported as
    ``"unknown"`` when no bucket field was ever populated.
    """
    tracked = state.snapshot()
    stamp = tracked.get("last_updated")

    if isinstance(stamp, (int, float)):
        staleness: float | None = max(0.0, time.time() - float(stamp))
        seen_at: datetime | None = datetime.fromtimestamp(float(stamp), tz=timezone.utc)
    else:
        staleness = None
        seen_at = None

    per_key = _bucket_from_headers(
        limit=tracked.get("user_limit"),
        remaining=tracked.get("user_remaining"),
        reset_seconds=tracked.get("user_reset_seconds"),
        age_seconds=staleness,
    )
    per_org = _bucket_from_headers(
        limit=tracked.get("org_limit"),
        remaining=tracked.get("org_remaining"),
        reset_seconds=tracked.get("org_reset_seconds"),
        age_seconds=staleness,
    )

    # "headers" only if at least one bucket field was actually observed.
    buckets_populated = any(
        field is not None
        for bucket in (per_key, per_org)
        for field in (bucket.limit, bucket.remaining, bucket.reset_seconds)
    )

    return RateLimitSnapshot(
        api_key_per_minute=per_key,
        org_monthly=per_org,
        observed_at=seen_at,
        age_seconds=staleness,
        source="headers" if buckets_populated else "unknown",
        request_id=tracked.get("last_request_id"),
    )
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _snapshot_from_endpoint(
    limits: RateLimits,
    *,
    observed_at: datetime,
    request_id: str | None,
) -> RateLimitSnapshot:
    """Convert an authoritative ``/rate-limit`` payload into a snapshot.

    The data was fetched just now, so ``age_seconds`` is ``0.0`` and the
    source is ``"endpoint"``.
    """
    per_minute = limits.api_key_per_minute
    monthly = limits.org_monthly

    return RateLimitSnapshot(
        api_key_per_minute=RateLimitBucket(
            limit=per_minute.limit,
            remaining=per_minute.remaining,
            reset_seconds=per_minute.reset,
            used=per_minute.used,
        ),
        org_monthly=RateLimitBucket(
            limit=monthly.limit,
            remaining=monthly.remaining,
            reset_seconds=monthly.reset,
            used=monthly.used,
        ),
        observed_at=observed_at,
        age_seconds=0.0,
        source="endpoint",
        request_id=request_id,
    )
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
class RateLimitService:
    """Unified rate limit service (sync)."""

    def __init__(self, client: HTTPClient):
        self._client = client

    def snapshot(self) -> RateLimitSnapshot:
        """Return a best-effort snapshot derived from tracked response headers."""
        return _snapshot_from_state(self._client.rate_limit_state)

    def refresh(self) -> RateLimitSnapshot:
        """
        Fetch/observe the best available rate limit snapshot now.

        Strategy:
        1) Try the dedicated endpoint (`GET /rate-limit`, internal v1 today).
        2) If unavailable (403/404), fall back to `GET /auth/whoami` to observe headers.
        """
        now_utc = datetime.now(tz=timezone.utc)
        try:
            payload = self._client.get("/rate-limit", v1=True)
            parsed = RateLimits.model_validate(payload.get("rate", {}))
            req_id = self._client.rate_limit_state.snapshot().get("last_request_id")
            return _snapshot_from_endpoint(parsed, observed_at=now_utc, request_id=req_id)
        except (AuthorizationError, NotFoundError):
            # Endpoint not available for this key/version: issue a lightweight
            # request so fresh headers are tracked, then read them back.
            _ = self._client.get("/auth/whoami")
            return self.snapshot()
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class AsyncRateLimitService:
    """Unified rate limit service (async)."""

    def __init__(self, client: AsyncHTTPClient):
        self._client = client

    def snapshot(self) -> RateLimitSnapshot:
        """Return a best-effort snapshot derived from tracked response headers."""
        return _snapshot_from_state(self._client.rate_limit_state)

    async def refresh(self) -> RateLimitSnapshot:
        """
        Fetch/observe the best available rate limit snapshot now.

        Strategy mirrors the sync client.
        """
        now_utc = datetime.now(tz=timezone.utc)
        try:
            payload = await self._client.get("/rate-limit", v1=True)
            parsed = RateLimits.model_validate(payload.get("rate", {}))
            req_id = self._client.rate_limit_state.snapshot().get("last_request_id")
            return _snapshot_from_endpoint(parsed, observed_at=now_utc, request_id=req_id)
        except (AuthorizationError, NotFoundError):
            # Endpoint not available for this key/version: issue a lightweight
            # request so fresh headers are tracked, then read them back.
            _ = await self._client.get("/auth/whoami")
            return self.snapshot()
|
|
@@ -0,0 +1,193 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Task service for long-running operations.
|
|
3
|
+
|
|
4
|
+
Provides polling utilities for async operations like merges.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
import random
|
|
11
|
+
import time
|
|
12
|
+
from typing import TYPE_CHECKING
|
|
13
|
+
|
|
14
|
+
from ..exceptions import AffinityError
|
|
15
|
+
from ..exceptions import TimeoutError as AffinityTimeoutError
|
|
16
|
+
from ..models.secondary import MergeTask
|
|
17
|
+
|
|
18
|
+
if TYPE_CHECKING:
|
|
19
|
+
from ..clients.http import AsyncHTTPClient, HTTPClient
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class TaskStatus:
    """Known task status values."""

    # Non-terminal: wait() keeps polling while a task reports this status.
    PENDING = "pending"
    # Non-terminal: wait() keeps polling while a task reports this status.
    IN_PROGRESS = "in_progress"
    # Terminal: wait() returns the task when this status is reached.
    SUCCESS = "success"
    # Terminal: wait() raises AffinityError when this status is reached.
    FAILED = "failed"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class TaskService:
    """
    Service for managing long-running operations (tasks).

    Provides utilities to poll and wait for async operations like merges.

    Example:
        # Start a merge operation
        task_url = client.companies.merge(primary_id, duplicate_id)

        # Wait for completion with timeout
        task = client.tasks.wait(task_url, timeout=60.0)
        if task.status == "success":
            print("Merge completed!")
    """

    def __init__(self, client: HTTPClient):
        self._client = client

    def get(self, task_url: str) -> MergeTask:
        """
        Get the current status of a task.

        Args:
            task_url: The task URL returned from an async operation

        Returns:
            MergeTask with current status
        """
        data = self._client.get_url(task_url)
        return MergeTask.model_validate(data)

    def wait(
        self,
        task_url: str,
        *,
        timeout: float = 300.0,
        poll_interval: float = 2.0,
        max_poll_interval: float = 30.0,
    ) -> MergeTask:
        """
        Wait for a task to complete with exponential backoff.

        Args:
            task_url: The task URL returned from an async operation
            timeout: Maximum time to wait in seconds (default: 5 minutes)
            poll_interval: Initial polling interval in seconds
            max_poll_interval: Maximum polling interval after backoff

        Returns:
            MergeTask with final status

        Raises:
            TimeoutError: If task doesn't complete within timeout
            AffinityError: If task fails
        """
        start_time = time.monotonic()
        current_interval = poll_interval

        while True:
            task = self.get(task_url)

            if task.status in (TaskStatus.SUCCESS, TaskStatus.FAILED):
                if task.status == TaskStatus.FAILED:
                    raise AffinityError(
                        f"Task failed: {task_url}",
                        status_code=None,
                        response_body={"task": task.model_dump()},
                    )
                return task

            # Check timeout before sleeping again.
            remaining = timeout - (time.monotonic() - start_time)
            if remaining <= 0:
                raise AffinityTimeoutError(f"Task did not complete within {timeout}s: {task_url}")

            # Wait with jitter before next poll. Cap the sleep at the time
            # left until the deadline so the call cannot overshoot `timeout`
            # by up to max_poll_interval once backoff has grown.
            jitter = random.uniform(0, current_interval * 0.1)
            time.sleep(min(current_interval + jitter, remaining))

            # Exponential backoff, capped at max
            current_interval = min(current_interval * 1.5, max_poll_interval)
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
class AsyncTaskService:
    """
    Async version of TaskService.

    Example:
        # Start a merge operation
        task_url = await client.companies.merge(primary_id, duplicate_id)

        # Wait for completion with timeout
        task = await client.tasks.wait(task_url, timeout=60.0)
    """

    def __init__(self, client: AsyncHTTPClient):
        self._client = client

    async def get(self, task_url: str) -> MergeTask:
        """
        Get the current status of a task.

        Args:
            task_url: The task URL returned from an async operation

        Returns:
            MergeTask with current status
        """
        data = await self._client.get_url(task_url)
        return MergeTask.model_validate(data)

    async def wait(
        self,
        task_url: str,
        *,
        timeout: float = 300.0,
        poll_interval: float = 2.0,
        max_poll_interval: float = 30.0,
    ) -> MergeTask:
        """
        Wait for a task to complete with exponential backoff.

        Args:
            task_url: The task URL returned from an async operation
            timeout: Maximum time to wait in seconds (default: 5 minutes)
            poll_interval: Initial polling interval in seconds
            max_poll_interval: Maximum polling interval after backoff

        Returns:
            MergeTask with final status

        Raises:
            TimeoutError: If task doesn't complete within timeout
            AffinityError: If task fails
        """
        start_time = time.monotonic()
        current_interval = poll_interval

        while True:
            task = await self.get(task_url)

            if task.status in (TaskStatus.SUCCESS, TaskStatus.FAILED):
                if task.status == TaskStatus.FAILED:
                    raise AffinityError(
                        f"Task failed: {task_url}",
                        status_code=None,
                        response_body={"task": task.model_dump()},
                    )
                return task

            # Check timeout before sleeping again.
            remaining = timeout - (time.monotonic() - start_time)
            if remaining <= 0:
                raise AffinityTimeoutError(f"Task did not complete within {timeout}s: {task_url}")

            # Wait with jitter before next poll. Cap the sleep at the time
            # left until the deadline so the call cannot overshoot `timeout`
            # by up to max_poll_interval once backoff has grown.
            jitter = random.uniform(0, current_interval * 0.1)
            await asyncio.sleep(min(current_interval + jitter, remaining))

            # Exponential backoff, capped at max
            current_interval = min(current_interval * 1.5, max_poll_interval)
|