affinity_sdk-0.9.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. affinity/__init__.py +139 -0
  2. affinity/cli/__init__.py +7 -0
  3. affinity/cli/click_compat.py +27 -0
  4. affinity/cli/commands/__init__.py +1 -0
  5. affinity/cli/commands/_entity_files_dump.py +219 -0
  6. affinity/cli/commands/_list_entry_fields.py +41 -0
  7. affinity/cli/commands/_v1_parsing.py +77 -0
  8. affinity/cli/commands/company_cmds.py +2139 -0
  9. affinity/cli/commands/completion_cmd.py +33 -0
  10. affinity/cli/commands/config_cmds.py +540 -0
  11. affinity/cli/commands/entry_cmds.py +33 -0
  12. affinity/cli/commands/field_cmds.py +413 -0
  13. affinity/cli/commands/interaction_cmds.py +875 -0
  14. affinity/cli/commands/list_cmds.py +3152 -0
  15. affinity/cli/commands/note_cmds.py +433 -0
  16. affinity/cli/commands/opportunity_cmds.py +1174 -0
  17. affinity/cli/commands/person_cmds.py +1980 -0
  18. affinity/cli/commands/query_cmd.py +444 -0
  19. affinity/cli/commands/relationship_strength_cmds.py +62 -0
  20. affinity/cli/commands/reminder_cmds.py +595 -0
  21. affinity/cli/commands/resolve_url_cmd.py +127 -0
  22. affinity/cli/commands/session_cmds.py +84 -0
  23. affinity/cli/commands/task_cmds.py +110 -0
  24. affinity/cli/commands/version_cmd.py +29 -0
  25. affinity/cli/commands/whoami_cmd.py +36 -0
  26. affinity/cli/config.py +108 -0
  27. affinity/cli/context.py +749 -0
  28. affinity/cli/csv_utils.py +195 -0
  29. affinity/cli/date_utils.py +42 -0
  30. affinity/cli/decorators.py +77 -0
  31. affinity/cli/errors.py +28 -0
  32. affinity/cli/field_utils.py +355 -0
  33. affinity/cli/formatters.py +551 -0
  34. affinity/cli/help_json.py +283 -0
  35. affinity/cli/logging.py +100 -0
  36. affinity/cli/main.py +261 -0
  37. affinity/cli/options.py +53 -0
  38. affinity/cli/paths.py +32 -0
  39. affinity/cli/progress.py +183 -0
  40. affinity/cli/query/__init__.py +163 -0
  41. affinity/cli/query/aggregates.py +357 -0
  42. affinity/cli/query/dates.py +194 -0
  43. affinity/cli/query/exceptions.py +147 -0
  44. affinity/cli/query/executor.py +1236 -0
  45. affinity/cli/query/filters.py +248 -0
  46. affinity/cli/query/models.py +333 -0
  47. affinity/cli/query/output.py +331 -0
  48. affinity/cli/query/parser.py +619 -0
  49. affinity/cli/query/planner.py +430 -0
  50. affinity/cli/query/progress.py +270 -0
  51. affinity/cli/query/schema.py +439 -0
  52. affinity/cli/render.py +1589 -0
  53. affinity/cli/resolve.py +222 -0
  54. affinity/cli/resolvers.py +249 -0
  55. affinity/cli/results.py +308 -0
  56. affinity/cli/runner.py +218 -0
  57. affinity/cli/serialization.py +65 -0
  58. affinity/cli/session_cache.py +276 -0
  59. affinity/cli/types.py +70 -0
  60. affinity/client.py +771 -0
  61. affinity/clients/__init__.py +19 -0
  62. affinity/clients/http.py +3664 -0
  63. affinity/clients/pipeline.py +165 -0
  64. affinity/compare.py +501 -0
  65. affinity/downloads.py +114 -0
  66. affinity/exceptions.py +615 -0
  67. affinity/filters.py +1128 -0
  68. affinity/hooks.py +198 -0
  69. affinity/inbound_webhooks.py +302 -0
  70. affinity/models/__init__.py +163 -0
  71. affinity/models/entities.py +798 -0
  72. affinity/models/pagination.py +513 -0
  73. affinity/models/rate_limit_snapshot.py +48 -0
  74. affinity/models/secondary.py +413 -0
  75. affinity/models/types.py +663 -0
  76. affinity/policies.py +40 -0
  77. affinity/progress.py +22 -0
  78. affinity/py.typed +0 -0
  79. affinity/services/__init__.py +42 -0
  80. affinity/services/companies.py +1286 -0
  81. affinity/services/lists.py +1892 -0
  82. affinity/services/opportunities.py +1330 -0
  83. affinity/services/persons.py +1348 -0
  84. affinity/services/rate_limits.py +173 -0
  85. affinity/services/tasks.py +193 -0
  86. affinity/services/v1_only.py +2445 -0
  87. affinity/types.py +83 -0
  88. affinity_sdk-0.9.5.dist-info/METADATA +622 -0
  89. affinity_sdk-0.9.5.dist-info/RECORD +92 -0
  90. affinity_sdk-0.9.5.dist-info/WHEEL +4 -0
  91. affinity_sdk-0.9.5.dist-info/entry_points.txt +2 -0
  92. affinity_sdk-0.9.5.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,1236 @@
+ """Query executor.
+
+ Executes query plans by orchestrating SDK service calls.
+ This module is CLI-only and NOT part of the public SDK API.
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ import time
+ from collections.abc import Callable
+ from dataclasses import dataclass, field
+ from typing import TYPE_CHECKING, Any, Protocol
+
+ from .aggregates import apply_having, compute_aggregates, group_and_aggregate
+ from .exceptions import (
+     QueryExecutionError,
+     QueryInterruptedError,
+     QuerySafetyLimitError,
+     QueryTimeoutError,
+ )
+ from .filters import compile_filter, resolve_field_path
+ from .models import ExecutionPlan, PlanStep, Query, QueryResult
+ from .schema import SCHEMA_REGISTRY, FetchStrategy, get_relationship
+
+ if TYPE_CHECKING:
+     from affinity import AsyncAffinity
+     from affinity.models.pagination import PaginationProgress
+
+
+ # =============================================================================
+ # Field Projection Utilities
+ # =============================================================================
+
+
+ def _set_nested_value(target: dict[str, Any], path: str, value: Any) -> None:
+     """Set a value at a nested path in a dict.
+
+     Creates intermediate dicts as needed.
+
+     Args:
+         target: Dict to set value in
+         path: Dot-separated path like "fields.Status"
+         value: Value to set
+     """
+     parts = path.split(".")
+     current = target
+     for part in parts[:-1]:
+         if part not in current:
+             current[part] = {}
+         current = current[part]
+     current[parts[-1]] = value
+
+
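For orientation, a quick sketch of how this helper behaves (values are illustrative):

    d: dict[str, Any] = {}
    _set_nested_value(d, "fields.Status", "Active")
    # d == {"fields": {"Status": "Active"}}
    _set_nested_value(d, "fields.Owner", "Ada")
    # Intermediate dicts are reused:
    # d == {"fields": {"Status": "Active", "Owner": "Ada"}}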
+ def _normalize_list_entry_fields(record: dict[str, Any]) -> dict[str, Any]:
+     """Normalize list entry field values from API format to query-friendly format.
+
+     The Affinity API returns field values on the entity inside the list entry::
+
+         {"entity": {"fields": {"requested": true, "data": {
+             "field-123": {"name": "Status", "value": {...}}
+         }}}}
+
+     This function extracts them into a top-level fields dict keyed by field name::
+
+         {"fields": {"Status": "Active"}}
+
+     This allows paths like "fields.Status" to work in filters/groupBy/aggregates.
+     """
+     # Field data is on entity.fields.data, not directly on the list entry
+     entity = record.get("entity")
+     if not entity or not isinstance(entity, dict):
+         return record
+
+     fields_container = entity.get("fields")
+     if not fields_container or not isinstance(fields_container, dict):
+         return record
+
+     fields_data = fields_container.get("data")
+     if not fields_data or not isinstance(fields_data, dict):
+         return record
+
+     # Extract field values into a dict keyed by field name
+     normalized_fields: dict[str, Any] = {}
+     for _field_id, field_obj in fields_data.items():
+         if not isinstance(field_obj, dict):
+             continue
+
+         field_name = field_obj.get("name")
+         if not field_name:
+             continue
+
+         value_wrapper = field_obj.get("value")
+         if value_wrapper is None:
+             normalized_fields[field_name] = None
+             continue
+
+         if isinstance(value_wrapper, dict):
+             data = value_wrapper.get("data")
+             # Handle dropdown/ranked-dropdown with text value
+             if isinstance(data, dict) and "text" in data:
+                 normalized_fields[field_name] = data["text"]
+             # Handle multi-select (array of values)
+             elif isinstance(data, list):
+                 # Extract text from each item if it's a dropdown list
+                 extracted = []
+                 for item in data:
+                     if isinstance(item, dict) and "text" in item:
+                         extracted.append(item["text"])
+                     else:
+                         extracted.append(item)
+                 normalized_fields[field_name] = extracted
+             else:
+                 normalized_fields[field_name] = data
+         else:
+             normalized_fields[field_name] = value_wrapper
+
+     # Replace the complex fields structure with a simple dict keyed by name
+     if normalized_fields:
+         record["fields"] = normalized_fields
+
+     return record
+
+
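A minimal before/after sketch of the normalization, using the wrapper shapes shown in the docstring (field IDs and values are illustrative):

    entry = {"entity": {"fields": {"requested": True, "data": {
        "field-123": {"name": "Status", "value": {"data": {"text": "Active"}}},
        "field-456": {"name": "Tags", "value": {"data": [{"text": "B2B"}, {"text": "SaaS"}]}},
    }}}}
    _normalize_list_entry_fields(entry)
    # entry["fields"] == {"Status": "Active", "Tags": ["B2B", "SaaS"]}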
+ def _apply_select_projection(
+     records: list[dict[str, Any]], select: list[str]
+ ) -> list[dict[str, Any]]:
+     """Apply select clause projection to records.
+
+     Filters each record to only include fields specified in select.
+     Supports:
+     - Simple fields: "id", "firstName"
+     - Nested paths: "fields.Status", "address.city"
+     - Wildcard for fields: "fields.*" (includes all custom fields)
+
+     Args:
+         records: List of record dicts to project
+         select: List of field paths to include
+
+     Returns:
+         New list of projected records
+     """
+     if not select:
+         return records
+
+     # Check for fields.* wildcard - means include all fields
+     include_all_fields = "fields.*" in select
+     # Filter out the wildcard from paths to process
+     paths = [p for p in select if p != "fields.*"]
+
+     projected: list[dict[str, Any]] = []
+     for record in records:
+         new_record: dict[str, Any] = {}
+
+         # Apply explicit paths
+         for path in paths:
+             value = resolve_field_path(record, path)
+             if value is not None:
+                 _set_nested_value(new_record, path, value)
+
+         # Handle fields.* wildcard - copy entire fields dict
+         if include_all_fields and "fields" in record:
+             new_record["fields"] = record["fields"]
+
+         projected.append(new_record)
+
+     return projected
+
+
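A short sketch of the projection semantics (records are illustrative; resolve_field_path is assumed to follow the dot-path convention used throughout this module):

    records = [{"id": 1, "name": "Acme", "fields": {"Status": "Active"}}]
    _apply_select_projection(records, ["id", "fields.Status"])
    # -> [{"id": 1, "fields": {"Status": "Active"}}]   ("name" is dropped)
    _apply_select_projection(records, ["id", "fields.*"])
    # -> [{"id": 1, "fields": {"Status": "Active"}}]   (wildcard copies the whole fields dict)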
+ # =============================================================================
+ # Progress Callback Protocol
+ # =============================================================================
+
+
+ class QueryProgressCallback(Protocol):
+     """Protocol for query execution progress callbacks."""
+
+     def on_step_start(self, step: PlanStep) -> None:
+         """Called when a step starts."""
+         ...
+
+     def on_step_progress(self, step: PlanStep, current: int, total: int | None) -> None:
+         """Called during step execution with progress update."""
+         ...
+
+     def on_step_complete(self, step: PlanStep, records: int) -> None:
+         """Called when a step completes."""
+         ...
+
+     def on_step_error(self, step: PlanStep, error: Exception) -> None:
+         """Called when a step fails."""
+         ...
+
+
+ class NullProgressCallback:
+     """No-op progress callback."""
+
+     def on_step_start(self, step: PlanStep) -> None:
+         pass
+
+     def on_step_progress(self, step: PlanStep, current: int, total: int | None) -> None:
+         pass
+
+     def on_step_complete(self, step: PlanStep, records: int) -> None:
+         pass
+
+     def on_step_error(self, step: PlanStep, error: Exception) -> None:
+         pass
+
+
+ # =============================================================================
+ # Execution Context
+ # =============================================================================
+
+
+ @dataclass
+ class ExecutionContext:
+     """Tracks state during query execution."""
+
+     query: Query
+     records: list[dict[str, Any]] = field(default_factory=list)
+     included: dict[str, list[dict[str, Any]]] = field(default_factory=dict)
+     relationship_counts: dict[str, dict[int, int]] = field(default_factory=dict)
+     current_step: int = 0
+     start_time: float = field(default_factory=time.time)
+     max_records: int = 10000
+     interrupted: bool = False
+     resolved_where: dict[str, Any] | None = None  # Where clause with resolved names
+     warnings: list[str] = field(default_factory=list)  # Warnings collected during execution
+     has_client_side_filter: bool = False  # True if plan has client-side filter step
+
+     def check_timeout(self, timeout: float) -> None:
+         """Check if execution has exceeded timeout."""
+         elapsed = time.time() - self.start_time
+         if elapsed > timeout:
+             raise QueryTimeoutError(
+                 f"Query execution exceeded timeout of {timeout}s",
+                 timeout_seconds=timeout,
+                 elapsed_seconds=elapsed,
+                 partial_results=self.records,
+             )
+
+     def check_max_records(self) -> None:
+         """Check if max records limit has been reached."""
+         if len(self.records) >= self.max_records:
+             raise QuerySafetyLimitError(
+                 f"Query would exceed maximum of {self.max_records} records",
+                 limit_name="max_records",
+                 limit_value=self.max_records,
+                 estimated_value=len(self.records),
+             )
+
+     def build_result(self) -> QueryResult:
+         """Build final query result.
+
+         Applies select clause projection if specified in the query.
+         """
+         from ..results import ResultSummary
+
+         # Apply select projection if specified
+         data = self.records
+         if self.query.select:
+             data = _apply_select_projection(self.records, self.query.select)
+
+         # Build included counts for summary
+         included_counts: dict[str, int] | None = None
+         if self.included:
+             included_counts = {k: len(v) for k, v in self.included.items() if v}
+             if not included_counts:
+                 included_counts = None
+
+         return QueryResult(
+             data=data,
+             included=self.included,
+             summary=ResultSummary(
+                 total_rows=len(data),
+                 included_counts=included_counts,
+             ),
+             meta={
+                 "executionTime": time.time() - self.start_time,
+                 "interrupted": self.interrupted,
+             },
+             warnings=self.warnings,
+         )
+
+
+ # =============================================================================
+ # Query Executor
+ # =============================================================================
+
+
+ class QueryExecutor:
+     """Executes query plans using SDK services.
+
+     This class orchestrates SDK service calls to execute structured queries.
+     It is CLI-specific and NOT part of the public SDK API.
+     """
+
+     def __init__(
+         self,
+         client: AsyncAffinity,
+         *,
+         progress: QueryProgressCallback | None = None,
+         concurrency: int = 10,
+         max_records: int = 10000,
+         timeout: float = 300.0,
+         allow_partial: bool = False,
+     ) -> None:
+         """Initialize the executor.
+
+         Args:
+             client: AsyncAffinity client for API calls
+             progress: Optional progress callback
+             concurrency: Max concurrent API calls for N+1 operations
+             max_records: Safety limit on total records
+             timeout: Total execution timeout in seconds
+             allow_partial: If True, return partial results on interruption
+         """
+         self.client = client
+         self.progress = progress or NullProgressCallback()
+         self.concurrency = concurrency
+         self.max_records = max_records
+         self.timeout = timeout
+         self.allow_partial = allow_partial
+         self.semaphore = asyncio.Semaphore(concurrency)
+
+     async def execute(self, plan: ExecutionPlan) -> QueryResult:
+         """Execute a query plan.
+
+         Args:
+             plan: The execution plan to run
+
+         Returns:
+             QueryResult with data and included records
+
+         Raises:
+             QueryExecutionError: If execution fails
+             QueryInterruptedError: If interrupted (Ctrl+C)
+             QueryTimeoutError: If timeout exceeded
+             QuerySafetyLimitError: If max_records exceeded
+         """
+         # Check if plan has client-side filter step
+         has_filter_step = any(step.operation == "filter" for step in plan.steps)
+
+         ctx = ExecutionContext(
+             query=plan.query,
+             max_records=self.max_records,
+             has_client_side_filter=has_filter_step,
+         )
+
+         try:
+             # Verify auth before starting
+             await self._verify_auth()
+
+             # Execute steps in dependency order
+             for step in plan.steps:
+                 ctx.current_step = step.step_id
+                 ctx.check_timeout(self.timeout)
+
+                 self.progress.on_step_start(step)
+
+                 try:
+                     await self._execute_step(step, ctx)
+                     self.progress.on_step_complete(step, len(ctx.records))
+                 except Exception as e:
+                     self.progress.on_step_error(step, e)
+                     raise
+
+             return ctx.build_result()
+
+         except KeyboardInterrupt:
+             ctx.interrupted = True
+             if self.allow_partial and ctx.records:
+                 return ctx.build_result()
+             raise QueryInterruptedError(
+                 f"Query interrupted at step {ctx.current_step}. "
+                 f"{len(ctx.records)} records fetched before interruption.",
+                 step_id=ctx.current_step,
+                 records_fetched=len(ctx.records),
+                 partial_results=ctx.records,
+             ) from None
+
+     async def _verify_auth(self) -> None:
+         """Verify client is authenticated."""
+         try:
+             await self.client.whoami()
+         except Exception as e:
+             raise QueryExecutionError(
+                 "Authentication failed. Check your API key before running queries.",
+                 cause=e,
+             ) from None
+
+     async def _execute_step(self, step: PlanStep, ctx: ExecutionContext) -> None:
+         """Execute a single plan step."""
+         if step.operation == "fetch":
+             await self._execute_fetch(step, ctx)
+         elif step.operation == "filter":
+             self._execute_filter(step, ctx)
+         elif step.operation == "include":
+             await self._execute_include(step, ctx)
+         elif step.operation == "aggregate":
+             self._execute_aggregate(step, ctx)
+         elif step.operation == "sort":
+             self._execute_sort(step, ctx)
+         elif step.operation == "limit":
+             self._execute_limit(step, ctx)
+
+     async def _execute_fetch(self, step: PlanStep, ctx: ExecutionContext) -> None:
+         """Execute a fetch step.
+
+         Routes to appropriate fetch strategy based on schema configuration.
+         """
+         if step.entity is None:
+             raise QueryExecutionError("Fetch step missing entity", step=step)
+
+         schema = SCHEMA_REGISTRY.get(step.entity)
+         if schema is None:
+             raise QueryExecutionError(f"Unknown entity: {step.entity}", step=step)
+
+         try:
+             match schema.fetch_strategy:
+                 case FetchStrategy.GLOBAL:
+                     await self._fetch_global(step, ctx, schema)
+                 case FetchStrategy.REQUIRES_PARENT:
+                     await self._fetch_with_parent(step, ctx, schema)
+                 case FetchStrategy.RELATIONSHIP_ONLY:
+                     # Should never reach here - parser rejects these
+                     raise QueryExecutionError(
+                         f"'{step.entity}' cannot be queried directly. "
+                         "This should have been caught at parse time.",
+                         step=step,
+                     )
+         except QueryExecutionError:
+             raise
+         except Exception as e:
+             raise QueryExecutionError(
+                 f"Failed to fetch {step.entity}: {e}",
+                 step=step,
+                 cause=e,
+                 partial_results=ctx.records,
+             ) from None
+
+     async def _fetch_global(
+         self,
+         step: PlanStep,
+         ctx: ExecutionContext,
+         schema: Any,
+     ) -> None:
+         """Fetch entities that support global iteration (service.all())."""
+         service = getattr(self.client, schema.service_attr)
+
+         def on_progress(p: PaginationProgress) -> None:
+             self.progress.on_step_progress(step, p.items_so_far, None)
+
+         async for page in service.all().pages(on_progress=on_progress):
+             for record in page.data:
+                 record_dict = record.model_dump(mode="json", by_alias=True)
+                 ctx.records.append(record_dict)
+
+                 if self._should_stop(ctx):
+                     return
+
+     async def _fetch_with_parent(
+         self,
+         step: PlanStep,
+         ctx: ExecutionContext,
+         schema: Any,
+     ) -> None:
+         """Fetch entities that require a parent ID filter.
+
+         Uses schema configuration to determine:
+         - Which field to extract from the where clause (parent_filter_field)
+         - What type to cast the ID to (parent_id_type)
+         - Which method to call on the parent service (parent_method_name)
+
+         Supports OR/IN conditions by extracting ALL parent IDs and fetching from each
+         in parallel, merging results.
+         """
+         # Resolve name-based lookups BEFORE extracting parent IDs
+         where = ctx.query.where
+         if where is not None:
+             # Convert WhereClause to dict for resolution
+             where_as_dict: dict[str, Any] = (
+                 where.model_dump(mode="json", by_alias=True)
+                 if hasattr(where, "model_dump")
+                 else where  # type: ignore[assignment]
+             )
+             where_dict = await self._resolve_list_names_to_ids(where_as_dict)
+         else:
+             where_dict = None
+
+         # Extract ALL parent IDs from where clause (supports OR/IN conditions)
+         parent_ids = self._extract_parent_ids(where_dict, schema.parent_filter_field)
+
+         # Store resolved where for use in filtering step
+         # NOTE: We store BEFORE field name→ID resolution because:
+         # - The normalized records have fields keyed by NAME (e.g., "Status")
+         # - Field ID resolution is only for the API call, not client-side filtering
+         if where_dict is not None:
+             ctx.resolved_where = where_dict
+
+         # Resolve field names to IDs for listEntries queries (after we know parent IDs)
+         # This is only used for the API call, NOT for client-side filtering
+         if where_dict is not None and parent_ids:
+             where_dict = await self._resolve_field_names_to_ids(where_dict, parent_ids)
+         if not parent_ids:
+             # Should never happen - parser validates this
+             raise QueryExecutionError(
+                 f"Query for '{step.entity}' requires a '{schema.parent_filter_field}' filter.",
+                 step=step,
+             )
+
+         # Get the parent service (e.g., client.lists)
+         parent_service = getattr(self.client, schema.service_attr)
+
+         # Cast all IDs to typed IDs if configured
+         if schema.parent_id_type:
+             from affinity import types as affinity_types
+
+             id_type = getattr(affinity_types, schema.parent_id_type)
+             parent_ids = [id_type(pid) for pid in parent_ids]
+
+         nested_method = getattr(parent_service, schema.parent_method_name)
+
+         # Resolve field_ids for listEntries queries
+         # This auto-detects which custom fields are referenced in the query
+         field_ids: list[str] | None = None
+         if step.entity == "listEntries" and parent_ids:
+             # Use first parent_id to get field metadata (all lists in an OR should have same fields)
+             # parent_ids are already typed IDs, extract the raw int
+             raw_parent_id = (
+                 parent_ids[0].value if hasattr(parent_ids[0], "value") else int(parent_ids[0])
+             )
+             field_ids = await self._resolve_field_ids_for_list_entries(ctx, raw_parent_id)
+
+         # For single parent ID, use simple sequential fetch
+         if len(parent_ids) == 1:
+             await self._fetch_from_single_parent(
+                 step, ctx, nested_method, parent_ids[0], field_ids=field_ids
+             )
+             return
+
+         # For multiple parent IDs, fetch in parallel
+         async def fetch_from_parent(parent_id: Any) -> list[dict[str, Any]]:
+             """Fetch all records from a single parent."""
+             nested_service = nested_method(parent_id)
+             results: list[dict[str, Any]] = []
+
+             # Try paginated iteration first
+             if hasattr(nested_service.all(), "pages"):
+                 # Build pages() kwargs, including field_ids if provided
+                 pages_kwargs: dict[str, Any] = {}
+                 if field_ids is not None:
+                     pages_kwargs["field_ids"] = field_ids
+
+                 async for page in nested_service.all().pages(**pages_kwargs):
+                     for record in page.data:
+                         results.append(record.model_dump(mode="json", by_alias=True))
+             else:
+                 async for record in nested_service.all():
+                     results.append(record.model_dump(mode="json", by_alias=True))
+
+             return results
+
+         # Execute all fetches in parallel
+         all_results = await asyncio.gather(*[fetch_from_parent(pid) for pid in parent_ids])
+
+         # Merge results, respecting limits
+         for results in all_results:
+             for record_dict in results:
+                 ctx.records.append(record_dict)
+                 if self._should_stop(ctx):
+                     return
+
+             # Report progress after each parent completes
+             self.progress.on_step_progress(step, len(ctx.records), None)
+
+     async def _fetch_from_single_parent(
+         self,
+         step: PlanStep,
+         ctx: ExecutionContext,
+         nested_method: Callable[..., Any],
+         parent_id: Any,
+         *,
+         field_ids: list[str] | None = None,
+     ) -> None:
+         """Fetch from a single parent with progress reporting.
+
+         Args:
+             step: The plan step being executed
+             ctx: Execution context
+             nested_method: Method to call with parent_id to get nested service
+             parent_id: The parent entity ID
+             field_ids: Optional list of field IDs to request for listEntries
+         """
+         nested_service = nested_method(parent_id)
+         items_fetched = 0
+
+         def on_progress(p: PaginationProgress) -> None:
+             nonlocal items_fetched
+             items_fetched = p.items_so_far
+             self.progress.on_step_progress(step, items_fetched, None)
+
+         # Check if service has a direct pages() method (e.g., AsyncListEntryService)
+         # This is preferred over all().pages() because it supports field_ids
+         if hasattr(nested_service, "pages") and callable(nested_service.pages):
+             # Build pages() kwargs
+             pages_kwargs: dict[str, Any] = {}
+             if field_ids is not None:
+                 pages_kwargs["field_ids"] = field_ids
+
+             async for page in nested_service.pages(**pages_kwargs):
+                 for record in page.data:
+                     record_dict = record.model_dump(mode="json", by_alias=True)
+                     # Normalize list entry fields for query-friendly access
+                     record_dict = _normalize_list_entry_fields(record_dict)
+                     ctx.records.append(record_dict)
+                     items_fetched += 1
+                     if self._should_stop(ctx):
+                         return
+                 # Report progress after each page
+                 self.progress.on_step_progress(step, items_fetched, None)
+
+         # Try all().pages() for services that return PageIterator from all()
+         elif hasattr(nested_service.all(), "pages"):
+             pages_kwargs = {"on_progress": on_progress}
+             if field_ids is not None:
+                 pages_kwargs["field_ids"] = field_ids
+
+             async for page in nested_service.all().pages(**pages_kwargs):
+                 for record in page.data:
+                     record_dict = record.model_dump(mode="json", by_alias=True)
+                     # Normalize list entry fields for query-friendly access
+                     record_dict = _normalize_list_entry_fields(record_dict)
+                     ctx.records.append(record_dict)
+                     if self._should_stop(ctx):
+                         return
+         else:
+             # Fall back to async iteration for services without pages()
+             all_kwargs: dict[str, Any] = {}
+             if field_ids is not None:
+                 all_kwargs["field_ids"] = field_ids
+
+             async for record in nested_service.all(**all_kwargs):
+                 record_dict = record.model_dump(mode="json", by_alias=True)
+                 # Normalize list entry fields for query-friendly access
+                 record_dict = _normalize_list_entry_fields(record_dict)
+                 ctx.records.append(record_dict)
+                 items_fetched += 1
+
+                 if items_fetched % 100 == 0:
+                     self.progress.on_step_progress(step, items_fetched, None)
+
+                 if self._should_stop(ctx):
+                     return
+
+     def _should_stop(self, ctx: ExecutionContext) -> bool:
+         """Check if we should stop fetching.
+
+         The limit is only applied during fetch when there's NO client-side filter.
+         If there's a client-side filter, we must fetch all records first, then
+         filter, then apply limit - otherwise we might stop fetching before finding
+         any matching records.
+         """
+         if len(ctx.records) >= ctx.max_records:
+             return True
+         # Only apply limit during fetch if there's no client-side filter
+         if ctx.has_client_side_filter:
+             return False
+         return bool(ctx.query.limit and len(ctx.records) >= ctx.query.limit)
+
+     def _extract_parent_ids(self, where: Any, field_name: str | None) -> list[int]:
+         """Extract ALL parent ID values from where clause.
+
+         Handles all condition types:
+         - Direct eq: {"path": "listId", "op": "eq", "value": 12345}
+         - Direct eq (string): {"path": "listId", "op": "eq", "value": "12345"}
+         - Direct in: {"path": "listId", "op": "in", "value": [123, 456, 789]}
+         - AND: {"and": [{"path": "listId", "op": "eq", "value": 123}, ...]}
+         - OR: {"or": [{"path": "listId", "op": "eq", "value": 123},
+                       {"path": "listId", "op": "eq", "value": 456}]}
+
+         Accepts both integer and string IDs (strings are converted to int).
+         Returns deduplicated list of all parent IDs found.
+         """
+         if where is None or field_name is None:
+             return []
+
+         if hasattr(where, "model_dump"):
+             where = where.model_dump(mode="json", by_alias=True)
+
+         if not isinstance(where, dict):
+             return []
+
+         def to_int(value: Any) -> int | None:
+             """Convert value to int, supporting both int and numeric strings."""
+             if isinstance(value, int):
+                 return value
+             if isinstance(value, str):
+                 try:
+                     return int(value)
+                 except ValueError:
+                     return None
+             return None
+
+         ids: list[int] = []
+
+         # Direct condition with "eq" operator
+         if where.get("path") == field_name and where.get("op") == "eq":
+             value = where.get("value")
+             int_val = to_int(value)
+             if int_val is not None:
+                 ids.append(int_val)
+
+         # Direct condition with "in" operator (list of IDs)
+         if where.get("path") == field_name and where.get("op") == "in":
+             value = where.get("value")
+             if isinstance(value, list):
+                 for v in value:
+                     int_val = to_int(v)
+                     if int_val is not None:
+                         ids.append(int_val)
+
+         # Compound "and" conditions - traverse recursively
+         if where.get("and"):
+             for condition in where["and"]:
+                 ids.extend(self._extract_parent_ids(condition, field_name))
+
+         # Compound "or" conditions - traverse recursively
+         if where.get("or"):
+             for condition in where["or"]:
+                 ids.extend(self._extract_parent_ids(condition, field_name))
+
+         # NOTE: "not" clauses are intentionally NOT traversed.
+         # Negated parent filters are rejected by the parser.
+
+         # Deduplicate while preserving order
+         seen: set[int] = set()
+         unique_ids: list[int] = []
+         for id_ in ids:
+             if id_ not in seen:
+                 seen.add(id_)
+                 unique_ids.append(id_)
+
+         return unique_ids
+
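A sketch of the extraction across condition shapes (values illustrative, given a QueryExecutor instance named executor):

    where = {"or": [
        {"path": "listId", "op": "eq", "value": 123},
        {"path": "listId", "op": "in", "value": ["456", 123]},
    ]}
    executor._extract_parent_ids(where, "listId")
    # -> [123, 456]   (strings coerced to int, duplicates dropped, order preserved)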
+     def _collect_field_refs_from_query(self, query: Query) -> set[str]:
+         """Collect all fields.* references from the query.
+
+         Scans select, groupBy, aggregate, and where clauses for fields.* paths
+         and returns the set of field names (without the "fields." prefix).
+
+         Supports the "fields.*" wildcard which indicates all fields are needed.
+
+         Returns:
+             Set of field names referenced, or {"*"} if all fields are needed.
+         """
+         field_names: set[str] = set()
+
+         # Check for fields.* wildcard in select
+         if query.select:
+             for path in query.select:
+                 if path == "fields.*":
+                     return {"*"}  # Wildcard means all fields
+                 if path.startswith("fields."):
+                     field_names.add(path[7:])  # Remove "fields." prefix
+
+         # Collect from groupBy
+         if query.group_by:
+             if query.group_by == "fields.*":
+                 return {"*"}
+             if query.group_by.startswith("fields."):
+                 field_names.add(query.group_by[7:])
+
+         # Collect from aggregates
+         if query.aggregate:
+             for agg in query.aggregate.values():
+                 for attr in ["sum", "avg", "min", "max", "first", "last"]:
+                     field = getattr(agg, attr, None)
+                     if field and isinstance(field, str):
+                         if field == "fields.*":
+                             return {"*"}
+                         if field.startswith("fields."):
+                             field_names.add(field[7:])
+                 # Handle percentile which has nested structure
+                 if agg.percentile and isinstance(agg.percentile, dict):
+                     pct_field = agg.percentile.get("field", "")
+                     if pct_field == "fields.*":
+                         return {"*"}
+                     if pct_field.startswith("fields."):
+                         field_names.add(pct_field[7:])
+
+         # Collect from where clause (recursive)
+         if query.where:
+             where_dict = (
+                 query.where.model_dump(mode="json", by_alias=True)
+                 if hasattr(query.where, "model_dump")
+                 else query.where
+             )
+             self._collect_field_refs_from_where(where_dict, field_names)
+             if "*" in field_names:
+                 return {"*"}
+
+         return field_names
+
+     def _collect_field_refs_from_where(
+         self, where: dict[str, Any] | Any, field_names: set[str]
+     ) -> None:
+         """Recursively collect fields.* references from a where clause.
+
+         Args:
+             where: The where clause dict or sub-clause
+             field_names: Set to add field names to (modified in place)
+         """
+         if not isinstance(where, dict):
+             return
+
+         # Check if this is a direct condition with fields.* path
+         path = where.get("path", "")
+         if isinstance(path, str):
+             if path == "fields.*":
+                 field_names.add("*")
+                 return
+             if path.startswith("fields."):
+                 field_names.add(path[7:])
+
+         # Recurse into compound conditions
+         for key in ["and", "or", "and_", "or_"]:
+             if key in where and isinstance(where[key], list):
+                 for sub_clause in where[key]:
+                     self._collect_field_refs_from_where(sub_clause, field_names)
+
+         # Recurse into not clause
+         for key in ["not", "not_"]:
+             if key in where:
+                 self._collect_field_refs_from_where(where[key], field_names)
+
+     async def _resolve_field_ids_for_list_entries(
+         self,
+         ctx: ExecutionContext,
+         list_id: int,
+     ) -> list[str] | None:
+         """Resolve field names to IDs for listEntries queries.
+
+         Automatically detects which custom fields are referenced in the query
+         (in select, groupBy, aggregate, or where clauses) and requests them
+         from the API.
+
+         Args:
+             ctx: Execution context containing the query
+             list_id: The list ID to fetch field metadata from
+
+         Returns:
+             List of field IDs to request, or None if no custom fields needed.
+             If wildcard (fields.*) is used, returns all field IDs for the list.
+         """
+         from affinity.types import ListId
+
+         # Collect all field references from the query
+         field_names = self._collect_field_refs_from_query(ctx.query)
+
+         if not field_names:
+             # No field references in query - don't request any custom fields
+             # This avoids expensive API calls for lists with many fields
+             return None
+
+         # Ensure field name cache is populated for this list
+         if not hasattr(self, "_field_name_cache"):
+             self._field_name_cache: dict[str, dict[str, Any]] = {}
+
+         # Check if we need to fetch field metadata
+         cache_key = f"list_{list_id}"
+         if cache_key not in self._field_name_cache:
+             try:
+                 fields = await self.client.lists.get_fields(ListId(list_id))
+                 # Build a mapping of lowercase name -> field ID
+                 field_map: dict[str, str] = {}
+                 all_field_ids: list[str] = []
+                 for field in fields:
+                     if field.name:
+                         field_map[field.name.lower()] = str(field.id)
+                         all_field_ids.append(str(field.id))
+                 self._field_name_cache[cache_key] = {
+                     "by_name": field_map,
+                     "all_ids": all_field_ids,
+                 }
+             except Exception:
+                 # If we can't fetch fields, continue without custom field values
+                 return None
+
+         cache = self._field_name_cache[cache_key]
+
+         # Handle wildcard: return all field IDs
+         if "*" in field_names:
+             all_ids: list[str] = cache["all_ids"]
+             return all_ids
+
+         # Resolve specific field names to IDs
+         field_ids: list[str] = []
+         missing_fields: list[str] = []
+         for name in field_names:
+             field_id = cache["by_name"].get(name.lower())
+             if field_id is not None:
+                 field_ids.append(field_id)
+             else:
+                 missing_fields.append(name)
+
+         # Add warning for missing fields (don't break query - typos shouldn't fail)
+         if missing_fields:
+             available_fields = sorted(cache["by_name"].keys())
+             if len(missing_fields) == 1:
+                 ctx.warnings.append(
+                     f"Field 'fields.{missing_fields[0]}' not found on list. "
+                     f"Available fields: {', '.join(available_fields[:10])}"
+                     + ("..." if len(available_fields) > 10 else "")
+                 )
+             else:
+                 missing_str = ", ".join(f"fields.{f}" for f in missing_fields)
+                 available_str = ", ".join(available_fields[:10])
+                 suffix = "..." if len(available_fields) > 10 else ""
+                 ctx.warnings.append(
+                     f"Fields not found on list: {missing_str}. "
+                     f"Available fields: {available_str}{suffix}"
+                 )
+
+         return field_ids if field_ids else None
+
+     async def _resolve_list_names_to_ids(self, where: dict[str, Any]) -> dict[str, Any]:
+         """Resolve listName references to listId.
+
+         Transforms:
+             {"path": "listName", "op": "eq", "value": "My Deals"}
+         Into:
+             {"path": "listId", "op": "eq", "value": 12345}
+
+         Also handles:
+             {"path": "listName", "op": "in", "value": ["Deals", "Leads"]}
+
+         Cache behavior: The list name cache is populated once per QueryExecutor
+         instance. Since QueryExecutor is created fresh for each execute() call,
+         the cache is effectively per-query.
+         """
+         if not isinstance(where, dict):
+             return where
+
+         # Check if this is a listName condition
+         if where.get("path") == "listName":
+             names = where.get("value")
+             op = where.get("op")
+
+             # Fetch all lists once and cache
+             if not hasattr(self, "_list_name_cache"):
+                 self._list_name_cache: dict[str, int] = {}
+                 async for list_obj in self.client.lists.all():
+                     self._list_name_cache[list_obj.name] = list_obj.id
+
+             if op == "eq" and isinstance(names, str):
+                 list_id = self._list_name_cache.get(names)
+                 if list_id is None:
+                     raise QueryExecutionError(f"List not found: '{names}'")
+                 return {"path": "listId", "op": "eq", "value": list_id}
+
+             if op == "in" and isinstance(names, list):
+                 list_ids = []
+                 for name in names:
+                     list_id = self._list_name_cache.get(name)
+                     if list_id is None:
+                         raise QueryExecutionError(f"List not found: '{name}'")
+                     list_ids.append(list_id)
+                 return {"path": "listId", "op": "in", "value": list_ids}
+
+         # Recursively process compound conditions
+         result = dict(where)
+         if where.get("and"):
+             result["and"] = [await self._resolve_list_names_to_ids(c) for c in where["and"]]
+         if where.get("or"):
+             result["or"] = [await self._resolve_list_names_to_ids(c) for c in where["or"]]
+
+         return result
+
+     async def _resolve_field_names_to_ids(
+         self, where: dict[str, Any], list_ids: list[int]
+     ) -> dict[str, Any]:
+         """Resolve field name references to field IDs in fields.* paths.
+
+         Transforms:
+             {"path": "fields.Status", "op": "eq", "value": "Active"}
+         Into:
+             {"path": "fields.12345", "op": "eq", "value": "Active"}
+
+         Field names are resolved case-insensitively against the field definitions
+         for the specified list(s).
+
+         Args:
+             where: The where clause to transform
+             list_ids: List IDs to fetch field metadata from
+
+         Returns:
+             Transformed where clause with field names resolved to IDs
+         """
+         if not isinstance(where, dict) or not list_ids:
+             return where
+
+         # Build flat field name -> ID cache for all lists
+         if not hasattr(self, "_field_name_to_id_cache"):
+             self._field_name_to_id_cache: dict[str, str] = {}
+
+             from affinity.types import ListId
+
+             for list_id in list_ids:
+                 try:
+                     fields = await self.client.lists.get_fields(ListId(list_id))
+                     for field in fields:
+                         if field.name:
+                             # Map lowercase name to field ID
+                             self._field_name_to_id_cache[field.name.lower()] = str(field.id)
+                 except Exception:
+                     # If we can't fetch fields, continue without resolution
+                     pass
+
+         # Check if this is a fields.* condition
+         path = where.get("path", "")
+         if isinstance(path, str) and path.startswith("fields."):
+             field_ref = path[7:]  # Everything after "fields."
+
+             # Skip if already a field ID (numeric or "field-" prefix)
+             if not field_ref.isdigit() and not field_ref.startswith("field-"):
+                 # Try to resolve by name (case-insensitive)
+                 field_id = self._field_name_to_id_cache.get(field_ref.lower())
+                 if field_id is not None:
+                     result = dict(where)
+                     result["path"] = f"fields.{field_id}"
+                     return result
+
+         # Recursively process compound conditions
+         result = dict(where)
+         if where.get("and"):
+             result["and"] = [
+                 await self._resolve_field_names_to_ids(c, list_ids) for c in where["and"]
+             ]
+         if where.get("or"):
+             result["or"] = [
+                 await self._resolve_field_names_to_ids(c, list_ids) for c in where["or"]
+             ]
+
+         return result
+
+     def _execute_filter(self, _step: PlanStep, ctx: ExecutionContext) -> None:
+         """Execute a client-side filter step."""
+         from .models import WhereClause as WC
+
+         # Use resolved where clause if available (has listName → listId resolved)
+         where: WC | None
+         if ctx.resolved_where is not None:
+             # Convert dict back to WhereClause for compile_filter
+             where = WC.model_validate(ctx.resolved_where)
+         else:
+             where = ctx.query.where
+         if where is None:
+             return
+
+         filter_func = compile_filter(where)
+         ctx.records = [r for r in ctx.records if filter_func(r)]
+
+     async def _execute_include(self, step: PlanStep, ctx: ExecutionContext) -> None:
+         """Execute an include step (N+1 fetching)."""
+         if step.relationship is None or step.entity is None:
+             return
+
+         rel = get_relationship(step.entity, step.relationship)
+         if rel is None:
+             raise QueryExecutionError(
+                 f"Unknown relationship: {step.entity}.{step.relationship}",
+                 step=step,
+             )
+
+         included_records: list[dict[str, Any]] = []
+
+         if rel.fetch_strategy == "entity_method":
+             # N+1: fetch for each record
+             async def fetch_one(record: dict[str, Any]) -> list[dict[str, Any]]:
+                 async with self.semaphore:
+                     entity_id = record.get("id")
+                     if entity_id is None:
+                         return []
+
+                     service = getattr(self.client, step.entity or "")
+                     method = getattr(service, rel.method_or_service, None)
+                     if method is None:
+                         return []
+
+                     try:
+                         ids = await method(entity_id)
+                         # If we got IDs, we need to fetch the full records
+                         # For now, just store the IDs
+                         return [{"id": id_} for id_ in ids]
+                     except Exception:
+                         return []
+
+             # Execute in parallel with bounded concurrency
+             tasks = [fetch_one(r) for r in ctx.records]
+             results = await asyncio.gather(*tasks)
+
+             for result in results:
+                 included_records.extend(result)
+
+         elif rel.fetch_strategy == "global_service":
+             # Single filtered call per entity type
+             # This is more efficient than N+1
+             service = getattr(self.client, rel.method_or_service)
+
+             # Collect all entity IDs
+             entity_ids = [r.get("id") for r in ctx.records if r.get("id") is not None]
+
+             for entity_id in entity_ids:
+                 try:
+                     filter_kwargs = {rel.filter_field: entity_id}
+                     response = await service.list(**filter_kwargs)
+                     for item in response.data:
+                         included_records.append(item.model_dump(mode="json", by_alias=True))
+                 except Exception:
+                     continue
+
+         ctx.included[step.relationship] = included_records
+
+         # Update progress
+         self.progress.on_step_progress(step, len(included_records), None)
+
+     def _execute_aggregate(self, _step: PlanStep, ctx: ExecutionContext) -> None:
+         """Execute aggregation step."""
+         if ctx.query.aggregate is None:
+             return
+
+         if ctx.query.group_by is not None:
+             # Group and aggregate
+             results = group_and_aggregate(
+                 ctx.records,
+                 ctx.query.group_by,
+                 ctx.query.aggregate,
+             )
+
+             # Apply having if present
+             if ctx.query.having is not None:
+                 results = apply_having(results, ctx.query.having)
+
+             ctx.records = results
+         else:
+             # Simple aggregate (single result)
+             agg_result = compute_aggregates(ctx.records, ctx.query.aggregate)
+             ctx.records = [agg_result]
+
+     def _execute_sort(self, _step: PlanStep, ctx: ExecutionContext) -> None:
+         """Execute sort step."""
+         order_by = ctx.query.order_by
+         if order_by is None:
+             return
+
+         # Build sort key function
+         def sort_key(record: dict[str, Any]) -> tuple[Any, ...]:
+             keys: list[Any] = []
+             for order in order_by:
+                 value = resolve_field_path(record, order.field) if order.field else None
+
+                 # Handle None values (sort to end)
+                 if value is None:
+                     if order.direction == "asc":
+                         keys.append((1, None))
+                     else:
+                         keys.append((0, None))
+                 elif order.direction == "asc":
+                     keys.append((0, value))
+                 else:
+                     # Negate for desc, but handle non-numeric
+                     try:
+                         keys.append((0, -value))
+                     except TypeError:
+                         keys.append((0, value))
+
+             return tuple(keys)
+
+         # Sort with stable algorithm
+         try:
+             ctx.records.sort(key=sort_key)
+         except TypeError:
+             # Mixed types - fall back to string comparison
+             for order in reversed(order_by):
+                 reverse = order.direction == "desc"
+                 field = order.field or ""
+
+                 def make_key(f: str) -> Callable[[dict[str, Any]], str]:
+                     return lambda r: str(resolve_field_path(r, f) or "")
+
+                 ctx.records.sort(key=make_key(field), reverse=reverse)
+
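For intuition, the key tuples the inner sort_key closure produces (illustrative, for a single ascending order on "a"):

    # {"a": 5}    -> ((0, 5),)
    # {"a": None} -> ((1, None),)   None sorts after every (0, value) key
    # For descending order, values are negated to (0, -value); negating a
    # non-numeric value falls back to (0, value) inside the key, and a
    # TypeError during comparison triggers the string-based re-sort above.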
+     def _execute_limit(self, _step: PlanStep, ctx: ExecutionContext) -> None:
+         """Execute limit step."""
+         if ctx.query.limit is not None:
+             ctx.records = ctx.records[: ctx.query.limit]
+
+
+ # =============================================================================
+ # Convenience Function
+ # =============================================================================
+
+
+ async def execute_query(
+     client: AsyncAffinity,
+     plan: ExecutionPlan,
+     *,
+     progress: QueryProgressCallback | None = None,
+     concurrency: int = 10,
+     max_records: int = 10000,
+     timeout: float = 300.0,
+ ) -> QueryResult:
+     """Execute a query plan.
+
+     Convenience function that creates an executor and runs the plan.
+
+     Args:
+         client: AsyncAffinity client
+         plan: Execution plan
+         progress: Optional progress callback
+         concurrency: Max concurrent API calls
+         max_records: Safety limit
+         timeout: Execution timeout
+
+     Returns:
+         QueryResult
+     """
+     executor = QueryExecutor(
+         client,
+         progress=progress,
+         concurrency=concurrency,
+         max_records=max_records,
+         timeout=timeout,
+     )
+     return await executor.execute(plan)
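A hedged end-to-end sketch of driving this module (plan construction is elided; AsyncAffinity's context-manager usage and environment-based auth are assumptions, not confirmed by this file):

    import asyncio
    from affinity import AsyncAffinity

    async def main() -> None:
        async with AsyncAffinity() as client:
            plan = ...  # an ExecutionPlan produced by this package's planner
            result = await execute_query(client, plan, max_records=1000)
            # Prints the row count recorded in the result summary
            print(result.summary.total_rows)

    asyncio.run(main())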